{
"source": "jgao18/gimme-shelter",
"score": 2
}
#### File: gimme-shelter/eb-flask/application.py
```python
from flask import Flask, render_template, request, json, redirect, session
from flaskext.mysql import MySQL
from werkzeug.security import generate_password_hash, check_password_hash
import googlemaps.client
import copy
from datetime import datetime, date
app = Flask(__name__)
mysql = MySQL()
app.secret_key = 'secretkey'
# MySQL configurations
app.config['MYSQL_DATABASE_USER'] = 'root'
app.config['MYSQL_DATABASE_PASSWORD'] = 'password'
app.config['MYSQL_DATABASE_DB'] = 'InShelter'
app.config['MYSQL_DATABASE_HOST'] = 'localhost'
mysql.init_app(app)
@app.route("/")
def main():
if session.get('user'):
userInfo = {"id": session['id'], "username": session['user'], "firstName": session['fname'], "lastName": session['lname'], "location": session['location']}
return render_template('user-home.html', userInfo = userInfo)
else:
return render_template('index.html')
@app.route('/showOrgRegPage')
def showOrgRegPage():
return render_template('org-signup.html')
@app.route('/showUserNavPage')
def showUserNavPage():
if session.get('user'):
userInfo = {"id": session['id'], "username": session['user'], "firstName": session['fname'], "lastName": session['lname'], "location": session['location']}
return render_template('user-home.html', userInfo = userInfo)
else:
return render_template('index.html')
@app.route('/showOrgNavPage')
def showOrgNavPage():
if session.get('user'):
userInfo = {"id": session['id'], "username": session['user'], "orgName": session['orgname'],"location": session['location']}
return render_template('org-home.html', userInfo = userInfo)
else:
return render_template('org-signup.html')
@app.route('/showUserProfilePage')
def showUserProfilePage():
if session.get('user'):
userInfo = {"id": session['id'], "username": session['user'], "firstName": session['fname'], "lastName": session['lname'], "location": session['location']}
return render_template('user-profile.html', userInfo = userInfo)
else:
return render_template('index.html')
@app.route('/showOrgProfilePage')
def showOrgProfilePage():
if session.get('user'):
userInfo = {"id": session['id'], "username": session['user'], "orgName": session['orgname'],"location": session['location']}
return render_template('org-profile.html', userInfo = userInfo)
else:
return render_template('org-signup.html')
@app.route('/showErrorPage')
def showErrorPage():
return render_template('error.html')
@app.route('/signUp',methods=['POST','GET'])
def signUp():
# read the posted values from the UI
_firstName = request.form['inputFirstName']
_lastName = request.form['inputLastName']
_username = request.form['inputUsername']
_password = request.form['inputPassword']
_location = request.form['inputLocation']
# validate the received values
if _firstName and _lastName and _username and _password and _location:
json.dumps({'message':'User created successfully !'})
conn = mysql.connect()
cursor = conn.cursor()
_hashed_password = generate_password_hash(_password)
cursor.callproc('sp_createResident',(_firstName,_lastName,_username,_hashed_password,_location))
data = cursor.fetchall()
if len(data) == 0:
conn.commit()
return json.dumps({'message':'User created successfully !', 'username':_username, 'firstName':_firstName, 'lastName':_lastName,})
else:
return json.dumps({'message':str(data[0])})
else:
return json.dumps({'message':'missing fields'})
@app.route('/orgSignUp',methods=['POST','GET'])
def orgSignUp():
# read the posted values from the UI
_orgName = request.form['inputOrgName']
_username = request.form['inputUsername']
_password = request.form['inputPassword']
_location = request.form['inputLocation']
_beds = request.form['inputBeds']
# validate the received values
if _orgName and _username and _password and _location and _beds:
json.dumps({'message':'User created successfully !'})
print "hello"
conn = mysql.connect()
cursor = conn.cursor()
_hashed_password = generate_password_hash(_password)
cursor.callproc('sp_createOrg',(_orgName,_username,_hashed_password,_location,_beds))
data = cursor.fetchall()
if len(data) == 0:
conn.commit()
return json.dumps({'message':'User created successfully !', 'username':_username, 'orgName':_orgName,})
else:
return json.dumps({'message':str(data[0])})
else:
return json.dumps({'message':'missing fields'})
@app.route('/validateLogin',methods=['POST'])
def validateLogin():
try:
_username = request.form['loginUsername']
_password = request.form['loginPassword']
con = mysql.connect()
cursor = con.cursor()
cursor.callproc('sp_validateUserLogin',(_username,))
data = cursor.fetchall()
if len(data) > 0:
if check_password_hash(str(data[0][2]),_password):
session['id'] = data[0][0]
session['user'] = data[0][1]
session['fname'] = data[0][3]
session['lname'] = data[0][4]
session['location'] = data[0][5]
return json.dumps({'message':'success'})
else:
return json.dumps({'message':'error'})
else:
return json.dumps({'message':'error'})
except Exception as e:
return json.dumps({'message':'error'})
finally:
cursor.close()
con.close()
@app.route('/validateOrgLogin',methods=['POST'])
def validateOrgLogin():
try:
_username = request.form['loginUsername']
_password = request.form['loginPassword']
con = mysql.connect()
cursor = con.cursor()
cursor.callproc('sp_validateOrgLogin',(_username,))
data = cursor.fetchall()
if len(data) > 0:
if check_password_hash(str(data[0][2]),_password):
session['id'] = data[0][0]
session['user'] = data[0][1]
session['orgname'] = data[0][3]
session['location'] = data[0][5]
return json.dumps({'message':'success'})
else:
return json.dumps({'message':'error'})
else:
return json.dumps({'message':'error'})
except Exception as e:
return json.dumps({'message':'error'})
finally:
cursor.close()
con.close()
@app.route('/logout')
def logout():
session.pop('user',None)
return redirect('/')
@app.route('/saveUserProfile',methods=['POST','GET'])
def saveUserProfile():
_firstName = request.form['uFirstName'] or None
_lastName = request.form['uLastName'] or None
_language = request.form['uLanguage'] or None
_email = request.form['uEmail'] or None
_birthday = request.form['uBdayY'] + "-" + request.form['uBdayM'] + "-" + request.form['uBdayD'] or None
_gender = request.form['uGender'][0] or None
_ssn = request.form['uSSN1'] + request.form['uSSN2'] + request.form['uSSN3'] or None
_phone = request.form['uPhone1'] + request.form['uPhone2'] + request.form['uPhone3'] or None
_location = request.form['uLocation'] or None
_sleepout = int(request.form['sleepout'])
_vethl = int(request.form['vethl'])
_eserve = int(request.form['eserve'])
_harm = int(request.form['harm'])
_legal = int(request.form['legal'])
_exploit = int(request.form['exploit'])
_money = int(request.form['money'])
_meaning = int(request.form['meaning'])
_selfcare = int(request.form['selfcare'])
_social = int(request.form['social'])
_physical = int(request.form['physical'])
_substance = int(request.form['substance'])
_mental = int(request.form['mental'])
_medication = int(request.form['medication'])
_abuse = int(request.form['abuse'])
# validate the received values
if True:
if _birthday == '--':
_birthday = None
else:
_birthday = datetime.strptime(_birthday, '%Y-%m-%d')
if _ssn != None:
_ssn = int(_ssn)
if _phone != None:
_phone = int(_phone)
conn = mysql.connect()
cursor = conn.cursor()
cursor.callproc('sp_saveProfile',(session['user'], _firstName,_lastName,_language,_email,_birthday,_gender,_ssn,_phone,_location,_sleepout,_vethl,_eserve,_harm,_legal,_exploit,_money,_meaning,_selfcare,_social,_physical,_substance,_mental,_medication,_abuse))
data = cursor.fetchall()
if len(data) == 0:
conn.commit()
return json.dumps({'message':'User modified successfully!'})
else:
return json.dumps({'message':str(data[0])})
else:
return json.dumps({'message':'errors'})
# MATCH CODE
def getPersonLocation(personId):
conn = mysql.connect()
cursor = conn.cursor()
cursor.execute("SELECT Location FROM Resident WHERE Id='" + str(personId) + "';")
return cursor.fetchone()[0]
def calculate_age(born):
today = date.today()
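# the tuple comparison below is 1 when the birthday has not yet occurred this year,
# so it subtracts one from the raw year difference (standard age arithmetic)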
return today.year - born.year - ((today.month, today.day) < (born.month, born.day))
def getElegibleShelters(personId):
conn = mysql.connect()
cursor = conn.cursor()
cursor.execute("SELECT DoB, Sex, Sleep_outside, Veteran_homeless, Emergency_service, Harm, Legal, Exploitation, Money, Meaningful, Self_care, Social, Physical, Substance, Mental, Medication, Abuse FROM Resident where Id = '" + str(personId) + "';")
personStats = cursor.fetchone()
dob = personStats[0]
sex = personStats[1]
personStats = personStats[2:]
if dob != None and calculate_age(dob) >= 60:
personStats = personStats + (1,)
else:
personStats = personStats + (0,)
cursor = conn.cursor()
cursor.execute("SELECT Id, Takes_men, Takes_women, beds_avail, Sleep_outside, Veteran_homeless, Emergency_service, Harm, Legal, Exploitation, Money, Meaningful, Self_care, Social, Physical, Substance, Mental, Medication, Abuse, Elderly FROM Shelter")
shelterStats = cursor.fetchall()
elegible = []
for shelterStat in shelterStats:
isElegible = True
if sex == 'm':
if shelterStat[1] != 1:
isElegible = False
elif sex == 'f':
if shelterStat[2] != 1:
isElegible = False
if shelterStat[3] <= 0:
isElegible = False
# shelter criteria start at column 4 of the Shelter row, while personStats starts
# at its first criterion, so the two indices are offset by four
for i in range(len(personStats)):
if shelterStat[i + 4] != 2 and shelterStat[i + 4] is not None and personStats[i] is not None: # 2 is the don't-care value
isElegible = isElegible and shelterStat[i + 4] == personStats[i]
if isElegible:
elegible.append(shelterStat)
return [x[0] for x in elegible]
def getShelterInfo(shelterIds):
conn = mysql.connect()
cursor = conn.cursor()
shelterLocs = []
for i in range(0, len(shelterIds)):
cursor.execute("SELECT name, addr, beds_avail, closes_by FROM Shelter WHERE Id=" + str(shelterIds[i]) + ";")
tup = cursor.fetchone()
lst = list(tup)
lst[3] = str(lst[3])
tup = tuple(lst)
shelterLocs.append(tup)
return shelterLocs
def convertGmapsData(result, numberOfAddresses):
rowsArray = result['rows']
distanceValues = [0 for x in range(numberOfAddresses)]
distanceText = copy.deepcopy(distanceValues)
for i in range(0, numberOfAddresses, 1):
distanceValues[i] = rowsArray[0]['elements'][i]['distance']['value']
distanceText[i] = rowsArray[0]['elements'][i]['distance']['text']
return [distanceValues, distanceText]
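# Shape assumed above for the Distance Matrix response (one origin, N destinations):
#   {"status": "OK",
#    "rows": [{"elements": [{"distance": {"value": 1234, "text": "1.2 km"}, ...}, ...]}]}
# i.e. rows[0]["elements"][i] holds the distance to the i-th destination address.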
@app.route('/match',methods=['POST','GET'])
def match():
personId = session['id'];
personLocation = getPersonLocation(personId)
shelters = getElegibleShelters(personId)
shelterInfo = getShelterInfo(shelters)
gmaps = googlemaps.Client(key='<KEY>')
y = [str(x[1]) for x in shelterInfo]
result = gmaps.distance_matrix(personLocation, y)
if (result['status'] != 'OK'):
print("Query failed: returned status" + result['status'])
exit()
rowsDict = result['rows'][0]
convertedData = convertGmapsData(result, len(shelters))
new = []
for i in range(0, len(shelters)):
new.append((convertedData[0][i], shelters[i], convertedData[1][i]) + shelterInfo[i])
new = sorted(new, key=lambda x: x[0])
#new = [x[1:] for x in new]
print(new)
userInfo = {"id": session['id'], "username": session['user'], "firstName": session['fname'], "lastName": session['lname'], "location": session['location']}
return render_template('user-shelters.html', userInfo = userInfo, shelterInfo = new)
# get other relevant data about shelters and package for Flask display (unknown)
@app.route('/reserve',methods=['POST','GET'])
def reserve():
_shelterID = request.form['shelterno']
print(_shelterID)
conn = mysql.connect()
cursor = conn.cursor()
cursor.callproc('sp_createReservation',(session['id'], _shelterID, datetime.today()))
data = cursor.fetchall()
if len(data) == 0:
conn.commit()
return json.dumps({'message':'User modified successfully!'})
else:
return json.dumps({'message':str(data[0])})
if __name__ == "__main__":
app.run()
```
{
"source": "jgarabedian/blm-twitter",
"score": 3
}
#### File: blm-twitter/components/tweetcard.py
```python
import dash_html_components as html
import dash_bootstrap_components as dbc
import pandas as pd
def create_deck(df: pd.DataFrame, city: str) -> list:
"""
Create a deck of tweet cards.
:param df: dataframe containing the tweets
:param city: name of the city to filter on
:return: list of list-group items (child elements)
"""
if city is not None:
df = df[df['city'] == city]
list_items = []
for index, row in df.iterrows():
if row['sentiment'] == 'positive':
text_color = "text-danger"
elif row['sentiment'] == 'neutral':
text_color = "text-light"
else:
text_color = "text-info"
tweet = dbc.ListGroupItem(
children=[
html.Div(
className="tweetcard",
children=[
html.Div(
className="tweetcard__icon",
children=[
html.Img(
src="/assets/images/Twitter_Logo_Blue.png",
className="tweetcard__icon__logo"
)
]
),
html.Div(
className="tweetcard__content text-wrap",
children=[
row['text'],
html.Div(
children=[
row['city'], html.Div(
className=text_color,
children=[row['sentiment']])
],
className="tweetcard__content__meta font-italic text-secondary"
),
]
)
]
)
],
action=True,
className="text-break text-wrap",
)
list_items.append(tweet)
return list_items
```
{
"source": "jgarabedian/starlink-updates",
"score": 3
}
#### File: jgarabedian/starlink-updates/TwitterApi.py
```python
import tweepy
from geopy.geocoders import Nominatim
import time
from dotenv import load_dotenv
import os
from pprint import pprint
load_dotenv()
def limit_handler(cursor):
try:
while True:
try:
yield cursor.next()
except StopIteration:
return
except tweepy.RateLimitError:
time.sleep(1000)
def get_geocode(search, radius=50):
geolocator = Nominatim(user_agent="starlink-updates")
location = geolocator.geocode(search)
return f'{location.latitude},{location.longitude},{radius}km'
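# The returned string uses the "lat,lng,radius" form expected by Twitter's search
# geocode parameter, e.g. (illustrative values) "51.50,-0.12,50km".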
class TwitterClient:
def __init__(self):
consumer_key = os.getenv("KEY")
consumer_secret = os.getenv("SECRET")
ACCESS_TOKEN = os.getenv("ACCESS_TOKEN")
ACCESS_TOKEN_SECRET = os.getenv("ACCESS_TOKEN_SECRET")
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
self.api = tweepy.API(auth)
def search_tweets(self, location_search=None, radius=None):
if location_search:
geocode = get_geocode(location_search, radius)
else:
geocode=None
tweets = []
tweet_text = []
for tweet in limit_handler(tweepy.Cursor(self.api.search, 'Starlink', geocode=geocode, lang='en', truncated=False).items(30)):
try:
# pprint(tweet)
if tweet.retweet_count > 0:
# keep only one copy of each retweeted text
if tweet.text not in tweet_text:
tweet_text.append(tweet.text)
tweets.append(tweet)
else:
tweet_text.append(tweet.text)
tweets.append(tweet)
except StopIteration:
return tweets
except tweepy.TweepError as e:
print(e.reason)
# tweets = self.api.search('Starlink', count=2, truncated=False)
return tweets
if __name__ == "__main__":
twitter = TwitterClient()
tweets = twitter.search_tweets('United Kingdom')
print(len(tweets))
```
{
"source": "jgarber623/openstates.org",
"score": 2
}
#### File: management/commands/export_csv.py
```python
import csv
import datetime
import tempfile
import zipfile
import boto3
from django.core.management.base import BaseCommand
from django.db.models import F
from opencivicdata.legislative.models import (
LegislativeSession,
Bill,
BillAbstract,
BillAction,
BillTitle,
BillIdentifier,
RelatedBill,
BillSponsorship,
BillDocument,
BillVersion,
BillDocumentLink,
BillVersionLink,
BillSource,
VoteEvent,
PersonVote,
VoteCount,
VoteSource,
)
from ...models import DataExport
from utils.common import abbr_to_jid
def export_csv(filename, data, zf):
num = len(data)
if not num:
return
headers = data[0].keys()
with tempfile.NamedTemporaryFile("w") as f:
print("writing", filename, num, "records")
of = csv.DictWriter(f, headers)
of.writeheader()
of.writerows(data)
f.flush()
zf.write(f.name, filename)
return num
def export_session(state, session):
sobj = LegislativeSession.objects.get(
jurisdiction_id=abbr_to_jid(state), identifier=session
)
bills = Bill.objects.filter(legislative_session=sobj).values(
"id",
"identifier",
"title",
"classification",
"subject",
session_identifier=F("legislative_session__identifier"),
jurisdiction=F("legislative_session__jurisdiction__name"),
organization_classification=F("from_organization__classification"),
)
if not bills.count():
print(f"no bills for {state} {session}")
return
filename = f"{state}_{session}.zip"
zf = zipfile.ZipFile(filename, "w")
ts = datetime.datetime.utcnow()
zf.writestr(
"README",
f"""Open States Data Export
State: {state}
Session: {session}
Generated At: {ts}
CSV Format Version: 2.0
""",
)
export_csv(f"{state}/{session}/{state}_{session}_bills.csv", bills, zf)
for Model, fname in (
(BillAbstract, "bill_abstracts"),
(BillTitle, "bill_titles"),
(BillIdentifier, "bill_identifiers"),
(BillAction, "bill_actions"),
(BillSource, "bill_sources"),
(RelatedBill, "bill_related_bills"),
(BillSponsorship, "bill_sponsorships"),
(BillDocument, "bill_documents"),
(BillVersion, "bill_versions"),
):
subobjs = Model.objects.filter(bill__legislative_session=sobj).values()
export_csv(f"{state}/{session}/{state}_{session}_{fname}.csv", subobjs, zf)
subobjs = BillDocumentLink.objects.filter(
document__bill__legislative_session=sobj
).values()
export_csv(
f"{state}/{session}/{state}_{session}_bill_document_links.csv", subobjs, zf
)
subobjs = BillVersionLink.objects.filter(
version__bill__legislative_session=sobj
).values()
export_csv(
f"{state}/{session}/{state}_{session}_bill_version_links.csv", subobjs, zf
)
# TODO: BillActionRelatedEntity
# Votes
votes = VoteEvent.objects.filter(legislative_session=sobj).values(
"id",
"identifier",
"motion_text",
"motion_classification",
"start_date",
"result",
"organization_id",
"bill_id",
"bill_action_id",
jurisdiction=F("legislative_session__jurisdiction__name"),
session_identifier=F("legislative_session__identifier"),
)
export_csv(f"{state}/{session}/{state}_{session}_votes.csv", votes, zf)
for Model, fname in (
(PersonVote, "vote_people"),
(VoteCount, "vote_counts"),
(VoteSource, "vote_sources"),
):
subobjs = Model.objects.filter(vote_event__legislative_session=sobj).values()
export_csv(f"{state}/{session}/{state}_{session}_{fname}.csv", subobjs, zf)
return filename
def upload_and_publish(state, session, filename):
sobj = LegislativeSession.objects.get(
jurisdiction_id=abbr_to_jid(state), identifier=session
)
s3 = boto3.client("s3")
BULK_S3_BUCKET = "data.openstates.org"
BULK_S3_PATH = "csv/latest/"
s3_url = f"https://{BULK_S3_BUCKET}/{BULK_S3_PATH}{filename}"
s3.upload_file(
filename,
BULK_S3_BUCKET,
BULK_S3_PATH + filename,
ExtraArgs={"ACL": "public-read"},
)
print("uploaded", s3_url)
obj, created = DataExport.objects.update_or_create(
session=sobj, defaults=dict(url=s3_url)
)
class Command(BaseCommand):
help = "export data as CSV"
def add_arguments(self, parser):
parser.add_argument("state")
parser.add_argument("sessions", nargs="*")
parser.add_argument("--all", action="store_true")
def handle(self, *args, **options):
state = options["state"]
sessions = [
s.identifier
for s in LegislativeSession.objects.filter(
jurisdiction_id=abbr_to_jid(state)
)
]
if options["all"]:
options["sessions"] = sessions
if not options["sessions"]:
print("available sessions:")
for session in sessions:
print(" ", session)
else:
for session in options["sessions"]:
if session in sessions:
filename = export_session(state, session)
if filename:
upload_and_publish(state, session, filename)
```
#### File: geo/shapefiles/definitions.py
```python
from datetime import date
import boundaries
import os
import us
OGRIndexError = Exception
states = [s for s in us.STATES_AND_TERRITORIES if s not in us.OBSOLETE]
state_fips = {s.fips: s.abbr.lower() for s in states}
def tiger_namer(feature):
global OGRIndexError
global state_fips
try:
fips_code = feature.get("STATEFP")
except OGRIndexError:
fips_code = feature.get("STATEFP10")
try:
name = feature.get("NAMELSAD")
except OGRIndexError:
name = feature.get("NAMELSAD10")
try:
geoid = feature.get("GEOID")
except OGRIndexError:
geoid = feature.get("GEOID10")
state_abbrev = state_fips[fips_code].upper()
name = name.encode("utf8").decode("latin-1")
resp = u"{0} {1} {2}".format(state_abbrev, name, geoid)
return resp
def geoid_tiger_namer(feature):
try:
geoid = feature.get("GEOID")
except OGRIndexError:
geoid = feature.get("GEOID10")
return geoid
def nh_12_namer(feature):
"""
New Hampshire's floterial district shapefiles have only one field:
an abbreviated district name ("AA#" format). This has to be
crosswalked to useful information.
The crosswalk is roughly based on this Census file:
www2.census.gov/geo/docs/maps-data/data/NH_2012_Floterials.txt
"""
abbr = feature.get("NHHouse201")
# There are two shapefiles that don't correspond to any floterial
# These need unique IDs, which end with 'zzz' so that they'll be ignored
if not abbr:
import datetime
unique_key = datetime.datetime.now()
return "{}zzz".format(unique_key)
path = os.path.join(
os.path.abspath(os.getcwd()), "shapefiles", "nh_12_crosswalk.csv"
)
with open(path, "r") as f:
# Due to a bug in `boundaries`, need to `import csv` here
import csv
reader = list(csv.DictReader(f))
(row,) = [x for x in reader if x["NHHouse201"] == abbr]
STATE_ABBREV = "NH"
name = row["NAMELSAD"]
geoid = row["GEOID"]
resp = "{0} {1} {2}".format(STATE_ABBREV, name, geoid)
return resp
def geoid_nh_12_namer(feature):
abbr = feature.get("NHHouse201")
if not abbr:
import datetime
unique_key = datetime.datetime.now()
return "{}zzz".format(unique_key)
path = os.path.join(
os.path.abspath(os.getcwd()), "shapefiles", "nh_12_crosswalk.csv"
)
with open(path, "r") as f:
# Due to a bug in `boundaries`, need to `import csv` here
import csv
reader = list(csv.DictReader(f))
(row,) = [x for x in reader if x["NHHouse201"] == abbr]
geoid = row["GEOID"]
return geoid
class index_namer(object):
def __init__(self, prefix):
self.prefix = prefix
self.count = 0
def __call__(self, feature):
self.count += 1
return "{0}{1}".format(self.prefix, self.count)
CENSUS_URL = "http://www.census.gov/geo/maps-data/data/tiger.html"
LAST_UPDATE = date(2018, 1, 24)
defaults = dict(
last_updated=LAST_UPDATE,
domain="United States",
authority="US Census Bureau",
source_url=CENSUS_URL,
license_URL=CENSUS_URL,
data_url=CENSUS_URL,
notes="",
extra="{}",
)
FIRST_YEAR = 2017
LAST_YEAR = 2018
YEARS = range(FIRST_YEAR, LAST_YEAR + 1)
for year in YEARS:
# Most types of Census data follow a common pattern
for type_ in ["sldu", "sldl"]:
boundary_info = dict(
slug="{}-{}".format(type_, year % 2000),
singular="{}-{}".format(type_, year % 2000),
file="{}-{}/".format(type_, year % 2000),
name_func=tiger_namer,
id_func=geoid_tiger_namer,
# Although the Census files are published in the autumn,
# they take effect retroactively as of the start of their year
start_date=date(year, 1, 1),
end_date=date(year, 12, 31),
encoding="latin-1",
**defaults
)
if year == LAST_YEAR:
# This is the most recent information we have,
# so keep using it until the boundaries are updated
del boundary_info["end_date"]
boundaries.register(**boundary_info)
boundaries.register(
"nh-12",
singular="nh-12",
file="nh-12/",
name_func=nh_12_namer,
id_func=geoid_nh_12_namer,
start_date=date(2012, 1, 1),
last_updated=LAST_UPDATE,
domain="United States",
authority="NH Office of Energy and Planning",
source_url="http://www.nh.gov/oep/planning/services/gis/political-districts.htm",
license_URL="http://www.nh.gov/oep/planning/services/gis/political-districts.htm",
data_url="ftp://pubftp.nh.gov/OEP/NHHouseDists2012.zip",
notes="",
extra="{}",
)
```
#### File: openstates.org/profiles/admin.py
```python
from django.contrib import admin
from profiles.models import Profile, Subscription, Notification
@admin.register(Profile)
class ProfileAdmin(admin.ModelAdmin):
list_display = ("user", "email", "feature_subscriptions")
list_editable = ("feature_subscriptions",)
search_fields = ("user__email",)
def email(self, p):
return p.user.email
@admin.register(Subscription)
class SubscriptionAdmin(admin.ModelAdmin):
list_display = ("__str__", "user", "subscription_type", "active")
list_filter = ("active",)
autocomplete_fields = ("sponsor", "bill")
@admin.register(Notification)
class NotificationAdmin(admin.ModelAdmin):
list_display = ("id", "email", "sent", "num_bill_updates", "num_query_updates")
search_fields = ("email",)
ordering = ("sent",)
date_hierarchy = "sent"
def has_add_permission(self, request):
return False
def has_delete_permission(self, request, obj=None):
return False
```
#### File: openstates.org/profiles/apps.py
```python
from django.apps import AppConfig
def create_key_for_verified_user(sender, **kwargs):
from simplekeys.models import Key, Tier
email = kwargs["email_address"]
try:
Key.objects.get(email=email.email)
except Key.DoesNotExist:
Key.objects.create(
tier=Tier.objects.get(slug="default"),
status="a",
email=email.email,
name=email.email,
)
def create_profile(sender, instance, **kwargs):
from profiles.models import Profile
Profile.objects.get_or_create(user=instance)
class ProfilesConfig(AppConfig):
name = "profiles"
def ready(self):
from allauth.account.signals import email_confirmed
from django.db.models.signals import post_save
from django.contrib.auth.models import User
email_confirmed.connect(create_key_for_verified_user)
post_save.connect(create_profile, sender=User)
```
#### File: management/commands/update_materialized_views.py
```python
from django.core.management.base import BaseCommand
from django.db import transaction, connection
class Command(BaseCommand):
help = "update materialized views"
def add_arguments(self, parser):
parser.add_argument("--initial", action="store_true")
def handle(self, *args, **options):
concurrent = "CONCURRENTLY"
# initial run can't be concurrent
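# (PostgreSQL only allows REFRESH ... CONCURRENTLY once the view is populated
# and has a unique index, hence the plain refresh on the first run)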
if options["initial"]:
concurrent = ""
with transaction.atomic():
with connection.cursor() as cursor:
query = f"REFRESH MATERIALIZED VIEW {concurrent} public_billstatus"
print(query)
cursor.execute(query)
```
#### File: openstates.org/public/models.py
```python
from django.db import models
from opencivicdata.core.models import Person, Organization
from opencivicdata.legislative.models import Bill
from utils.people import get_current_role, current_role_filters
from utils.common import pretty_url, abbr_to_jid
class PersonProxy(Person):
class Meta:
proxy = True
@staticmethod
def search_people(query, *, state=None, current=True):
if current:
people = PersonProxy.objects.filter(
*current_role_filters(),
memberships__organization__classification__in=[
"upper",
"lower",
"legislature",
],
name__icontains=query
)
else:
people = PersonProxy.objects.filter(name__icontains=query)
if state:
people = people.filter(
memberships__organization__jurisdiction_id=abbr_to_jid(state)
)
people = people.prefetch_related(
"memberships", "memberships__organization", "memberships__post"
)
return people
@staticmethod
def get_current_legislators_with_roles(chambers):
return PersonProxy.objects.filter(
*current_role_filters(), memberships__organization__in=chambers
).prefetch_related(
"memberships", "memberships__organization", "memberships__post"
)
@property
def current_role(self):
if not getattr(self, "_current_role", None):
self._current_role = get_current_role(self)
try:
self._current_role["district"] = int(self._current_role["district"])
except ValueError:
pass
return self._current_role
def pretty_url(self):
return pretty_url(self)
def committee_memberships(self):
return self.memberships.filter(organization__classification="committee").all()
def as_dict(self):
return {
"id": self.id,
"name": self.name,
"image": self.image,
"current_role": self.current_role,
"pretty_url": self.pretty_url(),
}
class OrganizationProxy(Organization):
class Meta:
proxy = True
def pretty_url(self):
return pretty_url(self)
def as_dict(self):
return {
"id": self.id,
"name": self.name,
"chamber": self.parent.classification,
"pretty_url": self.pretty_url(),
"member_count": self.member_count,
}
class BillStatus(models.Model):
bill = models.OneToOneField(Bill, on_delete=models.DO_NOTHING, primary_key=True)
first_action_date = models.CharField(max_length=25)
latest_action_date = models.CharField(max_length=25)
latest_action_description = models.TextField()
latest_passage_date = models.CharField(max_length=25)
class Meta:
managed = False
```
#### File: public/tests/test_bill_views.py
```python
import pytest
from graphapi.tests.utils import populate_db
from opencivicdata.core.models import Person
from opencivicdata.legislative.models import VoteEvent
@pytest.mark.django_db
def setup():
populate_db()
BILLS_QUERY_COUNT = 9
ALASKA_BILLS = 12
@pytest.mark.django_db
def test_bills_view_basics(client, django_assert_num_queries):
with django_assert_num_queries(BILLS_QUERY_COUNT):
resp = client.get("/ak/bills/")
assert resp.status_code == 200
assert resp.context["state"] == "ak"
assert resp.context["state_nav"] == "bills"
assert len(resp.context["chambers"]) == 2
assert len(resp.context["sessions"]) == 2
assert "nature" in resp.context["subjects"]
assert len(resp.context["sponsors"]) == 7
assert len(resp.context["classifications"]) == 3
# 10 random bills, 2 full featured
assert len(resp.context["bills"]) == ALASKA_BILLS
@pytest.mark.django_db
def test_bills_view_query(client, django_assert_num_queries):
# title search works
with django_assert_num_queries(BILLS_QUERY_COUNT):
resp = client.get("/ak/bills/?query=moose")
assert resp.status_code == 200
assert len(resp.context["bills"]) == 1
# search in body works
resp = client.get("/ak/bills/?query=gorgonzola")
assert resp.status_code == 200
assert len(resp.context["bills"]) == 1
# test that a query doesn't alter the search options
assert len(resp.context["chambers"]) == 2
assert len(resp.context["sessions"]) == 2
assert "nature" in resp.context["subjects"]
assert len(resp.context["subjects"]) > 10
assert len(resp.context["sponsors"]) == 7
assert len(resp.context["classifications"]) == 3
@pytest.mark.django_db
def test_bills_view_query_bill_id(client, django_assert_num_queries):
# query by bill id
with django_assert_num_queries(BILLS_QUERY_COUNT):
resp = client.get("/ak/bills/?query=HB 1")
assert resp.status_code == 200
assert len(resp.context["bills"]) == 1
# case insensitive
resp = client.get("/ak/bills/?query=hb 1")
assert resp.status_code == 200
assert len(resp.context["bills"]) == 1
@pytest.mark.django_db
def test_bills_view_chamber(client, django_assert_num_queries):
with django_assert_num_queries(BILLS_QUERY_COUNT):
upper = len(client.get("/ak/bills/?chamber=upper").context["bills"])
with django_assert_num_queries(BILLS_QUERY_COUNT):
lower = len(client.get("/ak/bills/?chamber=lower").context["bills"])
assert upper + lower == ALASKA_BILLS
@pytest.mark.django_db
def test_bills_view_session(client, django_assert_num_queries):
with django_assert_num_queries(BILLS_QUERY_COUNT):
b17 = len(client.get("/ak/bills/?session=2017").context["bills"])
with django_assert_num_queries(BILLS_QUERY_COUNT):
b18 = len(client.get("/ak/bills/?session=2018").context["bills"])
assert b17 + b18 == ALASKA_BILLS
@pytest.mark.django_db
def test_bills_view_sponsor(client, django_assert_num_queries):
amanda = Person.objects.get(name="<NAME>")
with django_assert_num_queries(BILLS_QUERY_COUNT):
assert len(client.get(f"/ak/bills/?sponsor={amanda.id}").context["bills"]) == 2
@pytest.mark.django_db
def test_bills_view_classification(client, django_assert_num_queries):
bills = len(client.get("/ak/bills/?classification=bill").context["bills"])
resolutions = len(
client.get("/ak/bills/?classification=resolution").context["bills"]
)
assert (
len(
client.get("/ak/bills/?classification=constitutional+amendment").context[
"bills"
]
)
== 2
)
assert bills + resolutions == ALASKA_BILLS
@pytest.mark.django_db
def test_bills_view_subject(client, django_assert_num_queries):
with django_assert_num_queries(BILLS_QUERY_COUNT):
assert len(client.get(f"/ak/bills/?subjects=nature").context["bills"]) == 2
@pytest.mark.django_db
def test_bills_view_status(client, django_assert_num_queries):
with django_assert_num_queries(BILLS_QUERY_COUNT):
assert (
len(client.get(f"/ak/bills/?status=passed-lower-chamber").context["bills"])
== 1
)
@pytest.mark.django_db
def test_bill_view(client, django_assert_num_queries):
with django_assert_num_queries(16):
resp = client.get("/ak/bills/2018/HB1/")
assert resp.status_code == 200
assert resp.context["state"] == "ak"
assert resp.context["state_nav"] == "bills"
assert resp.context["bill"].identifier == "HB 1"
assert len(resp.context["sponsorships"]) == 2
assert len(resp.context["actions"]) == 3
assert len(resp.context["votes"]) == 1
assert len(resp.context["versions"]) == 2
assert len(resp.context["documents"]) == 2
assert resp.context["read_link"] == "https://example.com/f.pdf"
assert resp.context["stages"][1] == {
"date": "2018-03-01",
"stage": "Alaska House",
"text": "Passed Alaska House",
}
@pytest.mark.django_db
def test_vote_view(client, django_assert_num_queries):
vid = VoteEvent.objects.get(motion_text="Vote on House Passage").id.split("/")[1]
with django_assert_num_queries(7):
resp = client.get(f"/vote/{vid}/")
assert resp.status_code == 200
assert resp.context["state"] == "ak"
assert resp.context["state_nav"] == "bills"
assert len(resp.context["person_votes"]) == 5
# vote counts in order, yes, no, others
assert resp.context["vote_counts"][0].option == "yes"
assert resp.context["vote_counts"][1].option == "no"
assert resp.context["vote_counts"][0].value == 1
assert resp.context["vote_counts"][1].value == 4
# sorted list of (party, counts) pairs
assert resp.context["party_votes"][0][0] == "Democratic"
assert resp.context["party_votes"][0][1]["no"] == 1
assert resp.context["party_votes"][0][1]["yes"] == 0
assert resp.context["party_votes"][1][0] == "Republican"
assert resp.context["party_votes"][1][1]["no"] == 2
assert resp.context["party_votes"][1][1]["yes"] == 1
assert resp.context["party_votes"][2][0] == "Unknown"
assert resp.context["party_votes"][2][1]["no"] == 1
@pytest.mark.django_db
def test_bills_feed(client):
resp = client.get(f"/ak/bills/feed/")
assert resp.status_code == 200
```
#### File: public/views/fallback.py
```python
from django.http import Http404, HttpResponse
from django.shortcuts import redirect
import boto3
from botocore.errorfactory import ClientError
from ..models import PersonProxy
def fallback(request):
BUCKET_NAME = "legacy.openstates.org"
key = request.path.lstrip("/") + "index.html"
s3 = boto3.client("s3")
try:
obj = s3.get_object(Bucket=BUCKET_NAME, Key=key)
return HttpResponse(obj["Body"].read())
except ClientError:
raise Http404(request.path + "index.html")
def legislator_fallback(request, legislator_id):
try:
p = PersonProxy.objects.get(
identifiers__scheme="legacy_openstates",
identifiers__identifier=legislator_id,
)
return redirect(p.pretty_url(), permanent=True)
except PersonProxy.DoesNotExist:
return fallback(request)
```
#### File: openstates.org/utils/people.py
```python
import datetime
from django.db.models import Q
from .common import jid_to_abbr
def get_current_role(person):
today = datetime.date.today().strftime("%Y-%m-%d")
party = None
post = None
state = None
chamber = None
# assume that this person object was fetched with appropriate
# related data, if not this can get expensive
for membership in person.memberships.all():
if not membership.end_date or membership.end_date > today:
if membership.organization.classification == "party":
party = membership.organization.name
elif membership.organization.classification in (
"upper",
"lower",
"legislature",
):
chamber = membership.organization.classification
state = jid_to_abbr(membership.organization.jurisdiction_id)
post = membership.post
return {
"party": party,
"chamber": chamber,
"state": state,
"district": post.label if post else "",
"division_id": post.division_id if post else "",
"role": post.role if post else "",
}
def current_role_filters():
today = datetime.date.today().isoformat()
return [
Q(memberships__start_date="") | Q(memberships__start_date__lte=today),
Q(memberships__end_date="") | Q(memberships__end_date__gte=today),
]
```
{
"source": "jgarciab/evolveUnimodal",
"score": 3
}
#### File: jgarciab/evolveUnimodal/runSimulations.py
```python
import numpy as np
from numpy import random
import os
from scipy.stats import gamma, expon
import statsmodels.api as sm
import pylab as plt
class differential_evolution_optimizer(object):
"""
This is a python implementation of differential evolution
It assumes an evaluator class is passed in that has the following
functionality
data members:
n :: The number of parameters
domain :: a list [(low,high)]*n
with approximate upper and lower limits for each parameter
x :: a place holder for a final solution
also a function called 'target' is needed.
This function should take a parameter vector as input and return the value of the function to be minimized.
The code below was implemented on the basis of the following sources of information:
1. http://www.icsi.berkeley.edu/~storn/code.html
2. http://www.daimi.au.dk/~krink/fec05/articles/JV_ComparativeStudy_CEC04.pdf
3. http://ocw.mit.edu/NR/rdonlyres/Sloan-School-of-Management/15-099Fall2003/A40397B9-E8FB-4B45-A41B-D1F69218901F/0/ses2_storn_price.pdf
The developers of the differential evolution method have this advice:
(taken from ref. 1)
If you are going to optimize your own objective function with DE, you may try the
following classical settings for the input file first: Choose method e.g. DE/rand/1/bin,
set the number of parents NP to 10 times the number of parameters, select weighting
factor F=0.8, and crossover constant CR=0.9. It has been found recently that selecting
F from the interval [0.5, 1.0] randomly for each generation or for each difference
vector, a technique called dither, improves convergence behaviour significantly,
especially for noisy objective functions. It has also been found that setting CR to a
low value, e.g. CR=0.2 helps optimizing separable functions since it fosters the search
along the coordinate axes. On the contrary this choice is not effective if parameter
dependence is encountered, something which is frequently occurring in real-world optimization
problems rather than artificial test functions. So for parameter dependence the choice of
CR=0.9 is more appropriate. Another interesting empirical finding is that raising NP above,
say, 40 does not substantially improve the convergence, independent of the number of
parameters. It is worthwhile to experiment with these suggestions. Make sure that you
initialize your parameter vectors by exploiting their full numerical range, i.e. if a
parameter is allowed to exhibit values in the range [-100, 100] it's a good idea to pick
the initial values from this range instead of unnecessarily restricting diversity.
Keep in mind that different problems often require different settings for NP, F and CR
(have a look into the different papers to get a feeling for the settings). If you still
get misconvergence you might want to try a different method. We mostly use DE/rand/1/... or DE/best/1/... .
The crossover method is not so important although <NAME> claims that binomial is never
worse than exponential. In case of misconvergence also check your choice of objective
function. There might be a better one to describe your problem. Any knowledge that you
have about the problem should be worked into the objective function. A good objective
function can make all the difference.
(Note: NP is called population size in the routine below.)
Note: [0.5,1.0] dither is the default behavior unless f is set to a value other then None.
"""
def __init__(self,
evaluator,
population_size=50,
f=None,
cr=0.9,
eps=1e-2,
n_cross=1,
max_iter=10000,
monitor_cycle=200,
out=None,
show_progress=False,
save_progress=False,
show_progress_nth_cycle=1,
insert_solution_vector=None,
dither_constant=0.4,
movAverageMutationRate = 0.,
noise=0):
self.movAverageMutationRate=movAverageMutationRate
self.dither=dither_constant
self.noise = noise
self.show_progress=show_progress
self.save_progress=save_progress
self.show_progress_nth_cycle=show_progress_nth_cycle
self.evaluator = evaluator
self.population_size = population_size
self.f = f
self.cr = cr
self.n_cross = n_cross
self.max_iter = max_iter
self.monitor_cycle = monitor_cycle
self.vector_length = evaluator.n
self.eps = eps
self.population = []
self.seeded = False
if insert_solution_vector is not None:
assert len( insert_solution_vector )==self.vector_length
self.seeded = insert_solution_vector
for ii in xrange(self.population_size):
self.population.append( np.zeros(self.vector_length))
self.scores = np.zeros(self.population_size) + 1000.
self.optimize()
self.best_score = np.min( self.scores )
self.best_vector = self.population[( self.scores ).argmin() ]
self.evaluator.x = self.best_vector
if self.show_progress:
self.evaluator.print_status(
np.min(self.scores),
np.mean(self.scores),
self.population[ ( self.scores ).argmin() ],
'Final')
def optimize(self):
# open file
# initialise the population please
self.make_random_population()
# score the population please
self.score_population()
converged = False
monitor_score = np.min( self.scores )
self.count = 0
cx = 0
while not converged:
self.evolve()
location = (self.scores).argmin()
if self.show_progress:
if self.count%self.show_progress_nth_cycle==0:
# make here a call to a custom print_status function in the evaluator function
# the function signature should be (min_target, mean_target, best vector)
self.evaluator.print_status(
np.min(self.scores),
np.mean(self.scores),
self.population[ ( self.scores ).argmin() ],
self.count)
if self.save_progress:
self.evaluator.fname.write("%d, %f, %f" %(self.count,np.min(self.scores),np.mean(self.scores)))
for item in self.population[ ( self.scores ).argmin() ]:
self.evaluator.fname.write(", %e" % item)
if self.count%20==0:
print self.count, self.evaluator.fname.name, np.min(self.scores), self.population[ ( self.scores ).argmin() ]
#print self.count
#vector = self.population[ ( self.scores ).argmin()][:-1]
#x = np.linspace(0.01, 100., num=100) # values for x-axis
#d = np.zeros(100)
#for jj in range(0,len(vector)-1,3):
#d += vector[jj]*gamma.pdf(x, vector[jj+1], loc=0, scale=vector[jj+2]) # probability distribution
#plt.plot(d)
#plt.show()
self.evaluator.fname.write("\n")
self.count += 1
if self.count%self.monitor_cycle==0:
if (monitor_score-np.min(self.scores) ) < self.eps:
converged = True
else:
monitor_score = np.min(self.scores)
rd = (np.mean(self.scores) - np.min(self.scores) )
rd = rd*rd/(np.min(self.scores)*np.min(self.scores) + self.eps )
if ( rd < self.eps):
cx += 1
if self.count>=self.max_iter :
converged = True
if cx > 20:
converged = True
if self.save_progress:
self.evaluator.fname.close()
return None
def make_random_population(self):
for ii in xrange(self.vector_length):
delta = self.evaluator.domain[ii][1]-self.evaluator.domain[ii][0]
offset = self.evaluator.domain[ii][0]
random_values = np.random.random(self.population_size)
random_values = random_values*delta+offset
# now please place these values ni the proper places in the
# vectors of the population we generated
for vector, item in zip(self.population,random_values):
vector[ii] = item
if self.seeded is not False:
self.population[0] = self.seeded
self.upper_bound = np.asarray([_[1] for _ in self.evaluator.bounder])
self.lower_bound = np.asarray([_[0] for _ in self.evaluator.bounder])
"""
for vector in self.population:
x = np.linspace(0.01, 100., num=100) # values for x-axis
d = np.zeros(100)
for jj in range(0,len(vector)-1,3):
d += vector[jj]*gamma.pdf(x, vector[jj+1], loc=0, scale=vector[jj+2]) # probability distribution
d /= np.sum(d)
plt.plot(d)
plt.show()
"""
def score_population(self):
for ii,vector in enumerate(self.population):
tmp_score = self.evaluator.target(vector,0)
self.scores[ii]=tmp_score
def evolve(self):
#print self.scores[(self.scores ).argmin()]
for ii in xrange(self.population_size):
if self.noise != 0:
self.scores[ii] = self.evaluator.target( self.population[ii],self.count )
np.random.seed()
permut = np.random.permutation(self.population_size-1)
# make parent indices
i1=permut[0]
if (i1>=ii):
i1+=1
i2=permut[1]
if (i2>=ii):
i2+=1
i3=permut[2]
if (i3>=ii):
i3+=1
"""
x1 = self.population[ i1 ]
x2 = self.population[ i2 ]
x3 = self.population[ i3 ]
if self.f is None:
use_f = random.random()/2.0 + 0.5
else:
use_f = self.f
vi = x1 + use_f*(x2-x3)
# crossover
mask = np.random.random(self.vector_length)
test_vector = (mask < 0.9)*vi + (mask>0.9)*self.population[ii]
test_vector[test_vector<self.lower_bound] = self.lower_bound[test_vector<self.lower_bound]
test_vector[test_vector>self.upper_bound] = self.upper_bound[test_vector>self.upper_bound]
"""
if self.count < 50 or np.random.random()<0.8:
x1 = self.population[ i1 ]#self.population[ i1 ]#
else:
x1 = self.population[ ( self.scores ).argmin()]#self.population[ i1 ]#self.population[ i1 ]#
x2 = self.population[ i2 ]
x3 = self.population[ i3 ]
if self.f is None:
use_f = random.random()/2.0 + 0.5
else:
use_f = self.f
vi = x1 + use_f*(x2-x3)
# crossover
mask = np.random.random(self.vector_length)
test_vector = (mask < 0.9)*vi + (mask>0.9)*self.population[ii]
test_vector[test_vector<self.lower_bound] = self.lower_bound[test_vector<self.lower_bound]
test_vector[test_vector>self.upper_bound] = self.upper_bound[test_vector>self.upper_bound]
# moving average
if np.random.random() < self.movAverageMutationRate:
rN = 3#np.random.randint(2,5)*2-1
t1,t2= np.sum(test_vector[:40]),np.sum(test_vector[40:-1])
test_vector = np.concatenate([test_vector[:rN/2], (np.convolve(test_vector[:-1]**rN, np.ones((rN,))/float(rN), mode='valid'))**rN,test_vector[(-rN-1)/2:-1]**rN,[test_vector[-1]]])
test_vector[:40] /= np.sum(test_vector[:40]) / t1
test_vector[40:-1] /= np.sum(test_vector[40:-1]) / t2
if np.random.random() < self.movAverageMutationRate:
if random.random() < 0.5:
test_vector[:40] = 1./2 * (test_vector[:40]+ test_vector[1:41])
test_vector[40:-2] = 1./2 * (test_vector[41:-1]+ test_vector[40:-2])
else:
test_vector[:40] = 1./2 * (test_vector[:40]+ test_vector[1:41])
test_vector[41:-1] = 1./2 * (test_vector[41:-1]+ test_vector[40:-2])
if np.random.random() < self.movAverageMutationRate:
if random.random() < 0.5:
test_vector[:40] *= 1.01
else:
test_vector[40:-1] *= 1.01
# bounder
test_score = self.evaluator.target( test_vector,self.count )
if test_score < self.scores[ii]:
self.scores[ii] = test_score
self.population[ii] = test_vector
def show_population(self):
print "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
for vec in self.population:
print list(vec)
print "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
class Init(object):
def __init__(self,evaluator, suddenness,numChanges,args,dim,noise = 0):
evaluator.numEnv = int(args[0])
if noise == 0 or evaluator.numEnv == 0:
y = [0]*1000
if evaluator.numEnv == 0:
lenIter = 1000
else:
lenIter = 2
else:
x = np.linspace(0.0, 100., num=101)
tt =expon.pdf(x,scale=noise,loc=0)
tt = tt/np.sum(tt)
if evaluator.numEnv == 2:
lenIter = 200
else:
lenIter = 50
y = []
for i,t in enumerate(tt):
y += [int(x[i])]*int(lenIter*2*t)
evaluator.noise = y
costArr = ['0','0','0.01','0.03','0.1']
cost = costArr[suddenness]
if evaluator.numEnv == 0:
a = float(args[args.find("0Env_")+5] + "." + args[args.find("0Env_")+6:-2])
j = 1.5/a
np.random.seed(2+0)
x = np.linspace(0.01, 100., num=101)
tt =gamma.pdf(x,a,scale=j,loc=0)
tt = tt/np.sum(tt)
y = []
for i,t in enumerate(tt):
y += [int(11*x[i])]*int(1000*t)
evaluator.env = np.random.choice([int(_) for _ in y],size=len(y),replace=False)
print set(evaluator.env)
evaluator.trajectory = dict()
i = 0
for s in range(len(evaluator.env)):
i += int(10000/numChanges)
evaluator.trajectory[i] = s
if evaluator.numEnv == 1:
s = int(args[args.find("0Env_")+6:-2])
print(s)
evaluator.env = [s,s]
if 1:
evaluator.trajectory = dict()
evaluator.trajectory[1000] = 0
elif evaluator.numEnv == 2:
evaluator.env = [0,100]
if args[-4] == 'A': x2 = 0.999999 #1000000
elif args[-4] == 'B': x2 = 0.999998 #1000000
elif args[-4] == 'C': x2 = 0.999995 #1000000
elif args[-4] == 'E': x2 = 0.99999 #100000
elif args[-4] == 'F': x2 = 0.99998 #50000
elif args[-4] == 'G': x2 = 0.99995 #20000
elif args[-4] == 'V': x2 = 0.9999 #10000
elif args[-4] == 'W': x2 = 0.9998 #5000
elif args[-4] == 'X': x2 = 0.9995 #2000
elif args[-4] == 'H': x2 = 0.999 #1000
elif args[-4] == 'I': x2 = 0.9960#80 #500
elif args[-4] == 't': x2 = 0.9958#79 #400
elif args[-4] == 'j': x2 = 0.9956#78 #333
elif args[-4] == 'k': x2 = 0.9954#77 #434
elif args[-4] == 's': x2 = 0.9952#76 #434
elif args[-4] == 'm': x2 = 0.9950#75 #434
elif args[-4] == 'n': x2 = 0.9948#74 #434
#elif args[-4] == 'I': x2 = 0.9980#56#80 #500
#elif args[-4] == 't': x2 = 0.9979#54#79 #400
##elif args[-4] == 'j': x2 = 0.9978#52#78 #333
#elif args[-4] == 'k': x2 = 0.9977#50#77 #434
#elif args[-4] == 's': x2 = 0.9976#48#76 #434
#elif args[-4] == 'm': x2 = 0.9975#46#75 #434
#elif args[-4] == 'n': x2 = 0.9974#44#74 #434
elif args[-4] == 'o': x2 = 0.9973 #434
elif args[-4] == 'p': x2 = 0.9972 #434
elif args[-4] == 'q': x2 = 0.9971 #434
elif args[-4] == 'r': x2 = 0.997 #434
elif args[-4] == 'J': x2 = 0.995 #200
elif args[-4] == 'L': x2 = 0.99 #100
if args[-3] == 'V': x3 = 0.9999
elif args[-3] == 'H': x3 = 0.999
elif args[-3] == 'L': x3 = 0.99
elif args[-3] == 'A': x3 = 0.999999 #1000000
if args[-6] == 'P':
evaluator.trajectory = dict()
s = 1
i = 0
while(len(evaluator.trajectory)<lenIter):
if s == 0:
#v5 (Very low freq in High stress)
i += int(np.ceil(1000.*1./(1-x2)/numChanges))
else:
i += int(np.ceil(1000.*1./(1-x3)/numChanges))
s = (s-1)*(-1)
evaluator.trajectory[i] = s
elif evaluator.numEnv == 3:
evaluator.env = [0,11,100]
if args[-5] == 'A': x1 = 0.999999 #1000000
elif args[-5] == 'E': x1 = 0.99999 #100000
elif args[-5] == 'V': x1 = 0.9999 #10000
elif args[-5] == 'H': x1 = 0.999 #1000
elif args[-5] == 'L': x1 = 0.99 #100
if args[-4] == 'A': x2 = 0.999999 #1000000
elif args[-4] == 'E': x2 = 0.99999 #100000
elif args[-4] == 'V': x2 = 0.9999 #10000
elif args[-4] == 'H': x2 = 0.999 #1000
elif args[-4] == 'L': x2 = 0.99 #100
if args[-3] == 'H': x3 = 0.999
if args[-7] == 'P':
#Regular
evaluator.trajectory = dict()
envOrder = [0,1,0,2]
s = 1
i = 0
while(len(evaluator.trajectory)<2*lenIter):
if envOrder[s%4] == 1:
i += int(np.ceil(1./(1-x2)/numChanges))
elif envOrder[s%4] == 2:
i += int(np.ceil(1./(1-x3)/numChanges))
else:
i += int(0.5*np.ceil(1./(1-x1)/numChanges))
s+=1
evaluator.trajectory[i] = envOrder[s%4]
if args[-2] == 'S':
evaluator.arrayCost = []
evaluator.arrayCost.append(np.loadtxt('allCostsSt_S'+'0'+'.txt'))
evaluator.arrayCost.append(np.loadtxt('allCostsSt_S'+cost+'.txt'))
evaluator.selection = 1
elif args[-2] == 'W':
evaluator.arrayCost = []
evaluator.arrayCost.append(np.loadtxt('allCostsSt_W'+'0'+'.txt'))
evaluator.arrayCost.append(np.loadtxt('allCostsSt_W'+cost+'.txt'))
evaluator.selection = 0
else:
print "Finish with SS or WS"
raise
evaluator.optVal = [evaluator.arrayCost[1][:,i].argmax() for i in range(101)]
evaluator.gamma1Env = np.loadtxt("gamma1EnvOptimum.txt")
## Global variables
evaluator.sud = suddenness
evaluator.trajectoryX = evaluator.trajectory
evaluator.trajectory = sorted([_ for _ in evaluator.trajectory])
print evaluator.trajectoryX
class EvolveNoiseFromHistLogNormal(object):
def __init__(self, suddenness,numChanges,args,dim,noise = 0):
self.fname = open("./dataDE/"+str(noise)+args+str(dim)+str(suddenness)+str(numChanges)+"0obs.txt","w")
Init(self,suddenness,numChanges,args,dim,noise)
self.x = None
self.n = dim*3+1
self.dim = dim*3+1
if dim == 1:
self.domain = [(0.,1.), (0.5,100.),(10.,400.)] + [(0,1)]
self.bounder = [(0.,10.), (0.5,100),(10.,4000.)] +[(0,1)]
else:
if dim %2 != 0:
dim -= 1
print "Dimensions reduced"
self.domain = [(0.,1.), (0.5,2),(10.,400.),(0.,1.), (2,100),(10.,400.)]*(dim/2) + [(0,1)]
self.bounder = [(0.,10.), (0.5,100),(10,4000.),(0.,10.), (0.5,100),(10.,4000.)]*(dim/2) + [(0,1)]
self.optimizer = differential_evolution_optimizer(self,max_iter=500 ,population_size=40,
n_cross=1,cr=0.9, eps=1e-15, show_progress=False,save_progress=True,noise=noise)
def target(self, vector,seed):
random.seed(100*seed+0)
x = np.linspace(0.01, 10000., num=100) # values for x-axis
d = np.zeros(100)
w = 0
for jj in range(0,len(vector)-1,3):
d += vector[jj]*gamma.cdf(x, vector[jj+1], loc=0, scale=vector[jj+2]) # probability distribution
w += vector[jj]
d = np.diff(np.concatenate([[0],d]))
sense = np.round(vector[-1])
timePointAll = d/w
timePoint = np.copy(timePointAll)
currEnv = 1
sumT = 0
prevchange = 0
np.random.shuffle(self.noise)
for i,change in enumerate(self.trajectory):
if currEnv == 0:
env = self.env[currEnv] + self.noise[i]
temp = np.copy(timePointAll)
else:
env = self.env[currEnv] - self.noise[i]
a,b = self.gamma1Env[:,env]
temp = np.diff(np.concatenate([[0],gamma.cdf(x, a, loc=0, scale=b)]))# probability distribution
if sense == 1:
opt = self.arrayCost[1][:,env]
else:
opt = self.arrayCost[0][:,env]
inter = change-prevchange
#print "1",i,currEnv,env,inter,change
prevchange = change
if sense == 0 or self.sud == 0:
growth = np.sum(timePoint[opt>-1]*2**opt[opt>-1])
if growth == 0: return 1.
sumT += 1.*inter*np.log2(growth)
else:
t2 = temp
#First see who grows
growth = np.sum(timePoint[opt>-1]*2**opt[opt>-1])
if growth == 0: return 1.
#Now switch. Fast changes
sumT += 1.*np.log2(growth)
sumT += 1.*(inter-1)*np.log2(np.sum(t2[opt>-1]*2**opt[opt>-1]))
#print 1.*np.log(growth),1.*(inter-1)*np.log(np.sum(t2 + t2 * opt))
currEnv = self.trajectoryX[change]
#print "2",i,currEnv,env,inter,change
fitness = sumT/self.trajectory[-1]#np.exp(sumT/self.trajectory[-1])-1.
#print fitness
if 0:
penalty = 0.1*np.sum(np.abs(np.diff(timePointAll))>0.01) #0.1 for each sudden change in concentration
fitness = fitness-penalty
else:
fitness = fitness
if np.isnan(fitness): return 2.
else: return -fitness
def print_status(self, mins,means,vector,txt):
print txt,mins, means, list(vector)
class EvolveNoiseFromHistStd(object):
def __init__(self, suddenness,numChanges,args,dim,noise = 0):
Init(self,suddenness,numChanges,args,dim,noise)
self.fname = open("./dataDE/"+str(noise)+args+str(dim)+str(suddenness)+str(numChanges)+"0STDobs.txt","w")
self.x = None
self.n = 101
self.dim = 101
self.domain = [(0.,1.)] *100 + [(0,1)]
self.bounder = [(0.,1.)] *100 + [(0,1)]
self.optimizer = differential_evolution_optimizer(self,max_iter=500 ,population_size=500,
n_cross=1,cr=0.9, eps=1e-15, show_progress=False,
save_progress=True,movAverageMutationRate = 0.1 ,noise=noise)
def target(self, vector,seed):
random.seed(100*seed+0)
d = vector[:-1]
sense = np.round(vector[-1])
timePointAll = d/np.sum(d)
timePoint = np.copy(timePointAll)
currEnv = 1
sumT = 0
prevchange = 0
np.random.shuffle(self.noise)
for i,change in enumerate(self.trajectory):
if currEnv == 0:
env = self.env[currEnv] + self.noise[i]
temp = np.copy(timePointAll)
else:
env = self.env[currEnv] - self.noise[i]
temp = np.zeros(100)
temp[self.optVal[env]] = 1.
if sense == 1:
opt = self.arrayCost[1][:,env]
else:
opt = self.arrayCost[0][:,env]
inter = change-prevchange
#print inter, envX[currEnv]
prevchange = change
if sense == 0 or self.sud == 0:
growth = np.sum(timePoint[opt>-1]*2**opt[opt>-1])
if growth == 0: return 1.
sumT += 1.*inter*np.log2(growth)
else:
t2 = temp
#First see who grows
growth = np.sum(timePoint[opt>-1]*2**opt[opt>-1])
if growth == 0: return 1.
#Now switch. Fast changes
sumT += 1.*np.log2(growth)
sumT += 1.*(inter-1)*np.log2(np.sum(t2[opt>-1]*2**opt[opt>-1]))
#print 1.*np.log(growth),1.*(inter-1)*np.log(np.sum(t2 + t2 * opt))
currEnv = self.trajectoryX[change]
#fitness = np.exp(sumT/self.trajectory[-1])-1.
fitness = sumT/self.trajectory[-1]
if 0:
penalty = 0.1*np.sum(np.abs(np.diff(timePointAll))>0.01) #0.1 for each sudden change in concentration
fitness = fitness-penalty
else:
fitness = fitness
if np.isnan(fitness): return 2.
else: return -fitness
def print_status(self, mins,means,vector,txt):
print txt,mins, means, list(vector)
def run(pF):
import time
random.seed(64+0)
if pF[3] == 100:
fname = str(pF[4])+pF[2]+str(pF[3])+str(pF[0])+str(pF[1])+"0STDobs.txt"
else:
fname = str(pF[4])+pF[2]+str(pF[3])+str(pF[0])+str(pF[1])+"0obs.txt"
if fname in os.listdir('./dataDE/'):
print fname, os.path.getsize('./dataDE/'+fname)
if os.path.getsize('./dataDE/'+fname) > 1000:
print time.ctime(os.path.getmtime('./dataDE/'+fname))
pass#return None
if pF[3] == 100:
EvolveNoiseFromHistStd(pF[0],pF[1],pF[2],dim=pF[3],noise=pF[4])
else:
EvolveNoiseFromHistLogNormal(pF[0],pF[1],pF[2],dim=pF[3],noise=pF[4])
#
def main():
from multiprocessing import Pool #Allows parallel processing
possibleFactors = []
"""
## This creates the optimal distributions for each stress levels.
for stress in range(0,101):
if stress < 10:
s = "0"+str(stress)
else:
s = str(stress)
name = "1Env_"+s+"SS"
pF =(0,1,name,1,0)
EvolveNoiseFromHistLogNormal(pF[0],pF[1],pF[2],dim=pF[3],noise=pF[4])
"""
"""
## Data for Fig. 2 and 3
names = ["2Env_NN_PEAHSS","2Env_NN_PEEHSS","2Env_NN_PEVHSS","2Env_NN_PEHHSS","2Env_NN_PELHSS"]
possibleFactors = []
for numChanges in [1,3,10,30,100]:
for sudden in range(2):
for dim in [1,2]:
for noise in [0]:
for name in names:
possibleFactors.append((sudden,numChanges,name,dim,noise))
"""
"""
## Data for Fig. S2
names = ["2Env_NN_PEAHSS","2Env_NN_PEEHSS","2Env_NN_PEVHSS","2Env_NN_PEHHSS","2Env_NN_PELHSS"]
possibleFactors = []
for numChanges in [1,3,10,30,100]:
for sudden in range(2):
for dim in [100]:
for noise in [0]:
for name in names:
possibleFactors.append((sudden,numChanges,name,dim,noise))
"""
"""
## Data for Fig. S3
names =["2Env_NN_PEAHWS","2Env_NN_PEEHWS","2Env_NN_PEVHWS","2Env_NN_PEHHWS","2Env_NN_PELHWS","2Env_NN_PEIHWS","2Env_NN_PEtHWS","2Env_NN_PEjHWS","2Env_NN_PEkHWS","2Env_NN_PEsHWS","2Env_NN_PEmHWS", "2Env_NN_PEnHWS"]
possibleFactors = []
for numChanges in [1,3,10,30,100]:
for sudden in range(2):
for dim in [2,100]:
for noise in [0]:
for name in names:
possibleFactors.append((sudden,numChanges,name,dim,noise))
"""
"""
    ## Data for Fig. 4 (noise). To create the 3 replications, replace every "0obs.txt" in this file with "1obs.txt" and then with "2obs.txt".
names = ["2Env_NN_PEAHSS","2Env_NN_PEEHSS","2Env_NN_PEVHSS","2Env_NN_PEHHSS","2Env_NN_PELHSS"]
possibleFactors = []
for dim in [1,2]:
for noise in [0.25, 0.5,0.75,1,1.5,2,3,4,5]:
for name in names:
possibleFactors.append((0,10,name,dim,noise))
possibleFactors.append((1,10,name,dim,noise))
possibleFactors.append((0,100,"2Env_NN_PEAHSS",dim,noise))
possibleFactors.append((0,100,"2Env_NN_PEEHSS",dim,noise))
possibleFactors.append((0,100,"2Env_NN_PEVHSS",dim,noise))
possibleFactors.append((0,10,"2Env_NN_PEHHSS",dim,noise))
possibleFactors.append((0,1,"2Env_NN_PELHSS",dim,noise))
possibleFactors.append((1,100,"2Env_NN_PEAHSS",dim,noise))
possibleFactors.append((1,100,"2Env_NN_PEEHSS",dim,noise))
possibleFactors.append((1,100,"2Env_NN_PEVHSS",dim,noise))
possibleFactors.append((1,10,"2Env_NN_PEHHSS",dim,noise))
possibleFactors.append((1,1,"2Env_NN_PELHSS",dim,noise))
"""
"""
## Data for Fig. 4 (3 Environments)
possibleFactors = []
for dim in [1,2,100]:
for noise in [0]:
for end in ["A","E","V","H","L"]:
possibleFactors.append((0,10,"3Env_0102_PEA"+end+"HSS",dim,noise))
possibleFactors.append((0,10,"3Env_0102_PEE"+end+"HSS",dim,noise))
#possibleFactors.append((0,10,"3Env_0102_PEV"+end+"HSS",dim,noise))
#possibleFactors.append((0,10,"3Env_0102_PEH"+end+"HSS",dim,noise))
#possibleFactors.append((0,10,"3Env_0102_PEL"+end+"HSS",dim,noise))
#possibleFactors.append((1,100,"3Env_0102_PEA"+end+"HSS",dim,noise))
#possibleFactors.append((1,100,"3Env_0102_PEE"+end+"HSS",dim,noise))
#possibleFactors.append((1,100,"3Env_0102_PEV"+end+"HSS",dim,noise))
#possibleFactors.append((1,10,"3Env_0102_PEH"+end+"HSS",dim,noise))
#possibleFactors.append((1,1,"3Env_0102_PEL"+end+"HSS",dim,noise))
"""
pool = Pool(processes=8)
pool.map(run, possibleFactors)
pool.close()
pool.join() #zombie processes without this, will fill up memory
print "OK"
if __name__ == "__main__":
main()
#EvolveNoiseFromHistStd(1,1,"2Env_NN_PEVHSS",dim=100,noise=0)
```
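For reference, a single configuration can be run directly without the multiprocessing pool in `main()`. The snippet below is a minimal sketch meant to sit at the end of the script above (so `run` is in scope); the environment name and the `./dataDE/` output folder are assumptions copied from the commented experiment blocks, not verified values.

```python
# Minimal single-job sketch: replace the call to main() above with a direct call to run().
# The tuple layout is (suddenness, numChanges, environment_name, dim, noise); the
# environment name below is an assumption taken from the commented blocks in main().
run((0, 10, "2Env_NN_PEAHSS", 2, 0))
```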
|
{
"source": "jgarciab/guidedupe",
"score": 3
}
|
#### File: guidedupe/src/main.py
```python
from PySide6.QtWidgets import QMainWindow, QApplication, QFileDialog
from PySide6.QtUiTools import QUiLoader
import sys
import csvhelpers
import dedupe
class MainWindow(QMainWindow):
def __init__(self):
super().__init__()
#Load UI created in QTDesign
loader = QUiLoader()
self.ui = loader.load("ui/mainwindow.ui", None)
self.ui.show()
self.gridStackedWidget = self.ui.gridStackedWidget
#Define the two buttons to load the files
self.file1PushButton = self.ui.file1PushButton
self.file2PushButton = self.ui.file2PushButton
#Boxes where the file path is displayed
self.file1LineEdit = self.ui.file1LineEdit
self.file2LineEdit = self.ui.file2LineEdit
#Connect to the boxes to select the file
self.file1PushButton.clicked.connect(lambda: self._open_file_dialog(self.file1LineEdit))
self.file2PushButton.clicked.connect(lambda: self._open_file_dialog(self.file2LineEdit))
#When the load data button is clicked, run run_csvlink
self.ui.loadPushButton.clicked.connect(self.run_csvlink)
#Move to the next tab (select columns)
self.ui.loadPushButton.clicked.connect(lambda: self.gridStackedWidget.setCurrentIndex(1))
self.ui.backPushButton.clicked.connect(lambda: self.gridStackedWidget.setCurrentIndex(0))
#When continue is clicked
self.ui.continuePushButton.clicked.connect(self.define_field_names)
self.ui.continuePushButton.clicked.connect(lambda: self.gridStackedWidget.setCurrentIndex(2))
# self.ui.file1SelectColumn1.currentTextChanged()
# self.ui.file1SelectColumn2.currentTextChanged()
# self.ui.file1SelectColumn3.currentTextChanged()
def _open_file_dialog(self, line_edit):
#Open the file
filename = QFileDialog.getOpenFileName(self, "Open File", "", "Data (*.csv *.tsv *.txt *.xlsx)")[0]
line_edit.setText(filename)
def read_file(self,filename):
return open(filename, encoding="utf-8").read()
def run_csvlink(self):
#Read the data
self.data_1 = csvhelpers.readData(self.read_file(self.file1LineEdit.text()), "",
delimiter=",",
prefix=None)
self.data_2 = csvhelpers.readData(self.read_file(self.file2LineEdit.text()), "",
delimiter=",",
prefix=None)
self.select_columns()
def select_columns(self):
#Select column names
cols1 = list(list(self.data_1.values())[0].keys())
cols1 = [_ for _ in cols1 if _ != "unique_id" ]
cols2 = list(list(self.data_2.values())[0].keys())
cols2 = [_ for _ in cols2 if _ != "unique_id" ]
#Define boxes
self.boxes1 = [self.ui.file1SelectColumn1,self.ui.file1SelectColumn2,self.ui.file1SelectColumn3]
self.boxes2 = [self.ui.file2SelectColumn1,self.ui.file2SelectColumn2,self.ui.file2SelectColumn3]
#Set up the options (columns)
for box in self.boxes1:
box.addItems(cols1)
for box in self.boxes2:
box.addItems(cols2)
def define_field_names(self):
#Cols in data1
self.field_names_1 = [_.currentText() for _ in self.boxes1 if _.currentText() != ""]
self.field_names_2 = [_.currentText() for _ in self.boxes2 if _.currentText() != ""]
        #Remap columns if necessary
if self.field_names_1 != self.field_names_2:
for record_id, record in self.data_2.items():
remapped_record = {}
for new_field, old_field in zip(self.field_names_1,
self.field_names_2):
remapped_record[new_field] = record[old_field]
self.data_2[record_id] = remapped_record
def training(self):
# Start up dedupe
deduper = dedupe.RecordLink(self.field_definition)
# Speed up by finding identical matches
fields = {variable.field for variable in deduper.data_model.primary_fields}
        (nonexact_1,
         nonexact_2,
         exact_pairs) = self.exact_matches(self.data_1, self.data_2, fields)
# Set up our data sample
#TODO display logging.info('taking a sample of %d possible pairs', self.sample_size)
deduper.sample(nonexact_1, nonexact_2, self.sample_size)
# Perform standard training procedures
self.dedupe_training(deduper)
        #TODO: Display "blocking"
#TODO display logging.info('finding a good threshold with a recall_weight of %s' %
# self.recall_weight)
        threshold = deduper.threshold(self.data_1, self.data_2,
                                      recall_weight=self.recall_weight)
#TODO display logging.info('clustering...')
        clustered_dupes = deduper.match(self.data_1, self.data_2, threshold)
clustered_dupes.extend(exact_pairs)
#TODO display logging.info('# duplicate sets %s' % len(clustered_dupes))
#Save the file to the downloads folder (TODO: Ask where to save it)
self.download_file(clustered_dupes)
def download_file(self,clustered_dupes):
#Select folder
#Select filename
write_function = csvhelpers.writeLinkedResults
with open(self.output_file, 'w', encoding='utf-8') as output_file:
write_function(clustered_dupes, self.input_1, self.input_2,
"~/Downloads/output_dedupe.csv", False)
    def exact_matches(self, data_1, data_2, match_fields):
nonexact_1 = {}
nonexact_2 = {}
exact_pairs = []
redundant = {}
for key, record in data_1.items():
record_hash = hash(tuple(record[f] for f in match_fields))
redundant[record_hash] = key
for key_2, record in data_2.items():
record_hash = hash(tuple(record[f] for f in match_fields))
if record_hash in redundant:
key_1 = redundant[record_hash]
exact_pairs.append(((key_1, key_2), 1.0))
del redundant[record_hash]
else:
nonexact_2[key_2] = record
for key_1 in redundant.values():
nonexact_1[key_1] = data_1[key_1]
return nonexact_1, nonexact_2, exact_pairs
if __name__ == "__main__":
app = QApplication([])
window = MainWindow()
# Start the event loop
sys.exit(app.exec())
```
|
{
"source": "jgarciaf106/react-flask-boilerplate",
"score": 3
}
|
#### File: src/api/routes.py
```python
from flask import Flask, request, jsonify, url_for, Blueprint
from api.models import db, User
from api.utils import generate_sitemap, APIException
api = Blueprint('api', __name__)
# Sample api route
@api.route('/hello', methods=['POST', 'GET'])
def handle_hello():
response_body = {
"message": "Hello! I'm a message that came from the backend"
}
return jsonify(response_body), 200
```
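The blueprint above only declares routes; it still needs to be registered on a Flask application. A minimal sketch of that wiring follows; the `/api` prefix and the import path are assumptions based on the file layout, not code from the boilerplate itself.

```python
# Hypothetical application wiring for the blueprint above; the URL prefix is an assumption.
from flask import Flask
from api.routes import api

app = Flask(__name__)
app.register_blueprint(api, url_prefix="/api")

if __name__ == "__main__":
    # GET or POST http://localhost:5000/api/hello now returns the sample message.
    app.run(debug=True)
```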
|
{
"source": "J-Garcke-SCAI/jaxkern",
"score": 3
}
|
#### File: jaxkern/jaxkern/data.py
```python
from typing import Tuple
import jax.numpy as np
import numpy as onp
from sklearn.utils import check_random_state
def get_data(
N: int = 30,
input_noise: float = 0.15,
output_noise: float = 0.15,
N_test: int = 400,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, None]:
"""Generates a simple non-linear function"""
onp.random.seed(0)
X = np.linspace(-1, 1, N)
Y = X + 0.2 * np.power(X, 3.0) + 0.5 * np.power(0.5 + X, 2.0) * np.sin(4.0 * X)
Y += output_noise * onp.random.randn(N)
Y -= np.mean(Y)
Y /= np.std(Y)
X += input_noise * onp.random.randn(N)
assert X.shape == (N,)
assert Y.shape == (N,)
X_test = np.linspace(-1.2, 1.2, N_test)
return X[:, None], Y[:, None], X_test[:, None], None
def near_square_wave(
n_train: int = 80,
input_noise: float = 0.15,
output_noise: float = 0.3,
n_test: int = 400,
random_state: int = 123,
):
"""Generates a near-square wave"""
# function
f = lambda x: np.sin(1.0 * np.pi / 1.6 * np.cos(5 + 0.5 * x))
# create clean inputs
x_mu = np.linspace(-10, 10, n_train)
# clean outputs
y = f(x_mu)
# generate noise
x_rng = check_random_state(random_state)
y_rng = check_random_state(random_state + 1)
# noisy inputs
x = x_mu + input_noise * x_rng.randn(x_mu.shape[0])
# noisy outputs
y = f(x_mu) + output_noise * y_rng.randn(x_mu.shape[0])
# test points
x_test = np.linspace(-12, 12, n_test) + x_rng.randn(n_test)
y_test = f(np.linspace(-12, 12, n_test))
x_test = np.sort(x_test)
return x[:, None], y[:, None], x_test[:, None], y_test
```
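A short usage sketch for the two generators above; the import path assumes `jaxkern` is installed as a package, and the printed shapes follow directly from the function bodies.

```python
# Usage sketch (assumes the jaxkern package is importable).
from jaxkern.data import get_data, near_square_wave

X, y, X_test, _ = get_data(N=30, N_test=400)
print(X.shape, y.shape, X_test.shape)        # (30, 1) (30, 1) (400, 1)

x, y_sq, x_test, y_test = near_square_wave(n_train=80, n_test=400)
print(x.shape, x_test.shape, y_test.shape)   # (80, 1) (400, 1) (400,)
```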
#### File: jaxkern/jaxkern/dependence.py
```python
from jaxkern.dist import sqeuclidean_distance
from typing import Callable, Dict
import jax
import jax.numpy as np
from jaxkern.kernels import gram, covariance_matrix
from jaxkern.utils import centering
jax_np = jax.numpy.ndarray
def hsic(
X: np.ndarray,
Y: np.ndarray,
kernel: Callable,
params_x: Dict[str, float],
params_y: Dict[str, float],
bias: bool = False,
) -> float:
"""Normalized HSIC (Tangent Kernel Alignment)
A normalized variant of HSIC method which divides by
the HS-Norm of each dataset.
Parameters
----------
X : jax.numpy.ndarray
the input value for one dataset
Y : jax.numpy.ndarray
the input value for the second dataset
kernel : Callable
the kernel function to be used for each of the kernel
calculations
params_x : Dict[str, float]
a dictionary of parameters to be used for calculating the
kernel function for X
params_y : Dict[str, float]
a dictionary of parameters to be used for calculating the
kernel function for Y
Returns
-------
    hsic_value : float
        the empirical hsic value.
Notes
-----
This is a metric that is similar to the correlation, [0,1]
"""
# kernel matrix
Kx = covariance_matrix(kernel, params_x, X, X)
Ky = covariance_matrix(kernel, params_y, Y, Y)
Kx = centering(Kx)
Ky = centering(Ky)
hsic_value = np.sum(Kx * Ky)
if bias:
bias = 1 / (Kx.shape[0] ** 2)
else:
bias = 1 / (Kx.shape[0] - 1) ** 2
return bias * hsic_value
def nhsic_cka(
X: np.ndarray,
Y: np.ndarray,
kernel: Callable,
params_x: Dict[str, float],
params_y: Dict[str, float],
) -> float:
"""Normalized HSIC (Tangent Kernel Alignment)
A normalized variant of HSIC method which divides by
the HS-Norm of each dataset.
Parameters
----------
X : jax.numpy.ndarray
the input value for one dataset
Y : jax.numpy.ndarray
the input value for the second dataset
kernel : Callable
the kernel function to be used for each of the kernel
calculations
params_x : Dict[str, float]
a dictionary of parameters to be used for calculating the
kernel function for X
params_y : Dict[str, float]
a dictionary of parameters to be used for calculating the
kernel function for Y
Returns
-------
cka_value : float
the normalized hsic value.
Notes
-----
This is a metric that is similar to the correlation, [0,1]
References
----------
"""
# calculate hsic normally (numerator)
# Pxy = hsic(X, Y, kernel, params_x, params_y)
# # calculate denominator (normalize)
# Px = np.sqrt(hsic(X, X, kernel, params_x, params_x))
# Py = np.sqrt(hsic(Y, Y, kernel, params_y, params_y))
# # print(Pxy, Px, Py)
# # kernel tangent alignment value (normalized hsic)
# cka_value = Pxy / (Px * Py)
Kx = covariance_matrix(kernel, params_x, X, X)
Ky = covariance_matrix(kernel, params_y, Y, Y)
Kx = centering(Kx)
Ky = centering(Ky)
cka_value = np.sum(Kx * Ky) / np.linalg.norm(Kx) / np.linalg.norm(Ky)
return cka_value
def nhsic_nbs(
X: np.ndarray,
Y: np.ndarray,
kernel: Callable,
params_x: Dict[str, float],
params_y: Dict[str, float],
) -> float:
"""Normalized Bures Similarity (NBS)
A normalized variant of HSIC method which divides by
the HS-Norm of the eigenvalues of each dataset.
    .. math::
        \\rho(K_x, K_y) =
        \\frac{\\text{Tr}\\big[ (K_x^{1/2} K_y K_x^{1/2})^{1/2} \\big]}
              {\\sqrt{\\text{Tr}(K_x)\\,\\text{Tr}(K_y)}}
Parameters
----------
X : jax.numpy.ndarray
the input value for one dataset
Y : jax.numpy.ndarray
the input value for the second dataset
kernel : Callable
the kernel function to be used for each of the kernel
calculations
params_x : Dict[str, float]
a dictionary of parameters to be used for calculating the
kernel function for X
params_y : Dict[str, float]
a dictionary of parameters to be used for calculating the
kernel function for Y
Returns
-------
cka_value : float
the normalized hsic value.
Notes
-----
This is a metric that is similar to the correlation, [0,1]
References
----------
@article{JMLR:v18:16-296,
author = {<NAME> and <NAME> and <NAME> and <NAME>},
title = {Quantifying the Informativeness of Similarity Measurements},
journal = {Journal of Machine Learning Research},
year = {2017},
volume = {18},
number = {76},
pages = {1-61},
url = {http://jmlr.org/papers/v18/16-296.html}
}
"""
# calculate hsic normally (numerator)
# Pxy = hsic(X, Y, kernel, params_x, params_y)
# # calculate denominator (normalize)
# Px = np.sqrt(hsic(X, X, kernel, params_x, params_x))
# Py = np.sqrt(hsic(Y, Y, kernel, params_y, params_y))
# # print(Pxy, Px, Py)
# # kernel tangent alignment value (normalized hsic)
# cka_value = Pxy / (Px * Py)
Kx = covariance_matrix(kernel, params_x, X, X)
Ky = covariance_matrix(kernel, params_y, Y, Y)
Kx = centering(Kx)
Ky = centering(Ky)
# numerator
numerator = np.real(np.linalg.eigvals(np.dot(Kx, Ky)))
# clip rogue numbers
numerator = np.sqrt(np.clip(numerator, 0.0))
numerator = np.sum(numerator)
# denominator
denominator = np.sqrt(np.trace(Kx) * np.trace(Ky))
# return nbs value
return numerator / denominator
def nhsic_ka(
X: np.ndarray,
Y: np.ndarray,
kernel: Callable,
params_x: Dict[str, float],
params_y: Dict[str, float],
) -> float:
Kx = covariance_matrix(kernel, params_x, X, X)
Ky = covariance_matrix(kernel, params_y, Y, Y)
cka_value = np.sum(Kx * Ky) / np.linalg.norm(Kx) / np.linalg.norm(Ky)
return cka_value
def nhsic_cca(
X: np.ndarray,
Y: np.ndarray,
kernel: Callable,
params_x: Dict[str, float],
params_y: Dict[str, float],
epsilon: float = 1e-5,
bias: bool = False,
) -> float:
"""Normalized HSIC (Tangent Kernel Alignment)
A normalized variant of HSIC method which divides by
the HS-Norm of each dataset.
Parameters
----------
X : jax.numpy.ndarray
the input value for one dataset
Y : jax.numpy.ndarray
the input value for the second dataset
kernel : Callable
the kernel function to be used for each of the kernel
calculations
params_x : Dict[str, float]
a dictionary of parameters to be used for calculating the
kernel function for X
params_y : Dict[str, float]
a dictionary of parameters to be used for calculating the
kernel function for Y
Returns
-------
cka_value : float
the normalized hsic value.
Notes
-----
This is a metric that is similar to the correlation, [0,1]
"""
n_samples = X.shape[0]
# kernel matrix
Kx = gram(kernel, params_x, X, X)
Ky = gram(kernel, params_y, Y, Y)
# center kernel matrices
Kx = centering(Kx)
Ky = centering(Ky)
K_id = np.eye(Kx.shape[0])
Kx_inv = np.linalg.inv(Kx + epsilon * n_samples * K_id)
Ky_inv = np.linalg.inv(Ky + epsilon * n_samples * K_id)
Rx = np.dot(Kx, Kx_inv)
Ry = np.dot(Ky, Ky_inv)
hsic_value = np.sum(Rx * Ry)
if bias:
bias = 1 / (Kx.shape[0] ** 2)
else:
bias = 1 / (Kx.shape[0] - 1) ** 2
return bias * hsic_value
def _hsic_uncentered(
X: np.ndarray,
Y: np.ndarray,
kernel: Callable,
params_x: Dict[str, float],
params_y: Dict[str, float],
) -> float:
"""A method to calculate the uncentered HSIC version"""
# kernel matrix
Kx = gram(kernel, params_x, X, X)
Ky = gram(kernel, params_y, Y, Y)
#
K = np.dot(Kx, Ky.T)
hsic_value = np.mean(K)
return hsic_value
def mmd_mi(
X: np.ndarray,
Y: np.ndarray,
kernel: Callable,
params_x: Dict[str, float],
params_y: Dict[str, float],
) -> float:
"""Maximum Mean Discrepancy
Parameters
----------
X : jax.numpy.ndarray
array-like of shape (n_samples, n_features)
Y : np.ndarray
The data matrix.
Notes
-----
This method is equivalent to the HSIC method.
"""
# calculate kernel matrices
Kx = gram(kernel, params_x, X, X)
Ky = gram(kernel, params_y, Y, Y)
# center kernel matrices
Kx = centering(Kx)
Ky = centering(Ky)
    # get the expectations
A = np.mean(Kx * Ky)
B = np.mean(np.mean(Kx, axis=0) * np.mean(Ky, axis=0))
C = np.mean(Kx) * np.mean(Ky)
# calculate the mmd value
mmd_value = A - 2 * B + C
return mmd_value
def mmd(
X: np.ndarray,
Y: np.ndarray,
kernel: Callable,
params_x: Dict[str, float],
params_y: Dict[str, float],
params_xy: Dict[str, float],
bias: bool = False,
center: bool = False,
) -> float:
"""Maximum Mean Discrepancy
Parameters
----------
X : jax.numpy.ndarray
array-like of shape (n_samples, n_features)
Y : np.ndarray
The data matrix.
Notes
-----
    This is the (biased or unbiased) empirical two-sample MMD between X and Y.
"""
n_samples, m_samples = X.shape[0], Y.shape[0]
# constants
a00 = 1.0 / (n_samples * (n_samples - 1.0))
a11 = 1.0 / (m_samples * (m_samples - 1.0))
a01 = -1.0 / (n_samples * m_samples)
# kernel matrices
Kx = gram(kernel, params_x, X, X)
Ky = gram(kernel, params_y, Y, Y)
Kxy = gram(kernel, params_xy, X, Y)
if bias:
mmd = np.mean(Kx) + np.mean(Ky) - 2 * np.mean(Kxy)
return np.where(mmd >= 0.0, np.sqrt(mmd), 0.0)
else:
return (
2 * a01 * np.mean(Kxy)
+ a00 * (np.sum(Kx) - n_samples)
+ a11 * (np.sum(Ky) - m_samples)
)
```
#### File: jaxkern/scripts/demo_hsic.py
```python
import jax
import jax.numpy as np
import numpy as onp
from jaxkern.kernels import covariance_matrix, gram, rbf_kernel
from jaxkern.kernels.dependence import nhsic_cka, hsic, nhsic_ka, nhsic_cca
from jaxkern.kernels.utils import centering, gamma_from_sigma
from jaxkern.kernels.utils.sigma import estimate_sigma_median
def main():
# generate some fake linear data
onp.random.seed(123)
X = onp.random.randn(1000, 2)
Y = 2 * X + 0.05 * onp.random.randn(1000, 2)
# calculate the kernel matrix
sigma_x = estimate_sigma_median(X, X) # estimate sigma value
params_x = {"gamma": gamma_from_sigma(sigma_x)}
sigma_y = estimate_sigma_median(Y, Y) # estimate sigma value
params_y = {"gamma": gamma_from_sigma(sigma_y)}
# calculate hsic
hsic_value = hsic(X, Y, rbf_kernel, params_x, params_y)
print(f"HSIC: {hsic_value:.4f}")
# calculate centered kernel alignment
cka_value = nhsic_cka(X, Y, rbf_kernel, params_x, params_y)
print(f"nHSIC (CKA): {cka_value:.4f}")
nhsic_cca_value = nhsic_cca(X, Y, rbf_kernel, params_x, params_y)
print(f"nHSIC (CCA): {nhsic_cca_value:.4f}")
# calculate kernel alignment
ka_value = nhsic_ka(X, Y, rbf_kernel, params_x, params_y)
print(f"nHSIC (CCA): {ka_value:.4f}")
if __name__ == "__main__":
main()
```
#### File: jaxkern/tests/test_dists.py
```python
import jax.numpy as np
import numpy as onp
from scipy.spatial.distance import pdist, squareform
from sklearn.metrics.pairwise import euclidean_distances
from jaxkern.dist import distmat, pdist_squareform, sqeuclidean_distance
onp.random.seed(123)
def test_distmat():
X = onp.random.rand(100, 2)
dist = euclidean_distances(X, X, squared=True)
dist_ = distmat(sqeuclidean_distance, X, X)
onp.testing.assert_array_almost_equal(dist, onp.array(dist_))
def test_pdist_squareform():
X = onp.random.randn(100, 2)
dist = squareform(pdist(X, metric="sqeuclidean"))
dist_ = pdist_squareform(X, X)
onp.testing.assert_array_almost_equal(dist, onp.array(dist_), decimal=5)
```
|
{
"source": "jgard1/continual-learning",
"score": 2
}
|
#### File: jgard1/continual-learning/ablated_startle_exemplars.py
```python
import abc
import torch
from torch import nn
from torch.nn import functional as F
import utils
import copy
import numpy as np
from kmeans_pytorch import kmeans, kmeans_predict
import logging
import heapq
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
class ExemplarHandler(nn.Module, metaclass=abc.ABCMeta):
"""Abstract module for a classifier that can store and use exemplars.
    Adds exemplar-methods to subclasses, and requires them to provide a 'feature_extractor' method."""
def __init__(self):
super().__init__()
# list with exemplar-sets
self.exemplar_sets = [] #--> each exemplar_set is an <np.array> of N images with shape (N, Ch, H, W)
self.exemplar_means = []
self.compute_means = True
# settings
self.memory_budget = 2000
self.norm_exemplars = True
self.herding = True
def _device(self):
return next(self.parameters()).device
def _is_on_cuda(self):
return next(self.parameters()).is_cuda
@abc.abstractmethod
def feature_extractor(self, images):
pass
####----MANAGING EXEMPLAR SETS----####
    # compute the startle (inverse familiarity) score for a candidate
def startle(self, cur_features, candidate):
n = cur_features.shape[0]
candidate_expanded = candidate.expand_as(cur_features)
# cos = nn.CosineSimilarity(dim=1, eps=1e-6)
# all_cos_sims = cos(cur_features, candidate_expanded)
dists = (cur_features - candidate_expanded).pow(2).sum(dim=1).squeeze() # (batch_size, n_classes)
startle = torch.sum(dists, dim = 0).item() /n
# startle = 1/familiarity
# logging.info("startle: "+str(startle))
return startle
    # simple helper function for removing the row at index [idx] from a feature tensor
def exclude_idx(self, start_set, idx):
mod_set = copy.deepcopy(start_set)
        # super sketchy, not sure if it does the expected behavior
if idx < start_set.shape[0] - 1:
mod_set = torch.cat((mod_set[:idx,:], mod_set[(idx + 1):, :]), axis = 0)
else:
mod_set = mod_set[:idx,:]
return mod_set
def reduce_exemplar_sets(self, m):
for y, P_y in enumerate(self.exemplar_sets):
self.exemplar_sets[y] = P_y[:m]
def construct_exemplar_set(self, dataset, n):
'''Construct set of [n] exemplars from [dataset] using 'herding'.
Note that [dataset] should be from specific class; selected sets are added to [self.exemplar_sets] in order.'''
# set model to eval()-mode
logging.info("entered ExemplarHandler.construct_exemplar_set(self, dataset, n)")
mode = self.training
self.eval()
n_max = len(dataset)
exemplar_set = []
if self.herding:
logging.info("herding enabled")
# compute features for each example in [dataset]
first_entry = True
dataloader = utils.get_data_loader(dataset, 128, cuda=self._is_on_cuda())
for (image_batch, _) in dataloader:
image_batch = image_batch.to(self._device())
with torch.no_grad():
feature_batch = self.feature_extractor(image_batch).cpu()
if first_entry:
features = feature_batch
first_entry = False
else:
features = torch.cat([features, feature_batch], dim=0)
if self.norm_exemplars:
features = F.normalize(features, p=2, dim=1)
# one by one, select exemplar that makes mean of all exemplars as close to [class_mean] as possible
exemplar_features = torch.zeros_like(features[:min(n, n_max)])
# initialize a min pq for getting rid of most familiar items
num_exemplars = min(n, n_max)
start_set = features[0:num_exemplars]
heap = []
for feature_idx, feature in enumerate(start_set):
mod_set = self.exclude_idx(start_set, feature_idx)
startle = self.startle(mod_set, feature)
heap.append((startle, feature_idx))
heapq.heapify(heap)
min_startle, cur_min_idx = heapq.heappop(heap)
# logging.info("heap: "+str(heap))
# iterate through remaining features, greedily maximizing startle
idxs = [v for k, v in heap]
cur_set = features[idxs]
for idx in range(num_exemplars, len(features)):
feature = features[idx]
mod_set = self.exclude_idx(start_set, feature_idx)
startle = self.startle(mod_set, feature)
if startle > min_startle:
min_startle = startle
heapq.heappush(heap, (startle, idx))
min_startle, cur_min_idx = heapq.heappop(heap)
idxs = [v for k, v in heap]
cur_set = features[idxs]
all_idxs = idxs + [cur_min_idx]
for k, idx in enumerate(all_idxs):
exemplar_set.append(dataset[idx][0].numpy())
exemplar_features[k] = copy.deepcopy(features[idx])
else:
logging.info("herding not enabled")
indeces_selected = np.random.choice(n_max, size=min(n, n_max), replace=False)
for k in indeces_selected:
exemplar_set.append(dataset[k][0].numpy())
# add this [exemplar_set] as a [n]x[ich]x[isz]x[isz] to the list of [exemplar_sets]
self.exemplar_sets.append(np.array(exemplar_set))
# set mode of model back
self.train(mode=mode)
####----CLASSIFICATION----####
# from https://stackoverflow.com/questions/21030391/how-to-normalize-an-array-in-numpy-to-a-unit-vector
def np_normalize(self, v):
norm=np.linalg.norm(v, ord=1)
if norm==0:
norm=np.finfo(v.dtype).eps
return v/norm
def classify_with_exemplars(self, x, allowed_classes=None):
"""Classify images by nearest-means-of-exemplars (after transform to feature representation)
INPUT: x = <tensor> of size (bsz,ich,isz,isz) with input image batch
allowed_classes = None or <list> containing all "active classes" between which should be chosen
OUTPUT: preds = <tensor> of size (bsz,)"""
# Set model to eval()-mode
mode = self.training
self.eval()
batch_size = x.size(0)
# Do the exemplar-means need to be recomputed?
if self.compute_means:
exemplar_means = [] #--> list of 1D-tensors (of size [feature_size]), list is of length [n_classes]
for P_y in self.exemplar_sets:
exemplars = []
# Collect all exemplars in P_y into a <tensor> and extract their features
for ex in P_y:
exemplars.append(torch.from_numpy(ex))
exemplars = torch.stack(exemplars).to(self._device())
with torch.no_grad():
features = self.feature_extractor(exemplars)
if self.norm_exemplars:
features = F.normalize(features, p=2, dim=1)
# Calculate their mean and add to list
mu_y = features.mean(dim=0, keepdim=True)
if self.norm_exemplars:
mu_y = F.normalize(mu_y, p=2, dim=1)
exemplar_means.append(mu_y.squeeze()) # -> squeeze removes all dimensions of size 1
# Update model's attributes
self.exemplar_means = exemplar_means
self.compute_means = False
# Reorganize the [exemplar_means]-<tensor>
exemplar_means = self.exemplar_means if allowed_classes is None else [
self.exemplar_means[i] for i in allowed_classes
]
means = torch.stack(exemplar_means) # (n_classes, feature_size)
means = torch.stack([means] * batch_size) # (batch_size, n_classes, feature_size)
means = means.transpose(1, 2) # (batch_size, feature_size, n_classes)
# Extract features for input data (and reorganize)
with torch.no_grad():
feature = self.feature_extractor(x) # (batch_size, feature_size)
if self.norm_exemplars:
feature = F.normalize(feature, p=2, dim=1)
feature = feature.unsqueeze(2) # (batch_size, feature_size, 1)
feature = feature.expand_as(means) # (batch_size, feature_size, n_classes)
# For each data-point in [x], find which exemplar-mean is closest to its extracted features
dists = (feature - means).pow(2).sum(dim=1).squeeze() # (batch_size, n_classes)
_, preds = dists.min(1)
# Set mode of model back
self.train(mode=mode)
return preds
```
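`ExemplarHandler` is abstract: a concrete classifier must inherit from it and supply `feature_extractor`. The sketch below shows the minimal shape of such a subclass; the tiny linear backbone, the 28x28 input size and the call sequence are placeholders for illustration, not code from this repository.

```python
# Hypothetical subclass sketch; backbone and input size are placeholders.
from torch import nn


class TinyExemplarModel(ExemplarHandler):
    def __init__(self):
        super().__init__()
        self.backbone = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 64), nn.ReLU())

    def feature_extractor(self, images):
        # maps an image batch to the feature space used for startle scoring
        return self.backbone(images)


# model = TinyExemplarModel()
# model.construct_exemplar_set(class_dataset, n=20)    # greedy high-startle selection
# preds = model.classify_with_exemplars(image_batch)   # nearest-mean-of-exemplars
```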
#### File: jgard1/continual-learning/kmeans_exemplars.py
```python
import abc
import torch
from torch import nn
from torch.nn import functional as F
import utils
import copy
import numpy as np
from kmeans_pytorch import kmeans, kmeans_predict
import logging
#
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
class ExemplarHandler(nn.Module, metaclass=abc.ABCMeta):
"""Abstract module for a classifier that can store and use exemplars.
    Adds exemplar-methods to subclasses, and requires them to provide a 'feature_extractor' method."""
def __init__(self):
super().__init__()
# list with exemplar-sets
self.exemplar_sets = [] #--> each exemplar_set is an <np.array> of N images with shape (N, Ch, H, W)
self.exemplar_means = []
self.compute_means = True
# settings
self.memory_budget = 2000
self.norm_exemplars = True
self.herding = True
def _device(self):
return next(self.parameters()).device
def _is_on_cuda(self):
return next(self.parameters()).is_cuda
@abc.abstractmethod
def feature_extractor(self, images):
pass
####----MANAGING EXEMPLAR SETS----####
# get the point nearest the centroid
def get_point_nearest_centroid(self, centroid, cluster_features, original_idxs):
# https://discuss.pytorch.org/t/among-a-set-of-reference-vectors-how-to-return-the-closest-one-to-a-given-vector/20423
# logging.info("get_point_nearest_centroid: cluster_features = "+str(cluster_features))
# logging.info("centroid = "+str(centroid))
distances = torch.sqrt(torch.sum((cluster_features.squeeze(1) - centroid) ** 2, dim=1))
# logging.info("distances = "+str(distances))
# logging.info("get_point_nearest_centroid: cluster_features.shape = "+str(cluster_features.shape))
# logging.info("centroid.shape = "+str(centroid.shape))
# logging.info("distances.shape = "+str(distances.shape))
# memes = torch.unsqueeze(distances, 0)
# logging.info("memes.shape = "+str(memes.shape))
min_index = np.argmin(distances.numpy())
original_idx = original_idxs[str(min_index)]
return cluster_features[min_index], original_idx
# returns list of tupples of form [(feature, original dataset idx),...]
def get_all_points_in_cluster(self, features, cluster_ids_x, cluster_number):
# error memes here. This line from
# https://stackoverflow.com/questions/47863001/how-pytorch-tensor-get-the-index-of-specific-value
# logging.info("get_all_points_in_cluster: cluster_number="+str(cluster_number))
# logging.info("get_all_points_in_cluster: cluster_ids_x="+str(cluster_ids_x))
original_dataset_idxs = ((cluster_ids_x == cluster_number).nonzero(as_tuple=False))
# logging.info("kmeans_exemplars.py: original_datset_idxs = "+str(original_dataset_idxs))
ret_features = features[original_dataset_idxs]
cluster_to_original_idxs = {}
for cluster_idx, original_idx in enumerate(original_dataset_idxs):
cluster_to_original_idxs[str(cluster_idx)] = original_idx
return ret_features, cluster_to_original_idxs
def get_cluster_exemplars(self, features, num_clusters):
logging.info("total number of features: "+str(len(features)))
indices = torch.randperm(len(features))[:3000]
subsample = features[indices]
cluster_ids_x, cluster_centers = kmeans(
X=subsample, num_clusters=num_clusters, distance='euclidean', device=self._device()
)
original_idx_map = {}
ret_features = []
for cluster_number, centroid in enumerate(cluster_centers):
cluster_features, cluster_to_original_idxs = self.get_all_points_in_cluster(subsample, cluster_ids_x, cluster_number)
selected_feature, selected_feature_idx = self.get_point_nearest_centroid(centroid, cluster_features, cluster_to_original_idxs)
ret_features.append(selected_feature)
# maps back to idx in entire dataset of features
original_idx_map[str(cluster_number)] = selected_feature_idx
return torch.stack(ret_features), original_idx_map
def reduce_exemplar_sets(self, m):
for y, P_y in enumerate(self.exemplar_sets):
self.exemplar_sets[y] = P_y[:m]
def construct_exemplar_set(self, dataset, n):
'''Construct set of [n] exemplars from [dataset] using 'herding'.
Note that [dataset] should be from specific class; selected sets are added to [self.exemplar_sets] in order.'''
# set model to eval()-mode
logging.info("entered ExemplarHandler.construct_exemplar_set(self, dataset, n)")
mode = self.training
self.eval()
n_max = len(dataset)
exemplar_set = []
if self.herding:
logging.info("herding enabled")
# compute features for each example in [dataset]
first_entry = True
dataloader = utils.get_data_loader(dataset, 128, cuda=self._is_on_cuda())
for (image_batch, _) in dataloader:
image_batch = image_batch.to(self._device())
with torch.no_grad():
feature_batch = self.feature_extractor(image_batch).cpu()
if first_entry:
features = feature_batch
first_entry = False
else:
features = torch.cat([features, feature_batch], dim=0)
if self.norm_exemplars:
features = F.normalize(features, p=2, dim=1)
# josh memes mod: here the features become just the near centroids
logging.info("Doing herding, creating a total of "+str(min(n, n_max))+" clusters.")
features_kmeans, original_idxs_map = self.get_cluster_exemplars(features, min(n, n_max))
# calculate mean of all features
class_mean = torch.mean(features_kmeans, dim=0, keepdim=True)
if self.norm_exemplars:
class_mean = F.normalize(class_mean, p=2, dim=1)
# one by one, select exemplar that makes mean of all exemplars as close to [class_mean] as possible
exemplar_features = torch.zeros_like(features_kmeans[:min(n, n_max)])
list_of_selected = []
for k in range(min(n, n_max)):
if k>0:
# logging.info("k>0")
exemplar_sum = torch.sum(exemplar_features[:k], dim=0).unsqueeze(0)
features_means = (features_kmeans + exemplar_sum)/(k+1)
features_dists = features_means - class_mean
# logging.info("exemplar_sum: "+str(exemplar_sum))
# logging.info("features_dists: "+str(features_dists))
else:
# logging.info("k=0")
features_dists = features_kmeans - class_mean
# logging.info("exemplar_sum: "+str(exemplar_sum))
# logging.info("features_dists: "+str(features_dists))
#####################################################################################
#####################################################################################
# <NAME>mes mod: changed index_selected so that it uses our next level shit
# index_selected = np.argmin(torch.norm(features_dists, p=2, dim=1))
features_dists = features_dists.squeeze(1)
# logging.info("features_dists.shape: "+str(features_dists.shape))
# logging.info("torch.norm(features_dists, p=2, dim=1).shape: "+str(torch.norm(features_dists, p=2, dim=1).shape))
shortlist_idx_selected = (np.argmin(torch.norm(features_dists, p=2, dim=1))).item()
# logging.info("shortlist_idxs_selected.shape: "+str(shortlist_idxs_selected.shape))
# logging.info("shortlist_idxs_selected: "+str(shortlist_idxs_selected))
# logging.info("original_idxs_map: "+str(original_idxs_map))
index_selected = original_idxs_map[str(shortlist_idx_selected)].item()
# logging.info("just selected: index_selected: "+str(index_selected))
# END JOSH Memes mod ################################################################
#####################################################################################
if index_selected in list_of_selected:
logging.info("error: index_selected: "+str(index_selected))
logging.info("error: list_of_selected: "+str(list_of_selected))
raise ValueError("Exemplars should not be repeated!!!!")
list_of_selected.append(index_selected)
exemplar_set.append(dataset[index_selected][0].numpy())
exemplar_features[k] = copy.deepcopy(features_kmeans[shortlist_idx_selected])
# make sure this example won't be selected again
features_kmeans[shortlist_idx_selected] = features_kmeans[shortlist_idx_selected] + 100000000
else:
logging.info("herding not enabled")
indeces_selected = np.random.choice(n_max, size=min(n, n_max), replace=False)
for k in indeces_selected:
exemplar_set.append(dataset[k][0].numpy())
# add this [exemplar_set] as a [n]x[ich]x[isz]x[isz] to the list of [exemplar_sets]
self.exemplar_sets.append(np.array(exemplar_set))
# set mode of model back
self.train(mode=mode)
####----CLASSIFICATION----####
def classify_with_exemplars(self, x, allowed_classes=None):
"""Classify images by nearest-means-of-exemplars (after transform to feature representation)
INPUT: x = <tensor> of size (bsz,ich,isz,isz) with input image batch
allowed_classes = None or <list> containing all "active classes" between which should be chosen
OUTPUT: preds = <tensor> of size (bsz,)"""
# Set model to eval()-mode
mode = self.training
self.eval()
batch_size = x.size(0)
# Do the exemplar-means need to be recomputed?
if self.compute_means:
exemplar_means = [] #--> list of 1D-tensors (of size [feature_size]), list is of length [n_classes]
for P_y in self.exemplar_sets:
exemplars = []
# Collect all exemplars in P_y into a <tensor> and extract their features
for ex in P_y:
exemplars.append(torch.from_numpy(ex))
exemplars = torch.stack(exemplars).to(self._device())
with torch.no_grad():
features = self.feature_extractor(exemplars)
if self.norm_exemplars:
features = F.normalize(features, p=2, dim=1)
# Calculate their mean and add to list
mu_y = features.mean(dim=0, keepdim=True)
if self.norm_exemplars:
mu_y = F.normalize(mu_y, p=2, dim=1)
exemplar_means.append(mu_y.squeeze()) # -> squeeze removes all dimensions of size 1
# Update model's attributes
self.exemplar_means = exemplar_means
self.compute_means = False
# Reorganize the [exemplar_means]-<tensor>
exemplar_means = self.exemplar_means if allowed_classes is None else [
self.exemplar_means[i] for i in allowed_classes
]
means = torch.stack(exemplar_means) # (n_classes, feature_size)
means = torch.stack([means] * batch_size) # (batch_size, n_classes, feature_size)
means = means.transpose(1, 2) # (batch_size, feature_size, n_classes)
# Extract features for input data (and reorganize)
with torch.no_grad():
feature = self.feature_extractor(x) # (batch_size, feature_size)
if self.norm_exemplars:
feature = F.normalize(feature, p=2, dim=1)
feature = feature.unsqueeze(2) # (batch_size, feature_size, 1)
feature = feature.expand_as(means) # (batch_size, feature_size, n_classes)
# For each data-point in [x], find which exemplar-mean is closest to its extracted features
dists = (feature - means).pow(2).sum(dim=1).squeeze() # (batch_size, n_classes)
_, preds = dists.min(1)
# Set mode of model back
self.train(mode=mode)
return preds
```
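For either handler above, the per-class exemplar budget is normally derived from `memory_budget` and enforced with `reduce_exemplar_sets`. The split sketched below follows the usual iCaRL convention and is an assumption about intended usage, not code taken from this repository.

```python
# Hedged sketch of the conventional budget split (assumption, iCaRL-style):
# n_classes_so_far = len(model.exemplar_sets) + n_new_classes
# n_per_class = model.memory_budget // n_classes_so_far
# model.reduce_exemplar_sets(n_per_class)                    # shrink stored classes
# for class_dataset in new_class_datasets:
#     model.construct_exemplar_set(class_dataset, n_per_class)
```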
|
{
"source": "jgard1/COS598D_Assignment3",
"score": 3
}
|
#### File: nas/pdarts/darts_trainer.py
```python
import copy
import logging
import torch
import torch.nn as nn
from nni.nas.pytorch.trainer import Trainer
from nni.nas.pytorch.utils import AverageMeterGroup
from darts_mutator import DartsMutator
logger = logging.getLogger(__name__)
class DartsTrainer(Trainer):
"""
DARTS trainer.
Parameters
----------
model : nn.Module
PyTorch model to be trained.
loss : callable
Receives logits and ground truth label, return a loss tensor.
metrics : callable
Receives logits and ground truth label, return a dict of metrics.
optimizer : Optimizer
The optimizer used for optimizing the model.
num_epochs : int
Number of epochs planned for training.
dataset_train : Dataset
Dataset for training. Will be split for training weights and architecture weights.
dataset_valid : Dataset
Dataset for testing.
mutator : DartsMutator
Use in case of customizing your own DartsMutator. By default will instantiate a DartsMutator.
batch_size : int
Batch size.
workers : int
Workers for data loading.
device : torch.device
``torch.device("cpu")`` or ``torch.device("cuda")``.
log_frequency : int
Step count per logging.
callbacks : list of Callback
list of callbacks to trigger at events.
arc_learning_rate : float
Learning rate of architecture parameters.
    unrolled : bool
``True`` if using second order optimization, else first order optimization.
"""
def __init__(self, model, loss, metrics,
optimizer, num_epochs, dataset_train, dataset_valid,
mutator=None, batch_size=64, workers=4, device=None, log_frequency=None,
callbacks=None, arc_learning_rate=3.0E-4, unrolled=False):
super().__init__(model, mutator if mutator is not None else DartsMutator(model),
loss, metrics, optimizer, num_epochs, dataset_train, dataset_valid,
batch_size, workers, device, log_frequency, callbacks)
# print("\n\n\n\n\ncallbacks in darts initializer: "+str(callbacks))
# print("\n\n\n\n\n\nDOGECOIN\n\n\n\n\n\n\n\n")
self.ctrl_optim = torch.optim.Adam(self.mutator.parameters(), arc_learning_rate, betas=(0.5, 0.999),
weight_decay=1.0E-3)
self.unrolled = unrolled
n_train = len(self.dataset_train)
split = n_train // 2
indices = list(range(n_train))
train_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[:split])
valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[split:])
self.train_loader = torch.utils.data.DataLoader(self.dataset_train,
batch_size=batch_size,
sampler=train_sampler,
num_workers=workers)
self.valid_loader = torch.utils.data.DataLoader(self.dataset_train,
batch_size=batch_size,
sampler=valid_sampler,
num_workers=workers)
self.test_loader = torch.utils.data.DataLoader(self.dataset_valid,
batch_size=batch_size,
num_workers=workers)
def train_one_epoch(self, epoch):
self.model.train()
self.mutator.train()
meters = AverageMeterGroup()
for step, ((trn_X, trn_y), (val_X, val_y)) in enumerate(zip(self.train_loader, self.valid_loader)):
trn_X, trn_y = trn_X.to(self.device), trn_y.to(self.device)
val_X, val_y = val_X.to(self.device), val_y.to(self.device)
# phase 1. architecture step
self.ctrl_optim.zero_grad()
if self.unrolled:
self._unrolled_backward(trn_X, trn_y, val_X, val_y)
else:
self._backward(val_X, val_y)
self.ctrl_optim.step()
# phase 2: child network step
self.optimizer.zero_grad()
logits, loss = self._logits_and_loss(trn_X, trn_y)
loss.backward()
nn.utils.clip_grad_norm_(self.model.parameters(), 5.) # gradient clipping
self.optimizer.step()
metrics = self.metrics(logits, trn_y)
metrics["loss"] = loss.item()
meters.update(metrics)
if self.log_frequency is not None and step % self.log_frequency == 0:
# print("<NAME>")
logger.info("Epoch [%s/%s] Step [%s/%s] %s", epoch + 1,
self.num_epochs, step + 1, len(self.train_loader), meters)
# cur_time = timeit.default_timer()
# time_diff = cur_time - started_time
# logger.info("memes")
logger.info(str(meters))
def validate_one_epoch(self, epoch):
self.model.eval()
self.mutator.eval()
meters = AverageMeterGroup()
with torch.no_grad():
self.mutator.reset()
for step, (X, y) in enumerate(self.test_loader):
X, y = X.to(self.device), y.to(self.device)
logits = self.model(X)
metrics = self.metrics(logits, y)
meters.update(metrics)
if self.log_frequency is not None and step % self.log_frequency == 0:
logger.info("Epoch [%s/%s] Step [%s/%s] %s", epoch + 1,
self.num_epochs, step + 1, len(self.test_loader), meters)
def _logits_and_loss(self, X, y):
self.mutator.reset()
logits = self.model(X)
loss = self.loss(logits, y)
self._write_graph_status()
return logits, loss
def _backward(self, val_X, val_y):
"""
Simple backward with gradient descent
"""
_, loss = self._logits_and_loss(val_X, val_y)
loss.backward()
def _unrolled_backward(self, trn_X, trn_y, val_X, val_y):
"""
Compute unrolled loss and backward its gradients
"""
backup_params = copy.deepcopy(tuple(self.model.parameters()))
# do virtual step on training data
lr = self.optimizer.param_groups[0]["lr"]
momentum = self.optimizer.param_groups[0]["momentum"]
weight_decay = self.optimizer.param_groups[0]["weight_decay"]
self._compute_virtual_model(trn_X, trn_y, lr, momentum, weight_decay)
# calculate unrolled loss on validation data
# keep gradients for model here for compute hessian
_, loss = self._logits_and_loss(val_X, val_y)
w_model, w_ctrl = tuple(self.model.parameters()), tuple(self.mutator.parameters())
w_grads = torch.autograd.grad(loss, w_model + w_ctrl)
d_model, d_ctrl = w_grads[:len(w_model)], w_grads[len(w_model):]
# compute hessian and final gradients
hessian = self._compute_hessian(backup_params, d_model, trn_X, trn_y)
with torch.no_grad():
for param, d, h in zip(w_ctrl, d_ctrl, hessian):
# gradient = dalpha - lr * hessian
param.grad = d - lr * h
# restore weights
self._restore_weights(backup_params)
def _compute_virtual_model(self, X, y, lr, momentum, weight_decay):
"""
Compute unrolled weights w`
"""
# don't need zero_grad, using autograd to calculate gradients
_, loss = self._logits_and_loss(X, y)
gradients = torch.autograd.grad(loss, self.model.parameters())
with torch.no_grad():
for w, g in zip(self.model.parameters(), gradients):
m = self.optimizer.state[w].get("momentum_buffer", 0.)
w = w - lr * (momentum * m + g + weight_decay * w)
def _restore_weights(self, backup_params):
with torch.no_grad():
for param, backup in zip(self.model.parameters(), backup_params):
param.copy_(backup)
def _compute_hessian(self, backup_params, dw, trn_X, trn_y):
"""
dw = dw` { L_val(w`, alpha) }
w+ = w + eps * dw
w- = w - eps * dw
hessian = (dalpha { L_trn(w+, alpha) } - dalpha { L_trn(w-, alpha) }) / (2*eps)
eps = 0.01 / ||dw||
"""
self._restore_weights(backup_params)
norm = torch.cat([w.view(-1) for w in dw]).norm()
eps = 0.01 / norm
if norm < 1E-8:
logger.warning("In computing hessian, norm is smaller than 1E-8, cause eps to be %.6f.", norm.item())
dalphas = []
for e in [eps, -2. * eps]:
# w+ = w + eps*dw`, w- = w - eps*dw`
with torch.no_grad():
for p, d in zip(self.model.parameters(), dw):
p += e * d
_, loss = self._logits_and_loss(trn_X, trn_y)
dalphas.append(torch.autograd.grad(loss, self.mutator.parameters()))
dalpha_pos, dalpha_neg = dalphas # dalpha { L_trn(w+) }, # dalpha { L_trn(w-) }
hessian = [(p - n) / (2. * eps) for p, n in zip(dalpha_pos, dalpha_neg)]
return hessian
```
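A hedged sketch of how the trainer above is typically wired up: the search-space model, the CIFAR datasets and the metric below are placeholders, and `trainer.train()` is assumed to drive `train_one_epoch`/`validate_one_epoch` through NNI's base `Trainer`, so the calls are left commented out.

```python
# Hypothetical wiring for DartsTrainer; model and datasets are placeholders.
import torch
import torch.nn as nn


def accuracy(logits, target):
    # returns a metrics dict, matching the `metrics` callable described above
    return {"acc": (logits.argmax(dim=1) == target).float().mean().item()}


# trainer = DartsTrainer(model=search_space,
#                        loss=nn.CrossEntropyLoss(),
#                        metrics=accuracy,
#                        optimizer=torch.optim.SGD(search_space.parameters(), 0.025, momentum=0.9),
#                        num_epochs=50,
#                        dataset_train=cifar_train,
#                        dataset_valid=cifar_valid,
#                        batch_size=64,
#                        unrolled=False)
# trainer.train()   # assumed to alternate the architecture and weight steps shown above
```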
#### File: experiment/config/remote.py
```python
from dataclasses import dataclass
from pathlib import Path
from typing import List, Optional, Union
import warnings
from .base import ConfigBase, PathLike
from .common import TrainingServiceConfig
from . import util
__all__ = ['RemoteConfig', 'RemoteMachineConfig']
@dataclass(init=False)
class RemoteMachineConfig(ConfigBase):
host: str
port: int = 22
user: str
password: Optional[str] = None
ssh_key_file: PathLike = None #'~/.ssh/id_rsa'
ssh_passphrase: Optional[str] = None
use_active_gpu: bool = False
max_trial_number_per_gpu: int = 1
gpu_indices: Optional[Union[List[int], str]] = None
python_path: Optional[str] = None
_canonical_rules = {
'ssh_key_file': util.canonical_path,
'gpu_indices': lambda value: [int(idx) for idx in value.split(',')] if isinstance(value, str) else value,
}
_validation_rules = {
'port': lambda value: 0 < value < 65536,
'max_trial_number_per_gpu': lambda value: value > 0,
'gpu_indices': lambda value: all(idx >= 0 for idx in value) and len(value) == len(set(value))
}
def validate(self):
super().validate()
if self.password is None and not Path(self.ssh_key_file).is_file():
raise ValueError(f'Password is not provided and cannot find SSH key file "{self.ssh_key_file}"')
if self.password:
            warnings.warn('Password will be exposed through web UI in plain text. We recommend using an SSH key file.')
@dataclass(init=False)
class RemoteConfig(TrainingServiceConfig):
platform: str = 'remote'
reuse_mode: bool = False
machine_list: List[RemoteMachineConfig]
def __init__(self, **kwargs):
kwargs = util.case_insensitive(kwargs)
kwargs['machinelist'] = util.load_config(RemoteMachineConfig, kwargs.get('machinelist'))
super().__init__(**kwargs)
_canonical_rules = {
'machine_list': lambda value: [config.canonical() for config in value]
}
_validation_rules = {
'platform': lambda value: (value == 'remote', 'cannot be modified')
}
```
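A sketch of how the two config classes above might be instantiated; the host, user and key-file values are placeholders, and the keyword-argument constructor behaviour is assumed from `ConfigBase`, so the calls are left commented out rather than asserted.

```python
# Hypothetical construction of the configs above (values are placeholders and the
# keyword-argument constructor is an assumption inherited from ConfigBase):
# machine = RemoteMachineConfig(host="192.0.2.10", user="nni",
#                               ssh_key_file="~/.ssh/id_rsa", use_active_gpu=False)
# remote = RemoteConfig(machine_list=[machine])
# remote.validate()   # checks the port range, GPU indices and the SSH key file path
```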
|
{
"source": "jgardezi/BATrader",
"score": 2
}
|
#### File: BATrader/final/action.py
```python
import pprint
import queue as queue
from threading import Thread
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor, as_completed
import zmq
import argparse
from time import sleep
import BATrader as ba
from BATrader.market.push import push_to_alchemy
from BATrader.fileReader import read_ini_config
from BATrader.market.MarketReader import currentTime, currentTimeSecond, stopwatch, market_time_keeper, is_stock
from BATrader.final.config_manager import ConfigManager
from BATrader.final.portfolio import BacktestPortfolio, LivePortfolio
from BATrader.final.strategy import DemoStrategy, DemoFuturesMonitor
from BATrader.final.data_handler import SignalDict
from BATrader.final.execution import SpExecutionHandler
from BATrader.logger import Logger
import pandas as pd
# =============================================================================
# Component
# =============================================================================
from BATrader.final.data_handler import PostgresDataHandler, MemoryDict
from BATrader.final.execution import TraderExecutionHandler
# =============================================================================
# For concurrency -> init_memory_db_pool
# =============================================================================
def task(s):
memory_db = MemoryDict()
obj = memory_db[s]
return obj
# =============================================================================
# Class
# =============================================================================
class MetaAction(object):
"""
    MetaAction is a base class that defines the shared behaviour of trading, backtesting and monitoring.
"""
def __init__(self, config_name):
# logger ( [Date]_[Machine_Name]_[Name_of_Pricer_Class]_[config] )
self.logger = Logger(
logname=ba.fr.path_output + 'logger\\{0}_{1}_{2}_{3}.log'.format(ba.dk.get_today(), ba.fr.current_machine,
self.__class__.__name__, config_name),
logger=ba.fr.current_machine + self.__class__.__name__).getlog()
# config
self.current_config_name = config_name
self.config = ConfigManager(config_name)
# list of subscribed symbol
self.symlist = [] # Subscribed stock
self.flist = [] # Subscribed futures
self.signal_dic = SignalDict() # monitor signal
self.dic = {}
# subcribed ticker : symlist + flist
self.subscribed_ticker = []
# Ticker list waiting to be subcribed
self.ticker_waiting_to_subscribe = []
# Event queue
self.events = queue.Queue()
# Socket
self.price_receiver = None
# stats
self.signals = 0
self.orders = 0
self.fills = 0
self.num_strats = 1
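    # Event flow handled in start_listening(): a MARKET bar update triggers
    # Strategy.calculate_signals() (and, for non-monitor strategies, a portfolio
    # time-index update); a SIGNAL event is turned into an ORDER by the Portfolio;
    # the ORDER is executed by the ExecutionHandler, whose FILL event updates the
    # Portfolio. MONITOR events only log, push notifications and record signal flags.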
def connect_price_server(self):
"""
Connect to price server
        Since we are using recv_pyobj, ZeroMQ does not natively know about the
        data; it just pickles the object and transfers it, so no topic-based
        filtering of messages can be done on the subscriber side.
"""
if not self.price_receiver:
self.logger.debug("Created socket with : Pricer")
context = zmq.Context()
self.price_receiver = context.socket(zmq.SUB)
self.price_receiver.connect(self.config.address_pub)
#self.price_receiver.connect(self.config.address_stock)
#self.price_receiver.connect(self.config.address_futures)
self.price_receiver.setsockopt(zmq.SUBSCRIBE, b"") # it means subscribe all
def disconnect_price_server(self):
"""
Disconnect the price server
"""
if self.price_receiver:
self.logger.debug('Disconnect Price Server')
self.price_receiver.disconnect(self.config.address_pub)
# self.price_receiver.disconnect(self.config.address_stock)
# self.price_receiver.disconnect(self.config.address_futures)
self.price_receiver = None
def addticker(self, ticker: str):
"""
Pass in str
"""
self.ticker_waiting_to_subscribe.append(ticker)
def notify_pricer(self, work_message):
"""
Pricer listen work_message with a Pull
"""
context = zmq.Context()
zmq_socket = context.socket(zmq.PUSH)
zmq_socket.bind(self.config.address_ticker)
zmq_socket.send_pyobj(work_message)
sleep(0.1)
zmq_socket.unbind(self.config.address_ticker)
def subscribe_ticker(self, ticker: list):
"""
Subscribe the ticker
Args:
            ticker: list of symbol strings to subscribe
"""
self.logger.info('Subcribing ticker to pricer')
# self.data_handler.init_memory_db(ticker)
work_message = {'subscribe': ticker}
self.notify_pricer(work_message)
self.subscribed_ticker += ticker
for sym in ticker:
if is_stock(sym):
self.symlist.append(sym)
else:
self.flist.append(sym)
print('Done.')
def unsubscribe_ticker(self, ticker: list):
"""
Unsubscribe the ticker
"""
self.logger.info('Unsubcribing')
work_message = {'unsubscribe': ticker}
self.notify_pricer(work_message)
self.subscribed_ticker = list(set(self.subscribed_ticker) - set(ticker))
for sym in ticker:
if is_stock(sym):
                self.symlist.remove(sym)
else:
                self.flist.remove(sym)
print('Done.')
def _output_performance(self):
"""
Outputs the strategy performance from the backtest.
"""
self.portfolio.create_equity_curve_dataframe()
print("Creating summary stats...")
stats = self.portfolio.output_summary_stats()
print("Creating equity curve...")
print(self.portfolio.equity_curve.tail(10))
pprint.pprint(stats)
print("Signals: %s" % self.signals)
print("Orders: %s" % self.orders)
print("Fills: %s" % self.fills)
def start_listening(self):
"""
Start Listening is normal price initiate event,
"""
# Connect the pricer
self.connect_price_server()
self.logger.info('Listening...')
self.logger.info('Start trading... Good luck today :)')
keep_running = True
while keep_running:
# Subscribe ticker here
if self.ticker_waiting_to_subscribe:
self.subscribe_ticker(self.ticker_waiting_to_subscribe)
self.ticker_waiting_to_subscribe = []
data = self.price_receiver.recv_pyobj()
#print(data)
if data['msg_type'] == 'data':
                # Skip data for tickers we have not subscribed to
                if data['sym'] not in self.subscribed_ticker:
                    continue
self.logger.debug('Got data of: {}'.format(data['sym']))
self.data_handler.update_bars(data)
# May be we can put some hack in here, so that we can just receive
# the price
# Second loop
while not self.events.empty():
event = self.events.get()
if event.type == 'MARKET':
# self.logger.info("Got an Market Event, calcualte signal now")
self.strategy.calculate_signals(event)
# self.logger.info("Update portfolio time index now")
if 'Monitor' not in self.strategy.strategy_id:
self.portfolio.update_timeindex(event)
elif event.type == 'MONITOR':
self.logger.info(event.msg_list)
push_to_alchemy(event.msg_for_send)
for signal_name in event.signal_name_list:
# Append the monitor signal msg to self.signal_dic
if not self.signal_dic[event.sym][signal_name]:
self.signal_dic[event.sym]['signal_msg'] += event.msg_list
self.signal_dic[event.sym][signal_name] = True
elif event.type == 'SIGNAL':
# self.logger.info("Got an Signal Event !!! now pass to portfolio to gen order")
self.signals += 1
self.portfolio.update_signal(event)
elif event.type == 'ORDER':
# self.logger.info("Got an Order Event, pass to execution handler to execute")
self.orders += 1
self.execution_handler.execute_order(event)
# event.print_order()
elif event.type == 'FILL':
# self.logger.info("Got an Fill Event")
self.fills += 1
self.portfolio.update_fill(event)
self.logger.info("Trade completed, return to action")
self.portfolio.print_portfolio()
elif data['msg_type'] == 'msg':
if data['msg_body'] == 'stop_running':
self.logger.info('Price server told me to shut down, I am going.')
#self.disconnect_price_server()
#self.logger.info('Disconnected price server.')
keep_running = False
if 'Monitor' not in self.strategy.strategy_id:
self._output_performance()
elif data['msg_body'] == 'refresh_done':
# actually we don't need a refrehed event
# after refreshed, just regen the target symlist
self.logger.debug("Pricer Refreshed, let me regen the target")
self.logger.debug("-----------------------------------------")
self.logger.debug("")
for sym in self.symlist_func():
if sym not in self.subscribed_ticker:
self.ticker_waiting_to_subscribe.append(sym)
elif data['msg_body'] == 'pricer_push':
# pricer wanna straightly sending push
# let Monitor push only
if 'Monitor' in self.strategy.strategy_id:
push_to_alchemy(data['push_msg'])
elif data['msg_body'] == 'observe_this':
# let Monitor observe only
if 'Monitor' in self.strategy.strategy_id:
self.ticker_waiting_to_subscribe.append(data['observe_list'])
class Trader(MetaAction):
"""
Enscapsulates the settings and components for carrying out
a real live trading.
The Final
v3.0 2015-10
V3.4 2017-10
V3.5 2018-03
V4.0 2018-04 Change TheFinal3 to Trader, migrating the new Trader class
v5.0 2018-05 Changed to three-pipeline model
v5.1 2018-08 adding some more
v5.2 2018-08 adding config
v5.3 2019-01 its too hard to implement this, will combined Trader class to Action
start()
------
the entry point, it repeatly runs gen_signal with a timer
gen_signal(self, symlist, pricer, cond_filter, tester)
Usage:
trader.gen_signal(trader._symlist_testlist(),\
trader._pricer_local_test,\
trader._filter_production,\
trader.algo_tester)
::: BUGS :::
20180202 The trader won't sleep when lunch! (20180403 Fixed)
20190510 The Trade action becomes "Trader", trying to stick to the OObacktest
Parameters:
data_handler_cls,
strategy_cls,
portfolio_cls,
execution_handler_cls,
symbol_list_func,
"""
def __init__(self, config_name, *args, **kwargs):
# Init base class, it's either production or debug
super(Trader, self).__init__(config_name)
self.params_dict = kwargs
self.data_handler_cls = args[0]
self.strategy_cls = args[1]
self.portfolio_cls = args[2]
self.execution_handler_cls = args[3]
self.symlist_func = lambda: args[4] # lazy execute
self.ticker_waiting_to_subscribe = self.symlist_func()
# find today and pday
self.tradeday = ba.dk.tradeday()
self.pTradeday = ba.dk.tradeday(1)
# Pass in or get it?
#self.initial_capital = self.params_dict['initial_capital']
# Generate Trading instances
self.logger.info("Creating DataHandler, Strategy, Portfolio and ExecutionHandler : Trader")
self.data_handler = self.data_handler_cls(self.events, self.logger,
**kwargs)
self.strategy = self.strategy_cls(self.data_handler, self.events, self.logger,
**kwargs)
self.portfolio = self.portfolio_cls(self.data_handler, self.events, self.logger, self.tradeday,
**kwargs)
self.execution_handler = self.execution_handler_cls(self.events, self.portfolio, self.logger,
**kwargs)
def start_trading(self):
        # Returns True if init succeeded, and clears self.ticker_waiting_to_subscribe
loaded = self.data_handler.init_memory_db(self.ticker_waiting_to_subscribe)
if loaded:
self.subscribe_ticker(self.ticker_waiting_to_subscribe)
self.ticker_waiting_to_subscribe = []
# First construct the portfolio
self.portfolio.construct_portfolio(self.subscribed_ticker)
self.start_listening()
class Backtester(MetaAction):
"""
    Encapsulates the settings and components for carrying out
    an event-driven backtest.
symbol_list, initial_capital, data_handler, execution_handler, portfolio, strategy,
backtest_timer= '0940'
Parameters:
csv_dir - The hard root to the CSV data directory.
symbol_list - The list of symbol strings.
    initial_capital - The starting capital for the portfolio.
heartbeat - Backtest "heartbeat" in seconds
start_date - The start datetime of the strategy.
data_handler - (Class) Handles the market data feed.
execution_handler - (Class) Handles the orders/fills for trades.
portfolio - (Class) Keeps track of portfolio current and prior positions.
strategy - (Class) Generates signals based on market data.
20190709
Parameters:
data_handler_cls,
strategy_cls,
portfolio_cls,
execution_handler_cls,
symbol_list_func,
Must pass in kwargs:
initial_capital
inday
outday
"""
def __init__(self, config_name, *args, **kwargs):
# Init base class
super(Backtester, self).__init__(config_name)
self.params_dict = kwargs
self.data_handler_cls = args[0]
self.strategy_cls = args[1]
self.portfolio_cls = args[2]
self.execution_handler_cls = args[3]
self.symlist_func = lambda: args[4] # lazy execute
self.ticker_waiting_to_subscribe = self.symlist_func()
# Backtest params
self.initial_capital = self.params_dict['initial_capital']
self.insample_day = self.params_dict['inday']
self.outsample_day = self.params_dict['outday']
# For Backtest
self.backtest_day = None
# Generate Trading instances
self.logger.info("Creating DataHandler, Strategy, Portfolio and ExecutionHandler : Backtest")
self.data_handler = self.data_handler_cls(self.events, self.logger, **kwargs)
self.strategy = self.strategy_cls(self.data_handler, self.events, self.logger, **kwargs)
self.portfolio = self.portfolio_cls(self.data_handler, self.events, self.logger, self.insample_day,
**kwargs)
self.execution_handler = self.execution_handler_cls(self.events, self.portfolio, self.logger)
def start_backtesting(self):
"""
Simulates the backtest and outputs portfolio performance.
        It uses ZeroMQ.
"""
        # First tell the pricer to stop if it's backtesting
if self.current_config_name == 'backtest':
# First stop the pricer if it's running
work_message = dict(backtest_end='')
self.notify_pricer(work_message)
        # Returns True if init succeeded, and clears self.ticker_waiting_to_subscribe
loaded = self.data_handler.init_memory_db(self.ticker_waiting_to_subscribe)
if loaded:
self.subscribe_ticker(self.ticker_waiting_to_subscribe)
self.ticker_waiting_to_subscribe = []
if self.current_config_name == 'backtest':
self.logger.info('Running Monitor in backtest mode')
# get the insample outsample day
insample_day = self.params_dict['inday']
outsample_day = self.params_dict['outday']
self.trim_backtest_data(insample_day, outsample_day)
            # Not implementing this right now
# self.portfolio.construct_portfolio(self.subscribed_ticker)
# backtest timer
if 'backtest_timer' in self.params_dict:
work_message = {'backtest_timer': self.params_dict['backtest_timer']}
self.notify_pricer(work_message)
# Tell pricer to start backtest
self.logger.info('Telling Pricer to start backtest')
work_message = {'backtest_begin': True}
self.notify_pricer(work_message)
self.start_listening()
def start_internal_backtesting(self):
"""
Backtesting without zeromq. All backtest runs internally
"""
"""
Executes the backtest.
"""
i = 0
while True:
i += 1
print(i)
# Update the market bars
if self.data_handler.continue_backtest == True:
self.data_handler.update_bars()
else:
break
# Handle the events
while True:
try:
event = self.events.get(False)
except queue.Empty:
break
else:
if event is not None:
if event.type == 'MARKET':
self.strategy.calculate_signals(event)
self.portfolio.update_timeindex(event)
elif event.type == 'SIGNAL':
self.signals += 1
self.portfolio.update_signal(event)
elif event.type == 'ORDER':
self.orders += 1
self.execution_handler.execute_order(event)
elif event.type == 'FILL':
self.fills += 1
self.portfolio.update_fill(event)
sleep(self.heartbeat)
"""
Outputs the strategy performance from the backtest.
"""
self.portfolio.create_equity_curve_dataframe()
print("Creating summary stats...")
stats = self.portfolio.output_summary_stats()
print("Creating equity curve...")
print(self.portfolio.equity_curve.tail(10))
pprint.pprint(stats)
print("Signals: %s" % self.signals)
print("Orders: %s" % self.orders)
print("Fills: %s" % self.fills)
def trim_backtest_data(self, insample_end_day, outsample_end_day):
"""
While we loaded all data,
we need to trim the data and left only what we needed
1. Trim the data bars in data handler
2. pass them to pricer
3. told pricer to begin backtest
"""
backtest_day = (insample_end_day, outsample_end_day)
self.backtest_day = backtest_day
        # unpack the tuple before sending, or it becomes a tuple of tuples
self.logger.debug('Trimming the data %s %s' % (insample_end_day, outsample_end_day))
self.data_handler.trim_data(insample_end_day, outsample_end_day)
work_message = {'backtest_day': backtest_day}
self.notify_pricer(work_message)
        # notify the pricer of every piece of outsample data by looping over the db
for sym in list(self.data_handler.memory_outsample_db.keys()):
self.logger.debug('Passing Outsample Data : %s' % sym)
work_message = {'backtest_bars': self.data_handler.memory_outsample_db[sym],
'sym': sym}
self.notify_pricer(work_message)
if __name__ == '__main__':
print('Running in spyder')
    parser = argparse.ArgumentParser(description='Action Director')  # action director
parser.add_argument("model", default='d', nargs='?') # read ini config
#parser.add_argument("strategy", nargs="?")
args = parser.parse_args()
dic = {'p': 'production',
'd': 'debug',
'b': 'backtest'}
# ============================================================================
# Production and debug
# =============================================================================
    if args.model in ('p', 'd'):
mode = 'sp'
if mode == 'sp':
trader = Trader(dic[args.model], PostgresDataHandler, DemoStrategy, LivePortfolio, SpExecutionHandler,
['HSIF'])
trader.start_trading()
else:
trader = Trader(dic[args.model], PostgresDataHandler, DemoStrategy, BacktestPortfolio, TraderExecutionHandler,
['HSIF'])
trader.start_trading()
# =============================================================================
# Backtest
# =============================================================================
elif args.model == 'b':
backtester = Backtester('backtest', PostgresDataHandler, DemoFuturesMonitor, BacktestPortfolio, TraderExecutionHandler,
['HSIF'],
initial_capital= 100000, inday='20190610', outday='20190611')
#backtester.start_backtesting()
# =============================================================================
    # Replay
    # replay is a kind of backtest, shorthand for replaying the
    # current trade day's market situation
# =============================================================================
elif args.model == 'replay':
b = Backtester('backtest', PostgresDataHandler, DemoStrategy, BacktestPortfolio, TraderExecutionHandler,
['HSIF'],
inday=ba.dk.tradeday(1), outday=ba.dk.tradeday())
b.start_backtesting()
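        # A minimal usage sketch of this entry point (assumption: this module is
        # saved as action.py and the BATrader package plus a running pricer are available):
        #   python action.py p        -> live trading with the production config
        #   python action.py d        -> live trading with the debug config (default)
        #   python action.py b        -> event-driven backtest via Backtester
        #   python action.py replay   -> replay the current trade day as a backtest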
```
#### File: BATrader/final/filters.py
```python
import BATrader as ba
from BATrader.market.MarketReader import convert
import pandas as pd
from functools import partial
class Filters(object):
"""
    Filter
    ------------------------------------------
    Filter is used when I can tell you the answer immediately when you ask for a
    specific group of stocks. This Filter operates on the symlist in PostgreSQL
    and delivers DataFrames row by row.
Usage:
self.df = ba.fr.symlist
self.df1 = filter_by_mkt_value((0,'1B'))
self.df2 = filter_by_entry_cost('10K')
self.df3 = multiple_condition_filter(filter_by_mkt_value(('300M',0)),
filter_by_entry_cost('10K'),
filter_by_over_ma_60())
d = self.df.query("cap_in_num > 500000000 and (board == 'main' or board == 'gem')") # using query
# Return active stock only
ba.fr.symlist = filter_by_status(1)
NewStyle:
cond1 = stock['毛利率(%)'] > 30
cond2 = stock['營業利益率(%)'] > 30
stock[cond1 & cond2]
"""
def __init__(self):
self.df = ba.fr.init_symlist_once()
def filter_by_symlist(self, symlist: list) -> pd.DataFrame:
"""
filter by symlist, 931 µs
"""
return self.df[self.df.sym.isin(symlist)]
def filter_by_symlist2(self, symlist: list) -> pd.DataFrame:
"""
filter by symlist, slower.....4.22 ms
"""
return self.df.set_index('sym').loc[symlist].reset_index()
def filter_by_chi_name_contain(self, chi_word: str) -> pd.DataFrame:
"""
filter by chinese name contains
"""
return self.df[self.df.chi_name.str.contains(chi_word)]
def filter_by_status(self, status: int) -> pd.DataFrame:
"""
filter by stock trading status.
to simplify,
1 is 'Active', 0 is 'Suspended'
"""
dic = {1: 'Active', 0: 'Suspended'}
return self.df[self.df.status == dic[status]]
def filter_by_over_ma(self, period) -> pd.DataFrame:
return self.df[self.df.pClose > self.df['ma%s' % period]]
def filter_by_over_ma_60(self) -> pd.DataFrame:
return self.filter_by_over_ma(60)
def filter_by_over_ma_200(self) -> pd.DataFrame:
return self.filter_by_over_ma(200)
def filter_by_entry_cost(self, entry_cost) -> pd.DataFrame:
"""
        filter by entry cost per board lot; we always want stocks below a given
        entry cost, so a single value is the common case rather than a range
        entry_cost = boardlot x pClose
        int or str : upper bound, used directly
        tuple : (lower bound, upper bound)
"""
self.df = ba.fr.symlist
self.df['entry_cost'] = self.df.boardlot * self.df.pClose
self.df = self.df[self.df.entry_cost.notnull()]
if type(entry_cost) == tuple:
return self.df[
(self.df.entry_cost > convert(entry_cost[0])) & (self.df.entry_cost < convert(entry_cost[1]))]
else:
return self.df[self.df.entry_cost < convert(entry_cost)]
def filter_by_board(self, board: str) -> pd.DataFrame:
"""
filter by input board
"""
if type(board) != str:
raise TypeError("main?gem?etf?")
return self.df[self.df.board == board]
def filter_by_mkt_value(self, mkt_value: tuple) -> pd.DataFrame:
"""
filter by market value in range
must be a tuple : (0, '500M')
        3B (30億) or above: -> ('300M', 0)
"""
if type(mkt_value) != tuple:
raise TypeError("tuple : high-land, low-land")
if mkt_value[1] == 0:
return self.df[self.df.cap_in_num > convert(mkt_value[0])]
else:
return self.df[(self.df.cap_in_num > convert(mkt_value[0])) & (self.df.cap_in_num < convert(mkt_value[1]))]
def filter_by_amount(self, amount: float) -> pd.DataFrame:
"""
filter by amount (Turnover = P x Q)
        must be a float : e.g. 0.0
"""
return self.df[(self.df.pAmount > amount)]
def filter_by_trade_price(self, bigger_or_smaller_sign, trade_price) -> pd.DataFrame:
"""
filter by Traded price(Last or Close)
"""
if type(trade_price) != float and type(trade_price) != int:
raise ValueError("int or float only : 0.0 or 0")
if bigger_or_smaller_sign == '>':
return self.df[(self.df.pClose > trade_price)]
if bigger_or_smaller_sign == '<':
return self.df[(self.df.pClose < trade_price)]
def filter_ot_list(self) -> pd.DataFrame:
return self.df[self.df.ot_and_rubbish == '1']
# ------ aastocks ------
def filter_by_aa_sector(self, sector):
"""
Better update the aa sector regularly
"""
return self.df.query("aa_sector == '{}'".format(sector))
def filter_by_aa_business(self, business):
"""
Better update the aa sector regularly
"""
return self.df.query("aa_business == '{}'".format(business))
# ------ Other ------
@staticmethod
def merge_everything(df1, df2) -> pd.DataFrame:
cols_to_use = df2.columns.difference(df1.columns)
df_ = pd.merge(df1.set_index('sym'), df2.set_index('sym')[cols_to_use], left_index=True, right_index=True,
how='inner').reset_index()
return df_
def multiple_condition_filter(self, *df_hub) -> pd.DataFrame:
"""
        pass in multiple conditions and return the merged result;
        this looks complex but it's very convenient.
self.df3 = multiple_condition_filter(filter_by_mkt_value(('300M',0)),
filter_by_entry_cost('10K'),
filter_by_over_ma_60())
"""
# self.df = self.filter_by_status(1)
for d in df_hub:
self.df = self.merge_everything(self.df, d)
return self.df
@staticmethod
def get_the_symlist(df: pd.DataFrame) -> list:
"""
get the symlist by dataframe
"""
return df.sym.tolist()
if __name__ == '__main__':
flt = Filters()
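    # A minimal usage sketch (assumption: the symlist table behind ba.fr is
    # populated with the columns these filters read, e.g. boardlot, pClose, cap_in_num):
    cheap = flt.filter_by_entry_cost('10K')        # entry cost per board lot below 10K
    large = flt.filter_by_mkt_value(('300M', 0))   # market cap above 300M
    combined = flt.multiple_condition_filter(cheap, large)
    print(Filters.get_the_symlist(combined)[:10])  # first few matching symbols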
```
#### File: BATrader/final/product.py
```python
import BATrader as ba
from collections import defaultdict
import pandas as pd
import numpy as np
import threading
# =============================================================================
# Good implementation of products
# =============================================================================
"""
2019-02 Actually we don't need the product to have the ability to inspect itself
"""
class BaseClassProducts(dict):
"""
Derived from dictionary
"""
# Public
# symlist_index = defaultdict(list)
# quotes = defaultdict(list)
@classmethod
def find_by_sym(cls, sym):
'''
        Used to find and return the instance
'''
return cls.symlist_index[sym][0]
@classmethod
def check_sym_instances(cls, sym):
'''
Return true if it is a instance of a 'sym'
'''
return sym in cls.symlist_index
@classmethod
def return_quotes(cls):
'''
Return the quotes of the class
'''
return cls.quotes
def shift(self, shift: int):
if shift > 0:
self.shifted_daybar = self.shifted_daybar.append(self.daybar[-shift:], sort=True)
self.daybar = self.daybar[:-shift]
def shift_by_day(self, day):
self.shifted_daybar = self.shifted_daybar.append(self.daybar[day:], sort= True)
self.daybar = self.daybar[:day]
#self.shifted_data1min = self.shifted_data1min.append(self.data1min[ba.dk.format_add_hyphen(day)])
#self.data1min = self.data1min[:ba.dk.format_add_hyphen(day)]
class Stock(BaseClassProducts):
"""
    Previously we used loaded_data to prevent dump from loading more than once.
    But actually we may want to keep that behaviour (dump will load again) in case
    some data is updated to SQL first and fetched out again (like mmi).
    MemoryDict runs dump only once, because it calls __missing__, so it's safe
    to remove self.loaded_data. (2019-06-30)
"""
symlist_index = defaultdict(list)
quotes = defaultdict(dict) # for storing the latest quotes when update
    # loaded data meta: we know in advance which data gets loaded, and update it below accordingly
meta = ['daybar', 'data1min']
def __init__(self, sym):
# BaseClass_Products.__init__(self)
self.sym = sym
self.chi_name = ba.fr.get_chi_name(sym)
Stock.symlist_index[sym].append(self)
# Scan meta : sometimes we need to put some scan meta data to Stock obj
self.scanner_meta_data = {}
self.shifted_daybar = pd.DataFrame()
self.shifted_data1min = pd.DataFrame()
@classmethod
def find_by_sym(cls, sym):
return Stock.symlist_index[sym][0]
def display(self):
print("Symbol:", self.sym)
print("no. of EOD bar:", len(self.daybar))
print("no. of 1min bar:", len(self.data1min))
def dump(self, recalc= True, dayback= None):
"""
        Load some dumped data.
        daybar and data1min are loaded from the DB.
        recalc:
            True will recalc using tc
        dayback:
            just load dayback days of data, must come with recalc=False
"""
print('Dumping : %s' % self.sym)
# Name
setattr(self, 'name', ba.fr.get_name(self.sym))
setattr(self, 'chi_name', ba.fr.get_chi_name(self.sym))
# OHLC
if recalc:
setattr(self, 'daybar', ba.tc.get_bar_ex_eod_db_recalc_with_meta(self.sym))
else:
if dayback:
setattr(self, 'daybar', ba.tc.get_bar_ex_eod_db(self.sym, dayback= dayback))
else:
setattr(self, 'daybar', ba.tc.get_bar_ex_eod_db(self.sym))
setattr(self, 'data1min', ba.rtq.get_1min_sql(self.sym, dayback=30))
def dump_meta_data(self):
        self.load_hkexnews()
setattr(self, 'concat', pd.concat([self.daybar, self.mmi, self.details], sort=True))
self.load_ccass()
def dump_warrant_cbbc(self):
if not hasattr(self, 'warrant'):
self.warrant = ba.hsbc.get_warrant_group_by(self.sym)
if not hasattr(self, 'cbbc'):
self.cbbc = ba.hsbc.get_cbbc_group_by(self.sym)
def load_hkexnews(self):
"""
        We always want this to be called only once, since we can't backtest it easily.
        It just saves some work in daily running.
"""
if not hasattr(self, 'hkexnews'):
setattr(self, 'hkexnews', ba.hkex.hkexnews_single_stock_news(self.sym))
def load_ccass(self):
setattr(self, 'ccass', ba.ccass.CCASS(self.sym))
def load_mmi(self, dayback= 30):
setattr(self, 'mmi', ba.mmi.get_by_sym_sql(self.sym, dayback= dayback))
def load_details(self, dayback= 30, col_lst = []):
setattr(self, 'details', ba.etnet.get_by_sym_sql(self.sym, dayback= dayback, col_lst= col_lst))
def load_min_bar(self, min_: str):
setattr(self, 'data%smin' % min_,
ba.algo.make_min_bar(self.data1min, '%sMin' % min_, simple_mode= True))
def load_chi_name(self):
setattr(self, 'chi_name', ba.fr.get_chi_name(self.sym))
def _convert_tc_rtq_to_dataframe(self, dic):
        df = pd.DataFrame.from_dict(dic, orient='index').T
df['Date'] = pd.to_datetime(df['Date'], format="%Y%m%d")
df['Open'] = df['Open'].astype(float)
df['High'] = df['High'].astype(float)
df['Low'] = df['Low'].astype(float)
df['Close'] = df['Close'].astype(float)
df['Vol'] = df['Vol'].astype(float)
df = df.drop_duplicates(subset='Date', keep='last')[['Date', 'Open', 'High', 'Low', 'Close', 'Vol']].set_index(
'Date')
return df
def calculate_meta(self):
"""
daybar_chgp : The last day Chgp
daybar_chg : The last day Chg
"""
try:
self.daybar_chgp = round(((self.daybar.Close.iloc[-1] / self.daybar.Close.iloc[-2]) - 1) * 100, 2)
self.daybar_chg = round((self.daybar.Close.iloc[-1] - self.daybar.Close.iloc[-2]), 2)
except:
self.daybar_chgp = round(self.daybar.Close.pct_change()[-1] * 100, 2)
self.daybar_chg = round((self.daybar.Close - self.daybar.Close.shift(1))[-1], 2)
def calculate_change(self):
self.daybar = ba.algo.calc_chg_chgp(self.daybar)
def update(self, data):
"""
        data is a dict, defined in the pricer's get_stock_quote.
        If a field is added, the get_stock_quote function needs to change as well.
        update:
            1. Returns 5/15/30 min bars
            2. Returns Week / Month bars
"""
# print('Stock instance Updating : %s' % self.sym)
# Less performance:
# self.data1min = self.data1min.append(df).drop_duplicates(keep='last')
# better performance:
d = self.data1min.append(data['1min'])
# 1min bar
self.data1min = d[~d.index.duplicated(keep='last')]
# daybar
lastday = self.data1min.reset_index()["Datetime"].map(lambda t: t.date().strftime('%Y%m%d')).unique()[-1]
resampled_daybar = ba.algo.make_day_bar(self.data1min[lastday])
self.daybar = self.daybar.append(resampled_daybar, sort=True).reset_index().drop_duplicates(
subset='Date', keep='last').set_index('Date')
# calculate the meta
self.calculate_meta()
# try:
# # debug or production, tc is getting in realtime, and append to the bar
# self.daybar = self.daybar.append(self._convert_tc_rtq_to_dataframe(data['tc'])).reset_index().drop_duplicates(subset='Date', keep='last').set_index('Date')
# #Stock.quotes[self.sym].update(data['tc'])
# except:
# # while in backtest, tc dict is empty, so we make the bar on our own
# self.daybar = self.daybar.append(ba.algo.make_day_bar(self.data1min)).reset_index().drop_duplicates(subset='Date', keep='first').set_index('Date')
# Stock.quotes[self.sym].update(ba.algo.simulate_tc_dic(self.data1min))
        # For better performance, just calculate the target stocks
        # rather than recalculating on every update
# setattr(self, 'data5min', ba.algo.make_min_bar(self.data1min, '5Min'))
# setattr(self, 'data30min', ba.algo.make_min_bar(self.data1min, '30Min'))
# setattr(self, 'weekbar', ba.algo.make_day_bar(self.daybar, 'W'))
# setattr(self, 'monthbar', ba.algo.make_day_bar(self.daybar, 'M'))
def make_bar(self):
"""
        Sometimes we don't update, we just need to make some bars
"""
if not hasattr(self, 'daybar') and not hasattr(self, 'data1min'):
print('No bar loaded')
return
else:
print('Make bar')
setattr(self, 'data5min', ba.algo.make_min_bar(self.data1min, '5Min'))
setattr(self, 'data15min', ba.algo.make_min_bar(self.data1min, '15Min'))
setattr(self, 'data30min', ba.algo.make_min_bar(self.data1min, '30Min'))
setattr(self, 'weekbar', ba.algo.make_day_bar(self.daybar, 'W'))
setattr(self, 'monthbar', ba.algo.make_day_bar(self.daybar, 'M'))
def make_bar_threading(self):
"""
        Sometimes we don't update, we just need to make some bars
"""
if not hasattr(self, 'daybar') and not hasattr(self, 'data1min'):
print('No bar loaded')
return
else:
min_dic = {'data5min': '5Min',
'data15min': '15Min',
'data30min': '30Min'}
day_dic = {'weekbar': 'W',
'monthbar': 'M'}
def minbar_worker(dic):
for k, v in dic.items():
setattr(self, k, ba.algo.make_min_bar(self.data1min, v))
def daybar_worker(dic):
for k, v in dic.items():
setattr(self, k, ba.algo.make_day_bar(self.daybar, v))
print('Making bar by thread...')
t1 = threading.Thread(target=daybar_worker, args=(day_dic,))
t2 = threading.Thread(target=minbar_worker, args=(min_dic,))
t1.start()
t2.start()
t1.join()
t2.join()
print('Done.')
def look(self):
ba.algo.set_data(self.daybar)
ba.algo.Signal_T3B()
ba.algo.Signal_T3B2()
ba.algo.Signal_BreakATR()
ba.algo.Signal_MA_converge()
ba.algo.Signal_Penetrate_ma60_ma200()
ba.algo.Signal_Blow_BBands()
print('================== T3B =====================')
print(ba.algo.data['Signal_T3B'][ba.algo.data['Signal_T3B'] == True][-5:])
print('================== T3B 2 =====================')
print(ba.algo.data['Signal_T3B2'][ba.algo.data['Signal_T3B2'] == True][-5:])
print('================== Break ATR =====================')
print(ba.algo.data['Signal_BreakATR'][ba.algo.data['Signal_BreakATR'] == True][-5:])
print('================== MA 5 10 20 =====================')
print(ba.algo.data['Signal_MA_Converge'][ba.algo.data['Signal_MA_Converge'] == True][-5:])
print('================== MA 60 200 =====================')
print(ba.algo.data['Signal_penetrate_ma60_ma200'][ba.algo.data['Signal_penetrate_ma60_ma200'] == True][-5:])
print('================== Blow BB =====================')
print(ba.algo.data['Signal_Blow_BBands'][ba.algo.data['Signal_Blow_BBands'] == True][-5:])
# ------ meta data ------
def get_pe(self, latest=True, exact_date=''):
value = ba.etnet.get_by_sym_sql(self.sym,
exact_date=str(exact_date),
col_lst=['P/E Ratio/Est.'])
value['PE'] = value['PE'].astype(float)
if latest:
pe = value['PE'].iat[-1]
setattr(self, 'PE', pe)
else:
pe = value['PE']
return pe
def get_pe_est(self, latest=True, exact_date=''):
value = ba.etnet.get_by_sym_sql(self.sym,
exact_date=str(exact_date),
col_lst=['P/E Ratio/Est.'])
value['PE_est'] = value['PE_est'].astype(float)
if latest:
pe_est = value['PE_est'].iat[-1]
setattr(self, 'PE_est', pe_est)
return pe_est
def get_pb(self, exact_date=''):
value = ba.etnet.get_by_sym_sql(self.sym, exact_date=str(exact_date), col_lst=['net_book_value'])
return value
def get_vwap(self, exact_date=''):
value = ba.etnet.get_by_sym_sql(self.sym, exact_date=str(exact_date), col_lst=['VWAP'])
return value
def get_close(self, exact_date=''):
value = ba.etnet.get_by_sym_sql(self.sym, exact_date=str(exact_date), col_lst=['Last'])
return value
def get_tick(self, exact_date=''):
return ba.etnet.get_by_sym_sql(self.sym, exact_date=str(exact_date), col_lst=['tick'])
def get_chg_chgp(self, exact_date=''):
return ba.etnet.get_by_sym_sql(self.sym, exact_date=str(exact_date), col_lst=['Chg', 'Chgp'])
def get_10Dreturn(self, exact_date=''):
return ba.etnet.get_by_sym_sql(self.sym, exact_date=str(exact_date), col_lst=['return_rate_10d'])
def get_peg(self, sym):
"""
        Assumed formula: static P/E ratio / most recently announced growth rate
"""
c = lambda x: float(x) if x != '' else -1
growth = c(list(ba.analyst.aastocks_profit_loss(sym)['Net Profit Growth (%)'].values())[-1].replace('-', ''))
        pe = self.get_pe(latest=True)
PEG = pe / growth
if PEG < 0:
PEG = np.nan
return PEG
class Commodities(BaseClassProducts):
# symlist_index = defaultdict(list)
# quotes = defaultdict(dict)
meta = ['daybar', 'data1min']
def __init__(self, sym):
# BaseClass_Products.__init__(self)
self.sym = sym
# Commodities.symlist_index[sym].append(self)
def display(self):
print("Symbol:", self.sym)
print("no. of EOD bar:", len(self.daybar))
print("no. of 1min bar:", len(self.data1min))
def dump(self, recalc= True, dayback= None):
print('Dumping : %s' % self.sym)
# try:
        setattr(self, 'data1min', ba.f.get_futures_sql(self.sym, dayback=30))  # excludes the night session
setattr(self, 'daybar', ba.algo.make_day_bar(self.data1min))
# except Exception as e:
# print(str(e))
# setattr(self, 'data1min', ba.f.get_aastocks_intraday_futures(self.sym))
# setattr(self, 'daybar', ba.algo.make_day_bar(self.data1min))
def _init_f(self):
# init some main figures
return dict(
pClose=self.daybar.Close.iloc[-2],
pHigh=self.daybar.High.iloc[-2],
pLow=self.daybar.Low.iloc[-2],
pOpen=self.daybar.Open.iloc[-2],
pRange=self.daybar.High.iloc[-2] - self.daybar.Low.iloc[-2],
pBody=abs(self.daybar.Close.iloc[-2] - self.daybar.Open.iloc[-2]),
pVol=self.daybar.Vol.iloc[-2])
def calculate_meta(self):
self.daybar_chg = round((self.daybar.Close[-1] - self.daybar.Close.shift(1)[-1]), 2)
def update(self, data):
"""
data is a dict, defined in pricer, get_futures_quote
if adding field, need to change in the get_futures_quote function
"""
# print('Commodities instance Updating :', self.sym)
## better performance
d = self.data1min.append(data['1min'])
self.data1min = d[~d.index.duplicated(keep='last')]
self.daybar = ba.algo.make_day_bar(self.data1min)
self.calculate_meta()
# Commodities.quotes[self.sym].update(data['summary'])
        # For better performance, just calculate the target stocks
        # rather than recalculating on every update
# setattr(self, 'data5min', ba.algo.make_min_bar(self.data1min, '5Min'))
# setattr(self, 'data30min', ba.algo.make_min_bar(self.data1min, '30Min'))
# setattr(self, 'weekbar', ba.algo.make_day_bar(self.daybar, 'W'))
# setattr(self, 'monthbar', ba.algo.make_day_bar(self.daybar, 'M'))
if __name__ == '__main__':
# f = Commodities('HSIF')
# f.dump()
from BATrader.utils.toolbox import timeit
s = Stock('620')
s.dump()
# s.calculate_meta()
# from BATrader.final.scanner import thirty_minues_blow_bbands, five_minutes_three_in_row, is_today_t3b, \
# is_today_break_atr, is_today_break_bb_daybar, is_today_over_ma_60_200
# is_today_t3b(s)
c = Commodities('HSIF')
c.dump()
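    # A minimal usage sketch (assumption: the BATrader databases behind ba.tc,
    # ba.rtq and ba.f are reachable, so the dump() calls above succeeded):
    s.calculate_meta()            # last-day Chg / Chgp of the stock
    print(s.sym, s.daybar_chgp)
    s.make_bar()                  # derive 5/15/30-min, week and month bars
    print(s.weekbar.tail())
    print(c.daybar.tail())        # day bars resampled from the futures 1-min data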
# period_tuple = ('20190627', '20190628')
# # Calculate the day range
# rng = []
# for day in ba.dk.date_ranger(ba.dk.get_next_calendar_day(period_tuple[0]),
# ba.dk.get_next_calendar_day(period_tuple[1])):
# for t in pd.date_range("%s 9:30" % day, "%s 16:30" % day, freq='min'):
# if t not in pd.date_range("%s 12:01" % day, "%s 12:59" % day, freq='min'):
# rng.append(t)
```
#### File: BATrader/final/scanner.py
```python
import talib as ta
import BATrader as ba
from BATrader.final.product import Stock
from BATrader.final.data_handler import MemoryDict
from BATrader.algo.algo import last_signal_bool, chg, chgp
from BATrader.utils.toolbox import timeit
from BATrader.market.MarketReader import convert
# import backtrader as bt
# import backtrader.feeds as btfeeds
# import backtrader.indicators as btind
import pandas as pd
from queue import Queue
from threading import Thread
from BATrader.logger import Logger
from BATrader.final.filters import Filters
from collections import OrderedDict
from copy import copy
import inspect
def divided_and_show_pct(a, b):
"""
Two period of data change in pecentage (without %)
"""
return round((float(a) / float(b)) * 100, 2)
"""
# Broker holdings anomaly: holding percentage changes by 2% in a day, 4% in a week, or 10% in a month
# In the past X periods, how many days rose/fell by Y points or more
# After the midday close, look for stocks with a 30-min breakout and push them
# Find stocks on the 30-min chart that finished a pullback and are rebounding - e.g. 1108
# Ratio of the current price to the listing (IPO) price
# 941 20140718 - 60/200 MA divergence within 5%, rallies on heavy volume, breaks both MAs in one go: open < ma60 < ma200 < close
# 992 weekly bar breaks below the 60-day line
# Latest announcements published in the 12 hours from 8pm to 8am the next morning (pay special attention to stretches without a speculative focus, e.g. the quiet days after a big drop)
## Daily report:
Market-wide P/E of the previous day
from HKEx import PE_Report
pe = PE_Report()
pe.output_excel()
## Weekly report:
#### Over the past week:
Among blue chips: top 10 gainers, overall advance/decline ratio, HSI change
Among Main Board stocks: top 10 gainers, overall advance/decline ratio
Main Board stocks with 0-5B market cap: top 10 movers
Main Board stocks with 5B-20B market cap: top 10 movers
Sector performance heatmap
"""
# Vector Scanner
# http://www.bossqiao.com/02%20coding/
# ------ Scanner ------
class Scanner(object):
"""
Created on Sun May 15 23:37:02 2016
PriceServer v1 2016-05
RTQ update Robot
scan_list : the scan target
logic_dic : key-value pair, contain a callback function and params
scan_result : list
    Implementation approach 1:
        DataFrame: if the condition is true, return the stock sym
        multiple conditions: set them all, return everything matched
    Implementation approach 2:
        scan through from the first symbol to the last
last_week_up_x_pct
2018-10
    Added a Filter class: when a question can be answered immediately from the
    symlist, implement it as a filter, which is very fast.
    When dayback calculations are needed, use the Scanner class instead.
    Redefinition of the Scanner class: it should follow the Cerebro design, a
    container that everything is dropped into before pressing start to run. The
    difference is that a scanner has more than one condition, each needing
    different data, so the strategy class covers the frequently changing parts.
2019-05
    Since the Strategy finally took shape, I found that simply passing s (a Stock
    obj) into a standalone logic function that returns True/False is already
    enough to form a Condition, and it can be reused.
    The Scanner class design is elegant and very flexible.
    To shift the data (e.g. Close[0] -> Close[1]) you can manipulate the s obj directly.
Usage:
scanner = Scanner()
scanner.addsymlist(ba.fr.get_main_gem())
# scanner.addfilter('and', filter_by_over_ma_60())
scanner.addfilter('and', flt.filter_by_amount(10000000.0))
scanner.addfilter('and', flt.filter_by_over_ma(60))
# scanner.addfilter('not', filter_ot_list())
scanner.addcondition(HighVolatilityStock)
# scanner.addstrategy(TestStrategy)
"""
def __init__(self, running_mode=0, cond_method= 0, recalc= False, dayback= None):
self.run_mode = {0: 'SingleThread', 1: 'Threading'}.get(running_mode)
self.cond_apply_method = {0: 'recursive', 1: 'independent'}.get(cond_method) # Recursive is one-and-one
self.strats = list() # From Cerebro, addstrategy will add to here
self.datas = list()
self.scan_list = [] # the scan target container of each loop
self.scan_list_groups = {} # Sometimes we needs "Sector" -> { Sector : [symlist] }
self.scan_result = [] # the scan result
self.filters_and = [] # match
self.filters_not = [] # not match
self.condition_list = {}
self.last_scan_result = {'result': OrderedDict()} # the last scan result
self.flt = Filters()
# Memory dict
MemoryDict.recalc = recalc
MemoryDict.dayback = dayback
self.memory_db = MemoryDict()
# For threading
self.queue = Queue()
# logger
self.logger = None
@timeit
def run(self):
"""
Run like cerebro
"""
        # Logger should be initialised at run time, not in the class init,
        # otherwise the log output will be held back
self.logger = Logger(logname=ba.fr.path_output + 'logger\\%s_scanner.log' % ba.dk.tradeday(),
logger='main').getlog()
# Every time to run, copy the loaded_full_list
# self.scan_list = copy(self.loaded_full_list)
# Clear the last scan result
self.last_scan_result = {'steps': OrderedDict()} # the last scan result
self.logger.debug('Running')
self.logger.info("Scanner mode activated : {}".format(self.run_mode))
if not self.scan_list:
self.logger.info('Scan list is empty')
return
else:
self.logger.info('Scan list size : %s' % len(self.scan_list))
# ---------- Filters ------------
if self.filters_and:
for f in self.filters_and:
self.scan_list = set(f).intersection(self.scan_list)
self.logger.info('Filtering and logic : %s' % len(self.scan_list))
if self.filters_not:
for f in self.filters_not:
self.scan_list = list(set(self.scan_list) - set(f))
self.logger.info('Filtering not logic : %s' % len(self.scan_list))
# ---------- Condition ----------
if self.condition_list:
# Single Thread
if self.run_mode == 'SingleThread':
for name, func_tuple in list(self.condition_list.items()):
                    self.logger.info('Scanning cond: {} with {} stocks.'.format(name, len(self.scan_list)))
# condition
func = func_tuple[0]
params_dict = func_tuple[1]
# loop tracker
steps_in_order_dic = self.last_scan_result['steps'][name] = {} # last dic
true_hit_list = []
self.scan_list.sort(key= int)
for sym in self.scan_list:
self.logger.debug('Scanning {}'.format(sym))
try:
s = self.memory_db[sym]
if func(s, **params_dict):
true_hit_list.append(sym)
steps_in_order_dic.update({sym: True})
if name in s.scanner_meta_data:
# If condition carry meta data, update to the key
steps_in_order_dic.update({sym: s.scanner_meta_data[name]})
except Exception as e:
self.logger.info('Scanner Error : %s, %s' % (sym, str(e)))
pass
if self.cond_apply_method == 'recursive':
                        # use the result of the previous condition
self.scan_list = true_hit_list
# Last scan result
self.last_scan_result['result'] = true_hit_list
# Multi Thread
elif self.run_mode == 'Threading':
# First put symlist in queue
self.scan_list.sort(key= int)
for sym in self.scan_list:
self.queue.put(sym)
def scan(func, params_dict):
while not self.queue.empty():
sym = self.queue.get()
self.logger.debug('Scanning {}'.format(sym))
s = self.memory_db[sym]
if func(s, **params_dict):
true_hit_list.append(sym)
steps_in_order_dic.update({sym: True})
if name in s.scanner_meta_data:
steps_in_order_dic.update({sym: s.scanner_meta_data[name]})
for name, func_tuple in list(self.condition_list.items()):
self.logger.info('Scanning cond: {} with {} stocks.'.format(name, self.queue.qsize()))
# condition
func = func_tuple[0]
params_dict = func_tuple[1]
# loop tracker
steps_in_order_dic = self.last_scan_result['steps'][name] = {} # last dic
true_hit_list = []
# t1 = Thread(target= scan, name='T1', args=(func, params_dict))
# t2 = Thread(target= scan, name='T2', args=(func, params_dict))
# t3 = Thread(target= scan, name='T3', args=(func, params_dict))
# t4 = Thread(target= scan, name='T4', args=(func, params_dict))
#
# t1.start()
# t2.start()
# t3.start()
# t4.start()
#
# t1.join()
# t2.join()
# t3.join()
# t4.join()
NUM_THREADS = 4
threads = [Thread(target= scan, args=(func, params_dict)) for i in range(NUM_THREADS)] # Create 2 threads
list([th.start() for th in threads]) # Make all threads start their activities
list([th.join() for th in threads]) # block until all threads are terminated
if self.cond_apply_method == 'recursive':
for sym in true_hit_list:
self.queue.put(sym)
else:
# Put scan list back to queue
self.scan_list.sort(key= int)
for sym in self.scan_list:
self.queue.put(sym)
# Last scan result
self.last_scan_result['result'] = true_hit_list
# ---------- Finish ----------
# Write to TSCI, so we can view the scan result in tsci
if self.cond_apply_method == 'recursive':
ba.sm.tsci_write_symlist_to_file('scanner_result', self.scan_list)
self.logger.info('>>> Scan complete. Result: %s' % len(self.scan_list))
else:
self.logger.info('>>> Scan complete.')
return self.last_scan_result
@timeit
def run_groups_scan(self):
result = {}
if self.scan_list_groups:
for gp_name, gp_symlist in self.scan_list_groups.items():
self.setsymlist(gp_symlist)
result[gp_name] = self.run()
self.last_scan_result = result
return result
def result(self):
# or simply the list(list(self.last_scan_result.items())[-1][1].keys())
return self.scan_list
def setsymlist(self, symlist: list):
self.scan_list = symlist
self.loaded_full_list = symlist
def addsymlist(self, symlist: list):
self.scan_list = list(set(self.scan_list + symlist))
self.loaded_full_list = copy(self.scan_list)
def addgroups(self, symlist_groups: dict):
self.scan_list_groups = symlist_groups
def addfilter(self, and_or_not, df):
"""
'and' & df
'not' & df
"""
if and_or_not == 'and':
self.filters_and.append(self.flt.get_the_symlist(df))
elif and_or_not == 'not':
self.filters_not.append(self.flt.get_the_symlist(df))
def addcondition(self, condition, **kwargs):
self.condition_list[condition.__name__] = (condition, kwargs)
# self.condition_list.append(s: MemoryDict)
def make(self, symlist):
for s in symlist:
yield s
    def make_queue(self, symlist):
        for sym in symlist:
            self.queue.put(sym)
        return self.queue
def start_scan_hub(self):
print("Scanner mode activated.")
for i, scanner in enumerate(self.scanner_list):
if i == 0:
self.scan_symlist = self.symlist
print('Start Scanning the %s criteria in %s stocks: %s' % (i, len(self.scan_symlist), scanner.name))
scanner.symlist = self.scan_symlist
result = scanner.start_scan()
print('There is %s in result' % len(result))
print()
self.scan_symlist = result
self.result = self.scan_symlist
print('Here is the result: ', len(self.result))
def today_have_signal(self, algo, symlist='', dayback=0):
if not symlist:
symlist = self.fr.getCleanlist()
print(symlist)
sig_list = []
print('Scanning signal...')
for sym in symlist:
# print sym
d = algo(self.tc.get_bar_ex_eod_db(sym))
# print self.tradeday
try:
bool_ = d['Signal'].ix[self.tradeday]
except KeyError:
continue
if bool_:
sig_list.append(sym)
print('Done.')
return sig_list
# ------ Auxiliary related ------
def auxiliary_ground_rule(s: MemoryDict) -> bool:
"""
    9-day average turnover must be greater than 3 million
    volume is 9 times the previous day's
    price must be above 0.20
    price must be above the 200-day MA
Args:
s ():
Returns:
"""
daybar = s.daybar[-30:]
total_turn_day = 9
if len(daybar) < 9:
total_turn_day = len(daybar)
nine_day_turnover = ta.EMA(daybar.Turnover.values, total_turn_day)
    cond1 = nine_day_turnover[-1] >= 3000000  # 9-day average turnover above 3M, or (for an IPO) average turnover above 3M
    cond2 = (daybar.Vol.iloc[-1] / daybar.Vol.iloc[-2]) > 9  # volume 9x the previous day's
    cond3 = daybar.Close.iloc[-1] >= 0.20  # price must be above 0.20
    # cond4 =  # price must be above the 200-day MA
if (cond1 | cond2) & cond3:
return True
return False
def filter_production(s: MemoryDict, target_chgp: float) -> bool:
"""
    Change % is greater than a given value
"""
if s.daybar_chgp > target_chgp:
return True
return False
# ------ Momentum related ------
def price_up_vol_up(s: MemoryDict, in_row=1):
return last_signal_bool(ba.algo.Signal_p_up_vol_up(s.daybar, in_row))
def rainbow(s: MemoryDict, in_row=1):
return last_signal_bool(ba.algo.Signal_MAHL_x_day_up(s.daybar, in_row))
def moving_average_condition(s: MemoryDict, fast_period, slow_period) -> bool:
    """
    Fast MA is above the slow MA
    """
    ba.algo.set_data(s.daybar)
    ba.algo.cmd_ema(fast_period)
    ba.algo.cmd_ema(slow_period)
    fast = ba.algo.data['ema%s' % fast_period].iloc[-1]
    slow = ba.algo.data['ema%s' % slow_period].iloc[-1]
    return fast > slow
def moving_average_golden_cross_condition(s: MemoryDict, fast_period, slow_period) -> bool:
    """
    Golden cross: the fast MA crosses above the slow MA
    """
    ba.algo.set_data(s.daybar)
    ba.algo.cmd_ema(fast_period)
    ba.algo.cmd_ema(slow_period)
    f0 = ba.algo.data['ema%s' % fast_period].iloc[-1]
    s0 = ba.algo.data['ema%s' % slow_period].iloc[-1]
    f1 = ba.algo.data['ema%s' % fast_period].iloc[-2]
    s1 = ba.algo.data['ema%s' % slow_period].iloc[-2]
    return f0 > s0 and f1 < s1
def moving_average_death_cross_condition(s: MemoryDict, fast_period, slow_period) -> bool:
    """
    Death cross: the fast MA crosses below the slow MA
    """
    ba.algo.set_data(s.daybar)
    ba.algo.cmd_ema(fast_period)
    ba.algo.cmd_ema(slow_period)
    f0 = ba.algo.data['ema%s' % fast_period].iloc[-1]
    s0 = ba.algo.data['ema%s' % slow_period].iloc[-1]
    f1 = ba.algo.data['ema%s' % fast_period].iloc[-2]
    s1 = ba.algo.data['ema%s' % slow_period].iloc[-2]
    return f0 < s0 and f1 > s1
def triple_ma_blazer(s: MemoryDict, *args) -> bool:
"""
    ema10, ema20 and ema50 all rising for three consecutive bars
"""
g1 = (10, 20, 50)
match = False
d = ba.typical_ema_groups(s.daybar)
if d['ema10'].iloc[-1] > d['ema10'].iloc[-2] > d['ema10'].iloc[-3]:
if d['ema20'].iloc[-1] > d['ema20'].iloc[-2] > d['ema20'].iloc[-3]:
if d['ema50'].iloc[-1] > d['ema50'].iloc[-2] > d['ema50'].iloc[-3]:
match = True
return match
# ------ Demand related ------
def sudden_vol_blowing(s: MemoryDict, times, turnover):
    """
    # Up today on heavy volume (no news, no obvious reason for the surge)
    Up today, combined with a volume multiple of `times`,
    but turnover has to keep up as well, at least 10-20 million
    """
    d = s.daybar
    ratio = d.Vol.iat[-1] / d.Vol.iat[-2]
    match = False
    if ratio > times:
        try:
            today_turnover = ba.etnet.get_by_sym_sql(s.sym, exact_date=ba.dk.tradeday())['Turnover'].values[0]
            if today_turnover >= turnover:
                s.scanner_meta_data[inspect.stack()[0][3]] = ratio
                match = True
        except Exception:
            pass
    return match
def high_volatility_stock(s: MemoryDict):
"""
    ATR relative to Close is greater than 0.05, and
    ATR relative to one price tick is greater than 30
"""
match = False
ba.algo.set_data(s.daybar)
ba.algo.cmd_atr(10)
d = ba.algo.data
d['p'] = d.ATR / d.Close
from BATrader.market.MarketReader import pricespread
if d.p.iloc[-1] >= 0.05:
if d.ATR.iloc[-1] / pricespread(d.Close.iloc[-1]) > 30:
s.scanner_meta_data[inspect.stack()[0][3]] = (d.ATR.iloc[-1], # ATR
d.ATR.iloc[-1] / pricespread(d.Close.iloc[-1]), # ATR / tick
d.p.iloc[-1]) # ATR / Close]
match = True
return match
def unknown_func_last(s: MemoryDict):
# Sector ond day momentum index
sector = ba.etnet.etnet_concept_dic['內房股']
# house_list = sm.fr.which_group(sector)
etnet.get_by_sym_sql('1313', col_lst=['PE'])
dic = etnet.etnet_concept_dic
for sector, symlist in list(dic.items()):
print(sector)
# path = self.path_input + 'sector.txt'
# with open(path,'a') as f:
# f.write(msg + '\n')
# 1 loops, best of 3: 37 s per loop
ignore_sector = ['農業股']
dayback = 1
result_dic = {}
df_dic = {}
single_stock_df = pd.DataFrame()
for chi_name, eng_name in list(etnet.etnet_concept_dic.items()):
# print eng_name
group = fr.which_group(eng_name)
print(chi_name, eng_name, group)
        # Lv2 direct approach
# d = rtq.get_aa_bunch_df(group, detail='lv2', col = ['Close','pClose'])
# d.Close = d.Close.astype('float')
# d.pClose = d.pClose.astype('float')
        # Lv1 + DB approach
d = rtq.get_aa_bunch_df(group, detail='lv1', col=['Close'])
t = tc.get_bar_ex_eod_db(group, date=dk.tradeday(dayback), col=['Close'])
t = t.reset_index().set_index('sym')[['Close']]
t.columns = ['pClose']
d = pd.concat([d, t], axis=1, sort=False)
d.Close = d.Close.astype('float')
d = d.dropna()
df_dic[eng_name] = d
d['SignleStockChgp'] = d.Close / d.pClose
single_stock_df = single_stock_df.append(d)
result_dic[chi_name] = d.Close.sum() / d.pClose.sum()
r = {}
for group, value in list(result_dic.items()):
r[group] = (value - 1) * 100
top20 = single_stock_df.sort_values('SignleStockChgp', ascending=False)[:20]
df = pd.DataFrame(list(r.items()), columns=['Group', 'Value'])
df.set_index('Group')
top20_sector = pd.concat(
[df.sort_values('Value', ascending=False)[:10], df.sort_values('Value', ascending=False)[-10:]], sort=False)
print(top20)
print(top20_sector)
single_stock_df.sort_values('SignleStockChgp', ascending=False)[:20]
for chi_name, eng_name in list(etnet.etnet_concept_dic.items()):
# print eng_name
group = fr.which_group(eng_name)
print(eng_name, group)
d = rtq.get_aa_bunch_df(group, detail='lv1', col=['Close'])
t = tc.get_bar_ex_eod_db(group, date='20170524', col='Close')
t = t.reset_index()
t = t.set_index('sym')[['Close']]
t.columns = ['pClose']
d = pd.concat([d, t], axis=1, sort=False)
sym = 2018
from etnet import Etnet
etnet = Etnet()
etnet.get_tick(str(sym))[-20:]
def tick_is_over_5day_average(s: MemoryDict):
    """
    Tick count is over its 5-day average
    """
    df = ba.etnet.get_by_sym_sql(s.sym, col_lst=['tick'])
    df['ma5'] = ta.EMA(df['tick'].astype('float').values, 5)
    return df.ma5.iloc[-1] > df.ma5.iloc[-2]
def inactive_and_thin(s: MemoryDict):
"""
if last 5 days average trading amount is less than 1m, eliminate
col name : inactive_or_thin
"""
s.load_details(col_lst = ['tick','Turnover'], dayback= 11)
data = s.details
win = 5
data['AvgT'] = data.Turnover.apply(lambda x: convert(x))
data['AvgTick'] = data['tick'].apply(lambda x: convert(x)).rolling(window= win).mean()
# if average turnover less than 1M and average Tick is less than 100 , kick out
if data.AvgT[-1] <= 1000000 or data.AvgTick[-1] <= 100:
return True
return False
def low_liquidity(s: MemoryDict):
"""
    If, on its most recent day, a stock has more than 5 minutes with no trades at all, liquidity is low and it can be eliminated
"""
s.load_min_bar('5') # It's simple mode
lastday = s.data5min.reset_index()["Datetime"].map(lambda t: t.date().strftime('%Y%m%d')).unique()[-1]
# ba.algo.make_min_bar(s.data1min.last('1D'), '5Min')
    # resample to 5-minute candles and drop the sessions we don't need
data5m = s.data5min[lastday]
# morning_session = data5m.between_time(start_time='09:30:00', end_time='11:59:00', include_end=True)
# afternoon_session = data5m.between_time(start_time='13:00:00', end_time='15:59:00', include_end=True)
# data5m = morning_session.append(afternoon_session)
    # more than 5 minutes without a single trade means low liquidity; the stock can be eliminated
# Adding half day filter : afternoon bar equal all zero bar
#if len(afternoon_session) == len(afternoon_session.loc[afternoon_session.Vol == 0.0]):
#if len(morning_session.loc[morning_session.Vol == 0.0]) > 2:
#return True
if len(data5m.loc[data5m.Vol == 0.0]) > 2:
return True
return False
# ------ Intraday related ------
def five_minutes_three_in_row(s: MemoryDict) -> bool:
"""
Signal_3_in_row
Args:
s ():
Returns:
"""
if int(s.data1min.index[-1].strftime('%M')) % 5 != 0:
return False
s.load_min_bar('5')
if last_signal_bool(ba.algo.Signal_3_in_row(s.data5min)):
return True
return False
def thirty_minues_blow_bbands(s: MemoryDict) -> bool:
if int(s.data1min.index[-1].strftime('%M')) % 30 != 0:
return False
#ba.algo.set_data(ba.algo.make_min_bar(s.data1min, '30Min'))
s.load_min_bar('30')
if last_signal_bool(ba.algo.Signal_Blow_BBands(s.data30min)):
return True
return False
# ------ Swing Trade related ------
def is_today_t3b(s: MemoryDict):
"""
# T3B2
ba.algo.Signal_T3B2()
generate_algo_msg('T3B2', ba.algo.data['Signal_T3B2'])
"""
return last_signal_bool(ba.algo.Signal_T3B2(s.daybar))
def is_today_break_atr(s: MemoryDict):
"""
# BreakATR
ba.algo.Signal_BreakATR()
generate_algo_msg('BreakATR', ba.algo.data['Signal_BreakATR'])
"""
return last_signal_bool(ba.algo.Signal_BreakATR(s.daybar))
def is_today_over_ma_60_200(s: MemoryDict):
"""
# >ma60/200
ba.algo.Signal_Penetrate_ma60_ma200()
generate_algo_msg('>ma60/200', ba.algo.data['Signal_penetrate_ma60_ma200'])
"""
return last_signal_bool(ba.algo.Signal_Penetrate_ma60_ma200(s.daybar))
def is_today_break_bb_daybar(s: MemoryDict):
"""
# breakBB_daybar
ba.algo.Signal_Blow_BBands()
generate_algo_msg('blowBB_daybar', ba.algo.data['Signal_Blow_BBands'])
"""
return last_signal_bool(ba.algo.Signal_Blow_BBands(s.daybar))
# ------ Week related ------
def last_week_down_x_pct(s: MemoryDict, pct):
"""
    At the end of a week, down by more than X %
    The candle must not have a long lower shadow
"""
d = ba.algo.make_week_bar(s.daybar)
d = chgp(d)
if d['Chgp'].iloc[-1] < -pct:
s.scanner_meta_data[inspect.stack()[0][3]] = d['Chgp'].iloc[-1]
return True
return False
def last_week_up_x_pct(s: MemoryDict, pct):
"""
    At the end of a week, up by more than X %
    The candle must not have a long upper shadow
"""
d = ba.algo.make_week_bar(s.daybar)
d = chgp(d)
from BATrader.algo.algo import candle_wick
d = candle_wick(d)
if d['Chgp'].iloc[-1] > pct:
if d.Wick.iloc[-1] != 1:
s.scanner_meta_data[inspect.stack()[0][3]] = d['Chgp'].iloc[-1]
return True
return False
def scan_stock_relative_strong_weak():
"""
    # Proportion of capital flowing out of the market, e.g. for the broad market
    # Proportion of stocks above their moving averages (10, 20, 60)
"""
df[df.pClose > df.ma60]
# ------ Fundamental related ------
def pe_is_below(s: MemoryDict, pe=25):
    s.get_pe()
match = False
if s.PE < pe:
match = True
return match
def ccass_trade_reach_certain_level_of_total_shares_condition(s: MemoryDict):
    """
    CCASS concentration / turnover condition
    """
    day = ba.dk.tradeday()
    dic = ba.ccass.CCASS_flower_paper(s.sym, day)
    vol = ba.tc.get_bar_ex_eod_db(s.sym).loc[day]['Vol']  # e.g. 1853028.0
    cond1 = dic['top10%'] > 80  # concentration above 80
    cond2 = divided_and_show_pct(dic['total_in_ccass'], dic['total_issued_shares']) < 30  # very few shares circulating in CCASS
    cond3 = divided_and_show_pct(vol, dic['total_in_ccass']) > 3  # a very large share count traded that day
    if cond1 or cond2 or cond3:
        s.scanner_meta_data[inspect.stack()[0][3]] = (dic['top10%'],
                                                      divided_and_show_pct(dic['total_in_ccass'], dic['total_issued_shares']),
                                                      divided_and_show_pct(vol, dic['total_in_ccass']))
        return True
    return False
def is_today_hkexnews_has_news(s: MemoryDict):
s.load_hkexnews()
# ------ MMI related ------
def typical_mmi(s: MemoryDict):
"""
    (Turnover must be above 5 million) and (Vol greater than 5x pVol) and (A-side volume above 1.2x yesterday's A-side / active buying above 60)
    (A >= 55 and A greater than yesterday's) and (volume at least 0.7x yesterday's) and (turnover at least 0.7x yesterday's) and (turnover above 5 million), or
    (turnover more than 2x yesterday's) and (A above 60) and (turnover above 9 million), or
    (turnover more than 6x yesterday's) and (turnover rising for three days in a row)
    (volume, turnover and A all above yesterday's), or
(( d.Vol > d.Vol.shift(1)) & (d.Amount > d.Amount.shift(1)) & (d.A > d.A.shift(1))) |\
"""
s.load_mmi(dayback= 10)
return last_signal_bool(ba.algo.Signal_mmi(s.mmi))
if __name__ == '__main__':
flt = Filters()
# sym = '2331'
# md = MemoryDict()
# s = md[sym]
# match = typical_mmi(s)
# self.run_mode = {0: 'SingleThread', 1: 'Threading'}.get(running_mode)
# self.cond_apply_method = {0: 'recursive', 1: 'independent'} # Recursive is one-and-one
scanner = Scanner(1, 1, dayback=30)
# scanner.addgroups({'內銀股': ['939','1398','2628','3988']})
# scanner.addcondition(last_week_up_x_pct, pct=5)
# scanner.addcondition(last_week_down_x_pct, pct=5)
# scanner.run_groups_scan()
scanner.addsymlist(ba.fr.get_main_gem())
# scanner.addfilter('and', filter_by_over_ma_60())
# scanner.addfilter('and', flt.filter_by_amount(10000000.0))
# scanner.addfilter('and', flt.filter_by_over_ma(60))
# scanner.addfilter('not', filter_ot_list())
scanner.addcondition(inactive_and_thin)
scanner.addcondition(low_liquidity)
result = scanner.run()
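    # A minimal sketch of consuming the scan output (assumption: the databases
    # behind the conditions above are populated, so run() returned normally):
    print(scanner.result())                            # symbols surviving the scan
    for cond_name, hits in result['steps'].items():    # per-condition hit details
        print(cond_name, len(hits))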
```
|
{
"source": "jgardezi/Eurodollars",
"score": 2
}
|
#### File: Eurodollars/src/algorithm.py
```python
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Indicators")
AddReference("QuantConnect.Common")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import *
from QuantConnect.Indicators import *
from datetime import datetime
import decimal
import threading
from enum import Enum, auto
class Position(Enum):
"""Enum defining either a long position or short position."""
LONG = auto()
SHORT = auto()
class EURUSDForexAlgo(QCAlgorithm):
"""QuantConnect Algorithm Class for trading the EURUSD forex pair."""
# symbol of the forex pair: European Euros and US Dollars
SYMBOL = "EURUSD"
# number of periods where the fast moving average is
# above or below the slow moving average before
# a trend is confirmed
HOURLY_TREND_PERIODS = 17
DAILY_TREND_PERIODS = 4
# limit for the number of trades per trend
TREND_LIMIT_NUM_TRADES = 3
# maximum holdings for each market direction
MAX_HOLDING_ONE_DIRECTION = 1
# units of currency for each trade
TRADE_SIZE = 5000
    # take-profit and stop-loss offsets.
TP_OFFSET = decimal.Decimal(0.0007)
SL_OFFSET = decimal.Decimal(0.0017)
# stochastic indicator levels for overbought and oversold
STOCH_OVERBOUGHT_LEVEL = 80
STOCH_OVERSOLD_LEVEL = 20
# dictionary to keep track of associated take-profit and
# stop-loss orders
associatedOrders = {}
# concurrency control for the dictionary
associatedOrdersLock = threading.Lock()
def Initialize(self):
"""Method called to initialize the trading algorithm."""
# backtest testing range
self.SetStartDate(2008, 1, 1)
self.SetEndDate(2018, 12, 25)
# amount of cash to use for backtest
self.SetCash(1000)
# forex pair object
self.forexPair = self.AddForex(self.SYMBOL, Resolution.Hour)
# brokerage model dictates the costs, slippage model, and fees
# associated with the broker
self.SetBrokerageModel(BrokerageName.FxcmBrokerage)
# define a slow and fast moving average indicator
# slow moving average indicator: 200 periods
# fast moving average indicator: 50 periods
# these indicator objects are automatically updated
self.hourlySlowSMA = self.SMA(self.forexPair.Symbol, 200, Resolution.Hour)
self.hourlyFastSMA = self.SMA(self.forexPair.Symbol, 50, Resolution.Hour)
# define a pair of moving averages in order to confirm an
# alignment trend in the daily charts
# If both the hourly trend (using the 2 hourly SMAs above) and daily
# trend show the same trend direction,
# then the trend is a strong trend
self.dailySlowSMA = self.SMA(self.forexPair.Symbol, 21, Resolution.Daily)
self.dailyFastSMA = self.SMA(self.forexPair.Symbol, 7, Resolution.Daily)
# counters defining the number of periods of the ongoing trend
# (both the main hourly trend and the alignment daily trend)
self.hourlySMATrend = 0
self.dailySMATrend = 0
# number of trades executed in this trend
self.trendNumTrades = 0
# stochastic indicator
# stochastic period: 9
# stochastic k period: 9
# stochastic d period: 5
self.stoch = self.STO(self.forexPair.Symbol, 9, 9, 5, Resolution.Hour)
# keeps track of overbought/oversold conditions in the previous period
self.previousIsOverbought = None
self.previousIsOversold = None
# keeps track of the time of the previous period
self.previousTime = self.Time
def OnData(self, data):
"""Method called when new data is ready for each period."""
# only trade when the indicators are ready
if not self.hourlySlowSMA.IsReady or not self.hourlyFastSMA.IsReady or not self.stoch.IsReady:
return None
# trade only once per period
if self.previousTime.time().hour == self.Time.time().hour:
return None
self.periodPreUpdateStats()
price = data[self.forexPair.Symbol].Close
# if it is suitable to go long during this period
if (self.entrySuitability() == Position.LONG):
self.enterMarketOrderPosition(
symbol=self.forexPair.Symbol,
position=Position.LONG,
posSize=self.TRADE_SIZE,
tp=round(price + self.TP_OFFSET, 4),
sl=round(price - self.SL_OFFSET, 4))
# it is suitable to go short during this period
elif (self.entrySuitability() == Position.SHORT):
self.enterMarketOrderPosition(
symbol=self.forexPair.Symbol,
position=Position.SHORT,
posSize=self.TRADE_SIZE,
tp=round(price - self.TP_OFFSET, 4),
sl=round(price + self.SL_OFFSET, 4))
self.periodPostUpdateStats()
def entrySuitability(self):
"""Determines the suitability of entering a position for the current period.
Returns either Position.LONG, Position.SHORT, or None"""
# units of currency that the bot currently holds
holdings = self.Portfolio[self.forexPair.Symbol].Quantity
# conditions for going long (buying)
if (
# uptrend for a certain number of periods in both
# the main hourly trend and alignment daily trend
self.dailySMATrend >= self.DAILY_TREND_PERIODS and
self.hourlySMATrend >= self.HOURLY_TREND_PERIODS and
# if it is not oversold
self.stoch.StochD.Current.Value > self.STOCH_OVERSOLD_LEVEL and
# if it just recently stopped being oversold
self.previousIsOversold is not None and
self.previousIsOversold == True and
# if holdings does not exceed the limit for a direction
holdings < self.MAX_HOLDING_ONE_DIRECTION and
# if number of trades during this trend does not exceed
# the number of trades per trend
self.trendNumTrades < self.TREND_LIMIT_NUM_TRADES
):
return Position.LONG
# conditions for going short (selling)
elif (
# downtrend for a certain number of periods in both
# the main hourly trend and alignment daily trend
self.dailySMATrend <= -self.DAILY_TREND_PERIODS and
self.hourlySMATrend <= -self.HOURLY_TREND_PERIODS and
# if it is not overbought
self.stoch.StochD.Current.Value < self.STOCH_OVERBOUGHT_LEVEL and
# if it just recently stopped being overbought
self.previousIsOverbought is not None and
self.previousIsOverbought == True and
# if holdings does not exceed the limit for a direction
holdings > -self.MAX_HOLDING_ONE_DIRECTION and
# if number of trades during this trend does not exceed
# the number of trades per trend
self.trendNumTrades < self.TREND_LIMIT_NUM_TRADES
):
return Position.SHORT
# unsuitable to enter a position for now
return None
def periodPreUpdateStats(self):
"""Method called before considering trades for each period."""
# since this class's OnData() method is being called in each new
# hourly period, the daily stats should only be updated if
# the current date is different from the date of the previous
# invocation
if self.previousTime.date() != self.Time.date():
# uptrend: if the fast moving average is above the slow moving average
if self.dailyFastSMA.Current.Value > self.dailySlowSMA.Current.Value:
if self.dailySMATrend < 0:
self.dailySMATrend = 0
self.dailySMATrend += 1
# downtrend: if the fast moving average is below the slow moving average
elif self.dailyFastSMA.Current.Value < self.dailySlowSMA.Current.Value:
if self.dailySMATrend > 0:
self.dailySMATrend = 0
self.dailySMATrend -= 1
# uptrend: if the fast moving average is above the slow moving average
if self.hourlyFastSMA.Current.Value > self.hourlySlowSMA.Current.Value:
if self.hourlySMATrend < 0:
self.hourlySMATrend = 0
self.trendNumTrades = 0
self.hourlySMATrend += 1
# downtrend: if the fast moving average is below the slow moving average
elif self.hourlyFastSMA.Current.Value < self.hourlySlowSMA.Current.Value:
if self.hourlySMATrend > 0:
self.hourlySMATrend = 0
self.trendNumTrades = 0
self.hourlySMATrend -= 1
def periodPostUpdateStats(self):
"""Method called after considering trades for each period."""
if self.stoch.StochD.Current.Value <= self.STOCH_OVERSOLD_LEVEL:
self.previousIsOversold = True
else:
self.previousIsOversold = False
if self.stoch.StochD.Current.Value >= self.STOCH_OVERBOUGHT_LEVEL:
self.previousIsOverbought = True
else:
self.previousIsOverbought = False
self.previousTime = self.Time
def enterMarketOrderPosition(self, symbol, position, posSize, tp, sl):
"""Enter a position (either Position.LONG or Position.Short)
for the given symbol with the position size using a market order.
Associated take-profit (tp) and stop-loss (sl) orders are entered."""
self.associatedOrdersLock.acquire()
if position == Position.LONG:
self.Buy(symbol, posSize)
takeProfitOrderTicket = self.LimitOrder(symbol, -posSize, tp)
stopLossOrderTicket = self.StopMarketOrder(symbol, -posSize, sl)
elif position == Position.SHORT:
self.Sell(symbol, posSize)
takeProfitOrderTicket = self.LimitOrder(symbol, posSize, tp)
stopLossOrderTicket = self.StopMarketOrder(symbol, posSize, sl)
# associate the take-profit and stop-loss orders with one another
self.associatedOrders[takeProfitOrderTicket.OrderId] = stopLossOrderTicket
self.associatedOrders[stopLossOrderTicket.OrderId] = takeProfitOrderTicket
self.associatedOrdersLock.release()
self.trendNumTrades += 1
def OnOrderEvent(self, orderEvent):
"""Method called when an order has an event."""
# if the event associated with the order is about an
# order being fully filled
if orderEvent.Status == OrderStatus.Filled:
order = self.Transactions.GetOrderById(orderEvent.OrderId)
# if the order is a take-profit or stop-loss order
if order.Type == OrderType.Limit or order.Type == OrderType.StopMarket:
self.associatedOrdersLock.acquire()
# during volatile markets, the associated order and
# this order may have been triggered in quick
# succession, so this method is called twice
# with this order and the associated order.
# this prevents a runtime error in this case.
if order.Id not in self.associatedOrders:
self.associatedOrdersLock.release()
return
# obtain the associated order and cancel it.
associatedOrder = self.associatedOrders[order.Id]
associatedOrder.Cancel()
# remove the entries of this order and its
# associated order from the hash table.
del self.associatedOrders[order.Id]
del self.associatedOrders[associatedOrder.OrderId]
self.associatedOrdersLock.release()
def OnEndOfAlgorithm(self):
"""Method called when the algorithm terminates."""
# liquidate all holdings (all unrealized profits/losses will be realized).
# long and short positions are closed irrespective of profits/losses.
self.Liquidate(self.forexPair.Symbol)
```
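The `OnOrderEvent` handler above works because each take-profit ticket and its stop-loss ticket are registered against each other, so whichever fills first cancels its sibling, and the lock plus the `order.Id not in self.associatedOrders` guard absorbs the case where both fill almost simultaneously. Below is a minimal, framework-free sketch of that bookkeeping; `FakeTicket` and the order IDs are illustrative stand-ins, not the QuantConnect API.
```python
import threading

class FakeTicket:
    """Stand-in for an order ticket that can be cancelled."""
    def __init__(self, order_id):
        self.OrderId = order_id
        self.cancelled = False

    def Cancel(self):
        self.cancelled = True

associated_orders = {}
orders_lock = threading.Lock()

def link_bracket(tp_ticket, sl_ticket):
    # associate the take-profit and stop-loss tickets with one another
    with orders_lock:
        associated_orders[tp_ticket.OrderId] = sl_ticket
        associated_orders[sl_ticket.OrderId] = tp_ticket

def on_filled(order_id):
    # cancel the sibling order; tolerate the sibling having been handled already
    with orders_lock:
        if order_id not in associated_orders:
            return
        sibling = associated_orders.pop(order_id)
        sibling.Cancel()
        associated_orders.pop(sibling.OrderId, None)

tp, sl = FakeTicket("tp-1"), FakeTicket("sl-1")
link_bracket(tp, sl)
on_filled("tp-1")       # take-profit fills first...
assert sl.cancelled     # ...so the stop-loss gets cancelled
assert not associated_orders
```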
|
{
"source": "jgarr16/Sublime-SwapBullets",
"score": 3
}
|
#### File: jgarr16/Sublime-SwapBullets/SwapStrings.py
```python
import sublime, sublime_plugin
class SwapBulletsCommand(sublime_plugin.TextCommand):
def run(self, edit, bulletA=None, bulletB=None):
if not bulletA and not bulletB:
inputView = sublime.active_window().show_input_panel(
"Specify the bullets which shall be swapped. <> functions as a separator.",
"\"<>'",
self.onConfirm,
None,
None
)
inputView.run_command("select_all")
else:
view = self.view
selection = view.sel()
bulletA, bulletB = self.ensureOrder(bulletA, bulletB)
for region in selection:
if region.a == region.b:
                    # use the entire line if the region is only a point
region = view.line(region)
regionStr = view.substr(region)
if bulletB == "":
regionStr = regionStr.replace(bulletA, "")
else:
swapToken = self.generateSwapToken(regionStr, bulletA, bulletB)
regionStr = regionStr \
.replace(bulletA, swapToken) \
.replace(bulletB, bulletA) \
.replace(swapToken, bulletB)
view.replace(edit, region, regionStr)
def ensureOrder(self, bulletA, bulletB):
# ensures that len(bulletA) >= len(bulletB)
# this is important for the edge case in which bulletA is a substring of bulletB
if len(bulletB) > len(bulletA):
bulletA, bulletB = bulletB, bulletA
return bulletA, bulletB
def generateSwapToken(self, regionStr, bulletA, bulletB):
# requirements derived by the three replacements:
# 1: uncritical since bulletA >= bulletB.
# 2: bulletB must not be in swapToken.
# 3: swapToken must not be in bulletA and not in regionStr.
# mind that bulletA is not necessarily a substring of regionStr.
# choose swapToken so that bulletB cannot be in swapToken
swapToken = bulletB[:-1]
while swapToken in bulletA + regionStr:
# extend swapToken with a character so that it isn't bulletB
swapToken += chr(ord(bulletB[-1]) + 1 % 255)
return swapToken
def onConfirm(self, swapBullet):
if "<>" not in swapBullet:
sublime.status_message("No <> was found for swapping bullet.")
return
(a, b) = swapBullet.split("<>")
self.view.run_command("swap_bullets", dict(bulletA=a, bulletB=b))
```
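The heart of `run()` is a three-step replace through a temporary token so the two bullets can trade places without clobbering each other. A standalone sketch of the same idea without the Sublime Text API follows; the sample text and bullets are invented, and a fixed placeholder stands in for the dynamically generated `generateSwapToken` result.
```python
def swap(text, bullet_a, bullet_b):
    # mirror ensureOrder(): make sure bullet_a is the longer (or equal-length) bullet
    if len(bullet_b) > len(bullet_a):
        bullet_a, bullet_b = bullet_b, bullet_a
    # placeholder known not to occur in these inputs; the plugin derives one safely
    token = "\x00"
    return (text.replace(bullet_a, token)
                .replace(bullet_b, bullet_a)
                .replace(token, bullet_b))

print(swap("- apples\n* oranges", "- ", "* "))
# * apples
# - oranges
```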
|
{
"source": "jgarst37/Projects",
"score": 4
}
|
#### File: Projects/Solutions/fibonacci.py
```python
def fibonacci(num):
"""Returns a list of the first num members of the Fibonacci sequence
Args:
num: number of members to return
"""
sequence = [0, 1]
if num <= 2:
return sequence[0:num]
c = 0
while len(sequence) < num:
sequence.append(sequence[c] + sequence[c + 1])
c += 1
return sequence
def print_fibonacci(num):
"""Prints the first [num] Fibonacci numbers
Args:
num: number of Fibonacci numbers to return
"""
print(fibonacci(num))
if __name__ == "__main__":
"""Prompts the user for an integer n between 0 and 1000 and prints a list of the first n numbers in the Fibonacci sequence"""
while True:
num = input("Please enter how many Fibonacci numbers you want:")
if num.isdigit() and int(num) <= 1000:
num = int(num)
print_fibonacci(num)
break
print("Please enter an integer between 0 and 1000.")
```
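A few quick checks of `fibonacci()` outside the interactive prompt loop (this assumes the module is importable as `fibonacci`):
```python
from fibonacci import fibonacci

assert fibonacci(0) == []
assert fibonacci(2) == [0, 1]
assert fibonacci(7) == [0, 1, 1, 2, 3, 5, 8]
```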
#### File: Projects/Solutions/next_prime.py
```python
from prime_factors import get_primes
import re
def get_next_prime(num):
"""Returns the prime number after [num]
Args:
num: prime number before the one to be returned
"""
if num == 0:
return 2
elif num == 2:
return 3
primes = get_primes(num)
c = 2
is_prime = False
while is_prime == False:
next = num + c
is_prime = True
for prime in primes:
if next % prime == 0:
is_prime = False
c +=2
if is_prime:
return next
if __name__ == "__main__":
"""Prompts the user if they want another prime"""
current = 0
while True:
response = input("Do you want a prime number (y/n)?")
if re.match('[yY][eE]?[sS]?', response):
current = get_next_prime(current)
print(current)
else:
print("Okay! I'll stop.")
break
```
|
{
"source": "jgarte/furl",
"score": 2
}
|
#### File: jgarte/furl/setup.py
```python
import os
import sys
from os.path import dirname, join as pjoin
from setuptools import setup, find_packages, Command
from setuptools.command.test import test as TestCommand
meta = {}
with open(pjoin('furl', '__version__.py')) as f:
exec(f.read(), meta)
class Publish(Command):
"""Publish to PyPI with twine."""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
os.system('python setup.py sdist bdist_wheel')
sdist = 'dist/furl-%s.tar.gz' % meta['__version__']
wheel = 'dist/furl-%s-py2.py3-none-any.whl' % meta['__version__']
rc = os.system('twine upload "%s" "%s"' % (sdist, wheel))
sys.exit(rc)
class RunTests(TestCommand):
"""
Run the unit tests.
To test all supported Python versions (as specified in tox.ini) in
parallel, run
$ tox -p
    By default, `python setup.py test` fails if tests/ isn't a Python
    module (i.e. if the tests/ directory doesn't contain an __init__.py
    file). But the tests/ directory shouldn't contain an __init__.py
file and tests/ shouldn't be a Python module. See
http://doc.pytest.org/en/latest/goodpractices.html
Running the unit tests manually here enables `python setup.py test`
without tests/ being a Python module.
"""
def run_tests(self):
from unittest import TestLoader, TextTestRunner
tests_dir = pjoin(dirname(__file__), 'tests/')
suite = TestLoader().discover(tests_dir)
result = TextTestRunner().run(suite)
sys.exit(0 if result.wasSuccessful() else -1)
setup(
name=meta['__title__'],
license=meta['__license__'],
version=meta['__version__'],
author=meta['__author__'],
author_email=meta['__contact__'],
url=meta['__url__'],
description=meta['__description__'],
long_description=(
'Information and documentation can be found at ' + meta['__url__']),
packages=find_packages(),
include_package_data=True,
platforms=['any'],
classifiers=[
'License :: Public Domain',
'Natural Language :: English',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries',
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: Implementation :: PyPy',
'Programming Language :: Python :: Implementation :: CPython',
],
tests_require=[
'flake8',
'six>=1.8.0',
],
install_requires=[
'six>=1.8.0',
'orderedmultidict>=1.0.1',
],
cmdclass={
'test': RunTests,
'publish': Publish,
},
)
```
|
{
"source": "jgarte/listools",
"score": 4
}
|
#### File: listools/flatools/flatten_reverse.py
```python
from .flatten import flatten
def flatten_reverse(input_list: list) -> list:
r"""flatools.flatten_reverse(input_list)
    Completely flattens a list containing any number of nested sublists into a
    reverse-sorted (descending) one-dimensional list. Usage:
>>> alist = [[1, 4], [5, 7], [2], [9, 6, 10], [8, 3]]
>>> flatools.flatten_reverse(alist)
[10, 9, 8, 7, 6, 5, 4, 3, 2, 1]
>>> alist = [1, 5, [3, [2, 4]]]
>>> flatools.flatten_reverse(alist)
[5, 4, 3, 2, 1]
The list can also be made out of floats:
>>> alist = [[1.73, -3.14, 9.41], [5.56, -1.03]]
>>> flatools.flatten_reverse(alist)
[9.41, 5.56, 1.73, -1.03, -3.14]
Or it can be made out of a mixture of integers and floats:
>>> alist = [[3, 1.4], [5, 7.8], [-3.1, 6.6]]
>>> flatools.flatten_reverse(alist)
[7.8, 6.6, 5, 3, 1.4, -3.1]
"""
if not isinstance(input_list, list):
raise TypeError('\'input_list\' must be \'list\'')
return sorted(flatten(input_list), reverse=True)
```
#### File: listools/flatools/flatten_single_type.py
```python
from .flatten import flatten
def flatten_single_type(input_list: list) -> bool:
r"""flatools.flatten_single_type(input_list)
Returns True if all elements of the flattened input_list are of the same
type and False if they are not. Usage:
>>> alist = [[1, 4], [5, 7], [2], [9, 6, 10], [8, 3]]
>>> flatools.flatten_single_type(alist)
True
>>> alist = [3, 4, [1, [5, 2]]]
>>> flatools.flatten_single_type(alist)
True
>>> alist = [[1.73, -3.14, 9.41], [5.56, -1.03]]
>>> flatools.flatten_single_type(alist)
True
>>> alist = [[3, 1.4], [5, 7.8], [-3.1, 6.6]]
>>> flatools.flatten_single_type(alist)
False
>>> alist = ['foo', ['bar', ('foo', 'bar')]]
>>> flatools.flatten_single_type(alist)
False
Note that empty lists return False:
>>> alist = []
>>> flatools.flatten_single_type(alist)
False
"""
if not isinstance(input_list, list):
raise TypeError('\'input_list\' must be \'list\'')
return len(set(map(type, flatten(input_list)))) == 1
```
#### File: listools/flatools/pflatten.py
```python
def pflatten(input_list: list, depth: int = 1) -> list:
r"""flatools.pflatten(input_list[, depth])
Partially flattens a list containing subslists as elements. Usage:
>>> alist = [[1, 2], [3, 4], [5], [6, 7, 8], [9, 10]]
>>> flatools.pflatten(alist)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
>>> alist = [1, 2, [3, [[4], 5]]]
>>> flatools.pflatten(alist)
[1, 2, 3, [[4], 5]]
Use the depth argument (which should always be an integer) when wanting to
flatten nested sublists:
>>> alist = [1, 2, [3, [4, 5]]]
>>> flatools.pflatten(alist, depth=2)
[1, 2, 3, [4], 5]
>>> alist = [1, 2, [3, [4, 5]]]
>>> flatools.pflatten(alist, depth=3)
[1, 2, 3, 4, 5]
>>> alist = [1, 2, [3, [4, 5]]]
>>> flatools.pflatten(alist, depth=4)
[1, 2, 3, 4, 5]
    Notice that the lists themselves can be made out of any datatypes:
>>> alist = [1, [2.2, True], ['foo', [(1, 4), None]], [3+2j, {'a': 1}]]
    >>> flatools.pflatten(alist, depth=3)
[1, 2.2, True, 'foo', (1, 4), None, 3+2j, {'a': 1}]
"""
if not isinstance(input_list, list):
raise TypeError('\'input_list\' must be \'list\'')
if not isinstance(depth, int):
raise TypeError('\'depth\' must be \'int\'')
aux_list = input_list[:]
for _ in range(depth):
output_list = []
for element in aux_list:
if not isinstance(element, list):
output_list.append(element)
else:
output_list += element
aux_list = output_list[:]
return output_list
```
#### File: listools/iterz/zip_inf_cycle.py
```python
from itertools import count as _count
def zip_inf_cycle(*input_iters) -> tuple:
r"""iterz.zip_inf_cycle(*input_iters)
Similar to zip but cycles all lists indefinitely. Usage:
>>> alist = [1, 2]
>>> blist = [4, 5, 6, 7, 8]
>>> zip_inf_cycle_iter = iterz.zip_inf_cycle(alist, blist)
>>> for _ in range(9):
... print(zip_inf_cycle_iter.__next__())
1 4
2 5
1 6
2 7
1 8
2 4
1 5
2 6
1 7
It also works with multiple lists:
>>> alist = [1, 2]
>>> blist = [1, 2, 3]
>>> clist = [1, 2, 3, 4]
>>> dlist = [1, 2, 3, 4, 5]
>>> zip_inf_cycle_iter = iterz.zip_inf_cycle(alist, blist, clist, dlist)
>>> for i in range(7):
... print(zip_inf_cycle_iter.__next__())
1 1 1 1
2 2 2 2
1 3 3 3
2 1 4 4
1 2 1 5
    2 3 2 1
    1 1 3 2
In fact, it works with any iterable containing any datatypes:
>>> a = (1, 2, 3)
>>> b = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]
>>> c = 'abcde'
>>> zip_inf_cycle_iter = iterz.zip_inf_cycle(a, b, c)
>>> for i in range(10):
... print(zip_inf_cycle_iter.__next__())
1 1.0 a
2 2.0 b
3 3.0 c
1 4.0 d
2 5.0 e
3 6.0 a
1 7.0 b
2 1.0 c
3 2.0 d
1 3.0 e
"""
for input_iter in input_iters:
try:
iterator = iter(input_iter)
except:
raise TypeError('\'*input_iters\' must be one or more \'iter\'')
if any(len(input_iter) == 0 for input_iter in input_iters):
raise IndexError('all elements of \'*input_iters\' must have len > 0')
for i in _count():
output_list = []
for input_iter in input_iters:
output_list.append(input_iter[i % len(input_iter)])
yield tuple(output_list)
```
#### File: listools/listutils/list_gcd.py
```python
from functools import reduce as _reduce
from math import gcd as _gcd
from typing import List
def list_gcd(input_list: List[int]) -> int:
r"""listutils.list_gcd(input_list)
This function returns the greatest common divisor of a list of integers.
Usage:
>>> alist = [8, 12]
>>> listutils.list_gcd(alist)
4
>>> alist = [74, 259, 185, 333]
>>> listutils.list_gcd(alist)
37
"""
if not isinstance(input_list, list):
raise TypeError('\'input_list\' must be \'list\'')
if not all(isinstance(element, int) for element in input_list):
raise TypeError('all elements of \'input_list\' must be \'int\'')
if len(input_list) == 0:
raise IndexError('\'input_list\' must have len > 0')
return _reduce(_gcd, input_list)
```
#### File: listools/listutils/period_len.py
```python
def period_len(input_list: list, ignore_partial_cycles: bool = False) -> int:
r"""listutils.period_len(input_list[, ignore_partial_cycles])
This function returns the length of the period of an input list. Usage:
>>> alist = [1, 2, 3, 1, 2, 3, 1, 2, 3]
>>> listutils.period_len(alist)
3
If a list is not periodic, the period length equals to the list size:
>>> alist = [3, 1, 4, 1, 5, 9, 2, 6]
>>> listutils.period_len(alist)
8
This function detects periodicity in lists with partial cycles:
>>> alist = [1, 2, 3, 1, 2, 3, 1]
>>> listutils.period_len(alist)
3
To disable this behaviour, use the ignore_partial_cycles argument:
>>> alist = [1, 2, 3, 1, 2, 3, 1]
>>> listutils.period_len(alist, ignore_partial_cycles=True)
7
If a list does not contain partial cycles, the ignore_partial_cycles
argument does not affect the result:
>>> alist = [1, 2, 3, 1, 2, 3]
>>> listutils.period_len(alist, ignore_partial_cycles=True)
3
"""
if not isinstance(input_list, list):
raise TypeError('\'input_list\' must be \'list\'')
if not isinstance(ignore_partial_cycles, bool):
raise TypeError('\'ignore_partial_cycles\' must be \'bool\'')
for period in range(1, len(input_list)):
if all(input_list[n] == input_list[n + period] \
for n in range(len(input_list) - period)):
if ignore_partial_cycles:
if len(input_list) % period != 0:
return len(input_list)
return period
return len(input_list)
```
#### File: listools/llogic/difference.py
```python
def difference(list_1: list, list_2: list) -> list:
r"""llogic.difference(list_1, list_2)
Returns the difference of two lists (omitting repetitions). The order
of the elements of the output depends on their order in the lists. The
    order of the input lists does affect the result. Usage:
>>> alist = [1, 2, 3, 4, 5]
>>> blist = [7, 6, 5, 4, 3]
>>> llogic.difference(alist, blist)
[1, 2]
>>> llogic.difference(blist, alist)
[7, 6]
>>> alist = [1, 2, 3, 3, 4, 4, 5, 5, 5]
>>> blist = [3, 3, 4, 5, 5, 6]
>>> llogic.difference(alist, blist)
[1, 2]
Note that llogic.difference does not flatten the lists so nested lists
are of type list:
>>> alist = [3, 4, 1, 5, 2]
>>> blist = [1, 2, 3, 4, 5]
>>> llogic.difference(alist, blist)
[]
>>> alist = [3, 4, [1, [5, 2]]]
>>> blist = [1, 2, 3, 4, 5]
>>> llogic.difference(alist, blist)
[[1, [5, 2]]]
The lists can contain any datatype:
>>> alist = [1, 2.3, 'foo', (3, 7)]
>>> blist = ['foo', 7+3j, (3, 7)]
>>> llogic.difference(alist, blist)
[1, 2.3]
"""
if not isinstance(list_1, list):
raise TypeError('\'list_1\' must be \'list\'')
if not isinstance(list_2, list):
raise TypeError('\'list_2\' must be \'list\'')
output_list = []
for item in list_1:
if item not in list_2 and item not in output_list:
output_list.append(item)
return output_list
```
#### File: listools/llogic/is_descending.py
```python
def is_descending(input_list: list, step: int = -1) -> bool:
r"""llogic.is_descending(input_list[, step])
This function returns True if the input list is descending with a fixed
step, otherwise it returns False. Usage:
>>> alist = [3, 2, 1, 0]
>>> llogic.is_descending(alist)
True
The final value can be other than zero:
>>> alist = [12, 11, 10]
>>> llogic.is_descending(alist)
True
The list can also have negative elements:
>>> alist = [2, 1, 0, -1, -2]
>>> llogic.is_descending(alist)
True
    It will return False if the list is not descending:
>>> alist = [6, 5, 9, 2]
>>> llogic.is_descending(alist)
False
    By default, the function uses steps of size -1 so the list below is not
    considered descending:
>>> alist = [7, 5, 3, 1]
>>> llogic.is_descending(alist)
False
    But the user can set the step argument to any negative value:
>>> alist = [7, 5, 3, 1]
>>> step = -2
>>> llogic.is_descending(alist, step)
True
"""
if not isinstance(input_list, list):
raise TypeError('\'input_list\' must be \'list\'')
if not isinstance(step, int):
raise TypeError('\'step\' must be \'int\'')
    if step >= 0:
raise ValueError('\'step\' must be < 0')
aux_list = list(range(max(input_list), min(input_list)-1, step))
return input_list == aux_list
```
|
{
"source": "jgarte/mkdocstrings",
"score": 3
}
|
#### File: mkdocstrings/tests/test_python_handler.py
```python
from copy import deepcopy
from mkdocstrings.handlers.python import ( # noqa: WPS450
_sort_key_alphabetical,
_sort_key_source,
rebuild_category_lists,
sort_object,
)
def test_members_order():
"""Assert that members sorting functions work correctly."""
subcategories = {key: [] for key in ("attributes", "classes", "functions", "methods", "modules")}
categories = {"children": {}, **subcategories}
collected = {
"name": "root",
"children": {
"b": {"name": "b", "source": {"line_start": 0}, **categories},
"a": {"name": "a", **categories},
"z": {"name": "z", "source": {"line_start": 100}, **categories},
"no_name": {"source": {"line_start": 10}, **categories},
"c": {
"name": "c",
"source": {"line_start": 30},
"children": {
"z": {"name": "z", "source": {"line_start": 200}, **categories},
"a": {"name": "a", "source": {"line_start": 20}, **categories},
},
**subcategories,
},
},
"attributes": ["b", "c", "no_name", "z", "a"],
"classes": [],
"functions": [],
"methods": [],
"modules": [],
}
rebuild_category_lists(collected)
    alphabetical = deepcopy(collected)
    sort_object(alphabetical, _sort_key_alphabetical)
    rebuilt_categories = {"children": [], **subcategories}
    assert (
        alphabetical["children"]
        == alphabetical["attributes"]
== [
{"name": "a", **rebuilt_categories},
{"name": "b", "source": {"line_start": 0}, **rebuilt_categories},
{
"name": "c",
"source": {"line_start": 30},
"children": [
{"name": "a", "source": {"line_start": 20}, **rebuilt_categories},
{"name": "z", "source": {"line_start": 200}, **rebuilt_categories},
],
**subcategories,
},
{"name": "z", "source": {"line_start": 100}, **rebuilt_categories},
{"source": {"line_start": 10}, **rebuilt_categories},
]
)
source = deepcopy(collected)
sort_object(source, _sort_key_source)
assert (
source["children"]
== source["attributes"]
== [
{"name": "a", **rebuilt_categories},
{"name": "b", "source": {"line_start": 0}, **rebuilt_categories},
{"source": {"line_start": 10}, **rebuilt_categories},
{
"name": "c",
"source": {"line_start": 30},
"children": [
{"name": "a", "source": {"line_start": 20}, **rebuilt_categories},
{"name": "z", "source": {"line_start": 200}, **rebuilt_categories},
],
**subcategories,
},
{"name": "z", "source": {"line_start": 100}, **rebuilt_categories},
]
)
```
|
{
"source": "jgarte/pip-package-list",
"score": 3
}
|
#### File: pip-package-list/pippackagelist/identify_package_list_file_type.py
```python
import enum
class PackageListFileType(enum.Enum):
REQUIREMENTS_TXT = "requirements.txt"
SETUP_PY = "setup.py"
def identify_package_list_file_type(file_path: str) -> PackageListFileType:
if file_path.endswith("setup.py"):
return PackageListFileType.SETUP_PY
return PackageListFileType.REQUIREMENTS_TXT
```
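A small sanity check of the classifier above; the paths are made up, and anything that does not end in `setup.py` falls back to `REQUIREMENTS_TXT`:
```python
from pippackagelist.identify_package_list_file_type import (
    PackageListFileType,
    identify_package_list_file_type,
)

assert identify_package_list_file_type("pkgs/mylib/setup.py") is PackageListFileType.SETUP_PY
assert identify_package_list_file_type("requirements/base.txt") is PackageListFileType.REQUIREMENTS_TXT
assert identify_package_list_file_type("constraints.txt") is PackageListFileType.REQUIREMENTS_TXT
```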
#### File: pip-package-list/pippackagelist/list_packages_from_files.py
```python
from collections import defaultdict
from typing import Generator, List
from .entry import (
RequirementsConstraintsEntry,
RequirementsEditableEntry,
RequirementsEntry,
RequirementsIndexURLEntry,
RequirementsPackageEntry,
RequirementsRecursiveEntry,
RequirementsVCSPackageEntry,
RequirementsWheelPackageEntry,
)
from .error import ConstraintWithoutNameError
from .identify_package_list_file_type import (
PackageListFileType,
identify_package_list_file_type,
)
from .parse_requirements_txt import parse_requirements_txt
from .parse_setup_py import parse_setup_py
def _list_packages_from_files(
file_paths: List[str],
*,
recurse_recursive: bool = False,
recurse_editable: bool = False,
remove_editable: bool = False,
remove_recursive: bool = False,
remove_constraints: bool = False,
remove_vcs: bool = False,
remove_wheel: bool = False,
remove_unversioned: bool = False,
remove_index_urls: bool = False,
) -> Generator[RequirementsEntry, None, None]:
generators = []
for file_path in file_paths:
package_list_file_type = identify_package_list_file_type(file_path)
if package_list_file_type == PackageListFileType.REQUIREMENTS_TXT:
generators.append(parse_requirements_txt(file_path))
elif package_list_file_type == PackageListFileType.SETUP_PY:
generators.append(parse_setup_py(file_path))
while len(generators) > 0:
for requirement in generators[0]:
if isinstance(requirement, RequirementsRecursiveEntry):
if recurse_recursive:
generators.append(
parse_requirements_txt(requirement.absolute_path)
)
elif not remove_recursive:
yield requirement
elif isinstance(requirement, RequirementsConstraintsEntry):
if not remove_constraints:
yield requirement
elif isinstance(requirement, RequirementsEditableEntry):
if recurse_editable:
generators.append(
parse_setup_py(
requirement.resolved_absolute_path,
requirement.extras,
)
)
elif not remove_editable:
yield requirement
elif isinstance(requirement, RequirementsIndexURLEntry):
if not remove_index_urls:
yield requirement
elif isinstance(requirement, RequirementsVCSPackageEntry):
if not remove_vcs:
yield requirement
elif isinstance(requirement, RequirementsWheelPackageEntry):
if not remove_wheel:
yield requirement
elif isinstance(requirement, RequirementsPackageEntry):
if remove_unversioned and not requirement.version:
continue
else:
yield requirement
generators = generators[1:]
def _dedupe_requirements(
generator: Generator[RequirementsEntry, None, None]
) -> Generator[RequirementsEntry, None, None]:
"""Removes exact duplicates from the list of requirements.
Duplicates can happen as multiple files are merged together and they
    all refer to the same dependency. De-duping should have no impact
    on the final result; it just makes the list easier to browse.
"""
unique_requirements = []
unique_requirements_strings = set()
for requirement in generator:
if str(requirement) in unique_requirements_strings:
continue
unique_requirements.append(requirement)
unique_requirements_strings.add(str(requirement))
for requirement in unique_requirements:
yield requirement
def _inline_constraints(
generator: Generator[RequirementsEntry, None, None]
) -> Generator[RequirementsEntry, None, None]:
"""Inlines constraints specified in constraint.txt files (specified by -c).
Constraints override what version of a package should be installed. They cannot
add new packages to the list. Any package in the constraints file that isn't
already in the list is ignored.
We have to be careful because the constraints file could contain multiple
entries for the same package. Each entry could have different markers.
Constraint files have the same syntax as requirements.txt files, but with
some restrictions:
* No nesting
* No editables
* All entries must have a name
"""
requirements = []
constraints = defaultdict(list)
for requirement in generator:
if not isinstance(requirement, RequirementsConstraintsEntry):
requirements.append(requirement)
continue
for constraint in parse_requirements_txt(requirement.absolute_path):
if constraint.package_name() is None:
raise ConstraintWithoutNameError(constraint)
constraints[constraint.package_name()].append(constraint)
for requirement in requirements:
if not requirement.package_name():
yield requirement
continue
requirement_constraints = constraints.get(requirement.package_name())
if requirement_constraints:
for constraint in requirement_constraints:
yield constraint
else:
yield requirement
def list_packages_from_files(
file_paths: List[str],
*,
recurse_recursive: bool = False,
recurse_editable: bool = False,
inline_constraints: bool = False,
remove_editable: bool = False,
remove_recursive: bool = False,
remove_constraints: bool = False,
remove_vcs: bool = False,
remove_wheel: bool = False,
remove_unversioned: bool = False,
remove_index_urls: bool = False,
dedupe: bool = False,
) -> Generator[RequirementsEntry, None, None]:
generator = _list_packages_from_files(
file_paths,
recurse_recursive=recurse_recursive,
recurse_editable=recurse_editable,
remove_editable=remove_editable,
remove_recursive=remove_recursive,
remove_constraints=remove_constraints,
remove_vcs=remove_vcs,
remove_wheel=remove_wheel,
remove_unversioned=remove_unversioned,
remove_index_urls=remove_index_urls,
)
if inline_constraints:
generator = _inline_constraints(generator)
if dedupe:
generator = _dedupe_requirements(generator)
for requirement in generator:
yield requirement
```
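A hedged usage sketch of the public `list_packages_from_files` entry point with recursion, constraint inlining, and de-duplication enabled; the requirements path is hypothetical:
```python
from pippackagelist.list_packages_from_files import list_packages_from_files

# Follow -r includes, substitute versions from any -c constraints files,
# and drop exact duplicates before printing each resolved entry.
for entry in list_packages_from_files(
    ["requirements/base.txt"],
    recurse_recursive=True,
    inline_constraints=True,
    dedupe=True,
):
    print(entry)
```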
#### File: jgarte/pip-package-list/setup.py
```python
import distutils.cmd
import os
import subprocess
from setuptools import find_packages, setup
class BaseCommand(distutils.cmd.Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def create_command(text, commands):
"""Creates a custom setup.py command."""
class CustomCommand(BaseCommand):
description = text
def run(self):
for cmd in commands:
subprocess.check_call(cmd)
return CustomCommand
with open(
os.path.join(os.path.dirname(__file__), "README.md"), encoding="utf-8"
) as readme:
README = readme.read()
setup(
name="pip-package-list",
version="0.0.9",
packages=find_packages(),
include_package_data=True,
license="MIT License",
description="Generate a flat list of packages Pip would install.",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/Photonios/pip-package-list",
author="<NAME>",
author_email="<EMAIL>",
keywords=["pip", "package", "resolver", "list", "requirements"],
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3.5",
],
entry_points={
"console_scripts": ["pip-package-list=pippackagelist.__main__:main"]
},
python_requires=">=3.7",
install_requires=["setuptools"],
extras_require={
"test": ["pytest==5.2.2", "pytest-cov==2.8.1",],
"analysis": [
"black==19.10b0",
"flake8==3.7.7",
"autoflake==1.3",
"autopep8==1.4.4",
"isort==4.3.20",
"docformatter==1.3.1",
],
},
cmdclass={
"lint": create_command(
"Lints the code",
[["flake8", "setup.py", "pippackagelist", "tests"]],
),
"lint_fix": create_command(
"Lints the code",
[
[
"autoflake",
"--remove-all-unused-imports",
"-i",
"-r",
"setup.py",
"pippackagelist",
"tests",
],
["autopep8", "-i", "-r", "setup.py", "pippackagelist", "tests"],
],
),
"format": create_command(
"Formats the code",
[["black", "setup.py", "pippackagelist", "tests"]],
),
"format_verify": create_command(
"Checks if the code is auto-formatted",
[["black", "--check", "setup.py", "pippackagelist", "tests"]],
),
"format_docstrings": create_command(
"Auto-formats doc strings", [["docformatter", "-r", "-i", "."]]
),
"format_docstrings_verify": create_command(
"Verifies that doc strings are properly formatted",
[["docformatter", "-r", "-c", "."]],
),
"sort_imports": create_command(
"Automatically sorts imports",
[
["isort", "setup.py"],
["isort", "-rc", "pippackagelist"],
["isort", "-rc", "tests"],
],
),
"sort_imports_verify": create_command(
"Verifies all imports are properly sorted.",
[
["isort", "-c", "setup.py"],
["isort", "-c", "-rc", "pippackagelist"],
["isort", "-c", "-rc", "tests"],
],
),
"fix": create_command(
"Automatically format code and fix linting errors",
[
["python", "setup.py", "format"],
["python", "setup.py", "format_docstrings"],
["python", "setup.py", "sort_imports"],
["python", "setup.py", "lint_fix"],
["python", "setup.py", "lint"],
],
),
"verify": create_command(
"Verifies whether the code is auto-formatted and has no linting errors",
[
["python", "setup.py", "format_verify"],
["python", "setup.py", "format_docstrings_verify"],
["python", "setup.py", "sort_imports_verify"],
["python", "setup.py", "lint"],
],
),
"test": create_command(
"Runs all the tests",
[
[
"pytest",
"--cov=pippackagelist",
"--cov-report=term",
"--cov-report=xml:reports/xml",
"--cov-report=html:reports/html",
"--junitxml=reports/junit/tests.xml",
]
],
),
},
)
```
#### File: pip-package-list/tests/test_parse_requirements.py
```python
import os
import pytest
from pippackagelist.entry import (
RequirementsConstraintsEntry,
RequirementsDirectRefEntry,
RequirementsEditableEntry,
RequirementsEntrySource,
RequirementsIndexURLEntry,
RequirementsPackageEntry,
RequirementsRecursiveEntry,
RequirementsVCSPackageEntry,
RequirementsWheelPackageEntry,
)
from pippackagelist.parse_requirements_list import parse_requirements_list
source = RequirementsEntrySource(
path="requirements.txt", line=None, line_number=None,
)
@pytest.mark.parametrize(
"path", ["../bla.txt", "./bla.txt", "/test.txt", "../bla-r.txt"]
)
def test_parse_requirements_recursive_entry(path):
line = "-r %s" % path
requirements = list(parse_requirements_list(source, [line]))
assert len(requirements) == 1
assert isinstance(requirements[0], RequirementsRecursiveEntry)
assert requirements[0].source.path == source.path
assert requirements[0].source.line == line
assert requirements[0].source.line_number == 1
assert requirements[0].absolute_path == os.path.realpath(
os.path.join(os.getcwd(), path)
)
@pytest.mark.parametrize(
"path", ["../bla.txt", "./bla.txt", "/test.txt", "../bla-c.txt"]
)
def test_parse_requirements_constraints_entry(path):
line = "-c %s" % path
requirements = list(parse_requirements_list(source, [line]))
assert len(requirements) == 1
assert isinstance(requirements[0], RequirementsConstraintsEntry)
assert requirements[0].source.path == source.path
assert requirements[0].source.line == line
assert requirements[0].source.line_number == 1
assert requirements[0].absolute_path == os.path.realpath(
os.path.join(os.getcwd(), path)
)
@pytest.mark.parametrize(
"path", ["../bla", "./bla", "/mypackage", ".", "../bla-e.txt"]
)
def test_parse_requirements_editable_entry(path):
line = "-e %s" % path
requirements = list(parse_requirements_list(source, [line]))
assert len(requirements) == 1
assert isinstance(requirements[0], RequirementsEditableEntry)
assert requirements[0].source.path == source.path
assert requirements[0].source.line == line
assert requirements[0].source.line_number == 1
assert requirements[0].absolute_path == os.path.realpath(
os.path.join(os.getcwd(), path)
)
def test_parse_requirements_editable_entry_with_extras():
line = "-e ./mypackage[extra1, extra2]"
requirements = list(parse_requirements_list(source, [line]))
assert len(requirements) == 1
assert isinstance(requirements[0], RequirementsEditableEntry)
assert requirements[0].source.path == source.path
assert requirements[0].source.line == line
assert requirements[0].source.line_number == 1
assert requirements[0].absolute_path == os.path.realpath(
os.path.join(os.getcwd(), "./mypackage")
)
assert requirements[0].extras == ["extra1", "extra2"]
@pytest.mark.parametrize("vcs", ["git", "hg"])
@pytest.mark.parametrize(
"uri", ["https://github.com/org/repo", "[email protected]:org/repo.git"]
)
@pytest.mark.parametrize("tag", ["test", "1234", None])
@pytest.mark.parametrize("name", ["mypackage", "mypackage123", None])
def test_parse_requirements_vcs_package_entry(vcs, uri, tag, name):
line = f"{vcs}+{uri}"
if tag:
line += f"@{tag}"
if name:
line += f"#egg={name}"
requirements = list(parse_requirements_list(source, [line]))
assert len(requirements) == 1
assert isinstance(requirements[0], RequirementsVCSPackageEntry)
assert requirements[0].source.path == source.path
assert requirements[0].source.line == line
assert requirements[0].source.line_number == 1
assert requirements[0].vcs == vcs
assert requirements[0].uri == uri
assert requirements[0].tag == tag
assert requirements[0].name == name
@pytest.mark.parametrize("operator", ["==", ">=", ">", "<=", "<"])
def test_parse_requirements_package_entry(operator):
line = "django%s1.0" % operator
requirements = list(parse_requirements_list(source, [line]))
assert len(requirements) == 1
assert isinstance(requirements[0], RequirementsPackageEntry)
assert requirements[0].source.path == source.path
assert requirements[0].source.line == line
assert requirements[0].source.line_number == 1
assert requirements[0].name == "django"
assert requirements[0].version == "1.0"
assert requirements[0].operator == operator
assert not requirements[0].markers
def test_parse_requirements_package_entry_no_operator():
line = "django"
requirements = list(parse_requirements_list(source, [line]))
assert len(requirements) == 1
assert isinstance(requirements[0], RequirementsPackageEntry)
assert requirements[0].source.path == source.path
assert requirements[0].source.line == line
assert requirements[0].source.line_number == 1
assert requirements[0].name == "django"
assert not requirements[0].version
assert not requirements[0].operator
assert not requirements[0].markers
def test_parse_requirements_package_entry_with_markers():
line = 'django==1.2; sys_platform == "linux" and python_version < "3.9"'
requirements = list(parse_requirements_list(source, [line]))
assert len(requirements) == 1
assert isinstance(requirements[0], RequirementsPackageEntry)
assert requirements[0].source.path == source.path
assert requirements[0].source.line == line
assert requirements[0].source.line_number == 1
assert requirements[0].name == "django"
assert requirements[0].version == "1.2"
assert requirements[0].operator == "=="
assert (
requirements[0].markers
== 'sys_platform == "linux" and python_version < "3.9"'
)
def test_parse_requirements_package_entry_with_extras():
line = "django[extra1, extra2]==1.2"
requirements = list(parse_requirements_list(source, [line]))
assert len(requirements) == 1
assert isinstance(requirements[0], RequirementsPackageEntry)
assert requirements[0].source.path == source.path
assert requirements[0].source.line == line
assert requirements[0].source.line_number == 1
assert requirements[0].name == "django"
assert requirements[0].extras == ["extra1", "extra2"]
assert requirements[0].version == "1.2"
assert requirements[0].operator == "=="
assert not requirements[0].markers
def test_parse_requirements_wheel_package_entry():
line = "https://mywebsite.com/mywheel.whl"
requirements = list(parse_requirements_list(source, [line]))
assert len(requirements) == 1
assert isinstance(requirements[0], RequirementsWheelPackageEntry)
assert requirements[0].source.path == source.path
assert requirements[0].source.line == line
assert requirements[0].source.line_number == 1
assert requirements[0].uri == "https://mywebsite.com/mywheel.whl"
assert not requirements[0].name
assert not requirements[0].markers
def test_parse_requirements_wheel_package_entry_with_name():
line = 'https://mywebsite.com/mywheel.whl#egg=mypackage ; sys_platform == "linux"'
requirements = list(parse_requirements_list(source, [line]))
assert len(requirements) == 1
assert isinstance(requirements[0], RequirementsWheelPackageEntry)
assert requirements[0].source.path == source.path
assert requirements[0].source.line == line
assert requirements[0].source.line_number == 1
assert requirements[0].uri == "https://mywebsite.com/mywheel.whl"
assert requirements[0].name == "mypackage"
assert requirements[0].markers == 'sys_platform == "linux"'
def test_parse_requirements_direct_ref_package_entry():
line = "mypackage @ https://website.com/mypackage.zip"
requirements = list(parse_requirements_list(source, [line]))
assert len(requirements) == 1
assert isinstance(requirements[0], RequirementsDirectRefEntry)
assert requirements[0].source.path == source.path
assert requirements[0].source.line == line
assert requirements[0].source.line_number == 1
assert requirements[0].name == "mypackage"
assert requirements[0].uri == "https://website.com/mypackage.zip"
assert not requirements[0].markers
def test_parse_requirements_direct_ref_package_entry_with_markers():
line = 'mypackage @ https://website.com/mypackage.zip ; sys_platform == "win32"'
requirements = list(parse_requirements_list(source, [line]))
assert len(requirements) == 1
assert isinstance(requirements[0], RequirementsDirectRefEntry)
assert requirements[0].source.path == source.path
assert requirements[0].source.line == line
assert requirements[0].source.line_number == 1
assert requirements[0].name == "mypackage"
assert requirements[0].uri == "https://website.com/mypackage.zip"
assert requirements[0].markers == 'sys_platform == "win32"'
def test_parse_requirements_index_url():
line = "-i https://mypackages.com/repo/pypi"
requirements = list(parse_requirements_list(source, [line]))
assert len(requirements) == 1
assert isinstance(requirements[0], RequirementsIndexURLEntry)
assert requirements[0].source.path == source.path
assert requirements[0].source.line == line
assert requirements[0].source.line_number == 1
assert requirements[0].url == "https://mypackages.com/repo/pypi"
def test_parse_requirements_skips_comments_and_blank_lines():
lines = [
"# this is a comment",
"",
"django==1.0",
" ",
" # another comment",
]
requirements = list(parse_requirements_list(source, lines))
assert len(requirements) == 1
assert isinstance(requirements[0], RequirementsPackageEntry)
def test_parse_requirements_ignores_leading_and_trailing_whitespace():
lines = [
" django==1.0 ",
" -r ./otherfile.txt ",
" -e ../",
" git+https://github.com/test/test@tag",
]
requirements = list(parse_requirements_list(source, lines))
assert len(requirements) == 4
assert isinstance(requirements[2], RequirementsEditableEntry)
assert isinstance(requirements[3], RequirementsVCSPackageEntry)
assert isinstance(requirements[0], RequirementsPackageEntry)
assert requirements[0].source.path == source.path
assert requirements[0].source.line == "django==1.0"
assert requirements[0].source.line_number == 1
assert requirements[0].name == "django"
assert requirements[0].version == "1.0"
assert requirements[0].operator == "=="
assert isinstance(requirements[1], RequirementsRecursiveEntry)
assert requirements[1].source.path == source.path
assert requirements[1].source.line == "-r ./otherfile.txt"
assert requirements[1].source.line_number == 2
assert requirements[1].absolute_path == os.path.join(
os.getcwd(), "otherfile.txt"
)
assert isinstance(requirements[2], RequirementsEditableEntry)
assert requirements[2].source.path == source.path
assert requirements[2].source.line == "-e ../"
assert requirements[2].source.line_number == 3
assert requirements[2].absolute_path == os.path.realpath(
os.path.join(os.getcwd(), "..")
)
assert isinstance(requirements[3], RequirementsVCSPackageEntry)
assert requirements[3].source.path == source.path
assert requirements[3].source.line == "git+https://github.com/test/test@tag"
assert requirements[3].source.line_number == 4
assert requirements[3].vcs == "git"
assert requirements[3].uri == "https://github.com/test/test"
assert requirements[3].tag == "tag"
```
|
{
"source": "jgarte/pip-requirements-parser",
"score": 2
}
|
#### File: jgarte/pip-requirements-parser/requirements_parser.py
```python
import pip
# Compare the major version numerically; comparing version strings
# lexicographically would sort '9.x.x' after '10.x.x'.
_pip_major_version = int(pip.__version__.split('.')[0])
if _pip_major_version < 10:
    from pip.req import parse_requirements
    from pip.download import PipSession
elif _pip_major_version < 20:
    from pip._internal.req import parse_requirements
    from pip._internal.req import PipSession
else:
    from pip._internal.req import parse_requirements
    from pip._internal.network.session import PipSession
def parse(filepath, links=False):
"""Returns a list of strings with the requirments registered in the file"""
requirements = []
for lib in parse_requirements(filepath, session=PipSession()):
if links and hasattr(lib.link, 'url'):
requirements.append(lib.link.url)
elif lib.req is not None:
requirements.append(str(lib.req))
return requirements
```
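A minimal usage sketch of `parse()`; the requirements file path is hypothetical:
```python
from requirements_parser import parse

# Plain requirement strings, e.g. ["six>=1.8.0", "orderedmultidict>=1.0.1"]
print(parse("requirements.txt"))

# With links=True, entries that carry a link report the link URL instead
print(parse("requirements.txt", links=True))
```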
|
{
"source": "jgarwin95/Interstellar_Escort",
"score": 3
}
|
#### File: jgarwin95/Interstellar_Escort/Interstellar_Escort.py
```python
import pygame
import random
import os
import time
class Boundary:
'''Generate pygame display window.
Args:
width (int): width of display in number of pixels
        height (int): height of display in number of pixels
'''
back_ground = pygame.image.load('Background_images/p.png')
def __init__(self, width, height):
self.width = width
self.height = height
self.window = pygame.display.set_mode((self.width, self.height))
pygame.display.set_caption('Interstellar Escort')
class Mothership:
'''Mothership object is displayed at the bottom of screen and the objective is to protect it.
Class Attributes:
image (pygame image): 50x500 image displayed onscreen
Attributes:
x (int): x coordinate image location
y (int): y coordinate image location
health_amt (int): initial health amount
damage_taken (int): initial damage amount
total (int): combination of health and damage amounts
hbar_x (int): x coordinate of health bar
hbar_y (int): y coordinate of health bar
hbar_length (int): length of health bar (constant for calcuation purposes)
        health_width (int): ratio of remaining health over total, multiplied by health bar length
'''
image = pygame.image.load('Mothership/mothership_3_2.png')
def __init__(self):
self.x = 0
self.y = 650
self.health_amt = 1000
self.damage_taken = 0
self.total = self.health_amt + self.damage_taken
self.hbar_x = 50
self.hbar_y = 690
self.hbar_length = 450 - self.hbar_x
self.health_width = round(self.hbar_length*(self.health_amt/self.total))
#self.damage_width = round(self.hbar_length*(self.damage_taken/self.total))
def update_damage(self):
'''Update instance's health amount and width of health bars'''
if self.health_amt > 1000: # if health is above initial amt of 1000 due to powerup reduce in size to original
self.health_amt = 1000
self.health_width = round(self.hbar_length*(self.health_amt/self.total))
def draw(self, window):
'''Draw sprite and health bars to screen
Args:
window (Boundary obj): window attribute of Boundary object
'''
window.blit(Mothership.image, (int(self.x), int(self.y)))
# Damage bar is constant length. Covered over by health bar.
pygame.draw.rect(window, (255,0,0), (self.hbar_x, self.hbar_y, self.hbar_length, 7))
# Draw over damage bar. Damage bar is revealed as health is depleted.
if self.health_amt > 0:
pygame.draw.rect(window, (0,255,0),(self.hbar_x, self.hbar_y, self.health_width, 7))
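    # Worked example of the bar arithmetic above (illustrative numbers only):
    # with health_amt = 750 and the initial total of 1000, hbar_length is
    # 450 - 50 = 400, so the green bar is round(400 * 750 / 1000) = 300 px wide
    # and the last 100 px of the fixed red damage bar show through.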
class Character:
    '''Character class is the main sprite (spaceship) for this game.
Class Attributes
        center_images (pygame image): images displaying ship in upright position
strafing_right_images (pygame image): intermediate images for right turns
        strafe_right_on (pygame image): final images for right turns
strafing_left_images (pygame image): intermediate images for left turns
strafe_left_on (pygame image): final images for left turns
Attributes
width (int): width of character image in pixels
height (int): height of character image in pixels
x (int): initial x coordinate position of character
y (int): initial y coordinate position of character
velocity (int): rate at which character moves from left to right
left (bool): indicate initial movement setting (for image displaying purposes)
right (bool): indicate initial movement setting
center (bool): indicate initial movement setting
'''
# images used when no keys are pressed
center_images = [pygame.image.load('main_sprite/planes_02A-center1.png'), pygame.image.load('main_sprite/planes_02A-center2.png'), pygame.image.load('main_sprite/planes_02A-center3.png'), pygame.image.load('main_sprite/planes_02A-center4.png')]
# images used inbetween full strafe right
strafing_right_images = [pygame.image.load('main_sprite/planes_02A-strafe_right5.png'),pygame.image.load('main_sprite/planes_02A-strafe_right6.png'),pygame.image.load('main_sprite/planes_02A-strafe_right7.png'),pygame.image.load('main_sprite/planes_02A-strafe_right8.png')]
# images used at full right strafe
strafe_right_on = [pygame.image.load('main_sprite/planes_02A-R9.png'), pygame.image.load('main_sprite/planes_02A-R10.png'), pygame.image.load('main_sprite/planes_02A-R11.png'), pygame.image.load('main_sprite/planes_02A-R12.png')]
# images used inbetween full strafe left
strafing_left_images = [pygame.image.load('main_sprite/planes_02A-strafe_left5.png'), pygame.image.load('main_sprite/planes_02A-strafe_left6.png'), pygame.image.load('main_sprite/planes_02A-strafe_left7.png'), pygame.image.load('main_sprite/planes_02A-strafe_left8.png')]
# images used at full left strafe
strafe_left_on = [pygame.image.load('main_sprite/planes_02A-L9.png'), pygame.image.load('main_sprite/planes_02A-L10.png'), pygame.image.load('main_sprite/planes_02A-L11.png'), pygame.image.load('main_sprite/planes_02A-L12.png')]
def __init__(self):
self.width = 96
self.height = 96
self.x = 200
self.y = 540
self.velocity = 5
self.left = False # Initial movement position states of sprite
self.right = False
self.center = True
def draw(self, left_right_frame, center_frame, most_recent_key, window):
'''Draw the mainsprite to the screen
Args
left_right_frame (int): incrementing number that controls which frame is selected for displaying when moving right/left
center_frame (int): incrementing number that controls which frame is selected when not turning
most_recent_key (str): most recently pressed movement key.
window (Boundary obj): screen on which image is displayed
'''
if self.center == True:
if left_right_frame < 4:
if most_recent_key == 'r':
window.blit(self.strafing_right_images[left_right_frame],
(self.x, self.y)) # level out spaceship upon returning to center
elif most_recent_key == 'l':
window.blit(self.strafing_left_images[left_right_frame],
                        (self.x, self.y)) # level out spaceship upon returning to center
else:
window.blit(self.center_images[center_frame],
(self.x, self.y)) # iterate through displaying center images
elif self.right == True:
if left_right_frame < 4: # first 4 frames are transition state
window.blit(self.strafing_right_images[left_right_frame],
(self.x, self.y)) # draw strafe right transition
else:
window.blit(self.strafe_right_on[left_right_frame % 4],
(self.x, self.y)) # draw final strafe right
elif self.left == True:
if left_right_frame < 4: # first 4 frames are transition state
window.blit(self.strafing_left_images[left_right_frame],
(self.x, self.y)) # draw strafe left transition
else:
window.blit(self.strafe_left_on[left_right_frame % 4],
(self.x, self.y)) # draw final strafe left
def move_left(self, boundary):
'''Move character in the left direction by velocity amount
Args
boundary (Boundary obj): Boundary width is used to know movement limit
'''
if self.x > 0: # keeping x coordinate within the bounds of the screen
self.x = self.x - self.velocity # move by velocity amt
def move_right(self, boundary):
'''Move character in the right direction by velocity amount
Args
boundary (Boundary obj): Boundary width is used to know movement limit
'''
if self.x < boundary.width - self.width:
self.x = self.x + self.velocity
def shoot(self, shot_type):
'''Generate ShooterObject object at the center position of the main sprite
Args
shot_type (str): specifies the type of shot generated. Could be used to change shot types in future use.
'''
# generate shot object at current sprite location, in the middle of the sprite
ShooterObject.shots_queue.append(ShooterObject(shot_type, (self.x + (self.width/2)), self.y))
class Asteroid:
    '''Asteroid class generates asteroid images above the display height and progresses them downward
Class Attributes
        asteroid_images (dict): dictionary of asteroid pygame images with keys specifying the size of the asteroid
width_options (list): list containing the various width options
ast_diff_setting (dict): dictionary for difficulty setting.
            Keys are levels of difficulty and values are the average number of game loops per asteroid generation
current_setting (int): current difficulty setting
        maximum_asteroid_amount (int): limit on the current number of existing asteroids
Attributes
        width (int): width of asteroid chosen
        color_option (int): color of asteroid chosen
y (int): y coordinate of asteroid spawn
x (int): x coordinate of asteroid spawn
velocity (int): speed at which asteroid progresses down screen
damage_taken (int): amount of damage sustained
health_amt (int): amount of health
damage (int): amount of damage dealt
hbar_length (int): length of health bar
initial_health_width (int): length of health bar as a constant
        destruction_method (None): method by which the asteroid has been destroyed
'''
asteroid_images = {50:[pygame.image.load('Asteroids/res50.png'),pygame.image.load('Asteroids/res50_1.png'),pygame.image.load('Asteroids/res50_2.png'),pygame.image.load('Asteroids/res50_3.png'),pygame.image.load('Asteroids/res50_4.png')],\
60:[pygame.image.load('Asteroids/res60.png'),pygame.image.load('Asteroids/res60_1.png'),pygame.image.load('Asteroids/res60_2.png'),pygame.image.load('Asteroids/res60_3.png'),pygame.image.load('Asteroids/res60_4.png')],\
70:[pygame.image.load('Asteroids/res70.png'),pygame.image.load('Asteroids/res70_1.png'),pygame.image.load('Asteroids/res70_2.png'),pygame.image.load('Asteroids/res70_3.png'),pygame.image.load('Asteroids/res70_4.png')],\
80:[pygame.image.load('Asteroids/res80.png'),pygame.image.load('Asteroids/res80_1.png'),pygame.image.load('Asteroids/res80_2.png'),pygame.image.load('Asteroids/res80_3.png'),pygame.image.load('Asteroids/res80_4.png')],\
90:[pygame.image.load('Asteroids/res90.png'),pygame.image.load('Asteroids/res90_1.png'),pygame.image.load('Asteroids/res90_2.png'),pygame.image.load('Asteroids/res90_3.png'),pygame.image.load('Asteroids/res90_4.png')],\
100:[pygame.image.load('Asteroids/res100.png'),pygame.image.load('Asteroids/res100_1.png'),pygame.image.load('Asteroids/res100_2.png'),pygame.image.load('Asteroids/res100_3.png'),pygame.image.load('Asteroids/res100_4.png')]}
width_options = [x for x in range(50,110,10)]
asteroid_lst = []
ast_diff_setting = {1:1000, 2:800, 3: 600, 4: 400, 5:200, 6:100, 7:50}
current_setting = 6
maximum_asteroid_amount = 9
def __init__(self):
self.width = random.choice(Asteroid.width_options) # randomly choosing width option from width_options
self.color_option = random.randint(0,4) # randomly choosing an index number to pick from various images
self.y = self.width*-1 # spawns asteroids above game window
self.x = random.randrange(50, 500 - self.width) # asteroid spawn anywhere in x direction within game boundaries
        if self.width < 80: # velocity is loosely tied to width
self.velocity = random.randint(2,3)
else:
self.velocity = random.randint(1,2)
self.damage_taken = 0 # the total health remains unchanged and is used to generate health bar ratio
self.health_amt = self.width*2 # health amount is directly related to the size of the asteroid
self.damage = self.width * 2 # damage dealt by asteroid is tied to size
self.hbar_length = round(self.width * 0.75) # constant length (should add up from the summations of health and damage bar widths)
self.hbar = round(self.hbar_length *
(self.health_amt/(self.health_amt + self.damage_taken))) # hbar length multiplied by percentage remaining
self.initial_health_width = self.hbar # new variable so that changing hbar will not affect the initial length of health bar
self.destruction_method = None # either destroyed by negative health or making contact with mothership
Asteroid.asteroid_lst.append(self)
def draw_asteroid(self, surface):
'''Draw asteroid on screen
Args
surface (boundary obj): surface upon which the asteroid is drawn
'''
surface.blit(Asteroid.asteroid_images[self.width][self.color_option], (self.x, self.y))
# creating damage bar (red)
if self.damage_taken > 0:
pygame.draw.rect(surface, (255,0,0), (self.x + round(self.width*0.1), round(self.y + self.width/2),
self.initial_health_width, 7))
        # available health (green) is dependent on the ratio of health remaining to damage taken
pygame.draw.rect(surface, (0,255,0), (self.x + round(self.width*0.1), round(self.y + self.width/2), self.hbar, 7))
def update_health_bars(self):
'''Update health bars'''
self.hbar = round(self.hbar_length *
(self.health_amt/(self.health_amt + self.damage_taken))) # length multiplied by fraction of health remaining
def progress_down(self):
'''Move asteroids down screen'''
self.y += self.velocity
def generate_explosion(self):
'''Generate Explosion object at last coordinates'''
Explosion(self.x, self.y, self.width, self.destruction_method) #explosion occurs at asteroids last location
def __del__(self):
'''Delete Asteroid object'''
pass
class TimedPowerUp:
    '''TimedPowerUp creates powerups, progresses them down the screen, and grants the main sprite effects that have a temporal component
Class Attributes
power_ups (dict): dictionary containing pygame images for the two different powerup images
        power_up_options (list): list containing powerup options
current_powerups (list): list containing all TimedPowerUp objects that currently exist
        activated (bool): whether or not the powerup has been activated
        current_option (None): powerup that was most recently generated
Args
p_type (str): the name of TimedPowerUp that is being generated
Attributes
width (int): width of powerup
height (int): height of powerup
x (int): x coordinate spawn location
y (int): y coordinate spawn location
velocity (int): movement speed
font_color (tuple): RGB value of font color
powerup_font (SysFont obj): pygame SysFont() contains font type, font size, and bold
powerup_text (pygame text obj): text generated
powerup_display_timer (int): timer used to mark how long the effect name is displayed on screen
        effect_timer (int): timer used to mark how long the effect has been active
powerup_duration (int): duration of power up effect
'''
power_ups = {'Insta-Kill':pygame.image.load('powerups/icon-powerup.png'), 'Double XP':pygame.image.load('powerups/icon-special.png')}
power_up_options = ['Insta-Kill', 'Double XP']
current_powerups = []
activated = False
current_option = None
def __init__(self, p_type):
self.width = 25
self.height = 20
        self.x = random.randint(25, 500 - (2*self.width)) # x coordinate chosen at random, spaced slightly from screen sides
self.y = -1 * self.height # spawn right above upper boundry
self.velocity = 3
self.font_color = (255,255,255) # color white
self.powerup_font = pygame.font.SysFont('comicsans', 80, 1) # Comicsans, 80 font height, and bold
TimedPowerUp.current_option = p_type # setting class attribute to most current generated powerup
self.powerup_text = self.powerup_font.render(p_type, 1, self.font_color)
self.powerup_display_timer = 0
self.effect_timer = 0
self.powerup_duration = 550 # duration is a set at 550 game loops
TimedPowerUp.current_powerups.append(self) # appending instance to list of current TimedPowerUp objects
def draw(self, window):
'''Draw powerup image on screen
Args
window (Boundary obj): surface to which the image is drawn
'''
if TimedPowerUp.activated == False: # Only display the powerup image if it hasn't been activated yet
window.blit(TimedPowerUp.power_ups[TimedPowerUp.current_option], (self.x, self.y))
elif TimedPowerUp.activated == True: # If activated, no longer display image. Display text instead
window.blit(self.powerup_text, (500//2 - self.powerup_text.get_width()//2, 700//2 - 100))
def progress(self):
'''Progress powerup down screen'''
self.y += self.velocity
def __del__(self):
'''Delete TimedPowerUp object'''
pass
class Health_PowerUp:
'''Generates health powerups which have the one-time effect of returning some health to the Mothership
Class Attributes
health_image (pygame image): image for health powerup
current_powerups (list): list containing all currently existing instances of Health_PowerUp
Attributes
width (int): width of power up
height (int): height of power up
x (int): x coordinate of spawn location
y (int): y coordinate of spawn location
health_add (int): amount of health granted upon activation
velocity (int): movement speed
activated (bool): whether or not the powerup has been activated
font_color (tuple): RGB value for font color
powerup_font (SysFont obj): font information; font type, size, bold
powerup_text (pygame text): text to be displayed
powerup_display_timer (int): timer for how long the text has been displayed to screen
'''
health_image = pygame.image.load('powerups/icon-health.png')
current_powerups = []
def __init__(self):
self.width = 25
self.height = 20
self.x = random.randint(25, 500 - (2*self.width)) # x coordinate chosen at random with slight spacing from walls
self.y = -1 * self.height # spawn right above upper boundary
self.health_add = 250 # amount of health returned to mothership
self.velocity = 3
self.activated = False # whether or not the powerup has been captured by main sprite
self.font_color = (255,255,255)
self.powerup_font = pygame.font.SysFont('comicsans', 80, 1)
self.powerup_text = self.powerup_font.render('Health' + ' +' + str(self.health_add), 1, self.font_color)
self.powerup_display_timer = 0
Health_PowerUp.current_powerups.append(self)
def draw(self, window):
'''Display health powerup image and text on screen
Args
window (Boundary obj): surface to which the image is drawn
'''
if self.activated == False: # if not activated yet, only display image and not text
window.blit(Health_PowerUp.health_image, (self.x, self.y))
if self.activated == True: # if activated, no longer display image and display text in the middle of the screen
window.blit(self.powerup_text, (500//2 - self.powerup_text.get_width()//2, 700//2 - 100))
def progress(self):
'''Progress health powerup down screen'''
self.y += self.velocity
def __del__(self):
'''Delete Health_PowerUp object'''
pass
class Explosion:
'''Generates explosion upon destruction of an asteroid
Class Attributes
explosion_lst (list): list containing all currently existing instances of the Explosion class
explosion_images (list): list containing pygame images of various explosion stages
Args
x (int): x coordinate of where explosion should occur
y (int): y coordinate of where explosion should occur
score_incr (int): amount by which score should increase upon user destruction
method (str): string detailing how the asteroid was destroyed
Attributes
x (int): storage of input x argument
y (int): storage of input y argument
current_frame (int): number detailing which image is to be displayed
font (pygame font): font information (font type, size, bold)
font_color (tuple): RGB color value for font
ast_width (int): width of asteroid that generated explosion
score_increase (int): amount by which the user's score will increase
text (pygame text): score increase text to be displayed to screen
text_loc (tuple): location of text on screen (x, y, width, height)
method (str): storage of input argument method
count (int): timer used to control displaying of score increase text to screen
explosion_sound (pygame Sound): sound generated upon explosion
'''
explosion_lst = []
explosion_images = [pygame.image.load(f'explosions/explosion-{x}.png') for x in range(1,12)]
def __init__(self, x, y, score_incr, method):
self.x = x
self.y = y
self.current_frame = 0
self.font = pygame.font.SysFont('comicsans',30,True)
self.font_color = (255,255,255)
self.ast_width = score_incr
# if a TimedPowerup is active and it is 'Double XP' then the score_increase value is double
if (TimedPowerUp.activated == True) and (TimedPowerUp.current_option == 'Double XP'):
self.score_increase = score_incr*2
else:
self.score_increase = score_incr
self.text = self.font.render(('+'+str(self.score_increase)), 1, self.font_color)
# text location is in middle of asteroid and adjusted for text height and width
self.text_loc = ((self.x + (self.ast_width//2) - (self.text.get_width()//2)),
(self.y + (self.ast_width//2) + (self.text.get_height()//2)))
self.method = method
self.count = 1 # timer used to display score increase
self.explosion_sound = pygame.mixer.Sound('audio/Explosion+1.wav')
self.explosion_sound.play()
Explosion.explosion_lst.append(self)
def draw(self,window):
'''Draw explosion image and text to screen
Args
window (Boundary obj): surface to which image is displayed
'''
self.count += 1 # increment count to know how many times draw() has been called
window.blit(Explosion.explosion_images[self.current_frame], (self.x, self.y))
if self.method == 'negative health': # indicates that asteroid was destroyed via user
if self.count % 3 == 0: # only display text every three calls to draw(). Gives fading effect
window.blit(self.text, self.text_loc)
def __del__(self):
'''Delete Explosion object'''
pass
class ShooterObject:
'''Shots generated by main sprite, progress up screen, and destroy asteroids
Class Attributes
shots_queue (list): list containing all currently existing instances of the ShooterObject class
shot_rate (int): rate in frames per shot
Args
shot_type (str): designates the type of shot and subsequent attributes that go along with that shot type
ship_x_position (int): x coordinate of position of ship upon generation of shot
ship_y_position (int): y coordinate of position of ship upon generation of shot
Attributes
shot_type (str): storage of input argument shot_type
width (int): width of shot in pixels
height (int): height of object in pixels
color (tuple): RGB value of shot color
velo (int): movement speed of shot
damage(int): damage amount delivered to asteroids
hit (bool): determine whether a shot has made contact with an asteroid
start_y (int): starting point of line that is a shot
end_y (int): ending point of a line that is a shot
start_x (int): starting x coordinate of a shot
'''
shots_queue = [] # shots generated stored here.
shot_rate = 15 # used with the modulo operator in the game loop to generate a shot every shot_rate loops; lower it to fire faster
def __init__(self, shot_type, ship_x_position, ship_y_position):
self.shot_type = shot_type
if self.shot_type == 'normal':
self.width = 3
self.height = 10
self.color = (255,255,255)
self.velo = 3
self.damage = 20
self.hit = False # tells whether bullet has collided with an object
self.start_y = ship_y_position + 25 # y position of ship + 25 to move closer to ship
self.end_y = self.start_y - self.height # difference equal to shot height
self.start_x = ship_x_position # tying x coord position to that of the ship
def draw_line(self, surface):
'''Draw line on screen
Args
surface (Boundary obj): surface to which the image is drawn
'''
pygame.draw.line(surface, self.color, (int(self.start_x), int(self.start_y)),
(int(self.start_x), int(self.start_y) - self.height), self.width)
def progress(self):
'''Progress shot up screen'''
# move both the start and end y position by the velocity amount
self.start_y -= self.velo
self.end_y -= self.velo
def __del__(self):
'''Delete ShooterObject instance'''
pass
class Score:
'''Keep score and display score in upper right hand corner of screen
Attributes
score (int): initial score
x (int): x coordinate for displaying score text
y (int): y coordinate for displaying score text
score_length (int): current length of the score in terms of digits
color (tuple): RGB value of score text
font (pygame font): font information; font type, size, bold
'''
def __init__(self):
self.score = 0
self.x = 470
self.y = 10
self.score_length = 1
self.color = (255,255,255)
self.font = pygame.font.SysFont('comicsans', 30, True)
def shift_score(self):
'''Shift score over by 10'''
self.x -= 10
def draw_score(self, window):
'''Draw score in upper right hand corner of screen
Args
window (Boundary obj): surface to which the score is displayed
'''
# if the length of the current score isn't equal to the previously set score length, an additional column has been added
if len(str(self.score)) != self.score_length:
self.score_length = len(str(self.score)) # resetting score length to new length
self.shift_score() # shifting text over
self.text = self.font.render(str(self.score), 1, self.color) # rendering text
window.blit(self.text, (self.x, self.y)) # displaying text
class Credits:
'''Credits are composed of end game messages and high scores, displayed at the end of the game
Args
score (int): Accumulated score from the game just played
Attributes
score (int): storage for input argument score
color (tuple): RGB color value for displayed text
messages (list): list of various end game messages to be displayed
fonts (list): list of pygame fonts of various sizes
texts (list): list of rendered texts, generated from fonts and messages
text_widths (list): list of widths of text
text_heights (list): list of heights of text
x (int): x coordinate for text
y (int): y coordinate for text
file_contents (list): storage for highscores.txt
file (file obj): file object of highscores.txt in 'r' mode
outfile (file obj): creating new file object of highscores.txt in 'w' mode
'''
def __init__(self, score):
self.score = score
self.color = (255,255,255)
self.messages = ['GAME OVER', f'Your Score: {self.score}', 'Press any key to play again', 'High Scores:']
self.fonts = [pygame.font.SysFont('comicsans', 100, True), pygame.font.SysFont('comicsans', 50, True),
pygame.font.SysFont('comicsans', 30, True)]
self.texts = [self.fonts[0].render(self.messages[0], 1, self.color), self.fonts[1].render(self.messages[1], 1, self.color),
self.fonts[2].render(self.messages[2], 1, self.color), self.fonts[1].render(self.messages[3], 1, self.color)]
self.text_widths = [self.texts[0].get_width(), self.texts[1].get_width(), self.texts[2].get_width(), self.texts[3].get_width()]
self.text_heights = [self.texts[0].get_height(), self.texts[1].get_height(), self.texts[2].get_height(), self.texts[3].get_height()]
self.x = 250
self.y = 200
self.file_contents = [] # if highscores.txt does not exist then file_contents will remain empty
if os.path.exists('highscores.txt'): # if highscores exist then read into file_contents
self.file = open('highscores.txt', 'r')
self.file_contents = self.file.readlines()
self.file.close()
self.outfile = open('highscores.txt', 'w') # open up file in 'w' under same name to overwrite scores in new order
def write_highscores(self):
'''Determine if current score is a high score and write scores to outfile'''
self.file_contents.append(str(self.score) + '\n') # appending new score to file_contents
self.file_contents.sort(key=(lambda x: int(x)) ,reverse=True) # mapping each entry to int() and then sorting
if len(self.file_contents) == 6:
self.file_contents = self.file_contents[:-1] # truncate the score list once it reaches max length
for line in self.file_contents:
self.outfile.write(line) # writing to outfile for permanent storage
self.outfile.close()
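# Illustrative only: if highscores.txt previously held 500, 300 and 100 and the new
# score is 400, file_contents becomes ['500', '400', '300', '100'] after the
# descending sort; once the list reaches six entries the lowest score is dropped,
# so at most five scores are ever written back to disk.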
def display_credits(self, window):
'''Display all end game text and contents of highscores file
Args
window (Boundary obj): surface to which all text is displayed
'''
window.blit(self.texts[0], (self.x - self.text_widths[0]//2, self.y - self.text_heights[0]//2 - 50))
window.blit(self.texts[1], (self.x - self.text_widths[1]//2, self.y - self.text_heights[1]//2 + self.text_heights[0] - 30))
window.blit(self.texts[2], (self.x - self.text_widths[2]//2, 650))
window.blit(self.texts[3], (self.x - self.text_widths[3]//2, 300))
self.init_score_pos = 350
for score in self.file_contents:
self.f = pygame.font.SysFont('comicsans', 30, True)
self.t = self.f.render(score[:-1], 1, self.color) # strip the trailing newline so \n characters aren't blitted to screen
window.blit(self.t, (self.x - self.t.get_width()//2, self.init_score_pos))
self.init_score_pos += 40 # iteratively move score position down screen
class GameStart:
'''Controls game loop and all functions within game loop; collisions, drawing, generating objects, key presses.
Attributes
clock (pygame Clock obj): pygame clock object for controlling game rate
quit (bool): whether the quit option was used during the opening scene
run (bool): control enter and exit of game loop
display (Boundary obj): Creation of game window and dimension
opening_scene(bool): control enter and exit of opening scene
music (pygame music): game play music
main_sprite (Character obj): main character
mothership (Mothership obj): mothership character
score (Score obj): score object
count (int): count of current overall game loop
center_frame (int): counter telling which center image to be displayed
left_right_frame (int): counter telling which left/right image to be displayed
powerup_health_timer (int): cooldown timer for health power ups
powerup_timer (int): cooldown timer for remaining powerups
next_frame (time obj): time object for moving between main character frames
most_recent_key (none): contains most recently pressed key; 'l' or 'r'
'''
def __init__(self):
pygame.init()
self.clock = pygame.time.Clock()
self.quit = False
self.run = True
self.display = Boundary(500, 700) # Game Boundary and Window dimension designation
self.opening_scene = True
self.open_scene() # Call opening scene after creating game window and before character objs are created
if self.quit == False: # Opening scene offers quit point. Need to check here
self.music = pygame.mixer.music.load('audio/Battle-SilverMoon.mp3')
pygame.mixer.music.play(-1)
self.main_sprite = Character() # initialize main sprite
self.mothership = Mothership() # initialize mothership
self.score = Score()
self.count = 0 # count running so that every X amount of loops, do Y
self.center_frame = 0
self.left_right_frame = 0
self.powerup_health_timer = 0 # two separate timers for health powerups vs. other powerups
self.powerup_timer = 0 #
self.next_frame = time.time()
self.most_recent_key = None # input 'l' or 'r' depending on which directional was last used.
while self.run:
self.clock.tick(60) # controls FPS
if time.time() > self.next_frame:
self.center_frame = (self.center_frame + 1)%4 # rotate through four static images
self.next_frame += .03 # bump the self-made frame clock by 30 ms (a new frame is displayed at most every 30 ms)
self.left_right_frame += 1
self.handle_key_presses()
self.generate_shots() # generate ShooterObjects
# only call if less than the max amount.
if len(Asteroid.asteroid_lst) < Asteroid.maximum_asteroid_amount:
self.generate_asteroids()
self.generate_powerup()
self.handle_collisions()
if self.count % 5 == 0:
self.score.score += 1 # score increase every 5 loops.
self.redraw_window()
self.count += 1 # increment loop count
if self.mothership.health_amt <= 0: # end game if mothership has 0 or negative health
self.end_game()
break
pygame.display.update()
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.run = False
pygame.quit()
def open_scene(self):
'''Display opening scene prior to entering game loop
Attributes
open_music (pygame music): pygame music played at opening scene
color (tuple): RGB color value of font
fonts (list): pygame fonts for various texts
titles (list): title for opening scene
title_location (int): y coordinate for title
body (list): list of messages for body text
instructions (list): list of game instructions to be displayed
'''
self.open_music = pygame.mixer.music.load('audio/Battle-Conflict.mp3')
pygame.mixer.music.play(-1)
self.color = (255,255,255)
self.fonts = [pygame.font.SysFont('comicsans',100,1), pygame.font.SysFont('comicsans',30,1)]
self.titles = ['Interstellar', 'Escort']
self.title_location = self.display.height // 10
self.body = ["You are mankind's last hope!", 'Protect the Mothership at all costs', 'as it makes its way across the galaxy.',
'Beware of asteroid clusters!']
self.instructions = ['Press any key to begin', 'Use right and left arrow keys to move.']
while self.opening_scene == True: # while opening scene is True display text and background
self.display.window.blit(Boundary.back_ground, (0,0)) # display background
self.title_text = self.fonts[0].render(self.titles[0], 1, self.color)
self.title_text2 = self.fonts[0].render(self.titles[1], 1, self.color)
self.display.window.blit(self.title_text, ((self.display.width//2) - (self.title_text.get_width()//2), 70))
self.display.window.blit(self.title_text2, ((self.display.width//2) - (self.title_text2.get_width()//2), 130))
self.body_location = 300 # established in loop so it is reset each time
for body_text in self.body:
b_t = self.fonts[1].render(body_text, 1, self.color)
self.display.window.blit(b_t, ((self.display.width//2) - (b_t.get_width()//2), self.body_location))
self.body_location += 30 # move body text down 30 at a time
self.instructions_location = 600 # established in loop so it is reset each time
for instruction in self.instructions:
instructions_text = self.fonts[1].render(instruction, 1, self.color)
self.display.window.blit(instructions_text, ((self.display.width//2) - (instructions_text.get_width()//2),
self.instructions_location))
self.instructions_location += 30
pygame.display.update()
for event in pygame.event.get():
if event.type == pygame.KEYUP: # game will start upon release of any key
self.opening_scene = False # kick back out to main loop
if event.type == pygame.QUIT:
self.opening_scene = False
self.quit = True
pygame.quit()
def end_game(self):
'''Create Credits object and restart game upon user input
Attributes
end_music (pygame music): music played at game over screen
game_over (Credits obj): credits object with the current final score
displaying_credits (bool): control for end game loop
'''
pygame.mixer.music.stop()
self.end_music = pygame.mixer.music.load('audio/Battle-Conflict.mp3')
pygame.mixer.music.play(-1)
self.game_over = Credits(self.score.score) # create credits obj
self.game_over.write_highscores()
self.displaying_credits = True
while self.displaying_credits:
#if self.displaying_credits == False:
# break
self.display.window.blit(Boundary.back_ground, (0,0)) # display background to screen
self.game_over.display_credits(self.display.window) # print credits to screen
pygame.display.update()
pygame.time.delay(2000) # delay pygame so key pressing at end of game doesn't auto restart
for event in pygame.event.get():
if event.type == pygame.KEYUP: # reset game upon pressing and release of key
# Reset all object class attribute list to empty for new game
Asteroid.asteroid_lst[:] = []
ShooterObject.shots_queue[:] = []
Explosion.explosion_lst[:] = []
Health_PowerUp.current_powerups[:] = []
# using initial game setup commands to reset everything upon restart.
self.music = pygame.mixer.music.load('audio/Battle-SilverMoon.mp3')
pygame.mixer.music.play(-1)
self.clock = pygame.time.Clock()
self.run = True
self.display = Boundary(500, 700)
self.main_sprite = Character()
self.mothership = Mothership()
self.score = Score()
self.count = 0
self.center_frame = 0
self.left_right_frame = 0
self.next_frame = time.time()
self.most_recent_key = None
self.powerup_timer = 0
self.displaying_credits = False
if event.type == pygame.QUIT:
self.displaying_credits = False
pygame.quit()
#break
def handle_key_presses(self):
'''Move character right and left, setting character movement states and correct frames to be displayed'''
keys = pygame.key.get_pressed()
if keys[pygame.K_LEFT]: # left arrow key to move left
self.main_sprite.move_left(self.display) # using display as input to set boundaries for movement.
if self.main_sprite.left == False: # only allowing access to branch if False so it won't run while holding down key
self.main_sprite.left = True # sprite is now moving left
self.main_sprite.right = False # right and center are now both False
self.main_sprite.center = False
self.left_right_frame = 0 # resetting left&right frame count. Will help display intermediate strafe states
self.most_recent_key = 'l' # setting left so intermediate strafe images used to level out spaceship
elif keys[pygame.K_RIGHT]: # right arrow key to move right
self.main_sprite.move_right(self.display) # using display as input to set boundaries for movement.
if self.main_sprite.right == False: # only allowing access to branch if False so it won't run while holding down key
self.main_sprite.right = True
self.main_sprite.left = False
self.main_sprite.center = False
self.left_right_frame = 0
self.most_recent_key = 'r' # setting right so intermediate strafe images used to level out spaceship
else:
if self.main_sprite.center == False: # once right or left keys are let go, if statement will run
self.main_sprite.center = True
self.main_sprite.right = False
self.main_sprite.left = False
self.left_right_frame = 0 # resetting upon return to center will allow us to access intermediate strafe states
def generate_shots(self):
'''Generate shots fired from spaceship at a constant rate'''
if (self.count % ShooterObject.shot_rate == 0): # every shot_rate loops the spaceship generates a ShooterObject (bullet)
self.main_sprite.shoot('normal') # normal indicates the bullet type and specifies its properties upon creation.
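# Rough cadence math, assuming the loop really runs at the 60 FPS cap set by
# clock.tick(60): with shot_rate == 15 the ship fires 60 / 15 = 4 shots per second;
# lowering shot_rate raises the firing rate.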
def generate_asteroids(self):
'''Generate asteroids at a random rate'''
# draw a random number; when it matches the difficulty setting a new asteroid is generated and placed on screen
if Asteroid.ast_diff_setting[Asteroid.current_setting] == random.randint(0,Asteroid.ast_diff_setting[Asteroid.current_setting]):
self.a = Asteroid()
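# Rough odds, assuming Asteroid.ast_diff_setting[Asteroid.current_setting] is some
# integer N (defined earlier in the Asteroid class): random.randint(0, N) is
# inclusive on both ends, so each call spawns an asteroid with probability 1 / (N + 1).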
def generate_powerup(self):
'''Generate health and timed power ups at discrete intervals and game conditions'''
if self.count > self.powerup_health_timer: # health has its own timer and is completely unlinked from other powerup generation
if self.mothership.health_amt != 1000: # only if the mothership has taken on some damage should powerups begin to generate
# powerup generation is a function of game health with a max generation rate of 300
if self.mothership.health_amt*2 == random.randint(0,self.mothership.health_amt*2 + 300):
self.p = Health_PowerUp()
self.powerup_health_timer = self.count + 500 # cooldown timer for power ups are set at ~8 seconds
if self.count > self.powerup_timer: # power up cooldown has expired
if TimedPowerUp.activated == False: # only allow power up generation if a powerup isn't in current use.
if self.mothership.health_amt >= 500: # havin' a good time then you should get a double XP
if 1000 == random.randint(0,1000):
TimedPowerUp('Double XP')
self.powerup_timer = self.count + 500 # setting cooldown for powerups ~8 seconds
if self.mothership.health_amt <= 500: # about t' die might need Insta-Kill
if 1000 == random.randint(0,1000):
TimedPowerUp('Insta-Kill')
self.powerup_timer = self.count + 500
def handle_collisions(self):
'''Loop through all object types on screen and determine if collisions have occurred'''
for powerup in Health_PowerUp.current_powerups:
if (powerup.x > self.main_sprite.x) and (powerup.x < self.main_sprite.x + self.main_sprite.width)\
and (powerup.y + powerup.height > self.main_sprite.y)\
and (powerup.y + powerup.height < self.main_sprite.y + self.main_sprite.height): # within boundaries of main sprite
if powerup.activated == False: # set so power up can only give mothership health once.
self.mothership.health_amt += powerup.health_add # increment mothership's health
self.mothership.update_damage() # update motherships damage
powerup.activated = True # activate powerup
if powerup.activated == True:
if powerup.powerup_display_timer > 25: # turn off powerup activate after counter has reached 25
powerup.activated = False
Health_PowerUp.current_powerups.pop(Health_PowerUp.current_powerups.index(powerup))
del powerup # remove powerup from instance list and delete
for t_powerup in TimedPowerUp.current_powerups:
if (t_powerup.x > self.main_sprite.x) and (t_powerup.x < self.main_sprite.x + self.main_sprite.width)\
and (t_powerup.y + t_powerup.height > self.main_sprite.y)\
and (t_powerup.y + t_powerup.height < self.main_sprite.y + self.main_sprite.height): #within boundaries
if TimedPowerUp.activated == False: # only turn switch if False, this keeps actions from repeating
TimedPowerUp.activated = True
t_powerup.effect_timer = self.count # setting powerup timer to current game loop number
if TimedPowerUp.activated == True:
if self.count - t_powerup.effect_timer > t_powerup.powerup_duration: # ~10 seconds worth of powerup
TimedPowerUp.activated = False # undoes all effects from activation
TimedPowerUp.current_powerups.pop(TimedPowerUp.current_powerups.index(t_powerup))
del t_powerup # remove from instance list and delete
for bullet in ShooterObject.shots_queue:
for asteroid in Asteroid.asteroid_lst:
if (bullet.start_x >= asteroid.x) and (bullet.start_x <= asteroid.x + asteroid.width)\
and (bullet.end_y <= asteroid.y + asteroid.width): # check to see if bullet is within asteroid hit box
# if within hit box, then more complex calculation to see if bullet is within radius is performed.
if ((bullet.end_y - (asteroid.y + (asteroid.width/2)))**2 + (bullet.start_x - (asteroid.x + (asteroid.width/2)))**2)**0.5 < (asteroid.width/2):
bullet.hit = True # register hit and reduce asteroid health
if (TimedPowerUp.activated == True) and (TimedPowerUp.current_option == 'Insta-Kill'): # powerup effect
asteroid.health_amt = 0 # instantly reduce asteroid health to zero.
asteroid.damage_taken += bullet.damage
else:
asteroid.health_amt -= bullet.damage # if no powerup then just reduce ast health by bullet damage
asteroid.damage_taken += bullet.damage
asteroid.update_health_bars()
if (asteroid.health_amt <= 0) or (asteroid.y + asteroid.width > 650): # check deletion conditions
if asteroid.health_amt <= 0:
if (TimedPowerUp.activated == True) and (TimedPowerUp.current_option == 'Double XP'): # powerup effect
self.score.score += (asteroid.width * 2) # double the amount of XP you receive
else:
self.score.score += asteroid.width # increment score asteroid width amt
asteroid.destruction_method = 'negative health' # method informs that xp gain should be shown on screen
elif (asteroid.y + asteroid.width > 650): # has made contact with mothership
asteroid.destruction_method = 'off screen'
self.mothership.health_amt -= asteroid.damage # update mothership health and damage
self.mothership.damage_taken += asteroid.damage
self.mothership.update_damage()
Asteroid.asteroid_lst.pop(Asteroid.asteroid_lst.index(asteroid))
asteroid.generate_explosion() # generate explosion before deleting obj
del asteroid
if (bullet.hit == True) or (bullet.start_y < 0): # check delete conditions
ShooterObject.shots_queue.pop(ShooterObject.shots_queue.index(bullet))
del bullet
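# Note on the hit test above: it is a plain point-in-circle check. The bullet tip
# (start_x, end_y) registers a hit when
# sqrt((end_y - cy)**2 + (start_x - cx)**2) < width / 2,
# where (cx, cy) = (asteroid.x + width/2, asteroid.y + width/2) is the asteroid centre.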
def redraw_window(self):
'''Redraw all objects onto screen'''
self.display.window.blit(Boundary.back_ground, (0,0)) # redrawing background.
pygame.draw.rect(self.display.window, (255,255,255), (475, 75, 15, 550), 2) # empty rect for powerup display
self.mothership.draw(self.display.window) # draw mothership
self.score.draw_score(self.display.window) # draw score
self.main_sprite.draw(self.left_right_frame, self.center_frame,
self.most_recent_key, self.display.window) # draw sprite
# below progresses and draws asteroids/shooting objects
for shot in ShooterObject.shots_queue: # accessing every ShooterObject currently in creation (stored in current_shoots)
shot.progress() # progressing each shot down the screen
shot.draw_line(self.display.window) # drawing shot in new location
for ast in Asteroid.asteroid_lst: # iterating through list of asteroids generated
if self.count % 2 == 0: # move asteroids every other frame. keeps them from being too fast
ast.progress_down()
ast.draw_asteroid(self.display.window)
for powerup in Health_PowerUp.current_powerups:
if powerup.activated == True:
if (self.count % 5) and (powerup.powerup_display_timer < 25): # skip every fifth frame while activated to give a flashing effect
powerup.draw(self.display.window)
powerup.powerup_display_timer += 1
else:
powerup.progress() # if in unactivated state then have it progress down the screen
powerup.draw(self.display.window)
for powerup in TimedPowerUp.current_powerups:
if TimedPowerUp.activated == True:
if (self.count % 5) and (powerup.powerup_display_timer < 25):
powerup.draw(self.display.window)
powerup.powerup_display_timer += 1
if self.count - powerup.effect_timer < powerup.powerup_duration: # if still under duration limit
# fill powerup bar on right of screen with yellow
pygame.draw.rect(self.display.window, (235, 204, 52), (475, 75, 15,
round(powerup.powerup_duration * (1 - (self.count - powerup.effect_timer)/powerup.powerup_duration))))
else:
powerup.progress() # if not activated progress down screen
powerup.draw(self.display.window)
for exp in Explosion.explosion_lst:
if self.count % 4 == 0: # switch explosion frame every four loops
exp.current_frame += 1
if exp.current_frame >= 11: # past final frame so remove from explosion list and del object
Explosion.explosion_lst.pop(Explosion.explosion_lst.index(exp))
del exp
else:
exp.draw(self.display.window)
def __del__(self):
'''Delete GameStart obj'''
pass
if __name__ == "__main__":
GameStart()
```
|
{
"source": "jgarza9788/AwakeUpWithSpotify",
"score": 2
}
|
#### File: jgarza9788/AwakeUpWithSpotify/alarmDataManager.py
```python
import os, json,datetime, re,time,subprocess
from pathlib import Path
dir = os.path.dirname(__file__)
settingsPath = os.path.join(dir,"settings.json").replace("\\","/")
settings = ""
fileLock = False
def lockFile():
global fileLock # without the global declaration the module-level flag is never set
fileLock = True
def unlockFile():
global fileLock
fileLock = False
def createSettings():
data = {}
data["disabledUntilAfter"] = 0
data["alarms"] = []
data["alarms"].append({
"enable": True,
"file": "alarms\\Awaken.mp3",
"volume": 0.05,
"time": "09:00",
"Su": False,
"M": True,
"T": True,
"W": True,
"R": True,
"F": True,
"Sa": False,
"exeDay": 0
})
with open(settingsPath, 'w') as outfile:
json.dump(data, outfile, indent=4)
# setSettings(data)
# with portalocker.Lock(settingsPath,'w', timeout=60) as outfile:
# json.dump(data, outfile, indent=4)
# # flush and sync to filesystem
# outfile.flush()
# os.fsync(outfile.fileno())
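# Illustrative usage (not executed here): calling createSettings() writes a default
# settings.json next to this script containing a single 09:00 weekday alarm, so
# getSettings()["alarms"][0]["time"] would return "09:00".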
def getSettings():
data = ""
while fileLock:
print("file is Locked")
time.sleep(2)
lockFile()
if os.path.isfile(settingsPath):
try:
with open(settingsPath,'r') as json_file:
# print("allData:: \n" + str(settings) + "\n")
data = json.load(json_file)
# return json.load(json_file)
# with portalocker.Lock(settingsPath,'r', timeout=60) as json_file:
# return json.load(json_file)
# # flush and sync to filesystem
# outfile.flush()
# os.fsync(outfile.fileno())
except:
createSettings()
with open(settingsPath,'r') as f:
data = json.load(f)
else:
# print("*")
createSettings()
with open(settingsPath,'r') as f:
data = json.load(f)
# return json.load(f)
# with portalocker.Lock(settingsPath,'w', timeout=60) as f:
# return json.load(json_file)
# # flush and sync to filesystem
# outfile.flush()
# os.fsync(outfile.fileno())
unlockFile()
return data
# print(getSettings())
def setSettings(data):
while fileLock:
print("file is Locked")
time.sleep(2)
lockFile()
# print("data: \n" + str(data))
with open(settingsPath, 'w') as outfile:
json.dump(data, outfile, indent=4)
# with portalocker.Lock(settingsPath,'w', timeout=60) as outfile:
# json.dump(data, outfile, indent=4)
# # flush and sync to filesystem
# outfile.flush()
# os.fsync(outfile.fileno())
# fileLock = False
unlockFile()
def newAlarm(data):
if len(data["alarms"]) > 0:
data["alarms"].append({
"enable": data["alarms"][len(data["alarms"])-1]["enable"],
"file": data["alarms"][len(data["alarms"])-1]["file"],
"volume": data["alarms"][len(data["alarms"])-1]["volume"],
"time": data["alarms"][len(data["alarms"])-1]["time"],
"Su": data["alarms"][len(data["alarms"])-1]["Su"],
"M": data["alarms"][len(data["alarms"])-1]["M"],
"T": data["alarms"][len(data["alarms"])-1]["T"],
"W": data["alarms"][len(data["alarms"])-1]["W"],
"R": data["alarms"][len(data["alarms"])-1]["R"],
"F": data["alarms"][len(data["alarms"])-1]["F"],
"Sa": data["alarms"][len(data["alarms"])-1]["Sa"],
"exeDay": 0
})
else:
data["alarms"].append({
"enable": True,
"file": "alarms\\Awake.mp3",
"volume": 0.05,
"time": "09:00",
"Su": False,
"M": True,
"T": True,
"W": True,
"R": True,
"F": True,
"Sa": False,
"exeDay": 0
})
setSettings(data)
def getDay():
return int(str(datetime.datetime.now().date())[:10].replace("-",""))
def getTomorrow():
tomorrow = datetime.datetime.now().date() + datetime.timedelta(days=1)
return int(str(tomorrow)[:10].replace("-",""))
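# Illustrative only: on 2021-08-03 getDay() returns 20210803 and getTomorrow()
# returns 20210804 (dates are encoded as YYYYMMDD integers for easy comparison).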
def intDayToString(intDay):
return str(intDay)[:4] +"-" + str(intDay)[-4:-2] +"-"+str(intDay)[-2:]
# print(intDayToString(20190203))
# exit()
def disableToday():
settings = getSettings()
settings["disabledUntilAfter"] = getDay()
setSettings(settings)
def disableTomorrow():
settings = getSettings()
settings["disabledUntilAfter"] = getTomorrow()
setSettings(settings)
def undisableAlarms():
settings = getSettings()
settings["disabledUntilAfter"] = 0
setSettings(settings)
def getDisabledUntilAfter():
settings = getSettings()
return settings["disabledUntilAfter"]
def getTempDisable():
settings = getSettings()
if settings["disabledUntilAfter"] == 0:
return ""
else:
return "alarms are disabled until after " + intDayToString(settings["disabledUntilAfter"])
def getStyle():
settings = getSettings()
if settings["disabledUntilAfter"] == 0:
return ""
else:
return "QLabel {padding-left:8px;padding:8px;background:rgba(255,0,0,255);color:black;font-weight:bold;}"
```
|
{
"source": "jgarza9788/tv_script_generation",
"score": 3
}
|
#### File: jgarza9788/tv_script_generation/scratch.py
```python
A = [0,0,0,0,0]
B = [1,2,3]
A.append(B)
print(A)
# import torch
# from torch.utils.data import TensorDataset, DataLoader
# class SimpleCustomBatch:
# def __init__(self, data):
# transposed_data = list(zip(*data))
# self.inp = torch.stack(transposed_data[0], 0)
# self.tgt = torch.stack(transposed_data[1], 0)
# # custom memory pinning method on custom type
# def pin_memory(self):
# self.inp = self.inp.pin_memory()
# self.tgt = self.tgt.pin_memory()
# return self
# def collate_wrapper(batch):
# return SimpleCustomBatch(batch)
# inps = torch.arange(10 * 5, dtype=torch.float32).view(10, 5)
# tgts = torch.arange(10 * 5, dtype=torch.float32).view(50, 1)
# print('inps',inps)
# print('tgts',tgts)
# dataset = TensorDataset(inps, tgts)
# loader = DataLoader(dataset, batch_size=2, collate_fn=collate_wrapper,
# pin_memory=True)
# for batch_ndx, sample in enumerate(loader):
# print(sample.inp.is_pinned())
# print(sample.tgt.is_pinned())
# import torch
# z = torch.IntTensor([0,1,2,3,4,5,6,7,8,9,10])
# z1 = torch.split(z, 5, dim=0)
# print(*z1,sep='\n')
# A = [0,0,0,0,0]
# B = [1,2,3]
# print(A+B)
```
|
{
"source": "jgarza9788/VAERS",
"score": 3
}
|
#### File: VAERS/Archive/VAERS_obsolete.py
```python
import pandas as pd
import numpy as np
import os
import sys
import re
from matplotlib import pyplot as plt
import json
from collections import Counter
DIR = os.path.dirname(os.path.realpath(__file__))
datapath = os.path.join(DIR,'AllVAERSDataCSVS')
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
def get_file_list(path):
"""
returns a list of all the files in the given path/directory
"""
return [ os.path.join(path,f) for f in os.listdir(path)]
# def compile_data_all_VAERSDATA():
# """
# compiles
# """
# csv_files = get_file_list(datapath)
# data_files = [f for f in csv_files if re.search(r'\d{4}VAERSDATA',f)]
# df = pd.DataFrame()
# for i,f in enumerate(data_files):
# print(i,f)
# df0 = pd.read_csv(f,encoding='cp1252')
# df = pd.concat([df,df0])
# return df
def compile_files(directory,files):
"""
compiles/adds/unions multiple csv files together and returns a dataframe
"""
df = pd.DataFrame()
for i,f in enumerate(files):
# print(i,f)
df0 = pd.read_csv(os.path.join(directory,f),encoding='cp1252',low_memory=False)
df = pd.concat([df,df0])
return df
def load_json(path_to_file):
"""
loads json from file
"""
with open(path_to_file, 'r') as f:
return json.load(f)
def save_json(path_to_file,data):
"""
writes dict/json to file
"""
with open(path_to_file, 'w') as f:
json.dump(data, f)
def has_covid(text):
"""
returns 1 or 0 if text has \'COVID\' in it
"""
if re.search('COVID',text.upper()):
return 1
else:
return 0
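# Illustrative only: has_covid('COVID19 (COVID19 VACCINE)') returns 1,
# has_covid('FLU4') returns 0.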
def process_to_one_file():
"""
processes VAERS data from 2020 and 2021, creates all_data.json, and returns a dataframe
"""
print('process to one file\n\tthis might take a while...go get a drink☕🍷🍸🍹🍶🍺\n')
df_data = compile_files(datapath,['2020VAERSDATA.csv','2021VAERSDATA.csv'])
df_vax = compile_files(datapath,['2020VAERSVAX.csv','2021VAERSVAX.csv'])
df_sym = compile_files(datapath,['2020VAERSSYMPTOMS.csv','2021VAERSSYMPTOMS.csv'])
print("""
symptoms are contained in columns (up to 5 symptoms per event)
we must transform these symptoms into a single list for each event
""")
print('dedup-ing Symptoms')
vid = list(df_sym['VAERS_ID'].unique())
idf_sym = []
for index,v in enumerate(vid):
if index%100 == 0:
print('{:.2f}'.format(index/len(vid)))
temp = df_sym[df_sym['VAERS_ID'] == v]
temp = temp.to_dict(orient='records')
syms = []
for t in temp:
if isinstance(t['SYMPTOM1'],str):
syms.append(t['SYMPTOM1'])
if isinstance(t['SYMPTOM2'],str):
syms.append(t['SYMPTOM2'])
if isinstance(t['SYMPTOM3'],str):
syms.append(t['SYMPTOM3'])
if isinstance(t['SYMPTOM4'],str):
syms.append(t['SYMPTOM4'])
if isinstance(t['SYMPTOM5'],str):
syms.append(t['SYMPTOM5'])
idf_sym.append({'VAERS_ID':v,'SYMPTOMS':syms})
df_sym = pd.DataFrame(idf_sym)
print('merge data')
df = pd.merge(df_data,df_vax,how='outer',on='VAERS_ID')
df = df.drop_duplicates(ignore_index = True)
df = pd.merge(df,df_sym,how='outer',on='VAERS_ID')
df.reset_index()
# creating a new column depending if this is the covid vaccine or not
df['COVID_VAX'] = df['VAX_TYPE'].apply(has_covid)
df = df[df['COVID_VAX'] == 1]
# print(len(df))
print('all columns\n',df.columns)
print('top 50\n',df.head(50))
f0 = os.path.join(datapath,'all_data.csv') #not really needed...but some people might like a csv
f1 = os.path.join(datapath,'all_data.json')
df.to_csv(f0)
save_json(f1,df.to_dict(orient='records'))
print('saved: ',f0)
print('saved: ',f1)
return df
def break_down_2(_df,column):
"""
shows what values there are for a given column (with counts and percent)
"""
print('\nbreak down of {0}'.format(column))
df = pd.DataFrame(_df[column])
df = df.fillna('nan')
print('column'.ljust(10),'\t','value'.ljust(10),'\t','count'.ljust(10),'\t','percent'.ljust(10))
l = list(df[column].unique())
for i in l:
df0 = df[df[column]==i]
print(column.ljust(10),'\t',str(i).ljust(10),'\t',str(len(df0))[0:10].ljust(10),'\t','{:.2f}'.format((len(df0)/len(df))*100).ljust(10))
def process_symptoms_to_list(df,column='SYMPTOMS'):
"""
returns a list of symptoms for the dataframe
"""
s = df[column].to_list()
l = []
for i in s:
try:
for j in i:
if str(j) == 'nan':
pass
else:
l.append(str(j).upper())
except:
pass
return l
def break_down_3(_df,column,buckets,message=''):
"""
breaks a column down into buckets/bins
"""
print('\n\n',message,'\ncolumn: ',column, '\n','buckets: ', buckets)
df = pd.DataFrame(_df[column])
df['bucket'] = pd.cut(df[column], bins=buckets)
df = df.groupby(by='bucket').count()
df['percent'] = (df[column]/df[column].sum())*100
df['percent'] = df['percent'].round(2)
print(df)
def symptom_list(df,print_top=100,column='SYMPTOMS'):
"""
displays a list of the most popular symptoms
note: symptoms might be medical jargon or plain english
i.e. \"RASH\",\"ERYTHEMA\", and \"ITCHY RED SKIN\" would be reported as different items (for now)
"""
verbose = True
if print_top == 0:
verbose = False
if verbose:
print(
"""
note: symptoms might be medical jargon or plain english
i.e. \"RASH\",\"ERYTHEMA\", and \"ITCHY RED SKIN\" would be reported as different items
"""
)
symptoms = process_symptoms_to_list(df,column)
symp_count = len(symptoms)
symptoms = Counter(symptoms)
symptoms = symptoms.most_common()
# print(symptoms)
if verbose:
print('\ntop {print_top} symptoms'.format(print_top=print_top))
cs = [4,24,12,12]
if verbose:
print('#'.ljust(cs[0]),'symptom'.ljust(cs[1]),'value'.ljust(cs[2]),'percent'.ljust(cs[3]))
if verbose:
for index,i in enumerate(symptoms[0:print_top]):
print(
str(index).ljust(cs[0]),
str(i[0][0:cs[1]]).ljust(cs[1]),
str(i[1]).ljust(cs[2]),
'{:.2f}'.format((i[1]/symp_count)*100).ljust(cs[3])
)
return symptoms
def get_data():
"""
gets the data and returns a dataframe
"""
all_data = os.path.join(datapath,'all_data.json')
if os.path.isfile(all_data):
print('loading all_data.json')
df = load_json(all_data)
df = pd.DataFrame(df)
else:
print('processing the 2020-2021 files')
print("""
.../VAERS/AllVAERSDataCSVS/
2021VAERSVAX.csv
2021VAERSSYMPTOMS.csv
2021VAERSDATA.csv
2020VAERSVAX.csv
2020VAERSSYMPTOMS.csv
2020VAERSDATA.csv
""")
df = process_to_one_file()
print(df.columns)
return df
def breakdowns(df):
"""
breaks down the data
"""
ddf = df[df['DIED']=='Y']
break_down_3(df,'AGE_YRS',[0,15,25,35,45,55,65,75,85,500])
break_down_3(ddf,'AGE_YRS',[0,15,25,35,45,55,65,75,85,500],message='***deaths only***')
break_down_3(df,'NUMDAYS',[0,10,20,30,40,50,60])
break_down_3(ddf,'NUMDAYS',[0,10,20,30,40,50,60],message='***deaths only***')
break_down_2(df,'DIED')
break_down_2(df,'ER_VISIT')
break_down_2(df,'L_THREAT')
break_down_2(df,'RECOVD')
def percentages_2(vaers_min,vaers_max,vaers_label,vax_count,vax_label):
"""
prints the percents
"""
print(
'\n( {vaers_label} / {vax_label} ) * 100\n'.format(
vaers_label=vaers_label,
vax_label=vax_label
),
'{:.2f}'.format((vaers_min/vax_count)*100),
' - ',
'{:.2f}'.format((vaers_max/vax_count)*100),
'\n'
)
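# Worked example with made-up counts: percentages_2(8000, 12000, 'adverse death',
# 165*10**6, 'full_vax') would print roughly 0.00 - 0.01, since
# (8000 / 165e6) * 100 ~= 0.005 and (12000 / 165e6) * 100 ~= 0.007,
# both rounded to two decimals by the format string.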
def percentages(df,vax_count,vax_label,vaers_min_adj=80,vaers_max_adj=120):
"""
calculates and prints the percentages
"""
print("""
VAERS only contains reported data and
\'...fewer than 1% of vaccine adverse events are reported.\'
resources:
https://digital.ahrq.gov/sites/default/files/docs/publication/r18hs017045-lazarus-final-report-2011.pdf (page 6)
""")
print('therefore, we will be multiplying the VAERS counts by {0} and {1}'.format(vaers_min_adj,vaers_max_adj))
print('thus providing min and max percentages.')
VAERS_count = len(df)
VAERS_death_count = len(df[df['DIED']=='Y'])
VAERS_nrecovd_count = len(df[df['RECOVD']=='N'])
vc_min = VAERS_count * vaers_min_adj
vc_max = VAERS_count * vaers_max_adj
vdc_min = VAERS_death_count * vaers_min_adj
vdc_max = VAERS_death_count * vaers_max_adj
vnr_min = VAERS_nrecovd_count * vaers_min_adj
vnr_max = VAERS_nrecovd_count * vaers_max_adj
percentages_2(
VAERS_count * vaers_min_adj,
VAERS_count * vaers_max_adj,
'adverse reaction',
vax_count,
vax_label
)
percentages_2(
VAERS_death_count * vaers_min_adj,
VAERS_death_count * vaers_max_adj,
'adverse death',
vax_count,
vax_label
)
percentages_2(
VAERS_nrecovd_count * vaers_min_adj,
VAERS_nrecovd_count * vaers_max_adj,
'no recovery',
vax_count,
vax_label)
def symptom_filter_search(df, search_list):
"""
returns a dataframe of the rows whose SYMPTOMS list contains at least one symptom from search_list
"""
data = df.to_dict(orient='records')
search_list = [i.upper() for i in search_list]
results = []
for d in data:
try:
d['SYMPTOMS'] = [i.upper() for i in d['SYMPTOMS'] if isinstance(i, str)]
symptom_match = list(set(d['SYMPTOMS']) & set(search_list))
d['SYMPTOMS_MATCH'] = symptom_match
d['SYMPTOMS_MATCH_LENGTH'] = len(symptom_match)
if len(symptom_match) > 0:
results.append(d)
except:
pass
return pd.DataFrame(results)
def print_row(items,column_lengths=[]):
"""
prints items as a single left-justified row using the given column lengths (default 20 per column)
"""
row = ''
for index,i in enumerate(items):
try:
cl = column_lengths[index]
except IndexError:
cl = 20
row += str(i)[0:cl].ljust(cl)
print(row)
def main():
"""
this will do all the things
"""
#this might not work on linux or macOS
try:
os.system('cls')
except:
pass
#settings
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
df = get_data()
print('\n\n--------------------------------\n\n')
print('all data: ',len(df))
print('\n\n--------------------------------\n\n')
symptoms = symptom_list(df,print_top=100)
print('\n\n--------------------------------\n\n')
breakdowns(df)
print('\n\n--------------------------------\n\n')
full_VAX = 165*10**6 #from google 8/3/2021
print('according to a quick google search (on 8/3/2021) {0:,} have had two doses of the vaccine (full_vax)'.format(full_VAX))
percentages(df,full_VAX,'full_vax',80,120)
print('\n\n--------------------------------\n\n')
half_VAX = 191*10**6 #from google 8/3/2021
print('according to a quick google search (on 8/3/2021) {0:,} have had (at least) one dose of the vaccine (half_vax)'.format(half_VAX))
percentages(df,half_VAX,'half_vax',80,120)
def women_issues():
"""
neighbor (steve's wife...unknown name) mentioned symptoms after first dose of vaccine.
this is for her, but also other women that might be having these issues.
"""
#this might not work on linux or macOS
try:
os.system('cls')
except:
pass
#settings
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
df = get_data()
print('\n\n--------------------------------\n\n')
print('all data: ',len(df))
print(break_down_2(df,'SEX'))
print('\n\n--------------------------------\n\n')
print(
"""
~63% of the people who are vaccinated are women
https://www.statista.com/statistics/1212103/share-of-persons-initiating-covid-vaccinations-by-gender-us-first-month/
"""
)
print('\n\n--------------------------------\n\n')
women_repro_symptoms = [
'Intermenstrual bleeding',
'Menopause',
'Heavy menstrual bleeding',
'dysmenorrhoea',
'ABNORMAL UTERINE BLEEDING',
'MATERNAL EXPOSURE BEFORE PREGNANCY',
'MENSTRUATION IRREGULAR',
'Oligomenorrhea',
'OLIGOMENORRHOEA',
'POLYMENORRHOEA',
'MENSTRUAL DISORDER',
'OLIGOMENORRHOEA',
'ANOVULATORY CYCLE',
'OVULATION DELAYED',
'BACTERIAL VAGINOSIS',
'GYNAECOLOGICAL EXAMINATION ABNORMAL',
'OVARIAN CYST',
'BIOPSY UTERUS',
'UTERINE LEIOMYOMA',
'HOT FLUSH',
'BREAST TENDERNESS',
'BREAST SWELLING',
'BREAST PAIN',
'VAGINAL HAEMORRHAGE'
]
women_repro_symptoms = [i.upper() for i in women_repro_symptoms]
df_symptoms = symptom_filter_search(df,women_repro_symptoms)
# print('the people who have 1 or more women_repro_symptoms')
#women only
w_df = df[df['SEX']=='F']
u_df = df[df['SEX']=='U']
w_count = len(w_df) + (len(u_df)/2)
vaxxed = 191*10**6 #from google 8/3/2021 (one or more vaccination)
women_vaxxed = vaxxed * 0.63
vaers_ratio = ( len(df_symptoms)/w_count )
#based on the ratio of repro symptoms and vaers women
wrs = women_vaxxed * vaers_ratio
min_wrs = wrs * 0.80
max_wrs = wrs * 1.20
# minmin_wrs_percent = (min_wrs/min_F_vaers)*100
# minmax_wrs_percent = (min_wrs/max_F_vaers)*100
# maxmax_wrs_percent = (max_wrs/max_F_vaers)*100
# maxmin_wrs_percent = (max_wrs/min_F_vaers)*100
cl = [25,5,15]
print_row(['total vaxxed (1 or more)','','{:,.2f}'.format(vaxxed)],column_lengths=cl)
print_row(['women vaxxed ~63%','','{:,.2f}'.format(women_vaxxed)],column_lengths=cl)
print_row(['repro sympt / women count','','{:,.4f}'.format(vaers_ratio)],column_lengths=cl)
print_row(['women w/ repro symptoms','','{:,.2f}'.format(wrs)],column_lengths=cl)
print_row(['min women w/ repro symptoms','','{:,.2f}'.format(min_wrs)],column_lengths=cl)
print_row(['max women w/ repro symptoms','','{:,.2f}'.format(max_wrs)],column_lengths=cl)
print('\n\n--------------------------------\n\n')
print('most common to least common symptoms and how they compare to all_symptoms')
all_symptoms = symptom_list(df,print_top=0)
cl= [10,25,10,10]
print_row(['index','symptoms','count','percent of symptoms'],column_lengths=cl)
for index,i in enumerate(all_symptoms):
if i[0].upper() in women_repro_symptoms:
print_row(
[
index,
i[0],
'{:,.2f}'.format(i[1]),
'{:.2f}'.format((i[1]/len(all_symptoms))*100)
],
column_lengths=cl
)
print('\n\n--------------------------------\n\n')
file_name = os.path.join(datapath,'women_repro_symptoms_20210808.csv')
df_symptoms.to_csv(file_name)
print('saved: ',file_name)
if __name__ == '__main__':
# main()
women_issues()
# print_row(['One','Two','Three','Four'],[20,15,10,5])
# print_row(['One','Two','Three','Four'],[20,15,10,5])
# print_row(['One','Two','Three','Four'],[20,15,10,5])
# print_row(['One','Two','Three','Four'],[20,15,10,5])
```
|
{
"source": "jgarzik/arbot",
"score": 3
}
|
#### File: jgarzik/arbot/ethunits.py
```python
from decimal import Decimal
def toEther(wei):
return Decimal(wei) / Decimal('1000000000000000000')
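# Illustrative only:
# >>> toEther(1500000000000000000)
# Decimal('1.5')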
```
|
{
"source": "jgarzik/pagedb",
"score": 2
}
|
#### File: jgarzik/pagedb/PageDb.py
```python
import struct
import json
import re
import os
import os.path
import mmap
import uuid
from TableRoot import TableRoot
import Block
import PDcodec_pb2
import RecLogger
from util import trywrite, isstr, readrecstr, writerecstr
import google.protobuf.message # needed for the DecodeError handler in PDSuper.deserialize
class PDTableMeta(object):
def __init__(self, super):
# serialized
self.name = ''
self.uuid = uuid.uuid4()
self.root_id = -1
# only used at runtime
self.super = super
self.root = None
self.log_cache = {}
self.log_del_cache = set()
def flush_rootidx(self):
if not self.root.dirty:
return True
old_root_id = self.root.root_id
self.root.root_id = self.super.new_fileid()
if not self.root.dump():
self.root.root_id = old_root_id
return False
self.root_id = self.root.root_id
self.super.garbage_fileids.append(old_root_id)
return True
def checkpoint_initial(self):
writer = Block.BlockWriter(self.super)
keys = sorted(self.log_cache.keys())
for key in keys:
if not writer.push(key, self.log_cache[key]):
return False
if not writer.flush():
return False
self.root.v = writer.root_v
self.root.dirty = True
if not self.flush_rootidx():
return False
return True
def checkpoint_block(self, blkent, add_recs, del_recs):
# read old block data
block = Block.Block(self.super.dbdir, blkent.file_id)
if not block.open():
return None
blkvals = block.readall()
if blkvals is None:
return None
# merge old block data (blkvals), new block data (add_recs),
# and block data deletion notations (del_recs)
# into a single sorted stream of key/value pairs
writer = Block.BlockWriter(self.super)
idx_old = 0
idx_new = 0
idx_del = 0
# 'or' rather than 'and': whichever stream runs out first, the remainder of the other is still merged
while (idx_old < len(blkvals) or
idx_new < len(add_recs)):
have_old = idx_old < len(blkvals)
have_new = idx_new < len(add_recs)
have_del = idx_del < len(del_recs)
if (have_old and
((not have_new) or
(blkvals[idx_old][0] <= add_recs[idx_new][0]))):
tup = blkvals[idx_old]
idx_old += 1
else:
tup = add_recs[idx_new]
idx_new += 1
if have_del and (tup[0] == del_recs[idx_del]):
idx_del += 1
else:
if not writer.push(tup[0], tup[1]):
return None
if not writer.flush():
return None
return writer.root_v
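# Sketch of the merge above: blkvals and add_recs are both sorted by key, so the
# two index pointers walk them as an ordinary merge of sorted streams; any key that
# also appears next in del_recs is skipped rather than pushed, which is how
# deletions get folded into the rewritten block.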
def checkpoint(self):
if len(self.root.v) == 0:
return self.checkpoint_initial()
keys = sorted(self.log_cache.keys())
del_keys = sorted(self.log_del_cache)
keyidx = 0
del_keyidx = 0
blockidx = 0
last_block = len(self.root.v) - 1
new_root_v = []
root_dirty = False
while blockidx <= last_block:
ent = self.root.v[blockidx]
# accumulate new records belonging to this block
add_recs = []
while (keyidx < len(keys) and
((keys[keyidx] <= ent.key) or
(blockidx == last_block))):
tup = (keys[keyidx],
self.log_cache[keys[keyidx]])
add_recs.append(tup)
keyidx += 1
# accumulate record deletions belonging to this block
del_recs = []
while (del_keyidx < len(del_keys) and
(del_keys[del_keyidx] <= ent.key)):
del_recs.append(del_keys[del_keyidx])
del_keyidx += 1
# update block, or split into multiple blocks
if len(add_recs) > 0 or len(del_recs) > 0:
entlist = self.checkpoint_block(ent,
add_recs, del_recs)
if entlist is None:
return False
if (len(entlist) == 1 and
entlist[0].key == ent.key and
entlist[0].file_id == ent.file_id):
new_root_v.append(ent)
else:
new_root_v.extend(entlist)
root_dirty = True
else:
new_root_v.append(ent)
blockidx += 1
if root_dirty:
self.root.v = new_root_v
self.root.dirty = True
if not self.flush_rootidx():
return False
return True
def checkpoint_flush(self):
self.log_cache = {}
self.log_del_cache = set()
class PDSuper(object):
def __init__(self, dbdir):
self.version = 1
self.uuid = uuid.uuid4()
self.log_id = 1L
self.next_txn_id = 1L
self.next_file_id = 2L
self.tables = {}
self.dirty = False
# only used at runtime
self.dbdir = dbdir
self.garbage_fileids = []
def load(self):
try:
fd = os.open(self.dbdir + '/super', os.O_RDONLY)
map = mmap.mmap(fd, 0, mmap.MAP_SHARED, mmap.PROT_READ)
deser_ok = self.deserialize(map)
map.close()
os.close(fd)
if not deser_ok:
return False
except OSError:
return False
return True
def dump(self):
data = self.serialize()
try:
fd = os.open(self.dbdir + '/super.tmp',
os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0666)
ok = trywrite(fd, data)
os.fsync(fd)
os.close(fd)
if not ok:
os.unlink(self.dbdir + '/super.tmp')
return False
except OSError:
return False
try:
os.rename(self.dbdir + '/super.tmp',
self.dbdir + '/super')
except OSError:
os.unlink(self.dbdir + '/super.tmp')
return False
self.dirty = False
return True
def deserialize(self, s):
hdr = s[:8]
if hdr != 'SUPER ':
return False
tup = readrecstr(s[8:])
if tup is None:
return False
recname = tup[0]
data = tup[1]
if recname != 'SUPR':
return False
obj = PDcodec_pb2.Superblock()
try:
obj.ParseFromString(data)
except google.protobuf.message.DecodeError:
return False
self.log_id = obj.log_id
self.next_txn_id = obj.next_txn_id
self.next_file_id = obj.next_file_id
if (self.log_id < 1 or
self.next_txn_id < 1 or
self.next_file_id < 1):
return False
try:
self.uuid = uuid.UUID(obj.uuid)
except ValueError:
return False
for tm in obj.tables:
tablemeta = PDTableMeta(self)
tablemeta.name = tm.name
tablemeta.root_id = tm.root_id
try:
tablemeta.uuid = uuid.UUID(tm.uuid)
except ValueError:
return False
self.tables[tablemeta.name] = tablemeta
return True
def serialize(self):
obj = PDcodec_pb2.Superblock()
obj.uuid = self.uuid.hex
obj.log_id = self.log_id
obj.next_txn_id = self.next_txn_id
obj.next_file_id = self.next_file_id
for tablemeta in self.tables.itervalues():
tm = obj.tables.add()
tm.name = unicode(tablemeta.name)
tm.uuid = tablemeta.uuid.hex
tm.root_id = tablemeta.root_id
r = 'SUPER '
r += writerecstr('SUPR', obj.SerializeToString())
return r
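# Layout sketch (mirroring deserialize above): a short 'SUPER' magic padded with
# spaces and read back as the first 8 bytes, followed by one 'SUPR' record emitted
# via writerecstr() that wraps the protobuf-encoded Superblock message.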
def new_fileid(self):
rv = self.next_file_id
self.next_file_id += 1
self.dirty = True
return rv
def new_txnid(self):
rv = self.next_txn_id
self.next_txn_id += 1
self.dirty = True
return rv
class PageTxn(object):
def __init__(self, id):
self.id = id
self.log = []
def get(self, k):
for dr in reversed(self.log):
if dr.key == k:
if dr.recmask & RecLogger.LOGR_DELETE:
return None
return dr.v
return None
def exists(self, k):
for dr in reversed(self.log):
if dr.key == k:
if dr.recmask & RecLogger.LOGR_DELETE:
return False
return True
return False
class PageTable(object):
def __init__(self, db, tablemeta):
self.db = db
self.tablemeta = tablemeta
def put(self, txn, k, v):
dr = self.db.logger.data(self.tablemeta, txn, k, v)
if dr is None:
return False
txn.log.append(dr)
return True
def delete(self, txn, k):
if not self.exists(txn, k):
return False
dr = self.db.logger.data(self.tablemeta, txn, k, None, True)
if dr is None:
return False
txn.log.append(dr)
return True
def get(self, txn, k):
if txn and txn.exists(k):
return txn.get(k)
if k in self.tablemeta.log_del_cache:
return None
if k in self.tablemeta.log_cache:
return self.tablemeta.log_cache[k]
ent = self.tablemeta.root.lookup(k)
if ent is None:
return None
block = self.db.blockmgr.get(ent.file_id)
if block is None:
return None
blkent = block.lookup(k)
if blkent is None:
return None
return block.read_value(blkent)
def exists(self, txn, k):
if txn and txn.exists(k):
return True
if k in self.tablemeta.log_del_cache:
return False
if k in self.tablemeta.log_cache:
return True
ent = self.tablemeta.root.lookup(k)
if ent is None:
return False
block = self.db.blockmgr.get(ent.file_id)
if block is None:
return False
blkent = block.lookup(k)
if blkent is None:
return False
return True
class PageDb(object):
def __init__(self):
self.dbdir = None
self.super = None
self.logger = None
self.blockmgr = None
def open(self, dbdir):
self.dbdir = dbdir
self.super = PDSuper(dbdir)
if not self.super.load():
return False
if not self.read_logs():
return False
self.logger = RecLogger.RecLogger(dbdir, self.super.log_id)
if not self.logger.open():
return False
self.blockmgr = Block.BlockManager(dbdir)
return True
def apply_logdata(self, obj):
try:
tablemeta = self.super.tables[obj.table]
except KeyError:
return False
if obj.recmask & RecLogger.LOGR_DELETE:
tablemeta.log_del_cache.add(obj.key)
try:
del tablemeta.log_cache[obj.key]
except KeyError:
pass
else:
tablemeta.log_cache[obj.key] = obj.value
tablemeta.log_del_cache.discard(obj.key)
return True
def read_logtable(self, obj):
# TODO: logged table deletion unsupported
if obj.recmask & RecLogger.LOGR_DELETE:
return False
if obj.tabname in self.super.tables:
return False
tablemeta = PDTableMeta(self.super)
tablemeta.name = obj.tabname
tablemeta.root_id = obj.root_id
tablemeta.root = TableRoot(self.dbdir, tablemeta.root_id)
self.super.tables[obj.tabname] = tablemeta
self.super.dirty = True
return True
def read_superop(self, obj):
if obj.op == PDcodec_pb2.LogSuperOp.INC_TXN:
self.super.next_txn_id += 1
elif obj.op == PDcodec_pb2.LogSuperOp.INC_FILE:
self.super.next_file_id += 1
else:
return False
self.super.dirty = True
return True
def read_logtxn_start(self, txns, obj):
if obj.txn_id in txns:
return False
txn = PageTxn(obj.txn_id)
txns[obj.txn_id] = txn
return True
def read_logtxn_abort(self, txns, obj):
if obj.txn_id not in txns:
return False
del txns[obj.txn_id]
return True
def read_logtxn_commit(self, txns, obj):
if obj.txn_id not in txns:
return False
txn = txns[obj.txn_id]
del txns[obj.txn_id]
for dr in txn.log:
if not self.apply_logdata(dr):
return False
return True
def read_logdata(self, txns, obj):
if obj.txn_id not in txns:
return False
txn = txns[obj.txn_id]
txn.log.append(obj)
return True
def read_log(self, logger):
txns = {}
while True:
tup = logger.read()
if tup is None:
return True
recname = tup[0]
obj = tup[1]
if recname == RecLogger.LOGR_ID_TXN_START:
if not self.read_logtxn_start(txns, obj):
return False
elif recname == RecLogger.LOGR_ID_TXN_COMMIT:
if not self.read_logtxn_commit(txns, obj):
return False
elif recname == RecLogger.LOGR_ID_TXN_ABORT:
if not self.read_logtxn_abort(txns, obj):
return False
elif recname == RecLogger.LOGR_ID_DATA:
if not self.read_logdata(txns, obj):
return False
elif recname == RecLogger.LOGR_ID_TABLE:
if not self.read_logtable(obj):
return False
elif recname == RecLogger.LOGR_ID_SUPER:
if not self.read_superop(obj):
return False
def read_logs(self):
log_id = self.super.log_id
while True:
logger = RecLogger.RecLogger(self.dbdir, log_id)
if not logger.open(True):
if log_id == self.super.log_id:
return False
return True
if not logger.readreset():
return False
if not self.read_log(logger):
return False
log_id += 1
def create(self, dbdir):
if not os.path.isdir(dbdir):
return False
self.dbdir = dbdir
self.super = PDSuper(dbdir)
if not self.super.dump():
return False
self.logger = RecLogger.RecLogger(dbdir, self.super.log_id)
if not self.logger.open():
return False
self.blockmgr = Block.BlockManager(dbdir)
return True
def open_table(self, name):
try:
tablemeta = self.super.tables[name]
except KeyError:
return None
if tablemeta.root is None:
root = TableRoot(self.dbdir, tablemeta.root_id)
if not root.load():
return None
tablemeta.root = root
return PageTable(self, tablemeta)
def create_table(self, name):
        m = re.search(r'^\w+$', name)
if m is None:
return False
if name in self.super.tables:
return False
tablemeta = PDTableMeta(self.super)
tablemeta.name = name
tablemeta.root_id = self.super.new_fileid()
tablemeta.root = TableRoot(self.dbdir, tablemeta.root_id)
if not tablemeta.root.dump():
return False
if not self.logger.superop(self.super,
PDcodec_pb2.LogSuperOp.INC_FILE):
return None
if not self.logger.tableop(tablemeta, None):
return None
self.super.tables[name] = tablemeta
self.super.dirty = True
return True
def txn_begin(self):
if not self.logger.superop(self.super,
PDcodec_pb2.LogSuperOp.INC_TXN):
return None
txn = PageTxn(self.super.new_txnid())
if not self.logger.txn_begin(txn):
return None
return txn
def txn_commit(self, txn, sync=True):
if not self.logger.txn_end(txn, True):
return False
if sync and not self.logger.sync():
return False
for dr in txn.log:
if not self.apply_logdata(dr):
return False
return True
def txn_abort(self, txn):
if not self.logger.txn_end(txn, False):
return False
return True
def checkpoint(self):
for tablemeta in self.super.tables.itervalues():
if not tablemeta.checkpoint():
return False
# alloc new log id, open new log
new_log_id = self.super.new_fileid()
new_logger = RecLogger.RecLogger(self.dbdir, new_log_id)
if not new_logger.open():
self.super.garbage_fileids.append(new_log_id)
return False
# swap in new log id into superblock, write superblock
old_log_id = self.super.log_id
self.super.log_id = new_log_id
if not self.super.dump():
self.super.log_id = old_log_id
self.super.garbage_fileids.append(new_log_id)
return False
# if we succeeded in switching to the newly committed
# data, flush cached log data just written to storage
for tablemeta in self.super.tables.itervalues():
tablemeta.checkpoint_flush()
# overwrite old logger, closing old log file
self.super.garbage_fileids.append(old_log_id)
self.logger = new_logger
# TODO: delete super.garbage_fileids
return True
```
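For orientation, a minimal usage sketch of the PageDb API defined above. It assumes the sibling modules it imports (RecLogger, Block, PDcodec_pb2) and the TableRoot/PDTableMeta/PDSuper classes earlier in this file behave as the calls suggest; the directory name and key/value types are illustrative only.
```python
# Hypothetical walk-through of the PageDb API above; every method used here
# (create, create_table, open_table, txn_begin, put, get, txn_commit,
# checkpoint) is defined in the classes shown, but no claim is made about the
# exact on-disk behaviour of the supporting modules.
import os

dbdir = "/tmp/pagedb-example"          # placeholder database directory
if not os.path.isdir(dbdir):
    os.mkdir(dbdir)

db = PageDb()
assert db.create(dbdir)                # write a fresh superblock, open a log
assert db.create_table("kv")           # log the table creation
table = db.open_table("kv")

txn = db.txn_begin()                   # allocates a txn id via the superblock
assert table.put(txn, "hello", "world")
assert db.txn_commit(txn)              # append to the log, then apply to cache

print(table.get(None, "hello"))        # non-transactional read -> "world"
assert db.checkpoint()                 # fold the per-table log caches to disk
```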
|
{
"source": "JGASmits/gimmemotifs",
"score": 2
}
|
#### File: gimmemotifs/commands/location.py
```python
from gimmemotifs.fasta import Fasta
from gimmemotifs.motif import read_motifs
from gimmemotifs.utils import motif_localization
from multiprocessing import Pool
from gimmemotifs.config import MotifConfig
import os
def location(args):
"""
    Creates a histogram of motif locations.
Parameters
----------
args : argparse object
Command line arguments.
"""
fastafile = args.fastafile
pfmfile = args.pfmfile
lsize = args.size
if not lsize:
f = Fasta(fastafile)
lsize = len(f.items()[0][1])
f = None
jobs = []
motifs = read_motifs(pfmfile)
ids = [motif.id for motif in motifs]
if args.ids:
ids = args.ids.split(",")
n_cpus = int(MotifConfig().get_default_params()["ncpus"])
pool = Pool(processes=n_cpus, maxtasksperchild=1000)
for motif in motifs:
if motif.id in ids:
outfile = os.path.join("%s_histogram" % motif.id)
jobs.append(
pool.apply_async(
motif_localization, (fastafile, motif, lsize, outfile, args.cutoff)
)
)
for job in jobs:
job.get()
```
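For reference, a hedged sketch of driving location() without the gimme command-line wrapper; the argparse namespace is built by hand with the attribute names the function reads, and the file names are placeholders.
```python
# Hypothetical direct call of location(); the import path follows the file
# header above and the input files are placeholders.
from argparse import Namespace
from gimmemotifs.commands.location import location

args = Namespace(
    fastafile="peaks.fa",   # FASTA with (ideally) equally sized sequences
    pfmfile="motifs.pfm",   # PFM/PWM file readable by read_motifs()
    size=200,               # histogram width; None -> length of first sequence
    ids=None,               # or "id1,id2" to restrict the motifs
    cutoff=0.95,            # PWM score cutoff used by motif_localization()
)
location(args)              # writes one "<motif.id>_histogram" file per motif
```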
#### File: gimmemotifs/commands/logo.py
```python
import os
import sys
from gimmemotifs.motif import read_motifs
from gimmemotifs.utils import pfmfile_location
def logo(args):
if args.pfmfile is None and args.ids is None:
name = os.path.splitext(os.path.split(pfmfile_location(None))[-1])[0]
print(
"Use the -i argument to specify which motif ids you want to use for logos."
)
print("If you really want to create logos for all of the motifs in the default")
print("PFM file use the following command:")
print(f"gimme logo -p {name}")
sys.exit(1)
inputfile = args.pfmfile
motifs = read_motifs(inputfile)
if args.ids:
ids = args.ids.split(",")
motifs = [m for m in motifs if m.id in ids]
for motif in motifs:
motif.plot_logo(
fname="{}.png".format(motif.id), kind=args.kind, title=args.title
)
```
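The command above is a thin wrapper; below is a hedged sketch of the same work done directly with the functions it calls, using placeholder file and motif names (the value accepted by `kind` is an assumption).
```python
# Equivalent of `gimme logo -p motifs.pfm -i <id>` done by hand with the calls
# used inside logo(); "information" as a logo kind is assumed, not verified.
from gimmemotifs.motif import read_motifs

wanted = {"GM.5.0.Sox.0001"}                       # hypothetical motif id
for motif in read_motifs("motifs.pfm"):            # placeholder PFM file
    if motif.id in wanted:
        motif.plot_logo(fname="{}.png".format(motif.id),
                        kind="information", title=True)
```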
#### File: gimmemotifs/gimmemotifs/moap.py
```python
from __future__ import print_function
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
warnings.filterwarnings("ignore", message="sklearn.externals.joblib is deprecated")
import os
import sys
import shutil
try:
from itertools import izip
except ImportError:
izip = zip
import itertools
import logging
from multiprocessing import Pool
import pandas as pd
import numpy as np
from scipy.stats import hypergeom, mannwhitneyu
from statsmodels.sandbox.stats.multicomp import multipletests
from tqdm.auto import tqdm
# scikit-learn
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.ensemble import BaggingClassifier, RandomForestClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.linear_model import MultiTaskLasso, BayesianRidge
from sklearn.preprocessing import scale, LabelEncoder
from lightning.classification import CDClassifier
from lightning.regression import CDRegressor
import xgboost
from gimmemotifs import __version__
from gimmemotifs.motif import read_motifs
from gimmemotifs.scanner import Scanner
from gimmemotifs.config import MotifConfig
from gimmemotifs.utils import pfmfile_location, as_fasta
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
logger = logging.getLogger("gimme.maelstrom")
FPR = 0.01
def scan_to_table(
input_table, genome, scoring, pfmfile=None, ncpus=None, zscore=True, gc=True
):
"""Scan regions in input table with motifs.
Parameters
----------
input_table : str
Filename of input table. Can be either a text-separated tab file or a
feather file.
genome : str
Genome name. Can be either the name of a FASTA-formatted file or a
genomepy genome name.
scoring : str
"count" or "score"
pfmfile : str, optional
Specify a PFM file for scanning.
ncpus : int, optional
If defined this specifies the number of cores to use.
Returns
-------
table : pandas.DataFrame
DataFrame with motif ids as column names and regions as index. Values
        are either counts or scores depending on the 'scoring' parameter.
"""
config = MotifConfig()
if pfmfile is None:
pfmfile = config.get_default_params().get("motif_db", None)
if pfmfile is not None:
pfmfile = os.path.join(config.get_motif_dir(), pfmfile)
if pfmfile is None:
raise ValueError("no pfmfile given and no default database specified")
logger.info("reading table")
if input_table.endswith("feather"):
df = pd.read_feather(input_table)
idx = df.iloc[:, 0].values
else:
df = pd.read_table(input_table, index_col=0, comment="#")
idx = df.index
regions = list(idx)
if len(regions) >= 1000:
check_regions = np.random.choice(regions, size=1000, replace=False)
else:
check_regions = regions
size = int(
np.median([len(seq) for seq in as_fasta(check_regions, genome=genome).seqs])
)
s = Scanner(ncpus=ncpus)
s.set_motifs(pfmfile)
s.set_genome(genome)
s.set_background(genome=genome, gc=gc, size=size)
scores = []
if scoring == "count":
logger.info("setting threshold")
s.set_threshold(fpr=FPR)
logger.info("creating count table")
for row in s.count(regions):
scores.append(row)
logger.info("done")
else:
s.set_threshold(threshold=0.0)
msg = "creating score table"
if zscore:
msg += " (z-score"
if gc:
msg += ", GC%"
msg += ")"
else:
msg += " (logodds)"
logger.info(msg)
for row in s.best_score(regions, zscore=zscore, gc=gc):
scores.append(row)
logger.info("done")
motif_names = [m.id for m in read_motifs(pfmfile)]
logger.info("creating dataframe")
return pd.DataFrame(scores, index=idx, columns=motif_names)
class Moap(object):
"""Moap base class.
Motif activity prediction.
"""
_predictors = {}
name = None
@classmethod
def create(cls, name, ncpus=None):
"""Create a Moap instance based on the predictor name.
Parameters
----------
name : str
Name of the predictor (eg. Xgboost, BayesianRidge, ...)
ncpus : int, optional
Number of threads. Default is the number specified in the config.
Returns
-------
moap : Moap instance
moap instance.
"""
try:
return cls._predictors[name.lower()](ncpus=ncpus)
except KeyError:
raise Exception("Unknown class")
@classmethod
def register_predictor(cls, name):
"""Register method to keep list of predictors."""
def decorator(subclass):
"""Register as decorator function."""
cls._predictors[name.lower()] = subclass
subclass.name = name.lower()
return subclass
return decorator
@classmethod
def list_predictors(self):
"""List available predictors."""
return list(self._predictors.keys())
@classmethod
def list_classification_predictors(self):
"""List available classification predictors."""
preds = [self.create(x) for x in self._predictors.keys()]
return [x.name for x in preds if x.ptype == "classification"]
@classmethod
def list_regression_predictors(self):
"""List available regression predictors."""
preds = [self.create(x) for x in self._predictors.keys()]
return [x.name for x in preds if x.ptype == "regression"]
register_predictor = Moap.register_predictor
def br_fit(X, y):
model = BayesianRidge()
model.fit(X, y)
return model.coef_
def br_fit_star(args):
return br_fit(*args)
@register_predictor("BayesianRidge")
class BayesianRidgeMoap(Moap):
def __init__(self, scale=True, ncpus=None):
"""Predict motif activities using Bayesian Ridge Regression.
Parameters
----------
scale : boolean, optional, default True
If ``True``, the motif scores will be scaled
before classification.
ncpus : int, optional
Number of threads. Default is the number specified in the config.
Attributes
----------
act_ : DataFrame, shape (n_motifs, n_clusters)
Coefficients of the regression model.
"""
self.act_description = "activity values: coefficients of the" "regression model"
if ncpus is None:
ncpus = int(MotifConfig().get_default_params().get("ncpus", 2))
self.ncpus = ncpus
self.scale = scale
self.act_ = None
self.pref_table = "score"
self.supported_tables = ["score", "count"]
self.ptype = "regression"
def fit(self, df_X, df_y):
logger.info("Fitting BayesianRidge")
if not df_y.shape[0] == df_X.shape[0]:
raise ValueError("number of regions is not equal")
if self.scale:
logger.debug("Scaling motif scores")
# Scale motif scores
df_X[:] = scale(df_X, axis=0)
# logger.debug("Scaling y")
# Normalize across samples and features
# y = df_y.apply(scale, 1).apply(scale, 0)
y = df_y
X = df_X.loc[y.index]
logger.debug("Fitting model")
pool = Pool(self.ncpus)
coefs = [
x
for x in tqdm(
pool.imap(
br_fit_star,
izip(itertools.repeat(X), [y[col] for col in y.columns]),
),
total=len(y.columns),
)
]
logger.info("Done")
self.act_ = pd.DataFrame(coefs, columns=X.columns, index=y.columns).T
@register_predictor("Xgboost")
class XgboostRegressionMoap(Moap):
def __init__(self, scale=True, ncpus=None):
"""Predict motif activities using XGBoost.
Parameters
----------
scale : boolean, optional, default True
If ``True``, the motif scores will be scaled
before classification
ncpus : int, optional
Number of threads. Default is the number specified in the config.
Attributes
----------
act_ : DataFrame, shape (n_motifs, n_clusters)
Feature scores.
"""
self.act_description = "activity values: feature scores from" "fitted model"
if ncpus is None:
ncpus = int(MotifConfig().get_default_params().get("ncpus", 2))
self.ncpus = ncpus
self.scale = scale
self.act_ = None
self.pref_table = "score"
self.supported_tables = ["score", "count"]
self.ptype = "regression"
def fit(self, df_X, df_y):
logger.info("Fitting XGBoostRegression")
if not df_y.shape[0] == df_X.shape[0]:
raise ValueError("number of regions is not equal")
if self.scale:
# Scale motif scores
df_X[:] = scale(df_X, axis=0)
# Normalize across samples and features
# y = df_y.apply(scale, 1).apply(scale, 0)
y = df_y
X = df_X.loc[y.index]
# Define model
xgb = xgboost.XGBRegressor(
n_estimators=500,
learning_rate=0.01,
nthread=self.ncpus,
min_child_weight=2,
max_depth=3,
subsample=0.8,
colsample_bytree=0.8,
objective="reg:squarederror",
)
logger.debug("xgb: 0%")
self.act_ = pd.DataFrame(index=X.columns)
# Fit model
for i, col in enumerate(tqdm(y.columns)):
xgb.fit(X, y[col].values)
d = xgb.get_booster().get_fscore()
self.act_[col] = [d.get(m, 0) for m in X.columns]
for motif in self.act_.index:
if self.act_.loc[motif, col] != 0:
high = df_y.loc[
df_X[motif] >= df_X[motif].quantile(0.75), col
].mean()
low = df_y.loc[
df_X[motif] <= df_X[motif].quantile(0.25), col
].mean()
if low > high:
self.act_.loc[motif, col] *= -1
logger.debug("..{}%".format(int(float(i + 1) / len(y.columns) * 100)))
logger.info("Done")
@register_predictor("LightningRegressor")
class LightningRegressionMoap(Moap):
def __init__(self, scale=True, cv=3, ncpus=None):
"""Predict motif activities using lightning CDRegressor
Parameters
----------
scale : boolean, optional, default True
If ``True``, the motif scores will be scaled
before classification
cv : int, optional, default 3
Cross-validation k-fold parameter.
ncpus : int, optional
Number of threads. Default is the number specified in the config.
Attributes
----------
act_ : DataFrame, shape (n_motifs, n_clusters)
fitted coefficients
sig_ : DataFrame, shape (n_motifs,)
            boolean values: True if a coefficient is above the 99th or below
            the 1st percentile of coefficients obtained from random permutations
"""
self.act_description = "activity values: coefficients from " "fitted model"
if ncpus is None:
ncpus = int(MotifConfig().get_default_params().get("ncpus", 2))
self.ncpus = ncpus
self.kfolds = cv
self.scale = scale
self.act_ = None
self.pref_table = "score"
self.supported_tables = ["score", "count"]
self.ptype = "regression"
def fit(self, df_X, df_y, batch_size=50, shuffle=True, tmpdir=None):
logger.info("Fitting LightningRegression")
if self.scale:
# Scale motif scores
df_X[:] = scale(df_X, axis=0)
# Normalize across samples and features
# y = df_y.apply(scale, 1).apply(scale, 0)
y = df_y
X = df_X.loc[y.index]
if not y.shape[0] == X.shape[0]:
raise ValueError("number of regions is not equal")
# Define model
cd = CDRegressor(penalty="l1/l2", C=1.0)
parameters = {"alpha": [np.exp(-x) for x in np.arange(0, 10, 1 / 2)]}
clf = GridSearchCV(cd, parameters, n_jobs=self.ncpus)
if shuffle:
idx = list(y.sample(y.shape[1], axis=1, random_state=42).columns)
else:
idx = list(y.columns)
if tmpdir:
if not os.path.exists(tmpdir):
os.mkdir(tmpdir)
coefs = pd.DataFrame(index=X.columns)
start_i = 0
if tmpdir:
for i in range(0, len(idx), batch_size):
fname = os.path.join(tmpdir, "{}.feather".format(i))
if os.path.exists(fname) and os.path.exists(fname + ".done"):
tmp = pd.read_feather(fname)
tmp = tmp.set_index(tmp.columns[0])
coefs = coefs.join(tmp)
else:
logger.info("Resuming at batch {}".format(i))
start_i = i
break
for i in tqdm(range(start_i, len(idx), batch_size)):
split_y = y[idx[i : i + batch_size]]
# Fit model
clf.fit(X.values, split_y.values)
tmp = pd.DataFrame(
clf.best_estimator_.coef_.T, index=X.columns, columns=split_y.columns
)
if tmpdir:
fname = os.path.join(tmpdir, "{}.feather".format(i))
tmp.reset_index().rename(columns=str).to_feather(fname)
# Make sure we don't read corrupted files
open(fname + ".done", "a").close()
# Get coefficients
coefs = coefs.join(tmp)
# Get coefficients
self.act_ = coefs[y.columns]
logger.info("Done")
@register_predictor("LightningClassification")
class LightningClassificationMoap(Moap):
def __init__(self, scale=True, permute=False, ncpus=None):
"""Predict motif activities using lightning CDClassifier
Parameters
----------
scale : boolean, optional, default True
If ``True``, the motif scores will be scaled
before classification
ncpus : int, optional
Number of threads. Default is the number specified in the config.
Attributes
----------
act_ : DataFrame, shape (n_motifs, n_clusters)
fitted coefficients
sig_ : DataFrame, shape (n_motifs,)
            boolean values: True if a coefficient is above the 99th or below
            the 1st percentile of coefficients obtained from random permutations
"""
self.act_description = "activity values: coefficients from " "fitted model"
# self.cdc = CDClassifier(random_state=args.seed)
self.cdc = CDClassifier()
self.parameters = {
"penalty": ["l1/l2"],
"loss": ["squared_hinge"],
"multiclass": [True],
"max_iter": [20],
"alpha": [np.exp(-x) for x in np.arange(0, 10, 1 / 3.0)],
"C": [0.001, 0.01, 0.1, 0.5, 1.0],
"tol": [1e-3],
}
self.kfolds = 10
if ncpus is None:
ncpus = int(MotifConfig().get_default_params().get("ncpus", 2))
self.clf = GridSearchCV(self.cdc, self.parameters, cv=self.kfolds, n_jobs=ncpus)
self.scale = scale
self.permute = permute
self.act_ = None
self.sig_ = None
self.pref_table = "score"
self.supported_tables = ["score", "count"]
self.ptype = "classification"
def fit(self, df_X, df_y):
logger.info("Fitting LightningClassification")
if not df_y.shape[0] == df_X.shape[0]:
raise ValueError("number of regions is not equal")
if df_y.shape[1] != 1:
raise ValueError("y needs to have 1 label column")
if self.scale:
# Scale motif scores
df_X[:] = scale(df_X, axis=0)
idx = list(range(df_y.shape[0]))
y = df_y.iloc[idx]
X = df_X.loc[y.index].values
y = y.values.flatten()
# Convert (putative) string labels
label = LabelEncoder()
y = label.fit_transform(y)
# Split data
X_train, X_test, y_train, y_test = train_test_split(X, y)
logger.debug("Setting parameters through cross-validation")
# Determine best parameters based on CV
self.clf.fit(X_train, y_train)
logger.debug(
"Average score ({} fold CV): {}".format(
self.kfolds, self.clf.score(X_test, y_test)
)
)
logger.debug("Estimate coefficients using bootstrapping")
        # Estimate coefficients using bootstrapping
# b = BaggingClassifier(self.clf.best_estimator_,
# max_samples=0.75, n_jobs=-1, random_state=state)
b = BaggingClassifier(self.clf.best_estimator_, max_samples=0.75, n_jobs=-1)
b.fit(X, y)
# Get mean coefficients
coeffs = np.array([e.coef_ for e in b.estimators_]).mean(axis=0)
# Create dataframe of predicted coefficients
if len(label.classes_) == 2:
self.act_ = pd.DataFrame(np.hstack((-coeffs.T, coeffs.T)))
else:
self.act_ = pd.DataFrame(coeffs.T)
# Convert labels back to original names
self.act_.columns = label.inverse_transform(range(len(label.classes_)))
self.act_.index = df_X.columns
if self.permute:
# Permutations
logger.debug("Permutations")
random_dfs = []
for _ in range(10):
y_random = np.random.permutation(y)
b.fit(X, y_random)
coeffs = np.array([e.coef_ for e in b.estimators_]).mean(axis=0)
if len(label.classes_) == 2:
random_dfs.append(pd.DataFrame(np.hstack((-coeffs.T, coeffs.T))))
else:
random_dfs.append(pd.DataFrame(coeffs.T))
random_df = pd.concat(random_dfs)
# Select cutoff based on percentile
high_cutoffs = random_df.quantile(0.99)
low_cutoffs = random_df.quantile(0.01)
# Set significance
self.sig_ = pd.DataFrame(index=df_X.columns)
self.sig_["sig"] = False
for col, c_high, c_low in zip(self.act_.columns, high_cutoffs, low_cutoffs):
self.sig_["sig"].loc[self.act_[col] >= c_high] = True
self.sig_["sig"].loc[self.act_[col] <= c_low] = True
logger.info("Done")
@register_predictor("MWU")
class MWUMoap(Moap):
def __init__(self, *args, **kwargs):
"""Predict motif activities using Mann-Whitney U p-value
This method compares the motif score distribution of each
cluster versus the motif score distribution of all other
clusters.
Parameters
----------
Attributes
----------
act_ : DataFrame, shape (n_motifs, n_clusters)
-log10 of the Mann-Whitney U p-value, corrected for multiple
testing using the Benjamini-Hochberg correction
"""
self.act_ = None
self.act_description = (
"activity values: BH-corrected " "-log10 Mann-Whitney U p-value"
)
self.pref_table = "score"
self.supported_tables = ["score"]
self.ptype = "classification"
def fit(self, df_X, df_y):
logger.info("Fitting MWU")
if not df_y.shape[0] == df_X.shape[0]:
raise ValueError("number of regions is not equal")
if df_y.shape[1] != 1:
raise ValueError("y needs to have 1 label column")
# calculate Mann-Whitney U p-values
pvals = []
clusters = df_y[df_y.columns[0]].unique()
for cluster in clusters:
pos = df_X[df_y.iloc[:, 0] == cluster]
neg = df_X[df_y.iloc[:, 0] != cluster]
p = []
for m in pos:
try:
p.append(mannwhitneyu(pos[m], neg[m], alternative="greater")[1])
except Exception as e:
sys.stderr.write(str(e) + "\n")
sys.stderr.write("motif {} failed, setting to p = 1\n".format(m))
p.append(1)
pvals.append(p)
        # correct for multiple testing
pvals = np.array(pvals)
fpr = multipletests(pvals.flatten(), method="fdr_bh")[1].reshape(pvals.shape)
# create output DataFrame
self.act_ = pd.DataFrame(-np.log10(fpr.T), columns=clusters, index=df_X.columns)
logger.info("Done")
@register_predictor("Hypergeom")
class HypergeomMoap(Moap):
def __init__(self, *args, **kwargs):
"""Predict motif activities using hypergeometric p-value
Parameters
----------
Attributes
----------
act_ : DataFrame, shape (n_motifs, n_clusters)
-log10 of the hypergeometric p-value, corrected for multiple
testing using the Benjamini-Hochberg correction
"""
self.act_ = None
self.act_description = (
"activity values: -log10-transformed, BH-corrected "
"hypergeometric p-values"
)
self.pref_table = "count"
self.supported_tables = ["count"]
self.ptype = "classification"
def fit(self, df_X, df_y):
logger.info("Fitting Hypergeom")
if not df_y.shape[0] == df_X.shape[0]:
raise ValueError("number of regions is not equal")
if df_y.shape[1] != 1:
raise ValueError("y needs to have 1 label column")
if set(df_X.dtypes) != set([np.dtype(int)]):
raise ValueError("need motif counts, not scores")
# calculate hypergeometric p-values
pvals = []
clusters = df_y[df_y.columns[0]].unique()
M = df_X.shape[0]
for cluster in clusters:
pos = df_X[df_y.iloc[:, 0] == cluster]
neg = df_X[df_y.iloc[:, 0] != cluster]
pos_true = (pos > 0).sum(0)
pos_false = (pos == 0).sum(0)
neg_true = (neg > 0).sum(0)
p = []
for pt, pf, nt in zip(pos_true, pos_false, neg_true):
n = pt + nt
N = pt + pf
x = pt - 1
p.append(hypergeom.sf(x, M, n, N))
pvals.append(p)
        # correct for multiple testing
pvals = np.array(pvals)
fpr = multipletests(pvals.flatten(), method="fdr_bh")[1].reshape(pvals.shape)
# create output DataFrame
self.act_ = pd.DataFrame(-np.log10(fpr.T), columns=clusters, index=df_X.columns)
logger.info("Done")
@register_predictor("RF")
class RFMoap(Moap):
def __init__(self, ncpus=None):
"""Predict motif activities using a random forest classifier
Parameters
----------
ncpus : int, optional
Number of threads. Default is the number specified in the config.
Attributes
----------
act_ : DataFrame, shape (n_motifs, n_clusters)
feature importances from the model
"""
self.act_ = None
if ncpus is None:
ncpus = int(MotifConfig().get_default_params().get("ncpus", 2))
self.ncpus = ncpus
self.act_description = (
"activity values: feature importances " "from fitted Random Forest model"
)
self.pref_table = "score"
self.supported_tables = ["score", "count"]
self.ptype = "classification"
def fit(self, df_X, df_y):
logger.info("Fitting RF")
if not df_y.shape[0] == df_X.shape[0]:
raise ValueError("number of regions is not equal")
if df_y.shape[1] != 1:
raise ValueError("y needs to have 1 label column")
le = LabelEncoder()
y = le.fit_transform(df_y.iloc[:, 0].values)
clf = RandomForestClassifier(n_estimators=100, n_jobs=self.ncpus)
# Multiclass
if len(le.classes_) > 2:
orc = OneVsRestClassifier(clf)
orc.fit(df_X.values, y)
importances = np.array([c.feature_importances_ for c in orc.estimators_]).T
else: # Only two classes
clf.fit(df_X.values, y)
importances = np.array(
[clf.feature_importances_, clf.feature_importances_]
).T
for i, _ in enumerate(le.classes_):
diff = df_X.loc[y == i].quantile(q=0.75) - df_X.loc[y != i].quantile(q=0.75)
sign = (diff >= 0) * 2 - 1
importances[:, i] *= sign
# create output DataFrame
self.act_ = pd.DataFrame(
importances,
columns=le.inverse_transform(range(len(le.classes_))),
index=df_X.columns,
)
logger.info("Done")
@register_predictor("Lasso")
class LassoMoap(Moap):
def __init__(self, scale=True, kfolds=4, alpha_stepsize=1.0, ncpus=None):
"""Predict motif activities using Lasso MultiTask regression
Parameters
----------
scale : boolean, optional, default True
If ``True``, the motif scores will be scaled
before classification
kfolds : integer, optional, default 5
number of kfolds for parameter search
alpha_stepsize : float, optional, default 1.0
stepsize for use in alpha gridsearch
ncpus : int, optional
Number of threads. Default is the number specified in the config.
Attributes
----------
act_ : DataFrame, shape (n_motifs, n_clusters)
fitted motif activities
sig_ : DataFrame, shape (n_motifs,)
            boolean values: True if a coefficient is above the 99th or below
            the 1st percentile of coefficients obtained from random permutations
"""
self.kfolds = kfolds
self.act_description = "activity values: coefficients from " "fitted model"
self.scale = scale
if ncpus is None:
ncpus = int(MotifConfig().get_default_params().get("ncpus", 2))
self.ncpus = ncpus
# initialize attributes
self.act_ = None
self.sig_ = None
mtk = MultiTaskLasso()
parameters = {"alpha": [np.exp(-x) for x in np.arange(0, 10, alpha_stepsize)]}
self.clf = GridSearchCV(
mtk, parameters, cv=kfolds, n_jobs=self.ncpus, scoring="r2"
)
self.pref_table = "score"
self.supported_tables = ["score", "count"]
self.ptype = "regression"
def fit(self, df_X, df_y, permute=False):
logger.info("Fitting Lasso")
if not df_y.shape[0] == df_X.shape[0]:
raise ValueError("number of regions is not equal")
if self.scale:
# Scale motif scores
df_X[:] = scale(df_X, axis=0)
idx = list(range(df_y.shape[0]))
y = df_y.iloc[idx]
X = df_X.loc[y.index].values
y = y.values
# fit coefficients
coefs = self._get_coefs(X, y)
self.act_ = pd.DataFrame(coefs.T)
# convert labels back to original names
self.act_.columns = df_y.columns
self.act_.index = df_X.columns
if permute:
# Permutations
logger.info("permutations\n")
random_dfs = []
for _ in range(10):
y_random = y[np.random.permutation(range(y.shape[0]))]
coefs = self._get_coefs(X, y_random)
random_dfs.append(pd.DataFrame(coefs.T))
random_df = pd.concat(random_dfs)
# Select cutoff based on percentile
high_cutoffs = random_df.quantile(0.99)
low_cutoffs = random_df.quantile(0.01)
# Set significance
self.sig_ = pd.DataFrame(index=df_X.columns)
self.sig_["sig"] = False
for col, c_high, c_low in zip(self.act_.columns, high_cutoffs, low_cutoffs):
self.sig_["sig"].loc[self.act_[col] >= c_high] = True
self.sig_["sig"].loc[self.act_[col] <= c_low] = True
logger.info("Done")
def _get_coefs(self, X, y):
logger.info("set alpha through cross-validation\n")
# Determine best parameters based on CV
self.clf.fit(X, y)
logger.debug(
"average score ({} fold CV): {}".format(self.kfolds, self.clf.best_score_)
)
logger.info("Estimate coefficients using bootstrapping\n")
n_samples = 0.75 * X.shape[0]
max_samples = X.shape[0]
m = self.clf.best_estimator_
coefs = []
for _ in range(10):
idx = np.random.randint(0, n_samples, max_samples)
m.fit(X[idx], y[idx])
coefs.append(m.coef_)
coefs = np.array(coefs).mean(axis=0)
return coefs
def moap(
inputfile,
method="hypergeom",
scoring=None,
outfile=None,
motiffile=None,
pfmfile=None,
genome=None,
fpr=0.01,
ncpus=None,
subsample=None,
zscore=True,
gc=True,
):
"""Run a single motif activity prediction algorithm.
Parameters
----------
inputfile : str
        File with regions (chr:start-end) in first column and either cluster
name in second column or a table with values.
method : str, optional
Motif activity method to use. Any of 'hypergeom', 'lasso',
'lightningclassification', 'lightningregressor', 'bayesianridge',
'rf', 'xgboost'. Default is 'hypergeom'.
scoring: str, optional
Either 'score' or 'count'
outfile : str, optional
Name of outputfile to save the fitted activity values.
motiffile : str, optional
Table with motif scan results. First column should be exactly the same
regions as in the inputfile.
pfmfile : str, optional
File with motifs in pwm format. Required when motiffile is not
supplied.
genome : str, optional
Genome name, as indexed by gimme. Required when motiffile is not
supplied
fpr : float, optional
FPR for motif scanning
ncpus : int, optional
Number of threads to use. Default is the number specified in the config.
zscore : bool, optional
Use z-score normalized motif scores.
    gc : bool, optional
Use GC% bins for z-score.
Returns
-------
pandas DataFrame with motif activity
"""
if scoring and scoring not in ["score", "count"]:
raise ValueError("valid values are 'score' and 'count'")
if inputfile.endswith("feather"):
df = pd.read_feather(inputfile)
df = df.set_index(df.columns[0])
else:
# read data
df = pd.read_table(inputfile, index_col=0, comment="#")
clf = Moap.create(method, ncpus=ncpus)
if clf.ptype == "classification":
if df.shape[1] != 1:
raise ValueError("1 column expected for {}".format(method))
else:
if np.dtype("object") in set(df.dtypes):
raise ValueError("columns should all be numeric for {}".format(method))
if motiffile is None:
if genome is None:
raise ValueError("need a genome")
pfmfile = pfmfile_location(pfmfile)
try:
motifs = read_motifs(pfmfile)
except Exception:
sys.stderr.write("can't read motifs from {}".format(pfmfile))
raise
# initialize scanner
s = Scanner(ncpus=ncpus)
s.set_motifs(pfmfile)
s.set_genome(genome)
s.set_background(genome=genome)
# scan for motifs
motif_names = [m.id for m in read_motifs(pfmfile)]
scores = []
if method == "classic" or scoring == "count":
logger.info("motif scanning (scores)")
scores = scan_to_table(
inputfile,
genome,
"count",
pfmfile=pfmfile,
ncpus=ncpus,
zscore=zscore,
gc=gc,
)
else:
logger.info("motif scanning (scores)")
scores = scan_to_table(
inputfile,
genome,
"score",
pfmfile=pfmfile,
ncpus=ncpus,
zscore=zscore,
gc=gc,
)
motifs = pd.DataFrame(scores, index=df.index, columns=motif_names)
elif isinstance(motiffile, pd.DataFrame):
motifs = motiffile
else:
motifs = pd.read_table(motiffile, index_col=0, comment="#")
if outfile and os.path.exists(outfile):
out = pd.read_table(outfile, index_col=0, comment="#")
ncols = df.shape[1]
if ncols == 1:
ncols = len(df.iloc[:, 0].unique())
if out.shape[0] == motifs.shape[1] and out.shape[1] == ncols:
logger.warn("%s output already exists... skipping", method)
return out
if subsample is not None:
n = int(subsample * df.shape[0])
logger.debug("Subsampling %d regions", n)
df = df.sample(n)
motifs = motifs.loc[df.index]
if method == "lightningregressor":
outdir = os.path.dirname(outfile)
tmpname = os.path.join(outdir, ".lightning.tmp")
clf.fit(motifs, df, tmpdir=tmpname)
shutil.rmtree(tmpname)
else:
clf.fit(motifs, df)
if outfile:
with open(outfile, "w") as f:
f.write("# maelstrom - GimmeMotifs version {}\n".format(__version__))
f.write("# method: {} with motif {}\n".format(method, scoring))
if genome:
f.write("# genome: {}\n".format(genome))
if isinstance(motiffile, str):
f.write("# motif table: {}\n".format(motiffile))
f.write("# {}\n".format(clf.act_description))
with open(outfile, "a") as f:
clf.act_.to_csv(f, sep="\t")
return clf.act_
```
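The Moap registry above can be exercised on its own; below is a hedged sketch with a small fabricated score table and random labels, purely to show the create()/fit()/act_ flow (the numbers carry no biological meaning).
```python
# Toy demonstration of the predictor registry: list the registered names,
# instantiate one by name, and fit it on fabricated data.
import numpy as np
import pandas as pd

print(Moap.list_predictors())                  # every @register_predictor name
print(Moap.list_classification_predictors())   # those with ptype "classification"

rng = np.random.RandomState(0)
df_X = pd.DataFrame(rng.rand(100, 5),
                    columns=["motif%d" % i for i in range(5)])
df_y = pd.DataFrame({"cluster": rng.choice(["A", "B"], size=100)},
                    index=df_X.index)

clf = Moap.create("mwu")    # lookup is lower-cased, so "MWU" works as well
clf.fit(df_X, df_y)
print(clf.act_.head())      # BH-corrected -log10 Mann-Whitney U p-values
```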
#### File: gimmemotifs/gimmemotifs/plot.py
```python
from __future__ import print_function
from PIL import Image
import seaborn as sns
from mpl_toolkits.axes_grid1 import ImageGrid
from matplotlib.colors import to_hex, Normalize, rgb2hex
from matplotlib.gridspec import GridSpec
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import os
import sys
from tempfile import NamedTemporaryFile
import numpy as np
# Clustering
from scipy.cluster import hierarchy as hier
from gimmemotifs import mytmpdir
# Matplotlib imports
import matplotlib as mpl
mpl.use("Agg", warn=False)
sns.set_style("white")
VALID_EXTENSIONS = [".png", ".pdf", ".svg", ".ps"]
def axes_off(ax):
"""Get rid of all axis ticks, lines, etc.
"""
ax.set_frame_on(False)
ax.axes.get_yaxis().set_visible(False)
ax.axes.get_xaxis().set_visible(False)
def background_gradient(s, m, M, cmap="RdBu_r", low=0, high=0):
rng = M - m
norm = Normalize(m - (rng * low), M + (rng * high))
normed = norm(s.values)
c = [rgb2hex(x) for x in plt.cm.get_cmap(cmap)(normed)]
return ["background-color: %s" % color for color in c]
def roc_plot(outfile, plot_x, plot_y, ids=None):
if ids is None:
ids = []
fig = plt.figure()
fig.add_subplot(111, aspect="equal")
if isinstance(plot_x[0], np.ndarray):
for _i, (x, y) in enumerate(zip(plot_x, plot_y)):
plt.plot(x, y)
else:
plt.plot(plot_x, plot_y)
plt.axis([0, 1, 0, 1])
plt.xlabel("1 - Specificity")
plt.ylabel("Sensitivity")
if len(ids) > 0:
plt.legend(ids, loc=(1.03, 0.2))
if not os.path.splitext(outfile)[-1] in VALID_EXTENSIONS:
outfile += ".png"
plt.savefig(outfile, dpi=300, bbox_inches="tight")
plt.close(fig)
def plot_histogram(
values, outfile, xrange=None, breaks=10, title=None, xlabel=None, color=10
):
plt.clf()
try:
# matplotlib >= 0.99
plt.hist(values, range=xrange, bins=breaks, edgecolor="black")
except Exception:
plt.hist(values, range=xrange, bins=breaks)
plt.xlim(xrange)
if title:
plt.title(title)
plt.ylabel("Frequency")
if xlabel:
plt.xlabel(xlabel)
if not outfile.endswith(".svg"):
outfile += ".svg"
plt.savefig(outfile, format="svg")
# Need to explicitly close, as otherwise a histogram will be shown
# when gimme_motifs() is run from a Jupyter notebook.
plt.close()
def match_plot(plotdata, outfile):
"""Plot list of motifs with database match and p-value
"param plotdata: list of (motif, dbmotif, pval)
"""
fig_h = 2
fig_w = 7
nrows = len(plotdata)
ncols = 2
fig = plt.figure(figsize=(fig_w, nrows * fig_h))
for i, (motif, dbmotif, pval) in enumerate(plotdata):
text = "Motif: %s\nBest match: %s\np-value: %0.2e" % (
motif.id,
dbmotif.id,
pval,
)
grid = ImageGrid(fig, (nrows, ncols, i * 2 + 1), nrows_ncols=(2, 1), axes_pad=0)
for j in range(2):
axes_off(grid[j])
tmp = NamedTemporaryFile(dir=mytmpdir(), suffix=".png", delete=False)
motif.plot_logo(fname=tmp.name, title=False)
grid[0].imshow(plt.imread(tmp.name), interpolation="none")
tmp = NamedTemporaryFile(dir=mytmpdir(), suffix=".png", delete=False)
dbmotif.plot_logo(fname=tmp.name, title=False)
grid[1].imshow(plt.imread(tmp.name), interpolation="none")
ax = plt.subplot(nrows, ncols, i * 2 + 2)
axes_off(ax)
ax.text(0, 0.5, text, horizontalalignment="left", verticalalignment="center")
plt.savefig(outfile, dpi=300, bbox_inches="tight")
plt.close(fig)
def diff_plot(
motifs,
pwms,
names,
freq,
counts,
bgfreq,
bgcounts,
outfile,
mindiff=0,
minenr=3,
minfreq=0.01,
):
w_ratio = np.array([14, len(names), len(names) + 1])
plot_order = [0, 1, 2]
nbar = 5
freq = np.array(freq)
counts = np.array(counts)
bgfreq = np.array([[x] for x in bgfreq])
enr = np.log2(np.divide(freq, bgfreq))
filt = np.ones(len(enr), dtype="bool")
filters = [
np.sum(enr > minenr, 1) > 0,
np.sum(freq > minfreq, 1) > 0,
(np.max(enr, 1) - np.min(enr, 1)) > mindiff,
np.sum(counts > 2, 1) > 0,
]
for f in filters:
filt = np.logical_and(filt, f)
motifs = np.array(motifs)[filt]
freq = freq[filt]
bgfreq = bgfreq[filt]
enr = enr[filt]
sys.stderr
for m, f, b, e in zip(motifs, freq, bgfreq, enr):
sys.stderr.write(
"{0}\t{1}\t{2}\t{3}\n".format(
m, "\t".join(str(x) for x in e), "\t".join(str(x) for x in f), b[0]
)
)
if len(freq) == 0:
sys.stderr.write("No enriched and/or differential motifs found.\n")
return
elif len(freq) >= 3:
z = hier.linkage(freq, method="complete", metric="correlation")
ind = hier.leaves_list(z)
else:
ind = np.arange(len(freq))
fig = plt.figure(
figsize=((5 + 0.75 * len(names)) * 3, (0.3 * len(motifs) + 1.5) * 3)
)
gs = GridSpec(
len(motifs) + 3 + nbar,
3,
height_ratios=[1] * nbar + [3] * (len(motifs) + 3),
width_ratios=w_ratio[plot_order],
)
# Colormaps
c1 = mpl.cm.RdBu
c2 = mpl.cm.Blues
# Frequency plot #
# Create axis
ax = plt.subplot(gs[nbar:-3, plot_order[2]])
# Plot frequencies
vmin = 0
vmax = 0.3
pfreq = np.hstack((freq, bgfreq))
ax.pcolormesh(pfreq[ind], cmap=c2, vmin=vmin, vmax=vmax)
sm = plt.cm.ScalarMappable(cmap=c2, norm=Normalize(vmin=vmin, vmax=vmax))
# Show percentages
for y, row in enumerate(pfreq[ind]):
for x, val in enumerate(row):
v = vmax
if val >= (vmin + ((vmax - vmin) / 2)):
v = vmin
plt.text(
x + 0.5,
y + 0.5,
"{:.1%}".format(val),
ha="center",
va="center",
color=sm.to_rgba(v),
)
# Hide most labels
plt.setp(ax.get_xticklines(), visible=False)
plt.setp(ax.get_yticklines(), visible=False)
plt.setp(ax.get_yticklabels(), visible=False)
# Set the X labels
ticks = np.arange(len(names) + 1) + 0.5
plt.xticks(ticks, names + ["background"], rotation=30, ha="right")
ax.set_ylim(0, len(motifs))
# Title
plt.title("Frequency")
# Colorbar
# pylint: disable=protected-access
sm._A = []
cax = plt.subplot(gs[0, plot_order[2]])
cb = fig.colorbar(sm, cax=cax, ticks=[0, 0.3], orientation="horizontal")
cb.ax.set_xticklabels(["0%", "30%"])
# Enrichment plot
ax = plt.subplot(gs[nbar:-3, plot_order[1]])
vmin = -10
vmax = 10
ax.pcolormesh(enr[ind], cmap=c1, vmin=vmin, vmax=vmax)
for y, row in enumerate(enr[ind]):
for x, val in enumerate(row):
col = "black"
if val >= (vmin + ((vmax - vmin) / 8.0 * 7)):
col = "white"
elif val <= (vmin + ((vmax - vmin) / 8.0)):
col = "white"
plt.text(
x + 0.5,
y + 0.5,
"{:.1f}".format(val),
ha="center",
va="center",
color=col,
)
ticks = np.arange(len(names)) + 0.5
plt.xticks(ticks, names, rotation=30, ha="right")
# plt.setp(plt.xticks()[1], rotation=30)
# for label in labels:
# label.set_rotation(30)
ticks = np.arange(len(motifs)) + 0.5
plt.yticks(ticks, motifs[ind])
plt.setp(ax.get_xticklines(), visible=False)
plt.setp(ax.get_yticklines(), visible=False)
ax.set_ylim(0, len(motifs))
# Title
plt.title("Enrichment (log2)")
# Colorbar
sm = plt.cm.ScalarMappable(cmap=c1, norm=Normalize(vmin=vmin, vmax=vmax))
sm._A = []
cax = plt.subplot(gs[0, plot_order[1]])
cb = fig.colorbar(sm, cax=cax, ticks=[vmin, 0, vmax], orientation="horizontal")
cb.ax.set_xticklabels([vmin, 0, vmax])
# Motif logos
for i, motif in enumerate(motifs[ind][::-1]):
ax = plt.subplot(gs[i + nbar, plot_order[0]])
axes_off(ax)
tmp = NamedTemporaryFile(dir=mytmpdir(), suffix=".png")
pwms[motif].plot_logo(fname=tmp.name, title=False)
ax.imshow(plt.imread(tmp.name), interpolation="none")
# plt.show()
plt.savefig(outfile, dpi=300, bbox_inches="tight")
plt.close(fig)
def _tree_layout(node):
try:
from ete3 import AttrFace, faces
except ImportError:
print("Please install ete3 to use this functionality")
sys.exit(1)
if node.is_leaf():
nameFace = AttrFace("name", fsize=24, ftype="Nimbus Sans L")
faces.add_face_to_node(nameFace, node, 10, position="branch-right")
def _get_motif_tree(tree, data, circle=True, vmin=None, vmax=None):
try:
from ete3 import Tree, NodeStyle, TreeStyle
except ImportError:
print("Please install ete3 to use this functionality")
sys.exit(1)
t = Tree(tree)
# Determine cutoff for color scale
if not (vmin and vmax):
for i in range(90, 101):
minmax = np.percentile(data.values, i)
if minmax > 0:
break
if not vmin:
vmin = -minmax
if not vmax:
vmax = minmax
norm = Normalize(vmin=vmin, vmax=vmax, clip=True)
mapper = cm.ScalarMappable(norm=norm, cmap="RdBu_r")
m = 25 / data.values.max()
for node in t.traverse("levelorder"):
val = data[[l.name for l in node.get_leaves()]].values.mean()
style = NodeStyle()
style["size"] = 0
style["hz_line_color"] = to_hex(mapper.to_rgba(val))
style["vt_line_color"] = to_hex(mapper.to_rgba(val))
v = max(np.abs(m * val), 5)
style["vt_line_width"] = v
style["hz_line_width"] = v
node.set_style(style)
ts = TreeStyle()
ts.layout_fn = _tree_layout
ts.show_leaf_name = False
ts.show_scale = False
ts.branch_vertical_margin = 10
if circle:
ts.mode = "c"
ts.arc_start = 180 # 0 degrees = 3 o'clock
ts.arc_span = 180
return t, ts
def motif_tree_plot(outfile, tree, data, circle=True, vmin=None, vmax=None, dpi=300):
"""
Plot a "phylogenetic" tree
"""
# Define the tree
t, ts = _get_motif_tree(tree, data, circle, vmin, vmax)
# Save image
t.render(outfile, tree_style=ts, w=100, dpi=dpi, units="mm")
# Remove the bottom (empty) half of the figure
if circle:
img = Image.open(outfile)
size = img.size[0]
spacer = 50
img.crop((0, 0, size, size / 2 + spacer)).save(outfile)
```
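A hedged sketch of the two simplest helpers above with synthetic values; the output file names are placeholders, and the functions append the .svg/.png extensions themselves.
```python
# Synthetic-data example for plot_histogram() and roc_plot(); the values are
# random / analytic and only illustrate the call signatures used above.
import numpy as np

positions = np.random.randn(1000) * 40        # fake motif positions
plot_histogram(positions, "positions", xrange=(-150, 150), breaks=21,
               title="example histogram", xlabel="Position")   # -> positions.svg

x = np.linspace(0, 1, 50)
roc_plot("roc_example", x, np.sqrt(x), ids=["toy curve"])       # -> roc_example.png
```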
#### File: gimmemotifs/gimmemotifs/shutils.py
```python
import os
import subprocess as sp
def which(fname):
"""Find location of executable."""
if "PATH" not in os.environ or not os.environ["PATH"]:
path = os.defpath
else:
path = os.environ["PATH"]
for p in [fname] + [os.path.join(x, fname) for x in path.split(os.pathsep)]:
p = os.path.abspath(p)
if os.access(p, os.X_OK) and not os.path.isdir(p):
return p
p = sp.Popen("locate %s" % fname, shell=True, stdout=sp.PIPE, stderr=sp.PIPE)
(stdout, stderr) = p.communicate()
if not stderr:
for p in stdout.decode().split("\n"):
if (
(os.path.basename(p) == fname)
and (os.access(p, os.X_OK))
and (not os.path.isdir(p))
):
return p
def find_by_ext(dirname, ext):
"""Find all files in a directory by extension."""
# Get all fasta-files
try:
files = os.listdir(dirname)
except OSError:
if os.path.exists(dirname):
cmd = 'find {0} -maxdepth 1 -name "*"'.format(dirname)
p = sp.Popen(cmd, shell=True, stdout=sp.PIPE, stderr=sp.PIPE)
stdout, _stderr = p.communicate()
files = [os.path.basename(fname) for fname in stdout.decode().splitlines()]
else:
raise
retfiles = [
os.path.join(dirname, fname)
for fname in files
if os.path.splitext(fname)[-1] in ext
]
return retfiles
```
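For completeness, a short sketch of the two helpers above; the names are placeholders, and which() may fall back to the external locate command when the PATH search fails.
```python
# Minimal use of the shell utilities defined above.
print(which("sort"))                        # absolute path of the executable, or None
print(find_by_ext(".", [".py", ".fa"]))     # files in the current directory by extension
```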
#### File: gimmemotifs/tools/dreme.py
```python
from .motifprogram import MotifProgram
import os
from subprocess import Popen, PIPE
from gimmemotifs.motif import read_motifs
class Dreme(MotifProgram):
"""
Predict motifs using DREME.
Reference: Bailey, 2011, https://doi.org/10.1093/bioinformatics/btr261
"""
def __init__(self):
self.name = "DREME"
self.cmd = "dreme-py3"
self.use_width = True
def _run_program(self, bin, fastafile, params=None):
"""
        Run DREME and predict motifs from a FASTA file.
Parameters
----------
bin : str
Command used to run the tool.
fastafile : str
Name of the FASTA input file.
params : dict, optional
Optional parameters. For some of the tools required parameters
are passed using this dictionary.
Returns
-------
motifs : list of Motif instances
The predicted motifs.
stdout : str
Standard out of the tool.
stderr : str
Standard error of the tool.
"""
default_params = {"single": False, "number": 10}
if params is not None:
default_params.update(params)
outfile = os.path.join(self.tmpdir, "dreme.txt")
strand = " -norc "
number = default_params["number"]
cmd = [bin, "-p", fastafile, "-m", "%s" % number, "-oc", self.tmpdir]
if default_params["background"]:
cmd += ["-n", default_params["background"]]
if default_params["single"]:
cmd.append(strand)
p = Popen(cmd, bufsize=1, stderr=PIPE, stdout=PIPE)
stdout, stderr = p.communicate()
motifs = read_motifs(outfile, fmt="meme")
for motif in motifs:
motif.id = self.name + "_" + motif.id
return motifs, stdout, stderr
```
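The tool wrappers in this package are normally driven through a shared MotifProgram entry point that is not shown here; the sketch below therefore calls _run_program() directly and has to fake the tmpdir attribute and the binary path, both of which are assumptions.
```python
# Hypothetical direct call of Dreme._run_program(); in regular use the base
# class is responsible for tmpdir and for locating the dreme-py3 executable.
import tempfile

d = Dreme()
d.tmpdir = tempfile.mkdtemp()        # assumption: tmpdir is a plain attribute
motifs, stdout, stderr = d._run_program(
    "dreme-py3",                     # path/name of the DREME executable
    "peaks.fa",                      # placeholder input FASTA
    params={"number": 5, "background": "shuffled.fa", "single": False},
)
print(len(motifs), "motifs:", [m.id for m in motifs])
```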
#### File: gimmemotifs/tools/posmo.py
```python
from .motifprogram import MotifProgram
import os
import shutil
from subprocess import Popen, PIPE
from gimmemotifs.motif import Motif
class Posmo(MotifProgram):
"""
Predict motifs using Posmo.
Reference:
"""
def __init__(self):
self.name = "Posmo"
self.cmd = "posmo"
self.use_width = True
def _run_program(self, bin, fastafile, params=None):
"""
Run Posmo and predict motifs from a FASTA file.
Parameters
----------
bin : str
Command used to run the tool.
fastafile : str
Name of the FASTA input file.
params : dict, optional
Optional parameters. For some of the tools required parameters
are passed using this dictionary.
Returns
-------
motifs : list of Motif instances
The predicted motifs.
stdout : str
Standard out of the tool.
stderr : str
Standard error of the tool.
"""
default_params = {}
if params is not None:
default_params.update(params)
width = params.get("width", 8)
basename = "posmo_in.fa"
new_file = os.path.join(self.tmpdir, basename)
shutil.copy(fastafile, new_file)
fastafile = new_file
# pfmfile = fastafile + ".pwm"
motifs = []
current_path = os.getcwd()
os.chdir(self.tmpdir)
for n_ones in range(4, min(width, 11), 2):
x = "1" * n_ones
outfile = "%s.%s.out" % (fastafile, x)
cmd = "%s 5000 %s %s 1.6 2.5 %s 200" % (bin, x, fastafile, width)
p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
stdout = stdout.decode()
stderr = stderr.decode()
context_file = fastafile.replace(
basename, "context.%s.%s.txt" % (basename, x)
)
cmd = "%s %s %s simi.txt 0.88 10 2 10" % (
bin.replace("posmo", "clusterwd"),
context_file,
outfile,
)
p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
stdout += out.decode()
stderr += err.decode()
if os.path.exists(outfile):
with open(outfile) as f:
motifs += self.parse(f, width, n_ones)
os.chdir(current_path)
return motifs, stdout, stderr
def parse(self, fo, width, seed=None):
"""
Convert Posmo output to motifs
Parameters
----------
fo : file-like
File object containing Posmo output.
Returns
-------
motifs : list
List of Motif instances.
"""
motifs = []
lines = [fo.readline() for x in range(6)]
while lines[0]:
matrix = [
[float(x) for x in line.strip().split("\t")] for line in lines[2:]
]
matrix = [[matrix[x][y] for x in range(4)] for y in range(len(matrix[0]))]
m = Motif(matrix)
m.trim(0.1)
m.id = lines[0].strip().split(" ")[-1]
motifs.append(m)
lines = [fo.readline() for x in range(6)]
for i, motif in enumerate(motifs):
if seed:
motif.id = "%s_w%s.%s_%s" % (self.name, width, seed, i + 1)
else:
motif.id = "%s_w%s_%s" % (self.name, width, i + 1)
motif.trim(0.25)
return motifs
```
#### File: gimmemotifs/tools/prosampler.py
```python
from .motifprogram import MotifProgram
import os
from subprocess import Popen, PIPE
class ProSampler(MotifProgram):
"""
Predict motifs using ProSampler.
Reference: Li et al., 2019, doi: 10.1093/bioinformatics/btz290
"""
def __init__(self):
self.name = "ProSampler"
self.cmd = "ProSampler"
self.use_width = False
self.default_params = {"single": False, "background": None}
def _parse_params(self, params=None):
"""
Parse parameters.
Combine default and user-defined parameters.
"""
prm = super()._parse_params(params, needs_background=True)
prm["strand"] = " -p 2 "
if prm["single"]:
prm["strand"] = " -p 1 "
return prm
def _run_program(self, bin, fastafile, params=None):
"""
Run ProSampler and predict motifs from a FASTA file.
Parameters
----------
bin : str
Command used to run the tool.
fastafile : str
Name of the FASTA input file.
params : dict, optional
Optional parameters. For some of the tools required parameters
are passed using this dictionary.
Returns
-------
motifs : list of Motif instances
The predicted motifs.
stdout : str
Standard out of the tool.
stderr : str
Standard error of the tool.
"""
params = self._parse_params(params)
outfile = os.path.join(self.tmpdir, "ProSampler.meme")
stdout = ""
stderr = ""
cmd = "%s -i %s -b %s -o %s %s" % (
bin,
fastafile,
params["background"],
os.path.join(self.tmpdir, "ProSampler"),
params["strand"],
)
p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE, cwd=self.tmpdir)
out, err = p.communicate()
stdout += out.decode()
stderr += err.decode()
motifs, stdout, stderr = self._read_and_label_motifs(
outfile, stdout, stderr, fmt="meme"
)
return motifs, stdout, stderr
```
#### File: gimmemotifs/tools/trawler.py
```python
from .motifprogram import MotifProgram
import glob
import os
import shutil
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile
from gimmemotifs.motif import read_motifs
class Trawler(MotifProgram):
"""
Predict motifs using Trawler.
Reference: Ettwiller, 2010; PMID: 17589518
"""
def __init__(self):
self.name = "trawler"
self.cmd = "trawler"
self.use_width = False
self.default_params = {"single": False, "background": None}
def _parse_params(self, params=None):
"""
Parse parameters.
Combine default and user-defined parameters.
"""
        prm = super()._parse_params(params)
prm["strand"] = "double"
if prm["single"]:
prm["strand"] = "single"
return prm
def _run_program(self, bin, fastafile, params=None):
"""
Run Trawler and predict motifs from a FASTA file.
Parameters
----------
bin : str
Command used to run the tool.
fastafile : str
Name of the FASTA input file.
params : dict, optional
Optional parameters. For some of the tools required parameters
are passed using this dictionary.
Returns
-------
motifs : list of Motif instances
The predicted motifs.
stdout : str
Standard out of the tool.
stderr : str
Standard error of the tool.
"""
params = self._parse_params(params)
tmp = NamedTemporaryFile(mode="w", dir=self.tmpdir, delete=False)
shutil.copy(fastafile, tmp.name)
fastafile = tmp.name
current_path = os.getcwd()
os.chdir(self.dir())
motifs = []
stdout = ""
stderr = ""
for wildcard in [0, 1, 2]:
cmd = (
"%s -sample %s -background %s -directory %s -strand %s -wildcard %s"
% (
bin,
fastafile,
params["background"],
self.tmpdir,
params["strand"],
wildcard,
)
)
p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
stdout += out.decode()
stderr += err.decode()
os.chdir(current_path)
pfmfiles = glob.glob("{}/tmp*/result/*pwm".format(self.tmpdir))
if len(pfmfiles) > 0:
out_file = pfmfiles[0]
stdout += "\nOutfile: {}".format(out_file)
my_motifs = []
if os.path.exists(out_file):
my_motifs = read_motifs(out_file, fmt="pwm")
for m in motifs:
m.id = "{}_{}".format(self.name, m.id)
stdout += "\nTrawler: {} motifs".format(len(motifs))
# remove temporary files
if os.path.exists(tmp.name):
os.unlink(tmp.name)
for motif in my_motifs:
motif.id = "{}_{}_{}".format(self.name, wildcard, motif.id)
motifs += my_motifs
else:
stderr += "\nNo outfile found"
return motifs, stdout, stderr
```
#### File: gimmemotifs/tools/xxmotif.py
```python
from .motifprogram import MotifProgram
import os
from subprocess import Popen, PIPE
class XXmotif(MotifProgram):
"""
Predict motifs using XXmotif.
Reference:
"""
def __init__(self):
self.name = "XXmotif"
self.cmd = "XXmotif"
self.use_width = False
self.default_params = {
"single": False,
"background": None,
"analysis": "medium",
"number": 5,
"width": 10,
}
def _parse_params(self, params=None):
"""
Parse parameters.
Combine default and user-defined parameters.
"""
prm = super()._parse_params(params)
if prm["background"]:
# Absolute path, just to be sure
prm["background"] = " --negSet {0} ".format(prm["background"])
prm["strand"] = ""
if not prm["single"]:
prm["strand"] = " --revcomp "
return prm
def _run_program(self, bin, fastafile, params=None):
"""
Run XXmotif and predict motifs from a FASTA file.
Parameters
----------
bin : str
Command used to run the tool.
fastafile : str
Name of the FASTA input file.
params : dict, optional
Optional parameters. For some of the tools required parameters
are passed using this dictionary.
Returns
-------
motifs : list of Motif instances
The predicted motifs.
stdout : str
Standard out of the tool.
stderr : str
Standard error of the tool.
"""
params = self._parse_params(params)
outfile = os.path.join(
self.tmpdir, os.path.basename(fastafile.replace(".fa", ".pwm"))
)
stdout = ""
stderr = ""
cmd = "%s %s %s --localization --batch %s %s" % (
bin,
self.tmpdir,
fastafile,
params["background"],
params["strand"],
)
p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
stdout += out.decode()
stderr += err.decode()
motifs, stdout, stderr = self._read_and_label_motifs(
outfile, stdout, stderr, fmt="xxmotif"
)
return motifs, stdout, stderr
```
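The wrappers above all follow the same shape; as an illustration of that pattern (not part of gimmemotifs), here is a skeleton for a hypothetical additional tool, reusing only attributes and helpers that the classes above already rely on (tmpdir, _read_and_label_motifs, the default_params/_parse_params split).
```python
# Skeleton wrapper for an invented tool, mirroring the structure of the
# ProSampler/XXmotif classes above; every flag and file name here is made up.
import os
from subprocess import Popen, PIPE

from gimmemotifs.tools.motifprogram import MotifProgram  # path taken from the imports above


class MyTool(MotifProgram):
    """Predict motifs using a hypothetical external tool."""

    def __init__(self):
        self.name = "MyTool"
        self.cmd = "mytool"
        self.use_width = False
        self.default_params = {"single": False, "background": None}

    def _parse_params(self, params=None):
        prm = super()._parse_params(params)
        prm["strand"] = "" if prm["single"] else " --both-strands "
        return prm

    def _run_program(self, bin, fastafile, params=None):
        params = self._parse_params(params)
        outfile = os.path.join(self.tmpdir, "mytool.meme")
        cmd = "%s -i %s -o %s %s" % (bin, fastafile, outfile, params["strand"])
        p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE, cwd=self.tmpdir)
        out, err = p.communicate()
        return self._read_and_label_motifs(outfile, out.decode(), err.decode(),
                                            fmt="meme")
```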
#### File: gimmemotifs/gimmemotifs/utils.py
```python
from __future__ import print_function
# Python imports
import os
import re
import sys
import hashlib
import logging
import mmap
import random
import six
import tempfile
import requests
from subprocess import Popen
from tempfile import NamedTemporaryFile
from shutil import copyfile
# External imports
from scipy import special
import numpy as np
import pybedtools
from genomepy import Genome
# gimme imports
from gimmemotifs.fasta import Fasta
from gimmemotifs.plot import plot_histogram
from gimmemotifs.rocmetrics import ks_pvalue
from gimmemotifs.config import MotifConfig
logger = logging.getLogger("gimme.utils")
# pylint: disable=no-member
lgam = special.gammaln
def rc(seq):
""" Return reverse complement of sequence """
d = str.maketrans("actgACTG", "tgacTGAC")
return seq[::-1].translate(d)
def narrowpeak_to_bed(inputfile, bedfile, size=0):
"""Convert narrowPeak file to BED file.
"""
p = re.compile(r"^(#|track|browser)")
warn_no_summit = True
with open(bedfile, "w") as f_out:
with open(inputfile) as f_in:
for line in f_in:
if p.search(line):
continue
vals = line.strip().split("\t")
start, end = int(vals[1]), int(vals[2])
if size > 0:
summit = int(vals[9])
if summit == -1:
if warn_no_summit:
logger.warn(
"No summit present in narrowPeak file, "
"using the peak center."
)
warn_no_summit = False
summit = (end - start) // 2
start = start + summit - (size // 2)
end = start + size
f_out.write("{}\t{}\t{}\t{}\n".format(vals[0], start, end, vals[6]))
def pfmfile_location(infile):
config = MotifConfig()
if infile is None:
infile = config.get_default_params().get("motif_db", None)
if infile is None:
raise ValueError(
"No motif file was given and no default "
"database specified in the config file."
)
if isinstance(infile, six.string_types):
if not os.path.exists(infile):
motif_dir = config.get_motif_dir()
checkfile = os.path.join(motif_dir, infile)
if os.path.exists(checkfile):
infile = checkfile
else:
for ext in [".pfm", ".pwm"]:
if os.path.exists(checkfile + ext):
infile = checkfile + ext
break
if not os.path.exists(infile):
raise ValueError("Motif file {} not found".format(infile))
return infile
def get_jaspar_motif_info(motif_id):
query_url = "http://jaspar.genereg.net/api/v1/matrix/{}?format=json"
result = requests.get(query_url.format(motif_id))
if not result.ok:
result.raise_for_status()
sys.exit()
return result.json()
def phyper_single(k, good, bad, N):
return np.exp(
lgam(good + 1)
- lgam(good - k + 1)
- lgam(k + 1)
+ lgam(bad + 1)
- lgam(bad - N + k + 1)
- lgam(N - k + 1)
- lgam(bad + good + 1)
+ lgam(bad + good - N + 1)
+ lgam(N + 1)
)
def phyper(k, good, bad, N):
""" Current hypergeometric implementation in scipy is broken,
so here's the correct version.
"""
pvalues = [phyper_single(x, good, bad, N) for x in range(k + 1, N + 1)]
return np.sum(pvalues)
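# Cross-check: phyper_single(k, good, bad, N) is the hypergeometric PMF for
# drawing k "good" items in N draws from a population of good + bad, so
# phyper(k, good, bad, N) is the upper tail P(X > k); with a recent SciPy it
# should agree with scipy.stats.hypergeom.sf(k, good + bad, good, N) up to
# floating-point error (e.g. phyper(3, 20, 80, 10) vs hypergeom.sf(3, 100, 20, 10)).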
def divide_file(fname, sample, rest, fraction, abs_max):
with open(fname) as f:
lines = f.readlines()
# random.seed()
random.shuffle(lines)
x = int(fraction * len(lines))
if x > abs_max:
x = abs_max
tmp = tempfile.NamedTemporaryFile(mode="w", delete=False)
# Fraction as sample
for line in lines[:x]:
tmp.write(line)
tmp.flush()
# Make sure it is sorted for tools that use this information (MDmodule)
stdout, stderr = Popen(
"sort -k4gr %s > %s" % (tmp.name, sample), shell=True
).communicate()
tmp.close()
if stderr:
print("Something went wrong.\nstdout: {}\nstderr; {}".format(stdout, stderr))
sys.exit()
# Rest
f = open(rest, "w")
for line in lines[x:]:
f.write(line)
f.close()
# if os.path.exists(tmp.name):
# os.unlink(tmp.name)
return x, len(lines[x:])
def divide_fa_file(fname, sample, rest, fraction, abs_max):
fa = Fasta(fname)
ids = fa.ids[:]
x = int(fraction * len(ids))
if x > abs_max:
x = abs_max
sample_seqs = random.sample(ids, x)
# Rest
f_sample = open(sample, "w")
f_rest = open(rest, "w")
for name, seq in fa.items():
if name in sample_seqs:
f_sample.write(">%s\n%s\n" % (name, seq))
else:
f_rest.write(">%s\n%s\n" % (name, seq))
f_sample.close()
f_rest.close()
return x, len(ids[x:])
def write_equalsize_bedfile(bedfile, size, outfile):
"""Read input from <bedfile>, set the size of all entries to <size> and
write the result to <outfile>.
Input file needs to be in BED or WIG format."""
if size <= 0:
copyfile(bedfile, outfile)
return
BUFSIZE = 10000
f = open(bedfile)
out = open(outfile, "w")
lines = f.readlines(BUFSIZE)
line_count = 0
while lines:
for line in lines:
line_count += 1
if (
not line.startswith("#")
and not line.startswith("track")
and not line.startswith("browser")
):
vals = line.strip().split("\t")
try:
start, end = int(vals[1]), int(vals[2])
except ValueError:
print(
"Error on line %s while reading %s. "
"Is the file in BED or WIG format?" % (line_count, bedfile)
)
sys.exit(1)
start = (start + end) // 2 - (size // 2)
# This shifts the center, but ensures the size is identical...
# maybe not ideal
if start < 0:
start = 0
end = start + size
# Keep all the other information in the bedfile if it's there
if len(vals) > 3:
out.write(
"%s\t%s\t%s\t%s\n" % (vals[0], start, end, "\t".join(vals[3:]))
)
else:
out.write("%s\t%s\t%s\n" % (vals[0], start, end))
lines = f.readlines(BUFSIZE)
out.close()
f.close()
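# Usage sketch (hypothetical file names): rewrite all regions so that each one
# is exactly 200 bp wide, centered on the original region's midpoint.
# write_equalsize_bedfile("peaks.bed", 200, "peaks.200bp.bed")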
def median_bed_len(bedfile):
f = open(bedfile)
lengths = []
for i, line in enumerate(f.readlines()):
if not (line.startswith("browser") or line.startswith("track")):
vals = line.split("\t")
try:
lengths.append(int(vals[2]) - int(vals[1]))
except ValueError:
sys.stderr.write(
"Error in line %s: "
"coordinates in column 2 and 3 need to be integers!\n" % (i)
)
sys.exit(1)
f.close()
return np.median(lengths)
def motif_localization(fastafile, motif, size, outfile, cutoff=0.9):
NR_HIST_MATCHES = 100
matches = motif.pwm_scan(Fasta(fastafile), cutoff=cutoff, nreport=NR_HIST_MATCHES)
if len(matches) > 0:
ar = []
for a in matches.values():
ar += a
matches = np.array(ar)
p = ks_pvalue(matches, size - len(motif))
plot_histogram(
matches - size / 2 + len(motif) / 2,
outfile,
xrange=(-size / 2, size / 2),
breaks=21,
title="%s (p=%0.2e)" % (motif.id, p),
xlabel="Position",
)
return motif.id, p
else:
return motif.id, 1.0
def parse_cutoff(motifs, cutoff, default=0.9):
""" Provide either a file with one cutoff per motif or a single cutoff
returns a hash with motif id as key and cutoff as value
"""
cutoffs = {}
if os.path.isfile(str(cutoff)):
for i, line in enumerate(open(cutoff)):
if line != "Motif\tScore\tCutoff\n":
try:
motif, _, c = line.strip().split("\t")
c = float(c)
cutoffs[motif] = c
except Exception as e:
sys.stderr.write(
"Error parsing cutoff file, line {0}: {1}\n".format(e, i + 1)
)
sys.exit(1)
else:
for motif in motifs:
cutoffs[motif.id] = float(cutoff)
for motif in motifs:
if motif.id not in cutoffs:
sys.stderr.write(
"No cutoff found for {0}, using default {1}\n".format(motif.id, default)
)
cutoffs[motif.id] = default
return cutoffs
def _treesort(order, nodeorder, nodecounts, tree):
# From the Pycluster library, <NAME>
# Find the order of the nodes consistent with the hierarchical clustering
# tree, taking into account the preferred order of nodes.
nNodes = len(tree)
nElements = nNodes + 1
neworder = np.zeros(nElements)
clusterids = np.arange(nElements)
for i in range(nNodes):
i1 = tree[i].left
i2 = tree[i].right
if i1 < 0:
order1 = nodeorder[-i1 - 1]
count1 = nodecounts[-i1 - 1]
else:
order1 = order[i1]
count1 = 1
if i2 < 0:
order2 = nodeorder[-i2 - 1]
count2 = nodecounts[-i2 - 1]
else:
order2 = order[i2]
count2 = 1
# If order1 and order2 are equal, their order is determined
# by the order in which they were clustered
if i1 < i2:
if order1 < order2:
increase = count1
else:
increase = count2
for j in range(nElements):
clusterid = clusterids[j]
if clusterid == i1 and order1 >= order2:
neworder[j] += increase
if clusterid == i2 and order1 < order2:
neworder[j] += increase
if clusterid == i1 or clusterid == i2:
clusterids[j] = -i - 1
else:
if order1 <= order2:
increase = count1
else:
increase = count2
for j in range(nElements):
clusterid = clusterids[j]
if clusterid == i1 and order1 > order2:
neworder[j] += increase
if clusterid == i2 and order1 <= order2:
neworder[j] += increase
if clusterid == i1 or clusterid == i2:
clusterids[j] = -i - 1
return np.argsort(neworder)
def number_of_seqs_in_file(fname):
try:
fa = Fasta(fname)
return len(fa)
except Exception:
pass
try:
bed = pybedtools.BedTool(fname)
return len([x for x in bed])
except Exception:
pass
sys.stderr.write("unknown filetype {}\n".format(fname))
sys.exit(1)
def determine_file_type(fname):
"""
Detect file type.
The following file types are supported:
BED, narrowPeak, FASTA, list of chr:start-end regions
If the extension is bed, fa, fasta or narrowPeak, we will believe this
without checking!
Parameters
----------
fname : str
File name.
Returns
-------
filetype : str
File type as a lower-case string ("bed", "fasta", "narrowpeak", "region" or "unknown").
"""
if not isinstance(fname, str):
raise ValueError("{} is not a file name!".format(fname))
if not os.path.isfile(fname):
raise ValueError("{} is not a file!".format(fname))
ext = os.path.splitext(fname)[1].lower()
if ext in [".bed"]:
return "bed"
elif ext in [".fa", ".fasta"]:
return "fasta"
elif ext in [".narrowpeak"]:
return "narrowpeak"
try:
Fasta(fname)
return "fasta"
except Exception:
pass
# Read first line that is not a comment or an UCSC-specific line
p = re.compile(r"^(#|track|browser)")
with open(fname) as f:
for line in f.readlines():
line = line.strip()
if not p.search(line):
break
region_p = re.compile(r"^(.+):(\d+)-(\d+)$")
if region_p.search(line):
return "region"
else:
vals = line.split("\t")
if len(vals) >= 3:
try:
_, _ = int(vals[1]), int(vals[2])
except ValueError:
return "unknown"
if len(vals) == 10:
try:
_, _ = int(vals[4]), int(vals[9])
return "narrowpeak"
except ValueError:
# As far as I know there is no 10-column BED format
return "unknown"
return "bed"
# Catch-all
return "unknown"
def get_seqs_type(seqs):
"""
automagically determine input type
the following types are detected:
- Fasta object
- FASTA file
- list of regions
- region file
- BED file
"""
region_p = re.compile(r"^(.+):(\d+)-(\d+)$")
if isinstance(seqs, Fasta):
return "fasta"
elif isinstance(seqs, list) or isinstance(seqs, np.ndarray):
if len(seqs) == 0:
raise ValueError("empty list of sequences to scan")
else:
if region_p.search(seqs[0]):
return "regions"
else:
raise ValueError("unknown region type")
elif isinstance(seqs, str):
if os.path.isfile(seqs):
ftype = determine_file_type(seqs)
if ftype == "unknown":
raise ValueError("unknown type")
elif ftype == "narrowpeak":
raise ValueError("narrowPeak not yet supported in this function")
else:
return ftype + "file"
else:
raise ValueError("no file found with name {}".format(seqs))
else:
raise ValueError("unknown type {}".format(type(seqs).__name__))
def as_fasta(seqs, genome=None):
ftype = get_seqs_type(seqs)
if ftype == "fasta":
return seqs
elif ftype == "fastafile":
return Fasta(seqs)
else:
if genome is None:
raise ValueError("need genome to convert to FASTA")
tmpfa = NamedTemporaryFile()
if isinstance(genome, str):
genome = Genome(genome)
if isinstance(seqs, np.ndarray):
seqs = list(seqs)
genome.track2fasta(seqs, tmpfa.name)
return Fasta(tmpfa.name)
def file_checksum(fname):
"""Return md5 checksum of file.
Note: only works for files < 4GB.
Parameters
----------
fname : str
File used to calculate checksum.
Returns
-------
checksum : str
"""
size = os.path.getsize(fname)
with open(fname, "r+") as f:
checksum = hashlib.md5(mmap.mmap(f.fileno(), size)).hexdigest()
return checksum
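# Usage sketch (hypothetical file name):
# file_checksum("motifs.pfm") # -> e.g. "9e107d9d372bb6826bd81d3542a419d6"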
def join_max(a, l, sep="", suffix=""):
lengths = [len(x) for x in a]
total = 0
for i, size in enumerate(lengths + [0]):
if total > (l - len(suffix)):
return sep.join(a[: i - 1]) + suffix
if i > 0:
total += 1
total += size
return sep.join(a)
def check_genome(genome):
"""Check if genome is a valid FASTA file or genomepy genome genome.
Parameters
----------
genome : str
Genome name or file to check.
Returns
-------
is_genome : bool
"""
try:
Genome(genome)
return True
except Exception:
pass
return False
```
|
{
"source": "jgasteiz/fuzzingtheweb",
"score": 2
}
|
#### File: fuzzingtheweb/blogadmin/forms.py
```python
from django import forms
from markitup.widgets import AdminMarkItUpWidget
from blog.models import Post
class BaseInput(forms.Widget):
class Meta:
abstract = True
class BaseModelForm(forms.ModelForm):
class Meta:
abstract = True
def __init__(self, *args, **kwargs):
super(BaseModelForm, self).__init__(*args, **kwargs)
self.set_required_fields()
self.set_markitup_fields()
def set_required_fields(self):
""" Sets the 'required' attribute to 'true' on necessary form fields """
for field in self.fields:
if self.fields[field].required:
self.fields[field].widget.attrs['required'] = 'true'
def set_markitup_fields(self):
for field in self.fields:
if type(self.fields[field].widget) == forms.Textarea:
self.fields[field].widget.attrs['class'] = 'markitup-editor'
class PostForm(BaseModelForm):
class Meta:
model = Post
```
#### File: fuzzingtheweb/blog/admin.py
```python
from django.contrib import admin
from markitup.widgets import AdminMarkItUpWidget
from .models import Post, NavItem, Widget, Tag, File
class PostAdmin(admin.ModelAdmin):
list_display = ('id', 'title', 'slug', 'published', 'live')
def formfield_for_dbfield(self, db_field, **kwargs):
if db_field.name == 'body':
kwargs['widget'] = AdminMarkItUpWidget()
return super(PostAdmin, self).formfield_for_dbfield(db_field, **kwargs)
class NavItemAdmin(admin.ModelAdmin):
list_display = ('id', 'name', 'url', 'weight')
class WidgetAdmin(admin.ModelAdmin):
list_display = ('id', 'name', 'weight')
class FileAdmin(admin.ModelAdmin):
list_display = ('id', 'title', 'upload_path', 'url')
readonly_fields = ('url',)
class TagAdmin(admin.ModelAdmin):
list_display = ('id', 'name')
admin.site.register(NavItem, NavItemAdmin)
admin.site.register(Widget, WidgetAdmin)
admin.site.register(Post, PostAdmin)
admin.site.register(File, FileAdmin)
admin.site.register(Tag, TagAdmin)
```
#### File: fuzzingtheweb/blog/models.py
```python
from django.db import models
from datetime import datetime
from django.utils.timezone import utc
from blog.utils import uuslug as slugify
from django.utils.translation import ugettext_lazy as __
class PostManager(models.Manager):
def published(self):
return self.filter(
live=True,
page=False,
published__lte=datetime.utcnow().replace(tzinfo=utc))
class NavItem(models.Model):
""" Primary nav bar items """
name = models.CharField(__('Name'), blank=False, max_length=40)
url = models.CharField(__('Url'), blank=True, null=True, default='', max_length=240)
weight = models.IntegerField(default=0)
class Meta:
ordering = ('weight',)
def __unicode__(self):
return self.name
class Widget(models.Model):
""" Sidebar items """
name = models.CharField(__('Name'), blank=True, max_length=40)
body = models.TextField(__('Body'), blank=True)
weight = models.IntegerField(default=0)
class Meta:
ordering = ('weight',)
def __unicode__(self):
return self.name
class Tag(models.Model):
""" Tag item """
name = models.CharField(__('Name'), max_length=60)
def __unicode__(self):
return self.name
class File(models.Model):
""" File item """
title = models.CharField(__('Title'), blank=False, max_length=120)
upload_path = models.FileField(
__('File'),
blank=False,
upload_to='%Y/%m/%d',
default='',
help_text='Select a file to upload')
url = models.CharField(__('Url'), blank=True, max_length=240)
def __unicode__(self):
return self.title
def save(self, *args, **kwargs):
self.url = self.upload_path.url
super(File, self).save(*args, **kwargs)
class Post(models.Model):
""" Blog entry items """
title = models.CharField(__('Title'), blank=False, max_length=120)
slug = models.SlugField(__('Slug'), blank=True, max_length=120)
body = models.TextField(__('Body'))
published = models.DateTimeField(
__('Publish Date'),
default=datetime.now,
help_text=__('Future-dated posts will only be published at the \
specified date and time.'))
live = models.BooleanField(
default=False,
help_text=__('If checked, won\'t be displayed in the public site.'))
page = models.BooleanField(
default=False,
help_text=__('If checked, this will be a page, not a blog post. It \
will be useful for "about" pages and so.'))
objects = PostManager()
mytags = models.ManyToManyField("Tag", blank=True, null=True)
class Meta:
ordering = ('-published',)
get_latest_by = ('published',)
verbose_name, verbose_name_plural = 'Blog Post', 'Blog Posts'
def __unicode__(self):
return self.title
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.title, instance=self)
super(Post, self).save(*args, **kwargs)
```
|
{
"source": "jgasteiz/home-consumption-dashboard",
"score": 2
}
|
#### File: management/commands/load_unit_rates.py
```python
from django.core.management.base import BaseCommand
from domain import unit_rates
class Command(BaseCommand):
help = "Load consumption"
def handle(self, *args, **kwargs):
unit_rates.load_unit_rates()
```
|
{
"source": "jgasteiz/javiman",
"score": 2
}
|
#### File: javiman/blog/models.py
```python
import datetime
from django.utils.text import slugify
from django.db import models
from markupfield.fields import MarkupField
class PostManager(models.Manager):
def get_queryset(self):
return super(PostManager, self).get_queryset()
def published(self):
return super(PostManager, self).get_queryset().filter(is_published=True)
class Post(models.Model):
title = models.CharField(max_length=128)
created = models.DateTimeField(null=True)
updated = models.DateTimeField(null=True)
slug = models.CharField(max_length=128, blank=True)
body = MarkupField(default_markup_type='markdown')
is_published = models.BooleanField(default=False)
objects = PostManager()
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.title)
if not self.id and not self.created:
self.created = datetime.datetime.now()
self.updated = datetime.datetime.now()
super(Post, self).save(*args, **kwargs)
class Meta:
ordering = ('-created',)
class PhotoManager(models.Manager):
def get_queryset(self):
return super(PhotoManager, self).get_queryset()
def published(self):
return super(PhotoManager, self).get_queryset().filter(is_published=True)
class Photo(models.Model):
title = models.CharField(max_length=256, blank=True)
subtitle = models.CharField(max_length=256, blank=True)
url = models.CharField(max_length=256)
is_published = models.BooleanField(default=False)
order = models.IntegerField(default=0)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
objects = PhotoManager()
class Meta:
ordering = ('order', '-created',)
def __unicode__(self):
return self.title
def save(self, *args, **kwargs):
if not self.pk:
for photo in Photo.objects.filter(order__gte=self.order):
photo.order += 1
photo.save()
super(Photo, self).save(*args, **kwargs)
def decrease_order_and_save(self):
self.order = max(0, self.order - 1)
# Get the photo with the same order as this photo and increase its order.
photo_qs = Photo.objects.filter(order=self.order)
if photo_qs.exists():
photo = photo_qs[0]
photo.order = self.order + 1
photo.save()
self.save()
def increase_order_and_save(self):
self.order += 1
# Get the photo with the same order as this photo and decrease its order.
photo_qs = Photo.objects.filter(order=self.order)
if photo_qs.exists():
photo = photo_qs[0]
photo.order = self.order - 1
photo.save()
self.save()
def get_flickr_preview(self):
"""
This will only work for jpeg images stored in flickr.
Given a url like:
//farm1.staticflickr.com/702/20638346971_1c14fc9bff_l.jpg,
this will return something like:
//farm1.staticflickr.com/702/20638346971_1c14fc9bff_m.jpg
"""
image_url = self.url[:-6]
return '{}_m.jpg'.format(image_url)
def get_640(self):
"""
This will only work for jpeg images stored in flickr.
Given a url like:
//farm1.staticflickr.com/702/20638346971_1c14fc9bff_l.jpg,
this will return something like:
//farm1.staticflickr.com/702/20638346971_1c14fc9bff_z.jpg
"""
image_url = self.url[:-6]
return '{}_z.jpg'.format(image_url)
```
#### File: cms/templatetags/form_tags.py
```python
from django import template
from django import forms
register = template.Library()
@register.filter(name='add_css_class')
def add_css_class(field, css_class):
return field.as_widget(attrs={'class': css_class})
@register.filter(name='is_checkbox')
def is_checkbox(field):
return field.field.widget.__class__ == forms.CheckboxInput
```
#### File: javiman/cms/views.py
```python
from django.core.urlresolvers import reverse, reverse_lazy
from django.shortcuts import redirect
from django.views.generic import CreateView, UpdateView, DeleteView, View, ListView
from google.appengine.api import users
from blog.models import Post, Photo
from cms.forms import PostForm, PhotoForm
class RestrictedAccessMixin(object):
def dispatch(self, *args, **kwargs):
if not users.is_current_user_admin():
login_url = '{}?next={}'.format(
reverse('login:djangae_login_redirect'),
reverse('cms:post_list')
)
return redirect(login_url)
return super(RestrictedAccessMixin, self).dispatch(*args, **kwargs)
def get_context_data(self, *args, **kwargs):
ctx = super(RestrictedAccessMixin, self).get_context_data(*args, **kwargs)
ctx['user_email'] = users.get_current_user()
return ctx
class PostListView(RestrictedAccessMixin, ListView):
model = Post
template_name = 'cms/post_list.html'
post_list = PostListView.as_view()
class NewPostView(RestrictedAccessMixin, CreateView):
form_class = PostForm
model = Post
success_url = reverse_lazy('cms:post_list')
template_name = 'cms/post_form.html'
new_post = NewPostView.as_view()
class UpdatePostView(RestrictedAccessMixin, UpdateView):
form_class = PostForm
model = Post
success_url = reverse_lazy('cms:post_list')
template_name = 'cms/post_form.html'
update_post = UpdatePostView.as_view()
class DeletePostView(RestrictedAccessMixin, DeleteView):
model = Post
success_url = reverse_lazy('cms:post_list')
template_name = 'cms/post_confirm_delete.html'
delete_post = DeletePostView.as_view()
class PhotoListView(RestrictedAccessMixin, ListView):
model = Photo
template_name = 'cms/photo_list.html'
photo_list = PhotoListView.as_view()
class NewPhotoView(RestrictedAccessMixin, CreateView):
form_class = PhotoForm
model = Photo
success_url = reverse_lazy('cms:photo_list')
template_name = 'cms/photo_form.html'
new_photo = NewPhotoView.as_view()
class UpdatePhotoView(RestrictedAccessMixin, UpdateView):
form_class = PhotoForm
model = Photo
success_url = reverse_lazy('cms:photo_list')
template_name = 'cms/photo_form.html'
update_photo = UpdatePhotoView.as_view()
class DeletePhotoView(RestrictedAccessMixin, DeleteView):
model = Photo
success_url = reverse_lazy('cms:photo_list')
template_name = 'cms/photo_confirm_delete.html'
delete_photo = DeletePhotoView.as_view()
class SetPhotoOrderView(RestrictedAccessMixin, View):
model = Photo
def post(self, request, *args, **kwargs):
photo = self.model.objects.get(pk=kwargs.get('pk'))
order_modifier = int(request.POST.get('order_modifier'))
if order_modifier > 0:
photo.increase_order_and_save()
else:
photo.decrease_order_and_save()
photo.save()
return redirect('cms:photo_list')
set_photo_order = SetPhotoOrderView.as_view()
```
|
{
"source": "jgasteiz/weather-pwa",
"score": 2
}
|
#### File: weatherservice/tests/factory.py
```python
import datetime
import random
import time
from random import choice
import factory
from django.utils import timezone
from faker import Faker
from faker.providers import BaseProvider
from weatherservice.controller import WeatherServiceController
from weatherservice.models import ForecastDataPoint, Location
class ActiveLocationFactory(factory.DjangoModelFactory):
class Meta:
model = Location
is_active = True
class LocationFactory(factory.DjangoModelFactory):
class Meta:
model = Location
is_active = False
class DailyForecastDataPointFactory(factory.DjangoModelFactory):
class Meta:
model = ForecastDataPoint
data_point_type = ForecastDataPoint.DAILY_FORECAST
datetime = factory.LazyAttribute(lambda a: timezone.now() + datetime.timedelta(days=1))
class HourlyForecastDataPointFactory(factory.DjangoModelFactory):
class Meta:
model = ForecastDataPoint
data_point_type = ForecastDataPoint.HOURLY_FORECAST
datetime = factory.LazyAttribute(lambda a: timezone.now() + datetime.timedelta(hours=1))
class CurrentConditionsForecastDataPointFactory(factory.DjangoModelFactory):
class Meta:
model = ForecastDataPoint
data_point_type = ForecastDataPoint.CURRENT_CONDITIONS
datetime = factory.LazyAttribute(lambda a: timezone.now() - datetime.timedelta(minutes=5))
class WeatherFakerProvider(BaseProvider):
def get_epoch_time(self):
return time.time() - random.randint(60, 3600)
def get_weather_text(self):
return choice(['Sunny', 'Fallout', 'Rainy', 'Smog', 'Thunderstorm'])
def get_accuweather_icon(self):
return choice([int(icon) for icon in WeatherServiceController.ACCUWEATHER_ICON_MAP.keys()])
def get_openweathermap_icon(self):
return choice([icon for icon in WeatherServiceController.OPENWEATHERMAP_ICON_MAP.keys()])
def get_temperature(self):
return float(random.randint(-40, 40))
class CurrentWeatherResponseFaker(object):
def get_current_weather_response(self):
faker = Faker()
faker.add_provider(WeatherFakerProvider)
return {
"weather": [
{
"main": faker.get_weather_text(),
"icon": faker.get_openweathermap_icon()
}
],
"main": {
"temp": faker.get_temperature(),
},
"dt": faker.get_epoch_time(),
}
def get_hourly_forecast_response(self):
pass
```
#### File: website/tests/api_tests.py
```python
import datetime
import pytest
import pytz
from django.conf import settings
from django.test import Client
from django.urls import reverse
from django.utils import timezone
from weatherservice.tests import factory
@pytest.mark.django_db
def test_forecast_api_basic():
active_location = factory.ActiveLocationFactory(name='London, UK')
# Create a few FactoryDataPoints
factory.CurrentConditionsForecastDataPointFactory(
location=active_location,
temperature=20
)
factory.HourlyForecastDataPointFactory(
location=active_location,
temperature=21
)
factory.DailyForecastDataPointFactory(
location=active_location,
temperature=22
)
client = Client()
url = reverse('api_forecast')
response = client.get(url)
assert response.status_code == 200
# Expect a json with the three data points.
current_conditions_json = response.json().get('current_conditions')
hour_forecast_json = response.json().get('hourly_forecast')
day_forecast_json = response.json().get('daily_forecast')
assert len(day_forecast_json) == 1
assert len(hour_forecast_json) == 1
assert current_conditions_json.get('temperature') == 20.0
assert current_conditions_json.get('location_name') == 'London, UK'
assert hour_forecast_json[0].get('temperature') == 21.0
assert hour_forecast_json[0].get('location_name') == 'London, UK'
assert day_forecast_json[0].get('temperature') == 22.0
assert day_forecast_json[0].get('location_name') == 'London, UK'
@pytest.mark.django_db
def test_forecast_api_ignore_past_datapoints():
active_location = factory.ActiveLocationFactory(name='London, UK')
factory.HourlyForecastDataPointFactory(
location=active_location,
datetime=timezone.now() - datetime.timedelta(hours=2)
)
factory.DailyForecastDataPointFactory(
location=active_location,
datetime=timezone.now() - datetime.timedelta(days=2)
)
client = Client()
url = reverse('api_forecast')
response = client.get(url)
assert response.status_code == 200
# Expect a json with the three data points.
current_conditions_json = response.json().get('current_conditions')
hour_forecast_json = response.json().get('hourly_forecast')
day_forecast_json = response.json().get('daily_forecast')
assert current_conditions_json == {}
assert len(day_forecast_json) == 0
assert len(hour_forecast_json) == 0
@pytest.mark.django_db
def test_forecast_api_latest_current_conditions():
active_location = factory.ActiveLocationFactory(name='London, UK')
five_mins_ago = timezone.now() - datetime.timedelta(minutes=5)
factory.CurrentConditionsForecastDataPointFactory(
location=active_location,
datetime=timezone.now() - datetime.timedelta(minutes=10)
)
factory.CurrentConditionsForecastDataPointFactory(
location=active_location,
datetime=five_mins_ago
)
factory.CurrentConditionsForecastDataPointFactory(
location=active_location,
datetime=timezone.now() - datetime.timedelta(minutes=10)
)
client = Client()
url = reverse('api_forecast')
response = client.get(url)
assert response.status_code == 200
# Expect a json with the three data points.
current_conditions_json = response.json().get('current_conditions')
assert current_conditions_json.get('datapoint_time') == five_mins_ago.astimezone(pytz.timezone(settings.TIME_ZONE)).strftime('%B %d, %H:%M')
```
#### File: website/tests/view_tests.py
```python
from django.test import Client
from django.urls import reverse
def test_index():
client = Client()
url = reverse('index')
response = client.get(url)
assert response.status_code == 200
```
|
{
"source": "jgasthaus/antaresia",
"score": 3
}
|
#### File: antaresia/nlp/utils.py
```python
from collections import defaultdict
import numpy as np
def getNGramCounts(data, N):
"""Count the occurences of all N+1 grams in the
data. Outputs a dictionary mapping histories of length N
to a histogram dictionary."""
counts = defaultdict(dict)
for i in xrange(N,len(data)):
history = tuple(data[i-N:i])
obs = data[i]
if obs in counts[history]:
counts[history][obs] += 1
else:
counts[history][obs] = 1
return counts
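# Illustrative example (not part of the original module):
# getNGramCounts("abracadabra", 2)[("a", "b")] == {"r": 2}
# i.e. the history "ab" is followed by "r" twice in the input string.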
def generateContextSymbolPairs(input, n):
for i in xrange(n,len(input)):
yield (input[i-n:i], input[i])
def computeLogLoss(predictor, data, maxContextLength=1000):
N = len(data)
probabilities = np.zeros(N)
for i in xrange(N):
probabilities[i] = predictor(tuple(data[max(0,i-maxContextLength):i]), data[i])
losses = -np.log(probabilities)/np.log(2)
logloss = np.mean(losses)
perplexity = 2**logloss
return logloss, perplexity, probabilities
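# Note (added for clarity): losses are measured in bits (log base 2), so a
# predictor that always assigns probability 1/4 to the observed symbol has a
# log loss of 2 bits per symbol and a perplexity of 2**2 = 4.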
```
|
{
"source": "jgasthaus/gpu_python",
"score": 2
}
|
#### File: jgasthaus/gpu_python/inference.py
```python
from numpy import *
from numpy.random import rand, random_sample
from scipy.maxentropy.maxentutils import logsumexp
import logging
import time
from pylab import * # DEBUG
import sys
from utils import *
from model import *
### RESAMPLING SCHEMES
def multinomial_resampling(weights):
"""Multinomial resampling of the given weights. The counts for each class
are simply drawn from a multinomial distribution with the given weights.
"""
return counts_to_index(rmultinomial(weights,len(weights)))
def residual_resampling(weights):
"""Residual resampling. The counts in each bin are floor(w*N) + N' where
N' is sampled from a multinomial with the residual weights."""
N = weights.shape[0]
counts = floor(weights*N)
R = int(sum(counts))
new_weights = (weights*N - counts)/(N-R)
counts += rmultinomial(new_weights,N-R)
return counts_to_index(array(counts,dtype=int32))
def stratified_resampling(weights):
N = weights.shape[0]
# obtain u_i drawn from U(i/N,(i+1)/N)
us = 1./N*arange(N) + 1./N*rand(N)
return inverseCDF(cumsum(weights),us)
def systematic_resampling(weights):
N = weights.shape[0]
u = 1./N*rand(N)
us = arange(N,dtype=double)/N+u
return inverseCDF(cumsum(weights),us)
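# Usage sketch (illustrative): all four schemes map a normalized weight vector
# to an array of N particle indices, e.g.
# multinomial_resampling(array([0.1, 0.2, 0.7]))
# might return array([1, 2, 2]); on average, particle i is kept N*weights[i] times.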
class InferenceParams(object):
def __init__(self,rho,alpha,p_uniform_deletion,r_abs):
self.rho = rho
self.alpha = alpha
self.p_uniform_deletion = p_uniform_deletion
self.r_abs = r_abs
def __str__(self):
out = []
out.append('Inference parameters:')
out.append('rho: ' + str(self.rho))
out.append('alpha: ' + str(self.alpha))
out.append('p_uniform_deletion: ' + str(self.p_uniform_deletion))
out.append('r_abs: ' + str(self.r_abs))
return '\n'.join(out)
class Inference:
pass
class ParticleFilter(Inference):
def __init__(self,model,data,data_time,params,num_particles,
storage_class=FixedSizeStoreRing,
max_clusters = 100,
resample_fun=multinomial_resampling,
before_resampling_callback=noop):
self.model = model
self.data = data
self.data_time = data_time
self.num_particles = num_particles
self.resample_fun = resample_fun
self.params = params
self.before_resampling_callback = before_resampling_callback
self.T = data.shape[1]
self.particles = empty(num_particles,dtype=object)
for i in range(num_particles):
self.particles[i] = Particle(self.T,None,storage_class,max_clusters)
self.weights = ones(num_particles)/float(num_particles)
self.effective_sample_size = zeros(self.T)
self.filtering_entropy = zeros(self.T)
self.current_entropy = zeros(num_particles)
self.unique_particles = zeros(self.T,dtype=uint32)
self.__check()
def __check(self):
"""Check whether dimensions of parameters and data are consistent."""
if not len(self.data.shape) == 2:
raise ValueError, \
"Data should be a 2D array with data points as columns!"
if not self.model.dims == self.data.shape[0]:
raise ValueError, "Model dimension does not match data dimension: "+\
str(self.model.dims) + " != " + str(self.data.shape[0])
def run(self):
for t in range(self.T):
start_t = time.time()
logging.info('t = ' + str(t) + '/' + str(self.T))
x = self.data[:,t]
tau = self.data_time[t]
# the probability under the prior is the same for all particles
p_prior = self.model.p_prior(x)
self.model.set_data(x);
# move particles forward
for n in range(self.num_particles):
p = self.particles[n]
# perform deletion step / compute new cluster sizes {{{
if t > 0:
p.mstore.copy(t-1,t)
p.lastspike.copy(t-1,t)
m = p.mstore.get_array(t)
old_zero = m == 0;
if rand() < self.params.p_uniform_deletion: # uniform deletion
# TODO: Speed this up by sampling only from surviving allocations
U = random_sample(p.c.shape);
# delete from alive allocations with prob. 1-p.rho
# We assume that for non-assigned x we have c<0
idx = logical_and(logical_and(U<1-self.params.rho,p.d>=t), p.c>=0)
else: # size-biased deletion
i = rdiscrete(m/float(sum(m)),1)
idx = logical_and(logical_and(p.c == i, p.d>=t), p.c >= 0)
p.d[idx] = t
# compute current alive cluster sizes p.m; TODO: vectorize this?
for k in range(p.K):
nm = sum(logical_and(p.c[0:t] == k,p.d[0:t]>t))
m[k] = nm
p.mstore.set(t,k,nm)
new_zero = m == 0;
died = logical_and(new_zero,logical_not(old_zero)).nonzero()[0]
for d in died:
p.deathtime[d] = t
else:
m = array([],dtype=int32)
### sample new labels for all data points in data {{{
# We use q(c_t|m,U,z) = p(z_k|U) x p(c|m) as proposal distribution,
# i.e. the product of the CRP and the probability of the data point under
# that class assignment with the current class parameters
# (or the prior if we generate a new class).
active_idx = m>0
active = where(active_idx)[0]
# number of clusters before we see new data
Kt = len(active)
# Generalized Poly Urn / CRP
p_crp = hstack((m[active_idx],self.params.alpha))
p_crp = p_crp/sum(p_crp)
# Vector for storing the likelihood values
p_lik = zeros(Kt+1);
# compute probability of data point under all old clusters
for i in range(Kt):
isi = self.data_time[t] - p.lastspike.get(t,active[i])
if isi < self.params.r_abs:
p_crp[i] = 0
p_lik[i] = self.model.p_likelihood(x,p.U.get(t-1,active[i]))
# likelihood for new cluster
p_lik[Kt] = p_prior
# proposal distribution: CRP x likelihood
q = p_crp * p_lik
# normalize to get a proper distribution
q = q / sum(q)
self.current_entropy[n] = entropy(q)
# sample a new label from the discrete distribution q
c = rdiscrete(q,1)[0]
Z_qc = p_crp[c]/q[c]
# update data structures if we propose a new cluster
if c == Kt:
# set birthtime of cluster K to the current time
p.birthtime[p.K] = t
active = hstack((active,p.K))
p.mstore.append(t,0)
p.lastspike.append(t,0)
# update number-of-clusters counts
p.K += 1
active_c = active[c]
p.mstore.set(t,active_c,p.mstore.get(t,active_c)+1)
# assign data point to cluster
p.c[t] = active_c
p.lastspike.set(t,active_c,self.data_time[t])
### sample parameters U for all alive clusters {{{
#
# This samples from q(U|...), for each of the three conditions:
# - new cluster created at this time step
# - sample from prior updated with the data from this time step
# - old cluster, and data assigned to it in this time step
# - sample from distribution given old value and new data
# - old cluster, but no new data assigned to it
# - sample from transition kernel
#
pU_U = ones(Kt)
qU_Uz = ones(Kt)
p.U.copy(t-1,t)
qU_z = 1
G0 = 1
p_ratio = 1
for i in range(len(active)): # for all active clusters
cabs = active[i]
if i >= Kt: # cluster newly created at this time step
new_params = self.model.sample_posterior()
p.U.append(t,new_params)
# compute probability of this sample for use in weight
qU_z = self.model.p_posterior(new_params)
# compute probability of this sample under G_0
G0 = self.model.p_prior_params(new_params)
else: # old cluster
if cabs == c: # new data associated with cluster at this time step
(new_params,p_ratio) = self.model.walk_with_data(
p.U.get(t,cabs),x)
else: # no new data
new_params = self.model.walk(p.U.get(t,cabs))
p.U.set(t,cabs,new_params)
# %%% compute incremental weight for this update step {{{
# %
# % The weight is computed from the following components:
# % - prod(Z_qc) -- the normalizer of q(c|m,...); first line of (9)
# % - prod(G0) -- G0(U); num. of third line of (9)
# % - prod(qU_z) -- q(U|{z|c=k}); denom. of third line of (9)
# % - prod(pU_U) -- p(U|U_old); num. of second line of (9)
# % - prod(qU_Uz)-- q(U|U_old,z); denom. of second line of (9)
#
# w_inc = prod(Z_qc).*prod(pU_U)./prod(qU_Uz).*prod(G0)./prod(qU_z);
# compute probability of current data point under new parameters
pz_U = self.model.p_likelihood(x,p.U.get(t,active_c))
w_inc = pz_U*Z_qc*G0*p_ratio/qU_z
# print pz_U,Z_qc,G0,p_ratio,qU_z
# print pz_U*G0/qU_z
# print w_inc
self.weights[n] *= w_inc
#
# if isnan(w_inc) % bad weight -- can happen if underflow occurs
# w_inc = 0; % discard the particle by giving it weight 0
# end
### resample
# normalize weights
self.weights = self.weights / sum(self.weights)
Neff = 1/sum(self.weights**2)
self.effective_sample_size[t] = Neff
self.filtering_entropy[t] = mean(self.current_entropy)
self.before_resampling_callback(self,t)
self.unique_particles[t] = self.num_particles
# resample if Neff too small or last time step
if (Neff < (self.num_particles / 2.)) or (t == self.T-1):
resampled_indices = self.resample_fun(self.weights)
self.unique_particles[t] = unique(resampled_indices).shape[0]
# assume weights are uniform after resampling
self.weights = 1./self.num_particles * ones(self.num_particles)
new_particles = empty(self.num_particles,dtype=object)
used = set()
for i in range(len(resampled_indices)):
j = resampled_indices[i]
if j in used:
new_particles[i] = self.particles[j].shallow_copy()
else:
new_particles[i] = self.particles[j]
used.add(j)
self.particles = new_particles
end_t = time.time()
elapsed = end_t - start_t
remaining = elapsed * (self.T-t)
finish_time = time.strftime("%a %H:%M:%S",
time.localtime(time.time()+remaining))
print "Status: %i/%i -- %.1f => %s" % (t,self.T,elapsed,finish_time)
sys.stdout.flush()
logging.info("One step required " + str(elapsed) + " seconds, " +
str(remaining) + " secs remaining.")
def get_labeling(self):
labeling = empty((self.num_particles,self.T),dtype=int32)
for p in range(self.num_particles):
labeling[p,:] = self.particles[p].c
return labeling
class GibbsSampler(Inference):
def __init__(self,data,data_time,params,model,state=None):
self.data = data
self.data_time = data_time
self.model = model
self.params = params
self.T = data.shape[1]
if state != None:
self.state = state
if state.T != self.T:
logging.error("State length does not match data length!")
else:
self.state = self.__init_state()
self.old_lnp = self.p_log_joint()
self.lnps = []
self.num_accepted = 0
self.num_rejected = 0
self.__init_debugging()
def __init_debugging(self):
pass
def __init_state(self):
"""Initialize the state of the Gibbs sampler."""
self.num_clusters = 1 # TODO
def p_log_joint(self,inc_walk=True,inc_likelihood=True,inc_death=True):
"""Compute the log-joint probability of the current state."""
state = self.state
ms = zeros_like(self.state.mstore)
lnp = 0
active = set()
for t in range(self.T):
# construct m up to time t
if t > 0:
ms[:,t] = ms[:,t-1]
ms[state.c[t],t] += 1
dying = where(state.d == t)[0]
for tau in dying:
ms[state.c[tau],t] -= 1
if inc_walk:
for k in where(ms[:,t]>0)[0]:
theta = self.state.U[k,t]
if t > 0 and ms[k,t-1]>0:
# old cluster that is still alive
# aux | previous theta
old_theta = self.state.U[k,t-1]
aux_vars = self.state.aux_vars[t-1,k,:,:]
lnp += self.model.kernel.p_log_aux_vars(old_theta,aux_vars)
# theta | aux
lnp += self.model.kernel.p_log_posterior(theta,aux_vars)
else:
# new cluster
# G0(theta)
lnp += self.model.p_log_prior_params(theta)
# c | m
# TODO: speed up computation of alive clusters
lnp += log(self.p_crp(t,ms,active))
active.add(state.c[t])
# x | c, theta
if inc_likelihood:
c = self.state.c[t]
theta = self.state.U[c,t]
lnp += sum(logpnorm(self.data[:,t],theta.mu,theta.lam))
# d_t
if inc_death:
lnp += self.p_log_deathtime(t)
# update mstore
# self.mstore = ms
return lnp
def p_log_joint_cs(self):
return self.p_log_joint(False,False,False)
def mh_sweep(self):
"""Do one MH sweep through the data, i.e. propose for all parameters
once."""
for t in range(self.T):
# propose new c_t
self.sample_label(t)
self.propose_death_time(t)
self.sample_params(t)
self.propose_auxs(t)
#print self.num_accepted + self.num_rejected
#print ("Acceptance rate: %.2f" %
# (self.num_accepted/float(self.num_accepted + self.num_rejected)))
def propose_c(self,t):
# propose from mean occupancy count (not symmetric!)
active = where(sum(self.mstore,1)>0)[0]
K = active.shape[0]
forward_probs = zeros(K+1)
forward_probs[0:K] = mean(self.mstore[active,:],1)
forward_probs[K] = self.params.alpha
forward_probs /= sum(forward_probs) # normalize
new_c = rdiscrete(forward_probs)
forward_lnq = log(forward_probs[new_c])
old_c = self.state.c[t]
if new_c == K:
# new cluster
self.active = hstack((active,self.state.free_labels.pop()))
self.state.c[t] = active[new_c]
# TODO need to sample new d as well ...
new_ms = self.state.reconstruct_mstore(self.state.c,self.state.d)
backward_probs = zeros(active.shape[0])
backward_probs[0:K] = mean(new_ms[active,:],1)
backward_probs[K] = self.params.alpha
backward_probs /= sum(backward_probs) # normalize
if mh_accept(backward_lnq - forward_lnq):
return
else:
self.c[t] = old_c
def mh_accept(self,q_ratio=0.):
"""Return true if the current state is to be accepted by the MH
algorithm and update self.old_lnp.
Params:
q_ratio -- the log of the ratio of the proposal
= log q(z|z*)- log q(z*|z)
= 0 if the proposal is symmetric
"""
lnp = self.p_log_joint()
A = min(1,exp(lnp - self.old_lnp + q_ratio))
if random_sample() < A:
# accept!
self.old_lnp = lnp
self.num_accepted += 1
return True
else:
# reject
self.num_rejected += 1
return False
def p_log_deathtime(self,t):
"""Compute the log probability of the death time of the allocation
at time step t."""
alive = self.state.d[t] - t - 1
return alive*log(self.params.rho) + log(1-self.params.rho)
def sweep(self):
"""Do one Gibbs sweep though the data."""
for t in range(self.T):
logging.info("t=%i/%i" % (t,self.T))
self.sample_label(t)
self.sample_death_time(t)
self.sample_aux_vars(t)
self.sample_params(t)
self.state.check_consistency(self.data_time)
raw_input()
def p_crp(self,t,ms,active):
"""Compute the conditional probability of the allocation at time
t given the table sizes m (and the spike times tau).
"""
if t == 0:
return 1
state = self.state
active = array(list(active))
num_active = active.shape[0]
p_crp = zeros(num_active+1)
p_crp[-1] = self.params.alpha
for i in range(num_active):
c = active[i]
if (self.data_time[t] - self.get_last_spike_time(c,t-1)
< self.params.r_abs):
p_crp[i] = 0
else:
p_crp[i] = ms[c,t-1]
p_crp = normalize(p_crp)
idx = where(active==self.state.c[t])[0]
if len(idx) > 0:
pos = idx[0]
else:
pos = num_active
return p_crp[pos]
def get_last_spike_time(self,c,t):
"""Returns the occurence time of the last spike associated with cluster c
before time t."""
return self.state.lastspike[c,t]
def propose_death_time(self,t):
log_joint_before = self.p_log_joint(False,False)
old_d = self.state.d[t]
new_d = t + 1 + rgeometric(self.params.rho)
if new_d > self.T:
new_d = self.T
self.state.d[t] = new_d
log_joint_after = self.p_log_joint(False,False,False)
A = min(1,exp(log_joint_after - log_joint_before))
if random_sample() < A:
# accept
# if we extended the life of the cluster, sample new params
logging.debug("Accepted new death time %i" % new_d)
self.num_accepted += 1
if new_d > self.state.deathtime[self.state.c[t]]:
self.sample_walk(
self.state.c[t],
self.state.deathtime[self.state.c[t]],
new_d
)
self.state.deathtime[self.state.c[t]] = max(
self.state.d[self.state.c == self.state.c[t]])
self.state.mstore = self.state.reconstruct_mstore(
self.state.c,
self.state.d)
#print self.state.mstore
else:
# reject
self.num_rejected += 1
self.state.d[t] = old_d
def sample_death_time(self,t):
"""Sample a new death time for the allocation variable at time t.
The posterior p(d_t|...) is proportional to p(c_(t:last)|d_t)p(d_t),
where p(d_t) is the prior death time distribution (geometric) and p(c|d_t)
is the probability of the assignments to cluster c_t from the current
time step until the last allocation in that cluster dies.
"""
state = self.state
c = state.c[t]
mc = state.mstore[c,:].copy()
d_old = state.d[t]
length = self.T - t
# relative indices of assignments to this cluster
assignments = where(state.c[t:] == c)[0]
if assignments[0] != 0:
raise RuntimeError,"Something's wrong!"
assignments = assignments[1:]
# determine the last assignment made to this cluster (rel. to t)
last_assignment = assignments[-1]
dp = ones(length)
# find the last allocation that "depends" on this allocation being,
# i.e. without it mstore at that point would be 0.
# take out current allocation
mc[t:d_old] -= 1
dependencies = where(
logical_and(state.c[t:d_old] == c,
mc[t:d_old] == 1
))[0]
if len(dependencies)>0:
last_dep = dependencies[-1]
# the probability of deletion before last_dep is 0
dp[0:last_dep]=0
else:
last_dep = 0
possible_deaths = t+arange(last_dep+1,self.T-t+1)
p = self.p_labels_given_deathtime(t,possible_deaths)
dp[last_dep:self.T-t] = p
# The prior probability for d=t+1,...,T
prior = self.params.rho ** arange(0,length)*(1-self.params.rho)
prior[-1] = 1-sum(prior[0:-1])
q = dp * prior
q = q / sum(q)
dt = rdiscrete(q)
return dt + t + 1
def p_labels_given_deathtime(self,t,possible_deaths):
p1 = self.p_labels_given_deathtime_slow(t,possible_deaths)
p2 = self.p_labels_given_deathtime_fast(t,possible_deaths)
p1 = p1/sum(p1)
p2 = p2/sum(p2)
assert(all(p1==p2))
def p_labels_given_deathtime_slow(self,t,possible_deaths):
"""Compute the likelihood of the label at time t as a function of the
possible death times for that label.
"""
c = self.state.c[t]
d_old = self.state.d[t]
p = ones(possible_deaths.shape[0])
for i in range(possible_deaths.shape[0]):
d = possible_deaths[i]
# construct mstore for this situation
ms = self.state.mstore.copy()
ms[c,t:d_old] -= 1
ms[c,t+1:d] += 1
for tau in range(t+1,self.T):
p[i] *= self.p_crp(tau,ms[:,tau-1])
return p
def p_labels_given_deathtime_fast(self,t,possible_deaths):
"""Like the slow version, but compute the likelihood incrementally,
thus saving _a lot_ of computation time."""
c = self.state.c[t]
d_old = self.state.d[t]
last_dep = possible_deaths[0] - 1 # this should always be true
num_possible = self.T - last_dep
assert(num_possible==possible_deaths.shape[0])
# possible deaths always ranges from last_dep+1 to T (inclusive)
p = ones(possible_deaths.shape[0])
# first, compute the full solution for the first possible death time
ms = self.state.mstore.copy()
# ms[:,t-1] has to represent the state after allocation at time step
# t-1 and after deletion at time step t
# TODO: Do we have to compute this backwards?!
ms[c,last_dep:d_old] -= 1
for tau in range(last_dep+1,self.T):
p[0] *= self.p_crp(tau,ms[:,tau-1])
for i in range(1,num_possible-1):
d = i + last_dep + 1
print d
ms[c,d-1] +=1
if self.state.c[d] == c:
# numerator changed
p[i]=p[i-1]/(ms[c,d-1] - 1)*ms[c,d-1]
old = sum(ms[:,d-1]) + self.params.alpha
new = old + 1
Z = old/new
p[i] = p[i-1]*Z
# dying any time after the last allocation has the same probability
p[-1] = p[-2]
return p
def sample_label(self,t):
"""Sample a new label for the data point at time t.
The conditional probability of p(c_t|rest) is proportional to
p(c_t|seating) x p(x_t|c_t)
TODO: Handle the case of singletons separately -- the is no point in
relabeling them.
"""
logging.debug("Sampling new label at time %i" % t)
state = self.state
c_old = state.c[t]
res = self.log_p_label_posterior_new(t)
possible, p_crp = res
num_possible = possible.shape[0]
p_lik = empty(num_possible+1,dtype=float64)
for i in range(num_possible):
p_lik[i] = self.model.p_log_likelihood(self.data[:,t],state.U[possible[i],t])
p_lik[num_possible] = self.model.p_log_prior(self.data[:,t])
q = p_crp + p_lik
q = exp(q - logsumexp(q))
# sample new label
choice = rdiscrete(q)
# map choice to actual label
if choice < num_possible:
c = possible[choice]
new_cluster = False
elif choice == num_possible:
if sum(state.c == c_old)==1:
# singleton, keep label
logging.debug("Keeping label for singleton %i" % c_old)
c = c_old
new_cluster = False
else:
c = self.get_free_label()
new_cluster = True
if c != c_old:
logging.debug("New label t=%i: %i=>%i" % (t,c_old,c))
state.c[t] = c
# update mstore
state.mstore[c_old,t:state.d[t]] -= 1
state.mstore[c,t:state.d[t]] += 1
# update birthtime
if new_cluster or (t < state.birthtime[c]):
state.birthtime[c] = t
if state.birthtime[c_old] == t:
assocs = where(state.c == c_old)[0]
if assocs.shape[0] > 0:
state.birthtime[c_old] = assocs[0]
else:
state.birthtime[c_old] = self.T
# update deathtime
if new_cluster:
state.deathtime[c] = state.d[t]
else:
state.deathtime[c] = max(state.deathtime[c],state.d[t])
# update lastspike
self.state.reconstruct_lastspike(self.data_time)
deaths_c_old = state.d[state.c==c_old]
if len(deaths_c_old)==0:
logging.debug("Cluster %i died, recycling label" % c_old)
# cluster died completely
state.deathtime[c_old] = self.T
self.add_free_label(c_old)
else:
state.deathtime[c_old] = max(deaths_c_old)
logging.debug("New deathtime for %i: %i"
% (c_old,state.deathtime[c_old]))
# sample parameters for new cluster
if new_cluster:
self.model.set_data(self.data[:,t])
self.state.U[self.state.c[t],t] = self.model.sample_posterior()
self.sample_walk(self.state.c[t],t+1,self.state.d[t])
self.sample_walk_backwards(self.state.c[t],t,0)
def propose_auxs(self,t):
active = self.get_active(t)
for c in active:
self.propose_aux(t,c)
def propose_aux(self,t,c):
"""Propose new values for the auxiliary variables after time t."""
# forward proposal
params = self.state.U[c,t]
old_aux = self.state.aux_vars[t,c,:,:]
new_aux = self.model.kernel.sample_aux(params)
# we can speed this up by only computing the joint for these params
# TODO: Compute A as p(new_params|new_aux)/p(new_params|old_aux)
if self.state.c[t] == c:
data = self.data[:,t]
else:
data = None
if t < self.T-1 and self.state.deathtime[c]!=t+1:
p_old = self.model.kernel.p_log_posterior(
self.state.U[c,t+1],
old_aux,
data)
p_new = self.model.kernel.p_log_posterior(
self.state.U[c,t+1],
new_aux,
data)
A = min(1,exp(p_new - p_old))
else:
A = 1.0
if random_sample() < A:
# accept!
self.num_accepted += 1
self.state.aux_vars[t,c,:,:] = new_aux
else:
# reject
self.num_rejected += 1
def sample_params(self,t):
"""Sample new parameters for the clusters at time t."""
active = where(sum(self.state.mstore,1)>0)[0]
for c in active:
if self.state.deathtime[c] > t:
self.sample_param(t,c)
def sample_param(self,t,c):
"""Sample new parameters for cluster c at time. The cluster may be
an old cluster or newly created. The auxiliary variables at this
time step have already been sampled."""
logging.debug("New parameter for cluster %i at time %i" % (c,t))
data = None
if self.state.c[t] == c:
# there is data associated with this cluster at this time step
data = self.data[:,t]
previous = zeros((self.model.dims,0))
next = zeros((self.model.dims,0))
if t > 0 and self.state.birthtime[self.state.c[t]] < t:
previous = self.state.aux_vars[t-1,c,:,:]
next = self.state.aux_vars[t,c,:,:]
aux_vars = hstack((previous,next))
self.state.U[c,t] = self.model.kernel.sample_posterior(
aux_vars,
data
)
def sample_walk(self,c,start,stop):
"""Sample new parameters from the walk for cluster c between time
steps start and stop. This is necessary if we extend the life of a
cluster by sampling a new death time.
"""
logging.debug("Sampling walk forward for %i: %i=>%i" % (c,start,stop))
for tau in range(start,stop):
self.state.aux_vars[tau-1,c,:,:] = self.model.kernel.sample_aux(
self.state.U[c,tau-1])
self.state.U[c,tau] = self.model.kernel.sample_posterior(
self.state.aux_vars[tau-1,c,:,:])
self.state.aux_vars[stop-1,c,:,:] = self.model.kernel.sample_aux(
self.state.U[c,stop-1])
def sample_walk_backwards(self,c,start,stop):
"""Sample backwards from walk starting at start-1 to stop (inclusive).
"""
logging.debug("Sampling walk backwards for %i: %i=>%i" % (c,start,stop))
for tau in reversed(range(stop,start)):
self.state.aux_vars[tau,c,:,:] = self.model.kernel.sample_aux(
self.state.U[c,tau+1])
self.state.U[c,tau] = self.model.kernel.sample_posterior(
self.state.aux_vars[tau,c,:,:])
def sample_aux_vars(self,t):
"""Sample the auxiliary variables at time step t."""
active = where(sum(self.state.mstore,1)>0)[0]
for c in active:
if self.deathtime[c] > t and self.state.birthtime[c] != t:
self.sample_aux_var(t,c)
def sample_aux_var(self,t,c):
"""Sample the auxiliary variable(s) for cluster c at time t.
We can assume that the cluster has existed at the previous time step.
"""
logging.debug("Sampling aux vars for cluster %i at time %i" % (c,t))
# FIXME: This is incorrect, as it does not take the future into account!
self.state.aux_vars[t,c,:,:] = self.model.kernel.sample_aux(
self.state.U[c,t-1])
def log_p_label_posterior_new(self,t):
"""Compute the conditional probability over allocation variables at
time t."""
state = self.state
d = min(state.d[t],state.T) # TODO: min needed?
possible = where(sum(state.mstore[:,t:d],1)>0)[0]
lnp = zeros(possible.shape[0]+1)
old_c = self.state.c[t]
for i in range(possible.shape[0]):
c = possible[i]
self.state.c[t] = c
lnp[i] = self.p_log_joint_cs()
self.state.c[t] = self.state.free_labels.pop()
lnp[possible.shape[0]] = self.p_log_joint_cs()
self.state.free_labels.append(self.state.c[t])
self.state.c[t] = old_c
# normalize
return (possible,lnp - logsumexp(lnp))
def log_p_label_posterior(self,t):
"""Compute the posterior probability over allocation variables given
all other allocation variables and death times.
Returns:
None if the allocation cannot be changed due to DCW
(possible,p) where possible is an array of cluster labels
the we can assign to, and p is an array of
the respective probabilities.
"""
# 2) temporarily remove the current allocation from the counts m
# 3) for each possible label:
# - temporarily assign to this cluster and update m
# - compute joint of seating arrangement up to d_t
state = self.state
ms = state.mstore.copy() # local working copy
c_old = state.c[t]
d = min(state.d[t],state.T) # TODO: min needed?
# remove from old cluster
ms[c_old,t:d] = ms[c_old,t:d] - 1
# Check for "dying customer's wish":
# If removing the current allocation causes the cluster to die, but
# data is assigned to it _after_ its death, then we can't move the
# allocation
tmp = where(ms[c_old,t:]==0)[0]
if tmp.shape[0] == 0:
new_death = state.T+1
else:
new_death = t + tmp[0]
if any(where(state.c==c_old)[0]>=new_death):
# dying customers wish
logging.debug("DCW at time %i, %i=>%i" %
(t,state.deathtime[c_old],new_death))
return None
# 1) Determine which clusters we can potentially assign to:
# - any cluster that is alive at any point from now until this
# alloc dies
possible = where(sum(state.mstore[:,t:d],1)>0)[0]
# remove allocation c_t from ms[c_t,t]
for tau in range(t+1,d):
ms[state.c[tau],tau] -= 1
p_crp = zeros(possible.shape[0],dtype=float64)
for i in range(possible.shape[0]):
ms_tmp = ms.copy()
c_new = possible[i]
# temporarily allocate to c_new
ms_tmp[c_new,t:d] +=1
if ms_tmp[c_new,t] > 0:
p_crp[i] = log(ms_tmp[c_new,t])
for tau in range(t+1,d):
if ms_tmp[c_new,tau] > 0:
p_crp[i] += log(ms_tmp[c_new,tau])
# The normalization constant (normalized such that the probability for
# starting a new cluster is alpha) is given by the product of mstore
# for t+1:d
Z = 0.
for tau in range(t+1,d):
if ms[state.c[tau],tau] > 0:
Z += log(ms[state.c[tau],tau])
return (possible,p_crp - logsumexp(p_crp))
def get_active(self,t):
"""Return a list of active clusters at time t."""
return where(self.state.mstore[:,t]>0)[0]
def add_free_label(self,label):
self.state.free_labels.append(label)
def get_free_label(self):
"""Return a label that is currently "free", i.e. can be used for
starting a new cluster."""
return self.state.free_labels.pop()
```
#### File: jgasthaus/gpu_python/model.py
```python
import numpy.random as R
from collections import deque
from utils import *
from numpy import *
class TransitionKernel(object):
def __init__(self,model,params):
self.params = params
self.model = model
def p_walk(self,old_mu,old_lam,mu,lam,tau=None):
raise NotImplementedError
def walk(self,params,tau=None,p_old=None):
raise NotImplementedError
def walk_with_data(self,params,data,tau=None):
"""Sample from the walk given some observersion. This fallback
implementation just samples from the walk ignoring the data."""
return (self.walk(params,tau),1)
def walk_backwards(self,params,tau=None):
return self.walk(params,tau)
class MetropolisWalk(TransitionKernel):
def walk(self,params,tau=None,p_old=None):
if p_old == None:
p_old = exp(self.model.p_log_prior_params(params));
# random walk on mean
n_mu = params.mu + self.params[0] * R.standard_normal(self.model.dims)
n_lam = params.lam + self.params[1] * R.standard_normal(self.model.dims)
# keep values that are about to become negative the same
if self.model.dims > 1:
idx = n_lam <= 0
n_lam[idx] = params.lam[idx]
else:
if n_lam <= 0:
n_lam = params.lam
# Metropolis update rule
new_params = self.model.get_storage(n_mu,n_lam)
p_new = exp(self.model.p_log_prior_params(new_params))
if R.rand() > p_new/p_old: # not accepted -> keep old values
new_params = params
return new_params
class CaronIndependent(TransitionKernel):
def __init__(self,model,params):
TransitionKernel.__init__(self,model,params)
self.num_aux = params[0]
self.rho = params[1]
self.D = model.params.mu0.shape[0]
n0 = self.model.params.n0
mu0 = self.model.params.mu0
alpha = self.model.params.a
beta = self.model.params.b
self.beta_up = n0/(2*(n0+1))
self.np = n0 + 1
self.mu_up1 = (n0*mu0)/self.np
self.mu_up2 = self.np * (alpha+0.5)
self.mu_up3 = 2*alpha + 1
self.gam_up = alpha+0.5
def walk(self,params,tau=None):
return self.__general_walk(params,data=None,tau=tau)
def __general_walk(self,params,data=None,tau=None):
"""General version of the random walk allowing for an arbitrary
number of auxiliary variables and/or data points.
"""
return self.sample_posterior(self.sample_aux(params,tau),data,tau)
def p_log_posterior(self,params,aux_vars,data=None):
n0 = self.model.params.n0
mu0 = self.model.params.mu0
alpha = self.model.params.a
beta = self.model.params.b
num_aux = aux_vars.shape[1]
if data != None:
N = num_aux + 1
nn = num_aux/self.rho + 1
else:
N = num_aux
nn = num_aux/self.rho
if data != None:
aux_vars = c_[aux_vars,data]
data_mean = mean(aux_vars,1)
# make data_mean a rank-2 D-by-1 array so we can use broadcasting
data_mean.shape = (data_mean.shape[0],1)
nvar = sum((aux_vars-data_mean)**2,1)
data_mean.shape = (data_mean.shape[0],)
mu_star = (n0*mu0 + nn*data_mean)/(n0+nn)
beta_star = beta + 0.5*nvar + (nn*n0*(mu0-data_mean)**2)/(2*(n0+nn))
p1 = sum(logpgamma(params.lam,alpha+0.5*nn,beta_star))
p2 = sum(logpnorm(params.mu,mu_star,(nn+n0)*params.lam))
return p1+p2
def p_posterior(self,params,aux_vars,data=None):
return exp(self.p_log_posterior(params,aux_vars,data))
def p_log_aux_vars(self,params,aux_vars):
mu = params.mu
lam = params.lam*self.rho
lnp = 0
for n in range(aux_vars.shape[1]):
lnp += sum(logpnorm(aux_vars[:,n],mu,lam))
return lnp
def sample_posterior(self,aux_vars,data=None,tau=None):
"""Sample from the posterior given the auxiliary variables and data."""
n0 = self.model.params.n0
mu0 = self.model.params.mu0
alpha = self.model.params.a
beta = self.model.params.b
num_aux = aux_vars.shape[1]
if data != None:
N = num_aux + 1
nn = num_aux/self.rho + 1
else:
N = num_aux
nn = num_aux/self.rho
if data != None:
aux_vars = c_[aux_vars,data]
data_mean = mean(aux_vars,1)
# make data_mean a rank-2 D-by-1 array so we can use broadcasting
data_mean.shape = (data_mean.shape[0],1)
nvar = sum((aux_vars-data_mean)**2,1)
data_mean.shape = (data_mean.shape[0],)
mu_star = (n0*mu0 + nn*data_mean)/(n0+nn)
beta_star = beta + 0.5*nvar + (nn*n0*(mu0-data_mean)**2)/(2*(n0+nn))
n_lam = rgamma(alpha+0.5*nn,beta_star)
n_mu = rnorm(mu_star,(nn+n0)*n_lam)
return self.model.get_storage(n_mu,n_lam)
def sample_aux(self,params,tau=None):
"""Sample auxiliary variables given the current state."""
return rnorm_many(params.mu,params.lam*self.rho,self.num_aux)
def walk_with_data(self,params,data,tau=None):
aux_vars = self.sample_aux(params,tau)
params = self.sample_posterior(aux_vars,data,tau)
p1 = self.p_posterior(params,aux_vars,None)
p2 = self.p_posterior(params,aux_vars,data)
return (params,p1/p2)
class Model(object):
pass
class DiagonalConjugate(Model):
def __init__(self,hyper_params,kernelClass=MetropolisWalk,
kernelParams=(0.1,0.001)):
self.params = hyper_params
self.dims = self.params.dims
self.empty = True
self.kernel = kernelClass(self,kernelParams)
self.walk = self.kernel.walk
self.walk_with_data = self.kernel.walk_with_data
def set_data(self,data):
if len(data.shape) <= 1:
# just one data point
self.mean = data
self.nvar = zeros_like(data)
self.nk = 1
self.nn = self.params.n0 + 1
self.mun = (self.params.n0 * self.params.mu0 + self.mean)/self.nn
self.bn = self.params.b + 0.5/self.nn*self.params.n0* \
(self.params.mu0 - self.mean)**2
self.ibn = 1/self.bn;
else:
self.mean = mean(data,1)
            # per-dimension sum of squared deviations from the sample mean
            # (mirrors the computation in CaronIndependent.sample_posterior)
            self.nvar = sum((data - self.mean[:,newaxis])**2,1)
self.nk = data.shape[1]
self.nn = self.params.n0 + self.nk
self.mun = (self.params.n0 * self.params.mu0 +
self.nk * self.mean)/(self.nn)
self.bn = (self.params.b + 0.5*self.nvar +
0.5/self.nn*self.nk*self.params.n0*
(self.params.mu0 - self.mean)**2)
self.ibn = 1/self.bn;
self.empty = False
def p_log_likelihood(self,x,params):
"""Compute log p(x|params)"""
return sum(logpnorm(x,params.mu,params.lam))
def p_likelihood(self,x,params):
return exp(self.p_log_likelihood(x,params))
def p_log_predictive(self,x):
"""Compute log p(x|z)."""
if self.empty:
p = self.p_log_prior(x)
else:
p = sum(logpstudent(
x,
self.mun,
self.nn*(self.params.a + 0.5*self.nk)/(self.nn + 1)*self.ibn,
2*self.params.a+self.nk))
return p
def p_predictive(self,x):
return exp(self.p_log_predictive(x))
def p_log_posterior_mean(self,mu,lam):
"""Compute log p(mu|z)."""
if self.empty:
p = 0;
else:
p = sum(logpnorm(mu,self.mun,lam*self.nn))
return p
def p_log_posterior_precision(self,lam):
if self.empty:
p = 0;
else:
p = sum(logpgamma(lam,self.params.a+0.5*self.nk,self.bn));
return p
def p_log_posterior(self,params):
return (self.p_log_posterior_mean(params.mu,params.lam) +
self.p_log_posterior_precision(params.lam))
def p_posterior(self,params):
return exp(self.p_log_posterior(params))
def p_log_prior(self,x):
"""Compute log p(x) (i.e. \int p(x|theta)p(theta) dtheta)."""
return sum(logpstudent(x,self.params.mu0,
self.params.n0/(self.params.n0+1)*self.params.a/self.params.b,
2.*self.params.a))
def p_prior(self,x):
return exp(self.p_log_prior(x))
def p_log_prior_params(self,params):
return (
sum(logpnorm(params.mu,self.params.mu0,self.params.n0 * params.lam))
+ sum(logpgamma(params.lam,self.params.a,self.params.b)))
def p_prior_params(self,params):
return exp(self.p_log_prior_params(params))
def sample_posterior(self):
if self.empty:
return self.sample_prior()
lam = rgamma(self.params.a+0.5*self.nk,self.bn)
mu = rnorm(self.mun,lam*self.nn)
return self.get_storage(mu,lam)
def sample_prior(self):
lam = rgamma(self.params.a,self.params.b)
mu = rnorm(self.params.mu0,self.params.n0 * lam)
return self.get_storage(mu,lam)
def sample_Uz(self,mu,lam,data,num_sir_samples=10):
"""Sample from p(U|U_old,z)=p(U|U_old)p(z|U)/Z."""
if self.empty:
            return (self.walk(self.get_storage(mu,lam)),1)
# SIR: sample from P(U|U_old), compute weights P(x|U), then
# sample from the discrete distribution.
mu_samples = zeros((self.dims,num_sir_samples))
lam_samples = zeros((self.dims,num_sir_samples))
sir_weights = zeros(num_sir_samples)
        # note: walk() and the probability helpers expect a parameter storage
        # object, so wrap (mu,lam) accordingly; p_old is the (non-log) prior,
        # as expected by MetropolisWalk.walk
        p_old = self.p_prior_params(self.get_storage(mu,lam))
        for s in range(num_sir_samples):
            tmp = self.walk(self.get_storage(mu,lam),p_old=p_old)
            mu_samples[:,s] = tmp.mu
            lam_samples[:,s] = tmp.lam
            sir_weights[s] = self.p_posterior(tmp)
sir_weights = sir_weights / sum(sir_weights);
s = rdiscrete(sir_weights)
new_mu = mu_samples[:,s]
new_lam = lam_samples[:,s]
weight = sir_weights[s]
return (self.get_storage(new_mu,new_lam),weight)
def get_storage(self,mu=None,lam=None):
"""Get a new parameter storage object."""
return DiagonalConjugateStorage(mu,lam)
class DiagonalConjugateHyperParams(object):
def __init__(self,a,b,mu0,n0,dims=None):
if dims != None:
self.a = ones(dims) * a
self.b = ones(dims) * b
self.mu0 = ones(dims) * mu0
else:
self.a = a
self.b = b
self.mu0 = mu0
self.n0 = n0
        if self.a.shape != self.b.shape:
            raise ValueError("shape mismatch: a.shape: " + str(a.shape) +
                             " b.shape: " + str(b.shape))
        elif self.a.shape != self.mu0.shape:
            raise ValueError("shape mismatch: a.shape: " + str(a.shape) +
                             " mu0.shape: " + str(mu0.shape))
if len(self.a.shape)!= 0:
self.dims = self.a.shape[0]
else:
self.dims = 1
def compute_stats(self):
e_mu = self.mu0
v_mu = self.b / (self.n0*(self.a-1))
e_lam = self.a/self.b
v_lam = self.a/(self.b**2)
out = ("E[mu] = %.3f, V[mu] = %.3f, E[lam] = %.3f, V[lam] = %.3f"
% (e_mu,v_mu,e_lam,v_lam))
return out
def __str__(self):
out = ['Model hyperparameters:\n']
out.append('a: ' + str(self.a) + '\n')
out.append('b: ' + str(self.b) + '\n')
out.append('mu0: ' + str(self.mu0) + '\n')
out.append('n0: ' + str(self.n0) + '\n')
return ''.join(out)
class DiagonalConjugateStorage(object):
"""Class for storing the parameter values of a single component."""
def __init__(self,mu=None,lam=None):
self.mu = mu
self.lam = lam
def __str__(self):
return 'mu: ' + str(self.mu) + '\nlambda: ' + str(self.lam)
def __repr__(self):
return self.__str__()
class Particle(object):
"""The Particle class stores the state of the particle filter / Gibbs
sampler.
"""
def __init__(self,T,copy=None,storage_class=FixedSizeStoreRing,
max_clusters=100):
if copy != None:
self.T = copy.T
self.c = copy.c.copy()
self.d = copy.d.copy()
self.K = copy.K
self.max_clusters = copy.max_clusters
self.mstore = copy.mstore.shallow_copy()
self.lastspike = copy.lastspike.shallow_copy()
self.U = copy.U.shallow_copy()
self.birthtime = copy.birthtime.shallow_copy()
self.deathtime = copy.deathtime.shallow_copy()
self.storage_class = copy.storage_class
else:
self.T = T
self.storage_class = storage_class
self.max_clusters = max_clusters
# allocation variables for all time steps
self.c = -1*ones(T,dtype=int16)
# death times of allocation variables (assume they
# don't die until they do)
self.d = T * ones(T,dtype=uint32)
# total number of clusters in this particle up to the current time
self.K = 0
# array to store class counts at each time step
self.mstore = self.storage_class(T,dtype=int32,
max_clusters=self.max_clusters)
# storage object for the spike time of the last spike associated
# with each cluster for each time step.
self.lastspike = self.storage_class(T,dtype=float64,
max_clusters=self.max_clusters)
# Parameter values of each cluster 1...K at each time step 1...T
self.U = self.storage_class(T,dtype=object,
max_clusters=self.max_clusters)
# vector to store the birth times of clusters
self.birthtime = ExtendingList()
# vector to store the death times of allocation variables
# (0 if not dead)
self.deathtime = ExtendingList()
def shallow_copy(self):
"""Make a shallow copy of this particle.
In essence, copies of lists are created, but the list contents are not
copied. This is useful for making copies of particles during
resampling, such that the resulting particles share the same history,
but can be moved forward independently.
"""
return Particle(self.T,self)
def __str__(self):
out = []
out.append('c: ' + str(self.c)+'\n')
out.append('d: ' + str(self.d)+'\n')
out.append('K: ' + str(self.K)+'\n')
out.append('mstore: ' + str(self.mstore)+'\n')
out.append('lastspike: ' + str(self.lastspike)+'\n')
out.append('U: ' + str(self.U)+'\n')
return ''.join(out)
__repr__ = __str__
class GibbsState():
"""Class representing the state of the Gibbs sampler. This is similar to
a particle in many respects. However, as in the Gibbs sampler we only
need to hold one state object in memory at any given time, we can trade
off speed and memory consumption differently.
If a particle object is passed to the constructor it will be used to
initialize the state.
"""
def __init__(self,particle=None,model=None,max_clusters=100):
self.max_clusters = max_clusters
if particle != None and model != None:
self.from_particle(particle,model)
else:
self.__empty_state()
def from_particle(self,particle,model):
"""Construct state from the given particle object."""
self.T = particle.T
# allocation variables for all time steps
self.c = particle.c.copy()
# death times of allocation variables
self.d = particle.d.copy()
# make sure the maximum death time is T
self.d[self.d>self.T] = self.T
# total number of clusters in the current state
self.K = particle.K
# array to store class counts at each time step
self.mstore = zeros((self.max_clusters,self.T),dtype=int32)
self.lastspike = zeros((self.max_clusters,self.T),dtype=float64)
self.U = empty((self.max_clusters,self.T),dtype=object)
self.aux_vars = zeros(
(self.T,
self.max_clusters,
model.kernel.D,
model.kernel.num_aux))
for t in range(self.T):
m = particle.mstore.get_array(t)
n = m.shape[0]
self.mstore[0:n,t] = m
m = particle.lastspike.get_array(t)
n = m.shape[0]
self.lastspike[0:n,t] = m
m = particle.U.get_array(t)
n = m.shape[0]
self.U[0:n,t] = m
# vector to store the birth times of clusters
self.birthtime = particle.birthtime.to_array(self.max_clusters,
dtype=int32)
# vector to store the death times of clusters (0 if not dead)
self.deathtime = particle.deathtime.to_array(self.max_clusters,
dtype=int32)
self.deathtime[self.deathtime==0] = self.T
# determine active clusters
active = where(sum(self.mstore,1)>0)[0]
# compute free labels
self.free_labels = deque(reversed(list(set(range(self.max_clusters))
-set(active))))
# all clusters must have parameters from time 0 to their death
# -> sample them from their birth backwards
for c in active:
logging.debug("sampling params for cluster %i at time %i"
% (c,t))
for tau in reversed(range(0,self.birthtime[c])):
self.aux_vars[tau,c,:,:] = model.kernel.sample_aux(
self.U[c,tau+1])
self.U[c,tau] = model.kernel.sample_posterior(
self.aux_vars[tau,c,:,:])
self.initialize_aux_variables(model)
def initialize_aux_variables(self,model):
"""Sample initial value for the auxiliary variables given the rest of
of the state. This is done by sampling forward in time."""
for t in range(self.T):
active = where(self.mstore[:,t]>0)[0]
for c in active:
if t >= self.birthtime[c]:
self.aux_vars[t,c,:,:] = model.kernel.sample_aux(
self.U[c,t])
def __empty_state(self):
"""Set all fields to represent an empty state."""
pass
def check_consistency(self,data_time):
"""Check consistency of the Gibbs sampler state.
In particular, perform the following checks:
1) if m(c,t) > 0 then U(c,t) != None
2) m(c,birth:death-1)>0 and m(c,0:birth)==0 and m(c,death:T)==0
3) m matches the information in c and deathtime
4) check that lastspike is correct
"""
errors = 0
# check 1) we have parameter values for all non-empty clusters
idx = where(self.mstore>0)
if any(isNone(self.U[idx])):
logging.error("Consitency error: Some needed parameters are None!"+
str(where(isNone(self.U[idx]))))
errors += 1
# check 1b) we need parameter values from 0 to the death of each cluster
active = where(sum(self.mstore,1)>0)[0]
for c in active:
d = self.deathtime[c]
if any(isNone(self.U[c,0:d])):
logging.error("Consitency error: Parameters not avaliable " +
"from the start")
# check 2) There are no "re-births", assuming birthtime and deathtime
# are correct
active = where(sum(self.mstore,1)>0)[0]
for c in active:
# the death time of _cluster_ c is the first zero after its birth
birth = self.birthtime[c]
active_birth_to_end = where(self.mstore[c,birth:]==0)[0]
if active_birth_to_end.shape[0] == 0:
death = self.T
else:
death = birth + active_birth_to_end[0]
if death != self.deathtime[c]:
logging.error("deatime does not contain the first zero after "+
"birth of cluster %i (%i!=%i)" %
(c,self.deathtime[c],death))
if (any(self.mstore[c,birth:death]==0)):
logging.error(("Consistency error: mstore 0 while " +
"cluster %i is alive") % c)
if any(self.mstore[c,0:birth]>0):
logging.error(("Consistency error: mstore > 0 while " +
"cluster %i is not yet born") % c)
if any(self.mstore[c,death:]>0):
logging.error(("Consistency error: mstore > 0 while "
"cluster %i is already dead!") % c)
# check 3) we can reconstruct mstore from c and d
new_ms = self.reconstruct_mstore(self.c,self.d)
if any(self.mstore != new_ms):
logging.error("Consitency error: Cannot reconstruct " +
"mstore from c and d")
# check 4)
# lastspike[c,t] is supposed to contain the last spike time for all
# clusters _after_ the observation at time t
lastspike = zeros(self.max_clusters)
for t in range(self.T):
lastspike[self.c[t]] = data_time[t]
if any(self.lastspike[:,t]!=lastspike):
logging.error("Consistency error:lastspike incorrect at " +
"time %i" % t)
logging.error(str(where(self.lastspike[:,t]!=lastspike)))
def reconstruct_lastspike(self,data_time):
lastspike = zeros(self.max_clusters)
for t in range(self.T):
lastspike[self.c[t]] = data_time[t]
self.lastspike[:,t] = lastspike
def reconstruct_mstore(self,c,d):
new_ms = zeros_like(self.mstore)
for t in range(self.T):
if t > 0:
new_ms[:,t] = new_ms[:,t-1]
new_ms[c[t],t] += 1
dying = where(d == t)[0]
for tau in dying:
new_ms[c[tau],t] -= 1
return new_ms
def __str__(self,include_U=True):
out = []
out.append('c: ' + str(self.c)+'\n')
out.append('d: ' + str(self.d)+'\n')
out.append('K: ' + str(self.K)+'\n')
out.append('mstore: ' + str(self.mstore)+'\n')
out.append('lastspike: ' + str(self.lastspike)+'\n')
if include_U:
out.append('U: ' + str(self.U)+'\n')
return ''.join(out)
```
#### File: jgasthaus/gpu_python/set_partitions.py
```python
from numpy import *
import analysis
from pylab import *
def labelings_from_partitions():
txt = open("set_partitions.txt",'r').read()
sets = eval(txt.replace("\r\n"," "))
labelings = -1*ones((115975,10),dtype=uint8)
for k in range(len(sets)):
l = sets[k]
for i in range(len(l)):
s = l[i]
if type(s) != tuple:
s = tuple([s])
for j in range(len(s)):
labelings[k,s[j]-1] = i
savetxt('labelings.txt',labelings,fmt="%i")
def compute_entropy(labelings,lnp):
N,T = labelings.shape
ent = zeros(T)
for t in range(T):
possible = unique(labelings[:,t])
p = zeros(possible.shape[0])
for i in range(possible.shape[0]):
p[i] = sum(exp(lnp)[labelings[:,t]==possible[i]])
p = p / sum(p)
l = log2(p)
l[l==-inf]=0
ent[t] = -sum(p*l)
return ent
def compute_kl(all_labels,lnps,samples):
"""Compute KL divergence between the discrete distribution with
atoms all_labels and (log-)weights lnps, and the equally weighted
samples in samples."""
p1 = exp(lnps)
# use small data type for efficiency
all_labels = array(all_labels,dtype=uint8)
samples = array(samples,dtype=uint8)
for i in range(samples.shape[0]):
samples[i,:] = analysis.map_labels(samples[i,:])
samples = array(samples,dtype=uint8)
p2 = zeros_like(p1)
N = p1.shape[0]
kl1 = 0.
kl2 = 0.
for i in range(N):
p2[i] = sum(all(samples==all_labels[i,:],1))/float(samples.shape[0])
return (p2,cross_entropy(p2,p1)-cross_entropy(p2,p2))
def plot_probs(labelings,probs,num=10):
idx = argsort(probs)
mat = labelings[idx[-num:],:]
ax = subplot(1,2,1)
#axis('image')
pcolor(mat,cmap=cm.binary)
grid()
setp(gca(), yticklabels=[])
title('Labelings')
ylabel('Labeling')
xlabel('Time step')
subplot(1,2,2,sharey=ax)
barh(arange(num),probs[idx[-num:]])
grid()
setp(gca(), yticklabels=[])
title('Prior probability')
xlabel('p(labeling)')
F = gcf()
F.set_size_inches(6,3)
subplots_adjust(0.1,0.15,0.95,0.9,0.1,0.2)
def cross_entropy(p1,p2):
l2 = log2(p2)
l2[l2==-inf] = 0
return -sum(p1*l2)
def main():
labels = loadtxt('labelings.txt')
if __name__ == "__main__":
main()
```
|
{
"source": "jgasthaus/libPLUMP",
"score": 2
}
|
#### File: libPLUMP/examples/expected_tables.py
```python
import libplump
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import math
import time
from collections import defaultdict
from antaresia.nputils import *
from antaresia.plot import *
log_stirling_cache = {}
def log_stirling_cached(d,n,m):
d,n,m = (float(d), int(n), int(m))
if (d,n,m) in log_stirling_cache:
return log_stirling_cache[(d,n,m)]
else:
r = libplump.log_gen_stirling_direct(d,n,m)
log_stirling_cache[(d,n,m)] = r
return r
# stratified resampling of Carpenter et al.
def strat_res(w,N):
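    # Stratified/systematic resampling (Carpenter et al.): divide the total
    # weight into N strata of size K = sum(w)/N and pick one particle per
    # stratum; returns the list of selected particle indices (note that this
    # for-loop variant keeps each index at most once).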
K = np.sum(w/N)
U = np.random.rand()*K
out = []
for i in range(len(w)):
U = U - w[i]
if U < 0:
out.append(i)
U = U + K
return out
# compute the optimal threshold for resampling as in Fearnhead
def opt_thresh(w, N):
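    # Fearnhead & Clifford's optimal resampling threshold: find c such that
    # sum_i min(w_i/c, 1) = N; particles with w_i >= c are kept exactly and
    # the remaining ones are resampled to weight c.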
w_sort = np.sort(w)
o = np.ones((2,len(w)))
j = -1
for i in range(len(w)):
o[1,:] = w_sort/w_sort[i]
m = np.sum(np.min(o,0))
if m <= N:
j = i
break
if j == -1:
print "warning, no such j found!"
kappa = w_sort[j]
print kappa
Ak = np.sum(w_sort >= kappa)
print Ak
Bk = np.sum(w_sort[w_sort < kappa])
return Bk/(N - Ak)
# perform Fearnhead's threshold resampling
def threshold_resampling(w, N):
outweights = np.zeros(w.shape)
c = opt_thresh(w,N)
keep = w > c
M = np.sum(keep)
print M
outweights[keep] = w[keep]
otherweights = w[w <= c]
otherweights /= np.sum(otherweights)
others = strat_res(otherweights,(N-M))
idx = np.arange(len(w))[w <= c][others]
outweights[idx] = c
return outweights
# compute the log posterior probability of tw tables, up to
# an additive constant
def log_post_t(a,d,cw,tw,T,p0):
# see eq. 10 in "Improvements ..." paper
return (libplump.logKramp(a + d, d, float(T -1)) +
log_stirling_cached(d, cw, tw) +
tw*math.log(p0))
def log_post_ts(a,d,cws,tws,p0s):
# see eq. 10 in "Improvements ..." paper
return (libplump.logKramp(a + d, d, float(np.sum(tws)) - 1) +
np.sum([log_stirling_cached(d,cws[i],tws[i]) for i in range(len(cws))]) +
np.sum([tws[i]*math.log(p0s[i]) for i in range(len(cws))])
)
def logcrp(a,d,c,t):
return (libplump.logKramp(a + d, d, float(t -1)) -
libplump.logKramp(a + 1, 1, float(c-1)) +
log_stirling_cached(d, c, t))
def prior_et(a,d,c):
return (np.exp( libplump.logKramp(a + d, 1, c)
- np.log(d)
- libplump.logKramp(a + 1, 1, c - 1))
- a/d)
def log_post_hierarchical_ts(a,d,cw1,tw1,tw0,p0):
"""Posterior distribution over the number of tables in a two-level hierarchy,
where cw1/tw1 are the number of customer at the bottom level, tw0
are the tables at the top level and p0 is the base distribution."""
return ( logcrp(a,d,cw1,tw1)
+ logcrp(a,d,tw1,tw0)
+ tw0*math.log(p0))
def post_hierarchical(a,d,N,p0):
res = np.ones((N,N))*-np.inf
for tw1 in range(N):
for tw0 in range(tw1+1):
res[tw1,tw0] = log_post_hierarchical_ts(a,d,N,tw1+1,tw0+1,p0)
res -= np.max(res)
res = np.exp(res)
res /= np.sum(res)
return res
def post_hierarchical_three(a,d,N,p0):
res = np.ones((N,N,N))*-np.inf
for tw2 in range(N):
for tw1 in range(tw2+1):
for tw0 in range(tw1+1):
res[tw2,tw1,tw0] = ( logcrp(a,d,N,tw2 + 1)
+ logcrp(a,d,tw2 + 1,tw1 + 1)
+ logcrp(a,d,tw1 + 1, tw0 + 1)
+ (tw0 + 1)*math.log(p0))
res -= np.max(res)
res = np.exp(res)
res /= np.sum(res)
return res
def expect_hierarchical_three(a, d, N, p0):
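    # marginal posterior means E[tw0], E[tw1], E[tw2]: sum the (tw2,tw1,tw0)
    # joint over the other two axes, then dot with the table counts 1..N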
etw0 = np.sum(np.arange(1,N+1)*np.sum(np.sum(post_hierarchical_three(a, d, N, p0), 0), 0))
etw1 = np.sum(np.arange(1,N+1)*np.sum(np.sum(post_hierarchical_three(a, d, N, p0), 2), 0))
etw2 = np.sum(np.arange(1,N+1)*np.sum(np.sum(post_hierarchical_three(a, d, N, p0), 2), 1))
return etw2, etw1, etw0
def max_hierarchical_three(a, d, N, p0):
post = post_hierarchical_three(a, d, N, p0)
global_idx = np.unravel_index(np.argmax(post), post.shape)
m0 = np.argmax(np.sum(np.sum(post, 0), 0)) + 1
m1 = np.argmax(np.sum(np.sum(post, 2), 0)) + 1
m2 = np.argmax(np.sum(np.sum(post, 2), 1)) + 1
c2 = np.argmax(post[:, global_idx[1], global_idx[2]])
c1 = np.argmax(post[c2, :, global_idx[2]])
c0 = np.argmax(post[c2, c1, :])
return (m2, m1, m0), np.array(global_idx) + 1, (c2 + 1, c1 + 1, c0 + 1)
def joint_hierarchical(a,d,N,p0):
res = np.ones((N,N))*-np.inf
for tw1 in range(N):
for tw0 in range(tw1+1):
res[tw1,tw0] = (logcrp(a,d,N,tw1+1) + logcrp(a,d,tw1+1,tw0+1))
res = np.exp(res)
return res
def hierarchical_pf(a,d,N,p0,num_particles=1000, resample=False):
X = np.zeros((2,num_particles, N))
W = np.zeros((num_particles, N))
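    # X[0,j,i] / X[1,j,i] hold the bottom-level (tw1) and top-level (tw0)
    # table counts of particle j after i+1 customers; W holds the particle weights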
X[:,:,0] = 1 # first customer at first table
W[:,0] = 1./num_particles # first customer at first table
et = np.zeros(N)
et[0] = 1
for i in range(1,N):
for j in range(num_particles):
cw1 = i
tw1 = X[0,j,i-1]
tw0 = X[1,j,i-1]
p00 = (tw1 - tw0*d + (a+tw0*d)*p0)/(tw1 + a) # predictive in top rest
p = (cw1 - tw1*d + (a+tw1*d)*p00)/(cw1 + a) # predictive in bottom rest
f1 = (a + d * tw1)*p00/((a + d*tw1)*p00 + cw1 - tw1*d) # new table prob bottom
f0 = (a + d * tw0)*p0/((a + d*tw0)*p0 + tw1 - tw0*d) # new table prob top
bottom_new_table = int(np.random.rand() < f1)
top_new_table = 0
if bottom_new_table:
top_new_table = int(np.random.rand() < f0)
X[0,j,i] = tw1 + bottom_new_table
X[1,j,i] = tw0 + top_new_table
W[j,i] = W[j,i-1] * p
W[:,i] /= np.sum(W[:,i])
if resample and 1./np.sum(np.square(W[:,i])) < 0.1*num_particles:
#idx = choice(W[:,i], num_particles)
idx = stratified_resampling(W[:,i])
idx = residual_resampling(W[:,i])
X[:,:,:] = X[:,idx,:]
W[:,i] = 1./num_particles
#et[i] = np.mean(np.dot(W[:,i], X[1,:,i]))
#return np.mean(np.dot(W[:,N-1], X[1,:,N-1])), et
return X, W
def evaluate_pf_result(X,W,at):
idx = np.sum(X[:,:,-1] == np.array(at)[:,None],0)==2
return np.sum(W[idx,-1])
def post_t(a, d, cw, other_T, p0):
"""Compute the posterior probability distribution over the
number of tables in a two parameter CRP, assuming you have observed
cw customers of a single type."""
r = np.array([log_post_t(a,d,cw,t, other_T + t, p0) for t in range(1,cw+1)])
r -= np.max(r)
r = np.exp(r)
r /= np.sum(r)
return r
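# e.g. (illustrative values): posterior over t_w for 10 customers of one type
#   post = post_t(1.0, 0.5, 10, 0, 0.5)
#   np.dot(np.arange(1, 11), post)  # posterior mean number of tables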
def pypPredictive(a,d,cwk,twk,cw,tw,p0):
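    # two-parameter CRP / Pitman-Yor predictive probability of type k given its
    # counts (c_wk, t_wk), the totals (c_w, t_w) and the base probability p0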
return (cwk - twk*d + (a+tw*d)*p0)/(cw + a)
def expected_num_tables_two(a,d,cws,p0s):
ps = np.zeros((cws[0], cws[1]))
# compute post. probs for each t
for i in range(cws[0]):
for j in range(cws[1]):
ps[i,j] = log_post_ts(a,d,cws,(i+1, j+1),p0s)
# normalize
ps = ps - np.max(ps)
ps = np.exp(ps)/np.sum(np.exp(ps))
marg_0 = np.sum(ps,1)
marg_1 = np.sum(ps,0)
return ps, (np.dot(range(1,cws[0]+1),marg_0), np.dot(range(1,cws[1]+1), marg_1))
def expected_num_tables(a,d,N,p0):
tables = range(1,N+1)
# compute post. probs for each t
ps = [log_post_t(a,d,N,t,t,p0) for t in tables]
# normalize
ps = np.array(ps)
ps = ps - np.max(ps)
ps = np.exp(ps)/np.sum(np.exp(ps))
return (ps, np.dot(ps, tables))
def choice(p, N):
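    # draw N indices from the (possibly unnormalized) discrete distribution p
    # by inverse-CDF sampling on the cumulative weights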
u = np.random.rand(N)
c = np.cumsum(p)
return c.searchsorted(u * c[-1])
def optimal_proposal(a, d, cwk, twk, tw, p0):
post = post_t(a,d,cwk,tw - twk, p0)
sample = choice(post,1)[0]
return (sample + 1, post[sample])
def collapsed_particle_num_tables(a, d, obs, p0s,
proposal=lambda a,d,cwk,twk,tw,p0: (1 + np.random.randint(cwk), 1./cwk), # uniform proposal
num_particles=1000, resample=False):
X = np.zeros((max(obs)+1,2,num_particles),dtype=np.int)
W = np.zeros(num_particles)
#X[obs[0],0,:] = 1 # first customer at first table
#
#X[obs[0],1,:] = 1 # first customer at first table
W[:] = 1./num_particles # first customer at first table
et = np.zeros(len(obs))
et[0] = 1
for i in range(len(obs)):
for j in range(num_particles):
k = obs[i]
cwk = X[k,0,j]
twk = X[k,1,j]
tw = np.sum(X[:,1,j])
(tw_proposal, proposal_prob) = proposal(a,d,cwk + 1, twk, tw, p0s[k])
assert(tw_proposal > 0 and tw_proposal <= cwk+1)
#print tw_proposal, proposal_prob
X[k,0,j] += 1 # seat customer
X[k,1,j] = tw_proposal
joint_new = np.exp(log_post_t(a,d, cwk + 1, tw_proposal, tw - twk + tw_proposal, p0s[k]))
joint_old = np.exp(log_post_t(a,d, cwk, twk, tw, p0s[k]))
W[j] *= (joint_new/(proposal_prob*joint_old))
#if resample and 1./np.sum(np.square(W[:,i])) < 20:
# W[:,i] /= np.sum(W[:,N-1])
# idx = choice(W[:,i], num_particles)
# X[:,:,:] = X[:,idx,:]
# W[:,i] = 1./num_particles
W[:] /= np.sum(W[:])
et[i] = np.dot(W, X[0,1,:])
return np.dot(W[:], X[:,1,:].T), et
# collapsed particle filter operating on the space of table counts
# this is effectively equivalent to doing straight importance sampling
# for the target distribution
def collapsed_particle_num_tables_single(a, d, N, p0,
proposal=lambda cw,tw: (1 + np.random.randint(cw), 1./cw), # uniform proposal
num_particles=1000, resample=False):
X = np.zeros((2,num_particles, N))
W = np.zeros((num_particles, N))
X[:,:,0] = 1 # first customer at first table
W[:,0] = 1./num_particles # first customer at first table
et = np.zeros(N)
et[0] = 1
for i in range(1,N):
for j in range(num_particles):
cw = X[0,j,i-1]
tw = X[1,j,i-1]
(tw_proposal, proposal_prob) = proposal(cw + 1, tw)
#print tw_proposal, proposal_prob
X[0,j,i] = X[0,j,i-1] + 1
X[1,j,i] = tw_proposal
joint_new = np.exp(log_post_t(a,d, cw + 1, tw_proposal, tw_proposal, p0))
joint_old = np.exp(log_post_t(a,d, cw, tw, tw, p0))
W[j,i] = W[j,i-1] * (joint_new/(proposal_prob*joint_old))
W[:,i] /= np.sum(W[:,i])
if resample and 1./np.sum(np.square(W[:,i])) < 20:
idx = choice(W[:,i], num_particles)
X[:,:,:] = X[:,idx,:]
W[:,i] = 1./num_particles
et[i] = np.mean(np.dot(W[:,i], X[1,:,i]))
W[:,N-1] /= np.sum(W[:,N-1])
return np.mean(np.dot(W[:,N-1], X[1,:,N-1])), et
def crp_particle_num_tables(a, d, N, p0, num_particles=1000, resample=False):
X = np.zeros((2,num_particles, N))
W = np.zeros((num_particles, N))
X[:,:,0] = 1 # first customer at first table
W[:,0] = 1./num_particles # first customer at first table
et = np.zeros(N)
et[0] = 1
for i in range(1,N):
for j in range(num_particles):
cw = X[0,j,i-1]
tw = X[1,j,i-1]
f = (a + d * tw)*p0/((a + d*tw)*p0 + cw - tw*d)
p = (cw - tw*d + (a+tw*d)*p0)/(cw + a)
X[0,j,i] = X[0,j,i-1] + 1
X[1,j,i] = X[1,j,i-1] + int(np.random.rand() < f)
W[j,i] = W[j,i-1] * p
W[:,i] /= np.sum(W[:,i])
if resample and 1./np.sum(np.square(W[:,i])) < 0.1*num_particles:
#idx = choice(W[:,i], num_particles)
idx = stratified_resampling(W[:,i])
idx = residual_resampling(W[:,i])
X[:,:,:] = X[:,idx,:]
W[:,i] = 1./num_particles
et[i] = np.mean(np.dot(W[:,i], X[1,:,i]))
return np.mean(np.dot(W[:,N-1], X[1,:,N-1])), et
def full_crp_particle_num_tables(a, d, N, p0, num_particles=1000, resample=False):
X = np.zeros((N,num_particles, N)) # table_id, particles, input pos
W = np.zeros((num_particles, N))
X[0,:,0] = 1 # first customer at first table
W[:,0] = 1./num_particles # first customer at first table
et = np.zeros(N)
et[0] = 1
for i in range(1,N):
X[:,:,i] = X[:,:,i-1]
for j in range(num_particles):
tables = X[:,j,i]
num_tables = np.sum(tables > 0)
probs = np.zeros(num_tables + 1)
probs[0:num_tables] = tables[:num_tables] - d
probs[-1] = (a + d*num_tables)*p0
sample = choice(probs, 1)[0]
p = (np.sum(tables) - num_tables*d + (a+num_tables*d)*p0)/(np.sum(tables) + a)
X[sample,j,i] += 1
W[j,i] = W[j,i-1] * p
W[:,i] /= np.sum(W[:,i])
if resample and 1./np.sum(np.square(W[:,i])) < 0.1*num_particles:
#idx = choice(W[:,i], num_particles)
idx = stratified_resampling(W[:,i])
idx = residual_resampling(W[:,i])
X[:,:,:] = X[:,idx,:]
W[:,i] = 1./num_particles
et[i] = np.mean(np.dot(W[:,i], np.sum(X[:,:,i]>0,0)))
return np.mean(np.dot(W[:,N-1], np.sum(X[:,:,N-1]>0,0))), et
def crp_particle_num_tables_uniform_proposal(a, d, N, p0, num_particles=1000, resample=False):
X = np.zeros((2,num_particles, N))
W = np.zeros((num_particles, N))
X[:,:,0] = 1 # first customer at first table
W[:,0] = 1./num_particles # first customer at first table
et = np.zeros(N)
et[0] = 1
for i in range(1,N):
for j in range(num_particles):
cw = X[0,j,i-1]
tw = X[1,j,i-1]
new_table = int(np.random.rand() < 0.5)
X[0,j,i] = X[0,j,i-1] + 1
X[1,j,i] = X[1,j,i-1] + new_table
if new_table:
inc_weight = p0 * (a + d*tw)
else:
inc_weight = cw - tw*d
W[j,i] = W[j,i-1] * inc_weight/0.5
W[:,i] /= np.sum(W[:,i])
if resample: # and 1./np.sum(np.square(W[:,i])) < 1000:
idx = choice(W[:,i], num_particles)
X[:,:,:] = X[:,idx,:]
W[:,i] = 1./num_particles
print X, W
et[i] = np.mean(np.dot(W[:,i], X[1,:,i]))
return np.mean(np.dot(W[:,N-1], X[1,:,N-1])), et
def crp_particle_num_tables_enumerate(a, d, N, p0, num_particles=1000, merge = True, sample=True):
particles = [1]
weights = [1.]
et = [1]
for i in range(1,N):
new_particles = []
new_weights = []
for j in range(len(particles)):
cw = i
tw = particles[j]
f = (a + d * tw)*p0/((a + d*tw)*p0 + cw - tw*d)
p = (cw - tw*d + (a+tw*d)*p0)/(cw + a)
w = weights[j]
new_particles.extend([tw, tw+1])
new_weights.extend([w*p*(1-f), w*p*f])
#new_weights.extend([w, w*p0])
weights = np.array(new_weights)
weights /= np.sum(weights)
if merge:
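            # merge particles that share the same table count by summing
            # their weights (np.bincount accumulates weight per count value)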
particles=np.array(new_particles)
u = np.unique(particles)
weights = np.bincount(particles, weights)[u]
particles=u
else:
particles = new_particles
#print particles, weights
if len(particles) > num_particles:
if sample:
newweights = threshold_resampling(weights, num_particles)
idx = newweights > 0
print np.sum(idx)
particles = particles[idx]
weights = newweights[idx]
print particles.shape, weights.shape
else:
idx = np.argsort(weights)[-num_particles:]
weights = weights[idx]
weights /= np.sum(weights)
particles = [particles[k] for k in idx]
et.append(np.dot(weights, particles))
particles = np.array(particles)
return np.dot(weights, particles), et
def crp_fractional_num_tables(a, d, N, p0):
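    # "fractional" approximation: instead of sampling the binary new-table
    # indicator, deterministically add its expected value f to a now
    # real-valued table count at every step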
cw = 0
tw = 0
for i in range(N):
f = (a + d * tw)*p0/((a + d*tw)*p0 + cw - tw*d)
tw += f
cw += 1
return tw
def crp_fractional_hierarchical_two(a, d, N, p0):
cw1 = 0 # bottom
tw1 = 0
tw0 = 0 # top
for i in range(N):
p1 = (tw1 - d*tw0 + (a + d*tw0)*p0)/(tw1 + a)
f1 = (a + d * tw1)*p1/((a + d*tw1)*p1 + cw1 - tw1*d)
f0 = (a + d * tw0)*p0/((a + d*tw0)*p0 + tw1 - tw0*d) * f1
tw1 += f1
tw0 += f0
cw1 += 1
return tw1, tw0
def crp_fractional_hierarchical_three(a, d, N, p0):
"""Fractional approximate inference in a three-level
hierarchy."""
cw2 = 0 # bottom
tw2 = 0
tw1 = 0 # middle
tw0 = 0 # top
for i in range(N):
p1 = (tw1 - d*tw0 + (a + d*tw0)*p0)/(tw1 + a)
p2 = (tw2 - d*tw1 + (a + d*tw1)*p1)/(tw2 + a)
f2 = (a + d * tw2)*p2/((a + d*tw2)*p2 + cw2 - tw2*d)
f1 = (a + d * tw1)*p1/((a + d*tw1)*p1 + tw2 - tw1*d) * f2
f0 = (a + d * tw0)*p0/((a + d*tw0)*p0 + tw1 - tw0*d) * f1
tw2 += f2
tw1 += f1
tw0 += f0
cw2 += 1
return tw2, tw1, tw0
def crp_optimize_hierarchical_three(a, d, N, p0):
"""Iterative local optimization"""
cw2 = 0 # bottom
tw2 = 0
tw1 = 0 # middle
tw0 = 0 # top
for i in range(N):
cw2 += 1
for j in range(2):
p1 = (tw1 - d*tw0 + (a + d*tw0)*p0)/(tw1 + a)
p2 = (tw2 - d*tw1 + (a + d*tw1)*p1)/(tw2 + a)
post2 = post_t(a, d, cw2, 0, p2)
tw2 = np.argmax(post2) + 1
post1 = post_t(a, d, tw2, 0, p1)
tw1 = np.argmax(post1) + 1
post0 = post_t(a,d, tw1, 0, p0)
tw0 = np.argmax(post0) + 1
return tw2, tw1, tw0
def crp_optimize_hierarchical_alt_three(a, d, N, p0):
"""Iterative local optimization"""
cw2 = 0 # bottom
tw2 = 0
tw1 = 0 # middle
tw0 = 0 # top
for i in range(N):
cw2 += 1
for j in range(10):
p1 = (tw1 - d*tw0 + (a + d*tw0)*p0)/(tw1 + a)
p2 = (tw2 - d*tw1 + (a + d*tw1)*p1)/(tw2 + a)
post2 = post_t(a, d, cw2, 0, p2)
tw2 = np.argmax(post2) + 1
tw1 = tw2
tw0 = tw2
return tw2, tw1, tw0
def crp_optimize_hierarchical_pairs_three(a, d, N, p0):
"""Iterative local optimization"""
cw2 = 0 # bottom
tw2 = 0
tw1 = 0 # middle
tw0 = 0 # top
for i in range(N):
cw2 += 1
for j in range(1):
p1 = (tw1 - d*tw0 + (a + d*tw0)*p0)/(tw1 + a)
p2 = (tw2 - d*tw1 + (a + d*tw1)*p1)/(tw2 + a)
post1 = post_hierarchical(a, d, cw2, p1)
tw2, tw1 = np.array(np.unravel_index(np.argmax(post1), post1.shape)) + 1
post2 = post_hierarchical(a, d, tw2, p0)
tw1, tw0 = np.array(np.unravel_index(np.argmax(post2), post2.shape)) + 1
return tw2, tw1, tw0
def post_direct(a, d, cw1, tw0):
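    # unnormalized log-posterior over the bottom-level table count tw1 = i+1,
    # given cw1 customers at the bottom and tw0 tables at the top restaurant
    # (terms that do not depend on tw1 are dropped); used as the Gibbs
    # conditional in crp_sample_hierarchical_direct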
out = np.zeros(cw1)
for i in range(cw1):
out[i] = ( libplump.logKramp(a + d, d, i)
- libplump.logKramp(a + 1, 1, i)
+ log_stirling_cached(d, cw1, i + 1)
+ log_stirling_cached(d, i + 1, tw0))
out -= np.max(out)
out = np.exp(out)
out /= np.sum(out)
return out
def crp_sample_hierarchical_direct(a, d, N, p0, num_samples, initPf=True):
"""Direct gibbs sampler for two layer hierarchy"""
cw1 = 0 # bottom
tw1 = 0 # bottom
tw0 = 0 # top
res = []
if initPf:
for i in range(N):
p1 = (tw1 - d*tw0 + (a + d*tw0)*p0)/(tw1 + a)
f1 = (a + d * tw1)*p1/((a + d*tw1)*p1 + cw1 - tw1*d)
f0 = (a + d * tw0)*p0/((a + d*tw0)*p0 + tw1 - tw0*d)
if (np.random.rand() < f1):
tw1 +=1
if (np.random.rand() < f0):
tw0 +=1
cw1 += 1
else:
for i in range(N):
cw1 += 1
p1 = (tw1 - d*tw0 + (a + d*tw0)*p0)/(tw1 + a)
post = post_direct(a, d, cw1, tw0)
tw1_new = (choice(post, 1) + 1)[0]
post = post_t(a, d, tw1_new, 0, p0)
tw0 = (choice(post, 1) + 1)[0]
tw1 = tw1_new
for j in range(num_samples):
p1 = (tw1 - d*tw0 + (a + d*tw0)*p0)/(tw1 + a)
p2 = (cw1 - d*tw1 + (a + d*tw1)*p1)/(cw1 + a)
res.append((p2, tw0, tw1))
post = post_direct(a, d, cw1, tw0)
tw1 = (choice(post, 1) + 1)[0]
post = post_t(a, d, tw1, 0, p0)
tw0 = (choice(post, 1) + 1)[0]
return res
def crp_fractional_num_tables_p0s(a, d, customers, p0dict):
cw = defaultdict(lambda: 0)
tw = defaultdict(lambda: 0)
c = 0
t = 0
for c in customers:
f = (a + d * t)*p0dict[c]/((a + d*t)*p0dict[c] + cw[c] - tw[c]*d)
tw[c] += f
t += f
cw[c] += 1
c += 1
return tw
def plot_posterior_tables_vary_d(a,ds,N,p0):
i = 1
fig = plt.figure()
for d in ds:
post, et = expected_num_tables(a,float(d),N,p0)
ax = fig.add_subplot(len(ds)/3, 3, i)
ax.bar(range(1,N+1), post, color = "black")
ax.axis((1,N,0,0.5))
ax.grid();
ax.set_title("$d = %.1f$, $E[t_w] = %.2f$" %(d, et))
i += 1
fig.suptitle("Posterior distribution of $t_s$; $c_s = %d$, $\\alpha=%.1f$, $H(s)=%.1f$" % (N,a, p0))
fig.set_size_inches(8,9)
return fig, ax
def plot_posterior_tables_vary_p(a,d,N,p0s):
i = 1
fig = plt.figure()
for p0 in p0s:
post, et = expected_num_tables(a,float(d),N,p0)
ax = fig.add_subplot(len(p0s)/3, 3, i)
ax.bar(range(1,N+1), post, color = "black")
ax.axis((1,N,0,0.8))
ax.grid();
ax.set_title("$H(s) = %.1f$, $E[t_w] = %.2f$" %(p0, et))
i += 1
fig.suptitle("Posterior distribution of $t_s$; $c_s = %d$, $\\alpha=%.1f$, $d=%.1f$" % (N,a, d))
fig.set_size_inches(8,9)
return fig, ax
def plot_posterior_tables_ds_ps(a,N,ds=np.arange(0.0,1,0.1),p0s=np.arange(0.2,1.1,0.1)):
i = 1
cm = plt.cm.spectral
for p0 in p0s:
plt.subplot(len(ds)/3, 3, i)
plt.gca().set_color_cycle([cm(k) for k in np.linspace(0, 1, 11)])
for d in ds:
post, et = expected_num_tables(a,float(d),N,float(p0))
plt.plot(range(1,N+1), post)
plt.gca().set_color_cycle([cm(k) for k in np.linspace(0, 1, 11)])
for d in ds:
post, et = expected_num_tables(a,float(d),N,float(p0))
plt.plot([et],[d],'x')
plt.axis((1,N,0,1))
plt.grid();
plt.title("H(s) = %.2f" %(p0,))
i += 1
plt.gcf().suptitle("Posterior distribution of $t_s$; $c_s = %d$, $\\alpha=%.1f$" % (N,a))
def plot_customers_vs_expected_tables_discounts(a, p0, max_customers, ds=np.arange(0.1,1,0.2), legend_loc=2, plot_frac=True, plot_particles=0, particle_filter=crp_particle_num_tables_enumerate):
customers = range(1,max_customers + 1)
et_exact = [[expected_num_tables(a,d,c,p0)[1] for c in customers] for d in ds]
et_exact = np.array(et_exact).T
et_approx = [[crp_fractional_num_tables(a,d,c,p0) for c in customers] for d in ds]
et_approx = np.array(et_approx).T
#plt.plot(customers, et_exact, customers, et_approx, "--")
plt.plot(customers, et_exact)
if plot_frac:
plt.gca().set_color_cycle(mpl.rcParams['axes.color_cycle'])
plt.plot(customers, et_approx, '--')
if plot_particles > 0:
plt.gca().set_color_cycle(mpl.rcParams['axes.color_cycle'])
#plt.plot(customers, np.array([[crp_particle_num_tables_enumerate(a,d,c,p0,plot_particles,False) for c in customers] for d in ds]).T, '-.')
plt.plot(customers, np.array([[particle_filter(a,d,c,p0,num_particles=plot_particles) for c in customers] for d in ds]).T, '-.')
plt.legend(["$d=%.1f$"%(d,) for d in ds], loc=legend_loc)
plt.xlabel("$c_w$")
plt.ylabel("$E[t_w]$")
plt.title("Posterior $E[t_w]$, $\\alpha=%.1f$, $p_0=%.2f$" % (a, p0))
plt.grid()
def plot_tables_posterior_two(a, d, cs, p0s):
plt.imshow(expected_num_tables_two(a,d,cs,p0s)[0], extent=(1,cs[0],1,cs[1]),
interpolation="none", origin="lower")
def plot_tables_posterior_two_grid(a,d,cs):
ps = np.arange(0.1,1,0.1)
for i in range(len(ps)):
plt.subplot(3,3,i+1)
plot_tables_posterior_two(a,d,cs, [ps[i], 1-ps[i]])
plt.title("p = %f"%(ps[i],))
def plot_posterior_components(a,N, ds=np.arange(0.1,1,0.2), ps=np.arange(0.1,1,0.2)):
fig = plt.figure()
ax = fig.add_subplot(111)
# plot stirling numbers
ps = []
for d in ds:
p1, = ax.plot(range(1,N+1), [log_stirling_cached(d,N,i) for i in range(1,N+1)], ':')
# plot Kramp's symbol
ax.set_color_cycle(mpl.rcParams['axes.color_cycle'])
for d in ds:
p2, = ax.plot(range(1,N+1), [libplump.logKramp(a + d, d, i - 1) for i in range(1,N+1)],'--')
# plot sum of the two
ax.set_color_cycle(mpl.rcParams['axes.color_cycle'])
for d in ds:
p3, = ax.plot(range(1,N+1), [libplump.logKramp(a + d, d, i - 1) + log_stirling_cached(d,N,i) for i in range(1,N+1)])
ps.append(p3)
#plt.gca().set_color_cycle(mpl.rcParams['axes.color_cycle'])
#for p in ps:
# plt.plot(range(1,N+1), [np.log(p)*i for i in range(1,N+1)],'.')
#plt.gca().set_color_cycle(mpl.rcParams['axes.color_cycle'])
# for p in ps:
# plt.plot(range(1,N+1), [np.log(p)*i for i in range(1,N+1)],'.')
ax.set_xlabel("Number of tables")
ax.set_title("Log-Posterior Parts, $\\alpha = %.1f$"%(a,))
l1 = ax.legend([p1, p2, p3],
[ "$\log S_d(c, t)$",
"$\log [\\alpha + d]_d^{t-1}$",
"$\log S_d(c, t) + \log [\\alpha + d]_d^{t-1}$"
], loc = 6)
ax.legend(ps, ["$d = %.1f$"%(d,) for d in ds], loc = 8,
ncol=len(ds) , borderaxespad=0.)
ax.add_artist(l1)
ax.grid(True)
return fig, ax
def get_errorbars(fun, K=10):
runs = []
for i in range(K):
runs.append(fun())
data = np.array(runs)
m = np.mean(data,0)
sd = np.std(data,0)
return m, sd
def plot_crp_fractional(a, d, N, p0):
plt.plot(range(1,N+1),[expected_num_tables(a, d,i, p0)[1] for i in range(1, N + 1)],'k',linewidth=2)
plt.plot(range(1,N+1),[crp_fractional_num_tables(a,d,c,p0) for c in range(1,N+1)],'b--',linewidth=2)
plt.grid()
plt.title(r"Posterior Expected Number of Tables, $\alpha=%.1f$, $d=%.1f$, $H(s)=%.1f$"%(a,d,p0))
plt.xlabel("# customers of type $s$")
plt.ylabel('$E[t_s]$')
plt.xlim(1,N)
def plot_crp_fractional_hier_two(a, d, N, p0):
plt.plot(range(1,N+1),[np.sum(np.arange(1,i+1)*np.sum(post_hierarchical(a, d, i, p0), 0)) for i in range(1, N + 1)],'k',linewidth=2)
plt.plot(range(1,N+1),[crp_fractional_hierarchical_two(a,d,c,p0)[1] for c in range(1,N+1)],'k--',linewidth=2)
plt.plot(range(1,N+1),[np.sum(np.arange(1,i+1)*np.sum(post_hierarchical(a, d, i, p0), 1)) for i in range(1, N + 1)],'r',linewidth=2)
plt.plot(range(1,N+1),[crp_fractional_hierarchical_two(a,d,c,p0)[0] for c in range(1,N+1)],'r--',linewidth=2)
plt.grid()
plt.title(r"Posterior Expected Number of Tables, $\alpha=%.1f$, $d=%.1f$, $H(s)=%.1f$"%(a,d,p0))
plt.xlabel("# customers of type $s$")
plt.ylabel('$E[t_s]$')
plt.xlim(1,N)
def plot_crp_fractional_hier_three(a, d, N, p0):
plt.plot(range(1,N+1),[np.sum(np.arange(1,i+1)*np.sum(np.sum(post_hierarchical_three(a, d, i, p0), 0), 0)) for i in range(1, N + 1)],'k',linewidth=2)
plt.plot(range(1,N+1),[crp_fractional_hierarchical_three(a,d,c,p0)[2] for c in range(1,N+1)],'k--',linewidth=2, label="_nolegend_")
plt.plot(range(1,N+1),[np.sum(np.arange(1,i+1)*np.sum(np.sum(post_hierarchical_three(a, d, i, p0), 2), 0)) for i in range(1, N + 1)],'r',linewidth=2)
plt.plot(range(1,N+1),[crp_fractional_hierarchical_three(a,d,c,p0)[1] for c in range(1,N+1)],'r--',linewidth=2, label="_nolegend_")
plt.plot(range(1,N+1),[np.sum(np.arange(1,i+1)*np.sum(np.sum(post_hierarchical_three(a, d, i, p0), 2), 1)) for i in range(1, N + 1)],'b',linewidth=2)
plt.plot(range(1,N+1),[crp_fractional_hierarchical_three(a,d,c,p0)[0] for c in range(1,N+1)],'b--',linewidth=2, label="_nolegend_")
plt.grid()
plt.title(r"Marginal Posterior $\mathrm{E}[t_s]$, $\alpha=%.1f$, $d=%.1f$, $H(s)=%.1f$"%(a,d,p0))
plt.xlabel("# customers of type $s$")
plt.ylabel('$E[t_s]$')
plt.xlim(1,N)
plt.legend(["top", "middle", "bottom"], "upper left")
def plot_crp_fractional_p0s(a, d, N):
p0 = 0.1
plt.plot(range(1,N+1),[expected_num_tables(a, d,i, p0)[1] for i in range(1, N + 1)],'k',linewidth=2)
plt.plot(range(1,N+1),[crp_fractional_num_tables(a,d,c,p0) for c in range(1,N+1)],'k--',linewidth=2, label="_nolegend_")
p0 = 0.4
plt.plot(range(1,N+1),[expected_num_tables(a, d,i, p0)[1] for i in range(1, N + 1)],'r',linewidth=2)
plt.plot(range(1,N+1),[crp_fractional_num_tables(a,d,c,p0) for c in range(1,N+1)],'r--',linewidth=2, label="_nolegend_")
p0 = 0.7
plt.plot(range(1,N+1),[expected_num_tables(a, d,i, p0)[1] for i in range(1, N + 1)],'b',linewidth=2)
plt.plot(range(1,N+1),[crp_fractional_num_tables(a,d,c,p0) for c in range(1,N+1)],'b--',linewidth=2, label="_nolegend_")
p0 = 0.95
plt.plot(range(1,N+1),[expected_num_tables(a, d,i, p0)[1] for i in range(1, N + 1)],'g',linewidth=2)
plt.plot(range(1,N+1),[crp_fractional_num_tables(a,d,c,p0) for c in range(1,N+1)],'g--',linewidth=2, label="_nolegend_")
plt.grid()
plt.title(r"Posterior Expected Number of Tables, $\alpha=%.1f$, $d=%.1f$"%(a,d))
plt.xlabel("# customers of type $s$")
plt.ylabel('$E[t_s]$')
plt.xlim(1,N)
plt.legend(["H(s) = 0.1",
"H(s) = 0.4",
"H(s) = 0.7",
"H(s) = 0.95"], "upper left")
def plot_maximization(a, d, N, p0):
m2s = []
m1s = []
m0s = []
ml2s = []
ml1s = []
ml0s = []
ma2s = []
ma1s = []
ma0s = []
g2s = []
g1s = []
g0s = []
for i in range(1, N+1):
ml2, ml1, ml0 = crp_optimize_hierarchical_three(a, d, i, p0)
m2, m1, m0 = crp_optimize_hierarchical_pairs_three(a, d, i, p0)
m2s.append(m2)
m1s.append(m1)
m0s.append(m0)
ml2s.append(ml2)
ml1s.append(ml1)
ml0s.append(ml0)
margmax, globmax, _ = max_hierarchical_three(a, d, i, p0)
ma2s.append(margmax[0])
ma1s.append(margmax[1])
ma0s.append(margmax[2])
g2s.append(globmax[0])
g1s.append(globmax[1])
g0s.append(globmax[2])
print g0s
plt.plot(range(1, N + 1), m2s, "r", alpha=0.7, linewidth=2)
plt.plot(range(1, N + 1), m1s, "r--", label="_nolegend_", linewidth=2, alpha=0.7)
plt.plot(range(1, N + 1), m0s, "r-.", label="_nolegend_", linewidth=2, alpha=0.7)
plt.plot(range(1, N + 1), ml2s, "g", alpha=0.7, linewidth=2)
plt.plot(range(1, N + 1), ml1s, "g--", label="_nolegend_", linewidth=2, alpha=0.7)
plt.plot(range(1, N + 1), ml0s, "g-.", label="_nolegend_", linewidth=2, alpha=0.7)
# plt.plot(range(1, N + 1), ma2s,"b", linewidth=2)
# plt.plot(range(1, N + 1), ma1s,"b--", label="_nolegend_", linewidth=2)
# plt.plot(range(1, N + 1), ma0s,"b-.", label="_nolegend_", linewidth=2)
plt.plot(range(1, N + 1), g2s, "k", linewidth=2, alpha=0.7)
plt.plot(range(1, N + 1), g1s, "k--", label="_nolegend_", linewidth=2, alpha=0.7)
plt.plot(range(1, N + 1), g0s, "k-.", label="_nolegend_", linewidth=2, alpha=0.7)
plt.grid()
plt.title(r"Posterior Number of Tables, $\alpha=%.1f$, $d=%.1f$, $H(s)=%.1f$"%(a,d,p0))
plt.xlabel("# customers of type $s$")
plt.ylabel('$t_s$')
plt.xlim(1,N)
plt.legend(["pairwise", "local", "global"], "upper left")
def plot_crp_particle_filters(a, d, N, p0, K=10):
#m1, s1 = get_errorbars(lambda: full_crp_particle_num_tables(a,d,N,p0,100)[1],K)
#plt.errorbar(np.arange(1,N+1)-0.3, m1, s1,fmt='b',alpha=0.5)
m1, s1 = get_errorbars(lambda: crp_particle_num_tables(a,d,N,p0,1)[1], K)
plt.errorbar(np.arange(1,N+1), m1, s1,fmt='b',alpha=0.5)
m2, s2 = get_errorbars(lambda: crp_particle_num_tables(a,d,N,p0,100)[1], K)
plt.errorbar(np.arange(1,N+1), m2, s2,fmt='r',alpha=0.5)
m3, s3 = get_errorbars(lambda: crp_particle_num_tables_enumerate(a,d,N,p0,20)[1], K)
plt.errorbar(np.arange(1,N+1)+0.3, m3, s3,fmt='g',alpha=0.5)
plt.plot(range(1,N+1),crp_particle_num_tables_enumerate(a,d,N,p0, 20, sample=False)[1],'c',linewidth=2)
plt.plot(range(1,N+1),[expected_num_tables(a, d,i+1, p0)[1] for i in range(N)],'k',linewidth=2)
plt.plot(range(1, N+1), [crp_fractional_num_tables(a,d,c,p0) for c in range(1,N+1)],'b--',linewidth=2)
plt.grid()
plt.title(r"Posterior Expected Number of Tables, $\alpha=%.1f$, $d=%.1f$, $H(s)=%.1f$"%(a,d,p0))
plt.xlabel("# customers of type $s$")
plt.ylabel('$E[t_s]$')
plt.xlim(1,N)
def plot_crp_particle_filter_variance(a, d, N, p0, num_particles_list, K=10):
m1, s1 = get_errorbars(lambda: [crp_particle_num_tables(a,d,N,p0,i)[1][-1] for i in num_particles_list],K)
plt.errorbar(num_particles_list, m1, s1,fmt='b',alpha=0.5)
m2, s2 = get_errorbars(lambda: [crp_particle_num_tables_enumerate(a,d,N,p0,i)[1][-1] for i in num_particles_list], K)
plt.errorbar(num_particles_list, m2, s2,fmt='r',alpha=0.5)
plt.hlines(expected_num_tables(a,d,N,p0)[1],num_particles_list[0], num_particles_list[-1])
plt.grid()
plt.title(r"SMC Estimate of $E[t_s]$, $c_s = %d$, $\alpha=%.1f$, $d=%.1f$, $H(s)=%.1f$"%(N,a,d,p0))
plt.xlabel("# particles")
plt.ylabel('$E[t_s]$')
def plot_enumerate_particle_filters(a, d, N, p0, num_particles=100):
#for i in range(20): plt.plot(crp_particle_num_tables_enumerate(a,d,N,p0,100)[1],'g',alpha=0.5)
before = time.clock()
plt.plot(crp_particle_num_tables_enumerate(a,d,N,p0,num_particles,sample=False)[1],'b',alpha=0.5)
plt.plot(crp_particle_num_tables_enumerate(a,d,N,p0,num_particles,sample=True)[1],'r',alpha=0.5)
plt.plot([crp_fractional_num_tables(a,d,c,p0) for c in range(1,N+1)],'g--')
after = time.clock()
print "Elapsed time", after - before
#for i in range(20): plt.plot(crp_particle_num_tables_enumerate(a,d,N,p0,100, merge=False, sample=False)[1],'r',alpha=0.5)
before = time.clock()
plt.plot([expected_num_tables(a, d,i+1, p0)[1] for i in range(N)],'k',linewidth=2)
after = time.clock()
print "Elapsed time", after - before
plt.grid()
plt.title(r"Posterior Expected Number of Tables, $\alpha=%.1f$, $d=%.1f$, $H(s)=%.1f$"%(a,d,p0))
plt.xlabel("# customers of type $s$")
plt.ylabel('# tables')
def main():
"""Create various plots and save them."""
fig, ax = plot_posterior_components(1., 100)
fig.set_size_inches(10, 5)
save_figure(fig, "plot.pdf")
if __name__ == "__main__":
pass
#main()
```
#### File: examples/experiments_nips2010/ll_experiment_worker.py
```python
from optparse import OptionParser
import sys
import numpy as np
import libplump as lp
DISCOUNTS = [.62, .69, .74, .80, .95]
NUM_TYPES = 16383
BURN_IN_SAMPLES = 50
PREDICT_SAMPLES = 50
def run(options):
restaurant = lp.ReinstantiatingCompactRestaurant()
nodeManager = lp.SimpleNodeManager(restaurant.getFactory())
discounts = lp.VectorDouble(DISCOUNTS)
parameters = lp.SimpleParameters(discounts, options.alpha)
seq = lp.VectorInt()
lp.pushIntFileToVec(options.train_file, seq)
print >> sys.stderr, "Train seq length: %i" % (seq.size(),)
# initialize model
model = lp.HPYPModel(seq, nodeManager, restaurant, parameters, NUM_TYPES)
#insert training observations into model using particle filter
model.computeLosses(0, seq.size())
# add test observations to underlying sequence
testOffset = seq.size()
lp.pushIntFileToVec(options.test_file, seq)
print >> sys.stderr, "Test seq length: %i" % (seq.size() - testOffset,)
if options.prediction == 2:
predictMode = lp.HPYPModel.BELOW
elif options.prediction == 1:
predictMode = lp.HPYPModel.FRAGMENT
else:
predictMode = lp.HPYPModel.ABOVE
if options.inference == 1:
for i in xrange(BURN_IN_SAMPLES):
print >> sys.stderr, "Burn in iteration %i" % (i,)
model.runGibbsSampler()
if options.prediction != 3:
loss = float(lp.prob2loss(model.predictSequence(testOffset, seq.size(), predictMode)))
else:
loss = float(np.mean(model.computeLosses(testOffset, seq.size())))
if options.inference == 2 and options.prediction != 3:
losses = np.zeros((PREDICT_SAMPLES, seq.size() - testOffset))
for i in xrange(BURN_IN_SAMPLES):
print >> sys.stderr, "Burn in iteration %i" % (i,)
model.runGibbsSampler()
for i in xrange(PREDICT_SAMPLES):
print >> sys.stderr, "Prediction iteration %i" % (i,)
model.runGibbsSampler()
losses[i,:] = model.predictSequence(testOffset, seq.size(), predictMode)
loss = float(np.mean(-np.log2(np.mean(losses,0))))
print loss
# make sure destructors are called in correct order
del model
del nodeManager
def main():
parser = OptionParser()
parser.add_option("--train-file",
dest = "train_file",
type = "string",
help = "File used for training")
parser.add_option("--test-file",
dest = "test_file",
type = "string",
help = "File used for testing")
parser.add_option("--alpha",
dest = "alpha",
type = "float",
help = "Concentration parameter",
default = 0.0)
parser.add_option("--inference",
dest = "inference",
type = "int",
help = "Inference mode: 0: particle filter, " + \
"1: 1-sample Gibbs, " + \
"2: 50-sample Gibbs")
parser.add_option("--prediction",
dest = "prediction",
type = "int",
help = "prediction mode: 0: above, " + \
"1: fragment, " + \
"2: below" + \
"3: particle filter")
(options, args) = parser.parse_args()
if options.train_file == None or options.test_file == None:
parser.print_help()
exit(1)
if options.inference == 2 and options.prediction == 3:
print "ERROR: Can't combine particle filter prediction with multiple samples!"
exit(1)
run(options)
if __name__ == "__main__":
main()
```
|
{
"source": "jgathogo/hpa",
"score": 2
}
|
#### File: hpa/cfp/models.py
```python
import datetime
from django.db import models
from django.utils import timezone
from django.urls import reverse
from autoslug import AutoSlugField
class Donor(models.Model):
name = models.CharField(max_length=200, unique=True)
abbrev = models.CharField(max_length=20, unique=True)
def __str__(self):
return self.abbrev
class Meta:
ordering = ('name',)
class Theme(models.Model):
theme = models.CharField(unique=True, max_length=50)
def __str__(self):
return self.theme
class Meta:
ordering = ('theme',)
class Zone(models.Model):
zone = models.CharField(unique=True, max_length=200)
def __str__(self):
return self.zone
class Meta:
ordering = ('zone',)
class Cfp(models.Model):
CURRENCY = (
('USD', 'US Dollars'),
('GBP', 'British Pound'),
('EUR', 'Euros'),
('KES', 'Kenya Shillings'),
('JPY', 'Japanese Yen'),
('CAD', 'Canadian Dollars'),
)
entered_at = models.DateTimeField(auto_now_add=True, editable=False)
donor = models.ForeignKey(Donor, on_delete=models.CASCADE)
title = models.CharField(
max_length=200, unique=True, verbose_name='Call for proposals title')
slug = AutoSlugField(max_length=255, null=True, editable=True, unique=True, populate_from='title')
link = models.URLField(verbose_name='Call for proposals website')
pub_date = models.DateField(verbose_name='Published')
closing_date_provided = models.BooleanField(
verbose_name='Closing date specified?')
closing_date = models.DateField(
null=True, blank=True, verbose_name='Closing date for applications')
themes = models.ManyToManyField(Theme)
zones = models.ManyToManyField(Zone)
type_of_projects = models.TextField()
# eligibility = models.TextField(verbose_name='Eligibility Criteria')
funding_currency = models.CharField(max_length=3, choices=CURRENCY)
grant_size_specified = models.BooleanField(
verbose_name='Has the grant size been specified?')
overall_budget_specified = models.BooleanField(
verbose_name='Has the overall budget been specified?')
overall_budget = models.FloatField(
null=True, blank=True, verbose_name='Total or overall budget available')
minimum_budget = models.FloatField(
null=True, blank=True, verbose_name='Minimum budget for a project')
maximum_budget = models.FloatField(
null=True, blank=True, verbose_name='Maximum budget for a project')
duration_specified = models.BooleanField(
verbose_name='Project duration specified?')
duration = models.PositiveIntegerField(
null=True, blank=True, verbose_name='Maximum duration(in months) for a project')
# how_to_apply = models.TextField()
apply_here = models.URLField(blank=True)
notes = models.TextField(blank=True)
def get_absolute_url(self):
return reverse('cfp:cfp_detail', args=[str(self.slug)])
def __str__(self):
return self.title
    def past_deadline(self):
        leo = datetime.date.today()
        deadline = self.closing_date
        # closing_date may be blank, so guard before comparing
        if deadline is None:
            return False
        return leo > deadline
def no_closing_date(self):
return self.closing_date is None
cfp = models.Manager()
```
#### File: hpa/inventory/models.py
```python
from django.db import models
class Product(models.Model):
item = models.CharField(max_length=200)
unit = models.CharField(max_length=50)
no_of_pieces = models.IntegerField()
def __str__(self):
return self.item
class Meta:
ordering = ('item',)
class Country(models.Model):
country = models.CharField(max_length=20)
def __str__(self):
return self.country
class Meta:
ordering = ('country',)
class Town(models.Model):
country = models.ForeignKey(Country, on_delete=models.CASCADE)
town = models.CharField(max_length=20)
def __str__(self):
return self.town
class Meta:
ordering = ('town',)
class StockCard(models.Model):
STOCKMOVEMENT = (
('IN', 'Stock coming in'),
('OUT', 'Stock going out'),
)
product = models.ForeignKey(Product, on_delete=models.CASCADE)
transaction_date = models.DateField()
stock_in_or_out = models.CharField(max_length=3, choices=STOCKMOVEMENT)
supplier = models.CharField(max_length=20, blank=True)
to_where = models.ForeignKey(Town, on_delete=models.CASCADE, blank=True, null=True)
document_no = models.CharField(max_length=50)
stock_amount = models.FloatField()
def __str__(self):
return '%s %s %s %s %s' % (self.product, self.transaction_date, self.document_no, " : Stock Movement - ", self.stock_in_or_out)
```
|
{
"source": "jgathogo/python_level_1",
"score": 4
}
|
#### File: python_level_1/week2/problem1.py
```python
import os
import sys
"""
Notes:
- It's great that you've used functions even though we haven't reached that part of the course.
Also, the naming of the function is clear and a good variable name.
- Typically, the docstring for the function starts immediately after the triple quote otherwise we
introduce a newline (\n) in the documentation, which doesn't look good. I've corrected it below.
- The 'return' variable in the docstring is not correct since your program actually returns None (you can test this)
- Trivial point: to preserve the order of modules, name them problem0.py,...,problem9.py; this way they will always appear in order
- Feel free to include additional testing modules if you need to though you don't have to commit them to the repo.
"""
def print_name_age():
"""Ask user name and age and print out the result"""
name = input("Please enter your name: ")
age = input("Please enter your age in years: ")
print(f"Your name is {name} and you are {age} years old")
def main():
v = print_name_age()
# print(f"the return value of 'print_name_age()' is {v}")
return os.EX_OK
if __name__ == "__main__":
sys.exit(main())
```
#### File: python_level_1/week3/problem0.py
```python
import os
import sys
def main():
# no need for int_* in the variable name
start = int(input("Enter the start digit: "))
stop = int(input("Enter the stop number: "))
step = int(input("Enter the step number: "))
print(f"Generated integers: {list(range(start, stop, step))}")
return os.EX_OK
if __name__ == "__main__":
sys.exit(main())
```
#### File: python_level_1/week3/problem3.py
```python
import os
import sys
import random
def main():
# correct
random_list = list(random.choices(range(5), k=10))
print(f"Number of times each number is present in list: \n"
f"0: {random_list.count(0)}\n"
f"1: {random_list.count(1)}\n"
f"2: {random_list.count(2)}\n"
f"3: {random_list.count(3)}\n"
f"4: {random_list.count(4)}\n"
f"5: {random_list.count(5)}\n")
return os.EX_OK
if __name__ == "__main__":
sys.exit(main())
```
#### File: python_level_1/week3/problem8.py
```python
import os
import sys
def main():
# no need for int_* in the variable name
# the variables had been int_start, which wasn't necessary; start is fine (as you have done below)
    # Sorry Paul, what did you mean by the above comment? I get an error if I leave int out of input
start = int(input("Enter the start digit: "))
# fixme: use a try...except block to catch the exception
# so that you don't show the user an ugly traceback
assert start >= 0, "Start must be zero and above"
stop = int(input("Enter the stop number: "))
# fixme: use a try...except block here too
assert stop > start, "Stop must be more than Start"
step = int(input("Enter the step number: "))
# fixme: use a try...except block here too
assert step > 0, "Step must be 1 and above"
print(f"Generated integers: {list(range(start, stop, step))}")
return os.EX_OK
if __name__ == "__main__":
sys.exit(main())
```
#### File: python_level_1/week4/problem9.py
```python
import os
import sys
"""
Notes:
- Great job!
- Extending to 1000 might require a bit more thought!
- Run the program again to see numbers which are not prime!
- Notice that the sets were related to primes...
"""
def main():
stop = 1000
one = set(range(2, stop))
two = set(range(4, stop, 2))
three = set(range(6, stop, 3))
five = set(range(10, stop, 5))
seven = set(range(14, stop, 7))
x = one - two - three - five - seven
print(sorted(x))
print()
print(list(filter(lambda y: y % 11 == 0, x)))
print(list(filter(lambda y: y % 13 == 0, x)))
print(list(filter(lambda y: y % 17 == 0, x)))
print(list(filter(lambda y: y % 19 == 0, x)))
print(list(filter(lambda y: y % 23 == 0, x)))
# The values in the resulting set are prime numbers
# To provide values to 1000, change stop value in range
return os.EX_OK
if __name__ == "__main__":
sys.exit(main())
```
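The notes above hint that subtracting multiples of 2, 3, 5 and 7 stops working as the range grows, because composites such as 121 = 11 * 11 have no factor below 11. A minimal sieve-style sketch in the same set-based spirit that works for any stop value:
```python
def primes_below(stop):
    """Return all primes below stop using a set-based sieve."""
    candidates = set(range(2, stop))
    for n in range(2, int(stop ** 0.5) + 1):
        if n in candidates:
            # n is prime, so remove every multiple of n from n*n upwards
            candidates -= set(range(n * n, stop, n))
    return sorted(candidates)


if __name__ == "__main__":
    print(primes_below(1000))
```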
#### File: python_level_1/week5/problem3.py
```python
import os
import random
import sys
"""
Notes:
- Great!
"""
def main():
l1 = list(random.choices(range(100), k=100))
l2 = list(random.choices(range(100), k=100))
print(f"l1: {l1}\n"
f"l2: {l2}")
l3 = []
i = 0
while i < len(l2):
# for i in range(len(l1)):
l3.append(l1[i] + l2[i])
i += 1
print(f"Pairwise sums: {l3}")
cum_prod = []
i = 0
while i < len(l2):
# for i in range(len(l1)):
cum_prod.append(sum(cum_prod) + l1[i] * l2[i])
i += 1
print(f"Cumulative pairwise products: {cum_prod}")
even_odd = []
i = 0
while i < len(l1):
# for i in range(len(l1)):
if (l1[i] % 2 == 0 and l2[i] % 2 == 0) or (l1[i] % 2 != 0 and l2[i] % 2 != 0):
even_odd.append(l1[i] + l2[i])
i += 1
print(f"Sums of even/odd pairs: {even_odd}")
return os.EX_OK
if __name__ == "__main__":
sys.exit(main())
```
#### File: python_level_1/week5/problem4.py
```python
import os
import random
import sys
"""
Notes:
- Good job!
- There is a bug in your translation (b). Use the example provided in the challenge to make
sure your translated sequence is the same.
"""
def main():
# Random sequence string of 100 base pairs
residues = ['A', 'C', 'G', 'T']
pairs = random.choices(residues, k=50)
dna = ""
for i in pairs:
dna += i
print(f"Original: {dna}")
# Transcription of DNA sequence to corresponding RNA sequence
trans_map = {'A': 'U',
'C': 'g_temp',
'G': 'C',
'T': 'A'}
for d, r in trans_map.items():
dna = dna.replace(d, r)
rna = dna.replace('g_temp', 'G')
# fixme: is this 'translated' or 'transcribed'?
print(f"Translated: {rna}")
# Translation
code = {'UUU': 'Phe', 'UUC': 'Phe', 'UUA': 'Leu', 'UUG': 'Leu', 'CUU': 'Leu', 'CUC': 'Leu', 'CUA': 'Leu',
'CUG': 'Leu', 'AUU': 'Ile', 'AUC': 'Ile', 'AUA': 'Ile', 'AUG': 'Met', 'GUU': 'Val', 'GUC': 'Val',
'GUA': 'Val', 'GUG': 'Val', 'UCU': 'Ser', 'UCC': 'Ser', 'UCA': 'Ser', 'UCG': 'Ser', 'CCU': 'Pro',
'CCC': 'Pro', 'CCA': 'Pro', 'CCG': 'Pro', 'ACU': 'Thr', 'ACC': 'Thr', 'ACA': 'Thr', 'ACG': 'Thr',
'GCU': 'Ala', 'GCC': 'Ala', 'GCA': 'Ala', 'GCG': 'Ala', 'UAU': 'Tyr', 'UAC': 'Tyr', 'UAA': 'Stop',
'UAG': 'Stop', 'CAU': 'His', 'CAC': 'His', 'CAA': 'Gln',
'CAG': 'Gln', 'AAU': 'Asn', 'AAC': 'Asn', 'AAA': 'Lys', 'AAG': 'Lys', 'GAU': 'Asp', 'GAC': 'Asp',
'GAA': 'Glu', 'GAG': 'Glu', 'UGU': 'Cys', 'UGC': 'Cys', 'UGA': 'Stop', 'UGG': 'Trp', 'CGU': 'Arg',
'CGC': 'Arg', 'CGA': 'Arg', 'CGG': 'Arg', 'AGU': 'Ser', 'AGC': 'Ser', 'AGA': 'Arg', 'AGG': 'Arg',
'GGU': 'Gly', 'GGC': 'Gly', 'GGA': 'Gly', 'GGG': 'Gly',
}
trans_sequence = []
stops = 0
# todo: what can you do here to fix the bug?
for i in range(len(rna)):
codon = rna[i:i + 3]
# print(codon)
if len(codon) == 3:
# print(code[codon])
if code[codon] == 'Stop':
stops += 1
continue
trans_sequence.append(code[codon])
print(f"Number of codons: {len(trans_sequence)}")
print(f"Number of Stops: {stops}")
print(f"List of translations: {trans_sequence}")
return os.EX_OK
if __name__ == '__main__':
sys.exit(main())
```
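The todo above points at the translation bug: the loop advances one base at a time, so overlapping codons are read. A minimal sketch that steps three bases at a time; it assumes the same `rna` string and `code` dictionary as in the file above.
```python
def translate(rna, code):
    """Translate an RNA string codon by codon, stepping 3 bases at a time."""
    trans_sequence = []
    stops = 0
    for i in range(0, len(rna), 3):
        codon = rna[i:i + 3]
        if len(codon) < 3:
            break  # ignore a trailing partial codon
        if code[codon] == 'Stop':
            stops += 1
            continue
        trans_sequence.append(code[codon])
    return trans_sequence, stops
```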
#### File: python_level_1/week6/problem2.py
```python
import os
import sys
"""
Notes:
- Great job! Next time, try asking the user for input 😄
- Please add a docstring to your function in line with the notes provided.
"""
def is_palindrome(text):
return text == text[::-1]
def main():
s = "step on no pets"
if is_palindrome(s):
print(f"{s} is a palindrome")
else:
print(f"{s} is not a palindrome")
return os.EX_OK
if __name__ == "__main__":
sys.exit(main())
```
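A minimal sketch applying both notes above: the helper gets a docstring and the string comes from user input instead of being hard-coded.
```python
def is_palindrome(text):
    """Return True if text reads the same forwards and backwards."""
    return text == text[::-1]


def main():
    s = input("Enter a string: ")
    if is_palindrome(s):
        print(f"{s} is a palindrome")
    else:
        print(f"{s} is not a palindrome")
```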
#### File: python_level_1/week6/problem3.py
```python
import os
import sys
"""
Notes:
- Very good. Works as expected and it's brave of you to use a while loop.
- Please add a docstring to your function in line with the notes provided.
"""
def reverse_string(s):
rev_str = ''
pos = len(s) - 1
while pos >= 0:
rev_str += s[pos]
pos -= 1
return rev_str
def main():
# s = "step on no pets"
s = input("string? ")
print(reverse_string(s))
return os.EX_OK
if __name__ == "__main__":
sys.exit(main())
```
|
{
"source": "jgavert/Higanbana",
"score": 2
}
|
#### File: Higanbana/benchmarks/macros.bzl
```python
def src_core_benchmark(target_name):
native.cc_binary(
name = "bench_core_" + target_name,
srcs = ["core/bench_" + target_name + ".cpp"],
deps = ["//core:core", "catch-benchmark-main"],
copts = select({
"@bazel_tools//src/conditions:windows": ["/std:c++latest", "/arch:AVX2", "/permissive-", "/Z7"],
"//conditions:default": ["-std=c++2a", "-msse4.2", "-m64", "-pthread"],
}),
defines = ["_ENABLE_EXTENDED_ALIGNED_STORAGE", "CATCH_CONFIG_ENABLE_BENCHMARKING", "_HAS_DEPRECATED_RESULT_OF"],
linkopts = select({
"@bazel_tools//src/conditions:windows": ["/subsystem:CONSOLE", "/DEBUG"],
"//conditions:default": ["-pthread"],
}),
)
```
#### File: Higanbana/tests/macros.bzl
```python
def src_graphics_test(target_name):
native.cc_test(
name = "test_graphics_" + target_name,
srcs = ["graphics/test_" + target_name + ".cpp", "graphics/graphics_config.hpp", "graphics/graphics_config.cpp"],
deps = ["//graphics:graphics", "//ext/Catch2:catch2_main"],
copts = select({
"@bazel_tools//src/conditions:windows": ["/std:c++latest", "/arch:AVX2", "/Z7", "/permissive-"],
"//conditions:default": ["-std=c++2a", "-msse4.2", "-m64"],
}),
data = ["//tests/data:mapping"],
defines = ["_ENABLE_EXTENDED_ALIGNED_STORAGE"],
linkopts = select({
"@bazel_tools//src/conditions:windows": ["/subsystem:CONSOLE", "/DEBUG"],
"//conditions:default": ["-pthread", "-ltbb", "-ldl"],
}),
)
def src_graphics_test_with_header(target_name):
native.cc_test(
name = "test_graphics_" + target_name,
srcs = ["graphics/test_" + target_name + ".cpp", "graphics/test_" + target_name + ".hpp", "graphics/graphics_config.hpp", "graphics/graphics_config.cpp"],
deps = ["//graphics:graphics", "//ext/Catch2:catch2_main"],
copts = select({
"@bazel_tools//src/conditions:windows": ["/std:c++latest", "/arch:AVX2", "/Z7", "/permissive-"],
"//conditions:default": ["-std=c++2a", "-msse4.2", "-m64"],
}),
data = ["//tests/data:mapping"],
defines = ["_ENABLE_EXTENDED_ALIGNED_STORAGE"],
linkopts = select({
"@bazel_tools//src/conditions:windows": ["/subsystem:CONSOLE", "/DEBUG"],
"//conditions:default": ["-pthread", "-ltbb", "-ldl"],
}),
)
def src_core_test(target_name):
native.cc_test(
name = "test_core_" + target_name,
srcs = ["core/test_" + target_name + ".cpp"],
deps = ["//core:core", "//ext/Catch2:catch2_main"],
copts = select({
"@bazel_tools//src/conditions:windows": ["/std:c++latest", "/arch:AVX2", "/permissive-", "/Z7"],
"//conditions:default": ["-std=c++2a", "-msse4.2", "-m64", "-pthread"],
}),
data = ["//tests/data:mapping"],
defines = ["_ENABLE_EXTENDED_ALIGNED_STORAGE"],
linkopts = select({
"@bazel_tools//src/conditions:windows": ["/subsystem:CONSOLE", "/DEBUG"],
"//conditions:default": ["-pthread"],
}),
)
def src_core_test_with_header(target_name):
native.cc_test(
name = "test_core_" + target_name,
srcs = ["core/test_" + target_name + ".cpp", "core/test_" + target_name + ".hpp"],
deps = ["//core:core", "//ext/Catch2:catch2_main"],
copts = select({
"@bazel_tools//src/conditions:windows": ["/std:c++latest", "/arch:AVX2", "/permissive-", "/Z7"],
"//conditions:default": ["-std=c++2a", "-msse4.2", "-m64"],
}),
data = ["//tests/data:mapping"],
defines = ["_ENABLE_EXTENDED_ALIGNED_STORAGE"],
linkopts = select({
"@bazel_tools//src/conditions:windows": ["/subsystem:CONSOLE", "/DEBUG"],
"//conditions:default": ["-pthread"],
}),
)
```
|
{
"source": "jgavris/tensorflow",
"score": 2
}
|
#### File: estimator/inputs/pandas_io.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.estimator.inputs.pandas_import import HAS_PANDAS
from tensorflow.python.estimator.inputs.queues import feeding_functions
def pandas_input_fn(x,
y=None,
batch_size=128,
num_epochs=1,
shuffle=True,
queue_capacity=1000,
num_threads=1,
target_column='target'):
"""Returns input function that would feed Pandas DataFrame into the model.
Note: `y`'s index must match `x`'s index.
Args:
x: pandas `DataFrame` object.
y: pandas `Series` object.
batch_size: int, size of batches to return.
num_epochs: int, number of epochs to iterate over data. If not `None`,
read attempts that would exceed this value will raise `OutOfRangeError`.
shuffle: bool, whether to read the records in random order.
queue_capacity: int, size of the read queue. If `None`, it will be set
roughly to the size of `x`.
num_threads: int, number of threads used for reading and enqueueing.
target_column: str, name to give the target column `y`.
Returns:
Function, that has signature of ()->(dict of `features`, `target`)
Raises:
ValueError: if `x` already contains a column with the same name as `y`, or
if the indexes of `x` and `y` don't match.
"""
if not HAS_PANDAS:
raise TypeError(
'pandas_input_fn should not be called without pandas installed')
x = x.copy()
if y is not None:
if target_column in x:
raise ValueError(
'Cannot use name %s for target column: DataFrame already has a '
'column with that name: %s' % (target_column, x.columns))
if not np.array_equal(x.index, y.index):
raise ValueError('Index for x and y are mismatched.\nIndex for x: %s\n'
'Index for y: %s\n' % (x.index, y.index))
x[target_column] = y
# TODO(mdan): These are memory copies. We probably don't need 4x slack space.
# The sizes below are consistent with what I've seen elsewhere.
if queue_capacity is None:
if shuffle:
queue_capacity = 4 * len(x)
else:
queue_capacity = len(x)
min_after_dequeue = max(queue_capacity / 4, 1)
def input_fn():
"""Pandas input function."""
queue = feeding_functions._enqueue_data( # pylint: disable=protected-access
x,
queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
num_threads=num_threads,
enqueue_size=batch_size,
num_epochs=num_epochs)
if num_epochs is None:
features = queue.dequeue_many(batch_size)
else:
features = queue.dequeue_up_to(batch_size)
assert len(features) == len(x.columns) + 1, ('Features should have one '
'extra element for the index.')
features = features[1:]
features = dict(zip(list(x.columns), features))
if y is not None:
target = features.pop(target_column)
return features, target
return features
return input_fn
```
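A short usage sketch for the `pandas_input_fn` defined above; it assumes pandas and numpy are installed, and the DataFrame columns and sizes are made up for illustration.
```python
import numpy as np
import pandas as pd

# Toy data; column names are illustrative.
x = pd.DataFrame({"age": np.random.randint(18, 65, size=100),
                  "income": np.random.rand(100)})
y = pd.Series(np.random.randint(0, 2, size=100), index=x.index)  # index must match x

input_fn = pandas_input_fn(x, y, batch_size=32, num_epochs=1, shuffle=True)
# Calling input_fn() inside an estimator's input pipeline yields
# (dict of feature tensors keyed by column name, target tensor).
```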
|
{
"source": "jgawrilo/butler",
"score": 3
}
|
#### File: butler/contrib/mine_by_type.py
```python
from google import search
import codecs
import operator
term = "<EMAIL>"
fmap = {}
dp = "/Volumes/JB5T/georgia_tech/dumps/majorgeeks.txt"
lines = 0
with codecs.open(dp,encoding="utf-8",errors="ignore") as inf:
for line in inf:
charmap = {}
lines += 1
for c in line.strip():
charmap[c] = charmap.get(c,0) + 1
print lines
for c in charmap:
fmap[c] = fmap.get(c,{})
fmap[c][charmap[c]] = fmap[c].get(charmap[c],0) + 1
def gethighest(x):
mm = 0
for m in fmap[x]:
if fmap[x][m] > mm:
mm = fmap[x][m]
return (x,mm)
highest = sorted(map(gethighest,fmap.keys()),key=lambda tup:tup[1],reverse=True)
print highest[:10]
'''
for url in search(term, stop=30):
print url
'''
```
|
{
"source": "jgawrilo/qcr_ci",
"score": 2
}
|
#### File: jgawrilo/qcr_ci/launch_service.py
```python
from rpy2.robjects import r
from rpy2.robjects import pandas2ri
from rpy2.robjects.packages import importr
import rpy2.robjects as robjects
from rpy2.rinterface import RRuntimeError
import argparse
import logging
from flask import Flask, make_response, jsonify
from flask.ext.restful import Api, Resource, reqparse
from flask.ext.restful.representations.json import output_json
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from elasticsearch import Elasticsearch
import simplejson as json
import subprocess
import pandas as pd
from io import StringIO
import datetime
import os
app = Flask(__name__)
api = Api(app)
pandas2ri.activate()
ci = importr('CausalImpact')
base = importr('base')
devtools = importr('devtools')
dollar = base.__dict__['$']
at = base.__dict__['@']
logging.basicConfig(format='%(levelname)s %(asctime)s %(filename)s %(lineno)d: %(message)s')
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class ImpactPredictorAPI(Resource):
def __init__(self, **kwargs):
self.reqparse = reqparse.RequestParser()
self.reqparse.add_argument('data', type=dict, location='json')
super(ImpactPredictorAPI, self).__init__()
def post(self):
try:
args = self.reqparse.parse_args()
treatment_control_data = pd.DataFrame.from_dict(args["data"]["series"])
time_df = treatment_control_data[["date"]]
time_df.index = range(1,len(time_df)+1)
logger.info("PRE-START:" +str(args["data"]["pre_start"]))
logger.info("PRE-END:" +str(args["data"]["pre_end"]))
logger.info("POST-START:" +str(args["data"]["post_start"]))
logger.info("POST-END:" +str(args["data"]["post_end"]))
logger.info("DATA:"+json.dumps(args["data"]["series"]))
treatment_control_data = treatment_control_data[["treatment","control"]]
treatment_control_data.index = range(1,len(treatment_control_data)+1)
pre_start = time_df[time_df.date == args["data"]["pre_start"]].index[0]
pre_end = time_df[time_df.date == args["data"]["pre_end"]].index[0]
post_start = time_df[time_df.date == args["data"]["post_start"]].index[0]
post_end = time_df[time_df.date == args["data"]["post_end"]].index[0]
columns = [
"response",
"cum.response",
"point.pred",
"point.pred.lower",
"point.pred.upper",
"cum.pred",
"cum.pred.lower",
"cum.pred.upper",
"point.effect",
"point.effect.lower",
"point.effect.upper",
"cum.effect",
"cum.effect.lower",
"cum.effect.upper"
]
r_data = pandas2ri.py2ri(treatment_control_data)
pre = robjects.IntVector((pre_start,pre_end))
post = robjects.IntVector((post_start,post_end))
results = ci.CausalImpact(r_data,pre,post)
py_out = pandas2ri.ri2py(results)
dout = pandas2ri.ri2py_dataframe(dollar(py_out,"series"))
dout.columns = columns
dout.index = range(1,len(dout)+1)
final = pd.concat([time_df,dout],axis=1)
final.drop('cum.response', axis=1, inplace=True)
final.drop('cum.pred.lower', axis=1, inplace=True)
final.drop('cum.pred.upper', axis=1, inplace=True)
final.drop('cum.pred', axis=1, inplace=True)
impact = dollar(dollar(py_out,"summary"),"AbsEffect")[1]
# Turn into json
ts_results = final.to_json(orient='records')
ret_results = {"ts_results":json.loads(ts_results),"total_impact":impact}
with open('data/' + datetime.datetime.now().isoformat().replace(':','_'),'w') as out:
out.write(json.dumps(ret_results,indent=2))
return ret_results
except RRuntimeError as e:
return {"message":str(e)}
except Exception as e:
logger.error(e)
raise e
'''
Deal with these later
'''
class HealthCheck(Resource):
def get(self):
return make_response(jsonify({"status": "ok"}), 200)
@app.errorhandler(400)
def bad_request(error):
return make_response(jsonify({'error': 'Bad request'}), 400)
@app.errorhandler(404)
def not_found(error):
return make_response(jsonify({'error': 'Not found'}), 404)
# Start
if __name__ == '__main__':
logger.info('Starting Causal Impact Service.')
# Read in config, set port
config = json.load(open('./config.json'))
port = config["impactServicePort"]
api.add_resource(ImpactPredictorAPI, '/api/impact', resource_class_kwargs=config)
api.add_resource(HealthCheck, '/api/health')
http_server = HTTPServer(WSGIContainer(app))
http_server.listen(port)
IOLoop.instance().start()
```
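A hedged client-side sketch for the `/api/impact` endpoint above. The port, dates and values are made up (a real call would need a much longer series), but the payload keys mirror what the handler reads: a top-level "data" object with a "series" table plus pre_start/pre_end/post_start/post_end dates, where the series carries date, treatment and control columns.
```python
import requests

payload = {
    "data": {
        "series": {
            "date": ["2016-01-01", "2016-01-02", "2016-01-03", "2016-01-04"],
            "treatment": [10.0, 11.0, 15.0, 16.0],
            "control": [9.5, 9.8, 9.9, 10.1],
        },
        "pre_start": "2016-01-01",
        "pre_end": "2016-01-02",
        "post_start": "2016-01-03",
        "post_end": "2016-01-04",
    }
}

# Port comes from config.json ("impactServicePort"); 5000 is a placeholder.
resp = requests.post("http://localhost:5000/api/impact", json=payload)
print(resp.json())  # expect "ts_results" and "total_impact" on success
```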
|
{
"source": "jgawrilo/tagless",
"score": 2
}
|
#### File: tagless/tagless/random_sampler.py
```python
import os
import sys
import h5py
from datetime import datetime
import numpy as np
class RandomSampler(object):
def __init__(self, crow, n=2, prefix=None):
crow = h5py.File(crow)
self.labs = crow['labs'].value
if prefix is not None:
self.labs = np.array([os.path.join(prefix, l) for l in self.labs])
self.order = np.random.permutation(self.labs.shape[0])
self.y = np.zeros(self.labs.shape[0]) - 1
self.validation_idx = []
self._counter = 0
self.n = n
def get_next(self):
out = []
for _ in range(self.n):
out.append(self.order[self._counter])
self._counter += 1
return np.array(out)
def set_label(self, idx, label):
self.y[idx] = label
self.validation_idx.append(idx)
def get_data(self):
        raise NotImplementedError
def n_hits(self):
return (self.y == 1).sum()
def n_labeled(self):
return (self.y >= 0).sum()
def is_labeled(self, idx):
return idx in np.where(self.y >= 0)[0]
def save(self, outpath):
f = h5py.File('%s-%s-%s.h5' % (outpath, 'validation', datetime.now().strftime('%Y%m%d_%H%M%S')))
f['y'] = self.y
f['labs'] = self.labs
f['validation_idx'] = np.array(self.validation_idx)
f.close()
```
#### File: tagless/tagless/uncertainty_sampler.py
```python
import sys
import h5py
from datetime import datetime
import numpy as np
from libact.base.dataset import Dataset
from libact.models import LinearSVC
from libact.query_strategies import UncertaintySampling
class UncertaintySampler(object):
def __init__(self, X, y, labs, n=2):
y = [yy if yy >= 0 else None for yy in y]
self.dataset = Dataset(X, y)
self.labs = labs
self.uc = UncertaintySampling(self.dataset, method='lc', model=LinearSVC())
self.n = n
def get_next(self):
print >> sys.stderr, 'get_next: start'
out = self.uc.make_query(n=self.n)
print >> sys.stderr, 'get_next: done'
return out
def set_label(self, idx, label):
print >> sys.stderr, 'set_label: start'
out = self.dataset.update(idx, label)
print >> sys.stderr, 'set_label: done'
return out
def get_data(self):
X, y = zip(*self.dataset.get_entries())
X, y = np.vstack(X), np.array([yy if yy is not None else -1 for yy in y])
return X, y
def n_hits(self):
labels = np.array(zip(*self.dataset.get_entries())[1])
return (labels == 1).sum()
def n_labeled(self):
return self.dataset.len_labeled()
def is_labeled(self, idx):
return idx in np.where(zip(*self.dataset.get_entries())[1])[0]
def save(self, outpath):
""" !! This should be updated to save in same format as simple_las """
X, y = self.get_data()
f = h5py.File('%s-%s-%s.h5' % (outpath, 'uncertainty', datetime.now().strftime('%Y%m%d_%H%M%S')))
f['X'] = X
f['y'] = y
f['labs'] = self.labs
f.close()
```
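Both samplers share the same small protocol (`get_next`, `set_label`, `n_hits`, `save`). A hedged sketch of a labelling loop around `RandomSampler`, written in Python 3 style (the repo's other module uses Python 2 prints); the HDF5 path, the number of rounds and the `get_label_from_user` helper are hypothetical.
```python
def get_label_from_user(lab):
    # Hypothetical stand-in for a real labelling UI; expects 0 or 1.
    return int(input('label for %s (0/1): ' % lab))


sampler = RandomSampler('crow_features.h5', n=2)   # path is illustrative
for _ in range(10):                                # label 10 batches of n items
    for idx in sampler.get_next():
        sampler.set_label(idx, get_label_from_user(sampler.labs[idx]))

print('hits so far: %d of %d labeled' % (sampler.n_hits(), sampler.n_labeled()))
sampler.save('annotations')   # writes annotations-validation-<timestamp>.h5
```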
|
{
"source": "jgawrilo/youtube",
"score": 3
}
|
#### File: jgawrilo/youtube/pull_related_videos.py
```python
from apiclient.discovery import build
from apiclient.errors import HttpError
from oauth2client.tools import argparser
import json
import os
import codecs
from bs4 import BeautifulSoup
import argparse
import requests
import sys
import googleapiclient
def get_video_info(vid, youtube):
response = youtube.videos().list(
part="id,snippet,contentDetails,statistics",
id=vid,
maxResults=1
).execute()
return response
def get_video_suggestions(youtube,vid):
try:
#print "Related to:", vid
search_response = youtube.search().list(
type="video",
part="id",
relatedToVideoId=vid,
maxResults=20
).execute()
for i in search_response["items"]:
#print float(get_video_info(i["id"]["videoId"],youtube)["items"][0]["statistics"]["viewCount"])
if float(get_video_info(i["id"]["videoId"],youtube)["items"][0]["statistics"]["viewCount"]) < 100000:
print i["id"]["videoId"]
except googleapiclient.errors.HttpError:
pass
# MAIN
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Pull some youtube.')
parser.add_argument("--key", help="https://cloud.google.com/console")
args = parser.parse_args()
# Set DEVELOPER_KEY to the API key value from the APIs & auth > Registered apps
# tab of
# https://cloud.google.com/console
# Please ensure that you have enabled the YouTube Data API for your project.
DEVELOPER_KEY = args.key
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
developerKey=DEVELOPER_KEY)
for f in os.listdir("../flashpoint/videos/"):
get_video_suggestions(youtube,f)
```
|
{
"source": "jgay-dauphine/Initiation-Python",
"score": 4
}
|
#### File: Initiation-Python/Codes/suites.py
```python
def fib(rang):
"""
    This function prints every element of the
    Fibonacci sequence from 1 up to rang
"""
    # Initialization
(u, v) = (1, 1)
    # Iterations
while u <= rang:
print(u, end=' ')
(u, v) = (v, u+v)
print()
def fib2(rang): # returns the Fibonacci sequence up to rang
res = []
u, v = 1, 1
while u <= rang:
res.append(u)
u, v = v, u+v
return res
if __name__ == '__main__':
print('Tests du module suites :')
print('affichage des éléments jusqu\'à 50 :')
fib(50)
print('Récupération des valeurs des éléments jusqu\'à 50')
values = fib2(50)
print(values)
```
|
{
"source": "jgay-dauphine/python-mailer",
"score": 3
}
|
#### File: jgay-dauphine/python-mailer/template_builder.py
```python
from jinja2 import Environment, FileSystemLoader
from email.mime.text import MIMEText
class Builder:
def __init__(self):
return
def build_template(self, symbols, summary, articles, company_name, exchange):
try:
# Declare jinja2 template
file_loader = FileSystemLoader('templates')
env = Environment(loader=file_loader)
template = env.get_template('email.html')
# Build template from email.html file
output = template.render(symbols=symbols, summary=summary, articles=articles, companyName=company_name, exchange=exchange)
body = MIMEText(output, 'html')
return body
except Exception as e:
            # Display error
print(str(e))
```
|
{
"source": "jgayfer/pycord",
"score": 3
}
|
#### File: examples/app_commands/slash_basic.py
```python
import discord
bot = discord.Bot()
# If you use commands.Bot, @bot.slash_command should be used for
# slash commands. You can use @bot.slash_command with discord.Bot as well
@bot.command(guild_ids=[...]) # create a slash command for the supplied guilds
async def hello(ctx):
"""Say hello to the bot""" # the command description can be supplied as the docstring
await ctx.send(f"Hello {ctx.author}!")
@bot.command(
name="hi"
) # Not passing in guild_ids creates a global slash command (might take an hour to register)
async def global_command(ctx, num: int): # Takes one integer parameter
await ctx.send(f"This is a global command, {num}!")
@bot.command(guild_ids=[...])
async def joined(
ctx, member: discord.Member = None
): # Passing a default value makes the argument optional
user = member or ctx.author
await ctx.send(f"{user.name} joined at {discord.utils.format_dt(user.joined_at)}")
# To learn how to add descriptions and choices to options, check slash_options.py
bot.run("TOKEN")
```
|
{
"source": "jgayfer/Spirit",
"score": 3
}
|
#### File: Spirit/cogs/core.py
```python
from discord.ext import commands
import discord
import asyncio
from cogs.utils import constants
from cogs.utils.message_manager import MessageManager
class Core:
"""Core functionality required for the bot to function"""
def __init__(self, bot):
self.bot = bot
async def on_ready(self):
self.display_startup_info()
self.add_remove_offline_guilds()
async def on_member_remove(self, user):
"""Remove user from database when they leave the guild"""
member_ids = []
for member in self.bot.get_all_members():
member_ids.append(member.id)
if user.id not in member_ids:
self.bot.db.remove_user(user.id)
async def on_command_error(self, ctx, error):
"""Command error handler"""
manager = MessageManager(ctx)
if isinstance(error, commands.CommandNotFound):
pass
elif isinstance(error, commands.MissingRequiredArgument):
pass
elif isinstance(error, commands.NotOwner):
pass
elif isinstance(error, commands.NoPrivateMessage):
await manager.send_message("You can't use that command in a private message")
elif isinstance(error, commands.CheckFailure):
await manager.send_message("You don't have the required permissions to do that")
elif isinstance(error, commands.CommandOnCooldown):
await manager.send_message(error)
# Non Discord.py errors
elif isinstance(error, commands.CommandInvokeError):
if isinstance(error.original, discord.errors.Forbidden):
pass
elif isinstance(error.original, asyncio.TimeoutError):
await manager.send_private_message("I'm not sure where you went. We can try this again later.")
else:
raise error
else:
raise error
await manager.clean_messages()
def add_remove_offline_guilds(self):
"""Add/remove guilds that may have added/removed the bot while it was offline"""
to_delete = []
results = self.bot.db.get_guilds()
for row in results:
guild_id = row.get('guild_id')
guild = self.bot.get_guild(guild_id)
if not guild:
to_delete.append(guild_id)
for guild_id in to_delete:
self.bot.db.remove_guild(guild_id)
def display_startup_info(self):
print('Spirit v{}'.format(constants.VERSION))
print('Username: {}'.format(self.bot.user.name))
print('------')
```
#### File: Spirit/cogs/events.py
```python
from datetime import datetime
from discord.ext import commands
import discord
from db.query_wrappers import get_event_role, get_event_delete_role
from cogs.utils.message_manager import MessageManager
from cogs.utils.checks import is_event, is_int, is_message
from cogs.utils import constants
from cogs.utils.format import format_role_name
class Events:
def __init__(self, bot):
self.bot = bot
@commands.command()
@commands.guild_only()
@commands.cooldown(rate=2, per=5, type=commands.BucketType.user)
async def event(self, ctx):
"""
Create an event in the events channel
After invoking the event command, the bot will ask
you to enter the event details. Once the event
is created, it will appear in the upcoming-events
channel. The upcoming-events channel is designed with
the assumption that it isn't used for anything but
        displaying events; non-event messages will be deleted.
Users will be able to accept and decline the
event by adding reactions. If a maximum number
of attendees is set and the event is full,
additional attendees will be placed in a standby
section. If a spot opens up, the user at the
top of the standby section will be automatically
moved into the event.
By default, everyone can make events. However, a minimum role
requirement to create events can be defined in the settings.
See `help settings seteventrole` for more information.
        The event creator and those with the Manage Server permission
can delete events by reacting to the event message with \U0001f480.
"""
manager = MessageManager(ctx)
event_role = get_event_role(ctx)
member_permissions = ctx.author.permissions_in(ctx.channel)
if event_role:
if ctx.author.top_role < event_role:
event_role_str = format_role_name(event_role)
await manager.send_message("You must be of role **{}** or higher to do that.".format(event_role))
return await manager.clean_messages()
await manager.send_message('Event creation instructions have been messaged to you')
# Title
await manager.send_private_message("Enter event title:")
res = await manager.get_next_private_message()
if not res:
return await manager.clean_messages()
title = res.content
# Description
await manager.send_private_message("Enter event description (type 'none' for no description):")
res = await manager.get_next_private_message()
if not res:
return await manager.clean_messages()
if res.content.upper() != 'NONE':
description = res.content
else:
description = ""
# Number of attendees
max_members = 0
while not max_members:
await manager.send_private_message("Enter the maximum numbers of attendees (type 'none' for no maximum):")
res = await manager.get_next_private_message()
if not res:
return await manager.clean_messages()
if res.content.upper() == 'NONE':
break
elif is_int(res.content) and int(res.content) in range(1,10000):
max_members = int(res.content)
else:
await manager.send_private_message("Invalid entry. Must be a number between 1 and 9999.")
# Start time
start_time = None
while not start_time:
await manager.send_private_message("Enter event time (YYYY-MM-DD HH:MM AM/PM):")
res = await manager.get_next_private_message()
if not res:
return await manager.clean_messages()
start_time_format = '%Y-%m-%d %I:%M %p'
try:
start_time = datetime.strptime(res.content, start_time_format)
except ValueError:
await manager.send_private_message("Invalid event time!")
# Time zone
time_zone = None
while not time_zone:
await manager.send_private_message("Enter the time zone (PST, EST, etc):")
res = await manager.get_next_private_message()
if not res:
return await manager.clean_messages()
user_timezone = "".join(res.content.upper().split())
if user_timezone not in constants.TIME_ZONES:
await manager.send_private_message("Unsupported time zone")
else:
time_zone = user_timezone
affected_rows = self.bot.db.create_event(title, start_time, time_zone, ctx.guild.id, description, max_members, ctx.author.id)
if affected_rows == 0:
await manager.send_private_message("An event with that name already exists!")
return await manager.clean_messages()
event_channel = await self.get_events_channel(ctx.guild)
await manager.send_private_message("Event created! The " + event_channel.mention + " channel will be updated momentarily.")
await self.list_events(ctx.guild)
await manager.clean_messages()
def user_can_create_events(self, member):
pass
async def list_events(self, guild):
"""Clear the event channel and display all upcoming events"""
events_channel = await self.get_events_channel(guild)
await events_channel.purge(limit=999, check=is_message)
events = self.bot.db.get_events(guild.id)
if len(events) > 0:
for event in events:
event_embed = self.create_event_embed(guild, event)
msg = await events_channel.send(embed=event_embed)
await msg.add_reaction("\N{WHITE HEAVY CHECK MARK}")
await msg.add_reaction("\N{CROSS MARK}")
await msg.add_reaction("\N{WHITE QUESTION MARK ORNAMENT}")
else:
await events_channel.send("There are no upcoming events.")
async def on_raw_reaction_add(self, payload):
"""If a reaction represents a user RSVP, update the DB and event message"""
channel = self.bot.get_channel(payload.channel_id)
if isinstance(channel, discord.abc.PrivateChannel):
return
try:
message = await channel.get_message(payload.message_id)
except:
return
guild = channel.guild
member = guild.get_member(payload.user_id)
deleted = None
# We check that the user is not the message author as to not count
# the initial reactions added by the bot as being indicative of attendance
if is_event(message) and member != message.author:
title = message.embeds[0].title
if payload.emoji.name == "\N{WHITE HEAVY CHECK MARK}":
await self.set_attendance(member, guild, 1, title, message)
elif payload.emoji.name == "\N{CROSS MARK}":
await self.set_attendance(member, guild, 0, title, message)
elif payload.emoji.name == "\N{WHITE QUESTION MARK ORNAMENT}":
await self.set_attendance(member, guild, 2, title, message)
elif payload.emoji.name == "\N{SKULL}":
deleted = await self.delete_event(guild, title, member, channel)
if not deleted:
try:
await message.remove_reaction(payload.emoji, member)
except:
pass
async def set_attendance(self, member, guild, attending, title, message):
"""Send updated event attendance info to db and update the event"""
self.bot.db.add_user(member.id)
self.bot.db.update_attendance(member.id, guild.id, attending, title, datetime.now())
# Update event message in place for a more seamless user experience
event = self.bot.db.get_event(guild.id, title)
if event:
event_embed = self.create_event_embed(guild, event)
await message.edit(embed=event_embed)
else:
raise ValueError("Could not retrieve event")
return
async def delete_event(self, guild, title, member, channel):
"""Delete an event and update the events channel on success"""
event_delete_role = get_event_delete_role(self.bot, guild)
result = self.bot.db.get_event_creator(guild.id, title)
creator_id = result.get('user_id') if result else None
if member.permissions_in(channel).manage_guild or (member.id == creator_id) or (event_delete_role and member.top_role >= event_delete_role):
deleted = self.bot.db.delete_event(guild.id, title)
if deleted:
await self.list_events(guild)
return True
else:
try:
await member.send("You don't have permission to delete that event.")
except:
pass
async def get_events_channel(self, guild):
"""Return the events channel if it exists, otherwise create one and return it"""
for channel in guild.channels:
if channel.name == "upcoming-events":
return channel
# Need to make sure the bot can still send messages in the events channel
overwrites = {
guild.default_role: discord.PermissionOverwrite(send_messages=False, add_reactions=True),
guild.me: discord.PermissionOverwrite(send_messages=True, add_reactions=True)
}
return await guild.create_text_channel("upcoming-events", overwrites=overwrites)
def create_event_embed(self, guild, event):
"""Create and return a Discord Embed object that represents an upcoming event"""
title = event.get('event_title')
description = event.get('description')
time = event.get('start_time')
timezone = event.get('timezone')
creator_id = event.get('user_id')
accepted = event.get('accepted')
declined = event.get('declined')
maybe = event.get('maybe')
max_members = event.get('max_members')
embed_msg = discord.Embed(color=constants.BLUE)
embed_msg.title = title
creator = guild.get_member(creator_id)
if creator:
embed_msg.set_footer(text="Created by {} | React with {} to remove this event".format(creator.display_name, '\U0001f480'))
else:
embed_msg.set_footer(text="React with {} to remove this event".format('\U0001f480'))
if description:
embed_msg.description = description
time_str = time.strftime("%A %b %-d, %Y @ %-I:%M %p")
embed_msg.add_field(name="Time", value=time_str + " " + timezone, inline=False)
if accepted:
accepted_list = None
if max_members:
accepted_list = accepted.split(',')[:max_members]
else:
accepted_list = accepted.split(',')
text = ""
for user_id in accepted_list:
member = guild.get_member(int(user_id))
if member:
text += "{}\n".format(member.display_name)
if not text:
text = '-'
if max_members:
embed_msg.add_field(name="Accepted ({}/{})".format(len(accepted_list), max_members), value=text)
else:
embed_msg.add_field(name="Accepted", value=text)
else:
if max_members:
embed_msg.add_field(name="Accepted (0/{})".format(max_members), value="-")
else:
embed_msg.add_field(name="Accepted", value="-")
if declined:
declined_list = declined.split(',')
text = ""
for user_id in declined_list:
member = guild.get_member(int(user_id))
if member:
text += "{}\n".format(member.display_name)
if not text:
text = '-'
embed_msg.add_field(name="Declined", value=text)
else:
embed_msg.add_field(name="Declined", value="-")
if maybe:
maybe_list = maybe.split(',')
text = ""
for user_id in maybe_list:
member = guild.get_member(int(user_id))
if member:
text += "{}\n".format(member.display_name)
if not text:
text = '-'
embed_msg.add_field(name="Maybe", value=text)
else:
embed_msg.add_field(name="Maybe", value="-")
if accepted and max_members:
standby_list = accepted.split(',')[max_members:]
if standby_list:
text = ""
for user_id in standby_list:
member = guild.get_member(int(user_id))
if member:
text += "{}\n".format(member.display_name)
if not text:
text = '-'
embed_msg.add_field(name="Standby", value=text, inline=False)
return embed_msg
```
#### File: cogs/utils/paginator.py
```python
import asyncio
class Paginator:
"""This code was adapted from RoboDanny by Rapptz - https://www.github.com/Rapptz/RoboDanny"""
def __init__(self, bot, ctx):
self.bot = bot
self.ctx = ctx
self.embeds = []
self.current_page = 0
self.length = 0
self.message = None
self.action = None
self.paginating = False
self.reaction_emojis = [
('\N{BLACK LEFT-POINTING DOUBLE TRIANGLE WITH VERTICAL BAR}', self.first_page),
('\N{BLACK LEFT-POINTING TRIANGLE}', self.previous_page),
('\N{BLACK RIGHT-POINTING TRIANGLE}', self.next_page),
('\N{BLACK RIGHT-POINTING DOUBLE TRIANGLE WITH VERTICAL BAR}', self.last_page)]
def add_embed(self, embed):
self.embeds.append(embed)
self.length = len(self.embeds)
async def first_page(self):
await self.show_page(0)
async def last_page(self):
await self.show_page(self.length - 1)
async def next_page(self):
if self.current_page < self.length - 1:
await self.show_page(self.current_page + 1)
async def previous_page(self):
if self.current_page != 0:
await self.show_page(self.current_page - 1)
async def show_page(self, page_num):
"""Display the given page"""
self.current_page = page_num
if self.length == 0:
return
elif self.length > 1:
self.embeds[self.current_page].set_footer(text="Page {} of {}".format(page_num + 1, self.length))
if not self.message:
self.message = await self.ctx.send(embed=self.embeds[self.current_page])
await self.add_reactions()
else:
await self.message.edit(embed=self.embeds[self.current_page])
def react_check(self, reaction, user):
"""Check if reaction is valid. Set action function if it matches"""
if user is None or user.id != self.ctx.author.id:
return False
if reaction.message.id != self.message.id:
return False
for (emoji, func) in self.reaction_emojis:
if reaction.emoji == emoji:
self.action = func
return True
return False
async def add_reactions(self):
"""Add reaction 'buttons'"""
if self.length == 1:
return
for (reaction, _) in self.reaction_emojis:
if self.length == 2 and reaction in ('\u23ed', '\u23ee'):
continue
await self.message.add_reaction(reaction)
async def paginate(self):
"""Display message and start listening for reactions"""
func = self.show_page(self.current_page)
self.bot.loop.create_task(func)
self.paginating = True
while self.paginating:
try:
reaction, user = await self.bot.wait_for('reaction_add', check=self.react_check, timeout=90.0)
except asyncio.TimeoutError:
self.paginating = False
# Remove footer
self.embeds[self.current_page].set_footer(text="")
await self.message.edit(embed=self.embeds[self.current_page])
try:
await self.message.clear_reactions()
except:
pass
finally:
break
try:
await self.message.remove_reaction(reaction, user)
except:
pass
await self.action()
```
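A hedged usage sketch for the Paginator above inside one of the bot's plain-class cogs; the cog name, command name and embed content are illustrative.
```python
import discord
from discord.ext import commands

from cogs.utils.paginator import Paginator


class Example:
    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    async def pages(self, ctx):
        """Show a few example pages the user can flip through with reactions."""
        paginator = Paginator(self.bot, ctx)
        for i in range(3):
            embed = discord.Embed(title="Page {}".format(i + 1), description="Example content")
            paginator.add_embed(embed)
        await paginator.paginate()
```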
|
{
"source": "jgayfer/spirit-server",
"score": 3
}
|
#### File: jgayfer/spirit-server/server.py
```python
import requests
import json
import pickle
from flask import Flask, request, redirect
import redis
app = Flask(__name__)
red = redis.StrictRedis(host='localhost', port=6379, db=0)
with open('credentials.json') as f:
cred_dict = json.load(f)
client_id = cred_dict['client_id']
client_secret = cred_dict['client_secret']
@app.route("/")
@app.route("/oauth/")
def oauth():
if 'code' not in request.args or 'state' not in request.args:
return "<h1 style='color:blue'>Missing Code or State</h1>"
code = request.args.get('code')
discord_id = request.args.get('state')
r = requests.post('https://www.bungie.net/platform/app/oauth/token/',
data={'grant_type': 'authorization_code', 'code': code,
'client_id': client_id, 'client_secret': client_secret})
json_res = r.json()
# Check response from Bungie
if not all(k in json_res for k in ("access_token", "refresh_token", "membership_id")):
return "<h1 style='color:blue'>Bad response from Bungo</h1>"
# Extract user info
membership_id = json_res['membership_id']
access_token = json_res['access_token']
refresh_token = json_res['refresh_token']
user_info = {}
for i in ('membership_id', 'access_token', 'refresh_token'):
user_info[i] = locals()[i]
# Pickle info and send to Redis
pickled_info = pickle.dumps(user_info)
red.publish(discord_id, pickled_info)
return redirect("https://www.bungie.net/", code=302)
if __name__ == "__main__":
app.run(host='0.0.0.0')
```
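The handler above publishes a pickled dict with membership_id, access_token and refresh_token on a Redis channel named after the Discord ID passed in `state`. A hedged sketch of the consuming side (for example, the bot waiting for the OAuth result); the channel ID is illustrative.
```python
import pickle

import redis

red = redis.StrictRedis(host='localhost', port=6379, db=0)
pubsub = red.pubsub()
pubsub.subscribe('123456789012345678')  # the Discord ID that was sent as `state`

for message in pubsub.listen():
    if message['type'] != 'message':
        continue  # skip the subscribe confirmation
    user_info = pickle.loads(message['data'])
    print(user_info['membership_id'], user_info['access_token'])
    break
```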
|
{
"source": "jgbarsness/journal_mngr",
"score": 3
}
|
#### File: journal_mngr/peck/entrybox.py
```python
from peck.ab_entry import AEntry
class TextBox():
'a text box for taking in input and displaying it'
def __init__(self, obj_ref: AEntry = None, attribute=None):
from tkinter import Tk, Text, Button, mainloop
self.text = None
self.root = Tk()
text_box = Text(self.root, height=15, width=50,
bg='#1c1d1c', fg='#fafbfa',
wrap='word',
highlightthickness=0,
selectbackground='#313231',
font=(None, 14))
text_box.pack(side='bottom', fill='both', expand='yes')
# auto focus on window to minimize clicking
text_box.focus_force()
button = Button(self.root, text='store',
command=lambda: get_text(),
font=(None, 12))
button.pack(side='top', fill='both')
def get_text():
'retrieve input and destroy window'
# refers to which section was passed, and assigns accordingly
if attribute == 'first':
obj_ref.first = text_box.get('1.0', 'end-1c')
elif attribute == 'second':
obj_ref.second = text_box.get('1.0', 'end-1c')
elif attribute == 'title':
obj_ref.title = text_box.get('1.0', 'end-1c')
# destroy root
            self.root.after(1, self.root.destroy)
mainloop()
```
#### File: journal_mngr/peck/modify_strats.py
```python
from peck.a_strategy import CommandStrategy
from peck.file_handle import FileHandle
import peck.info_and_paths as c
'strategies used to modify collections'
class DeleteModStrat(CommandStrategy):
'performs a delete operation on a collection object'
def call_command(self, collections: list, title: str, path: str) -> bool:
'bulk or single delete entries'
delete = StratHelpers.return_thing(title, collections)
# if nothing is returned by previous call, end
if (len(delete) == 0):
print('\nnothing to delete containing '
+ '\'' + c.CYAN + title + c.END + '\'')
return False
# if function can continue, print out entry
print('to be deleted, containing ' + '\''
+ c.CYAN + title + c.END + '\':')
for thing in delete:
print('\n' + thing + c.SEPERATOR)
choice = input('\ndelete ' + str(len(delete)) + ' entries? y/n\n')
if choice == 'y':
# find all occurances
for things in delete:
# remove them
collections.remove(things)
return True
else:
print('\nentries preserved')
return False
class QuickDeleteStrat(CommandStrategy):
'performs quick delete on a collections object'
def call_command(self, collections: list, title: str, path: str) -> bool:
# return if collection is empty
if (len(collections) == 0):
print('\nnothing to delete')
return False
answer = input('\ndelete last entry? y/n\n')
if answer == 'y':
del collections[-1]
return True
else:
print('\nnothing deleted')
return False
class StratHelpers():
'helper methods'
@staticmethod
def show_keyword(key: str, container: list):
'shows entries if keyword matches'
entry = StratHelpers.return_thing(key, container)
if (len(entry) == 0):
print('\nnothing to show')
return entry
# if function can continue, print out entry
StratHelpers.print_entries(entry)
return entry
@staticmethod
def return_thing(key: str, container: list):
        'returns a list of all entries containing a keyword'
# search for instances of keyword
entry = [elmnt for elmnt in container if key in elmnt]
return entry
@staticmethod
def print_entries(container):
        'used to print out things after search is run'
for thing in container:
print('\n' + thing + c.SEPERATOR)
```
#### File: journal_mngr/peck/tag_entry.py
```python
from peck.ab_entry import AEntry
import peck.info_and_paths as c
import peck.entry_writer
class TagEntry(AEntry):
'represents an entry with a tag'
def __init__(self, passed_title, tag):
from peck.entrybox import TextBox
super().__init__(passed_title)
self.tag = tag
self.writer = peck.entry_writer.TagWrite()
self.begin_entry()
def begin_entry(self):
super().begin_entry()
if self.print is False:
return
self.format_readability()
def write(self):
self.writer.write(self)
def format_readability(self):
super().format_readability()
```
#### File: journal_mngr/peck/view_strats.py
```python
from peck.a_strategy import CommandStrategy
from peck.file_handle import FileHandle
import peck.info_and_paths as c
from peck.modify_strats import StratHelpers
from datetime import datetime as dt
from re import match
'strategies used to display collections'
class ViewStrat(CommandStrategy):
'view / print strat'
def call_command(self, collections: list, title: str, path: str):
if not ViewHelpers.vs_file_check():
return
# if there is a keyword to search with
if (len(title) != 0):
criteria = title
print('entries containing ' + '\'' + c.BLUE + criteria
+ c.END + '\':')
if len(StratHelpers.show_keyword(title, collections)) != 0:
ViewHelpers.print_num_entries(len(StratHelpers.return_thing(
title,
collections)),
path)
# if no keyword is present, print out entire collection
else:
# check for formatted entries
if (len(collections) != 0):
print('all entries:')
StratHelpers.print_entries(collections)
ViewHelpers.print_num_entries(len(collections),
path)
else:
# means that a file is present, but nothing parsed from it
print('\nempty collection and/or invalid entry format')
class TSearchStrat(CommandStrategy):
'strat to search entries for tag'
def call_command(self, collections: list, title: str, path: str):
if not ViewHelpers.vs_file_check():
return
if (len(collections) != 0) and (len(title) != 0):
print('searching for tag ' + '\'' + c.BLUE + title + c.END + '\':')
if len(StratHelpers.show_keyword('(' + title + ')',
collections)) != 0:
ViewHelpers.print_num_entries(len(StratHelpers.return_thing(
'(' + title + ')',
collections)),
path)
else:
print('\nnothing to show\nformat: peck -t [tag]')
class DateSearch(CommandStrategy):
'searches entries by date'
def call_command(self, collections: list, title: str, path: str):
if not ViewHelpers.vs_file_check():
return
if (len(collections) != 0) and (len(title) != 0):
matches = self.search_by_date(collections, title)
if len(matches) != 0:
print('entries on ' + c.BLUE + str(title) + c.END + ':')
StratHelpers.print_entries(matches)
ViewHelpers.print_num_entries(len(matches),
path)
else:
print('\nno matches on this date or bad date format')
else:
print('\nnothing to show\nformat: peck -ds [mm/dd/yy]')
def search_by_date(self, collections: list, title: str) -> list:
matches = []
try:
# the criteria date given from title
criteria = dt.strptime(title, "%m/%d/%y")
except ValueError:
return []
for note in collections:
note_date = match(c.VIEW_REGEX, note).group(0)
try:
# replace call is used to standardize format
note_date_obj = dt.strptime(note_date,
c.DATETIME).replace(hour=00,
minute=00)
# if the date is the same
if note_date_obj == criteria:
matches.append(note)
except ValueError:
print('\n' + c.RED +
'error: entry parsing error. entry date format modified' +
c.END + '\n')
return
return matches
class ViewHelpers():
'utility for view strats'
@staticmethod
def vs_file_check() -> bool:
'checks file presence, prints any messages'
if not FileHandle.file_verify():
print("\ndefault entry file doesn't exist")
return False
else:
return True
@staticmethod
def print_num_entries(num: int, place: str) -> None:
'prints out the count of entries to be printed'
print(str(num) + " entry(s) in " + str(place))
```
|
{
"source": "jgberry/ssort",
"score": 3
}
|
#### File: src/ssort/_exceptions.py
```python
class UnknownEncodingError(Exception):
def __init__(self, msg, *, encoding):
super().__init__(msg)
self.encoding = encoding
class DecodingError(Exception):
pass
class ParseError(Exception):
def __init__(self, msg, *, lineno, col_offset):
super().__init__(msg)
self.lineno = lineno
self.col_offset = col_offset
class ResolutionError(Exception):
def __init__(self, msg, *, name, lineno, col_offset):
super().__init__(msg)
self.name = name
self.lineno = lineno
self.col_offset = col_offset
class WildcardImportError(Exception):
def __init__(self, msg, *, lineno, col_offset):
super().__init__(msg)
self.lineno = lineno
self.col_offset = col_offset
```
#### File: ssort/tests/test_ast.py
```python
from __future__ import annotations
import ast
import sys
from typing import Iterable
import pytest
from ssort._ast import iter_child_nodes
_deprecated_node_types: tuple[type[ast.AST], ...] = (
ast.AugLoad,
ast.AugStore,
ast.Param,
ast.Suite,
)
if sys.version_info >= (3, 9):
_deprecated_node_types += (ast.Index, ast.ExtSlice)
_ignored_node_types: tuple[type[ast.AST], ...] = (
ast.expr_context,
ast.boolop,
ast.operator,
ast.unaryop,
ast.cmpop,
)
def _nodes_types(
node_type: type[ast.AST] = ast.AST,
) -> Iterable[type[ast.AST]]:
# Skip deprecated AST nodes.
if issubclass(node_type, _deprecated_node_types):
return
# Skip ignored AST nodes.
if issubclass(node_type, _ignored_node_types):
return
subclasses = node_type.__subclasses__()
if subclasses:
# Note that we do not yield the node_type if it has any subclasses.
# This is because AST base classes are used for categorical purposes
# only and are not intended to be instantiated.
for subclass in subclasses:
yield from _nodes_types(subclass)
else:
yield node_type
def _instantiate_node(node_type: type[ast.AST]) -> ast.AST:
# AST node fields are either strings or iterables of child AST nodes. The
# empty string satisfies both those requirements.
return node_type(*([""] * len(node_type._fields)))
def parametrize_nodes() -> pytest.MarkDecorator:
node_types = list(_nodes_types())
nodes = [_instantiate_node(node_type) for node_type in node_types]
ids = [node_type.__name__ for node_type in node_types]
return pytest.mark.parametrize("node", nodes, ids=ids)
def test_iter_child_nodes_is_not_implemented_for_none() -> None:
with pytest.raises(NotImplementedError):
iter_child_nodes(None)
@parametrize_nodes()
def test_iter_child_nodes_is_implemented(node: ast.AST) -> None:
list(iter_child_nodes(node))
```
#### File: ssort/tests/test_bindings.py
```python
import ast
import sys
import textwrap
import pytest
from ssort._bindings import get_bindings
# Most walrus operator syntax is valid in 3.8. Only use this decorator for the
# rare cases where it is not.
walrus_operator = pytest.mark.skipif(
sys.version_info < (3, 9),
reason="some walrus operator syntax is not valid prior to python 3.9",
)
match_statement = pytest.mark.skipif(
sys.version_info < (3, 10),
reason="match statements were introduced in python 3.10",
)
def _parse(source):
source = textwrap.dedent(source)
root = ast.parse(source)
assert len(root.body) == 1
node = root.body[0]
if sys.version_info >= (3, 9):
print(ast.dump(node, include_attributes=True, indent=2))
else:
print(ast.dump(node, include_attributes=True))
return node
def test_function_def_bindings():
node = _parse(
"""
def function():
name
"""
)
assert list(get_bindings(node)) == ["function"]
def test_function_def_bindings_walrus_default():
node = _parse(
"""
def function(a, b = (b_binding := 2)):
pass
"""
)
assert list(get_bindings(node)) == ["function", "b_binding"]
def test_function_def_bindings_walrus_kw_default():
node = _parse(
"""
def function(*, kw1 = (kw1_binding := 1), kw2):
pass
"""
)
assert list(get_bindings(node)) == ["function", "kw1_binding"]
def test_function_def_bindings_walrus_type():
node = _parse(
"""
def function(
posonly: (posonly_type := int), / ,
arg: (arg_type := int),
*args: (args_type := int),
kwarg: (kwarg_type := int),
**kwargs: (kwargs_type := int)
) -> (return_type := int):
pass
"""
)
assert list(get_bindings(node)) == [
"function",
"posonly_type",
"arg_type",
"args_type",
"kwarg_type",
"kwargs_type",
"return_type",
]
@walrus_operator
def test_function_def_bindings_walrus_decorator():
node = _parse(
"""
@(p := property)
def prop(self):
pass
"""
)
assert list(get_bindings(node)) == ["p", "prop"]
def test_async_function_def_bindings():
"""
..code:: python
AsyncFunctionDef(
identifier name,
arguments args,
stmt* body,
expr* decorator_list,
expr? returns,
string? type_comment,
)
"""
node = _parse(
"""
async def function():
name
"""
)
assert list(get_bindings(node)) == ["function"]
def test_async_function_def_bindings_walrus_kw_default():
node = _parse(
"""
async def function(*, kw1 = (kw1_binding := 1), kw2):
pass
"""
)
assert list(get_bindings(node)) == ["function", "kw1_binding"]
def test_async_function_def_bindings_walrus_type():
node = _parse(
"""
async def function(
posonly: (posonly_type := int), / ,
arg: (arg_type := int),
*args: (args_type := int),
kwarg: (kwarg_type := int),
**kwargs: (kwargs_type := int)
) -> (return_type := int):
pass
"""
)
assert list(get_bindings(node)) == [
"function",
"posonly_type",
"arg_type",
"args_type",
"kwarg_type",
"kwargs_type",
"return_type",
]
@walrus_operator
def test_async_function_def_bindings_walrus_decorator():
node = _parse(
"""
@(p := property)
async def prop(self):
pass
"""
)
assert list(get_bindings(node)) == ["p", "prop"]
def test_class_def_bindings():
"""
..code:: python
ClassDef(
identifier name,
expr* bases,
keyword* keywords,
stmt* body,
expr* decorator_list,
)
"""
node = _parse(
"""
@decorator
class ClassName:
a = 1
def b(self):
pass
"""
)
assert list(get_bindings(node)) == ["ClassName"]
@walrus_operator
def test_class_def_bindings_walrus_decorator():
node = _parse(
"""
@(d := decorator())
class ClassName:
pass
"""
)
assert list(get_bindings(node)) == ["d", "ClassName"]
def test_class_def_bindings_walrus_base():
node = _parse(
"""
class ClassName(BaseClass, (OtherBase := namedtuple())):
pass
"""
)
assert list(get_bindings(node)) == ["OtherBase", "ClassName"]
def test_class_def_bindings_walrus_metaclass():
node = _parse(
"""
class Class(metaclass=(class_meta := MetaClass)):
pass
"""
)
assert list(get_bindings(node)) == ["class_meta", "Class"]
def test_class_def_bindings_walrus_body():
node = _parse(
"""
class Class:
a = (prop := 2)
"""
)
assert list(get_bindings(node)) == ["Class"]
def test_return_bindings():
"""
..code:: python
Return(expr? value)
"""
node = _parse("return x")
assert list(get_bindings(node)) == []
def test_return_bindings_walrus():
node = _parse("return (x := 1)")
assert list(get_bindings(node)) == ["x"]
def test_delete_bindings():
"""
..code:: python
Delete(expr* targets)
"""
node = _parse("del something")
assert list(get_bindings(node)) == []
def test_delete_bindings_multiple():
node = _parse("del a, b")
assert list(get_bindings(node)) == []
def test_delete_bindings_subscript():
node = _parse("del a[b:c]")
assert list(get_bindings(node)) == []
def test_delete_bindings_attribute():
node = _parse("del obj.attr")
assert list(get_bindings(node)) == []
def test_assign_bindings():
"""
..code:: python
Assign(expr* targets, expr value, string? type_comment)
"""
node = _parse("a = b")
assert list(get_bindings(node)) == ["a"]
def test_assign_bindings_star():
node = _parse("a, *b = c")
assert list(get_bindings(node)) == ["a", "b"]
def test_assign_bindings_attribute():
node = _parse("obj.attr = value")
assert list(get_bindings(node)) == []
def test_assign_bindings_list():
node = _parse("[a, b, [c, d]] = value")
assert list(get_bindings(node)) == ["a", "b", "c", "d"]
def test_assign_bindings_list_star():
node = _parse("[first, *rest] = value")
assert list(get_bindings(node)) == ["first", "rest"]
def test_assign_bindings_walrus_value():
node = _parse("a = (b := c)")
assert list(get_bindings(node)) == ["a", "b"]
def test_aug_assign_bindings():
"""
..code:: python
AugAssign(expr target, operator op, expr value)
"""
node = _parse("a += b")
assert list(get_bindings(node)) == ["a"]
def test_aug_assign_bindings_attribute():
node = _parse("obj.attr /= value")
assert list(get_bindings(node)) == []
def test_aug_assign_bindings_walrus_value():
node = _parse("a ^= (b := c)")
assert list(get_bindings(node)) == ["a", "b"]
def test_ann_assign_bindings():
"""
..code:: python
# 'simple' indicates that we annotate simple name without parens
AnnAssign(expr target, expr annotation, expr? value, int simple)
"""
node = _parse("a: int = b")
assert list(get_bindings(node)) == ["a"]
def test_ann_assign_bindings_no_value():
# TODO this expression doesn't technically bind `a`.
node = _parse("a: int")
assert list(get_bindings(node)) == ["a"]
def test_ann_assign_bindings_walrus_value():
node = _parse("a: int = (b := c)")
assert list(get_bindings(node)) == ["a", "b"]
def test_ann_assign_bindings_walrus_type():
node = _parse("a: (a_type := int) = 4")
assert list(get_bindings(node)) == ["a", "a_type"]
def test_for_bindings():
"""
..code:: python
# use 'orelse' because else is a keyword in target languages
For(
expr target,
expr iter,
stmt* body,
stmt* orelse,
string? type_comment,
)
"""
node = _parse(
"""
for i in range(10):
a += i
else:
b = 4
"""
)
assert list(get_bindings(node)) == ["i", "a", "b"]
def test_for_bindings_walrus():
node = _parse(
"""
for i in (r := range(10)):
pass
"""
)
assert list(get_bindings(node)) == ["i", "r"]
def test_async_for_bindings():
"""
..code:: python
AsyncFor(
expr target,
expr iter,
stmt* body,
stmt* orelse,
string? type_comment,
)
"""
node = _parse(
"""
async for i in range(10):
a += i
else:
b = 4
"""
)
assert list(get_bindings(node)) == ["i", "a", "b"]
def test_async_for_bindings_walrus():
node = _parse(
"""
async for i in (r := range(10)):
pass
"""
)
assert list(get_bindings(node)) == ["i", "r"]
def test_while_bindings():
"""
..code:: python
While(expr test, stmt* body, stmt* orelse)
"""
node = _parse(
"""
while test():
a = 1
else:
b = 2
"""
)
assert list(get_bindings(node)) == ["a", "b"]
def test_while_bindings_walrus_test():
node = _parse(
"""
while (value := test):
pass
"""
)
assert list(get_bindings(node)) == ["value"]
def test_if_bindings():
"""
..code:: python
If(expr test, stmt* body, stmt* orelse)
"""
node = _parse(
"""
if predicate_one():
a = 1
elif predicate_two():
b = 2
else:
c = 3
"""
)
assert list(get_bindings(node)) == ["a", "b", "c"]
def test_if_bindings_walrus_test():
node = _parse(
"""
if (result := predicate()):
pass
"""
)
assert list(get_bindings(node)) == ["result"]
def test_with_bindings():
"""
..code:: python
With(withitem* items, stmt* body, string? type_comment)
"""
node = _parse(
"""
with A() as a:
b = 4
"""
)
assert list(get_bindings(node)) == ["a", "b"]
def test_with_bindings_requirements_example():
node = _parse(
"""
with chdir(os.path.dirname(path)):
requirements = parse_requirements(path)
for req in requirements.values():
if req.name:
results.append(req.name)
"""
)
assert list(get_bindings(node)) == ["requirements", "req"]
def test_with_bindings_multiple():
node = _parse(
"""
with A() as a, B() as b:
pass
"""
)
assert list(get_bindings(node)) == ["a", "b"]
def test_with_bindings_unbound():
node = _parse(
"""
with A():
pass
"""
)
assert list(get_bindings(node)) == []
def test_with_bindings_tuple():
node = _parse(
"""
with A() as (a, b):
pass
"""
)
assert list(get_bindings(node)) == ["a", "b"]
def test_with_bindings_walrus():
node = _parse(
"""
with (ctx := A()) as a:
pass
"""
)
assert list(get_bindings(node)) == ["ctx", "a"]
def test_async_with_bindings():
"""
..code:: python
AsyncWith(withitem* items, stmt* body, string? type_comment)
"""
node = _parse(
"""
async with A() as a:
b = 4
"""
)
assert list(get_bindings(node)) == ["a", "b"]
def test_async_with_bindings_multiple():
node = _parse(
"""
async with A() as a, B() as b:
pass
"""
)
assert list(get_bindings(node)) == ["a", "b"]
def test_async_with_bindings_unbound():
node = _parse(
"""
async with A():
pass
"""
)
assert list(get_bindings(node)) == []
def test_async_with_bindings_tuple():
node = _parse(
"""
async with A() as (a, b):
pass
"""
)
assert list(get_bindings(node)) == ["a", "b"]
def test_async_with_bindings_walrus():
node = _parse(
"""
async with (ctx := A()) as a:
pass
"""
)
assert list(get_bindings(node)) == ["ctx", "a"]
def test_raise_bindings():
"""
..code:: python
Raise(expr? exc, expr? cause)
"""
node = _parse("raise TypeError()")
assert list(get_bindings(node)) == []
def test_raise_bindings_reraise():
node = _parse("raise")
assert list(get_bindings(node)) == []
def test_raise_bindings_with_cause():
node = _parse("raise TypeError() from exc")
assert list(get_bindings(node)) == []
def test_raise_bindings_walrus():
node = _parse("raise (exc := TypeError())")
assert list(get_bindings(node)) == ["exc"]
def test_raise_bindings_walrus_in_cause():
node = _parse("raise TypeError() from (original := exc)")
assert list(get_bindings(node)) == ["original"]
def test_try_bindings():
"""
..code:: python
Try(
stmt* body,
excepthandler* handlers,
stmt* orelse,
stmt* finalbody,
)
"""
node = _parse(
"""
try:
a = something_stupid()
except Exception as exc:
b = recover()
else:
c = otherwise()
finally:
d = finish()
"""
)
assert list(get_bindings(node)) == ["a", "exc", "b", "c", "d"]
def test_try_bindings_walrus():
node = _parse(
"""
try:
pass
except (x := Exception):
pass
"""
)
assert list(get_bindings(node)) == ["x"]
def test_assert_bindings():
"""
..code:: python
Assert(expr test, expr? msg)
"""
node = _parse("assert condition()")
assert list(get_bindings(node)) == []
def test_assert_bindings_with_message():
node = _parse('assert condition(), "message"')
assert list(get_bindings(node)) == []
def test_assert_bindings_walrus_condition():
node = _parse("assert (result := condition())")
assert list(get_bindings(node)) == ["result"]
def test_assert_bindings_walrus_message():
node = _parse('assert condition, (message := "message")')
assert list(get_bindings(node)) == ["message"]
def test_import_bindings():
"""
..code:: python
Import(alias* names)
"""
node = _parse("import something")
assert list(get_bindings(node)) == ["something"]
def test_import_bindings_as():
node = _parse("import something as something_else")
assert list(get_bindings(node)) == ["something_else"]
def test_import_bindings_nested():
node = _parse("import module.submodule")
assert list(get_bindings(node)) == ["module"]
def test_import_from_bindings():
"""
..code:: python
ImportFrom(identifier? module, alias* names, int? level)
"""
node = _parse("from module import a, b")
assert list(get_bindings(node)) == ["a", "b"]
def test_import_from_bindings_as():
node = _parse("from module import something as something_else")
assert list(get_bindings(node)) == ["something_else"]
def test_global_bindings():
"""
..code:: python
Global(identifier* names)
"""
node = _parse("global name")
assert list(get_bindings(node)) == ["name"]
def test_global_bindings_multiple():
node = _parse("global a, b")
assert list(get_bindings(node)) == ["a", "b"]
def test_non_local_bindings():
"""
..code:: python
Nonlocal(identifier* names)
"""
node = _parse("nonlocal name")
assert list(get_bindings(node)) == ["name"]
def test_nonlocal_bindings_multiple():
node = _parse("nonlocal a, b")
assert list(get_bindings(node)) == ["a", "b"]
def test_pass_bindings():
"""
..code:: python
Pass
"""
node = _parse("pass")
assert list(get_bindings(node)) == []
def test_break_bindings():
"""
..code:: python
Break
"""
node = _parse("break")
assert list(get_bindings(node)) == []
def test_continue_bindings():
"""
..code:: python
Continue
"""
node = _parse("continue")
assert list(get_bindings(node)) == []
def test_bool_op_bindings():
"""
..code:: python
# BoolOp() can use left & right?
# expr
BoolOp(boolop op, expr* values)
"""
node = _parse("a and b")
assert list(get_bindings(node)) == []
def test_named_expr_bindings():
"""
..code:: python
NamedExpr(expr target, expr value)
"""
node = _parse("(a := b)")
assert list(get_bindings(node)) == ["a"]
def test_named_expr_bindings_recursive():
"""
..code:: python
NamedExpr(expr target, expr value)
"""
node = _parse("(a := (b := (c := d)))")
assert list(get_bindings(node)) == ["a", "b", "c"]
def test_bool_op_bindings_walrus_left():
node = _parse("(left := a) and b")
assert list(get_bindings(node)) == ["left"]
def test_bool_op_bindings_walrus_right():
node = _parse("a or (right := b)")
assert list(get_bindings(node)) == ["right"]
def test_bool_op_bindings_walrus_both():
node = _parse("(left := a) and (right := b)")
assert list(get_bindings(node)) == ["left", "right"]
def test_bool_op_bindings_walrus_multiple():
node = _parse("(a := 1) and (b := 2) and (c := 3)")
assert list(get_bindings(node)) == ["a", "b", "c"]
def test_bin_op_bindings():
"""
..code:: python
BinOp(expr left, operator op, expr right)
"""
node = _parse("a and b")
assert list(get_bindings(node)) == []
def test_bin_op_bindings_walrus_left():
node = _parse("(left := a) | b")
assert list(get_bindings(node)) == ["left"]
def test_bin_op_bindings_walrus_right():
node = _parse("a ^ (right := b)")
assert list(get_bindings(node)) == ["right"]
def test_bin_op_bindings_walrus_both():
node = _parse("(left := a) + (right := b)")
assert list(get_bindings(node)) == ["left", "right"]
def test_unary_op_bindings():
"""
..code:: python
UnaryOp(unaryop op, expr operand)
"""
node = _parse("-a")
assert list(get_bindings(node)) == []
def test_unary_op_bindings_walrus():
node = _parse("-(a := b)")
assert list(get_bindings(node)) == ["a"]
def test_lambda_bindings():
"""
..code:: python
Lambda(arguments args, expr body)
"""
pass
def test_lambda_bindings_walrus_default():
node = _parse("(lambda a, b = (b_binding := 2): None)")
assert list(get_bindings(node)) == ["b_binding"]
def test_lambda_bindings_walrus_kw_default():
node = _parse("(lambda *, kw1 = (kw1_binding := 1), kw2: None)")
assert list(get_bindings(node)) == ["kw1_binding"]
def test_lambda_bindings_walrus_body():
node = _parse("(lambda : (a := 1) + a)")
assert list(get_bindings(node)) == []
def test_if_exp_bindings():
"""
..code:: python
IfExp(expr test, expr body, expr orelse)
"""
node = _parse("subsequent() if predicate() else alternate()")
assert list(get_bindings(node)) == []
def test_if_exp_bindings_walrus_subsequent():
node = _parse("(a := subsequent()) if predicate() else alternate()")
assert list(get_bindings(node)) == ["a"]
def test_if_exp_bindings_walrus_predicate():
node = _parse("subsequent() if (a := predicate()) else alternate()")
assert list(get_bindings(node)) == ["a"]
def test_if_exp_bindings_walrus_alternate():
node = _parse("subsequent() if predicate() else (a := alternate())")
assert list(get_bindings(node)) == ["a"]
def test_if_exp_bindings_walrus():
node = _parse(
"(a := subsequent()) if (b := predicate()) else (c := alternate())"
)
assert list(get_bindings(node)) == ["b", "a", "c"]
def test_dict_bindings():
"""
..code:: python
Dict(expr* keys, expr* values)
"""
node = _parse("{key: value}")
assert list(get_bindings(node)) == []
def test_dict_bindings_empty():
node = _parse("{}")
assert list(get_bindings(node)) == []
def test_dict_bindings_unpack():
node = _parse("{**values}")
assert list(get_bindings(node)) == []
def test_dict_bindings_walrus_key():
node = _parse("{(key := genkey()): value}")
assert list(get_bindings(node)) == ["key"]
def test_dict_bindings_walrus_value():
node = _parse("{key: (value := genvalue())}")
assert list(get_bindings(node)) == ["value"]
def test_dict_bindings_walrus_unpack():
node = _parse("{key: value, **(rest := other)}")
assert list(get_bindings(node)) == ["rest"]
def test_set_bindings():
"""
..code:: python
Set(expr* elts)
"""
node = _parse("{a, b, c}")
assert list(get_bindings(node)) == []
def test_set_bindings_unpack():
node = _parse("{a, b, *rest}")
assert list(get_bindings(node)) == []
@walrus_operator
def test_set_bindings_walrus():
node = _parse("{a, {b := genb()}, c}")
assert list(get_bindings(node)) == ["b"]
def test_set_bindings_walrus_py38():
node = _parse("{a, {(b := genb())}, c}")
assert list(get_bindings(node)) == ["b"]
def test_set_bindings_walrus_unpack():
node = _parse("{a, b, *(rest := other)}")
assert list(get_bindings(node)) == ["rest"]
def test_list_comp_bindings():
"""
..code:: python
comprehension = (expr target, expr iter, expr* ifs, int is_async)
ListComp(expr elt, comprehension* generators)
"""
node = _parse("[item for item in iterator if condition(item)]")
assert list(get_bindings(node)) == ["item"]
def test_list_comp_bindings_walrus_target():
node = _parse("[( a:= item) for item in iterator if condition(item)]")
assert list(get_bindings(node)) == ["a", "item"]
def test_list_comp_bindings_walrus_iter():
node = _parse("[item for item in (it := iterator) if condition(item)]")
assert list(get_bindings(node)) == ["item", "it"]
def test_list_comp_bindings_walrus_condition():
node = _parse("[item for item in iterator if (c := condition(item))]")
assert list(get_bindings(node)) == ["item", "c"]
def test_set_comp_bindings():
"""
..code:: python
comprehension = (expr target, expr iter, expr* ifs, int is_async)
SetComp(expr elt, comprehension* generators)
"""
node = _parse("{item for item in iterator if condition(item)}")
assert list(get_bindings(node)) == ["item"]
def test_set_comp_bindings_walrus_target():
node = _parse("{( a:= item) for item in iterator if condition(item)}")
assert list(get_bindings(node)) == ["a", "item"]
def test_set_comp_bindings_walrus_iter():
node = _parse("{item for item in (it := iterator) if condition(item)}")
assert list(get_bindings(node)) == ["item", "it"]
def test_set_comp_bindings_walrus_condition():
node = _parse("{item for item in iterator if (c := condition(item))}")
assert list(get_bindings(node)) == ["item", "c"]
def test_dict_comp_bindings():
"""
..code:: python
DictComp(expr key, expr value, comprehension* generators)
"""
node = _parse("{item[0]: item[1] for item in iterator if check(item)}")
assert list(get_bindings(node)) == ["item"]
def test_dict_comp_bindings_unpack():
node = _parse("{key: value for key, value in iterator}")
assert list(get_bindings(node)) == ["key", "value"]
def test_dict_comp_bindings_walrus_key():
node = _parse(
"{(key := item[0]): item[1] for item in iterator if check(item)}"
)
assert list(get_bindings(node)) == ["key", "item"]
def test_dict_comp_bindings_walrus_value():
node = _parse(
"{item[0]: (value := item[1]) for item in iterator if check(item)}"
)
assert list(get_bindings(node)) == ["value", "item"]
def test_dict_comp_bindings_walrus_iter():
node = _parse(
"{item[0]: item[1] for item in (it := iterator) if check(item)}"
)
assert list(get_bindings(node)) == ["item", "it"]
def test_dict_comp_bindings_walrus_condition():
node = _parse(
"{item[0]: item[1] for item in iterator if (c := check(item))}"
)
assert list(get_bindings(node)) == ["item", "c"]
def test_generator_exp_bindings():
"""
..code:: python
GeneratorExp(expr elt, comprehension* generators)
"""
node = _parse("(item for item in iterator if condition(item))")
assert list(get_bindings(node)) == ["item"]
def test_generator_exp_bindings_walrus_target():
node = _parse("(( a:= item) for item in iterator if condition(item))")
assert list(get_bindings(node)) == ["a", "item"]
def test_generator_exp_bindings_walrus_iter():
node = _parse("(item for item in (it := iterator) if condition(item))")
assert list(get_bindings(node)) == ["item", "it"]
def test_generator_exp_bindings_walrus_condition():
node = _parse("(item for item in iterator if (c := condition(item)))")
assert list(get_bindings(node)) == ["item", "c"]
def test_await_bindings():
"""
..code:: python
# the grammar constrains where yield expressions can occur
Await(expr value)
"""
node = _parse("await fun()")
assert list(get_bindings(node)) == []
def test_await_bindings_walrus():
node = _parse("await (r := fun())")
assert list(get_bindings(node)) == ["r"]
def test_yield_bindings():
"""
..code:: python
Yield(expr? value)
"""
node = _parse("yield fun()")
assert list(get_bindings(node)) == []
def test_yield_bindings_no_result():
node = _parse("yield")
assert list(get_bindings(node)) == []
def test_yield_bindings_walrus():
node = _parse("yield (r := fun())")
assert list(get_bindings(node)) == ["r"]
def test_yield_from_bindings():
"""
..code:: python
YieldFrom(expr value)
"""
node = _parse("yield from fun()")
assert list(get_bindings(node)) == []
def test_yield_from_bindings_walrus():
node = _parse("yield from (r := fun())")
assert list(get_bindings(node)) == ["r"]
def test_compare_bindings():
"""
..code:: python
# need sequences for compare to distinguish between
# x < 4 < 3 and (x < 4) < 3
Compare(expr left, cmpop* ops, expr* comparators)
"""
node = _parse("0 < value < 5")
assert list(get_bindings(node)) == []
def test_compare_bindings_walrus():
node = _parse("(a := 0) < (b := value) < (c := 5)")
assert list(get_bindings(node)) == ["a", "b", "c"]
def test_call_bindings():
"""
..code:: python
keyword = (identifier? arg, expr value)
Call(expr func, expr* args, keyword* keywords)
"""
node = _parse("fun(arg, *args, kwarg=kwarg, **kwargs)")
assert list(get_bindings(node)) == []
def test_call_bindings_walrus_function():
node = _parse("(f := fun)()")
assert list(get_bindings(node)) == ["f"]
def test_call_bindings_walrus_args():
node = _parse(
"""
fun(
(arg_binding := arg),
*(args_binding := args),
kwarg=(kwarg_binding := kwarg),
**(kwargs_binding := kwargs),
)
"""
)
assert list(get_bindings(node)) == [
"arg_binding",
"args_binding",
"kwarg_binding",
"kwargs_binding",
]
def test_joined_str_bindings():
"""
..code:: python
JoinedStr(expr* values)
FormattedValue(expr value, int? conversion, expr? format_spec)
"""
node = _parse('f"a: {a}"')
assert list(get_bindings(node)) == []
def test_joined_str_bindings_walrus():
"""
..code:: python
JoinedStr(expr* values)
FormattedValue(expr value, int? conversion, expr? format_spec)
"""
node = _parse('f"a: {(a := get_a())}"')
assert list(get_bindings(node)) == ["a"]
def test_constant_bindings():
"""
..code:: python
Constant(constant value, string? kind)
"""
node = _parse("1")
assert list(get_bindings(node)) == []
def test_attribute_bindings():
"""
..code:: python
# the following expression can appear in assignment context
Attribute(expr value, identifier attr, expr_context ctx)
"""
node = _parse("a.b.c")
assert list(get_bindings(node)) == []
def test_attribute_bindings_walrus():
node = _parse("(a_binding := a).b")
assert list(get_bindings(node)) == ["a_binding"]
def test_subscript_bindings():
"""
..code:: python
Subscript(expr value, expr slice, expr_context ctx)
"""
node = _parse("a[b]")
assert list(get_bindings(node)) == []
def test_subscript_bindings_slice():
node = _parse("a[b:c]")
assert list(get_bindings(node)) == []
def test_subscript_bindings_slice_with_step():
node = _parse("a[b:c:d]")
assert list(get_bindings(node)) == []
def test_subscript_bindings_walrus_value():
node = _parse("(a_binding := a)[b]")
assert list(get_bindings(node)) == ["a_binding"]
def test_subscript_bindings_walrus_index():
node = _parse("a[(b_binding := b)]")
assert list(get_bindings(node)) == ["b_binding"]
def test_subscript_bindings_walrus_slice():
node = _parse("a[(b_binding := b):(c_binding := c)]")
assert list(get_bindings(node)) == ["b_binding", "c_binding"]
def test_subscript_bindings_walrus_slice_with_step():
node = _parse("a[(b_binding := b):(c_binding := c):(d_binding := d)]")
assert list(get_bindings(node)) == ["b_binding", "c_binding", "d_binding"]
def test_starred_bindings():
"""
..code:: python
Starred(expr value, expr_context ctx)
"""
node = _parse("*a")
assert list(get_bindings(node)) == []
def test_starred_bindings_walrus():
node = _parse("*(a_binding := a)")
assert list(get_bindings(node)) == ["a_binding"]
def test_name_bindings():
"""
..code:: python
Name(identifier id, expr_context ctx)
"""
node = _parse("a")
assert list(get_bindings(node)) == []
def test_list_bindings():
"""
..code:: python
List(expr* elts, expr_context ctx)
"""
node = _parse("[a, b, c]")
assert list(get_bindings(node)) == []
def test_list_bindings_unpack():
node = _parse("{a, b, *rest}")
assert list(get_bindings(node)) == []
def test_list_bindings_walrus():
node = _parse("[a, (b := genb()), c]")
assert list(get_bindings(node)) == ["b"]
def test_list_bindings_walrus_unpack():
node = _parse("[a, b, *(rest := other)]")
assert list(get_bindings(node)) == ["rest"]
def test_tuple_bindings():
"""
..code:: python
Tuple(expr* elts, expr_context ctx)
"""
node = _parse("(a, b, c)")
assert list(get_bindings(node)) == []
def test_tuple_bindings_unpack():
node = _parse("(a, b, *rest)")
assert list(get_bindings(node)) == []
def test_tuple_bindings_walrus():
node = _parse("(a, (b := genb()), c)")
assert list(get_bindings(node)) == ["b"]
def test_tuple_bindings_walrus_unpack():
node = _parse("(a, b, *(rest := other))")
assert list(get_bindings(node)) == ["rest"]
def test_formatted_value_bindings():
"""
..code:: python
FormattedValue(expr value, int conversion, expr? format_spec)
"""
node = _parse("f'{a} {b} {c}'")
assert list(get_bindings(node)) == []
def test_formatted_value_bindings_walrus():
node = _parse("f'{a} {1 + (b := 1)} {c}'")
assert list(get_bindings(node)) == ["b"]
def test_formatted_value_bindings_format_spec_walrus():
node = _parse("f'{a} {b:{0 + (c := 0.3)}} {d}'")
assert list(get_bindings(node)) == ["c"]
@match_statement
def test_match_statement_bindings_literal():
node = _parse(
"""
match a:
case True:
pass
"""
)
assert list(get_bindings(node)) == []
@match_statement
def test_match_statement_bindings_capture():
node = _parse(
"""
match a:
case b:
pass
"""
)
assert list(get_bindings(node)) == ["b"]
@match_statement
def test_match_statement_bindings_wildcard():
node = _parse(
"""
match a:
case _:
pass
"""
)
assert list(get_bindings(node)) == []
@match_statement
def test_match_statement_bindings_constant():
node = _parse(
"""
match a:
case 1:
pass
"""
)
assert list(get_bindings(node)) == []
@match_statement
def test_match_statement_bindings_named_constant():
node = _parse(
"""
match a:
case MyEnum.CONSTANT:
pass
"""
)
assert list(get_bindings(node)) == []
@match_statement
def test_match_statement_bindings_sequence():
node = _parse(
"""
match a:
case [b, *c, d, _]:
pass
"""
)
assert list(get_bindings(node)) == ["b", "c", "d"]
@match_statement
def test_match_statement_bindings_sequence_wildcard():
node = _parse(
"""
match a:
case [*_]:
pass
"""
)
assert list(get_bindings(node)) == []
@match_statement
def test_match_statement_bindings_mapping():
node = _parse(
"""
match a:
case {"k1": "v1", "k2": b, "k3": _, **c}:
pass
"""
)
assert list(get_bindings(node)) == ["b", "c"]
@match_statement
def test_match_statement_bindings_class():
node = _parse(
"""
match a:
case MyClass(0, b, x=_, y=c):
pass
"""
)
assert list(get_bindings(node)) == ["b", "c"]
@match_statement
def test_match_statement_bindings_or():
node = _parse(
"""
match a:
case b | c:
pass
"""
)
assert list(get_bindings(node)) == ["b", "c"]
@match_statement
def test_match_statement_bindings_as():
node = _parse(
"""
match a:
case b as c:
pass
"""
)
assert list(get_bindings(node)) == ["b", "c"]
```
#### File: ssort/tests/test_dependencies.py
```python
import textwrap
from ssort._dependencies import module_statements_graph
from ssort._parsing import parse
def _clean(source):
return textwrap.dedent(source).strip() + "\n"
def _unreachable(*args, **kwargs):
raise AssertionError("unreachable")
def test_dependencies_ordered_by_first_use():
source = _clean(
"""
def c():
pass
def a():
map()
b()
c()
def b():
pass
"""
)
c, a, b = statements = list(parse(source, filename="<unknown>"))
graph = module_statements_graph(
statements, on_unresolved=_unreachable, on_wildcard_import=_unreachable
)
assert list(graph.dependencies[a]) == [b, c]
```
#### File: ssort/tests/test_executable.py
```python
import pathlib
import subprocess
import sys
import pytest
from ssort._utils import escape_path
_good = b"""
def _private():
pass
def public():
return _private()
"""
_unsorted = b"""
def public():
return _private()
def _private():
pass
"""
_encoding = b"""
# coding=invalid-encoding
"""
_character = b"""
# coding=ascii
\xfe = 2
"""
_syntax = b"""
def _private(
pass
def public(
return _private()
"""
_resolution = b"""
def _private():
pass
def public():
return _other()
"""
_double_resolution = b"""
def _private():
pass
def public():
return _other() + _same()
"""
def _write_fixtures(dirpath, texts):
paths = []
for index, text in enumerate(texts):
path = dirpath / f"file_{index:04}.py"
path.write_bytes(text)
paths.append(str(path))
return paths
@pytest.fixture(params=["entrypoint", "module"])
def check(request):
def _check(dirpath):
ssort_exe = {
"entrypoint": ["ssort"],
"module": [sys.executable, "-m", "ssort"],
}[request.param]
result = subprocess.run(
[*ssort_exe, "--check", str(dirpath)],
capture_output=True,
encoding="utf-8",
)
return result.stderr.splitlines(keepends=True), result.returncode
return _check
@pytest.fixture(params=["entrypoint", "module"])
def ssort(request):
def _ssort(dirpath):
ssort_exe = {
"entrypoint": ["ssort"],
"module": [sys.executable, "-m", "ssort"],
}[request.param]
result = subprocess.run(
[*ssort_exe, str(dirpath)],
capture_output=True,
encoding="utf-8",
)
return result.stderr.splitlines(keepends=True), result.returncode
return _ssort
def test_check_all_well(check, tmp_path):
_write_fixtures(tmp_path, [_good, _good, _good])
expected_msgs = [
"3 files would be left unchanged\n",
]
expected_status = 0
actual_msgs, actual_status = check(tmp_path)
assert (actual_msgs, actual_status) == (expected_msgs, expected_status)
def test_check_one_unsorted(check, tmp_path):
paths = _write_fixtures(tmp_path, [_unsorted, _good, _good])
expected_msgs = [
f"ERROR: {escape_path(paths[0])} is incorrectly sorted\n",
"1 file would be resorted, 2 files would be left unchanged\n",
]
expected_status = 1
actual_msgs, actual_status = check(tmp_path)
assert (actual_msgs, actual_status) == (expected_msgs, expected_status)
def test_check_all_unsorted(check, tmp_path):
paths = _write_fixtures(tmp_path, [_unsorted, _unsorted, _unsorted])
expected_msgs = [
f"ERROR: {escape_path(paths[0])} is incorrectly sorted\n",
f"ERROR: {escape_path(paths[1])} is incorrectly sorted\n",
f"ERROR: {escape_path(paths[2])} is incorrectly sorted\n",
"3 files would be resorted\n",
]
expected_status = 1
actual_msgs, actual_status = check(tmp_path)
assert (actual_msgs, actual_status) == (expected_msgs, expected_status)
def test_check_one_syntax_error(check, tmp_path):
paths = _write_fixtures(tmp_path, [_syntax, _good, _good])
expected_msgs = [
f"ERROR: syntax error in {escape_path(paths[0])}: line 3, column 5\n",
"2 files would be left unchanged, 1 file would not be sortable\n",
]
expected_status = 1
actual_msgs, actual_status = check(tmp_path)
assert (actual_msgs, actual_status) == (expected_msgs, expected_status)
def test_check_all_syntax_error(check, tmp_path):
paths = _write_fixtures(tmp_path, [_syntax, _syntax, _syntax])
expected_msgs = [
f"ERROR: syntax error in {escape_path(paths[0])}: line 3, column 5\n",
f"ERROR: syntax error in {escape_path(paths[1])}: line 3, column 5\n",
f"ERROR: syntax error in {escape_path(paths[2])}: line 3, column 5\n",
"3 files would not be sortable\n",
]
expected_status = 1
actual_msgs, actual_status = check(tmp_path)
assert (actual_msgs, actual_status) == (expected_msgs, expected_status)
def test_check_resolution_error(check, tmp_path):
paths = _write_fixtures(tmp_path, [_resolution, _good, _good])
expected_msgs = [
f"ERROR: unresolved dependency '_other' in {escape_path(paths[0])}: line 6, column 11\n",
"2 files would be left unchanged, 1 file would not be sortable\n",
]
expected_status = 1
actual_msgs, actual_status = check(tmp_path)
assert (actual_msgs, actual_status) == (expected_msgs, expected_status)
def test_check_double_resolution_error(check, tmp_path):
paths = _write_fixtures(tmp_path, [_double_resolution, _good, _good])
expected_msgs = [
f"ERROR: unresolved dependency '_other' in {escape_path(paths[0])}: line 6, column 11\n",
f"ERROR: unresolved dependency '_same' in {escape_path(paths[0])}: line 6, column 22\n",
"2 files would be left unchanged, 1 file would not be sortable\n",
]
expected_status = 1
actual_msgs, actual_status = check(tmp_path)
assert (actual_msgs, actual_status) == (expected_msgs, expected_status)
def test_check_one_unsorted_one_syntax_error(check, tmp_path):
paths = _write_fixtures(tmp_path, [_syntax, _unsorted, _good])
expected_msgs = [
f"ERROR: syntax error in {escape_path(paths[0])}: line 3, column 5\n",
f"ERROR: {escape_path(paths[1])} is incorrectly sorted\n",
"1 file would be resorted, 1 file would be left unchanged, 1 file would not be sortable\n",
]
expected_status = 1
actual_msgs, actual_status = check(tmp_path)
assert (actual_msgs, actual_status) == (expected_msgs, expected_status)
def test_ssort_all_well(ssort, tmp_path):
_write_fixtures(tmp_path, [_good, _good, _good])
expected_msgs = [
"3 files were left unchanged\n",
]
expected_status = 0
actual_msgs, actual_status = ssort(tmp_path)
assert (actual_msgs, actual_status) == (expected_msgs, expected_status)
def test_ssort_one_unsorted(ssort, tmp_path):
paths = _write_fixtures(tmp_path, [_unsorted, _good, _good])
expected_msgs = [
f"Sorting {escape_path(paths[0])}\n",
"1 file was resorted, 2 files were left unchanged\n",
]
expected_status = 0
actual_msgs, actual_status = ssort(tmp_path)
assert (actual_msgs, actual_status) == (expected_msgs, expected_status)
def test_ssort_all_unsorted(ssort, tmp_path):
paths = _write_fixtures(tmp_path, [_unsorted, _unsorted, _unsorted])
expected_msgs = [
f"Sorting {escape_path(paths[0])}\n",
f"Sorting {escape_path(paths[1])}\n",
f"Sorting {escape_path(paths[2])}\n",
"3 files were resorted\n",
]
expected_status = 0
actual_msgs, actual_status = ssort(tmp_path)
assert (actual_msgs, actual_status) == (expected_msgs, expected_status)
def test_ssort_one_syntax_error(ssort, tmp_path):
paths = _write_fixtures(tmp_path, [_syntax, _good, _good])
expected_msgs = [
f"ERROR: syntax error in {escape_path(paths[0])}: line 3, column 5\n",
"2 files were left unchanged, 1 file was not sortable\n",
]
expected_status = 1
actual_msgs, actual_status = ssort(tmp_path)
assert (actual_msgs, actual_status) == (expected_msgs, expected_status)
def test_ssort_all_syntax_error(ssort, tmp_path):
paths = _write_fixtures(tmp_path, [_syntax, _syntax, _syntax])
expected_msgs = [
f"ERROR: syntax error in {escape_path(paths[0])}: line 3, column 5\n",
f"ERROR: syntax error in {escape_path(paths[1])}: line 3, column 5\n",
f"ERROR: syntax error in {escape_path(paths[2])}: line 3, column 5\n",
"3 files were not sortable\n",
]
expected_status = 1
actual_msgs, actual_status = ssort(tmp_path)
assert (actual_msgs, actual_status) == (expected_msgs, expected_status)
def test_ssort_resolution_error(ssort, tmp_path):
paths = _write_fixtures(tmp_path, [_resolution, _good, _good])
expected_msgs = [
f"ERROR: unresolved dependency '_other' in {escape_path(paths[0])}: line 6, column 11\n",
"2 files were left unchanged, 1 file was not sortable\n",
]
expected_status = 1
actual_msgs, actual_status = ssort(tmp_path)
assert (actual_msgs, actual_status) == (expected_msgs, expected_status)
def test_ssort_one_unsorted_one_syntax_error(ssort, tmp_path):
paths = _write_fixtures(tmp_path, [_syntax, _unsorted, _good])
expected_msgs = [
f"ERROR: syntax error in {escape_path(paths[0])}: line 3, column 5\n",
f"Sorting {escape_path(paths[1])}\n",
"1 file was resorted, 1 file was left unchanged, 1 file was not sortable\n",
]
expected_status = 1
actual_msgs, actual_status = ssort(tmp_path)
assert (actual_msgs, actual_status) == (expected_msgs, expected_status)
def test_ssort_encoding_error(ssort, tmp_path):
paths = _write_fixtures(tmp_path, [_encoding])
expected_msgs = [
f"ERROR: unknown encoding, 'invalid-encoding', in {escape_path(paths[0])}\n",
"1 file was not sortable\n",
]
expected_status = 1
actual_msgs, actual_status = ssort(tmp_path)
assert (actual_msgs, actual_status) == (expected_msgs, expected_status)
def test_ssort_character_error(ssort, tmp_path):
paths = _write_fixtures(tmp_path, [_character])
expected_msgs = [
f"ERROR: encoding error in {escape_path(paths[0])}: 'ascii' codec can't decode byte 0xfe in position 16: ordinal not in range(128)\n",
"1 file was not sortable\n",
]
expected_status = 1
actual_msgs, actual_status = ssort(tmp_path)
assert (actual_msgs, actual_status) == (expected_msgs, expected_status)
def test_ssort_preserve_crlf_endlines(ssort, tmp_path):
input = b"a = b\r\nb = 4"
expected_output = b"b = 4\r\na = b\r\n"
paths = _write_fixtures(tmp_path, [input])
expected_msgs = [
f"Sorting {escape_path(paths[0])}\n",
"1 file was resorted\n",
]
expected_status = 0
actual_msgs, actual_status = ssort(tmp_path)
assert actual_msgs == expected_msgs
assert actual_status == expected_status
(output,) = [pathlib.Path(path).read_bytes() for path in paths]
assert output == expected_output
def test_ssort_empty_dir(ssort, tmp_path):
expected_msgs = ["No files are present to be sorted. Nothing to do.\n"]
expected_status = 0
actual_msgs, actual_status = ssort(tmp_path)
assert (actual_msgs, actual_status) == (expected_msgs, expected_status)
def test_ssort_non_existent_file(ssort, tmp_path):
path = tmp_path / "file.py"
expected_msgs = [
f"ERROR: {escape_path(path)} does not exist\n",
"1 file was not sortable\n",
]
expected_status = 1
actual_msgs, actual_status = ssort(path)
assert (actual_msgs, actual_status) == (expected_msgs, expected_status)
def test_ssort_no_py_extension(ssort, tmp_path):
path = tmp_path / "file"
path.write_bytes(_good)
expected_msgs = ["1 file was left unchanged\n"]
expected_status = 0
actual_msgs, actual_status = ssort(path)
assert (actual_msgs, actual_status) == (expected_msgs, expected_status)
@pytest.mark.skipif(
sys.platform == "win32", reason="can't block read on windows"
)
def test_ssort_unreadable_file(ssort, tmp_path):
path = tmp_path / "file.py"
path.write_bytes(_good)
path.chmod(0)
expected_msgs = [
f"ERROR: {escape_path(path)} is not readable\n",
"1 file was not sortable\n",
]
expected_status = 1
actual_msgs, actual_status = ssort(path)
assert (actual_msgs, actual_status) == (expected_msgs, expected_status)
def test_ssort_run_module():
entrypoint_result = subprocess.run(
["ssort", "--help"],
capture_output=True,
encoding="utf-8",
)
entrypoint_output = entrypoint_result.stderr.splitlines(keepends=True)
module_result = subprocess.run(
[sys.executable, "-m", "ssort", "--help"],
capture_output=True,
encoding="utf-8",
)
module_output = module_result.stderr.splitlines(keepends=True)
assert module_output == entrypoint_output
```
#### File: ssort/tests/test_files.py
```python
from __future__ import annotations
import pathlib
import pytest
from ssort._files import is_ignored
def test_ignore_git(
tmp_path: pathlib.Path, monkeypatch: pytest.MonkeyPatch
) -> None:
monkeypatch.chdir(tmp_path)
(tmp_path / ".git").mkdir()
(tmp_path / ".gitignore").write_text("ignored")
assert not is_ignored("src")
assert not is_ignored("src/main.py")
assert is_ignored("ignored")
assert is_ignored("ignored/main.py")
assert is_ignored("src/ignored")
assert is_ignored("src/ignored/main.py")
assert not is_ignored("../ignored")
assert not is_ignored("../ignored/main.py")
assert is_ignored(f"../{tmp_path.name}/ignored")
assert is_ignored(f"../{tmp_path.name}/ignored/main.py")
def test_ignore_git_with_no_repo(
tmp_path: pathlib.Path, monkeypatch: pytest.MonkeyPatch
) -> None:
monkeypatch.chdir(tmp_path)
(tmp_path / ".gitignore").write_text("ignored")
assert not is_ignored("src")
assert not is_ignored("src/main.py")
assert is_ignored("ignored")
assert is_ignored("ignored/main.py")
assert is_ignored("src/ignored")
assert is_ignored("src/ignored/main.py")
assert not is_ignored("../ignored")
assert not is_ignored("../ignored/main.py")
assert is_ignored(f"../{tmp_path.name}/ignored")
assert is_ignored(f"../{tmp_path.name}/ignored/main.py")
def test_ignore_git_in_subdirectory(
tmp_path: pathlib.Path, monkeypatch: pytest.MonkeyPatch
) -> None:
monkeypatch.chdir(tmp_path)
(tmp_path / ".git").mkdir()
(tmp_path / ".gitignore").write_text("parent")
(tmp_path / "sub").mkdir()
(tmp_path / "sub" / ".gitignore").write_text("child")
assert not is_ignored("src")
assert not is_ignored("src/main.py")
assert not is_ignored("sub/src")
assert not is_ignored("sub/src/main.py")
assert is_ignored("parent")
assert is_ignored("parent/main.py")
assert is_ignored("sub/parent")
assert is_ignored("sub/parent/main.py")
assert is_ignored("src/parent")
assert is_ignored("src/parent/main.py")
assert is_ignored("sub/src/parent")
assert is_ignored("sub/src/parent/main.py")
assert not is_ignored("../parent")
assert not is_ignored("../parent/main.py")
assert not is_ignored("../sub/parent")
assert not is_ignored("../sub/parent/main.py")
assert is_ignored(f"../{tmp_path.name}/parent")
assert is_ignored(f"../{tmp_path.name}/parent/main.py")
assert is_ignored(f"../{tmp_path.name}/sub/parent")
assert is_ignored(f"../{tmp_path.name}/sub/parent/main.py")
assert not is_ignored("child")
assert not is_ignored("child/main.py")
assert is_ignored("sub/child")
assert is_ignored("sub/child/main.py")
assert not is_ignored("src/child")
assert not is_ignored("src/child/main.py")
assert is_ignored("sub/src/child")
assert is_ignored("sub/src/child/main.py")
assert not is_ignored("sub/../child")
assert not is_ignored("sub/../child/main.py")
assert is_ignored(f"../{tmp_path.name}/sub/child")
assert is_ignored(f"../{tmp_path.name}/sub/child/main.py")
def test_ignore_git_in_working_subdirectory(
tmp_path: pathlib.Path, monkeypatch: pytest.MonkeyPatch
) -> None:
(tmp_path / ".git").mkdir()
(tmp_path / ".gitignore").write_text("ignored")
(tmp_path / "sub").mkdir()
monkeypatch.chdir(tmp_path / "sub")
assert not is_ignored("src")
assert not is_ignored("src/main.py")
assert is_ignored("ignored")
assert is_ignored("ignored/main.py")
assert is_ignored("src/ignored")
assert is_ignored("src/ignored/main.py")
assert is_ignored("../ignored")
assert is_ignored("../ignored/main.py")
assert is_ignored("../sub/ignored")
assert is_ignored("../sub/ignored/main.py")
assert not is_ignored("../../ignored")
assert not is_ignored("../../ignored/main.py")
def test_ignore_git_in_working_parent_directory(
tmp_path: pathlib.Path, monkeypatch: pytest.MonkeyPatch
) -> None:
monkeypatch.chdir(tmp_path)
(tmp_path / "sub").mkdir()
(tmp_path / "sub" / ".git").mkdir()
(tmp_path / "sub" / ".gitignore").write_text("ignored")
assert not is_ignored("ignored")
assert not is_ignored("ignored/main.py")
assert is_ignored("sub/ignored")
assert is_ignored("sub/ignored/main.py")
assert is_ignored("sub/src/ignored")
assert is_ignored("sub/src/ignored/main.py")
assert not is_ignored("sub/../ignored")
assert not is_ignored("sub/../ignored/main.py")
def test_ignore_git_subdirectory_pattern(
tmp_path: pathlib.Path, monkeypatch: pytest.MonkeyPatch
) -> None:
monkeypatch.chdir(tmp_path)
(tmp_path / ".git").mkdir()
(tmp_path / ".gitignore").write_text("sub/ignored")
(tmp_path / "sub").mkdir()
assert not is_ignored("sub")
assert not is_ignored("sub/main.py")
assert is_ignored("sub/ignored")
assert is_ignored("sub/ignored/main.py")
def test_ignore_git_symlink_recursive(
tmp_path: pathlib.Path, monkeypatch: pytest.MonkeyPatch
) -> None:
monkeypatch.chdir(tmp_path)
(tmp_path / ".git").mkdir()
(tmp_path / ".gitignore").write_text("ignored")
(tmp_path / "dir").mkdir()
(tmp_path / "dir" / "link").symlink_to(tmp_path / "dir")
assert not is_ignored("dir")
assert not is_ignored("dir/link")
assert not is_ignored("dir/link/link")
assert is_ignored("dir/ignored")
assert is_ignored("dir/link/ignored")
assert is_ignored("dir/link/link/ignored")
def test_ignore_git_symlink_outside_repo(
tmp_path: pathlib.Path, monkeypatch: pytest.MonkeyPatch
) -> None:
monkeypatch.chdir(tmp_path)
(tmp_path / "repo" / ".git").mkdir(parents=True)
(tmp_path / "repo" / ".gitignore").write_text("link")
(tmp_path / "link").mkdir()
(tmp_path / "repo" / "link").symlink_to(tmp_path / "link")
assert not is_ignored("link")
assert not is_ignored("link/main.py")
assert is_ignored("repo/link")
assert is_ignored("repo/link/main.py")
def test_ignore_symlink_circular(
tmp_path: pathlib.Path, monkeypatch: pytest.MonkeyPatch
) -> None:
monkeypatch.chdir(tmp_path)
(tmp_path / "link1").symlink_to(tmp_path / "link2")
(tmp_path / "link2").symlink_to(tmp_path / "link1")
assert not is_ignored("link1")
assert not is_ignored("link2")
```
#### File: ssort/tests/test_statements.py
```python
from ssort._parsing import parse
def test_statement_text_padded_same_row():
statements = list(parse("a = 4; b = 5"))
assert statements[1].text_padded() == " b = 5"
def test_statement_text_padded_separate_rows():
statements = list(parse("a = 4\n\nb = 5"))
assert statements[1].text_padded() == "\n\nb = 5"
```
|
{
"source": "jgbishop/comics-rss",
"score": 3
}
|
#### File: jgbishop/comics-rss/comics-rss.py
```python
import argparse
import calendar
import glob
import json
import os
import pytz
import re
import rfeed
import sys
from bs4 import BeautifulSoup
from contextlib import closing
from datetime import date, datetime, timedelta
from requests import get
from requests.exceptions import RequestException
from slugify import slugify
from urllib.request import urlopen
MIN_PYTHON = (3, 4)
VERSION = "1.0.0"
def get_image(url, filename):
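    """Download the comic page at url, resolve the strip image via its og:image
    metadata, save it to filename, and return a dict with the page title
    (or None on failure)."""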
print(" - Attempting to get image: {}".format(filename))
try:
with closing(get(url, stream=True)) as resp:
if resp.status_code == 200:
raw_html = resp.content
else:
print("ERROR: Bad response getting page ()".format(
resp.status_code
))
return None
except RequestException as e:
print("ERROR: {}".format(e))
return None
html = BeautifulSoup(raw_html, 'lxml')
title = html.select_one('meta[name=title]')
short_link = html.select_one('meta[property=og:image]')
response = urlopen(short_link['content'])
data_response = get(response.url)
if data_response.status_code == 200:
print(" Got success response code; writing image content")
output = open(filename, "wb")
output.write(data_response.content)
output.close()
return {
'title': title
}
else:
print("ERROR: Bad response downloading image ()".format(
data_response.status_code)
)
return None
if sys.version_info < MIN_PYTHON:
    sys.exit("This script requires Python {}.{} or later.".format(*MIN_PYTHON))
github_url = 'https://github.com/jgbishop/comics-rss'
root_url = "https://comicskingdom.com"
# Handle script arguments
parser = argparse.ArgumentParser()
parser.add_argument('--file', default='rss-sources.json')
args = parser.parse_args()
days = dict(zip(calendar.day_name, range(7)))
cwd = os.getcwd()
today = date.today()
# Load our config file
with open(args.file) as f:
config = json.load(f)
# Make sure we have everything we expect
errors = []
for x in ('feed_dir', 'feed_url'):
if not config.get(x, ""):
errors.append("ERROR: Missing the {} configuration directive".format(x))
else:
# Strip trailing slashes from file system paths and URLs
config[x] = config[x].rstrip('/')
if errors:
sys.exit("\n".join(errors))
# Setup the cache paths and URLs
if not config.get('cache_dir', ''):
config['cache_dir'] = "{}/cache".format(config['feed_dir'])
elif config.get('cache_dir').endswith('/'):
config['cache_dir'] = config['cache_dir'].rstrip('/')
if not config.get('cache_url', ''):
config['cache_url'] = "{}/cache".format(config['feed_url'])
elif config.get('cache_url').endswith('/'):
config['cache_url'] = config['cache_url'].rstrip('/')
# Create the cache directory
cache_dir = config.get('cache_dir')
if not cache_dir.startswith('/'):
cache_dir = os.path.join(cwd, cache_dir)
try:
os.makedirs(cache_dir, exist_ok=True)
except OSError as e:
sys.exit("Failed to create {}: {}".format(cache_dir, str(e)))
# Create the feeds directory (in case it's different)
feed_dir = config.get('feed_dir')
if not feed_dir.startswith('/'):
feed_dir = os.path.join(cwd, feed_dir)
try:
os.makedirs(feed_dir, exist_ok=True)
except OSError as e:
sys.exit("Failed to create {}: {}".format(feed_dir, str(e)))
expires = config.get('expires', 0)
# Process the comics that we read from the config
images_processed = {}
for entry in config.get('comics', []):
if not entry.get('name', ''):
print("WARNING: Skipping comics entry with no name field")
continue
slug = entry.get('slug', '')
if not slug:
slug = slugify(entry.get('name'))
print("Processing comic: {}".format(slug))
images_processed.setdefault(slug, set())
item_list = []
last_stop = 15
schedule = entry.get('schedule', [])
if schedule:
last_stop = 22 # Allow 22 days back
schedule_weekdays = {days.get(x) for x in schedule}
for x in range(last_stop):
the_date = today - timedelta(days=x)
if schedule and the_date.weekday() not in schedule_weekdays:
continue
img_filename = "{}-{}.gif".format(slug, the_date.isoformat())
images_processed[slug].add(img_filename)
url = "{}/{}/{}".format(
root_url, slug, the_date.isoformat()
)
# Check to see if we need to fetch the image
img_path = os.path.join(cache_dir, img_filename)
if not os.path.isfile(img_path):
get_image(url, img_path)
title = "{} comic strip for {}".format(
entry.get("name"), the_date.strftime("%B %d, %Y")
)
img_url = "{}/{}".format(config.get("cache_url"), img_filename)
clines = []
clines.append('<p><img src="{}" alt="{}"></p>'.format(img_url, title))
clines.append('<p>')
clines.append(' <a href="{}">View on King Comics</a> -'.format(url))
clines.append(' <a href="{}">GitHub Project</a>'.format(github_url))
clines.append('</p>')
pubtime = datetime.combine(the_date, datetime.min.time())
pubtime = pubtime.replace(tzinfo=pytz.UTC)
item = rfeed.Item(
title=title,
link=url,
description='\n'.join(clines),
guid=rfeed.Guid(url),
pubDate=pubtime
)
item_list.append(item)
# Start building the feed
feed = rfeed.Feed(
title=entry.get('name'),
link="{}/{}".format(root_url, slug),
description="RSS feed for {}".format(entry.get('name')),
language='en-US',
lastBuildDate=datetime.now(),
items=item_list,
generator="comics-rss.py ({})".format(github_url),
)
feed_path = os.path.join(feed_dir, "{}.xml".format(slug))
with open(feed_path, "w") as feed_file:
feed_file.write(feed.rss())
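    # Prune cached strip images whose embedded date is older than the configured expiry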
if(expires > 0):
to_prune = []
candidates = glob.glob("{}/{}-*.gif".format(cache_dir, slug))
for img in candidates:
match = re.search(r'(\d{4}-\d{2}-\d{2})', img)
            if match is None:
print("WARNING: Unable to locate date string in file: {}".format(img))
continue
try:
                img_date = datetime.strptime(match.group(0), "%Y-%m-%d").date()
                delta = today - img_date
if(delta.days >= expires):
to_prune.append(img)
except ValueError:
print("WARNING: Unable to parse date from cache file: {}".format(img))
if(to_prune):
print("Pruning {} expired cache files for {}.".format(len(to_prune), slug))
for f in sorted(to_prune):
print(" - Removing {}".format(f))
try:
os.remove(f)
                except OSError as e:
                    print("ERROR: Failed to remove {}: {}".format(f, e))
                    sys.exit(1)
```
|
{
"source": "JGBMichalski/House-Hunter",
"score": 2
}
|
#### File: House-Hunter/HouseHunter/core.py
```python
from tarfile import SUPPORTED_TYPES
import requests
import re
from bs4 import BeautifulSoup
import json
import HouseHunter.globals as Globals
from HouseHunter.ad import *
from pathlib import Path
class Core():
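    """Scrapes supported listing sites for ads and tracks which ads have already been seen."""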
def __init__(self, filename="ads.json"):
self.filepath = Path().absolute().joinpath(filename) if filename else None
self.all_ads = {}
self.new_ads = {}
self.third_party_ads = []
self.load_ads()
# Reads given file and creates a dict of ads in file
def load_ads(self):
# If filepath is None, just skip local file
if self.filepath:
# If the file doesn't exist create it
if not self.filepath.exists():
ads_file = self.filepath.open(mode='w')
ads_file.write("{}")
ads_file.close()
return
with self.filepath.open(mode="r") as ads_file:
self.all_ads = json.load(ads_file)
# Save ads to file
def save_ads(self):
# If filepath is None, just skip local file
if self.filepath:
with self.filepath.open(mode="w") as ads_file:
json.dump(self.all_ads, ads_file)
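    # Return the index of the supported origin matching the url, or -1 if the site is unsupported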
def validate_origin(self, url):
for origin in Globals.SUPPORTED_ORIGINS:
if origin in url:
return Globals.SUPPORTED_ORIGINS.index(origin)
return -1
# Pulls page data from a given url and finds all ads on each page
def scrape_url_for_ads(self, url):
self.new_ads = {}
email_title = None
origin = self.validate_origin(url)
if origin < 0:
print("Site not supported: {}".format(url))
return self.new_ads, email_title
while url:
# Get the html data from the URL
page = requests.get(url)
soup = BeautifulSoup(page.content, "html.parser")
            # If the email title doesn't exist, pull it from the html data
if email_title is None:
email_title = self.get_email_title(origin, soup)
# Find ads on the page
self.find_ads(soup, origin)
# Set url for next page of ads
# Depending on supported origins, this may not apply to all
url = soup.find("a", string="Next")
if not url:
url = soup.find("a", href=True, rel="next")
if url:
url = Globals.SUPPORTED_ORIGINS[origin] + url['href']
return self.new_ads, email_title
def find_ads(self, soup, origin):
# Finds all ad trees in page html.
ad_regex = re.compile('.*{}.*'.format(Globals.AD_ROOT_CLASS_NAMES[origin][Globals.PRIMARY]))
ads = soup.find_all(Globals.AD_ROOT_ELEMENT_TYPE[origin], {"class": ad_regex})
# If no ads use different class name
if not ads:
ad_regex = re.compile('.*{}.*'.format(Globals.AD_ROOT_CLASS_NAMES[origin][Globals.SECONDARY]))
ads = soup.find_all(Globals.AD_ROOT_ELEMENT_TYPE[origin], {"class": ad_regex})
# Create a dictionary of all ads with ad id being the key
for ad in ads:
if origin == 0:
current_ad = WFPAd(origin, ad)
elif origin == 1:
current_ad = RewAd(origin, ad)
else:
return
# Skip third-party ads and ads already found
if (current_ad.id not in self.all_ads):
self.new_ads[current_ad.id] = current_ad.info
self.all_ads[current_ad.id] = current_ad.info
def get_email_title(self, origin, soup):
if origin != 0:
# Used for origins that do not give any details about the search options
return Globals.SUPPORTED_FULL_NAMES[origin]
else:
# Depending on supported origins, this may not apply to all
email_title_location = soup.find('div', {"class": "results-info"}).find('h1')
if email_title_location:
# Depending on supported origins, this may not apply to all
return Globals.SUPPORTED_FULL_NAMES[origin] + " - " + self.format_title(email_title_location.text.split(' in ')[1].strip('"'))
else:
return Globals.SUPPORTED_FULL_NAMES[origin]
# Makes the first letter of every word upper-case
def format_title(self, title):
new_title = []
title = title.split()
for word in title:
new_word = ''
new_word += word[0].upper()
if len(word) > 1:
new_word += word[1:]
new_title.append(new_word)
return ' '.join(new_title)
    # Convert a given list of words to lower-case
def words_to_lower(self, words):
return [word.lower() for word in words]
```
#### File: House-Hunter/HouseHunter/helpers.py
```python
import re
class Helpers:
    @staticmethod
    def removeHTMLTags(text):
"""
Removes HTML Tags from a supplied string.
"""
expr = re.compile('<.*?>')
        cleanedText = re.sub(expr, '', text)
return cleanedText
```
|
{
"source": "jgbrasier/KFPyEstimate",
"score": 3
}
|
#### File: KFPyEstimate/src/run.py
```python
from src.filters import *
from src.utils import *
from jax import value_and_grad
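# Simulate the true system forward from x0 under the given action sequence, returning state and measurement trajectories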
def run_simulation(filter, x0, action_sequence):
measurements = []
states = [x0]
for u in action_sequence:
x = filter.dynamic(states[-1], u)
y = filter.observation(x)
measurements.append(y)
states.append(x)
return states, measurements
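# Run the Kalman filter prediction/correction recursion from s0 over the recorded actions and measurements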
def run_kf(filter, s0, action_history, measurement_history):
assert len(action_history) == len(measurement_history)
states = [s0]
for (u, y) in zip(action_history, measurement_history):
sp = filter.prediction(states[-1], u)
sn = filter.correction(sp, y)
states.append(sn)
return states
# def run_gradient(filter, epochs, s0, action_history, measurement_history):
# assert len(action_history) == len(measurement_history)
# loss = []
# for i in range(epochs):
# filt_states = run_kf(filter, s0, action_history, measurement_history)
# l, gs = value_and_grad(kf_likelihood, argnums=0)(filter.A, filter.B, filter.Q, filter.C, filter.R, filt_states, action_history, measurement_history)
# filter.A -= 0.0001*gs
# loss.append(l)
# return loss
```
|
{
"source": "JGBroadbent/openscm-runner",
"score": 2
}
|
#### File: openscm-runner/scripts/test_install.py
```python
import importlib
import pkgutil
import openscm_runner
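# Recursively import every submodule of the package to check that the install imports cleanly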
def import_submodules(package_name):
package = importlib.import_module(package_name)
for _, name, is_pkg in pkgutil.walk_packages(package.__path__):
full_name = package.__name__ + "." + name
print(full_name)
importlib.import_module(full_name)
if is_pkg:
import_submodules(full_name)
import_submodules("openscm_runner")
print(openscm_runner.__version__)
```
#### File: tests/integration/test_magicc7.py
```python
import numpy.testing as npt
import pymagicc.io
import pytest
from scmdata import ScmRun
from openscm_runner import run
from openscm_runner.adapters import MAGICC7
from openscm_runner.utils import calculate_quantiles
RTOL = 1e-5
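# Assert that a result is close to its expected value; when raise_error is False, print the mismatch instead of failing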
def _check_res(exp, check_val, raise_error, rtol=RTOL):
try:
npt.assert_allclose(exp, check_val, rtol=rtol)
except AssertionError:
if raise_error:
raise
print("exp: {}, check_val: {}".format(exp, check_val))
def test_magicc7_run(test_scenarios, magicc7_is_available):
debug_run = False
res = run(
climate_models_cfgs={
"MAGICC7": [
{
"core_climatesensitivity": 3,
"rf_soxi_dir_wm2": -0.2,
"out_temperature": 1,
"out_forcing": 1,
"out_dynamic_vars": [
"DAT_AEROSOL_ERF",
"DAT_HEATCONTENT_AGGREG_TOTAL",
"DAT_CO2_AIR2LAND_FLUX",
],
"out_ascii_binary": "BINARY",
"out_binary_format": 2,
},
{
"core_climatesensitivity": 2,
"rf_soxi_dir_wm2": -0.1,
"out_temperature": 1,
"out_forcing": 1,
"out_ascii_binary": "BINARY",
"out_binary_format": 2,
},
{
"core_climatesensitivity": 5,
"rf_soxi_dir_wm2": -0.35,
"out_temperature": 1,
"out_forcing": 1,
"out_ascii_binary": "BINARY",
"out_binary_format": 2,
},
],
},
scenarios=test_scenarios.filter(scenario=["ssp126", "ssp245", "ssp370"]),
output_variables=(
"Surface Temperature",
"Effective Radiative Forcing",
"Effective Radiative Forcing|Aerosols",
"Effective Radiative Forcing|CO2",
"Heat Content|Ocean",
"Net Atmosphere to Land Flux|CO2",
),
)
assert isinstance(res, ScmRun)
assert res["run_id"].min() == 0
assert res["run_id"].max() == 8
assert res.get_unique_meta(
"climate_model", no_duplicates=True
) == "MAGICC{}".format(MAGICC7.get_version())
assert set(res.get_unique_meta("variable")) == set(
[
"Surface Temperature",
"Effective Radiative Forcing",
"Effective Radiative Forcing|Aerosols",
"Effective Radiative Forcing|CO2",
"Heat Content|Ocean",
"Net Atmosphere to Land Flux|CO2",
]
)
# check ocean heat content unit conversion comes through correctly
_check_res(
1824.05,
res.filter(
unit="ZJ",
variable="Heat Content|Ocean",
region="World",
year=2100,
scenario="ssp126",
).values.max(),
not debug_run,
rtol=RTOL,
)
_check_res(
0.472378,
res.filter(
unit="GtC / yr",
variable="Net Atmosphere to Land Flux|CO2",
region="World",
year=2100,
scenario="ssp126",
).values.max(),
not debug_run,
rtol=RTOL,
)
_check_res(
2.756034,
res.filter(
variable="Surface Temperature", region="World", year=2100, scenario="ssp126"
).values.max(),
not debug_run,
rtol=RTOL,
)
_check_res(
1.2195495,
res.filter(
variable="Surface Temperature", region="World", year=2100, scenario="ssp126"
).values.min(),
not debug_run,
rtol=RTOL,
)
_check_res(
5.5226571,
res.filter(
variable="Surface Temperature", region="World", year=2100, scenario="ssp370"
).values.max(),
not debug_run,
rtol=RTOL,
)
_check_res(
2.733369581,
res.filter(
variable="Surface Temperature", region="World", year=2100, scenario="ssp370"
).values.min(),
not debug_run,
rtol=RTOL,
)
    # check we can also calculate quantiles
quantiles = calculate_quantiles(res, [0.05, 0.17, 0.5, 0.83, 0.95])
_check_res(
1.27586919,
quantiles.filter(
variable="Surface Temperature",
region="World",
year=2100,
scenario="ssp126",
quantile=0.05,
).values,
not debug_run,
rtol=RTOL,
)
_check_res(
2.6587052,
quantiles.filter(
variable="Surface Temperature",
region="World",
year=2100,
scenario="ssp126",
quantile=0.95,
).values,
not debug_run,
rtol=RTOL,
)
_check_res(
2.83627686,
quantiles.filter(
variable="Surface Temperature",
region="World",
year=2100,
scenario="ssp370",
quantile=0.05,
).values,
not debug_run,
rtol=RTOL,
)
_check_res(
5.34663565,
quantiles.filter(
variable="Surface Temperature",
region="World",
year=2100,
scenario="ssp370",
quantile=0.95,
).values,
not debug_run,
rtol=RTOL,
)
if debug_run:
assert False, "Turn off debug"
def test_write_scen_files_and_make_full_cfgs(
monkeypatch, tmpdir, test_scenarios, magicc7_is_available
):
adapter = MAGICC7()
test_scenarios_magiccdf = pymagicc.io.MAGICCData(test_scenarios)
res = adapter._write_scen_files_and_make_full_cfgs(
test_scenarios_magiccdf,
[
{
"file_emisscen_3": "overwritten by adapter.magicc_scenario_setup",
"other_cfg": 12,
}
],
)
for (model, scenario), _ in test_scenarios_magiccdf.meta.groupby(
["model", "scenario"]
):
scen_file_name = (
"{}_{}.SCEN7".format(scenario, model)
.upper()
.replace("/", "-")
.replace("\\", "-")
.replace(" ", "-")
)
scenario_cfg = [v for v in res if v["file_emisscen"] == scen_file_name]
assert len(scenario_cfg) == 1
scenario_cfg = scenario_cfg[0]
assert scenario_cfg["other_cfg"] == 12
assert scenario_cfg["model"] == model
assert scenario_cfg["scenario"] == scenario
for i in range(2, 9):
scen_flag_val = scenario_cfg["file_emisscen_{}".format(i)]
assert scen_flag_val == "NONE"
@pytest.mark.parametrize(
"out_config",
(
("core_climatesensitivity", "rf_total_runmodus"),
("core_climatesensitivity",),
("rf_total_runmodus",),
),
)
def test_return_config(test_scenarios, magicc7_is_available, out_config):
core_climatesensitivities = [2, 3]
rf_total_runmoduses = ["ALL", "CO2"]
cfgs = []
for cs in core_climatesensitivities:
for runmodus in rf_total_runmoduses:
cfgs.append(
{
"out_dynamic_vars": [
"DAT_TOTAL_INCLVOLCANIC_ERF",
"DAT_SURFACE_TEMP",
],
"core_climatesensitivity": cs,
"rf_total_runmodus": runmodus,
}
)
res = run(
climate_models_cfgs={"MAGICC7": cfgs},
scenarios=test_scenarios.filter(scenario=["ssp126", "ssp245", "ssp370"]),
output_variables=("Surface Temperature", "Effective Radiative Forcing",),
out_config={"MAGICC7": out_config},
)
for k in out_config:
assert k in res.meta.columns
ssp126 = res.filter(scenario="ssp126")
# check all the configs were used and check that each scenario
# has all the configs included in the metadata too
if k == "core_climatesensitivity":
assert set(res.get_unique_meta(k)) == set(core_climatesensitivities)
assert set(ssp126.get_unique_meta(k)) == set(core_climatesensitivities)
elif k == "rf_total_runmodus":
assert set(res.get_unique_meta(k)) == set(rf_total_runmoduses)
assert set(ssp126.get_unique_meta(k)) == set(rf_total_runmoduses)
else:
raise NotImplementedError(k)
```
|
{
"source": "jgbustos/ml-rest-api",
"score": 3
}
|
#### File: ml_rest_api/api/restx.py
```python
from logging import Logger, getLogger
from typing import Tuple, Dict
from jsonschema import FormatChecker
from flask_restx import Api
from ml_rest_api.settings import get_value
class MLRestAPIException(Exception):
"""Base ML Rest API Exception"""
class MLRestAPINotReadyException(MLRestAPIException):
"""Base ML Rest API NOT READY Exception"""
FlaskApiReturnType = Tuple[Dict, int]
log: Logger = getLogger(__name__)
api = Api( # pylint: disable=invalid-name
version="0.1",
title="Machine Learning REST API",
description="A RESTful API to return predictions from a trained ML model, \
built with Python 3 and Flask-RESTX",
format_checker=FormatChecker(
formats=(
"date-time",
"date",
)
),
default="health",
default_label="Basic health check methods",
)
@api.errorhandler(MLRestAPINotReadyException)
def not_ready_error_handler(exception) -> FlaskApiReturnType:
"""NOT READY error handler that returns HTTP 503 error."""
log.exception("Server Not Ready")
return {"message": "Server Not Ready"}, 503
@api.errorhandler
def default_error_handler(exception) -> FlaskApiReturnType:
"""Default error handler that returns HTTP 500 error."""
log.exception(exception.message)
if get_value("FLASK_DEBUG"):
        error_msg = str(exception)
else:
error_msg = "An unhandled exception occurred"
return {"message": error_msg}, 500
```
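The two handlers above translate a not-ready condition into HTTP 503 and any other unhandled exception into HTTP 500. Below is a minimal sketch of how a resource might trigger the 503 path; the namespace, route and readiness flag are illustrative assumptions, while `api` and `MLRestAPINotReadyException` are the real objects defined above.
```python
# Sketch only: the namespace, route and MODEL_READY flag are assumptions;
# api and MLRestAPINotReadyException are the real objects defined above.
from flask_restx import Resource

from ml_rest_api.api.restx import api, MLRestAPINotReadyException

ns = api.namespace("demo", description="Illustrative readiness-gated endpoint")

MODEL_READY = False  # stand-in for whatever readiness check the project really uses


@ns.route("/predict")
class DemoPredict(Resource):
    def get(self):
        # Raising the exception lets not_ready_error_handler reply with HTTP 503.
        if not MODEL_READY:
            raise MLRestAPINotReadyException("model not loaded yet")
        return {"message": "model is ready"}
```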
#### File: ml-rest-api/ml_rest_api/app.py
```python
import os
from logging import Logger, getLogger
import logging.config
from typing import List
from flask import Flask, Blueprint
from flask_wtf import CSRFProtect # pylint: disable=unused-import
from ml_rest_api.settings import get_value
from ml_rest_api.ml_trained_model.wrapper import trained_model_wrapper
from ml_rest_api.api.restx import api
import ml_rest_api.api.health.liveness # pylint: disable=unused-import
import ml_rest_api.api.health.readiness # pylint: disable=unused-import
import ml_rest_api.api.model.predict # pylint: disable=unused-import
IN_UWSGI: bool = True
try:
# pyright: reportMissingImports=false
import uwsgi # pylint: disable=unused-import
except ImportError:
IN_UWSGI = False
def configure_app(flask_app: Flask) -> None:
"""Configures the app."""
flask_settings_to_apply: List = [
#'FLASK_SERVER_NAME',
"SWAGGER_UI_DOC_EXPANSION",
"RESTX_VALIDATE",
"RESTX_MASK_SWAGGER",
"SWAGGER_UI_JSONEDITOR",
"ERROR_404_HELP",
"WTF_CSRF_ENABLED",
]
for key in flask_settings_to_apply:
flask_app.config[key] = get_value(key)
flask_app.config["SECRET_KEY"] = os.urandom(32)
def initialize_app(flask_app: Flask) -> None:
"""Initialises the app."""
configure_app(flask_app)
blueprint = Blueprint("api", __name__, url_prefix="/api")
api.init_app(blueprint)
flask_app.register_blueprint(blueprint)
if get_value("MULTITHREADED_INIT") and not IN_UWSGI:
trained_model_wrapper.multithreaded_init()
else:
trained_model_wrapper.init()
def main() -> None:
"""Main routine, executed only if running as stand-alone."""
log.info(
"***** Starting development server at http://%s/api/ *****",
get_value("FLASK_SERVER_NAME"),
)
APP.run(
debug=get_value("FLASK_DEBUG"),
port=get_value("FLASK_PORT"),
host=get_value("FLASK_HOST"),
)
APP = Flask(__name__)
logging.config.fileConfig(
os.path.normpath(os.path.join(os.path.dirname(__file__), "../logging.conf"))
)
log: Logger = getLogger(__name__)
initialize_app(APP)
if __name__ == "__main__":
main()
```
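`initialize_app` mounts the RESTX `Api` on a blueprint under `/api` and initialises the trained-model wrapper before the server starts. A hedged sketch of exercising the configured app with Flask's test client follows; the `/api/health/liveness` path is an assumption inferred from the imported health modules, not a route confirmed here.
```python
# Sketch only: the health route path is an assumption inferred from the
# ml_rest_api.api.health.liveness import above; adjust to the real endpoint.
from ml_rest_api.app import APP

with APP.test_client() as client:
    response = client.get("/api/health/liveness")
    print(response.status_code, response.get_json())
```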
|
{
"source": "jgc128/crabada.py",
"score": 3
}
|
#### File: src/helpers/teams.py
```python
from typing import List
from src.common.types import TeamTask
from src.libs.CrabadaWeb2Client.types import Team
from src.models.User import User
from src.common.clients import makeCrabadaWeb2Client
def fetchAvailableTeamsForTask(user: User, task: TeamTask) -> List[Team]:
"""
Fetch available teams from Crabada, and return only those
that are supposed to perform the given task
"""
# Teams that are supposed to perform the given task
ids = [t["id"] for t in user.getTeamsByTask(task)]
if not ids:
return []
# Fetch list of available teams
availableTeams = makeCrabadaWeb2Client().listTeams(
user.address, {"is_team_available": 1, "limit": len(ids) * 2, "page": 1}
)
# Intersect teams with the task with available teams
return [t for t in availableTeams if t["team_id"] in ids]
```
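`fetchAvailableTeamsForTask` first restricts the user's teams to the requested task and then intersects that set with whatever the Crabada API currently reports as available. A hedged usage sketch follows; `TeamTask.MINE` is an assumption about the `TeamTask` type defined elsewhere in the repository, while `team_id` is the field the helper itself filters on.
```python
# Sketch only: TeamTask.MINE is an assumption about the TeamTask type defined
# elsewhere in this repo; "team_id" is the field the helper itself filters on.
from src.common.config import users
from src.common.types import TeamTask
from src.helpers.teams import fetchAvailableTeamsForTask
from src.models.User import User

user = User(users[0]["address"])
available = fetchAvailableTeamsForTask(user, TeamTask.MINE)
print([t["team_id"] for t in available])
```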
#### File: CrabadaWeb2Client/Tests/testGetMine.py
```python
from src.helpers.general import secondOrNone
from src.libs.CrabadaWeb2Client.CrabadaWeb2Client import CrabadaWeb2Client
from pprint import pprint
from sys import argv
# VARS
client = CrabadaWeb2Client()
mineId = secondOrNone(argv) or 269751
# TEST FUNCTIONS
def test() -> None:
pprint(client.getMine(mineId))
# EXECUTE
test()
```
#### File: CrabadaWeb2Client/Tests/testListLootableMines.py
```python
from src.common.config import users
from src.libs.CrabadaWeb2Client.CrabadaWeb2Client import CrabadaWeb2Client
from pprint import pprint
# VARS
client = CrabadaWeb2Client()
looterAddress = users[0]["address"]
# TEST FUNCTIONS
def test() -> None:
params = {
"limit": 3,
"page": 1,
}
pprint(client.listLootableMines(looterAddress, params=params))
# EXECUTE
test()
```
#### File: CrabadaWeb3Client/tests/testAttack.py
```python
from typing import cast
from hexbytes import HexBytes
from src.libs.Web3Client.exceptions import Web3ClientException
from src.libs.Web3Client.helpers.debug import printTxInfo
from src.helpers.general import fourthOrNone, secondOrNone, thirdOrNone
from src.common.config import nodeUri, users
from src.libs.CrabadaWeb3Client.CrabadaWeb3Client import CrabadaWeb3Client
from sys import argv
from web3.exceptions import ContractLogicError
# VARS
client = CrabadaWeb3Client(nodeUri=nodeUri, privateKey=users[0]["privateKey"])
teamId = users[0]["teams"][0]["id"]
mineId = int(secondOrNone(argv) or 0)
expiredTime = int(thirdOrNone(argv) or 0)
certificate = HexBytes(fourthOrNone(argv))
if not (mineId and expiredTime and certificate):
print("Specify 3 non-zero arguments: mineId, expiredTime and certificate")
exit(1)
# TEST FUNCTIONS
def test() -> None:
txHash = client.attack(mineId, teamId, expiredTime, certificate)
printTxInfo(client, txHash)
# EXECUTE
try:
test()
except ContractLogicError as e:
print(">>> CONTRACT EXCEPTION!")
print(e)
except Web3ClientException as e:
print(">>> CLIENT EXCEPTION!")
print(e)
```
#### File: CrabadaWeb3Client/tests/testCloseGame.py
```python
from sys import argv
from typing import cast
from src.helpers.general import secondOrNone
from src.libs.Web3Client.exceptions import Web3ClientException
from src.libs.Web3Client.helpers.debug import printTxInfo
from src.common.config import nodeUri, users
from src.libs.CrabadaWeb3Client.CrabadaWeb3Client import CrabadaWeb3Client
from web3.exceptions import ContractLogicError
# VARS
client = CrabadaWeb3Client(nodeUri=nodeUri, privateKey=users[0]["privateKey"])
gameId = int(secondOrNone(argv) or 284549)
# TEST FUNCTIONS
def test() -> None:
txHash = client.closeGame(gameId)
printTxInfo(client, txHash)
# EXECUTE
try:
test()
except ContractLogicError as e:
print(">>> CONTRACT EXCEPTION!")
print(e)
except Web3ClientException as e:
print(">>> CLIENT EXCEPTION!")
print(e)
```
#### File: CrabadaWeb3Client/tests/testStartGameLogs.py
```python
from typing import cast
from src.libs.Web3Client.helpers.debug import pprintAttributeDict
from src.helpers.general import secondOrNone
from src.common.config import nodeUri
from src.libs.CrabadaWeb3Client.CrabadaWeb3Client import CrabadaWeb3Client
from sys import argv
from eth_typing.encoding import HexStr
from pprint import pprint
# VARS
txHash = cast(
HexStr,
(
secondOrNone(argv)
or "0x41705baf18b1ebc8ec204926a8524d3530aada11bd3c249ca4a330ed047f005e"
),
)
client = CrabadaWeb3Client(nodeUri=nodeUri)
tx = client.getTransaction(txHash)
txReceipt = client.getTransactionReceipt(txHash)
logs = client.contract.events.StartGame().processReceipt(txReceipt)
# TEST FUNCTIONS
def test() -> None:
print(">>> TX")
pprint(tx)
print(">>> TX LOGS")
for log in logs:
pprintAttributeDict(log)
# EXECUTE
test()
```
#### File: libs/Web3Client/Erc20Web3Client.py
```python
from typing import Any, Union
from eth_typing import Address, HexStr
from web3 import Web3
from src.libs.Web3Client.Web3Client import Web3Client
from web3.types import TxParams, Nonce
import os
class Erc20Web3Client(Web3Client):
"""
Client that comes with the ERC20 ABI preloaded.
AMOUNTS
=======
Whenever we will refer to an "amount" of the token, we really mean an
"amount in token units". A token unit is the smallest subdivision of
the token. For example:
- If the token has 6 digits (like most stablecoins) an amount of 1
corresponds to one millionth of the token.
- For tokens with 18 digits (like most non-stablecoins) an amount
of 1 is equal to 1/10^18 of the token (a single wei).
"""
abiDir = os.path.dirname(os.path.realpath(__file__)) + "/contracts"
abi = Web3Client.getContractAbiFromFile(abiDir + "/erc20Abi.json")
def __init__(
self,
nodeUri: str,
chainId: int = None,
txType: int = 2,
privateKey: str = None,
maxPriorityFeePerGasInGwei: float = 1,
upperLimitForBaseFeeInGwei: float = float("inf"),
contractAddress: Address = None,
) -> None:
super().__init__(
nodeUri,
chainId,
txType,
privateKey,
maxPriorityFeePerGasInGwei,
upperLimitForBaseFeeInGwei,
contractAddress,
self.abi,
)
####################
# Read
####################
def balanceOf(self, address: Address) -> int:
"""
Return the amount held by the given address
"""
return self.contract.functions.balanceOf(address).call()
def name(self) -> str:
"""
Return the name/label of the token
"""
return self.contract.functions.name().call()
def symbol(self) -> str:
"""
Return the symbol/ticker of the token
"""
return self.contract.functions.symbol().call()
def totalSupply(self) -> int:
"""
Return the total supply of the token
"""
return self.contract.functions.totalSupply().call()
def decimals(self) -> int:
"""
        Return the number of decimal places of the token
"""
return self.contract.functions.decimals().call()
####################
# Write
####################
def transfer(self, to: Address, amount: int, nonce: Nonce = None) -> HexStr:
"""
Transfer some amount of the token to an address; does not
require approval.
"""
tx: TxParams = self.buildContractTransaction(
self.contract.functions.transfer(Web3.toChecksumAddress(to), amount)
)
if nonce:
tx["nonce"] = nonce
return self.signAndSendTransaction(tx)
```
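The class docstring stresses that every "amount" is expressed in token units, so a human-readable figure has to be scaled by `10 ** decimals()` before it is handed to `transfer`. A minimal sketch of that conversion follows, assuming placeholder node and token addresses.
```python
# Sketch only: the node URI and token address are placeholders, not real values.
from src.libs.Web3Client.Erc20Web3Client import Erc20Web3Client

client = Erc20Web3Client(
    nodeUri="https://example-rpc.invalid",  # placeholder RPC endpoint
    contractAddress="0x0000000000000000000000000000000000000000",  # placeholder token
)

human_amount = 1.5  # tokens, as a person would write them
token_units = int(human_amount * 10 ** client.decimals())  # what the contract expects
print(token_units, client.symbol())
```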
#### File: Web3Client/tests/testToWei.py
```python
from web3 import Web3
from pprint import pprint
# VARS
currency = "ether"
# TEST FUNCTIONS
def test() -> None:
pprint(Web3.toWei(1, currency))
# EXECUTE
test()
```
#### File: bot/looting/testCloseLoots.py
```python
from src.bot.looting.closeLoots import closeLoots
from src.common.config import users
from src.models.User import User
# VARS
# TEST FUNCTIONS
def test() -> None:
nFinished = closeLoots(User(users[0]["address"]))
print(f"CLOSED {nFinished} LOOTING GAMES")
# EXECUTE
test()
```
#### File: src/tests/testFetchOpenLoots.py
```python
from sys import argv
from src.common.config import users
from src.helpers.general import secondOrNone
from src.helpers.mines import fetchOpenLoots
from src.models.User import User
# VARS
userNumber = int(secondOrNone(argv) or 1)
# TEST FUNCTIONS
def test() -> None:
openLoots = fetchOpenLoots(User.find(userNumber))
print(openLoots)
# EXECUTE
test()
```
#### File: src/tests/testLooterCanReinforce.py
```python
from src.helpers.general import secondOrNone
from src.helpers.reinforce import looterCanReinforce, getLooterReinforcementStatus
from src.libs.CrabadaWeb2Client.CrabadaWeb2Client import CrabadaWeb2Client
from pprint import pprint
from sys import argv
# VARS
mineId = secondOrNone(argv)
client = CrabadaWeb2Client()
if not mineId:
print("Provide a game ID")
exit(1)
mine = client.getMine(mineId)
# TEST FUNCTIONS
def test() -> None:
print(">>> MINE")
pprint(mine)
print(">>> LOOTER REINFORCEMENT STATUS")
pprint(getLooterReinforcementStatus(mine))
print(">>> LOOTER CAN REINFORCE")
pprint(looterCanReinforce(mine))
# EXECUTE
test()
```
#### File: src/tests/testSendIM.py
```python
from sys import argv
from src.helpers.instantMessage import sendIM
from src.helpers.general import secondOrNone
from pprint import pprint
# VARS
silent = secondOrNone(argv) != "1"
body = "Join Earth's mightiest heroes. Like <NAME>."
# TEST FUNCTIONS
def test() -> None:
output = sendIM(
body=body, forceSend=True, silent=silent # send IM regardless of settings
)
print(">>> SILENT?")
pprint(silent)
print(">>> SUCCESS?")
pprint(output)
# EXECUTE
test()
```
|
{
"source": "jgc234/eversolar",
"score": 3
}
|
#### File: jgc234/eversolar/eversolar.py
```python
import time
import sys
import json
import serial
import logging
import struct
import collections
import datetime
import argparse
#format = '%(asctime)-15s %(filename)s:%(funcName)s:%(lineno)s [%(levelname)s] %(message)s'
format = '%(asctime)-15s [%(levelname)s] %(message)s'
datefmt="%Y-%m-%d %H:%M:%S"
logging.basicConfig(format=format, datefmt=datefmt)
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
class Device:
def __init__(self, serial, addr):
self.serial = serial
self.addr = addr
self.field_map = {}
return
pass
item_map = {}
class DataItem:
def __init__(self, code, var, name, multiplier, units, descr):
self.code = code
self.var = var
self.name = name
self.multiplier = multiplier
self.units = units
self.descr = descr
item_map[code] = self
return
pass
#
# data definitions
#
DataItem(0x00, 'temp', 'Temperature', 0.1, '°C', 'Internal inverter temperature')
DataItem(0x01, 'v_pv1', 'Vpv1', 0.1, 'V', 'PV1 voltage')
DataItem(0x02, 'v_pv2', 'Vpv2', 0.1, 'V', 'PV2 voltage')
DataItem(0x04, 'i_pv1', 'Ipv1', 0.1, 'A', 'PV1 current')
DataItem(0x05, 'i_pv2', 'Ipv2', 0.1, 'A', 'PV2 current')
DataItem(0x07, 'e_total_h', 'E-Total_H', 0.1, 'KW.Hr', 'Total Energy to grid')
DataItem(0x08, 'e_total_l', 'E-Total_L', 0.1, 'KW.Hr', 'Total Energy to grid')
DataItem(0x09, 'h_total_h', 'H-Total_H', 1, 'Hr', 'Total operation hours')
DataItem(0x0a, 'h_total_l', 'H-Total_L', 1, 'Hr', 'Total operation hours')
DataItem(0x0b, 'p_ac', 'Pac', 1, 'W', 'Total power to grid')
DataItem(0x0c, 'mode', 'Mode', 1, '', 'Operation Mode')
DataItem(0x0d, 'e_today', 'E-today', 0.01, 'KW.Hr', 'The accumulated kWh of day')
DataItem(0x20, 'sur_temp', 'surTemp', 0.1, '°C', 'Ambient Temperature')
DataItem(0x21, 'bd_temp', 'bdTemp', 0.1, '°C', 'Panel Temperature')
DataItem(0x22, 'irr', 'irr', 0.1, 'W/m2', 'Rad')
DataItem(0x23, 'wind_speed', 'windSpeed', 0.1, 'm/s', 'Speed of wind')
DataItem(0x38, 'waiting_time', 'waitingTime', 1, 's', 'wait time on connection')
DataItem(0x39, 'tmp_fault_value', 'TmpFaultValue', 0.1, '°C', 'Temperature fault value')
DataItem(0x3a, 'pv1_fault_value', 'PV1FaultValue', 0.1, 'V', 'PV1 voltage fault value')
DataItem(0x3b, 'pv2_fault_value', 'PV2FaultValue', 0.1, 'V', 'PV2 voltage fault value')
DataItem(0x3d, 'gfci_fault_value', ' GFCIFaultValue', 0.001, 'A', 'GFCI current fault value')
DataItem(0x3e, 'error_msg_h', 'ErrorMesssageH', None, '', 'Failure description for status')
DataItem(0x3f, 'error_msg_l', 'ErrorMesssageH', None, '', 'Failure description for status')
#
# Single or R phase.
#
DataItem(0x40, 'v_pv', 'Vpv', 0.1, 'V', 'PV voltage')
DataItem(0x41, 'i_pv', 'Ipv1', 0.1, 'A', 'PV current')
DataItem(0x42, 'v_ac', 'Vac', 0.1, 'V', 'Grid voltage')
DataItem(0x43, 'f_ac', 'Fac', 0.01, 'Hz', 'Grid frequency')
DataItem(0x44, 'p_ac', 'Pac', 1, 'W', 'Power to grid')
DataItem(0x45, 'z_ac', 'Zac', 0.001, 'Ω', 'Grid Impedance')
DataItem(0x46, 'i_pv', 'Ipv', 0.1, 'A', 'PV current')
DataItem(0x47, 'e_total_hr', 'E-Total_H', 0.1, 'KW.Hr', 'Total Energy to grid')
DataItem(0x48, 'e_total_l', 'E-Total_L', 0.1, 'KW.Hr', 'Total Energy to grid')
DataItem(0x49, 'h_total_h', 'H-Total_H', 1, 'Hr', 'Total operation hours')
DataItem(0x4a, 'h_total_l', 'H-Total_L', 1, 'Hr', 'Total operation hours')
DataItem(0x4b, 'power_on', 'Power_On', None, '', 'Number of times the inverter starts feeding the grid')
DataItem(0x4c, 'mode', 'Mode', 1, '', 'Operation Mode')
DataItem(0x78, 'gv_fault_value', 'GVFaultValue', 0.1, 'V', 'Grid Voltage Fault Value')
DataItem(0x79, 'gf_fault_value', 'GFFaultValue', 0.01, 'Hz', 'Grid Frequency Fault Value')
DataItem(0x7a, 'gz_fault_value', 'GZFaultValue', 0.001, 'Ω', 'Grid Impedance Fault Value')
DataItem(0x7b, 'tmp_fault_fault','TmpFaultValue', 0.1, '°C', 'Temperature Fault Value')
DataItem(0x7c, 'pv1_fault_value','PV1FaultValue', 0.1, 'V', 'PV1 voltage fault value')
DataItem(0x7d, 'gfci_fault_value','GFCIFaultValue', 0.1, 'A', 'GFCI current fault value')
DataItem(0x7e, 'error_msg_h', 'ErrorMesssageH', None, '', 'Failure description for status')
DataItem(0x7f, 'error_msg_l', 'ErrorMesssageH', None, '', 'Failure description for status')
#
# S phase
#
DataItem(0x80, 'v_pv2', 'Vpv', 0.1, 'V', 'PV voltage')
DataItem(0x81, 'i_pv_s', 'Ipv1', 0.1, 'A', 'PV current')
DataItem(0x82, 'v_ac_s', 'Vac', 0.1, 'V', 'Grid voltage')
DataItem(0x83, 'f_ac_s', 'Fac', 0.01, 'Hz', 'Grid frequency')
DataItem(0x84, 'p_ac_s', 'Pac', 1, 'W', 'Power to grid')
DataItem(0x85, 'z_ac_s', 'Zac', 0.001, 'Ω', 'Grid Impedance')
DataItem(0x86, 'i_pv_s', 'Ipv', 0.1, 'A', 'PV current')
DataItem(0x87, 'e_total_hr', 'E-Total_H', 0.1, 'KW.Hr', 'Total Energy to grid')
DataItem(0x88, 'e_total_l', 'E-Total_L', 0.1, 'KW.Hr', 'Total Energy to grid')
DataItem(0x89, 'h_total_h', 'H-Total_H', 1, 'Hr', 'Total operation hours')
DataItem(0x8a, 'h_total_l', 'H-Total_L', 1, 'Hr', 'Total operation hours')
DataItem(0x8b, 'power_on', 'Power_On', None, '', 'Number of times the inverter starts feeding the grid')
DataItem(0x8c, 'mode', 'Mode', 1, '', 'Operation Mode')
DataItem(0xb8, 'gv_fault_value', 'GVFaultValue', 0.1, 'V', 'Grid Voltage Fault Value')
DataItem(0xb9, 'gf_fault_value', 'GFFaultValue', 0.01, 'Hz', 'Grid Frequency Fault Value')
DataItem(0xba, 'gz_fault_value', 'GZFaultValue', 0.001, 'Ω', 'Grid Impedance Fault Value')
DataItem(0xbb, 'tmp_fault_fault','TmpFaultValue', 0.1, '°C', 'Temperature Fault Value')
DataItem(0xbc, 'pv1_fault_value','PV1FaultValue', 0.1, 'V', 'PV1 voltage fault value')
DataItem(0xbd, 'gfci_fault_value','GFCIFaultValue', 0.1, 'A', 'GFCI current fault value')
DataItem(0xbe, 'error_msg_h', 'ErrorMesssageH', None, '', 'Failure description for status')
DataItem(0xbf, 'error_msg_l', 'ErrorMesssageH', None, '', 'Failure description for status')
#
# T phase
#
DataItem(0xc0, 'v_pv2', 'Vpv', 0.1, 'V', 'PV voltage')
DataItem(0xc1, 'i_pv_s', 'Ipv1', 0.1, 'A', 'PV current')
DataItem(0xc2, 'v_ac_s', 'Vac', 0.1, 'V', 'Grid voltage')
DataItem(0xc3, 'f_ac_s', 'Fac', 0.01, 'Hz', 'Grid frequency')
DataItem(0xc4, 'p_ac_s', 'Pac', 1, 'W', 'Power to grid')
DataItem(0xc5, 'z_ac_s', 'Zac', 0.001, 'Ω', 'Grid Impedance')
DataItem(0xc6, 'i_pv_s', 'Ipv', 0.1, 'A', 'PV current')
DataItem(0xc7, 'e_total_hr', 'E-Total_H', 0.1, 'KW.Hr', 'Total Energy to grid')
DataItem(0xc8, 'e_total_l', 'E-Total_L', 0.1, 'KW.Hr', 'Total Energy to grid')
DataItem(0xc9, 'h_total_h', 'H-Total_H', 1, 'Hr', 'Total operation hours')
DataItem(0xca, 'h_total_l', 'H-Total_L', 1, 'Hr', 'Total operation hours')
DataItem(0xcb, 'power_on', 'Power_On', None, '', 'Number of times the inverter starts feeding the grid')
DataItem(0xcc, 'mode', 'Mode', 1, '', 'Operation Mode')
DataItem(0xf8, 'gv_fault_value', 'GVFaultValue', 0.1, 'V', 'Grid Voltage Fault Value')
DataItem(0xf9, 'gf_fault_value', 'GFFaultValue', 0.01, 'Hz', 'Grid Frequency Fault Value')
DataItem(0xfa, 'gz_fault_value', 'GZFaultValue', 0.001, 'Ω', 'Grid Impedance Fault Value')
DataItem(0xfb, 'tmp_fault_fault','TmpFaultValue', 0.1, '°C', 'Temperature Fault Value')
DataItem(0xfc, 'pv1_fault_value','PV1FaultValue', 0.1, 'V', 'PV1 voltage fault value')
DataItem(0xfd, 'gfci_fault_value','GFCIFaultValue', 0.1, 'A', 'GFCI current fault value')
DataItem(0xfe, 'error_msg_h', 'ErrorMesssageH', None, '', 'Failure description for status')
DataItem(0xff, 'error_msg_l', 'ErrorMesssageH', None, '', 'Failure description for status')
device_map = {}
class Eversolar:
##
##
functions = {
'register_offline_query' : ((0x10, 0x00), (0x10, 0x80)),
'register_send_register_address' : ((0x10, 0x01), (0x10, 0x81)),
'register_remove_register' : ((0x10, 0x02), (0x10, 0x82)),
'register_re_connect' : ((0x10, 0x03), None),
'register_re_register' : ((0x10, 0x04), None),
'read_query_description' : ((0x11, 0x00), (0x11, 0x80)),
'read_query_normal_info' : ((0x11, 0x02), (0x11, 0x82)),
'read_query_inverter_id' : ((0x11, 0x03), (0x11, 0x83)),
}
##
##
def __init__(self, port_filename):
self.port_filename = port_filename
self.addr = 0x01
self.next_addr = 0x10
self.open_port()
return
##
##
def open_port(self):
self.port = serial.Serial()
self.port.port = self.port_filename
self.port.baudrate = 9600
self.port.parity = serial.PARITY_NONE
self.port.stopbits = serial.STOPBITS_ONE
self.port.bytesize = serial.EIGHTBITS
self.port.timeout = 1
self.port.open()
return
##
##
def register(self):
log.info('register')
reply = self.send_request(self.addr, 0x00, self.functions['register_offline_query'])
log.info('register reply=%s', reply)
if not reply:
log.error('wrong response from register_offline_query')
return
serial = reply
addr = self.next_addr
self.next_addr += 1
data = serial + struct.pack('B', addr)
reply = self.send_request(self.addr, 0x00, self.functions['register_send_register_address'], data)
if not reply:
log.error('register_send_register_address failed')
return
ack, = struct.unpack('B', reply)
log.info('ack =%s, type=%s, reply=%s', ack, type(ack), reply)
if ack != 0x06:
            log.error('wrong acknowledgement code from register_send_register_address response')
return
device = Device(serial, addr)
device_map[serial] = device
comm.get_inverter_id(device)
comm.get_inverter_descr(device)
return
##
##
def get_inverter_id(self, device):
data = self.send_request(self.addr, device.addr, self.functions['read_query_inverter_id'])
if not data:
log.error('failed to get data from id request')
return
phase, rating, firmware, model, manufacturer, serial, nomv = struct.unpack('!B6s5s16s16s16s4s', data[:64])
log.info(' INFO phase=%s, rating=%s, firmware=%s, model=%s, manufacturer=%s, serial=%s, nomv=%s',
phase, rating, firmware, model, manufacturer, serial, nomv)
device.phase = phase
device.rating = rating
device.firmware = firmware
device.manufacturer = manufacturer
device.nomv = nomv
return
##
##
def get_inverter_descr(self, device):
data = self.send_request(self.addr, device.addr, self.functions['read_query_description'])
if not data:
log.error('failed to get data from query description')
return
for i in range(0, len(data)):
value = data[i]
item = item_map.get(value)
device.field_map[i] = item
if item is None:
info = '(missing)'
else:
info = 'code=0x%02x, var=%s, multiplier=%s, units=%s, descr=%s' % (
item.code, item.var, item.multiplier, item.units, item.descr)
pass
log.info(' map [%02d] -> %s', i, info)
pass
return
def get_inverter_info(self, device):
log.info('--')
output = {}
data = self.send_request(self.addr, device.addr, self.functions['read_query_normal_info'])
if not data:
log.error('failed to get data from info request')
return
output['device'] = device.serial.decode()
output['timestamp'] = datetime.datetime.now().isoformat()
for index in range(0, int(len(data) / 2)):
offset = index * 2
chunk = data[offset:offset+2]
raw_value, = struct.unpack('!H', chunk)
dataitem = device.field_map.get(index)
if not dataitem: continue
            if dataitem.multiplier:
                value = raw_value * dataitem.multiplier
            else:
                value = raw_value
                pass
log.info(' [%02d]->[0x%02x] %s = %s %s (%s)', index, dataitem.code, dataitem.name, value, dataitem.units, dataitem.descr)
output[dataitem.var] = value
pass
return output
##
##
def send_request(self, src_addr, dst_addr, function_info, data=[]):
tx_info, rx_info = function_info
#
# build packet
#
packet = bytearray()
packet.extend((0xAA, 0x55))
packet.extend((src_addr, 0x00))
packet.extend((0x00, dst_addr))
packet.extend(tx_info)
packet.append(len(data))
packet.extend(data)
checksum = sum(packet)
packet.extend(struct.pack('!H', checksum & 0xFFFF))
log.info('tx packet - src=0x%X, dst=0x%X, function=%s, data=%s, len=%s, checksum=%s', src_addr, dst_addr, tx_info, data, len(data), checksum)
log.info('tx packet - %s', packet.hex())
self.port.write(packet)
if rx_info is None:
log.info('not expecting a response')
return
log.info('expecting reply of %s', rx_info)
return self.receive_request(rx_info)
##
##
def receive_request(self, rx_info):
packet = self.port.read(256)
if not packet: return
log.info('rx packet - %s', packet.hex())
length = len(packet)
header, src_addr, dst_addr, control, function, data_len = struct.unpack('!HHHBBB', packet[0:9])
data = packet[9:9+data_len]
        checksum = packet[-2:]
log.info('rx packet - src=0x%X, dst=0x%X, function=%s, data=%s, len=%s, checksum=%s', src_addr, dst_addr, rx_info, data, len(data), checksum)
return data
##
##
def re_register_all(self):
for i in range(0,3):
self.send_request(self.addr, 0x00, self.functions['register_re_register'])
pass
return
pass
##
##
##
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Eversolar PV Inverter protocol reader')
parser.add_argument('--debug', type=str, default='info', help='debug level debug, info, warn, etc')
parser.add_argument('--kafka', type=str, help='optional kafka address to send messages to')
parser.add_argument('--kafka-topic', type=str, default='pvsolar', help='kafka topic name')
parser.add_argument('--serial', type=str, help='filename of serial device (eg /dev/ttyUSB0)')
parser.add_argument('--syslog', default=False, action='store_true', help='enable logging to syslog')
args = parser.parse_args()
log.setLevel(logging.getLevelName(args.debug.upper()))
log.debug('called with args - %s', args)
if not args.serial:
parser.print_help()
sys.exit(1)
pass
if args.kafka:
import kafka
producer = kafka.KafkaProducer(bootstrap_servers=[args.kafka], )
else:
producer = None
pass
if args.syslog:
import syslog
syslog.openlog(ident='eversolar', facility=syslog.LOG_USER)
else:
syslog = False
pass
comm = Eversolar(args.serial)
comm.re_register_all()
comm.register()
time_previous = time.time()
while 1:
for device in device_map.values():
            results = comm.get_inverter_info(device)
            if not results: continue
            keys = sorted(results.keys())
json_output = json.dumps(results).encode('utf-8')
kv_output = ' '.join( [ '%s=%s' % (k, results[k]) for k in keys ] )
if producer:
producer.send(args.kafka_topic, json_output)
pass
if syslog:
syslog.syslog(kv_output)
pass
log.info('output=%s', kv_output)
pass
time.sleep(9)
time_now = time.time()
if time_now - time_previous > 60:
time_previous = time_now
comm.register()
pass
pass
pass
```
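`send_request` frames every message the same way: a 0xAA 0x55 header, two-byte source and destination addresses, a two-byte function code, a length byte, the payload, and a 16-bit arithmetic checksum over everything before it. The standalone sketch below restates that framing outside the class; the addresses and function code are example values only.
```python
# Illustrative restatement of the framing built in Eversolar.send_request above;
# the addresses and function code below are example values only.
import struct


def build_frame(src_addr: int, dst_addr: int, function: tuple, data: bytes = b"") -> bytearray:
    """Frame layout: AA 55 | src (2B) | dst (2B) | function (2B) | len | data | checksum (2B)."""
    packet = bytearray()
    packet.extend((0xAA, 0x55))
    packet.extend((src_addr, 0x00))
    packet.extend((0x00, dst_addr))
    packet.extend(function)
    packet.append(len(data))
    packet.extend(data)
    checksum = sum(packet) & 0xFFFF  # 16-bit arithmetic sum of every byte so far
    packet.extend(struct.pack("!H", checksum))
    return packet


frame = build_frame(0x01, 0x00, (0x10, 0x00))  # register_offline_query, no payload
print(frame.hex())
```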
|
{
"source": "jgc234/optiburb",
"score": 2
}
|
#### File: jgc234/optiburb/optiburb.py
```python
import math
import time
import os
import sys
import re
import shapely
import logging
import geopandas
import osmnx
import networkx as nx
import numpy as np
import itertools
import argparse
import gpxpy
import gpxpy.gpx
import datetime
logging.basicConfig(format='%(asctime)-15s %(filename)s:%(funcName)s:%(lineno)s [%(levelname)s] %(message)s', datefmt='%Y-%m-%d %H:%M:%S', level=logging.INFO)
log = logging.getLogger(__name__)
class Burbing:
    WARNING = '''WARNING - this program does not consider the direction of one-way roads or other roads that may not be suitable for your mode of transport. You must confirm the path is safe for yourself'''
def __init__(self):
self.g = None
self.polygons = {}
self.region = shapely.geometry.Polygon()
self.name = ''
self.start = None
#
        # filters to roughly match those used by wandrer.earth (see
# https://wandrer.earth/scoring )
#
self.custom_filter = (
'["highway"]'
'["area"!~"yes"]'
#'["highway"!~"motorway|motorway_link|trunk|trunk_link|bridleway|footway|service|pedestrian|'
'["highway"!~"motorway|motorway_link|bridleway|footway|service|pedestrian|'
'steps|stairs|escalator|elevator|construction|proposed|demolished|escape|bus_guideway|'
'sidewalk|crossing|bus_stop|traffic_signals|stop|give_way|milestone|platform|speed_camera|'
'raceway|rest_area|traffic_island|services|yes|no|drain|street_lamp|razed|corridor|abandoned"]'
'["access"!~"private|no|customers"]'
'["bicycle"!~"dismount|use_sidepath|private|no"]'
'["service"!~"private|parking_aisle"]'
'["motorroad"!="yes"]'
'["golf_cart"!~"yes|designated|private"]'
'[!"waterway"]'
'[!"razed"]'
)
log.debug('custom_filter=%s', self.custom_filter)
# not all of these fields are used at the moment, but they
# look like fun for the future.
useful_tags_way = [
'bridge', 'tunnel', 'oneway', 'lanes', 'ref', 'name', 'highway', 'maxspeed', 'service',
'access', 'area', 'landuse', 'width', 'est_width', 'junction', 'surface',
]
osmnx.utils.config(useful_tags_way=useful_tags_way, use_cache=True, log_console=True)
log.warning(self.WARNING)
return
##
##
def add_polygon(self, polygon, name):
self.polygons[name] = polygon
self.region = self.region.union(polygon)
if self.name:
self.name += '_'
pass
processed_name = name.lower()
processed_name = re.sub(r'[\s,_]+', '_', processed_name)
self.name += processed_name
return
##
##
def get_osm_polygon(self, name, select=1, buffer_dist=20):
log.info('searching for query=%s, which_result=%s', name, select)
gdf = osmnx.geocode_to_gdf(name, buffer_dist=buffer_dist, which_result=select)
log.info('gdf=%s', gdf)
polygon = gdf.geometry.values[0]
return polygon
##
##
def get_shapefile_polygon(self, shapefile, key, name):
log.info('shapefile=%s, key=%s, name=%s', shapefile, key, name)
df = shapefile
suburb = df[df[key] == name]
suburb = suburb.to_crs(epsg=4326)
log.info('suburb=%s', suburb)
polygon = suburb['geometry'].values[0]
return polygon
##
##
def set_start_location(self, addr):
point = osmnx.geocoder.geocode(addr)
self.start = point
log.info('setting start point to %s', self.start)
return
##
##
def find_odd_nodes(self):
# for undirected graphs
odd_nodes = { i for i, n in self.g.degree if n % 2 == 1 }
return odd_nodes
##
##
def get_pair_combinations(self, nodes):
pairs = list(itertools.combinations(nodes, 2))
return pairs
##
##
def get_shortest_path_pairs(self, g, pairs):
# XXX - consider Floyd–Warshall here instead of repeated
# Dijkstra. Also consider how to parallelise this as a
# short-term speed-up, by palming off chunks to another
        # thread, except this won't work well in CPython because of the GIL.
shortest_paths = {}
_prev_pct = 0
_size = len(pairs)
_prev_n = 0
_prev_time = time.time()
for n, pair in enumerate(pairs):
i, j = pair
shortest_paths[pair] = nx.dijkstra_path_length(g, i, j, weight='length')
## output progress
_cur_pct = int(100 * n / _size)
if _prev_pct != _cur_pct:
_cur_time = time.time()
log.info('dijkstra progress %s%%, [%d/%d] %d/second', _cur_pct, n, _size, (_prev_n - n) / (_prev_time - _cur_time))
_prev_time = _cur_time
_prev_pct = _cur_pct
_prev_n = n
pass
pass
return shortest_paths
##
##
def augment_graph(self, pairs):
# create a new graph and stuff in the new fake/virtual edges
# between odd pairs. Generate the edge metadata to make them
# look similar to the native edges.
log.info('pre augmentation eulerian=%s', nx.is_eulerian(self.g_augmented))
for i, pair in enumerate(pairs):
a, b = pair
length, path = nx.single_source_dijkstra(self.g, a, b, weight='length')
log.debug('PAIR[%s] nodes = (%s,%s), length=%s, path=%s', i, a, b, length, path)
linestring = self.path_to_linestring(self.g_augmented, path)
# create a linestring of paths...
data = {
'length': length,
'augmented': True,
'path': path,
'geometry': linestring,
'from': a,
'to': b,
}
log.debug(' creating new edge (%s,%s) - data=%s', a, b, data)
self.g_augmented.add_edge(a, b, **data)
pass
log.info('post augmentation eulerian=%s', nx.is_eulerian(self.g_augmented))
return
##
##
def print_edges(self, g):
for edge in g.edges:
data = g.get_edge_data(*edge, 0)
            _osmid = ','.join(str(i) for i in data.get('osmid')) if type(data.get('osmid')) == list else str(data.get('osmid'))
_name = ','.join(data.get('name')) if type(data.get('name')) == list else str(data.get('name'))
_highway = data.get('highway', '-')
_surface = data.get('surface', '-')
_oneway = data.get('oneway', '-')
_access = data.get('access', '-')
log.debug(f'{_osmid:10} {_name:30} {_highway:20} {_surface:10} {_oneway:10} {_access:10}')
pass
##
##
def determine_nodes(self):
self.g_directed = self.g
self.g = osmnx.utils_graph.get_undirected(self.g_directed)
self.print_edges(self.g)
self.g_augmented = self.g.copy()
self.odd_nodes = self.find_odd_nodes()
return
##
##
def optimise_dead_ends(self):
# preempt virtual path augmentation for the case of a dead-end
# road. Here the optimum combination pair is its only
# neighbour node, so why bother iterating through all the
# pairs to find that.
# XXX - not quite clean yet.. we are augmenting the original
        # graph.. need a cleaner way to pass changes through the
# processing pipeline.
deadends = { i for i, n in self.g.degree if n == 1 }
n1 = len(self.find_odd_nodes())
for deadend in deadends:
neighbours = self.g[deadend]
#node_data = self.g.nodes[deadend]
#log.info('deadend_ndoe=%s, data=%s', deadend, node_data)
log.debug('deadend_node=%s', deadend)
if len(neighbours) != 1:
log.error('wrong number of neighbours for a dead-end street')
continue
for neighbour in neighbours.keys():
log.debug('neighbour=%s', neighbour)
edge_data = dict(self.g.get_edge_data(deadend, neighbour, 0))
edge_data['augmented'] = True
log.debug(' creating new edge (%s,%s) - data=%s', deadend, neighbour, edge_data)
self.g.add_edge(deadend, neighbour, **edge_data)
pass
pass
# fix up the stuff we just busted. XXX - this should not be
# hidden in here.
self.odd_nodes = self.find_odd_nodes()
self.g_augmented = self.g.copy()
n2 = len(self.odd_nodes)
log.info('odd_nodes_before=%d, odd_nodes_after=%d', n1, n2)
log.info('optimised %d nodes out', n1 - n2)
return
##
##
def determine_combinations(self):
log.info('eulerian=%s, odd_nodes=%s', nx.is_eulerian(self.g), len(self.odd_nodes))
odd_node_pairs = self.get_pair_combinations(self.odd_nodes)
log.info('combinations=%s', len(odd_node_pairs))
odd_pair_paths = self.get_shortest_path_pairs(self.g, odd_node_pairs)
# XXX - this part doesn't work well because it doesn't
# consider the direction of the paths.
# create a temporary graph of odd pairs.. really we should be
# doing the combination max calculations here.
self.g_odd_nodes = nx.Graph()
for k, length in odd_pair_paths.items():
i,j = k
attrs = {
'length': length,
'weight': -length,
}
self.g_odd_nodes.add_edge(i, j, **attrs)
pass
log.info('new_nodes=%s, edges=%s, eulerian=%s', self.g_odd_nodes.order(), self.g_odd_nodes.size(), nx.is_eulerian(self.g_odd_nodes))
log.info('calculating max weight matching - this can also take a while')
return
##
##
def determine_circuit(self):
odd_matching = nx.algorithms.max_weight_matching(self.g_odd_nodes, True)
log.info('augment original graph with %s pairs', len(odd_matching))
self.augment_graph(odd_matching)
start_node = self.get_start_node(self.g, self.start)
self.euler_circuit = list(nx.eulerian_circuit(self.g_augmented, source=start_node))
return
##
##
def reverse_linestring(self, line):
return shapely.geometry.LineString(line.coords[::-1])
##
##
def directional_linestring(self, g, edge):
# return a linestring that points in the same direction as the
# nodes of the specified edge.
u, v = edge
data = g.get_edge_data(u, v, 0)
if data is None:
log.error('no data for edge %s', edge)
return None
node_to = data.get('to')
node_from = data.get('from')
if (u, v) == (node_from, node_to):
return data.get('geometry')
if (u, v) == (node_to, node_from):
return self.reverse_linestring(data.get('geometry'))
log.error('failed to match start and end for directional linestring edge=%s, linestring=%s', edge, data)
return None
##
##
def get_start_node(self, g, start_addr):
if start_addr:
(start_node, distance) = osmnx.distance.get_nearest_node(g, start_addr, return_dist=True)
log.info('start_node=%s, distance=%s', start_node, distance)
else:
start_node = None
pass
return start_node
##
##
def path_to_linestring(self, g, path):
# this creates a new linestring that follows the path of the
# augmented route between two odd nodes. this is needed to
# force a path with the final GPX route, rather than drawing a
# straight line between the two odd nodes and hoping some
        # other program routes it the same way we wanted to.
coords = []
prev = None
u = path.pop(0)
for v in path:
edge = (u, v)
log.debug('working with edge=%s', edge)
data = g.get_edge_data(u, v, 0)
if data is None:
log.error('missing data for edge (%s, %s)', u, v)
continue
linestring = data.get('geometry')
directional_linestring = self.directional_linestring(g, edge)
for c in directional_linestring.coords:
if c == prev: continue
coords.append(c)
prev = c
pass
u = v
pass
return shapely.geometry.LineString(coords)
##
##
def prune(self):
# eliminate edges with unnamed tracks. At least where I live,
# these tend to be 4wd tracks that require a mountain bike to
        # navigate. probably need to do a better filter that looks at
# surface type and other aspects.
remove_types = ('track', 'path')
removeset = set()
for edge in self.g.edges:
data = self.g.get_edge_data(*edge)
if data.get('highway') in remove_types and data.get('name') is None:
log.debug('removing edge %s, %s', edge, data)
removeset.add(edge)
pass
if data.get('highway') in ('cycleway',):
log.debug('removing edge %s, %s', edge, data)
removeset.add(edge)
pass
pass
for edge in removeset:
self.g.remove_edge(*edge)
pass
# this removes the isolated nodes orphaned from the removed
# edges above. It does not solve the problem of a
# non-connected graph (ie, nodes and edges in a blob that
# aren't reachable to other parts of the graph)
self.g = osmnx.utils_graph.remove_isolated_nodes(self.g)
return
##
##
def save_fig(self):
filename = f'burb_nodes_{self.name}.svg'
log.info('saving SVG node file as %s', filename)
nc = ['red' if node in self.odd_nodes else 'blue' for node in self.g.nodes() ]
fig, ax = osmnx.plot_graph(self.g, show=False, save=True, node_color=nc, filepath=filename)
return
##
##
def load(self, options):
log.info('fetching OSM data bounded by polygon')
self.g = osmnx.graph_from_polygon(self.region, network_type='bike', simplify=False, custom_filter=self.custom_filter)
log.debug('original g=%s, g=%s', self.g, type(self.g))
log.info('original nodes=%s, edges=%s', self.g.order(), self.g.size())
if options.simplify:
log.info('simplifying graph')
self.g = osmnx.simplification.simplify_graph(self.g, strict=False, remove_rings=False)
pass
return
##
##
def load_shapefile(self, filename):
df = geopandas.read_file(filename)
log.info('df=%s', df)
log.info('df.crs=%s', df.crs)
return df
##
##
def add_shapefile_region(self, name):
df = self.shapefile_df
key = self.shapefile_key
        suburb = df[df[key] == name]
log.info('suburb=%s', suburb)
suburb = suburb.to_crs(epsg=4326)
log.info('suburb=%s', suburb)
polygon = suburb['geometry'].values[0]
return polygon
##
##
def create_gpx_polygon(self, polygon):
gpx = gpxpy.gpx.GPX()
gpx.name = f'boundary {self.name}'
gpx.author_name = 'optiburb'
gpx.creator = 'experimental burbing'
gpx.description = f'experimental burbing boundary for {self.name}'
track = gpxpy.gpx.GPXTrack()
track.name = f'burb bound {self.name}'
filename = f'burb_polygon_{self.name}.gpx'
log.info('saving suburb boundary - %s', filename)
# XXX - add colour?
#xmlns:gpxx="http://www.garmin.com/xmlschemas/GpxExtensions/v3"
#track.extensions =
#<extensions>
# <gpxx:TrackExtension>
# <gpxx:DisplayColor>Red</gpxx:DisplayColor>
# </gpxx:TrackExtension>
#</extensions>
gpx.tracks.append(track)
segment = gpxpy.gpx.GPXTrackSegment()
track.segments.append(segment)
for x, y in polygon.exterior.coords:
segment.points.append(gpxpy.gpx.GPXRoutePoint(latitude=y, longitude=x))
pass
data = gpx.to_xml()
with open(filename, 'w') as f:
f.write(data)
pass
return
##
##
def create_gpx_track(self, g, edges, simplify=False):
# create GPX XML.
stats_distance = 0.0
stats_backtrack = 0.0
stats_deadends = 0
gpx = gpxpy.gpx.GPX()
gpx.name = f'burb {self.name}'
gpx.author_name = 'optiburb'
#gpx.author_email =''
gpx.creator = 'experimental burbing'
gpx.description = f'experimental burbing route for {self.name}'
track = gpxpy.gpx.GPXTrack()
track.name = f'burb trk {self.name}'
gpx.tracks.append(track)
segment = gpxpy.gpx.GPXTrackSegment()
track.segments.append(segment)
i = 1
for n, edge in enumerate(edges):
u, v = edge
edge_data = g.get_edge_data(*edge, 0)
log.debug('EDGE [%d] - edge=%s, data=%s', n, edge, edge_data)
if edge_data is None:
log.warning('null data for edge %s', edge)
continue
linestring = edge_data.get('geometry')
augmented = edge_data.get('augmented')
stats_distance += edge_data.get('length', 0)
log.debug(' leg [%d] -> %s (%s,%s,%s,%s,%s)', n, edge_data.get('name', ''), edge_data.get('highway', ''), edge_data.get('surface', ''), edge_data.get('oneway', ''), edge_data.get('access', ''), edge_data.get('length', 0))
if linestring:
directional_linestring = self.directional_linestring(g, edge)
for lon, lat in directional_linestring.coords:
segment.points.append(gpxpy.gpx.GPXRoutePoint(latitude=lat, longitude=lon))
log.debug(' INDEX[%d] = (%s, %s)', i, lat, lon)
i += 1
pass
pass
else:
log.error(' no linestring for edge=%s', edge)
pass
if edge_data.get('augmented', False):
stats_backtrack += edge_data.get('length', 0)
pass
pass
log.info('total distance = %.2fkm', stats_distance/1000.0)
log.info('backtrack distance = %.2fkm', stats_backtrack/1000.0)
##
##
if simplify:
log.info('simplifying GPX')
gpx.simplify()
pass
data = gpx.to_xml()
filename = f'burb_track_{self.name}.gpx'
with open(filename, 'w') as f:
f.write(data)
pass
return
pass
##
##
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Optimum Suburb Route Generator')
parser.add_argument('names', type=str, nargs=argparse.REMAINDER, help='suburb names with state, country, etc')
parser.add_argument('--debug', type=str, default='info', help='debug level debug, info, warn, etc')
parser.add_argument('--start', type=str, help='optional starting address')
parser.add_argument('--prune', default=False, action='store_true', help='prune unnamed gravel tracks')
parser.add_argument('--simplify', default=False, action='store_true', help='simplify OSM nodes on load')
parser.add_argument('--simplify-gpx', dest='simplify_gpx', default=True, action='store_true', help='reduce GPX points')
parser.add_argument('--complex-gpx', dest='simplify_gpx', action='store_false', help='leave all the OSM points in the GPX output')
    parser.add_argument('--select', type=int, default=1, help='select the nth item from the search results. a truly awful hack because I cannot work out how to search for administrative boundaries.')
parser.add_argument('--shapefile', type=str, default=None, help='filename of shapefile to load localities, comma separated by the column to match on')
    parser.add_argument('--buffer', type=int, dest='buffer', default=20, help='buffer distance around polygon')
parser.add_argument('--save-fig', default=False, action='store_true', help='save an SVG image of the nodes and edges')
parser.add_argument('--save-boundary', default=False, action='store_true', help='save a GPX file of the suburb boundary')
    parser.add_argument('--feature-deadend', default=False, action='store_true', help='experimental feature to optimise dead ends in the solution')
args = parser.parse_args()
log.setLevel(logging.getLevelName(args.debug.upper()))
log.debug('called with args - %s', args)
start_time = datetime.datetime.now()
burbing = Burbing()
if not args.names:
parser.print_help()
sys.exit(1)
pass
if args.shapefile:
filename, key = args.shapefile.split(',')
log.info('shapefile=%s, key=%s', filename, key)
shapefile = burbing.load_shapefile(filename)
for name in args.names:
polygon = burbing.get_shapefile_polygon(shapefile, key, name)
burbing.add_polygon(polygon, name)
pass
pass
else:
for name in args.names:
polygon = burbing.get_osm_polygon(name, args.select, args.buffer)
burbing.add_polygon(polygon, name)
pass
pass
if args.save_boundary:
burbing.create_gpx_polygon(burbing.region)
pass
if args.start:
burbing.set_start_location(args.start)
pass
burbing.load(args)
if args.prune:
burbing.prune()
pass
burbing.determine_nodes()
if args.feature_deadend:
burbing.optimise_dead_ends()
pass
if args.save_fig:
burbing.save_fig()
pass
burbing.determine_combinations()
burbing.determine_circuit()
burbing.create_gpx_track(burbing.g_augmented, burbing.euler_circuit, args.simplify_gpx)
end_time = datetime.datetime.now()
log.info('elapsed time = %s', end_time - start_time)
pass
```
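The pipeline above is a route-inspection ("Chinese postman") construction: collect the odd-degree nodes, pair them with a max-weight matching over negated shortest-path lengths, duplicate those shortest paths as virtual edges, and then walk an Eulerian circuit of the augmented graph. The toy networkx sketch below shows the same idea on a tiny made-up graph, without any of the OSM, geometry or GPX handling.
```python
# Toy sketch of the odd-node matching / Eulerian-circuit idea used above,
# on a made-up four-node graph; no OSM, geometry or GPX handling here.
import itertools

import networkx as nx

g = nx.Graph()
g.add_edge("a", "b", length=1.0)
g.add_edge("b", "c", length=1.0)
g.add_edge("c", "d", length=1.0)
g.add_edge("a", "c", length=2.0)

# Nodes with odd degree are the ones that break the Eulerian-circuit condition.
odd_nodes = {n for n, deg in g.degree if deg % 2 == 1}

# Complete graph over the odd nodes, weighted by negative shortest-path length
# so that max-weight matching picks the cheapest set of pairs to duplicate.
g_odd = nx.Graph()
for u, v in itertools.combinations(odd_nodes, 2):
    dist = nx.dijkstra_path_length(g, u, v, weight="length")
    g_odd.add_edge(u, v, weight=-dist)

matching = nx.algorithms.max_weight_matching(g_odd, maxcardinality=True)

# Duplicate the matched shortest paths as extra ("augmented") edges; with a
# MultiGraph the parallel edges survive and every node ends up with even degree.
g_aug = nx.MultiGraph(g)
for u, v in matching:
    path = nx.dijkstra_path(g, u, v, weight="length")
    for a, b in zip(path, path[1:]):
        g_aug.add_edge(a, b, length=g[a][b]["length"], augmented=True)

print(list(nx.eulerian_circuit(g_aug)))
```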
|
{
"source": "jgcaffari1/mnelab",
"score": 2
}
|
#### File: mnelab/dialogs/filterdialog.py
```python
from qtpy.QtWidgets import (QDialog, QVBoxLayout, QGridLayout, QLabel,
QLineEdit, QDialogButtonBox)
class FilterDialog(QDialog):
def __init__(self, parent):
super().__init__(parent)
self.setWindowTitle("Filter data")
vbox = QVBoxLayout(self)
grid = QGridLayout()
grid.addWidget(QLabel("Low cutoff frequency (Hz):"), 0, 0)
self.lowedit = QLineEdit()
grid.addWidget(self.lowedit, 0, 1)
grid.addWidget(QLabel("High cutoff frequency (Hz):"), 1, 0)
self.highedit = QLineEdit()
grid.addWidget(self.highedit, 1, 1)
vbox.addLayout(grid)
buttonbox = QDialogButtonBox(QDialogButtonBox.Ok |
QDialogButtonBox.Cancel)
vbox.addWidget(buttonbox)
buttonbox.accepted.connect(self.accept)
buttonbox.rejected.connect(self.reject)
vbox.setSizeConstraint(QVBoxLayout.SetFixedSize)
@property
def low(self):
low = self.lowedit.text()
return float(low) if low else None
@property
def high(self):
high = self.highedit.text()
return float(high) if high else None
```
#### File: mnelab/dialogs/interpolatebadsdialog.py
```python
from qtpy.QtWidgets import (QDialog, QVBoxLayout, QGridLayout, QLabel,
QHBoxLayout, QDialogButtonBox, QComboBox,
QCheckBox, QDoubleSpinBox)
class InterpolateBadsDialog(QDialog):
def __init__(self, parent):
super().__init__(parent)
self.setWindowTitle("Interpolate bad channels")
vbox = QVBoxLayout(self)
grid = QGridLayout()
grid.addWidget(QLabel("Reset bads:"), 0, 0)
self.reset_bads_checkbox = QCheckBox()
self.reset_bads_checkbox.setChecked(True)
grid.addWidget(self.reset_bads_checkbox, 0, 1)
grid.addWidget(QLabel("Mode:"), 1, 0)
self.mode_select = QComboBox()
self.modes = {"Accurate": "accurate", "Fast": "fast"}
self.mode_select.addItems(self.modes.keys())
self.mode_select.setCurrentText("Accurate")
grid.addWidget(self.mode_select, 1, 1)
grid.addWidget(QLabel("Origin (x, y, z):"), 2, 0)
hbox = QHBoxLayout()
self.x = QDoubleSpinBox()
self.x.setValue(0)
self.x.setDecimals(3)
hbox.addWidget(self.x)
self.y = QDoubleSpinBox()
self.y.setValue(0)
self.y.setDecimals(3)
hbox.addWidget(self.y)
self.z = QDoubleSpinBox()
self.z.setValue(0.04)
self.z.setDecimals(3)
hbox.addWidget(self.z)
grid.addLayout(hbox, 2, 1)
vbox.addLayout(grid)
buttonbox = QDialogButtonBox(QDialogButtonBox.Ok |
QDialogButtonBox.Cancel)
vbox.addWidget(buttonbox)
buttonbox.accepted.connect(self.accept)
buttonbox.rejected.connect(self.reject)
vbox.setSizeConstraint(QVBoxLayout.SetFixedSize)
@property
def origin(self):
x = float(self.x.value())
y = float(self.y.value())
z = float(self.z.value())
return x, y, z
@property
def mode(self):
return self.mode_select.currentText()
@property
def reset_bads(self):
return self.reset_bads_checkbox.isChecked()
```
#### File: mnelab/dialogs/npydialog.py
```python
import sys
from PyQt5.QtCore import Qt
from qtpy.QtGui import QDoubleValidator
from qtpy.QtWidgets import (
QCheckBox,
QFormLayout,
QLineEdit,
QVBoxLayout,
QComboBox,
QSlider,
QDialogButtonBox,
QDialog
)
MIN_FS = 100
MAX_FS = 1000
STEP_SIZE = 100
MIN_ALLOWABLE_FS = 0.0001
DECIMAL_PLACES = 4
SUPPORTED_CHANNEL_TYPES = ["", "ecg", "bio", "stim", "eog",
"misc", "seeg", "ecog", "mag",
"eeg", "ref_meg", "grad", "emg", "hbr", "hbo"]
class NpyDialog(QDialog):
def __init__(self, parent):
super().__init__(parent)
# initialize settings:
self.settings = {'ch_type': "misc", 'fs': None, 'standardize': False}
self.setWindowTitle("Parameters")
# Create layout for all items.
outer_form = QVBoxLayout()
# create form for the text box:
top_form = QFormLayout()
# Create a text box for reading the sample rate:
self.fs = QLineEdit()
self.fs.setValidator(
QDoubleValidator(
MIN_ALLOWABLE_FS,
sys.float_info.max,
DECIMAL_PLACES))
top_form.addRow("Sample Rate (Hz):", self.fs)
# initialize slider for fs:
self.fs_slider = QSlider(Qt.Horizontal)
self.fs_slider.setMinimum(MIN_FS)
self.fs_slider.setMaximum(MAX_FS)
self.fs_slider.setValue(MIN_FS)
self.fs_slider.setTickPosition(QSlider.TicksBelow)
self.fs_slider.setTickInterval(STEP_SIZE)
self.fs_slider.setSingleStep(STEP_SIZE)
self.fs_slider.valueChanged.connect(self.value_change)
# initialize dropdown for selecting channel type:
self.ch_type_dropdown = QComboBox()
self.ch_type_dropdown.addItems(SUPPORTED_CHANNEL_TYPES)
self.ch_type_dropdown.activated.connect(self.set_type)
# initialize checkbox for controlling standardization:
self.standardize = QCheckBox("Standardize Data")
# initialize accept/deny buttons:
self.buttonbox = QDialogButtonBox(QDialogButtonBox.Ok |
QDialogButtonBox.Cancel)
self.buttonbox.accepted.connect(self.button_accept)
self.buttonbox.rejected.connect(self.reject)
# build dialog window:
outer_form.addLayout(top_form)
outer_form.addWidget(self.fs_slider)
outer_form.addWidget(self.ch_type_dropdown)
outer_form.addWidget(self.standardize)
outer_form.addWidget(self.buttonbox)
self.setLayout(outer_form)
def set_type(self):
"""
sets the channel type based off of the selected item in the dropdown
menu.
"""
self.settings['ch_type'] = self.ch_type_dropdown.currentText()
        if self.settings['ch_type'] == "":
self.settings['ch_type'] = "misc"
def value_change(self):
"""
Sets the text bar to match the slider. Is only called when the slider
is used.
"""
self.fs.setText(str(self.fs_slider.value()))
def get_values(self):
"""
gets the settings from the dialog box
"""
return self.settings
def set_values(self):
"""
Takes the settings from the text box and checkbox, and stores them in
their respective settings.
In this case, sets the sample frequency and standardization flag.
"""
fs = self.fs.text()
if fs != "":
fs = float(fs)
self.settings['fs'] = fs
self.settings['standardize'] = self.standardize.isChecked()
def button_accept(self):
"""
function called when dialog is accepted. Sets all values before closing
the dialog window.
"""
self.set_values()
return self.accept()
```
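`NpyDialog` gathers a sample rate, channel type and standardisation flag, and only commits them via `set_values` when the OK button fires `button_accept`. A hedged sketch of driving the dialog follows; it assumes a running `QApplication` and that the dialog tolerates `parent=None`.
```python
# Sketch only: assumes a running QApplication and that parent=None is acceptable.
import sys

from PyQt5.QtWidgets import QApplication

from mnelab.dialogs.npydialog import NpyDialog

app = QApplication(sys.argv)
dialog = NpyDialog(None)
if dialog.exec_():                  # accepted -> button_accept() already ran set_values()
    settings = dialog.get_values()  # {'ch_type': ..., 'fs': ..., 'standardize': ...}
    print(settings)
```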
|
{
"source": "jgcasta/lostatnight",
"score": 2
}
|
#### File: jgcasta/lostatnight/createTasks.py
```python
import json
from optparse import OptionParser
import pbclient
from get_images import get_iss_photos
import random
import logging
from requests import exceptions
import asciitable
import time
def contents(filename):
return file(filename).read()
def handle_arguments():
# Arguments for the application
usage = "usage: %prog [options]"
parser = OptionParser(usage)
# URL where PyBossa listens
parser.add_option("-s", "--server", dest="api_url",
help="PyBossa URL http://domain.com/", metavar="URL",
default="http://localhost:5000/")
# API-KEY
parser.add_option("-k", "--api-key", dest="api_key",
help="PyBossa User API-KEY to interact with PyBossa",
metavar="API-KEY")
# Create App
parser.add_option("-c", "--create-app", action="store_true",
dest="create_app",
help="Create the application",
metavar="CREATE-APP")
# Update template for tasks and long_description for app
parser.add_option("-t", "--update-template", action="store_true",
dest="update_template",
help="Update Tasks template",
metavar="UPDATE-TEMPLATE")
# Update tasks question
parser.add_option("-q", "--update-tasks",
type="int",
dest="update_tasks",
help="Update Tasks n_answers",
metavar="UPDATE-TASKS")
parser.add_option("-x", "--extra-task", action="store_true",
dest="add_more_tasks",
help="Add more tasks",
metavar="ADD-MORE-TASKS")
# Modify the number of TaskRuns per Task
# (default 30)
parser.add_option("-n", "--number-answers",
type="int",
dest="n_answers",
help="Number of answers per task",
metavar="N-ANSWERS",
default=30)
parser.add_option("-a", "--application-config",
dest="app_config",
help="Application config file",
metavar="APP-CONFIG",
default="app.json")
parser.add_option("-v", "--verbose", action="store_true", dest="verbose")
(options, args) = parser.parse_args()
if not options.create_app and not options.update_template\
and not options.add_more_tasks and not options.update_tasks:
parser.error("Please check --help or -h for the available options")
if not options.api_key:
parser.error("You must supply an API-KEY to create an \
application and tasks in PyBossa")
return options
def get_configuration():
options = handle_arguments()
# Load app details
try:
with file(options.app_config) as app_json:
app_config = json.load(app_json)
except IOError:
print "application config file is missing! Please create a new one"
exit(1)
return (app_config, options)
def run(app_config, options):
def check_api_error(api_response):
"""Check if returned API response contains an error"""
if type(api_response) == dict and (api_response.get('status') == 'failed'):
raise exceptions.HTTPError
def format_error(module, error):
"""Format the error for the given module"""
logging.error(module)
# Beautify JSON error
if type(error) == list:
print "Application not found"
else:
print json.dumps(error, sort_keys=True, indent=4, separators=(',', ': '))
exit(1)
def find_app_by_short_name():
try:
response = pbclient.find_app(short_name=app_config['short_name'])
check_api_error(response)
return response[0]
except:
format_error("pbclient.find_app", response)
def setup_app():
app = find_app_by_short_name()
app.long_description = contents('long_description.html')
app.info['task_presenter'] = contents('template.html')
app.info['thumbnail'] = app_config['thumbnail']
app.info['tutorial'] = contents('tutorial.html')
try:
response = pbclient.update_app(app)
check_api_error(response)
return app
except:
format_error("pbclient.update_app", response)
def create_photo_task(app, photo, question, priority=0):
# Data for the tasks
task_info = dict(
n_answers=options.n_answers,
idiss=photo['idISS'],
link_big=photo['link_big'],
link_small=photo['link_small'],
linkData=photo['linkData'],
citylon=photo['citylon'],
citylat=photo['citylat'],
focal=photo['focal'])
try:
response = pbclient.create_task(app.id, task_info, priority_0=priority)
check_api_error(response)
except:
#response = pbclient.create_task(app.id, task_info, priority_0=priority)
format_error("pbclient.create_task", response)
def add_photo_tasks(app):
photos = get_iss_photos( )
question = app_config['question']
[create_photo_task(app, p, question, priority=random.random()) for p in photos]
pbclient.set('api_key', options.api_key)
pbclient.set('endpoint', options.api_url)
if options.verbose:
print('Running against PyBosssa instance at: %s' % options.api_url)
print('Using API-KEY: %s' % options.api_key)
if options.create_app or options.add_more_tasks:
if options.create_app:
try:
response = pbclient.create_app(app_config['name'],
app_config['short_name'],
app_config['description'])
check_api_error(response)
app = setup_app()
except:
format_error("pbclient.create_app", response)
else:
app = find_app_by_short_name()
add_photo_tasks(app)
if options.update_template:
print "Updating app template"
# discard return value
setup_app()
if options.update_tasks:
def tasks(app):
offset = 0
limit = 100
while True:
try:
tasks = pbclient.get_tasks(app.id, offset=offset, limit=limit)
check_api_error(tasks)
if len(tasks) == 0:
break
for task in tasks:
yield task
offset += len(tasks)
except:
format_error("pbclient.get_tasks", response)
def update_task(task, count):
print "Updating task: %s" % task.id
if 'n_answers' in task.info:
del(task.info['n_answers'])
task.n_answers = options.update_tasks
try:
response = pbclient.update_task(task)
check_api_error(response)
count[0] += 1
except:
format_error("pbclient.update_task", response)
print "Updating task n_answers"
app = find_app_by_short_name()
n_tasks = [0]
[update_task(t, n_tasks) for t in tasks(app)]
print "%s Tasks have been updated!" % n_tasks[0]
if __name__ == "__main__":
app_config, options = get_configuration()
run(app_config, options)
```
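Inside `run`, the `tasks(app)` generator pages through a project's tasks with a fixed offset/limit window until an empty page comes back. The standalone sketch below isolates that pagination pattern with a stub fetch function standing in for `pbclient.get_tasks`.
```python
# Standalone sketch of the offset/limit pagination used by tasks(app) above;
# fetch_page is a stub standing in for pbclient.get_tasks.
def fetch_page(offset, limit):
    items = list(range(250))  # pretend API with 250 tasks in total
    return items[offset:offset + limit]


def paginate(limit=100):
    offset = 0
    while True:
        page = fetch_page(offset, limit)
        if len(page) == 0:
            break
        for item in page:
            yield item
        offset += len(page)


print(sum(1 for _ in paginate()))  # 250
```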
|
{
"source": "jgcastro89/EarthMod",
"score": 3
}
|
#### File: EarthMod/EarthMod/EarthMod_controler.py
```python
import pandas as pd
from pyqtgraph import QtGui
from contour_map import ContourPlot, surfaceMesh
from pandas_tableview_model import PandasModel
from surface_map import surfacePlot
class Logic(QtGui.QMainWindow):
def __init__(self):
super(Logic, self).__init__()
"""
        This class acts as a controller between the GUI and the objects (maps, models, plots, views).
        Instances of the objects used by the GUI are instantiated here.
"""
self.Contour_Map = ContourPlot() # Instance of contourPlot class
self.Surface_Plot = surfacePlot() # Instance of surfacePlot class
self.pandasTableView = QtGui.QTableView()
# variables for 2D maps
self.xDim = 100
self.yDim = 100
self.zDim = self.xDim / 2
self.xyzFileName = None
self.pandasDataframe = None
self.pandasModel = None
# buttons for menu dock
self.set_x_grid_size = QtGui.QPushButton('Set X Grid Size')
self.set_y_grid_size = QtGui.QPushButton('Set Y Grid Size')
self.set_z_grid_size = QtGui.QPushButton('Set Vertical Exaggeration')
self.open_fileButton = QtGui.QPushButton('Open File')
self.extrapolation_mapButton = QtGui.QPushButton('Extrapolation Methods')
self.interpolation_mapButton = QtGui.QPushButton('Interpolation Methods')
self.select_colormapButton = QtGui.QPushButton('Colormap Options')
# signals for methods
self.set_x_grid_size.clicked.connect(self.get_int_attr_X)
self.set_y_grid_size.clicked.connect(self.get_int_attr_Y)
self.set_z_grid_size.clicked.connect(self.get_int_attr_Z)
self.open_fileButton.clicked.connect(self.open_file)
self.extrapolation_mapButton.clicked.connect(self.build_extrapolation_map)
self.interpolation_mapButton.clicked.connect(self.build_interpolation_map)
self.select_colormapButton.clicked.connect(self.select_colormap)
def get_int_attr_X(self):
"""
This method assigns an integer value for the x-axis grid size. The value is stored in set_x_grid_size.
Modifications are needed for when a user clicks cancel instead of ok.
:return:
"""
num, ok = QtGui.QInputDialog.getInt(self, "Set Grid Size", "Enter an Integer", 300, 100)
input = str(num)
if num > 99 and ok:
self.set_x_grid_size.setText(input)
self.xDim = num
else:
self.get_int_attr_X()
self.Contour_Map.contour_ax = None
def get_int_attr_Y(self):
"""
This method assigns an integer value for the y-axis grid size. The value is stored in set_y_grid_size.
Modifications are needed for when a user clicks cancel.
:return:
"""
num, ok = QtGui.QInputDialog.getInt(self, "Set Grid Size", "Enter an Integer", 300, 100)
input = str(num)
if num > 99 and ok:
self.set_y_grid_size.setText(input)
self.yDim = num
else:
self.get_int_attr_Y()
self.Contour_Map.contour_ax = None
def get_int_attr_Z(self):
"""
This method assigns an integer for the z-axis grid size. The value is stored in set_z_grid_size.
This method is currently not in use. Its application might take place when developing volumetric models.
:return:
"""
num, ok = QtGui.QInputDialog.getDouble(self, "Set Grid Size", "Enter a Double", 2000.0, 0.00001)
input = str(num)
if num < 2001 and ok:
self.set_z_grid_size.setText(input)
self.Surface_Plot.verticalExag = num
self.Surface_Plot.init_surfaceMesh(self.Contour_Map.Z, self.xDim, self.yDim,
self.pandasDataframe.iloc[:,2].min())
else:
self.get_int_attr_Z()
def open_file(self):
"""
A file dialog is opened to allow users to load a CSV file that contains the xyz data.
:return:
"""
self.Contour_Map.contour_ax = None
self.xyzFileName = QtGui.QFileDialog.getOpenFileName(self, 'OpenFile')
self.build_pandas_dataframe()
def build_pandas_dataframe(self):
"""
Populate a pandas dataframe with a selected CSV file opened by the user.
:return:
"""
if 'csv' in self.xyzFileName[0]:
self.pandasDataframe = pd.read_csv(str(self.xyzFileName[0]), header=0)
print("Pass")
else:
#self.pandasDataframe = pd.read_csv(str(self.xyzFileName[0]), sep='\t', header=None)
# deprecated version
self.pandasDataframe = pd.read_table(str(self.xyzFileName[0]), delim_whitespace=True, header=None)
self.build_pandas_model()
def build_extrapolation_map(self):
"""
This method lets the user select an extrapolation scheme from a list. It then passes the data and extrapolation
method to Contour_Map.build_2D_grid to visualize the results.
:return:
"""
# Contour_Map.contour_ax is set to None in order to generate a new extrapolation grid.
self.Contour_Map.contour_ax = None
items = ("Ordinary-Kriging", "Universal-Kriging")
item, ok = QtGui.QInputDialog.getItem(self, "Kriging", "Extrapolation", items, 0, False)
if(ok):
# If an Rbf extrapolation method is selected, we must remove the prefix.
if 'Rbf' in str(item):
item = str(item).split('-')
item = item[1]
self.Contour_Map.build_2D_grid(self.pandasDataframe.iloc[:,0], self.pandasDataframe.iloc[:,1],
self.pandasDataframe.iloc[:,2], self.xDim, self.yDim,
interp_type='Rbf', func=item)
else:
item = str(item).split('-')
item = item[0]
self.Contour_Map.build_2D_grid(self.pandasDataframe.iloc[:,0], self.pandasDataframe.iloc[:,1],
self.pandasDataframe.iloc[:,2], self.xDim, self.yDim,
interp_type=None, func=item)
self.Surface_Plot.init_surfaceMesh(self.Contour_Map.Z, self.xDim, self.yDim,
self.pandasDataframe.iloc[:,2].min())
def build_interpolation_map(self):
"""
This method lets the user select an interpolation scheme from a list. It then passes the data and interpolation
method to Contour_Map.build_2D_grid to visualize the results.
:return:
"""
self.Contour_Map.contour_ax = None
items = ("ordinary-kriging", "universal-kriging", "linear", "cubic")
item, ok = QtGui.QInputDialog.getItem(self, "Interpolation", "Interpolation", items, 0, False)
if(ok):
# If an Rbf extrapolation method is selected, we must remove the prefix.
if 'Rbf' in str(item):
item = str(item).split('-')
item = item[1]
self.Contour_Map.build_2D_grid(self.pandasDataframe.iloc[:,0], self.pandasDataframe.iloc[:,1], self.pandasDataframe.iloc[:,2],
self.xDim, self.yDim,
interp_type='Rbf',
func=item)
else:
try:
item = str(item).split('-')
item = item[0]
except IndexError:
pass
self.Contour_Map.build_2D_grid(self.pandasDataframe.iloc[:,0], self.pandasDataframe.iloc[:,1], self.pandasDataframe.iloc[:,2],
self.xDim, self.yDim,
interp_type=None,
func=item)
self.Surface_Plot.init_surfaceMesh(self.Contour_Map.Z, self.xDim, self.yDim,
self.pandasDataframe.iloc[:,2].min())
def build_pandas_model(self):
Pandas_Model_Instance = PandasModel(self.pandasDataframe)
self.pandasTableView.setModel(Pandas_Model_Instance)
self.pandasTableView.resizeColumnsToContents()
def select_colormap(self):
"""
This method opens up a dialog to allow the user to select from a list of colormap options.
If the extrapolation/interpolation method remains unchanged, Contour_Map.build_2D_grid skips over having to
generate a new grid. It utilizes the existing grid and simply changes the colormap.
:return:
"""
items = (
"cividis", "YlGnBu_r", "Purples_r", "Blues_r", "Greens_r",
"PuRd_r", "RdPu_r", "YlGn_r", "BuPu_r", "RdBu_r", "ocean", "gist_earth",
"terrain", "seismic", "jet", "viridis", "plasma", "inferno", "magma"
)
item, ok = QtGui.QInputDialog.getItem(self, "Select a Colormap Option", "Colormaps", items, 0, False)
if (ok):
self.Contour_Map.colormap = str(item)
self.Contour_Map.build_2D_grid(self.pandasDataframe.iloc[:,0], self.pandasDataframe.iloc[:,1],
self.pandasDataframe.iloc[:,2], self.xDim, self.yDim)
self.Surface_Plot.update_colormap(item)
```
#### File: EarthMod/EarthMod/pandas_tableview_model.py
```python
from pyqtgraph import QtGui, QtCore
class PandasModel(QtCore.QAbstractTableModel):
"""
Class to populate a table view with a pandas dataframe
"""
def __init__(self, data, parent=None):
QtCore.QAbstractTableModel.__init__(self, parent)
self._data = data
def rowCount(self, parent=None):
return len(self._data.values)
def columnCount(self, parent=None):
return self._data.columns.size
def data(self, index, role=QtCore.Qt.DisplayRole):
if index.isValid():
if role == QtCore.Qt.DisplayRole:
return str(self._data.values[index.row()][index.column()])
if role == QtCore.Qt.BackgroundRole:
# return self._data.Color[index.row()]
return QtGui.QColor(QtCore.Qt.white)
if role == QtCore.Qt.ForegroundRole:
pass
# return self._data.Color[index.row()]
return None
def headerData(self, col, orientation, role):
if orientation == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole:
return self._data.columns[col]
return None
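# Minimal usage sketch, assuming a Qt application is already running (as in
# EarthMod_controler.Logic, which wires this model into its QTableView); the
# dataframe values below are placeholders:
#
#   import pandas as pd
#   df = pd.DataFrame({'x': [1.0, 2.0], 'y': [10.0, 20.0]})
#   view = QtGui.QTableView()
#   view.setModel(PandasModel(df))
#   view.resizeColumnsToContents()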
```
|
{
"source": "jgce-components/auth_py",
"score": 3
}
|
#### File: auth_py/auth_py/jwt.py
```python
from injector import inject, singleton
import python_jwt as jwt, jwcrypto.jwk as jwk, datetime
from .interfaces import Settings
from .interfaces import JWTService
@singleton
class JWTServiceImpl(JWTService):
@inject
def __init__(self, s: Settings):
self.settings = s
def sign(self, payload: dict) -> str:
ks = jwk.JWKSet()
ks = self.settings.jwks(ks)
k = list(ks["keys"])[0]
resp_token = jwt.generate_jwt(payload, k, 'RS256',
datetime.timedelta(minutes=5),
datetime.datetime.now(),
datetime.datetime.now(), 16,
{"kid": "alg1"})
return resp_token
```
|
{
"source": "JGC-HUST/Python-learning",
"score": 4
}
|
#### File: 01_exercise/00_exercise-zip.py/zip_version3.py
```python
import os
import subprocess
import time
source = r'C:\__Test__\1.txt'
target_dir = r'C:\__Test4__'
if not os.path.exists(target_dir):
os.mkdir(target_dir)
today = target_dir + os.sep + time.strftime('%Y%m%d')
now = time.strftime('%H%M%S')
comment = input('Enter a comment...')
if len(comment) > 0:
target = today + os.sep + now + '_' + comment.replace(' ', '_') + '.zip'
else:
target = today + os.sep + now + '.zip'
if not os.path.exists(today):
os.mkdir(today)
print('Successfully created directory', today)
zip_command = 'zip -r {0} {1}'.format(target, source)
def run():
print('Zip command is')
print(zip_command)
print('Running')
if subprocess.call(zip_command) == 0:
print('Successful backup to', target)
else:
print('Backup FAILED')
run()
```
#### File: 01_exercise/01_oop-variable.py/inherit.py
```python
class SchoolMember:
'''Represents a member of the school'''
def __init__(self, name, age):
self.name = name
self.age = age
print('(Initialized SchoolMember: {})'.format(self.name))
def tell(self):
'''Tell me details about myself'''
print('Name:"{}" Age:"{}"'.format(self.name, self.age), end = ' ')
class Teacher(SchoolMember):
'''Represents a teacher'''
def __init__(self, name, age, salary):
SchoolMember.__init__(self, name, age)
self.salary = salary
print('(Initialized Teacher: {})'.format(self.name))
def tell(self):
SchoolMember.tell(self)
print('Salary: "{:d}"'.format(self.salary))
class Student(SchoolMember):
'''Represents a student'''
def __init__(self, name, age, marks):
SchoolMember.__init__(self, name, age)
self.marks = marks
print('(Initialized Student: {})'.format(self.name))
def tell(self):
SchoolMember.tell(self)
print('Marks: "{:d}"'.format(self.marks))
t = Teacher('A', 40, 300000)
s = Student('B', 25, 99)
print()
members = [t, s]
for member in members:
member.tell()
```
|
{
"source": "jgcobb3/python-azavea-climate",
"score": 3
}
|
#### File: jgcobb3/python-azavea-climate/azavea.py
```python
import logging
import configparser
from os import path
from urllib.parse import urljoin, urlparse
import json
from geopy.distance import vincenty
import requests
import pandas as pd
import pprint
log = logging.getLogger(__name__)
class Climate(object):
def __init__(self, configfile=".config"):
self.baseurl = 'https://app.climate.azavea.com/api/'
self.header = {'Authorization': 'Token {}'.format(self._get_api_token(configfile)),
'Origin': 'https://www.serch.us'}
@staticmethod
def _get_config_file(configfile):
configpath = path.join(path.dirname(__file__), configfile)
if not path.exists(configpath):
raise FileNotFoundError(configpath)
else:
return configpath
def _read_config(self, configfile):
config = configparser.ConfigParser()
config.read(self._get_config_file(configfile))
try:
return dict(config.items('Azavea'))
except configparser.NoSectionError as e:
raise e
def _get_api_token(self, configfile):
keys = self._read_config(configfile)
if 'api_token' in keys:
return keys['api_token']
else:
raise KeyError('api_token not found in {}'.format(configfile))
def _get(self, url, params=None):
if not bool(urlparse(url).netloc):
url = urljoin(self.baseurl,url)
try:
response = requests.get(url,
params=params,
headers=self.header)
response.raise_for_status()
log.info(response.url)
result = json.loads(response.content.decode())
return result
except requests.exceptions.RequestException as e:
log.error(e)
raise e
def model(self, name=None):
if name:
return self._get('model/{}'.format(name))
else:
return self._get('model/')
def scenario(self, name=None):
if name:
return self._get('scenario/{}'.format(name))
else:
return self._get('scenario/')
def indicator(self, name=None):
if name:
return self._get('indicator/{}'.format(name))
else:
return self._get('indicator/')
def city(self, id=None, url='city/', cities=[]):
if id:
return self._get('city/{}'.format(id))
else:
result = self._get(url)
cities = cities + result['features']
if result['next']:
return self.city(url=result['next'], cities=cities)
else:
return cities
class City(Climate):
def __init__(self, lon=None, lat=None, name=None, admin=None):
super().__init__()
self.lon = lon
self.lat = lat
self.name = name
self.admin = admin
if lon and lat:
self._feature = self._nearest(lon, lat)
elif name and admin:
self._feature = self._query(name, admin)
else:
self._feature = None
if self._feature:
self.id = self._feature['id']
else:
self.id = None
def __repr__(self):
return pprint.saferepr(self._feature['properties'])
def _nearest(self, lon, lat):
result = self._get('city/nearest',
{'lon': lon,
'lat': lat})
if result['features']:
return result['features'][0]
else:
return None
def _query(self, name, admin):
result = self._get('city', params={'name': name,
'admin': admin})
if result['features']:
return result['features'][0]
else:
return None
def offset(self):
if self.lon and self.lat:
pt1 = (self.lon, self.lat)
pt2 = tuple(self._feature['geometry']['coordinates'])
return vincenty(pt1, pt2).kilometers
else:
return None
def boundary(self):
return self._get('city/{}/boundary'.format(self.id))
def data(self, scenario, indicator):
d = self._get('climate-data/{}/{}/indicator/{}/'\
.format(self.id, scenario, indicator))
return pd.DataFrame(d['data']).transpose()
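# Minimal usage sketch, assuming a valid api_token in the .config file; the city,
# scenario, and indicator names below are placeholders, not verified API values:
#
#   c = City(name='Seattle', admin='WA')
#   print(c.offset())                                 # None unless lon/lat were supplied
#   df = c.data('RCP85', 'average_high_temperature')  # DataFrame of indicator values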
```
|
{
"source": "JGCRI/e3sm_to_cmip",
"score": 2
}
|
#### File: e3sm_to_cmip/cmor_handlers/emiso4.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import cmor
import cdms2
import logging
import numpy as np
from e3sm_to_cmip.lib import handle_variables
# list of raw variable names needed
RAW_VARIABLES = [str('SFso4_a1'), str('SFso4_a2'), str('so4_a1_CLXF'), str('so4_a2_CLXF')]
VAR_NAME = str('emiso4')
VAR_UNITS = str('kg m-2 s-1')
TABLE = str('CMIP6_AERmon.json')
def write_data(varid, data, timeval, timebnds, index, **kwargs):
"""
emiso4 = SFso4_a1 (kg/m2/s) + SFso4_a2 (kg/m2/s) + (so4_a1_CLXF (molec/cm2/s) + \
so4_a2_CLXF(molec/cm2/s)) x 115.107340 (sulfate mw) / 6.02214e+22
"""
outdata = data['SFso4_a1'][index, :] + data['SFso4_a2'][index, :] + \
(data['so4_a1_CLXF'][index, :] + data['so4_a2_CLXF'][index, :]) * \
115.107340 / 6.02214e22
if kwargs.get('simple'):
return outdata
cmor.write(
varid,
outdata,
time_vals=timeval,
time_bnds=timebnds)
def handle(infiles, tables, user_input_path, **kwargs):
return handle_variables(
metadata_path=user_input_path,
tables=tables,
table=TABLE,
infiles=infiles,
raw_variables=RAW_VARIABLES,
write_data=write_data,
outvar_name=VAR_NAME,
outvar_units=VAR_UNITS,
serial=kwargs.get('serial'),
logdir=kwargs.get('logdir'),
simple=kwargs.get('simple'),
outpath=kwargs.get('outpath'))
# ------------------------------------------------------------------
```
|
{
"source": "JGCRI/ESMValTool",
"score": 3
}
|
#### File: diag_scripts/emissions_mip/initial_analysis.py
```python
import os
import iris
import matplotlib.pyplot as plt
from esmvaltool.diag_scripts.shared import group_metadata, run_diagnostic
from esmvalcore.preprocessor import area_statistics
def _plot_time_series(vars_to_plot):
"""
Example of personal diagnostic plotting function.
Plots a monthly time series of an area-averaged variable
between the ground and first levels.
Parameters
----------
vars_to_plot: list of tuples
List containing tuples representing a variable to be plotted.
Tuple format: (cfg, cube, dataset), where:
* cfg : nested dictionary
Nested dictionary of variable metadata.
* cube : Iris cube
Variable data to plot.
* dataset : str
Name of the dataset to plot.
Returns
-------
None.
Notes
-----
* This function is private; remove the '_'
so you can make it public.
Change Log
----------
2020-04-28
* NumPy-ize documentation style.
* Modified arguments to plot multiple variables on one plot.
2020-05-04
* Remove dummy return value.
"""
# Plot output directory can be created dynamically,
# but is hard-coded for now.
local_path = '/home/nich980/emip/output/diagnostics'
for var in vars_to_plot:
# cube = var[1], dataset str = var[2]
plt.plot(var[1].data, label=var[2])
plt.xlabel('Time (months)')
plt.ylabel('Area average')
plt.title('Time series at ground level')
plt.tight_layout()
plt.grid()
plt.legend()
#png_name = 'Time_series-my_little_diagnostic.png'
png_name = 'time_series-initial_analysis-all_in_one.png'
plt.savefig(os.path.join(local_path, png_name))
plt.close()
def run_my_diagnostic(cfg):
"""
Simple example of a diagnostic.
This is a basic (and rather esoteric) diagnostic that firstly
loads the needed model data as iris cubes, performs a difference between
values at ground level and first vertical level, then squares the
result.
Before plotting, we grab the squared result (not all operations on cubes)
and apply an area average on it. This is a useful example of how to use
standard esmvalcore.preprocessor functionality within a diagnostic, and
especially after a certain (custom) diagnostic has been run and the user
needs to perform an operation that is already part of the preprocessor
standard library of functions.
The user will implement their own (custom) diagnostics, but this
example shows that once the preprocessor has finished a whole lot of
user-specific metrics can be computed as part of the diagnostic,
and then plotted in various manners.
Parameters
----------
cfg - Dictionary
Nested dictionary containing dataset names and variables.
Returns
-------
None.
Notes
-----
* Since the preprocessor extracts the 1000 hPa level data,
the cube's data will have shape (36, 180, 360) corresponding
to time (in months), latitude, longitude.
Change log
----------
2020-05-04
* NumPy-ize documentation.
* Configure to plot multiple variables on one plot.
* Pass list containing variable tuples to plotting function.
"""
# assemble the data dictionary keyed by dataset name
# this makes use of the handy group_metadata function that
# orders the data by 'dataset'; the resulting dictionary is
# keyed on datasets e.g. dict = {'MPI-ESM-LR': [var1, var2...]}
# where var1, var2 are dicts holding all needed information per variable
my_files_dict = group_metadata(cfg['input_data'].values(), 'dataset')
var_list = []
# iterate over key(dataset) and values(list of vars)
for key, value in my_files_dict.items():
# load the cube from data files only
# using a single variable here so just grab the first (and only)
# list element
cube = iris.load_cube(value[0]['filename'])
print('KEY: {}'.format(key))
print('Cube shape: {}'.format(cube.data.shape))
print('Cube coords: {}'.format(cube.coords))
# compute an area average over the cube using the preprocessor
# The cube contains only 100000 Pa level data (see recipe).
area_avg_cube = area_statistics(cube, 'mean')
# Append the cfg, area_avg_cube, and key tuple to variable list
var_list.append((cfg, area_avg_cube, key))
_plot_time_series(var_list)
if __name__ == '__main__':
# always use run_diagnostic() to get the config (the preprocessor
# nested dictionary holding all the needed information)
with run_diagnostic() as config:
# list here the functions that need to run
run_my_diagnostic(config)
```
#### File: diag_scripts/examples/my_little_diagnostic.py
```python
import os
# to manipulate iris cubes
import iris
import matplotlib.pyplot as plt
# import internal esmvaltool modules here
from esmvaltool.diag_scripts.shared import group_metadata, run_diagnostic
from esmvalcore.preprocessor import area_statistics
def _plot_time_series(vars_to_plot):
"""
Example of personal diagnostic plotting function.
Plots a monthly time series of an area-averaged variable
between the ground and first levels.
Parameters
----------
vars_to_plot: list of tuples
List containing tuples representing a variable to be plotted.
Tuple format: (cfg, cube, dataset), where:
* cfg : nested dictionary
Nested dictionary of variable metadata.
* cube : Iris cube
Variable data to plot.
* dataset : str
Name of the dataset to plot.
Returns
-------
None.
Notes
-----
* This function is private; remove the '_'
so you can make it public.
Change Log
----------
2020-04-28
* NumPy-ize documentation style.
* Modified arguments to plot multiple variables on one plot.
2020-05-04
* Remove dummy return value.
"""
# custom local paths for e.g. plots are supported -
# here is an example
# root_dir = '/group_workspaces/jasmin2/cmip6_prep/' # edit as per need
# out_path = 'esmvaltool_users/valeriu/' # edit as per need
# local_path = os.path.join(root_dir, out_path)
# but one can use the already defined esmvaltool output paths
#local_path = cfg['plot_dir']
local_path = '/home/nich980/emip/output/diagnostics'
for var in vars_to_plot:
# cube = var[1], dataset str = var[2]
plt.plot(var[1].data, label=var[2])
plt.xlabel('Time (months)')
plt.ylabel('Area average')
plt.title('Time series at ground level')
plt.tight_layout()
plt.grid()
plt.legend()
#png_name = 'Time_series-my_little_diagnostic.png'
png_name = 'time_series-my_little_diagnostic-all_in_one.png'
plt.savefig(os.path.join(local_path, png_name))
plt.close()
def run_my_diagnostic(cfg):
"""
Simple example of a diagnostic.
This is a basic (and rather esoteric) diagnostic that firstly
loads the needed model data as iris cubes, performs a difference between
values at ground level and first vertical level, then squares the
result.
Before plotting, we grab the squared result (not all operations on cubes)
and apply an area average on it. This is a useful example of how to use
standard esmvalcore.preprocessor functionality within a diagnostic, and
especially after a certain (custom) diagnostic has been run and the user
needs to perform an operation that is already part of the preprocessor
standard library of functions.
The user will implement their own (custom) diagnostics, but this
example shows that once the preprocessor has finished a whole lot of
user-specific metrics can be computed as part of the diagnostic,
and then plotted in various manners.
Parameters
----------
cfg - Dictionary
Nested dictionary containing dataset names and variables.
Returns
-------
None.
Notes
-----
* Since the preprocessor extracts the 1000 hPa level data,
the cube's data will have shape (36, 180, 360) corresponding
to time (in months), latitude, longitude.
Change log
----------
2020-05-04
* NumPy-ize documentation.
* Configure to plot multiple variables on one plot.
* Pass list containing variable tuples to plotting function.
"""
# assemble the data dictionary keyed by dataset name
# this makes use of the handy group_metadata function that
# orders the data by 'dataset'; the resulting dictionary is
# keyed on datasets e.g. dict = {'MPI-ESM-LR': [var1, var2...]}
# where var1, var2 are dicts holding all needed information per variable
my_files_dict = group_metadata(cfg['input_data'].values(), 'dataset')
var_list = []
# iterate over key(dataset) and values(list of vars)
for key, value in my_files_dict.items():
# load the cube from data files only
# using a single variable here so just grab the first (and only)
# list element
cube = iris.load_cube(value[0]['filename'])
print('KEY: {}'.format(key))
print('Cube shape: {}'.format(cube.data.shape))
print('Cube coords: {}'.format(cube.coords))
# compute an area average over the cube using the preprocessor
# The cube contains only 100000 Pa level data (see recipe).
area_avg_cube = area_statistics(cube, 'mean')
# Append the cfg, area_avg_cube, and key tuple to variable list
var_list.append((cfg, area_avg_cube, key))
_plot_time_series(var_list)
if __name__ == '__main__':
# always use run_diagnostic() to get the config (the preprocessor
# nested dictionary holding all the needed information)
with run_diagnostic() as config:
# list here the functions that need to run
run_my_diagnostic(config)
```
|
{
"source": "JGCRI/gcamwrapper",
"score": 3
}
|
#### File: gcamwrapper/gcamwrapper/query_library.py
```python
import yaml
import warnings
import re
import pkg_resources
def read_yaml(yaml_file):
'''Read a YAML file.
:param yaml_file: The file path to the YAML file to load
:type yaml_file: str
:returns: A dict representing the YAML data
'''
with open(yaml_file, 'r') as yml:
return yaml.load(yml, Loader=yaml.FullLoader)
'''The parsed query file included with the package'''
PACKAGE_QUERIES = yaml.load(pkg_resources.resource_stream(__name__, 'query_library.yml'), Loader=yaml.FullLoader)
class Query(str):
'''A simple extension of str that can carry units metadata'''
units = None
class QuerySyntaxException(Exception):
'''An Exception type used to signal a gcamwrapper Query syntax error, either in the
Query itself or in the placeholder replacements provided by the user.
'''
pass
def get_query(*args, query_file=None):
'''Look up a query from a YAML file.
The YAML query files will often be organized into nested categories
so users must provide the full path as individual strings such as:
\code{get_query("emissions", "co2_emissions")}
:param query_file: The path to a YAML file to read if not None otherwise
the package query file will be used.
:type query_file: str
:param *args: Strings specifying the path to the query to look up
:type *args: str
:returns: The raw query from the query file with units set as an attribute
if specified
'''
if query_file is None:
queries = PACKAGE_QUERIES
else:
queries = read_yaml(query_file)
query = queries
for path in args:
try:
query = query[path]
except KeyError:
raise(f"Could not find query: {args}")
if len(query) == 0:
raise Exception(f"Could not find query: {args}")
query_str = Query(query[0])
if len(query) > 1:
query_str.units = query[1]
if len(query) > 2:
warnings.warn(f"Additional elements for {args} are ignored, expecting only <query> <units>")
return query_str
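# Illustrative lookup (the category path shown is the one used in the docstring
# above; actual paths depend on the YAML query file being read):
#
#   q = get_query('emissions', 'co2_emissions')
#   q        # the raw query string
#   q.units  # the units entry, if one was provided in the YAML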
# TODO: Remove when we build Sphinx docs
# Design doc:
# change get/set data to accept a query_args argument that is a dict/list
# used to substitute {arg_tag@arg_type} tags from the query database with the rules:
# 1) arg_type = name -> NamedFilter; year -> YearFilter
# 2) key: None -> +MatchesAny
# 3) key: [operator, int-ish]
# where int-ish is either an int or '.' support others?
# and operators are </>|= or * for any
# NOTE: we should support IndexFilter instead of YearFilter but to keep it simple
# for the users we will try to detect which they meant by assuming only years will
# be four digits long
# 4) key: [ operator,char]
# where operators are either = or =~ for regex or * for any
# 5) For query tags not specified by the user it will be replaced with nothing:
# i.e. matches any but collapse
def find_placeholders(query_str):
'''Find all placeholder in a raw query and return them in a list.
Find query placeholders of the form \code{'{tag@type}'} and put them into a
dict such as \code{dict(tag1='name', tag2='year')} for instance.
:param query_str: A raw query string
:type query_str: str
:returns: A dict of all the placeholders found in the query where the keys
are the tags and the values are the types
'''
raw_placeholders = re.findall(r'{([\w@]+)}', query_str)
ret = dict()
for placeholder in raw_placeholders:
ph_split = placeholder.split('@')
if len(ph_split) != 2:
raise QuerySyntaxException(f"Invalid placeholder syntax: {placeholder} expecting two values split by @")
elif ph_split[1] != 'name' and ph_split[1] != 'year':
raise QuerySyntaxException(f"Invalid placeholder syntax, unknown type: {ph_split[1]} expecting name or year")
else:
ret[ph_split[0]] = ph_split[1]
if len(raw_placeholders) != len(ret):
raise QuerySyntaxException(f"Duplicate placeholder tags in {query_str}")
return ret
def parse_int_query_param(param_operands, is_get_data):
'''Parse user options for integer operators and generate GCAM Fusion syntax.
The currently supported operators are:
`+`: Indicates to read/write to a DataFrame. Note if `is_get_data` this is always
implied to be set.
`*`: which always matches and so no additional operands are necessary (note if
param_operands is empty this operator is assumed)
The standard comparison operators: `=`, `<`, `<=`, `>`, `>=`. Note if `is_get_data` is true
or the `+` is not set an additional argument must be supplied which is the integer (or a
string that can be converted to integer) to be used as the RHS operand in the comparison.
:param param_operands: An array containing operators and potentially an operand to be used with that
operator
:type param_operands: array of str
:param is_get_data: A boolean; if true, follows get data semantics, and set data semantics if false
:type is_get_data: boolean
:returns: A GCAM Fusion filter string representing the parameters given
'''
wrapper_to_fusion_lookup = {'*': 'MatchesAny', '<': 'IntLessThan', '<=': 'IntLessThanEq', '>': 'IntGreaterThan', '>=': 'IntGreaterThanEq', '=': 'IntEquals'}
try:
plus_index = param_operands.index('+')
plus_op = param_operands.pop(plus_index)
except (ValueError, AttributeError):
if is_get_data:
plus_op = '+'
else:
plus_op = ''
ret = '[' + plus_op
if param_operands is None or len(param_operands) == 0 or param_operands[0] == '*':
ret += 'YearFilter,' + wrapper_to_fusion_lookup['*']
elif not is_get_data and plus_op == '+':
if len(param_operands) < 1:
raise QuerySyntaxException(f"Invalid query parameter spec: {param_operands}")
ret += 'YearFilter,' + wrapper_to_fusion_lookup[param_operands[0]]
elif len(param_operands) == 2 and param_operands[0] in wrapper_to_fusion_lookup.keys():
try:
operandAsInt = int(param_operands[1])
except ValueError:
raise QuerySyntaxException(f"Expecting integer operand, got: {param_operands}")
# if the int value looks like a date assume YearFilter otherwise it is
# a model period
if operandAsInt > 1000:
ret += 'YearFilter,'
else:
ret += 'IndexFilter,'
ret += wrapper_to_fusion_lookup[param_operands[0]] + ',' + str(operandAsInt)
else:
raise QuerySyntaxException(f"Invalid query parameter spec: {param_operands}")
ret += ']'
return ret
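# A few illustrative translations produced by parse_int_query_param, following
# the rules documented above (the inputs are hypothetical user filter specs):
#
#   parse_int_query_param(['>', '2020'], True)   -> '[+YearFilter,IntGreaterThan,2020]'
#   parse_int_query_param(['=', '3'], True)      -> '[+IndexFilter,IntEquals,3]'   # small int => model period
#   parse_int_query_param(None, False)           -> '[YearFilter,MatchesAny]'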
def parse_str_query_param(param_operands, is_get_data):
'''Parse user options for string operators and generate GCAM Fusion syntax
The currently supported operators are:
`+`: Indicates to read/write to a DataFrame. Note that if `is_get_data` is true this is always
implied to be set.
`*`: which always matches and so no additional operands are necessary (note if
param_operands is empty this operator is assumed)
The operators: `=`, `=~` (regular expression matching). Note if `is_get_data` is true
or the `+` is not set an additional argument must be supplied which is the string to
be used as the RHS operand in the comparison.
:param param_operands: An array containing operators and potentially an operand to be used with that
operator
:type param_operands: array of str
:param is_get_data: A boolean; if true, follows get data semantics, and set data semantics if false
:type is_get_data: boolean
:returns: A GCAM Fusion filter string representing the parameters given
'''
wrapper_to_fusion_lookup = {'*': 'MatchesAny', '=': 'StringEquals','=~': 'StringRegexMatches'}
try:
plus_index = param_operands.index('+')
plus_op = param_operands.pop(plus_index)
except (ValueError, AttributeError) as e:
if is_get_data:
plus_op = '+'
else:
plus_op = ''
ret = '[' + plus_op + 'NamedFilter,'
if param_operands is None or len(param_operands) == 0 or param_operands[0] == '*':
ret += wrapper_to_fusion_lookup['*']
elif not is_get_data and plus_op == '+':
if len(param_operands) < 1:
raise QuerySyntaxException(f"Invalid query parameter spec: {param_operands}")
ret += wrapper_to_fusion_lookup[param_operands[0]]
elif len(param_operands) == 2 and param_operands[0] in wrapper_to_fusion_lookup.keys():
ret += wrapper_to_fusion_lookup[param_operands[0]] + ',' + param_operands[1]
else:
raise QuerySyntaxException(f"Invalid query parameter spec: {param_operands}")
ret += ']'
return ret
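# Illustrative translations produced by parse_str_query_param (hypothetical inputs):
#
#   parse_str_query_param(['=~', 'bio'], True)   -> '[+NamedFilter,StringRegexMatches,bio]'
#   parse_str_query_param(['*'], False)          -> '[NamedFilter,MatchesAny]'
#   parse_str_query_param(['+', '='], False)     -> '[+NamedFilter,StringEquals]'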
def apply_query_params(query, query_params, is_get_data):
'''Translate a raw query looked up from a query library into a GCAM Fusion query.
Raw queries will contain placeholders such as \code{'{arg_tag@arg_type}'} and those
will get replaced by user-supplied filter options given in \code{query_params},
where we match those keys to `arg_tag`. If `arg_type` is "name" then `parse_str_query_param`
will be used to process the value of query_params[[arg_tag]] and if "year" then
`parse_int_query_param` is used.
Note that semantics are slightly different if is_get_data is true, as described in the
parse_.*_query_params functions.
For any arg_tag which has no entry in query_params it will be replaced with nothing
which tells GCAM Fusion to match any but do not read/write to the DataFrame for that container.
:param query: The raw query which needs to have it's placeholders translated.
:type query: str
:param query_params: The user options provided as a list of arg_tags to and array of
operators and potentially operands which will get translated to GCAM
Fusion syntax.
:type query_params: dict of str to array
:param is_get_data: A boolean; if true, follows get data semantics, and set data semantics if false.
:type is_get_data: boolean
:returns: A translated query into GCAM Fusion syntax that is ready to run.
'''
placeholders = find_placeholders(query)
parsed_params = dict()
for param, args in query_params.items():
if not param in placeholders.keys():
warnings.warn(param+' has no placeholders in '+query)
else:
# note error checking on placeholder types has already occurred
if placeholders[param] == 'year':
parsed_params[param+'@'+placeholders[param]] = parse_int_query_param(args, is_get_data)
else:
parsed_params[param+'@'+placeholders[param]] = parse_str_query_param(args, is_get_data)
# TODO: better syntax?
for param, ptype in placeholders.items():
if not param in query_params.keys():
parsed_params[param+'@'+ptype] = ''
return query.format(**parsed_params)
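# End-to-end illustration of apply_query_params with a made-up query string; the
# placeholder tags and filter values are hypothetical, not entries from the
# packaged query library:
#
#   q = 'world/region{region@name}/sector{sector@name}/year{year@year}/value'
#   apply_query_params(q, {'region': ['=', 'USA']}, is_get_data=True)
#   # -> 'world/region[+NamedFilter,StringEquals,USA]/sector/year/value'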
```
|
{
"source": "JGCRI/githubstats",
"score": 3
}
|
#### File: githubstats/githubstats/organization.py
```python
import os
import requests
class Organization:
"""Download and process GitHub metadata for an organization.
:param organization: GitHub organization name
:param username: GitHub user name
:param token: GitHub Personal access token
USAGE:
```
organization = 'JGCRI'
token = '<your token here>'
uname = '<your user name here>'
# instantiate Organization
org = Organization(organization, uname, token)
# list all repositories in an organization
org.list_repos_in_org()
```
"""
GITHUB_API = 'https://api.github.com'
# number of records to return for a page, this is set high because we want to make sure we get all repositories
PAGE_LIMIT = 1000
def __init__(self, organization, username, token):
self.org = organization
self.username = username
self.token = token
def list_repos_in_org(self):
"""Generate a list of all repository names for an organization.
See: https://developer.github.com/v3/repos/#list-organization-repositories
"""
# construct URL to get the clones traffic data from a specific repository
url = os.path.join(Organization.GITHUB_API, 'orgs', self.org, 'repos?per_page={}'.format(Organization.PAGE_LIMIT))
# conduct get to GitHub API
response = requests.get(url, auth=(self.username, self.token))
return [i['name'] for i in response.json()]
```
|
{
"source": "JGCRI/pygcam",
"score": 2
}
|
#### File: mcs/built_ins/addexp_plugin.py
```python
from pygcam.log import getLogger
from .McsSubcommandABC import McsSubcommandABC
_logger = getLogger(__name__)
def driver(args, tool):
'''
Set up the base coremcs database. Must call startDb() before calling this.
'''
from ..Database import getDatabase
db = getDatabase()
expId = db.createExp(args.expName, parent=args.parent, description=args.description)
_logger.debug("Added experiment '%s' with id=%d" % (args.expName, expId))
class AddExpCommand(McsSubcommandABC):
def __init__(self, subparsers):
kwargs = {'help' : '''Adds the named experiment to the database, with an optional description.'''}
super(AddExpCommand, self).__init__('addexp', subparsers, kwargs)
def addArgs(self, parser):
parser.add_argument('expName', type=str,
help='Add the named experiment to the database.')
parser.add_argument('-d', '--description', type=str, required=False,
default='No description',
help='A description of the experiment to store in the database.')
parser.add_argument('-p', '--parent', type=str, required=False,
default=None,
help='''Set the "parent" of this experiment. If adding a policy scenario,
the parent should be the name of the baseline. If adding a baseline, do not
specify a parent.''')
return parser # for auto-doc generation
def run(self, args, tool):
driver(args, tool)
```
|
{
"source": "JGCRI/pygis",
"score": 2
}
|
#### File: JGCRI/pygis/setup.py
```python
import sys
class VersionError(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
try:
from setuptools import setup, find_packages
except ImportError:
raise("Must have setuptools installed to run setup.py. Please install and try again.")
def readme():
with open('README.md') as f:
return f.read()
def get_requirements():
with open('requirements.txt') as f:
return f.read().split()
setup(
name='pygis',
version='0.1.0',
packages=find_packages(),
url='https://github.com/JGCRI/pygis.git',
license='BSD 2-Clause',
author='<NAME>',
author_email='<EMAIL>',
description='Python-based Geographic Information System (GIS) utilities',
long_description=readme(),
install_requires=get_requirements()
)
```
|
{
"source": "JGCRI/pypackageutils",
"score": 3
}
|
#### File: pypackageutils/tests/test_install_supplement.py
```python
import os
import pkg_resources
import tempfile
import unittest
import pandas as pd
from pypackageutils.install_supplement import InstallSupplement
class TestInstallSupplement(unittest.TestCase):
# test info from Zenodo
REMOTE_DATA_DIR = 'test'
REMOTE_DATA_FILE = 'test_no-header.csv'
# comparison datasets
COMP_CSV = pkg_resources.resource_filename('pypackageutils', 'tests/data/comp_data/test_no-header.csv')
def test_fetch_and_unpack(self):
# create a temporary directory to hold the outputs
with tempfile.TemporaryDirectory() as dirpath:
# instantiate class
sup = InstallSupplement(dirpath)
# fetch and unzip data to tempdir
sup.fetch_unpack_data()
# create path to unpacked file
test_file = os.path.join(dirpath, TestInstallSupplement.REMOTE_DATA_DIR, TestInstallSupplement.REMOTE_DATA_FILE)
# test file to data frame
df_test = pd.read_csv(test_file)
# comparison file to data frame
df_comp = pd.read_csv(TestInstallSupplement.COMP_CSV)
# compare for equality
pd.testing.assert_frame_equal(df_comp, df_test)
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "JGCRI/stitches",
"score": 2
}
|
#### File: stitches/modules/paper_experiment_miniCMIP.py
```python
import pandas as pd
import numpy as np
import stitches as stitches
import pkg_resources
import os
from pathlib import Path
# pd.set_option('display.max_columns', None)
# OUTPUT_DIR = pkg_resources.resource_filename('stitches', 'data/created_data')
OUTPUT_DIR = '/pic/projects/GCAM/stitches_pic/paper1_outputs'
# #############################################################################
# Experiment setup
# #############################################################################
# experiment parameters
tolerances = [0.075] # np.round(np.arange(0.07, 0.225, 0.005), 3)
Ndraws = 1
error_threshold = 0.1
# pangeo table of ESMs for reference
pangeo_path = pkg_resources.resource_filename('stitches', 'data/pangeo_table.csv')
pangeo_data = pd.read_csv(pangeo_path)
pangeo_data = pangeo_data[((pangeo_data['variable'] == 'tas') | (pangeo_data['variable'] == 'pr') | (pangeo_data['variable'] == 'psl'))
& ((pangeo_data['domain'] == 'Amon') | (pangeo_data['domain'] == 'day')) ].copy()
# Keep only the runs that have data for all vars X all timesteps:
pangeo_good_ensembles =[]
for name, group in pangeo_data.groupby(['model', 'experiment', 'ensemble']):
df = group.drop_duplicates().copy()
if len(df) == 6:
pangeo_good_ensembles.append(df)
del(df)
pangeo_good_ensembles = pd.concat(pangeo_good_ensembles)
pangeo_good_ensembles = pangeo_good_ensembles[['model', 'experiment', 'ensemble']].drop_duplicates().copy()
pangeo_good_ensembles = pangeo_good_ensembles.reset_index(drop=True).copy()
# won't use idealized runs
pangeo_good_ensembles = pangeo_good_ensembles[~((pangeo_good_ensembles['experiment'] == '1pctCO2') |
(pangeo_good_ensembles['experiment'] == 'abrupt-4xCO2')) ].reset_index(drop=True).copy()
esms = ['CMCC-CM2-SR5', 'NorESM2-MM']
# ['ACCESS-CM2', 'ACCESS-ESM1-5', 'AWI-CM-1-1-MR', 'BCC-CSM2-MR',
# 'BCC-ESM1', 'CESM2', 'CESM2-FV2', 'CESM2-WACCM', 'CMCC-CM2-HR4',
# 'CMCC-CM2-SR5', 'CMCC-ESM2', 'CanESM5', 'HadGEM3-GC31-LL',
# 'HadGEM3-GC31-MM', 'IITM-ESM', 'MIROC-ES2L', 'MIROC6',
# 'MPI-ESM-1-2-HAM', 'MPI-ESM1-2-HR', 'MPI-ESM1-2-LR', 'MRI-ESM2-0',
# 'NorESM2-LM', 'NorESM2-MM', 'SAM0-UNICON', 'TaiESM1',
# 'UKESM1-0-LL']
# #############################################################################
# Load full archive and target data
# #############################################################################
# Load the full archive of all staggered windows, which we will be matching on
full_archive_path = pkg_resources.resource_filename('stitches', 'data/matching_archive.csv')
full_archive_data = pd.read_csv(full_archive_path)
# Keep only the entries that appeared in pangeo_good_ensembles:
keys =['model', 'experiment', 'ensemble']
i1 = full_archive_data.set_index(keys).index
i2 = pangeo_good_ensembles.set_index(keys).index
full_archive_data= full_archive_data[i1.isin(i2)].copy()
del(i1)
del(i2)
# Load the original archive without staggered windows, which we will draw
# the target trajectories from for matching
full_target_path = pkg_resources.resource_filename('stitches', 'data/matching_archive.csv')
full_target_data = pd.read_csv(full_target_path)
# Keep only the entries that appeared in pangeo_good_ensembles:
keys =['model', 'experiment', 'ensemble']
i1 = full_target_data.set_index(keys).index
i2 = pangeo_good_ensembles.set_index(keys).index
full_target_data = full_target_data[i1.isin(i2)].copy()
del(i1)
del(i2)
del(keys)
# #############################################################################
# Some helper functions
# #############################################################################
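# prep_target_data: drop any target ensemble member whose time series does not
# extend to at least 2099, so only complete trajectories are used as targets.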
def prep_target_data(target_df):
if not target_df.empty:
grped = target_df.groupby(['experiment', 'variable', 'ensemble', 'model'])
for name, group in grped:
df1 = group.copy()
# if it isn't a complete time series (defined as going to 2099 or 2100),
# remove it from the target data frame:
if max(df1.end_yr) < 2099:
target_df = target_df.loc[(target_df['ensemble'] != df1.ensemble.unique()[0])].copy().reset_index(
drop=True)
del (df1)
del (grped)
target_df = target_df.reset_index(drop=True).copy()
return(target_df)
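# get_orig_data: pull the raw annual tas series shipped with the package
# (data/tas-data/<ESM>_tas.csv) for the same model, experiment, and ensembles
# as the target data frame, for later comparison against stitched output.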
def get_orig_data(target_df):
if not target_df.empty:
esm_name = target_df.model.unique()[0]
scn_name = target_df.experiment.unique()[0]
full_rawtarget_path = pkg_resources.resource_filename('stitches', ('data/tas-data/' + esm_name + '_tas.csv'))
full_rawtarget_data = pd.read_csv(full_rawtarget_path)
orig_data = full_rawtarget_data[(full_rawtarget_data['experiment'] == scn_name)].copy()
keys = ['experiment', 'ensemble', 'model']
i1 = orig_data.set_index(keys).index
i2 = target_df.set_index(keys).index
orig_data = orig_data[i1.isin(i2)].copy()
del (i1)
del (i2)
del (keys)
del (full_rawtarget_data)
del (full_rawtarget_path)
orig_data = orig_data.reset_index(drop=True).copy()
return (orig_data)
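# get_orig_netcdfs: for every ensemble in the target, fetch the original monthly
# netcdf files (tas plus the listed non-tas variables, for both the target
# scenario and historical) from Pangeo and write them to DIR.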
def get_orig_netcdfs(target_df, res, non_tas_vars, pt, DIR):
mod = target_df['model'].unique()[0]
exper = target_df['experiment'].unique()[0]
ens = target_df['ensemble'].unique()
if res=='mon':
res = 'Amon'
p = pt[(pt['model'] == mod) & (pt['experiment'] == exper) & (pt['domain'] == res)].copy()
p = p[p['ensemble'].isin(ens)].copy()
p_hist = pt[(pt['model'] == mod) & (pt['experiment'] == 'historical') & (pt['domain'] == res)].copy()
p_hist = p_hist[p_hist['ensemble'].isin(ens)].copy()
p = p.append(p_hist).reset_index(drop=True).copy()
non_tas_vars.append('tas')
for v in non_tas_vars:
filelist = p[p['variable']==v].zstore
for f in filelist:
ds = stitches.fx_pangeo.fetch_nc(f)
f1 = p[(p['variable']==v) & (p['zstore'] == f)].reset_index(drop=True).copy()
ds.to_netcdf((DIR + "/" + v + "_" + res + "_" + mod + "_" + str(f1.experiment[0]) + "_" + str(f1.ensemble[0]) + "_orig.nc"))
# end get orig_netcdfs fcn
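# get_jumps: compute the year-to-year change ("jump") in value for each
# (variable, experiment, ensemble, model) series and return it in place of value.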
def get_jumps(tgav_df):
tgav_jump = []
for name, group in tgav_df.groupby(['variable', 'experiment', 'ensemble', 'model']):
ds = group.copy()
ds['jump'] = ds.value.diff().copy()
ds = ds.dropna().copy()
tgav_jump.append(ds)
del (ds)
tgav_jump = pd.concat(tgav_jump)
tgav_jump = tgav_jump.drop(columns=['value']).copy()
tgav_jump = tgav_jump.drop_duplicates().reset_index(drop=True).copy()
return(tgav_jump)
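# four_errors: compare generated (stitched) series against the original ESM output.
# For both the raw values and their year-to-year jumps it computes
# E1 = |mean_orig - mean_gen| / sd_orig and E2 = sd_gen / sd_orig, and reports
# max_metric, the worst of E1_tgav, |1 - E2_tgav|, E1_jump and |1 - E2_jump|.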
def four_errors(gen_data, orig_data):
gen_data_jump = get_jumps(gen_data)
orig_data_jump = get_jumps(orig_data)
orig_stats = []
for name, group in orig_data.groupby(['model', 'variable', 'experiment']):
ds = group.copy()
ds1 = ds[['model', 'variable', 'experiment']].drop_duplicates().copy()
ds1['mean_orig_tgav'] = np.mean(ds.value.values)
ds1['sd_orig_tgav'] = np.std(ds.value.values)
orig_stats.append(ds1)
del (ds)
del (ds1)
orig_stats = pd.concat(orig_stats).reset_index(drop=True).copy()
orig_stats_jump = []
for name, group in orig_data_jump.groupby(['model', 'variable', 'experiment']):
ds = group.copy()
ds1 = ds[['model', 'variable', 'experiment']].drop_duplicates().copy()
ds1['mean_orig_jump'] = np.mean(ds.jump.values)
ds1['sd_orig_jump'] = np.std(ds.jump.values)
orig_stats_jump.append(ds1)
del (ds)
del (ds1)
orig_stats_jump = pd.concat(orig_stats_jump).reset_index(drop=True).copy()
orig_stats = orig_stats.merge(orig_stats_jump, how='left', on=['model', 'variable', 'experiment']).copy()
del (orig_stats_jump)
gen_stats = []
for name, group in gen_data.groupby(['model', 'variable', 'experiment', 'tolerance', 'draw', 'archive']):
ds = group.copy()
ds1 = ds[['model', 'variable', 'experiment', 'tolerance', 'draw', 'archive']].drop_duplicates().copy()
ds1['mean_gen_tgav'] = np.mean(ds.value.values)
ds1['sd_gen_tgav'] = np.std(ds.value.values)
gen_stats.append(ds1)
del (ds)
del (ds1)
gen_stats = pd.concat(gen_stats).reset_index(drop=True).copy()
gen_stats_jump = []
for name, group in gen_data_jump.groupby(['model', 'variable', 'experiment', 'tolerance', 'draw', 'archive']):
ds = group.copy()
ds1 = ds[['model', 'variable', 'experiment', 'tolerance', 'draw', 'archive']].drop_duplicates().copy()
ds1['mean_gen_jump'] = np.mean(ds.jump.values)
ds1['sd_gen_jump'] = np.std(ds.jump.values)
gen_stats_jump.append(ds1)
del (ds)
del (ds1)
gen_stats_jump = pd.concat(gen_stats_jump).reset_index(drop=True).copy()
gen_stats = gen_stats.merge(gen_stats_jump, how='left',
on=['model', 'variable', 'experiment', 'tolerance', 'draw', 'archive']).copy()
del (gen_stats_jump)
compare = gen_stats.merge(orig_stats, how='left', on=['model', 'variable', 'experiment']).copy()
del (gen_stats)
del (orig_stats)
compare['E1_tgav'] = abs(compare.mean_orig_tgav - compare.mean_gen_tgav) / compare.sd_orig_tgav
compare['E2_tgav'] = compare.sd_gen_tgav / compare.sd_orig_tgav
compare['E1_jump'] = abs(compare.mean_orig_jump - compare.mean_gen_jump) / compare.sd_orig_jump
compare['E2_jump'] = compare.sd_gen_jump / compare.sd_orig_jump
compare = compare[['model', 'variable', 'experiment', 'tolerance', 'draw', 'archive',
'E1_tgav', 'E2_tgav', 'E1_jump', 'E2_jump']].copy()
four_values = []
for name, group in compare.groupby(['model', 'variable', 'experiment', 'tolerance', 'draw', 'archive']):
ds = group.copy()
ds['max_metric'] = np.max(
[ds.E1_tgav.values, abs(1 - ds.E2_tgav.values), ds.E1_jump.values, abs(1 - ds.E2_jump.values)])
four_values.append(ds)
del (ds)
four_values = pd.concat(four_values).reset_index(drop=True).copy()
del (compare)
return(four_values)
# #############################################################################
# The experiment
# #############################################################################
# for each of the esms in the experiment, subset to what we want
# to work with and run the experiment.
for esm in esms:
print(esm)
# subset the archive and the targets to this ESM
archive_w_all = full_archive_data[(full_archive_data['model'] == esm)].copy()
target_245 = full_target_data[(full_target_data['model'] == esm) &
(full_target_data['experiment'] == 'ssp245')].copy()
target_370 = full_target_data[(full_target_data['model'] == esm) &
(full_target_data['experiment'] == 'ssp370')].copy()
# Clean up target data and pull corresponding original/raw data
if not target_245.empty:
# clean up
target_245 = prep_target_data(target_245).copy()
# and pull corresponding original/raw data for later comparison
orig_245 = get_orig_data(target_245).copy()
get_orig_netcdfs(target_df=target_245,
res='mon',
non_tas_vars=['pr', 'psl'],
pt=pangeo_data,
DIR=(OUTPUT_DIR + "/" + esm + "/high_freq"))
if not target_370.empty:
# clean up
target_370 = prep_target_data(target_370).copy()
# and pull corresponding original/raw data for later comparison
orig_370 = get_orig_data(target_370).copy()
get_orig_netcdfs(target_df=target_370,
res='mon',
non_tas_vars=['pr', 'psl'],
pt=pangeo_data,
DIR=(OUTPUT_DIR + "/" + esm + "/high_freq"))
# loop over tolerances:
for tolerance in tolerances:
rp_245 = stitches.make_recipe(target_data=target_245,
archive_data=archive_w_all,
N_matches=20000,
res="mon",
tol=tolerance,
non_tas_variables=["pr", "psl"])
rp_245.to_csv((OUTPUT_DIR + "/" + esm + "/high_freq/recipes_for_target_" + esm + '_ssp245.csv'), index=False)
out_245 = stitches.gridded_stitching((OUTPUT_DIR + "/" + esm + "/high_freq"), rp_245)
rp_370 = stitches.make_recipe(target_data=target_370,
archive_data=archive_w_all,
N_matches=20000,
res="mon",
tol=tolerance,
non_tas_variables=["pr", "psl"])
rp_370.to_csv((OUTPUT_DIR + "/" + esm + "/high_freq/recipes_for_target_" + esm +'_ssp370.csv' ), index=False)
out_370 = stitches.gridded_stitching((OUTPUT_DIR + "/" + esm + "/high_freq"), rp_370)
# end for loop over ESMs
```
#### File: stitches/modules/paper_experiment_tolerance_sweep.py
```python
import pandas as pd
import numpy as np
import stitches as stitches
import pkg_resources
import os
from pathlib import Path
pd.set_option('display.max_columns', None)
OUTPUT_DIR = pkg_resources.resource_filename('stitches', 'data/created_data')
# OUTPUT_DIR = '/pic/projects/GCAM/stitches_pic/paper1_outputs'
# #############################################################################
# Experiment setup
# #############################################################################
# experiment parameters
tolerances = np.round(np.arange(0.05, 0.225, 0.005), 3)
Ndraws =20
error_threshold = 0.1
# pangeo table of ESMs for reference
pangeo_path = pkg_resources.resource_filename('stitches', 'data/pangeo_table.csv')
pangeo_data = pd.read_csv(pangeo_path)
pangeo_data = pangeo_data[((pangeo_data['variable'] == 'tas') | (pangeo_data['variable'] == 'pr') | (pangeo_data['variable'] == 'psl'))
& ((pangeo_data['domain'] == 'Amon') ) ].copy()
# Keep only the runs that have data for all vars X all timesteps:
pangeo_good_ensembles =[]
for name, group in pangeo_data.groupby(['model', 'experiment', 'ensemble']):
df = group.drop_duplicates().copy()
if len(df) == 3:
pangeo_good_ensembles.append(df)
del(df)
pangeo_good_ensembles = pd.concat(pangeo_good_ensembles)
pangeo_good_ensembles = pangeo_good_ensembles[['model', 'experiment', 'ensemble']].drop_duplicates().copy()
pangeo_good_ensembles = pangeo_good_ensembles.reset_index(drop=True).copy()
# won't use idealized runs
pangeo_good_ensembles = pangeo_good_ensembles[~((pangeo_good_ensembles['experiment'] == '1pctCO2') |
(pangeo_good_ensembles['experiment'] == 'abrupt-4xCO2')|
(pangeo_good_ensembles['experiment'] == 'ssp534-over')) ].reset_index(drop=True).copy()
# #############################################################################
# Load full archive and target data
# #############################################################################
# Load the full archive of all staggered windows, which we will be matching on
full_archive_path = pkg_resources.resource_filename('stitches', 'data/matching_archive.csv')
full_archive_data = pd.read_csv(full_archive_path)
# Keep only the entries that appeared in pangeo_good_ensembles:
keys =['model', 'experiment', 'ensemble']
i1 = full_archive_data.set_index(keys).index
i2 = pangeo_good_ensembles.set_index(keys).index
full_archive_data= full_archive_data[i1.isin(i2)].copy()
del(i1)
del(i2)
# get list of ESMs that are both pangeo good ensembles and in archive
df1 = full_archive_data[['model', 'experiment', 'ensemble']].drop_duplicates()
d = pd.merge(df1, pangeo_good_ensembles.drop_duplicates(), how = 'inner')
esms = d.model.unique().copy()
del(df1)
del(d)
# Load the original archive without staggered windows, which we will draw
# the target trajectories from for matching
full_target_path = pkg_resources.resource_filename('stitches', 'data/matching_archive.csv')
full_target_data = pd.read_csv(full_target_path)
# Keep only the entries that appeared in pangeo_good_ensembles:
keys =['model', 'experiment', 'ensemble']
i1 = full_target_data.set_index(keys).index
i2 = pangeo_good_ensembles.set_index(keys).index
full_target_data = full_target_data[i1.isin(i2)].copy()
del(i1)
del(i2)
del(keys)
# Make sure no 2012-2014 windows got through
# TODO won't work with staggering??
full_target_data['window_size'] = full_target_data['end_yr'] - full_target_data['start_yr']
full_target_data = full_target_data[full_target_data['window_size'] >=7].drop(columns=['window_size']).copy()
full_archive_data['window_size'] = full_archive_data['end_yr'] - full_archive_data['start_yr']
full_archive_data = full_archive_data[full_archive_data['window_size'] >=7].drop(columns=['window_size']).copy()
# #############################################################################
# Some helper functions
# #############################################################################
def prep_target_data(target_df):
if not target_df.empty:
grped = target_df.groupby(['experiment', 'variable', 'ensemble', 'model'])
for name, group in grped:
df1 = group.copy()
# if it isn't a complete time series (defined as going to 2099 or 2100),
# remove it from the target data frame:
if max(df1.end_yr) < 2099:
target_df = target_df.loc[(target_df['ensemble'] != df1.ensemble.unique()[0])].copy().reset_index(
drop=True)
del (df1)
del (grped)
target_df = target_df.reset_index(drop=True).copy()
return(target_df)
def get_orig_data(target_df):
if not target_df.empty:
esm_name = target_df.model.unique()[0]
scn_name = target_df.experiment.unique()[0]
full_rawtarget_path = pkg_resources.resource_filename('stitches', ('data/tas-data/' + esm_name + '_tas.csv'))
full_rawtarget_data = pd.read_csv(full_rawtarget_path)
orig_data = full_rawtarget_data[(full_rawtarget_data['experiment'] == scn_name)].copy()
keys = ['experiment', 'ensemble', 'model']
i1 = orig_data.set_index(keys).index
i2 = target_df.set_index(keys).index
orig_data = orig_data[i1.isin(i2)].copy()
del (i1)
del (i2)
del (keys)
del (full_rawtarget_data)
del (full_rawtarget_path)
orig_data = orig_data.reset_index(drop=True).copy()
return (orig_data)
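# match_draw_stitchTgav: match the target trajectory to the archive within the
# given tolerance, draw num_draws permuted recipes, write each gridded recipe to
# CSV, and stitch and save the corresponding GSAT series, one CSV per stitching_id.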
def match_draw_stitchTgav(target_df, archive_df, toler, num_draws, TGAV_OUTPUT_DIR, reproducible):
esm_name = archive_df.model.unique()[0]
if not target_df.empty:
# Use the match_neighborhood function to generate all of the matches between the target and
# archive data points.
match_df = stitches.match_neighborhood(target_df, archive_df, tol=toler)
scn_name = target_df.experiment.unique()[0]
if ((not ('ssp245' in archive_df.experiment.unique())) & (not ('ssp370' in archive_df.experiment.unique()))):
archive_id = 'scenarioMIP'
elif scn_name in archive_df.experiment.unique():
archive_id = 'w_target'
else:
archive_id = 'wo_target'
for draw in range(0, num_draws):
# Do the random draw of recipes
if reproducible:
unformatted_recipe = stitches.permute_stitching_recipes(N_matches=10000,
matched_data=match_df,
archive=archive_df,
testing=True)
else:
unformatted_recipe = stitches.permute_stitching_recipes(N_matches=10000,
matched_data=match_df,
archive=archive_df,
testing=False)
new_ids = ('tol' + str(toler) + '~draw' + str(draw) + '~' + archive_id + '~'+
unformatted_recipe['stitching_id'].astype(str)).copy()
unformatted_recipe = unformatted_recipe.drop(columns=['stitching_id']).copy()
unformatted_recipe['stitching_id'] = new_ids
del (new_ids)
# format the recipe
recipe = stitches.generate_gridded_recipe(unformatted_recipe)
recipe.columns = ['target_start_yr', 'target_end_yr', 'archive_experiment', 'archive_variable',
'archive_model', 'archive_ensemble', 'stitching_id', 'archive_start_yr',
'archive_end_yr', 'tas_file']
recipe['tolerance'] = toler
recipe['draw'] = draw
recipe['archive'] = archive_id
recipe.to_csv((OUTPUT_DIR + '/' + esm_name + '/experiment_CMIP6/' +
'gridded_recipes_' + esm_name + '_target_' + scn_name +
'_tol' + str(toler) +
'_draw' + str(draw) +
'_archive_' + archive_id + '.csv'), index=False)
del (unformatted_recipe)
# stitch the GSAT values and save as csv
try:
gsat = stitches.gmat_stitching(recipe)
gsat['tolerance'] = toler
gsat['draw'] = draw
gsat['archive'] = archive_id
for id in gsat.stitching_id.unique():
ds = gsat[gsat['stitching_id'] == id].copy()
fname = (TGAV_OUTPUT_DIR +
'stitched_' + esm_name + '_GSAT_' + id + '.csv')
ds.to_csv(fname, index=False)
del (ds)
del (gsat)
except:
print(("Some issue stitching GMAT for " + esm_name + ". Skipping and moving on"))
else:
recipe = []
print('Some missing target data for ' + esm_name + '. Analysis will be skipped')
return(recipe)
def get_jumps(tgav_df):
tgav_jump = []
for name, group in tgav_df.groupby(['variable', 'experiment', 'ensemble', 'model']):
ds = group.copy()
ds['jump'] = ds.value.diff().copy()
ds = ds.dropna().copy()
tgav_jump.append(ds)
del (ds)
tgav_jump = pd.concat(tgav_jump)
tgav_jump = tgav_jump.drop(columns=['value']).copy()
tgav_jump = tgav_jump.drop_duplicates().reset_index(drop=True).copy()
return(tgav_jump)
def four_errors(gen_data, orig_data):
gen_data_jump = get_jumps(gen_data)
orig_data_jump = get_jumps(orig_data)
orig_stats = []
for name, group in orig_data.groupby(['model', 'variable', 'experiment']):
ds = group.copy()
ds1 = ds[['model', 'variable', 'experiment']].drop_duplicates().copy()
ds1['mean_orig_tgav'] = np.mean(ds.value.values)
ds1['sd_orig_tgav'] = np.std(ds.value.values)
orig_stats.append(ds1)
del (ds)
del (ds1)
orig_stats = pd.concat(orig_stats).reset_index(drop=True).copy()
orig_stats_jump = []
for name, group in orig_data_jump.groupby(['model', 'variable', 'experiment']):
ds = group.copy()
ds1 = ds[['model', 'variable', 'experiment']].drop_duplicates().copy()
ds1['mean_orig_jump'] = np.mean(ds.jump.values)
ds1['sd_orig_jump'] = np.std(ds.jump.values)
orig_stats_jump.append(ds1)
del (ds)
del (ds1)
orig_stats_jump = pd.concat(orig_stats_jump).reset_index(drop=True).copy()
orig_stats = orig_stats.merge(orig_stats_jump, how='left', on=['model', 'variable', 'experiment']).copy()
del (orig_stats_jump)
gen_stats = []
for name, group in gen_data.groupby(['model', 'variable', 'experiment', 'tolerance', 'draw', 'archive']):
ds = group.copy()
ds1 = ds[['model', 'variable', 'experiment', 'tolerance', 'draw', 'archive']].drop_duplicates().copy()
ds1['mean_gen_tgav'] = np.mean(ds.value.values)
ds1['sd_gen_tgav'] = np.std(ds.value.values)
gen_stats.append(ds1)
del (ds)
del (ds1)
gen_stats = pd.concat(gen_stats).reset_index(drop=True).copy()
gen_stats_jump = []
for name, group in gen_data_jump.groupby(['model', 'variable', 'experiment', 'tolerance', 'draw', 'archive']):
ds = group.copy()
ds1 = ds[['model', 'variable', 'experiment', 'tolerance', 'draw', 'archive']].drop_duplicates().copy()
ds1['mean_gen_jump'] = np.mean(ds.jump.values)
ds1['sd_gen_jump'] = np.std(ds.jump.values)
gen_stats_jump.append(ds1)
del (ds)
del (ds1)
gen_stats_jump = pd.concat(gen_stats_jump).reset_index(drop=True).copy()
gen_stats = gen_stats.merge(gen_stats_jump, how='left',
on=['model', 'variable', 'experiment', 'tolerance', 'draw', 'archive']).copy()
del (gen_stats_jump)
compare = gen_stats.merge(orig_stats, how='left', on=['model', 'variable', 'experiment']).copy()
del (gen_stats)
del (orig_stats)
compare['E1_tgav'] = abs(compare.mean_orig_tgav - compare.mean_gen_tgav) / compare.sd_orig_tgav
compare['E2_tgav'] = compare.sd_gen_tgav / compare.sd_orig_tgav
compare['E1_jump'] = abs(compare.mean_orig_jump - compare.mean_gen_jump) / compare.sd_orig_jump
compare['E2_jump'] = compare.sd_gen_jump / compare.sd_orig_jump
compare = compare[['model', 'variable', 'experiment', 'tolerance', 'draw', 'archive',
'E1_tgav', 'E2_tgav', 'E1_jump', 'E2_jump']].copy()
four_values = []
for name, group in compare.groupby(['model', 'variable', 'experiment', 'tolerance', 'draw', 'archive']):
ds = group.copy()
ds['max_metric'] = np.max(
[ds.E1_tgav.values, abs(1 - ds.E2_tgav.values), ds.E1_jump.values, abs(1 - ds.E2_jump.values)])
four_values.append(ds)
del (ds)
four_values = pd.concat(four_values).reset_index(drop=True).copy()
del (compare)
return(four_values)
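# For reference, the error metrics assembled in four_errors (per model/variable/experiment/
# tolerance/draw/archive group) are:
#   E1_tgav = |mean_orig_tgav - mean_gen_tgav| / sd_orig_tgav
#   E2_tgav = sd_gen_tgav / sd_orig_tgav
# with E1_jump and E2_jump defined the same way on the year-to-year jumps, and
#   max_metric = max(E1_tgav, |1 - E2_tgav|, E1_jump, |1 - E2_jump|)
# so a generated ensemble that reproduces the original has E1 near 0, E2 near 1 and a
# small max_metric.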
def match_draw_stitch_evalTgav(target_df, archive_df, toler, num_draws, ERR_OUTPUT_DIR, reproducible):
esm_name = archive_df.model.unique()[0]
if not target_df.empty:
# Use the match_neighborhood function to generate all of the matches between the target and
# archive data points.
match_df = stitches.match_neighborhood(target_df, archive_df, tol=toler)
scn_name = target_df.experiment.unique()[0]
# get corresponding original data to the target
orig_df = get_orig_data(target_df).copy()
if ((not ('ssp245' in archive_df.experiment.unique())) & (not ('ssp370' in archive_df.experiment.unique()))):
archive_id = 'scenarioMIP'
elif scn_name in archive_df.experiment.unique():
archive_id = 'w_target'
else:
archive_id = 'wo_target'
compared = []
for draw in range(0, num_draws):
# Do the random draw of recipes
if reproducible:
unformatted_recipe = stitches.permute_stitching_recipes(N_matches=10000,
matched_data=match_df,
archive=archive_df,
testing=True)
else:
unformatted_recipe = stitches.permute_stitching_recipes(N_matches=10000,
matched_data=match_df,
archive=archive_df,
testing=False)
new_ids = ('tol' + str(toler) + '~draw' + str(draw) + '~' + archive_id + '~'+
unformatted_recipe['stitching_id'].astype(str)).copy()
unformatted_recipe = unformatted_recipe.drop(columns=['stitching_id']).copy()
unformatted_recipe['stitching_id'] = new_ids
del (new_ids)
# format the recipe
recipe = stitches.generate_gridded_recipe(unformatted_recipe)
recipe.columns = ['target_start_yr', 'target_end_yr', 'archive_experiment', 'archive_variable',
'archive_model', 'archive_ensemble', 'stitching_id', 'archive_start_yr',
'archive_end_yr', 'tas_file']
recipe['tolerance'] = toler
recipe['draw'] = draw
recipe['archive'] = archive_id
recipe.to_csv((ERR_OUTPUT_DIR + '/' + esm_name + '/gridded_recipes_' + esm_name + '_target_' + scn_name +
'_tol' + str(toler) +
'_draw' + str(draw) +
'_archive_' + archive_id + '.csv'), index=False)
del (unformatted_recipe)
# stitch the GSAT values and save as csv
gsat = stitches.gmat_stitching(recipe)
gen_ens_size = len(gsat.stitching_id.unique())
gsat = gsat.rename(columns={'stitching_id': 'ensemble'}).copy()
gsat['tolerance'] = toler
gsat['draw'] = draw
gsat['archive'] = archive_id
gsat['experiment'] = scn_name
gsat['model'] = esm_name
compared_ds = four_errors(gen_data=gsat, orig_data=orig_df)
compared_ds['gen_ens_size'] = gen_ens_size
compared.append(compared_ds)
del (gsat)
compared = pd.concat(compared).reset_index(drop=True).copy()
fname = (ERR_OUTPUT_DIR + esm_name + '/all_metrics_' + esm_name + '_target_' + scn_name + '_tol' + str(toler) + '_alldraws_archive_' + archive_id + '.csv')
print(fname)
compared.to_csv(fname, index=False)
else:
recipe = []
print('Some missing target data for ' + esm_name + '. Analysis will be skipped')
return(recipe)
# #############################################################################
# The experiment
# #############################################################################
# issues with AWI, CESM2, hadgem mm, 'IITM-ESM', MPI-ESM-1-2-HAM,
# noresm lm for tol=0.055 (0.05 seems to work?)
#esms = esms[[0, 2, 4, 5, 6, 7, 8, 11, 13, 14, 15, 17, 18, 19]].copy()
esms = esms[[0,1, 2, 3,4, 5, 7, 8, 9,10, 11, 12, 13, 15, 16, 17, 18, 19, 20, 22, 23,24,25,26,27, 28, 29]].copy()
# for each of the esms in the experiment, subset to what we want
# to work with and run the experiment.
for esm in esms:
print(esm)
# subset the archive and the targets to this ESM
archive_w_all = full_archive_data[(full_archive_data['model'] == esm)].copy()
archive_wo245 = full_archive_data[(full_archive_data['model'] == esm) &
(full_archive_data['experiment'] != 'ssp245')].copy()
archive_wo370 = full_archive_data[(full_archive_data['model'] == esm) &
(full_archive_data['experiment'] != 'ssp370')].copy()
archive_scenMIP = full_archive_data[(full_archive_data['model'] == esm) &
((full_archive_data['experiment'] == 'ssp126') |
(full_archive_data['experiment'] == 'ssp585'))].copy()
target_245 = full_target_data[(full_target_data['model'] == esm) &
(full_target_data['experiment'] == 'ssp245')].copy()
target_370 = full_target_data[(full_target_data['model'] == esm) &
(full_target_data['experiment'] == 'ssp370')].copy()
# Clean up target data and pull corresponding original/raw data
if not target_245.empty:
# clean up
target_245 = prep_target_data(target_245).copy()
if not target_370.empty:
# clean up
target_370 = prep_target_data(target_370).copy()
# loop over tolerances:
for tolerance in tolerances:
rp_245_w = match_draw_stitch_evalTgav(target_245, archive_w_all,
toler=tolerance, num_draws=Ndraws,
ERR_OUTPUT_DIR=(OUTPUT_DIR +'/tolerance_sweeps/all_draws/'),
reproducible=False)
# rp_245_wo = match_draw_stitch_evalTgav(target_245, archive_wo245,
# toler=tolerance, num_draws=Ndraws,
# ERR_OUTPUT_DIR=(OUTPUT_DIR + '/tolerance_sweeps/all_draws/'),
# reproducible=False)
rp_370_w = match_draw_stitch_evalTgav(target_370, archive_w_all,
toler=tolerance, num_draws=Ndraws,
ERR_OUTPUT_DIR=(OUTPUT_DIR +'/tolerance_sweeps/all_draws/'),
reproducible=False)
# rp_370_wo = match_draw_stitch_evalTgav(target_370, archive_wo370,
# toler=tolerance, num_draws=Ndraws,
# ERR_OUTPUT_DIR=(OUTPUT_DIR +'/tolerance_sweeps/all_draws/'),
# reproducible=False)
rp_245_scenMIP = match_draw_stitch_evalTgav(target_245, archive_scenMIP,
toler=tolerance, num_draws=Ndraws,
ERR_OUTPUT_DIR=(OUTPUT_DIR +'/tolerance_sweeps/all_draws/'),
reproducible=False)
rp_370_scenMIP = match_draw_stitch_evalTgav(target_370, archive_scenMIP,
toler=tolerance, num_draws=Ndraws,
ERR_OUTPUT_DIR=(OUTPUT_DIR +'/tolerance_sweeps/all_draws/'),
reproducible=False)
# End for loop over tolerances
#########################################################
# Read in all generated GSAT files and format so error metrics can
# be calculated.
compared_data = []
entries = Path((OUTPUT_DIR + '/tolerance_sweeps/all_draws/' + esm + '/'))
for entry in entries.iterdir():
if (('all_metrics' in entry.name) & (esm in entry.name) ):
print(entry.name)
data = pd.read_csv((OUTPUT_DIR + '/tolerance_sweeps/all_draws/'+ esm + '/') + entry.name)
compared_data.append(data)
del (data)
if len(compared_data) > 0:
compared_data = pd.concat(compared_data).reset_index(drop=True).copy()
# average over draws
aggregate_metrics = []
for name, group in compared_data.groupby(['model', 'variable', 'experiment', 'tolerance', 'archive']):
ds = group.copy()
ds1 = ds[['model', 'variable', 'experiment', 'tolerance', 'archive']].drop_duplicates().copy()
ds1['aggregate_E1_tgav'] = np.mean(ds.E1_tgav.values)
ds1['aggregate_E2_tgav'] = np.mean(ds.E2_tgav.values)
ds1['aggregate_E1_jump'] = np.mean(ds.E1_jump.values)
ds1['aggregate_E2_jump'] = np.mean(ds.E2_jump.values)
ds1['max_metric'] = np.max([ds1.aggregate_E1_tgav.values,
abs(1 - ds1.aggregate_E2_tgav.values),
ds1.aggregate_E1_jump.values,
abs(1 - ds1.aggregate_E2_jump.values)])
ds1['min_gen_ens_size'] = np.min(ds.gen_ens_size)
ds1['mean_gen_ens_size'] = np.mean(ds.gen_ens_size)
ds1['max_gen_ens_size'] = np.max(ds.gen_ens_size)
aggregate_metrics.append(ds1)
del (ds)
del (ds1)
aggregate_metrics = pd.concat(aggregate_metrics).reset_index(drop=True).copy()
aggregate_metrics.to_csv((OUTPUT_DIR + '/tolerance_sweeps/aggregate_metrics_' + esm + '.csv'), index=False)
# end for loop over ESMs
```
#### File: JGCRI/stitches/setup.py
```python
from setuptools import setup, find_packages
def readme():
with open('README.md') as f:
return f.read()
def get_requirements():
with open('requirements.txt') as f:
return f.read().split()
setup(
name='stitches',
version='0.1.0',
packages=find_packages(),
url='https://github.com/JGCRI/stitches',
license='BSD 2-Clause',
author='',
author_email='',
description='Amalgamate existing climate data to create monthly climate variable fields',
long_description=readme(),
python_requires='>=3.6.0',
include_package_data=True,
install_requires=get_requirements()
)
```
#### File: stitches/stitches/fx_stitch.py
```python
import numpy as np
import pandas as pd
import pkg_resources
import xarray as xr
import os as os
import stitches.fx_util as util
import stitches.fx_data as data
import stitches.fx_pangeo as pangeo
def find_zfiles(rp):
""" Determine which cmip files must be downlaoded from pangeo.
:param rp: data frame of the recepies
:return: numpy.ndarray array of the gs:// files to pull from pangeo
"""
# Figure out which columns contain the string file
flat_list = rp.filter(regex='file', axis=1).values.flatten()
unique_list = np.unique(flat_list)
return unique_list
def find_var_cols(x):
""" Determine which variables that are going to be downloaded.
:param x: pandas data frame of the stitches recipe
:return: a list of the variables that are going to be written out to the netcdf files.
"""
# Parse out the variable name so that we can use it
# to label the final output.
set = x.filter(regex='file').columns.tolist()
out = []
for text in set:
new = text.replace("_file", "")
out.append(new)
return out
def get_netcdf_values(i, dl, rp, fl, name):
"""Extract the archive values from the list of downloaded cmip data
:param i: int index of the row of the recipe data frame
:param dl: list of xarray cmip files
:param rp: data frame of the recipe
:param fl: list of the cmip files
:param name: name of the variable file that is going to be processed.
:return: a numpy array of the archive values extracted from the xarray dataset for the requested period
"""
file = rp[name][i]
start_yr = rp["archive_start_yr"][i]
end_yr = rp["archive_end_yr"][i]
# Figure out which index level we are on and then get the
# xarray from the list.
index = int(np.where(fl == file)[0])
extracted = dl[index].sortby('time')
v = name.replace("_file", "")
# Have to have a special time handler; consider functionalizing this.
times = extracted.indexes['time']
if type(times) in [xr.coding.cftimeindex.CFTimeIndex, pd.core.indexes.datetimes.DatetimeIndex]:
yrs = extracted.indexes['time'].year # pull out the year information from the time index
flags = list(map(lambda x: x in range(start_yr, end_yr+1), yrs))
to_keep = times[flags]
else:
raise TypeError(f"unsupported time type")
dat = extracted.sel(time=to_keep)[v].values.copy()
# Sanity check on daily data: make sure the file covers the full requested period.
if ((times.freq == 'D') | (times.freq == 'day')):
expected_times = pd.date_range(start=str(start_yr) + "-01-01", end=str(end_yr) + "-12-31", freq='D')
if times.calendar == 'noleap':
# noleap calendars never contain 29 February
expected_len = len(expected_times[~((expected_times.month == 2) & (expected_times.day == 29))])
else:
expected_len = len(expected_times)
assert (len(dat) == expected_len), "Not enough data in " + file + " for period " + str(start_yr) + "-" + str(end_yr)
return dat
def get_var_info(rp, dl, fl, name):
"""Extract the cmip variable attribute information.
:param rp: data frame of the recipes
:param dl: list of the data files
:param fl: list of the data file names
:param name: string of the column containing the variable file name from rp
:return: pandas dataframe of the variable meta data
TODO add a check to make sure that there is only one stitching id being passed into
the function.
"""
util.check_columns(rp, {name})
file = rp[name][0]
index = int(np.where(fl == file)[0])
extracted = dl[index]
attrs = data.get_ds_meta(extracted)
attrs["calendar"] = extracted.indexes['time'].calendar
return attrs
def get_atts(rp, dl, fl, name):
"""Extract the cmip variable attribute information.
:param rp: data frame of the recipes
:param dl: list of the data files
:param fl: list of the data file names
:param name: string of the column containing the variable files to process
:return: dict object containing the cmip variable information
TODO add a check to make sure that there is only one stitching id being passed into
the function.
"""
file = rp[name][0]
index = int(np.where(fl == file)[0])
extracted = dl[index]
v=name.replace("_file", "")
out=extracted[v].attrs.copy()
return out
def internal_stitch(rp, dl, fl):
"""Stitch a single recpie into netcdf outputs
:param dl: list of xarray cmip files
:param rp: data frame of the recipe
:param fl: list of the cmip files
:return: a list of the data arrays for the stitched products of the different variables.
"""
rp.reset_index(drop=True, inplace=True)
variables = find_var_cols(rp)
out = []
# For each of the variables stitch the
# data together.
for v in variables:
# Get the information about the variable that is going to be stitched together.
col = v + '_file'
var_info = get_var_info(rp, dl, fl, col)
# For each of the time slices extract the data & concatenate together.
gridded_data = get_netcdf_values(i=0, dl=dl, rp=rp, fl=fl, name=col)
# Now add the other time slices.
for i in range(1, len(rp)):
new_vals = get_netcdf_values(i=i, dl=dl, rp=rp, fl=fl, name=col)
gridded_data = np.concatenate((gridded_data, new_vals), axis=0)
# Note that the pd.date_range call needs the day/month defined, otherwise it will
# truncate the range from the start of the first year to the start of the end year,
# which is not what we want: the full final year must be included in the time series.
start = str(min(rp["target_start_yr"]))
end = str(max(rp["target_end_yr"]))
if var_info["frequency"][0].lower() == "mon":
freq = "M"
elif var_info["frequency"][0].lower() == "day":
freq = "D"
else:
raise TypeError(f"unsupported frequency")
times = pd.date_range(start=start + "-01-01", end=end + "-12-31", freq=freq)
# Again, some ESMs stop in 2099 instead of 2100 - so we just drop the
# last year of gridded_data when that is the case.
#TODO this will need something extra/different for daily data; maybe just
# a simple len(times)==len(gridded_data)-12 : len(times) == len(gridded_data)-(nDaysInYear)
# with correct parentheses would do it
if ((max(rp["target_end_yr"]) == 2099) & (len(times) == (len(gridded_data) - 12))):
gridded_data = gridded_data[0:len(times), 0:, 0:].copy()
if ((var_info["calendar"][0].lower() == "noleap") & (freq == "D")):
times = times[~((times.month == 2) & (times.day == 29))]
assert (len(gridded_data) == len(times)), "Problem with the length of time"
# Extract the lat and lon information that will be used to structure the
# empty netcdf file. Make sure to copy all of the information including
# the attributes!
lat = dl[0].lat.copy()
lon = dl[0].lon.copy()
r = rp.reset_index(drop=True).to_string()
rslt = xr.Dataset({v: xr.DataArray(
gridded_data,
coords=[times, lat, lon],
dims=["time", "lat", 'lon'],
attrs={'units': var_info['units'][0],
'variable': var_info['variable'][0],
'experiment': var_info['experiment'][0],
'ensemble': var_info['ensemble'][0],
'model': var_info['model'][0],
'stitching_id': rp['stitching_id'].unique()[0],
'recipe': r})
})
out.append(rslt)
out_dict = dict(zip(variables, out))
return out_dict
def gridded_stitching(out_dir, rp):
"""Stitch
:param out_dir: string directory location where to write the netcdf files out to
:param rp: data frame of the recipe
:return: a list of the netcdf files paths
"""
flag = os.path.isdir(out_dir)
if not flag:
raise TypeError(f'The output directory does not exist.')
# Check inputs.
util.check_columns(rp, {'target_start_yr', 'target_end_yr', 'archive_experiment',
'archive_variable', 'archive_model', 'archive_ensemble', 'stitching_id',
'archive_start_yr', 'archive_end_yr'})
# Determine which variables will be downloaded.
variables = find_var_cols(rp)
if not (len(variables) >= 1):
raise TypeError(f'No variables were found to be processed.')
# Determine which files need to be downloaded from pangeo.
file_list = find_zfiles(rp)
# Make sure that all of the files are available to download from pangeo.
# Note that this might be excessively cautious but this is an issue we have run into in
# the past.
avail = pangeo.fetch_pangeo_table()
flag = all(item in list(avail['zstore']) for item in list(file_list))
if not flag:
raise TypeError(f'Trying to request a zstore file that does not exist')
# Download all of the data from pangeo.
data_list = list(map(pangeo.fetch_nc, file_list))
# For each of the stitching recipes go through and stitch a recipe.
for single_id in rp['stitching_id'].unique():
# initialize f to be empty just to be safe now that we've added a
# try...except approach. It's technically possible the first id
# tried will fail and the function will try to return a non-existent f.
f = []
try:
print((
'Stitching gridded netcdf for: ' + rp.archive_model.unique() + " " + rp.archive_variable.unique() + " " + single_id))
# Do the stitching!
# ** this can be a slow step and prone to errors
single_rp = rp.loc[rp['stitching_id'] == single_id].copy()
rslt = internal_stitch(rp=single_rp, dl=data_list, fl=file_list)
# Print the files out as netcdf files
f = []
for i in rslt.keys():
ds = rslt[i]
ds = ds.sortby('time').copy()
fname = (out_dir + '/' + "stitched_" + ds[i].attrs['model'] + '_' +
ds[i].attrs['variable'] + '_' + single_id + '.nc')
ds.to_netcdf(fname)
f.append(fname)
# end For loop over rslt keys
#end try
except Exception:
print(('Stitching gridded netcdf for: ' + rp.archive_model.unique() + " " + rp.archive_variable.unique() + " " + single_id +' failed. Skipping. Error thrown within gridded_stitching fxn.'))
# end except
# end for loop over single_id
return f
# end gridded stitching function
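# A minimal usage sketch (not part of the original module); the csv path is hypothetical,
# and the recipe data frame just needs the fully formatted columns checked above:
#   import pandas as pd
#   import stitches.fx_stitch as fx
#   rp = pd.read_csv("gridded_recipes_example.csv")   # hypothetical recipe file
#   written = fx.gridded_stitching("./stitched_output", rp)   # returns list of netcdf paths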
def gmat_internal_stitch(row, data):
""" Select data from a tas archive based on a single row in a recipe data frame, this
function is used to iterate over an entire recipe to do the stitching.
:param row: pandas.core.series.Series a row entry of a fully formatted recpie
:param data: pandas.core.frame.DataFrame containing the tas values to be stiched togeher
:return: pandas.core.frame.DataFrame of tas values
"""
years = list(range(int(row["target_start_yr"]), int(row["target_end_yr"]) + 1))
select_years = list(range(int(row["archive_start_yr"]), int(row["archive_end_yr"]) + 1))
selected_data = data.loc[(data["experiment"] == row["archive_experiment"]) &
(data["year"].isin(select_years)) &
(data["ensemble"] == row["archive_ensemble"])]
# some models stop at 2099 instead of 2100 - so there is a mismatch
# between len(years) and selected data but not a fatal one.
# Write a very specific if statement to catch this & just chop the extra year
# off the end of selected_data.
if ((len(years) == (util.nrow(selected_data) - 1)) & (max(years) == 2099) ):
selected_data = selected_data.iloc[0:len(years), ].copy()
if len(years) != util.nrow(selected_data):
raise TypeError(f"Trouble with selecting the tas data.")
new_vals = selected_data['value']
d = {'year': years,
'value': new_vals}
df = pd.DataFrame(data=d)
df['variable'] = 'tas'
return df
# TODO ACS: we do have a bit of a behavior change here so that the rp read in to this
# TODO function is the same as the rp read in to the gridded_stitching function.
def gmat_stitching(rp):
""" Based on a recipe data frame stitch together a time series of global tas data.
:param rp: pandas.core.frame.DataFrame a fully formatted recipe data frame.
:return: pandas.core.frame.DataFrame of stitched together tas data.
"""
# Check inputs.
util.check_columns(rp, {'target_start_yr', 'target_end_yr', 'archive_experiment',
'archive_variable', 'archive_model', 'archive_ensemble', 'stitching_id',
'archive_start_yr', 'archive_end_yr', 'tas_file'})
# One of the assumptions of this function is that it only works with tas, so
# we can safely add tas as the variable column.
rp['variable'] = 'tas'
out = []
for name, match in rp.groupby(['stitching_id']):
# Reset the index in the match data frame so that we can use a for loop
# to iterate through the match data frame and apply gmat_internal_stitch.
match = match.reset_index(drop=True)
# Find the tas data to be stitched together.
dir_path = pkg_resources.resource_filename('stitches', 'data/tas-data')
all_files = util.list_files(dir_path)
# Load the tas data for a particular model.
model = match['archive_model'].unique()[0]
csv_to_load = [file for file in all_files if (model in file)][0]
data = pd.read_csv(csv_to_load)
# Format the data so that if we have historical years in the future scenarios
# then that experiment is relabeled as "historical".
fut_exps = ['ssp245', 'ssp126', 'ssp585', 'ssp119', 'ssp370', 'ssp434', 'ssp534-over', 'ssp460']
nonssp_data = data.loc[~data["experiment"].isin(fut_exps)]
fut_data = data.loc[(data["experiment"].isin(fut_exps)) &
(data["year"] > 2014)]
hist_data = data.loc[(data["experiment"].isin(fut_exps)) &
(data["year"] <= 2014)]
hist_data["experiment"] = "historical"
tas_data = pd.concat([nonssp_data, fut_data, hist_data])[['variable', 'experiment', 'ensemble', 'model', 'year',
'value']].drop_duplicates().reset_index(drop=True)
# Stitch the data together based on the matched recipes.
dat = []
for i in match.index:
row = match.iloc[i, :]
dat.append(gmat_internal_stitch(row, tas_data))
dat = pd.concat(dat)
# Add the stitching id column to the data frame.
dat['stitching_id'] = name
# Add the data to the out list
out.append(dat)
# Format the list of data frames into a single data frame.
final_output = pd.concat(out)
final_output = final_output.reset_index(drop=True).copy()
final_output = final_output.sort_values(['stitching_id', 'year']).copy()
final_output = final_output.reset_index(drop=True).copy()
return final_output
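# A minimal usage sketch (not part of the original module); the csv name is hypothetical.
# gmat_stitching only needs a fully formatted recipe data frame and returns a long data
# frame of stitched global tas values keyed by stitching_id and year:
#   import pandas as pd
#   recipe = pd.read_csv("gridded_recipes_example.csv")   # hypothetical recipe file
#   gsat = gmat_stitching(recipe)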
```
|
{
"source": "JGCRI/xanthosvis",
"score": 2
}
|
#### File: xanthosvis/xanthosvis/main.py
```python
import json
import os
import uuid
import dash
import dash_core_components as dcc
import dash_daq as daq
import dash_html_components as html
import pandas as pd
import seaborn as sns
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
from flask_caching import Cache
import xanthosvis.util_functions as xvu
# ----- Define init options and system configuration
# Dash init, define parameters and css information
app = dash.Dash(__name__, external_stylesheets=['assets/base.css', 'assets/custom.css'],
meta_tags=[{"name": "viewport", "content": "width=device-width"}], suppress_callback_exceptions=True,
compress=False)
# Set up disk based cache with 100 min timeout
cache = Cache(app.server, config={
'CACHE_TYPE': 'filesystem',
'CACHE_DIR': 'cache-directory',
"CACHE_DEFAULT_TIMEOUT": 6000
})
server = app.server
root_dir = 'include/'
config = {'displaylogo': False, 'toImageButtonOptions': {
'format': 'svg', # one of png, svg, jpeg, webp
'filename': 'custom_image',
'height': None,
'width': None,
'scale': 1 # Multiply title/legend/axis/canvas sizes by this factor
}}
# Clear cache on load, don't want old files lingering
cache.clear()
# Access Token for Mapbox
mapbox_token = open("include/mapbox-token").read()
# Misc Options
sns.set()
group_colors = {"control": "light blue", "reference": "red"}
colors = {
'background': '#111111',
'text': '#7FDBFF'
}
# ----- End Init
# ----- Reference Files & Global Vars
# reference file to be included in the package data for mapping cells to basins
gridcell_ref_file = os.path.join(root_dir, 'reference', 'xanthos_0p5deg_landcell_reference.csv')
# read in reference file to dataframe
df_ref = pd.read_csv(gridcell_ref_file)
# reference geojson file for basins
basin_json = os.path.join(root_dir, 'reference', 'gcam_basins.geojson')
basin_features = xvu.process_geojson(basin_json)
# World reference file for viewing by country
world_json = os.path.join(root_dir, 'reference', 'world.geojson')
with open(world_json, encoding='utf-8-sig', errors='ignore') as get:
country_features = json.load(get)
# Available Runoff Statistic for the Choropleth Map
acceptable_statistics = [{'label': 'Mean', 'value': 'mean'}, {'label': 'Median', 'value': 'median'},
{'label': 'Min', 'value': 'min'}, {'label': 'Max', 'value': 'max'},
{'label': 'Standard Deviation', 'value': 'standard deviation'}]
# ----- End Reference
# ----- HTML Components
app.layout = html.Div(
children=[
# Data stores that store a key value for the cache and a select store for remembering selections/persistence
dcc.Store(id="select_store"),
dcc.Store(id="data_store", storage_type='memory'),
dcc.ConfirmDialog(
id='confirm',
message='Your data has timed out. Please reload.',
),
html.Div(id="error-message"),
# Banner/Header Div
html.Div(
className="banner row",
children=[
html.H2(className="h2-title", children="GCIMS Hydrologic Explorer"),
html.Div(
className="div-logo",
children=[
html.A([
html.Img(className="logo", src=app.get_asset_url("gcims_logo.svg")
),
],
href="https://gcims.pnnl.gov/global-change-intersectoral-modeling-system", target="blank",
),
]),
html.H2(className="h2-title-mobile", children="GCIMS Hydrologic Explorer"),
],
),
# Body of the App
html.Div(
className="row app-body",
children=[
# User Controls
html.Div(
className="four columns card",
children=[
html.Div(
className="bg-white user-control",
children=[
html.Div(
className="padding-top-bot",
children=[
html.H6("Data Upload (File Types: .csv, zipped .csv)"),
dcc.Loading(id='file_loader', children=[
dcc.Upload(
id='upload-data',
className="loader",
children=html.Div([
'Drag and Drop or ',
html.A('Select Files')
]),
style={
'width': '100%',
'height': '35px',
'lineHeight': '35px',
'borderWidth': '1px',
'borderStyle': 'dashed',
'borderRadius': '5px',
'textAlign': 'center'
},
# Allow multiple files to be uploaded
multiple=True
)]),
],
),
html.Div(
className="form-row",
children=[
html.Div(
style=dict(
width='50%',
verticalAlign="middle"),
children=[
html.H6("Choose Statistic:")
]
),
dcc.Dropdown(
id='statistic',
className="loader",
options=[{'label': i['label'], 'value': i['value']} for i in
acceptable_statistics],
value=acceptable_statistics[0]['value'], clearable=False,
style=dict(
# width='50%',
verticalAlign="middle"
)
),
],
),
html.Div(
className="form-row",
children=[
html.Div(
style=dict(
width='50%',
verticalAlign="middle"),
children=[
html.H6("Choose Units:")
]
),
dcc.Dropdown(
id='units',
className="loader",
options=[],
value=None, clearable=False,
style=dict(
# width='50%',
verticalAlign="middle"
)
),
],
),
html.Div(
className="form-row",
children=[
html.Div(
style=dict(
width='50%',
verticalAlign="middle"),
children=[
html.H6("Choose Start Date:"),
]
),
dcc.Dropdown(
id='start_year',
className="loader",
options=[],
clearable=False,
style=dict(
# width='50%',
verticalAlign="middle"
)
),
],
),
html.Div(
className="form-row",
children=[
html.Div(
style=dict(
width='50%',
verticalAlign="middle"),
children=[
html.H6("Choose End Date:"),
]
),
dcc.Dropdown(
id='through_year',
className="loader",
options=[], clearable=False,
style=dict(
# width='50%',
verticalAlign="middle"
)
),
],
),
html.Div(
className="form-row",
children=[
html.Div(
style=dict(
width='50%',
verticalAlign="middle"),
children=[
html.H6("Choose View By:"),
]
),
dcc.RadioItems(
id="area_select",
options=[
{'label': 'GCAM Basin', 'value': 'gcam'},
{'label': 'Country', 'value': 'country'}
],
value='gcam',
labelStyle={'display': 'inline-block'}
)
],
),
html.Div(
className="padding-top-bot",
children=[
html.H6("Filter by Months:"),
dcc.Dropdown(
id='months_select',
options=[],
multi=True,
style=dict(
height='90px',
width='100%',
verticalAlign="middle"
)
)
]
),
html.Div(
className="padding-top-bot",
children=[
daq.BooleanSwitch(id="grid_toggle", on=False,
style={'display': 'inline-block'}),
html.Label(" View as Gridded Data", className="grid-label"),
],
),
html.Div(
className="padding-top-bot",
children=[
html.Button('Load Data', id='submit_btn', n_clicks=0),
html.Button('Reset Graph', id='reset_btn', n_clicks=0),
],
),
html.Div(
className="xanthos-div",
children=[
html.A(
[html.Img(src="assets/GitHub-Mark-Light-32px.png", className="xanthos-img"),
"Find Xanthos on GitHub"],
href="https://github.com/JGCRI/xanthos",
target="blank", className="a-xanthos-link")
]
),
],
)
],
),
# Graphs/Output Div
html.Div(
className="eight columns card-left",
children=[
dcc.Tabs(id='tabs', value="info_tab", parent_className='custom-tabs',
className='custom-tabs-container loader', children=[
dcc.Tab(label='Instructions', value='info_tab', className='custom-tab',
selected_className='custom-tab--selected', children=[
html.Div(id='tab1_content', className="bg-white",
style={'height': '100%', 'min-height': '490px', 'padding-top':
'20px', 'padding-left': '15px'}, children=[
html.H6("Loading Data:"),
html.Ol(children=[
html.Li("Use the 'Data Upload' component to upload Xanthos output "
"data"),
html.Li("Choose the statistic and associated units you would like "
"to view"),
html.Li(
"Choose the date range from the available start/end dates ("
"calculated from data upload)"),
html.Li(
"Click the 'Load Data' button (also click again after making "
"any changes to input fields)"),
]),
html.H6("Filtering Data:"),
html.Ul(children=[
html.Li(
"Once data is loaded, to view a subset of basins "
"use the box select or lasso tool to "
"select group of basins and rescale the graph"),
html.Li(
"To view downscaled 0.5 degree resolution cell data, click "
"the 'View as Gridded Data' toggle (note: best used on a subset"
" of data due to processing time constraints)"),
html.Li(
"To reset the graph back to it's initial state, click the "
"'Reset Graph' button")
]),
]),
]),
dcc.Tab(label='Output', value='output_tab', className='custom-tab',
selected_className='custom-tab--selected', children=[
html.Div(
children=[
]),
dcc.Loading(id='choro_loader', children=[
dcc.Graph(
id='choro_graph', figure={
'layout': {
'title': 'Runoff by Basin (Upload data and click "Load Data")'
},
'data': []
}, config=config
)]),
dcc.Loading(id='hydro_loader', children=[
dcc.Graph(
id='hydro_graph', figure={
'layout': {
'title': 'Single Basin Runoff per Year (Click on a basin)'
}
}, config=config
)]
),
]),
]),
],
),
],
),
]
)
# ----- End HTML Components
# ----- Dash Callbacks
# @app.callback(Output('choro_graph', 'extendData'),
# [Input('choro_graph', "relayoutData")],
# [State("grid_toggle", "on"), State("select_store", 'data'),
# State("choro_graph", "figure")],
# )
# def update_markers(relay_data, toggle_value, store_state, fig_info):
# print("in update markers")
# click_value = dash.callback_context.triggered[0]['value']
# click_info = dash.callback_context.triggered[0]['prop_id']
# if click_info == 'choro_graph.relayoutData':
# if type(click_value).__name__ == 'dict' and 'mapbox.zoom' in click_value.keys() and toggle_value is True:
# choro_data = copy.deepcopy(fig_info['data'])
# choro_data[0]['marker']['size'] = click_value['mapbox.zoom'] * 4
# # fig_info['data'][0]['radius'] = math.ceil(click_value['mapbox.zoom'] * 3 + 1) mapbox={'zoom': 0.6}
# return choro_data
# elif click_value != {'autosize': True}:
# print("HERE")
# print(click_value)
# raise PreventUpdate
# Callback to generate and load the choropleth graph when user clicks load data button, reset button, or selects
# content in the graph for filtering
@app.callback([Output("tabs", "value"), Output("grid_toggle", "on"),
Output("select_store", 'data'), Output('confirm', 'displayed'), Output("choro_graph", "figure")],
[Input("submit_btn", 'n_clicks'), Input("reset_btn", 'n_clicks'), Input("choro_graph", "selectedData")],
[State("months_select", "value"), State("grid_toggle", "on"), State("upload-data", "contents"),
State("upload-data", "filename"), State("upload-data", "last_modified"), State("start_year", "value"),
State("through_year", "value"), State("statistic", "value"), State("choro_graph", "figure"),
State("through_year", "options"), State("select_store", 'data'), State("data_store", 'data'),
State("area_select", "value"), State("units", "value")],
prevent_initial_call=True)
@cache.memoize(timeout=6000)
def update_choro(load_click, reset_click, selected_data, months, toggle_value, contents, filename,
filedate, start, end, statistic, fig_info, through_options, store_state, data_state, area_type, units):
"""Generate choropleth figure based on input values and type of click event
:param load_click: Click event data for load button
:type load_click: int
:param reset_click: Click event data for reset button
:type reset_click: int
:param selected_data Area select event data for the choropleth graph
:type selected_data dict
:param months List of selected months if available
:type months list
:param toggle_value Value of grid toggle switch
:type toggle_value int
:param contents: Contents of uploaded file
:type contents: str
:param filename: Name of uploaded file
:type filename: list
:param filedate: Date of uploaded file
:type filedate: str
:param start Start year value
:type start str
:param end End year value
:type end str
:param statistic Chosen statistic to run on data
:type statistic str
:param fig_info Current state of figure object
:type fig_info dict
:param through_options Options list for the through/end year dropdown
:type through_options dict
:param store_state Stored graph selection data from previous interactions
:type store_state dict
:param data_state File cache data key
:type data_state dict
:param area_type Indicates if user is viewing by country or basin
:type area_type str
:param units Chosen units
:type units str
:return: Active tab, grid toggle value, selection data, warning status, Choropleth figure
"""
# Don't process anything unless there's contents in the file upload component
if contents and dash.callback_context.triggered[0]['prop_id'] in ['submit_btn.n_clicks',
'choro_graph.selectedData',
'choro_graph.relayoutData',
'reset_btn.n_clicks']:
# Check for valid years inputs
if start > end:
error_message = html.Div(
className="alert",
children=["Invalid Years: Please choose a start year that is less than end year."],
)
raise PreventUpdate
# Get the values of what triggered the callback here
click_value = dash.callback_context.triggered[0]['value']
click_info = dash.callback_context.triggered[0]['prop_id']
# If the user zooms while viewing by grid cell then dynamically adjust marker size for optimal viewing
# Currently disabled due to performance issues of the relayoutData event triggering too often
if click_info == 'choro_graph.relayoutData':
if type(click_value).__name__ == 'dict' and 'mapbox.zoom' in click_value.keys() and toggle_value is True:
fig_info['data'][0]['marker']['size'] = click_value['mapbox.zoom'] * 4
# fig_info['data'][0]['radius'] = math.ceil(click_value['mapbox.zoom'] * 3 + 1)
return 'output_tab', toggle_value, store_state, False, fig_info
elif click_value != {'autosize': True}:
print("HERE")
raise PreventUpdate
# Get the cached contents of the data file here instead of rereading every time
data = cache.get(data_state)
if data is not None:
df = data[0]
file_info = data[1]
else:
return 'info_tab', False, store_state, True, fig_info
# Process inputs (years, data) and set up variables
year_list = xvu.get_target_years(start, end, through_options)
# Determine if viewing by country or basin to set up data calls
df_per_area = None
if area_type == "gcam":
if toggle_value is False:
df_per_area = xvu.data_per_basin(df, statistic, year_list, df_ref, months, filename, units)
df_per_area['var'] = round(df_per_area['var'], 2)
features = basin_features
else:
if toggle_value is False:
df_per_area = xvu.data_per_country(df, statistic, year_list, df_ref, months, filename, units)
df_per_area['var'] = round(df_per_area['var'], 2)
features = country_features
# If the user clicked the reset button then reset graph selection store data to empty
if click_info == 'reset_btn.n_clicks':
if area_type == "gcam":
df_per_area = xvu.data_per_basin(df, statistic, year_list, df_ref, months, filename, units)
else:
df_per_area = xvu.data_per_country(df, statistic, year_list, df_ref, months, filename, units)
df_per_area['var'] = round(df_per_area['var'], 2)
fig = xvu.plot_choropleth(df_per_area, features, mapbox_token, statistic, start, end, file_info, months,
area_type, units)
store_state = None
return 'output_tab', False, store_state, False, fig
# Generate figure based on type of click data (click, area select, or initial load)
if selected_data is not None and click_info == 'choro_graph.selectedData':
store_state = selected_data
if len(selected_data['points']) == 0:
fig = xvu.plot_choropleth(df_per_area, features, mapbox_token, statistic, start, end, file_info,
months, area_type, units)
else:
if toggle_value is True:
fig = xvu.update_choro_grid(df_ref, df, features, year_list, mapbox_token, selected_data,
start, end, statistic, file_info, months, area_type, units, filename)
else:
fig = xvu.update_choro_select(df_ref, df_per_area, features, year_list, mapbox_token,
selected_data, start, end, statistic, file_info, months, area_type,
units)
elif click_info == "grid_toggle.on":
if store_state is None:
selected_data = None
if toggle_value is True:
fig = xvu.update_choro_grid(df_ref, df, features, year_list, mapbox_token, selected_data,
start, end, statistic, file_info, months, area_type, units, filename)
else:
fig = xvu.update_choro_select(df_ref, df_per_area, features, year_list, mapbox_token,
selected_data, start, end, statistic, file_info, months, area_type, units)
else:
if store_state is None:
selected_data = None
if selected_data is not None and len(selected_data['points']) != 0:
if toggle_value is True:
fig = xvu.update_choro_grid(df_ref, df, features, year_list, mapbox_token, selected_data,
start, end, statistic, file_info, months, area_type, units, filename)
else:
fig = xvu.update_choro_select(df_ref, df_per_area, features, year_list, mapbox_token,
selected_data, start, end, statistic, file_info, months, area_type,
units)
else:
if toggle_value is True:
fig = xvu.update_choro_grid(df_ref, df, features, year_list, mapbox_token, selected_data,
start, end, statistic, file_info, months, area_type, units, filename)
else:
fig = xvu.plot_choropleth(df_per_area, features, mapbox_token, statistic, start, end,
file_info, months, area_type, units)
return 'output_tab', toggle_value, store_state, False, fig
# If no contents, just return the blank map with instruction
else:
raise PreventUpdate
# Callback to set start year options when file is uploaded and store data in disk cache
@app.callback(
[Output("start_year", "options"), Output("start_year", "value"), Output("upload-data", "children"),
Output("data_store", 'data'), Output("months_select", "options"), Output("units", "options"),
Output("units", "value")],
[Input("upload-data", "contents")], [State('upload-data', 'filename'), State('upload-data', 'last_modified')],
prevent_initial_call=True
)
def update_options(contents, filename, filedate):
"""Set start year options based on uploaded file's data
:param contents: Contents of uploaded file
:type contents: str
:param filename: Name of uploaded file
:type filename: str
:param filedate: Date of uploaded file
:type filedate: str
:return: Start year options, initial start year value, new upload component text, data store key, month options, unit options, initial unit value
"""
# Check if there is uploaded content
if contents:
# Process contents for available years
target_years, months_list = xvu.process_input_years(contents, filename, filedate)
if months_list is None:
months = []
else:
months = xvu.get_available_months(months_list)
name = filename[0]
new_text = html.Div(["Using file " + name[:25] + '...' if (len(name) > 25) else "Using file " + name])
data = xvu.process_file(contents, filename, filedate, years=None)
xanthos_data = data[0]
# Create id key for data store and use it as reference
file_id = str(uuid.uuid4())
df = xvu.prepare_data(xanthos_data, df_ref)
data_state = file_id
cache.set(file_id, [df, data[1]])
# Evaluate and set unit options
unit_options = xvu.get_unit_options(data[1])
if 'km3' in name:
unit_val = 'km³'
elif 'mm' in name:
unit_val = 'mm'
else:
unit_val = 'm³/s'
return target_years, target_years[0]['value'], new_text, data_state, months, unit_options, unit_val
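# The caching pattern used above, sketched for clarity (names unchanged): the uploaded
# file is parsed once, the heavy dataframe is stored in the filesystem cache under a
# random uuid, and only that uuid travels through the dcc.Store:
#   file_id = str(uuid.uuid4())
#   cache.set(file_id, [df, data[1]])       # heavy objects live in the server-side cache
#   # later callbacks then recover them with: data = cache.get(data_state)
# which keeps the browser-side store small while every callback can still reach the data.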
# Callback to set through year options when start year changes
@app.callback(
[Output('through_year', 'options'), Output('through_year', 'value')],
[Input('start_year', 'value'), Input('start_year', 'options')], [State('through_year', 'value')],
prevent_initial_call=True)
def set_through_year_list(value, options, current_value):
"""Assign through/end year options based on the start year options and value
:param value: Start year's selected value
:type value: int
:param options: Start year's option list
:type options: dataframe
:param current_value: Current value of through_year, if any
:type current_value: int
:return: Through/end year options and initial value
"""
print(value)
if current_value is None:
year_list = xvu.available_through_years(options, options[0]['value'])
new_value = options[len(options) - 1]['value']
else:
year_list = xvu.available_through_years(options, value)
if len([i for i in options if i['value'] == current_value]) >= 1:
new_value = current_value
else:
new_value = options[len(options) - 1]['value']
return year_list, new_value
# Callback to load the hydro graph when user clicks on choropleth graph
@app.callback(
Output('hydro_graph', 'figure'),
[Input('choro_graph', 'clickData'), Input("submit_btn", 'n_clicks')],
[State('start_year', 'value'), State('through_year', 'value'), State("upload-data", "contents"),
State('upload-data', 'filename'), State('upload-data', 'last_modified'), State("through_year", "options"),
State('months_select', 'value'), State('area_select', 'value'), State("hydro_graph", 'figure'),
State("units", "value"), State("data_store", "data")],
prevent_initial_call=True
)
def update_hydro(click_data, n_click, start, end, contents, filename, filedate, year_options, months, area_type,
hydro_state, units, data_state):
"""Generate choropleth figure based on input values and type of click event
:param click_data: Click event data for the choropleth graph
:type click_data: dict
:param n_click Submit button click event
:type n_click object
:param start Start year value
:type start str
:param end End year value
:type end str
:param contents: Contents of uploaded file
:type contents: str
:param filename: Name of uploaded file
:type filename: list
:param filedate: Date of uploaded file
:type filedate: str
:param year_options: List of year range
:type year_options: dict
:param months: List of selected months
:type months: list
:param area_type: Indicates if user is viewing by country or basin
:type area_type: str
:param hydro_state: Current state of hydro figure
:type hydro_state: dict
:param units: Chosen units
:type units: str
:param data_state: File cache data
:type data_state: dict
:return: Hydrograph figure
"""
if contents is not None:
# If invalid end date then don't do anything and output message
if start >= end:
return {
'data': [],
'layout': {
'title': 'Please choose an end year that is greater than the start year'
}
}
# If there wasn't a click event on choro graph then do not load new hydro graph
if click_data is None:
return {
'data': [],
'layout': {
'title': 'Single Basin Data per Year (Click on a basin to load)'
}
}
# Get data from cache
data = cache.get(data_state)
if data is not None:
df = data[0]
file_info = data[1]
else:
raise PreventUpdate
# Evaluate chosen area type (basin or country) and set dynamic parameter values
if area_type == "gcam":
area_name = "basin_name"
area_id = "basin_id"
feature_id = "properties.basin_id"
area_loc = "basin_id"
area_title = "Basin"
area_custom_index = 0
else:
area_name = "country_name"
area_id = "country_id"
feature_id = "properties.name"
area_loc = "country_name"
area_title = "Country"
area_custom_index = 1
# Get data from user click
points = click_data['points']
context = dash.callback_context.triggered[0]['prop_id']
# Evaluate current state and only update if user made a different selection
if context != 'choro_graph.clickData' and 'data' in hydro_state.keys() and len(hydro_state['data']) > 0:
hydro_type = hydro_state['data'][0]['customdata'][0][0]
if hydro_type == "basin_id" and area_type == "country":
raise PreventUpdate
elif hydro_type == "country_name" and area_type == "gcam":
raise PreventUpdate
# Evaluate click event to determine if user clicked on an area or a grid cell
if 'cell_id' not in points[0]['customdata'].keys():
location = points[0]['customdata'][area_loc]
location_type = area_title
else:
location = points[0]['customdata']['cell_id']
location_type = 'cell'
# Process years, basin/cell information
years = xvu.get_target_years(start, end, year_options)
if location_type == 'Basin':
hydro_data = xvu.data_per_year_area(df, location, years, months, area_loc, filename, units, df_ref)
return xvu.plot_hydrograph(hydro_data, location, df_ref, 'basin_id', file_info, units)
elif location_type == 'Country':
hydro_data = xvu.data_per_year_area(df, location, years, months, area_loc, filename, units, df_ref)
return xvu.plot_hydrograph(hydro_data, location, df_ref, 'country_name', file_info, units)
elif location_type == 'cell':
hydro_data = xvu.data_per_year_cell(df, location, years, months, area_loc, filename, units, df_ref)
return xvu.plot_hydrograph(hydro_data, location, df_ref, 'grid_id', file_info, units, area_name)
# Return nothing if there's no uploaded contents
else:
data = []
layout = {}
return {
'data': data,
'layout': layout
}
# ----- End Dash Callbacks
# Start Dash Server
if __name__ == '__main__':
app.run_server(debug=False, threaded=True)
```
|
{
"source": "jgcumming/napalm-sros",
"score": 3
}
|
#### File: napalm_sros/utils/parse_output_to_dict.py
```python
import os
import textfsm
def parse_with_textfsm(template, command_output):
"""
:param template: TextFSM template to parse command
:param command_output: Command output from a node
:return: List of dicts. Dict per FSM row.
"""
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), template), "r") as template_file:
fsm = textfsm.TextFSM(template_file)
fsm_results = fsm.ParseText(command_output)
output_list = []
# print fsm.header
for index, line in enumerate(fsm_results, 1):
# print line
textfsm_dict = {}
for number, value in enumerate(line):
textfsm_dict[fsm.header[number]] = value
output_list.append(textfsm_dict)
return output_list
def parse_with_textfsm_by_first_value(template, command_output):
"""
:param template: TextFSM template to parse command
:param command_output: Command output from a node
:return: Dict per first(top) textFSM template value
"""
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), template), "r") as template_file:
fsm = textfsm.TextFSM(template_file)
fsm_results = fsm.ParseText(command_output)
textfsm_dict = {}
# print fsm.header
for line in fsm_results:
# print line
textfsm_dict[line[0]] = {}
for number, value in enumerate(line):
if value != line[0]:
textfsm_dict[line[0]].update({fsm.header[number]: value})
return textfsm_dict
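# A minimal usage sketch (not part of the original module); the template path, command
# and output below are hypothetical:
#   raw_output = device.send_command("show router interface")   # hypothetical source of CLI text
#   rows = parse_with_textfsm("textfsm_templates/router_interface.tpl", raw_output)
#   # -> [{'Interface': 'to-core', 'AdminState': 'Up', ...}, ...]
#   by_key = parse_with_textfsm_by_first_value("textfsm_templates/router_interface.tpl", raw_output)
#   # -> {'to-core': {'AdminState': 'Up', ...}, ...}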
if __name__ == '__main__':
pass
```
#### File: napalm_sros/utils/utils.py
```python
import logging
from datetime import datetime
LOG_FORMAT = '%(asctime)-15s %(filename)s %(funcName)s line %(lineno)d %(levelname)s: %(message)s'
def init_logging():
date = datetime.now()
new_date = date.strftime('%Y-%m-%d %H.%M.%S')
file_path = "./logs_{}.log".format(new_date)
logging.basicConfig(filename=file_path, format=LOG_FORMAT, level=logging.DEBUG)
return logging.getLogger()
```
|
{
"source": "jgd10/4inns",
"score": 3
}
|
#### File: jgd10/4inns/FourInns.py
```python
import numpy as np
checkpoints = ['Start','Skye','HeyMs','Crow',
'Tors','Gate','Snake','Edale',
'Chap','White','Cat','Fin']
class Team:
def __init__(self,name,position):
self.name = name
self.members = []
self.times = []
self.checkpoints = ['Start','Skye','HeyMs','Crow',
'Tors','Gate','Snake','Edale',
'Chap','White','Cat','Fin']
self.rank = position
def add_members(self,names):
if type(names)==list:
self.members.extend(names)
else:
self.members.append(names)
def add_times(self,times):
for i in range(12):
t = times[i]
# the first three characters flag a retirement ('Ret'/'ret')
dmy = t[:3]
if dmy=='Ret' or dmy=='ret':
act_time = -1.
elif times[i] == 'Missed':
# missed checkpoint: carry forward the previous recorded time
act_time = self.times[-1]
elif times[i] == 'DNS':
# did not start
act_time = -1.
else:
# times are 'HH.MM'-style strings; convert to decimal hours
hrs = float(t[:2])
mins = float(t[3:])
act_time = hrs+(mins/60.)
# times before 06:00 are after midnight, so roll them into the next day
if act_time < 6.0: act_time += 24.
self.times.append(act_time)
self._intervals()
def _intervals(self):
N = len(self.times)
self.intvls = []
for i in range(1,N):
self.intvls.append(self.times[i]-self.times[i-1])
self.cmsm = np.cumsum(self.intvls)
self.total = self.cmsm[-1]
class Year:
def __init__(self,year,teams):
self.year = year
self.teams = []
self.teams.extend(teams)
self._combineTeams()
self.checkpoints = ['Start','Skye','HeyMs','Crow',
'Tors','Gate','Snake','Edale',
'Chap','White','Cat','Fin']
def _combineTeams(self):
self.names = []
self.times = []
self.intvls = []
self.ranks = []
self.totals = []
for t in self.teams:
self.names.append(t.name)
self.times.append(t.times)
self.intvls.append(t.intvls)
self.ranks.append(t.rank)
self.totals.append(t.total)
self.names = np.array(self.names)
self.times = np.array(self.times)
self.intvls = np.array(self.intvls)
self.ranks = np.array(self.ranks)
self.totals = np.array(self.totals)
```
|
{
"source": "jgd10/pySALESetup_legacy",
"score": 2
}
|
#### File: pySALESetup_legacy/debugging/PSDtest.py
```python
import pySALESetup as pss
import scipy.special as scsp
import numpy as np
from collections import Counter
import matplotlib.pyplot as plt
plt.rcParams['text.usetex']=True
def normalPDF(x,mu,sg):
pdf = np.exp(-0.5*((x-mu)/sg)**2.)/np.sqrt(2.*np.pi*sg**2.)
return pdf
def normalCDF(x,mu,sg):
cdf = .5*(1.+scsp.erf((x-mu)/np.sqrt(2.*sg**2.)))
return cdf
def PDF(x):
# The PDF
D = 0.00936177062374
E = 0.0875
L = 0.643
F = D*L
#pdf = (1./a)*A*L*C*np.exp(L*x)/(B+C*np.exp(L*x))**2.
pdf = 10.*F*np.exp(-L*x)/(E+np.exp(-L*x))**2.
return pdf
def CDF(x):
# The CDF
#A = 2.908
#B = 0.028
#C = 0.320
#a = 99.4
#D = A*C/a
#E = B/C
D = 0.00936177062374*10.
E = 0.0875
L = 0.643
print(D, E)
return D/(E+np.exp(-L*x))
def reverse_phi(p):
return (2**(-p))*.5*1.e-3
m = pss.Mesh(X=600,Y=1200,cellsize=2.5e-6)
G = pss.Ensemble(m)
SD = pss.SizeDistribution(func=CDF)
maxphi = -np.log2(2*4*2.5e-3)
minphi = -np.log2(2*2500*2.5e-3)
N = 100
# Generate N phi values and equiv radii (in cells)
phi = np.linspace(minphi,maxphi,N)
#ax.hist(phi,bins=20)
#print len(Counter(phi).keys())
#print len(np.unique(phi))
dp = abs(phi[0]-phi[1])
Area = np.float64(.5*m.Ncells)
#Nparts = 1000.
#for p in phi:
# freq = SD.frequency(p,dp)*Area
# r = reverse_phi(p)/m.cellsize
# #print SD.frequency(p,dp)*Area,np.pi*r**2.
# freq = int(freq/(np.pi*r**2.))
# for f in range(freq):
# g = pss.Grain(r)
# G.add(g,x=0,y=0)
#print G.details()
#print G.PSDdetails()
#G.plotPSD()
#fitted normal dist to regolith data has: mu = 3.5960554191 and sigma = 2.35633102167
fig = plt.figure()
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
phi = np.linspace(-4,9,100)
#ax1.plot(phi,PDF(phi)*100.,linestyle='--',label='original sample fit',color='k')
#ax2.plot(phi,CDF(phi)*100.,linestyle='--',color='k')
mu = 3.5960554191
sigma = 2.35633102167
ax1.plot(phi,normalPDF(phi,mu,sigma)*100.,color='grey',label='$\mu$, $\sigma$')
ax2.plot(phi,normalCDF(phi,mu,sigma)*100.,color='grey')
#sigma /= 2.
#ax1.plot(phi,normalPDF(phi,mu,sigma)*100.,color='orange',label='$\mu$, $\sigma/2$')
#ax2.plot(phi,normalCDF(phi,mu,sigma)*100.,color='orange')
#sigma *= 4.
#ax1.plot(phi,normalPDF(phi,mu,sigma)*100.,color='darkred',label='$\mu$, $2\sigma$')
#ax2.plot(phi,normalCDF(phi,mu,sigma)*100.,color='darkred')
#mu += 1.
#sigma = 2.35633102167
#ax1.plot(phi,normalPDF(phi,mu,sigma)*100.,color='b',label='$\mu+1$, $\sigma$')
#ax2.plot(phi,normalCDF(phi,mu,sigma)*100.,color='b')
for ax in [ax1,ax2]:
ax.axvline(x=-0.3,color='k',linestyle='-.',label='Max grain size')
p = -np.log2(8.*2.5e-3)
ax.axvline(x=p,color='.0',linestyle=':',label = '4cppr')
p = -np.log2(6.*2.5e-3)
ax.axvline(x=p,color='.2',linestyle=':',label = '3cppr')
p = -np.log2(4.*2.5e-3)
ax.axvline(x=p,color='.4',linestyle=':',label = '2cppr')
p = -np.log2(2.*2.5e-3)
ax.axvline(x=p,color='.6',linestyle=':',label = '1cppr')
p = -np.log2(1.*2.5e-3)
ax.axvline(x=p,color='orange',linestyle=':',label = '1/2cppr')
ax1.legend(loc='best',fontsize='small')
ax2.set_xlabel(r'$\phi = -log_2(D)$, D is in mm')
ax1.set_ylabel(r'Area.\%')
ax2.set_ylabel(r'Cumulative Area.\%')
plt.show()
#fig.savefig('fourPSDs.pdf')
```
#### File: pySALESetup_legacy/pySALESetup/pssfunctions.py
```python
from __future__ import print_function
from __future__ import absolute_import
import random
import warnings
import numpy as np
from PIL import Image
from math import ceil
try:
import cPickle as pickle
except:
import pickle
import scipy.special as scsp
from scipy import stats as scst
import matplotlib.pyplot as plt
import matplotlib.path as mpath
from collections import Counter, OrderedDict
from mpl_toolkits.axes_grid1 import make_axes_locatable
from . import domainclasses as psd
#from . import grainclasses as psg
#from . import objectclasses as pso
def quickFill(grain,target,volfrac,ensemble,
material=1,method='insertion',xbnds=None,ybnds=None,nooverlap=False,mattargets=None):
"""
Fills a region of a mesh with the same grain instance. Most input values are identical to insertRandomly/insertRandomwalk
because of this.
Function does not have compatibility with a material number of 0 yet, but it does work with -1. I.e. it can handle
complete overwriting but not selective overwriting.
Args:
grain: Grain instance
target: Mesh instance
volfrac: float
ensemble: Ensemble instance
material: integer is one of -1, 1, 2, 3, 4, 5, 6, 7, 8, 9
method: string
xbnds: list (len == 2) or None
ybnds: list (len == 2) or None
nooverlap: bool
mattargets: list or None
Returns:
Nothing returned
"""
assert method == 'insertion' or method == 'random walk', 'ERROR: quickFill only supports insertion and random walk methods'
assert material != 0, 'ERROR: function does not have advanced pore creation capability yet. This is to be integrated at a later date'
if xbnds is None: xbnds = target._setboundsWholemesh(axis=0)
if ybnds is None: ybnds = target._setboundsWholemesh(axis=1)
# Set current volume fraction
if material == -1 or material == 0:
current_volume_fraction = 1.-target.vfrac(xbounds=xbnds[:],ybounds=ybnds[:])
else:
current_volume_fraction = target.vfrac(xbounds=xbnds[:],ybounds=ybnds[:])
if material == 0:
pass
# advanced pore-creation capability has not yet been integrated into this function
# more will be added at a later date.
# insert grains until target volume fraction achieved
while current_volume_fraction <= volfrac:
# insert randomly into a specified region as material 1 for now
if method == 'insertion':
grain.insertRandomly(target,material,xbounds=xbnds[:],ybounds=ybnds[:],nooverlap=nooverlap)
elif method == 'random walk':
grain.insertRandomwalk(target,material,xbounds=xbnds[:],ybounds=ybnds[:])
# add Grain instance to Ensemble
ensemble.add(grain)
# calculate the new volume fraction in the new region
prev_vfrac=current_volume_fraction
if material == -1 or material == 0:
current_volume_fraction = 1. - target.vfrac(xbounds=xbnds[:],ybounds=ybnds[:])
else:
current_volume_fraction = target.vfrac(xbounds=xbnds[:],ybounds=ybnds[:])
return
def polygon_area(X,Y):
"""
    Returns the exact area of a polygon (via the shoelace formula)
Args:
X: [x coords]
Y: [y coords]
Returns:
A: Scalar float
"""
N = np.size(X)
assert N==np.size(Y), "ERROR: x and y index arrays are unequal in size"
A = 0
for i in range(1,N):
A += (X[i-1]*Y[i]-X[i]*Y[i-1])*.5
return abs(A)
def combine_meshes(mesh2,mesh1,axis=1):
"""
Combines two mesh classes, either horizontally or vertically and creates a new Mesh instance
for the result. Material fractions are carried over, as are velocities, and the 'mesh' param.
Args:
mesh2: Mesh instance
mesh1: Mesh instance
Returns:
New: new Mesh instance
"""
assert mesh1.cellsize == mesh2.cellsize, "ERROR: meshes use different cellsizes {} & {}".format(mesh1.cellsize,mesh2.cellsize)
    if axis == 0: assert mesh1.y == mesh2.y, "ERROR: Horizontal merge; meshes must have same y; not {} & {}".format(mesh1.y,mesh2.y)
    if axis == 1: assert mesh1.x == mesh2.x, "ERROR: Vertical merge; meshes must have same x; not {} & {}".format(mesh1.x,mesh2.x)
if axis == 0:
Xw = mesh1.x + mesh2.x
Yw = mesh1.y
if axis == 1:
Yw = mesh1.y + mesh2.y
Xw = mesh1.x
# cellsize and mixed not important here because all material already placed and output is independent of cellsize
New = psd.Mesh(X=Xw,Y=Yw,cellsize=mesh1.cellsize,mixed=False,label=mesh2.name+mesh1.name)
New.materials = np.concatenate((mesh1.materials,mesh2.materials),axis=1+axis)
New.mesh = np.concatenate((mesh1.mesh,mesh2.mesh),axis=axis)
New.VX = np.concatenate((mesh1.VX,mesh2.VX),axis=axis)
New.VY = np.concatenate((mesh1.VY,mesh2.VY),axis=axis)
return New
def populateMesh(mesh,ensemble):
"""
Populate a mesh, given an Ensemble.
"""
# use information stored in the ensemble to repopulate domain
# except NOW we can use the optimal materials from optimise_materials!
for x,y,g,m in zip(ensemble.xc,ensemble.yc,ensemble.grains,ensemble.mats):
g.remove()
g.place(x,y,m,mesh)
return mesh
def MeshfromPSSFILE(fname='meso_m.iSALE.gz',cellsize=2.5e-6,NumMats=9):
"""
Generate a Mesh instance from an existing meso output file. NB NumMats
    MUST be set explicitly because the function does not yet have the capability
    to read it from the file.
Args:
fname: string
cellsize: float
NumMats: int
"""
    # import all fields from input file, NB coords must be int;
    # numpy's genfromtxt & loadtxt handle .gz files implicitly
CellInd = np.genfromtxt(fname,skip_header=1,usecols=(0,1)).astype(int)
CellVel = np.genfromtxt(fname,skip_header=1,usecols=(2,3))
# No. mats not necessarily 9, so use string to specify how many cols
matcols = range(4,4+NumMats)
CellMat = np.genfromtxt(fname,skip_header=1,usecols=(matcols))
# Extract mesh size from index cols. Indices start at 0, so want + 1
# for actual size
nx = int(np.amax(CellInd[:,1])+1)
ny = int(np.amax(CellInd[:,0])+1)
# Create the mesh instance & use cellsize as not stored in input file!!
mesh = psd.Mesh(X=nx,Y=ny,cellsize=cellsize)
# initialise a counter (k) and cycle through all coords
k = 0
for j,i in CellInd:
# At each coordinate cycle through all the materials and assign to mesh
for m in range(NumMats):
mesh.materials[m,i,j] = CellMat[k,m]
# additionally assign each velocity as needed
mesh.VX[i,j] = CellVel[k,0]
mesh.VY[i,j] = CellVel[k,1]
# increment counter
k += 1
# return Mesh instance at end
return mesh
def MeshfromBMP(imname,cellsize=2.e-6):
"""
Function that populates a Mesh instance from a bitmap, or similar.
When opened by PIL the result MUST be convertible to a 2D array of
grayscale values (0-255).
Different shades are treated as different materials, however, white is ignored
and treated as 'VOID'.
NB bmp can NOT have colour info or an alpha channel.
Args:
        imname: string; path to the image file, openable by PIL
        cellsize: float; equivalent to GRIDSPC, the physical size of each cell
Returns:
mesh: Mesh
"""
im = Image.open(imname)
B = np.asarray(im)
A = np.copy(B)
A = np.rot90(A,k=3)
nx, ny = np.shape(A)
#white is considered 'VOID' and should not be included
ms = np.unique(A[A!=255])
Nms = np.size(ms)
assert Nms <= 9, "ERROR: More than 9 different shades present (apart from white = void)"
mesh = psd.Mesh(nx,ny,cellsize=cellsize)
m = 0
for c in ms:
mesh.materials[m][A==c] = 1.
m += 1
return mesh
def grainfromVertices(R=None,fname='shape.txt',mixed=False,eqv_rad=10.,rot=0.,radians=True,min_res=4):
"""
    This function generates a grain mini-mesh (mesh_) from a text file containing a list of its vertices
    in normalised coordinates over a square grid of dimensions 1 x 1, centred on (0,0).
    The coordinates must be of the form:
j i
x x
x x
x x
. .
. .
. .
    and the last coordinate MUST be identical to the first. Alternatively, the function will take
    an array R of the same form instead of a file.
    Args:
        R:        list or array of [j, i] vertex pairs, or None to read them from fname
        fname:    string; path to the text file of vertices (used when R is None)
        mixed:    logical; partially filled cells on or off
        eqv_rad:  float; equivalent radius of the grain, in cells
        rot:      float; rotation of the grain
        radians:  logical; if False, rot is interpreted in degrees
        min_res:  int; Minimum resolution allowed for a grain
Returns:
mesh_: square array with filled cells, with value 1
"""
if radians is not True: rot = rot*np.pi/180.
assert eqv_rad > 0, "ERROR: Equivalent radius must be greater than 0!"
# If no coords provided use filepath
if R is None:
J_ = np.genfromtxt(fname,comments='#',usecols=0,delimiter=',')
I_ = np.genfromtxt(fname,comments='#',usecols=1,delimiter=',')
# else use provided coords
elif type(R) == list:
R = np.array(R)
if type(R) == np.ndarray:
J_ = R[:,0]
I_ = R[:,1]
# if coords not yet normalised; normalise them onto the range -1. to 1.
    if np.amax(abs(I_)) > 1. or np.amax(abs(J_)) > 1.:
MAXI = np.amax(I_)
MINI = np.amin(I_)
MAXJ = np.amax(J_)
MINJ = np.amin(J_)
diffI = MAXI - MINI
diffJ = MAXJ - MINJ
# scale coords onto whichever coordinates have the largest difference
if diffI>diffJ:
I_ = 2.*(I_-MINI)/(MAXI-MINI) - 1.
J_ = 2.*(J_-MINI)/(MAXI-MINI) - 1.
else:
I_ = 2.*(I_-MINJ)/(MAXJ-MINJ) - 1.
J_ = 2.*(J_-MINJ)/(MAXJ-MINJ) - 1.
# last point MUST be identical to first; append to end if necessary
if J_[0] != J_[-1]:
J_ = np.append(J_,J_[0])
I_ = np.append(I_,I_[0])
# equivalent radius is known and polygon area is known
# scale shape as appropriate
radius = np.sqrt(polygon_area(I_,J_)/np.pi)
lengthscale = eqv_rad/radius
J_ *= lengthscale
I_ *= lengthscale
# rotate points according by angle rot
theta = rot
ct = np.cos(theta)
st = np.sin(theta)
J = J_*ct - I_*st
I = J_*st + I_*ct
# find max radii from centre and double it for max width
radii = np.sqrt(I**2+J**2)
maxwidth = int(2*np.amax(radii)+2)
maxwidth = max(maxwidth,min_res)
if maxwidth%2!=0: maxwidth+=1
# Add double max rad + 1 for mini mesh dims
mesh_ = np.zeros((maxwidth,maxwidth))
# define ref coord as 0,0 and centre to mesh_ centre
qx = 0.
qy = 0.
y0 = float(maxwidth/2.)
x0 = y0
I += x0
J += y0
path = mpath.Path(np.column_stack((I,J)))
for i in range(maxwidth):
for j in range(maxwidth):
in_shape = path.contains_point([i+.5,j+.5])
if in_shape and mixed == False: mesh_[i,j] = 1.
elif in_shape and mixed == True:
for ii in np.arange(i,i+1,.1):
for jj in np.arange(j,j+1,.1):
in_shape2 = path.contains_point([ii+.05,jj+.05])
if in_shape2: mesh_[i,j] += .01
return mesh_
def grainfromCircle(r_):
"""
    This function generates a circle within the base mesh0. It very simply converts
    each point to a radial coordinate from the origin (the centre of the shape).
    It then assesses whether that radius is less than the radius of the circle in question;
    if it is, the cell is filled.
Args:
r_: radius of the circle, origin is assumed to be the centre of the mesh0
Returns:
mesh0: square array of floats
"""
assert r_>0, "ERROR: Radius must be greater than 0!"
N = int(2.*ceil(r_)+2.)
mesh0 = np.zeros((N,N))
x0 = r_ + 1.
y0 = r_ + 1.
for j in range(N):
for i in range(N):
xc = 0.5*(i + (i+1)) - x0
yc = 0.5*(j + (j+1)) - y0
r = (xc/r_)**2. + (yc/r_)**2.
if r<=1:
mesh0[j,i] = 1.0
return mesh0
def grainfromEllipse(r_,a_,e_,radians=True):
"""
    This function generates an ellipse in mesh0 from an equivalent radius of r_,
    a rotation of a_ and an eccentricity of e_. It otherwise works on
    principles similar to those used in grainfromCircle.
    Args:
        r_ : float; the equivalent radius of a circle with the same area
        a_ : float; the angle of rotation (in radians, unless radians=False)
        e_ : float; the eccentricity of the ellipse
Returns:
mesh0: square array of floats
"""
if radians is not True: a_ = a_*np.pi/180.
assert e_ >= 0 and e_ < 1, "ERROR: eccentricity can not be less than 0 and must be less than 1; {} is not allowed".format(e_)
assert r_>0, "ERROR: Radius must be greater than 0!"
# A is the semi-major radius, B is the semi-minor radius
A = r_/((1.-e_**2.)**.25)
B = A*np.sqrt(1.-e_**2.)
# Make mini mesh bigger than semi-major axis
N = int(2.*ceil(A)+2.)
mesh0 = np.zeros((N,N))
# Centre of ellipse
x0 = A + 1
y0 = A + 1
for j in range(N):
for i in range(N):
xc = 0.5*(i + (i+1)) - x0
yc = 0.5*(j + (j+1)) - y0
xct = xc * np.cos(a_) - yc * np.sin(a_)
yct = xc * np.sin(a_) + yc * np.cos(a_)
r = (xct/A)**2. + (yct/B)**2.
if r<=1:
mesh0[j,i] = 1.
return mesh0
```
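A quick standalone check of the shoelace formula that `polygon_area` implements above (this sketch is illustrative only and deliberately avoids importing pySALESetup; the closed unit square is an assumed test case):

```python
import numpy as np

def shoelace_area(X, Y):
    # 0.5 * |sum over consecutive vertices of (x_{i-1}*y_i - x_i*y_{i-1})|
    return abs(sum((X[i-1]*Y[i] - X[i]*Y[i-1]) * 0.5 for i in range(1, len(X))))

# closed unit square: the last vertex repeats the first, as grainfromVertices requires
X = np.array([0., 1., 1., 0., 0.])
Y = np.array([0., 0., 1., 1., 0.])
print(shoelace_area(X, Y))  # -> 1.0
```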
|
{
"source": "jgd10/PySALESetup",
"score": 2
}
|
#### File: PySALESetup/PySALESetup/mesh.py
```python
from PySALESetup.objects import PySALEObject, Velocity
from PySALESetup.functions import get_figure_from_ax
from collections import namedtuple
import numpy as np
from dataclasses import dataclass
from shapely.geometry import Point
from typing import Iterable, Optional, Tuple, Union, List, Dict
import matplotlib.pyplot as plt
from pathlib import Path
import warnings
import gzip
from enum import Enum
from mpl_toolkits.axes_grid1 import make_axes_locatable
class Region(Enum):
NORTH = 1
SOUTH = 2
EAST = 3
WEST = 4
ExtensionZoneFactor = namedtuple('ExtensionZoneFactors',
['multiplier', 'max_cell_size'])
class ExtensionZone:
"""iSALE Extension zone object.
Extension zones can be bolted onto the main mesh (aka the
high resolution zone). These can only have one material,
a fixed velocity and a specific depth. Up to four can be
added at a time and the order they appear in the PySALEMesh
object is the order they will be applied.
.. code-block::
North
|
|
West----MESH----East
|
|
South
"""
def __init__(self, depth: int, region: Region, cell_size: float,
factor: ExtensionZoneFactor = None):
self.depth = depth
self.region = region
self.cell_size = cell_size
if factor is None:
factor = ExtensionZoneFactor(1, cell_size)
self.factor = factor
@property
def length(self) -> float:
"""Physical length of the zone.
Returns
-------
length : float
"""
return self.calculate_zone_length()
def calculate_zone_length(self) -> float:
"""Calculate physical length of the zone.
Returns
-------
float
"""
total_length = 0
for i in range(self.depth):
if self.cell_size < self.factor.max_cell_size:
self.cell_size *= self.factor.multiplier
total_length += self.cell_size
return total_length
@dataclass
class Cell:
"""PySALEMesh cell dataclass object.
Contains cell information including the physical centroid of a cell
as a shapely.geometry.Point object; the indices of the cell in the
mesh as integers; the material in the cell as an integer;
the velocity of the cell as a pss Velocity object.
"""
point: Point
i: int
j: int
material: int = None
velocity: Velocity = Velocity(0., 0.)
class PySALEMesh:
"""Mesh object of discrete cells which polygons can be projected on.
Examples
--------
Once creating a polygon (or polygons) to represent your simulation
it must then be projected onto/applied to this mesh object in order
to save the resulting mesh to an iSALE input file.
Here we create a void polygon that is 10 m x 15 m and populate
it to 40% area fraction with circles of radius 1 m. Then we
optimise the material distribution and apply the polygon to a
mesh object. Then we can save the file.
>>> from PySALESetup import PySALEObject, PySALEDomain, PySALEMesh
>>> main = PySALEObject([(0, 0), (0, 15), (10, 15), (10, 0)])
>>> main.set_material(0)
>>> domain = PySALEDomain(main)
>>> circle = PySALEObject.generate_ellipse([0., 0.], 1, 1, 0.)
>>> domain.fill_with_random_grains_to_threshold(circle, 40)
>>> domain.optimise_materials()
>>> mesh = PySALEMesh(100, 150, cell_size=.1)
>>> mesh.project_polygons_onto_mesh([main])
>>> mesh.save()
"""
def __init__(self,
x_cells: int,
y_cells: int,
cell_size: float = 2.e-6,
extension_zones: List[ExtensionZone] = None,
cylindrical_symmetry: bool = False,
collision_index: int = 0,
origin: Tuple[float, float] = (0., 0.)):
"""Discrete rectangular mesh construction.
Parameters
----------
x_cells : int
y_cells : int
cell_size : float
extension_zones : Optional[List[ExtensionZone]]
cylindrical_symmetry : bool
collision_index : int
origin : Tuple[float, float] - The origin for the
coordinate system.
"""
self.x = x_cells
self.y = y_cells
self._origin = origin
self.cell_size = cell_size
self._x_range = None
self._y_range = None
self._cells = None
self._material_meshes = None
self._velocities = None
self._extension_zones = extension_zones
self._extension_factor = None
self._y_physical_length = None
self._x_physical_length = None
self.cylindrical_symmetry = cylindrical_symmetry
self._collision = collision_index
def __str__(self):
ezs = {Region.NORTH: 'N', Region.EAST: 'E',
Region.WEST: 'W', Region.SOUTH: 'S'}
extensions = [ezs[z.region] for z in self.extension_zones]
s = '[' + ','.join(extensions) + ']'
return f'PySALEMesh({self.x}, {self.y}, {self.cell_size}, ' \
f'extension_zones={s}, origin={self.origin})'
@property
def origin(self) -> Tuple[int, int]:
"""The origin coordinate of the mesh.
All dimensions are relative to this coordinate.
Defaults to (0, 0). This coordinate is in the coordinate system
of the mesh before the origin is applied. This has the 0, 0
at the bottom-left corner of the *high-resolution* zone.
Returns
-------
origin : Tuple[int, int]
"""
return self._origin
@classmethod
def from_dimensions(cls, dimensions: Tuple[float, float],
cell_size: float,
extensions: Optional[List[ExtensionZone]] = None,
origin: Tuple[float, float] = (0., 0.)) \
-> 'PySALEMesh':
"""Given high-res zone dimensions and cell size, return PySALEMesh.
Parameters
----------
dimensions : Tuple[float, float] X - Y Dimensions of the high-res
region in metres
cell_size : float Dimension of a high-res cell in the mesh
extensions : List[ExtensionZone] List of all the extension zones
that should be applied
origin : Tuple[float, float] The coordinate to be considered the
origin. This coordinate is in the same coordinate
system as the default, where the origin is the bottom
left of the high-res zone.
Returns
-------
PySALEMesh instance.
"""
x_cells = round(dimensions[0] / cell_size)
y_cells = round(dimensions[1] / cell_size)
mesh = cls(x_cells, y_cells, cell_size,
extension_zones=extensions,
origin=origin)
return mesh
def _find_extension_factor(self):
if self._extension_zones:
assert all([e.cell_size == self.cell_size
for e in self._extension_zones]), \
"All extension zones must have the same cell size!"
self._extension_factor = self._extension_zones[0].factor
else:
self._extension_factor = \
ExtensionZoneFactor(1., self.cell_size)
@property
def x_physical(self):
"""The physical x-length of the mesh.
Returns
-------
length : float
"""
if self._x_physical_length is None:
self._populate_n_range()
return self._x_physical_length
@property
def y_physical(self):
"""The physical y-length of the mesh.
Returns
-------
length : float
"""
if self._y_physical_length is None:
self._populate_n_range()
return self._y_physical_length
@property
def objresh(self) -> int:
"""iSALE input parameter; half the **height** of the mesh.
Despite being the "horizontal" object resolution this refers to
the height of the mesh. This is because there IS an OBJRESV
but you only need to use it if your object does not cover
the full width of the mesh. If no OBJRESV is present, its value
defaults to OBJRESH. When using PySALESetup-created input files
we never want anything less than the full width of the mesh so
it is simpler to leave it out and use OBJRESH instead. In
PySALESetup you can easily create objects of any size or shape
you want!
Notes
-----
If the number of cells is not divisible by 2, this property will
guarantee that the returned value is rounded up, rather than
down.
E.g. a mesh width of 100 would return a value of 50.
A mesh width of 99 would *also* return a value of 50.
98 would return a value of 49, and so on.
Returns
-------
objresh : int
"""
if self.y % 2 == 0:
objresh = int(self.y / 2)
else:
objresh = int((self.y // 2) + 1)
return objresh
@property
def vertical_offset(self) -> int:
"""Half the vertical depth of the mesh, rounded down, in cells.
Returns
-------
offset : int
"""
if self.y % 2 == 0:
offset = int(self.y / 2)
else:
offset = int((self.y-1) / 2)
return offset
@property
def max_cell_size(self) -> float:
"""Return the maximum allowed cell size according to extensions.
No extensions returns a max cell size identical to the
mesh cell size.
Returns
-------
max_cell_size : float
"""
max_cell_size = self.cell_size
if self.extension_zones:
max_cell_size = self.extension_factor.max_cell_size
return max_cell_size
@property
def collision_site(self) -> int:
"""The vertical collision location in the mesh, in cells.
Defaults to 0.
Returns
-------
collision_site : int
"""
return self._collision
@collision_site.setter
def collision_site(self, value: int):
self._collision = value
@property
def x_range(self) -> np.ndarray:
"""Array of the cell x-positions in the mesh.
Returns
-------
x_range : float
"""
if self._x_range is None:
self._populate_n_range()
self._set_origin()
return self._x_range
@property
def y_range(self) -> np.ndarray:
"""Array of the cell y-positions in the mesh.
Returns
-------
x_range : float
"""
if self._y_range is None:
self._populate_n_range()
self._set_origin()
return self._y_range
def _set_origin(self):
self._y_range -= self._origin[1]
self._x_range -= self._origin[0]
return
def get_geometric_centre(self) -> Tuple[float, float]:
"""Return the geometric centre of the mesh in physical coords.
Returns
-------
centre : Tuple[float, float]
"""
x = np.ptp(self.x_range)*.5 + self.x_range[0]
y = np.ptp(self.y_range)*.5 + self.y_range[0]
return x, y
@property
def material_meshes(self) -> Dict[int, np.ndarray]:
"""Dictionary of numpy arrays representing material fill,
indexed by material number.
Returns
-------
meshes : Dict[int, np.ndarray]
"""
if self._material_meshes is None:
self._populate_material_meshes()
return self._material_meshes
@property
def velocities(self) -> Dict[str, np.ndarray]:
"""Velocity arrays in the mesh in a dict indexed by axis.
Returns
-------
velocities : Dict[str, np.ndarray]
"""
if self._velocities is None:
self._populate_velocities()
return self._velocities
def _populate_n_range(self):
        # NOTE: these range-building steps are a bit off; the y-range seems
        # to work now but the x-range may still be broken. The two branches
        # mirror each other and should probably be combined.
y_length = self.y
x_length = self.x
self._y_physical_length = self.y * self.cell_size
self._x_physical_length = self.x * self.cell_size
zones = {zone.region: zone for zone in self.extension_zones}
highres_xstart = 0
highres_ystart = 0
highres_xend = self.x
highres_yend = self.y
south_range = [-0.5 * self.cell_size]
west_range = [-0.5 * self.cell_size]
if Region.SOUTH in zones:
zone = zones[Region.SOUTH]
y_length += zone.depth
highres_ystart = zone.depth
highres_yend += zone.depth
south_range = self._insert_extension_zone(zone)
if Region.WEST in zones:
zone = zones[Region.WEST]
x_length += zone.depth
highres_xstart = zone.depth
highres_xend += zone.depth
west_range = self._insert_extension_zone(zone)
if Region.NORTH in zones:
zone = zones[Region.NORTH]
y_length += zone.depth
highres_yend = highres_ystart + self.y + 1
north_range = self._insert_extension_zone(zone)
if Region.EAST in zones:
zone = zones[Region.EAST]
x_length += zone.depth
highres_xend = highres_xstart + self.x + 1
east_range = self._insert_extension_zone(zone)
self._y_range = np.zeros((y_length))
self._x_range = np.zeros((x_length))
highres_yend_pos = (self.y+.5) * self.cell_size
highres_xend_pos = (self.x+.5) * self.cell_size
if Region.SOUTH in zones:
self._y_range[:highres_ystart] = south_range
highres_ystart_pos = np.amax(south_range)
highres_yend_pos += highres_ystart_pos
if Region.WEST in zones:
self._x_range[:highres_xstart] = west_range
highres_xstart_pos = np.amax(west_range)
highres_xend_pos += highres_xstart_pos
self._y_range[highres_ystart:highres_yend] = \
self._generate_highres_zone(highres_yend,
highres_ystart,
south_range)
self._x_range[highres_xstart:highres_xend] = \
self._generate_highres_zone(highres_xend,
highres_xstart,
west_range)
if Region.NORTH in zones:
self._y_range[highres_yend-1:] = north_range
if Region.EAST in zones:
self._x_range[highres_xend-1:] = east_range
return self._x_range, self._y_range
def _generate_highres_zone(self,
highres_end,
highres_start,
range_):
highres_zone = [np.amax(range_) + i * self.cell_size
for i in range(1, highres_end-highres_start+1)]
return np.array(highres_zone)
def _populate_velocities(self):
x_cells = self.x_range.size
y_cells = self.y_range.size
self._velocities = {r: np.zeros((x_cells, y_cells))
for r in ['x', 'y']}
def _populate_material_meshes(self):
x_cells = self.x_range.size
y_cells = self.y_range.size
self._material_meshes = {i: np.zeros((x_cells, y_cells))
for i in range(1, 9 + 1)}
@property
def cells(self) -> List[Cell]:
"""List of all Cell objects in the mesh.
The mesh is represented by a collection of Cell objects,
each of which represents a single cell in the mesh. These Cell
objects are namedtuples containing all the information needed
about that cell, including its indices, geometric centre,
velocity, and material.
Returns
-------
cells : List[Cell]
"""
if self._cells is None:
self._populate_cells()
return self._cells
def _populate_cells(self):
self._cells = [Cell(Point(x, y), i, j, None, Velocity(0., 0.))
for i, x in enumerate(self.x_range)
for j, y in enumerate(self.y_range)]
def project_polygons_onto_mesh(self,
polygons: List[PySALEObject]) -> None:
"""Project a polygon (and all its children) onto the mesh.
Method calls itself recursively on all children of the polygon.
        The children at the bottom of the hierarchy get priority. Once
a cell is populated with material, new material will NOT
overwrite it.
Parameters
----------
polygons : List[PySALEObject]
Returns
-------
None
Examples
--------
        Here we create a solid circle of radius 5 m and populate
it to 40% area fraction with circles of radius 0.5 m. Then we
optimise the material distribution and apply the polygon to a
mesh object. Then we plot the result.
>>> from PySALESetup import PySALEObject, PySALEDomain, PySALEMesh
>>> import matplotlib.pyplot as plt
>>> main = PySALEObject.generate_ellipse([5., 5.], 5., 5., 0.)
>>> main.set_material(1)
>>> domain = PySALEDomain(main)
>>> circle = PySALEObject.generate_ellipse([0., 0.], .5, .5, 0.)
>>> domain.fill_with_random_grains_to_threshold(circle, 40)
>>> domain.optimise_materials([2, 3, 4, 5])
>>> mesh = PySALEMesh(100, 100, cell_size=.1)
>>> mesh.project_polygons_onto_mesh([main])
>>> mesh.plot_materials()
>>> plt.show()
"""
for i, cell in enumerate(self.cells):
if cell.material is None:
self._project_polygons_onto_cell(cell, polygons)
def _project_polygons_onto_cell(self, cell: Cell, polygons):
for polygon in polygons:
if cell.point.within(polygon):
if polygon.children:
self._project_polygons_onto_cell(
cell,
polygon.children
)
if cell.material is None:
self._fill_cell(cell, polygon)
break
def _fill_cell(self, cell: Cell, geometry: PySALEObject):
"""Fill a mesh cell with the properties of a given polygon.
Parameters
----------
cell: Cell
geometry : PySALEObject
Returns
-------
None
"""
if geometry.material == 0:
self._void_cell(cell)
else:
self.material_meshes[geometry.material][cell.i, cell.j] = 1.
self.velocities['x'][cell.i, cell.j] = geometry.velocity.x
self.velocities['y'][cell.i, cell.j] = geometry.velocity.y
cell.material = geometry.material
cell.velocity = geometry.velocity
def _void_cell(self, cell: Cell):
"""Fill a mesh cell with void.
Parameters
----------
cell : Cell
Returns
-------
None
"""
for number in self.material_meshes.keys():
self.material_meshes[number][cell.i, cell.j] = 0.
self.velocities['x'][cell.i, cell.j] = 0.
self.velocities['y'][cell.i, cell.j] = 0.
cell.material = 0
cell.velocity = Velocity(0., 0.)
def plot_cells(self, ax: plt.Axes = None):
"""Plot the cell centres of the mesh.
Parameters
----------
ax : plt.Axes
Returns
-------
fig, ax : Tuple[plt.Axes, plt.figure]
"""
ax, fig = get_figure_from_ax(ax)
xi, yi = np.meshgrid(self.x_range, self.y_range)
ax.scatter(xi, yi, marker='.', color='k')
self._set_plot_lims_and_labels(ax)
ax.set_title('Cell centres')
return fig, ax
def plot_materials(self, ax: plt.Axes = None,
cmap: str = 'rainbow') -> Tuple[plt.Figure,
plt.Axes]:
"""Plot the materials in the mesh using matplotlib.
If no axes are provided, axes and a figure are made. Otherwise,
the given axes are used and returned along with the associated
figure object.
Parameters
----------
ax : plt.Axes
cmap: str
Returns
-------
fig, ax : Tuple[plt.Axes, plt.figure]
Examples
--------
Here we construct a simple 2D meteorite impacting flat ground.
Once our objects have been created and applied, we use
        plot_materials to view the mesh. Although we use `plt.show()`
        here to visualise the result, you could just as easily
        save the figure instead.
>>> from PySALESetup import PySALEObject
>>> from PySALESetup import PySALEMesh
>>> import matplotlib.pyplot as plt
>>> impactor = PySALEObject.generate_ellipse([5., 8.], 2., 2., 0.)
>>> impactor.set_material(1)
        >>> impactor.set_velocity(0., -1000.)
>>> target = PySALEObject([(0, 0), (0, 6), (10, 6), (10, 0)])
>>> target.set_material(3)
>>> mesh = PySALEMesh(100, 100, cell_size=.1)
>>> mesh.project_polygons_onto_mesh([impactor, target])
>>> mesh.plot_materials()
>>> plt.show()
"""
ax, fig = get_figure_from_ax(ax)
xi, yi = np.meshgrid(self.x_range, self.y_range)
for i in range(1, 9+1):
matter = np.copy(self.material_meshes[i])*i
matter = np.ma.masked_where(matter == 0., matter)
p = ax.pcolormesh(xi,
yi,
matter.T,
cmap=cmap,
vmin=1,
vmax=9,
shading='auto')
self._set_plot_lims_and_labels(ax)
self._add_colorbar(ax, p, 'Material No.')
ax.set_title('Materials')
return fig, ax
@staticmethod
def _add_colorbar(ax, graph_object, label):
# create an axes on the right side of ax. The width of cax will be 5%
# of ax and the padding between cax and ax will be fixed at 0.05 inch.
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
ax, fig = get_figure_from_ax(ax)
cb = fig.colorbar(graph_object, cax=cax)
cb.set_label(label)
return cb
def _set_plot_lims_and_labels(self, ax):
ax.set_xlim(np.amin(self.x_range), np.amax(self.x_range))
ax.set_ylim(np.amin(self.y_range), np.amax(self.y_range))
ax.set_xlabel('$x$ [m]')
ax.set_ylabel('$y$ [m]')
def plot_velocities(self,
ax1: Optional[plt.Axes] = None,
ax2: Optional[plt.Axes] = None,
cmap: str = 'viridis') -> Tuple[plt.Figure,
plt.Figure,
plt.Axes,
plt.Axes]:
"""Plot the velocities of cells.
If axes are provided they are used. If any are not provided,
they are created. Either way the axes and figures are returned.
Parameters
----------
ax1 : Optional[plt.Axes]
ax2 : Optional[plt.Axes]
cmap : str
Returns
-------
fig1, fig2, ax1, ax2 : Tuple[plt.Figure, plt.Figure,
plt.Axes, plt.Axes]
Examples
--------
>>> from PySALESetup import PySALEObject
>>> from PySALESetup.mesh import PySALEMesh
>>> import matplotlib.pyplot as plt
>>> impactor = PySALEObject.generate_ellipse([5., 8.], 2., 2., 0.)
>>> impactor.set_material(1)
        >>> impactor.set_velocity(0., -1000.)
>>> target = PySALEObject([(0, 0), (0, 6), (10, 6), (10, 0)])
>>> target.set_material(3)
>>> mesh = PySALEMesh(100, 100, cell_size=.1)
>>> mesh.project_polygons_onto_mesh([impactor, target])
>>> mesh.plot_materials()
>>> plt.show()
"""
ax1, fig1 = get_figure_from_ax(ax1)
ax2, fig2 = get_figure_from_ax(ax2)
xi, yi = np.meshgrid(self.x_range, self.y_range)
        # copy so the summation does not modify material 1's array in place
        matter_sum = np.copy(self.material_meshes[1])
for i in range(2, 9+1):
matter_sum += self.material_meshes[i]
vx = np.ma.masked_where(matter_sum == 0., self.velocities['x'])
vy = np.ma.masked_where(matter_sum == 0., self.velocities['y'])
for ax, v in [(ax1, vx), (ax2, vy)]:
p = ax.pcolormesh(xi, yi, v.T, cmap=cmap,
vmin=np.amin(v),
vmax=np.amax(v),
shading='auto')
self._set_plot_lims_and_labels(ax)
self._add_colorbar(ax, p, 'Velocity [m/s]')
ax1.set_title('Velocity - x')
ax2.set_title('Velocity - y')
return fig1, fig2, ax1, ax2
@staticmethod
def _cell_to_row(cell: Cell, materials: Iterable[int]) -> str:
material_columns = ['1.000' if cell.material == m
else '0.000' for m in materials]
row = f'{cell.i} {cell.j} ' \
+ ' '.join(material_columns) \
+ f' {cell.velocity.x:.2f} {cell.velocity.y:.2f}\n'
return row
@property
def material_numbers(self) -> List[int]:
"""List of non-zero materials in the mesh.
Returns
-------
numbers : List[int]
"""
return [key for key, value in self.material_meshes.items()
if np.sum(value) > 0.]
def save(self, file_name: Path = Path('./meso_m.iSALE'),
compress: bool = False) -> None:
"""Save the current mesh to a meso_m.iSALE file.
This compiles the integer indices of each cell,
as well as the material in them. It saves all this to the file
specified by the user, which defaults to ``meso_m.iSALE`` in
the user's current directory.
Parameters
----------
file_name : Path
compress : bool
            Compress the resulting text file using gzip.
            Users are expected to add their own .gz file extension.
            If one is not present a NameError is raised.
Returns
-------
None
"""
cell_number = self.x * self.y
if compress:
if file_name.suffix != '.gz':
raise NameError(f'Mesh is being compressed but file '
f'name "{file_name}" does not have a '
f'.gz extension.')
with gzip.open(file_name, 'wt') as f:
self._write_mesh_to_file(cell_number,
f,
self.material_numbers)
else:
with open(file_name, 'w') as f:
self._write_mesh_to_file(cell_number,
f,
self.material_numbers)
def _write_mesh_to_file(self,
cell_number: int,
file_object,
material_numbers: Union[List[int],
Tuple[int]]) \
-> None:
first_row = f'{cell_number}, {len(material_numbers)}\n'
file_object.write(first_row)
file_object.writelines([self._cell_to_row(cell,
material_numbers)
for cell in self.cells])
def _insert_extension_zone(self, zone: ExtensionZone):
factor = self.extension_factor.multiplier
max_size = self.extension_factor.max_cell_size
varying_cell_size = self.cell_size
if zone.region in [Region.NORTH, Region.EAST]:
half = 'north/east'
if zone.region == Region.NORTH:
position = self._y_physical_length + self.cell_size*.5
else:
position = self._x_physical_length + self.cell_size*.5
else:
half = 'south/west'
position = -0.5*self.cell_size
varying_cell_size, coord = self._create_extension_zone_coordinates(
factor, max_size, [position], varying_cell_size, zone,
half)
if half == 'north/east':
range_ = np.array(coord)
if zone.region == Region.NORTH:
self._y_physical_length += np.ptp(range_)
else:
self._x_physical_length += np.ptp(range_)
return range_
else:
range_ = np.array(coord)
return range_[::-1]
def _insert_north_zone(self, zone: ExtensionZone):
factor = self.extension_factor.multiplier
max_size = self.extension_factor.max_cell_size
varying_cell_size = self.cell_size
position = self._y_physical_length + self.cell_size*.5
varying_cell_size, y_coord = self._create_extension_zone_coordinates(
factor, max_size, [position], varying_cell_size, zone,
'north/east')
north_y_range = np.array(y_coord)
self._y_physical_length += np.ptp(north_y_range)
return north_y_range
def _insert_east_zone(self, zone: ExtensionZone):
factor = self.extension_factor.multiplier
max_size = self.extension_factor.max_cell_size
varying_cell_size = self.cell_size
position = self._x_physical_length + self.cell_size*.5
varying_cell_size, x_coord = self._create_extension_zone_coordinates(
factor, max_size, [position], varying_cell_size, zone,
'north/east')
east_x_range = np.array(x_coord)
self._x_physical_length += np.ptp(east_x_range)
return east_x_range
def _insert_west_zone(self, zone: ExtensionZone):
factor = self.extension_factor.multiplier
max_size = self.extension_factor.max_cell_size
varying_cell_size = self.cell_size
position = -0.5*self.cell_size
varying_cell_size, x_coord = self._create_extension_zone_coordinates(
factor, max_size, [position], varying_cell_size, zone,
'south/west')
west_x_range = np.array(x_coord)
return west_x_range[::-1]
def _insert_south_zone(self, zone: ExtensionZone):
factor = self.extension_factor.multiplier
max_size = self.extension_factor.max_cell_size
varying_cell_size = self.cell_size
position = -0.5*self.cell_size
varying_cell_size, y_coord = self._create_extension_zone_coordinates(
factor, max_size, [position], varying_cell_size, zone,
'south/west')
south_y_range = np.array(y_coord)
# south_y_range += abs(np.amin(south_y_range)) + varying_cell_size
# self._y_physical_length += np.amax(south_y_range)
return south_y_range[::-1]
def _create_extension_zone_coordinates(self, factor: float,
max_size: float,
coord: List[float],
varying_cell_size: float,
zone: ExtensionZone,
half: str):
counter = 1
position = coord[0]
while counter < zone.depth:
if varying_cell_size < max_size:
varying_cell_size = counter * factor * self.cell_size
else:
varying_cell_size = max_size
if half.lower() == 'south/west':
position -= varying_cell_size
elif half.lower() == 'north/east':
position += varying_cell_size
coord.append(position)
counter += 1
return varying_cell_size, coord
@property
def extension_zones(self) -> List[ExtensionZone]:
"""The extension zones applied to the mesh.
Returns
-------
zones : List[ExtensionZone]
"""
if self._extension_zones is not None:
self._find_extension_factor()
return self._extension_zones
else:
return []
@property
def extension_factor(self) -> ExtensionZoneFactor:
"""The ExtensionZoneFactor associated with this mesh.
There can only be one extension factor associated with a mesh.
When this property is called it also checks that the given
extension zones don't have clashing properties.
Returns
-------
factor : ExtensionZoneFactor
"""
if self._extension_factor is None:
self._find_extension_factor()
return self._extension_factor
```
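As a minimal sketch of how the extension-zone machinery above fits together (the zone depth, growth factor and cell size below are illustrative assumptions, not values from the package):

```python
from PySALESetup import PySALEObject
from PySALESetup.mesh import PySALEMesh, ExtensionZone, ExtensionZoneFactor, Region
import matplotlib.pyplot as plt

# 100 x 150 high-res cells of 0.1 m, plus a 20-cell southern extension whose
# cells grow by 5% per cell until they reach 0.5 m.
factor = ExtensionZoneFactor(multiplier=1.05, max_cell_size=0.5)
south = ExtensionZone(depth=20, region=Region.SOUTH, cell_size=0.1, factor=factor)

target = PySALEObject([(0, 0), (0, 15), (10, 15), (10, 0)])
target.set_material(3)

mesh = PySALEMesh(100, 150, cell_size=0.1, extension_zones=[south])
mesh.project_polygons_onto_mesh([target])
mesh.plot_materials()
plt.show()
```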
#### File: PySALESetup/tests/test_constants.py
```python
from PySALESetup.constants import PACKAGE_ROOT_DIRECTORY, \
ASTEROID_TEMPLATE_PATH, \
ADDITIONAL_TEMPLATE_PATH, \
GRAIN_LIBRARY_PATH
import pathlib
import pytest
class TestPaths:
@pytest.mark.parametrize('directory', [PACKAGE_ROOT_DIRECTORY,
ASTEROID_TEMPLATE_PATH,
ADDITIONAL_TEMPLATE_PATH,
GRAIN_LIBRARY_PATH])
def test_is_path(self, directory):
assert isinstance(directory, pathlib.Path)
@pytest.mark.parametrize('directory', [PACKAGE_ROOT_DIRECTORY,
GRAIN_LIBRARY_PATH])
def test_is_directory(self, directory):
assert directory.is_dir()
@pytest.mark.parametrize('file', [ASTEROID_TEMPLATE_PATH,
ADDITIONAL_TEMPLATE_PATH])
    def test_is_file(self, file):
assert file.is_file()
```
#### File: PySALESetup/tests/test_creation.py
```python
from PySALESetup import PySALEDomain, PySALEObject, \
PySALEDistributionBase, PySALEUniformDistribution, \
PySALENormalDistribution, PySALEWeibull2Distribution, \
PySALELogNormalDistribution, PySALECustomDistribution
import pytest
from math import isclose
class TestPySALEDomain:
def test_move_object_to_random_coords(self, simple_object):
domain = PySALEDomain(simple_object)
simple_object = \
domain._move_object_to_random_coordinate_in_domain(
simple_object, 1., 1., 0., 0.
)
assert 0. < simple_object.centroid.x < 1.
assert 0. < simple_object.centroid.y < 1.
def test_fill_to_threshold(self, simple_object):
domain = PySALEDomain(simple_object)
grain = PySALEObject([(0, 0), (0.5, 0.5), (1, 0)])
domain.fill_with_random_grains_to_threshold(grain, 20)
frac = sum([c.area
for c in simple_object.children])/simple_object.area
tolerance = (grain.area/simple_object.area)*100
assert isclose(frac*100., 20., abs_tol=tolerance)
@pytest.mark.parametrize('threshold_', [10., 30., 60., 90.])
def test_threshold_check(self, simple_object, threshold_):
domain = PySALEDomain(simple_object)
inserted_area, insertion_possible, threshold = \
domain._check_threshold_input(threshold_)
assert inserted_area == 0.
assert insertion_possible
assert threshold == threshold_
@pytest.mark.parametrize('threshold_', [30., 60., 90.])
def test_threshold_check_already_populated(self, simple_object,
threshold_):
domain = PySALEDomain(simple_object)
simple_object.spawn_polygon_in_shape([(0, 0), (0, 5), (5, 5), (5, 0)])
inserted_area, insertion_possible, threshold = \
domain._check_threshold_input(threshold_)
assert inserted_area == 25.
assert insertion_possible
assert threshold == threshold_
@pytest.mark.parametrize('threshold_', [0., 10.])
def test_object_already_over_threshold(self, simple_object,
threshold_):
domain = PySALEDomain(simple_object)
simple_object.spawn_polygon_in_shape([(0, 0), (0, 5), (5, 5), (5, 0)])
with pytest.raises(AssertionError):
_, _, _ = domain._check_threshold_input(threshold_)
@pytest.mark.parametrize('max_attempts', [1, 10, 100, 1000, 10000])
def test_insert_randomly_maxes_out(self, simple_object, max_attempts):
domain = PySALEDomain(simple_object)
grain = PySALEObject([(0, 0), (11, 11), (12, 0)])
with pytest.warns(UserWarning):
domain.insert_randomly(grain, max_attempts=max_attempts)
@pytest.mark.parametrize('max_attempts', [-1, 0, 6.5, '7'])
def test_insert_randomly_invalid_max_attempts(self, simple_object,
max_attempts):
domain = PySALEDomain(simple_object)
grain = PySALEObject([(0, 0), (11, 11), (12, 0)])
with pytest.raises(AssertionError):
domain.insert_randomly(grain, max_attempts=max_attempts)
def test_optimise_materials(self, simple_object):
domain = PySALEDomain(simple_object)
domain.fill_with_random_grains_to_threshold(
PySALEObject.generate_ellipse([0, 0], .5, .5, 0, 1),
50.
)
domain.optimise_materials()
materials = {grain.material for grain in domain.object.children}
assert len(materials) == 9
class TestRandomlyRotateObjects:
def test_object_unchanged(self, circle):
domain = PySALEDomain(circle)
new = domain.randomly_rotate_object(circle)
assert new.bounds == circle.bounds
def test_rectangle_rotates(self, rectangle):
dist = PySALENormalDistribution(45., 5.)
domain = PySALEDomain(rectangle)
new = domain.randomly_rotate_object(rectangle, dist)
assert new.bounds != rectangle.bounds
class TestRandomlyResizeObjects:
@pytest.mark.parametrize('area', [True, False])
def test_object_unchanged(self, simple_object, area):
domain = PySALEDomain(simple_object)
new = domain.randomly_resize_object(simple_object, area=area)
assert new.area == simple_object.area
def test_with_normal_dist(self, object_with_normal_distributions):
object_, radii, areas, angles = object_with_normal_distributions
self.resize_object_based_on_dist(object_, radii)
def test_with_uniform_dist(self, object_with_uniform_distributions):
object_, radii, areas, angles = object_with_uniform_distributions
self.resize_object_based_on_dist(object_, radii)
def test_with_lognormal_dist(self, object_with_lognormal_distributions):
object_, radii, areas, angles = object_with_lognormal_distributions
self.resize_object_based_on_dist(object_, radii)
def test_with_weibull_dist(self, object_with_weibull_distributions):
object_, radii, areas, angles = object_with_weibull_distributions
self.resize_object_based_on_dist(object_, radii)
def resize_object_based_on_dist(self, object_, radii):
domain = PySALEDomain(object_)
result = domain.randomly_resize_object(object_,
size_distribution=radii,
area=False)
self.assert_shapes_different_coords(object_, result)
@staticmethod
def assert_shapes_different_coords(object_, result):
old_coords = object_.exterior.coords.xy
new_coords = result.exterior.coords.xy
for old, new in zip(old_coords, new_coords):
assert old != new
class TestDistributionBase:
def test_cdf_defined_but_not_implemented(self):
with pytest.raises(NotImplementedError):
pdb = PySALEDistributionBase()
pdb.cdf(1.)
def test_random_number_defined_but_not_implemented(self):
with pytest.raises(NotImplementedError):
pdb = PySALEDistributionBase()
pdb.random_number()
def test_details(self):
pdb = PySALEDistributionBase()
with pytest.raises(TypeError):
pdb.details()
def test_frequency(self):
pdb = PySALEDistributionBase()
with pytest.raises(NotImplementedError):
pdb.frequency(1., (0., 1.))
class TestAllDistributionProperties:
@pytest.mark.parametrize('distribution', [PySALEUniformDistribution((0., 1.)),
PySALENormalDistribution(0.5, 0.5),
PySALEWeibull2Distribution(1., 1.),
PySALELogNormalDistribution(0.5, 0.5),
PySALECustomDistribution(lambda x: 1., lambda: 1.)])
def test_name(self, distribution):
assert distribution.name is not None
@pytest.mark.parametrize('distribution', [PySALEUniformDistribution((0., 1.)),
PySALENormalDistribution(0.5, 0.5),
PySALEWeibull2Distribution(1., 1.),
PySALELogNormalDistribution(0.5, 0.5)])
def test_skew(self, distribution):
assert distribution.skew is not None
@pytest.mark.parametrize('distribution', [PySALEUniformDistribution((0., 1.)),
PySALENormalDistribution(0.5, 0.5),
PySALEWeibull2Distribution(1., 1.),
PySALELogNormalDistribution(0.5, 0.5)])
def test_mean(self, distribution):
assert distribution.mean is not None
@pytest.mark.parametrize('distribution', [PySALEUniformDistribution((0., 1.)),
PySALENormalDistribution(0.5, 0.5),
PySALEWeibull2Distribution(1., 1.),
PySALELogNormalDistribution(0.5, 0.5)])
def test_median(self, distribution):
assert distribution.median is not None
@pytest.mark.parametrize('distribution', [PySALEUniformDistribution((0., 1.)),
PySALENormalDistribution(0.5, 0.5),
PySALEWeibull2Distribution(1., 1.),
PySALELogNormalDistribution(0.5, 0.5)])
def test_variance(self, distribution):
assert distribution.variance is not None
@pytest.mark.parametrize('distribution', [PySALEUniformDistribution((0., 1.)),
PySALENormalDistribution(0.5, 0.5),
PySALEWeibull2Distribution(1., 1.),
PySALELogNormalDistribution(0.5, 0.5),
PySALECustomDistribution(lambda x: 1., lambda: 1.)])
def test_cdf(self, distribution):
v = distribution.cdf(0.5)
assert isinstance(v, float)
@pytest.mark.parametrize('distribution', [PySALEUniformDistribution((0., 1.)),
PySALENormalDistribution(0.5, 0.5),
PySALEWeibull2Distribution(1., 1.),
PySALELogNormalDistribution(0.5, 0.5),
PySALECustomDistribution(lambda x: 1., lambda: 1.)])
def test_random_number(self, distribution):
v = distribution.random_number()
assert isinstance(v, float)
class TestCustomDistributionProperties:
@pytest.mark.parametrize('prop', ['mean',
'median',
'skew',
'variance'])
def test_properties(self, prop):
custom = PySALECustomDistribution(lambda x: 1., lambda: 1.)
value = getattr(custom, prop)
assert value is None
```
|
{
"source": "jgd10/RegolithSetupRoutines",
"score": 3
}
|
#### File: jgd10/RegolithSetupRoutines/regolith_exp_vs_imp_DIAGRAM.py
```python
import numpy as np
import pySALESetup as pss
import matplotlib.pyplot as plt
from math import ceil
import copy
import random
random.seed(42)
plt.rcParams['text.usetex']=True
# Cumulative Distribution Function
def CDF(x):
# The CDF
A = 2.908
B = 0.028
C = 0.320
L = 0.643
a = 99.4
return (1./a)*A/(B+C*np.exp(-L*x))
# Population Distribution Function
def PDF(x):
# The PDF
A = 2.908
B = 0.028
C = 0.320
L = 0.643
a = 99.4
pdf = (1./a)*A*L*C*np.exp(L*x)/(B+C*np.exp(L*x))**2.
return pdf
# legacy function, this is now integrated into pySALESetup
def lunar_pdf(x,LB_tol,UB_tol):
# Integrate PDF at a value of x
P = abs(CDF(x+UB_tol) - CDF(x-LB_tol))
return P
# Convert to krumbein phi
def PHI_(x):
"""
x must be in SI (metres) for this function to work
PHI is only calculated correctly when the arg is mm
"""
return -1.*np.log2(x*1000.)
# reverse of krumbein phi
def DD_(x):
return 2.**(-x)
# reverse of krumbein phi
# duplicate of above; except converts to metres
# and returns radius, not diameter
def reverse_phi(p):
return (2**(-p))*.5*1.e-3
# Top and bottom mesh created separately
# Create four meshes
meshA = pss.Mesh(X=500,Y=500,cellsize=2.5e-6)
meshA.label='A'
# target volume (area) fraction
vfrac = 0.5
# Store grain objects in list, 'grains'
grainsA = []
grainsB = []
# Minimum Krumbein phi = min resolution (4 cppr)
# Max ... '' '' '' '' = max resolution (200 cppr)
# Max res is one which still fits in the domain
minres = 10
maxphi = -np.log2(10*2.5e-3)
minphi = -np.log2(90*2.5e-3)
NA = 3
NB = 3
# Generate N phi values and equiv radii (in cells)
phiA = np.linspace(minphi,maxphi,NA)
RsA = reverse_phi(phiA)/meshA.cellsize
cmap = plt.cm.copper
cols = [0.3,0.5,0.7]
# interval over which to calculate number from pdf
# No. = |CDF(x+h) - CDF(x-h)| * no. of areas
hA = abs(phiA[1]-phiA[0])
# baseline mu and sigma fitted to a normal distribution
mu = 3.5960554191
sigma = 2.35633102167
# standard distro
SD = pss.SizeDistribution(func='normal',mu=mu,sigma=sigma)
# target area that ALL particles should take up at end
target_area = float(meshA.Ncells*vfrac)
# Generate 4 libraries, one for each domain
# of grains, and record the expected frequency
# of each.
diff = 0
freqs = []
for r,p in zip(RsA,phiA):
freq1 = SD.frequency(p,hA)*target_area
freq2 = freq1/(np.pi*r**2.)
freq = int(freq2)
diff += (freq2-freq)*np.pi*r**2.
freqs.append(freq)
#print diff*np.pi*r**2.
ctr = 0
for r in RsA:
test = diff/(np.pi*r**2.)
if (1.-test)<=0.2:
Rextra = r
break
ctr += 1
# library of grains has been generated, now place them into the mesh!
groupA = pss.Ensemble(meshA,name='normaldistA')
Rs = copy.deepcopy(RsA)
Fr = copy.deepcopy(freqs)
# place them in, but don't worry if not possible to fit all.
# allow for keyboard interrupt if there's a problem.
fig = plt.figure(figsize=(8,4))
fig1 = plt.figure(figsize=(6,4))
ax1a = fig1.add_subplot(131,aspect='equal')
ax2a = fig1.add_subplot(132,aspect='equal')
ax3a = fig1.add_subplot(133,aspect='equal')
ax1a.axis('off')
ax2a.axis('off')
ax3a.axis('off')
ax11 = fig.add_subplot(241)
ax22 = fig.add_subplot(242)
ax33 = fig.add_subplot(243)
ax44 = fig.add_subplot(244)
sizes1 = [3,6,12,0]
sizes2 = [0,3,6,12]
colors1 = [0.7,.5,.3,.0]
colors2 = [0.,.7,.5,.3]
for ax,c1,sz1,c2,sz2 in zip([ax11,ax22,ax33,ax44],colors1,sizes1,colors2,sizes2):
ax.axis('off')
ax.text(0.4,0.3,'$DE_{min}$: \n$DI_{max}$: ',va='center',ha='center',fontsize=18)
if ax != ax44: ax.plot([0.83],[0.42],marker='o',color=cmap(c1),ms=sz1)
if ax != ax11: ax.plot([0.83],[0.22],marker='o',color=cmap(c2),ms=sz2)
ax.set_xlim(0,1)
ax.set_ylim(0,1)
ax1 = fig.add_subplot(245,aspect='equal')
ax2 = fig.add_subplot(246,aspect='equal')
ax3 = fig.add_subplot(247,aspect='equal')
ax4 = fig.add_subplot(248,aspect='equal')
group = []
for ax in [ax1,ax2,ax3,ax4]:
ctr = 0
m = 0
for r,freq in zip(Rs,Fr):
if r == Rextra: freq+=1
m += 1
for f in range(freq):
if ax==ax1:
g = pss.Grain(r)
g.insertRandomly(meshA, m=m)
group.append(g)
else:
g = group[ctr]
g.place(g.x,g.y,m,meshA)
ctr += 1
if ax != ax1: meshA.fillAll(m+1)
for KK,col in zip(range(meshA.NoMats),cols):
matter = np.copy(meshA.materials[KK,:,:])*col
if KK!=2:
ax1a.contour(meshA.xi,meshA.yi,matter,1,colors='k',linewidths=1)
ax3a.contour(meshA.xi,meshA.yi,matter,1,colors='k',linewidths=1)
else:
pass
matter = np.ma.masked_where(matter==0.,matter)
if KK == 2 and ax==ax1:
ax1a.pcolormesh(meshA.xi,meshA.yi,matter, cmap=cmap,vmin=0,vmax=1)
if KK == 2 and ax==ax2:
ax3a.pcolormesh(meshA.xi,meshA.yi,matter, cmap=cmap,vmin=0,vmax=1)
ax.pcolormesh(meshA.xi,meshA.yi,matter, cmap=cmap,vmin=0,vmax=1)
ax.axis('off')
Rs = list(Rs[:-1])
Fr = list(Fr[:-1])
meshA.fillAll(-1)
ax1.set_title('Explicitly\nResolved')
ax2.set_title('Semi-Explicitly\nResolved')
ax3.set_title('Semi-Explicitly\nResolved')
ax4.set_title('Implicitly\nResolved')
bbox_props = dict(boxstyle="darrow", fc='w', ec="k", lw=2)
ax1.text(0.52, 0.8, "Explicitly Resolved | Implicitly Resolved", ha="center", va="center",
size=15,bbox=bbox_props,transform=fig.transFigure)
bbox_props = dict(boxstyle="rarrow", fc='w', ec="k", lw=2)
ax2a.text(0.5, 0.5, "Parameterisation\nassumes\nuniform density", ha="center", va="center",
size=12,bbox=bbox_props)
#fig.tight_layout()
fig.savefig('explicit_implicit_demonstration.png',dpi=300,transparent=True)
fig1.savefig('matrixunifromity_assum_demonstration.png',dpi=300,transparent=True)
plt.show()
# view final meshes
#meshA.viewMats()
```
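The Krumbein phi helpers used in these scripts are just base-2 logarithms of the grain diameter in millimetres; a quick standalone round-trip check with illustrative values:

```python
import numpy as np

def PHI_(x):            # x is a diameter in metres
    return -1. * np.log2(x * 1000.)

def reverse_phi(p):     # returns a radius in metres
    return (2.**(-p)) * .5 * 1.e-3

d = 1.e-3                          # a 1 mm diameter grain
print(PHI_(d))                     # -> 0.0   (1 mm corresponds to phi = 0)
print(reverse_phi(PHI_(d)))        # -> 5e-4  (half the diameter, in metres)
print(reverse_phi(0.) / 2.5e-6)    # -> 200.0 cells per radius at 2.5 micron cells
```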
#### File: jgd10/RegolithSetupRoutines/regolith_exp_vs_imp.py
```python
import numpy as np
import pySALESetup as pss
import matplotlib.pyplot as plt
from math import ceil
# Cumulative Distribution Function
def CDF(x):
# The CDF
A = 2.908
B = 0.028
C = 0.320
L = 0.643
a = 99.4
return (1./a)*A/(B+C*np.exp(-L*x))
# Population Distribution Function
def PDF(x):
# The PDF
A = 2.908
B = 0.028
C = 0.320
L = 0.643
a = 99.4
pdf = (1./a)*A*L*C*np.exp(L*x)/(B+C*np.exp(L*x))**2.
return pdf
# legacy function, this is now integrated into pySALESetup
def lunar_pdf(x,LB_tol,UB_tol):
# Integrate PDF at a value of x
P = abs(CDF(x+UB_tol) - CDF(x-LB_tol))
return P
# Convert to krumbein phi
def PHI_(x):
"""
x must be in SI (metres) for this function to work
PHI is only calculated correctly when the arg is mm
"""
return -1.*np.log2(x*1000.)
# reverse of krumbein phi
def DD_(x):
return 2.**(-x)
# reverse of krumbein phi
# duplicate of above; except converts to metres
# and returns radius, not diameter
def reverse_phi(p):
return (2**(-p))*.5*1.e-3
# Top and bottom mesh created separately
# Create four meshes
meshA = pss.Mesh(X=500,Y=1200,cellsize=2.5e-6)
meshB = pss.Mesh(X=500,Y=1200,cellsize=2.5e-6)
meshA.label='A'
meshB.label='B'
# target volume (area) fraction
vfrac = 0.5
# Store grain objects in list, 'grains'
grainsA = []
grainsB = []
# Minimum Krumbein phi = min resolution (4 cppr)
# Max ... '' '' '' '' = max resolution (200 cppr)
# Max res is one which still fits in the domain
minres = 10
maxphi = -np.log2(2*minres*2.5e-3)
minphi = -np.log2(2*200*2.5e-3)
NA = 20
NB = 20
# Generate N phi values and equiv radii (in cells)
phiA = np.linspace(minphi,maxphi,NA)
phiB = np.linspace(minphi,maxphi,NB)
RsA = reverse_phi(phiA)/meshA.cellsize
RsB = reverse_phi(phiB)/meshB.cellsize
#RsA = ((DD_(phiA)*.5*1.e-3)/meshA.cellsize)
#RsB = ((DD_(phiB)*.5*1.e-3)/meshB.cellsize)
#RsC = ((DD_(phiC)*.5*1.e-3)/meshC.cellsize)
#RsD = ((DD_(phiD)*.5*1.e-3)/meshD.cellsize)
# interval over which to calculate number from pdf
# No. = |CDF(x+h) - CDF(x-h)| * no. of areas
hA = abs(phiA[1]-phiA[0])
hB = abs(phiB[1]-phiB[0])
# baseline mu and sigma fitted to a normal distribution
mu = 3.5960554191
sigma = 2.35633102167
# standard distro
SD = pss.SizeDistribution(func='normal',mu=mu,sigma=sigma*.3333333333333333)
print(SD.details())
# target area that ALL particles should take up at end
target_area = float(meshA.Ncells*vfrac)
# Generate 4 libraries, one for each domain
# of grains, and record the expected frequency
# of each.
diff = 0
freqs = []
for r,p in zip(RsA,phiA):
freq1 = SD.frequency(p,hA)*target_area
freq2 = freq1/(np.pi*r**2.)
freq = int(freq2)
diff += (freq2-freq)*np.pi*r**2.
freqs.append(freq)
#print diff*np.pi*r**2.
ctr = 0
for r in RsA:
test = diff/(np.pi*r**2.)
if (1.-test)<=0.2:
Rextra = r
break
ctr += 1
for r,freq in zip(RsA,freqs):
if r == Rextra: freq+=1
for f in range(freq):
g = pss.Grain(r)
grainsA.append(g)
grainsB.append(g)
# library of grains has been generated, now place them into the mesh!
groupA = pss.Ensemble(meshA,name='normaldistA')
groupB = pss.Ensemble(meshB,name='normaldistB')
# place them in, but don't worry if not possible to fit all.
# allow for keyboard interrupt if there's a problem.
try:
i = 0
for gA in grainsA:
gA.insertRandomly(meshA, m=1)
groupA.add(gA,gA.x,gA.y)
for gB in grainsB:
gB.insertRandomly(meshB, m=1)
groupB.add(gB,gB.x,gB.y)
except KeyboardInterrupt:
pass
# optimise the material number distribution amongst grains
groupA.optimise_materials(np.array([1,2,3,4,5,6,7]))
groupB.optimise_materials(np.array([1,2,3,4,5,6,7]))
# wipe the mesh
meshA.fillAll(-1)
meshB.fillAll(-1)
# replace all grains with their new materials
for xA,yA,gA,mA in zip(groupA.xc,groupA.yc,groupA.grains,groupA.mats):
gA.place(xA,yA,mA,meshA)
for xB,yB,gB,mB in zip(groupB.xc,groupB.yc,groupB.grains,groupB.mats):
gB.place(xB,yB,mB,meshB)
meshA.fillAll(8)
meshB.fillAll(8)
v_voidA = meshA.VoidFracForTargetPorosity(8,bulk=0.5,final_por=0.5)
v_voidB = meshB.VoidFracForTargetPorosity(8,bulk=0.5,final_por=0.5)
GV = pss.Grain(eqr=4)
vfA = 0.
print(v_voidA*100., v_voidB*100.)
while vfA < v_voidA:
GV.insertRandomly(meshA, m=0,mattargets=[8])
vfA = 1.-meshA.calcVol(frac=True)
if vfA > v_voidA: break
vfB = 0.
while vfB < v_voidB:
GV.insertRandomly(meshB, m=0,mattargets=[8])
vfB = 1.-meshB.calcVol(frac=True)
if vfB > v_voidB: break
# Fill each domain with a matrix material; A+B will form a mesh, as will C+D
# Calculate porosity required for each matrix
#meshA.matrixPorosity(8,0.5,Print=True)
#print groupA.details()
#meshB.matrixPorosity(8,0.5,Print=True)
#print groupB.details()
# Plot the particle size distribution created in each case
#groupA.plotPSD()
# Save the ensemble objects (pickle) for later use
groupA.save()
groupB.save()
# add a blanket velocity to each half
meshA.blanketVel(-1500.,axis=1)
meshB.blanketVel(+1500.,axis=1)
# combine the pairs of meshes
meshAB = pss.combine_meshes(meshA,meshB,axis=1)
# top and tail each mesh (delete top and bottom 3 rows of cells)
meshAB.top_and_tail()
# view final meshes
meshAB.viewMats()
# save final meshes as output files
meshAB.save(fname='regolith_PSD_minres{}cppr+voids_por0.50.iSALE'.format(minres),compress=True)
# redo with new velocities if necessary.
#meshC.multiplyVels()
#meshC.save(fname='regolith_circles_v1500.iSALE',compress=True)
#meshC.multiplyVels()
#meshC.save(fname='regolith_circles_v750.iSALE',compress=True)
```
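The per-bin grain counts above come from splitting the target particle area across phi bins and dividing by one grain's area; the arithmetic can be sketched with plain scipy in place of pss.SizeDistribution (this assumes frequency() integrates the normal PDF over a bin of width h centred on each phi value, which may differ slightly from the pySALESetup implementation):

```python
import numpy as np
from scipy.stats import norm

mu, sigma = 3.5960554191, 2.35633102167 / 3.    # same fitted values as the script
cellsize = 2.5e-6
target_area = 500 * 1200 * 0.5                  # Ncells * vfrac

minphi = -np.log2(2 * 200 * 2.5e-3)
maxphi = -np.log2(2 * 10 * 2.5e-3)
phi = np.linspace(minphi, maxphi, 20)
h = abs(phi[1] - phi[0])
radii = (2.**(-phi)) * .5e-3 / cellsize         # equivalent radii in cells

# expected fraction of the filled area in each bin, then whole-grain counts
bin_fraction = norm.cdf(phi + h / 2., mu, sigma) - norm.cdf(phi - h / 2., mu, sigma)
counts = (bin_fraction * target_area / (np.pi * radii**2)).astype(int)
print(list(zip(np.round(radii, 1), counts)))
```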
|
{
"source": "jgdelrio/albert-embeddings",
"score": 2
}
|
#### File: albert-embeddings/test/test_nlp_model.py
```python
import torch
from transformers import AlbertModel, AlbertTokenizer
import pytest
from hamcrest import assert_that, equal_to
from albert_emb.utils import logger
from albert_emb.nlp_model import get_embeddings, load_model
from albert_emb.config import MODELS, ROOT
GET_EMBEDDINGS_TEST = [
("Hello, world!", ('hello_world.pt', 2, 13), False, "mean"),
("Colon discovered America", ('simple_phrase.pt', 3, 24), False, "mean"),
("Colon discovered America. Later he returned to Spain.", ('phrase2.pt', 8, 53), False, "mean"),
(["Colon again.", "He come and go.", "With three carabellas."], ('paragraphs3.pt', 9, 51), False, "mean"),
("Colon again. He come and go. With three carabellas.", ('paragraphs3.pt', 9, 51), False, "mean"),
("Colon discovered America", ('simple_phrase_sum.pt', 3, 24), False, "sum"),
("Some today's news include Macron bid for a tough", ('news.pt', 9, 48), False, "sum"),
]
@pytest.mark.parametrize("text_in, expected, curate, aggregate", GET_EMBEDDINGS_TEST)
def test_get_embeddings(text_in, expected, curate, aggregate):
sample_ref, exp_word_count, exp_char_count = expected
exp_embeddings = torch.load(ROOT.joinpath('test', 'samples', sample_ref))
result = get_embeddings(text_in, curate, aggregate)
embeddings = result["embeddings"]
word_count = result["word_count"]
char_count = result["char_count"]
assert_that(embeddings.shape[0], equal_to(MODELS['albert-base-v2']['hidden_size']))
logger.debug(f"Result shape is: {embeddings.shape}")
assert(torch.all(embeddings.eq(exp_embeddings)))
assert_that(word_count, equal_to(exp_word_count))
assert_that(char_count, equal_to(exp_char_count))
logger.debug("Embeddings value of phrase are correct.")
def test_get_embeddings_curate_type_error():
with pytest.raises(Exception):
assert get_embeddings("test", 1)
def test_get_embeddings_aggregate_type_error():
with pytest.raises(Exception):
assert get_embeddings("test", False, 42)
def test_get_embeddings_text_type_error():
with pytest.raises(Exception):
assert get_embeddings(3.14, False)
def test_load_model_raise_name_error():
with pytest.raises(Exception):
assert load_model("non_existing_model")
def test_load_model_albert():
name = 'albert-base-v2'
model, tokenizer = load_model(name)
assert(isinstance(model, AlbertModel))
assert(isinstance(tokenizer, AlbertTokenizer))
if __name__ == "__main__":
test_get_embeddings(*GET_EMBEDDINGS_TEST[0])
```
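Outside the test suite, `get_embeddings` can be called directly; the argument meanings (curate flag, aggregation mode) are inferred from the parametrised cases above, so treat this as an assumed usage sketch:

```python
from albert_emb.nlp_model import get_embeddings

result = get_embeddings("Colon discovered America", False, "mean")
print(result["embeddings"].shape)                   # hidden size, e.g. 768 for albert-base-v2
print(result["word_count"], result["char_count"])   # 3 24, per the test expectations
```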
#### File: albert-embeddings/test/test_utils.py
```python
import pytest
from hamcrest import assert_that, equal_to
from albert_emb.utils import logger, paragraphs_join
def test_paragraphs_join():
input_paragraphs = ["first paragraph", "Second paragraph. ", "3rd paragraph...", '4th and final.']
expected = "first paragraph. Second paragraph. 3rd paragraph... . 4th and final."
result = paragraphs_join(input_paragraphs)
assert_that(result, equal_to(expected), f"Result:\n{result}\nDiffer from:\n{expected}")
if __name__ == "__main__":
test_paragraphs_join()
```
|
{
"source": "jgdelrio/covid-data",
"score": 3
}
|
#### File: covid-data/covid/dashboard.py
```python
import dash
import dash_core_components as dcc
import dash_html_components as html
from covid.data_manager import global_data
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
colors = {
'background': '#111111',
'text': '#7FDBFF'
}
def generate_table(df, max_rows: int=20):
"""Generates an HTML table from a pandas dataframe with the number of rows specified"""
return html.Table([
html.Thead(
html.Tr([html.Th(col) for col in df.columns])
),
html.Tbody([
html.Tr([
html.Td(df.iloc[i][col]) for col in df.columns
]) for i in range(min(len(df), max_rows))
])
])
app.layout = html.Div(
style={'backgroundColor': colors['background']},
children=[
html.H1(children='Graphical View',
style={'textAlign': 'center', 'color': colors['text']}
),
html.Div(children='Visualization examples',
style={'textAlign': 'center', 'color': colors['text']}),
dcc.Graph(
id='example-graph',
figure={
'data': [
{'x': [1, 2, 3], 'y': [4, 1, 2], 'type': 'bar', 'name': 'SF'},
{'x': [1, 2, 3], 'y': [2, 4, 5], 'type': 'bar', 'name': u'Montréal'},
],
'layout': {
'plot_bgcolor': colors['background'],
'paper_bgcolor': colors['background'],
'font': {
'color': colors['text']
}
}
}
),
generate_table(global_data['Deaths']),
])
if __name__ == '__main__':
app.run_server(debug=True)
```
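A quick, self-contained look at `generate_table` with hypothetical data (importing `covid.dashboard` also executes `covid.data_manager`, which reads the CSVs at import time, so the data files must already be downloaded). With the data in place, running the module serves the page on Dash's default port, 8050.
```python
import pandas as pd
import dash_html_components as html
from covid.dashboard import generate_table

# Toy frame standing in for global_data['Deaths']
df = pd.DataFrame({"Country/Region": ["Spain", "Italy"],
                   "1/22/20": [0, 0],
                   "1/23/20": [3, 5]})
table = generate_table(df, max_rows=1)   # only the first row is rendered
assert isinstance(table, html.Table)
```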
#### File: covid-data/covid/data_manager.py
```python
import re
import dateutil
import numpy as np
import pandas as pd
from datetime import date, datetime, timedelta
from covid.sources import data_sources
from covid.config import DATA_FOLDER
is_date = re.compile(r'[\d]{1,2}/[\d]{1,2}/[\d]{1,4}')
def as_time_series(data):
if 'Province/State' in data.columns:
data.set_index('Province/State', inplace=True)
counter = 0
for d in data.columns:
if isinstance(d, str):
if is_date.match(d):
break
else:
counter += 1
# print(f"Counter: {counter}")
output = data.iloc[:, counter:].copy()
if isinstance(output.columns[0], datetime):
return output
else:
output.columns = [dateutil.parser.parse(d) for d in output.columns]
return output
def get_total(df, column: str='total'):
return pd.DataFrame({column: df.sum(axis=1)}, index=df.index)
def get_country(global_d, name: str='Spain'):
output = {}
as_array = []
for key, entry in global_d.items():
        output[key] = get_total(as_time_series(entry[entry['Country/Region'] == name]).transpose(), column=key)
        as_array.append(output[key])
    return pd.concat(as_array, axis=1), output
def load_global_data():
global_d = {}
for dat_src in data_sources['global']:
global_d[dat_src['title']] = pd.read_csv(DATA_FOLDER.joinpath(f"{dat_src['output']}.{dat_src['type']}"))
return global_d
global_data = load_global_data()
# country=df_glo_conf['Country/Region'].unique().tolist()
if __name__ == "__main__":
print(global_data['Deaths'].head())
```
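The helpers above assume a wide layout: metadata columns ('Province/State', 'Country/Region', ...) followed by one column per date. A hypothetical mini-frame shows the flow; as with the dashboard, importing `covid.data_manager` loads the real CSVs from `DATA_FOLDER` at import time, so those files must already exist.
```python
import pandas as pd
from covid.data_manager import as_time_series, get_total, get_country

toy = pd.DataFrame({
    "Province/State": [None, None],
    "Country/Region": ["Spain", "Italy"],
    "Lat": [40.4, 41.9],
    "Long": [-3.7, 12.5],
    "1/22/20": [0, 0],
    "1/23/20": [1, 2],
})
ts = as_time_series(toy.copy())        # datetime columns, one row per region
totals = get_total(ts.transpose())     # one 'total' column summed across regions
combined, per_source = get_country({"Deaths": toy.copy()}, name="Spain")
```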
|
{
"source": "jgdingding/snake-ml",
"score": 3
}
|
#### File: jgdingding/snake-ml/gameDisplay.py
```python
from tkinter import Tk, Canvas
from snakeGame import direction, SnakeGame
class GameDisplay:
def __init__(self, sg):
self.gameInstance = sg
self.snakeLayer = []
self.foodLayer = []
# initialize tkinter elements
self.root = Tk()
self.root.geometry('500x500')
self.canvas = Canvas(self.root, width=500, height=500) # w
self.drawBoard()
def drawBoard(self):
self.canvas.create_line(50, 50, 50, 450)
self.canvas.create_line(450, 450, 450, 50)
self.canvas.create_line(50, 50, 450, 50)
self.canvas.create_line(50, 450, 450, 450)
for i in range(70, 450, 20):
self.canvas.create_line(i, 50, i, 450)
for j in range(70, 450, 20):
self.canvas.create_line(50, j, 450, j)
for i in range(len(self.gameInstance.snake)):
if i > 0:
self.snakeLayer.append(self.canvas.create_oval(
self.gameInstance.snake[i][0]*20 +
44, self.gameInstance.snake[i][1]*20+44,
self.gameInstance.snake[i][0]*20+56, self.gameInstance.snake[i][1]*20+56, fill="black"))
else:
self.snakeLayer.append(self.canvas.create_oval(
self.gameInstance.snake[i][0]*20 +
44, self.gameInstance.snake[i][1]*20+44,
self.gameInstance.snake[i][0]*20+56, self.gameInstance.snake[i][1]*20+56, fill="blue"))
self.foodLayer.append(self.canvas.create_oval(
self.gameInstance.food[0]*20+44, self.gameInstance.food[1]*20+44,
self.gameInstance.food[0]*20+56, self.gameInstance.food[1]*20+56, fill="red"))
self.canvas.pack()
self.root.update()
return
# Redraws snake and food
def updateSnake(self, movement):
if movement == -1:
moveme = self.foodLayer.pop()
self.canvas.itemconfig(moveme, fill="blue")
self.snakeLayer.insert(0, moveme)
self.canvas.itemconfig(self.snakeLayer[1], fill="black")
self.foodLayer.append(self.canvas.create_oval(
self.gameInstance.food[0]*20 +
44, self.gameInstance.food[1]*20+44,
self.gameInstance.food[0]*20+56, self.gameInstance.food[1]*20+56, fill="red"))
elif movement == 1:
moveme = self.snakeLayer.pop()
self.canvas.itemconfig(moveme, fill="blue")
x = ((self.gameInstance.snake[0][0])
- int((self.canvas.coords(moveme)[0] - 44)/20)) * 20
y = ((self.gameInstance.snake[0][1])
- int((self.canvas.coords(moveme)[1] - 44)/20)) * 20
self.canvas.move(moveme, x, y)
self.snakeLayer.insert(0, moveme)
self.canvas.itemconfig(self.snakeLayer[1], fill="black")
self.root.update()
return
def animate(self):
self.root.mainloop()
```
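GameDisplay only reads two attributes from the game object it is handed: `snake` (a list of grid cells, head first, on a 20x20 board) and `food` (a single cell). A throwaway stand-in is enough to render a board; the real driver passes a `SnakeGame` instance and calls `updateSnake(...)` after each tick, and snakeGame.py must be importable for the module-level import above to succeed.
```python
# Hypothetical stand-in object, not the real SnakeGame from snakeGame.py.
from gameDisplay import GameDisplay


class FakeGame:
    def __init__(self):
        self.snake = [(5, 5), (4, 5), (3, 5)]   # head first, cells in 0..19
        self.food = (10, 10)


display = GameDisplay(FakeGame())
display.animate()                               # blocks in Tk's mainloop
```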
|
{
"source": "jgdwyer/baseball-H2H-AuctionValues",
"score": 3
}
|
#### File: baseball-H2H-AuctionValues/baseball/pull_data.py
```python
import urllib.request
import getpass
import requests
import shutil
def pull_ids():
url = 'http://crunchtimebaseball.com/master.csv'
# Download the file from `url` and save it locally under `file_name`:
with urllib.request.urlopen(url) as response, open('./source_data/ids.csv', 'wb') as out_file:
data = response.read() # a `bytes` object
out_file.write(data)
def pull_fangraphs():
url1 = 'http://www.fangraphs.com/projections.aspx?pos=all&stats='
url2 = '&type=fangraphsdc&team=0&lg=all&players=0'
print('Go to: ' + url1 + 'bat' + url2 + ' and click export data')
print('Go to: ' + url1 + 'pit' + url2 + ' for the pitcher data')
def pull_cbs():
urlhit = 'http://jabo.baseball.cbssports.com/stats/stats-main/all:C:1B:2B:3B:SS:LF:CF:RF:U/period-1:p/z8/?print_rows=9999'
urlpit = 'http://jabo.baseball.cbssports.com/stats/stats-main/all:SP:RP/tp:p/foo2016_2/?print_rows=9999'
print('Click on the following links and save as html:')
print(urlhit)
print(urlpit)
```
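The intended workflow, as the functions above describe it, is one automated download plus two manual saves; the import path `baseball.pull_data` is assumed from the file layout.
```python
from baseball import pull_data

pull_data.pull_ids()        # writes the crunchtimebaseball master.csv to ./source_data/ids.csv
pull_data.pull_fangraphs()  # prints the Fangraphs projection URLs to export by hand
pull_data.pull_cbs()        # prints the CBS stats URLs to save as html by hand
```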
|
{
"source": "jgdwyer/nn-convection",
"score": 2
}
|
#### File: backend/lasagne/mlp.py
```python
from __future__ import (absolute_import, division, unicode_literals, print_function)
__all__ = ['MultiLayerPerceptronBackend']
import os
import sys
import math
import time
import types
import logging
import itertools
log = logging.getLogger('sknn')
import numpy
import theano
import sklearn.base
import sklearn.pipeline
import sklearn.preprocessing
import sklearn.cross_validation
import theano.tensor as T
import lasagne.layers
import lasagne.nonlinearities as nl
from ..base import BaseBackend
from ...nn import Layer, Convolution, Native, ansi
def explin(x):
return x * (x>=0) + (x<0) * (T.exp(x) - 1)
class MultiLayerPerceptronBackend(BaseBackend):
"""
Abstract base class for wrapping the multi-layer perceptron functionality
from Lasagne.
"""
def __init__(self, spec):
super(MultiLayerPerceptronBackend, self).__init__(spec)
self.mlp = None
self.f = None
self.trainer = None
self.validator = None
self.regularizer = None
def _create_mlp_trainer(self, params):
# Aggregate all regularization parameters into common dictionaries.
layer_decay = {}
if self.regularize in ('L1', 'L2') or any(l.weight_decay for l in self.layers):
wd = self.weight_decay or 0.0001
for l in self.layers:
layer_decay[l.name] = l.weight_decay or wd
assert len(layer_decay) == 0 or self.regularize in ('L1', 'L2', None)
if len(layer_decay) > 0:
if self.regularize is None:
self.auto_enabled['regularize'] = 'L2'
regularize = self.regularize or 'L2'
penalty = getattr(lasagne.regularization, regularize.lower())
apply_regularize = lasagne.regularization.apply_penalty
self.regularizer = sum(layer_decay[s.name] * apply_regularize(l.get_params(regularizable=True), penalty)
for s, l in zip(self.layers, self.mlp))
if self.normalize is None and any([l.normalize != None for l in self.layers]):
self.auto_enabled['normalize'] = 'batch'
cost_functions = {'mse': 'squared_error', 'mcc': 'categorical_crossentropy'}
loss_type = self.loss_type or ('mcc' if self.is_classifier else 'mse')
assert loss_type in cost_functions,\
"Loss type `%s` not supported by Lasagne backend." % loss_type
self.cost_function = getattr(lasagne.objectives, cost_functions[loss_type])
cost_symbol = self.cost_function(self.trainer_output, self.data_output)
cost_symbol = lasagne.objectives.aggregate(cost_symbol.T, self.data_mask, mode='mean')
if self.regularizer is not None:
cost_symbol = cost_symbol + self.regularizer
return self._create_trainer_function(params, cost_symbol)
def _create_trainer_function(self, params, cost):
if self.learning_rule in ('sgd', 'adagrad', 'adadelta', 'rmsprop', 'adam'):
lr = getattr(lasagne.updates, self.learning_rule)
self._learning_rule = lr(cost, params, learning_rate=self.learning_rate)
elif self.learning_rule in ('momentum', 'nesterov'):
lasagne.updates.nesterov = lasagne.updates.nesterov_momentum
lr = getattr(lasagne.updates, self.learning_rule)
self._learning_rule = lr(cost, params, learning_rate=self.learning_rate, momentum=self.learning_momentum)
else:
raise NotImplementedError(
"Learning rule type `%s` is not supported." % self.learning_rule)
trainer = theano.function([self.data_input, self.data_output, self.data_mask], cost,
updates=self._learning_rule,
on_unused_input='ignore',
allow_input_downcast=True)
compare = self.cost_function(self.network_output, self.data_correct).mean()
validator = theano.function([self.data_input, self.data_correct], compare,
allow_input_downcast=True)
return trainer, validator
def _get_activation(self, l):
nonlinearities = {'Rectifier': nl.rectify,
'Sigmoid': nl.sigmoid,
'Tanh': nl.tanh,
'Softmax': nl.softmax,
'Linear': nl.linear,
'ExpLin': explin}
assert l.type in nonlinearities,\
"Layer type `%s` is not supported for `%s`." % (l.type, l.name)
return nonlinearities[l.type]
def _create_convolution_layer(self, name, layer, network):
self._check_layer(layer,
required=['channels', 'kernel_shape'],
optional=['units', 'kernel_stride', 'border_mode',
'pool_shape', 'pool_type', 'scale_factor'])
if layer.scale_factor != (1, 1):
network = lasagne.layers.Upscale2DLayer(
network,
scale_factor=layer.scale_factor)
network = lasagne.layers.Conv2DLayer(
network,
num_filters=layer.channels,
filter_size=layer.kernel_shape,
stride=layer.kernel_stride,
pad=layer.border_mode,
nonlinearity=self._get_activation(layer))
normalize = layer.normalize or self.normalize
if normalize == 'batch':
network = lasagne.layers.batch_norm(network)
if layer.pool_shape != (1, 1):
network = lasagne.layers.Pool2DLayer(
network,
pool_size=layer.pool_shape,
stride=layer.pool_shape)
return network
def _create_native_layer(self, name, layer, network):
if layer.units and 'num_units' not in layer.keywords:
layer.keywords['num_units'] = layer.units
return layer.type(network, *layer.args, **layer.keywords)
def _create_layer(self, name, layer, network):
if isinstance(layer, Native):
return self._create_native_layer(name, layer, network)
dropout = layer.dropout or self.dropout_rate
if dropout is not None:
network = lasagne.layers.dropout(network, dropout)
if isinstance(layer, Convolution):
return self._create_convolution_layer(name, layer, network)
self._check_layer(layer, required=['units'])
network = lasagne.layers.DenseLayer(network,
num_units=layer.units,
nonlinearity=self._get_activation(layer))
normalize = layer.normalize or self.normalize
if normalize == 'batch':
network = lasagne.layers.batch_norm(network)
return network
def _create_mlp(self, X, w=None):
self.data_input = T.tensor4('X') if self.is_convolution(input=True) else T.matrix('X')
self.data_output = T.tensor4('y') if self.is_convolution(output=True) else T.matrix('y')
self.data_mask = T.vector('m') if w is not None else T.scalar('m')
self.data_correct = T.matrix('yp')
lasagne.random.get_rng().seed(self.random_state)
shape = list(X.shape)
network = lasagne.layers.InputLayer([None]+shape[1:], self.data_input)
# Create the layers one by one, connecting to previous.
self.mlp = []
for i, layer in enumerate(self.layers):
network = self._create_layer(layer.name, layer, network)
network.name = layer.name
self.mlp.append(network)
log.info(
"Initializing neural network with %i layers, %i inputs and %i outputs.",
len(self.layers), self.unit_counts[0], self.layers[-1].units)
for l, p, count in zip(self.layers, self.mlp, self.unit_counts[1:]):
space = p.output_shape
if isinstance(l, Convolution):
log.debug(" - Convl: {}{: <10}{} Output: {}{: <10}{} Channels: {}{}{}".format(
ansi.BOLD, l.type, ansi.ENDC,
ansi.BOLD, repr(space[2:]), ansi.ENDC,
ansi.BOLD, space[1], ansi.ENDC))
# NOTE: Numbers don't match up exactly for pooling; one off. The logic is convoluted!
# assert count == numpy.product(space.shape) * space.num_channels,\
# "Mismatch in the calculated number of convolution layer outputs."
elif isinstance(l, Native):
log.debug(" - Nativ: {}{: <10}{} Output: {}{: <10}{} Channels: {}{}{}".format(
ansi.BOLD, l.type.__name__, ansi.ENDC,
ansi.BOLD, repr(space[2:]), ansi.ENDC,
ansi.BOLD, space[1], ansi.ENDC))
else:
log.debug(" - Dense: {}{: <10}{} Units: {}{: <4}{}".format(
ansi.BOLD, l.type, ansi.ENDC, ansi.BOLD, l.units, ansi.ENDC))
assert count == space[1],\
"Mismatch in the calculated number of dense layer outputs. {} != {}".format(count, space[1])
if self.weights is not None:
l = min(len(self.weights), len(self.mlp))
log.info("Reloading parameters for %i layer weights and biases." % (l,))
self._array_to_mlp(self.weights, self.mlp)
self.weights = None
log.debug("")
self.network_output = lasagne.layers.get_output(network, deterministic=True)
self.trainer_output = lasagne.layers.get_output(network, deterministic=False)
self.f = theano.function([self.data_input], self.network_output, allow_input_downcast=True)
def _conv_transpose(self, arr):
ok = arr.shape[-1] not in (1,3) and arr.shape[1] in (1,3)
return arr if ok else numpy.transpose(arr, (0, 3, 1, 2))
def _initialize_impl(self, X, y=None, w=None):
if self.is_convolution(input=True):
X = self._conv_transpose(X)
if y is not None and self.is_convolution(output=True):
y = self._conv_transpose(y)
if self.mlp is None:
self._create_mlp(X, w)
# Can do partial initialization when predicting, no trainer needed.
if y is None:
return
if self.valid_size > 0.0:
assert self.valid_set is None, "Can't specify valid_size and valid_set together."
X, X_v, y, y_v = sklearn.cross_validation.train_test_split(
X, y,
test_size=self.valid_size,
random_state=self.random_state)
self.valid_set = X_v, y_v
if self.valid_set and self.is_convolution():
X_v, y_v = self.valid_set
if X_v.shape[-2:] != X.shape[-2:]:
self.valid_set = numpy.transpose(X_v, (0, 3, 1, 2)), y_v
params = []
for spec, mlp_layer in zip(self.layers, self.mlp):
if spec.frozen: continue
params.extend(mlp_layer.get_params())
self.trainer, self.validator = self._create_mlp_trainer(params)
return X, y
def _predict_impl(self, X):
if self.is_convolution():
X = numpy.transpose(X, (0, 3, 1, 2))
y = None
for Xb, _, _, idx in self._iterate_data(self.batch_size, X, y, shuffle=False):
yb = self.f(Xb)
if y is None:
if X.shape[0] <= self.batch_size:
y = yb
break
else:
y = numpy.zeros(X.shape[:1] + yb.shape[1:], dtype=theano.config.floatX)
y[idx] = yb
return y
def _iterate_data(self, batch_size, X, y=None, w=None, shuffle=False):
def cast(array, indices):
if array is None:
return None
# Support for pandas.DataFrame, requires custom indexing.
if type(array).__name__ == 'DataFrame':
array = array.loc[indices]
else:
array = array[indices]
# Support for scipy.sparse; convert after slicing.
if hasattr(array, 'todense'):
array = array.todense()
return array.astype(theano.config.floatX)
total_size = X.shape[0]
indices = numpy.arange(total_size)
if shuffle:
numpy.random.shuffle(indices)
for index in range(0, total_size, batch_size):
excerpt = indices[index:index + batch_size]
Xb, yb, wb = cast(X, excerpt), cast(y, excerpt), cast(w, excerpt)
yield Xb, yb, wb, excerpt
def _print(self, text):
if self.verbose:
sys.stdout.write(text)
sys.stdout.flush()
def _batch_impl(self, X, y, w, processor, mode, output, shuffle):
progress, batches = 0, X.shape[0] / self.batch_size
loss, count = 0.0, 0
for Xb, yb, wb, _ in self._iterate_data(self.batch_size, X, y, w, shuffle):
self._do_callback('on_batch_start', locals())
if mode == 'train':
loss += processor(Xb, yb, wb if wb is not None else 1.0)
elif mode == 'train_obj':
loss += processor(Xb, yb)
else:
loss += processor(Xb, yb)
count += 1
while count / batches > progress / 60:
self._print(output)
progress += 1
self._do_callback('on_batch_finish', locals())
self._print('\r')
return loss / count
def _train_impl(self, X, y, w=None):
return self._batch_impl(X, y, w, self.trainer, mode='train', output='.', shuffle=True)
def _train_obj_impl(self, X, y, w=None):
return self._batch_impl(X, y, w, self.validator, mode='train_obj', output=' ', shuffle=False)
def _valid_impl(self, X, y, w=None):
return self._batch_impl(X, y, w, self.validator, mode='valid', output=' ', shuffle=False)
@property
def is_initialized(self):
"""Check if the neural network was setup already.
"""
return not (self.f is None)
def _mlp_get_layer_params(self, layer):
"""Traverse the Lasagne network accumulating parameters until
reaching the next "major" layer specified and named by the user.
"""
assert layer.name is not None, "Expecting this layer to have a name."
params = []
while hasattr(layer, 'input_layer'):
params.extend(layer.get_params())
layer = layer.input_layer
if layer.name is not None:
break
return params
def _mlp_to_array(self):
return [[p.get_value() for p in self._mlp_get_layer_params(l)] for l in self.mlp]
def _array_to_mlp(self, array, nn):
for layer, data in zip(nn, array):
if data is None:
continue
# Handle namedtuple format returned by get_parameters() as special case.
# Must remove the last `name` item in the tuple since it's not a parameter.
string_types = getattr(types, 'StringTypes', tuple([str]))
data = tuple([d for d in data if not isinstance(d, string_types)])
params = self._mlp_get_layer_params(layer)
assert len(data) == len(params),\
"Mismatch in data size for layer `%s`. %i != %i"\
% (layer.name, len(data), len(params))
for p, d in zip(params, data):
ps = tuple(p.shape.eval())
assert ps == d.shape, "Layer parameter shape mismatch: %r != %r" % (ps, d.shape)
p.set_value(d.astype(theano.config.floatX))
```
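This backend is not normally instantiated directly; it is driven through scikit-neuralnetwork's public `sknn.mlp` estimators, which pass their layer specs and hyperparameters into the methods above. A hedged usage sketch with random data and arbitrary hyperparameters, using layer types that `_get_activation` accepts:
```python
import numpy as np
from sknn.mlp import Regressor, Layer

X = np.random.rand(200, 60).astype('float32')
y = np.random.rand(200, 60).astype('float32')

nn = Regressor(
    layers=[Layer("Rectifier", units=50),   # hidden layer -> DenseLayer + rectify
            Layer("Linear")],               # output layer, size taken from y
    learning_rule='momentum',               # dispatched in _create_trainer_function
    learning_rate=0.01,
    regularize='L2', weight_decay=1e-4,     # aggregated in _create_mlp_trainer
    n_iter=5)
nn.fit(X, y)
predictions = nn.predict(X)
```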
#### File: nn-convection/src/nnload.py
```python
import numpy as np
from sklearn import preprocessing, metrics
import scipy.stats
import pickle
import warnings
from netCDF4 import Dataset
def LoadData(filename, minlev, all_lats=True, indlat=None, N_trn_exs=None,
rainonly=False, noshallow=False, cosflag=True, randseed=False,
verbose=True):
"""v2 of the script to load data. See prep_convection_output.py for how
the input filename is generated.
Args:
filename: The file to be loaded. e.g., './data/convcond_training_v3.pkl'
minlev: The topmost model level for which to load data. Set to 0. to
load all data
all_lats: Logical value for whether to load data from all latitudes
indlat: If all_lats is false, give the index value [0-63] for the
latitude at which to load data.
N_trn_exs: Number of training examples to load. If set to None, or
if requested number exceeds max available will load all.
rainonly: If true, only return training examples of when it is raining
noshallow: If true, only return training examples of when the shallow
convection scheme does NOT happen. (So, only return examples
with deep convection, or no convection at all)
cosflag: If true, use cos(lat) weighting for loading training examples
randseed: If true, seed the random generator to a recreateable state
verbose: If true, prints some basic stats about training set
Returns:
x : 2-d numpy array of input features (m_training examples x
N_input features). If minlev is 0., there will be 60 input
features, the top 30 for temperature and the bottom 30 for
humidity.
        y : 2-d numpy array of output targets (m_training examples x
N_output targets). If minlev is 0., there will be 60 output
features, the top 30 for temp. tendencies and the bottom 30
for q tend.
cv : 1-d array (m_training examples x 1) that gives 1 if convection
occurs and 0 if it does not.
Pout : 1-d arrray (m_training examples x 1) of how much precipitation
occurs in kg/m^2/s (multiply by 3600*24 to convert
precipitation to mm/day)
lat2 : 1-d array of latitude for one hemisphere (since hemispheres
are combined)
lev : The vertical model levels (1 is the surface and 0 is the top
of the atmosphere).
dlev : The difference between model levels, useful for calculating
some derived quantities.
timestep: How large each model timestep is in seconds.
"""
# Data to read in is N_lev x N_lat (SH & NH) x N_samples
    # Samples are quasi-independent with only 5 from each latitude range chosen
# randomly over different longitudes and times within that 24 hour period.
# Need to use encoding because saved using python2 on yellowstone:
# http://stackoverflow.com/q/28218466
v = dict()
[v['Tin'], v['qin'], v['Tout'], v['qout'], Pout, lat] = \
pickle.load(open(filename, 'rb'), encoding='latin1')
# Use this to calculate the real sigma levels
lev, dlev, indlev = get_levs(minlev)
varis = ['Tin', 'qin', 'Tout', 'qout']
# Reshape the arrays
for var in varis:
# Change shape of data to be N_samp x N_lev
if all_lats:
# print('error')
if cosflag:
v[var] = reshape_cos_lats(v[var], indlev, lat)
else:
v[var] = reshape_all_lats(v[var], indlev)
else:
if indlat is not None:
v[var] = reshape_one_lat(v[var], indlev, indlat)
else:
raise TypeError('Need to set an index value for indlat')
# Also reshape precipitation
if all_lats:
if cosflag:
Pout = reshape_cos_lats(Pout, None, lat, is_precip=True)
else:
# Need to do a transpose to be consistent with reshape_all_lats
Pout = np.reshape(Pout.transpose(), -1)
else:
Pout = Pout[indlat, :]
# Randomize the order of these events
m = v['Tin'].shape[0]
if randseed:
np.random.seed(0)
randind = np.random.permutation(m)
for var in varis:
v[var] = v[var][randind, :]
Pout = Pout[randind]
# Converted heating rates to K/day and g/kg/day in
# prep_convection_output.py
# Concatenate input and output variables together
x = pack(v['Tin'], v['qin'], axis=1)
y = pack(v['Tout'], v['qout'], axis=1)
# The outputs get lined up in prep_convection_output.py
# Print some statistics about rain and limit to when it's raining if True
x, y, Pout = limitrain(x, y, Pout, rainonly, noshallow=noshallow,
verbose=verbose)
# Limit to only certain events if requested
if N_trn_exs is not None:
if N_trn_exs > y.shape[0]:
warnings.warn('Requested more samples than available. Using the' +
'maximum number available')
N_trn_exs = y.shape[0]
ind = np.arange(N_trn_exs)
x = x[ind, :]
y = y[ind, :]
Pout = Pout[ind]
# Store when convection occurs
cv, _ = whenconvection(y, verbose=verbose)
timestep = 10*60 # 10 minute timestep in seconds
return x, y, cv, Pout, lat, lev, dlev, timestep
def reshape_cos_lats(z, indlev, lat, is_precip=False):
if is_precip:
z = z.swapaxes(0, 1)
z2 = np.empty((0))
else:
z = z[indlev, :, :]
z = z.swapaxes(0, 2)
z2 = np.empty((0, sum(indlev)))
N_ex = z.shape[0]
for i, latval in enumerate(lat):
Ninds = int(N_ex * np.cos(np.deg2rad(latval)))
if is_precip:
z2 = np.concatenate((z2, z[0: Ninds, i]), axis=0)
else:
z2 = np.concatenate((z2, z[0:Ninds, i, :]), axis=0)
return z2
def reshape_all_lats(z, indlev):
# Expects data to be N_lev x N_lat x N_samples and returns
# (N_lat*N_samp x N_lev)
z = z[indlev, :, :]
z = z.swapaxes(0, 2)
return np.reshape(z, (-1, sum(indlev)))
def reshape_one_lat(z, indlev, indlat):
# Expects data to be N_lev x N_lat x N_samples and returns (N_samp x N_lev)
z = z[indlev, indlat, :]
z = z.swapaxes(0, 1)
return z
def pack(d1, d2, axis=1):
"""Combines T & q profiles as an input matrix to NN"""
return np.concatenate((d1, d2), axis=axis)
def unpack(data, vari, axis=1):
"""Reverse pack operation to turn ouput matrix into T & q"""
N = int(data.shape[axis]/2)
varipos = {'T': np.arange(N), 'q': np.arange(N, 2*N)}
out = np.take(data, varipos[vari], axis=axis)
return out
# Initialize & fit scaler
def init_pp(ppi, raw_data):
# Initialize list of scaler objects
if ppi['name'] == 'MinMax':
pp = [preprocessing.MinMaxScaler(feature_range=(-1.0, 1.0)), # temp
preprocessing.MinMaxScaler(feature_range=(-1.0, 1.0))] # humid.
elif ppi['name'] == 'MaxAbs':
pp = [preprocessing.MaxAbsScaler(), # for temperature
preprocessing.MaxAbsScaler()] # and humidity
elif ppi['name'] == 'StandardScaler':
pp = [preprocessing.StandardScaler(), # for temperature
preprocessing.StandardScaler()] # and humidity
elif ppi['name'] == 'RobustScaler':
pp = [preprocessing.RobustScaler(), # for temperature
preprocessing.RobustScaler()] # and humidity
elif ppi['name'] == 'SimpleY':
pp = [10./1., 10./2.5] # for temperature
else:
        raise ValueError('Incorrect scaler name')
# Initialize scalers with data
if ppi['method'] == 'individually':
pp[0].fit(unpack(raw_data, 'T'))
pp[1].fit(unpack(raw_data, 'q'))
elif ppi['method'] == 'alltogether':
pp[0].fit(np.reshape(unpack(raw_data, 'T'), (-1, 1)))
pp[1].fit(np.reshape(unpack(raw_data, 'q'), (-1, 1)))
elif ppi['method'] == 'qTindividually':
if ppi['name'] != 'SimpleY':
pp = pp[0]
pp.fit(raw_data)
else:
raise ValueError('Incorrect scaler method')
return pp
# Transform data using initialized scaler
def transform_data(ppi, pp, raw_data):
if ppi['method'] == 'individually':
T_data = pp[0].transform(unpack(raw_data, 'T'))
q_data = pp[1].transform(unpack(raw_data, 'q'))
elif ppi['method'] == 'alltogether':
T_data = pp[0].transform(np.reshape(unpack(raw_data, 'T'), (-1, 1)))
q_data = pp[1].transform(np.reshape(unpack(raw_data, 'q'), (-1, 1)))
# Return to original shape (N_samples x N_features) rather than
# (N_s*N_f x 1)
shp = unpack(raw_data, 'T').shape
T_data = np.reshape(T_data, shp)
q_data = np.reshape(q_data, shp)
elif ppi['method'] == 'qTindividually':
if ppi['name'] == 'SimpleY':
T_data = unpack(raw_data, 'T')/pp[0]
q_data = unpack(raw_data, 'q')/pp[1]
else:
all_data = pp.transform(raw_data)
T_data = unpack(all_data, 'T')
q_data = unpack(all_data, 'q')
else:
print('Given method is ' + ppi['method'])
raise ValueError('Incorrect scaler method')
# Return single transformed array as output
return pack(T_data, q_data)
# Apply inverse transformation to unscale data
def inverse_transform_data(ppi, pp, trans_data):
if ppi['method'] == 'individually':
T_data = pp[0].inverse_transform(unpack(trans_data, 'T'))
q_data = pp[1].inverse_transform(unpack(trans_data, 'q'))
elif ppi['method'] == 'alltogether':
T_data = pp[0].inverse_transform(np.reshape(unpack(trans_data, 'T'),
(-1, 1)))
q_data = pp[1].inverse_transform(np.reshape(unpack(trans_data, 'q'),
(-1, 1)))
# Return to original shape (N_samples x N_features) rather than
# (N_s*N_f x 1)
shp = unpack(trans_data, 'T').shape
T_data = np.reshape(T_data, shp)
q_data = np.reshape(q_data, shp)
elif ppi['method'] == 'qTindividually':
if ppi['name'] == 'SimpleY':
T_data = unpack(trans_data, 'T') * pp[0]
q_data = unpack(trans_data, 'q') * pp[1]
else:
all_data = pp.inverse_transform(trans_data)
T_data = unpack(all_data, 'T')
q_data = unpack(all_data, 'q')
else:
raise ValueError('Incorrect scaler method')
# Return single transformed array as output
return pack(T_data, q_data)
def limitrain(x, y, Pout, rainonly=False, noshallow=False, verbose=True):
indrain = np.greater(Pout, 0)
if verbose:
print('There is some amount of rain {:.1f}% of the time'.
format(100. * np.sum(indrain)/len(indrain)))
print('There is a rate of >3 mm/day {:.1f}% of the time'.
format(100. * np.sum(np.greater(Pout, 3))/len(indrain)))
if rainonly:
x = x[indrain, :]
y = y[indrain, :]
Pout = Pout[indrain]
if verbose:
print('Only looking at times it is raining!')
if noshallow:
cv, _ = whenconvection(y, verbose=True)
indnosha = np.logical_or(Pout > 0, cv == 0)
x = x[indnosha, :]
y = y[indnosha, :]
Pout = Pout[indnosha]
if verbose:
print('Excluding all shallow convective events...')
return x, y, Pout
def whenconvection(y, verbose=True):
"""Caluclate how often convection occurs...useful for classification
Also store a variable that is 1 if convection and 0 if no convection"""
cv_strength = np.sum(np.abs(unpack(y, 'T')), axis=1)
cv = np.copy(cv_strength)
cv[cv > 0] = 1
if verbose:
print('There is convection {:.1f}% of the time'.
format(100. * np.sum(cv)/len(cv)))
return cv, cv_strength
def avg_hem(data, lat, axis, split=False):
"""Averages the NH and SH data (or splits them into two data sets)"""
ixsh = np.where(lat < 0)[0] # where returns a tuple
ixnh = np.where(lat >= 0)[0]
if len(ixsh) == 0:
print(lat)
raise ValueError('Appears that lat does not have SH values')
lathalf = lat[ixnh]
sh = np.take(data, ixsh, axis=axis)
nh = np.take(data, ixnh, axis=axis)
# Flip the direction of the sh data at a given axis
shrev = np.swapaxes(np.swapaxes(sh, 0, axis)[::-1], 0, axis)
# If splitting data, return these arrays
if split:
return nh, shrev, lathalf
else:
return (nh + shrev) / 2., lathalf
def load_one_lat(x_ppi, y_ppi, x_pp, y_pp, r_mlp, indlat, datafile, minlev=0.,
rainonly=False):
"""Returns N_samples x 2*N_lev array of true and predicted values
at a given latitude"""
# Load data
x, y, cv, Pout, lat, lev, dlev, timestep = \
LoadData(datafile, minlev, rainonly=rainonly, all_lats=False,
indlat=indlat, verbose=False, N_trn_exs=2500)
# Calculate predicted output
x = transform_data(x_ppi, x_pp, x)
y_pred = r_mlp.predict(x)
y_pred = inverse_transform_data(y_ppi, y_pp, y_pred)
# Output true and predicted temperature and humidity tendencies
T = unpack(y, 'T')
q = unpack(y, 'q')
T_pred = unpack(y_pred, 'T')
q_pred = unpack(y_pred, 'q')
return T, q, T_pred, q_pred
def stats_by_latlev(x_ppi, y_ppi, x_pp, y_pp, r_mlp, lat, lev, datafile):
# Initialize
Tmean = np.zeros((len(lat), len(lev)))
qmean = np.zeros((len(lat), len(lev)))
Tbias = np.zeros((len(lat), len(lev)))
qbias = np.zeros((len(lat), len(lev)))
rmseT = np.zeros((len(lat), len(lev)))
rmseq = np.zeros((len(lat), len(lev)))
rT = np.zeros((len(lat), len(lev)))
rq = np.zeros((len(lat), len(lev)))
for i in range(len(lat)):
print('Loading data for latitude {:d} of {:d}'.format(i, len(lat)))
T_true, q_true, T_pred, q_pred = \
load_one_lat(x_ppi, y_ppi, x_pp, y_pp, r_mlp, i, datafile,
minlev=np.min(lev))
# Get means of true output
Tmean[i, :] = np.mean(T_true, axis=0)
qmean[i, :] = np.mean(q_true, axis=0)
# Get bias from means
Tbias[i, :] = np.mean(T_pred, axis=0) - Tmean[i, :]
qbias[i, :] = np.mean(q_pred, axis=0) - qmean[i, :]
# Get rmse
rmseT[i, :] = np.sqrt(
metrics.mean_squared_error(T_true, T_pred,
multioutput='raw_values'))
rmseq[i, :] = np.sqrt(
metrics.mean_squared_error(q_true, q_pred,
multioutput='raw_values'))
# Get correlation coefficients
for j in range(len(lev)):
rT[i, j], _ = scipy.stats.pearsonr(T_true[:, j], T_pred[:, j])
rq[i, j], _ = scipy.stats.pearsonr(q_true[:, j], q_pred[:, j])
return Tmean.T, qmean.T, Tbias.T, qbias.T, rmseT.T, rmseq.T, rT.T, rq.T
def GetDataPath(cirrusflag, convcond):
if cirrusflag:
datadir = '/disk7/jgdwyer/chickpea/nndata/'
else:
datadir = './data/'
if convcond:
trainfile = datadir + 'convcond_training_v3.pkl'
testfile = datadir + 'convcond_testing_v3.pkl'
pp_str = 'convcond_'
else:
trainfile = datadir + 'conv_training_v3.pkl'
testfile = datadir + 'conv_testing_v3.pkl'
pp_str = ''
return datadir, trainfile, testfile, pp_str
def get_levs(minlev):
# Define half sigma levels for data
half_lev = np.array([0.000000000000000e+00, 9.202000000000000e-03,
1.244200000000000e-02, 1.665600000000000e-02,
2.207400000000000e-02, 2.896500000000000e-02,
3.762800000000000e-02, 4.839600000000000e-02,
6.162600000000000e-02, 7.769200000000000e-02,
9.697200000000000e-02, 1.198320000000000e-01,
1.466070000000000e-01, 1.775800000000000e-01,
2.129570000000000e-01, 2.528400000000000e-01,
2.972050000000000e-01, 3.458790000000000e-01,
3.985190000000000e-01, 4.546020000000000e-01,
5.134170000000000e-01, 5.740720000000000e-01,
6.355060000000000e-01, 6.965140000000000e-01,
7.557840000000000e-01, 8.119360000000000e-01,
8.635820000000000e-01, 9.093730000000000e-01,
9.480640000000000e-01, 9.785660000000000e-01,
1.000000000000000e+00])
# Calculate the full levels
lev = np.array(np.zeros((half_lev.size-1,)))
for i in range(half_lev.size-1):
lev[i] = (half_lev[i] + half_lev[i+1])/2.
# Limit levels to those specified
indlev = np.greater_equal(lev, minlev)
lev = lev[indlev]
# Calculate the spacing between levels
dlev = np.diff(half_lev)
dlev = dlev[indlev]
return lev, dlev, indlev
def get_x_y_pred_true(r_str, training_file, minlev, noshallow=False,
rainonly=False):
# Load model and preprocessors
mlp, _, errors, x_ppi, y_ppi, x_pp, y_pp, lat, lev, _ = \
pickle.load(open('./data/regressors/' + r_str + '.pkl', 'rb'))
# Load raw data from file
x_unscl, ytrue_unscl, _, _, _, _, _, _ = \
LoadData(training_file, minlev=minlev, N_trn_exs=None)
# Scale true values
ytrue_scl = transform_data(y_ppi, y_pp, ytrue_unscl)
# Apply x preprocessing to scale x-data and predict output
x_scl = transform_data(x_ppi, x_pp, x_unscl)
ypred_scl = mlp.predict(x_scl)
ypred_unscl = inverse_transform_data(y_ppi, y_pp, ypred_scl)
return x_scl, ypred_scl, ytrue_scl, x_unscl, ypred_unscl, ytrue_unscl
def load_error_history(r_str):
    _, _, err, _, _, _, _, _, _, _ = pickle.load(open('./data/regressors/' +
                                                      r_str + '.pkl', 'rb'))
return err
def load_netcdf_onepoint(filename, minlev, latind=None, lonind=None,
timeind=None, ensemble=False):
f = Dataset(filename, mode='r')
# Files are time x lev x lat x lon
Tin = f.variables['t_intermed'][:]
qin = f.variables['q_intermed'][:]
Tout = f.variables['dt_tg_convection'][:]
qout = f.variables['dt_qg_convection'][:]
Pout = f.variables['convection_rain'][:]
Tout_dbm = f.variables['dt_tg_convection_dbm'][:]
qout_dbm = f.variables['dt_qg_convection_dbm'][:]
Pout_dbm = f.variables['convection_rain_dbm'][:]
# If requested loaded predictions from ensemble
ten = dict() # initialize these regardless
qen = dict()
if ensemble:
tstr = ['dt' + str(i) for i in range(10)]
qstr = ['dq' + str(i) for i in range(10)]
for v in tstr:
ten[v] = f.variables[v][:]
for v in qstr:
qen[v] = f.variables[v][:]
f.close()
_, _, indlev = get_levs(minlev)
if latind is None:
latind = np.random.randint(0, Tin.shape[2])
if lonind is None:
lonind = np.random.randint(0, Tin.shape[3])
if timeind is None:
timeind = np.random.randint(0, Tin.shape[0])
Tin = np.squeeze(Tin[timeind, indlev, latind, lonind])
qin = np.squeeze(qin[timeind, indlev, latind, lonind])
Tout = np.squeeze(Tout[timeind, indlev, latind, lonind]) * 3600 * 24
qout = np.squeeze(qout[timeind, indlev, latind, lonind]) * 3600 * 24 * 1000
Pout = np.squeeze(Pout[timeind, latind, lonind]) * 3600 * 24
Tout_dbm = np.squeeze(Tout_dbm[timeind, indlev, latind, lonind])\
* 3600 * 24
qout_dbm = np.squeeze(qout_dbm[timeind, indlev, latind, lonind]) \
* 3600 * 24 * 1000
Pout_dbm = np.squeeze(Pout_dbm[timeind, latind, lonind]) * 3600 * 24
for key in ten:
ten[key] = np.squeeze(ten[key][timeind, indlev, latind, lonind])\
* 3600 * 24
for key in qen:
qen[key] = np.squeeze(qen[key][timeind, indlev, latind, lonind])\
* 3600 * 24 * 1000
x = pack(Tin[:, None].T, qin[:, None].T)
y = pack(Tout[:, None].T, qout[:, None].T)
y_dbm = pack(Tout_dbm[:, None].T, qout_dbm[:, None].T)
return x, y, y_dbm, [Pout], [Pout_dbm], ten, qen
```
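A short usage sketch tying nnload.py together, following the LoadData docstring; the pickle path matches GetDataPath's default, while `minlev`, the sample count, and the scaler choices are placeholder assumptions.
```python
import nnload   # assuming src/ is on the import path

x, y, cv, Pout, lat, lev, dlev, timestep = nnload.LoadData(
    './data/conv_training_v3.pkl', minlev=0.2, N_trn_exs=10000, randseed=True)

# Fit feature-wise scalers on the raw inputs/outputs, then apply them
x_ppi = {'name': 'StandardScaler', 'method': 'individually'}
y_ppi = {'name': 'SimpleY', 'method': 'qTindividually'}
x_pp = nnload.init_pp(x_ppi, x)
y_pp = nnload.init_pp(y_ppi, y)
x_scl = nnload.transform_data(x_ppi, x_pp, x)
y_scl = nnload.transform_data(y_ppi, y_pp, y)

# Split the packed columns back into temperature and humidity tendencies
T_tend = nnload.unpack(y, 'T')
q_tend = nnload.unpack(y, 'q')
```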
|
{
"source": "JGearhart4/CST205_Project_3",
"score": 3
}
|
#### File: CST205_Project_3/GUI/Gui1.py
```python
from dejavu import Dejavu
from dejavu.recognize import FileRecognizer, MicrophoneRecognizer
from Tkinter import *
import warnings
import time
warnings.filterwarnings("ignore")
## Main window
root = Tk()
root.title('Name that Song!')
root.geometry('500x500')
config = {
"database": {
"host": "127.0.0.1",
"user": "root",
"passwd": "<PASSWORD>",
"db": "dejavu",
}
}
def pause():
time.sleep(5)
def start(event):
if __name__ == '__main__':
djv = Dejavu(config)
secs = 8
song = djv.recognize(MicrophoneRecognizer, seconds=secs)
if song is None:
errorlabel = Label(frm, text="Did you play the song out loud so your mic could hear it?")
errorlabel.pack(side=TOP)
else:
print "From mic with %d seconds we recognized: %s\n" % (secs, song)
if song["song_id"] == 1:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Stressed Out - Twenty One Pilots')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text='Closer - The Chainsmokers')
recommend2.pack(side=TOP)
if song["song_id"] == 2:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Mama Said - <NAME>')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text='See You Again - <NAME> ft. <NAME>')
recommend2.pack(side=TOP)
if song["song_id"] == 3:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
                recommend1 = Label(frm, text='Just Like Fire - P!nk')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text='Hello - Adele')
recommend2.pack(side=TOP)
if song["song_id"] == 5:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Dangerous Woman - Ariana Grande')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Side To Side - Ariana Grande ft. Ninki Minaj")
recommend2.pack(side=TOP)
if song["song_id"] == 6:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Into You - Ariana Grande')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Dangerous Woman - Ariana Grande")
recommend2.pack(side=TOP)
if song["song_id"] == 7:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='My Humps - The Black Eyed Peas')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Fergalicious - Fergie")
recommend2.pack(side=TOP)
if song["song_id"] == 8:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Uptown Funk - <NAME> ft. <NAME>')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Ride - Twenty One Pilots")
recommend2.pack(side=TOP)
if song["song_id"] == 9:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Closer - The Chainsmokers')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Ride - Twenty One Pilots")
recommend2.pack(side=TOP)
if song["song_id"] == 10:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='When I Was Your Man - <NAME>')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="It Will Rain - <NAME>")
recommend2.pack(side=TOP)
if song["song_id"] == 11:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='How Deep - <NAME> & Disciples')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Stitches - <NAME>")
recommend2.pack(side=TOP)
if song["song_id"] == 12:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Broccoli - D.R.A.M ft. Lil Yachty')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Do You Mind - DJ Khaled ft. <NAME>")
recommend2.pack(side=TOP)
if song["song_id"] == 13:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='The Greatest - Sia')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Titanium - <NAME> ft. Sia")
recommend2.pack(side=TOP)
if song["song_id"] == 14:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Paradise - Coldplay')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Send My Love (To Your New Lover) - Adele")
recommend2.pack(side=TOP)
if song["song_id"] == 15:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Juju On That Beat - <NAME> & Zayion McCall')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Don't Mind - <NAME>")
recommend2.pack(side=TOP)
if song["song_id"] == 16:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Scars To Your Beautiful - <NAME>')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Hide Away - Daya")
recommend2.pack(side=TOP)
if song["song_id"] == 17:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='2 Phones - <NAME>')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="<NAME> - Desiigner")
recommend2.pack(side=TOP)
if song["song_id"] == 18:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Broccoli - D.R.A.M')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Panda - Desiigner")
recommend2.pack(side=TOP)
if song["song_id"] == 19:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Side To Side - <NAME> ft. <NAME>')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="No Limit - Usher ft. Young Thug")
recommend2.pack(side=TOP)
if song["song_id"] == 20:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Hold You Down - DJ Khaled ft. <NAME>')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Tru - Lloyd")
recommend2.pack(side=TOP)
if song["song_id"] == 21:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Let Me Love You - DJ Snake ft. <NAME>')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Lean On - DJ Snake & Major Lazer")
recommend2.pack(side=TOP)
if song["song_id"] == 22:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Cold Water - Major Lazer ft. <NAME>')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="In The Name Of Love - <NAME> & <NAME>")
recommend2.pack(side=TOP)
if song["song_id"] == 23:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Toothbrush - DNCE')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Body Moves - DNCE")
recommend2.pack(side=TOP)
if song["song_id"] == 24:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Cold Water - Major Lazer ft. <NAME>')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Side To Side - <NAME>ande ft. <NAME>")
recommend2.pack(side=TOP)
if song["song_id"] == 25:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Scars To Your Beautiful - <NAME>')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Don't Wanna Know - Maroon 5")
recommend2.pack(side=TOP)
if song["song_id"] == 26:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='One Dance - Drake')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Don't Mind - <NAME>")
recommend2.pack(side=TOP)
if song["song_id"] == 27:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Lego House - <NAME>')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Stitches - <NAME>")
recommend2.pack(side=TOP)
if song["song_id"] == 28:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text="I'm Not The Only One - <NAME>")
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Cold Water - Major Lazer ft. <NAME>")
recommend2.pack(side=TOP)
if song["song_id"] == 29:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text="Don't Let Me Down - The Chainsmokers")
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Lights - <NAME>")
recommend2.pack(side=TOP)
if song["song_id"] == 30:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Going Under - Evanescence')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Sweet Sacrifice - Evanescence")
recommend2.pack(side=TOP)
if song["song_id"] == 31:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Hold Up - Beyonce')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Side To Side - Ariana Grande ft. <NAME>")
recommend2.pack(side=TOP)
if song["song_id"] == 32:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Sorry - <NAME>')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Cold Water - Major Lazer ft. <NAME>")
recommend2.pack(side=TOP)
if song["song_id"] == 33:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='BO$$ - Fifth Harmony')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Sorry - <NAME>")
recommend2.pack(side=TOP)
if song["song_id"] == 34:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Scars To Your Beautiful - Alessia Cara')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="You've Got The Love - Florence And The Machine")
recommend2.pack(side=TOP)
if song["song_id"] == 35:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Scars To Your Beautiful - Alessia Cara')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Don't Wanna Know - Maroon 5")
recommend2.pack(side=TOP)
if song["song_id"] == 36:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Just Give Me A Reason - P!nk ft. <NAME>')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Royals - Lorde")
recommend2.pack(side=TOP)
if song["song_id"] == 37:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Cold Water - Major Lazer ft. <NAME>')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="What They Want - Russs")
recommend2.pack(side=TOP)
if song["song_id"] == 38:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='I Took A Pill In Ibiza - <NAME>')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Scars To Your Beautiful - <NAME>")
recommend2.pack(side=TOP)
if song["song_id"] == 39:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='What The Hell - Avril Lavigne')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Demons - Imagine Dragons")
recommend2.pack(side=TOP)
if song["song_id"] == 40:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Waiting On The World To Change - <NAME>')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Daughters - <NAME>")
recommend2.pack(side=TOP)
if song["song_id"] == 41:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
                recommend1 = Label(frm, text='Hotline Bling - Drake')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Uptown Funk - <NAME> ft. <NAME>")
recommend2.pack(side=TOP)
if song["song_id"] == 42:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='This Is What You Came For - <NAME>')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Faded - <NAME>")
recommend2.pack(side=TOP)
if song["song_id"] == 43:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Treat You Better - <NAME>')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Don't Let Me Down - The Chainsmokers")
recommend2.pack(side=TOP)
if song["song_id"] == 44:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Counting Stars - OneRepublic')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Take Me To Church - Hozier")
recommend2.pack(side=TOP)
if song["song_id"] == 45:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Scars To Your Beautiful - <NAME>')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Side To Side - Ariana Grande ft. <NAME>")
recommend2.pack(side=TOP)
if song["song_id"] == 46:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
                recommend1 = Label(frm, text='Scars To Your Beautiful - <NAME>')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Side To Side - Ariana Grande ft. <NAME>")
recommend2.pack(side=TOP)
if song["song_id"] == 47:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Locked Away - R City ft. <NAME>')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Sorry - <NAME>")
recommend2.pack(side=TOP)
if song["song_id"] == 48:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Rock On - Tucker Beathard')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Backroad Song - <NAME>")
recommend2.pack(side=TOP)
if song["song_id"] == 49:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Cold Water - Major Lazer ft. <NAME>')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Cake By The Ocean - DNCE")
recommend2.pack(side=TOP)
if song["song_id"] == 50:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Cry Me A River - <NAME>')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Not A Bad Thing - <NAME>")
recommend2.pack(side=TOP)
if song["song_id"] == 51:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Hot N Cold - Katy Perry')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Side To Side - Ariana Grande ft. <NAME>")
recommend2.pack(side=TOP)
if song["song_id"] == 52:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Side To Side - Ariana Grande ft. <NAME>')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Cold Water - Major Lazer ft. <NAME>")
recommend2.pack(side=TOP)
if song["song_id"] == 53:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='I Took A Pill In Ibiza - <NAME>')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Don't Wanna Know - Maroon 5")
recommend2.pack(side=TOP)
if song["song_id"] == 54:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='The Greatest - Sia')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Million Reasons - Lady Gaga")
recommend2.pack(side=TOP)
if song["song_id"] == 55:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Bartender - <NAME>')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="What Hurts The Most - Rascal Flatts")
recommend2.pack(side=TOP)
if song["song_id"] == 56:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Starry Eyed - <NAME>ing')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Anything Could - Ellie Goulding")
recommend2.pack(side=TOP)
if song["song_id"] == 57:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Criminal Mind - <NAME>')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Scars To Your Beautiful - Alessia Cara")
recommend2.pack(side=TOP)
if song["song_id"] == 58:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Let Me Love You - DJ Snake ft. <NAME>')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Scars To Your Beautiful - Alessia Cara")
recommend2.pack(side=TOP)
if song["song_id"] == 59:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='24K Magic - Bruno Mars')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Blank Space - Taylor Swift")
recommend2.pack(side=TOP)
if song["song_id"] == 60:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Treat You Better - Shawn Mendes')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Don't Wanna Know - Maroon 5")
recommend2.pack(side=TOP)
if song["song_id"] == 61:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='<NAME> - <NAME> ft. Meghan Trainor')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Scars To Your Beautiful - <NAME>")
recommend2.pack(side=TOP)
if song["song_id"] == 62:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Blank Space - <NAME>')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="All About That Bass - Meghan Trainor")
recommend2.pack(side=TOP)
if song["song_id"] == 63:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Summer - <NAME>')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Side To Side - <NAME> ft. <NAME>")
recommend2.pack(side=TOP)
if song["song_id"] == 64:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Locked Away - R City ft. <NAME>')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Scars To Your Beautiful - <NAME>")
recommend2.pack(side=TOP)
if song["song_id"] == 65:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Cold Water - Major Lazer ft. <NAME>')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Side To Side - Ariana Grande ft. <NAME>")
recommend2.pack(side=TOP)
if song["song_id"] == 66:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='So What - P!nk')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Love The Way You Lie - Eminem ft. Rihanna")
recommend2.pack(side=TOP)
if song["song_id"] == 67:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Scars To Your Beautiful - <NAME>')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Cold Water - Major Lazer ft. <NAME>")
recommend2.pack(side=TOP)
if song["song_id"] == 68:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Shake It Off - T<NAME>')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Let Me Love You - DJ Snake ft. <NAME>")
recommend2.pack(side=TOP)
if song["song_id"] == 69:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Die Young - Ke$ha')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Counting Stars - OneRepublic")
recommend2.pack(side=TOP)
if song["song_id"] == 70:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Kiss It Better - Rihanna')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Pour It Up - Rihanna")
recommend2.pack(side=TOP)
if song["song_id"] == 71:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Love Song - Adele')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Diamonds - Rihanna")
recommend2.pack(side=TOP)
if song["song_id"] == 72:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Titanium - <NAME>')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Blank Space - Taylor Swift")
recommend2.pack(side=TOP)
if song["song_id"] == 73:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Kiss It Better - Rihanna')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="One Dance - Drake")
recommend2.pack(side=TOP)
if song["song_id"] == 74:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Let Her Go - Passenger')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Counting Stars - One Republic")
recommend2.pack(side=TOP)
if song["song_id"] == 75:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Hello - Adele')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Skyfall - Adele")
recommend2.pack(side=TOP)
if song["song_id"] == 76:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Mercy - <NAME>')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Into You - <NAME>")
recommend2.pack(side=TOP)
if song["song_id"] == 77:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Hotline Bling - Drake')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="One Call Away - <NAME>")
recommend2.pack(side=TOP)
if song["song_id"] == 78:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Hymn For The Weekend - Coldplay')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="One Dance - Drake")
recommend2.pack(side=TOP)
if song["song_id"] == 79:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text="Hips Don't Lie - Shakira")
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Moves Like Jagger - Maroon 5")
recommend2.pack(side=TOP)
if song["song_id"] == 80:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='The Greatest - Sia')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Sit Still, Look Pretty - Daya")
recommend2.pack(side=TOP)
if song["song_id"] == 81:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='In The Name of Love - <NAME>')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Big Girls Cry - Sia")
recommend2.pack(side=TOP)
if song["song_id"] == 82:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Drive - Incubus')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Say It Ain't So - Weezer")
recommend2.pack(side=TOP)
if song["song_id"] == 83:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Purple Lamborghini - Skrillex')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Starboy - The Weeked")
recommend2.pack(side=TOP)
if song["song_id"] == 84:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Thinking Out Loud - <NAME>')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="I'm Not the Only One - <NAME>")
recommend2.pack(side=TOP)
if song["song_id"] == 85:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Black Beetles - Rae Sremmond ft. Gucci Mane')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Counting Stars - One Republic")
recommend2.pack(side=TOP)
if song["song_id"] == 86:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Heathens - Twenty One Pilots')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Radioactive - Imagine Dragons")
recommend2.pack(side=TOP)
if song["song_id"] == 87:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text="We Don't Talk Anymore - <NAME>")
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="This Is What You Came For Lyrics - <NAME> ft. Rihanna")
recommend2.pack(side=TOP)
if song["song_id"] == 88:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Take Me To Church - Hozier')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Mirrors - <NAME>")
recommend2.pack(side=TOP)
if song["song_id"] == 89:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='<NAME> - Desiigner')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Treat You Better - <NAME>")
recommend2.pack(side=TOP)
if song["song_id"] == 90:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Just The Way You Are lyrics - <NAME>')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Thinking out Loud - <NAME>")
recommend2.pack(side=TOP)
if song["song_id"] == 91:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Cheap Thrills - Sia ft. <NAME>')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Starving - Hailee Steinfeld Grey feat Zedd")
recommend2.pack(side=TOP)
if song["song_id"] == 92:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Heathens- Twenty One Pilots')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Adele - Send My Love (To Your New Lover)")
recommend2.pack(side=TOP)
if song["song_id"] == 93:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text="Don't Let Me Down- The Chainsmokers ft. Daya")
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="This Is What You Came For - <NAME> ft. Rihanna")
recommend2.pack(side=TOP)
if song["song_id"] == 94:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Young M.A - OOOUUU')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Do You Mind - DJ Khaled ft <NAME>")
recommend2.pack(side=TOP)
if song["song_id"] == 95:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Meghan Trainor - Lips Are Movin')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Gnash - I hate u, I love u")
recommend2.pack(side=TOP)
if song["song_id"] == 96:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='<NAME> - What Do You Mean')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="<NAME> - Treat You Better")
recommend2.pack(side=TOP)
if song["song_id"] == 97:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Love Yourself - <NAME>')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Let me love you -DJ Snake _ <NAME>")
recommend2.pack(side=TOP)
if song["song_id"] == 98:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='Ride - Twenty One Pilots')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Work from Home - Fifth Harmony ft. <NAME> $")
recommend2.pack(side=TOP)
if song["song_id"] == 99:
id = Label(frm, text='Your song: ' + song['song_name'])
id.pack(side=TOP)
recommend = Label(frm, text='Recommended songs:')
recommend.pack(side=TOP)
recommend1 = Label(frm, text='No Problem - Chance The Rapper ft. 2 Chainz and Lil Wayne')
recommend1.pack(side=TOP)
recommend2 = Label(frm, text="Side To Side - Ariana Grande ft. <NAME>")
recommend2.pack(side=TOP)
# Grid sizing behavior in window
root.grid_rowconfigure(0, weight=1)
root.grid_columnconfigure(0, weight=1)
## Canvas
cnv = Canvas(root, bg ="white")
cnv.grid(row=0, column=0, sticky='nswe')
# Scrollbars for canvas
hScroll = Scrollbar(root, orient=HORIZONTAL, command=cnv.xview)
hScroll.grid(row=1, column=0, sticky='we')
vScroll = Scrollbar(root, orient=VERTICAL, command=cnv.yview)
vScroll.grid(row=0, column=1, sticky='ns')
cnv.configure(xscrollcommand=hScroll.set, yscrollcommand=vScroll.set)
# Frame in canvas
frm = Frame(cnv, bg="white", relief=SUNKEN)
# This puts the frame in the canvas's scrollable zone
cnv.create_window(250, 0, window=frm, anchor='n')
# Frame contents
T = Text(frm, height=6, width=45)
T.pack(side=TOP)
T.insert(END, " Welcome to '<NAME>!'\n Press the button be"
""
""
"low to get started!\n")
Listenbutton = Button(frm, text='Start listening', width=10)
Listenbutton.bind("<Button-1>", start)
Listenbutton.pack(side=TOP, padx=2, pady=2)
# Update display to get correct dimensions
frm.update_idletasks()
# Configure size of canvas's scrollable zone
cnv.configure(scrollregion=(0, 0, frm.winfo_width(), frm.winfo_height()))
# Go!
root.mainloop()
```
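The long chain of `if song["song_id"] == N:` blocks above is a hand-written lookup table. As a purely illustrative alternative (not part of the original script), a dictionary keyed by song id can drive the same three labels; the two entries below are copied from the chain above and the helper name is made up.

```python
from tkinter import Label, TOP

# Illustrative excerpt of the id -> recommendations mapping used above.
RECOMMENDATIONS = {
    59: ("24K Magic - Bruno Mars", "Blank Space - Taylor Swift"),
    60: ("Treat You Better - Shawn Mendes", "Don't Wanna Know - Maroon 5"),
}

def show_recommendations(frm, song):
    # Render the same labels as one branch of the if-chain, table-driven.
    recs = RECOMMENDATIONS.get(song["song_id"])
    if recs is None:
        return
    Label(frm, text='Your song: ' + song['song_name']).pack(side=TOP)
    Label(frm, text='Recommended songs:').pack(side=TOP)
    for rec in recs:
        Label(frm, text=rec).pack(side=TOP)
```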
|
{
"source": "jgeboski/mctl",
"score": 2
}
|
#### File: mctl/mctl/commands.py
```python
import click
import logging
import os
import time
from typing import Any, Callable, List, Optional
from mctl.config import Config, load_config, Package, Server
from mctl.exception import MctlError
from mctl.fake_server import (
DEFAULT_MESSAGE,
DEFAULT_MOTD,
DEFAULT_PORT,
run_fake_server,
)
from mctl.package import (
package_build,
package_revisions,
package_upgrade,
sort_revisions_n2o,
)
from mctl.server import server_execute, server_start, server_start_fake, server_stop
from mctl.util import await_sync
LOG = logging.getLogger(__name__)
class MctlCommand(click.Command):
def invoke(self, *args: Any, **kwargs: Any) -> Any:
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
return super().invoke(*args, **kwargs)
try:
return super().invoke(*args, **kwargs)
except MctlError as ex:
raise click.ClickException(str(ex))
class MctlRootGroup(click.Group):
def command(self, *args: Any, **kwargs: Any) -> Callable:
kwargs["cls"] = MctlCommand
return super().command(*args, **kwargs)
def get_packages(
config: Config,
all_packages: bool,
all_except: Optional[List[str]],
package_names: Optional[List[str]],
server: Optional[Server] = None,
) -> List[Package]:
server_pkg_names = server.packages if server else list(config.packages)
if all_except:
for name in all_except:
config.get_package(name)
selected_names = [name for name in server_pkg_names if name not in all_except]
elif all_packages:
selected_names = server_pkg_names
elif package_names:
selected_names = package_names
else:
raise click.UsageError(
"--all-packages, --all-except, or --package-name required"
)
packages = [config.get_package(name) for name in selected_names]
if len(packages) == 0:
raise click.ClickException("No packages selected with the specified options")
return packages
@click.group(help="Minecraft server controller", cls=MctlRootGroup)
@click.option(
"--config-file",
"-c",
help="Configuration file to use",
envvar="FILE",
default=os.path.expanduser(os.path.join("~", ".mctl/config.yml")),
)
@click.option(
"--debug", "-d", help="Show debugging messages", is_flag=True,
)
@click.pass_context
@await_sync
async def cli(ctx: click.Context, config_file: str, debug: bool) -> None:
logging.basicConfig(
format="[%(asctime)s] [%(levelname)s] %(message)s",
level=logging.DEBUG if debug else logging.INFO,
)
ctx.obj = await load_config(config_file)
@cli.command(help="Build one or more packages")
@click.option(
"--all-packages", "-a", help="Act on all packages", is_flag=True,
)
@click.option(
"--all-except",
"-e",
help="Act on all packages except these (can be specified multiple times)",
envvar="PACKAGE",
multiple=True,
)
@click.option(
"--force",
"-f",
help="Force packages to build even if the revision already exists",
is_flag=True,
)
@click.option(
"--package-name",
"-p",
help="Name(s) of the package to act on (can be specified multiple times)",
envvar="PACKAGE",
multiple=True,
)
@click.pass_obj
@await_sync
async def build(
config: Config,
all_packages: bool,
all_except: Optional[List[str]],
force: bool,
package_name: Optional[List[str]],
) -> None:
packages = get_packages(config, all_packages, all_except, package_name)
# Rather than re-nicing all of the subprocesses for building, just
# re-nice everything at a top-level (including mctl).
if hasattr(os, "nice"):
new_nice = os.nice(config.build_niceness) # type: ignore
LOG.debug("Set niceness to %s for building", new_nice)
else:
LOG.debug("Re-nicing not supported by this OS")
for package in packages:
await package_build(config, package, force)
@cli.command(help="Execute an arbitrary server command")
@click.argument("command", nargs=-1, envvar="COMMAND", required=True)
@click.option(
"--server-name",
"-s",
help="Name of the server to act on",
envvar="SERVER",
required=True,
)
@click.pass_obj
@await_sync
async def execute(config: Config, command: List[str], server_name: str) -> None:
server = config.get_server(server_name)
await server_execute(server, " ".join(command))
@cli.command("fake-server", help="Run the fake server in the foreground")
@click.option(
"--listen-address", "-l", help="IPv4/IPv6 address to listen on", envvar="ADDRESS",
)
@click.option("--icon-file", "-i", help="PNG icon to use", envvar="FILE")
@click.option(
"--message",
"-m",
help="Message to disconnect players with",
envvar="MESSAGE",
default=DEFAULT_MESSAGE,
)
@click.option(
"--motd",
"-t",
help="Message of the day to display",
envvar="MESSAGE",
default=DEFAULT_MOTD,
)
@click.option(
"--port", "-p", help="Port to listen on", envvar="PORT", default=DEFAULT_PORT,
)
@click.pass_obj
@await_sync
async def fake_server(
config: Config,
listen_address: Optional[str],
icon_file: Optional[str],
message: str,
motd: str,
port: int,
) -> None:
await run_fake_server(listen_address, port, message, motd, icon_file)
@cli.command(help="List all packages")
@click.pass_obj
def packages(config: Config) -> None:
for package in config.packages.values():
click.echo(f"{package.name}:")
if package.repositories:
click.secho(" Repositories:")
for repo in package.repositories.values():
click.echo(f" URL: {repo.url}")
click.echo(f" Type: {repo.type}")
click.echo(f" Committish: {repo.committish}")
if package.fetch_urls:
click.echo(f" Fetch URLs:")
for path, url in package.fetch_urls.items():
click.echo(f" - {path}: {url}")
click.echo(f" Build Commands:")
for command in package.build_commands:
click.echo(f" - {command}")
click.echo(f" Artifacts:")
for name, regex in package.artifacts.items():
click.echo(f" - {regex} -> {name}")
revs = package_revisions(config, package)
if revs:
click.secho(" Built Revisions:")
for rev, ts in sort_revisions_n2o(revs):
click.secho(f" - {rev} ({time.ctime(ts)})")
click.echo("")
@cli.command(help="Restart a server")
@click.option(
"--message", "-m", help="Restart message show to players", envvar="MESSAGE",
)
@click.option(
"--now",
"-n",
help="Restart the server now without waiting the server-timeout",
is_flag=True,
)
@click.option(
"--server-name",
"-s",
help="Name of the server to act on",
envvar="SERVER",
required=True,
)
@click.pass_obj
@await_sync
async def restart(
config: Config, message: Optional[str], now: bool, server_name: str
) -> None:
server = config.get_server(server_name)
await server_stop(server, message, not now)
await server_start(server)
@cli.command(help="List all servers")
@click.pass_obj
def servers(config: Config) -> None:
for server in config.servers.values():
click.echo(f"{server.name}:")
click.echo(f" Path: {server.path}")
click.echo(f" Command: {server.command}")
click.echo(f" Stop Timeout: {server.stop_timeout}")
click.echo(f" Packages:")
for package in server.packages:
click.echo(f" - {package}")
click.echo("")
@cli.command(help="Start a server")
@click.option(
"--fake", "-k", help="Start the fake server instead", is_flag=True,
)
@click.option(
"--fake-message",
"-m",
help="Use this message for the fake server",
envvar="MESSAGE",
)
@click.option(
"--server-name",
"-s",
help="Name of the server to act on",
envvar="SERVER",
required=True,
)
@click.pass_obj
@await_sync
async def start(
config: Config, fake: bool, fake_message: Optional[str], server_name: str
) -> None:
server = config.get_server(server_name)
if fake:
await server_start_fake(server, fake_message)
else:
await server_start(server)
@cli.command(help="Stop a server")
@click.option(
"--message", "-m", help="Shutdown message show to players", envvar="MESSAGE",
)
@click.option(
"--now",
"-n",
help="Stop the server now without waiting the server-timeout",
is_flag=True,
)
@click.option(
"--server-name",
"-s",
help="Name of the server to act on",
envvar="SERVER",
required=True,
)
@click.option(
"--start-fake",
"-k",
help="Start the fake server when the server is stopped",
is_flag=True,
)
@click.pass_obj
@await_sync
async def stop(
config: Config,
message: Optional[str],
now: bool,
server_name: str,
start_fake: bool,
) -> None:
server = config.get_server(server_name)
await server_stop(server, message, not now)
if start_fake:
await server_start_fake(server, message)
@cli.command(help="Upgrade one or more packages")
@click.option(
"--all-packages", "-a", help="Act on all packages", is_flag=True,
)
@click.option(
"--all-except",
"-e",
help="Act on all packages except these (can be specified multiple times)",
envvar="PACKAGE",
multiple=True,
)
@click.option(
"--force",
"-f",
help="Force packages to upgrade even if they are up-to-date",
is_flag=True,
)
@click.option(
"--package-name",
"-p",
help="Name(s) of the package to act on (can be specified multiple times)",
envvar="PACKAGE",
multiple=True,
)
@click.option(
"--revision",
"-n",
help="Revision (or version) of the package to upgrade or downgrade to",
envvar="REV",
)
@click.option(
"--server-name",
"-s",
help="Name of the server to act on",
envvar="SERVER",
required=True,
)
@click.pass_obj
@await_sync
async def upgrade(
config: Config,
all_packages: bool,
all_except: Optional[List[str]],
force: bool,
package_name: Optional[List[str]],
revision: Optional[str],
server_name: str,
) -> None:
server = config.get_server(server_name)
packages = get_packages(config, all_packages, all_except, package_name, server)
for package in packages:
await package_upgrade(config, server, package, revision, force)
```
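A minimal sketch of exercising the click CLI defined above without touching a real server, assuming the `mctl` package is importable; click handles `--help` before the group callback runs, so no configuration file is loaded.

```python
from click.testing import CliRunner

from mctl.commands import cli  # assumes mctl is installed/importable

runner = CliRunner()
result = runner.invoke(cli, ["--help"])
print(result.exit_code)  # 0
print(result.output)     # top-level usage text listing the subcommands above
```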
#### File: mctl/mctl/package.py
```python
import asyncio
from collections import defaultdict
import logging
import os
import re
import time
from typing import DefaultDict, Dict, List, Optional, Tuple
from mctl.config import Config, Package, Server
from mctl.exception import massert
from mctl.repository import unified_repo_revision, update_all_repos
from mctl.util import download_url, execute_shell_check, get_rel_dir_files
LOG = logging.getLogger(__name__)
def archive_build(config: Config, package: Package, build_dir: str, rev: str) -> None:
build_files = get_rel_dir_files(build_dir)
archive_dir = os.path.join(config.data_path, "archive")
for path, pattern in package.artifacts.items():
matches = [
build_file for build_file in build_files if pattern.match(build_file)
]
massert(
len(matches) != 0,
f"Found no artifacts for package {package.name} matching pattern {pattern}",
)
massert(
len(matches) == 1,
f"Ambiguous artifact pattern {pattern} for package {package.name}: {matches}",
)
match = matches[0]
artifact_path = os.path.join(build_dir, match)
root, ext = os.path.splitext(path)
archive_path = os.path.join(archive_dir, package.name, f"{root}-{rev}{ext}")
LOG.debug("Archiving artifact %s to %s", artifact_path, archive_path)
os.makedirs(os.path.dirname(archive_path), exist_ok=True)
os.rename(artifact_path, archive_path)
def cleanup_builds(config: Config, package: Package) -> None:
revs = package_revisions(config, package)
LOG.debug("Package %s has %d revisions", package.name, len(revs))
removed = set()
for rev, _ in reversed(sort_revisions_n2o(revs)):
if len(revs) <= config.max_package_revisions:
break
in_use = False
for artifact_path, (archive_path, _) in revs[rev].items():
for server in config.servers.values():
full_path = os.path.join(server.path, artifact_path)
if not os.path.islink(full_path):
continue
link_path = os.readlink(full_path)
if os.path.samefile(link_path, archive_path):
LOG.debug(
"Revision %s for package %s still in use by server %s",
rev,
package.name,
server.name,
)
in_use = True
break
else:
continue
# The inner for-loop found a link to the artifact, just give up on
# removing this revision.
break
if in_use:
return
removed.add(rev)
for archive_path, _ in revs[rev].values():
LOG.debug(
"Removing old build artifact for package %s: %s",
package.name,
archive_path,
)
os.unlink(archive_path)
if removed:
LOG.info(
"Removed %d old revisions of package %s: %s",
len(removed),
package.name,
removed,
)
new_rev_count = len(revs) - len(removed)
if new_rev_count > config.max_package_revisions:
LOG.warning(
"Servers still using more than %d (%s) revisions of package %s",
config.max_package_revisions,
new_rev_count,
package.name,
)
async def package_build(config: Config, package: Package, force: bool = False) -> None:
LOG.info("Building package %s", package.name)
build_dir = os.path.join(config.data_path, "builds", package.name)
os.makedirs(build_dir, exist_ok=True)
repos = package.repositories.values()
await update_all_repos(build_dir, repos)
rev = await unified_repo_revision(build_dir, repos)
prev_revs = package_revisions(config, package)
if not force and rev is not None and rev in prev_revs:
LOG.info(
"Build of package %s already exists for revision %s, skipping",
package.name,
rev,
)
return
if package.fetch_urls:
LOG.info(
"Fetching %d URLs for package %s...", len(package.fetch_urls), package.name
)
await asyncio.gather(
*[
download_url(url, os.path.join(build_dir, path))
for path, url in package.fetch_urls.items()
]
)
cmd_count = len(package.build_commands)
for i, command in enumerate(package.build_commands, 1):
LOG.info("Executing build command %d of %d: %s", i, cmd_count, command)
await execute_shell_check(command, hide_ouput=False, cwd=build_dir)
# Attempt to get an updated revision from all git repos after all
# build commands have executed. This helps support packages that use
# scripts to fetch Git repos (ex: Spigot's BuildTools). The build
# process will update these repos twice. Once up above to make sure
# the same revision is not being rebuilt. And once here to make sure
# the revision is accurate.
rev = await unified_repo_revision(build_dir, repos)
if rev is None:
rev = str(int(time.time()))
# Cleanup before archiving to avoid cleaning up the new version
cleanup_builds(config, package)
archive_build(config, package, build_dir, rev)
def package_revisions(
config: Config, package: Package
) -> Dict[str, Dict[str, Tuple[str, int]]]:
# {<rev>: <relative_artifact_path>: (<absolute_archive_path>, <time>)}
revs: DefaultDict[str, Dict[str, Tuple[str, int]]] = defaultdict(dict)
archive_dir = os.path.join(config.data_path, "archive", package.name)
for path in package.artifacts:
path_head, path_tail = os.path.split(path)
base_dir = os.path.join(archive_dir, path_head)
if not os.path.exists(base_dir):
continue
root, ext = os.path.splitext(path_tail)
pattern = re.compile(
fr"{re.escape(root)}\-(?P<rev>[a-zA-Z0-9]+){re.escape(ext)}$"
)
with os.scandir(base_dir) as dirit:
for item in dirit:
if not item.is_file():
continue
match = pattern.match(item.name)
if not match:
continue
st = item.stat()
rev = match.group("rev")
revs[rev][path] = os.path.join(base_dir, item.name), int(st.st_ctime)
ret_revs: Dict[str, Dict[str, Tuple[str, int]]] = {}
required = set(package.artifacts)
for rev, artifacts in revs.items():
available = set(artifacts)
missing = required - available
if missing:
LOG.warning(
"Ignoring revision %s for package %s due to missing artifacts: %s",
rev,
package.name,
missing,
)
else:
ret_revs[rev] = artifacts
return ret_revs
async def package_upgrade(
config: Config,
server: Server,
package: Package,
rev: Optional[str] = None,
force: bool = False,
) -> None:
massert(
package.name in server.packages,
f"Package {package.name} not used by server {server.name}",
)
revs = package_revisions(config, package)
massert(revs, f"There are no built revisions for package {package.name}")
if rev is None:
rev, _ = sort_revisions_n2o(revs)[0]
LOG.debug(
"No revision specified for package %s, using revision %s", package.name, rev
)
else:
massert(rev in revs, f"Unknown revision {rev} for package {package.name}")
artifacts = revs[rev]
rand_artifact, _ = list(revs[rev].items())[0]
rand_path = os.path.join(server.path, rand_artifact)
current_rev = None
if os.path.islink(rand_path):
root, ext = os.path.splitext(os.path.basename(rand_artifact))
match = re.match(
fr"{re.escape(root)}\-(?P<rev>[a-zA-Z0-9]+){re.escape(ext)}$",
os.path.basename(os.readlink(rand_path)),
)
if match:
current_rev = match.group("rev")
if not force and current_rev == rev:
LOG.info(
"Package %s already up-to-date for server %s, skipping",
package.name,
server.name,
)
return
LOG.info(
"Upgrading package %s to revision %s from revision %s",
package.name,
rev,
current_rev,
)
for path, info in artifacts.items():
archive_path, _ = info
artifact_path = os.path.join(server.path, path)
if os.path.exists(artifact_path):
LOG.debug("Removing old artifact %s", artifact_path)
os.unlink(artifact_path)
LOG.debug("Linking archived artifact %s to %s", archive_path, artifact_path)
os.makedirs(os.path.dirname(artifact_path), exist_ok=True)
os.symlink(archive_path, artifact_path)
def sort_revisions_n2o(
revs: Dict[str, Dict[str, Tuple[str, int]]]
) -> List[Tuple[str, int]]:
timed_revs = {
rev: max(artifact[1] for artifact in artifacts.values())
for rev, artifacts in revs.items()
}
return sorted(timed_revs.items(), key=lambda kv: kv[1], reverse=True)
```
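A small sketch of `sort_revisions_n2o` on a made-up revision map in the shape documented inside `package_revisions` (`{rev: {relative_artifact_path: (absolute_archive_path, time)}}`); the paths and timestamps are invented for illustration.

```python
from mctl.package import sort_revisions_n2o  # assumes mctl is importable

revs = {
    "abc123": {"plugins/foo.jar": ("/data/archive/foo/plugins/foo-abc123.jar", 1600000000)},
    "def456": {"plugins/foo.jar": ("/data/archive/foo/plugins/foo-def456.jar", 1650000000)},
}
# Newest revision first, keyed by the newest artifact timestamp per revision.
print(sort_revisions_n2o(revs))  # [('def456', 1650000000), ('abc123', 1600000000)]
```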
|
{
"source": "JGefroh/ttfm",
"score": 3
}
|
#### File: experiments/hfbf/parse_tips.py
```python
import sys
import re
txt = sys.stdin.read()
def logger(msg):
sys.stderr.write("{}\n".format(msg))
txt = txt.replace("’", "'")
lines = txt.split("\n")
lines = [line.strip() for line in lines]
lines = [line for line in lines if len(line)]
row = None
num = 0
tmpl = "MarketVendor.where(market_id: {}, vendor_id: Vendor.where(name: \"{}\").select(\"id\")).first.update(booth_location: \"{}\")"
market_id = 61
for line in lines:
parserow = re.findall(" Row ([A-Za-z]+)|(Lawn) ", line)
if parserow:
row = parserow[0][0] if parserow[0][0] else parserow[0][1]
num = 0
continue
if not row:
continue
parts = line.split(":", 1)
if len(parts) != 2:
logger("Can't parse {}".format(line))
continue
vendor, desc = parts
num += 1
boothid = "{}{}".format(row, num)
txt = tmpl.format(market_id, vendor, boothid)
print(txt)
```
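The script reads a vendor tip sheet from stdin and emits one Rails-style update statement per vendor, numbering booths within each row. A tiny self-contained illustration of the same regex and template on made-up input lines:

```python
import re

tmpl = "MarketVendor.where(market_id: {}, vendor_id: Vendor.where(name: \"{}\").select(\"id\")).first.update(booth_location: \"{}\")"
sample = ["Vendors in Row A today", "Alice's Apples: heirloom apples and cider"]

row, num = None, 0
for line in sample:
    parserow = re.findall(" Row ([A-Za-z]+)|(Lawn) ", line)
    if parserow:
        row = parserow[0][0] if parserow[0][0] else parserow[0][1]
        num = 0
        continue
    vendor, desc = line.split(":", 1)
    num += 1
    print(tmpl.format(61, vendor, "{}{}".format(row, num)))
# -> ...Vendor.where(name: "Alice's Apples")... booth_location: "A1"
```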
|
{
"source": "jgehrcke/ci-analysis",
"score": 2
}
|
#### File: ci-analysis/cia/analysis.py
```python
import logging
from datetime import timezone
import pandas as pd
log = logging.getLogger(__name__)
def calc_rolling_event_rate(
series,
window_width_seconds,
upsample_with_zeros=False,
upsample_with_zeros_until=None,
):
"""
Require that Series index is a timestamp index.
http://pandas.pydata.org/pandas-docs/version/0.19.2/api.html#window
"""
assert isinstance(window_width_seconds, int)
log.info(
"Calculate event rate over rolling window (width: %s s)", window_width_seconds
)
# Each sample/item in the series corresponds to one event. The index value
# is the datetime of the event (build), with a resolution of 1 second
# (assumption about input). Multiple events per second are rare, but to be
# expected: hence, get the number of events for any given second (group by
# index value, and get the group size for each unique index value).
eventcountseries = series.groupby(series.index).size()
# Rename to `e` for the following transformations.
e = eventcountseries
log.info("raw event count series (raw series.groupby(series.index).size()):")
print(e)
n_minute_bins = 60
log.info("downsample series into %s-minute bins", n_minute_bins)
# Downsample the series into N-minute bins and sum the values falling into
# a bin (counting the number of 'events' in this bin). Note(double-check,
# but I think that's right): after this, the minimal time difference
# between adjacent data points is `n_minute`s, and the maximal one is
# unknown (can be large, this does not fill gaps).
e = e.resample(f"{n_minute_bins}min").sum()
# print(e)
# The 'resample' before is not expected to upsample, just downsample. That
# is, the resulting time index is expected to have gaps (where no events
    # occur in a time interval wider than the bin width above). Up-sample the
# time index to fill these gaps, with a certain desired resolution and fill
# the missing values with zeros. If desired.
if upsample_with_zeros:
if upsample_with_zeros_until is not None:
if series.index.max() == upsample_with_zeros_until:
log.info(
"calc_rolling_event_rate: last data point in series (%s) "
+ "is equal to upsample_with_zeros_until (%s) -- skip",
series.index.max(),
upsample_with_zeros_until,
)
elif series.index.max() > upsample_with_zeros_until:
log.error(
"calc_rolling_event_rate: last data point in series (%s) "
+ "is newer than upsample_with_zeros_until (%s)",
series.index.max(),
upsample_with_zeros_until,
)
raise Exception("calc_rolling_event_rate: bad input (see error above)")
else:
# Construct additional data point as Series with 1 row.
dp = pd.Series([0], index=[upsample_with_zeros_until])
log.info(
"upsample_with_zeros_until mechanism: add data point to event count series: %s",
dp,
)
# print(f"e before: {e}")
e = e.append(dp)
# print(f"e after: {e}")
# Example state after this extension: last to samples in `e`:
# 2020-12-07 12:00:00+00:00 4
# 2020-12-10 08:00:01+00:00 0
log.info("upsample series (%s-minute bins) to fill gips, with 0", n_minute_bins)
e = e.asfreq(f"{n_minute_bins}min", fill_value=0)
# print(e)
# Store point in time of newest data point in timeseries, needed later.
# Note that this might be newer than the original `series.index.max()`, if
# there was a `upsample_with_zeros_until`-based extension.
datetime_newest_datapoint = e.index.max()
log.info("newest data point in event count series: %s", datetime_newest_datapoint)
# Construct Window object using `df.rolling()` whereas a time offset string
# defines the rolling window width in seconds. Require N samples to be in
# the moving window otherwise produce NaN?
window = e.rolling(window="%sS" % window_width_seconds, min_periods=1)
# Count the number of events (builds) within the rolling window.
s = window.sum()
# Normalize event count with/by the window width, yielding the average
# build rate [Hz] in that time window.
# rolling_build_rate = s / float(window_width_seconds)
rolling_event_rate_d = 86400 * s / float(window_width_seconds)
new_rate_column_name = "builds_per_day_%ss_window" % window_width_seconds
rolling_event_rate_d.rename(new_rate_column_name, inplace=True)
# In the resulting Series object, the request rate value is assigned to the
# right window boundary index value (i.e. to the newest timestamp in the
# window). For presentation and symmetry it is convenient and correct to
# have it assigned (approximately) to the temporal center of the time
# window. That makes sense for intuitive data interpretation of a single
# rolling window time series, but is essential for meaningful presentation
# of multiple rolling window series in the same plot -- keeping the
    # symmetry of original data is especially important when doing long-term
    # analysis with wide time windows whose width varies.
# However: invoking `rolling(..., center=True)` however yields
# `NotImplementedError: center is not implemented for datetimelike and
# offset based windows`. As a workaround, shift the data by half the window
# size to 'the left': shift the timestamp index by a constant / offset.
offset = pd.DateOffset(seconds=window_width_seconds / 2.0)
rolling_event_rate_d.index = rolling_event_rate_d.index - offset
# In the resulting time series, all leftmost values up to the rolling
# window width are dominated by the effect that the rolling window
# (incoming from the left) does not yet completely overlap with the data.
# That is, here the rolling window result is (linearly increasing)
    # systematically too small. Because by now the time series has one sample
# per `n_minute_bins` minute, the number of leftmost samples with a bad
# result corresponds to `int(window_width_seconds / (n_minute_bins * 60))`.
# TODO: review this calc :)
rolling_event_rate_d = rolling_event_rate_d[
int(window_width_seconds / (n_minute_bins * 60)) :
]
# print(rolling_event_rate_d)
# Forward-fill the last value up to the last point in time of the original
# time series (the newest data point in the rolling time window series
# should be half a time window width older than that) -- that would be the
# same as "plotting the window aggregate to the right edge of the window",
# just that we didn't want to do so for the entire data interval (for
# symmetry reasons, see above).
apdx_last_value = rolling_event_rate_d.iloc[-1]
# print(rolling_event_rate_d.index.max())
# now = pd.Timestamp.now(tz=timezone.utc)
apdx_index = pd.date_range(
start=rolling_event_rate_d.index.max(),
end=datetime_newest_datapoint,
freq=f"{n_minute_bins}min",
)
apdx_series = pd.Series([apdx_last_value] * len(apdx_index), index=apdx_index)
# print(apdx_index)
# print(apdx_series)
log.info(
"rolling_event_rate_d: forward-fill to %s with last value %s",
datetime_newest_datapoint,
apdx_last_value,
)
rolling_event_rate_d = rolling_event_rate_d.append(apdx_series)
# df.set_index("dt").reindex(r).fillna(0.0).rename_axis("dt").reset_index()
# There's a lot of magic going on between how the datetime64 values
# actually encode datetime in plots. Sharing an axis across (sub)plots is
# brittle w.r.t. these differences. Work around this, here: make it so that
# individual timestamps have a non-zero value for seconds, by simply adding
# one second, shifting the whole data set by one second to the left. That
# prevents, I guess, an optimization to hit in which would see that
# individual timestamps hit the full hour or integer multiples of 30 or 15
# minutes. Also see
# https://github.com/pandas-dev/pandas/issues/15874
# https://github.com/pandas-dev/pandas/issues/15071
# https://github.com/pandas-dev/pandas/issues/31074
# https://github.com/pandas-dev/pandas/issues/29705
# https://github.com/pandas-dev/pandas/issues/29719
# https://github.com/pandas-dev/pandas/issues/18571
# https://github.com/pandas-dev/pandas/issues/11574
# https://github.com/pandas-dev/pandas/issues/22586
# maybe also look at the index.resolution property to make this systematic.
rolling_event_rate_d.index = rolling_event_rate_d.index + pd.to_timedelta("1 sec")
# print(rolling_event_rate_d)
return rolling_event_rate_d
```
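A minimal usage sketch of `calc_rolling_event_rate` on synthetic build timestamps (made up here); it assumes the `cia` package is importable and a pandas version where `Series.append` still exists (pre-2.0), since the function relies on it.

```python
import pandas as pd

from cia.analysis import calc_rolling_event_rate  # assumes cia is importable

# One row per (synthetic) build; only the DatetimeIndex matters.
timestamps = pd.date_range("2020-12-01", periods=500, freq="37min", tz="UTC")
builds = pd.Series(1, index=timestamps)

# Average builds per day over a rolling 3-day window.
rate = calc_rolling_event_rate(builds, window_width_seconds=3 * 86400)
print(rate.tail())
```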
|
{
"source": "jgehrcke/textstory",
"score": 3
}
|
#### File: jgehrcke/textstory/documentreader.py
```python
import os
from logger import log
class DocumentReader(object):
def __init__(self, document_path):
self.document_path = document_path
if not os.path.isfile(self.document_path):
raise SystemExit("File not found: %s" % self.document_path)
log.info("Reading file: %s.", self.document_path)
with open(self.document_path, "rb") as f:
self.file_string = f.read()
def get_string(self):
try:
return self.file_string.decode("utf-8").strip()
except UnicodeDecodeError:
raise SystemExit("Cannot read '" + self.document_path + "': UnicodeDecodeError.")
def save(self, doc_content):
with open(self.document_path, "wb") as f:
f.write(doc_content.encode("utf-8"))
log.info("Wrote UTF-8-encoded document: %s.", self.document_path)
```
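A short usage sketch (the file name is hypothetical); `DocumentReader` exits via `SystemExit` if the path does not exist or is not valid UTF-8.

```python
from documentreader import DocumentReader  # module lives at the repo root

reader = DocumentReader("story.txt")       # hypothetical input document
text = reader.get_string()                 # decoded and stripped UTF-8 text
reader.save(text.replace("\r\n", "\n"))    # write normalized content back
```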
|
{
"source": "jgehringUCB/kite",
"score": 3
}
|
#### File: kite/kite/__init__.py
```python
import os
from collections import OrderedDict
def version():
print("0.0.1")
def make_mismatch_map(FeatureDict):
odict = OrderedDict()
counter=0
for item in FeatureDict:
name=(item)
seq=FeatureDict[item]
if counter == 0:
feature_barcode_length = len(seq)
print("Feature Barcode Length: "+str(feature_barcode_length)+'\n')
print('Read the following Feature Barcodes:')
counter+=1
print(name)
print(seq)
odict[name+'-*-*'] = str(seq)[:feature_barcode_length]
for pos in range(feature_barcode_length):
letter =str(seq)[pos]
barcode=list(str(seq)[:feature_barcode_length])
if letter=='A':
barcode[pos]='T'
odict[name+'-'+str(pos)+'-1'] = "".join(barcode)
barcode[pos]='G'
odict[name+'-'+str(pos)+'-2'] = "".join(barcode)
barcode[pos]='C'
odict[name+'-'+str(pos)+'-3'] = "".join(barcode)
elif letter=='G':
barcode[pos]='T'
odict[name+'-'+str(pos)+'-1'] = "".join(barcode)
barcode[pos]='A'
odict[name+'-'+str(pos)+'-2'] = "".join(barcode)
barcode[pos]='C'
odict[name+'-'+str(pos)+'-3'] = "".join(barcode)
elif letter=='C':
barcode[pos]='T'
odict[name+'-'+str(pos)+'-1'] = "".join(barcode)
barcode[pos]='G'
odict[name+'-'+str(pos)+'-2'] = "".join(barcode)
barcode[pos]='A'
odict[name+'-'+str(pos)+'-3'] = "".join(barcode)
else:
barcode[pos]='A'
odict[name+'-'+str(pos)+'-1'] = "".join(barcode)
barcode[pos]='G'
odict[name+'-'+str(pos)+'-2'] = "".join(barcode)
barcode[pos]='C'
odict[name+'-'+str(pos)+'-3'] = "".join(barcode)
return odict
def write_mismatch_map(tag_map, mismatch_t2g_path, mismatch_fasta_path):
tagmap_file = open(mismatch_t2g_path, "w+")
tagmap_fasta = open(mismatch_fasta_path, "w+")
for i in list(tag_map.keys()):
if i[-4:]=='-*-*':
#print(i[:-4]+'\t'+i[:-4]+'\t'+i[:-4])
tagmap_file.write(i[:-4]+'\t'+i[:-4]+'\t'+i[:-4]+'\n')
tagmap_fasta.write(">" + i[:-4] + "\n" +tag_map[i] + "\n")
else:
#print(i+'\t'+'-'.join(i.split('-')[:-2])+'\t'+'-'.join(i.split('-')[:-2]))
tagmap_file.write(i+'\t'+'-'.join(i.split('-')[:-2])+'\t'+'-'.join(i.split('-')[:-2])+'\n')
tagmap_fasta.write(">" + i + "\n" +tag_map[i] + "\n")
tagmap_file.close()
tagmap_fasta.close()
def kite_mismatch_maps(FeatureDict, mismatch_t2g_path, mismatch_fasta_path):
write_mismatch_map(make_mismatch_map(FeatureDict), mismatch_t2g_path, mismatch_fasta_path)
print("The t2g and fasta files are now ready")
```
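A small sketch of the kite workflow with made-up feature barcodes and output file names; every barcode must share one length, and the call writes a t2g mapping plus a FASTA containing all single-base mismatch variants.

```python
from collections import OrderedDict

from kite import kite_mismatch_maps  # assumes the kite package is importable

# Hypothetical feature barcodes (all the same length).
features = OrderedDict([("TagA", "ACGTACGT"), ("TagB", "TTGCAAGC")])
kite_mismatch_maps(features, "FeaturesMismatch.t2g", "FeaturesMismatch.fa")
```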
|
{
"source": "jgehunter/echelp",
"score": 3
}
|
#### File: echelp/test/test_echelp.py
```python
import pytest
from echelp.echelp import electricalCircuit
class TestCircuits:
def test_CircuitObjectCreated(self):
testObject = electricalCircuit(name="Test")
assert isinstance(testObject, electricalCircuit)
def test_CreateCircuitObject(self):
testString = "This is a test circuit."
testCircuit = electricalCircuit(name=testString)
        assert testCircuit.name == testString, "Expected %r but received %r" % (testString, testCircuit.name)
def test_PassCircuitCharacteristics(self):
exampleCharacteristics = {
"R1": 10,
"R2": 20,
"C1": 10,
}
testCircuit = electricalCircuit(
name="Test", parameters=exampleCharacteristics)
assert testCircuit.parameters == exampleCharacteristics
def test_ExceptionIfGetCharacteristicsNoName(self):
testCircuit = electricalCircuit()
assert pytest.raises(Exception)
def test_SetAttributes(self):
testCircuit = electricalCircuit(Test=1)
assert testCircuit.Test == 1
def test_SetCircuitDictionary(self):
circuitDictionary = {
"SallenKeyLowPass": {
"Unknowns": 4,
"Parameters": ["RA", "RB", "F", "R", "C"],
"Equations": ["3-(RA+RB)/RA-1.4142", "F-1/(2*pi*R*C)"]
}
}
testCircuit = electricalCircuit(circuitDictionary=circuitDictionary)
assert testCircuit.circuitDictionary == circuitDictionary
def test_ParametersSetFromName(self):
circuitDictionary = {
"SallenKeyLowPass": {
"Unknowns": 4,
"Parameters": ["RA", "RB", "F", "R", "C"],
"Equations": ["3-(RA+RB)/RA-1.4142", "F-1/(2*pi*R*C)"]
}
}
testCircuit = electricalCircuit(
name="SallenKeyLowPass", circuitDictionary=circuitDictionary)
testCircuit.setParameters()
assert testCircuit.parameters == circuitDictionary[
"SallenKeyLowPass"]["Parameters"]
```
|
{
"source": "jgeleta/PyNet_Class",
"score": 3
}
|
#### File: jgeleta/PyNet_Class/C2L2_telnetlib.py
```python
from __future__ import print_function, unicode_literals, division
from pprint import pprint
from netmiko import Netmiko
from getpass import getpass
from ciscoconfparse import CiscoConfParse
import os # Allows Python to perform Operating System functions.
# os.system Allows Python to run commands from the Command Prompt.
import random # Allows Python to randomly generate something, like an integer
import re # Allows Python to perform Regular Expression searches.
import csv # https://docs.python.org/3/library/csv.html
import jinja2 # Jinja is a template engine for the Python programming language
import yaml # YAML is a human-readable data serialization language
import json # JSON is an open-standard file format that uses human-readable text to transmit data objects
import telnetlib # Library that implements the Telnet protocol
import socket # Library that will allow for socket timeouts to be coupled with try: except:
import sys # Library of variables that have strong interaction with the interpreter
import time #
banner = ('-' *80) # Create a banner for use as a section separator
'''
Write a script that connects using telnet to the pynet-rtr1 router.
Execute the 'show ip int brief' command on the router and return the output.
You should be able to do this by using the following items:
telnetlib.Telnet(ip_addr, TELNET_PORT, TELNET_TIMEOUT)
net_conn.read_until(<string_pattern>, TELNET_TIMEOUT)
net_conn.read_very_eager()
net_conn.write(<command> + '\n')
net_conn.close()
'''
TELNET_PORT = 23
TELNET_TIMEOUT = 6
def write_bytes(out_data):
"""
Write Python2 and Python3 compatible byte stream.
It ensures that the unicode in the program is always encoded into a UTF-8 byte stream
in the proper way (when bytes are written out to the network). Or worded another way,
Unicode in the program is in the idealized unicode code points, and when you write it
out to the network it needs to be represented a certain way (encoded).
"""
if sys.version_info[0] >= 3:
if isinstance(out_data, type(u'')):
return out_data.encode('utf-8')
elif isinstance(out_data, type(b'')):
return out_data
else:
if isinstance(out_data, type(u'')):
return out_data.encode('utf-8')
elif isinstance(out_data, type(str(''))):
return out_data
msg = "Invalid value for out_data neither unicode nor byte string: {}".format(out_data)
raise ValueError(msg)
def write_channel(net_conn, data):
# Handle the PY2/PY3 differences to write data out to the device.
net_conn.write(write_bytes(data))
def read_channel(net_conn):
    # Handle the PY2/PY3 differences to read data back from the device.
return net_conn.read_very_eager().decode('utf-8', 'ignore')
def telnet_connect(ip_addr):
# Establish telnet connection.
try:
return telnetlib.Telnet(ip_addr, TELNET_PORT, TELNET_TIMEOUT)
except socket.timeout:
sys.exit("Connection timed-out")
def login(net_conn, username, password):
# Login to network device.
output = net_conn.read_until(b"sername:", TELNET_TIMEOUT).decode('utf-8', 'ignore')
write_channel(net_conn, username + '\n')
output += net_conn.read_until(b"ssword:", TELNET_TIMEOUT).decode('utf-8', 'ignore')
write_channel(net_conn, password + '\n')
return output
def disable_paging(net_conn, paging_cmd='terminal length 0'):
# Disable the paging of output (i.e. --More--).
return send_command(net_conn, paging_cmd)
def send_command(net_conn, cmd):
# Send a command down the telnet channel and return the response.
cmd = cmd.rstrip()
write_channel(net_conn, cmd + '\n')
time.sleep(1)
return read_channel(net_conn)
"""
Write a script that connects to the lab pynet-rtr1, logins, and executes the
'show ip int brief' command.
"""
ip_addr = input("IP address: ")
ip_addr = ip_addr.strip()
username = 'pyclass'
password = getpass()
net_conn = telnet_connect(ip_addr)
output = login(net_conn, username, password)
time.sleep(1)
read_channel(net_conn)
disable_paging(net_conn)
output = send_command(net_conn, 'show ip int brief')
print('\n')
print(output)
print('\n')
net_conn.close()
# EoF
```
|
{
"source": "jgelfman/MSSM-Summer-Preparation-Python-Assignment",
"score": 4
}
|
#### File: jgelfman/MSSM-Summer-Preparation-Python-Assignment/GelfmanPythonAssignment.py
```python
import pandas as pd
import csv
#####################################
# TASK 1 - Separate Movie IDs and Movie names
#####################################
# Read CSV
tempRatingsInput = pd.read_csv('./InputFiles/RatingsInput.csv')
# Split into the two columns
tempRatingsInput[['MovieID', 'MovieName']] = tempRatingsInput.MovieName.str.split(',', expand=True)
# Create new format CSV
tempRatingsInput.to_csv('./OutputFiles/Task1.csv', index=False)
#####################################
# TASK 2 - String Capitalization
# Capitalizing the first letter of every word in the movie names
#####################################
# Titlize every entry in MovieName column
tempRatingsInput[['MovieName']] = tempRatingsInput.MovieName.str.title()
# Create new format CSV
tempRatingsInput.to_csv('./OutputFiles/Task2.csv', index=False)
#####################################
# TASK 3 - Read in from your new CSV file from Task 2 and parse data into lists and dictionaries
#####################################
dictInput = pd.read_csv('./OutputFiles/Task2.csv')
dictInput.drop(columns=dictInput.columns[0])
# Create list of all entries
all = []
with open('./OutputFiles/Task2.csv', "r") as task2:
reader = csv.reader(task2)
for r in reader:
all.append(r)
# Remove headers
all.pop(0)
# Prepare outer ages dictionary
ages = []
for a in range(len(all)):
ages.append(all[a][2])
# Prepare inner rankings dictionaries and append to ages
agesDict = {}
for row in range(len(all)):
thisrow = all[row]
# What age is this person
age = int(thisrow[2])
ageRatings = {}
# Work on films for this rating
rating = int(thisrow[5])
film = str(thisrow[4])
thisratingFilms = {}
try:
if rating not in agesDict[age].keys():
# Initialize new rating and its first film
thisratingFilms = {rating : [film]}
# Append rating to age
ageRatings.update(thisratingFilms)
else:
# Append film to existing rating
agesDict[age][rating].append(film)
except KeyError:
        # Handle the first rating added for this age
firstFilmForAge = {rating : [film]}
ageRatings.update(firstFilmForAge)
# Append to age
if age not in agesDict:
agesDict[age] = ageRatings
else:
agesDict[age].update(ageRatings)
#####################################
# TASK 4 - Find the recommended movies for a given age from best to worst ratings
#####################################
# Approximation function in case input age does not exist
def approx(d, inpt):
# Make sorted temp list of ages
templ = list()
for k in d.keys(): templ.append(k)
templ.sort()
# Retrieve existing upper and lower available ages
lentempl = len(templ)
upbnd = lentempl - 1
for k in range(lentempl):
if inpt == templ[k]:
rtNum = inpt
break
else:
if int(inpt) > int(templ[k]) and k < upbnd:
pass
else:
# In case input is higher than any available age
if k <= upbnd:
rtNum = templ[k]
break
else:
rtNum = templ[k - 1]
break
return rtNum
# Main function for recommending films
def ageRecommendations(inputAge, maxNfilms):
# Figure out closest age in case of age mismatch:
inputAge = int(inputAge)
agesPresent = agesDict.keys()
if inputAge not in agesPresent:
age = approx(agesDict, inputAge)
# Commented for Task 5, but uncomment for better UI
# print("(No data for ratings by users aged " + str(inputAge) + ", displaying ratings by users of closest age " + str(age) + " instead.) \n\n")
else:
age = inputAge
# Commented for Task 5, but uncomment for better UI
# # Print clarification
# print("Movies with a rating of 4 and above, \n" + "as rated by other users of age " + str(age) + ": \n")
# Work on films
whichFilms = []
for r in agesDict[age]:
# Recommend movies only with ratings 4 and above
if r >= 4:
for f in agesDict[age].get(r):
whichFilms.append(f)
# Display only specific amount of films as specified
lengthReturntMovies = len(whichFilms)
maxNfilms = int(maxNfilms)
NFilms = lengthReturntMovies - maxNfilms
returnMovies = []
# If user specifies to display more than there are films with a good rating, simply return all films rated 4 and above
if NFilms < 0:
for i in whichFilms[0 : lengthReturntMovies]:
returnMovies.append(i)
else:
for i in whichFilms[NFilms : lengthReturntMovies]:
returnMovies.append(i)
# Figure out right order to display
returnMovies.reverse()
# Commented for Task 5, but uncomment for better UI
# for i in returnMovies:
# print(str(i) + "\n")
return returnMovies
#####################################
# TASK 5 - Recommend movies to users in the second input file
#####################################
# Read CSV
NewUsersData = []
with open('./InputFiles/NewUsers.csv', "r") as task5:
reader = csv.reader(task5)
for r in reader:
NewUsersData.append(r)
# Remove headers
NewUsersData.pop(0)
ProcessedNewUsers = NewUsersData
for NewUser in range(len(ProcessedNewUsers)):
# Copy name, age, and films amount
NewUserName = ProcessedNewUsers[NewUser][0]
NewUserAge = ProcessedNewUsers[NewUser][1]
NewUserNFilms = ProcessedNewUsers[NewUser][2]
# Replace question marks with films
films = ageRecommendations(NewUserAge, NewUserNFilms)
for f in films:
if ProcessedNewUsers[NewUser][3] != '?':
ProcessedNewUsers[NewUser][3] += ", " + str(f)
else:
ProcessedNewUsers[NewUser][3] = str(f)
# Write the new processed CSV
with open('./OutputFiles/Task5.csv', 'w') as f:
writer = csv.writer(f)
for r in ProcessedNewUsers:
writer.writerow(r)
f.close()
```
|
{
"source": "jgeneaguilar/video_server",
"score": 3
}
|
#### File: video_server/models/user.py
```python
import bcrypt
from sqlalchemy import Column, Text, DateTime
from sqlalchemy.sql.expression import func
from sqlalchemy.orm import relationship
from sqlalchemy.dialects.postgresql import UUID
import uuid
from .meta import Base
class User(Base):
"""A user model"""
__tablename__ = "users"
id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
username = Column(Text, nullable=False, unique=True)
password_hash = Column(Text)
mobile_token = Column(Text)
created_at = Column(
DateTime(timezone=True), server_default=func.now(), nullable=False
)
updated_at = Column(
DateTime(timezone=True),
server_default=func.now(),
onupdate=func.now(),
nullable=False,
)
rooms = relationship("Room", secondary="room_memberships", back_populates="users")
def __init__(self, **kwargs):
if "password" in kwargs:
self.set_password(kwargs.pop("password"))
# map the rest of Column names to class attributes
super(User, self).__init__(**kwargs)
def set_password(self, pw):
pwhash = bcrypt.hashpw(pw.encode("utf8"), bcrypt.gensalt())
self.password_hash = pwhash.decode("utf8")
def check_password(self, pw):
if self.password_hash is not None:
expected_hash = self.password_hash.encode("utf8")
return bcrypt.checkpw(pw.encode("utf8"), expected_hash)
return False
```
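An in-memory sketch of the password round trip (no database session involved); it assumes the project dependencies (SQLAlchemy, bcrypt) are installed, and it uses the same import path as `views/auth.py` below.

```python
from video_server.models import User  # as imported in views/auth.py

user = User(username="alice", password="correct-horse")  # example credentials
assert user.check_password("correct-horse")
assert not user.check_password("wrong-guess")
```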
#### File: video_server/services/helpers.py
```python
def to_int(value, default_value):
value = value or default_value
try:
return int(value)
except ValueError:
return default_value
```
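A quick illustration of `to_int`'s fallback behaviour (the import path is assumed from the file layout above):

```python
from video_server.services.helpers import to_int  # assumed import path

print(to_int("25", 10))   # 25
print(to_int(None, 10))   # 10 (falls back to the default)
print(to_int("abc", 10))  # 10 (ValueError is swallowed)
```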
#### File: video_server/views/auth.py
```python
from pyramid.view import view_config
from pyramid.httpexceptions import HTTPBadRequest, HTTPNotFound
from ..models import User
from ..services import encoding
def _authenticate_user(request):
username = request.json_body.get("username")
password = request.json_body.get("password")
user = request.dbsession.query(User).filter_by(username=username).first()
if user is not None:
if user.check_password(password):
return user
else:
raise HTTPBadRequest("The password you have entered is incorrect.")
else:
return None
@view_config(
route_name="login", request_method="POST", renderer="json",
)
def login(request):
"""Authenticate the user by checking the username-password combination.
Params:
username: string
password: <PASSWORD>
Return:
dict of id(uuid), username(string), token(jwt)
"""
user = _authenticate_user(request)
if user is not None:
return {"data": encoding.encode_response_token(user, request)}
else:
raise HTTPNotFound()
```
|
{
"source": "jgennis/ninja",
"score": 2
}
|
#### File: jgennis/ninja/configure.py
```python
from __future__ import print_function
from optparse import OptionParser
import os
import sys
import platform_helper
sys.path.insert(0, 'misc')
import ninja_syntax
parser = OptionParser()
profilers = ['gmon', 'pprof']
parser.add_option('--platform',
help='target platform (' +
'/'.join(platform_helper.platforms()) + ')',
choices=platform_helper.platforms())
parser.add_option('--host',
help='host platform (' +
'/'.join(platform_helper.platforms()) + ')',
choices=platform_helper.platforms())
parser.add_option('--debug', action='store_true',
help='enable debugging extras',)
parser.add_option('--profile', metavar='TYPE',
choices=profilers,
help='enable profiling (' + '/'.join(profilers) + ')',)
parser.add_option('--with-gtest', metavar='PATH',
help='use gtest unpacked in directory PATH')
parser.add_option('--with-python', metavar='EXE',
help='use EXE as the Python interpreter',
default=os.path.basename(sys.executable))
parser.add_option('--force-pselect', action='store_true',
help='ppoll() is used by default where available, '
'but some platforms may need to use pselect instead',)
(options, args) = parser.parse_args()
if args:
print('ERROR: extra unparsed command-line arguments:', args)
sys.exit(1)
platform = platform_helper.Platform(options.platform)
if options.host:
host = platform_helper.Platform(options.host)
else:
host = platform
BUILD_FILENAME = 'build.ninja'
buildfile = open(BUILD_FILENAME, 'w')
n = ninja_syntax.Writer(buildfile)
n.comment('This file is used to build ninja itself.')
n.comment('It is generated by ' + os.path.basename(__file__) + '.')
n.newline()
n.variable('ninja_required_version', '1.3')
n.newline()
n.comment('The arguments passed to configure.py, for rerunning it.')
n.variable('configure_args', ' '.join(sys.argv[1:]))
env_keys = set(['CXX', 'AR', 'CFLAGS', 'LDFLAGS'])
configure_env = dict((k, os.environ[k]) for k in os.environ if k in env_keys)
if configure_env:
config_str = ' '.join([k + '=' + configure_env[k] for k in configure_env])
n.variable('configure_env', config_str + '$ ')
n.newline()
CXX = configure_env.get('CXX', 'g++')
objext = '.o'
if platform.is_msvc():
CXX = 'cl'
objext = '.obj'
def src(filename):
return os.path.join('src', filename)
def built(filename):
return os.path.join('$builddir', filename)
def doc(filename):
return os.path.join('doc', filename)
def cc(name, **kwargs):
return n.build(built(name + objext), 'cxx', src(name + '.c'), **kwargs)
def cxx(name, **kwargs):
return n.build(built(name + objext), 'cxx', src(name + '.cc'), **kwargs)
def binary(name):
if platform.is_windows():
exe = name + '.exe'
n.build(name, 'phony', exe)
return exe
return name
n.variable('builddir', 'build')
n.variable('cxx', CXX)
if platform.is_msvc():
n.variable('ar', 'link')
else:
n.variable('ar', configure_env.get('AR', 'ar'))
if platform.is_msvc():
cflags = ['/nologo', # Don't print startup banner.
'/Zi', # Create pdb with debug info.
'/W4', # Highest warning level.
'/WX', # Warnings as errors.
'/wd4530', '/wd4100', '/wd4706',
'/wd4512', '/wd4800', '/wd4702', '/wd4819',
# Disable warnings about passing "this" during initialization.
'/wd4355',
'/GR-', # Disable RTTI.
# Disable size_t -> int truncation warning.
# We never have strings or arrays larger than 2**31.
'/wd4267',
'/DNOMINMAX', '/D_CRT_SECURE_NO_WARNINGS',
'/D_VARIADIC_MAX=10',
'/DNINJA_PYTHON="%s"' % options.with_python]
if platform.msvc_needs_fs():
cflags.append('/FS')
ldflags = ['/DEBUG', '/libpath:$builddir']
if not options.debug:
cflags += ['/Ox', '/DNDEBUG', '/GL']
ldflags += ['/LTCG', '/OPT:REF', '/OPT:ICF']
else:
cflags = ['-g', '-Wall', '-Wextra',
'-Wno-deprecated',
'-Wno-unused-parameter',
'-fno-rtti',
'-fno-exceptions',
'-fvisibility=hidden', '-pipe',
'-Wno-missing-field-initializers',
'-DNINJA_PYTHON="%s"' % options.with_python]
if options.debug:
cflags += ['-D_GLIBCXX_DEBUG', '-D_GLIBCXX_DEBUG_PEDANTIC']
cflags.remove('-fno-rtti') # Needed for above pedanticness.
else:
cflags += ['-O2', '-DNDEBUG']
if 'clang' in os.path.basename(CXX):
cflags += ['-fcolor-diagnostics']
if platform.is_mingw():
cflags += ['-D_WIN32_WINNT=0x0501']
ldflags = ['-L$builddir']
libs = []
if platform.is_mingw():
cflags.remove('-fvisibility=hidden');
ldflags.append('-static')
elif platform.is_sunos5():
cflags.remove('-fvisibility=hidden')
elif platform.is_msvc():
pass
else:
if options.profile == 'gmon':
cflags.append('-pg')
ldflags.append('-pg')
elif options.profile == 'pprof':
cflags.append('-fno-omit-frame-pointer')
libs.extend(['-Wl,--no-as-needed', '-lprofiler'])
if (platform.is_linux() or platform.is_openbsd() or platform.is_bitrig()) and \
not options.force_pselect:
cflags.append('-DUSE_PPOLL')
def shell_escape(str):
"""Escape str such that it's interpreted as a single argument by
the shell."""
# This isn't complete, but it's just enough to make NINJA_PYTHON work.
if platform.is_windows():
return str
if '"' in str:
return "'%s'" % str.replace("'", "\\'")
return str
if 'CFLAGS' in configure_env:
cflags.append(configure_env['CFLAGS'])
n.variable('cflags', ' '.join(shell_escape(flag) for flag in cflags))
if 'LDFLAGS' in configure_env:
ldflags.append(configure_env['LDFLAGS'])
n.variable('ldflags', ' '.join(shell_escape(flag) for flag in ldflags))
n.newline()
if platform.is_msvc():
n.rule('cxx',
command='$cxx /showIncludes $cflags -c $in /Fo$out',
description='CXX $out',
deps='msvc')
else:
n.rule('cxx',
command='$cxx -MMD -MT $out -MF $out.d $cflags -c $in -o $out',
depfile='$out.d',
deps='gcc',
description='CXX $out')
n.newline()
if host.is_msvc():
n.rule('ar',
command='lib /nologo /ltcg /out:$out $in',
description='LIB $out')
elif host.is_mingw():
n.rule('ar',
command='cmd /c $ar cqs $out.tmp $in && move /Y $out.tmp $out',
description='AR $out')
else:
n.rule('ar',
command='rm -f $out && $ar crs $out $in',
description='AR $out')
n.newline()
if platform.is_msvc():
n.rule('link',
command='$cxx $in $libs /nologo /link $ldflags /out:$out',
description='LINK $out')
else:
n.rule('link',
command='$cxx $ldflags -o $out $in $libs',
description='LINK $out')
n.newline()
objs = []
if not platform.is_windows() and not platform.is_solaris():
n.comment('browse_py.h is used to inline browse.py.')
n.rule('inline',
command='src/inline.sh $varname < $in > $out',
description='INLINE $out')
n.build(built('browse_py.h'), 'inline', src('browse.py'),
implicit='src/inline.sh',
variables=[('varname', 'kBrowsePy')])
n.newline()
objs += cxx('browse', order_only=built('browse_py.h'))
n.newline()
n.comment('the depfile parser and ninja lexers are generated using re2c.')
def has_re2c():
import subprocess
try:
proc = subprocess.Popen(['re2c', '-V'], stdout=subprocess.PIPE)
return int(proc.communicate()[0], 10) >= 1103
except OSError:
return False
if has_re2c():
n.rule('re2c',
command='re2c -b -i --no-generation-date -o $out $in',
description='RE2C $out')
# Generate the .cc files in the source directory so we can check them in.
n.build(src('depfile_parser.cc'), 're2c', src('depfile_parser.in.cc'))
n.build(src('lexer.cc'), 're2c', src('lexer.in.cc'))
else:
print("warning: A compatible version of re2c (>= 0.11.3) was not found; "
"changes to src/*.in.cc will not affect your build.")
n.newline()
n.comment('Core source files all build into ninja library.')
for name in ['build',
'build_log',
'clean',
'debug_flags',
'depfile_parser',
'deps_log',
'disk_interface',
'edit_distance',
'eval_env',
'graph',
'graphviz',
'lexer',
'line_printer',
'manifest_parser',
'metrics',
'state',
'util',
'version']:
objs += cxx(name)
if platform.is_windows():
for name in ['subprocess-win32',
'includes_normalize-win32',
'msvc_helper-win32',
'msvc_helper_main-win32']:
objs += cxx(name)
if platform.is_msvc():
objs += cxx('minidump-win32')
objs += cc('getopt')
else:
objs += cxx('subprocess-posix')
if platform.is_msvc():
ninja_lib = n.build(built('ninja.lib'), 'ar', objs)
else:
ninja_lib = n.build(built('libninja.a'), 'ar', objs)
n.newline()
if platform.is_msvc():
libs.append('ninja.lib')
else:
libs.append('-lninja')
all_targets = []
n.comment('Main executable is library plus main() function.')
objs = cxx('ninja')
ninja = n.build(binary('ninja'), 'link', objs, implicit=ninja_lib,
variables=[('libs', libs)])
n.newline()
all_targets += ninja
n.comment('Tests all build into ninja_test executable.')
variables = []
test_cflags = cflags + ['-DGTEST_HAS_RTTI=0']
test_ldflags = None
test_libs = libs
objs = []
if options.with_gtest:
path = options.with_gtest
gtest_all_incs = '-I%s -I%s' % (path, os.path.join(path, 'include'))
if platform.is_msvc():
gtest_cflags = '/nologo /EHsc /Zi /D_VARIADIC_MAX=10 '
if platform.msvc_needs_fs():
gtest_cflags += '/FS '
gtest_cflags += gtest_all_incs
else:
gtest_cflags = '-fvisibility=hidden ' + gtest_all_incs
objs += n.build(built('gtest-all' + objext), 'cxx',
os.path.join(path, 'src', 'gtest-all.cc'),
variables=[('cflags', gtest_cflags)])
test_cflags.append('-I%s' % os.path.join(path, 'include'))
else:
# Use gtest from system.
if platform.is_msvc():
test_libs.extend(['gtest_main.lib', 'gtest.lib'])
else:
test_libs.extend(['-lgtest_main', '-lgtest'])
n.variable('test_cflags', test_cflags)
for name in ['build_log_test',
'build_test',
'clean_test',
'depfile_parser_test',
'deps_log_test',
'disk_interface_test',
'edit_distance_test',
'graph_test',
'lexer_test',
'manifest_parser_test',
'ninja_test',
'state_test',
'subprocess_test',
'test',
'util_test']:
objs += cxx(name, variables=[('cflags', '$test_cflags')])
if platform.is_windows():
for name in ['includes_normalize_test', 'msvc_helper_test']:
objs += cxx(name, variables=[('cflags', test_cflags)])
if not platform.is_windows():
test_libs.append('-lpthread')
ninja_test = n.build(binary('ninja_test'), 'link', objs, implicit=ninja_lib,
variables=[('ldflags', test_ldflags),
('libs', test_libs)])
n.newline()
all_targets += ninja_test
n.comment('Ancillary executables.')
objs = cxx('build_log_perftest')
all_targets += n.build(binary('build_log_perftest'), 'link', objs,
implicit=ninja_lib, variables=[('libs', libs)])
objs = cxx('canon_perftest')
all_targets += n.build(binary('canon_perftest'), 'link', objs,
implicit=ninja_lib, variables=[('libs', libs)])
objs = cxx('depfile_parser_perftest')
all_targets += n.build(binary('depfile_parser_perftest'), 'link', objs,
implicit=ninja_lib, variables=[('libs', libs)])
objs = cxx('hash_collision_bench')
all_targets += n.build(binary('hash_collision_bench'), 'link', objs,
implicit=ninja_lib, variables=[('libs', libs)])
objs = cxx('manifest_parser_perftest')
all_targets += n.build(binary('manifest_parser_perftest'), 'link', objs,
implicit=ninja_lib, variables=[('libs', libs)])
n.newline()
n.comment('Generate a graph using the "graph" tool.')
n.rule('gendot',
command='./ninja -t graph all > $out')
n.rule('gengraph',
command='dot -Tpng $in > $out')
dot = n.build(built('graph.dot'), 'gendot', ['ninja', 'build.ninja'])
n.build('graph.png', 'gengraph', dot)
n.newline()
n.comment('Generate the manual using asciidoc.')
n.rule('asciidoc',
command='asciidoc -b docbook -d book -o $out $in',
description='ASCIIDOC $out')
n.rule('xsltproc',
command='xsltproc --nonet doc/docbook.xsl $in > $out',
description='XSLTPROC $out')
xml = n.build(built('manual.xml'), 'asciidoc', doc('manual.asciidoc'))
manual = n.build(doc('manual.html'), 'xsltproc', xml,
implicit=doc('style.css'))
n.build('manual', 'phony',
order_only=manual)
n.newline()
n.comment('Generate Doxygen.')
n.rule('doxygen',
command='doxygen $in',
description='DOXYGEN $in')
n.variable('doxygen_mainpage_generator',
src('gen_doxygen_mainpage.sh'))
n.rule('doxygen_mainpage',
command='$doxygen_mainpage_generator $in > $out',
description='DOXYGEN_MAINPAGE $out')
mainpage = n.build(built('doxygen_mainpage'), 'doxygen_mainpage',
['README', 'COPYING'],
implicit=['$doxygen_mainpage_generator'])
n.build('doxygen', 'doxygen', doc('doxygen.config'),
implicit=mainpage)
n.newline()
if not host.is_mingw():
n.comment('Regenerate build files if build script changes.')
n.rule('configure',
command='${configure_env}%s configure.py $configure_args' %
options.with_python,
generator=True)
n.build('build.ninja', 'configure',
implicit=['configure.py', os.path.normpath('misc/ninja_syntax.py')])
n.newline()
n.default(ninja)
n.newline()
if host.is_linux():
n.comment('Packaging')
n.rule('rpmbuild',
command="misc/packaging/rpmbuild.sh",
description='Building rpms..')
n.build('rpm', 'rpmbuild')
n.newline()
n.build('all', 'phony', all_targets)
print('wrote %s.' % BUILD_FILENAME)
```
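configure.py emits build.ninja through the ninja_syntax.Writer helper bundled in misc/. The sketch below shows that emission pattern in isolation, writing to an in-memory buffer instead of a file; it assumes misc/ninja_syntax.py is importable, which configure.py arranges with sys.path.insert.
```python
import io
import sys

sys.path.insert(0, 'misc')  # same trick configure.py uses to find ninja_syntax
import ninja_syntax

buf = io.StringIO()
n = ninja_syntax.Writer(buf)
n.comment('Toy build file, emitted the same way configure.py emits build.ninja.')
n.variable('cxx', 'g++')
n.rule('cxx', command='$cxx -MMD -MF $out.d -c $in -o $out',
       depfile='$out.d', description='CXX $out')
n.build('hello.o', 'cxx', 'hello.cc')
n.default('hello.o')
print(buf.getvalue())
```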
|
{
"source": "jgennis/rules_rust",
"score": 2
}
|
#### File: jgennis/rules_rust/workspace.bzl
```python
load("@bazel_skylib//lib:versions.bzl", "versions")
def _store_bazel_version(repository_ctx):
bazel_version = versions.get()
if len(bazel_version) == 0:
print("You're using development build of Bazel, make sure it's at least version 0.17.1")
elif versions.is_at_most("0.17.0", bazel_version):
fail("Bazel {} is too old to use with rules_rust, please use at least Bazel 0.17.1, preferably newer.".format(bazel_version))
repository_ctx.file("BUILD", "exports_files(['def.bzl'])")
repository_ctx.file("def.bzl", "BAZEL_VERSION='" + bazel_version + "'")
bazel_version = repository_rule(
implementation = _store_bazel_version,
)
```
|
{
"source": "jgentle/grid-generation",
"score": 3
}
|
#### File: jgentle/grid-generation/grid.py
```python
import os, sys
import ogr
from math import ceil
def main(outputGridfn,xmin,xmax,ymin,ymax,gridHeight,gridWidth):
# convert sys.argv to float
xmin = float(xmin)
xmax = float(xmax)
ymin = float(ymin)
ymax = float(ymax)
gridWidth = float(gridWidth)
gridHeight = float(gridHeight)
# get rows
rows = ceil((ymax-ymin)/gridHeight)
# get columns
cols = ceil((xmax-xmin)/gridWidth)
# start grid cell envelope
ringXleftOrigin = xmin
ringXrightOrigin = xmin + gridWidth
ringYtopOrigin = ymax
ringYbottomOrigin = ymax-gridHeight
# create output file
outDriver = ogr.GetDriverByName('ESRI Shapefile')
if os.path.exists(outputGridfn):
os.remove(outputGridfn)
outDataSource = outDriver.CreateDataSource(outputGridfn)
outLayer = outDataSource.CreateLayer(outputGridfn,geom_type=ogr.wkbPolygon )
featureDefn = outLayer.GetLayerDefn()
# create grid cells
countcols = 0
while countcols < cols:
countcols += 1
# reset envelope for rows
ringYtop = ringYtopOrigin
ringYbottom =ringYbottomOrigin
countrows = 0
while countrows < rows:
countrows += 1
ring = ogr.Geometry(ogr.wkbLinearRing)
ring.AddPoint(ringXleftOrigin, ringYtop)
ring.AddPoint(ringXrightOrigin, ringYtop)
ring.AddPoint(ringXrightOrigin, ringYbottom)
ring.AddPoint(ringXleftOrigin, ringYbottom)
ring.AddPoint(ringXleftOrigin, ringYtop)
poly = ogr.Geometry(ogr.wkbPolygon)
poly.AddGeometry(ring)
# add new geom to layer
outFeature = ogr.Feature(featureDefn)
outFeature.SetGeometry(poly)
outLayer.CreateFeature(outFeature)
outFeature = None
# new envelope for next poly
ringYtop = ringYtop - gridHeight
ringYbottom = ringYbottom - gridHeight
# new envelope for next poly
ringXleftOrigin = ringXleftOrigin + gridWidth
ringXrightOrigin = ringXrightOrigin + gridWidth
# Save and close DataSources
outDataSource = None
if __name__ == "__main__":
#
# example run : $ python grid.py <full-path><output-shapefile-name>.shp xmin xmax ymin ymax gridHeight gridWidth
#
if len( sys.argv ) != 8:
print "[ ERROR ] you must supply seven arguments: output-shapefile-name.shp xmin xmax ymin ymax gridHeight gridWidth"
sys.exit( 1 )
main( sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5], sys.argv[6], sys.argv[7] )
```
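The script is normally driven from the command line as shown in the comment above, but main() can also be called directly. A hedged invocation sketch follows; the module name, output path, and extents are made up, and the GDAL/OGR Python bindings must be installed.
```python
# Hypothetical direct call, equivalent to:
#   python grid.py grid_1km.shp 500000 510000 4190000 4200000 1000 1000
from grid import main  # assumes the file above is saved as grid.py on the import path

main("grid_1km.shp",
     500000, 510000,    # xmin, xmax
     4190000, 4200000,  # ymin, ymax
     1000, 1000)        # gridHeight, gridWidth
```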
|
{
"source": "jgeofil/avax-python",
"score": 3
}
|
#### File: cb58ref/cb58ref/__main__.py
```python
import argparse
import sys
from . import __version__, cb58decode, cb58encode
EPILOG = """
CB58 is a base-58 encoding with a 32-bit checksum, used on the AVA network.
It's similar to base58check.
"""
def main(argv=None):
parser = argparse.ArgumentParser(
prog='cb58ref',
description=__doc__,
epilog=EPILOG,
)
parser.add_argument(
'-d', '--decode', action='store_true',
help='decode data',
)
parser.add_argument(
'-n', action='store_false',
dest='newline',
help='do not output the trailing newline',
)
parser.add_argument(
'file', metavar='FILE',
type=argparse.FileType('rb'),
default='-',
nargs='?',
help='file to read from (default: stdin)'
)
parser.add_argument(
'--version', action='store_true',
help='print program version and exit',
)
args = parser.parse_args(argv)
if args.version:
print(parser.prog, __version__)
return 0
# Workaround for https://bugs.python.org/issue14156
# We want to read binary data, but (as of Jun 2020) argparse doesn't
# provide that when reading from stdin.
if args.file == sys.stdin:
args.file = args.file.buffer
# Read CB58, output bytes
if args.decode:
s = args.file.read().decode('ascii')
b = cb58decode(s)
sys.stdout.buffer.write(b)
if args.newline:
sys.stdout.buffer.write(b'\n')
return 0
# Read CB58, output CB58
b = args.file.read()
s = cb58encode(b)
sys.stdout.write(s)
if args.newline:
sys.stdout.write('\n')
return 0
if __name__ == '__main__':
sys.exit(main()) # pragma: no cover
```
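The CLI above is a thin wrapper around the package's cb58encode/cb58decode functions. A minimal round-trip sketch using that API directly; it assumes the cb58ref package is installed, and the payload is arbitrary.
```python
from cb58ref import cb58encode, cb58decode

payload = b"hello avalanche"
encoded = cb58encode(payload)   # base-58 text carrying a 4-byte checksum
assert cb58decode(encoded) == payload
print(encoded)
```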
#### File: 3rdparty/x509-parser/x509Parser.py
```python
import json
from OpenSSL import crypto
from datetime import datetime
def bytes_to_string(bytes):
return str(bytes, 'utf-8')
def x509_name_to_json(x509_name):
json = { }
for key, value in x509_name.get_components():
json.update({ bytes_to_string(key): bytes_to_string(value) })
return json
def x509_extensions_to_json(x509_cert):
json = { }
for ext_index in range(0, x509_cert.get_extension_count(), 1):
extension = x509_cert.get_extension(ext_index)
json.update({ bytes_to_string(extension.get_short_name()): str(extension) })
return json
class x509Parser:
def x509_to_str(x509_cert):
cert_str = x509Parser.parse_x509(x509_cert)
return json.dumps(cert_str, indent=4)
def parse_x509(cert,ignore_extensions=False):
x509_cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert)
cert = {
"subject": x509_name_to_json(x509_cert.get_subject()),
"issuer": x509_name_to_json(x509_cert.get_issuer()),
"has-expired": x509_cert.has_expired(),
"not-after": str(datetime.strptime(bytes_to_string(x509_cert.get_notAfter()), '%Y%m%d%H%M%SZ')),
"not-before": str(datetime.strptime(bytes_to_string(x509_cert.get_notBefore()), '%Y%m%d%H%M%SZ')),
"serial-number": x509_cert.get_serial_number(),
"serial-number(hex)": hex(x509_cert.get_serial_number()),
"signature-algorithm": bytes_to_string(x509_cert.get_signature_algorithm()),
"version": x509_cert.get_version(),
"pulic-key-length": x509_cert.get_pubkey().bits()
}
if (not ignore_extensions):
cert.update({"extensions": x509_extensions_to_json(x509_cert)})
return cert
```
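parse_x509 takes a PEM-encoded certificate string. A small sketch that fetches a live certificate with the standard library and prints the parsed JSON; it assumes network access and that the module above is importable as x509Parser.
```python
import ssl
from x509Parser import x509Parser

# ssl.get_server_certificate returns the server's certificate as a PEM string.
pem = ssl.get_server_certificate(("example.com", 443))
print(x509Parser.x509_to_str(pem))
```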
#### File: 3rdparty/x509-parser/x509_parser.py
```python
import sys, getopt
import ssl
import socket
import json
from x509Parser import x509Parser
BEGIN_X509_CERT = "-----BEGIN CERTIFICATE-----"
END_X509_CERT = "-----END CERTIFICATE-----"
def main(argv):
try:
        opts, args = getopt.getopt(argv, "hi:p:f:deo:v", ["help", "host=", "port=", "file=", "dump", "ignore-extensions", "ignore-cert-validation", "output="])
except getopt.GetoptError as err:
print(err)
print_help()
sys.exit(2)
cert_list = None
host = None
port = None
ignore_extensions = False
ignore_cert_validation = False
file_output = None
for opt, arg in opts:
if opt in ("-h", "--help"):
print_help()
sys.exit()
elif opt in ("-e", "ignore-extensions"):
ignore_extensions = True
elif opt in ("-i", "--host"):
host = arg
elif opt in ("-p", "--port"):
port = arg
elif opt in ("-f", "--file"):
cert_list = parse_multi_certs(open(arg, 'r').read())
elif opt in ("-d", "--dump"):
cert_input = ""
line = input("Enter X509 cert: ")
while line:
cert_input += line
cert_input += "\n"
line = input()
cert_list = parse_multi_certs(cert_input)
elif opt in ("-v", "--ignore-cert-validation"):
ignore_cert_validation = True
elif opt in ("-o", "--output"):
file_output = arg
else:
            print_help()
sys.exit(2)
if (host and port):
cert_list = [get_certificate(host, port, ignore_cert_validation=ignore_cert_validation)]
elif (host):
cert_list = [get_certificate(host, ignore_cert_validation=ignore_cert_validation)]
x509_array = []
for cert in cert_list:
x509_array.append(x509Parser.parse_x509(cert, ignore_extensions))
certs = { "certs": x509_array }
if (file_output):
print("Writing to file %s..." % file_output)
output_file = open(file_output, 'w')
output_file.write(json.dumps(certs, indent=4))
        output_file.close()
print("Completed!")
else:
print(json.dumps(certs, indent=4))
def print_help():
print("x509_parser.py -i <host> -p <port> -f <input-file> -d -e")
print("-h (--help) = print this help summary.")
print("-i (--host) = host name of the web server to obtain certificate from.")
print("-p (--port) = to be used in conjunction with the host option to specify the port number to connect to the server on, if none is supplied it defaults to 443.")
print("-f (--file) = the filename of the file containing the X509 certificates.")
print("-d (--dump) = past in a collection of X509 certificates.")
print("-e (--ignore-extensions = do not include extensions in the parse output.")
print("-v (--ignore-cert-validation = ignore certificate validation.")
print("-o (--output) = filename to put the output into instead of the standard output.")
def get_certificate(host, port=443, timeout=10, ignore_cert_validation=False):
context = ssl.create_default_context()
if (ignore_cert_validation):
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
connection = socket.create_connection((host, port))
sock = context.wrap_socket(connection, server_hostname=host)
sock.settimeout(timeout)
try:
der_cert = sock.getpeercert(True)
finally:
sock.close()
return ssl.DER_cert_to_PEM_cert(der_cert)
def parse_multi_certs(certs):
cert_list = []
begin_index = certs.find(BEGIN_X509_CERT)
while (begin_index != -1):
end_index = certs.find(END_X509_CERT, begin_index) + len(END_X509_CERT)
cert_list.append(certs[begin_index:end_index])
begin_index = certs.find(BEGIN_X509_CERT, end_index)
return cert_list
if __name__ == "__main__":
main(sys.argv[1:])
```
#### File: avax-python/api/admin.py
```python
import avaxpython
import jsrpc
import warnings
caller = avaxpython.get_caller()
def getNodeID():
warnings.warn("This method is deprecated and will be removed in future versions. Use info.getNodeID instead.", DeprecationWarning)
data = {}
ret = caller("admin.getNodeID", data)
return ret["nodeID"]
def peers():
warnings.warn("This method is deprecated and will be removed in future versions. Use info.peers instead.", DeprecationWarning)
data = {}
ret = caller("admin.peers", data)
return ret["peers"]
def getNetworkID():
warnings.warn("This method is deprecated and will be removed in future versions. Use info.getNetworkID instead.", DeprecationWarning)
data = {}
ret = caller("admin.getNetworkID", data)
return ret["networkID"]
def alias(endpoint, alias):
data = {
"alias": alias,
"endpoint": endpoint
}
ret = caller("admin.alias", data)
return ret["success"]
def aliasChain(chain, alias):
data = {
"alias": alias,
"chain": chain
}
ret = caller("admin.aliasChain", data)
return ret["success"]
def getBlockchainID(alias):
warnings.warn("This method is deprecated and will be removed in future versions. Use info API instead.", DeprecationWarning)
data = {
"alias": alias
}
ret = caller("admin.getBlockchainID", data)
return ret["blockchainID"]
def startCPUProfiler(fileName):
data = {
"fileName": fileName
}
ret = caller("admin.startCPUProfiler", data)
return ret["success"]
def stopCPUProfiler():
data = {}
ret = caller("admin.stopCPUProfiler", data)
return ret["success"]
def memoryProfile(fileName):
data = {
"fileName": fileName
}
ret = caller("admin.memoryProfile", data)
return ret["success"]
def lockProfile(fileName):
data = {
"fileName": fileName
}
ret = caller("admin.lockProfile", data)
return ret["success"]
```
#### File: avax-python/api/avaxpython.py
```python
import avaxconfig
import jsrpc
import inspect
from apimeta import api_meta
def make_caller(url):
def f(method, data):
return jsrpc.ava_call(url, method, data)
return f
def make_poster(url):
def f(data):
return jsrpc.ava_post(url, data)
return f
def get_caller():
"""
Uses the source caller's module name to determine
which caller to return.
Will fail if source is __main__. Do not call module
functions directly from command line.
"""
src_caller = inspect.stack()[1]
module_name = inspect.getmodule(src_caller[0]).__name__
return make_caller(avaxconfig.urls[module_name])
def get_poster():
src_caller = inspect.stack()[1]
    module_name = inspect.getmodule(src_caller[0]).__name__
return make_poster(avaxconfig.urls[module_name])
```
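get_caller works by inspecting the calling module's name and closing over the JSON-RPC endpoint URL configured for that module. The standalone sketch below illustrates the closure pattern with a toy URL table standing in for avaxconfig.urls; the table contents and the returned dict are illustrative only.
```python
import inspect

urls = {"__main__": "http://127.0.0.1:9650/ext/admin"}  # toy stand-in for avaxconfig.urls

def make_caller(url):
    def f(method, data):
        # A real caller would POST a JSON-RPC request to `url`; here we just echo.
        return {"url": url, "method": method, "params": data}
    return f

def get_caller():
    src_caller = inspect.stack()[1]
    module_name = inspect.getmodule(src_caller[0]).__name__
    return make_caller(urls[module_name])

caller = get_caller()
print(caller("admin.getNodeID", {}))
```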
#### File: avax-python/api/avm.py
```python
import avaxpython
import jsrpc
caller = avaxpython.get_caller()
def buildGenesis(genesisData):
data = {
"genesisData": genesisData
}
return caller("avm.buildGenesis", data)
def importKey(privateKey, username, password):
data = {
"privateKey": privateKey,
"username": username,
"password": password
}
return caller("avm.importKey", data)
def exportKey(address, username, password):
data = {
"address": address,
"username": username,
"password": password
}
return caller("avm.exportKey", data)
def exportAVA(to_addr, amt, username, password):
data = {
"to": to_addr,
"amount": int(amt),
"username": username,
"password": password
}
return caller("avm.exportAVA", data)
def importAVA(to, username, password):
data = {
"to": to,
"username": username,
"password": password
}
return caller("avm.importAVA", data)
def getAllBalances(address):
data = {
"address": address
}
return caller("avm.getAllBalances", data)
def getBalance(address, assetID):
data = {
"address": address,
"assetID": assetID
}
return caller("avm.getBalance", data)
def getUTXOs(addresses):
data = {
"addresses": addresses
}
return caller("avm.getUTXOs", data)
def getTxStatus(txID):
data = {
"txID": txID
}
return caller("avm.getTxStatus", data)
def createAddress(username, password):
data = {
"username": username,
"password": password
}
return caller("avm.createAddress", data)
def listAddresses(username, password):
data = {
"username": username,
"password": password
}
return caller("avm.listAddresses", data)
def issueTx(tx):
data = {
"tx": tx
}
return caller("avm.issueTx", data)
def signMintTx(tx, minter, username, password):
data = {
"tx": tx,
"minter": minter,
"username": username,
"password": password
}
return caller("avm.signMintTx", data)
def createMintTx(amount, assetID, to, minters):
data = {
"amount": amount,
"assetID": assetID,
"to": to,
"minters": minters
}
return caller("avm.createMintTx", data)
def send(amount, assetID, to, username, password):
data = {
"amount": amount,
"assetID": assetID,
"to": to,
"username": username,
"password": password
}
return caller("avm.send", data)
def createFixedCapAsset(name, symbol, denomination, initialHolders, username, password):
"""
initialHolders = [{
address: string,
amount: int
}, ...]
"""
data = {
"name": name,
"symbol": symbol,
"denomination": denomination,
"initialHolders": initialHolders,
"username": username,
"password": password
}
return caller("avm.createFixedCapAsset", data)
def createVariableCapAsset(name, symbol, denomination, minterSets, username, password):
"""
minterSets = [{
minters: [string],
threshold: int
}, ...]
"""
data = {
"name": name,
"symbol": symbol,
"denomination": denomination,
"minterSets": minterSets,
"username": username,
"password": password
}
return caller("avm.createVariableCapAsset", data)
def getAssetDescription(assetID):
data = {
"assetID": assetID
}
return caller("avm.getAssetDescription", data)
```
#### File: avax-python/api/spec2py.py
```python
import re
import tatsu
from tatsu.ast import AST
from tatsu.walkers import NodeWalker
import json
GRAMMAR_FILE = "api.tatsu"
SPEC_FILE = "api.specification"
class AvaField():
"""
Convenience class to encapsulate an API field (parameter or return).
If avatype is a recursive type, it'll contain an array / subset of return types.
"""
def __init__(self, name, avatype, optional):
self.name = name
self.avatype = avatype
self.optional = optional
def __repr__(self):
mystr = "AvaField( " + str(self.name) + ": " + str(self.avatype)
if (self.optional):
mystr += " (Optional) "
mystr += " ) "
return mystr
class AvaMethod():
"""Convenience class to encapsulate an API method."""
def __init__(self, package, methodx, params, returns):
self.package = package
self.methodx = methodx
self.params = params
self.returns = returns
def __repr__(self):
return "AvaMethod( " + self.package + "." + self.methodx + "(" + str(self.params) + ") -> " + str(self.returns) + " )"
class AvaEndpoint():
"""Convenience class to encapsulate an API endpoint and its methods."""
def __init__(self, endpoint, methods):
self.endpoint = endpoint
self.methods = methods
def __repr__(self):
return "AvaEndpoint( " + self.endpoint + " -> " + str(self.methods) + " )"
class AvaApi():
"""Convenience class to encapsulate an AVA API."""
def __init__(self, endpoints):
self.endpoints = endpoints
def __repr__(self):
return "AvaApi( " + str(self.endpoints) + " )"
def read_spec(specfil):
"""Reads de API spec file into endpoint-delimited sections"""
rk = {}
curr_ep = "error" # invalid endpoint [indicates a parsing problem]
rgx = re.compile("^endpoint:\\s*([^\\s]+)")
with open(specfil, "r") as f:
lin = f.readline()
grp = ""
while lin:
m = rgx.search(lin.strip())
if m:
curr_ep = m[1]
elif lin.strip() == "":
if curr_ep in rk:
rk[curr_ep].append(grp)
else:
rk[curr_ep] = [grp]
grp = ""
else:
grp += lin
lin = f.readline()
return rk
def walk_fields(part):
"""
part is an array of fields from the AST.
Types are represented by arrays of format [name,semicolon,type[,comma_or_array[,maybe_comma]]]
The field tree is built recursively.
For part lengths 3 and 4 we shortcircuit the special cases.
    All others are sent through recursion.
"""
ret = []
for mx in part:
lx = len(mx)
if isinstance(mx, list):
# fields always have 2nd entry == ":"
if not (lx >= 3 and mx[1] == ":"):
return walk_fields(mx)
if mx == "{" or mx == "}":
continue
if lx == 3:
# scalar type
fld = AvaField(mx[0], mx[2], False)
ret.append(fld);
continue
if lx > 3:
if mx[3] == "(optional)":
fld = AvaField(mx[0], mx[2], True)
continue
if lx == 4:
if mx[3] == ",":
fld = AvaField(mx[0], mx[2], False)
ret.append(fld);
continue
if mx[2] == "[]":
fld = AvaField(mx[0], None, False)
fld.avatype = AvaField("[]", mx[3], False)
ret.append(fld);
continue
if lx > 4:
fld = AvaField(mx[0], mx[2], False)
if mx[4] == "(optional)":
fld.optional = True
continue
if mx[2] == "{":
fld.avatype = AvaField("{}", walk_fields(mx[3]), False)
ret.append(fld);
continue
if mx[2] == "[]":
fld.avatype = AvaField("[]", walk_fields(mx[4]), False)
ret.append(fld);
continue
print("SYNTAX ERROR. CHECK GRAMMAR FOR UNTREATED CASES.\nLENGTH {} \nMX {} \nPART {}".format(lx, mx, part))
exit(1)
return ret
def parse_api(spec_file, grammar_file):
api = AvaApi([])
apispec = read_spec(spec_file)
grammar = open(grammar_file, 'r').read()
parser = tatsu.compile(grammar)
for endpoint, v in apispec.items():
ep = AvaEndpoint(endpoint, [])
for chunk in v:
if len(chunk.strip()) > 0:
ix = parser.parse(chunk)
package, methodx = ix[0].split(".")
mth = AvaMethod(package, methodx, [], [])
for inx in range(2, len(ix)):
part = ix[inx]
if part == "}" or part == "{":
continue
if isinstance(part, list):
mth.params = walk_fields(part)
if part == "->":
mth.returns = walk_fields(ix[inx+1])
break # end tree processing
ep.methods.append(mth)
api.endpoints.append(ep)
return api
def render_txt(api):
for endpoint in api.endpoints:
print(endpoint.endpoint)
for mtx in endpoint.methods:
print("\t{}".format(mtx))
def render_list_field(flds):
out_struct = []
for prm in flds:
if isinstance(prm.avatype, AvaField):
out_struct.append({
"type": render_dict_field([prm.avatype]),
"optional": prm.optional
})
elif isinstance(prm.avatype, list):
newtype = render_list_field(prm.avatype)
out_struct.append({
"type": newtype,
"optional": prm.optional
})
else:
out_struct.append({
"type": prm.avatype,
"optional": prm.optional
})
return out_struct
def render_dict_field(flds):
out_struct = {}
for prm in flds:
if prm.name not in out_struct:
out_struct[prm.name] = {}
if isinstance(prm.avatype, AvaField):
out_struct[prm.name] = {
"type": render_dict_field([prm.avatype]),
"optional": prm.optional
}
elif isinstance(prm.avatype, list):
newtype = []
else:
out_struct[prm.name] = {
"type": prm.avatype,
"optional": prm.optional
}
return out_struct
def render_dict(api):
out_struct = {}
for endpoint in api.endpoints:
ep = endpoint.endpoint
out_struct[ep] = {}
for mtx in endpoint.methods:
if mtx.package not in out_struct[ep]:
out_struct[ep][mtx.package] = {}
if mtx.methodx not in out_struct[ep][mtx.package]:
out_struct[ep][mtx.package][mtx.methodx] = {}
out_struct[ep][mtx.package][mtx.methodx]["parameters"] = render_dict_field(mtx.params)
out_struct[ep][mtx.package][mtx.methodx]["returns"] = render_dict_field(mtx.returns)
return out_struct
def render_json(api):
return json.dumps(render_dict(api), sort_keys=True, indent=4)
if __name__ == "__main__":
api = parse_api(SPEC_FILE, GRAMMAR_FILE)
outx = render_json(api)
print(outx)
```
#### File: avaxpython/chains/manager.py
```python
from typing import List, Set
from avaxpython.ids.ID import ID
from avaxpython.snow.engine.avalanche.transitive import Transitive as AVMTransitive
from avaxpython.snow.engine.snowman.transitive import Transitive as SnowTransitive
from avaxpython.chains.subnet import Subnet, subnet
from avaxpython.snow.engine.avalanche.vertex.vm import DAGVM
from avaxpython.snow.engine.avalanche.config.config import Config as AvalancheConfig
from avaxpython.snow.engine.snowman.block.vm import ChainVM
from avaxpython.snow.networking.router.handler import Handler
from avaxpython.snow.context import Context
from avaxpython.utils.constants import application
from avaxpython.ids.aliases import Aliaser
from avaxpython.chains import chain_configs
from avaxpython.chains.chain_configs import AVAXAssetID, ConsensusDispatcher, DecisionDispatcher, x_chain_id
from avaxpython.chains.chain import chain
from avaxpython.chains.chain_parameters import ChainParameters
from avaxpython.chains.manager_config import ManagerConfig
from avaxpython.snow.engine.avalanche.bootstrap.config import Config as BootstrapConfig
from avaxpython.snow.engine.common.config import Config as CommonConfig
class Manager:
def __init__(self):
self.chains = {}
self.subnets = {}
self.config = ManagerConfig()
self.aliaser = Aliaser()
def Router(self):
pass
def CreateChain(self, chain_parameters):
"""Create a chain in the future"""
pass
def ForceCreateChain(self, chain_parameters):
"""Create a chain now"""
if chain_parameters.SubnetID in self.subnets:
sb = self.subnets[chain_parameters.SubnetID]
else:
sb = subnet()
self.subnets[chain_parameters.SubnetID] = sb
sb.addChain(chain_parameters.ID)
chain = self.buildChain(chain_parameters, sb)
self.chains[chain_parameters.ID] = chain.Handler
# Until we have proper dynamic chain creation, load the main chains
# from static config.
chain_configs.NodeID = chain_parameters.NodeID
chain_configs.InitChains()
for chain_id_str in chain_configs.chains:
chain = chain_configs.chains[chain_id_str]
self.chains[str(chain_parameters.ID)] = chain
self.config.Router.AddChain(chain.Handler)
def buildChain(self, chain_parameters: ChainParameters, sb: Subnet):
vmID = self.config.VMManager.Lookup(chain_parameters.VMAlias)
primaryAlias = self.aliaser.PrimaryAlias(chain_parameters.ID)
new_chain : chain = None
chain_configs.AVAXAssetID = self.config.AVAXAssetID
chain_configs.x_chain_id = self.config.x_chain_id
chain_configs.DecisionDispatcher = self.config.DecisionEvents
chain_configs.ConsensusDispatcher = self.config.ConsensusEvents
ctx = Context(
NetworkID = self.config.NetworkID,
SubnetID = chain_parameters.SubnetID,
chain_id = chain_parameters.ID,
NodeID = self.config.NodeID,
x_chain_id = self.config.x_chain_id,
AVAXAssetID = self.config.AVAXAssetID,
Log = None,
DecisionDispatcher = self.config.DecisionEvents,
ConsensusDispatcher = self.config.ConsensusEvents,
Keystore = None,
SharedMemory = None,
BCLookup = self,
SNLookup = self,
Namespace = f"{application.PlatformName}_{primaryAlias}_vm",
Metrics = None,
EpochFirstTransition = self.config.EpochFirstTransition,
EpochDuration = self.config.EpochDuration,
)
fxs = []
vmFactory = self.config.VMManager.GetVMFactory(vmID)
vm = vmFactory.New(ctx)
consensusParams = self.config.ConsensusParams
vdrs = self.config.Validators.GetValidators(chain_parameters.SubnetID)
beacons = vdrs
if chain_parameters.CustomBeacons:
beacons = chain_parameters.CustomBeacons
if isinstance(vm, DAGVM):
new_chain = self.createAvalancheChain(
ctx,
chain_parameters,
vdrs,
beacons,
vm,
fxs,
consensusParams,
0,
sb,
)
elif isinstance(vm, ChainVM):
new_chain = self.createSnowmanChain(
ctx,
chain_parameters,
vdrs,
beacons,
vm,
fxs,
consensusParams,
0,
sb,
)
else:
raise Exception("the vm should have type avalanche.DAGVM or snowman.ChainVM. Chain not created")
self.config.Router.AddChain(new_chain.Handler)
return new_chain
def AddRegistrant(self, Registrant):
"""Add a registrant [r]. Every time a chain is created, [r].RegisterChain([new chain]) is called"""
pass
def Lookup(self, alias: str) -> ID:
"""Given an alias, return the ID of the chain associated with that alias"""
pass
def LookupVM(self, alias: str) -> ID:
"""Given an alias, return the ID of the VM associated with that alias"""
pass
def Aliases(self, a_id: ID) -> List[str]:
"""Return the aliases associated with a chain"""
pass
def Alias(self, a_id: ID, alias: str):
"""Add an alias to a chain"""
pass
def SubnetID(self, chaina_ID: ID) -> ID:
"""Returns the ID of the subnet that is validating the provided chain"""
pass
def IsBootstrapped(self, a_id: ID) -> bool:
"""Returns true iff the chain with the given ID exists and is finished bootstrapping"""
def Shutdown(self):
pass
def createAvalancheChain(self, ctx, chain_params, validators, beacons, vm, fxs, consensusParams, bootstrapWeight, sb):
engine = AVMTransitive(vm, ctx)
engine.Initialize(AvalancheConfig(
ConfigP = BootstrapConfig(
CConfig = CommonConfig(
Ctx = ctx,
Validators= validators,
Beacons= beacons,
SampleK= sampleK,
StartupAlpha= (3*bootstrapWeight + 3) / 4,
Alpha= bootstrapWeight/2 + 1, # must be > 50%
Sender= sender,
Subnet= sb,
Delay= delay,
RetryBootstrap= m.RetryBootstrap,
RetryBootstrapMaxAttempts= m.RetryBootstrapMaxAttempts,
),
VtxBlocked= vtxBlocker,
TxBlocked= txBlocker,
Manager= vtxManager,
VM= vm
),
Params= consensusParams,
Consensus = avcon.Topological(),
))
genesisData = chain_params.GenesisData
handler = Handler(ctx=ctx, engine=engine)
handler.Initialize(engine, validators, None, self.config.MaxPendingMsgs, self.config.MaxNonStakerPendingMsgs, self.config.StakerMSGPortion, self.config.StakerCPUPortion, f"{consensusParams.Namespace}_handler", consensusParams.Metrics, None)
chainAlias = self.PrimaryAlias(ctx.chain_id)
return chain(
name = chainAlias,
engine = engine,
handler = handler,
vm = vm,
ctx = ctx,
params = chain_params
)
def createSnowmanChain(self, ctx, chain_params, validators, beacons, vm, fxs, consensusParams, bootstrapWeight, sb):
engine = SnowTransitive(vm, ctx)
handler = Handler(ctx=ctx, engine=engine)
genesisData = chain_params.GenesisData
handler.Initialize(engine, validators, None, self.config.MaxPendingMsgs, self.config.MaxNonStakerPendingMsgs, self.config.StakerMSGPortion, self.config.StakerCPUPortion, f"{consensusParams.Namespace}_handler", consensusParams.Metrics, None)
chainAlias = self.aliaser.PrimaryAlias(ctx.chain_id)
return chain(
name = chainAlias,
engine = engine,
handler = handler,
vm = vm,
ctx = ctx,
beacons = beacons,
params = chain_params
)
```
#### File: avaxpython/codec/manager.py
```python
from typing import Dict
from avaxpython.codec.codec import Codec
from avaxpython.utils.wrappers.Packer import Packer
from avaxpython.codec.reflectcodec.type_codec import genericCodec
# default max size, in bytes, of something being marshalled by Marshal()
defaultMaxSize = 1 << 18
# initial capacity of byte slice that values are marshaled into.
# Larger value --> need less memory allocations but possibly have allocated but unused memory
# Smaller value --> need more memory allocations but more efficient use of allocated memory
initialSliceCap = 128
errMarshalNil = Exception("can't marshal null pointer or interface")
errUnmarshalNil = Exception("can't unmarshal null data")
errCantPackVersion = Exception("couldn't pack codec version")
errCantUnpackVersion = Exception("couldn't unpack codec version")
errUnknownVersion = Exception("unknown codec version")
errDuplicatedVersion = Exception("duplicated codec version")
class Manager:
"""Manager describes the functionality for managing codec versions."""
    def __init__(self, maxSize=defaultMaxSize, codecs: Dict[int, Codec] = None) -> None:
        self.maxSize = maxSize
        self.codecs: Dict[int, Codec] = codecs if codecs is not None else {}
def RegisterCodec(self, version: int, codec):
"""Associate the given codec with the given version ID"""
pass
    def SetMaxSize(self, max_size: int):
""" Define the maximum size, in bytes, of something serialized/deserialized
by this codec manager"""
pass
def Marshal(self, version: int, source):
"""Marshal the given value using the codec with the given version.
RegisterCodec must have been called with that version."""
pass
def Unmarshal(self, source: bytes, dest, type_ids={}):
""" Unmarshal the given bytes into the given destination. [destination] must
be a pointer or an interface. Returns the version of the codec that
produces the given bytes."""
if dest is None:
raise errUnmarshalNil
if source is None:
raise errUnmarshalNil
if len(source) > self.maxSize:
raise IndexError(f"Byte array exceeds maximum length {self.maxSize}")
p = Packer(b=source)
version = p.UnpackShort()
# TODO implement codec versioning. NOP to keep pylint happy.
version = version # punt
if p.Errored():
raise errCantUnpackVersion
return genericCodec.Unmarshal(p.Bytes[p.Offset:], dest, type_ids=type_ids)
```
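Unmarshal expects the payload to start with a two-byte codec version, which it reads with Packer.UnpackShort before handing the rest to the generic codec. The sketch below shows that framing with struct, assuming the Packer's usual big-endian uint16 encoding; the version number and payload bytes are made up.
```python
import struct

def frame(version: int, payload: bytes) -> bytes:
    # big-endian uint16 codec version, then the codec payload
    return struct.pack(">H", version) + payload

def unframe(data: bytes):
    version = struct.unpack_from(">H", data, 0)[0]
    return version, data[2:]

blob = frame(0, b"\x01\x02\x03")
print(unframe(blob))  # (0, b'\x01\x02\x03')
```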
#### File: avaxpython/genesis/beacons.py
```python
from avaxpython.utils import constants
import numpy as np
beacon_ips = {
constants.MainnetID: [
"192.168.3.11:9651",
"172.16.58.3:9651",
"172.16.58.3:9651",
"172.16.58.3:9651",
"172.16.58.3:9651",
"192.168.3.11:9651",
"192.168.3.11:9651",
"172.16.17.32:9651",
"192.168.127.12:9651",
"172.16.31.10:9651",
"172.16.58.3:9651",
"192.168.127.12:9651",
"172.16.31.10:9651",
"172.16.58.3:9651",
"192.168.3.11:9651",
"172.16.17.32:9651",
"172.16.17.32:9651",
"172.16.58.3:9651",
"172.16.17.32:9651",
"192.168.127.12:9651",
"192.168.3.11:9651",
"192.168.3.11:9651",
"192.168.3.11:9651",
"172.16.58.3:9651",
],
constants.FujiID: [
"172.16.17.32:21001",
"172.16.31.10:21001",
"172.16.17.32:21001",
"172.16.17.32:21001",
"172.16.31.10:21001",
"192.168.3.11:21001",
"172.16.31.10:21001",
"172.16.31.10:21001",
"172.16.17.32:21001",
"192.168.127.12:21001",
"172.16.31.10:21001",
"172.16.31.10:21001",
"172.16.31.10:21001",
"172.16.17.32:21001",
"192.168.127.12:21001",
"172.16.17.32:21001",
"192.168.127.12:21001",
"172.16.58.3:21001",
"172.16.17.32:21001",
"172.16.58.3:21001",
]
}
beacon_ids = {
constants.MainnetID: [
"NodeID-A6onFGyJjA37EZ7kYHANMR1PFRT8NmXrF",
"NodeID-6SwnPJLH8cWfrJ162JjZekbmzaFpjPcf",
"NodeID-GSgaA47umS1px2ohVjodW9621Ks63xDxD",
"NodeID-BQEo5Fy1FRKLbX51ejqDd14cuSXJKArH2",
"NodeID-Drv1Qh7iJvW3zGBBeRnYfCzk56VCRM2GQ",
"NodeID-DAtCoXfLT6Y83dgJ7FmQg8eR53hz37J79",
"NodeID-F<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"NodeID-Hr78Fy8uDYiRYocRYHXp4eLCYeb8x5UuM",
"NodeID-9CkG9MBNavnw7EVSRsuFr7ws9gascDQy3",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
],
constants.FujiID: [
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"NodeID-<KEY>9Tx7pUonPaS",
"NodeID-JjvzhxnLHLUQ5HjVRkvG827ivbLXPwA9u",
"NodeID-4CWTbdvgXHY1CLXqQNAp22nJDo5nAmts6",
]
}
# getIPs returns the beacon IPs for each network
def getIPs(networkID):
if networkID in beacon_ips.keys():
return beacon_ips[networkID]
raise Exception(f"networkID {networkID} not found")
# getNodeIDs returns the beacon node IDs for each network
def getNodeIDs(networkID):
if networkID in beacon_ids:
return beacon_ids[networkID]
raise Exception(f"networkID {networkID} not found")
# SampleBeacons returns a sample of the beacons this node should connect to
def SampleBeacons(networkID, count):
    ips = getIPs(networkID)
    ids = getNodeIDs(networkID)
    numIPs = len(ips)
    if numIPs < count:
        count = numIPs
    # sample indices (without replacement) so each sampled IP stays paired with its node ID
    indices = np.random.choice(numIPs, count, replace=False)
    sampledIPs = [ips[i] for i in indices]
    sampledIDs = [ids[i] for i in indices]
    return (sampledIPs, sampledIDs)
```
#### File: avaxpython/genesis/config.py
```python
from typing import List
from avaxpython.ids.ShortID import ShortID
class LockedAmount:
def __init__(self):
self.Amount: int
self.Locktime: int
class Allocation:
def __init__(self):
self.ETHAddr: ShortID
self.AVAXAddr: ShortID
self.InitialAmount: int
self.UnlockSchedule: List[LockedAmount]
class UnparsedAllocation:
def __init__(self):
self.ETHAddr: str
self.AVAXAddr: str
self.InitialAmount: int
self.UnlockSchedule: List[LockedAmount]
class UnparsedStaker:
def __init__(self):
self.NodeID: str
self.RewardAddress: str
self.DelegationFee: int
class Staker:
def __init__(self):
self.NodeID: ShortID
self.RewardAddress: ShortID
self.DelegationFee: int
def Unparse(self, networkID: int) -> UnparsedStaker:
avaxAddr = formatting.FormatAddress("X", constants.GetHRP(networkID), s.RewardAddress.Bytes())
return UnparsedStaker(
NodeID=self.NodeID.PrefixedString(constants.NodeIDPrefix),
RewardAddress=avaxAddr,
DelegationFee=self.DelegationFee,
)
class Config:
def __init__(self):
self.NetworkID: int
self.Allocations: List[Allocation]
self.StartTime: int
self.InitialStakeDuration: int
self.InitialStakeDurationOffset: int
self.InitialStakedFunds: List[ShortID]
self.InitialStakers: List[Staker]
self.CChainGenesis: str
self.Message:str
def GetConfig(networkID: int) -> Config:
if networkID == constants.MainnetID:
return MainnetConfig
elif networkID == constants.FujiID:
return FujiConfig
elif networkID == constants.LocalID:
return LocalConfig
else:
tempConfig = LocalConfig
tempConfig.NetworkID = networkID
return tempConfig
```
#### File: avaxpython/genesis/genesis.py
```python
from typing import List, Tuple
from avaxpython.ids.ID import ID
from avaxpython.genesis import config as config_pkg
from avaxpython.genesis.config import Config
from avaxpython.utils import constants
def Genesis(networkID: int, filepath: str) -> Tuple[bytes, ID]:
""" Genesis returns the genesis data of the Platform Chain.
Since an Avalanche network has exactly one Platform Chain, and the Platform
Chain defines the genesis state of the network (who is staking, which chains
exist, etc.), defining the genesis state of the Platform Chain is the same as
defining the genesis state of the network.
Genesis accepts:
1) The ID of the new network. [networkID]
2) The location of a custom genesis config to load. [filepath]
If [filepath] is empty or the given network ID is Mainnet, Testnet, or Local, loads the
network genesis state from predefined configs. If [filepath] is non-empty and networkID
isn't Mainnet, Testnet, or Local, loads the network genesis data from the config at [filepath].
Genesis returns:
1) The byte representation of the genesis state of the platform chain
(ie the genesis state of the network)
2) The asset ID of AVAX"""
config = config_pkg.GetConfig(networkID)
return FromConfig(config)
def FromConfig(config: Config) -> Tuple[bytes, ID]:
"""FromConfig returns:
1) The byte representation of the genesis state of the platform chain (ie the genesis state of the network)
2) The asset ID of AVAX"""
ret_b = b""
ret_id = ID()
return ret_b, ret_id
```
#### File: avaxpython/ids/aliases.py
```python
from typing import Dict, List
from avaxpython.ids.ID import ID
class Aliaser:
"""Aliaser allows one to give an ID aliases and lookup the aliases given to an
ID. An ID can have arbitrarily many aliases; two IDs may not have the same
alias."""
def __init__(self):
self.dealias: Dict[str, ID] = {}
        self.aliases: Dict[ID, List[str]] = {}
def Initialize(self):
"""Initialize the aliaser to have no aliases"""
self.dealias = {}
self.aliases = {}
    def Lookup(self, alias: str) -> ID:
        """Lookup returns the ID associated with alias"""
        if alias in self.dealias:
            return self.dealias[alias]
        return None
def Aliases(self, id: ID):
if id in self.aliases:
return self.aliases[id]
return []
def PrimaryAlias(self, id: ID) -> str:
"""PrimaryAlias returns the first alias of [id]"""
if id in self.aliases:
aliases = self.aliases[id]
return aliases[0]
return None
    def Alias(self, id: ID, alias: str):
        """Alias gives [id] the alias [alias]"""
        if alias not in self.dealias:
            self.dealias[alias] = id
        if id not in self.aliases:
            self.aliases[id] = []
        self.aliases[id].append(alias)
    def RemoveAliases(self, id: ID):
        """RemoveAliases of the provided ID"""
        for alias in self.aliases.get(id, []):
            self.dealias.pop(alias, None)
        self.aliases.pop(id, None)
```
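Aliaser keeps a two-way mapping between an ID and its aliases. A short usage sketch follows; it assumes the class is importable (for example from avaxpython.ids.aliases), and the chain ID is shown as a plain string for brevity rather than an ids.ID instance.
```python
a = Aliaser()
chain_id = "example-chain-id"      # a real caller would pass an ids.ID instance
a.Alias(chain_id, "X")
a.Alias(chain_id, "avm")
print(a.Lookup("X") == chain_id)   # True
print(a.PrimaryAlias(chain_id))    # "X"
print(a.Aliases(chain_id))         # ["X", "avm"]
```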
#### File: avaxpython/network/Messages.py
```python
from .Op import Op
from .Field import Field
class Messages:
"""
Network message structures.
Canonical version: avalanchego/network/commands.go
"""
__msg_structure = {
# Handshake:
Op.GetVersion: [],
Op.Version: [Field.NetworkID, Field.NodeID, Field.MyTime, Field.IP, Field.VersionStr],
Op.GetPeerList: [],
Op.PeerList: [Field.Peers],
Op.Ping: [],
Op.Pong: [],
# Bootstrapping:
Op.GetAcceptedFrontier: [Field.ChainID, Field.RequestID, Field.Deadline],
Op.AcceptedFrontier: [Field.ChainID, Field.RequestID, Field.ContainerIDs],
Op.GetAccepted: [Field.ChainID, Field.RequestID, Field.Deadline, Field.ContainerIDs],
Op.Accepted: [Field.ChainID, Field.RequestID, Field.ContainerIDs],
Op.GetAncestors: [Field.ChainID, Field.RequestID, Field.Deadline, Field.ContainerID],
Op.MultiPut: [Field.ChainID, Field.RequestID, Field.MultiContainerBytes],
# Consensus:
Op.Get: [Field.ChainID, Field.RequestID, Field.Deadline, Field.ContainerID],
Op.Put: [Field.ChainID, Field.RequestID, Field.ContainerID, Field.ContainerBytes],
Op.PushQuery: [Field.ChainID, Field.RequestID, Field.Deadline, Field.ContainerID, Field.ContainerBytes],
Op.PullQuery: [Field.ChainID, Field.RequestID, Field.Deadline, Field.ContainerID],
Op.Chits: [Field.ChainID, Field.RequestID, Field.ContainerIDs],
# Signature
Op.SignedVersion: [Field.NetworkID, Field.NodeID, Field.MyTime, Field.IP, Field.VersionStr, Field.VersionTime, Field.SigBytes],
Op.SignedPeerList: [Field.SignedPeers],
}
@classmethod
def get(cls, op):
if op in cls.__msg_structure:
return cls.__msg_structure.get(op)
raise LookupError(f"Message structure not found for Op {op}")
```
#### File: avaxpython/network/Op.py
```python
class Op:
GetVersion = 0
Version = 1
GetPeerList = 2
PeerList = 3
Ping = 4
Pong = 5
# Bootstrapping:
GetAcceptedFrontier = 6
AcceptedFrontier = 7
GetAccepted = 8
Accepted = 9
GetAncestors = 10
MultiPut = 11
# Consensus:
Get = 12
Put = 13
PushQuery = 14
PullQuery = 15
Chits = 16
SignedVersion = 17
SignedPeerList = 18
__op = {
GetVersion: "get_version",
Version: "version",
GetPeerList: "get_peerlist",
PeerList: "peerlist",
Ping: "ping",
Pong: "pong",
GetAcceptedFrontier: "get_accepted_frontier",
AcceptedFrontier: "accepted_frontier",
GetAccepted: "get_accepted",
Accepted: "accepted",
Get: "get",
GetAncestors: "get_ancestors",
Put: "put",
MultiPut: "multi_put",
PushQuery: "push_query",
PullQuery: "pull_query",
Chits: "chits",
SignedVersion: "signed_version",
SignedPeerList: "signed_peer_list"
}
@classmethod
def String(cls, op):
if op in cls.__op:
return cls.__op.get(op)
raise KeyError(f"Op {op} not found")
@classmethod
def OpNames(cls):
return [cls.__op[x] for x in cls.__op]
```
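Op maps wire opcodes to human-readable names and Messages maps each opcode to its field layout. A lookup sketch combining the two is shown below; the import paths assume the package layout implied by the file paths above.
```python
from avaxpython.network.Op import Op
from avaxpython.network.Messages import Messages

print(Op.String(Op.Version))      # "version"
print(Messages.get(Op.Version))   # field layout of the version handshake message
print(Op.OpNames())               # every known opcode name
```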
#### File: avaxpython/network/peer.py
```python
import time
import avaxpython
from avaxpython.utils.ip import IPDesc
from avaxpython.network.Msg import Msg
from avaxpython.utils import constants
from avaxpython import Config
# alias is a secondary IP address where a peer
# was reached
class Alias:
"""Encapsulate an IP alias."""
def __init__(self, ip, expiry):
self.ip = ip
self.expiry = expiry
class Peer:
"""Class encapsulating the functionality of an AVAX network Peer"""
def __init__(self, net, conn, ip: IPDesc, tickerCloser=None, port=0, id=None, node=None, my_staking_ip = None):
# network this peer is part of
self.net = net
self.expiry = None
self.node = node
# if the version message has been received and is valid. is only modified
# on the connection's reader routine.
self.gotVersion = False # utils.AtomicBool
self.gotPeerList = False # utils.AtomicBool
self.connected = False # utils.AtomicBool
# only close the peer once
self.once = None # sync.Once
self.my_staking_ip = my_staking_ip
# if the close function has been called.
self.closed = False # utils.AtomicBool
# number of bytes currently in the send queue.
self.pendingBytes = 0 # int64
# lock to ensure that closing of the sender queue is handled safely
self.senderLock = None # sync.Mutex
# queue of messages this connection is attempting to send the peer. Is
# closed when the connection is closed.
self.sender = None # chan []byte
# ip may or may not be set when the peer is first started. is only modified
# on the connection's reader routine.
self.ip: IPDesc = ip
# ipLock must be held when accessing [ip].
self.ipLock = None # sync.RWMutex
# aliases is a list of IPs other than [ip] that we have connected to
# this peer at.
self.aliases = [] # []alias
# aliasTimer triggers the release of expired records from [aliases].
self.aliasTimer = None # *timer.Timer
# aliasLock must be held when accessing [aliases] or [aliasTimer].
self.aliasLock = None # sync.Mutex
# id should be set when the peer is first created.
self.id = id
# the connection object that is used to read/write messages from
self.conn = conn
# version that the peer reported during the handshake
self.versionStruct = None
self.versionStr = None # utils.AtomicInterface
# unix time of the last message sent and received respectively
# Must only be accessed atomically
self.lastSent = None
self.lastReceived = None
self.tickerCloser = tickerCloser
# ticker processes
self.tickerOnce = None # sync.Once
self.Log = avaxpython.config().logger()
def __repr__(self):
return f"IP {self.ip} ID {self.id}"
# assume the [stateLock] is held
def Start(p):
# go p.ReadMessages()
# go p.WriteMessages()
pass
def StartTicker(p):
# go p.requestFinishHandshake()
# go p.sendPings()
# go p.monitorAliases()
pass
    def sendPings(p):
        # The Go implementation runs a ticker here (every p.net.pingFrequency); not yet ported:
        # sendPingsTicker = time.NewTicker(p.net.pingFrequency)
        # defer sendPingsTicker.Stop()
        pass
# request missing handshake messages from the peer
def requestFinishHandshake(p):
# finishHandshakeTicker = time.NewTicker(p.net.getVersionTimeout)
# defer finishHandshakeTicker.Stop()
pass
# while True:
# select {
# case <-finishHandshakeTicker.C:
# if connected || closed {
# return
# }
# if !gotVersion {
# p.GetVersion()
# }
# if !gotPeerList {
# p.GetPeerList()
# }
# case <-p.tickerCloser:
# return
# }
# monitorAliases periodically attempts
# to release timed out alias IPs of the
# peer.
# monitorAliases will acquire [stateLock]
# when an alias is released.
def monitorAliases(p):
# go func() {
# <-p.tickerCloser
# p.aliasTimer.Stop()
# }()
# p.aliasTimer.Dispatch()
pass
# attempt to read messages from the peer
def ReadMessages(p):
pass
# attempt to write messages to the peer
def WriteMessages(p):
pass
def Send(p, msg: Msg):
bts = msg.Bytes()
btlen = len(bts)
barr = bytearray(btlen.to_bytes(4, "big"))
barr.extend(bts)
b_out = bytes(barr)
b_sent = 0
while b_sent < len(b_out):
sent = p.conn.send(b_out[b_sent:])
if sent == 0:
raise RuntimeError(f"Cannot write {len(b_out[b_sent:])} bytes to peer {p} : Msg {msg}")
b_sent += sent
p.Log.debug(f"Sent {b_sent} bytes to Peer {p} : Msg {msg}")
return b_sent == len(b_out)
def handle(p, msg):
pass
def dropMessagePeer(p):
pass
def dropMessage(p, connPendingLen, networkPendingLen):
pass
def Close(p):
pass
def GetVersion(p):
pass
def Version(p):
ts = int(time.time())
msg = p.net.b.Version(constants.MainnetID, p.net.nodeID, ts, p.my_staking_ip, Config.AVAX_NETWORK_VERSION)
p.Send(msg)
def GetPeerList(p):
pass
def SendPeerList(p):
pass
def PeerList(p, peers):
"""Sends a peerlist message based on a list of Peer objects."""
peer_ips = []
for connected_peer in peers:
for peer in p.net.peers.values():
if connected_peer.id == peer.id:
peer_ips.append(peer.ip)
msg = p.net.b.PeerList(peer_ips)
p.Send(msg)
def PeerListByIds(p, peerids):
"""Sends a peerlist based on a list of peer ids"""
peer_ips = []
for pid in peerids:
for peer in p.net.peers.values():
if pid == peer.id:
peer_ips.append(peer.ip)
msg = p.net.b.PeerList(peer_ips)
p.Send(msg)
def Ping(p):
pass
def Pong(p):
pass
def getVersion(p, m):
pass
def version(p, msg):
pass
def getPeerList(p, msg):
pass
def peerList(msg):
pass
def ping(p, msg):
p.Pong()
def pong(p, msg):
pass
def getAcceptedFrontier(p, msg):
pass
def acceptedFrontier(p, msg):
pass
def getAccepted(p, msg):
pass
def accepted(p, msg):
pass
def get(p, msg):
pass
def getAncestors(p, msg):
pass
def put(p, msg):
pass
def multiPut(p, msg):
pass
def pushQuery(p, msg):
pass
def pullQuery(p, msg):
pass
def chits(p, msg):
pass
def tryMarkConnected(p):
p.connected = True
def discardIP(p):
pass
def discardMyIP(p):
pass
def setIP(p, ip):
p.ip = ip
def getIP(p):
return p.ip
def addAlias(p, ip):
pass
def releaseNextAlias(p, now_time):
"""releaseNextAlias returns the next released alias or None if none was released.
If none was released, then this will schedule the next time to remove an alias."""
def releaseExpiredAliases(p):
"""releaseExpiredAliases frees expired IP aliases. If there is an IP pending
expiration, then the expiration is scheduled."""
def releaseAllAliases(p):
"""releaseAllAliases frees all alias IPs.
assumes [stateLock] is held and that [aliasTimer] has been stopped"""
pass
```
#### File: avaxpython/parallel/Parallel.py
```python
import threading
import multiprocessing
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
import inspect
from avaxpython.Config import Config
class Parallel:
_tp_map = {
'thread': ThreadPoolExecutor,
'process': ProcessPoolExecutor
}
def __init__(self, tp = Config.DEFAULT_WORKER_MODE):
        if tp not in Parallel._tp_map:
raise RuntimeError("Unknown parallelization type.")
self.mode = tp
cls = Parallel._tp_map[tp]
self.cls = cls
# general worker
self.tp = cls(max_workers = Config.MAX_WORKERS)
# network worker
self.netp = cls(max_workers = Config.NETWORK_WORKERS)
def executor(self):
"""Returns the globally configured Executor class."""
return self.cls
def worker(self):
"""Returns the general worker Executor instance."""
return self.tp
def net_worker(self):
"""Returns the network-specific Executor instance."""
return self.netp
def go(self, fn, *args, **kwargs):
"""Launches a new thread outside of the general and network Executor workers' control."""
def _f1():
fn(*args, **kwargs)
if self.mode == "thread":
t1 = threading.Thread(target = _f1)
t1.start()
elif self.mode == "process":
p1 = multiprocessing.Process(target=_f1)
p1.start()
else:
raise RuntimeError(f"Unknown parallelization mode : {self.mode}")
```
#### File: snow/choices/decidable.py
```python
from avaxpython.ids.ID import ID as LongID
class Decidable:
"""Decidable represents element that can be decided.
Decidable objects are typically thought of as either transactions, blocks, or vertices.
"""
def __init__(self, idx: LongID = LongID()):
self.id: LongID = idx
def ID(self) -> LongID:
""" ID returns a unique ID for this element.
Typically, this is implemented by using a cryptographic hash of a
binary representation of this element. An element should return the same
IDs upon repeated calls.
"""
return self.id
def Accept(self):
""" Accept this element.
This element will be accepted by every correct node in the network.
"""
def Reject(self):
""" Reject this element.
This element will not be accepted by any correct node in the network.
"""
def Status(self) -> int:
""" Status returns this element's current status.
If Accept has been called on an element with this ID, Accepted should be
returned. Similarly, if Reject has been called on an element with this
ID, Rejected should be returned. If the contents of this element are
unknown, then Unknown should be returned. Otherwise, Processing should be returned."""
```
#### File: snow/choices/status.py
```python
from avaxpython.utils.wrappers.Packer import Packer
errUnknownStatus = Exception("unknown status")
class Status:
# List of possible status values
# [Unknown] Zero value, means the status is not known
# [Processing] means the operation is known, but hasn't been decided yet
# [Rejected] means the operation will never be accepted
# [Accepted] means the operation was accepted
Unknown = 0
Processing = 1
Rejected = 2
Accepted = 3
def __init__(self, status=0):
self.status = status
def MarshalJSON(self) -> bytes:
if not self.Valid():
return None
return ("\"" + self.String() + "\"").encode("utf-8")
def UnmarshalJSON(self, b: bytes):
b_str = b.decode("utf-8")
if b_str == "null":
return None
if b_str == "\"Unknown\"":
self.status = Status.Unknown
elif b_str == "\"Processing\"":
            self.status = Status.Processing
        elif b_str == "\"Rejected\"":
            self.status = Status.Rejected
        elif b_str == "\"Accepted\"":
            self.status = Status.Accepted
else:
raise errUnknownStatus
return None
def Fetched(self):
"""Fetched returns true if the status has been set."""
if self.status == Status.Processing:
return True
else:
return self.Decided()
def Decided(self):
"""Decided returns true if the status is Rejected or Executed."""
return self.status in [Status. Rejected, Status.Accepted]
def Valid(self):
"""Valid returns None if the status is a valid status."""
return self.status in [Status.Unknown, Status.Processing, Status.Rejected, Status.Accepted]
def String(self):
if self.status == Status.Unknown:
return "Unknown"
elif self.status == Status.Processing:
return "Processing"
elif self.status == Status.Rejected:
return "Rejected"
elif self.status == Status.Accepted:
return "Accepted"
else:
return "Invalid status"
def Bytes(self):
"""Bytes returns the byte repr. of this status"""
p = Packer()
p.PackInt(int(self.status))
return p.Bytes
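
# A quick self-check sketch of the Status helpers above (no external dependencies beyond
# this module); run directly to exercise the JSON round trip.
if __name__ == '__main__':
    s = Status(Status.Accepted)
    assert s.Valid() and s.Fetched() and s.Decided()
    assert s.MarshalJSON() == b'"Accepted"'
    s2 = Status()
    s2.UnmarshalJSON(b'"Processing"')
    assert s2.String() == "Processing"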
```
#### File: consensus/snowstorm/tx.py
```python
from avaxpython.snow.choices.decidable import Decidable
class Tx(Decidable):
# Dependencies is a list of transactions upon which this transaction
# depends. Each element of Dependencies must be verified before Verify is
# called on this transaction.
# Similarly, each element of Dependencies must be accepted before this
# transaction is accepted.
def Dependencies(self):
pass
# InputIDs is a set where each element is the ID of a piece of state that
# will be consumed if this transaction is accepted.
# In the context of a UTXO-based payments system, for example, this would
# be the IDs of the UTXOs consumed by this transaction
def InputIDs(self):
pass
# Verify that the state transition this transaction would make if it were
# accepted is valid. If the state transition is invalid, a non-nil error
# should be returned.
# It is guaranteed that when Verify is called, all the dependencies of
# this transaction have already been successfully verified.
def Verify(self):
pass
# Bytes returns the binary representation of this transaction.
# This is used for sending transactions to peers. Another node should be
# able to parse these bytes to the same transaction.
def Bytes(self):
pass
```
#### File: snowman/bootstrap/bootstrapper.py
```python
from avaxpython.snow.engine.common.config import Config as CommonConfig
from avaxpython.snow.engine.common.boostrapper import Bootstrapper as CommonBootstrapper
from avaxpython.snow.engine.common.fetcher import Fetcher
from avaxpython.snow.engine.snowman.block.vm import ChainVM
# Parameters for delaying bootstrapping to avoid potential CPU burns
initialBootstrappingDelay = 500 * 0.001 # seconds
maxBootstrappingDelay = 60
class Config(CommonConfig):
"""Bootstrapper-specific config"""
def __init__(self, ctx, validators, beacons, samplek, startupalpha, alpha, sender, bootstrapable, subnet, delay, retrybootstrap, rbmaxattempts):
super().__init__(ctx, validators, beacons, samplek, startupalpha, alpha, sender, bootstrapable, subnet, delay, retrybootstrap, rbmaxattempts)
# Blocked tracks operations that are blocked on blocks
self.Blocked = []
self.VM = None
self.Bootstrapped = super().IsBootstrapped
class Bootstrapper(Config, CommonBootstrapper, Fetcher):
def __init__(self, vm, ctx):
Config.__init__(self, ctx, [], 0, 0, 0, None, False, None, 0, 0, 0, 0)
CommonBootstrapper.__init__(self, None, None, None, None, None, None, None, None, None, None, None)
Fetcher.__init__(self, 0, 0, None)
# Blocked tracks operations that are blocked on blocks
self.Blocked = []
self.VM: ChainVM = vm
self.Ctx = ctx
self.Bootstrapped = None
# true if all of the vertices in the original accepted frontier have been processed
self.processedStartingAcceptedFrontier = False
# number of state transitions executed
self.executedStateTransitions = 0
self.delayAmount = 0
```
#### File: avax-python/avaxpython/structured.py
```python
class AvaxStructured:
"""Utility class to generate __repr__ according to serialized field tags."""
def __struct__(self):
"""
__struct__() should provide a JSON-friendly Python representation of an object tree
All AvaxStructured objects automatically inherit this general implementation, but are
encouraged to provide their own for more fine grained representation.
__struct__ does not return JSON. It simply structures Python objects into JSON-compatible types.
That way the structure can be encoded in other formats if necessary.
"""
_d = {}
for field_key, field_dict in self._avax_tags:
attr = getattr(self, field_key)
j_key = field_key
if "json" in field_dict and len(field_dict["json"]) > 0:
j_key = field_dict["json"]
if "__struct__" in dir(attr):
_d[j_key] = attr.__struct__()
else:
_d[j_key] = attr
return _d
def __repr__(self):
return str(self.__struct__())
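
# A small illustrative subclass (hypothetical, not part of avax-python) showing how
# _avax_tags drives the JSON-friendly form produced by __struct__().
if __name__ == '__main__':
    class _Example(AvaxStructured):
        _avax_tags = [("Amt", {"serialize": True, "json": "amount"})]
        def __init__(self):
            self.Amt = 42
    print(_Example())  # -> {'amount': 42}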
```
#### File: avaxpython/version/version.py
```python
from avaxpython.errors import errors
defaultAppSeparator = "/"
defaultVersionSeparator = "."
errDifferentApps = Exception("different applications")
errDifferentMajor = Exception("different major version")
class version:
def __init__(self, app, major, minor, patch, tostr):
self.app = app
self.major = major
self.minor = minor
self.patch = patch
self.tostr = tostr
def App(self):
return self.app
def Major(self):
return self.major
def Minor(self):
return self.minor
def Patch(self):
return self.patch
def String(self):
return self.tostr
def Compatible(self, o):
if self.App() != o.App():
return errDifferentApps
if self.Major() > o.Major():
return errDifferentMajor
return None
    def Before(self, o):
        """Before returns True if this version precedes [o] within the same application."""
        if self.App() != o.App():
            return False
        if self.Major() != o.Major():
            return self.Major() < o.Major()
        if self.Minor() != o.Minor():
            return self.Minor() < o.Minor()
        return self.Patch() < o.Patch()
# NewDefaultVersion returns a new version with default separators
def NewDefaultVersion(app, major, minor, patch):
return NewVersion(app, defaultAppSeparator, defaultVersionSeparator, major, minor, patch)
# NewVersion returns a new version
def NewVersion(app, appSeparator, versionSeparator, major, minor, patch):
v_string = "%s%s%d%s%d%s%d", (app, appSeparator, major, versionSeparator, minor, versionSeparator, patch)
return version(app=app, major=major, minor=minor, patch=patch, tostr=v_string)
class parser:
def __init__(self, sep, vsep):
self.appSeparator = sep
self.versionSeparator = vsep
    def Parse(self, s):
        """Parse returns a (version, error) pair; error is None on success, mirroring the Go API."""
        splitApp = s.split(self.appSeparator)
        if len(splitApp) != 2:
            return None, ValueError(f"failed to parse {s} as a version")
        splitVersion = splitApp[1].split(self.versionSeparator, 2)
        if len(splitVersion) != 3:
            return None, ValueError(f"failed to parse {s} as a version")
        try:
            major = int(splitVersion[0])
            minor = int(splitVersion[1])
            patch = int(splitVersion[2])
        except ValueError as err:
            return None, ValueError(f"failed to parse {s} as a version due to {err}")
        return NewVersion(splitApp[0], self.appSeparator, self.versionSeparator, major, minor, patch), None
def NewDefaultParser():
return NewParser(defaultAppSeparator, defaultVersionSeparator)
def NewParser(appSeparator, versionSeparator):
return parser(appSeparator, versionSeparator)
```
#### File: vms/avm/import_tx.py
```python
from avaxpython.vms.components.avax.transferables import TransferableInput
from avaxpython.vms.avm.base_tx import BaseTx
from avaxpython.ids.ID import ID
from avaxpython.types import Slice
from avaxpython.structured import AvaxStructured
errNoImportInputs = Exception("no import inputs")
class ImportTx(AvaxStructured):
"""ImportTx is a transaction that imports an asset from another blockchain."""
_avax_tags = [
("BaseTx", { "serialize": True}),
("SourceChain", { "serialize": True, "json" : "sourceChain" }),
("ImportedIns", { "element_type": TransferableInput, "serialize": True, "json":"importedInputs"}),
]
def __init__(self) -> None:
self.BaseTx = BaseTx()
# Which chain to consume the funds from
self.SourceChain = ID()
# The inputs to this transaction
self.ImportedIns = Slice()
def InputUTXOs(self):
"""InputUTXOs track which UTXOs this transaction is consuming."""
utxos = self.BaseTx.InputUTXOs()
for inx in self.ImportedIns:
inx.Symbol = True
utxos.append(inx.UTXOID)
return utxos
def ConsumedAssetIDs(self):
"""ConsumedAssetIDs returns the IDs of the assets this transaction consumes"""
assets = self.BaseTx.AssetIDs()
for inx in self.ImportedIns:
assets.Add(inx.AssetID())
return assets
def AssetIDs(self):
"""AssetIDs returns the IDs of the assets this transaction depends on"""
assets = self.BaseTx.AssetIDs()
for inx in self.ImportedIns:
assets.Add(inx.AssetID())
return assets
def NumCredentials(self):
"""NumCredentials returns the number of expected credentials"""
return self.BaseTx.NumCredentials() + len(self.ImportedIns)
def SyntacticVerify(self, ctx, c, txFeeAssetID, txFee, a, numFxs):
"""SyntacticVerify that this transaction is well-formed."""
def SemanticVerify(selfvm, tx, creds):
"""SemanticVerify that this transaction is well-formed."""
def ExecuteWithSideEffects(selfvm, batch):
"""ExecuteWithSideEffects writes the batch with any additional side effects"""
    def __struct__(self):
        _d = {}
        for k, v in self._avax_tags:
            attr = getattr(self, k)
            if "__struct__" in dir(attr):
                _d[k] = attr.__struct__()
            else:
                _d[k] = attr
        return _d
def __repr__(self):
return str(self.__struct__())
```
#### File: vms/avm/operation_tx.py
```python
from avaxpython.types import Slice
from avaxpython.vms.avm.base_tx import BaseTx
from avaxpython.vms.avm.operation import Operation
from avaxpython.structured import AvaxStructured
errOperationsNotSortedUnique = Exception("operations not sorted and unique")
errNoOperations = Exception("an operationTx must have at least one operation")
errDoubleSpend = Exception("inputs attempt to double spend an input")
class OperationTx(AvaxStructured):
"""OperationTx is a transaction with no credentials."""
_avax_tags = [
("BaseTx", { "serialize": True}),
("Ops", { "element_type": Operation, "serialize": True, "json":"operations"}),
]
def __init__(self) -> None:
self.BaseTx = BaseTx()
self.Ops = Slice()
def Operations(self):
"""Operations track which ops this transaction is performing. The returned array should not be modified."""
return self.Ops
def InputUTXOs(self):
"""InputUTXOs track which UTXOs this transaction is consuming."""
utxos = self.BaseTx.InputUTXOs()
for op in self.Ops:
            utxos.extend(op.UTXOIDs)
return utxos
def ConsumedAssetIDs(self):
"""ConsumedAssetIDs returns the IDs of the assets this transaction consumes"""
assets = self.BaseTx.AssetIDs()
for op in self.Ops:
if len(op.UTXOIDs) > 0:
assets.Add(op.AssetID())
return assets
def AssetIDs(self):
"""AssetIDs returns the IDs of the assets this transaction depends on"""
assets = self.BaseTx.AssetIDs()
        for op in self.Ops:
assets.Add(op.AssetID())
return assets
def NumCredentials(self) -> int:
"""NumCredentials returns the number of expected credentials"""
return self.BaseTx.NumCredentials() + len(self.Ops)
def UTXOs(self):
"""UTXOs returns the UTXOs transaction is producing."""
txID = self.ID()
utxos = self.BaseTx.UTXOs()
for op in self.Ops:
asset = op.AssetID()
for out in op.Op.Outs():
utxos.append(avax.UTXO(
UTXOID = avax.UTXOID(
TxID= txID,
                        OutputIndex = len(utxos),
),
Asset = avax.Asset(ID = asset),
Out = out,
))
return utxos
    def SyntacticVerify(self, ctx, c, txFeeAssetID, txFee, a, numFxs):
        """SyntacticVerify that this transaction is well-formed."""
    def SemanticVerify(self, vm, tx, creds):
        """SemanticVerify that this transaction is well-formed."""
```
#### File: vms/avm/tx.py
```python
from typing import List
from avaxpython.vms.components.verify.verification import Verifiable
from avaxpython.ids.ID import ID as LongID
from avaxpython.vms.components.avax.utxo import UTXO
from avaxpython.vms.components.avax.utxo_id import UTXOID
from avaxpython.snow.context import Context
from avaxpython.types import *
from avaxpython.structured import AvaxStructured
from avaxpython.utils.hashing import hashing
class UnsignedTx:
_avax_interface = True
def __init__(self) -> None:
self.Bytes: bytes = None
def Initialize(self, unsignedBytes: bytes, bbytes: bytes):
self.Bytes = bbytes
self.unsignedBytes = unsignedBytes
def ID(self) -> LongID:
pass
def UnsignedBytes(self) -> bytes:
return self.unsignedBytes
def Bytes(self) -> bytes:
pass
def ConsumedAssetIDs(self):
pass
def AssetIDs(self):
pass
def NumCredentials(self):
pass
def InputUTXOs(self) -> List[UTXOID]:
pass
def UTXOs(self) -> List[UTXO]:
pass
def SyntacticVerify(ctx, c, txFeeAssetID, txFee, creationTxFee, numFxs):
pass
def SemanticVerify(vm, tx, creds):
pass
def ExecuteWithSideEffects(vm, batch):
pass
def __struct__(self):
_s = {
'UnsignedTx' : 'Unsigned Tx Interface'
}
return _s
class Tx(AvaxStructured):
_avax_tags = [
("UnsignedTx", { "serializeV0": True, "serialize": True, "json" : "unsignedTx" }),
("Creds", { "element_type": Verifiable, "serializeV0": True, "serialize": True, "json":"credentials"}),
]
def __init__(self):
# The credentials of this transaction
self.UnsignedTx = UnsignedTx()
self.Creds: List[Verifiable] = Slice()
def Initialize(self, unsignedBytes, bbytes: bytes):
self.id = hashing.ComputeHash256Array(bbytes)
self.unsignedBytes = unsignedBytes
self.bytes = bbytes
def ID(self):
return self.id
```
#### File: components/core/block.py
```python
from avaxpython.types import Uint64
from avaxpython.ids.ID import ID
from avaxpython.vms.components.core.metadata import Metadata
from avaxpython.vms.components.core.snowman_vm import SnowmanVM
from avaxpython.structured import AvaxStructured
errBlockNil = RuntimeError("block is nil")
errRejected = RuntimeError("block is rejected")
class Block(AvaxStructured):
"""Block contains fields and methods common to block's in a Snowman blockchain.
Block is meant to be a building-block (pun intended).
When you write a VM, your blocks can (and should) embed a core.Block
to take care of some bioler-plate code.
Block's methods can be over-written by structs that embed this struct.
"""
_avax_tags = [
("PrntID", { "serialize": True, "json": "parentID"}),
("Hght", { "serialize": True, "json": "height"}),
]
def __init__(self) -> None:
self.Metadata = Metadata()
self.PrntID = ID()
self.Hght = Uint64()
self.VM = SnowmanVM()
```
#### File: vms/platformvm/add_subnet_validator_tx.py
```python
from avaxpython.vms.platformvm.base_tx import BaseTx
from avaxpython.vms.components.verify.verification import Verifiable
from avaxpython.vms.platformvm.validator import Validator, SubnetValidator
from avaxpython.structured import AvaxStructured
errDSValidatorSubset = Exception("all subnets' staking period must be a subset of the primary network")
class UnsignedAddSubnetValidatorTx(AvaxStructured):
"""UnsignedAddSubnetValidatorTx is an unsigned addSubnetValidatorTx"""
_avax_tags = [
("BaseTx", { "serialize": True}),
("Validator", { "serialize": True, "json": "validator"}),
("SubnetAuth", { "serialize": True, "json": "subnetAuthorization"}),
]
def __init__(self) -> None:
# Metadata, inputs and outputs
self.BaseTx = BaseTx()
# The validator
self.Validator = SubnetValidator()
# Auth that will be allowing this validator into the network
self.SubnetAuth = Verifiable()
def StartTime(self):
"""StartTime of this validator"""
return self.Validator.StartTime()
# EndTime of this validator
def EndTime(self):
return self.Validator.EndTime()
# Weight of this validator
def Weight(self):
return self.Validator.Weight()
def Verify(ctx, c, feeAmount, feeAssetID, minStakeDuration, maxStakeDuration):
"""Verify return None iff [tx] is valid"""
def SemanticVerify(vm, db, stx):
"""SemanticVerify this transaction is valid."""
def InitiallyPrefersCommit(vm):
"""InitiallyPrefersCommit returns true if the proposed validators start time is after the current wall clock time,"""
```
#### File: vms/platformvm/common_blocks.py
```python
from avaxpython.types import Slice
from avaxpython.vms.platformvm.block import Block
from avaxpython.vms.components.core.block import Block as CoreBlock
from avaxpython.structured import AvaxStructured
errInvalidBlockclass = TypeError("invalid block type")
class decision:
"""A decision block (either Commit, Abort, or DecisionBlock.) represents a
decision to either commit (accept) or abort (reject) the changes specified in
its parent, if its parent is a proposal. Otherwise, the changes are committed
immediately."""
def onAccept(self):
""" This function should only be called after Verify is called.
returns a database that contains the state of the chain if this block is
accepted."""
class CommonBlock(AvaxStructured):
"""CommonBlock contains the fields common to all blocks of the Platform Chain"""
_avax_tags = [
("Block", { "serialize": True}),
]
def __init__(self) -> None:
self.Block = CoreBlock()
self.vm = None # Do not initialize VM here.
# This block's children
self.children = Slice()
class CommonDecisionBlock(AvaxStructured):
"""CommonDecisionBlock contains the fields and methods common to all decision blocks"""
_avax_tags = [
("CommonBlock", { "serialize": True}),
]
def __init__(self) -> None:
self.CommonBlock = CommonBlock()
# state of the chain if this block is accepted
self.onAcceptDB = None
# to be executed if this block is accepted
self.onAcceptFunc = None
class SingleDecisionBlock(AvaxStructured):
"""SingleDecisionBlock contains the accept for standalone decision blocks"""
_avax_tags = [
("CommonDecisionBlock", { "serialize": True}),
]
def __init__(self) -> None:
        self.CommonDecisionBlock = CommonDecisionBlock()
class DoubleDecisionBlock(AvaxStructured):
"""DoubleDecisionBlock contains the accept for a pair of blocks"""
_avax_tags = [
("CommonDecisionBlock", { "serialize": True}),
]
def __init__(self) -> None:
self.CommonDecisionBlock = CommonDecisionBlock()
```
#### File: vms/platformvm/import_tx.py
```python
from avaxpython.types import Slice
from avaxpython.ids.ID import ID
from avaxpython.vms.platformvm.base_tx import BaseTx
from avaxpython.vms.components.avax.transferables import TransferableInput
from avaxpython.structured import AvaxStructured
errAssetIDMismatch = Exception("asset IDs in the input don't match the utxo")
errWrongNumberOfCredentials = Exception("should have the same number of credentials as inputs")
errNoImportInputs = Exception("tx has no imported inputs")
errInputsNotSortedUnique = Exception("inputs not sorted and unique")
class UnsignedImportTx(AvaxStructured):
"""UnsignedImportTx is an unsigned ImportTx"""
_avax_tags = [
("BaseTx", { "serialize": True}),
("SourceChain", { "serialize": True, "json": "sourceChain"}),
("ImportedInputs", { "element_type": TransferableInput, "serialize": True, "json": "importedInputs"}),
]
def __init__(self) -> None:
self.BaseTx = BaseTx()
# Which chain to consume the funds from
self.SourceChain = ID()
# Inputs that consume UTXOs produced on the chain
self.ImportedInputs = Slice()
def InputUTXOs(self):
"""InputUTXOs returns the UTXOIDs of the imported funds"""
        setx = set()
for inx in self.ImportedInputs:
setx.add(inx.InputID())
return setx
def Verify(self, avmID, ctx, c, feeAmount, feeAssetID):
"""Verify this transaction is well-formed"""
def SemanticVerify(self, vm, db, stx):
"""SemanticVerify this transaction is valid."""
def Accept(self, ctx, batch):
"""Accept this transaction and spend imported inputs
We spend imported UTXOs here rather than in semanticVerify because
we don't want to remove an imported UTXO in semanticVerify
only to have the transaction not be Accepted. This would be inconsistent.
Recall that imported UTXOs are not kept in a versionDB."""
utxoIDs = []
for inx in self.ImportedInputs:
utxoID = inx.InputID()
utxoIDs.append(utxoID[:])
```
#### File: vms/platformvm/reward_validator_tx.py
```python
from avaxpython.types import Bool
from avaxpython.ids.ID import ID
from avaxpython.structured import AvaxStructured
errShouldBeDSValidator = Exception("expected validator to be in the primary network")
errWrongTxType = Exception("wrong transaction type")
class UnsignedRewardValidatorTx(AvaxStructured):
"""UnsignedRewardValidatorTx is a transaction that represents a proposal to
remove a validator that is currently validating from the validator set.
If this transaction is accepted and the next block accepted is a Commit
block, the validator is removed and the address that the validator specified
receives the staked AVAX as well as a validating reward.
If this transaction is accepted and the next block accepted is an Abort
block, the validator is removed and the address that the validator specified
receives the staked AVAX but no reward."""
_avax_tags = [
("TxID", { "serialize": True, "json": "txID"}),
]
def __init__(self) -> None:
        # The Go struct embeds avax.Metadata here; that embedding is not ported in this class.
        # ID of the tx that created the delegator/validator being removed/rewarded
        self.TxID = ID()
# Marks if this validator should be rewarded according to this node.
self.shouldPreferCommit = Bool()
def SemanticVerify(self, vm, db, stx):
"""SemanticVerify this transaction performs a valid state transition.
# The current validating set must have at least one member.
# The next validator to be removed must be the validator specified in this block.
# The next validator to be removed must be have an end time equal to the current
# chain timestamp."""
```
#### File: vms/platformvm/validator.py
```python
from avaxpython.types import *
from avaxpython.ids.ID import ID
from avaxpython.ids.ShortID import ShortID
from avaxpython.structured import AvaxStructured
errBadSubnetID = ValueError("subnet ID can't be primary network ID")
class Validator(AvaxStructured):
"""Validator is a validator."""
_avax_tags = [
("NodeID", { "json": "version", "json" : "nodeID" }),
("Start", { "serialize": True, "json" : "start" }),
("End", { "serialize": True, "json":"end"}),
("Wght", { "serialize": True, "json":"weight"}),
]
def __init__(self) -> None:
# Node ID of the validator
self.NodeID = ShortID()
# Unix time this validator starts validating
self.Start = Uint64()
# Unix time this validator stops validating
self.End = Uint64()
# Weight of this validator used when sampling
self.Wght = Uint64()
class SubnetValidator:
"""Validator is a validator."""
_avax_tags = [
("Validator", { "json": "version" }),
("Subnet", { "serialize": True, "json" : "subnet" }),
]
def __init__(self) -> None:
self.Validator = Validator()
self.Subnet = ID()
```
#### File: avaxpython/wallet/BIP32.py
```python
from bip_utils import Bip32
from bip_utils.bip.bip_keys import BipPublicKey
from avaxpython.wallet.BIP44 import Bip44AVAXMainNet
import hashlib
import cb58ref
from avaxpython.wallet import bech32
import binascii
def key_from_seed(seed):
"""Return master private key from this seed"""
bip32_ctx = Bip32.FromSeed(seed)
return bip32_ctx
def get_preferred_HRP(networkID):
return "avax"
def address_from_publickey_bytes(bts: bytes) -> bytes:
m = hashlib.sha256()
m.update(bts)
sh256 = m.digest()
n = hashlib.new('ripemd160')
n.update(sh256)
return n.digest()
def address_from_publickey(pk) -> bytes:
    return address_from_publickey_bytes(pk.ToBytes())
def address_to_string(hrp: str, chainId: str, addr: bytes):
dta = bech32.convertbits(addr, 8, 5, True)
ret = bech32.bech32_encode(hrp, dta, bech32.Encoding.BECH32)
return "{}-{}".format(chainId, ret)
def derive_master_key(masterKey, derivationPath):
return masterKey.DerivePath(derivationPath)
def get_address_for_index(masterKey: Bip32, changePath: str, index: int, chainId, networkID) -> str:
derivation_path = f"{changePath}/{index}"
key = derive_master_key(masterKey, derivation_path)
public_key = BipPublicKey(key, Bip44AVAXMainNet)
pk = public_key.RawCompressed()
addr = address_from_publickey(pk)
    return address_to_string(get_preferred_HRP(networkID), chainId, addr)
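
# Illustrative only: build a bech32 address string from 33 placeholder bytes standing in
# for a compressed public key (not a real key); the "X" chain alias and "avax" HRP are
# just example values.
if __name__ == '__main__':
    fake_pubkey = bytes(33)
    addr_bytes = address_from_publickey_bytes(fake_pubkey)
    print(address_to_string("avax", "X", addr_bytes))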
```
|
{
"source": "jgeorgeson/flask-dance",
"score": 2
}
|
#### File: tests/contrib/test_google.py
```python
from __future__ import unicode_literals
import pytest
import responses
from urlobject import URLObject
from flask import Flask
from flask_dance.contrib.google import make_google_blueprint, google
from flask_dance.consumer import OAuth2ConsumerBlueprint
from flask_dance.consumer.backend import MemoryBackend
def test_blueprint_factory():
google_bp = make_google_blueprint(
client_id="foo",
client_secret="bar",
redirect_to="index",
)
assert isinstance(google_bp, OAuth2ConsumerBlueprint)
assert google_bp.session.scope == ["profile"]
assert google_bp.session.base_url == "https://www.googleapis.com/"
assert google_bp.session.client_id == "foo"
assert google_bp.client_secret == "bar"
assert google_bp.authorization_url == "https://accounts.google.com/o/oauth2/auth"
assert google_bp.token_url == "https://accounts.google.com/o/oauth2/token"
def test_load_from_config():
app = Flask(__name__)
app.secret_key = "anything"
app.config["GOOGLE_OAUTH_CLIENT_ID"] = "foo"
app.config["GOOGLE_OAUTH_CLIENT_SECRET"] = "bar"
google_bp = make_google_blueprint(redirect_to="index")
app.register_blueprint(google_bp)
resp = app.test_client().get("/google")
url = resp.headers["Location"]
client_id = URLObject(url).query.dict.get("client_id")
assert client_id == "foo"
def test_blueprint_factory_scope():
google_bp = make_google_blueprint(
client_id="foo",
client_secret="bar",
scope="customscope",
redirect_to="index",
)
assert google_bp.session.scope == "customscope"
@responses.activate
def test_context_local():
responses.add(responses.GET, "https://google.com")
# set up two apps with two different set of auth tokens
app1 = Flask(__name__)
goog_bp1 = make_google_blueprint(
"foo1", "bar1", redirect_to="url1",
backend=MemoryBackend({"access_token": "<PASSWORD>"}),
)
app1.register_blueprint(goog_bp1)
app2 = Flask(__name__)
goog_bp2 = make_google_blueprint(
"foo2", "bar2", redirect_to="url2",
backend=MemoryBackend({"access_token": "<PASSWORD>"}),
)
app2.register_blueprint(goog_bp2)
# outside of a request context, referencing functions on the `google` object
# will raise an exception
with pytest.raises(RuntimeError):
google.get("https://github.com")
# inside of a request context, `google` should be a proxy to the correct
# blueprint session
with app1.test_request_context("/"):
app1.preprocess_request()
google.get("https://google.com")
request = responses.calls[0].request
assert request.headers["Authorization"] == "Bearer app1"
with app2.test_request_context("/"):
app2.preprocess_request()
google.get("https://google.com")
request = responses.calls[1].request
assert request.headers["Authorization"] == "Bearer app2"
def test_offline():
app = Flask(__name__)
app.secret_key = "backups"
goog_bp = make_google_blueprint("foo", "bar", offline=True)
app.register_blueprint(goog_bp)
with app.test_client() as client:
resp = client.get(
"/google",
base_url="https://a.b.c",
follow_redirects=False,
)
# check that there is a `access_type=offline` query param in the redirect URL
assert resp.status_code == 302
location = URLObject(resp.headers["Location"])
assert location.query_dict["access_type"] == "offline"
def test_offline_reprompt():
app = Flask(__name__)
app.secret_key = "backups"
goog_bp = make_google_blueprint(
"foo", "bar", offline=True, reprompt_consent=True,
)
app.register_blueprint(goog_bp)
with app.test_client() as client:
resp = client.get(
"/google",
base_url="https://a.b.c",
follow_redirects=False,
)
assert resp.status_code == 302
location = URLObject(resp.headers["Location"])
assert location.query_dict["access_type"] == "offline"
assert location.query_dict["approval_prompt"] == "force"
```
|
{
"source": "jgeraigery/stach-extensions",
"score": 3
}
|
#### File: stach/extensions/IStachExtension.py
```python
from abc import abstractmethod, ABC
class IStachExtension(ABC):
@abstractmethod
def convert_to_dataframe(self):
"""
Converts all the tables in the provided package object to list of data frames.
:return: list of data frames
"""
```
|
{
"source": "jgerardhodge/plantcv",
"score": 2
}
|
#### File: plantcv/photosynthesis/reassign_frame_labels.py
```python
import numpy as np
import os
import pandas as pd
from skimage.util import img_as_bool
from plantcv.plantcv import fatal_error
from plantcv.plantcv.classes import PSII_data
from plantcv.plantcv._debug import _debug
from plantcv.plantcv import params
from plotnine import ggplot, aes, geom_line, geom_point, labs
def reassign_frame_labels(ps_da, mask):
"""
Analyze fluorescence induction curve and assign Fm or Fmp frame labels.
Designed for cropreporter data. Analyze fluorescence frames to find max mean fluorescence and assign Fm or Fmp.
Use this if you want to assign Fm/Fmp based on observed values rather than CropReporter metadata.
Inputs:
ps_da = photosynthesis xarray DataArray
mask = mask of plant (binary, single channel)
Returns:
ps_da = dataarray with updated frame_label coordinate
ind_fig = ggplot induction curve of fluorescence
ind_df = data frame of mean fluorescence in the masked region at each timepoint
:param ps_da: xarray.core.dataarray.DataArray
:param mask: numpy.ndarray
:return ps_da: xarray.core.dataarray.DataArray
:return ind_fig: ggplot figure
:return ind_df: pandas.core.frame.DataFrame
"""
try:
if ps_da.name != "lightadapted" and ps_da.name != "darkadapted":
fatal_error("You must provide a xarray DataArray with name lightadapted or darkadapted")
except AttributeError:
if isinstance(ps_da, PSII_data):
fatal_error("You need to provide the `darkadapted` or `lightadapted` dataarray")
else:
fatal_error("You must provide a xarray DataArray with name lightadapted or darkadapted")
if mask.shape != ps_da.shape[:2] or len(np.unique(mask)) > 2:
fatal_error(f"Mask needs to be binary and have shape {ps_da.shape[:2]}")
# Prime is empty for Fv/Fm (dark- and light-adapted) and p for Fq'/Fm'
datasets = {
"lightadapted": {
"prime": "p",
"label": "PSL"
},
"darkadapted": {
"prime": "",
"label": "PSD"
}
}
# Get the number of frame labels
ind_size = ps_da.frame_label.size
# Create a new frame label array populated with the current labels
idx = ps_da.frame_label.values
# Reset the frame labels after F0/Fp
for i in range(2, ind_size):
idx[i] = f"{datasets[ps_da.name.lower()]['label']}{i}"
# get plant mean for each frame based on mask
fluor_values = ps_da.where(img_as_bool(mask)[..., None, None]).mean(['x', 'y', 'measurement'])
# find frame with max mean
max_ind = np.argmax(fluor_values.data)
# assign max frame label
idx[max_ind] = f"Fm{datasets[ps_da.name.lower()]['prime']}"
# assign new labels back to dataarray
ps_da = ps_da.assign_coords({'frame_label': ('frame_label', idx)})
# save induction curve data to dataframe
ind_df = pd.DataFrame({"Timepoints": range(0, ind_size), "Fluorescence": fluor_values}) # "Measurement": meas})
# Make the histogram figure using plotnine
ind_fig = (ggplot(data=ind_df, mapping=aes(x='Timepoints', y='Fluorescence'))
+ geom_line(show_legend=True, color="green")
+ geom_point()
+ labs(title=f"{ps_da.name} fluorescence")
)
    # Plot/print the induction curve
_debug(visual=ind_fig,
filename=os.path.join(params.debug_outdir, str(params.device) + "_fluor_histogram.png"))
return ps_da, ind_fig, ind_df
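
# Hedged usage sketch (names are hypothetical): `ps` would typically come from
# pcv.photosynthesis.read_cropreporter(...) and expose `darkadapted`/`lightadapted`
# DataArrays, while `mask` is any binary plant mask from upstream segmentation:
#   da, fig, df = reassign_frame_labels(ps.darkadapted, mask)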
```
|
{
"source": "jgerardin/covid-chicago",
"score": 3
}
|
#### File: covid-chicago/data_processing/CDC Data MS.py
```python
import os
import pandas as pd
import numpy as np
import sys
import seaborn as sns
import matplotlib.pyplot as plt
sys.path.append('../')
from load_paths import load_box_paths
datapath, projectpath, wdir,exe_dir, git_dir = load_box_paths()
from processing_helpers import *
"""Define function methods"""
def load_data(column_list=None, remove_nas=False):
"""Read in only relevant columns """
if column_list == None:
column_list =['icu_length', 'hosp_length', 'age_group','res_county','res_state','hosp_yn', 'icu_yn', 'death_yn']
df_full = pd.read_csv(os.path.join(datapath, 'covid_IDPH', 'Corona virus reports', 'il_cdc_thru_0811.csv'),
usecols=column_list)
df = df_full.copy()
"""Remove Missings and Unknowns """
if remove_nas:
df = df.dropna(subset=["hosp_length"])
df = df.dropna(subset=["age_group"])
df = df.dropna(subset=["death_yn"])
df = df[df['age_group'] != 'Unknown' ]
df = df[df['icu_yn'] != 'Unknown' ]
df = df[df['icu_yn'] != 'Missing' ]
#print(df)
return df
def LOS_descriptive_tables(groupList, channel='hosp_length', sortByList=None, fname=None):
df_summary = df.groupby(groupList)[channel].agg(
[np.mean, CI_2pt5, CI_25, CI_50, CI_75, CI_97pt5]).reset_index()
if sortByList != None:
df_summary = df_summary.sort_values(by=sortByList)
if fname is not None:
df_summary.to_csv(os.path.join(plot_path,f'summary_{"_".join(groupList)}_{channel}_{fname}.csv'))
return df_summary
### Simple histogram, not age structured
def plot_hist(df, channel='hosp_length') :
plt.rcParams.update({'figure.figsize':(7,5), 'figure.dpi':100})
x = df[channel]
plt.hist(x, bins=50)
plt.gca().set(title=channel, ylabel='Frequency');
return plt
### Function for age structured plot
def plot_hist_by_grp(df, channel='hosp_length',groups = None, grp_name = None, truncate_at=20) :
## Get age groups
if groups == None:
groups = ['0 - 9 Years', '10 - 19 Years', '20 - 29 Years', '30 - 39 Years', '40 - 49 Years', '50 - 59 Years',
'60 - 69 Years', '70 - 79 Years', '80+ Years']
if grp_name == None:
grp_name = 'age_group'
palette = sns.color_palette('husl', len(groups))
fig = plt.figure(figsize=(10, 6))
fig.subplots_adjust(right=0.97, left=0.1, hspace=0.4, wspace=0.3, top=0.90, bottom=0.05)
fig.suptitle(x=0.5, y=0.999, t='Hospital LOS')
for c, grp in enumerate(groups):
if len(groups)==9:
ax = fig.add_subplot(3, 3, c + 1)
else:
ax = fig.add_subplot(4, 4, c + 1)
mdf = df[df[grp_name] == grp]
if truncate_at is not None:
mdf.loc[mdf[channel] >truncate_at, channel] = truncate_at
median = np.median(mdf[channel])
ax.hist(mdf[channel], bins=50, color=palette[0])
ax.set_title(groups[c])
ax.axvline(x=median, color='#737373', linestyle='--')
ax.set(xlabel='', ylabel='Frequency')
plt.savefig(os.path.join(plot_path, f'{channel}_by_{grp_name}.png'))
plt.savefig(os.path.join(plot_path, 'pdf', f'{channel}_by_{grp_name}.pdf'), format='PDF')
return plt
def plot_hist_by_grp_2(df, channel='hosp_length',color_channel = "icu_yn", groups = None, grp_name = None,truncate_at=None) :
## Get age groups
if groups == None:
groups = ['0 - 9 Years', '10 - 19 Years', '20 - 29 Years', '30 - 39 Years', '40 - 49 Years', '50 - 59 Years',
'60 - 69 Years', '70 - 79 Years', '80+ Years']
if grp_name == None:
grp_name = 'age_group'
palette = sns.color_palette('Set1', len(groups))
fig = plt.figure(figsize=(10, 6))
fig.subplots_adjust(right=0.97, left=0.1, hspace=0.4, wspace=0.3, top=0.90, bottom=0.05)
fig.suptitle(x=0.5, y=0.999, t='Hospital LoS by ICU admission status ')
for c, grp in enumerate(groups):
if len(groups)==9:
ax = fig.add_subplot(3, 3, c + 1)
else:
ax = fig.add_subplot(4, 4, c + 1)
mdf = df[df[grp_name] == grp]
if truncate_at is not None:
mdf.loc[mdf[channel] > truncate_at, channel] = truncate_at
ax.hist(mdf[mdf[color_channel]=='Yes'][channel], bins=50, color=palette[0], label="ICU yes", alpha=0.6)
ax.hist(mdf[mdf[color_channel]=='No'][channel], bins=50, color=palette[1], label="ICU no", alpha=0.6)
ax.axvline(x=np.median(mdf[mdf[color_channel]=='Yes'][channel]), color=palette[0], linestyle='--')
ax.axvline(x=np.median(mdf[mdf[color_channel]=='No'][channel]), color=palette[1], linestyle='--')
ax.set(xlabel='', ylabel='Frequency')
ax.set_title(groups[c] ) #,fontweight="bold"
ax.legend()
plotname = f'{channel}_colorby_{color_channel}_by_{grp_name}'
if truncate_at is not None:
plotname = plotname +'_truncated'
plt.savefig(os.path.join(plot_path, f'{plotname}.png'))
plt.savefig(os.path.join(plot_path, 'pdf', f'{plotname}.pdf'), format='PDF')
return plt
if __name__ == '__main__':
"""Basic descriptive tables"""
plot_path = os.path.join(projectpath, 'Plots + Graphs','Age Model - MS')
df=load_data(remove_nas=True)
pd.crosstab(index=df['age_group'], columns='count')
LOS_descriptive_tables(channel='hosp_length',groupList=['age_group', 'death_yn'])
LOS_descriptive_tables(channel='hosp_length',groupList=['age_group', 'icu_yn'], sortByList=['icu_yn','age_group'])
df = df[df['hosp_length'] !=0 ]
LOS_descriptive_tables(groupList=['age_group', 'death_yn'])
LOS_descriptive_tables(groupList=['age_group', 'death_yn'], sortByList=['death_yn','age_group'],fname='_by_death_yn')
LOS_descriptive_tables(groupList=['age_group', 'icu_yn'], sortByList=['icu_yn','age_group'],fname='icu_yn')
    ## Same histogram, with colors by ICU_yn
plot_hist_by_grp_2(df, channel='hosp_length',color_channel = "icu_yn")
plot_hist_by_grp_2(df, channel='hosp_length',color_channel = "icu_yn", truncate_at=20)
"""Compare by region"""
df = load_data(remove_nas=True)
df = df.dropna(subset=["res_county"])
df = merge_county_covidregions(df_x=df, key_x='res_county', key_y='County')
pd.crosstab(index=df['covid_region'], columns='count')
LOS_descriptive_tables(channel='hosp_length',groupList=['covid_region', 'death_yn'])
LOS_descriptive_tables(channel='hosp_length',groupList=['covid_region', 'icu_yn'], sortByList=['icu_yn','covid_region'])
df = df[df['hosp_length'] !=0 ]
LOS_descriptive_tables(groupList=['covid_region', 'death_yn'])
LOS_descriptive_tables(groupList=['covid_region', 'death_yn'], sortByList=['death_yn','covid_region'],fname='_by_death_yn')
LOS_descriptive_tables(groupList=['covid_region', 'icu_yn'], sortByList=['icu_yn','covid_region'],fname='icu_yn')
plot_hist_by_grp(df=df, grp_name='covid_region', groups=list(range(1,12)))
plot_hist_by_grp_2(df=df, grp_name='covid_region', groups=list(range(1,12)))
```
#### File: covid-chicago/data_processing/ems_populations.py
```python
import os
import pandas as pd
import geopandas as gpd
from shapely.geometry import mapping, Point, Polygon
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.append('../')
from load_paths import load_box_paths
datapath, projectpath, wdir,exe_dir, git_dir = load_box_paths()
datapath = os.path.join(datapath, 'covid_IDPH')
census_path = os.path.join(datapath, 'census')
shp_path = os.path.join(datapath, 'shapefiles')
pop_path = os.path.join(datapath, 'rasters', 'IL_2018_synpop')
def count_total_pop_for_ems(df, ems_poly, census_tract_shp) :
census_tract_shp['in_ems'] = census_tract_shp['geometry'].apply(lambda x: x.intersects(ems_poly))
tract_ids = census_tract_shp[census_tract_shp['in_ems']]['GEOID'].values
sdf = df[df['tract_fips'].isin(tract_ids)]
return np.sum(sdf['size']) + len(sdf)
def pop_by_ems(ems_shp, census_tract_shp) :
df = pd.read_csv(os.path.join(pop_path, 'IL2018_Households'))
pop_df = pd.DataFrame( { 'covid_region' : ems_shp['new_restor'],
'population' : [count_total_pop_for_ems(df, x, census_tract_shp) for x in ems_shp['geometry']]})
pop_df.to_csv(os.path.join(datapath, 'EMS Population', 'covid_region_population_from_RTI.csv'), index=False)
print(np.sum(pop_df['population']))
def pop_age_structure_by_ems(ems_shp, census_tract_shp, agebins, output_fname) :
right_edges = [x-1 for x in agebins[1:]]
right_edges[-1] = 100
colnames = ['%dto%d' % (x,y) for x,y in zip(agebins[:-1], right_edges)]
hh_df = pd.read_csv(os.path.join(pop_path, 'IL2018_Households'))
person_df = pd.read_csv(os.path.join(pop_path, 'IL2018_Persons'))
adf = pd.DataFrame()
for ems, ems_poly in zip(ems_shp['REGION'], ems_shp['geometry']) :
census_tract_shp['in_ems'] = census_tract_shp['geometry'].apply(lambda x: x.intersects(ems_poly))
tract_ids = census_tract_shp[census_tract_shp['in_ems']]['GEOID'].values
hh_ids = hh_df[hh_df['tract_fips'].isin(tract_ids)]['hh_id'].values
ages = person_df[person_df['hh_id'].isin(hh_ids)]['agep'].values
hist, bins = np.histogram(ages, bins=agebins)
sdf = pd.DataFrame( { binname : [val] for binname, val in zip(colnames, hist)})
sdf['ems'] = ems
adf = pd.concat([adf, sdf])
adf.to_csv(output_fname, index=False)
def ems_pop_structure() :
ems_shp = gpd.read_file(os.path.join(shp_path, 'EMS_Regions', 'EMS_Regions.shp'))
census_tract_shp = gpd.read_file(os.path.join(shp_path, 'tl_2019_17_tract', 'tl_2019_17_tract.shp'))
# pop_by_ems(ems_shp, census_tract_shp)
# agebins = [0, 20, 40, 60, 200]
agebins = [0, 20, 30, 40, 50, 60, 70, 80, 200]
output_fname = os.path.join(datapath, 'EMS Population', 'EMS_population_from_RTI_by_age_8grpLL.csv')
pop_age_structure_by_ems(ems_shp, census_tract_shp, agebins, output_fname)
def ems_proportional_pop_by_county() :
ems_pop_structure()
ems_shp = gpd.read_file(os.path.join(shp_path, 'EMS_Regions', 'EMS_Regions.shp'))
county_shp = gpd.read_file(os.path.join(shp_path, 'IL_BNDY_County', 'IL_BNDY_County_Py.shp'))
census_tract_shp = gpd.read_file(os.path.join(shp_path, 'tl_2019_17_tract', 'tl_2019_17_tract.shp'))
df = pd.read_csv(os.path.join(pop_path, 'IL2018_Households'))
def list_in_ems(small_area_shp, small_area_col, ems_poly):
small_area_shp['in_ems'] = small_area_shp['geometry'].apply(lambda x: x.intersects(ems_poly))
return small_area_shp[small_area_shp['in_ems']][small_area_col].values
ems_county_df = pd.DataFrame()
for ems, ems_poly in zip(ems_shp['REGION'], ems_shp['geometry']) :
counties = list_in_ems(county_shp, 'COUNTY_NAM', ems_poly)
county_shp_df = county_shp[county_shp['COUNTY_NAM'].isin(counties)]
pops = []
for county, county_poly in zip(county_shp_df['COUNTY_NAM'], county_shp_df['geometry']) :
poly = county_poly.intersection(ems_poly)
pops.append(count_total_pop_for_ems(df, poly, census_tract_shp))
cdf = pd.DataFrame({ 'county' : counties,
'pop in ems' : pops})
cdf['EMS'] = int(ems)
ems_county_df = pd.concat([ems_county_df, cdf])
ems_county_df.to_csv(os.path.join(datapath, 'EMS Population', 'EMS_population_by_county.csv'), index=False)
ems_df = pd.read_csv(os.path.join(datapath, 'EMS Population', 'EMS_population_from_RTI.csv'))
ems_df = ems_df.rename(columns={'population' : 'EMS population'})
ems_county_df = pd.merge(left=ems_county_df, right=ems_df, on='EMS', how='left')
ems_county_df['share_of_ems_pop'] = ems_county_df['pop in ems']/ems_county_df['EMS population']
ems_county_df.to_csv(os.path.join(datapath, 'EMS Population', 'EMS_population_by_county.csv'), index=False)
def ems_race_and_ethnicity_structure() :
ems_shp = gpd.read_file(os.path.join(shp_path, 'EMS_Regions', 'EMS_Regions.shp'))
cbg_shp = gpd.read_file(os.path.join(shp_path, 'tl_2016_17_bg', 'tl_2016_17_bg.shp'))
race_df = pd.read_csv(os.path.join(census_path, 'data', 'cbg_b02.csv'))
keep_race_cols = [x for x in race_df.columns.values if '1e' in x]
eth_df = pd.read_csv(os.path.join(census_path, 'data', 'cbg_b03.csv'))
keep_eth_cols = [x for x in eth_df.columns.values if '2e' in x]
ems_race_df = pd.DataFrame()
ems_eth_df = pd.DataFrame()
for ems, ems_poly in zip(ems_shp['REGION'], ems_shp['geometry']) :
cbg_shp['in_ems'] = cbg_shp['geometry'].apply(lambda x: x.intersects(ems_poly))
tract_ids = cbg_shp[cbg_shp['in_ems']]['GEOID'].values
rdf = race_df[race_df['census_block_group'].isin(tract_ids)]
rdf['ems'] = ems
sdf = rdf.groupby('ems').agg(np.sum).reset_index()
sdf = sdf[['ems'] + keep_race_cols]
ems_race_df = pd.concat([ems_race_df, sdf])
rdf = eth_df[eth_df['census_block_group'].isin(tract_ids)]
rdf['ems'] = ems
sdf = rdf.groupby('ems').agg(np.sum).reset_index()
sdf = sdf[['ems'] + keep_eth_cols]
ems_eth_df = pd.concat([ems_eth_df, sdf])
colname_df = pd.read_csv(os.path.join(census_path, 'metadata', 'cbg_field_descriptions.csv'))
colname_df = colname_df.set_index('table_id')
ems_race_df = ems_race_df.rename(columns={
col : colname_df.at[col, 'field_full_name'] for col in keep_race_cols
})
ems_eth_df = ems_eth_df.rename(columns={
col : colname_df.at[col, 'field_full_name'] for col in keep_eth_cols
})
race_output_fname = os.path.join(datapath, 'EMS Population', 'EMS_population_by_race_cbg.csv')
eth_output_fname = os.path.join(datapath, 'EMS Population', 'EMS_population_by_eth_cbg.csv')
ems_race_df.to_csv(race_output_fname, index=False)
ems_eth_df.to_csv(eth_output_fname, index=False)
if __name__ == '__main__' :
# ems_race_and_ethnicity_structure()
# ems_pop_structure()
regions_shp = gpd.read_file(os.path.join(shp_path, 'covid_regions', 'covid_regions.shp'))
census_tract_shp = gpd.read_file(os.path.join(shp_path, 'tl_2019_17_tract', 'tl_2019_17_tract.shp'))
pop_by_ems(regions_shp, census_tract_shp)
```
#### File: covid-chicago/.ipynb_checkpoints/simplemodel_runMultiple-checkpoint.py
```python
import os
import subprocess
## directories
user_path = os.path.expanduser('~')
exe_dir = os.path.join(user_path, 'Box/NU-malaria-team/projects/binaries/compartments/')
if "mrung" in user_path : git_dir = os.path.join(user_path, 'gitrepos/covid-chicago/')
# Selected range values from SEIR Parameter Estimates.xlsx
# Need to update to run for sample distributions, rather than discrete values
initial_infect = [1,5,10]
Ki = [0.0009, 0.05, 0.32]
incubation_pd = [6.63, 4.2, 12.4]
recovery_rate = [6,13, 16 ]
def runExp_singleParamChange(param, paramname ) :
# param = Ki
# paramname = "Ki"
for i in enumerate(param) :
print(i)
fin = open("simplemodel_covid.emodl", "rt")
data = fin.read()
if(paramname == "initial_infect") : data = data.replace('(species I 10)', '(species I ' + str(i[1]) +')')
if (paramname == "Ki") : data = data.replace('(param Ki 0.319)', '(param Ki ' + str(i[1]) +')')
if (paramname == "incubation_pd") : data = data.replace('(param incubation_pd 6.63)', '(param incubation_pd ' + str(i[1]) +')')
if (paramname == "recovery_rate") :data = data.replace('(param recovery_rate 16)', '(param recovery_rate ' + str(i[1]) +')')
fin.close()
fin = open("simplemodel_covid_i.emodl", "wt")
fin.write(data)
fin.close()
# adjust simplemodel.cfg file as well
fin = open("simplemodel.cfg", "rt")
data_cfg = fin.read()
data_cfg = data_cfg.replace('trajectories', 'trajectories_' + paramname + '_' + str(i[1]) )
fin.close()
fin = open("simplemodel_i.cfg", "wt")
fin.write(data_cfg)
fin.close()
file = open('runModel_i.bat', 'w')
file.write('\n"' + os.path.join(exe_dir, "compartments.exe") + '"' + ' -c ' + '"' + os.path.join(git_dir, "simplemodel_i.cfg") +
'"' + ' -m ' + '"' + os.path.join( git_dir, "simplemodel_covid_i.emodl", ) + '"')
file.close()
subprocess.call([r'runModel_i.bat'])
runExp_singleParamChange(initial_infect,"initial_infect" )
runExp_singleParamChange(Ki ,"Ki " )
runExp_singleParamChange(incubation_pd,"incubation_pd" )
runExp_singleParamChange(recovery_rate,"recovery_rate" )
```
#### File: covid-chicago/plotters/data_comparison_spatial_2.py
```python
import argparse
import os
import pandas as pd
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import sys
sys.path.append('../')
from load_paths import load_box_paths
import matplotlib.dates as mdates
import seaborn as sns
from processing_helpers import *
mpl.rcParams['pdf.fonttype'] = 42
def parse_args():
description = "Simulation run for modeling Covid-19"
parser = argparse.ArgumentParser(description=description)
parser.add_argument(
"-e",
"--exp_names",
type=str,
nargs='+',
help="Experiment names to compare, example python data_comparison_spatial_2.py -e exp_name1 exp_name2"
)
parser.add_argument(
"-l",
"--Location",
type=str,
help="Local or NUCLUSTER",
default="Local"
)
return parser.parse_args()
def plot_sim_and_ref(exp_names, ems_nr, first_day, last_day, ymax=10000, logscale=False):
if ems_nr == 0:
region_suffix = "_All"
region_label = 'Illinois'
else:
region_suffix = "_EMS-" + str(ems_nr)
region_label = region_suffix.replace('_EMS-', 'COVID-19 Region ')
outcome_channels, channels, data_channel_names, titles = get_datacomparison_channels()
ref_df = load_ref_df(ems_nr)
fig = plt.figure(figsize=(16, 8))
fig.subplots_adjust(right=0.97, wspace=0.5, left=0.1, hspace=0.9, top=0.95, bottom=0.07)
palette = sns.color_palette('tab10', len(exp_names))
axes = [fig.add_subplot(2, 3, x + 1) for x in range(len(channels))]
for c, channel in enumerate(channels):
ax = axes[c]
for d, exp_name in enumerate(exp_names):
column_list = ['time', 'startdate', 'scen_num', 'sample_num', 'run_num']
for chn in outcome_channels:
column_list.append(chn + "_EMS-" + str(ems_nr))
df = load_sim_data(exp_name, region_suffix, column_list=column_list)
df = df[df['date'].between(first_day, last_day)]
df['critical_with_suspected'] = df['critical']
exp_name_label = str(exp_name.split('_')[-1])
mdf = df.groupby('date')[channel].agg([CI_50, CI_2pt5, CI_97pt5, CI_25, CI_75]).reset_index()
ax.plot(mdf['date'], mdf['CI_50'], color=palette[d], label=exp_name_label)
ax.fill_between(mdf['date'], mdf['CI_2pt5'], mdf['CI_97pt5'],
color=palette[d], linewidth=0, alpha=0.1)
ax.fill_between(mdf['date'], mdf['CI_25'], mdf['CI_75'],
color=palette[d], linewidth=0, alpha=0.3)
ax.grid(b=True, which='major', color='#999999', linestyle='-', alpha=0.3)
ax.set_title(titles[c], y=0.8, fontsize=12)
axes[-1].legend()
ax.xaxis.set_major_formatter(mdates.DateFormatter('%b\n%y'))
ax.set_xlim(first_day, last_day)
if logscale:
ax.set_ylim(0.1, ymax)
ax.set_yscale('log')
ax.plot(ref_df['date'], ref_df[data_channel_names[c]], 'o', color='#303030', linewidth=0, ms=1)
ax.plot(ref_df['date'], ref_df[data_channel_names[c]].rolling(window=7, center=True).mean(), c='k', alpha=1.0)
plt.suptitle(region_label, y=1, fontsize=14)
plt.tight_layout()
plt.subplots_adjust(top=0.88)
plot_name = f'compare_to_data_{ems_nr}'
if logscale == False:
plot_name = plot_name + "_nolog"
if not os.path.exists(plot_path):
os.makedirs(plot_path)
if not os.path.exists(os.path.join(plot_path, 'pdf')):
os.makedirs(os.path.join(plot_path, 'pdf'))
plt.savefig(os.path.join(plot_path, plot_name + '.png'))
plt.savefig(os.path.join(plot_path, 'pdf', plot_name + '.pdf'))
if __name__ == '__main__':
args = parse_args()
Location = args.Location
    exp_names = args.exp_names
    if exp_names is None:
        exp_names = ['20210120_IL_ae_base_v1_baseline', '20210122_IL_quest_ki13']
datapath, projectpath, wdir, exe_dir, git_dir = load_box_paths(Location=Location)
first_plot_day = pd.Timestamp('2020-02-13')
last_plot_day = pd.Timestamp.today() + pd.Timedelta(15,'days')
plot_path = os.path.join(wdir, 'simulation_output', exp_names[len(exp_names) - 1], '_plots')
"""Get group names"""
grp_list, grp_suffix, grp_numbers = get_group_names(exp_path=os.path.join(wdir, 'simulation_output',exp_names[0]))
for grp_nr in grp_numbers:
print("Start processing region " + str(grp_nr))
#plot_sim_and_ref(exp_names, ems_nr=grp_nr, first_day=first_plot_day,
# last_day=last_plot_day, logscale=True)
plot_sim_and_ref(exp_names, ems_nr=grp_nr, first_day=first_plot_day,
last_day=last_plot_day, logscale=False)
```
#### File: covid-chicago/plotters/data_comparison_spatial_3.py
```python
import argparse
import os
import pandas as pd
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import sys
sys.path.append('../')
from load_paths import load_box_paths
import matplotlib.dates as mdates
import seaborn as sns
from processing_helpers import *
mpl.rcParams['pdf.fonttype'] = 42
def parse_args():
description = "Simulation run for modeling Covid-19"
parser = argparse.ArgumentParser(description=description)
parser.add_argument(
"-stem",
"--stem",
type=str,
help="Name of simulation experiment"
)
parser.add_argument(
"-loc",
"--Location",
type=str,
help="Local or NUCLUSTER",
default="Local"
)
return parser.parse_args()
def plot_sim_and_ref(df, ems_nr, ref_df, channels, data_channel_names,region_label,
titles, param, first_day, last_day, ymax=10000, logscale=False):
fig = plt.figure(figsize=(15, 8))
palette = sns.color_palette('husl', 12)
for c, channel in enumerate(channels):
ax = fig.add_subplot(2, 3, c + 1)
mdf = df.groupby(['date',param])[channel].agg([CI_50, CI_2pt5, CI_97pt5, CI_25, CI_75]).reset_index()
for i, rtc in enumerate(mdf[param].unique()):
mdf_sub = mdf[mdf[param] == rtc]
ax.plot(mdf_sub['date'], mdf_sub['CI_50'], color=palette[i], label=rtc)
ax.fill_between(mdf_sub['date'], mdf_sub['CI_2pt5'], mdf_sub['CI_97pt5'], color=palette[i], linewidth=0, alpha=0.2)
ax.set_title(titles[c], y=0.8, fontsize=12)
#ax.legend()
ax.xaxis.set_major_formatter(mdates.DateFormatter('%b\n%y'))
ax.set_xlim(first_day, last_day)
ax.grid(b=True, which='major', color='#999999', linestyle='-', alpha=0.3)
if logscale:
ax.set_ylim(0.1, ymax)
ax.set_yscale('log')
ax.plot(ref_df['date'], ref_df[data_channel_names[c]], 'o', color='#303030', linewidth=0, ms=1)
ax.plot(ref_df['date'], ref_df[data_channel_names[c]].rolling(window=7, center=True).mean(), c='k', alpha=1.0)
if c == len(channels)-1:
ax.legend()
fig.suptitle(region_label, y=1, fontsize=14)
fig.tight_layout()
fig.subplots_adjust(top=0.88)
plot_name = 'compare_to_data_covidregion_' + str(ems_nr)
if logscale == False:
plot_name = plot_name + "_nolog"
plt.savefig(os.path.join(plot_path, plot_name + '.png'))
plt.savefig(os.path.join(plot_path, 'pdf', plot_name + '.pdf'), format='PDF')
def compare_ems(exp_name, param, ems_nr,first_day,last_day):
    if ems_nr == 0:
        region_suffix = "_All"
        region_suffix2 = ""  # assumption: the statewide parameter in sampled_parameters.csv carries no regional suffix
        region_label = 'Illinois'
    else:
        region_suffix = "_EMS-" + str(ems_nr)
        region_suffix2 = "_EMS_" + str(ems_nr)
        region_label = region_suffix.replace('_EMS-', 'COVID-19 Region ')
column_list = ['time', 'startdate', 'scen_num', 'sample_num', 'run_num']
outcome_channels, channels, data_channel_names, titles = get_datacomparison_channels()
ref_df = load_ref_df(ems_nr)
for channel in outcome_channels:
column_list.append(channel + region_suffix)
if "ki" in param.lower() :
param = param + region_suffix2
df = load_sim_data(exp_name, region_suffix=region_suffix, column_list=column_list)
df = df[df['date'].between(first_day, last_day)]
df['critical_with_suspected'] = df['critical']
sampled_df = pd.read_csv(os.path.join(wdir, 'simulation_output', exp_name, "sampled_parameters.csv"), usecols=['scen_num', param])
df = pd.merge(how='left', left=df, left_on='scen_num', right=sampled_df, right_on='scen_num')
plot_sim_and_ref(df, ems_nr, ref_df, channels=channels, data_channel_names=data_channel_names, titles=titles,
region_label=region_label, param=param,first_day=first_day,last_day=last_day)
if __name__ == '__main__':
args = parse_args()
Location = args.Location
datapath, projectpath, wdir, exe_dir, git_dir = load_box_paths(Location=Location)
first_plot_day = pd.Timestamp('2020-02-13')
last_plot_day = pd.Timestamp.today()+ pd.Timedelta(15,'days')
stem = args.stem
exp_names = [x for x in os.listdir(os.path.join(wdir, 'simulation_output')) if stem in x]
for exp_name in exp_names:
sim_output_path = os.path.join(wdir, 'simulation_output',exp_name)
plot_path = os.path.join(sim_output_path, '_plots')
"""Get group names"""
grp_list, grp_suffix, grp_numbers = get_group_names(exp_path=sim_output_path)
for grp_nr in grp_numbers:
print("Start processing region " + str(grp_nr))
compare_ems(exp_name, ems_nr=int(grp_nr), param="Ki",
first_day=first_plot_day, last_day=last_plot_day)
```
#### File: covid-chicago/plotters/overflow_numbers.py
```python
import os
import sys
sys.path.append('../')
from load_paths import load_box_paths
from processing_helpers import *
import pandas as pd
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import matplotlib.dates as mdates
from datetime import date  # used below when formatting the per-panel date titles
# sns.set(color_codes=True)
mpl.rcParams['pdf.fonttype'] = 42
sns.set_style('whitegrid', {'axes.linewidth': 0.5})
datapath, projectpath, wdir, exe_dir, git_dir = load_box_paths()
def load_sim_data(exp_name, input_wdir=None, input_sim_output_path=None):
input_wdir = input_wdir or wdir
sim_output_path_base = os.path.join(input_wdir, 'simulation_output', exp_name)
sim_output_path = input_sim_output_path or sim_output_path_base
df = pd.read_csv(os.path.join(sim_output_path, f'nu_{str(exp_name[:8])}.csv'))
return df
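# Pick the most recent hospital capacity thresholds file based on the date in its filename.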
def get_latest_filedate(file_path=os.path.join(datapath, 'covid_IDPH', 'Corona virus reports',
'hospital_capacity_thresholds'), extraThresholds=False):
files = os.listdir(file_path)
files = sorted(files, key=len)
    if not extraThresholds:
        files = [name for name in files if 'extra_thresholds' not in name]
    else:
        files = [name for name in files if 'extra_thresholds' in name]
filedates = [item.replace('capacity_weekday_average_', '') for item in files]
filedates = [item.replace('.csv', '') for item in filedates]
latest_filedate = max([int(x) for x in filedates])
fname = f'capacity_weekday_average_{latest_filedate}.csv'
    if extraThresholds:
fname = f'capacity_weekday_average_{latest_filedate}__extra_thresholds.csv'
return fname
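# One bar chart per date window: beds remaining per COVID region (median, optionally with lower/upper error bars).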
def get_plot(selected_resource_type='hb_availforcovid', errorbars=True):
#from plotnine import ggplot, geom_point, aes, stat_smooth, facet_wrap
fig = plt.figure(figsize=(10, 10))
fig.tight_layout()
selected_resource_type_label ="Number of available ICU beds for COVID-19 patients"
if selected_resource_type == 'hb_availforcovid':
selected_resource_type_label ="Number of available hospital beds for COVID-19 patients"
fig.suptitle(selected_resource_type_label, y=1, fontsize=14)
fig.subplots_adjust(top=0.88)
fig.subplots_adjust(right=0.97, wspace=0.5, left=0.1, hspace=0.9, top=0.95, bottom=0.07)
#palette = sns.color_palette('Set1', 11)
axes = [fig.add_subplot(4, 2, x + 1) for x in range(len(civis_template['date_window_upper_bound'].unique()))]
for c, upper_limit in enumerate(civis_template['date_window_upper_bound'].unique()):
mdf = civis_template
mdf = mdf[(mdf['date_window_upper_bound'] == upper_limit)].reset_index()
mdf = mdf[(mdf['resource_type'] == selected_resource_type)].reset_index()
mdf['region'] = mdf['geography_modeled'].replace(regex=r'covidregion_', value=' ')
mdf['myerr'] = mdf['number_that_exceed_upper'] - mdf['number_that_exceed_lower']
mdf_1 = mdf[(mdf['overflow_threshold_percent'] == 1.00)]
mdf_1b = mdf_1[(mdf_1['number_that_exceed_median'] < 0)]
mdf_2 = mdf[(mdf['overflow_threshold_percent'] == 0.75)]
mdf_2b = mdf_2[(mdf_2['number_that_exceed_median'] < 0)]
ax = axes[c]
upper_limit = pd.to_datetime(upper_limit)
upper_limit = date(upper_limit.year ,upper_limit.month,upper_limit.day)
ax.set_title(upper_limit, y=0.85)
ax.set_xlabel('Covid regions')
ax.set_ylabel('Number of beds available')
ax.axhline(y=0, xmin=0, xmax=12, linewidth=0.8, color='black')
#ax.bar(mdf_1['region'], mdf_1['number_that_exceed_median'], 1, label='1')
if errorbars:
ax.bar(mdf_2['region'], mdf_2['number_that_exceed_median'], 1, yerr=mdf_2['myerr'], label='0.75',
linewidth=1)
ax.bar(mdf_2b['region'], mdf_2b['number_that_exceed_median'], 1, color='red', yerr=mdf_2b['myerr'],
label='0.75', linewidth=1)
plotname = f'covidregion_overflow_numbers_{selected_resource_type}'
else:
ax.bar(mdf_2['region'], mdf_2['number_that_exceed_median'], 1, label='0.75', linewidth=1)
ax.bar(mdf_2b['region'], mdf_2b['number_that_exceed_median'], 1,color='red', label='0.75', linewidth=1)
plotname = f'covidregion_overflow_numbers_{selected_resource_type}_noerrorbars'
fig.tight_layout()
exp_dir = os.path.join(wdir, 'simulation_output', exp_name)
fig.savefig(os.path.join(exp_dir, '_plots',f'{plotname}.png'))
fig.savefig(os.path.join(exp_dir, '_plots', 'pdf', f'{plotname}.pdf'))
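# Fill the capacity template with beds remaining (threshold minus projected census) per region and date window, then save it next to the simulation output.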
def get_numbers(exp_name, load_template=False):
trajectories = load_sim_data(exp_name)
if load_template:
fname = get_latest_filedate()
civis_template = pd.read_csv(
os.path.join(datapath, 'covid_IDPH', 'Corona virus reports', 'hospital_capacity_thresholds', fname))
civis_template = civis_template.drop_duplicates()
else:
civis_template = pd.read_csv(os.path.join(wdir, 'simulation_output', exp_name,
f'nu_hospitaloverflow_{str(exp_name[:8])}.csv'))
civis_template['date_window_upper_bound'] = pd.to_datetime(civis_template['date_window_upper_bound'])
    # Initialize the columns that are filled in row by row below.
    civis_template['number_that_exceed_median'] = ''
    civis_template['number_that_exceed_lower'] = ''
    civis_template['number_that_exceed_upper'] = ''
trajectories['total_hosp_census_lower'] = trajectories['hosp_bed_lower'] + trajectories['icu_lower']
trajectories['total_hosp_census_median'] = trajectories['hosp_bed_median'] + trajectories['icu_median']
trajectories['total_hosp_census_upper'] = trajectories['hosp_bed_upper'] + trajectories['icu_upper']
trajectories['date'] = pd.to_datetime(trajectories['date'])
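    # For each template row, look up the projected census on the window's end date and compute the remaining capacity.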
for index, row in civis_template.iterrows():
upper_limit = pd.to_datetime(row['date_window_upper_bound'])
if row['resource_type'] == 'hb_availforcovid':
metric_root = 'total_hosp_census'
elif row['resource_type'] == 'icu_availforcovid':
metric_root = 'icu'
thresh = row['avg_resource_available']
region = str(row['geography_modeled'])
new = trajectories[(trajectories['date'] == upper_limit)].reset_index()
new = new[(new['geography_modeled'] == region)].reset_index()
civis_template.loc[index, 'number_that_exceed_median'] = thresh - int(new[f'{metric_root}_median'])
civis_template.loc[index, 'number_that_exceed_lower'] = thresh - int(new[f'{metric_root}_lower'])
civis_template.loc[index, 'number_that_exceed_upper'] = thresh- int(new[f'{metric_root}_upper'])
#civis_template['scenario_name'] = trajectories['scenario_name'].unique()
civis_template.to_csv(os.path.join(wdir, 'simulation_output', exp_name,
f'nu_hospitaloverflow_{str(exp_name[:8])}.csv'), index=False)
return civis_template
if __name__ == '__main__':
stem = sys.argv[1]
exp_names = [x for x in os.listdir(os.path.join(wdir, 'simulation_output')) if stem in x]
for exp_name in exp_names:
civis_template = get_numbers(exp_name)
get_plot(selected_resource_type='icu_availforcovid')
get_plot(selected_resource_type='hb_availforcovid')
get_plot(selected_resource_type='icu_availforcovid', errorbars=False)
get_plot(selected_resource_type='hb_availforcovid', errorbars=False)
```
#### File: covid-chicago/tests/test_runScenarios.py
```python
from datetime import datetime
from functools import partial
import yaml
import yamlordereddictloader
import pandas as pd
import pytest
from runScenarios import add_config_parameter_column
import runScenarios as rs
yaml_load = partial(yaml.load, Loader=yamlordereddictloader.Loader)
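# A small DataFrame of sample numbers shared by the add_config_parameter_column tests.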
@pytest.fixture
def original_df():
return pd.DataFrame({'sample_number': [1, 2, 3, 4, 5]})
def test_add_config_parameter_column__int(original_df):
new_df = add_config_parameter_column(original_df, "new_column", 10)
correct_df = pd.DataFrame({
'sample_number': [1, 2, 3, 4, 5],
'new_column': [10]*5})
pd.testing.assert_frame_equal(new_df, correct_df)
def test_add_config_parameter_column__matrix(original_df):
f = {'matrix': [[9, 8], [7, 6]]}
new_df = add_config_parameter_column(original_df, "new_column", f)
assert new_df.shape == (5, 5)
correct_df = pd.DataFrame({
'sample_number': [1, 2, 3, 4, 5],
'new_column1_1': [9]*5,
'new_column1_2': [8]*5,
'new_column2_1': [7]*5,
'new_column2_2': [6]*5,
})
pd.testing.assert_frame_equal(new_df, correct_df)
def test_add_config_parameter_column__random_uniform(original_df):
f = {'np.random': 'uniform', 'function_kwargs': {'low': 5, 'high': 6}}
new_df = add_config_parameter_column(original_df, "new_column", f)
assert new_df.shape == (5, 2)
assert "new_column" in new_df.columns
assert all((new_df["new_column"] >= 5) & (new_df["new_column"] <= 6))
def test_add_config_parameter_column__datetotimestep():
df = pd.DataFrame({'sample_number': [1, 2, 3, 4, 5],
'startdate': [datetime(2020, 2, 20)]*5})
f = {'custom_function': 'DateToTimestep',
'function_kwargs': {'dates': datetime(2020, 3, 1), 'startdate_col': 'startdate'}}
new_df = add_config_parameter_column(df, "new_column", f)
correct_df = pd.DataFrame({
'sample_number': [1, 2, 3, 4, 5],
'startdate': [datetime(2020, 2, 20)]*5,
'new_column': [10]*5})
pd.testing.assert_frame_equal(new_df, correct_df)
def test_add_config_parameter_column__subtract():
df = pd.DataFrame({'sample_number': [1, 2, 3, 4, 5],
'col1': [2, 4, 6, 8, 10],
'col2': [1, 3, 5, 7, 9]})
f = {'custom_function': 'subtract',
'function_kwargs': {'x1': 'col1', 'x2': 'col2'}}
new_df = add_config_parameter_column(df, "new_column", f)
correct_df = pd.DataFrame({
'sample_number': [1, 2, 3, 4, 5],
'col1': [2, 4, 6, 8, 10],
'col2': [1, 3, 5, 7, 9],
'new_column': [1]*5})
pd.testing.assert_frame_equal(new_df, correct_df)
def test_add_config_parameter_column__error():
f = {'weird_function': {}}
with pytest.raises(ValueError, match="Unknown type of parameter"):
add_config_parameter_column(pd.DataFrame, "new_column", f)
@pytest.mark.parametrize("region, expected", [("EMS_11", 1), ("EMS_10", 2)])
def test_add_sampled_parameters_regions(region, expected):
# Test that we correctly add sampled parameters by region, including
# a default for regions not otherwise specified.
config = """
sampled_parameters:
myparam:
EMS_11:
np.random: choice
function_kwargs: {'a': [1]}
np.random: choice
function_kwargs: {'a': [2]}
"""
df_in = pd.DataFrame({'sample_num': [1, 2, 3]})
df_exp = df_in.assign(myparam=len(df_in) * [expected])
df_out = rs.add_parameters(df_in, "sampled_parameters", yaml_load(config), region, None)
pd.testing.assert_frame_equal(df_out, df_exp)
def test_add_sampled_parameters_expand_age():
config = """
sampled_parameters:
myparam:
expand_by_age: True
np.random: choice
function_kwargs:
- {'a': [1]}
- {'a': [2]}
"""
df_in = pd.DataFrame({'sample_num': [1, 2]})
df_exp = df_in.assign(myparam_42=[1, 1], myparam_113=[2, 2])
df_out = rs.add_parameters(df_in, "sampled_parameters", yaml_load(config), None, ['42', '113'])
pd.testing.assert_frame_equal(df_out, df_exp)
def test_add_sampled_parameters_expand_age_same_value():
# "Expand" age parameters even if everything has the same value
config = """
sampled_parameters:
myparam:
expand_by_age: True
np.random: choice
function_kwargs: {'a': [1]}
"""
df_in = pd.DataFrame({'sample_num': [1, 2]})
df_exp = df_in.assign(myparam_42=[1, 1], myparam_113=[1, 1])
df_out = rs.add_parameters(df_in, "sampled_parameters", yaml_load(config), None, ['42', '113'])
pd.testing.assert_frame_equal(df_out, df_exp)
def test_add_sampled_parameters_expand_age_with_defaults():
# Verify that you can provide a "default" for all ages, and set a specific
# parameter later.
config = """
sampled_parameters:
myparam:
expand_by_age: True
np.random: choice
function_kwargs: {'a': [1]}
myparam_0:
np.random: choice
function_kwargs: {'a': [2]}
"""
df_in = pd.DataFrame({'sample_num': [1, 2]})
df_exp = df_in.assign(myparam_0=[2, 2], myparam_42=[1, 1], myparam_113=[1, 1])
df_out = rs.add_parameters(
df_in, "sampled_parameters", yaml_load(config), None, ['0', '42', '113'])
pd.testing.assert_frame_equal(df_out, df_exp)
def test_add_sampled_parameters_expand_age_error():
# We should get an error if the number of distributions doesn't match
# the number of age bins.
config = """
sampled_parameters:
myparam:
expand_by_age: True
np.random: choice
function_kwargs:
- {'a': [1]}
- {'a': [2]}
"""
df_in = pd.DataFrame({'sample_num': [1, 2]})
with pytest.raises(ValueError, match="function_kwargs for myparam have 2 entries"):
rs.add_parameters(df_in, "sampled_parameters", yaml_load(config), None, ['0', '42', '113'])
```
|
{
"source": "jgerhard/showqMD",
"score": 3
}
|
#### File: jgerhard/showqMD/physics.py
```python
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL import GLX
import pyopencl as cl
import sys
import numpy
class Particles(object):
def __init__(self, num, dt, *args, **kwargs):
self.clinit()
self.loadProgram("cornell.cl");
self.totaltime = 0.0
self.num = num
self.num_cl = numpy.uint32(num)
self.dt = numpy.float32(dt)
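    # Share the OpenGL position/color VBOs with OpenCL and allocate velocity plus double-buffer scratch arrays on the device.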
def loadData(self, pos_vbo, col_vbo, vel):
import pyopencl as cl
mf = cl.mem_flags
self.pos_vbo = pos_vbo
self.col_vbo = col_vbo
self.pos = pos_vbo.data
self.col = col_vbo.data
self.vel = vel
#Setup vertex buffer objects and share them with OpenCL as GLBuffers
self.pos_vbo.bind()
        # On some systems there is no single buffer but an array of buffers
#https://github.com/enjalot/adventures_in_opencl/commit/61bfd373478767249fe8a3aa77e7e36b22d453c4
try:
self.pos_cl = cl.GLBuffer(self.ctx, mf.READ_WRITE, int(self.pos_vbo.buffer))
self.col_cl = cl.GLBuffer(self.ctx, mf.READ_WRITE, int(self.col_vbo.buffer))
except AttributeError:
self.pos_cl = cl.GLBuffer(self.ctx, mf.READ_WRITE, int(self.pos_vbo.buffers[0]))
self.col_cl = cl.GLBuffer(self.ctx, mf.READ_WRITE, int(self.col_vbo.buffers[0]))
self.col_vbo.bind()
#pure OpenCL arrays
self.vel_cl = cl.Buffer(self.ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=vel)
self.pos_gen_cl = cl.Buffer(self.ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=self.pos)
self.vel_gen_cl = cl.Buffer(self.ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=self.vel)
self.queue.finish()
# set up the list of GL objects to share with opencl
self.gl_objects = [self.pos_cl, self.col_cl]
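    # Advance the system: acquire the GL buffers, run the nbody kernel twice per sub-interval (ping-ponging between the two buffer sets), then release them.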
def execute(self, sub_intervals):
cl.enqueue_acquire_gl_objects(self.queue, self.gl_objects)
global_size = (self.num,)
        local_size_threads = 33  # fallback work-group size
        for i in range(1, 64):  # pick the largest divisor of num below 64 as the work-group size
            if self.num % i == 0:
                local_size_threads = i
local_size = (local_size_threads,)
# pos_shared = cl.LocalMemory(4 * local_size_threads)
# col_shared = cl.LocalMemory(4 * local_size_threads)
kernelargs = (self.pos_cl,
self.vel_cl,
self.pos_gen_cl,
self.vel_gen_cl,
self.col_cl,
self.dt,
self.num_cl)
kernelargsT = (self.pos_gen_cl,
self.vel_gen_cl,
self.pos_cl,
self.vel_cl,
self.col_cl,
self.dt,
self.num_cl)
        for i in range(sub_intervals):  # range instead of xrange so this also runs under Python 3
self.program.nbody(self.queue, global_size, local_size, *(kernelargs))
self.program.nbody(self.queue, global_size, local_size, *(kernelargsT)) # change role of kernelargs to do double buffered calc
cl.enqueue_release_gl_objects(self.queue, self.gl_objects)
self.queue.finish()
self.totaltime += 2*self.dt
sys.stdout.write("\rT = {0} fm/c>".format(self.totaltime))
sys.stdout.flush()
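    # Create an OpenCL context and command queue that share buffers with the current OpenGL context.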
def clinit(self):
plats = cl.get_platforms()
from pyopencl.tools import get_gl_sharing_context_properties
self.ctx = cl.Context(properties=get_gl_sharing_context_properties(),
devices=[])
self.queue = cl.CommandQueue(self.ctx)
def loadProgram(self, filename):
#read in the OpenCL source file as a string
        with open(filename, 'r') as f:
            fstr = f.read()
#create the program
self.program = cl.Program(self.ctx, fstr).build()
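    # Draw the particles as blended GL points from the shared vertex/color buffers.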
def render(self):
glEnable(GL_POINT_SMOOTH)
glPointSize(2)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
#setup the VBOs
self.col_vbo.bind()
glColorPointer(4, GL_FLOAT, 0, self.col_vbo)
self.pos_vbo.bind()
glVertexPointer(4, GL_FLOAT, 0, self.pos_vbo)
glEnableClientState(GL_VERTEX_ARRAY)
glEnableClientState(GL_COLOR_ARRAY)
#draw the VBOs
glDrawArrays(GL_POINTS, 0, self.num)
glDisableClientState(GL_COLOR_ARRAY)
glDisableClientState(GL_VERTEX_ARRAY)
glDisable(GL_BLEND)
```
|