id (stringlengths 2-8) | text (stringlengths 16-264k) | dataset_id (stringclasses 1 value)
---|---|---|
1830256
|
<gh_stars>1-10
# matrix_builder.py
# builds euclidean distance matrices
from __future__ import division
from custom import build_distance_matrix
import numpy
import pickle
import time
import sys
if __name__ == "__main__":
try:
size = int(sys.argv[1])
except IndexError:
print("no size given, defaulting to 10")
size = 10
t_start = time.time()
distance_matrix = build_distance_matrix(size)
with open("data/matrix"+str(size)+".p", "wb") as matrix_file:
pickle.dump(distance_matrix, matrix_file)
print("Matrix built\ntime taken "+str(round(time.time()-t_start,1))+"s")
|
StarcoderdataPython
|
11346957
|
<reponame>swkim01/gps
#!/usr/bin/python3
from flask import Flask, render_template
import json
app = Flask(__name__, template_folder=".", static_url_path='')
try:
    import gps
    has_gps_module = True
except ImportError:
    has_gps_module = False
if has_gps_module:
import gpsreceiver
gpsr = gpsreceiver.GpsReceiver()
gpsr.daemon = True
gpsr.start()
else:
gLocation={"lat":35.180, "lon":129.076, "alt":0.0, "speed":0.0}
# respond with the GPS location as JSON
@app.route('/getLocation')
def get_location():
if has_gps_module:
return json.dumps(gpsr.getLocation())
else:
global gLocation
return json.dumps(gLocation)
@app.route('/')
def do_route():
return render_template("index.html")
@app.route('/<filename>')
def do_file(filename):
return render_template(filename)
@app.route('/javascript/<filename>')
def do_js(filename):
return render_template("./javascript/"+filename)
if __name__ == '__main__':
app.run(host='<host IP>', port=8008)
#app.run(host='localhost', port=8008)
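# A minimal client sketch (assumption: the server is reachable on localhost:8008,
# i.e. app.run was started with the commented-out localhost line above):
# import urllib.request, json
# with urllib.request.urlopen("http://localhost:8008/getLocation") as resp:
#     print(json.loads(resp.read().decode("utf-8")))  # {"lat": ..., "lon": ..., ...}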
|
StarcoderdataPython
|
12837809
|
import urllib, json
import urllib.request,os
import socket
import random
import re
socket.setdefaulttimeout(50)
#https://www.hide-my-ip.com/fr/proxylist.shtml
# .*(\[\{.*\]).*
KEY="A00239D3-45F6-4A0A-810C-54A347F144C2"
KEY="<KEY>"
KEY="35d28185-0ca1-47f0-8caf-edc457802c9d"
KEY="B36714DE794D0080A183B5A12BEAF8B4"
KEYS=["B36714DE794D0080A183B5A12BEAF8B4","35d28185-0ca1-47f0-8caf-edc457802c9d","A00239D3-45F6-4A0A-810C-54A347F144C2","<KEY>","A00239D3-45F6-4A0A-810C-54A347F144C2"]#,"9b0610b4-cc4e-455a-b64d-e2fd01b5b086"]
SERIES="1"
IDSEASON="118"
urlProxys=["http://192.168.127.12:80","http://192.168.3.11:80"]
with urllib.request.urlopen('https://www.hide-my-ip.com/fr/proxylist.shtml') as response:
print("load list")
html = str(response.read())
#p = re.compile('.*(\[\{"i".*\}\])\;.*')
#line=p.match(html).group(0)
print ("parse1")
p = re.compile('.*\[\{"i"')
print ("parse2")
liner=p.sub( '[{"i"', html, count=1)
print ("parse3")
p = re.compile('\}\];.*')
print ("parse4")
line2=p.sub( '}]', liner, count=1)
print ("parse5")
line3=line2.replace("\\n","")
line4=line3.replace("\\","")
line=line4.replace("}, n {","},{").replace(" n ","")
#line = re.sub(p, "%s", html)
f = open("jjj.json", 'w')
f.write(line)
f.close()
json_object = json.loads(line)
print ("mount node")
for jsone in json_object:
urlProxys.append("http://"+jsone["i"]+":"+jsone["p"])
proxy = urllib.request.FancyURLopener({"http":"http://172.16.58.3:3128"})
#proxy = urllib.request.URLopener()
def is_json(myjson):
try:
json_object = json.loads(myjson)
except ValueError :
return False
return True
def is_file_json(myjson):
if ( os.path.exists(myjson)) :
f = open(myjson, 'r')
return is_json(f.read())
return False
def loadAndWrite(myjson,filie):
proxyMi=True
counter=0
while proxyMi:
proxyurl = random.choice (urlProxys)
try:
proxy = urllib.request.FancyURLopener({"http": proxyurl})
with proxy.open(myjson, data=None ) as url:
response = url.read().decode('utf-8')
data = json.loads(response)
f = open(filie, 'w')
f.write(response)
f.close()
print ("nice:"+filie)
proxyMi=False
except ValueError :
print ('error '+str(counter)+':'+myjson+":"+filie+":"+proxyurl)
if counter>15 :
proxyMi=False
counter=counter+1
except OSError :
print ('retry:'+myjson+":"+filie+":"+proxyurl)
if( not os.path.exists("currenseason")):
os.mkdir("currenseason")
if( not os.path.exists("currenseason/"+SERIES)):
os.mkdir("currenseason/"+SERIES)
if( not is_file_json("currenseason/"+SERIES+"/currentseason.json")):
url = "http://api.stats.foxsports.com.au/3.0/api/sports/rugby/series/"+SERIES+"/currentseason.json"
loadAndWrite(url,"currenseason/"+SERIES+"/currentseason.json")
f = open('currenseason/'+SERIES+'/currentseason.json', 'r')
data=json.loads(f.read())
f.close()
IDSEASON=str(data["current_season"]["id"])
IDSEASON="116"
if( not os.path.exists("conferences")):
os.mkdir("conferences")
if( not os.path.exists("conferences/conferences.json")):
url = "http://api.stats.foxsports.com.au/3.0/api/sports/rugby/conferences.json?userkey="+random.choice (KEYS)
loadAndWrite(url,"conferences/conferences.json")
if( not os.path.exists("ladder")):
os.mkdir("ladder")
if( not os.path.exists("ladder/"+SERIES)):
os.mkdir("ladder/"+SERIES)
if( not os.path.exists("ladder/"+SERIES+"/"+IDSEASON)):
os.mkdir("ladder/"+SERIES+"/"+IDSEASON)
if( not is_file_json("ladder/"+SERIES+"/"+IDSEASON+"/lader.json") ):
url = "http://api.stats.foxsports.com.au/3.0/api/sports/rugby/series/"+SERIES+"/seasons/"+IDSEASON+"/ladder.json?userkey="+random.choice (KEYS)
loadAndWrite(url,"ladder/"+SERIES+"/"+IDSEASON+"/lader.json")
f = open("ladder/"+SERIES+"/"+IDSEASON+"/lader.json", 'r')
dataTeam=json.loads(f.read())
f.close()
countyx=0
for teamm in dataTeam['teams']:
countyx=countyx+1
print("TT "+str(countyx)+"/"+str(len(dataTeam['teams'])))
TEAM=str(teamm['id'])
if( not os.path.exists("fixturesandresultswithbyes")):
os.mkdir("fixturesandresultswithbyes")
if( not os.path.exists("fixturesandresultswithbyes/"+SERIES)):
os.mkdir("fixturesandresultswithbyes/"+SERIES)
if( not os.path.exists("fixturesandresultswithbyes/"+SERIES+"/"+IDSEASON)):
os.mkdir("fixturesandresultswithbyes/"+SERIES+"/"+IDSEASON)
if( not os.path.exists("fixturesandresultswithbyes/"+SERIES+"/"+IDSEASON+"/"+TEAM)):
os.mkdir("fixturesandresultswithbyes/"+SERIES+"/"+IDSEASON+"/"+TEAM)
if( not is_file_json("fixturesandresultswithbyes/"+SERIES+"/"+IDSEASON+"/"+TEAM+"/fixturesandresultswithbyes.json")):
url = "http://api.stats.foxsports.com.au/3.0/api/sports/rugby/series/"+SERIES+"/seasons/"+IDSEASON+"/teams/"+TEAM+"/fixturesandresultswithbyes.json?userkey="+random.choice (KEYS)
loadAndWrite(url,"fixturesandresultswithbyes/"+SERIES+"/"+IDSEASON+"/"+TEAM+"/fixturesandresultswithbyes.json")
if( not os.path.exists("summary")):
os.mkdir("summary")
if( not os.path.exists("summary/"+SERIES)):
os.mkdir("summary/"+SERIES)
if( not os.path.exists("summary/"+SERIES+"/"+IDSEASON)):
os.mkdir("summary/"+SERIES+"/"+IDSEASON)
if( not os.path.exists("summary/"+SERIES+"/"+IDSEASON+"/"+TEAM)):
os.mkdir("summary/"+SERIES+"/"+IDSEASON+"/"+TEAM)
if( not is_file_json("summary/"+SERIES+"/"+IDSEASON+"/"+TEAM+"/summary.json")):
url = "http://api.stats.foxsports.com.au/3.0/api/sports/rugby/series/"+SERIES+"/seasons/"+IDSEASON+"/teams/"+TEAM+"/summary.json?userkey="+random.choice (KEYS)
loadAndWrite(url,"summary/"+SERIES+"/"+IDSEASON+"/"+TEAM+"/summary.json")
try:
f = open("fixturesandresultswithbyes/"+SERIES+"/"+IDSEASON+"/"+TEAM+"/fixturesandresultswithbyes.json", 'r')
dataMatchId=json.loads(f.read())
f.close()
except:
print ("err:fixturesandresultswithbyes/"+SERIES+"/"+IDSEASON+"/"+TEAM+"/fixturesandresultswithbyes.json")
dataMatchId=[]
county = 0
for macth in dataMatchId:
county=county+1
print ("NB "+str(county)+"/"+str(len(dataMatchId)))
MATCHID=macth['match_id']
if MATCHID is not None:
if( not os.path.exists("scoreboard")):
os.mkdir("scoreboard")
if( not os.path.exists("scoreboard/"+MATCHID)):
os.mkdir("scoreboard/"+MATCHID)
if( not is_file_json("scoreboard/"+MATCHID+"/scoreboard.json")):
url = "http://api.stats.foxsports.com.au/3.0/api/sports/rugby/matches/"+MATCHID+"/scoreboard.json?userkey="+random.choice (KEYS)
loadAndWrite(url,"scoreboard/"+MATCHID+"/scoreboard.json")
if( not os.path.exists("breakdown")):
os.mkdir("breakdown")
if( not os.path.exists("breakdown/"+MATCHID)):
os.mkdir("breakdown/"+MATCHID)
if( not is_file_json("breakdown/"+MATCHID+"/breakdown.json")):
url = "http://api.stats.foxsports.com.au/3.0/api/sports/rugby/matches/"+MATCHID+"/breakdown.json?userkey="+random.choice (KEYS)
loadAndWrite(url,"breakdown/"+MATCHID+"/breakdown.json")
if( not os.path.exists("teamstats")):
os.mkdir("teamstats")
if( not os.path.exists("teamstats/"+MATCHID)):
os.mkdir("teamstats/"+MATCHID)
if( not is_file_json("teamstats/"+MATCHID+"/teamstats.json")):
url = "http://api.stats.foxsports.com.au/3.0/api/sports/rugby/matches/"+MATCHID+"/teamstats.json?userkey="+random.choice (KEYS)
loadAndWrite(url,"teamstats/"+MATCHID+"/teamstats.json")
if( not os.path.exists("commentary")):
os.mkdir("commentary")
if( not os.path.exists("commentary/"+MATCHID)):
os.mkdir("commentary/"+MATCHID)
if( not is_file_json("commentary/"+MATCHID+"/commentary.json")):
url = "http://api.stats.foxsports.com.au/3.0/api/sports/rugby/matches/"+MATCHID+"/commentary.json?userkey="+random.choice (KEYS)
loadAndWrite(url,"commentary/"+MATCHID+"/commentary.json")
if( not os.path.exists("players")):
os.mkdir("players")
if( not os.path.exists("players/"+MATCHID)):
os.mkdir("players/"+MATCHID)
if( not is_file_json("players/"+MATCHID+"/players.json")):
url = "http://api.stats.foxsports.com.au/3.0/api/sports/rugby/matches/"+MATCHID+"/players.json?userkey="+random.choice (KEYS)
loadAndWrite(url,"players/"+MATCHID+"/players.json")
if (is_file_json("players/"+MATCHID+"/players.json")):
f = open("players/"+MATCHID+"/players.json", 'r')
dataPlayers=json.loads(f.read())
f.close()
playerss=dataPlayers['team_A']['players']
for player in playerss:
PLAYERID=str(player['id'])
if( not os.path.exists("players")):
os.mkdir("players")
if( not os.path.exists("players/"+MATCHID)):
os.mkdir("players/"+MATCHID)
if( not os.path.exists("players/"+MATCHID+"/"+PLAYERID)):
os.mkdir("players/"+MATCHID+"/"+PLAYERID)
if( not is_file_json("players/"+MATCHID+"/"+PLAYERID+"/stats.json")):
url = "http://api.stats.foxsports.com.au/3.0/api/sports/rugby/matches/"+MATCHID+"/players/"+PLAYERID+"/stats.json?userkey="+random.choice (KEYS)
loadAndWrite(url,"players/"+MATCHID+"/"+PLAYERID+"/stats.json")
if( not os.path.exists("playersAll")):
os.mkdir("playersAll")
if( not os.path.exists("playersAll/"+IDSEASON)):
os.mkdir("playersAll/"+IDSEASON)
if( not os.path.exists("playersAll/"+IDSEASON+"/"+PLAYERID)):
os.mkdir("playersAll/"+IDSEASON+"/"+PLAYERID)
if( not is_file_json("playersAll/"+IDSEASON+"/"+PLAYERID+"/stats.json")):
url = "http://api.stats.foxsports.com.au/3.0/api/sports/rugby/series/"+SERIES+"/seasons/"+IDSEASON+"/players/"+PLAYERID+"/stats.json?userkey="+random.choice (KEYS)
loadAndWrite(url,"playersAll/"+IDSEASON+"/"+PLAYERID+"/stats.json")
if( not os.path.exists("biography")):
os.mkdir("biography")
if( not os.path.exists("biography/"+PLAYERID)):
os.mkdir("biography/"+PLAYERID)
if( not is_file_json("biography/"+PLAYERID+"/biography.json")):
url = "http://api.stats.foxsports.com.au/3.0/api/sports/rugby/players/"+PLAYERID+"/biography.json?userkey="+random.choice (KEYS)
loadAndWrite(url,"biography/"+PLAYERID+"/biography.json")
playerss=dataPlayers['team_B']['players']
for player in playerss:
PLAYERID=str(player['id'])
if( not os.path.exists("players")):
os.mkdir("players")
if( not os.path.exists("players/"+MATCHID)):
os.mkdir("players/"+MATCHID)
if( not os.path.exists("players/"+MATCHID+"/"+PLAYERID)):
os.mkdir("players/"+MATCHID+"/"+PLAYERID)
if( not is_file_json("players/"+MATCHID+"/"+PLAYERID+"/stats.json")):
url = "http://api.stats.foxsports.com.au/3.0/api/sports/rugby/matches/"+MATCHID+"/players/"+PLAYERID+"/stats.json?userkey="+random.choice (KEYS)
loadAndWrite(url,"players/"+MATCHID+"/"+PLAYERID+"/stats.json")
if( not os.path.exists("playersAll")):
os.mkdir("playersAll")
if( not os.path.exists("playersAll/"+IDSEASON)):
os.mkdir("playersAll/"+IDSEASON)
if( not os.path.exists("playersAll/"+IDSEASON+"/"+PLAYERID)):
os.mkdir("playersAll/"+IDSEASON+"/"+PLAYERID)
if( not is_file_json("playersAll/"+IDSEASON+"/"+PLAYERID+"/stats.json")):
url = "http://api.stats.foxsports.com.au/3.0/api/sports/rugby/series/"+SERIES+"/seasons/"+IDSEASON+"/players/"+PLAYERID+"/stats.json?userkey="+random.choice (KEYS)
loadAndWrite(url,"playersAll/"+IDSEASON+"/"+PLAYERID+"/stats.json")
if( not os.path.exists("biography")):
os.mkdir("biography")
if( not os.path.exists("biography/"+PLAYERID)):
os.mkdir("biography/"+PLAYERID)
if( not is_file_json("biography/"+PLAYERID+"/biography.json")):
url = "http://api.stats.foxsports.com.au/3.0/api/sports/rugby/players/"+PLAYERID+"/biography.json?userkey="+random.choice (KEYS)
loadAndWrite(url,"biography/"+PLAYERID+"/biography.json")
|
StarcoderdataPython
|
9766899
|
<filename>pmapper/pmap.py
"""Several specialized implementations of the PMAP super-resolution algorithm."""
from .backend import np, ft_fwd, ft_rev, ndimage
from .bayer import decomposite_bayer, demosaic_malvar
# If you wish to understand how PMAP works from this code, it is recommended
# that you read :class:PMAP first. MFPMAP is just PMAP that cycles through the
# frames, and Bayer implementations are the same, but with inner iterations per
# step for the color planes.
# Refs:
# [1] "Multiframe restoration methods for image synthesis and recovery",
# <NAME>, PhD thesis, University of Arizona, 2000
#
# [2] Super-Resolution In a Synthetic Aperture Imaging System
# <NAME> and <NAME>,
# Proceedings of International Conference on Image Processing,
# 26-29 Oct. 1997, DOI 10.1109/ICIP.1997.648103
# implementation notes:
# A version which used matrix DFTs was tried, but proved (slightly) slower for
# 512x512 -- 5.2 ms/iter vs 4.6 ms/iter. FFT has better asymptotic time
# complexity than MDFT, and image resolution is only increasing. Therefore, the
# FFT version is preferred.
#
# A slightly different implementation would replace the prefilter init argument
# with an actual function to be used to perform up/downsampling. This would be
# a bit more flexible, but require a bit more work on the user and likely need
# functools.partial to get everything down to one interface. What is here
# forces a particular resizing algorithm, but it is as good as can be done to
# the author's knowledge anyway.
def pmap_core(fHat, g, H, Hstar, bufup=None, bufdown=None, prefilter=False, zoomfactor=1):
"""Core routine of PMAP, produce a new object estimate.
Parameters
----------
fHat : `numpy.ndarray`
Nth object estimate, ndarray of shape (m, n)
g : `numpy.ndarray`
source image, ndarray of shape (a, b)
H : `numpy.ndarray`
OTF, complex ndarray of shape (m, n)
Hstar : `numpy.ndarray`
complex conjugate of H, ndarray of shape (m, n)
bufup : `numpy.ndarray`, optional
real-valued buffer for upsampled data, of shape (m, n)
bufdown : `numpy.ndarray`, optional
real-valued buffer for downsampled data, of shape (a, b)
prefilter : `bool`, optional
if True, use spline prefiltering
False is generally better at getting realistic image chain aliasing correct
zoomfactor : `float`, optional
ratio m/a
Returns
-------
fHat : `numpy.ndarray`
N+1th object estimate, ndarray of shape (m, n)
"""
if zoomfactor == 1 and fHat.shape[0] != H.shape[0]:
raise ValueError(f'PMAP: zoom factor was given as 1, but fHat and OTF have unequal shapes {fHat.shape} and {H.shape}') # NOQA
Fhat = ft_fwd(fHat)
denom = ft_rev(Fhat * H).real
# denom may be super-resolved, i.e. have more pixels than g
# zoomfactor is the ratio of their sampling, the below does
# inline up and down scaling as denoted in Ref [1] Eq. 2.26
# re-assign denom and kernel, non-allocating invocation of zoom
if zoomfactor != 1:
denom = ndimage.zoom(denom, 1/zoomfactor, prefilter=prefilter, output=bufdown)
kernel = (g / denom) - 1
kernel = ndimage.zoom(kernel, zoomfactor, prefilter=prefilter, output=bufup)
else:
# kernel is the expression { g/(f_n conv h) - 1 } from 2.16, J. J. Green's thesis
kernel = (g / denom) - 1
R = ft_fwd(kernel)
grad = ft_rev(R * Hstar).real
fHat = fHat * np.exp(grad)
return fHat
class PMAP:
"""Classical PMAP algorithm. Suitable for panchromatic restoration.
Implements Ref [1], Eq. 2.16.
"""
def __init__(self, img, psf, fHat=None, prefilter=False):
"""Initialize a new PMAP problem.
Parameters
----------
img : `numpy.ndarray`
image from the camera, ndarray of shape (m, n)
psf : `numpy.ndarray`
psf corresponding to img, ndarray of shape (a, b)
fhat : `numpy.ndarray`, optional
initial object estimate, ndarray of shape (a, b)
if None, taken to be the img, rescaled if necessary to match PSF
sampling
prefilter : `bool`, optional
if True, uses input stage filters when performing spline-based
resampling, else no input filter is used. No pre-filtering is
generally a best fit for image chain modeling and allows aliasing
into the problem that would be present in a hardware system.
"""
self.img = img
self.psf = psf
otf = ft_fwd(psf)
center = tuple(s // 2 for s in otf.shape)
otf /= otf[center] # definition of OTF, normalize by DC
self.otf = otf
self.otfconj = np.conj(otf)
self.zoomfactor = self.psf.shape[0] / self.img.shape[0]
self.prefilter = prefilter
if fHat is None:
fHat = ndimage.zoom(img, self.zoomfactor, prefilter=prefilter)
self.fHat = fHat
self.bufup = np.empty(self.psf.shape, dtype=self.psf.dtype)
self.bufdown = np.empty(self.img.shape, dtype=self.img.dtype)
self.iter = 0
def step(self):
"""Iterate the algorithm one step.
Returns
-------
fhat : `numpy.ndarray`
updated object estimate, of shape (a, b)
"""
self.fHat = pmap_core(self.fHat, self.img, self.otf, self.otfconj,
self.bufup, self.bufdown, self.prefilter,
self.zoomfactor)
self.iter += 1
return self.fHat
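# A hedged usage sketch, not part of the original module: the arrays are synthetic
# and the iteration count is arbitrary; it only exercises the PMAP API.
def _example_pmap():
    img = np.random.rand(64, 64)      # simulated camera frame
    psf = np.random.rand(64, 64)      # PSF sampled on the same grid (zoomfactor == 1)
    psf /= psf.sum()                  # normalize the PSF to unit sum
    solver = PMAP(img, psf)
    for _ in range(10):
        estimate = solver.step()
    return estimate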
class MFPMAP:
"""Multi-Frame PMAP algorithm. Suitable for panchromatic restoration.
Implements Ref [1], Eq. 2.26.
"""
def __init__(self, imgs, psfs, fHat=None, prefilter=False):
"""Initialize a new PMAP problem.
Parameters
----------
imgs : `numpy.ndarray`
images from the camera, sequence of ndarray of shape (m, n).
The images must be fully co-registered before input to the algorithm.
A (k, m, n) shaped array iterates correctly, as does a list or other
iterable of (m, n) arrays
psfs : `iterable` of `numpy.ndarray`
psfs corresponding to imgs, sequence of ndarray of shape (a, b)
fhat : `numpy.ndarray`
initial object estimate, ndarray of shape (a, b)
if None, taken to be the first img rescaled if necessary to match
PSF sampling
prefilter : `bool`, optional
if True, uses input stage filters when performing spline-based
resampling, else no input filter is used. No pre-filtering is
generally a best fit for image chain modeling and allows aliasing
into the problem that would be present in a hardware system.
Notes
-----
This implementation is optimized for performance on hardware with a large
amount of memory. The OTFs can be computed during each step to use less
memory overall, in exchange for slower iterations.
"""
imgs = np.array(imgs)
psfs = np.array(psfs)
self.imgs = imgs
self.psfs = psfs
otfs = [ft_fwd(psf) for psf in psfs]
center = tuple(s // 2 for s in otfs[0].shape)
for otf in otfs:
otf /= otf[center] # definition of OTF, normalize by DC
self.otfs = otfs
self.otfsconj = [np.conj(otf) for otf in otfs]
self.zoomfactor = self.psfs[0].shape[0] / self.imgs[0].shape[0]
self.prefilter = prefilter
if fHat is None:
fHat = ndimage.zoom(imgs[0], self.zoomfactor, prefilter=prefilter)
self.fHat = fHat
self.bufup = np.empty(self.psfs[0].shape, dtype=self.psfs[0].dtype)
self.bufdown = np.empty(self.imgs[0].shape, dtype=self.imgs[0].dtype)
self.iter = 0
def step(self):
"""Iterate the algorithm one step.
Because this implementation cycles through the images, the steps can be
thought of as mini-batches. Intuitively, you may wish to make len(imgs)
steps at a time.
Returns
-------
fhat : `numpy.ndarray`
updated object estimate, of shape (a, b)
"""
i = self.iter % len(self.otfs)
otf = self.otfs[i]
img = self.imgs[i]
otfconj = self.otfsconj[i]
self.fHat = pmap_core(self.fHat, img, otf, otfconj,
self.bufup, self.bufdown, self.prefilter,
self.zoomfactor)
self.iter += 1
return self.fHat
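# A hedged sketch of the "mini-batch" idea in MFPMAP.step's docstring: one outer
# pass cycles through all frames once. Frames and PSFs here are synthetic stand-ins.
def _example_mfpmap():
    imgs = np.random.rand(3, 64, 64)  # three co-registered frames
    psfs = np.random.rand(3, 64, 64)
    psfs /= psfs.sum(axis=(1, 2), keepdims=True)
    solver = MFPMAP(imgs, psfs)
    for _ in range(2 * len(imgs)):    # two full passes over the frames
        estimate = solver.step()
    return estimate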
|
StarcoderdataPython
|
1667943
|
import lxml.etree
def parse_xml(xml):
if not len(xml):
return {xml.tag: xml.text}
result = {}
for child in xml:
child_result = parse_xml(child)
if child.tag != 'object':
result[child.tag] = child_result[child.tag]
else:
if child.tag not in result:
result[child.tag] = []
result[child.tag].append(child_result[child.tag])
return {xml.tag: result}
def read_xml(path):
annotation_xml = lxml.etree.parse(path).getroot()
annotation = parse_xml(annotation_xml)['annotation']
return annotation
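# A hedged usage sketch; the snippet below is an assumed Pascal VOC-style annotation
# used only to show how repeated <object> children are collected into a list.
def _example_parse():
    xml = lxml.etree.fromstring(
        b"<annotation>"
        b"<filename>img.jpg</filename>"
        b"<object><name>dog</name></object>"
        b"<object><name>cat</name></object>"
        b"</annotation>")
    # -> {'filename': 'img.jpg', 'object': [{'name': 'dog'}, {'name': 'cat'}]}
    return parse_xml(xml)['annotation']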
|
StarcoderdataPython
|
283374
|
class Solution(object):
def search(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
def binarySearch (arr, l, r, x):
if r >= l:
mid = l + (r - l) // 2
if arr[mid] == x:
return mid
elif arr[mid] > x:
return binarySearch(arr, l, mid-1, x)
else:
return binarySearch(arr, mid + 1, r, x)
else:
return -1
return binarySearch(nums, 0, len(nums)-1, target)
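# Example (assumed usage):
#   Solution().search([-1, 0, 3, 5, 9, 12], 9)  -> 4
#   Solution().search([-1, 0, 3, 5, 9, 12], 2)  -> -1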
|
StarcoderdataPython
|
1903615
|
# coding=utf-8
"""
The Fax API endpoint send
Documentation: https://voip.ms/m/apidocs.php
"""
from voipms.baseapi import BaseApi
from voipms.helpers import validate_email, convert_bool
class FaxSend(BaseApi):
"""
Send for the Fax endpoint.
"""
def __init__(self, *args, **kwargs):
"""
Initialize the endpoint
"""
super(FaxSend, self).__init__(*args, **kwargs)
self.endpoint = 'fax'
def fax_message(self, to_number, from_name, from_number, file, **kwargs):
"""
Send a Fax message to a Destination Number
:param to_number: [Required] Destination DID Number (Example: 5552341234)
:type to_number: :py:class:`int`
:param from_name: [Required] Name of the sender
:type from_name: :py:class:`str`
:param from_number: [Required] DID number of the Fax sender (Example: 5552341234)
:type from_number: :py:class:`int`
:param file: [Required] The file must be encoded in Base64 and in one of the following formats: pdf, txt, jpg, gif, png, tif
:type file: :py:class:`str`
:param send_email_enabled: Flag to enable sending a copy of your Fax via email (True/False, default False)
:type send_email_enabled: :py:class:`bool`
:param send_email: Email address to which a copy of your Fax should be sent.
:type send_email: :py:class:`str`
:param station_id: A word identifying the equipment or department sending the Fax
:type station_id: :py:class:`str`
:param test: Set to True when testing the Fax send (True/False)
:type test: :py:class:`bool`
:returns: :py:class:`dict`
"""
method = "sendFaxMessage"
if not isinstance(to_number, int):
raise ValueError("Destination DID Number needs to be an int (Example: 5552341234)")
if not isinstance(from_name, str):
raise ValueError("Name of the sender needs to be a str")
if not isinstance(from_number, int):
raise ValueError("DID number of the Fax sender needs to be an int (Example: 5552341234)")
if not isinstance(file, str):
raise ValueError("The file must be encoded in Base64 and in one of the following formats: pdf, txt, jpg, gif, png, tif and needs to be a str")
parameters = {
"to_number": to_number,
"from_name": from_name,
"from_number": from_number,
"file": file,
}
if "send_email_enabled" in kwargs:
if not isinstance(kwargs["send_email_enabled"], bool):
raise ValueError("Flag to enable the send of a copy of your Fax via email needs to be a bool (True/False default False)")
parameters["send_email_enabled"] = convert_bool(kwargs.pop("send_email_enabled"))
if "send_email" in kwargs:
send_email = kwargs.pop("send_email")
if not isinstance(send_email, str):
raise ValueError("Email address where you want send a copy of your Fax needs to be a str (Example: <EMAIL>)")
elif not validate_email(send_email):
raise ValueError("Email address where you want send a copy of your Fax is not a correct email syntax")
parameters["send_email"] = send_email
if "station_id" in kwargs:
if not isinstance(kwargs["station_id"], str):
raise ValueError("A word to identify a equipment or department sending the Fax needs to be a str")
parameters["station_id"] = kwargs.pop("station_id")
if "test" in kwargs:
if not isinstance(kwargs["test"], bool):
raise ValueError("Set to true if testing how cancel a Fax Folder needs to be a bool (True/False)")
else:
parameters["test"] = convert_bool(kwargs.pop("test"))
if len(kwargs) > 0:
not_allowed_parameters = ""
for key, value in kwargs.items():
not_allowed_parameters += key + " "
raise ValueError("Parameters not allowed: {}".format(not_allowed_parameters))
return self._voipms_client._get(method, parameters)
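# A hedged helper sketch (not part of the voip.ms client): building the Base64
# string expected by fax_message's `file` argument from a local document.
def _encode_fax_file(path):
    """Return the Base64 text of a local pdf/txt/jpg/gif/png/tif file."""
    import base64
    with open(path, "rb") as fax_source:
        return base64.b64encode(fax_source.read()).decode("ascii")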
|
StarcoderdataPython
|
193772
|
#!/usr/bin/python3
# Additionally format C++ code for further entropy reduction
# Assumes clang-format already run
# Does not work for all possible C++ programs
# Test carefully before reusing in other projects!
import argparse
import os
import re
parser = argparse.ArgumentParser(description="Additionally format C++ code")
parser.add_argument("files", nargs="+")
args = parser.parse_args()
def read_lines(filename):
with open(filename) as f:
return [s.rstrip("\n") for s in f]
def write_lines(filename, lines):
with open(filename, "w") as f:
for s in lines:
f.write(s + "\n")
def flatten(xss):
r = []
for xs in xss:
for x in xs:
r.append(x)
return r
########################################
# format comments
def bare_comment(s):
s = s.strip()
assert s.startswith("//")
s = s[2:]
return s.strip()
def special(s):
s = bare_comment(s)
if len(s) == 1:
return 1
if s in ("namespace", "SORT", "NO_SORT"):
return 1
if s.startswith("clang-format off") or s.startswith("clang-format on"):
return 1
if s.startswith("http"):
return 1
if s.lower().startswith("todo:"):
return 1
def is_sentence_end(s):
if s.endswith("e.g.") or s.endswith("i.e."):
return 0
if s.endswith(".") or s.endswith(".)"):
return 1
if s.endswith("?"):
return 1
if s.endswith(":"):
return 1
def capitalize(s):
for c in s:
if c.isupper():
return s
if len(s) == 1:
return s
if not s[1].isalpha():
return s
return s.capitalize()
def comment_block(i):
m = re.match(r"(\s*)//", lines[i])
dent = m[1]
j = i
words = []
while j < len(lines) and re.match(r"\s*//", lines[j]) and not special(lines[j]):
s = bare_comment(lines[j])
xs = s.split()
words.extend(xs)
j += 1
if i == j:
return 1
k = 0
words[k] = capitalize(words[k])
for k in range(1, len(words)):
if is_sentence_end(words[k - 1]):
words[k] = capitalize(words[k])
k = len(words) - 1
if not is_sentence_end(words[k]):
words[k] += "."
width = 132 - len(dent) * 4 - 3
xs = []
s = ""
for w in words:
if len(s) + 1 + len(w) > width:
xs.append(s)
s = w
else:
if s:
s += " "
s += w
assert s
xs.append(s)
for k in range(len(xs)):
xs[k] = dent + "// " + xs[k]
lines[i:j] = xs
return len(xs)
def comments():
i = 0
while i < len(lines):
m = re.match(r"(\s*)//", lines[i])
if m:
if re.match(r"\s*//SORT$", lines[i]):
lines[i] = m[1] + "// SORT"
i += 1
continue
if re.match(r"\s*//NO_SORT$", lines[i]):
lines[i] = m[1] + "// NO_SORT"
i += 1
continue
s = bare_comment(lines[i])
if s.lower().startswith("todo:"):
s = s[5:]
s = s.strip()
lines[i] = m[1] + "// TODO: " + s
i += 1
continue
i += comment_block(i)
else:
i += 1
########################################
# sort case blocks
def case(i, dent):
case_mark = dent + "(case .*|default):$"
while 1:
if not re.match(case_mark, lines[i]):
raise ValueError(filename + ":" + str(i + 1) + ": case not found")
while re.match(case_mark, lines[i]):
i += 1
if dent + "{" == lines[i]:
i += 1
while dent + "}" != lines[i]:
if re.match(case_mark, lines[i]):
raise ValueError(
filename
+ ":"
+ str(i + 1)
+ ": another case in the middle of block"
)
i += 1
i += 1
return i
else:
while not re.match(case_mark, lines[i]) and dent + "}" != lines[i]:
i += 1
if not re.match(r"\s*\[\[fallthrough\]\];", lines[i - 1]):
return i
def cases(i, dent):
r = []
while dent + "}" != lines[i]:
j = case(i, dent)
r.append(lines[i:j])
i = j
return i, r
def sort_case(c):
i = 0
while re.match(r"\s*(case .*|default):$", c[i]):
i += 1
c[:i] = sorted(c[:i], key=lambda s: (s.lower(), s))
def sort_cases(i, dent):
j, cs = cases(i, dent)
for c in cs:
sort_case(c)
cs = sorted(cs, key=lambda xs: (xs[0].lower(), xs[0]))
lines[i:j] = flatten(cs)
def sort_case_blocks():
for i in range(len(lines)):
m = re.match(r"(\s*)switch \(.*\) {", lines[i])
if m:
m1 = re.match(r"\s*// NO_SORT", lines[i - 1])
if m1:
continue
sort_cases(i + 1, m[1])
########################################
# sort single-line elements
def var_key(x):
m = re.match(r".* (\w+) = ", x)
if m:
x = m[1]
else:
m = re.match(r".* (\w+)\(", x)
if m:
x = m[1]
else:
m = re.match(r".* (\w+);", x)
if m:
x = m[1]
return x.lower(), x
def sort_single():
for i in range(len(lines)):
if re.match(r"\s*// SORT$", lines[i]):
if lines[i + 1].endswith("{"):
continue
j = i + 1
while not re.match(r"\s*///$", lines[j]):
j += 1
lines[i + 1 : j] = sorted(lines[i + 1 : j], key=var_key)
########################################
# sort multi-line elements
def get_multi_element(dent, i, j):
i0 = i
while re.match(r"\s*//", lines[i]):
i += 1
m = re.match(r"(\s*).*{$", lines[i])
if not m:
raise ValueError(filename + ":" + str(i + 1) + ": inconsistent syntax")
if m[1] != dent:
raise ValueError(filename + ":" + str(i + 1) + ": inconsistent indent")
while lines[i] != dent + "}":
i += 1
if i > j:
raise ValueError(filename + ":" + str(i + 1) + ": inconsistent syntax")
i += 1
return lines[i0:i], i
def get_multi_elements(i, j):
m = re.match(r"(\s*).*", lines[i])
dent = m[1]
xss = []
while i < j:
xs, i = get_multi_element(dent, i, j)
xss.append(xs)
while not lines[i]:
i += 1
return xss
def fn_key(xs):
i = 0
while re.match(r"\s*//", xs[i]):
i += 1
x = xs[i]
m = re.match(r".* (\w+)\(", x)
if m:
x = m[1]
return x.lower(), x, xs[i]
def sort_multi_block(i, j):
xss = get_multi_elements(i, j)
xss = sorted(xss, key=fn_key)
for k in range(len(xss) - 1):
xss[k].append("")
xs = flatten(xss)
lines[i:j] = xs
def sort_multi():
i = 0
while i < len(lines):
if re.match(r"\s*// SORT$", lines[i]):
i += 1
if lines[i] == "":
raise ValueError(
filename + ":" + str(i + 1) + ": blank line after SORT directive"
)
if not lines[i].endswith("{") and not re.match(r"\s*//", lines[i]):
continue
j = i
while not re.match(r"\s*///$", lines[j]):
j += 1
sort_multi_block(i, j)
else:
i += 1
########################################
# blank lines before comments
def comment_blank_lines():
i = 1
while i < len(lines):
m = re.match(r"(\s*)//", lines[i])
if m:
if special(lines[i]):
i += 1
continue
if not lines[i - 1]:
i += 1
continue
if re.match(r"\s*//", lines[i - 1]):
i += 1
continue
if re.match(r"\s*#", lines[i - 1]):
i += 1
continue
if lines[i - 1].endswith("{"):
i += 1
continue
if lines[i - 1].endswith(":"):
i += 1
continue
lines[i:i] = [""]
i += 2
else:
i += 1
########################################
# top level
def act():
global lines
lines = read_lines(filename)
old = lines[:]
comments()
sort_case_blocks()
sort_single()
sort_multi()
comment_blank_lines()
if lines == old:
return
print(filename)
write_lines(filename, lines)
for arg in args.files:
if os.path.isfile(arg):
filename = arg
act()
continue
for root, dirs, files in os.walk(arg):
for filename in files:
ext = os.path.splitext(filename)[1]
if ext not in (".cc", ".cpp", ".h"):
continue
filename = os.path.join(root, filename)
act()
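# A hedged sketch (never invoked by the script): exercises the comment reflow on an
# in-memory snippet by assigning the module-level `lines` that act() normally sets.
def _demo_comment_reflow():
    global lines
    lines = [
        "    //todo: fix this",
        "    //rewrap and capitalize comments",
    ]
    comments()
    # Expected: ["    // TODO: fix this", "    // Rewrap and capitalize comments."]
    return lines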
|
StarcoderdataPython
|
133619
|
"""This file contains code used in "Think Bayes",
by <NAME>, available from greenteapress.com
Copyright 2012 <NAME>
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import matplotlib.pyplot as pyplot
import thinkplot
import numpy
import csv
import random
import shelve
import sys
import time
import thinkbayes2
import warnings
warnings.simplefilter('error', RuntimeWarning)
FORMATS = ['pdf', 'eps', 'png']
class Locker(object):
"""Encapsulates a shelf for storing key-value pairs."""
def __init__(self, shelf_file):
self.shelf = shelve.open(shelf_file)
def Close(self):
"""Closes the shelf.
"""
self.shelf.close()
def Add(self, key, value):
"""Adds a key-value pair."""
self.shelf[str(key)] = value
def Lookup(self, key):
"""Looks up a key."""
return self.shelf.get(str(key))
def Keys(self):
"""Returns an iterator of keys."""
return iter(self.shelf.keys())
def Read(self):
"""Returns the contents of the shelf as a map."""
return dict(self.shelf)
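# A hedged usage sketch (the shelf filename is an assumption):
#   locker = Locker('species_cache.db')
#   locker.Add('B123', {'num_species': 42})
#   print(locker.Lookup('B123'))
#   locker.Close()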
class Subject(object):
"""Represents a subject from the belly button study."""
def __init__(self, code):
"""
code: string ID
species: sequence of (int count, string species) pairs
"""
self.code = code
self.species = []
self.suite = None
self.num_reads = None
self.num_species = None
self.total_reads = None
self.total_species = None
self.prev_unseen = None
self.pmf_n = None
self.pmf_q = None
self.pmf_l = None
def Add(self, species, count):
"""Add a species-count pair.
It is up to the caller to ensure that species names are unique.
species: string species/genus name
count: int number of individuals
"""
self.species.append((count, species))
def Done(self, reverse=False, clean_param=0):
"""Called when we are done adding species counts.
reverse: which order to sort in
"""
if clean_param:
self.Clean(clean_param)
self.species.sort(reverse=reverse)
counts = self.GetCounts()
self.num_species = len(counts)
self.num_reads = sum(counts)
def Clean(self, clean_param=50):
"""Identifies and removes bogus data.
clean_param: parameter that controls the number of legit species
"""
def prob_bogus(k, r):
"""Compute the probability that a species is bogus."""
q = clean_param / r
p = (1-q) ** k
return p
print(self.code, clean_param)
counts = self.GetCounts()
r = 1.0 * sum(counts)
species_seq = []
for k, species in sorted(self.species):
if random.random() < prob_bogus(k, r):
continue
species_seq.append((k, species))
self.species = species_seq
def GetM(self):
"""Gets number of observed species."""
return len(self.species)
def GetCounts(self):
"""Gets the list of species counts
Should be in increasing order, if Done() has been invoked.
"""
return [count for count, _ in self.species]
def MakeCdf(self):
"""Makes a CDF of total prevalence vs rank."""
counts = self.GetCounts()
counts.sort(reverse=True)
cdf = thinkbayes2.Cdf(dict(enumerate(counts)))
return cdf
def GetNames(self):
"""Gets the names of the seen species."""
return [name for _, name in self.species]
def PrintCounts(self):
"""Prints the counts and species names."""
for count, name in reversed(self.species):
print(count, name)
def GetSpecies(self, index):
"""Gets the count and name of the indicated species.
Returns: count-species pair
"""
return self.species[index]
def GetCdf(self):
"""Returns cumulative prevalence vs number of species.
"""
counts = self.GetCounts()
items = enumerate(counts)
cdf = thinkbayes2.Cdf(items)
return cdf
def GetPrevalences(self):
"""Returns a sequence of prevalences (normalized counts).
"""
counts = self.GetCounts()
total = sum(counts)
prevalences = numpy.array(counts, dtype=numpy.float) / total
return prevalences
def Process(self, low=None, high=500, conc=1, iters=100):
"""Computes the posterior distribution of n and the prevalences.
Sets attribute: self.suite
low: minimum number of species
high: maximum number of species
conc: concentration parameter
iters: number of iterations to use in the estimator
"""
counts = self.GetCounts()
m = len(counts)
if low is None:
low = max(m, 2)
ns = range(low, high+1)
#start = time.time()
self.suite = Species5(ns, conc=conc, iters=iters)
self.suite.Update(counts)
#end = time.time()
#print 'Processing time' end-start
def MakePrediction(self, num_sims=100):
"""Make predictions for the given subject.
Precondition: Process has run
num_sims: how many simulations to run for predictions
Adds attributes
pmf_l: predictive distribution of additional species
"""
add_reads = self.total_reads - self.num_reads
curves = self.RunSimulations(num_sims, add_reads)
self.pmf_l = self.MakePredictive(curves)
def MakeQuickPrediction(self, num_sims=100):
"""Make predictions for the given subject.
Precondition: Process has run
num_sims: how many simulations to run for predictions
Adds attribute:
pmf_l: predictive distribution of additional species
"""
add_reads = self.total_reads - self.num_reads
pmf = thinkbayes2.Pmf()
_, seen = self.GetSeenSpecies()
for _ in range(num_sims):
_, observations = self.GenerateObservations(add_reads)
all_seen = seen.union(observations)
l = len(all_seen) - len(seen)
pmf.Incr(l)
pmf.Normalize()
self.pmf_l = pmf
def DistL(self):
"""Returns the distribution of additional species, l.
"""
return self.pmf_l
def MakeFigures(self):
"""Makes figures showing distribution of n and the prevalences."""
self.PlotDistN()
self.PlotPrevalences()
def PlotDistN(self):
"""Plots distribution of n."""
pmf = self.suite.DistN()
print('90% CI for N:', pmf.CredibleInterval(90))
pmf.label = self.code
thinkplot.Clf()
thinkplot.PrePlot(num=1)
thinkplot.Pmf(pmf)
root = 'species-ndist-%s' % self.code
thinkplot.Save(root=root,
xlabel='Number of species',
ylabel='Prob',
formats=FORMATS,
)
def PlotPrevalences(self, num=5):
"""Plots dist of prevalence for several species.
num: how many species (starting with the highest prevalence)
"""
thinkplot.Clf()
thinkplot.PrePlot(num=5)
for rank in range(1, num+1):
self.PlotPrevalence(rank)
root = 'species-prev-%s' % self.code
thinkplot.Save(root=root,
xlabel='Prevalence',
ylabel='Prob',
formats=FORMATS,
axis=[0, 0.3, 0, 1],
)
def PlotPrevalence(self, rank=1, cdf_flag=True):
"""Plots dist of prevalence for one species.
rank: rank order of the species to plot.
cdf_flag: whether to plot the CDF
"""
# convert rank to index
index = self.GetM() - rank
_, mix = self.suite.DistOfPrevalence(index)
count, _ = self.GetSpecies(index)
mix.label = '%d (%d)' % (rank, count)
print('90%% CI for prevalence of species %d:' % rank, end=' ')
print(mix.CredibleInterval(90))
if cdf_flag:
cdf = mix.MakeCdf()
thinkplot.Cdf(cdf)
else:
thinkplot.Pmf(mix)
def PlotMixture(self, rank=1):
"""Plots dist of prevalence for all n, and the mix.
rank: rank order of the species to plot
"""
# convert rank to index
index = self.GetM() - rank
print(self.GetSpecies(index))
print(self.GetCounts()[index])
metapmf, mix = self.suite.DistOfPrevalence(index)
thinkplot.Clf()
for pmf in metapmf.Values():
thinkplot.Pmf(pmf, color='blue', alpha=0.2, linewidth=0.5)
thinkplot.Pmf(mix, color='blue', alpha=0.9, linewidth=2)
root = 'species-mix-%s' % self.code
thinkplot.Save(root=root,
xlabel='Prevalence',
ylabel='Prob',
formats=FORMATS,
axis=[0, 0.3, 0, 0.3],
legend=False)
def GetSeenSpecies(self):
"""Makes a set of the names of seen species.
Returns: number of species, set of string species names
"""
names = self.GetNames()
m = len(names)
seen = set(SpeciesGenerator(names, m))
return m, seen
def GenerateObservations(self, num_reads):
"""Generates a series of random observations.
num_reads: number of reads to generate
Returns: number of species, sequence of string species names
"""
n, prevalences = self.suite.SamplePosterior()
names = self.GetNames()
name_iter = SpeciesGenerator(names, n)
items = zip(name_iter, prevalences)
cdf = thinkbayes2.Cdf(dict(items))
observations = cdf.Sample(num_reads)
#for ob in observations:
# print ob
return n, observations
def Resample(self, num_reads):
"""Choose a random subset of the data (without replacement).
num_reads: number of reads in the subset
"""
t = []
for count, species in self.species:
t.extend([species]*count)
random.shuffle(t)
reads = t[:num_reads]
subject = Subject(self.code)
hist = thinkbayes2.Hist(reads)
for species, count in hist.Items():
subject.Add(species, count)
subject.Done()
return subject
def Match(self, match):
"""Match up a rarefied subject with a complete subject.
match: complete Subject
Assigns attributes:
total_reads:
total_species:
prev_unseen:
"""
self.total_reads = match.num_reads
self.total_species = match.num_species
# compute the prevalence of unseen species (at least approximately,
# based on all species counts in match)
_, seen = self.GetSeenSpecies()
seen_total = 0.0
unseen_total = 0.0
for count, species in match.species:
if species in seen:
seen_total += count
else:
unseen_total += count
self.prev_unseen = unseen_total / (seen_total + unseen_total)
def RunSimulation(self, num_reads, frac_flag=False, jitter=0.01):
"""Simulates additional observations and returns a rarefaction curve.
k is the number of additional observations
num_new is the number of new species seen
num_reads: how many new reads to simulate
frac_flag: whether to convert to fraction of species seen
jitter: size of jitter added if frac_flag is true
Returns: list of (k, num_new) pairs
"""
m, seen = self.GetSeenSpecies()
n, observations = self.GenerateObservations(num_reads)
curve = []
for i, obs in enumerate(observations):
seen.add(obs)
if frac_flag:
frac_seen = len(seen) / float(n)
frac_seen += random.uniform(-jitter, jitter)
curve.append((i+1, frac_seen))
else:
num_new = len(seen) - m
curve.append((i+1, num_new))
return curve
def RunSimulations(self, num_sims, num_reads, frac_flag=False):
"""Runs simulations and returns a list of curves.
Each curve is a sequence of (k, num_new) pairs.
num_sims: how many simulations to run
num_reads: how many samples to generate in each simulation
frac_flag: whether to convert num_new to fraction of total
"""
curves = [self.RunSimulation(num_reads, frac_flag)
for _ in range(num_sims)]
return curves
def MakePredictive(self, curves):
"""Makes a predictive distribution of additional species.
curves: list of (k, num_new) curves
Returns: Pmf of num_new
"""
pred = thinkbayes2.Pmf(label=self.code)
for curve in curves:
_, last_num_new = curve[-1]
pred.Incr(last_num_new)
pred.Normalize()
return pred
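def _example_subject_pipeline():
    """A hedged sketch of the typical Subject flow, not part of the study code.

    Species names and counts are synthetic; small high/iters keep it quick.
    """
    subject = Subject('demo')
    for name, count in [('sp-a', 5), ('sp-b', 2), ('sp-c', 1)]:
        subject.Add(name, count)
    subject.Done()
    subject.Process(high=30, iters=10)
    return subject.suite.DistN()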
def MakeConditionals(curves, ks):
"""Makes Cdfs of the distribution of num_new conditioned on k.
curves: list of (k, num_new) curves
ks: list of values of k
Returns: list of Cdfs
"""
joint = MakeJointPredictive(curves)
cdfs = []
for k in ks:
pmf = joint.Conditional(1, 0, k)
pmf.label = 'k=%d' % k
cdf = pmf.MakeCdf()
cdfs.append(cdf)
print('90%% credible interval for %d' % k, end=' ')
print(cdf.CredibleInterval(90))
return cdfs
def MakeJointPredictive(curves):
"""Makes a joint distribution of k and num_new.
curves: list of (k, num_new) curves
Returns: joint Pmf of (k, num_new)
"""
joint = thinkbayes2.Joint()
for curve in curves:
for k, num_new in curve:
joint.Incr((k, num_new))
joint.Normalize()
return joint
def MakeFracCdfs(curves, ks):
"""Makes Cdfs of the fraction of species seen.
curves: list of (k, num_new) curves
Returns: list of Cdfs
"""
d = {}
for curve in curves:
for k, frac in curve:
if k in ks:
d.setdefault(k, []).append(frac)
cdfs = {}
for k, fracs in d.items():
cdf = thinkbayes2.Cdf(fracs)
cdfs[k] = cdf
return cdfs
def SpeciesGenerator(names, num):
"""Generates a series of names, starting with the given names.
Additional names are 'unseen' plus a serial number.
names: list of strings
num: total number of species names to generate
Returns: string iterator
"""
i = 0
for name in names:
yield name
i += 1
while i < num:
yield 'unseen-%d' % i
i += 1
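# For example (assumed call): list(SpeciesGenerator(['alpha', 'beta'], 4))
# yields ['alpha', 'beta', 'unseen-2', 'unseen-3'].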
def ReadRarefactedData(filename='journal.pone.0047712.s001.csv',
clean_param=0):
"""Reads a data file and returns a list of Subjects.
Data from http://www.plosone.org/article/
info%3Adoi%2F10.1371%2Fjournal.pone.0047712#s4
filename: string filename to read
clean_param: parameter passed to Clean
Returns: map from code to Subject
"""
fp = open(filename)
reader = csv.reader(fp)
#_ = reader.next()
_ = next(reader)
subject = Subject('')
subject_map = {}
i = 0
for t in reader:
code = t[0]
if code != subject.code:
# start a new subject
subject = Subject(code)
subject_map[code] = subject
# append a number to the species names so they're unique
species = t[1]
species = '%s-%d' % (species, i)
i += 1
count = int(t[2])
subject.Add(species, count)
for code, subject in subject_map.items():
subject.Done(clean_param=clean_param)
return subject_map
def ReadCompleteDataset(filename='BBB_data_from_Rob.csv', clean_param=0):
"""Reads a data file and returns a list of Subjects.
Data from personal correspondence with <NAME>, received 2-7-13.
Converted from xlsx to csv.
filename: string filename to read
clean_param: parameter passed to Clean
Returns: map from code to Subject
"""
fp = open(filename)
reader = csv.reader(fp)
header = next(reader)
header = next(reader)
subject_codes = header[1:-1]
subject_codes = ['B'+code for code in subject_codes]
# create the subject map
uber_subject = Subject('uber')
subject_map = {}
for code in subject_codes:
subject_map[code] = Subject(code)
# read lines
i = 0
for t in reader:
otu_code = t[0]
if otu_code == '':
continue
# pull out a species name and give it a number
otu_names = t[-1]
taxons = otu_names.split(';')
species = taxons[-1]
species = '%s-%d' % (species, i)
i += 1
counts = [int(x) for x in t[1:-1]]
# print otu_code, species
for code, count in zip(subject_codes, counts):
if count > 0:
subject_map[code].Add(species, count)
uber_subject.Add(species, count)
uber_subject.Done(clean_param=clean_param)
for code, subject in subject_map.items():
subject.Done(clean_param=clean_param)
return subject_map, uber_subject
def JoinSubjects():
"""Reads both datasets and computers their inner join.
Finds all subjects that appear in both datasets.
For subjects in the rarefacted dataset, looks up the total
number of reads and stores it as total_reads. num_reads
is normally 400.
Returns: map from code to Subject
"""
# read the rarefacted dataset
sampled_subjects = ReadRarefactedData()
# read the complete dataset
all_subjects, _ = ReadCompleteDataset()
for code, subject in sampled_subjects.items():
if code in all_subjects:
match = all_subjects[code]
subject.Match(match)
return sampled_subjects
def JitterCurve(curve, dx=0.2, dy=0.3):
"""Adds random noise to the pairs in a curve.
dx and dy control the amplitude of the noise in each dimension.
"""
curve = [(x+random.uniform(-dx, dx),
y+random.uniform(-dy, dy)) for x, y in curve]
return curve
def OffsetCurve(curve, i, n, dx=0.3, dy=0.3):
"""Adds random noise to the pairs in a curve.
i is the index of the curve
n is the number of curves
dx and dy control the amplitude of the noise in each dimension.
"""
xoff = -dx + 2 * dx * i / (n-1)
yoff = -dy + 2 * dy * i / (n-1)
curve = [(x+xoff, y+yoff) for x, y in curve]
return curve
def PlotCurves(curves, root='species-rare'):
"""Plots a set of curves.
curves is a list of curves; each curve is a list of (x, y) pairs.
"""
thinkplot.Clf()
color = '#225EA8'
n = len(curves)
for i, curve in enumerate(curves):
curve = OffsetCurve(curve, i, n)
xs, ys = zip(*curve)
thinkplot.Plot(xs, ys, color=color, alpha=0.3, linewidth=0.5)
thinkplot.Save(root=root,
xlabel='# samples',
ylabel='# species',
formats=FORMATS,
legend=False)
def PlotConditionals(cdfs, root='species-cond'):
"""Plots cdfs of num_new conditioned on k.
cdfs: list of Cdf
root: string filename root
"""
thinkplot.Clf()
thinkplot.PrePlot(num=len(cdfs))
thinkplot.Cdfs(cdfs)
thinkplot.Save(root=root,
xlabel='# new species',
ylabel='Prob',
formats=FORMATS)
def PlotFracCdfs(cdfs, root='species-frac'):
"""Plots CDFs of the fraction of species seen.
cdfs: map from k to CDF of fraction of species seen after k samples
"""
thinkplot.Clf()
color = '#225EA8'
for k, cdf in cdfs.items():
xs, ys = cdf.Render()
ys = [1-y for y in ys]
thinkplot.Plot(xs, ys, color=color, linewidth=1)
x = 0.9
y = 1 - cdf.Prob(x)
pyplot.text(x, y, str(k), fontsize=9, color=color,
horizontalalignment='center',
verticalalignment='center',
bbox=dict(facecolor='white', edgecolor='none'))
thinkplot.Save(root=root,
xlabel='Fraction of species seen',
ylabel='Probability',
formats=FORMATS,
legend=False)
class Species(thinkbayes2.Suite):
"""Represents hypotheses about the number of species."""
def __init__(self, ns, conc=1, iters=1000):
hypos = [thinkbayes2.Dirichlet(n, conc) for n in ns]
thinkbayes2.Suite.__init__(self, hypos)
self.iters = iters
def Update(self, data):
"""Updates the suite based on the data.
data: list of observed frequencies
"""
# call Update in the parent class, which calls Likelihood
thinkbayes2.Suite.Update(self, data)
# update the next level of the hierarchy
for hypo in self.Values():
hypo.Update(data)
def Likelihood(self, data, hypo):
"""Computes the likelihood of the data under this hypothesis.
hypo: Dirichlet object
data: list of observed frequencies
"""
dirichlet = hypo
# draw sample Likelihoods from the hypothetical Dirichlet dist
# and add them up
like = 0
for _ in range(self.iters):
like += dirichlet.Likelihood(data)
# correct for the number of ways the observed species
# might have been chosen from all species
m = len(data)
like *= thinkbayes2.BinomialCoef(dirichlet.n, m)
return like
def DistN(self):
"""Computes the distribution of n."""
pmf = thinkbayes2.Pmf()
for hypo, prob in self.Items():
pmf.Set(hypo.n, prob)
return pmf
class Species2(object):
"""Represents hypotheses about the number of species.
Combines two layers of the hierarchy into one object.
ns and probs represent the distribution of N
params represents the parameters of the Dirichlet distributions
"""
def __init__(self, ns, conc=1, iters=1000):
self.ns = ns
self.conc = conc
self.probs = numpy.ones(len(ns), dtype=numpy.float)
self.params = numpy.ones(self.ns[-1], dtype=numpy.float) * conc
self.iters = iters
self.num_reads = 0
self.m = 0
def Preload(self, data):
"""Change the initial parameters to fit the data better.
Just an experiment. Doesn't work.
"""
m = len(data)
singletons = data.count(1)
num = m - singletons
print(m, singletons, num)
addend = numpy.ones(num, dtype=numpy.float) * 1
print(len(addend))
print(len(self.params[singletons:m]))
self.params[singletons:m] += addend
print('Preload', num)
def Update(self, data):
"""Updates the distribution based on data.
data: numpy array of counts
"""
self.num_reads += sum(data)
like = numpy.zeros(len(self.ns), dtype=numpy.float)
for _ in range(self.iters):
like += self.SampleLikelihood(data)
self.probs *= like
self.probs /= self.probs.sum()
self.m = len(data)
#self.params[:self.m] += data * self.conc
self.params[:self.m] += data
def SampleLikelihood(self, data):
"""Computes the likelihood of the data for all values of n.
Draws one sample from the distribution of prevalences.
data: sequence of observed counts
Returns: numpy array of m likelihoods
"""
gammas = numpy.random.gamma(self.params)
m = len(data)
row = gammas[:m]
col = numpy.cumsum(gammas)
log_likes = []
for n in self.ns:
ps = row / col[n-1]
terms = numpy.log(ps) * data
log_like = terms.sum()
log_likes.append(log_like)
log_likes -= numpy.max(log_likes)
likes = numpy.exp(log_likes)
coefs = [thinkbayes2.BinomialCoef(n, m) for n in self.ns]
likes *= coefs
return likes
def DistN(self):
"""Computes the distribution of n.
Returns: new Pmf object
"""
pmf = thinkbayes2.Pmf(dict(zip(self.ns, self.probs)))
return pmf
def RandomN(self):
"""Returns a random value of n."""
return self.DistN().Random()
def DistQ(self, iters=100):
"""Computes the distribution of q based on distribution of n.
Returns: pmf of q
"""
cdf_n = self.DistN().MakeCdf()
sample_n = cdf_n.Sample(iters)
pmf = thinkbayes2.Pmf()
for n in sample_n:
q = self.RandomQ(n)
pmf.Incr(q)
pmf.Normalize()
return pmf
def RandomQ(self, n):
"""Returns a random value of q.
Based on n, self.num_reads and self.conc.
n: number of species
Returns: q
"""
# generate random prevalences
dirichlet = thinkbayes2.Dirichlet(n, conc=self.conc)
prevalences = dirichlet.Random()
# generate a simulated sample
pmf = thinkbayes2.Pmf(dict(enumerate(prevalences)))
cdf = pmf.MakeCdf()
sample = cdf.Sample(self.num_reads)
seen = set(sample)
# add up the prevalence of unseen species
q = 0
for species, prev in enumerate(prevalences):
if species not in seen:
q += prev
return q
def MarginalBeta(self, n, index):
"""Computes the conditional distribution of the indicated species.
n: conditional number of species
index: which species
Returns: Beta object representing a distribution of prevalence.
"""
alpha0 = self.params[:n].sum()
alpha = self.params[index]
return thinkbayes2.Beta(alpha, alpha0-alpha)
def DistOfPrevalence(self, index):
"""Computes the distribution of prevalence for the indicated species.
index: which species
Returns: (metapmf, mix) where metapmf is a MetaPmf and mix is a Pmf
"""
metapmf = thinkbayes2.Pmf()
for n, prob in zip(self.ns, self.probs):
beta = self.MarginalBeta(n, index)
pmf = beta.MakePmf()
metapmf.Set(pmf, prob)
mix = thinkbayes2.MakeMixture(metapmf)
return metapmf, mix
def SamplePosterior(self):
"""Draws random n and prevalences.
Returns: (n, prevalences)
"""
n = self.RandomN()
prevalences = self.SamplePrevalences(n)
#print 'Peeking at n_cheat'
#n = n_cheat
return n, prevalences
def SamplePrevalences(self, n):
"""Draws a sample of prevalences given n.
n: the number of species assumed in the conditional
Returns: numpy array of n prevalences
"""
if n == 1:
return [1.0]
q_desired = self.RandomQ(n)
q_desired = max(q_desired, 1e-6)
params = self.Unbias(n, self.m, q_desired)
gammas = numpy.random.gamma(params)
gammas /= gammas.sum()
return gammas
def Unbias(self, n, m, q_desired):
"""Adjusts the parameters to achieve desired prev_unseen (q).
n: number of species
m: seen species
q_desired: prevalence of unseen species
"""
params = self.params[:n].copy()
if n == m:
return params
x = sum(params[:m])
y = sum(params[m:])
a = x + y
#print x, y, a, x/a, y/a
g = q_desired * a / y
f = (a - g * y) / x
params[:m] *= f
params[m:] *= g
return params
class Species3(Species2):
"""Represents hypotheses about the number of species."""
def Update(self, data):
"""Updates the suite based on the data.
data: list of observations
"""
# sample the likelihoods and add them up
like = numpy.zeros(len(self.ns), dtype=numpy.float)
for _ in range(self.iters):
like += self.SampleLikelihood(data)
self.probs *= like
self.probs /= self.probs.sum()
m = len(data)
self.params[:m] += data
def SampleLikelihood(self, data):
"""Computes the likelihood of the data under all hypotheses.
data: list of observations
"""
# get a random sample
gammas = numpy.random.gamma(self.params)
# row is just the first m elements of gammas
m = len(data)
row = gammas[:m]
# col is the cumulative sum of gammas
col = numpy.cumsum(gammas)[self.ns[0]-1:]
# each row of the array is a set of ps, normalized
# for each hypothetical value of n
array = row / col[:, numpy.newaxis]
# computing the multinomial PDF under a log transform
# take the log of the ps and multiply by the data
terms = numpy.log(array) * data
# add up the rows
log_likes = terms.sum(axis=1)
# before exponentiating, scale into a reasonable range
log_likes -= numpy.max(log_likes)
likes = numpy.exp(log_likes)
# correct for the number of ways we could see m species
# out of a possible n
coefs = [thinkbayes2.BinomialCoef(n, m) for n in self.ns]
likes *= coefs
return likes
class Species4(Species):
"""Represents hypotheses about the number of species."""
def Update(self, data):
"""Updates the suite based on the data.
data: list of observed frequencies
"""
m = len(data)
# loop through the species and update one at a time
for i in range(m):
one = numpy.zeros(i+1)
one[i] = data[i]
# call the parent class
Species.Update(self, one)
def Likelihood(self, data, hypo):
"""Computes the likelihood of the data under this hypothesis.
Note: this only works correctly if we update one species at a time.
hypo: Dirichlet object
data: list of observed frequencies
"""
dirichlet = hypo
like = 0
for _ in range(self.iters):
like += dirichlet.Likelihood(data)
# correct for the number of unseen species the new one
# could have been
m = len(data)
num_unseen = dirichlet.n - m + 1
like *= num_unseen
return like
class Species5(Species2):
"""Represents hypotheses about the number of species.
Combines two layers of the hierarchy into one object.
ns and probs represent the distribution of N
params represents the parameters of the Dirichlet distributions
"""
def Update(self, data):
"""Updates the suite based on the data.
data: list of observed frequencies in increasing order
"""
# loop through the species and update one at a time
m = len(data)
for i in range(m):
self.UpdateOne(i+1, data[i])
self.params[i] += data[i]
def UpdateOne(self, i, count):
"""Updates the suite based on the data.
Evaluates the likelihood for all values of n.
i: which species was observed (1..n)
count: how many were observed
"""
# how many species have we seen so far
self.m = i
# how many reads have we seen
self.num_reads += count
if self.iters == 0:
return
# sample the likelihoods and add them up
likes = numpy.zeros(len(self.ns), dtype=numpy.float)
for _ in range(self.iters):
likes += self.SampleLikelihood(i, count)
# correct for the number of unseen species the new one
# could have been
unseen_species = [n-i+1 for n in self.ns]
likes *= unseen_species
# multiply the priors by the likelihoods and renormalize
self.probs *= likes
self.probs /= self.probs.sum()
def SampleLikelihood(self, i, count):
"""Computes the likelihood of the data under all hypotheses.
i: which species was observed
count: how many were observed
"""
# get a random sample of p
gammas = numpy.random.gamma(self.params)
# sums is the cumulative sum of p, for each value of n
sums = numpy.cumsum(gammas)[self.ns[0]-1:]
# get p for the mth species, for each value of n
ps = gammas[i-1] / sums
log_likes = numpy.log(ps) * count
# before exponentiating, scale into a reasonable range
log_likes -= numpy.max(log_likes)
likes = numpy.exp(log_likes)
return likes
def MakePosterior(constructor, data, ns, conc=1, iters=1000):
"""Makes a suite, updates it and returns the posterior suite.
Prints the elapsed time.
data: observed species and their counts
ns: sequence of hypothetical ns
conc: concentration parameter
iters: how many samples to draw
Returns: posterior suite of the given type
"""
suite = constructor(ns, conc=conc, iters=iters)
# print constructor.__name__
start = time.time()
suite.Update(data)
end = time.time()
print('Processing time', end-start)
return suite
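# Minimal usage sketch (mirrors the example functions below; values are illustrative):
# build a posterior over N for observed counts [3, 2, 1] with hypotheses n = 3..29.
#
#     suite = MakePosterior(Species5, [3, 2, 1], range(3, 30), conc=1, iters=100)
#     pmf_n = suite.DistN()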
def PlotAllVersions():
"""Makes a graph of posterior distributions of N."""
data = [1, 2, 3]
m = len(data)
n = 20
ns = range(m, n)
for constructor in [Species, Species2, Species3, Species4, Species5]:
suite = MakePosterior(constructor, data, ns)
pmf = suite.DistN()
pmf.label = '%s' % (constructor.__name__)
thinkplot.Pmf(pmf)
thinkplot.Save(root='species3',
xlabel='Number of species',
ylabel='Prob')
def PlotMedium():
"""Makes a graph of posterior distributions of N."""
data = [1, 1, 1, 1, 2, 3, 5, 9]
m = len(data)
n = 20
ns = range(m, n)
for constructor in [Species, Species2, Species3, Species4, Species5]:
suite = MakePosterior(constructor, data, ns)
pmf = suite.DistN()
pmf.label = '%s' % (constructor.__name__)
thinkplot.Pmf(pmf)
thinkplot.Show()
def SimpleDirichletExample():
"""Makes a plot showing posterior distributions for three species.
This is the case where we know there are exactly three species.
"""
thinkplot.Clf()
thinkplot.PrePlot(3)
names = ['lions', 'tigers', 'bears']
data = [3, 2, 1]
dirichlet = thinkbayes2.Dirichlet(3)
for i in range(3):
beta = dirichlet.MarginalBeta(i)
print('mean', names[i], beta.Mean())
dirichlet.Update(data)
for i in range(3):
beta = dirichlet.MarginalBeta(i)
print('mean', names[i], beta.Mean())
pmf = beta.MakePmf(label=names[i])
thinkplot.Pmf(pmf)
thinkplot.Save(root='species1',
xlabel='Prevalence',
ylabel='Prob',
formats=FORMATS,
)
def HierarchicalExample():
"""Shows the posterior distribution of n for lions, tigers and bears.
"""
ns = range(3, 30)
suite = Species(ns, iters=8000)
data = [3, 2, 1]
suite.Update(data)
thinkplot.Clf()
thinkplot.PrePlot(num=1)
pmf = suite.DistN()
thinkplot.Pdf(pmf)
thinkplot.Save(root='species2',
xlabel='Number of species',
ylabel='Prob',
formats=FORMATS,
)
def CompareHierarchicalExample():
"""Makes a graph of posterior distributions of N."""
data = [3, 2, 1]
m = len(data)
n = 30
ns = range(m, n)
constructors = [Species, Species5]
iters = [1000, 100]
for constructor, iters in zip(constructors, iters):
        suite = MakePosterior(constructor, data, ns, iters=iters)
pmf = suite.DistN()
pmf.label = '%s' % (constructor.__name__)
thinkplot.Pmf(pmf)
thinkplot.Show()
def ProcessSubjects(codes):
"""Process subjects with the given codes and plot their posteriors.
code: sequence of string codes
"""
thinkplot.Clf()
thinkplot.PrePlot(len(codes))
subjects = ReadRarefactedData()
pmfs = []
for code in codes:
subject = subjects[code]
subject.Process()
pmf = subject.suite.DistN()
pmf.label = subject.code
thinkplot.Pmf(pmf)
pmfs.append(pmf)
print('ProbGreater', thinkbayes2.PmfProbGreater(pmfs[0], pmfs[1]))
print('ProbLess', thinkbayes2.PmfProbLess(pmfs[0], pmfs[1]))
thinkplot.Save(root='species4',
xlabel='Number of species',
ylabel='Prob',
formats=FORMATS,
)
def RunSubject(code, conc=1, high=500):
"""Run the analysis for the subject with the given code.
code: string code
"""
subjects = JoinSubjects()
subject = subjects[code]
subject.Process(conc=conc, high=high, iters=300)
subject.MakeQuickPrediction()
PrintSummary(subject)
actual_l = subject.total_species - subject.num_species
cdf_l = subject.DistL().MakeCdf()
PrintPrediction(cdf_l, actual_l)
subject.MakeFigures()
num_reads = 400
curves = subject.RunSimulations(100, num_reads)
root = 'species-rare-%s' % subject.code
PlotCurves(curves, root=root)
num_reads = 800
curves = subject.RunSimulations(500, num_reads)
ks = [100, 200, 400, 800]
cdfs = MakeConditionals(curves, ks)
root = 'species-cond-%s' % subject.code
PlotConditionals(cdfs, root=root)
num_reads = 1000
curves = subject.RunSimulations(500, num_reads, frac_flag=True)
ks = [10, 100, 200, 400, 600, 800, 1000]
cdfs = MakeFracCdfs(curves, ks)
root = 'species-frac-%s' % subject.code
PlotFracCdfs(cdfs, root=root)
def PrintSummary(subject):
"""Print a summary of a subject.
subject: Subject
"""
print(subject.code)
print('found %d species in %d reads' % (subject.num_species,
subject.num_reads))
print('total %d species in %d reads' % (subject.total_species,
subject.total_reads))
cdf = subject.suite.DistN().MakeCdf()
print('n')
PrintPrediction(cdf, 'unknown')
def PrintPrediction(cdf, actual):
"""Print a summary of a prediction.
cdf: predictive distribution
actual: actual value
"""
median = cdf.Percentile(50)
low, high = cdf.CredibleInterval(75)
print('predicted %0.2f (%0.2f %0.2f)' % (median, low, high))
print('actual', actual)
def RandomSeed(x):
"""Initialize random.random and numpy.random.
x: int seed
"""
random.seed(x)
numpy.random.seed(x)
def GenerateFakeSample(n, r, tr, conc=1):
"""Generates fake data with the given parameters.
n: number of species
r: number of reads in subsample
tr: total number of reads
conc: concentration parameter
Returns: hist of all reads, hist of subsample, prev_unseen
"""
# generate random prevalences
dirichlet = thinkbayes2.Dirichlet(n, conc=conc)
prevalences = dirichlet.Random()
prevalences.sort()
# generate a simulated sample
pmf = thinkbayes2.Pmf(dict(enumerate(prevalences)))
cdf = pmf.MakeCdf()
sample = cdf.Sample(tr)
# collect the species counts
hist = thinkbayes2.Hist(sample)
# extract a subset of the data
if tr > r:
random.shuffle(sample)
subsample = sample[:r]
subhist = thinkbayes2.Hist(subsample)
else:
subhist = hist
# add up the prevalence of unseen species
prev_unseen = 0
for species, prev in enumerate(prevalences):
if species not in subhist:
prev_unseen += prev
return hist, subhist, prev_unseen
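# Usage sketch (parameter values are illustrative, not taken from the calibration
# runs): simulate 100 species, draw a 400-read subsample out of 1200 total reads
# with concentration 0.1, and inspect how much prevalence the subsample missed.
#
#     hist, subhist, prev_unseen = GenerateFakeSample(n=100, r=400, tr=1200, conc=0.1)
#     print(len(hist), len(subhist), prev_unseen)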
def PlotActualPrevalences():
"""Makes a plot comparing actual prevalences with a model.
"""
# read data
subject_map, _ = ReadCompleteDataset()
# for subjects with more than 50 species,
# PMF of max prevalence, and PMF of max prevalence
# generated by a simulation
pmf_actual = thinkbayes2.Pmf()
pmf_sim = thinkbayes2.Pmf()
# concentration parameter used in the simulation
conc = 0.06
for code, subject in subject_map.items():
prevalences = subject.GetPrevalences()
m = len(prevalences)
if m < 2:
continue
actual_max = max(prevalences)
print(code, m, actual_max)
# incr the PMFs
if m > 50:
pmf_actual.Incr(actual_max)
pmf_sim.Incr(SimulateMaxPrev(m, conc))
# plot CDFs for the actual and simulated max prevalence
cdf_actual = pmf_actual.MakeCdf(label='actual')
cdf_sim = pmf_sim.MakeCdf(label='sim')
thinkplot.Cdfs([cdf_actual, cdf_sim])
thinkplot.Show()
def ScatterPrevalences(ms, actual):
"""Make a scatter plot of actual prevalences and expected values.
ms: sorted sequence of in m (number of species)
actual: sequence of actual max prevalence
"""
for conc in [1, 0.5, 0.2, 0.1]:
expected = [ExpectedMaxPrev(m, conc) for m in ms]
thinkplot.Plot(ms, expected)
thinkplot.Scatter(ms, actual)
thinkplot.Show(xscale='log')
def SimulateMaxPrev(m, conc=1):
"""Returns random max prevalence from a Dirichlet distribution.
m: int number of species
conc: concentration parameter of the Dirichlet distribution
Returns: float max of m prevalences
"""
dirichlet = thinkbayes2.Dirichlet(m, conc)
prevalences = dirichlet.Random()
return max(prevalences)
def ExpectedMaxPrev(m, conc=1, iters=100):
"""Estimate expected max prevalence.
m: number of species
conc: concentration parameter
iters: how many iterations to run
Returns: expected max prevalence
"""
dirichlet = thinkbayes2.Dirichlet(m, conc)
t = []
for _ in range(iters):
prevalences = dirichlet.Random()
t.append(max(prevalences))
return numpy.mean(t)
class Calibrator(object):
"""Encapsulates the calibration process."""
def __init__(self, conc=0.1):
"""
"""
self.conc = conc
self.ps = range(10, 100, 10)
self.total_n = numpy.zeros(len(self.ps))
self.total_q = numpy.zeros(len(self.ps))
self.total_l = numpy.zeros(len(self.ps))
self.n_seq = []
self.q_seq = []
self.l_seq = []
def Calibrate(self, num_runs=100, n_low=30, n_high=400, r=400, tr=1200):
"""Runs calibrations.
num_runs: how many runs
"""
for seed in range(num_runs):
self.RunCalibration(seed, n_low, n_high, r, tr)
self.total_n *= 100.0 / num_runs
self.total_q *= 100.0 / num_runs
self.total_l *= 100.0 / num_runs
def Validate(self, num_runs=100, clean_param=0):
"""Runs validations.
num_runs: how many runs
"""
subject_map, _ = ReadCompleteDataset(clean_param=clean_param)
i = 0
        for match in subject_map.values():
if match.num_reads < 400:
continue
num_reads = 100
print('Validate', match.code)
subject = match.Resample(num_reads)
subject.Match(match)
n_actual = None
q_actual = subject.prev_unseen
l_actual = subject.total_species - subject.num_species
self.RunSubject(subject, n_actual, q_actual, l_actual)
i += 1
if i == num_runs:
break
self.total_n *= 100.0 / num_runs
self.total_q *= 100.0 / num_runs
self.total_l *= 100.0 / num_runs
def PlotN(self, root='species-n'):
"""Makes a scatter plot of simulated vs actual prev_unseen (q).
"""
xs, ys = zip(*self.n_seq)
if None in xs:
return
high = max(xs+ys)
thinkplot.Plot([0, high], [0, high], color='gray')
thinkplot.Scatter(xs, ys)
thinkplot.Save(root=root,
xlabel='Actual n',
ylabel='Predicted')
def PlotQ(self, root='species-q'):
"""Makes a scatter plot of simulated vs actual prev_unseen (q).
"""
thinkplot.Plot([0, 0.2], [0, 0.2], color='gray')
xs, ys = zip(*self.q_seq)
thinkplot.Scatter(xs, ys)
thinkplot.Save(root=root,
xlabel='Actual q',
ylabel='Predicted')
def PlotL(self, root='species-n'):
"""Makes a scatter plot of simulated vs actual l.
"""
thinkplot.Plot([0, 20], [0, 20], color='gray')
xs, ys = zip(*self.l_seq)
thinkplot.Scatter(xs, ys)
thinkplot.Save(root=root,
xlabel='Actual l',
ylabel='Predicted')
def PlotCalibrationCurves(self, root='species5'):
"""Plots calibration curves"""
print(self.total_n)
print(self.total_q)
print(self.total_l)
thinkplot.Plot([0, 100], [0, 100], color='gray', alpha=0.2)
if self.total_n[0] >= 0:
thinkplot.Plot(self.ps, self.total_n, label='n')
thinkplot.Plot(self.ps, self.total_q, label='q')
thinkplot.Plot(self.ps, self.total_l, label='l')
thinkplot.Save(root=root,
axis=[0, 100, 0, 100],
xlabel='Ideal percentages',
ylabel='Predictive distributions',
formats=FORMATS,
)
def RunCalibration(self, seed, n_low, n_high, r, tr):
"""Runs a single calibration run.
Generates N and prevalences from a Dirichlet distribution,
then generates simulated data.
Runs analysis to get the posterior distributions.
Generates calibration curves for each posterior distribution.
seed: int random seed
"""
# generate a random number of species and their prevalences
# (from a Dirichlet distribution with alpha_i = conc for all i)
RandomSeed(seed)
n_actual = random.randrange(n_low, n_high+1)
hist, subhist, q_actual = GenerateFakeSample(
n_actual,
r,
tr,
self.conc)
l_actual = len(hist) - len(subhist)
print('Run low, high, conc', n_low, n_high, self.conc)
print('Run r, tr', r, tr)
print('Run n, q, l', n_actual, q_actual, l_actual)
# extract the data
data = [count for species, count in subhist.Items()]
data.sort()
print('data', data)
# make a Subject and process
subject = Subject('simulated')
subject.num_reads = r
subject.total_reads = tr
for species, count in subhist.Items():
subject.Add(species, count)
subject.Done()
self.RunSubject(subject, n_actual, q_actual, l_actual)
def RunSubject(self, subject, n_actual, q_actual, l_actual):
"""Runs the analysis for a subject.
subject: Subject
n_actual: number of species
q_actual: prevalence of unseen species
l_actual: number of new species
"""
# process and make prediction
subject.Process(conc=self.conc, iters=100)
subject.MakeQuickPrediction()
# extract the posterior suite
suite = subject.suite
# check the distribution of n
pmf_n = suite.DistN()
print('n')
self.total_n += self.CheckDistribution(pmf_n, n_actual, self.n_seq)
# check the distribution of q
pmf_q = suite.DistQ()
print('q')
self.total_q += self.CheckDistribution(pmf_q, q_actual, self.q_seq)
# check the distribution of additional species
pmf_l = subject.DistL()
print('l')
self.total_l += self.CheckDistribution(pmf_l, l_actual, self.l_seq)
def CheckDistribution(self, pmf, actual, seq):
"""Checks a predictive distribution and returns a score vector.
pmf: predictive distribution
actual: actual value
seq: which sequence to append (actual, mean) onto
"""
mean = pmf.Mean()
seq.append((actual, mean))
cdf = pmf.MakeCdf()
PrintPrediction(cdf, actual)
sv = ScoreVector(cdf, self.ps, actual)
return sv
def ScoreVector(cdf, ps, actual):
"""Checks whether the actual value falls in each credible interval.
cdf: predictive distribution
ps: percentages to check (0-100)
actual: actual value
Returns: numpy array of 0, 0.5, or 1
"""
scores = []
for p in ps:
low, high = cdf.CredibleInterval(p)
score = Score(low, high, actual)
scores.append(score)
return numpy.array(scores)
def Score(low, high, n):
"""Score whether the actual value falls in the range.
Hitting the posts counts as 0.5, -1 is invalid.
low: low end of range
high: high end of range
n: actual value
Returns: -1, 0, 0.5 or 1
"""
if n is None:
return -1
if low < n < high:
return 1
if n == low or n == high:
return 0.5
else:
return 0
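# Illustrative behavior of Score (values chosen for the example): Score(10, 20, 15)
# returns 1 because the actual value falls strictly inside the interval,
# Score(10, 20, 20) returns 0.5 because it hits a post, Score(10, 20, 25) returns 0,
# and Score(10, 20, None) returns -1.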
def FakeSubject(n=300, conc=0.1, num_reads=400, prevalences=None):
"""Makes a fake Subject.
If prevalences is provided, n and conc are ignored.
n: number of species
conc: concentration parameter
num_reads: number of reads
prevalences: numpy array of prevalences (overrides n and conc)
"""
# generate random prevalences
if prevalences is None:
dirichlet = thinkbayes2.Dirichlet(n, conc=conc)
prevalences = dirichlet.Random()
prevalences.sort()
# generate a simulated sample
pmf = thinkbayes2.Pmf(dict(enumerate(prevalences)))
cdf = pmf.MakeCdf()
sample = cdf.Sample(num_reads)
# collect the species counts
hist = thinkbayes2.Hist(sample)
# extract the data
data = [count for species, count in hist.Items()]
data.sort()
# make a Subject and process
subject = Subject('simulated')
for species, count in hist.Items():
subject.Add(species, count)
subject.Done()
return subject
def PlotSubjectCdf(code=None, clean_param=0):
"""Checks whether the Dirichlet model can replicate the data.
"""
subject_map, uber_subject = ReadCompleteDataset(clean_param=clean_param)
if code is None:
        subjects = list(subject_map.values())
subject = random.choice(subjects)
code = subject.code
elif code == 'uber':
subject = uber_subject
else:
subject = subject_map[code]
print(subject.code)
m = subject.GetM()
subject.Process(high=m, conc=0.1, iters=0)
print(subject.suite.params[:m])
# plot the cdf
options = dict(linewidth=3, color='blue', alpha=0.5)
cdf = subject.MakeCdf()
thinkplot.Cdf(cdf, **options)
options = dict(linewidth=1, color='green', alpha=0.5)
# generate fake subjects and plot their CDFs
for _ in range(10):
prevalences = subject.suite.SamplePrevalences(m)
fake = FakeSubject(prevalences=prevalences)
cdf = fake.MakeCdf()
thinkplot.Cdf(cdf, **options)
root = 'species-cdf-%s' % code
thinkplot.Save(root=root,
xlabel='rank',
ylabel='CDF',
xscale='log',
formats=FORMATS,
)
def RunCalibration(flag='cal', num_runs=100, clean_param=50):
"""Runs either the calibration or validation process.
flag: string 'cal' or 'val'
num_runs: how many runs
clean_param: parameter used for data cleaning
"""
cal = Calibrator(conc=0.1)
if flag == 'val':
cal.Validate(num_runs=num_runs, clean_param=clean_param)
else:
cal.Calibrate(num_runs=num_runs)
cal.PlotN(root='species-n-%s' % flag)
cal.PlotQ(root='species-q-%s' % flag)
cal.PlotL(root='species-l-%s' % flag)
cal.PlotCalibrationCurves(root='species5-%s' % flag)
def RunTests():
"""Runs calibration code and generates some figures."""
RunCalibration(flag='val')
RunCalibration(flag='cal')
PlotSubjectCdf('B1558.G', clean_param=50)
PlotSubjectCdf(None)
def main(script):
RandomSeed(17)
RunSubject('B1242', conc=1, high=100)
RandomSeed(17)
SimpleDirichletExample()
RandomSeed(17)
HierarchicalExample()
if __name__ == '__main__':
main(*sys.argv)
|
StarcoderdataPython
|
6586687
|
from re import M
from kivymd.uix.button import MDRectangleFlatIconButton
from kivymd.uix.boxlayout import MDBoxLayout
class ButtonGeneric(MDBoxLayout):
def __init__(self, widget, *args, **kwargs):
super(ButtonGeneric, self).__init__(*args, **kwargs)
self.md_bg_color = widget['cores']['background']
self.widget = widget
self.size_hint = widget['size']
self.pos_hint = widget['pos']
def __call__(self):
btn = MDRectangleFlatIconButton(text=self.widget['tag'],
icon=self.widget['icon'],
theme_text_color='Custom',
text_color=self.widget['cores']['line'],
icon_color = self.widget['cores']['line'],
line_color = self.widget['cores']['line'])
self.add_widget(btn)
return self
def metodo(self,*args):
print('Button generic: ', args)
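# Usage sketch (the widget dict is hypothetical; it only shows the keys that
# ButtonGeneric actually reads):
#
#     widget = {
#         'tag': 'Save',
#         'icon': 'content-save',
#         'size': (0.3, 0.1),
#         'pos': {'center_x': 0.5, 'center_y': 0.5},
#         'cores': {'background': (1, 1, 1, 1), 'line': (0, 0, 0, 1)},
#     }
#     layout = ButtonGeneric(widget)()  # __call__ builds and attaches the button, then returns the layout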
|
StarcoderdataPython
|
6659631
|
#!/usr/bin/python3
import os
import time
import pickle
import configparser
import shutil
from time import localtime, strftime
from subprocess import call
from optparse import OptionParser
parser = OptionParser()
parser.add_option("--config_file", dest="config_file")
(options, args) = parser.parse_args()
config_file = options.config_file
config_parser = configparser.ConfigParser()
config_parser.read(config_file)
model_name = config_parser['MODEL']['ModelName']
basename = config_parser['PATHS']['BaseName']
nb_iterations = int(config_parser['MODEL']['NumberOfIterations'])
if os.path.exists(basename):
basename = basename
config_file = os.path.join(basename, config_file)
else:
basename = strftime("%Y_%m_%d_%H%M%S_", localtime()) + model_name
os.makedirs(basename)
# copy configuration file
shutil.copyfile(config_file, os.path.join(basename, config_file))
config_file = os.path.join(basename, config_file)
weight_file_path = os.path.join(basename, "weights.h5")
history_file_path = os.path.join(basename, "history.pkl")
tmp_history_file_path = os.path.join(basename, "history_tmp.pkl")
lock_file = os.path.join(basename, "file.lock")
# This was designed to run in a queue system. If this is not what you want to do
# simply comment out the five first elements in the qsub_args list, and just run
# the script directly on the GPU/CPU.
qsub_args = [
# "qsub",
# "-cwd",
# "-l", "gpu=1",
# "-e", os.path.join(basename, "stderr.error"),
# "-o", os.path.join(basename, "stdout.log"),
"./run_job.sh",
weight_file_path,
tmp_history_file_path,
lock_file,
config_file
]
def train():
print("#############################")
print("# Training Settings")
print("#############################")
print("Model : ", model_name)
print("Weight path : ", weight_file_path)
print("History path : ", history_file_path)
train_loss = []
valid_loss = []
train_acc = []
valid_acc = []
# if exists means we are restarting a crashed training
if os.path.isfile(history_file_path):
print("Loading previous history data...")
with open(history_file_path, 'rb') as input:
train_loss = pickle.load(input)
valid_loss = pickle.load(input)
train_acc = pickle.load(input)
valid_acc = pickle.load(input)
for i in range(nb_iterations):
# create lock file
print("Creating lock file: ", lock_file)
open(lock_file, 'a').close()
# submit job, train once
print("Submitting Job ", str(i), "/", str(nb_iterations))
if not i == 0:
call(qsub_args + ['False'])
else:
call(qsub_args + ['True'])
# block until job is finished
while os.path.exists(lock_file):
time.sleep(5)
print("Job " + str(i) + " is done.")
# load all history data and append
print("Loading temporary history data...")
with open(tmp_history_file_path, 'rb') as input:
train_loss = train_loss + pickle.load(input)
valid_loss = valid_loss + pickle.load(input)
train_acc = train_acc + pickle.load(input)
valid_acc = valid_acc + pickle.load(input)
# save all collected history data
print("Save all collected history data...")
with open(history_file_path, 'wb') as output:
pickle.dump(train_loss, output, pickle.HIGHEST_PROTOCOL)
pickle.dump(valid_loss, output, pickle.HIGHEST_PROTOCOL)
pickle.dump(train_acc, output, pickle.HIGHEST_PROTOCOL)
pickle.dump(valid_acc, output, pickle.HIGHEST_PROTOCOL)
train()
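# Invocation sketch (config.ini is a hypothetical path): the script expects a
# config file with [MODEL] and [PATHS] sections and resubmits run_job.sh once
# per iteration until NumberOfIterations is reached.
#
#     python3 <this script> --config_file config.ini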
|
StarcoderdataPython
|
6583059
|
<filename>tools/nrtest-swmm/nrtest_swmm/output_reader.py
# -*- coding: utf-8 -*-
#
# output_reader.py
#
# Date Created: 11/14/2017
#
# Author: <NAME>
# US EPA - ORD/NRMRL
#
'''
The module output_reader provides the class used to implement the output
generator.
'''
import sys
# project import
import swmm_output as oapi
def output_generator(path_ref):
'''
The output_generator is designed to iterate over a swmm binary file and
yield element results. It is useful for comparing contents of binary files
for numerical regression testing.
The generator yields a numpy array containing the SWMM element result.
Arguments:
path_ref - path to result file
Raises:
SWMM_OutputReaderError()
...
'''
with OutputReader(path_ref) as sor:
for period_index in range(0, sor.report_periods()):
for element_type in oapi.ElementType:
for element_index in range(0, sor.element_count(element_type)):
yield (sor.element_result(element_type, period_index, element_index),
(element_type, period_index, element_index))
class OutputReader():
'''
Provides minimal interface needed to implement the SWMM output generator.
'''
def __init__(self, filename):
self.filepath = filename
self.handle = None
self.count = None
self.get_element_result = {oapi.ElementType.SUBCATCH: oapi.smo_get_subcatch_result,
oapi.ElementType.NODE: oapi.smo_get_node_result,
oapi.ElementType.LINK: oapi.smo_get_link_result,
oapi.ElementType.SYSTEM: oapi.smo_get_system_result}
def __enter__(self):
self.handle = oapi.smo_init()
if sys.version_info < (3,0):
file_path = self.filepath.encode()
else:
file_path = self.filepath
oapi.smo_open(self.handle, file_path)
self.count = oapi.smo_get_project_size(self.handle)
return self
def __exit__(self, type, value, traceback):
self.handle = oapi.smo_close()
def report_periods(self):
return oapi.smo_get_times(self.handle, oapi.Time.NUM_PERIODS)
def element_count(self, element_type):
return self.count[element_type.value]
def element_result(self, element_type, time_index, element_index):
return self.get_element_result[element_type](self.handle, time_index, element_index)
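# Usage sketch (the .out path is hypothetical): iterate over every element result
# in a SWMM binary output file and count how many values were read.
#
#     total = 0
#     for value, (element_type, period, index) in output_generator('example.out'):
#         total += 1
#     print(total)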
|
StarcoderdataPython
|
5009437
|
<reponame>contradict/Stomp
import serial
import sys
from binascii import hexlify
import threading
def monitor(portname, outfile=sys.stdout, stop_evt=None):
data = b""
extra = b""
with serial.Serial(portname, 115200, timeout=0.01) as sp:
while True if stop_evt is None else not stop_evt.wait(0):
data += sp.read()
while len(data) > 8:
if(data[0] == 0x24 and
data[1] == 0x43 and
data[5] == 0x23):
if len(extra):
outfile.write("extra: %s " % hexlify(extra))
extra = b""
outfile.write(
"cmd: 0x%02x = 0x%04x\n" % (data[2], (data[3] << 8) + data[4]))
data = data[8:]
elif (data[0] == 0x2b and
data[3] == 0x23):
if len(extra):
outfile.write("extra: %s" % hexlify(extra))
extra = b""
outfile.write("rsp: 0x%04x\n" % ((data[1] << 8) + data[2]))
data = data[6:]
else:
extra += data[0:1]
data = data[1:]
def monitorfile(portname, filename, stop):
with open(filename, "w") as f:
monitor(portname, f, stop)
def monitorall(portnames=["/dev/ttyUSB0", "/dev/ttyUSB1", "/dev/ttyUSB2"],
filenames=["curl.txt", "lift.txt", "swing.txt"]):
ts = []
stop = threading.Event()
for pn, fn in zip(portnames, filenames):
ts.append(threading.Thread(target=monitorfile, args=(pn, fn, stop)))
for t in ts:
t.start()
def stopfn():
stop.set()
for t in ts:
t.join()
return stopfn
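# Usage sketch (uses the default ports and file names above): start one monitor
# thread per serial port, then call the returned function to stop them all.
#
#     stop = monitorall()
#     # ... let the monitors log for a while ...
#     stop()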
|
StarcoderdataPython
|
12830824
|
import datetime as _dt
import pydantic
from pydantic import Field
from uuid import UUID
from typing import List, Optional
from validators import email
from bigfastapi.schemas.organisation_schemas import _OrganizationBase
import bigfastapi.schemas.users_schemas as UserSchema
from datetime import date
from pydantic import BaseModel
class SettingsBase(BaseModel):
email: str
location : str
phone_number : Optional[str] = None
organization_size : Optional[str] = None
organization_type : Optional[str] = None
country : Optional[str] = None
state : Optional[str] = None
city : Optional[str] = None
zip_code : Optional[int] = None
class SettingsUpdate(SettingsBase):
pass
class Settings(SettingsBase):
pass
class Config:
orm_mode = True
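# Usage sketch (field values are made up): the schemas validate plain keyword
# arguments, and orm_mode = True allows building models from ORM rows.
#
#     payload = SettingsBase(email='owner@example.com', location='Lagos',
#                            organization_size='1-10', zip_code=100001)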
|
StarcoderdataPython
|
5016318
|
<filename>Python/PythonExercicios/ex082.py
# Python Exercise 082:
# Write a program that reads several numbers and puts them into a list.
# Then create two extra lists holding only the even and the odd values entered, respectively.
# Finally, show the contents of the three generated lists.
print('\033[1;33m====== EX 082 ======\033[m')
lista = list()
listapar = list()
listaimpar = list()
while True:
    lista.append(int(input('Digite um valor: ')))
op = str(input('Quer continuar? [S/N] ')).upper().strip()
if 'N' in op:
break
for c in lista:
if c % 2 == 0:
listapar.append(c)
else:
listaimpar.append(c)
print(f'A lista completa é {lista}')
print(f'A lista dos pares é {listapar}')
print(f'A lista dos ímpares é {listaimpar}')
|
StarcoderdataPython
|
1888774
|
# stdlib
import ast
from collections import OrderedDict
import copy
import inspect
from itertools import islice
import os
from pathlib import Path
import sys
from typing import Any
from typing import Callable
from typing import Dict
from typing import Iterator
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
# third party
import torch
# syft absolute
import syft as sy
from syft.core.node.common.action.save_object_action import SaveObjectAction
from syft.core.plan.plan_builder import ROOT_CLIENT
from syft.core.plan.plan_builder import make_plan
from syft.lib.python import _SyNone
# syft relative
from ...core.pointer.pointer import Pointer
from ...generate_wrapper import GenerateWrapper
from ...lib.util import full_name_with_qualname
from ...logger import critical
from ...logger import info
from ...logger import traceback_and_raise
from ...proto.lib.torch.module_pb2 import Module as Module_PB
from ..python.collections import OrderedDict as SyOrderedDict
from ..python.util import downcast
# from ...core.node.common.service.auth import AuthorizationException
def repr_to_kwargs(repr_str: str) -> Tuple[List[Any], Dict[Any, Any]]:
# for example: repr_str = Conv2d(...).extra_repr()
# produces: > str("1, 32, kernel_size=(3, 3), stride=(1, 1)")
# then we just have to split it into args and kwargs
# using ast.literal_eval we can use python to give us the real primitive types
# from the strings in a safe way
# str("1 ") becomes int(1)
# str("(1, 2) ") becomes tuple(1, 2)
args: List[Any] = []
kwargs: Dict[Any, Any] = {}
parts = repr_str.split(",")
# tuples are split by commas as well, so we will keep a tab on open parentheses
# then concat with "," until we find a close parentheses
# TODO: make work nested with a count and add tests
para_open = False
buffer = ""
for part in parts:
try:
if "(" in part:
para_open = True
buffer = ""
if para_open is True:
buffer += part + ","
if ")" in part:
# remove trailing ,
part = buffer[:-1]
buffer = ""
para_open = False
else:
continue
string = part.strip()
if "=" not in string:
# its an arg
arg = ast.literal_eval(string)
args.append(arg)
else:
# its a kwarg
kv = string.split("=")
key = str(kv[0])
string = kv[1].strip()
value = ast.literal_eval(string)
kwargs[key.strip()] = value
except Exception as e:
info(f"ast.literal_eval failed to parse part: {string}. {e}")
return (args, kwargs)
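# Illustrative example (matches the comment at the top of repr_to_kwargs): the
# extra_repr string of a Conv2d layer parses into positional and keyword args.
#
#     repr_to_kwargs("1, 32, kernel_size=(3, 3), stride=(1, 1)")
#     # -> ([1, 32], {'kernel_size': (3, 3), 'stride': (1, 1)})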
class Module:
"""
This is our equivalent of torch.nn.Module and aims to have the same external
interface. We will need to support both torch Modules and Module Pointers.
"""
def __init__(self, torch_ref: Any) -> None:
self.setup(torch_ref=torch_ref)
def setup(self, torch_ref: Any) -> None:
# the remote torch means the model is remote
self.remote_model: Optional["Module"] = None
self.local_model: Optional["Module"] = None
self.duet = None
if "syft" in full_name_with_qualname(klass=type(torch_ref)):
info("> Creating remote model")
self.is_local = False
else:
# otherwise we have a local model
info("> Creating local model")
self.is_local = True
self.torch_ref = torch_ref
self.training = False
self._modules: OrderedDict[str, Module] = OrderedDict()
real_module = torch_ref.nn.Module()
self.__dict__["real_module"] = real_module # bypass getattr/setattr
# if issubclass(type(real_module), Pointer):
# try:
# # TODO: this needs fixing but should be on by default for now
# # https://github.com/OpenMined/PySyft/issues/5242
# real_module.searchable = True
# except AuthorizationException as e:
# print(f"Cant make real_module searchable. {e}")
def __setattr__(self, name: str, value: Union[Any, "Module"]) -> None:
# this is how we catch the modules being set during subclass init
# bug where torch.nn.modules isn't the full name on some imports
# TODO: fix this properly
# third party
import torch
if "torch.nn" in full_name_with_qualname(klass=type(value)) or isinstance(
value, torch.nn.Module
):
modules = self.__dict__.get("_modules")
if modules is not None:
modules[name] = value
# attach all the sub modules to a real module so that we can have a
# remote module pointer that acts like a real model
real_module: Optional[OrderedDict] = self.__dict__.get("real_module")
if real_module is not None:
real_module.add_module(name, value) # type: ignore
else:
object.__setattr__(self, name, value)
def __getattr__(self, name: str) -> Union[Any, "Module"]:
modules: Optional[OrderedDict] = self.__dict__.get("_modules")
if modules is not None:
if name in modules:
return modules[name]
return object.__getattribute__(self, name)
def train(self, mode: bool = True) -> "Module":
self.training = mode
for _, module in self.modules.items():
module.train(mode)
return self
def eval(self) -> "Module":
return self.train(False)
def __call__(
self, *args: Union[List[Any], Tuple[Any, ...]], **kwargs: Dict[Any, Any]
) -> Any:
return self.forward(*args, **kwargs)
@property
def modules(self) -> OrderedDict:
modules = self.__dict__.get("_modules")
if modules is not None:
return modules
return OrderedDict()
# local list of remote ListPointers of TensorPointers
def parameters(self, recurse: bool = True) -> Optional[List[Any]]:
params_list: Optional[List[Any]] = None
if self.is_local is True:
# we are local so use normal torch params
params_list = []
for _, module in self.modules.items():
params = module.parameters(recurse)
if params_list is None:
# only on remote create a remote list so we can concat the param list
# pointers without having to actually get them
self.duet = params.client
params_list = self.duet.syft.lib.python.List() # type: ignore
# either way lets concat them until we have a big list of parameters
params_list += params
return params_list
def cuda(self, device: Any) -> "Module":
for _, module in self.modules.items():
module.cuda(device)
return self
def cpu(self) -> "Module":
for _, module in self.modules.items():
module.cpu()
return self
def load_state_dict(self, input: Union[str, os.PathLike, Dict[str, Any]]) -> None:
if not self.is_local:
info("> This model is remote so try calling .get()")
return None
state_dict = {}
if isinstance(input, (str, os.PathLike)):
with open(Path(input), "rb") as f:
state_dict = torch.load(f)
else:
state_dict = dict(input)
if not issubclass(type(state_dict), dict):
traceback_and_raise(
f" Invalid input: {type(input)}. "
+ "Try inputting a state_dict or .pth file."
)
info("> Loading model weights")
layers: Dict[str, Any] = {}
for save_key, values in state_dict.items():
parts = save_key.split(".")
if len(parts) < 2:
info(f" state dict key is too short: {save_key}")
continue
layer = parts[0]
attr = parts[1]
if layer not in layers:
layers[layer] = {}
layers[layer][attr] = values
for layer, sd in layers.items():
local_layer = getattr(self, layer, None)
if local_layer is not None and hasattr(local_layer, "load_state_dict"):
d = local_layer.load_state_dict(sd)
info(f" {layer} state dict loaded with: {d}")
else:
info(f" Model doesnt have layer {layer}")
info("> Finished loading weights")
return None
def state_dict(self) -> Optional[Dict[str, Any]]:
if not self.is_local:
info("> This model is remote so try calling .get()")
return None
info("> Saving model weights")
model_state_dict = OrderedDict()
for name, module in self.modules.items():
if hasattr(module, "state_dict"):
for k, v in module.state_dict().items():
save_key = f"{name}.{k}"
model_state_dict[save_key] = v
info("> Finished saving weights")
return model_state_dict
def save(self, path: Union[str, bytes, os.PathLike]) -> None:
if not self.is_local:
info("> This model is remote so try calling .get()")
return
state_dict = self.state_dict()
torch.save(state_dict, path)
def load(self, path: Union[str, os.PathLike]) -> None:
if not self.is_local:
info("> This model is remote so try calling .get()")
return
self.load_state_dict(input=path)
def send(self, client: Any, send_parameters: bool = True) -> Any:
if not self.is_local:
info("> This model is remote so try calling .get()")
return
info("> Sending local model")
remote_model = copy.copy(self)
remote_model.setup(torch_ref=client.torch)
remote_model.duet = client
for name, module in self.modules.items():
fqn = full_name_with_qualname(klass=type(module))
klass = client.lib_ast.query(fqn, obj_type=type(module))
module_repr = module.extra_repr()
args, kwargs = repr_to_kwargs(repr_str=module_repr)
remote_module_ptr = klass(*args, **kwargs)
remote_model.__setattr__(name, remote_module_ptr)
# if the remote module has state_dict lets get it
if (
send_parameters
and hasattr(module, "state_dict")
and hasattr(remote_module_ptr, "load_state_dict")
):
local_state_ord_dict = module.state_dict()
# cast to dict because OrderedDict is not supported
# get a blocking copy of the state_dict
info(f" Sending local layer: {name}")
# cant import Dict / PrimitiveFactory due to circular imports
remote_state_dict_ptr = client.syft.lib.python.Dict(
dict(local_state_ord_dict)
)
# iterate through the key, values
# weights and biases should be in there
remote_module_ptr.load_state_dict(remote_state_dict_ptr)
info("\n> Finished sending local model <\n\n")
self.remote_model = remote_model
return self.remote_model
def get(
self,
request_block: bool = False,
timeout_secs: int = 20,
reason: str = "",
delete_obj: bool = False,
) -> Optional["Module"]:
if self.is_local:
info("> This model is local. Maybe you meant to call .send()?")
return None
info("> Downloading remote model")
local_model = copy.copy(self)
local_model.setup(torch_ref=torch)
local_model.duet = self.duet
for layer_name, module in self.modules.items():
module_parts = module.path_and_name.split(".")
klass_name = module_parts.pop()
klass = getattr(sys.modules[".".join(module_parts)], klass_name)
repr_ptr = module.extra_repr()
module_repr = repr_ptr.get(
request_block=request_block,
reason=reason,
timeout_secs=timeout_secs,
)
if module_repr is None:
info(f" Request for {reason} extra_repr failed, skipping layer")
continue
args, kwargs = repr_to_kwargs(repr_str=module_repr.upcast())
local_module = klass(*args, **kwargs)
# the local real module has been set on the sy module
local_model.__setattr__(layer_name, local_module)
try:
# if the remote module has state_dict lets get it
if hasattr(module, "state_dict") and hasattr(
local_module, "load_state_dict"
):
info("loading remote state dict")
sd_ptr = module.state_dict()
# get a blocking copy of the state_dict
info(f" Downloading remote layer: {layer_name}")
state_dict = sd_ptr.get(
request_block=request_block,
reason=reason,
timeout_secs=timeout_secs,
delete_obj=delete_obj,
)
# We have to recreate the OrderedDict for load_state_dict to work
ordered_state_dict = OrderedDict()
for elem, item in state_dict.items():
ordered_state_dict[str(elem)] = item
# iterate through the key, values
# weights and biases should be in there
if state_dict is not None:
# TODO: support torch.nn.modules.module._IncompatibleKeys
local_module.load_state_dict(ordered_state_dict)
else:
info(
f" Failed to get {layer_name} state_dict, skipping layer."
)
except Exception as e:
critical(f" Failed to download remote state for {layer_name}.")
traceback_and_raise(e)
info("\n> Finished downloading remote model <\n\n")
self.local_model = local_model
return self.local_model
# zero them so we know they are copied
def zero_layers(self) -> None:
for m in self.modules.values():
if hasattr(m, "weight"):
m.weight.requires_grad_(False).zero_()
if hasattr(m, "bias"):
m.bias.requires_grad_(False).zero_()
# easy way to check the weights have changed
def debug_sum_layers(self) -> None:
info("> Summing layers for debugging: ")
for n, m in self.modules.items():
if hasattr(m, "state_dict"):
if self.is_local:
state_dict = m.state_dict()
else:
state_dict = m.state_dict().get()
for k, v in state_dict.items():
if hasattr(v, "sum"):
s = v.sum().item()
info(f" Layer {n} sum({k}): {s}")
def object2proto(obj: torch.nn.Module, is_child: bool = False) -> Module_PB:
proto = Module_PB()
if "torch.nn." in type(obj).__module__:
proto.module_type = type(obj).__name__
else:
proto.module_type = f"_USER_DEFINED_MODULE_{type(obj).__name__}"
proto.forward.CopyFrom(sy.serialize(obj._forward_plan))
proto.module_repr = obj.extra_repr()
if hasattr(obj, "_uid2attr"):
proto._uid2attr.CopyFrom(sy.serialize(SyOrderedDict(obj._uid2attr)))
proto.parameters.CopyFrom(sy.serialize(SyOrderedDict(obj._parameters)))
for n, m in obj.named_children():
child_proto = object2proto(m, is_child=True)
child_proto.module_name = n
proto.children.append(child_proto)
return proto
def proto2object(proto: Module_PB) -> torch.nn.Module:
is_userdefined = proto.module_type.startswith("_USER_DEFINED_MODULE_")
if is_userdefined:
obj_type = type(
proto.module_type.replace("_USER_DEFINED_MODULE_", ""),
(torch.nn.Module,),
{},
)
else:
obj_type = getattr(torch.nn, proto.module_type)
args, kwargs = repr_to_kwargs(repr_str=proto.module_repr)
obj = obj_type(*args, **kwargs)
for name, param in sy.deserialize(proto.parameters).items():
# if we don't do this check, some torch.nn layers fail ( e.g. Conv2D with bias=False)
if not isinstance(param, _SyNone):
setattr(obj, str(name), param)
if proto.HasField("forward"):
forward_plan = sy.deserialize(proto.forward)
obj._forward_plan = forward_plan
compile_and_forward = create_compile_and_forward_fn(obj)
obj.__call__ = compile_and_forward
obj.forward = compile_and_forward
# obj.__call__ = forward_plan
# obj.forward = forward_plan
for child_proto in proto.children:
setattr(obj, str(child_proto.module_name), sy.deserialize(child_proto))
if proto.HasField("_uid2attr"):
obj._uid2attr = sy.deserialize(proto._uid2attr)
if is_userdefined:
recompile(obj)
return obj
def create_compile_and_forward_fn(obj: "SyModule") -> Callable:
"""Wraps a forward plan in a function that first recompiles the plan, and then
executes the plan
Args:
obj (SyModule): the SyModule
"""
def _compile_and_forward(*args, **kwargs): # type: ignore
recompile(obj)
return obj._forward_plan(*args, **kwargs)
return _compile_and_forward
def recompile(sy_module: "SyModule") -> None:
"""Recompiles the forward plan, if the object state has changed since the
forward plan was created, we update the plan here
Args:
sy_module (SyModule): the module to compile
"""
if hasattr(sy_module, "_forward_plan"):
for action in sy_module._forward_plan.actions: # type: ignore
if (
isinstance(action, SaveObjectAction)
and action.obj.id in sy_module._uid2attr
):
action.obj.data = getattr(
sy_module, str(sy_module._uid2attr[action.obj.id])
)
GenerateWrapper(
wrapped_type=torch.nn.Module,
import_path="torch.nn.Module",
protobuf_scheme=Module_PB,
type_object2proto=object2proto,
type_proto2object=proto2object,
)
class ForwardToPlanConverter(type):
"""This metaclass ensures that:
1) the object is initialized when calling Object()
2) obj._make_forward_plan() is called after initialization
"""
def __call__(cls: Any, *args, **kwargs) -> Any: # type: ignore
# TODO: check if contains input_size
obj = type.__call__(cls, *args, **kwargs)
obj._make_forward_plan()
return obj
class SyModule(torch.nn.Module, metaclass=ForwardToPlanConverter):
"""A `SyModule` is the pointable equivalent of a torch.nn.Module. In order to make
SyModule remotely executable, its `.forward` method is converted into a `Plan` object
when initializing a `SyModule` object. This object has two "modes", in which it behaves
differently. During the "forward plan building stage" it transforms parameters and submodules
into pointer when the user retrieves them. After plan building the model behaves more
like a regular torch.nn.Module, but instead of running a forward method, the user executes
a `Plan`. As the user does not need to understand the building stage, and the .forward API
is fairly similar to a regular torch.nn.Module, there is no need to understand all internals
to use this module.
"""
def __init__(self, *args, input_size: Optional[Tuple[int]] = None, **kwargs) -> None: # type: ignore
"""Initializes an empty SyModule
Args:
input_size (Tuple[Int], optional): input_size of the Module, needs to be defined or inferrable.
Defaults to None.
"""
super().__init__(*args, **kwargs)
self.building_forward = False
self._parameter_pointers: Dict[str, Pointer] = dict()
self.input_size = input_size
def _make_forward_plan(self) -> None:
"""Convert forward function into a `Plan` object
Raises:
ValueError: `.forward` method must be defined
"""
        if getattr(self.forward, "__name__", None) == "_forward_unimplemented":  # type: ignore
raise ValueError("Missing .forward() method for Module")
inputs = self._get_forward_inputs()
self.building_forward = True
plan = make_plan(self.forward, inputs=inputs) # type: ignore
self.forward = self._local_forward
self._forward_plan = plan
self.__call__ = plan
self._create_uid2attr()
self.building_forward = False
self._remove_plan_action_data()
def _remove_plan_action_data(self) -> None:
"""
Sets `action.obj.data` for each symodule action in `self._forward_plan` to `None`.
This greatly reduces the proto memory footprint;
The whole state of `self` is saved in the action, which will be recompiled anyway.
"""
# Remove module action data
for action in self._forward_plan.actions:
if isinstance(action, SaveObjectAction) and action.obj.id in self._uid2attr:
action.obj.data = downcast(None)
def _local_forward(self, *args, **kwargs): # type: ignore
recompile(self)
return self._forward_plan(*args, **kwargs)
def _create_uid2attr(self) -> None:
self._uid2attr = {
param.id_at_location: attr_name
for attr_name, param in self._parameter_pointers.items()
}
def __getattr__(self, name: str) -> Any:
"""A custom getattr method. When retrieving a torch.nn.Module or a torch.nn.Parameter
*during forward plan building*, SyModule instead returns a Pointer to this attribute.
The first time an attribute is retrieved, we send it to the plan builder VM, and store
it in self._parameters_pointers, which will be used for plan Recompilation during
*deserialization*. If an attribute is requested again, we return the pointer from
`_parameters_pointers`
Args:
name (str): name of the attr
Returns:
Any: Attribute value or Pointer to it
"""
# this is __getattr__ instead of __getattribute__ because of the structure of torch.nn.Module
if name in self._parameter_pointers and self.building_forward:
return self._parameter_pointers[name]
res = super().__getattr__(name)
if (
isinstance(res, (torch.nn.Module, torch.nn.Parameter))
and self.building_forward
):
res_ptr = res.send(ROOT_CLIENT)
self._parameter_pointers[name] = res_ptr
return res_ptr
else:
return res
def _get_inp_key(self) -> str:
"""Get key for the `.forward` argument
Returns:
str: input key
"""
forward_signature = inspect.signature(self.forward)
args = list(forward_signature.parameters.items())
if len(args) == 0 or len(args) > 1:
raise ValueError(
"SyModules accept only *precisely 1* argument and no kwargs"
)
k, v = args[0]
if v.default is not inspect.Parameter.empty:
raise ValueError("SyModules accept only args, not kwargs")
inp_key = k
return inp_key
def _get_inp_size(self) -> Tuple[int]:
"""Get input size for this module
Returns:
Tuple[Int]: input size for `.forward`
"""
if not hasattr(self, "input_size") or not isinstance(
self.input_size, (tuple, list)
):
raise ValueError(
"SyModule needs `input_size`: Tuple(Int) as kwarg to trace the forward plan."
"Also, make sure to call **super().__init__(**kwargs)** in ALL your SyModules"
""
)
return self.input_size
def _get_forward_inputs(self) -> Dict[str, Pointer]:
"""Get the dummy inputs for generating the .forward `Plan`
Returns:
Dict[str: Any]: inputs for .forward
"""
input_size = self._get_inp_size()
inp_key = self._get_inp_key()
if isinstance(self, SySequential):
inp_key = "x"
inputs = {inp_key: torch.randn(input_size).send(ROOT_CLIENT)}
return inputs
class SySequential(SyModule):
"""The Syft equivalent of torch.nn.Sequential"""
def __init__(self, *args, input_size: Optional[Tuple[int]] = None): # type: ignore
"""initializes SySequential and stores the submodules
input_size (Tuple[Int], optional): input_size of the Module, needs to be defined or inferrable.
Defaults to None.
"""
super().__init__(input_size=input_size)
for idx, module in enumerate(args):
setattr(self, str(idx), module)
self.n_modules = len(args)
def __iter__(self): # type: ignore
if self.building_forward:
return iter([getattr(self, str(i)) for i in range(self.n_modules)])
else:
return iter(self._modules.values())
def _get_item_by_idx(self, iterator: Iterator, idx: int) -> SyModule:
"""Get the idx-th item of the iterator"""
size = self.n_modules
if not -size <= idx < size:
raise IndexError(f"index {idx} is out of range")
return next(islice(iterator, idx, None))
def __getitem__(self, idx: int) -> SyModule:
if isinstance(idx, slice): # type: ignore
raise ValueError("SySequential does not support slices")
else:
return self._get_item_by_idx(self._modules.values(), idx)
def __setitem__(self, idx: int, module: Module) -> None:
key = self._get_item_by_idx(self._modules.keys(), idx)
return setattr(self, key, module)
def __delitem__(self, idx: Union[slice, int]) -> None:
if isinstance(idx, slice):
raise ValueError("SySequential does not support slices")
else:
key = self._get_item_by_idx(self._modules.keys(), idx)
delattr(self, key)
def forward(self, x: Any = None) -> Any: # type: ignore
"""Sequentially call submodule.forward
Args:
x (Any, optional): input. Defaults to None.
Returns:
Any: Module output
"""
out = x
for i, module in enumerate(self):
# handle indexing in the block, or in the sequential?
if module.__class__.__name__ == "ModulePointer":
# user defined module
out = module.forward(x=out)[0]
else:
# AST module
out = module(out)
return out
def _get_inp_key(self) -> str:
"""Get key for the `.forward` argument, allways x for this module
Returns:
str: "x"
"""
return "x"
def _get_inp_size(self) -> Tuple[int]:
"""Get input size for this module
Returns:
Tuple[Int]: input size for `.forward`
"""
if hasattr(getattr(self, "0"), "input_size"):
return getattr(self, "0").input_size
elif hasattr(self, "input_size"):
return self.input_size # type: ignore
else:
raise ValueError(
"SySequential needs either 1) `input_size`: Tuple(Int) as a kwargs for on of its children \
OR 2) `input_size`: Tuple(Int) as kwarg for itself \
to trace the forward plan"
)
|
StarcoderdataPython
|
6606472
|
class Entries(object):
def __init__(self, http_client):
self.http_client = http_client
def list(self, **kwargs):
return self.http_client.get('v1/entries/', params=kwargs)
def delete(self, id):
return self.http_client.delete('v1/entries/{}/'.format(id))
def stats_idxid(self, idxid):
return self.http_client.get('v1/entries/stats/idxid/{}/'.format(idxid))
def stats_sources(self, **kwargs):
return self.http_client.get('v1/entries/stats/sources/', params=kwargs)
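# Usage sketch (http_client is assumed to be an object exposing the get/delete
# helpers used above; arguments are illustrative):
#
#     entries = Entries(http_client)
#     entries.list(limit=10)
#     entries.stats_idxid('some-idxid')
#     entries.delete(42)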
|
StarcoderdataPython
|
11310894
|
<filename>src/pi/lib/state.py<gh_stars>1-10
from lib.position import RobotPosition
from math import pi
class RobotState:
def __init__(self, position, facing):
self.position = position
self.facing = facing.normalise()
self.forward_distance = 0
def forward(self, distance):
self.forward_distance = distance
def turn(self, angle):
self.facing = RobotPosition.from_angle(self.facing.to_angle() + angle * pi / 180.0)
def success(self):
self.position = self.position.add_scaled(self.facing, self.forward_distance)
def collision(self, distance):
self.position = self.position.add_scaled(self.facing, distance)
def clone(self):
return RobotState(self.position, self.facing)
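# Usage sketch (start_position and start_facing are assumed to be RobotPosition
# instances from lib.position): queue a 10-unit move, rotate the facing by 90
# degrees, then commit the move along the new facing.
#
#     state = RobotState(start_position, start_facing)
#     state.forward(10)
#     state.turn(90)
#     state.success()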
|
StarcoderdataPython
|
1719799
|
from collections import namedtuple
def get_row_col(text):
text = list(text)
convert = []
row = text[1]
column = text[0]
if row == "1":
convert.append(0)
elif row == "2":
convert.append(1)
elif row == "3":
convert.append(2)
if column == "A":
convert.append(0)
elif column == "B":
convert.append(1)
elif column == "C":
convert.append(2)
convert = tuple(convert)
return convert
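# Illustrative mapping (derived from the branches above): the row digit becomes
# the first index and the column letter the second, so
# get_row_col("A1") -> (0, 0), get_row_col("C2") -> (1, 2), get_row_col("B3") -> (2, 1).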
|
StarcoderdataPython
|
3200775
|
#!/usr/bin/python3
"""Quantum Computer Module.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from abc import ABC, abstractmethod
import math
import psi4
import numpy
from .ciwavefunction import HF_CIWavefunction, CIS_CIWavefunction
from ..project_2.cis import CIS
from ..psithon.util import rearrange_eigenpairs, check_sim
__all__ = ["Computer"]
class Computer(ABC):
def __init__(self, molecule):
ABC.__init__(self)
self.restart_wfn = None
#
self.molecule = molecule
self.nuclear_repulsion_energy = molecule.nuclear_repulsion_energy()
#
self.ciwfn = None
self.nstates = None
self.forces = None
@classmethod
def create(cls, method, molecule, nstates=1):
m = method.lower()
if m == "psi4scf": return psi4SCF_Computer(molecule)
elif m == "mycis": return myCIS_Computer(molecule, nstates)
else: raise ValueError("Wrong method chosen for computer")
def update(self, xyz):
self.molecule.set_geometry(xyz)
self.nuclear_repulsion_energy = self.molecule.nuclear_repulsion_energy()
def compute(self):
self.ciwfn = self._compute_energy()
self.restart_wfn = self.ciwfn.ref_wfn
self.forces = self._compute_forces()
@abstractmethod
def _compute_energy(self): pass
@abstractmethod
def _compute_forces(self): pass
class psi4SCF_Computer(Computer):
def __init__(self, molecule):
Computer.__init__(self, molecule)
self.nstates = 1
    def _compute_energy(self):
        # TODO: run a psi4 SCF calculation and wrap it (e.g. in HF_CIWavefunction)
        raise NotImplementedError("psi4SCF_Computer._compute_energy is not implemented yet")
    def _compute_forces(self):
        # TODO: compute SCF nuclear forces with psi4
        raise NotImplementedError("psi4SCF_Computer._compute_forces is not implemented yet")
class ExcitedState_Computer(Computer):
def __init__(self, molecule, nstates):
Computer.__init__(self, molecule)
self.nstates = nstates
    def _compute_forces(self):
        # TODO: compute excited-state forces
        raise NotImplementedError("ExcitedState_Computer._compute_forces is not implemented yet")
class CIS_Computer(ExcitedState_Computer):
def __init__(self, molecule, nstates):
ExcitedState_Computer.__init__(self, molecule, nstates)
class myCIS_Computer(CIS_Computer):
def __init__(self, molecule, nstates):
CIS_Computer.__init__(self, molecule, nstates)
    def _compute_energy(self):
        # TODO: run the CIS calculation (see ..project_2.cis.CIS) and wrap it in CIS_CIWavefunction
        raise NotImplementedError("myCIS_Computer._compute_energy is not implemented yet")
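# Usage sketch (molecule is assumed to be a psi4 molecule object; this works once
# the TODO methods above are implemented): the factory picks the computer
# implementation from a method string.
#
#     computer = Computer.create("mycis", molecule, nstates=3)
#     computer.compute()
#     print(computer.forces)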
|
StarcoderdataPython
|
1613506
|
#!/usr/bin/env python3
"""
This is a book-renewal bot for the UFBA Pergamum library system.
To use it, just run the script (preferably every day) with the
PERGAMUM_LOGIN and PERGAMUM_PASS environment variables set to your
library registration number (RM) and password.
You can also receive an email whenever an important event happens,
such as a book being renewed, the renewal limit being reached,
or even an error occurring in the script.
For that, set your email address in the BIB_EMAIL_TO_ADDR environment
variable. You also need to set BIB_EMAIL_FROM_ADDR and
BIB_EMAIL_FROM_PASS, the credentials of any Gmail account that will
act as the sender of the email.
Yes, this is Portuguese mate.
Copyright (c) 2018 <NAME>
Licensed under the MIT License (https://opensource.org/licenses/MIT)
"""
import os
import re
import sys
import asyncio
import datetime
import logging
from collections import namedtuple
from email.mime.text import MIMEText
from pathlib import Path
import aiohttp
import aiosmtplib
import parsel
PERGAMUM_LOGIN = os.environ['BIB_PERGAMUM_LOGIN']
PERGAMUM_PASS = os.environ['BIB_PERGAMUM_PASS']
BIB_EMAIL_TO_ADDR = os.environ.get('BIB_EMAIL_TO_ADDR')
if BIB_EMAIL_TO_ADDR is not None:
BIB_EMAIL_FROM_ADDR = os.environ['BIB_EMAIL_FROM_ADDR']
BIB_EMAIL_FROM_PASS = os.environ['BIB_EMAIL_FROM_PASS']
logger = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stderr, level=logging.INFO,
format=('[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - '
f'{PERGAMUM_LOGIN} - %(message)s'))
SENTRY_DSN = os.environ.get('SENTRY_DSN')
if SENTRY_DSN is not None:
import sentry_sdk
sentry_sdk.init(SENTRY_DSN)
BIB_URL = 'http://www.pergamum.bib.ufba.br'
BIB_MAX_RENEW = 7
DEFAULT_HEADERS = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:57.0) Gecko/20100101 Firefox/57.0',
}
Book = namedtuple('Book', 'name return_date renew_count cod_acervo cod_exemplar')
def list_books(books):
"""Creates a string that, on each line, informs about a book."""
return '\n'.join([f'+ {book.name}: {book.renew_count}: {book.return_date}'
for book in books])
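# Illustrative output (book data made up): list_books returns one line per book
# in the form "+ <name>: <renew_count>: <return_date>", for example
# "+ Structure and Interpretation of Computer Programs: 3: 2018-05-07".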
def extract_books(html):
"""Yields a sequence of all books in the HTML."""
selector = parsel.Selector(html)
for li in selector.xpath('/html/body/div[1]/div[2]/ul/li'):
if li.xpath('./@data-role').extract_first() == 'list-divider':
continue
if li.xpath('count(./*)').extract_first() == '0.0':
continue
a = li.xpath('.//a')
ahref = a.xpath('./@href').extract_first()
h2 = a.xpath('normalize-space(./h2)').extract_first()
p1 = a.xpath('normalize-space(./p[1])').extract_first()
p2 = a.xpath('normalize-space(./p[2])').extract_first()
book_name = h2.strip()
return_date = p1.split(':', maxsplit=1)[1].strip()
return_date = datetime.datetime.strptime(return_date, '%d/%m/%Y').date()
renew_count = p2.split(':', maxsplit=1)[1].strip()
renew_count = int(renew_count)
cod_acervo = int(re.search(r'cod_acervo=(\d+)', ahref).group(1))
cod_exemplar = int(re.search(r'cod_exemplar=(\d+)', ahref).group(1))
yield Book(book_name, return_date, renew_count, cod_acervo, cod_exemplar)
async def pergamum_login(session):
"""Logins the web session into the pergamum system."""
login_url = '/'.join([BIB_URL, 'pergamum/mobile/login.php'])
data = {
'flag': 'renovacao.php',
'login': PERGAMUM_LOGIN,
        'password': PERGAMUM_PASS,
'button': 'Acessar'
}
headers = {
'Referer': f'{login_url}?flag=renovacao.php'
}
return await session.post(login_url, headers=headers, data=data)
async def pergamum_renovacao_page(session):
renovacao_url = '/'.join([BIB_URL, 'pergamum/mobile/renovacao.php'])
return await session.get(renovacao_url)
async def pergamum_renew(session, book):
"""Renews a book in the specified web session."""
params = {'cod_acervo': book.cod_acervo, 'cod_exemplar': book.cod_exemplar}
renovar_url = '/'.join([BIB_URL, 'pergamum/mobile/confirmar_renovacao.php'])
return await session.get(renovar_url, params=params)
async def email_send(subject, text):
"""Sends an email with the specified subject and text.
The email is sent from an email specified in environ and into a
email also specified in environment variables.
If the environment variables are missing, no email is sent."""
if BIB_EMAIL_TO_ADDR is None:
return
smtp = aiosmtplib.SMTP(hostname='smtp.gmail.com', port=587)
await smtp.connect()
try:
await smtp.starttls()
await smtp.login(BIB_EMAIL_FROM_ADDR, BIB_EMAIL_FROM_PASS)
message = MIMEText(text)
message['From'] = BIB_EMAIL_FROM_ADDR
message['To'] = BIB_EMAIL_TO_ADDR
message['Subject'] = subject
await smtp.send_message(message)
finally:
await smtp.quit()
async def main():
async with aiohttp.ClientSession(headers=DEFAULT_HEADERS) as session:
# For some reason anything but portuguese works correctly.
session.cookie_jar.update_cookies({'idioma_mobile_pessoal': 6})
renew_books = []
due_books = []
email_tasks = []
today = datetime.datetime.now().date()
response = await pergamum_login(session)
books = list(extract_books(await response.text()))
for book in books:
if book.return_date == today:
renew_books.append(book)
elif book.return_date < today:
due_books.append(book)
renewed_books = []
failed_books = []
completed = await asyncio.gather(*[pergamum_renew(session, book)
for book in renew_books],
return_exceptions=True)
response = await pergamum_renovacao_page(session)
current_books = list(extract_books(await response.text()))
for book, result in zip(renew_books, completed):
if isinstance(result, Exception):
logging.error(f"Falhar ao renovar livro {book.name}: {str(result)}")
failed_books.append(book)
elif book in current_books:
logging.error(f"Falha ao renovar livro {book.name}: Estado do livro não foi alterado!")
failed_books.append(book)
try:
logpath = f"~/bibnew-{book.cod_acervo}-{book.cod_exemplar}.html"
Path(logpath).expanduser().write_text(await result.text())
except:
logger.exception("failed to save HTML dump")
else:
renewed_books.append(book)
if len(due_books) > 0:
logging.info(f'Há {len(due_books)} livros vencidos, enviando email.')
msg = 'Os seguintes livros passaram da data de renovação:\n'
msg += list_books(due_books)
coro = email_send('Livros vencidos!', msg)
email_tasks.append(coro)
if len(failed_books) > 0:
logging.info(f'Há {len(failed_books)} livros falhados, enviando email.')
msg = 'Os seguintes livros falharam a ser renovados:\n'
msg += list_books(failed_books)
msg += '\n\nPor favor informe ao administrador do bot.'
coro = email_send('Livros falharam!', msg)
email_tasks.append(coro)
if len(renewed_books) > 0:
logging.info(f'Um total de {len(renewed_books)} livros foram'
f' renovados com sucesso.')
on_limit = [b for b in renewed_books if b.renew_count+1 == BIB_MAX_RENEW]
subject = ('Livros renovados, mas cuidado!' if on_limit
else 'Livros renovados com sucesso.')
msg = 'Os seguintes livros foram renovados:\n'
msg += list_books(renewed_books)
if on_limit:
msg += ('\nNo entanto os seguintes livros não poderão ser renovados na'
' próxima semana! É necessário intervenção pessoal.\n')
msg += list_books(on_limit)
coro = email_send(subject, msg)
email_tasks.append(coro)
if len(email_tasks) == 0:
logging.info(f'Foram encontrados {len(books)} livros mas nenhuma ação'
f' precisa ser tomada.')
await asyncio.gather(*email_tasks)
if __name__ == '__main__':
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(main())
except Exception:
logger.exception("Erro fatal")
coro = email_send('Erro fatal no renovador de livros!',
f'Por favor, informe ao administrador, um erro fatal ocorreu durante'
f' o processo de verificação e renovação automatica de livros.')
loop.run_until_complete(coro)
finally:
loop.close()
|
StarcoderdataPython
|
5034166
|
<reponame>harvard-nrg/mrverify<filename>mrverify/scanner/siemens/prisma.py
import logging
from mrverify.scanner.siemens import Siemens
logger = logging.getLogger(__name__)
class Prisma(Siemens):
def __init__(self, config):
super().__init__(config['Siemens']['Prisma'])
@classmethod
def check_model(cls, model):
if model in ['Prisma', 'Prisma_fit']:
return True
return False
|
StarcoderdataPython
|
1979881
|
from selenium.webdriver import Chrome
from bs4 import BeautifulSoup
import csv
from time import sleep
import json
driver = Chrome("chromedriver")
allcatlist = ["https://www.noon.com/uae-en/electronics",
"https://www.noon.com/uae-en/beauty", "https://www.noon.com/uae-en/fashion", "https://www.noon.com/uae-en/home-kitchen", "https://www.noon.com/uae-en/sports-outdoors", "https://www.noon.com/uae-en/toys", "https://www.noon.com/uae-en/baby", "https://www.noon.com/uae-en/grocery", "https://www.noon.com/uae-en/automotive-store", "https://www.noon.com/uae-en/tools-and-home-improvement-store", "https://www.noon.com/uae-en/book-store", "https://www.noon.com/uae-en/pet-store", "https://www.noon.com/uae-en/stationery", "https://www.noon.com/uae-en/music-movies-and-tv-shows-store"]
urls_all = ["https://www.noon.com/uae-en/electronics-and-mobiles/mobiles-and-accessories/mobiles-20905",
"https://www.noon.com/uae-en/electronics-and-mobiles/mobiles-and-accessories/accessories-16176",
"https://www.noon.com/uae-en/electronics-and-mobiles/computers-and-accessories/tablets",
"https://www.noon.com/uae-en/electronics-and-mobiles/computers-and-accessories/routers",
"https://www.noon.com/uae-en/electronics-and-mobiles/computers-and-accessories/data-storage",
"https://www.noon.com/uae-en/inputdevices", "https://www.noon.com/uae-en/electronics-and-mobiles/software-10182",
"https://www.noon.com/uae-en/electronics-and-mobiles/computers-and-accessories/laptop-accessories/bags-and-cases-16607/sleeves-and-slipcases-23672",
"https://www.noon.com/uae-en/laptops-best-selling-ae", "https://www.noon.com/uae-en/electronics-and-mobiles/portable-audio-and-video/headphones-24056",
"https://www.noon.com/uae-en/all-speakers", "https://www.noon.com/uae-en/home-and-kitchen/home-appliances-31235",
"https://www.noon.com/uae-en/electronics-and-mobiles/wearable-technology", "https://www.noon.com/uae-en/electronics-and-mobiles/camera-and-photo-16165",
"https://www.noon.com/uae-en/electronics-and-mobiles/television-and-video/televisions", "https://www.noon.com/uae-en/electronics-and-mobiles/television-and-video/home-theater-systems-19095",
"https://www.noon.com/uae-en/electronics-and-mobiles/television-and-video/projectors", "https://www.noon.com/uae-en/electronics-and-mobiles/accessories-and-supplies/audio-and-video-accessories-16161",
"https://www.noon.com/uae-en/electronics-and-mobiles/video-games-10181/gaming-console",
"https://www.noon.com/uae-en/electronics-and-mobiles/video-games-10181/games-34004", "https://www.noon.com/uae-en/electronics-and-mobiles/wearable-technology/virtual-reality-headsets/gaminghub",
"https://www.noon.com/uae-en/electronics-and-mobiles/video-games-10181/gaming-accessories", "https://www.noon.com/uae-en/electronics-and-mobiles/portable-audio-and-video",
"https://www.noon.com/uae-en/beauty-and-health/beauty/fragrance", "https://www.noon.com/uae-en/beauty-and-health/beauty/makeup-16142", "https://www.noon.com/uae-en/nails-20024",
"https://www.noon.com/uae-en/beauty-and-health/beauty/hair-care", "https://www.noon.com/uae-en/beauty-and-health/beauty/skin-care-16813",
"https://www.noon.com/uae-en/beauty-tools-and-accessories", "https://www.noon.com/uae-en/beauty-and-health/beauty/personal-care-16343",
"https://www.noon.com/uae-en/mens-grooming", "https://www.noon.com/uae-en/beauty-and-health/health",
"https://www.noon.com/uae-en/toys-and-games", "https://www.noon.com/uae-en/sports-and-outdoors/outdoor-recreation/camping-and-hiking-16354",
"https://www.noon.com/uae-en/sports-and-outdoors/cycling-16009", "https://www.noon.com/uae-en/sports-and-outdoors/other-sports", "https://www.noon.com/uae-en/sports-and-outdoors/exercise-and-fitness/yoga-16328",
"https://www.noon.com/uae-en/sports-and-outdoors/boating-and-water-sports", "https://www.noon.com/uae-en/sports-and-outdoors/racquet-sports-16542",
"https://www.noon.com/uae-en/build-your-own-gym", "https://www.noon.com/uae-en/sports-and-outdoors/team-sports",
"https://www.noon.com/uae-en/sports-and-outdoors/leisure-sports-and-games", "https://www.noon.com/uae-en/beauty-and-health/health/sports-nutrition",
"https://www.noon.com/uae-en/automotive", "https://www.noon.com/uae-en/tools-and-home-improvement", "https://www.noon.com/uae-en/office-supplies",
"https://www.noon.com/uae-en/books", "https://www.noon.com/uae-en/fashion/men-31225", "https://www.noon.com/uae-en/fashion/women-31229",
"https://www.noon.com/uae-en/view-all-kids-clothing", "https://www.noon.com/uae-en/fashion/luggage-and-bags", "https://www.noon.com/uae-en/jewellery-store", "https://www.noon.com/uae-en/bau-watches-eyewear",
"https://www.noon.com/uae-en/pet-supplies", "https://www.noon.com/uae-en/grocery-store?limit=150", "https://www.noon.com/uae-en/home-and-kitchen/bath-16182", "https://www.noon.com/uae-en/home-and-kitchen/bedding-16171",
"https://www.noon.com/uae-en/home-and-kitchen/furniture-10180", "https://www.noon.com/uae-en/home-appliances", "https://www.noon.com/uae-en/home-and-kitchen/home-decor", "https://www.noon.com/uae-en/home-and-kitchen/household-supplies"
"https://www.noon.com/uae-en/home-and-kitchen/kids-home-store", "https://www.noon.com/uae-en/home-and-kitchen/patio-lawn-and-garden", "https://www.noon.com/uae-en/home-and-kitchen/storage-and-organisation",
"https://www.noon.com/uae-en/tools-and-home-improvement", "https://www.noon.com/uae-en/home-and-kitchen/kitchen-and-dining", "https://www.noon.com/uae-en/kitchenappliances",
"https://www.noon.com/uae-en/baby-products"
]
def GetAllValidProductUrl():
return urls_all
def GetProductLinks(url, start, end):
driver.get(url)
Links = []
prev = ''
for x in range(start, end):
try:
driver.get(url+'?page='+str(x))
if driver.current_url == prev:
break
else:
prev = driver.current_url
except:
break
soup = BeautifulSoup(driver.page_source, "html.parser")
prodContainer = soup.find_all(
'div', attrs={'class': 'jsx-3152181095 productContainer'})
for x in prodContainer:
link = 'https://www.noon.com'+x.find('a')['href']
Links.append(link)
driver.close()
return Links
|
StarcoderdataPython
|
8115670
|
#
# @lc app=leetcode id=169 lang=python3
#
# [169] Majority Element
#
# https://leetcode.com/problems/majority-element/description/
#
# algorithms
# Easy (55.16%)
# Likes: 2284
# Dislikes: 192
# Total Accepted: 481.8K
# Total Submissions: 871.2K
# Testcase Example: '[3,2,3]'
#
# Given an array of size n, find the majority element. The majority element is
# the element that appears more than ⌊ n/2 ⌋ times.
#
# You may assume that the array is non-empty and the majority element always
# exist in the array.
#
# Example 1:
#
#
# Input: [3,2,3]
# Output: 3
#
# Example 2:
#
#
# Input: [2,2,1,1,1,2,2]
# Output: 2
#
#
#
# @lc code=start
from typing import List
class Solution:
def majorityElement(self, nums: List[int]) -> int:
return self.boyer_moore_voting_soln(nums)
def boyer_moore_voting_soln(self, nums: List[int]) -> int:
"""
Boyer-Moore Voting solution
Idea:
Set candidate as +1, non-candidate as -1.
Whenever count becomes 0, switch candidate greedily
"""
candidate = nums[0]
count = 1
for i in range(1, len(nums)):
if nums[i] == candidate:
count += 1
else:
count -= 1
if count == 0:
candidate = nums[i]
count = 1
return candidate
def divide_conquer_soln(self, nums: List[int]) -> int:
"""
Divide and Conquer solution
Runtime: O(nlogn)
Space: O(logn)
"""
def helper(left: int, right: int) -> int:
if left == right:
return nums[left]
mid = (right - left) // 2 + left
left_maj = helper(left, mid)
right_maj = helper(mid + 1, right)
if left_maj == right_maj:
return left_maj
left_count = sum([1 for i in range(left, right + 1)
if nums[i] == left_maj])
right_count = sum([1 for i in range(left, right + 1)
if nums[i] == right_maj])
if left_count > right_count:
return left_maj
else:
return right_maj
return helper(0, len(nums) - 1)
def randomized_soln(self, nums: List[int]) -> int:
"""
Randomized solution
        Runtime: expected O(n); unbounded in the worst case. Each random pick
        hits the majority with probability > 1/2, so the expected number of
        attempts is below 2 and each verification pass costs O(n).
Space: O(1)
"""
import random
majority_count = len(nums)//2
while True:
candidate = random.choice(nums)
count = 0
for num in nums:
if num == candidate:
count += 1
if count > majority_count:
return candidate
def sort_soln(self, nums: List[int]) -> int:
"""
Sorting solution
Runtime: O(nlogn)
        Space: O(n) for the sorted copy
        """
        sorted_nums = sorted(nums)
        majority_count = len(nums) // 2
        curr_count = 1
        for i in range(1, len(sorted_nums)):
            if sorted_nums[i] != sorted_nums[i - 1]:
                curr_count = 1
            else:
                curr_count += 1
            if curr_count > majority_count:
                return sorted_nums[i]
        # A single-element array is trivially its own majority.
        return sorted_nums[0]
def hashmap_soln(self, nums: List[int]) -> int:
"""
Hashmap solution
Runtime: O(n)
Space: O(n)
"""
majority_count = len(nums) // 2
counts = dict()
for num in nums:
if num not in counts:
counts[num] = 1
else:
counts[num] += 1
for num, count in counts.items():
if count > majority_count:
return num
# @lc code=end
if __name__ == "__main__":
print(Solution().majorityElement([3, 2, 3]), 3)
print(Solution().majorityElement([2, 2, 1, 1, 1, 2, 2]), 2)
|
StarcoderdataPython
|
11268298
|
import json
import re
import subprocess
import os
from urllib.request import Request
from urllib.request import urlopen
from itertools import repeat
def mock_log_handler(line):
print(line)
class DockerHelper:
@staticmethod
def pushed_tags(registry, repo):
response = urlopen(f"https://{registry}/v1/repositories/{repo}/tags")
raw = response.read().decode()
data = json.loads(raw)
return list(map(lambda x: str(x["name"]), data))
@staticmethod
def decompose_image_id(image_id):
print(image_id)
project, tag = image_id.split(":")
version = re.match(r'.+(\d+\.\d+\.\d+).+', tag).groups()[0]
return (project, tag, version)
@staticmethod
def push_image(image):
print(f"Pushing: {image}")
for i in repeat(None, 3):
if not subprocess.call(["docker", "push", image]):
return 0
return -1
@staticmethod
def rmi(image):
subprocess.call(["docker", "rmi", "-f", image])
@staticmethod
def tag(old, *new):
for n in new:
subprocess.call(["docker", "tag", old, n])
return
@staticmethod
def compile_image(source_folder, tag, args, log_file, no_cache = False, log_handler = None):
args = args or {}
log_handler = log_handler or mock_log_handler
with open(log_file, "w") as f:
cmd = ["docker", "build"]
cmd += ["--no-cache"] if no_cache else []
cmd += ["-t", tag]
cmd += [f"--build-arg={k}={v}" for k, v in args.items()]
cmd += [source_folder]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
            while True:
l = p.stdout.readline()
if not l: break
l = l.decode()
f.write(l)
f.flush()
log_handler(l)
p.communicate()
return p.returncode
return -1
class DockerRegistry:
    def __init__(self):
        # authorized() checks this attribute, so initialise it up front.
        self.access_token = None
def login(self):
        # useful for accessing some Docker Hub functionality
r = Request("https://hub.docker.com/v2/users/login/",
json.dumps({
"username" : os.environ["DOCKER_HUB_USERNAME"],
"password" : os.environ["DOCKER_HUB_PASSWORD"]
}).encode(),
{'Content-Type': 'application/json'}
)
data = urlopen(r).read()
data = data.decode()
try:
self.access_token = json.loads(data)["token"]
print("Logged in!")
except Exception as e:
print("Wrong docker login data.", e)
def update_description(self, repository, short_description, full_description):
r = Request(f"https://hub.docker.com/v2/repositories/{repository}/",
json.dumps({
"full_description" : full_description,
"description" : short_description,
}).encode(),
headers=self.add_authorization_header({
'Content-Type': 'application/json'
})
)
r.get_method = lambda: "PATCH"
data = urlopen(r).read()
def get_auth_header(self, repository, headers=None):
headers = headers if headers else {}
r = Request(f"https://auth.docker.io/token?service=registry.docker.io&scope=repository:{repository}:pull")
data = urlopen(r).read()
headers["Authorization"] = "Bearer " + json.loads(data)["token"]
return headers
def get_manifest(self, repository, reference):
r = Request(
f"https://index.docker.io/v2/{repository}/manifests/{reference}",
headers=self.get_auth_header(repository, {
"Accept" : "application/vnd.docker.distribution.manifest.v2+json"
})
)
data = None
try:
data = json.loads(urlopen(r).read())
except Exception as e:
pass
return data
def get_digest(self, repository, reference):
manifest = self.get_manifest(repository, reference)
return manifest["config"]["digest"] if manifest else None
def add_authorization_header(self, headers):
headers["Authorization"] = "JWT " + self.access_token
return headers
def authorized(self):
return self.access_token is not None
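# Hedged usage sketch (added for illustration; it assumes DOCKER_HUB_USERNAME
# and DOCKER_HUB_PASSWORD are set in the environment and that network access
# is available -- the repository and tag below are placeholders):
#
#   registry = DockerRegistry()
#   registry.login()
#   if registry.authorized():
#       print(registry.get_digest("library/python", "3.11-slim"))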
|
StarcoderdataPython
|
81379
|
<filename>scripts/convertDiagnoseTargets2Table.py
__author__ = 'dan'
import sys
import argparse
import csv
import vcf
import pybedtools
import tabix
from collections import defaultdict
#Arguments and commenad line parsing
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', help="Input vcf file")
parser.add_argument('-o', '--output', help="Output text file")
parser.add_argument('-d', '--dict', help="Dictionary of Ensembl to Gene Names")
parser.add_argument('-b', '--bed', help='BED file used with DiagnoseTargets')
parser.add_argument('-s', '--samples', help='Samples to consider. Defaults to all samples, comma-separated list')
args = parser.parse_args()
#Set up sample details
samples = args.samples.split(',')
sample_header_list = "\t".join(samples)
sys.stdout.write("Reading DiagnoseTargets file: %s\n" % args.input)
targets_reader = vcf.Reader(open(args.input, 'r'))
regions = defaultdict(lambda: defaultdict(lambda : defaultdict(str)))
sys.stdout.write("Reading BED file: %s\n" % args.bed)
with open(args.bed, 'rU') as csvfile:
reader = csv.reader(csvfile, dialect='excel-tab')
for row in reader:
regions[row[0]][row[1]][row[2]] = row[3]
sys.stdout.write("Reading Dictionary file: %s\n" % args.dict)
gene_dict = dict()
with open(args.dict, 'rU') as csvfile:
reader = csv.reader(csvfile, dialect='excel-tab')
for row in reader:
gene_dict[row[0]] = row[1]
with open(args.output, 'w') as out:
for record in targets_reader:
format_fields = record.FORMAT.split(':')
info_fields = record.INFO.split(';')
#If no Filter Type definition than all samples passed DiagnoseTargets
#Filtering criteria, nothing to do
if format_fields[0] == 'FT':
#Format sample genotypes in to string
genotypes = []
sample_coverage = dict()
for sample in samples:
sample_coverage[sample] = record.genotype(sample)['FT']
#Retrieve CCDS name info from CCDS BED file and format
try:
region_record = regions[record.CHROM][int(record.POS - 1)][int(record.INFO['END'])]
except:
sys.stderr.write("ERROR: Could not find match in regions dictionary for chrom %s, start %s, end %s\n" %
(record.CHROM, record.POS, record.INFO['END']))
region_record = "NA"
|
StarcoderdataPython
|
6555148
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# DATE: 2021/8/17
# Author: <EMAIL>
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Optional, Type, NoReturn, Union, Callable
Number: Type = Union[str, int]
class Validator(ABC):
def __set_name__(self, owner: object, name: str) -> NoReturn:
self.private_name: str = '_' + name
def __get__(self, obj: object, obj_type: Type = None) -> Any:
return getattr(obj, self.private_name)
def __set__(self, obj: object, value: Any) -> NoReturn:
validate_value: Any = self.validate(value)
value = value if validate_value is None else validate_value
setattr(obj, self.private_name, value)
@abstractmethod
def validate(self, value: Any) -> Any:
""" Validate method. """
class NumberValidate(Validator):
def __init__(self, minvalue: Number = None, maxvalue: Number = None) -> None:
self.minvalue: Number = minvalue
self.maxvalue: Number = maxvalue
def validate(self, value) -> NoReturn:
if not isinstance(value, (int, float)):
raise TypeError(f'Expected {value!r} to be an int or float')
if self.minvalue is not None and value < self.minvalue:
raise ValueError(
f'Expected {value!r} to be at least {self.minvalue!r}'
)
if self.maxvalue is not None and value > self.maxvalue:
raise ValueError(
f'Expected {value!r} to be no more than {self.maxvalue!r}'
)
class StringValidate(Validator):
def __init__(
self, minsize: int = None,
maxsize: int = None,
predicate: Optional[Callable] = None
) -> None:
self.minsize: int = minsize
self.maxsize: int = maxsize
self.predicate: Optional[Callable] = predicate
def validate(self, value: str) -> NoReturn:
if not isinstance(value, str):
raise TypeError(f'Expected {value!r} to be an str')
if self.minsize is not None and len(value) < self.minsize:
raise ValueError(
f'Expected {value!r} to be no smaller than {self.minsize!r}'
)
if self.maxsize is not None and len(value) > self.maxsize:
raise ValueError(
f'Expected {value!r} to be no bigger than {self.maxsize!r}'
)
if self.predicate is not None and not self.predicate(value):
raise ValueError(
f'Expected {self.predicate} to be true for {value!r}'
)
class EnumerateValidate(Validator):
def __init__(self, *options: str) -> None:
self.options: set = set(options)
def validate(self, value: Any) -> NoReturn:
if value not in self.options:
raise ValueError(f'Expected {value!r} to be one of {self.options!r}')
class DirectoryValidate(Validator):
def validate(self, directory: Any) -> Optional[Path]:
if not isinstance(directory, (str, Path)):
raise ValueError(f'Expected {directory!r} to be a like-path object')
path: Path = Path(directory).expanduser().absolute()
if not path.exists():
path.mkdir()
return path
return path
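# Hedged usage sketch (illustrative only; the Component class and its fields
# are invented for this example and are not part of the original module):
if __name__ == '__main__':
    class Component:
        kind = EnumerateValidate('wood', 'metal', 'plastic')
        name = StringValidate(minsize=3, maxsize=10, predicate=str.isupper)
        quantity = NumberValidate(minvalue=0, maxvalue=100)
        def __init__(self, kind: str, name: str, quantity: int) -> None:
            self.kind = kind
            self.name = name
            self.quantity = quantity
    Component('metal', 'BOLT', 5)      # passes every validator
    try:
        Component('stone', 'BOLT', 5)  # EnumerateValidate rejects 'stone'
    except ValueError as exc:
        print(exc)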
|
StarcoderdataPython
|
38548
|
# -*- coding: utf-8 -*-
import sys
import time
import json
import pickle
import hashlib
import requests
from urlparse import urljoin
from config import *
from spiders.common import *
from spiders.html_parser import *
from logs.log import logger
reload(sys)
sys.setdefaultencoding('utf8')
class Spider(object):
def __init__(self, user_name=None, password=<PASSWORD>):
self.session = requests.Session()
self.uid = None
self.user_name = user_name
self.password = password
def get_hash(self, string):
m = hashlib.md5()
m.update(string)
return m.hexdigest()
def _request(self, url, params={}):
# 应该使用统一的request函数去请求,此处待重构
try:
response = self.session.get(url, headers=FOLLOWER_HEADER, params=params, timeout=10)
return response
        except (requests.ConnectionError, requests.ConnectTimeout):
            logger.error('%s请求超时' % url)
def visit_index(self):
self.session.get(BASE_URL, headers=BASE_HEADER)
def login(self):
url = urljoin(BASE_URL, LOGIN_URL)
if self.check_login():
logger.info('已经登录')
return
data = {
'areacode': 86,
'remember_me': 'on',
'username': self.user_name,
'password': self.get_<PASSWORD>(self.password),
}
if if_int(self.user_name):
data['telephone'] = data.pop('username')
response = self.session.post(url, headers=BASE_HEADER, data=data)
logger.debug(response.content)
if self.check_login():
logger.info('登录成功')
self.get_people_id('8276760920')
self.save_cookies()
return
raise ValueError('登录失败')
def save_cookies(self):
result = self.load_data()
with open('spiders/.session', 'wb') as f:
cookies = requests.utils.dict_from_cookiejar(self.session.cookies)
data = {
'cookies': cookies,
'uid': self.uid,
'user_name': self.user_name,
}
result[self.user_name] = data
pickle.dump(result, f)
@classmethod
def clear_cookies(cls):
with open('spiders/.session', 'wb') as f:
pickle.dump({}, f)
def load_data(self):
with open('spiders/.session') as f:
try:
return pickle.load(f)
except EOFError:
return {}
def load_cookies(self):
with open('spiders/.session') as f:
try:
data = pickle.load(f)
except EOFError:
return {}
result = data.get(self.user_name)
if not result:
logger.info("账户未登录")
return {}
self.uid = result['uid']
cookies = result['cookies']
return cookies
def check_login(self, load_cookie=True):
if load_cookie:
cookies = self.load_cookies()
response = self.session.get(BASE_URL, headers=BASE_HEADER,
cookies=cookies, allow_redirects=False)
else:
response = self.session.get(BASE_URL, headers=BASE_HEADER,
allow_redirects=False)
if response.status_code == 302:
if self.uid is not None:
return True
location = response.headers['Location']
uid = get_uid_from_url(location)
if uid:
self.uid = uid
return True
else:
logger.error(u"从跳转链接解析uid出错了")
return False
def get_people(self):
url = urljoin(BASE_URL, PEOPLE_URL)
respond = self.session.get(url, headers=BASE_HEADER)
result = get_people(respond.content)
logger.info('抓取了%s个大V' % len(result))
return result
def get_people_id(self, path):
url = urljoin(BASE_URL, path)
respond = self.session.get(url, headers=BASE_HEADER)
if respond.status_code == 200:
uid = get_people_id(respond.content)
return uid
else:
logger.error(u'抓取’%s‘用户的id失败' % path)
def get_followers(self, uid):
size = 1000
url = urljoin(BASE_URL, FOLLOWERS_URL)
params = {
'size': size,
'pageNo': 1,
'uid': uid,
'_': int(time.time() * 1000)
}
respond = self._request(url, params=params)
if not respond:
return []
data = respond.json()
max_page = data.get('maxPage')
if not max_page:
logger.error("获取粉丝失败")
logger.error(data)
raise ValueError("获取粉丝失败")
result = data['followers']
for page in range(1, max_page):
time.sleep(FOLLOWER_PAGE_INTEVAL)
logger.info('开始抓取第%s页的粉丝' % page)
params['pageNo'] = page
params['_'] = int(time.time() * 1000)
respond = self._request(url, params=params)
if not respond:
continue
data = respond.json()
result += data['followers']
return self.handle_followers(result)
def handle_followers(self, data):
return [(_['id'], _['screen_name']) for _ in data]
def get_chat_sequence_id(self, uid):
url = CHAT_HISTORY_URL % uid
params = {
'user_id': self.uid,
'limit': 30,
'_': int(time.time() * 1000)
}
cookies = self.load_cookies()
respond = self.session.get(url, headers=CHAT_HEADER, params=params, cookies=cookies)
if respond.status_code == 200:
data = respond.json()
if len(data) > 1:
return data[-1]['sequenceId']
else:
return 96878141
logger.error('获得聊天id失败')
logger.error(respond.content)
return False
def chat(self, uid, msg):
sequenceId = self.get_chat_sequence_id(uid)
if not sequenceId:
return False
data = {
'plain': msg,
'to_group': False,
'toId': uid,
'sequenceId': sequenceId + 1
}
params = {'user_id': self.uid}
cookies = self.load_cookies()
respond = self.session.post(CHAT_URL, headers=CHAT_HEADER, cookies=cookies,
params=params, data=json.dumps(data))
if respond.status_code == 200:
result = respond.json()
error = result.get('error')
if error:
print '发送消息出错了'
logger.debug(respond.content)
raise ValueError(error.encode('utf8'))
return True
logger.debug(respond.status_code)
logger.debug(respond.content)
return False
def post(self, msg, audience=[]):
p = {"api": "/statuses/update.json", "_": int(time.time() * 1000)}
cookie = self.load_cookies()
url = urljoin(BASE_URL, TOKEN_URL)
r = self.session.get(url, params=p, cookies=cookie,
headers=BASE_HEADER)
try:
token = r.json()['token']
except (IndexError, TypeError, ValueError):
logger.error("MLGB 出错了!")
logger.error("\n%s\n", r.text)
return
audience = ' @'.join(audience)
audience = ' @' + audience.strip()
msg = '%s %s' % (msg, audience)
logger.info('发送的内容是: %s' % msg)
msg = msg.encode().decode()
data = {"status": "<p>%s</p>" % msg, "session_token": token}
url = urljoin(BASE_URL, POST_URL)
r = self.session.post(url, data=data, cookies=cookie,
headers=BASE_HEADER)
if r.status_code == 200:
data = r.json()
if not data.get('error_code') > -1:
logger.debug("完事儿了.")
return
logger.error("MLGB 又出错了!")
logger.error("\n%s\n", r.text)
raise ValueError('发广播出错了')
def if_int(item):
try:
int(item)
except ValueError:
return False
return True
|
StarcoderdataPython
|
3257412
|
#!/usr/bin/python3
# SPDX-License-Identifier: Unlicense
import os.path
import sys
import inspect
import datetime
template_cfg=None
replacements={}
filters={}
shortline=0
shortname='<file>'
fullname='<file>'
activeoutname='<Out>'
activeoutline=0
activedatetime=str(datetime.datetime.now().timestamp())
iserr=False
def get_active_input_file():
return fullname
def get_active_output_file():
return activeoutname
def get_active_output_line():
return activeoutline
def get_active_timestamp():
return activedatetime
replacements['__INPUT__'] = get_active_input_file
replacements['__FILE__'] = get_active_output_file
replacements['__LINE__'] = get_active_output_line
replacements['__DATE__'] = get_active_timestamp
def filter_identifier_only(s):
out = ''
for ch in s:
if ch.isalnum() or ch == '_':
out += ch
return out
def filter_spaceline(s):
out = ''
for ch in s:
if ch.isalnum():
out += ch
elif ch.isspace() or ch == '_':
out += '_'
return out
def filter_alnum_only(s):
out = ''
for ch in s:
if ch.isalnum():
out += ch
return out
def filter_alnumspace_only(s):
out = ''
for ch in s:
if ch.isalnum() or ch.isspace():
out += ch
return out
def filter_pathslash(s):
return os.path.join(s,'')
def filter_splitext_0(s):
return os.path.splitext(s)[0]
def filter_splitext_1(s):
return os.path.splitext(s)[1]
def filter_repr(s):
"""Use repr(...) on the given string."""
return repr(s)
def filter_replace(s,old=None,new=None,count=None):
xcount = None
if count:
try:
xcount = int(count)
except ValueError as e:
xcount = None
return s.replace(old,new,xcount)
def filter_slice(s,start=None,stop=None,step=None):
"""slice(s,start=0,stop=-1,step=1) -> str
Extract a slice of the given string, from start to stop, taking
every step characters."""
xstart = None
xstop = None
xstep = None
if start:
try:
xstart = int(start)
except ValueError as e:
xstart = 0
if stop:
try:
xstop = int(stop)
except ValueError as e:
xstop = -1
if step:
try:
xstep = int(step)
except ValueError as e:
xstep = 1
return s[slice(xstart,xstop,xstep)]
def filter_dropchars(s,chars=None):
xchars = str(chars)
out = ''
for x in s:
if x in xchars:
continue
else:
out += x
return out
def filter_onlychars(s,chars=None):
xchars = str(chars)
out = ''
for x in s:
if x in xchars:
out += x
else:
continue
return out
def filter_insertchars(s,chars=None,space=' '):
"""insertchars(s,chars=None,space=' ') -> str
Insert space before each occurence of a character in chars."""
xchars = str(chars)
out = ''
for x in s:
if x in xchars:
out += space
out += x
return out
def filter_upper_only(s):
out = ''
for x in s:
if x.isupper():
out += x
return out
def filter_lower_only(s):
out = ''
for x in s:
if x.islower():
out += x
return out
def filter_help(s):
"""Get help on the filter of the given name."""
global filters
if s in filters:
out = inspect.getdoc(filters[s])
elif len(s) == 0:
out = str('\n').join(filters.keys())
else:
out = None
return str(out) if out else ''
def filter_date(s, f=None):
"""date(s, f=None) -> str
Generate a strftime-style string based on the given timestamp
number (use `__DATE__` for "today and now"), using `f` as the
format."""
ts = datetime.datetime.fromtimestamp(float(s))
return ts.strftime(f if f else '%c')
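# Illustrative template usage of the filter above (an inference from the
# keyword-argument parsing in string_subst() below, not from any original
# docs): a keyword value is either an integer or a variable name, so a format
# string is passed through a variable, e.g.
#
#   =fmt "%Y-%m-%d"
#   > <<<EOF
#   Generated on ${__DATE__:date(f=fmt)}
#   EOF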
filters['date'] = filter_date
filters['help'] = filter_help
filters['onlychars'] = filter_onlychars
filters['lower_only'] = filter_lower_only
filters['upper_only'] = filter_upper_only
filters['insertchars'] = filter_insertchars
filters['dropchars'] = filter_dropchars
filters['slice'] = filter_slice
filters['replace'] = filter_replace
filters['pathslash'] = filter_pathslash
filters['basename'] = os.path.basename
filters['abspath'] = os.path.abspath
filters['dirname'] = os.path.dirname
filters['expanduser'] = os.path.expanduser
filters['expandvars'] = os.path.expandvars
filters['normcase'] = os.path.normcase
filters['normpath'] = os.path.normpath
filters['realpath'] = os.path.realpath
filters['splitext_0'] = filter_splitext_0
filters['splitext_1'] = filter_splitext_1
filters['identifier_only'] = filter_identifier_only
filters['spaceline'] = filter_spaceline
filters['alnum_only'] = filter_alnum_only
filters['alnumspace_only'] = filter_alnumspace_only
filters['repr'] = filter_repr
filters['lower'] = str.lower
filters['upper'] = str.upper
filters['strip'] = str.strip
filters['capitalize'] = str.capitalize
filters['casefold'] = str.casefold
filters['lstrip'] = str.lstrip
filters['rstrip'] = str.rstrip
filters['swapcase'] = str.swapcase
filters['title'] = str.title
def warn(message):
print('%s:%i: warning: %s'%(shortname,shortline,message),file=sys.stderr)
return
def err(message):
global iserr
iserr=True
print('%s:%i: error: %s'%(shortname,shortline,message),file=sys.stderr)
return
class FilterRef:
def __init__(self, name, kwvals=None):
self.name = name
self.kwargs = kwvals if kwvals else {}
def string_subst(s):
global replacements
global filters
global current_line
out = ''
l = len(s)
mode = 0
varname = None
varfilter = None
nextfilter = None
nextkwlist = None
nextkw = ''
nextkwval = ''
parencount = 0
j = 0
def lookup_var(varname):
if varname and (varname in replacements):
varx = replacements[varname]
if hasattr(varx,'__call__'):
varvalue = str(varx())
else:
varvalue = str(varx)
elif varname:
warn('unknown variable %s'%repr(varname))
varvalue = ''
else:
warn('substitution without variable name')
varvalue = ''
return varvalue
def post_to_out():
# https://stackoverflow.com/a/11987499
nonlocal varname, varfilter, nextfilter, out, nextkwlist
varvalue = lookup_var(varname)
if varfilter:
for f in varfilter:
if f.name in filters:
try:
varvalue = filters[f.name](varvalue, **f.kwargs)
except TypeError as e:
warn(e)
warn('...while using filter %s'%repr(f.name))
except ValueError as e:
warn(e)
warn('...while using filter %s'%repr(f.name))
elif f.name:
warn('skipping unknown filter %s'%repr(f.name))
else:
warn('skipping empty filter expression')
out += varvalue
varname = None
varfilter = None
nextfilter = None
nextkwlist = None
return
for ch in s:
if mode == 0: #normal copy
if (ch == '$'):
mode = 1
else:
out += ch
elif mode == 1: #substitute?
if (ch == '{'): #substitute!
mode = 3
varname = ''
elif (ch == '$'): #literal $
mode = 0
out += '$'
elif (ch == '#'): #comment
mode = 2
else:
mode = 0
warn('unexpected char %s after "$"'%(repr(ch)))
elif mode == 2: #comment
if (ch == '#'): #end comment
mode = 0
else:
pass
elif mode == 3: #varname
if (ch == ':'): #on to filters
mode = 4
varfilter = []
nextfilter = ''
elif ch == '}':
mode = 0
post_to_out()
else:
varname += ch
elif mode == 4 or mode == 5: #filter
if (ch == ':'): #on to next filter
varfilter.append(FilterRef(nextfilter,nextkwlist))
mode = 4
nextfilter = ''
nextkwlist = None
elif ch == '}':
varfilter.append(FilterRef(nextfilter,nextkwlist))
mode = 0
post_to_out()
elif mode == 4 and ch == '(':
mode = 6 #start the arglist
nextkwlist = {}
nextkw = ''
elif mode == 5:
warn('unexpected char %s after ")"'%(repr(ch)))
else:
nextfilter += ch
elif mode == 6: #keyword argument name
            if parencount == 0 and (ch == ')' or ch == ','):
#pack up the keyword
if nextkw:
nextkwlist[nextkw] = None
if ch == ')':
mode = 5
else:
nextkw = ''
elif ch == '=':
nextkwval = ''
mode = 7
else:
if ch == '(':
parencount += 1
elif ch == ')':
parencount -= 1
nextkw += ch
elif mode == 7: #keyword argument value
            if parencount == 0 and (ch == ')' or ch == ','):
#parse the keyword value
if len(nextkwval) > 0 and nextkwval[0].isdigit():
try:
nextkwlist[nextkw] = str(int(nextkwval,0))
except ValueError as e:
warn(str(e))
nextkwlist[nextkw] = str(0)
else: #assert a variable name
nextkwlist[nextkw] = lookup_var(nextkwval)
#pack up the keyword
if ch == ')':
mode = 5
else:
mode = 6
nextkw = ''
else:
if ch == '(':
parencount += 1
elif ch == ')':
parencount -= 1
nextkwval += ch
else:
warn('unexpected subst machine state %s'%repr(mode))
break
if mode != 0:
if (mode in [3,4]):
warn('unterminated replacement')
elif (mode == 2):
warn('unterminated comment')
return out
class FileHeader:
def __init__(self,s=None):
if s:
self.parse(s)
else:
self.fname = None
self.stoptoken = None
return
def parse(self,s):
mode = 0
fname = None
token = None
stoptoken = None
if s[0] != '>':
raise ValueError('mktemplate.FileHeader.parse')
for ch in s[1:]:
if mode == 0:
if ch == '"': #filename
mode = 1
fname = ''
elif ch == '<':
token = ch
mode = 3
elif ch.isspace():
continue
else:
warn("unexpected file header character %s"%repr(ch))
elif mode == 1:
if ch == '\\': #escape
mode = 2
elif ch == '"': #next part
mode = 0
else:
fname += ch
elif mode == 2:
fname += ch
mode = 1
elif mode == 3:
if ch.isspace():
#inspect
if token == '<<<': #stop token next
mode = 4
stoptoken = ''
elif len(token) > 3 and token[0:3] == '<<<':
mode = 0
stoptoken = token[3:]
else:
warn("unexpected file header token %s"%repr(token))
token = None
mode = 0
else:
token += ch
elif mode == 4:
if ch.isspace():
if len(stoptoken):
mode = 0
else:
stoptoken += ch
else:
warn('unexpected file header machine state %s'%repr(mode))
break
self.fname = string_subst(fname) if fname else fname
self.stoptoken = stoptoken
self.cmnt = False
return
class VarHeader:
def __init__(self,s=None):
if s:
self.parse(s)
else:
self.varname = None
self.rtext = None
return
def parse(self,s):
mode = 0
rtext = None
varname = None
isreq = False
if s[0] not in '!=':
raise ValueError('mktemplate.VarHeader.parse')
elif s[0] == '!':
isreq = True
for ch in s[1:]:
if mode == 0:
if ch == '"': #text
mode = 1
rtext = ''
elif ch.isalnum() or ch == '_':
varname = ch
mode = 3
elif ch.isspace():
continue
else:
warn("unexpected variable header character %s"%repr(ch))
elif mode == 1:
if ch == '\\': #escape
mode = 2
elif ch == '"': #next part
mode = 0
else:
rtext += ch
elif mode == 2:
rtext += ch
mode = 1
elif mode == 3:
if ch.isspace():
mode = 0
elif ch.isalnum() or ch == '_':
varname += ch
else:
warn("unexpected variable header token %s"%repr(token))
mode = 0
else:
warn('unexpected variable header machine state %s'%repr(mode))
break
self.varname = varname
self.rtext = string_subst(rtext) if rtext else rtext
self.isreq = isreq
return
if __name__ == '__main__':
def main(argc, argv):
# parse args
global replacements
argi = 1
want_help = False
fname = None
while argi < argc:
if argv[argi] == '-?':
want_help = True
break
elif argv[argi] == '-D':
argi += 1
if argi < argc:
varname = argv[argi]
argi += 1
if argi < argc:
replacements[varname] = argv[argi]
else:
err('incomplete "-D" option')
else:
fname = argv[argi]
argi += 1
if want_help or not fname:
print('''mktemplate.py: generate files from template
usage: python3 mktemplate.py [options] (template_file)
options:
-D (name) (value)
Add a variable named (name) with string (value) to the list
of replacement variables.
syntax (outside of string):
# ...
comment
=var "...string..."
assign a (possibly expanded) string to a variable name
!var
request a variable from the user
>"filename" <<<END_MARKER
begin a string of output to a file (or to `stdout` if
filename is missing). output ends with the END_MARKER
on a line by itself. (note that filename is also a
string.)
syntax (inside a string):
text...text
literal text output
${var}
output the latest string stored in `var`
${var:filter1:filter2:...}
output the string of `var` with filters applied to it
${var:filter(key=value):...}
give a key-value argument to a filter. value can be either
an integer or a variable name. (to pass a string as a
key-value argument, use a variable.)
use the `help` filter to get information on the available filters:
=filter_name "upper"
=empty ""
> <<<EOF
Help text for the "upper" filter:
${filter_name:help}
List of all filters:
${empty:help}
EOF
''',file=sys.stderr)
return 1
if fname != '-':
try:
f = open(fname,'rt')
except IOError as e:
print(e, file=sys.stderr)
return 1
else:
f = sys.stdin
global fullname, shortname, shortline, activeoutline, activeoutname
global iserr
fullname=fname
shortname=os.path.basename(fname)
activeoutname=''
activeoutline=0
shortline=0
shortstop=None
mode=0
g_to_close = False
g = None
g_usable = False
done = False
while not done:
ln = f.readline()
shortline += 1
activeoutline += 1
if not ln:
done = True
break
elif mode == 0:
if ln[0] == '#': #comment
pass
elif ln[0] in '=!': #require a variable
vh = VarHeader(ln)
if not vh.varname:
err('variable line missing variable name')
elif vh.isreq:
if vh.varname not in replacements:
err('missing definition for required variable %s'%repr(vh.varname))
break
elif vh.rtext is not None:
replacements[vh.varname] = vh.rtext
else:
del replacements[vh.varname]
elif ln[0] == '>': #output file
fh = FileHeader(ln)
if fh.fname:
try:
g = open(fh.fname, 'wt')
g_usable = True
g_to_close = True
except IOError as e:
print(e, file=sys.stderr)
warn('skipping file %s'%repr(fh.fname))
g_usable = False
g_to_close = False
activeoutname = fh.fname
else:
g = sys.stdout
g_usable = True
g_to_close = False
activeoutname = '<stdout>'
shortstop = fh.stoptoken
mode = 1
activeoutline = 0
elif mode == 1: #in a file
if shortstop and len(ln) <= len(shortstop)+2 \
and ln.rstrip() == shortstop:
mode = 0
if g_to_close:
g.close()
g = None
g_usable = False
g_to_close = False
elif g_usable:
g.write(string_subst(ln))
else:
pass
f.close()
return 1 if iserr else 0
res = main(len(sys.argv), sys.argv)
exit(int(res))
|
StarcoderdataPython
|
3221798
|
#
# Author: <NAME> (<EMAIL>) 2017
#
#__author__ = "<NAME>"
#__copyright__ = "Copyright 2017, <NAME>"
#__credits__ = ["<NAME>"]
#__license__ = "Apache"
#__version__ = "1.0.0"
#__maintainer__ = "<NAME>"
#__email__ = "<EMAIL>"
#__status__ = "Production"
from __future__ import division
#import direct.directbase.DirectStart
from pandac.PandaModules import *
import sys, os
# Three Axis Coordinate Plane Grid Class (ThreeAxisGrid)
# <NAME> AKA 'Forklift', August 2008
# '<EMAIL>'
#
# The grid can be created with any number of axis planes in mind.
# Simply set size values for the planes you wish to use. Sizes of
# zero will be ignored. By default, you can create single and three
# plane setups. Use plane visibility to disable any planes you don't
# need to acheive a 2 plane setup.
#
# To create a grid, first create an instance of this class. Then call
# its 'create' method to create the grid based on the class member
# variables. 'create' will return a NodePath instance that must be
# parented to 'render' in order to acheive visibility. Once the grid
# is created, its settings cannot be changed as the 'create' method
# generates the geometry procedurally using Panda's LineSeg class.
# If another grid or a different grid is needed, create a new
# instance of the ThreeAxisGrid class and setup as described above.
#
# A 'refresh' method is planned for a future version. This method
# would allow you to change a ThreeAxisGrid instance's settings,
# then recreate the geometry without changing the
# parentNodePath of the instance.
#
# ThreeAxisGrid class member variables are as follows:
# 'xsize' is the x axis length in units of the grid
# 'ysize' is the y axis length in units of the grid
# 'zsize' is the z axis lenght in units of the grid
# 'gridstep' is the spacing in units at which primary grid lines
# will be drawn
# 'subdiv' is the number used to subdivide the main (gridstep based)
# grid lines for drawing secondary grid lines example: if the
# primary grid lines are drawn every 10 units, and subdivision
# is set at 4, then secondary grid lines will be drawn
# every 2.5 units
# 'XYPlaneShow' and so forth: used to disable a plane with the
# creation of 2 plane grids in mind. 1 is on, 0 is off.
# 'endCapLinesShow' is used to turn grid border edges on or off.
# 1 is on, 0 is off.
# 'xaxiscolor' and so forth: axis colors are defaulted to the
# Maya standard axis colors
# 'gridcolor' is the RGB color of the primary grid lines,
# defaulted to black
# 'subdivcolor' is the RGB color of the secondary grid lines,
# defaulted to dark gray
# 'axisThickness' and so forth: sets the thickness of the
# respective component's lines
# 'parentNode' and 'parentNodePath' are used to contain
# the three LineSeg instance nodes and paths
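#
# A minimal usage sketch based on the notes above (illustrative; it assumes a
# running DirectStart/ShowBase application that provides 'render', as in the
# commented-out example at the bottom of this file):
#
#   grid = ThreeAxisGrid(xsize=100, ysize=100, zsize=0, gridstep=10, subdiv=5)
#   gridnodepath = grid.create()
#   gridnodepath.reparentTo(render)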
class ThreeAxisGrid:
def __init__(self, xsize=50, ysize=50, zsize=50, gridstep=10, subdiv=10):
# Init passed variables
self.XSize = xsize
self.YSize = ysize
self.ZSize = zsize
self.gridStep = gridstep
self.subdiv = subdiv
# Init default variables
# Plane and end cap line visibility (1 is show, 0 is hide)
self.XYPlaneShow = 1
self.XZPlaneShow = 1
self.YZPlaneShow = 1
self.endCapLinesShow = 1
# Alpha variables for each plane
#self.XYPlaneAlpha = 0.1
#self.XZPlaneAlpha = 0.1
#self.YZPlaneAlpha = 0.1
# Colors (RGBA passed as a VBase4 object)
self.XAxisColor = VBase4(1, 0, 0, 1)
self.YAxisColor = VBase4(0, 1, 0, 1)
self.ZAxisColor = VBase4(0, 0, 1, 1)
self.gridColor = (1, 1, 1, 0.3)
self.subdivColor = VBase4(.35, .35, .35, 0.1)
# Line thicknesses (in pixels)
self.axisThickness = 1
self.gridThickness = 1
self.subdivThickness = 1
# Axis, grid, and subdiv lines must be seperate LineSeg
# objects in order to allow different thicknesses.
# The parentNode groups them together for convenience.
# All may be accessed individually if necessary.
self.parentNode = None
self.parentNodePath = None
self.axisLinesNode = None
self.axisLinesNodePath = None
self.gridLinesNode = None
self.gridLinesNodePath = None
self.subdivLinesNode = None
self.subdivLinesNodePath = None
# Create line objects
self.axisLines = LineSegs()
self.subdivLines = LineSegs()
self.gridLinesXYTop = LineSegs()
self.gridLinesXYBottom = LineSegs()
self.gridLinesXZFront = LineSegs()
self.gridLinesXZBack = LineSegs()
self.gridLinesYZLeft = LineSegs()
self.gridLinesYZRight = LineSegs()
self.gridLinesNodePathXYTop = None
self.gridLinesNodePathXYBottom = None
self.gridLinesNodePathXZFront = None
self.gridLinesNodePathXZBack = None
self.gridLinesNodePathYZLeft = None
self.gridLinesNodePathYZRight = None
self.gridLines = [self.gridLinesXYTop, self.gridLinesXYBottom,
self.gridLinesXZFront, self.gridLinesXZBack,
self.gridLinesYZLeft, self.gridLinesYZRight]
def create(self):
# Set line thicknesses
self.axisLines.setThickness(self.axisThickness)
self.subdivLines.setThickness(self.subdivThickness)
self.gridLinesXYTop.setThickness(self.gridThickness)
self.gridLinesXYBottom.setThickness(self.gridThickness)
self.gridLinesXZFront.setThickness(self.gridThickness)
self.gridLinesXZBack.setThickness(self.gridThickness)
self.gridLinesYZLeft.setThickness(self.gridThickness)
self.gridLinesYZRight.setThickness(self.gridThickness)
if (self.XSize != 0):
# Draw X axis line
self.axisLines.setColor(self.XAxisColor)
self.axisLines.moveTo(-(self.XSize), -self.YSize, -self.ZSize)
self.axisLines.drawTo(self.XSize, -self.YSize, -self.ZSize)
if (self.YSize != 0):
# Draw Y axis line
self.axisLines.setColor(self.YAxisColor)
self.axisLines.moveTo(-self.XSize, -self.YSize, -self.ZSize)
self.axisLines.drawTo(-self.XSize, self.YSize, -self.ZSize)
if (self.ZSize != 0):
# Draw Z axis line
self.axisLines.setColor(self.ZAxisColor)
self.axisLines.moveTo(-self.XSize, -self.YSize, -self.ZSize)
self.axisLines.drawTo(-self.XSize, -self.YSize, self.ZSize)
# Check to see if primary grid lines should be drawn at all
if (self.gridStep != 0):
# Draw primary grid lines
self.gridLinesXYTop.setColor(self.gridColor)
self.gridLinesXYBottom.setColor(self.gridColor)
self.gridLinesXZFront.setColor(self.gridColor)
self.gridLinesXZBack.setColor(self.gridColor)
self.gridLinesYZLeft.setColor(self.gridColor)
self.gridLinesYZRight.setColor(self.gridColor)
# Draw primary grid lines metering x axis if any x-length
if (self.XSize != 0):
if ((self.YSize != 0) and (self.XYPlaneShow != 0)):
# Draw y lines across x axis starting from center moving out
# XY Plane
for x in self.myfrange(0, self.XSize, self.gridStep):
self.gridLinesXYBottom.moveTo(x, -self.YSize, -self.ZSize)
self.gridLinesXYBottom.drawTo(x, self.YSize, -self.ZSize)
self.gridLinesXYBottom.moveTo(-x, -self.YSize, -self.ZSize)
self.gridLinesXYBottom.drawTo(-x, self.YSize, -self.ZSize)
if (self.endCapLinesShow != 0):
# Draw endcap lines
self.gridLinesXYBottom.moveTo(self.XSize, -self.YSize, -self.ZSize)
self.gridLinesXYBottom.drawTo(self.XSize, self.YSize, -self.ZSize)
self.gridLinesXYBottom.moveTo(-self.XSize, -self.YSize, -self.ZSize)
self.gridLinesXYBottom.drawTo(-self.XSize, self.YSize, -self.ZSize)
# Draw y lines across x axis starting from center moving out
# XY Plane (TOP)
for x in self.myfrange(0, self.XSize, self.gridStep):
self.gridLinesXYTop.moveTo(x, -self.YSize, self.ZSize)
self.gridLinesXYTop.drawTo(x, self.YSize, self.ZSize)
self.gridLinesXYTop.moveTo(-x, -self.YSize, self.ZSize)
self.gridLinesXYTop.drawTo(-x, self.YSize, self.ZSize)
if (self.endCapLinesShow != 0):
# Draw endcap lines
self.gridLinesXYTop.moveTo(self.XSize, -self.YSize, self.ZSize)
self.gridLinesXYTop.drawTo(self.XSize, self.YSize, self.ZSize)
self.gridLinesXYTop.moveTo(-self.XSize, -self.YSize, self.ZSize)
self.gridLinesXYTop.drawTo(-self.XSize, self.YSize, self.ZSize)
if ((self.ZSize != 0) and (self.XZPlaneShow != 0)):
# Draw z lines across x axis starting from center moving out
# XZ Plane
for x in self.myfrange(0, self.XSize, self.gridStep):
self.gridLinesXZBack.moveTo(x, self.YSize, -(self.ZSize))
self.gridLinesXZBack.drawTo(x, self.YSize, self.ZSize)
self.gridLinesXZBack.moveTo(-x, self.YSize, -(self.ZSize))
self.gridLinesXZBack.drawTo(-x, self.YSize, self.ZSize)
if (self.endCapLinesShow != 0):
# Draw endcap lines
self.gridLinesXZBack.moveTo(self.XSize, self.YSize, -self.ZSize)
self.gridLinesXZBack.drawTo(self.XSize, self.YSize, self.ZSize)
self.gridLinesXZBack.moveTo(-self.XSize, self.YSize, -self.ZSize)
self.gridLinesXZBack.drawTo(-self.XSize, self.YSize, self.ZSize)
# Draw z lines across x axis starting from center moving out
# XZ Plane (FRONT)
for x in self.myfrange(0, self.XSize, self.gridStep):
self.gridLinesXZFront.moveTo(x, -self.YSize, -(self.ZSize))
self.gridLinesXZFront.drawTo(x, -self.YSize, self.ZSize)
self.gridLinesXZFront.moveTo(-x, -self.YSize, -(self.ZSize))
self.gridLinesXZFront.drawTo(-x, -self.YSize, self.ZSize)
if (self.endCapLinesShow != 0):
# Draw endcap lines
self.gridLinesXZFront.moveTo(self.XSize, -self.YSize, -self.ZSize)
self.gridLinesXZFront.drawTo(self.XSize, -self.YSize, self.ZSize)
self.gridLinesXZFront.moveTo(-self.XSize, -self.YSize, -self.ZSize)
self.gridLinesXZFront.drawTo(-self.XSize, -self.YSize, self.ZSize)
# Draw primary grid lines metering y axis if any y-length
if (self.YSize != 0):
if ((self.YSize != 0) and (self.XYPlaneShow != 0)):
# Draw x lines across y axis
# XY Plane
for y in self.myfrange(0, self.YSize, self.gridStep):
self.gridLinesXYBottom.moveTo(-self.XSize, y, -self.ZSize)
self.gridLinesXYBottom.drawTo(self.XSize, y, -self.ZSize)
self.gridLinesXYBottom.moveTo(-self.XSize, -y, -self.ZSize)
self.gridLinesXYBottom.drawTo(self.XSize, -y, -self.ZSize)
if (self.endCapLinesShow != 0):
# Draw endcap lines
self.gridLinesXYBottom.moveTo(-(self.XSize), self.YSize, -self.ZSize)
self.gridLinesXYBottom.drawTo(self.XSize, self.YSize, -self.ZSize)
self.gridLinesXYBottom.moveTo(-self.XSize, -(self.YSize), -self.ZSize)
self.gridLinesXYBottom.drawTo(self.XSize, -(self.YSize), -self.ZSize)
# Draw x lines across y axis
# XY Plane (TOP)
for y in self.myfrange(0, self.YSize, self.gridStep):
self.gridLinesXYTop.moveTo(-self.XSize, y, self.ZSize)
self.gridLinesXYTop.drawTo(self.XSize, y, self.ZSize)
self.gridLinesXYTop.moveTo(-self.XSize, -y, self.ZSize)
self.gridLinesXYTop.drawTo(self.XSize, -y, self.ZSize)
if (self.endCapLinesShow != 0):
# Draw endcap lines
self.gridLinesXYTop.moveTo(-(self.XSize), self.YSize, self.ZSize)
self.gridLinesXYTop.drawTo(self.XSize, self.YSize, self.ZSize)
self.gridLinesXYTop.moveTo(-self.XSize, -(self.YSize), self.ZSize)
self.gridLinesXYTop.drawTo(self.XSize, -(self.YSize), self.ZSize)
if ((self.ZSize != 0) and (self.YZPlaneShow != 0)):
# Draw z lines across y axis
# YZ Plane
for y in self.myfrange(0, self.YSize, self.gridStep):
self.gridLinesYZLeft.moveTo(-self.XSize, y, -(self.ZSize))
self.gridLinesYZLeft.drawTo(-self.XSize, y, self.ZSize)
self.gridLinesYZLeft.moveTo(-self.XSize, -y, -self.ZSize)
self.gridLinesYZLeft.drawTo(-self.XSize, -y, self.ZSize)
if (self.endCapLinesShow != 0):
# Draw endcap lines
self.gridLinesYZLeft.moveTo(-self.XSize, self.YSize, -(self.ZSize))
self.gridLinesYZLeft.drawTo(-self.XSize, self.YSize, self.ZSize)
self.gridLinesYZLeft.moveTo(-self.XSize, -(self.YSize), -(self.ZSize))
self.gridLinesYZLeft.drawTo(-self.XSize, -(self.YSize), self.ZSize)
# Draw z lines across y axis
# YZ Plane (RIGHT)
for y in self.myfrange(0, self.YSize, self.gridStep):
self.gridLinesYZRight.moveTo(self.XSize, y, -self.ZSize)
self.gridLinesYZRight.drawTo(self.XSize, y, self.ZSize)
self.gridLinesYZRight.moveTo(self.XSize, -y, -self.ZSize)
self.gridLinesYZRight.drawTo(self.XSize, -y, self.ZSize)
if (self.endCapLinesShow != 0):
# Draw endcap lines
self.gridLinesYZRight.moveTo(self.XSize, self.YSize, -self.ZSize)
self.gridLinesYZRight.drawTo(self.XSize, self.YSize, self.ZSize)
self.gridLinesYZRight.moveTo(self.XSize, -(self.YSize), -(self.ZSize))
self.gridLinesYZRight.drawTo(self.XSize, -(self.YSize), self.ZSize)
# Draw primary grid lines metering z axis if any z-length
if (self.ZSize != 0):
if ((self.XSize != 0) and (self.XZPlaneShow != 0)):
# Draw x lines across z axis
# XZ Plane
for z in self.myfrange(0, self.ZSize, self.gridStep):
self.gridLinesXZBack.moveTo(-(self.XSize), self.YSize, z)
self.gridLinesXZBack.drawTo(self.XSize, self.YSize, z)
self.gridLinesXZBack.moveTo(-(self.XSize), self.YSize, -z)
self.gridLinesXZBack.drawTo(self.XSize, self.YSize, -z)
if (self.endCapLinesShow != 0):
# Draw endcap lines
self.gridLinesXZBack.moveTo(-(self.XSize), self.YSize, self.ZSize)
self.gridLinesXZBack.drawTo(self.XSize, self.YSize, self.ZSize)
self.gridLinesXZBack.moveTo(-(self.XSize), self.YSize, -(self.ZSize))
self.gridLinesXZBack.drawTo(self.XSize, self.YSize, -(self.ZSize))
# Draw x lines across z axis
# XZ Plane (FRONT)
for z in self.myfrange(0, self.ZSize, self.gridStep):
self.gridLinesXZFront.moveTo(-(self.XSize), -self.YSize, z)
self.gridLinesXZFront.drawTo(self.XSize, -self.YSize, z)
self.gridLinesXZFront.moveTo(-(self.XSize), -self.YSize, -z)
self.gridLinesXZFront.drawTo(self.XSize, -self.YSize, -z)
if (self.endCapLinesShow != 0):
# Draw endcap lines
self.gridLinesXZFront.moveTo(-(self.XSize), -self.YSize, self.ZSize)
self.gridLinesXZFront.drawTo(self.XSize, -self.YSize, self.ZSize)
self.gridLinesXZFront.moveTo(-(self.XSize), -self.YSize, -(self.ZSize))
self.gridLinesXZFront.drawTo(self.XSize, -self.YSize, -(self.ZSize))
if ((self.YSize != 0) and (self.YZPlaneShow != 0)):
# Draw y lines across z axis
# YZ Plane
for z in self.myfrange(0, self.ZSize, self.gridStep):
self.gridLinesYZLeft.moveTo(-self.XSize, -(self.YSize), z)
self.gridLinesYZLeft.drawTo(-self.XSize, self.YSize, z)
self.gridLinesYZLeft.moveTo(-self.XSize, -(self.YSize), -z)
self.gridLinesYZLeft.drawTo(-self.XSize, self.YSize, -z)
if (self.endCapLinesShow != 0):
# Draw endcap lines
self.gridLinesYZLeft.moveTo(-self.XSize, -(self.YSize), self.ZSize)
self.gridLinesYZLeft.drawTo(-self.XSize, self.YSize, self.ZSize)
self.gridLinesYZLeft.moveTo(-self.XSize, -(self.YSize), -(self.ZSize))
self.gridLinesYZLeft.drawTo(-self.XSize, self.YSize, -(self.ZSize))
# Draw y lines across z axis
# YZ Plane (RIGHT)
for z in self.myfrange(0, self.ZSize, self.gridStep):
self.gridLinesYZRight.moveTo(self.XSize, -self.YSize, z)
self.gridLinesYZRight.drawTo(self.XSize, self.YSize, z)
self.gridLinesYZRight.moveTo(self.XSize, -self.YSize, -z)
self.gridLinesYZRight.drawTo(self.XSize, self.YSize, -z)
if (self.endCapLinesShow != 0):
# Draw endcap lines
self.gridLinesYZRight.moveTo(self.XSize, -self.YSize, self.ZSize)
self.gridLinesYZRight.drawTo(self.XSize, self.YSize, self.ZSize)
self.gridLinesYZRight.moveTo(self.XSize, -self.YSize, -self.ZSize)
self.gridLinesYZRight.drawTo(self.XSize, self.YSize, -self.ZSize)
# Check to see if secondary grid lines should be drawn
if (self.subdiv != 0):
# Draw secondary grid lines
self.subdivLines.setColor(self.subdivColor)
if (self.XSize != 0):
adjustedstep = self.gridStep / self.subdiv
print(self.gridStep)
print(self.subdiv)
print(adjustedstep)
print(self.gridStep / self.subdiv)
if ((self.YSize != 0) and (self.XYPlaneShow != 0)):
# Draw y lines across x axis starting from center moving out
# XY
for x in self.myfrange(0, self.XSize, adjustedstep):
self.subdivLines.moveTo(x, -(self.YSize), -self.ZSize)
self.subdivLines.drawTo(x, self.YSize, -self.ZSize)
self.subdivLines.moveTo(-x, -(self.YSize), -self.ZSize)
self.subdivLines.drawTo(-x, self.YSize, -self.ZSize)
if ((self.ZSize != 0) and (self.XZPlaneShow != 0)):
# Draw z lines across x axis starting from center moving out
# XZ
for x in self.myfrange(0, self.XSize, adjustedstep):
self.subdivLines.moveTo(x, 0, -(self.ZSize))
self.subdivLines.drawTo(x, 0, self.ZSize)
self.subdivLines.moveTo(-x, 0, -(self.ZSize))
self.subdivLines.drawTo(-x, 0, self.ZSize)
if (self.YSize != 0):
if ((self.YSize != 0) and (self.XYPlaneShow != 0)):
# Draw x lines across y axis
# XY
for y in self.myfrange(0, self.YSize, adjustedstep):
self.subdivLines.moveTo(-(self.XSize), y, -self.ZSize)
self.subdivLines.drawTo(self.XSize, y, -self.ZSize)
self.subdivLines.moveTo(-(self.XSize), -y, -self.ZSize)
self.subdivLines.drawTo(self.XSize, -y, -self.ZSize)
if ((self.ZSize != 0) and (self.YZPlaneShow != 0)):
# Draw z lines across y axis
# YZ
for y in self.myfrange(0, self.YSize, adjustedstep):
self.subdivLines.moveTo(-self.XSize, y, -(self.ZSize))
self.subdivLines.drawTo(-self.XSize, y, self.ZSize)
self.subdivLines.moveTo(-self.XSize, -y, -(self.ZSize))
self.subdivLines.drawTo(-self.XSize, -y, self.ZSize)
if (self.ZSize != 0):
if ((self.XSize != 0) and (self.XZPlaneShow != 0)):
# Draw x lines across z axis
# XZ
for z in self.myfrange(0, self.ZSize, adjustedstep):
self.subdivLines.moveTo(-self.XSize, self.YSize, z)
self.subdivLines.drawTo(self.XSize, self.YSize, z)
self.subdivLines.moveTo(-self.XSize, self.YSize, -z)
self.subdivLines.drawTo(self.XSize, self.YSize, -z)
if ((self.YSize != 0) and (self.YZPlaneShow != 0)):
# Draw y lines across z axis
# YZ
for z in self.myfrange(0, self.ZSize, adjustedstep):
self.subdivLines.moveTo(-self.XSize, -(self.YSize), z)
self.subdivLines.drawTo(-self.XSize, self.YSize, z)
self.subdivLines.moveTo(-self.XSize, -(self.YSize), -z)
self.subdivLines.drawTo(-self.XSize, self.YSize, -z)
# Create ThreeAxisGrid nodes and nodepaths
# Create parent node and path
self.parentNode = PandaNode('threeaxisgrid-parentnode')
self.parentNodePath = NodePath(self.parentNode)
# Create axis lines node and path, then reparent
self.axisLinesNode = self.axisLines.create()
self.axisLinesNodePath = NodePath(self.axisLinesNode)
self.axisLinesNodePath.reparentTo(self.parentNodePath)
# Create grid lines node and path, then reparent
#self.gridLinesNode = self.gridLines.create()
#self.gridLinesNodePath = NodePath(self.gridLinesNode)
#self.gridLinesNodePath.reparentTo(self.parentNodePath)
gridLinesNode = self.gridLinesXYTop.create()
self.gridLinesNodePathXYTop = NodePath(gridLinesNode)
self.gridLinesNodePathXYTop.reparentTo(self.parentNodePath)
gridLinesNode = self.gridLinesXYBottom.create()
self.gridLinesNodePathXYBottom = NodePath(gridLinesNode)
self.gridLinesNodePathXYBottom.reparentTo(self.parentNodePath)
gridLinesNode = self.gridLinesXZFront.create()
self.gridLinesNodePathXZFront = NodePath(gridLinesNode)
self.gridLinesNodePathXZFront.reparentTo(self.parentNodePath)
gridLinesNode = self.gridLinesXZBack.create()
self.gridLinesNodePathXZBack = NodePath(gridLinesNode)
self.gridLinesNodePathXZBack.reparentTo(self.parentNodePath)
gridLinesNode = self.gridLinesYZLeft.create()
self.gridLinesNodePathYZLeft = NodePath(gridLinesNode)
self.gridLinesNodePathYZLeft.reparentTo(self.parentNodePath)
gridLinesNode = self.gridLinesYZRight.create()
self.gridLinesNodePathYZRight = NodePath(gridLinesNode)
self.gridLinesNodePathYZRight.reparentTo(self.parentNodePath)
# Create subdivision lines node and path then reparent
self.subdivLinesNode = self.subdivLines.create()
self.subdivLinesNodePath = NodePath(self.subdivLinesNode)
self.subdivLinesNodePath.reparentTo(self.parentNodePath)
return self.parentNodePath
# Thanks to <NAME> for this float-accepting range method
def myfrange(self, start, stop=None, step=None):
if stop is None:
stop = float(start)
start = 0.0
if step is None:
step = 1.0
cur = float(start)
while cur < stop:
yield cur
cur += step
def hideFrontFace(self, angleY=0, angleZ=0):
self.gridLinesNodePathXZFront.show()
self.gridLinesNodePathYZRight.show()
self.gridLinesNodePathXZBack.show()
self.gridLinesNodePathYZLeft.show()
if angleY == 0 and angleZ == 0:
#Front
self.gridLinesNodePathXZFront.hide()
return 1
if angleY == 0 and angleZ == 45:
#Front + Right
self.gridLinesNodePathXZFront.hide()
self.gridLinesNodePathYZRight.hide()
elif angleY == 0 and angleZ == 90:
#Right
self.gridLinesNodePathYZRight.hide()
return 5
elif angleY == 0 and angleZ == 135:
#Back + Right
self.gridLinesNodePathXZBack.hide()
self.gridLinesNodePathYZRight.hide()
elif angleY == 0 and angleZ == 180:
#Back
self.gridLinesNodePathXZBack.hide()
return 3
elif angleY == 0 and angleZ == 225:
#Left + Back
self.gridLinesNodePathYZLeft.hide()
self.gridLinesNodePathXZBack.hide()
elif angleY == 0 and angleZ == 270:
#Left
self.gridLinesNodePathYZLeft.hide()
return 6
elif angleY == 0 and angleZ == 315:
#Left + Front
self.gridLinesNodePathXZFront.hide()
self.gridLinesNodePathYZLeft.hide()
return 0
#grid = ThreeAxisGrid()
#gridnodepath = grid.create()
#gridnodepath.reparentTo(render)
#run()
|
StarcoderdataPython
|
11287699
|
<gh_stars>0
from django.forms import ModelForm
from .models import Attachment
class AttachmentForm(ModelForm):
class Meta:
model = Attachment
fields = ('name', 'file')
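# Hedged usage sketch (added for illustration; the view name, URL name and template
# path below are assumptions, not part of the original app):
#
# from django.shortcuts import redirect, render
#
# def upload_attachment(request):
#     if request.method == 'POST':
#         form = AttachmentForm(request.POST, request.FILES)  # request.FILES is needed for the 'file' field
#         if form.is_valid():
#             form.save()
#             return redirect('attachment-list')
#     else:
#         form = AttachmentForm()
#     return render(request, 'attachments/upload.html', {'form': form})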
|
StarcoderdataPython
|
9730415
|
<reponame>MaximDecherf/F1-21_udp_data<filename>src/F121UdpData/F1Data.py
import socket
from .packets.packet import Packet
class F1Data:
PACKET_SIZE_MAPPER = {'MOTION': 1464, 'SESSION': 625, 'LAP_DATA': 970, 'EVENT': 36, 'PARTICIPANTS': 1257, 'CAR_SETUPS': 1102,
'CAR_TELEMETRY': 1347, 'CAR_STATUS': 1058, 'FINAL_CLASSIFICATION': 839, 'LOBBY_INFO': 1191, 'CAR_DAMAGE': 882, 'SESSION_HISTORY': 1155}
def __init__(self, port=20777, ip="127.0.0.1", filter_packets=None):
self.udp_ip = ip
self.udp_port = port
self.filter_packets = filter_packets
if type(filter_packets) == list:
allowed_packets = []
for packet in filter_packets:
allowed_packets.append(self.PACKET_SIZE_MAPPER[packet])
self.filter_packets = allowed_packets
def run(self, sock):
return self.incoming_data(sock)
def setup_udp_con(self, ip=None, port=None):
if ip == None and port == None:
ip = self.udp_ip
port = self.udp_port
sock = socket.socket(socket.AF_INET, # Internet
socket.SOCK_DGRAM) # UDP
sock.bind((ip, port))
return sock
def incoming_data(self, sock):
data, addr = sock.recvfrom(2048) # buffer size is 2048 bytes
if type(self.filter_packets) == list:
if len(data) in self.filter_packets:
packet = Packet(data)
return packet.packet_body.body_data
else:
packet = Packet(data)
return packet.packet_body.body_data
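# Hedged usage sketch (added for illustration, not part of the original module):
# listen on the default port and print decoded car telemetry packets. Assumes the
# F1 2021 game is broadcasting UDP telemetry to this machine.
if __name__ == "__main__":
    f1 = F1Data(filter_packets=['CAR_TELEMETRY'])
    sock = f1.setup_udp_con()
    try:
        while True:
            body = f1.run(sock)  # returns None for packets filtered out by size
            if body is not None:
                print(body)
    except KeyboardInterrupt:
        sock.close()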
|
StarcoderdataPython
|
1710956
|
import tensorflow as tf
tf.InteractiveSession()
a = tf.zeros((2,2))
b = tf.ones((2,2))
print(a.eval())
print(b.eval())
print(tf.reduce_sum(b, reduction_indices=1).eval())
print(a.get_shape())
print(tf.reshape(a, (1, 4)).eval())
|
StarcoderdataPython
|
95955
|
import unittest
from msdm.domains import GridWorld
class GridWorldTestCase(unittest.TestCase):
def test_feature_locations(self):
gw = GridWorld([
"cacg",
"sabb"])
fl = gw.feature_locations
lf = gw.location_features
fl2 = {}
for l, f in lf.items():
fl2[f] = fl2.get(f, []) + [l,]
assert all(set(fl[f]) == set(fl2[f]) for f in fl.keys())
def test_reachability(self):
gw = GridWorld([
"....#...g",
"....#....",
"#####....",
"s........",
])
assert len(gw.reachable_states()) == 22 #includes terminal
|
StarcoderdataPython
|
14753
|
<gh_stars>1-10
"""
Query construction tests.
"""
from hamcrest import assert_that, is_, equal_to
from influxdbnagiosplugin.query import ExplicitQueryBuilder, SingleMeasurementQueryBuilder
def test_explicit_query():
query = ExplicitQueryBuilder("SHOW MEASUREMENTS")
assert_that(query().query, is_(equal_to(
"SHOW MEASUREMENTS"
)))
def test_single_measurement_query():
query = SingleMeasurementQueryBuilder.for_hostname_and_age(
measurement="disk_free",
hostname="hostname",
age="30s",
where=[],
)
assert_that(query().query, is_(equal_to(
"SELECT time, value FROM disk_free"
" WHERE time > now() - 30s"
" AND host = 'hostname'"
)))
def test_single_measurement_query_where_clause():
query = SingleMeasurementQueryBuilder.for_hostname_and_age(
measurement="disk_free",
hostname="hostname",
age="30s",
where=["path=/"],
)
assert_that(query().query, is_(equal_to(
"SELECT time, value FROM disk_free"
" WHERE time > now() - 30s"
" AND host = 'hostname'"
" AND path = '/'"
)))
def test_single_measurement_query_where_clause_quoted():
query = SingleMeasurementQueryBuilder.for_hostname_and_age(
measurement="disk_free",
hostname="hostname",
age="30s",
where=["path='/'"],
)
assert_that(query().query, is_(equal_to(
"SELECT time, value FROM disk_free"
" WHERE time > now() - 30s"
" AND host = 'hostname'"
" AND path = '/'"
)))
|
StarcoderdataPython
|
12820821
|
<filename>yatai/yatai/configuration/__init__.py
import os
def get_local_config_file():
if "YATAI_CONFIG" in os.environ:
# User local config file for customizing Yatai
return expand_env_var(os.environ.get("YATAI_CONFIG"))
return None
def inject_dependencies():
"""Inject dependencis and configuration for Yatai package"""
from yatai.yatai.configuration.containers import YataiConfiguration, YataiContainer
config_file = get_local_config_file()
if config_file and config_file.endswith('.yml'):
configuration = YataiConfiguration(override_config_file=config_file)
else:
configuration = YataiConfiguration()
YataiContainer.config.set(configuration.as_dict())
def expand_env_var(env_var):
"""Expands potentially nested env var by repeatedly applying `expandvars` and
`expanduser` until interpolation stops having any effect.
"""
if not env_var:
return env_var
while True:
interpolated = os.path.expanduser(os.path.expandvars(str(env_var)))
if interpolated == env_var:
return interpolated
else:
env_var = interpolated
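# Illustrative example (added, not part of the original module): nested variables
# are expanded repeatedly until the value stops changing.
if __name__ == "__main__":
    os.environ["YATAI_HOME"] = "$HOME/yatai"
    os.environ["YATAI_CONFIG"] = "$YATAI_HOME/config.yml"
    # Resolves to something like /home/<user>/yatai/config.yml after a few passes.
    print(expand_env_var("$YATAI_CONFIG"))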
|
StarcoderdataPython
|
300562
|
from .tsl2561 import *
|
StarcoderdataPython
|
12861986
|
#!/usr/bin/env python3
import yoda, sys
import h5py
import numpy as np
def chunkIt(seq, num):
avg = len(seq) / float(num)
out = []
last = 0.0
while last < len(seq):
out.append(seq[int(last):int(last + avg)])
last += avg
# Fix size, sometimes there is spillover
# TODO: replace with while if problem persists
if len(out) > num:
out[-2].extend(out[-1])
out = out[0:-1]
if len(out) != num:
raise Exception("something went wrong in chunkIt, the target size differs from the actual size")
return out
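# Quick illustration (added): chunkIt splits the work items into `num` contiguous
# chunks; this is how the input YODA files are distributed over MPI ranks below.
# For example, chunkIt(list(range(10)), 3) returns [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]].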
def createDatasets(f, binids, variations, depth=1, compression=4):
"""
Create data sets in the HDF5 file.
"""
nbins=len(binids)
nvars=len(variations)
# The fundamental moments/elements of yoda objecs
floats = [
"sumw",
"sumw2",
"sumwx",
"sumwx2",
"sumwy",
"sumwy2",
"sumwxy",
"numEntries",
"xval",
"xerr-",
"xerr+",
"yval",
"yerr-",
"yerr+",
"xmin",
"xmax",
"ymin",
"ymax"
]
# The datasets have 3 axes: binid, weight variation, point in parameter space
for df in floats: f.create_dataset(df, (nbins,nvars,depth), maxshape=(None,None,None), dtype='f' , chunks=True, compression=compression)
    # Lookups --- helps when reading data and reconstructing YODA objects
f.create_group("Histo1D")
f.create_group("Histo2D")
f.create_group("Profile1D")
f.create_group("Counter")
f.create_group("Scatter1D")
f.create_group("Scatter2D")
# This is the one that works well with hdf5 when reading std::string in C++
dt = h5py.special_dtype(vlen=str)
# We use these simple lists as lookup tables to associate the elements of the datasets ^^^ with
# the actual YODA Analysis objects
import numpy as np
f.create_dataset("binids", data=np.array(binids, dtype=dt))
f.create_dataset("variations", data=np.array(variations, dtype=dt))
def dbn0ToArray(dbn):
return np.array([dbn.sumW(), dbn.sumW2(), dbn.numEntries()])
def dbn1ToArray(dbn):
"""
The try except block deals with the underflow things not having xmin, xmax
"""
try:
return np.array([dbn.sumW(), dbn.sumW2(), dbn.sumWX(), dbn.sumWX2(), dbn.numEntries(), dbn.xMin(), dbn.xMax()])
except:
return np.array([dbn.sumW(), dbn.sumW2(), dbn.sumWX(), dbn.sumWX2(), dbn.numEntries(), 0, 0])
def H2dbn2ToArray(dbn):
"""
The try except block deals with the underflow things not having xmin, xmax
"""
try:
return np.array([dbn.sumW(), dbn.sumW2(), dbn.sumWX(), dbn.sumWX2(), dbn.sumWY(), dbn.sumWY2(), dbn.sumWXY(), dbn.numEntries(), dbn.xMin(), dbn.xMax(), dbn.yMin(), dbn.yMax()])
except:
return np.array([dbn.sumW(), dbn.sumW2(), dbn.sumWX(), dbn.sumWX2(), dbn.sumWY(), dbn.sumWY2(), dbn.sumWXY(), dbn.numEntries(), 0, 0, 0, 0])
def dbn2ToArray(dbn):
try:
return np.array([dbn.sumW(), dbn.sumW2(), dbn.sumWX(), dbn.sumWX2(), dbn.sumWY(), dbn.sumWY2(), dbn.numEntries(), dbn.xMin(), dbn.xMax()])
except:
return np.array([dbn.sumW(), dbn.sumW2(), dbn.sumWX(), dbn.sumWX2(), dbn.sumWY(), dbn.sumWY2(), dbn.numEntries(), 0, 0])
def point2DToArray(pnt):
return np.array([pnt.val(1), pnt.errMinus(1), pnt.errPlus(1), pnt.val(2), pnt.errMinus(2), pnt.errPlus(2)])
def point1DToArray(pnt):
return np.array([pnt.val(1), pnt.errMinus(1), pnt.errPlus(1)])
def mkSafeHname(hname):
return hname.replace("/","|")
def mkBinids(hdict):
binids= []
for num, hname in enumerate(sorted(list(hdict.keys()))):
if hname.endswith("]"): continue
ao = hdict[hname]
base = ao.path().split("[")[0].replace("/","|")
if ao.type()=="Scatter1D" or ao.type()=="Scatter2D":
temp = ["{}#{}".format(base, i) for i in range(len(ao))]
elif ao.type()=="Counter":
temp = ["{}#{}".format(base, 0)]
else:
suffixes = ["T", "O", "U"]
if ao.type() == "Counter":
suffixes.append(0)
else:
suffixes.extend([i for i in range(len(ao))])
temp = ["{}#{}".format(base, s) for s in suffixes]
binids.extend(temp)
return binids
def mkIndexDict(datadict, allbinids):
ret = {'Histo1D':{}, 'Histo2D':{}, 'Profile1D':{}, 'Scatter1D':{}, 'Scatter2D':{}, 'Counter':{}}
for hname, v in datadict.items():
_hname=mkSafeHname(hname)
try:
ret[datadict[hname].type()][_hname] = [num for num, binid in enumerate(allbinids) if binid.startswith("{}#".format(_hname))]
except Exception as e:
print("oops: ", e)
return ret
def createIndexDS(f, d_idx):
for dtype, objects in d_idx.items():
for _hname, binIdx in objects.items():
f.create_dataset("{}/{}".format(dtype, _hname), data=binIdx , chunks=True)
def fillDatasets(f, binIdx, variations, ddict, hname, depth=0):
if len(binIdx) ==0:
print("Warning, no matching binid for {} --- is this one of the raw ratios maybe???".format(hname))
return
if ddict[hname].type()=='Histo1D':
nFields=7
fdbn = dbn1ToArray
elif ddict[hname].type()=='Histo2D':
nFields=12
fdbn = H2dbn2ToArray
elif ddict[hname].type()=='Profile1D':
fdbn = dbn2ToArray
nFields=9
elif ddict[hname].type()=='Scatter2D':
fdbn = point2DToArray
nFields=6
elif ddict[hname].type()=='Scatter1D':
fdbn = point1DToArray
nFields=3
elif ddict[hname].type()=='Counter':
nFields=3
else:
raise Exception("type {} Not implemented".format(ddict[hname].type()))
# Empty array to be filled and written to datasets
temp = np.zeros((len(binIdx), len(variations), nFields))
hids = [hname]
for v in variations[1:]:
hids.append("{}[{}]".format(hname, v))
# Iterate over variations
for col, hn in enumerate(hids):
# Iterate over bins
H=ddict[hn]
if H.type() == "Counter":
temp[0][col] = np.array([H.sumW(), H.sumW2(), H.numEntries()])
# Things with under/overflow first
elif H.type() not in ["Scatter1D", "Scatter2D", "Histo2D"]:
temp[0][col] = fdbn(H.totalDbn())
temp[1][col] = fdbn(H.overflow())
temp[2][col] = fdbn(H.underflow())
for i in range(len(binIdx)-3):
temp[3+i][col] = fdbn(H.bin(i))
elif H.type() =="Histo2D":
temp[0][col] = fdbn(H.totalDbn())
temp[1][col] = 0.0 # Future proofing
temp[2][col] = 0.0 #
for i in range(len(binIdx)-3):
temp[3+i][col] = fdbn(H.bin(i))
else:
for i in range(len(binIdx)):
temp[i][col] = fdbn(H.point(i))
if ddict[hname].type()=='Histo1D':
f["sumw"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,0]
f["sumw2"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,1]
f["sumwx"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,2]
f["sumwx2"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,3]
f["numEntries"][binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,4]
f["xmin"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,5]
f["xmax"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,6]
# elif ddict[hname].type()=='Histo2D':
# f["sumw"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,0]
# f["sumw2"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,1]
# f["sumwx"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,2]
# f["sumwx2"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,3]
# f["sumwy"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,4]
# f["sumwy2"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,5]
# f["sumwxy"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,6]
# f["numEntries"][binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,7]
# f["xmin"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,8]
# f["xmax"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,9]
# f["ymin"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,10]
# f["ymax"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,11]
# elif ddict[hname].type()=='Profile1D':
# f["sumw"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,0]
# f["sumw2"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,1]
# f["sumwx"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,2]
# f["sumwx2"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,3]
# f["sumwy"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,4]
# f["sumwy2"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,5]
# f["numEntries"][binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,6]
# f["xmin"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,7]
# f["xmax"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,8]
# elif ddict[hname].type()=='Scatter1D':
# f["xval"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,0]
# f["xerr-"][binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,1]
# f["xerr+"][binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,2]
# elif ddict[hname].type()=='Scatter2D':
# f["xval"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,0]
# f["xerr-"][binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,1]
# f["xerr+"][binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,2]
# f["yval"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,3]
# f["yerr-"][binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,4]
# f["yerr+"][binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,5]
# elif ddict[hname].type()=='Counter':
# f["sumw"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,0]
# f["sumw2"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,1]
# f["numEntries"][binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,2]
# else:
# raise Exception("yikes")
if __name__=="__main__":
import sys
import optparse, os, sys
op = optparse.OptionParser(usage=__doc__)
op.add_option("-v", "--debug", dest="DEBUG", action="store_true", default=False, help="Turn on some debug messages")
op.add_option("-o", dest="OUTPUT", default="analysisobjects.h5", help="Output HDF5 file (default: %default)")
opts, args = op.parse_args()
YODAFILES = args
from mpi4py import MPI
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
binids, VVV, aix, aix_flat, central = None, None, None, None, None
if rank==0:
# TODO if len(args)==1 and os.path.isdir(args[0]) --- hierarchical reading with pnames finding etc
# Let's assume they are all consistent TODO add robustness
DATA0 = yoda.readYODA(args[0])
L = sorted(list(DATA0.keys()))
names = [x for x in L ]# if not "/RAW" in x]
central = [x for x in names if not x.endswith("]")]
variations = [x for x in names if x.endswith("]")]
# TODO In principle one probably should check that all variations are always the
# same, we assume this is the case here
var = []
for c in central:
var.append([x for x in variations if x.startswith(c+"[")])
        ## That's the weight and weight variation order we store the data in
        VVV = ["CentralWeight"]
        import re
        p = re.compile(r"\[(.*?)\]")
for x in var[0]:
try:
VVV.append(p.findall(x)[0])
except Exception as e:
print(x, e)
binids = mkBinids(DATA0)
# Hierarchical, i.e. top layer is the AnalysisObject type
aix = mkIndexDict(DATA0, binids)
# Object name as keys and lists of indices as values
aix_flat = {}
for k, v in aix.items(): aix_flat.update(v)
binids = comm.bcast(binids, root=0)
VVV = comm.bcast(VVV, root=0)
aix = comm.bcast(aix, root=0)
aix_flat = comm.bcast(aix_flat, root=0)
central = comm.bcast(central, root=0)
# NOTE dataset operations are collective
# This require h5py to use and H5 that is build with MPI
try:
f = h5py.File(opts.OUTPUT, "w", driver='mpio', comm=MPI.COMM_WORLD)
except:
f = h5py.File(opts.OUTPUT, "w")
createDatasets(f, binids, VVV, depth=len(YODAFILES))
createIndexDS(f, aix)
rankwork = chunkIt([i for i in range(len(YODAFILES))], size) if rank==0 else None
rankwork = comm.scatter(rankwork, root=0)
# This part is MPI trivial
for num, findex in enumerate(rankwork):
DATA = yoda.readYODA(YODAFILES[findex])
for hname in central:
_hname=mkSafeHname(hname)
fillDatasets(f, aix_flat[_hname], VVV, DATA, hname, depth=findex)
if rank==0:
print("[{}] --- {}/{} complete".format(rank, num, len(rankwork)))
sys.stdout.flush()
f.close()
|
StarcoderdataPython
|
6596401
|
<gh_stars>1-10
import globalvars
from gamestate import *
from random import randint
def play():
setElevatorDestination(7)
#------------ bitte hier stehen lassen
execfile ("functions.py")
|
StarcoderdataPython
|
264292
|
<filename>benchmark_client.py<gh_stars>1-10
#!/usr/bin/env python
# encoding: utf-8
"""
Created by <NAME> on 2013-03-12
Published under the MIT license.
"""
import os, sys, logging
from miniredis.client import RedisClient
from multiprocessing import Pool
import time
import random
log = logging.getLogger()
if __name__=='__main__':
def timed(count):
c = RedisClient()
c.select(1)
seq = range(0,count)
now = time.time()
for i in seq:
it = str(random.choice(seq))
c.set(it, it)
it = str(random.choice(seq))
c.get(it)
return count/(time.time() - now)
p = Pool(4)
    print(sum(p.map(timed, [25000, 25000, 25000, 25000])))
    # print(timed(10000))
|
StarcoderdataPython
|
9716548
|
#!/usr/bin/env python3
from ast import literal_eval
import time
import serial as pyserial
import struct
from HeimdallMultiwii.constants import CTYPE_PATTERNS
from HeimdallMultiwii.exeptions import MissingCodeError, ResponseParserNotImpl, MWCMessageNotSupported
from HeimdallMultiwii.mspcommands import MSPMessagesEnum
def validate_code(f):
def __validate_code_wrapper(self, code):
codes = list(map(int, MSPMessagesEnum))
if code is None or code not in codes:
raise MissingCodeError("Please provide message code")
return f(self, code)
return __validate_code_wrapper
class MultiWii:
"""
    A class used for serial communication with a MultiWii compatible FCB (Flight Controller Board)
    ...
    Attributes
    ----------
    serial : serial
        Python serial port object used to receive and send messages from/to the FCB
    Methods
    -------
    open_connection(baud_rate, serport=None)
        Opens and configures the serial connection with the FCB
"""
def __init__(self) -> None:
self.serial = pyserial.Serial(timeout=1)
# self.logger = logging.getLogger('simpleExample')
def __del__(self):
if self.serial.isOpen():
self.close_connection()
def open_connection(self, baud_rate, serport=None):
"""Setup and open serial communication with FCB
:param serport: Serial port for multiwii connection
:param baud_rate: BaudRate for multiwii connection
        :return: True if the connection was successfully established
        :raise: Exception if no serial port is provided
"""
if serport is None:
raise Exception("Please provide a Serial Port...")
self.serial.port = serport
self.serial.baudrate = baud_rate
self.serial.bytesize = pyserial.EIGHTBITS
self.serial.parity = pyserial.PARITY_NONE
self.serial.stopbits = pyserial.STOPBITS_ONE
self.serial.write_timeout = 3
self.serial.xonxoff = False
self.serial.rtscts = False
self.serial.dsrdtr = False
return self._connect()
def _connect(self):
"""
Open Serial port for communications
:return: True if Connection was established
"""
try:
wait = 6
self.serial.open()
# self.logger.info("Connecting with board on port: " + self.serial.port)
print("Connecting with board on port: " + self.serial.port)
for i in range(1, wait):
# self.logger.info(wait - i)
time.sleep(1)
except Exception as error:
# self.logger.warning("Error opening " + self.serial.port + " port. " + str(error))
return False
# self.logger.info("Connection Stablished.")
print("Connection Stablished.")
return True
def close_connection(self):
"""
Close Serial port
"""
if self.serial.isOpen():
# self.logger.info("Closing Connection...")
self.serial.close()
# self.logger.info("The connection was closed.")
# else:
# self.logger.info("Connection already closed.")
@validate_code
def send_simple_command(self, code=None):
"""
        Send single commands, e.g. for calibrating the MAG or ACC
        No response is returned by the board
        :param code:
        :return: True if everything is OK
"""
message = self._buildpayload(code)
self._sendmessage(message)
return True
@validate_code
def get_fcb_data(self, code=None):
"""
Send Request Message to the Multiwii FCB
:param code: MSP Request
        :return: MultiWii response dict
"""
message = self._buildpayload(code)
self._sendmessage(message)
return self.readmessage(code)
def _sendmessage(self, message):
self.serial.write(message)
self.serial.flushOutput()
def readmessage(self, code):
data = self.__extract_data(code)
if data:
try:
fmt = CTYPE_PATTERNS[code]
except KeyError:
self.serial.flushInput()
                raise ResponseParserNotImpl('The message cannot be parsed')
            if fmt == 'PENDING':
                self.serial.flushInput()
                raise ResponseParserNotImpl('The message cannot be parsed')
msg = struct.unpack('<' + fmt, data)
self.serial.flushInput()
return self._process_message(code, msg)
def __extract_data(self, code):
data = b''
try:
header = tuple(self.serial.read(3))
datalength = struct.unpack('<b', self.serial.read())[0]
struct.unpack('<b', self.serial.read())
if header == (0x24, 0x4d, 0x3e) and 0x21 not in header:
data = self.serial.read(datalength)
elif 0x21 in header:
                raise MWCMessageNotSupported("The board can't respond to message {0}".format(code))
return data
except (pyserial.serialutil.SerialException, struct.error):
return data
# def _flush(self):
# self.serial.flushInput()
# self.serial.flushOutput()
def _buildpayload(self, code: int, size: int = 0, data: list = []):
payload = bytes()
total_data = [ord('$'), ord('M'), ord('<'), size, code] + data
payload += struct.pack('<3bBB%dH' % len(data), *total_data)
data = payload[3:]
checksum = code
        if len(data) > 0x02:  # change 0x02 to ord(2)
checksum = 0
for byte in data:
checksum ^= byte
payload += struct.pack("<%s" % "H" if checksum > 0xff else "B", checksum)
return payload
def _process_message(self, code, msg):
message = literal_eval(str(msg))
template = _MessagesFormats.TEMPLATES[code]
msglist= list(zip(template, message))
return dict(msglist)
def arm(self):
timer = 0
start = time.time()
while timer < 0.5:
data = [1500, 1500, 2000, 1000]
message = self._buildpayload(MSPMessagesEnum.MSP_SET_RAW_RC.value, 8, data)
self._sendmessage(message)
self.serial.flushOutput()
time.sleep(0.05)
timer = timer + (time.time() - start)
start = time.time()
def disarm(self):
timer = 0
start = time.time()
while timer < 0.5:
data = [1500, 1500, 1000, 1000]
message = self._buildpayload(MSPMessagesEnum.MSP_SET_RAW_RC.value, 8, data)
self._sendmessage(message)
self.serial.flushOutput()
time.sleep(0.05)
timer = timer + (time.time() - start)
start = time.time()
def send_rc_signal(self, data):
message = self._buildpayload(MSPMessagesEnum.MSP_SET_RAW_RC.value, 8, data)
self._sendmessage(message)
self.serial.flushOutput()
class _MessagesFormats:
TEMPLATES = {
100: ('VERSION', 'MULTITYPE', 'MSP_VERSION', 'capability'),
101: ('cycleTime', 'i2c_errors_count', 'sensor', 'flag', 'global_conf.currentSet'),
102: ('accx', 'accy', 'accz', 'gyrx', 'gyry', 'gyrz', 'magx', 'magy', 'magz', 'GPS_coord[LAT]', 'GPS_coord[LON]', 'GPS_altitude'),
103: ('servo1', 'servo2', 'servo3', 'servo4', 'servo5', 'servo6', 'servo7', 'servo8'),
104: ('motor1', 'motor2', 'motor3', 'motor4', 'motor5', 'motor6', 'motor7', 'motor8'),
105: ('roll', 'pitch', 'yaw', 'throttle', 'AUX1', 'AUX2', 'AUX3AUX4'),
106: ('GPS_FIX', 'GPS_numSat', 'GPS_coord[LAT]', 'GPS_coord[LON]', 'GPS_altitude', 'GPS_speed', 'GPS_ground_course'),
107: ('GPS_distanceToHome', 'GPS_directionToHome', 'GPS_update'),
108: ('angx', 'angy', 'heading'),
109: ('EstAlt', 'vario'),
110: ('vbat', 'intPowerMeterSum', 'rssi', 'amperage'),
111: ('byteRC_RATE', 'byteRC_EXPO', 'byteRollPitchRate', 'byteYawRate', 'byteDynThrPID', 'byteThrottle_MID', 'byteThrottle_EXPO'),
112: (), # Read more
113: (), # Read more
114: ('intPowerTrigger1', 'conf.minthrottle', 'maxthrottle', 'mincommand', 'failsafe_throttle', 'plog.arm', 'plog.lifetime', 'conf.mag_declination', 'conf.vbatscale', 'conf.vbatlevel_warn1', 'conf.vbatlevel_warn2', 'conf.vbatlevel_crit'),
115: ('motorpin1', 'motorpin2', 'motorpin3', 'motorpin4', 'motorpin5', 'motorpin6', 'motorpin7', 'motorpin8'),
116: (), # Return directly
117: (), # Return directly
118: ('wp_no', 'lat', 'lon', 'AltHold', 'heading', 'time to stay', 'nav flag'),
119: (), # Return directly
200: ('roll', 'pitch', 'yaw', 'throttle', 'AUX1', 'AUX2', 'AUX3AUX4')
}
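# Hedged usage sketch (added for illustration): the serial port name and baud rate are
# assumptions, and code 108 (MSP_ATTITUDE) is assumed to be listed in MSPMessagesEnum.
if __name__ == "__main__":
    board = MultiWii()
    if board.open_connection(115200, serport="/dev/ttyUSB0"):
        try:
            attitude = board.get_fcb_data(108)  # decoded via TEMPLATES[108] above
            print(attitude)  # e.g. {'angx': ..., 'angy': ..., 'heading': ...}
        finally:
            board.close_connection()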
|
StarcoderdataPython
|
1974217
|
<gh_stars>1-10
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
import pytest
from wa_kat.analyzers.creation_date_detector import TimeResource
from wa_kat.analyzers.creation_date_detector import mementoweb_api_tags
from wa_kat.analyzers.creation_date_detector import get_whois_tags
from wa_kat.analyzers.creation_date_detector import get_creation_date_tags
# Tests =======================================================================
def test_kitakitsune_mementoweb():
memento_tags = mementoweb_api_tags("http://kitakitsune.org")
assert memento_tags
def test_kitakitsune_whois():
whois_tags = get_whois_tags("kitakitsune.org")
assert whois_tags[0].url == "http://whois.icann.org/en/lookup?name=kitakitsune.org"
assert whois_tags[0].date == "2009-03-27T04:04:05"
assert whois_tags[0].val == "2009"
assert whois_tags[0].source == "Whois"
|
StarcoderdataPython
|
3499502
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 expandtab number
"""
Authors: qianweishuo<<EMAIL>>
Date: 2019/9/29 6:17 PM
"""
import os
import re
import sys
import six
from kinoko.misc.log_writer import init_log
def test_writing_files(mocker):
mocker.spy(os.path, 'dirname')
mocker.patch('os.path.isdir', return_value=True)
mocker.patch('os.makedirs')
mocker.patch('sys.stdout')
mocker.patch('sys.stderr')
# don't use `wraps=open`, or the files will get really created
mocker.patch('logging.open' if six.PY3 else '__builtin__.open')
logger = init_log(__name__, is_show_logger_src=True, log_path='./tmp/')
assert len(logger.handlers) == 3 + 2 # file + stream
logger.info('myinfo中文')
assert re.match(r'^INFO:[-\d:\. ]+\[\S+@\S+.py\]\S+.py:\d+ myinfo中文\n?$',
sys.stdout.write.call_args_list[0][0][0]) # first call, args, first arg
if six.PY3:
assert os.linesep == sys.stdout.write.call_args_list[1][0][0]
mocker.patch('os.path.isdir', return_value=False)
init_log(log_path='./tmp')
os.makedirs.assert_called_once()
|
StarcoderdataPython
|
3400923
|
<gh_stars>0
"""
Module for doing the training
"""
from __future__ import division
import numpy as np
import plot as plot
from numpy import matrix
from numpy import linalg
def train (train_X, train_Y, learning_rate=1, delay=0.2, type="curve"):
"""
Trains the linear regression model on training data
Sets the hyperparameter according to the validation data
and final accuracy on test data
"""
Theta = np.matrix([0.0, 0.0]).T
X_matrix = np.matrix(train_X).T
Y_matrix = np.matrix(train_Y).T
X_ones = np.ones((X_matrix.shape[0], 1))
X_matrix = np.hstack((X_ones, X_matrix))
# normalisation (approximate)
X_matrix[:,1] = X_matrix[:,1] / 10
epoch = 0
eta = learning_rate
costData = [] # for plotting real time data
old_cost = 0
converged = None
while (True):
Theta = SGD(X_matrix, Y_matrix, Theta, eta, 20)
cost = compute_cost(X_matrix, Y_matrix, Theta)
# print ('Epoch: %d | Cost: %.8f | Theta: %f, %f' % (epoch, cost, Theta[0,0], Theta[1,0]) )
epoch = epoch + 1
# Stopping condition
if (float(Theta[0]) >= 10000 or float(Theta[1]) >= 10000 or (cost is float('inf')) or (epoch > 6 and cost > 1.5 * old_cost)): # 50 iters for the purpose of plotting
# Diverged
print ("The model is diverging :(, please change the learning rate :)")
converged = False
break
if (epoch > 5000):
# too slow
print ("The learning rate is too small :(. Stopping since taking >5000 epochs. To train faster use eta close to 1...")
converged = False
break
if (epoch > 20 and cost < 1e-5 and abs(old_cost - cost) / old_cost < 0.0001):
# Change in cost is less than 0.01% => Stop
converged = True
break
costData.append( (float(cost), float(Theta[0, 0]), float(Theta[1,0]) / 10.0) )
old_cost = cost
Theta[1,:] = Theta[1,:] / 10
X_matrix[:,1] = X_matrix[:,1] * 10
# Print result
print("Theta0: %.6f | Theta1: %.6f | #Epochs: %d" % (Theta[0], Theta[1], epoch))
plot.regressionPlot(train_X, train_Y, Theta[1,0], Theta[0,0], Xlabel="Acidity", Ylabel="Density of Wine", marker="bx", fileName="Q1/plots/regression.png")
animatedDesent(X_matrix, Y_matrix, np.array(costData), delay, converged=converged)
return Theta
def animatedDesent (X_matrix, Y_matrix, costData, delay=0.2, converged=True):
"""
Plots the J(Theta) curve in 3D space and contours
Shows the real time gradient descent
"""
theta0s = np.linspace(0.3, 1.7, 100)
theta1s = np.linspace(-0.080, 0.080, 105)
if (not converged):
theta0s = np.linspace(-2000, 2000, 100)
theta1s = np.linspace(-2000, 2000, 100)
costMatrix = np.zeros((len(theta1s), len(theta0s)))
for i in range(len(theta1s)):
for j in range(len(theta0s)):
Theta = np.matrix([theta0s[j], theta1s[i]]).T
# compute cost
costMatrix[i][j] = compute_cost(X_matrix, Y_matrix, Theta)
plot.costPlot(theta0s, theta1s, costMatrix, costData, delay=delay, Xlabel="Theta 0", Ylabel="Theta 1", Zlabel="Cost")
plot.contourPlot(theta0s, theta1s, costMatrix, costData, delay=delay, converged=converged, Xlabel="Theta 0", Ylabel="Theta 1", Zlabel="Cost")
def SGD (X, Y, Theta, eta, batch_size=20):
"""
    Computes one epoch of the batch gradient descent algorithm
"""
Theta = Theta - eta * compute_gradient(X, Y, Theta)
return Theta
def compute_gradient (X, Y, Theta):
"""
Computes the cost gradient
X = m*n
Y = m*1
Theta = n*1
gradient = (1/m) * X_transpose * (X*Theta - Y)
"""
(m, n) = X.shape
return (1.0/m) * (X.T) * (X*Theta - Y)
def compute_cost (X, Y, Theta):
"""
Computes the J(Theta)
X = m*n
Y = m*1
Theta = n*1
Cost = (1/2m) * (Y-X*Theta)_tran * (Y-X*Theta)
"""
(m, n) = X.shape
error = Y - X*Theta
return (0.5 / m) * error.T * error
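# Hedged usage sketch (added; synthetic data, not from the original assignment): fit
# y ~ 2 + 0.5*x on a few points. Note that train() also calls the helpers in plot.py,
# so this only runs where that module and a plotting backend are available.
if __name__ == "__main__":
    xs = [1.0, 2.0, 3.0, 4.0, 5.0]
    ys = [2.5, 3.0, 3.5, 4.0, 4.5]
    theta = train(xs, ys, learning_rate=1, delay=0.0)
    print(theta)  # expected to end up close to [[2.0], [0.5]]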
|
StarcoderdataPython
|
383075
|
<reponame>picsldev/pyerp
# Librerias Django
from django.db import models
# Librerias de terceros
from apps.base.models import PyPartner
# Librerias en carpetas locales
from .campaign import PyCampaign
from .channel import PyChannel
class MarketingPartner(PyPartner):
class Meta:
app_label = 'base'
channel_id = models.ForeignKey(PyChannel, null=True, blank=True, on_delete=models.CASCADE)
campaign_id = models.ForeignKey(PyCampaign, null=True, blank=True, on_delete=models.CASCADE)
|
StarcoderdataPython
|
1779898
|
##########################
# Test script to check for the presence of brackets in scripted loc
# By Pelmen, https://github.com/Pelmen323
##########################
import glob
import os
import re
from ..test_classes.generic_test_class import FileOpener, ResultsReporter
from ..data.scripted_localisation_functions import scripted_localisation_functions as test_data_list
def test_check_localisation_scripted_brackets(test_runner: object):
filepath = f'{test_runner.full_path_to_mod}localisation\\'
results = {}
test_data = [i.lower() for i in test_data_list]
for filename in glob.iglob(filepath + '**/*.yml', recursive=True):
text_file = FileOpener.open_text_file(filename)
text_file_splitted = text_file.split('\n')[1:]
for line in range(len(text_file_splitted)):
current_line = text_file_splitted[line]
for function in test_data:
if function in current_line:
num_of_functions_in_line = current_line.count(function)
pattern = f'\\[[^\\[]*{function}[a-z]*(?:\\.getshehe)*\\]'
pattern_matches = re.findall(pattern, current_line)
if num_of_functions_in_line != len(pattern_matches):
results[f'{function}, {os.path.basename(filename)}, line {line+2}'] = current_line
ResultsReporter.report_results(results=results, message="Scripted loc syntax issues were found. Check console output")
|
StarcoderdataPython
|
3531024
|
<reponame>demusis/risco_fuzzy_mc<filename>Modelo 01.py<gh_stars>0
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import sys
import pandas as pd
import numpy as np
import arisco
import seaborn as sns
sns.set_style('darkgrid')
# Test
# In[2]:
print(sys.version)
# In[3]:
print(np.version.version)  # Version >1.16.2 is required
# In[4]:
df = pd.read_excel('Dados_brutos.xlsx', sheet_name='TOTAL')  # Loads the spreadsheet with the data
df.head()
# In[5]:
i_01 = arisco.sistemaFuzzy(0, 100, n_variavel="Impacto")
# p_01 = arisco.SF(0.5, 0.25, 0, 1, "Probabilidade")
# In[6]:
print(df.Km2.mean(), df.Km2.std(), 0, df.Km2.max())
print(df.Km2.quantile(q=[0.05, 0.25, 0.50, 0.75, 0.95], interpolation='linear'))
sns.distplot(df.Km2)
Area = arisco.variavelFTP(df.Km2, 0, df.Km2.max(), 'Area')
i_01.insereVariavel(Area)
# In[7]:
print(df.Combustivel.mean(), df.Combustivel.std(), 0, 1)
print(df.Combustivel.quantile(q=[0.05, 0.25, 0.50, 0.75, 0.95], interpolation='linear'))
sns.distplot(df.Combustivel)
Combustivel = arisco.variavelFTP(df.Combustivel, 0, 1, 'Combustivel')
i_01.insereVariavel(Combustivel)
# In[8]:
print(df.Vizinhanca.mean(), df.Vizinhanca.std(), 0, df.Vizinhanca.max())
print(df.Vizinhanca.quantile(q=[0.05, 0.25, 0.50, 0.75, 0.95], interpolation='linear'))
sns.distplot(df.Vizinhanca)
Vizinhanca = arisco.variavelFTP(df.Vizinhanca, 0, 1, 'Vizinhanca', likert=2)
i_01.insereVariavel(Vizinhanca)
# In[9]:
print(df.Gestao.mean(), df.Gestao.std(), 0, df.Gestao.max())
print(df.Gestao.quantile(q=[0.05, 0.25, 0.50, 0.75, 0.95], interpolation='linear'))
sns.distplot(df.Gestao)
Gestao = arisco.variavelFTP(df.Gestao, 0, 1, 'Gestao')
i_01.insereVariavel(Gestao)
# In[10]:
print(df.Relevo.mean(), df.Relevo.std(), df.Relevo.min(), df.Relevo.max())
# relevo = arisco.VFGP(df.Relevo.mean(), df.Relevo.std(), df.Relevo.min(), df.Relevo.max(), 'relevo')
# i_01.insere_var(relevo)
# In[11]:
print(df.Renda.mean(), df.Renda.std(), 0, df.Renda.max())
print(df.Renda.quantile(q=[0.05, 0.25, 0.50, 0.75, 0.95], interpolation='linear'))
sns.distplot(df.Renda)
Renda = arisco.variavelFTP(df.Renda, 0, 1, 'Renda')
i_01.insereVariavel(Renda)
# In[12]:
print(df.Vento.mean(), df.Vento.std(), df.Vento.min(), df.Vento.max())
#vento = arisco.VFGP(df.Vento.mean(), df.Vento.std(), df.Vento.min(), df.Vento.max(), 'vento')
#i_01.insere_var(vento)
# In[13]:
print(df.Precipitacao.mean(), df.Precipitacao.std(), df.Precipitacao.min(), df.Precipitacao.max())
# precipitacao = arisco.VFGP(df.Precipitacao.mean(), df.Precipitacao.std(), df.Precipitacao.min(), df.Precipitacao.max(), 'precipitacao')
# i_01.insere_var(precipitacao)
# In[14]:
print(df.Pressao.mean(), df.Pressao.std(), df.Pressao.min(), df.Pressao.max())
Pressao = arisco.variavelFGP(df.Pressao.min(), df.Pressao.max(), 'Pressao', dados=df.Pressao)
i_01.insereVariavel(Pressao)
# In[15]:
print(df.Evaporacao.mean(), df.Evaporacao.std(), 0, df.Evaporacao.max())
print(df.Evaporacao.quantile(q=[0.05, 0.25, 0.50, 0.75, 0.95], interpolation='linear'))
sns.distplot(df.Evaporacao)
Evaporacao = arisco.variavelFGP(df.Evaporacao.min(), df.Evaporacao.max(), 'Evaporacao', dados=df.Evaporacao)
i_01.insereVariavel(Evaporacao)
# In[16]:
i_01.graficosVariaveis()
# In[17]:
i_01.basicoRegras()
# In[18]:
i_01.inicializaSimulacao()
# In[19]:
print('- Impacto ----')
# Area, fuel, neighborhood, management, income, pressure and evaporation.
aux_i = np.array([[10, 0.3, 0.2, 0.2, 0.4, 992, 12]])
print(i_01.calculaSimulacao(aux_i))
print('-----')
# In[20]:
# MC
ic_i_01 = i_01.icMCSimulacao()
print(ic_i_01)
|
StarcoderdataPython
|
8117402
|
<gh_stars>0
# https://open.kattis.com/problems/sevenwonders
import collections
print((lambda c: min(c['T'], c['G'], c['C']) * 7 + sum([c[k] ** 2 for k in c]))(collections.Counter({'T': 0, 'C': 0, 'G': 0}) + collections.Counter(input())))
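# Readable equivalent (added for clarity; same arithmetic as the one-liner above):
# every complete T/C/G set scores 7 points, and each card type also scores count**2.
def seven_wonders_score(cards: str) -> int:
    counts = collections.Counter(cards)
    complete_sets = min(counts['T'], counts['C'], counts['G'])
    return complete_sets * 7 + sum(n * n for n in counts.values())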
|
StarcoderdataPython
|
12839194
|
import os, sys, copy
import pickle
import math
import time
import numpy as np
from typing import Dict, Any, List, Set, Tuple
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn.utils.rnn import pad_sequence
import torch.nn.utils.rnn as rnn_utils
from agent.environment.position import Position
from agent.environment import card as agent_cards
from . import util
from .map_transformations import pose as pose_lib
from .modules import state_embedder as embedder_lib
from .utilities import initialization
from .helpers import state_representation
from .utilities import hex_util
from .utilities.hex_conv_util import HexConv
def getPositionalEncoding(d_model=768, max_len=1024):
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2) *
-(math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
return pe
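# Small check (added for illustration): the table has shape (max_len, d_model), with
# even columns holding sines and odd columns cosines of the scaled positions, e.g.
# getPositionalEncoding(d_model=8, max_len=4).shape == torch.Size([4, 8]).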
def generate_attention_mask_from_mask_indicies_and_instruction_tensors(feature_attention_mask, instruction_tensors) -> torch.tensor:
attention_mask = torch.cat([feature_attention_mask, torch.ones(instruction_tensors.shape).to(util.DEVICE).bool()], 1)
return attention_mask
class CNNLSTMStateEncodingModel(nn.Module):
def __init__(self, config):
super(CNNLSTMStateEncodingModel, self).__init__()
self._d_input = 19
self._d_embed = config["d_embed"]
self._d_model = config["d_model"]
self._embeddings_type = config["embeddings_type"]
self._breakpoint_type = config["breakpoint_type"]
if self._breakpoint_type == "":
pass
elif self._breakpoint_type == "onehot":
self._d_input += 1
else:
raise ValueError("not supported breakpoint type")
self._conv = []
# embedding layer
self._n_depth = config["n_depth"]
if self._embeddings_type == "learned":
if "state_embedder_pretrained_model" in config:
pretrained_model = config["state_embedder_pretrained_model"]
else:
pretrained_model = ""
self._embedder = embedder_lib.StateEmbedder(
self._d_embed, pretrained_model)
self._d_input = self._embedder.embedding_size()
else:
if self._embeddings_type == "onehot":
self._embedder = embedder_lib.StateOnehotEmbedder()
self._d_input = self._embedder.embedding_size()
elif self._embeddings_type == "none":
self._embedder = None
if self._n_depth != 0:
conv_module = nn.ModuleList([])
conv_layer = nn.Conv2d(self._d_input, self._d_model, (1, 1))
conv_module.append(conv_layer)
conv_module.append(nn.LeakyReLU())
if torch.cuda.is_available():
conv_module = conv_module.to(util.DEVICE)
self._conv.append(conv_module)
# convolutional Layer
self._rcpf_size = config["rcpf_size"]
self._cnn_use_norm = config["cnn_use_norm"]
self._cnn_hex = config["cnn_hex"]
self._cnn_actv_func = config["cnn_actv_func"]
padding_size = int((self._rcpf_size-1)/2)
for d in range(self._n_depth-1):
conv_module = nn.ModuleList([])
if d == 0 and self._embeddings_type == "learned":
conv_in_channels = self._d_input
else:
conv_in_channels = self._d_model
if self._cnn_use_norm:
norm = nn.InstanceNorm2d(conv_in_channels)
conv_module.append(norm)
conv_out_channels: int = self._d_model
if self._cnn_hex:
conv_layer = HexConv(conv_in_channels, conv_out_channels,
self._rcpf_size, stride=1, padding=padding_size)
else:
conv_layer = nn.Conv2d(conv_in_channels, conv_out_channels,
(self._rcpf_size, self._rcpf_size), padding=(padding_size, padding_size))
conv_module.append(conv_layer)
if self._cnn_actv_func == "leaky_relu":
conv_module.append(nn.LeakyReLU())
elif self._cnn_actv_func == "tanh":
conv_module.append(nn.Tanh())
if torch.cuda.is_available():
conv_module = conv_module.to(util.DEVICE)
self._conv.append(conv_module)
if len(self._conv) == 0:
self._d_model = self._d_input
self._conv = nn.ModuleList(self._conv)
self._conv_output_channel = conv_out_channels
# feature translation and rotation layers
self._feature_map_size = config["feature_map_size"] if "feature_map_size" in config else 3
self._feature_filter_size = config["feature_filter_size"] if "feature_filter_size" in config else self._feature_map_size
self._rotate_feature_map = config["rotate_feature_map"] if "rotate_feature_map" in config else True
self._feature_cnn_n_depth = config["feature_cnn_n_depth"] if "feature_cnn_n_depth" in config else 0
self._feature_merge_type = config["feature_merge_type"] if "feature_merge_type" in config else "sum"
self._feature_output_dimension = config["feature_output_dimension"] if "feature_output_dimension" in config else 512
self._feature_cnn_actv_func = config["feature_cnn_actv_func"] if "feature_cnn_actv_func" in config else 0
self._feature_cnn_use_norm = config["feature_cnn_use_norm"] if "feature_cnn_use_norm" in config else True
self._feature_conv = []
try:
assert(self._feature_output_dimension * (self._feature_map_size)**2 //
(self._feature_map_size)**2 == self._feature_output_dimension)
except:
raise ValueError(
"Feature output dimension is not divisible by the nubmer of hexes to be clopped.")
for d in range(self._feature_cnn_n_depth):
conv_module = nn.ModuleList([])
if self._feature_cnn_use_norm:
norm = nn.InstanceNorm2d(512) #! not adaptive
conv_module.append(norm)
if self._feature_merge_type == "cat":
traj_output_channel = self._feature_output_dimension // (self._feature_map_size)**2
padding = (self._feature_filter_size-1)//2
if self._cnn_hex:
conv_layer = HexConv(self._conv_output_channel, traj_output_channel,
self._feature_filter_size, stride=1, padding=padding)
else:
conv_layer = nn.Conv2d(self._conv_output_channel, traj_output_channel, (
self._feature_filter_size, self._feature_filter_size), padding=(padding, padding))
self._conv_output_channel = traj_output_channel
elif self._feature_merge_type == "sum":
traj_output_channel = self._conv_output_channel
if self._cnn_hex:
conv_layer = HexConv(self._conv_output_channel, traj_output_channel,
self._feature_map_size, stride=1, padding=0)
else:
conv_layer = nn.Conv2d(self._conv_output_channel, traj_output_channel,
(self._feature_map_size, self._feature_map_size), padding=(0, 0))
conv_module.append(conv_layer)
if self._cnn_actv_func == "tanh":
conv_module.append(nn.Tanh())
self._feature_conv.append(conv_module)
self._feature_conv = nn.ModuleList(self._feature_conv)
if self._feature_merge_type == "cat":
self._conv_output_channel = self._feature_output_dimension
self._d_model = self._feature_output_dimension
elif self._feature_merge_type == "sum":
self._d_model = traj_output_channel
self._rotator = hex_util.Hex_Rotator()
# LSTM Layer
# 0. Pose + breakpoint embedder
# 1. Preprocessing linear layer (optional)
# 2. LSTM layer
# 2.1 Optional skip connection
self._lstm_input_merge_type = config["lstm_input_merge_type"]
self._lstm_output_merge_type = config["lstm_output_merge_type"]
self._lstm_skip = config["lstm_skip"]
if self._lstm_input_merge_type == "cat":
self._traj_break_embedder = embedder_lib.TrajBreakEmbedder(config["lstm_pb_dim"])
lstm_input_dim = self._d_model + config["lstm_pb_dim"]
lstm_output_dim = config["lstm_d_model"]
elif self._lstm_input_merge_type == "add":
self._traj_break_embedder = embedder_lib.TrajBreakEmbedder(self._d_model)
lstm_input_dim = self._d_model
lstm_output_dim = config["lstm_d_model"]
self._lstm = nn.LSTM(
input_size=lstm_input_dim,
hidden_size=lstm_output_dim,
num_layers=config["lstm_num_layers"],
bidirectional=config["lstm_bidirectional"],
dropout=config["lstm_dropout"],
batch_first=True,
)
if config["lstm_bidirectional"]:
lstm_output_dim = lstm_output_dim * 2
else:
lstm_output_dim = config["lstm_d_model"]
if self._lstm_skip:
if self._lstm_output_merge_type == "spatial-cat":
self._d_model = lstm_output_dim + self._d_model // (self._feature_map_size)**2
else:
try:
assert(self._lstm_output_merge_type != "spatial-cat")
except:
raise ValueError(
"Spaitial conceteneation option is only supported for LSTM with a skip coonection.")
self._d_model = lstm_output_dim
if torch.cuda.is_available():
self._lstm.to(util.DEVICE)
def forward(self, x, traj=None, bkpoint=None):
input = x.transpose(1, 3) # [BWHC] ==> [BCHW]
input = input.transpose(2, 3) # [BCHW] ==>[BCWH]
# input processing
input[:, 15, :, :] = torch.clamp(input[:, 15, :, :], 0, 1)
input = input.detach()
input = input.contiguous()
# embeddings layer
if self._embedder is not None:
input = self._embedder(input)
# hex CNN 1
conv_outputs: List[torch.Tensor] = list()
for i, layer in enumerate(self._conv):
conv_in = input if i == 0 else conv_outputs[-1]
x = conv_in
for l in layer:
x = l(x)
            # residual connection (if k != 1)
if (i != 0 and i != self._n_depth):
x = x + conv_outputs[-1]
conv_outputs.append(x)
if len(self._conv) == 0:
final_feature = input
else:
final_feature = conv_outputs[-1]
# cropping features
if self._feature_map_size != 1:
center = (self._feature_map_size-1) // 2
# Syntax: https://discuss.pytorch.org/t/is-there-a-way-to-pad-a-tensor-instead-of-variable/10448/2
final_feature = F.pad(final_feature, (center, center, center, center))
features = []
spatial_features = []
pb_features = []
batch_idx_list = [[i for _ in range(len(t))] for i, t in enumerate(traj)]
final_feature_mask_indicies = [len(t) for t in traj]
batch_idx = []
for l in batch_idx_list:
batch_idx += l
batch_idx = torch.tensor(batch_idx).to(util.DEVICE)
coords = torch.cat(traj,0)
h_mask = coords[:, 0]
w_mask = coords[:, 1]
pose = coords[:, 2]
h_mask = h_mask.detach()
w_mask = w_mask.detach()
if self._feature_map_size == 1:
feature = final_feature[i, :, h_mask, w_mask]
feature = feature.permute(1, 0)
else:
rows = [h_mask + (slack-center) for slack in range(self._feature_map_size)]
rows = torch.stack(rows, 0).unsqueeze(1)
rows = rows.repeat(1, self._feature_map_size, 1)
rows = rows + center # need to add center bc of padding
rows = rows.detach()
cols = [w_mask + (slack-center) for slack in range(self._feature_map_size)]
cols = torch.stack(cols, 0).unsqueeze(0)
cols = cols.repeat(self._feature_map_size, 1, 1)
cols = cols + center # need to add center bc of padding
cols = cols.detach()
batch_idx = batch_idx.unsqueeze(0).unsqueeze(0)
batch_idx = batch_idx.repeat(self._feature_map_size, self._feature_map_size, 1)
feature = final_feature[batch_idx, :, rows, cols]
feature = feature.permute(2, 3, 0, 1) # TxDxHxW
# rotate features
if self._rotate_feature_map:
mask_l = len(h_mask)
# converting to offset coordinates
pose_position = torch.tensor([[center+center//2, center]
for _ in range(mask_l)]).to(util.DEVICE)
pose_rot = (pose-1) * math.radians(60)
pose_obj = pose_lib.Pose(pose_position, pose_rot)
new_feature = self._rotator.translate_and_rotate(feature, pose_obj)
feature = new_feature
# hex CNN 2
feature = feature.contiguous()
x = feature
for i, layer in enumerate(self._feature_conv):
for l in layer:
x = l(x)
spatial_feature = x.view(x.shape[0], x.shape[1], x.shape[2]*x.shape[3]) #LxDX(H*W)
feature = torch.cat([spatial_feature[:, :, i]
for i in range(spatial_feature.shape[2])], 1) # LxDX(H*W)
# attach pose features
bk_onehot = torch.zeros(pose.shape).long().to(util.DEVICE)
pose_bk_raw_features = torch.stack([pose, bk_onehot], 0)
pb_feature = self._traj_break_embedder(pose_bk_raw_features)
if self._lstm_input_merge_type == "cat":
feature = torch.cat([feature, pb_feature], 1)
elif self._lstm_input_merge_type == "add":
feature += pb_feature
spatial_features = torch.split(spatial_feature, final_feature_mask_indicies)
features = torch.split(feature, final_feature_mask_indicies)
# LSTM layer
# reference: https://discuss.pytorch.org/t/how-can-i-compute-seq2seq-loss-using-mask/861
lstm_input = pad_sequence(features, 1, padding_value=0)
unpacked = lstm_input.permute(1, 0, 2)
packed = rnn_utils.pack_padded_sequence(unpacked, final_feature_mask_indicies, enforce_sorted=False)
outputs, _ = self._lstm(packed, None)
unpacked, unpacked_len = rnn_utils.pad_packed_sequence(outputs)
final_feature = unpacked.permute(1, 0, 2)
final_feature = final_feature.contiguous()
if self._lstm_skip:
spatial_features = pad_sequence(spatial_features, 1, padding_value=0)
final_feature = final_feature.unsqueeze(-1)
final_feature = final_feature.repeat(1, 1, 1, spatial_features.shape[-1])
final_feature = torch.cat([final_feature, spatial_features], 2)
final_feature = final_feature.permute(0, 1, 3, 2)
final_feature = final_feature.contiguous().view(
(final_feature.shape[0], final_feature.shape[1]*final_feature.shape[2], final_feature.shape[3]))
final_feature = final_feature.contiguous()
# generate attention mask for feature
feature_attention_mask = torch.ones(final_feature.shape[:2]).to(util.DEVICE)
batch_size = final_feature.shape[0]
neighbor_size = spatial_features.shape[-1]
for i in range(batch_size):
feature_attention_mask[i, neighbor_size*final_feature_mask_indicies[i]:] = 0
feature_attention_mask = feature_attention_mask.bool()
return final_feature, feature_attention_mask
def get_dimension(self):
return self._d_model
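# Hedged example (added): a config dict of the shape __init__ above reads. The values
# are illustrative assumptions, not settings taken from the original project.
EXAMPLE_ENCODER_CONFIG = {
    "d_embed": 64,
    "d_model": 128,
    "embeddings_type": "onehot",
    "breakpoint_type": "",
    "n_depth": 3,
    "rcpf_size": 3,
    "cnn_use_norm": True,
    "cnn_hex": False,
    "cnn_actv_func": "leaky_relu",
    "feature_map_size": 3,
    "feature_filter_size": 3,
    "rotate_feature_map": True,
    "feature_cnn_n_depth": 1,
    "feature_merge_type": "cat",
    "feature_output_dimension": 576,  # kept divisible by feature_map_size**2 (9)
    "feature_cnn_actv_func": "tanh",
    "feature_cnn_use_norm": True,
    "lstm_input_merge_type": "cat",
    "lstm_pb_dim": 32,
    "lstm_output_merge_type": "spatial-cat",
    "lstm_skip": True,
    "lstm_d_model": 256,
    "lstm_num_layers": 1,
    "lstm_bidirectional": False,
    "lstm_dropout": 0.0,
}
# model = CNNLSTMStateEncodingModel(EXAMPLE_ENCODER_CONFIG)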
|
StarcoderdataPython
|
9657343
|
from dagster.tutorials.intro_tutorial.unittesting import (
execute_test_only_final,
execute_test_a_plus_b_final_subdag,
)
def test_only_final():
execute_test_only_final()
def test_a_plus_b_final_subdag():
execute_test_a_plus_b_final_subdag()
|
StarcoderdataPython
|
3510325
|
"""
Osd backfill test
"""
import logging
import time
from tasks import ceph_manager
from teuthology import misc as teuthology
log = logging.getLogger(__name__)
def rados_start(ctx, remote, cmd):
"""
Run a remote rados command (currently used to only write data)
"""
log.info("rados %s" % ' '.join(cmd))
testdir = teuthology.get_testdir(ctx)
pre = [
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'rados',
];
pre.extend(cmd)
proc = remote.run(
args=pre,
wait=False,
)
return proc
def task(ctx, config):
"""
Test backfill
"""
if config is None:
config = {}
assert isinstance(config, dict), \
'thrashosds task only accepts a dict for configuration'
first_mon = teuthology.get_first_mon(ctx, config)
(mon,) = ctx.cluster.only(first_mon).remotes.keys()
num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
log.info('num_osds is %s' % num_osds)
assert num_osds == 3
manager = ceph_manager.CephManager(
mon,
ctx=ctx,
logger=log.getChild('ceph_manager'),
)
while len(manager.get_osd_status()['up']) < 3:
time.sleep(10)
manager.flush_pg_stats([0, 1, 2])
manager.wait_for_clean()
# write some data
p = rados_start(ctx, mon, ['-p', 'rbd', 'bench', '15', 'write', '-b', '4096',
'--no-cleanup'])
err = p.wait()
log.info('err is %d' % err)
# mark osd.0 out to trigger a rebalance/backfill
manager.mark_out_osd(0)
    # also mark it down so it won't be included in pg_temps
manager.kill_osd(0)
manager.mark_down_osd(0)
# wait for everything to peer and be happy...
manager.flush_pg_stats([1, 2])
manager.wait_for_recovery()
# write some new data
p = rados_start(ctx, mon, ['-p', 'rbd', 'bench', '30', 'write', '-b', '4096',
'--no-cleanup'])
time.sleep(15)
# blackhole + restart osd.1
# this triggers a divergent backfill target
manager.blackhole_kill_osd(1)
time.sleep(2)
manager.revive_osd(1)
# wait for our writes to complete + succeed
err = p.wait()
log.info('err is %d' % err)
# wait for osd.1 and osd.2 to be up
manager.wait_till_osd_is_up(1)
manager.wait_till_osd_is_up(2)
# cluster must recover
manager.flush_pg_stats([1, 2])
manager.wait_for_recovery()
# re-add osd.0
manager.revive_osd(0)
manager.flush_pg_stats([1, 2])
manager.wait_for_clean()
|
StarcoderdataPython
|
258566
|
import numpy as np
def sigmoidDerivada(sig):
    return sig * (1 - sig)
def sigmoid(soma):
    return 1 / (1 + np.exp(-soma))  # formula of the sigmoid function
'''
a = sigmoid = (-1.5) # example
b = np.exp(0) # example
c = sigmoid(0.5) # example
d = sigmoidDerivada(c) # example
'''
# Each record has two inputs
entradas = np.array([[0, 0],  # 1st record
                     [0, 1],  # 2nd record
                     [1, 0],  # 3rd record
                     [1, 1]])  # 4th record
saidas = np.array([[0], [1], [1], [0]])  # XOR operator
'''
# Synapse from the input layer to the hidden layer
pesos0 = np.array([[-0.424, -0.740, -0.961], # weights of the 1st input [i.e. x1]
                   [0.358, -0.577, -0.469]]) # weights of the 2nd input [i.e. x2]
# Synapse from the hidden layer to the output layer
pesos1 = np.array([[-0.017], [-0.893], [0.148]])
'''
# Random initialization of the weights
pesos0 = 2 * np.random.random((2, 3)) - 1  # two neurons in the input layer and 3 neurons in the hidden layer
pesos1 = 2 * np.random.random((3, 1)) - 1  # three neurons in the hidden layer and a single output neuron (the "- 1" keeps the initialization in [-1, 1), matching pesos0)
epocas = 1000000  # example value
taxaAprendizagem = 0.5  # example value (learning rate)
momento = 1  # example value; in this case, momentum is neutral
for j in range(epocas):
    camadaEntrada = entradas  # copy
    somaSinapse0 = np.dot(camadaEntrada, pesos0)  # input layer to hidden layer
    camadaOculta = sigmoid(somaSinapse0)
    somaSinapse1 = np.dot(camadaOculta, pesos1)  # hidden layer to output layer
    camadaSaida = sigmoid(somaSinapse1)
    erroCamadaSaida = saidas - camadaSaida
    mediaAbsoluta = np.mean(np.abs(erroCamadaSaida))  # absolute (i.e. non-negative) mean of the values
    print("Error: " + str(mediaAbsoluta))
    derivadaSaida = sigmoidDerivada(camadaSaida)
    deltaSaida = erroCamadaSaida * derivadaSaida  # delta formula for the output layer
    pesos1Transposta = pesos1.T  # '.T' -> transposed matrix so the multiplication below works
    deltaSaidaXPeso = deltaSaida.dot(pesos1Transposta)  # '.dot' is a matrix multiplication
    deltaCamadaOculta = deltaSaidaXPeso * sigmoidDerivada(camadaOculta)
    camadaOcultaTransposta = camadaOculta.T  # transposed matrix
    # Backpropagation
    pesosNovo1 = camadaOcultaTransposta.dot(deltaSaida)  # weight update
    pesos1 = (pesos1 * momento) + (pesosNovo1 * taxaAprendizagem)  # backpropagation update rule
    camadaEntradaTransposta = camadaEntrada.T  # transposed matrix
    # Backpropagation
    pesosNovo0 = camadaEntradaTransposta.dot(deltaCamadaOculta)  # weight update
    pesos0 = (pesos0 * momento) + (pesosNovo0 * taxaAprendizagem)  # backpropagation update rule
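# Added check (not in the original script): run a forward pass with the trained
# weights and round the outputs, which should reproduce the XOR truth table.
ativacaoFinalOculta = sigmoid(np.dot(entradas, pesos0))
previsoes = sigmoid(np.dot(ativacaoFinalOculta, pesos1))
print(np.round(previsoes))  # expected: [[0.], [1.], [1.], [0.]]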
|
StarcoderdataPython
|
4915360
|
'''
This module contains a tool kit for loading data in the app_model_validation.py file
'''
import os
import pandas as pd
training_data_path = 'data/training/220128.csv'
def load_train_and_validation_data(frac_ = 0.8):
'''
This method loads training and testing data as pandas data frames.
'''
# Read file
df = pd.read_csv(training_data_path, sep=';')
# Sample data
train=df.sample(frac=frac_)
test=df.drop(train.index)
train = train.reset_index(drop=True)
test = test.reset_index(drop=True)
return train, test
def load_txt_files(source_path):
'''
This method reads all txt files from the source path and stores
their content into a pandas table (pickle) inside the target path
'''
sections = []
# Iterate over all files in directory
filenames = [f for f in os.listdir(source_path)]
for filename in filenames:
file = open(source_path + filename, 'r')
file_content = file.read()
page_nr = 1
section_idx = 0 # Each section is indexed. The filename and the index are a composite key for a section
for page in file_content.split('\n\n'): # Pages are separated with a double new line
for section in page.split('\n'): # Sections are separated with a single new line
sections.append([filename, section_idx, page_nr, section])
section_idx += 1
page_nr += 1
df = pd.DataFrame(sections, columns=['report_id', 'section_index', 'page_number', 'section_text'])
return df
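# Hedged usage sketch (added; the reports directory path is an assumption):
if __name__ == '__main__':
    train_df, validation_df = load_train_and_validation_data(frac_=0.8)
    print(train_df.shape, validation_df.shape)
    sections_df = load_txt_files('data/reports/')  # expects plain .txt files here
    print(sections_df.head())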
|
StarcoderdataPython
|
262711
|
<reponame>xiaoxiaofenyge/-<filename>wechatsogou/structuring.py<gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals, print_function
import re
import json
from lxml import etree
from lxml.etree import XML
import requests
from wechatsogou.tools import get_elem_text, list_or_empty, replace_html, get_first_of_element
from wechatsogou.five import str_to_bytes
find_article_json_re = re.compile('var msgList = (.*?)}}]};')
get_post_view_perm = re.compile('<script>var account_anti_url = "(.*?)";</script>')
class WechatSogouStructuring(object):
@staticmethod
def __handle_content_url(content_url):
content_url = replace_html(content_url)
return ('http://mp.weixin.qq.com{}'.format(
content_url) if 'http://mp.weixin.qq.com' not in content_url else content_url) if content_url else ''
@staticmethod
def __get_post_view_perm(text):
result = get_post_view_perm.findall(text)
if not result or len(result) < 1:
return None
r = requests.get('http://weixin.sogou.com{}'.format(result[0]))
if not r.ok:
return None
if r.json().get('code') != 'success':
return None
return r.json().get('msg')
@staticmethod
def get_gzh_by_search(text):
"""从搜索公众号获得的文本 提取公众号信息
Parameters
----------
text : str or unicode
搜索公众号获得的文本
Returns
-------
list[dict]
{
'open_id': '', # 微信号唯一ID
'profile_url': '', # 最近10条群发页链接
'headimage': '', # 头像
'wechat_name': '', # 名称
'wechat_id': '', # 微信id
'post_perm': '', # 最近一月群发数
'view_perm': '', # 最近一月阅读量
'qrcode': '', # 二维码
'introduction': '', # 介绍
'authentication': '' # 认证
}
"""
post_view_perms = WechatSogouStructuring.__get_post_view_perm(text)
page = etree.HTML(text)
lis = page.xpath('//ul[@class="news-list2"]/li')
relist = []
for li in lis:
url = get_first_of_element(li, 'div/div[1]/a/@href')
headimage = get_first_of_element(li, 'div/div[1]/a/img/@src')
wechat_name = get_elem_text(get_first_of_element(li, 'div/div[2]/p[1]'))
info = get_elem_text(get_first_of_element(li, 'div/div[2]/p[2]'))
qrcode = get_first_of_element(li, 'div/div[3]/span/img[1]/@src')
introduction = get_elem_text(get_first_of_element(li, 'dl[1]/dd'))
authentication = get_first_of_element(li, 'dl[2]/dd/text()')
relist.append({
'open_id': headimage.split('/')[-1],
'profile_url': url,
'headimage': headimage,
'wechat_name': wechat_name.replace('red_beg', '').replace('red_end', ''),
'wechat_id': info.replace('微信号:', ''),
'qrcode': qrcode,
'introduction': introduction.replace('red_beg', '').replace('red_end', ''),
'authentication': authentication,
'post_perm': -1,
'view_perm': -1,
})
if post_view_perms:
for i in relist:
if i['open_id'] in post_view_perms:
post_view_perm = post_view_perms[i['open_id']].split(',')
if len(post_view_perm) == 2:
i['post_perm'] = int(post_view_perm[0])
i['view_perm'] = int(post_view_perm[1])
return relist
@staticmethod
def get_article_by_search_wap(keyword, wap_dict):
datas = []
for i in wap_dict['items']:
item = str_to_bytes(i).replace(b'\xee\x90\x8a' + str_to_bytes(keyword) + b'\xee\x90\x8b',
str_to_bytes(keyword))
root = XML(item)
display = root.find('.//display')
datas.append({
'gzh': {
'profile_url': display.find('encGzhUrl').text,
'open_id': display.find('openid').text,
'isv': display.find('isV').text,
'wechat_name': display.find('sourcename').text,
'wechat_id': display.find('username').text,
'headimage': display.find('headimage').text,
'qrcode': display.find('encQrcodeUrl').text,
},
'article': {
'title': display.find('title').text,
'url': display.find('url').text, # encArticleUrl
'main_img': display.find('imglink').text,
'abstract': display.find('content168').text,
'time': display.find('lastModified').text,
},
})
return datas
@staticmethod
def get_article_by_search(text):
"""从搜索文章获得的文本 提取章列表信息
Parameters
----------
text : str or unicode
搜索文章获得的文本
Returns
-------
list[dict]
{
'article': {
'title': '', # 文章标题
'url': '', # 文章链接
'imgs': '', # 文章图片list
'abstract': '', # 文章摘要
'time': '' # 文章推送时间
},
'gzh': {
'profile_url': '', # 公众号最近10条群发页链接
'headimage': '', # 头像
'wechat_name': '', # 名称
'isv': '', # 是否加v
}
}
"""
page = etree.HTML(text)
lis = page.xpath('//ul[@class="news-list"]/li')
articles = []
for li in lis:
url = get_first_of_element(li, 'div[1]/a/@href')
if url:
title = get_first_of_element(li, 'div[2]/h3/a')
imgs = li.xpath('div[1]/a/img/@src')
abstract = get_first_of_element(li, 'div[2]/p')
time = get_first_of_element(li, 'div[2]/div/span/script/text()')
gzh_info = li.xpath('div[2]/div/a')[0]
else:
url = get_first_of_element(li, 'div/h3/a/@href')
title = get_first_of_element(li, 'div/h3/a')
imgs = []
spans = li.xpath('div/div[1]/a')
for span in spans:
img = span.xpath('span/img/@src')
if img:
imgs.append(img)
abstract = get_first_of_element(li, 'div/p')
time = get_first_of_element(li, 'div/div[2]/span/script/text()')
gzh_info = li.xpath('div/div[2]/a')[0]
if title is not None:
title = get_elem_text(title).replace("red_beg", "").replace("red_end", "")
if abstract is not None:
abstract = get_elem_text(abstract).replace("red_beg", "").replace("red_end", "")
time = re.findall('timeConvert\(\'(.*?)\'\)', time)
time = list_or_empty(time, int)
profile_url = get_first_of_element(gzh_info, '@href')
headimage = get_first_of_element(gzh_info, '@data-headimage')
wechat_name = get_first_of_element(gzh_info, 'text()')
gzh_isv = get_first_of_element(gzh_info, '@data-isv', int)
articles.append({
'article': {
'title': title,
'url': url,
'imgs': imgs,
'abstract': abstract,
'time': time
},
'gzh': {
'profile_url': profile_url,
'headimage': headimage,
'wechat_name': wechat_name,
'isv': gzh_isv,
}
})
return articles
@staticmethod
def get_gzh_info_by_history(text):
"""从 历史消息页的文本 提取公众号信息
Parameters
----------
text : str or unicode
历史消息页的文本
Returns
-------
dict
{
'wechat_name': '', # 名称
'wechat_id': '', # 微信id
'introduction': '', # 描述
'authentication': '', # 认证
'headimage': '' # 头像
}
"""
page = etree.HTML(text)
profile_area = get_first_of_element(page, '//div[@class="profile_info_area"]')
profile_img = get_first_of_element(profile_area, 'div[1]/span/img/@src')
profile_name = get_first_of_element(profile_area, 'div[1]/div/strong/text()')
profile_wechat_id = get_first_of_element(profile_area, 'div[1]/div/p/text()')
profile_desc = get_first_of_element(profile_area, 'ul/li[1]/div/text()')
profile_principal = get_first_of_element(profile_area, 'ul/li[2]/div/text()')
return {
'wechat_name': profile_name.strip(),
'wechat_id': profile_wechat_id.replace('微信号: ', '').strip('\n'),
'introduction': profile_desc,
'authentication': profile_principal,
'headimage': profile_img
}
@staticmethod
def get_article_by_history_json(text, article_json=None):
"""从 历史消息页的文本 提取文章列表信息
Parameters
----------
text : str or unicode
历史消息页的文本
article_json : dict
历史消息页的文本 提取出来的文章json dict
Returns
-------
list[dict]
{
'send_id': '', # 群发id,注意不唯一,因为同一次群发多个消息,而群发id一致
'datetime': '', # 群发datatime
'type': '', # 消息类型,均是49,表示图文
'main': 0, # 是否是一次群发的第一次消息
'title': '', # 文章标题
'abstract': '', # 摘要
'fileid': '', #
'content_url': '', # 文章链接
'source_url': '', # 阅读原文的链接
'cover': '', # 封面图
'author': '', # 作者
'copyright_stat': '', # 文章类型,例如:原创啊
}
"""
if article_json is None:
article_json = find_article_json_re.findall(text)
if not article_json:
return []
article_json = article_json[0] + '}}]}'
article_json = json.loads(article_json)
items = list()
for listdic in article_json['list']:
if str(listdic['comm_msg_info'].get('type', '')) != '49':
continue
comm_msg_info = listdic['comm_msg_info']
app_msg_ext_info = listdic['app_msg_ext_info']
send_id = comm_msg_info.get('id', '')
msg_datetime = comm_msg_info.get('datetime', '')
msg_type = str(comm_msg_info.get('type', ''))
items.append({
'send_id': send_id,
'datetime': msg_datetime,
'type': msg_type,
'main': 1, 'title': app_msg_ext_info.get('title', ''),
'abstract': app_msg_ext_info.get('digest', ''),
'fileid': app_msg_ext_info.get('fileid', ''),
'content_url': WechatSogouStructuring.__handle_content_url(app_msg_ext_info.get('content_url')),
'source_url': app_msg_ext_info.get('source_url', ''),
'cover': app_msg_ext_info.get('cover', ''),
'author': app_msg_ext_info.get('author', ''),
'copyright_stat': app_msg_ext_info.get('copyright_stat', '')
})
if app_msg_ext_info.get('is_multi', 0) == 1:
for multi_dict in app_msg_ext_info['multi_app_msg_item_list']:
items.append({
'send_id': send_id,
'datetime': msg_datetime,
'type': msg_type,
'main': 0, 'title': multi_dict.get('title', ''),
'abstract': multi_dict.get('digest', ''),
'fileid': multi_dict.get('fileid', ''),
'content_url': WechatSogouStructuring.__handle_content_url(multi_dict.get('content_url')),
'source_url': multi_dict.get('source_url', ''),
'cover': multi_dict.get('cover', ''),
'author': multi_dict.get('author', ''),
'copyright_stat': multi_dict.get('copyright_stat', '')
})
return list(filter(lambda x: x['content_url'], items))  # drop the empty entries that Sogou itself adds
@staticmethod
def get_gzh_info_and_article_by_history(text):
"""从 历史消息页的文本 提取公众号信息 和 文章列表信息
Parameters
----------
text : str or unicode
历史消息页的文本
Returns
-------
dict
{
'gzh': {
'wechat_name': '', # 名称
'wechat_id': '', # 微信id
'introduction': '', # 描述
'authentication': '', # 认证
'headimage': '' # 头像
},
'article': [
{
'send_id': '', # 群发id,注意不唯一,因为同一次群发多个消息,而群发id一致
'datetime': '', # 群发datatime
'type': '', # 消息类型,均是49,表示图文
'main': 0, # 是否是一次群发的第一次消息
'title': '', # 文章标题
'abstract': '', # 摘要
'fileid': '', #
'content_url': '', # 文章链接
'source_url': '', # 阅读原文的链接
'cover': '', # 封面图
'author': '', # 作者
'copyright_stat': '', # 文章类型,例如:原创啊
},
...
]
}
"""
return {
'gzh': WechatSogouStructuring.get_gzh_info_by_history(text),
'article': WechatSogouStructuring.get_article_by_history_json(text)
}
@staticmethod
def get_gzh_article_by_hot(text):
"""从 首页热门搜索 提取公众号信息 和 文章列表信息
Parameters
----------
text : str or unicode
首页热门搜索 页 中 某一页 的文本
Returns
-------
list[dict]
{
'gzh': {
'headimage': str, # 公众号头像
'wechat_name': str, # 公众号名称
},
'article': {
'url': str, # 文章临时链接
'title': str, # 文章标题
'abstract': str, # 文章摘要
'time': int, # 推送时间,10位时间戳
'open_id': str, # open id
'main_img': str # 封面图片
}
}
"""
page = etree.HTML(text)
lis = page.xpath('/html/body/li')
gzh_article_list = []
for li in lis:
url = get_first_of_element(li, 'div[1]/h4/a/@href')
title = get_first_of_element(li, 'div[1]/h4/a/div/text()')
abstract = get_first_of_element(li, 'div[1]/p[1]/text()')
xpath_time = get_first_of_element(li, 'div[1]/p[2]')
open_id = get_first_of_element(xpath_time, 'span/@data-openid')
headimage = get_first_of_element(xpath_time, 'span/@data-headimage')
gzh_name = get_first_of_element(xpath_time, 'span/text()')
send_time = xpath_time.xpath('a/span/@data-lastmodified')
main_img = get_first_of_element(li, 'div[2]/a/img/@src')
try:
send_time = int(send_time[0])
except ValueError:
send_time = send_time[0]
gzh_article_list.append({
'gzh': {
'headimage': headimage,
'wechat_name': gzh_name,
},
'article': {
'url': url,
'title': title,
'abstract': abstract,
'time': send_time,
'open_id': open_id,
'main_img': main_img
}
})
return gzh_article_list
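# --- Hedged usage sketch (not part of the library) --------------------------------------
# All structuring methods above are static and expect raw HTML fetched by the caller.
# The search URL below is illustrative; real requests to Sogou usually need the
# anti-crawling headers/cookies that are handled elsewhere in wechatsogou.
if __name__ == '__main__':
    import requests as _requests
    _html = _requests.get('https://weixin.sogou.com/weixin?type=1&query=python').text
    for _gzh in WechatSogouStructuring.get_gzh_by_search(_html):
        print(_gzh['wechat_name'], _gzh['profile_url'])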
|
StarcoderdataPython
|
3271037
|
<gh_stars>0
# -*- coding: utf-8 -*-
from scrapy.spiders import Spider
from uuid import uuid4
from ..items import ComponentItem
class GarrysmodSpider(Spider):
name = 'garrysmod'
allowed_domains = ['wiki.garrysmod.com']
start_urls = ['https://wiki.garrysmod.com/navbar/']
state = {}
def parse_nav(self, response):
''' Parse the navigation bar list for our individual API components. '''
# First yield our global component, and pass its id on to all child requests.
# *This ensures that anything can be linked to the global scope.*
global_uuid = uuid4()
global_item = ComponentItem(
uuid=global_uuid,
name='global',
file='global.lua'
)
yield global_item
category_list = response.xpath('/html/body/ul[1]/li')
for category in category_list:
category_name = (category.xpath('./h1/text()').get() or '').casefold()
if category_name not in self._category_parsers:
continue
component_list = category.xpath('./ul[1]/li')
for component in component_list:
link = component.xpath('./h2/a')
if link.attrib.get('href'):
yield response.follow(link.attrib['href'],
callback=self._category_parsers[category_name],
priority=self._category_priority[category_name],
meta={'global': global_uuid}
)
parse = parse_nav
def parse_hook(self, response):
''' Handles creating a Hook definition and its attributes. '''
def parse_library(self, response):
''' Handles creating a Library definition and its subroutines. '''
def parse_class(self, response):
''' Handles creating a Class definition and its methods/attributes. '''
def parse_dpanel(self, response):
''' Handles creating a DPanel subclass and its methods/attributes. '''
def parse_attribute(self, response, component, last_attr=False):
''' Handles parsing a component's method or attribute page. '''
_category_parsers = {
'hooks': parse_hook,
'libraries': parse_library,
'global': parse_attribute,
'classes': parse_class,
'panels': parse_dpanel
}
_category_priority = {
'hooks': 1,
'libraries': 3,
'global': 5,
'classes': 7,
'panels': 9
}
|
StarcoderdataPython
|
9705266
|
import os
from os import path
import math
os.environ['OPENBLAS_NUM_THREADS'] = '1'
import numpy as np
import sys, json, time
import pandas as pd
import multiprocessing as mp # TODO remove from final version
from datetime import datetime, timedelta
from ortools.constraint_solver import routing_enums_pb2
from ortools.constraint_solver import pywrapcp
import random
from training import *
# TODO: Handle all division exceptions
CONST_INFTY = 1000000.0 # If the TSP is not solved, the time is set to this value
CONST_TIMEOUT = 60 # Maximum time in seconds for solving TSP with time windows
BASE_DIR = path.dirname(path.dirname(path.abspath(__file__))) # Get Directory
def haversine(lat1, lng1, lat2, lng2):
# distance between latitudes and longitudes in km
d_lat = (lat2 - lat1) * math.pi / 180.0
d_lon = (lng2 - lng1) * math.pi / 180.0
# convert to radians
lat1 = lat1 * math.pi / 180.0
lat2 = lat2 * math.pi / 180.0
# apply formulae
a = (pow(math.sin(d_lat / 2), 2) +
pow(math.sin(d_lon / 2), 2) *
math.cos(lat1) * math.cos(lat2))
rad = 6371
c = 2 * math.asin(math.sqrt(a))
return rad * c
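# Hedged sanity check for the helper above; the coordinates and expected value are illustrative.
def _haversine_sanity_check():
    """Not used by the pipeline: Paris (48.8566, 2.3522) to London (51.5074, -0.1278)
    is roughly 343 km great-circle, so haversine() can be spot-checked in isolation."""
    distance_km = haversine(48.8566, 2.3522, 51.5074, -0.1278)
    assert abs(distance_km - 343) < 5, distance_km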
class Route:
# class constructor
def __init__(self):
self.index = 0
self.key = 'NULL'
self.station_code = 'NULL'
self.date = 'NULL'
self.departure_time = 'NULL'
self.executor_capacity = 0.0
self.score = 'NULL'
self.stop = [] # list of stop objects
self.stop_key_index_dict = {} # dict of stops ordered by what was actually traversed
self.travel_time_dict = {} # travel time between stops in seconds
# derived members
self.num_stops = 0 # number of stops included in the route (includes depot)
self.num_stops_tw = 0 # number of stops with time windows
self.num_tw_violations = 0 # number of stops which violate time windows
self.num_packages = 0 # number of packages to be delivered along the route
self.num_packages_tw = 0 # number of packages with time windows on the route
self.num_scans_delivered = 0 # based on delivered status; not available during apply
self.num_scans_not_delivered = 0 # based on status; N/A for apply; compare with estimated values
self.num_packages_not_delivered = 0 # estimated num of packages not delivered due to scans
self.vol_packages_not_delivered = 0.0 # estimated vol of packages not delivered
self.packages_per_stop = 0.0 # number of packages per stop
self.travel_time = 0.0 # total time taken to travel the actual route (ignores service times)
self.service_time = 0.0 # sum of service times across all stops
self.total_journey_time = 0.0 # sum of travel time plus service time
self.filled_capacity = 0.0 # total volume of all the packages
self.filled_capacity_percent = 0.0 # percentage of truck volume occupied by the packages
self.volume_seconds_traveled = 0.0 # total route volume time in cm3 seconds (uses travel time not journey time)
self.vst_ratio = 0.0 # volume seconds traveled divided by travel time x filled_capacity
self.total_wait_time = 0.0 # if the truck is early at a stop with time windows that are met (0 is good)
self.total_end_slack = 0.0 # difference between end time window and actual end time for time windows met
self.total_max_slack = 0.0 # difference between end time window and start time window + service time
self.slack_ratio = 0.0 # ratio of total end slack to total max slack
# package level time window metrics (assumes every package at a stop is delivered first)
# this is an approx; wait time may not require waiting since they can deliver other packages
self.weighted_pkg_wait_time = 0.0 # total weighted (by volume) wait time for packages
self.weighted_pkg_end_slack = 0.0 # total weighted (by volume) end slack for packages
self.weighted_pkg_max_slack = 0.0 # total weighted (by volume) max slack for packages
self.weighted_pkg_slack_ratio = 0.0 # total weighted (by volume) slack ratios for packages
self.max_segment_time = 0.0 # maximum travel time between stops (excludes to and from origin)
self.var_segment_time = 0.0 # variance of segment time
self.segment_time_ratio = 0.0 # ratio compared to total journey time
self.max_segment_dist = 0.0 # maximum distance traveled between stops (excludes to and from origin)
self.var_segment_dist = 0.0 # variance of segment distance
self.segment_dist_ratio = 0.0 # ratio compared to total distance
self.total_dist = 0.0 # total haversine distance of route
self.average_speed = 0.0 # total haversine distance / total time
self.is_sequence_feasible = True # boolean variable which records if the actual route is feasible or not
self.weekday = 0 # Mon is 0 and Sun is 6
self.is_weekend = False # is set to true if weekday is Sat or Sun
# zonal members
self.zone_stop_dict = {} # dictionary of zones as keys and stop list as values
self.num_zones = 0 # number of stop zones
self.num_zone_switches = 0 # number of times we switch to a different zone along a route
self.switch_stop_ratio = 0 # ratio of number of zone switches to number of stops
self.switch_zone_ratio = 0 # ratio of number of zone switches to number of zones (>= 1 ideal case is num zones)
# TSP related members
self.tsp_solver_status = 0 # 0 Not Solved 1 Success 2 No Soln 3 Timeout 4 Infeasible
self.tsp_route_time = CONST_INFTY # optimal TSP time as provided by the solver
self.tsp_optimality_gap = 0.0 # between the TSP time and the actual route time as a percentage
self.tsp_route_dict = {} # dictionary of stops along the TSP tour which is used for scoring
self.is_tsp_feasible = True # checks if TSP is feasible or not
class Stop:
def __init__(self):
self.key = 0
self.order = -1
self.lat = 0.0
self.lng = 0.0
self.proj_x = 0.0 # UTM projection of lat long
self.proj_y = 0.0 # UTM projection of lat long
self.type = 'NULL'
self.zone_id = 'NULL'
self.package_dict = {} # package data is stored simply as a dictionary since we may not process it further
# derived members
self.planned_service_time = 0.0 # total planned service time for all packages at this stop in seconds
self.is_tw_present = False # indicator to check if there is a TW constraint or not
self.is_tw_violated = False # indicator for violations of time windows
self.start_time_window = 'NULL' # tightest start time for all packages at this stop
self.end_time_window = 'NULL' # tightest end time this stop (in seconds from departure time)
self.actual_start_time = 0.0 # actual start time according to travel time data in seconds from start
self.actual_end_time = 0.0 # actual end time using the planned service time
self.wait_time = 0.0 # positive part of difference between start time window and actual start time
self.slack_time = 0.0 # difference between end time window and actual end time
self.max_slack = 0.0 # difference between end time window and start time window + planned_service_time
# package level time window metrics (assumes every package at the stop is delivered first)
# this is an approx; wait time may not require waiting since they can deliver other packages
self.weighted_pkg_wait_time = 0.0 # weighted (by volume) wait time for packages
self.weighted_pkg_end_slack = 0.0 # weighted (by volume) end slack for packages
self.weighted_pkg_max_slack = 0.0 # weighted (by volume) max slack for packages
self.weighted_pkg_slack_ratio = 0.0 # weighted (by volume) slack ratios for packages
self.num_packages = 0 # total number of packages
self.num_packages_tw = 0 # number of packages with time windows at this stop
self.num_scans_delivered = 0 # total number of packages delivered based on scans
self.num_scans_not_delivered = 0 # total number of packages not delivered based on scans
self.num_packages_not_delivered = 0 # number of packages not delivered due to time windows
self.total_package_vol = 0.0 # total volume of all packages to be delivered at this stop
self.vol_package_undelivered = 0.0 # volume of packages that could not be delivered
# derived members related to TSP
self.tsp_order = -1 # order of stops according to TSP
self.actual_tsp_start_time = 0.0
self.actual_tsp_end_time = 0.0
self.is_tsp_feasible = True
def __repr__(self):
return "(" + str(self.key) + "," + str(self.order) + "," + str(self.zone_id) + ")"
def compute_route_features(self):
"""This function computes several features for each route using the stop features
that can be used for exploratory analysis"""
self.weekday = self.date.weekday()
if self.weekday >= 5:
self.is_weekend = True
self.num_stops = len(self.stop)
self.travel_time = 0.0
self.stop[0].actual_start_time = 0.0
self.stop[0].actual_end_time = 0.0
for i in range(len(self.stop)): # add total number of packages across stops and their volume
self.num_packages += self.stop[i].num_packages
self.num_packages_tw += self.stop[i].num_packages_tw
self.num_scans_delivered += self.stop[i].num_scans_delivered
self.num_scans_not_delivered += self.stop[i].num_scans_not_delivered
self.num_packages_not_delivered += self.stop[i].num_packages_not_delivered
self.vol_packages_not_delivered += self.stop[i].vol_package_undelivered
self.filled_capacity += self.stop[i].total_package_vol
self.service_time += self.stop[i].planned_service_time
self.total_wait_time += self.stop[i].wait_time
self.total_end_slack += self.stop[i].slack_time
self.total_max_slack += self.stop[i].max_slack
self.weighted_pkg_wait_time += self.stop[i].weighted_pkg_wait_time
self.weighted_pkg_end_slack += self.stop[i].weighted_pkg_end_slack
self.weighted_pkg_max_slack += self.stop[i].weighted_pkg_max_slack
self.weighted_pkg_slack_ratio += self.stop[i].weighted_pkg_slack_ratio
self.packages_per_stop = self.num_packages / self.num_stops
self.filled_capacity_percent = self.filled_capacity / self.executor_capacity
current_volume = self.filled_capacity
for i in range(len(self.stop) - 1): # find arrival and departure times at all stops
self.travel_time += self.travel_time_dict[self.stop[i].key][self.stop[i + 1].key]
self.volume_seconds_traveled += current_volume * self.travel_time_dict[self.stop[i].key][
self.stop[i + 1].key]
current_volume -= self.stop[i + 1].total_package_vol
self.travel_time += self.travel_time_dict[self.stop[self.num_stops - 1].key][
self.stop[0].key] # add travel time from last stop to depot
segment_travel_times = [self.travel_time_dict[self.stop[i].key][self.stop[i + 1].key] for i in
range(1, len(self.stop) - 1)]
self.max_segment_time = max(segment_travel_times)
self.var_segment_time = np.var(segment_travel_times)
self.segment_time_ratio = self.max_segment_time / self.travel_time
segment_distances = [haversine(self.stop[i].lat, self.stop[i].lng, self.stop[i + 1].lat, self.stop[i + 1].lng)
for i in range(1, len(self.stop) - 1)]
self.max_segment_dist = max(segment_distances)
self.var_segment_dist = np.var(segment_distances)
self.segment_dist_ratio = self.max_segment_dist / sum(segment_distances)
self.total_dist = sum(segment_distances)
self.total_dist += haversine(self.stop[0].lat, self.stop[0].lng, self.stop[1].lat, self.stop[1].lng)
self.total_dist += haversine(self.stop[self.num_stops - 1].lat, self.stop[self.num_stops - 1].lng,
self.stop[0].lat, self.stop[0].lng)
self.average_speed = self.total_dist / self.travel_time
self.vst_ratio = self.volume_seconds_traveled / (self.filled_capacity * self.travel_time)
self.slack_ratio = self.total_end_slack / self.total_max_slack
self.total_journey_time = self.travel_time + self.service_time
unique_zone_list = [self.stop[i].zone_id for i in range(len(self.stop))]
unique_zone_list = list(set(unique_zone_list))
self.num_zones = len(unique_zone_list)
for zone in unique_zone_list:
self.zone_stop_dict[zone] = []
for i in range(len(self.stop)):
self.zone_stop_dict[self.stop[i].zone_id].append(self.stop[i].order)
for i in range(len(self.stop) - 1):
if self.stop[i].zone_id != self.stop[i + 1].zone_id:
self.num_zone_switches += 1
self.num_zone_switches += 1 # to account for last switch to depot
self.switch_stop_ratio = self.num_zone_switches / self.num_stops
self.switch_zone_ratio = self.num_zone_switches / self.num_zones
def compute_stop_package_features(self):
"""This function computes several features for each stop that can be used for exploratory analysis"""
# Replace NaN zone IDs with zone ID of nearest stop by travel time
# if nearest also has NaN, then find the next nearest stop; Exclude depot station
for i in range(len(self.stop)):
if self.stop[i].zone_id != self.stop[i].zone_id and self.stop[i].type != 'Station':  # a NaN zone_id compares unequal to itself
min_dist = 100000000
nearest_zone_id = "Null"
for j in range(len(self.stop)):
if j != i and self.stop[j].type != 'Station':
if self.travel_time_dict[self.stop[i].key][self.stop[j].key] < min_dist and self.stop[
j].zone_id == self.stop[j].zone_id:
min_dist = self.travel_time_dict[self.stop[i].key][self.stop[j].key]
nearest_zone_id = self.stop[j].zone_id
self.stop[i].zone_id = nearest_zone_id
elif self.stop[i].type == 'Station':
self.stop[i].zone_id = 'Depot'
self.stop[0].actual_start_time = 0.0
self.stop[0].actual_end_time = 0.0
for i in range(len(self.stop)):
self.stop[i].order = i
self.stop[i].num_packages = len(self.stop[i].package_dict.keys())
self.stop[i].start_time_window = datetime.combine(self.date, self.departure_time)
self.stop[i].end_time_window = (self.stop[i].start_time_window + timedelta(hours=24)).strftime(
'%Y-%m-%d %H:%M:%S')
self.stop[i].end_time_window = datetime.strptime(self.stop[i].end_time_window, '%Y-%m-%d %H:%M:%S')
self.stop[i].is_tw_present = False
for package_value in self.stop[i].package_dict.values():
package_start_time = 0.0
package_end_time = 86400.0
package_value['is_tw_present'] = False
if package_value['scan_status'] == "DELIVERED":
self.stop[i].num_scans_delivered += 1
dimension_dict = package_value['dimensions']
temp_prod = 1.0
for value in dimension_dict.values():
temp_prod = temp_prod * value
package_value['volume'] = temp_prod
self.stop[i].total_package_vol += temp_prod
self.stop[i].planned_service_time += package_value['planned_service_time_seconds']
# set the tightest start and end times at each stop
if str(package_value['time_window']['start_time_utc']) != 'nan':
self.stop[i].is_tw_present = True
self.stop[i].num_packages_tw += 1
package_value['is_tw_present'] = True
package_start_time = datetime.strptime(str(package_value['time_window']['start_time_utc']),
'%Y-%m-%d %H:%M:%S')
if package_start_time > self.stop[i].start_time_window:
self.stop[i].start_time_window = package_start_time
package_start_time = datetime.strptime(str(package_value['time_window']['start_time_utc']),
'%Y-%m-%d %H:%M:%S')
package_start_time -= datetime.combine(self.date, self.departure_time)
package_start_time = package_start_time.total_seconds()
if str(package_value['time_window']['end_time_utc']) != 'nan':
package_end_time = datetime.strptime(str(package_value['time_window']['end_time_utc']),
'%Y-%m-%d %H:%M:%S')
if package_end_time < self.stop[i].end_time_window:
self.stop[i].end_time_window = package_end_time
package_end_time = datetime.strptime(str(package_value['time_window']['end_time_utc']),
'%Y-%m-%d %H:%M:%S')
package_end_time -= datetime.combine(self.date, self.departure_time)
package_end_time = package_end_time.total_seconds()
package_value['start_time'] = package_start_time
package_value['end_time'] = package_end_time
# calculate actual arrival and departure times
if i > 0:
self.stop[i].actual_start_time = self.stop[i - 1].actual_end_time + \
self.travel_time_dict[self.stop[i - 1].key][self.stop[i].key]
self.stop[i].actual_end_time = self.stop[i].actual_start_time + self.stop[i].planned_service_time
self.stop[i].num_scans_not_delivered = self.stop[i].num_packages - self.stop[i].num_scans_delivered
# convert start and end time windows in seconds from departure time
self.stop[i].start_time_window -= datetime.combine(self.date, self.departure_time)
self.stop[i].end_time_window -= datetime.combine(self.date, self.departure_time)
self.stop[i].start_time_window = self.stop[i].start_time_window.total_seconds()
self.stop[i].end_time_window = self.stop[i].end_time_window.total_seconds()
self.stop[0].end_time_window = 0.0 # no need to wait at the depot
if i > 0:
if self.stop[i].is_tw_present:
self.stop[i].wait_time = max(self.stop[i].start_time_window - self.stop[i].actual_start_time, 0)
self.stop[i].slack_time = max(self.stop[i].end_time_window - self.stop[i].actual_end_time, 0)
self.stop[i].max_slack = self.stop[i].end_time_window - (
self.stop[i].start_time_window + self.stop[i].planned_service_time)
# calculate time window metrics at a package level
for package_value in self.stop[i].package_dict.values():
if package_value['is_tw_present']:
package_value['wait_time'] = max(package_value['start_time'] - self.stop[i].actual_start_time, 0)
package_value['end_slack'] = max(package_value['end_time'] - self.stop[i].actual_end_time, 0)
package_value['max_slack'] = package_value['end_time'] - (
package_value['start_time'] + package_value['planned_service_time_seconds'])
if package_value['max_slack'] > 0: # avoid div by zero
package_value['slack_ratio'] = package_value['end_slack'] / package_value['max_slack']
else:
package_value['slack_ratio'] = 0
# aggregate these at a stop level
self.stop[i].weighted_pkg_wait_time += package_value['volume'] * package_value['wait_time']
self.stop[i].weighted_pkg_end_slack += package_value['volume'] * package_value['end_slack']
self.stop[i].weighted_pkg_max_slack += package_value['volume'] * package_value['max_slack']
self.stop[i].weighted_pkg_slack_ratio += package_value['volume'] * package_value['slack_ratio']
# check for time window violations
if self.stop[i].is_tw_present:
self.num_stops_tw += 1
if self.stop[i].actual_end_time > self.stop[i].end_time_window: # waiting is allowed
self.stop[i].is_tw_violated = True
self.num_tw_violations += 1
self.is_sequence_feasible = False
# New method for checking package infeasibility
for package_value in self.stop[i].package_dict.values():
if package_value['is_tw_present']:
if self.stop[i].actual_start_time + package_value['planned_service_time_seconds'] > package_value[
'end_time']:
self.stop[i].num_packages_not_delivered += 1
self.stop[i].vol_package_undelivered += package_value['volume']
def display_route_data(self):
"""Function that prints minimal route data to check code progress. Full details are written to a CSV file."""
print(self.index, self.key, self.score, self.num_stops_tw, self.num_packages_tw,
self.num_packages_not_delivered,
'%.2f' % self.vol_packages_not_delivered, '%.2f' % self.total_journey_time, '%.2f' % self.tsp_route_time,
self.is_tsp_feasible, )
# print(self.travel_time_dict)
# if self.index == 162:
# for i in range(len(self.stop)):
# print(self.stop[i].key, self.stop[i].order, self.stop[i].type,
# self.stop[i].zone_id, self.stop[i].num_packages, self.stop[i].num_packages_tw,
# self.stop[i].num_scans_not_delivered, self.stop[i].num_packages_not_delivered,
# self.stop[i].total_package_vol, self.stop[i].vol_package_undelivered, self.stop[i].start_time_window,
# self.stop[i].end_time_window, self.stop[i].actual_start_time, self.stop[i].actual_end_time)
# print(json.dumps(self.stop[i].package_dict, indent=4)) # prints the entire package dictionary
def create_tsp_data(rt):
"""Stores the distance matrix for the stops on the route"""
data = {}
data['time_matrix'] = []
row = []
for i in range(len(rt.stop)):
for j in range(len(rt.stop)):
if rt.stop[i].key != rt.stop[j].key:
row.append(int(math.floor(
rt.travel_time_dict[rt.stop[i].key][rt.stop[j].key] * 10 + rt.stop[i].planned_service_time * 10)))
else:
row.append(0) # diagonal elements of the matrix are set to zeros
data['time_matrix'].append(row)
row = []
data['service_time'] = []
data['time_windows'] = []
for i in range(len(rt.stop)):
left_end_point = math.floor(rt.stop[i].start_time_window * 10)
right_end_point = math.floor(rt.stop[i].end_time_window * 10 - rt.stop[i].planned_service_time * 10)
data['time_windows'].append((int(left_end_point), int(right_end_point)))
data['service_time'].append(rt.stop[i].planned_service_time)
data['num_vehicles'] = 1
data['depot'] = 0
return data
def print_tsp_solution(manager, routing, solution):
"""Prints solution on console."""
print('Objective: {} seconds'.format(solution.ObjectiveValue()))
index = routing.Start(0)
plan_output = 'Route for vehicle 0:\n'
route_distance = 0
while not routing.IsEnd(index):
plan_output += ' {} ->'.format(manager.IndexToNode(index))
previous_index = index
index = solution.Value(routing.NextVar(index))
route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)
plan_output += ' {}\n'.format(manager.IndexToNode(index))
plan_output += 'OR tools TSP route time: {} seconds\n'.format(route_distance)
print(plan_output)
def set_tsp_dict(rt, manager, routing, solution):
actual_route_dict = {}
for i in range(len(rt.stop)):
actual_route_dict[rt.stop[i].order] = rt.stop[i].key
count = 0
index = routing.Start(0)
rt.tsp_route_dict = {}
rt.tsp_route_dict[actual_route_dict[index]] = count
rt.stop[index].tsp_order = count
while not routing.IsEnd(index):
index = solution.Value(routing.NextVar(index))
if manager.IndexToNode(index) != 0: # the revisit to depot can be ignored in the dictionary
count += 1
rt.tsp_route_dict[actual_route_dict[index]] = count
rt.stop[index].tsp_order = count
rt.tsp_route_dict = dict(sorted(rt.tsp_route_dict.items(), key=lambda item: item[1]))
def compute_tsp_tour(rt, data):
"""OR tools function that computes the TSP. Settings can be changed for time limits and solution method"""
try:
# print('Solving TSP now')
# Create the routing index manager.
manager = pywrapcp.RoutingIndexManager(len(data['time_matrix']),
data['num_vehicles'], data['depot'])
# Create Routing Model.
routing = pywrapcp.RoutingModel(manager)
def distance_callback(from_index, to_index):
"""Returns the distance between the two nodes."""
# Convert from routing variable Index to distance matrix NodeIndex.
from_node = manager.IndexToNode(from_index)
to_node = manager.IndexToNode(to_index)
return data['time_matrix'][from_node][to_node]
transit_callback_index = routing.RegisterTransitCallback(distance_callback)
# Define cost of each arc.
routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
# Setting first solution heuristic. Parameters are set to defaults. Check website for more options.
search_parameters = pywrapcp.DefaultRoutingSearchParameters()
search_parameters.first_solution_strategy = (
routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)
# Solve the problem
solution = routing.SolveWithParameters(search_parameters)
rt.tsp_solver_status = routing.status()
if rt.tsp_solver_status == 1:
rt.tsp_route_time = solution.ObjectiveValue() / 10.0
rt.tsp_optimality_gap = (rt.total_journey_time - rt.tsp_route_time) / rt.tsp_route_time
# Save TSP tour in the form a dictionary for scoring
set_tsp_dict(rt, manager, routing, solution)
else:
rt.tsp_route_time = CONST_INFTY
rt.tsp_optimality_gap = (rt.total_journey_time - rt.tsp_route_time) / rt.tsp_route_time
# # Print solution on console
# if solution:
# print_tsp_solution(manager, routing, solution)
except:
print('Exception found while analysing TSP for route', rt.index)
rt.tsp_route_time = CONST_INFTY
rt.tsp_optimality_gap = (rt.total_journey_time - rt.tsp_route_time) / rt.tsp_route_time
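# Hedged illustration of the OR-Tools routing pattern used above, on assumed toy values.
def _toy_tsp_example():
    """Self-contained sketch (not called by the pipeline) mirroring compute_tsp_tour
    on a hypothetical 4-stop symmetric time matrix."""
    time_matrix = [[0, 10, 15, 20],
                   [10, 0, 35, 25],
                   [15, 35, 0, 30],
                   [20, 25, 30, 0]]
    manager = pywrapcp.RoutingIndexManager(len(time_matrix), 1, 0)  # 1 vehicle, depot at node 0
    routing = pywrapcp.RoutingModel(manager)

    def time_callback(from_index, to_index):
        return time_matrix[manager.IndexToNode(from_index)][manager.IndexToNode(to_index)]

    transit_index = routing.RegisterTransitCallback(time_callback)
    routing.SetArcCostEvaluatorOfAllVehicles(transit_index)
    search_parameters = pywrapcp.DefaultRoutingSearchParameters()
    search_parameters.first_solution_strategy = (
        routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)
    solution = routing.SolveWithParameters(search_parameters)
    if solution:
        print('Toy TSP objective:', solution.ObjectiveValue())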
def check_tsp_feasibility(rt):
"""Function that checks the time window feasibility of a route sequence TODO ordered dictionary required?"""
for stop_key, order in rt.tsp_route_dict.items():
# print(stop_key, order)
# print(rt.stp_dict[stop_key])
index = rt.stop_key_index_dict[stop_key]
if order == 0: # origin depot
rt.stop[index].actual_tsp_start_time = 0.0
rt.stop[index].actual_tsp_end_time = 0.0
prev_stop_key = stop_key
prev_index = rt.stop_key_index_dict[prev_stop_key]
else:
rt.stop[index].actual_tsp_start_time = rt.stop[prev_index].actual_tsp_end_time + \
rt.travel_time_dict[prev_stop_key][stop_key]
rt.stop[index].actual_tsp_end_time = rt.stop[index].actual_tsp_start_time + rt.stop[
index].planned_service_time
prev_stop_key = stop_key
prev_index = rt.stop_key_index_dict[prev_stop_key]
if rt.stop[index].actual_tsp_end_time > rt.stop[index].end_time_window:
rt.stop[index].is_tsp_feasible = False
rt.is_tsp_feasible = False
def read_route_data():
"""Reads the JSON files and populates class variables"""
route_list = []
temp_route = Route()
training_routes_path = path.join(BASE_DIR, 'data/model_build_inputs/route_data.json')
with open(training_routes_path) as f:
route_data = json.load(f)
training_sequence_path = path.join(BASE_DIR, 'data/model_build_inputs/actual_sequences.json')
with open(training_sequence_path) as f:
actual_sequences = json.load(f)
training_package_path = path.join(BASE_DIR, 'data/model_build_inputs/package_data.json')
with open(training_package_path) as f:
package_data = json.load(f)
training_traveltime_path = path.join(BASE_DIR, 'data/model_build_inputs/travel_times.json')
with open(training_traveltime_path) as f:
travel_time = json.load(f)
count = 0
key_index_dict = {}
for key in route_data.keys():
temp_route.index = count
temp_route.key = key
key_index_dict[key] = count
temp_route.station_code = route_data[key]['station_code']
temp_route.date = datetime.strptime(route_data[key]['date_YYYY_MM_DD'], '%Y-%m-%d').date()
temp_route.departure_time = datetime.strptime(route_data[key]['departure_time_utc'], '%H:%M:%S').time()
temp_route.executor_capacity = route_data[key]['executor_capacity_cm3']
temp_route.score = route_data[key]['route_score']
# sort stops based on the actual order in which they were traversed and store them in the nested class
stop_dict = route_data[key]['stops']
temp_stop = temp_route.Stop()
sorted_stop_dict = dict(sorted(actual_sequences[key]['actual'].items(), key=lambda item: item[1]))
temp_route.stop_key_index_dict = dict(sorted_stop_dict)
for stop_key in sorted_stop_dict.keys():
temp_stop.key = stop_key
temp_stop.lat = stop_dict[stop_key]['lat']
temp_stop.lng = stop_dict[stop_key]['lng']
temp_stop.type = stop_dict[stop_key]['type']
temp_stop.zone_id = stop_dict[stop_key]['zone_id']
temp_stop.package_dict = package_data[key][stop_key]
temp_route.stop.append(temp_stop)
temp_stop = temp_route.Stop()
temp_route.travel_time_dict = travel_time[key]
route_list.append(temp_route)
count += 1
temp_route = Route()
return route_list, key_index_dict, travel_time
def read_training_route_data():
"""Reads the JSON files and populates class variables"""
route_list = []
temp_route = Route()
training_routes_path = path.join(BASE_DIR, 'data/model_build_inputs/route_data.json')
with open(training_routes_path) as f:
route_data = json.load(f)
training_sequence_path = path.join(BASE_DIR, 'data/model_build_inputs/actual_sequences.json')
with open(training_sequence_path) as f:
actual_sequences = json.load(f)
training_package_path = path.join(BASE_DIR, 'data/model_build_inputs/package_data.json')
with open(training_package_path) as f:
package_data = json.load(f)
training_traveltime_path = path.join(BASE_DIR, 'data/model_build_inputs/travel_times.json')
with open(training_traveltime_path) as f:
travel_time = json.load(f)
training_invalid_score = path.join(BASE_DIR, 'data/model_build_inputs/invalid_sequence_scores.json')
with open(training_invalid_score) as f:
invalid_score = json.load(f)
count = 0
random.seed(17)
new_route_data = {}
new_package_data = {}
new_travel_time = {}
new_actual_sequence = {}
new_invalid_sequence_score = {}
key_index_dict = {}
testing_keys = [] # holds the small random fraction of 'High'-scored routes held out as testing data
for key in route_data.keys():
temp_route.index = count
temp_route.key = key
key_index_dict[key] = count
temp_route.station_code = route_data[key]['station_code']
temp_route.date = datetime.strptime(route_data[key]['date_YYYY_MM_DD'], '%Y-%m-%d').date()
temp_route.departure_time = datetime.strptime(route_data[key]['departure_time_utc'], '%H:%M:%S').time()
temp_route.executor_capacity = route_data[key]['executor_capacity_cm3']
temp_route.score = route_data[key]['route_score']
# sort stops based on the actual order in which they were traversed and store them in the nested class
stop_dict = route_data[key]['stops']
temp_stop = temp_route.Stop()
sorted_stop_dict = dict(sorted(actual_sequences[key]['actual'].items(), key=lambda item: item[1]))
temp_route.stop_key_index_dict = dict(sorted_stop_dict)
for stop_key in sorted_stop_dict.keys():
temp_stop.key = stop_key
temp_stop.lat = stop_dict[stop_key]['lat']
temp_stop.lng = stop_dict[stop_key]['lng']
temp_stop.type = stop_dict[stop_key]['type']
temp_stop.zone_id = stop_dict[stop_key]['zone_id']
temp_stop.package_dict = package_data[key][stop_key]
temp_route.stop.append(temp_stop)
temp_stop = temp_route.Stop()
temp_route.travel_time_dict = travel_time[key]
route_list.append(temp_route)
count += 1
temp_route = Route()
# TODO: Check if we are using this high subset of routes in the final version
if route_data[key]['route_score'] == 'High' and random.uniform(0, 1) < 0.15: # testing data
testing_keys.append(key)
new_route_data[key] = route_data[key]
new_package_data[key] = package_data[key]
new_travel_time[key] = travel_time[key]
new_actual_sequence[key] = actual_sequences[key]
new_invalid_sequence_score[key] = invalid_score[key]
# TODO: Remove this in the final version. We pull the apply data from their new routes.
with open(path.join(BASE_DIR, 'data/model_apply_inputs/new_route_data.json'), "w") as outfile:
json.dump(new_route_data, outfile)
with open(path.join(BASE_DIR, 'data/model_apply_inputs/new_package_data.json'), "w") as outfile:
json.dump(new_package_data, outfile)
with open(path.join(BASE_DIR, 'data/model_apply_inputs/new_travel_times.json'), "w") as outfile:
json.dump(new_travel_time, outfile)
with open(path.join(BASE_DIR, 'data/model_score_inputs/new_actual_sequences.json'), "w") as outfile:
json.dump(new_actual_sequence, outfile)
with open(path.join(BASE_DIR, 'data/model_score_inputs/new_invalid_sequence_scores.json'), "w") as outfile:
json.dump(new_invalid_sequence_score, outfile)
# print(testing_keys)
return route_list, key_index_dict, travel_time, testing_keys
def output_route_df(route, testing_keys):
"""Outputs processed data to a CSV file"""
row_list_training = []
row_list_testing = []
row_list_full = []
for rt in route:
temp_dict = {'Index': rt.index,
'Key': rt.key,
'Station_Code': rt.station_code,
'Date': rt.date,
'Departure_Time': rt.departure_time,
'Executor_Capacity': rt.executor_capacity,
'Score': rt.score,
'Num_Stops': rt.num_stops,
'Num_Stops_TW': rt.num_stops_tw,
'Num_Stops_TW_Violations': rt.num_tw_violations,
'Num_Packages': rt.num_packages,
'Num_Packages_TW': rt.num_packages_tw,
'Num_Scans_Delivered': rt.num_scans_delivered,
'Num_Scans_Not_Delivered': rt.num_scans_not_delivered,
'Num_Packages_Not_Delivered': rt.num_packages_not_delivered,
'Vol_Packages_Not_Delivered': rt.vol_packages_not_delivered,
'Packages_Per_Stop': rt.packages_per_stop,
'Total_Travel_Time': rt.travel_time,
'Total_Service_Time': rt.service_time,
'Total_Journey_Time': rt.total_journey_time,
'Filled_Capacity': rt.filled_capacity,
'Filled_Capacity_Percent': rt.filled_capacity_percent,
'Volume_Seconds_Traveled': rt.volume_seconds_traveled,
'VST_Ratio': rt.vst_ratio,
'Total_Wait_Time': rt.total_wait_time,
'Total_End_Slack': rt.total_end_slack,
'Total_Max_Slack': rt.total_max_slack,
'Slack_Ratio': rt.slack_ratio,
'Weighted_Pkg_Wait_Time': rt.weighted_pkg_wait_time,
'Weighted_Pkg_End_Slack': rt.weighted_pkg_end_slack,
'Weighted_Pkg_Max_Slack': rt.weighted_pkg_max_slack,
'Weighted_Pkg_Slack_Ratio': rt.weighted_pkg_slack_ratio,
'Num_Zones': rt.num_zones,
'Num_Zone_Switches': rt.num_zone_switches,
'Switch_Stop_Ratio': rt.switch_stop_ratio,
'Switch_Zone_Ratio': rt.switch_zone_ratio,
'Max_Segment_Time': rt.max_segment_time,
'Variance_Segment_Time': rt.var_segment_time,
'Segment_Time_Ratio': rt.segment_time_ratio,
'Max_Segment_Dist': rt.max_segment_dist,
'Variance_Segment_Dist': rt.var_segment_dist,
'Segment_Dist_Ratio': rt.segment_dist_ratio,
'Total_Dist': rt.total_dist,
'Average_Speed': rt.average_speed,
'Is_Weekend': int(rt.is_weekend == True),
'Is_Sequence_Feasible': int(rt.is_sequence_feasible == True),
'TSP_Solver_Status': rt.tsp_solver_status,
'TSP_Route_Time': rt.tsp_route_time,
'TSP_Optimality_Gap': rt.tsp_optimality_gap,
'Is_TSP_Feasible': int(rt.is_tsp_feasible == True)
}
if rt.key in testing_keys:
row_list_testing.append(temp_dict)
else:
row_list_training.append(temp_dict)
row_list_full.append(temp_dict)
df = pd.DataFrame(row_list_testing, columns=temp_dict.keys())
output_path = path.join(BASE_DIR, 'data/model_build_outputs/route_summary_testing_jun18.csv')
df.to_csv(output_path, index=False)
df = pd.DataFrame(row_list_training, columns=temp_dict.keys())
output_path = path.join(BASE_DIR, 'data/model_build_outputs/route_summary_training_jun18.csv')
df.to_csv(output_path, index=False)
df = pd.DataFrame(row_list_full, columns=temp_dict.keys())
output_path = path.join(BASE_DIR, 'data/model_build_outputs/route_summary_full_jun18.csv')
df.to_csv(output_path, index=False)
def output_stop_df(route, testing_keys):
"""Function to create a CSV file with lat long locations and order of nodes visited for visualization"""
row_list_training = []
row_list_testing = []
for rt in route:
for i in range(len(rt.stop)):
temp_dict = {'Route_Index': rt.index,
'Route_Key': rt.key,
'Stop_Key': rt.stop[i].key,
'Stop_Order': rt.stop[i].order,
'Latitude': rt.stop[i].lat,
'Longitude': rt.stop[i].lng,
'X_Coordinate': '%.4f' % rt.stop[i].proj_x,
'Y_Coordinate': '%.4f' % rt.stop[i].proj_y,
'Zone_ID': rt.stop[i].zone_id,
'Planned_Service_Time': rt.stop[i].planned_service_time,
'TW_Constraint': rt.stop[i].is_tw_present,
'TW_Violated': rt.stop[i].is_tw_violated,
'Start_Time_Window': rt.stop[i].start_time_window,
'End_Time_Window': rt.stop[i].end_time_window,
'Actual_Start_Time': rt.stop[i].actual_start_time,
'Actual_End_Time': rt.stop[i].actual_end_time,
'Wait_Time': rt.stop[i].wait_time,
'Slack_Time': rt.stop[i].slack_time,
'Max_Slack': rt.stop[i].max_slack,
'Num_Packages': rt.stop[i].num_packages,
'Num_Packages_TW': rt.stop[i].num_packages_tw,
'Num_Scans_Delivered': rt.stop[i].num_scans_delivered,
'Num_Scans_Not_Delivered': rt.stop[i].num_scans_not_delivered,
'Num_Packages_Not_Delivered': rt.stop[i].num_packages_not_delivered,
'Total_Package_Volume': rt.stop[i].total_package_vol,
'Volume_Undelivered': rt.stop[i].vol_package_undelivered,
'TSP_Order': rt.stop[i].tsp_order,
'Actual_TSP_Start_Time': rt.stop[i].actual_tsp_start_time,
'Actual_TSP_End_Time': rt.stop[i].actual_tsp_end_time,
'Is_TSP_Feasible': rt.stop[i].is_tsp_feasible}
if rt.key in testing_keys:
row_list_testing.append(temp_dict)
else:
row_list_training.append(temp_dict)
df = pd.DataFrame(row_list_testing, columns=temp_dict.keys())
output_path = path.join(BASE_DIR, 'data/model_build_outputs/stop_summary_testing_jun18.csv')
df.to_csv(output_path, index=False)
df = pd.DataFrame(row_list_training, columns=temp_dict.keys())
output_path = path.join(BASE_DIR, 'data/model_build_outputs/stop_summary_training_jun18.csv')
df.to_csv(output_path, index=False)
def core_block(rt):
rt.compute_stop_package_features()
rt.compute_route_features()
tsp_data = create_tsp_data(rt)
compute_tsp_tour(rt, tsp_data)
check_tsp_feasibility(rt)
rt.display_route_data()
return rt
if __name__ == '__main__':
# begin = time.time()
# num_cpu = int(mp.cpu_count() * 0.75) # use 75% of CPUs on the machine
# route, route_key_index_dict, travel_time_json, testing_keys = read_training_route_data()
# # route, route_key_index_dict, travel_time_json = read_route_data()
# print("Data reading complete... in", (time.time() - begin), "seconds")
# try:
# """parallel code"""
# begin = time.time()
# print('Beginning parallel block...')
# with mp.Pool(num_cpu) as pool:
# results = pool.map(core_block, [rt for rt in route])
# pool.close()
# pool.join()
# print('Parallel block complete...')
# print('Time for this block is %.2f minutes' % ((time.time() - begin) / 60))
# except:
# """serial code"""
# begin = time.time()
# print('Beginning serial block...')
# results = [core_block(rt) for rt in route]
# print('Serial block complete...')
# print('Time for this block is %.2f minutes' % ((time.time() - begin) / 60))
# output_route_df(results, testing_keys)
# output_stop_df(results, testing_keys)
print('Starting model training...')
train_xgboost_classifier()
print('Model build complete...')
|
StarcoderdataPython
|
6635807
|
<filename>pivoteer/collectors/api.py
import logging
import requests
import json
class PassiveTotal(object):
base_url = "https://api.passivetotal.org"
headers = { 'Content-Type': 'application/json' }
api_versions = {"v2": "/v2",
"current": "/current"}
GET_resources = {"metadata": "/metadata",
"passive": "/dns/passive",
"subdomains": "/subdomains",
"tags": "/user/tags",
"watch_status": "/watching",
"compromise_status": "/ever_compromised",
"dynamic_status": "/dynamic",
"sinkhole_status": "/sinkhole",
"classification": "/classification",
"ssl_cert_by_ip": "/ssl_certificate/ip_address",
"ssl_cert_by_hash": "/ssl_certificate/hash"}
POST_resources = {"set_dynamic_status": "/dynamic",
"set_watch_status": "/watching",
"set_compromise_status": "/ever_compromised",
"add_tag": "/user/tag/add",
"remove_tag": "/user/tag/remove",
"set_classification": "/classification",
"set_sinkhole_status": "/sinkhole"}
def __init__(self, api_username, api_key, api_version=None):
self.__key = api_key
self.__username = api_username
if api_version:
try:
self.api_version = self.api_versions[api_version]
except KeyError:
logging.warning("Unrecognized API version, defaulting to v2")
self.api_version = self.api_versions["v2"]
else:
self.api_version = self.api_versions["v1"]
def retrieve_data(self, query, resource):
if self.__key:
try:
data = '{"query": "' + query + '"}'
data_encode = data.encode('ascii')
api_call = self.GET_resources[resource]
url = self.base_url + self.api_version + api_call
response = requests.get(url, headers=self.headers, data=data_encode, auth=(self.__username, self.__key))
json_response = json.loads(response.content.decode('utf-8'))
records = json_response['results']
results = []
for entry in records:
results.append({
'date': entry['collected'],
'firstseen': entry['firstSeen'],
'lastseen': entry['lastSeen'],
'ip': entry['resolve'],
'domain': entry['value'],
'ip_location': {}
})
return results
except KeyError:
logging.warning("Unrecognized API resource or malformed query")
return []
def submit_data(self, query, resource):
if self.__key:
try:
api_call = self.POST_resources[resource]
url = self.base_url + self.api_version + api_call
params = {"api_key": self.__key, "query": query}
response = requests.post(url, params=params)
json_response = json.loads(response.content)
return json_response
except KeyError:
logging.warning("Unrecognized API resource or malformed query")
return []
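# --- Hedged usage sketch (credentials are placeholders; network access required) --------
# retrieve_data() returns the simplified passive-DNS records built above; "passive" is
# one of the keys in GET_resources.
if __name__ == '__main__':
    pt = PassiveTotal('<EMAIL>', '<API_KEY>', api_version='v2')
    for record in pt.retrieve_data('example.com', 'passive'):
        print(record['date'], record['ip'], record['domain'])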
|
StarcoderdataPython
|
4834333
|
<gh_stars>0
# Write a Python program to find files and skip directories of a given directory.
import os
print([f for f in os.listdir('/home/oem/adi_workspace') if os.path.isfile(os.path.join('/home/oem/adi_workspace', f))])
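# Hedged alternative shown for comparison: pathlib yields the same list of plain files
# (the directory path is the same illustrative one used above).
from pathlib import Path
print([p.name for p in Path('/home/oem/adi_workspace').iterdir() if p.is_file()])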
|
StarcoderdataPython
|
3587316
|
__author__ = 'moshebasanchig'
from hydro.topology_base import Topology
class GeoWidgetTopology(Topology):
def _submit(self, params):
"""
A topology consists of several steps: defining one or more source streams, then combining and transforming them.
"""
main_stream = self.query_engine.get('geo_widget', params)
lookup_stream = self.query_engine.get('geo_lookup', params, cache_ttl=1)
combined = self.transformers.combine(main_stream, lookup_stream, left_on=['user_id'], right_on=['user_id'])
aggregated = self.transformers.aggregate(combined, group_by=['country'], operators={'revenue': 'sum', 'spend': 'sum'})
aggregated['ROI'] = aggregated.revenue/(aggregated.spend+aggregated.revenue)
return aggregated
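# --- Hedged reference sketch (plain pandas, independent of the hydro framework) ---------
# The _submit step above boils down to a join on user_id, a per-country sum, and an ROI
# column; with raw DataFrames the equivalent would look roughly like this:
def _geo_widget_reference(main_stream, lookup_stream):
    combined = main_stream.merge(lookup_stream, on='user_id')
    aggregated = combined.groupby('country', as_index=False)[['revenue', 'spend']].sum()
    aggregated['ROI'] = aggregated.revenue / (aggregated.spend + aggregated.revenue)
    return aggregated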
|
StarcoderdataPython
|
1795174
|
<reponame>AmeetR/Monocular-Depth-Estimation-DAV<filename>src/data/DIODE_pointcloud.py
import open3d
import pandas as pd
import cv2
import numpy as np
import matplotlib.pyplot as plt
def plot_depth_map(dm, validity_mask):
validity_mask = validity_mask > 0
MIN_DEPTH = 0.
MAX_DEPTH = min(300, np.percentile(dm, 99))
dm = np.clip(dm, MIN_DEPTH, MAX_DEPTH)
dm = np.log(dm, where=validity_mask)
dm = np.ma.masked_where(~validity_mask, dm)
cmap = plt.cm.jet
cmap.set_bad(color='black')
plt.imshow(dm, cmap=cmap, vmax=np.log(MAX_DEPTH))
intrinsic = open3d.camera.PinholeCameraIntrinsic()
[width, height, fx, fy, cx, cy] = [1024, 768, 886.81, 927.06, 512, 384]
intrinsic.set_intrinsics(width, height, fx, fy, cx, cy)
idx = 255
data = pd.read_csv('C:\\Data\\DIODE\\train.csv')
example = data['img_path'][idx]
example_depth = (np.load(data['depth_path'][idx]) * 1000).astype('uint16')
cv2.imwrite(data['depth_path'][idx].replace('.npy', '.png'), example_depth)
color_raw = open3d.io.read_image(example)
depth_raw = open3d.io.read_image(data['depth_path'][idx].replace('.npy', '.png'))
rgbd_image = open3d.geometry.RGBDImage.create_from_color_and_depth(
color_raw, depth_raw)
validity_mask = np.load(data['depth_mask_path'][idx])
point_cloud = open3d.geometry.PointCloud.create_from_rgbd_image(rgbd_image, intrinsic)
#point_cloud = open3d.geometry.PointCloud.create_from_depth_image(depth_raw , intrinsic, depth_scale=1.0)
print(point_cloud)
downpcd = point_cloud.voxel_down_sample(voxel_size=0.05)
open3d.visualization.draw_geometries([downpcd])
plt.subplot(1, 2, 1)
plt.title('Redwood grayscale image')
plt.imshow(rgbd_image.color)
plt.subplot(1, 2, 2)
plt.title('Redwood depth image')
depth_array = np.asarray(rgbd_image.depth)
MAX_DEPTH = min(300, np.percentile(depth_array, 99))
plot_depth_map(depth_array, validity_mask)
plt.show()
|
StarcoderdataPython
|
12816332
|
from abc import abstractmethod
from typing import Dict, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from transformers import AutoModel, AutoConfig
from .layers import WordSequence
class BackBone(nn.Module):
def __init__(self, n_class, binary_mode=False):
if binary_mode:
assert n_class == 2
n_class = 1
self.n_class = n_class
super(BackBone, self).__init__()
self.dummy_param = nn.Parameter(torch.empty(0))
@property
def device(self):
return self.dummy_param.device
def get_device(self):
return self.dummy_param.device
@abstractmethod
def forward(self, batch: Dict, return_features: Optional[bool] = False):
pass
class BERTBackBone(BackBone):
def __init__(self, n_class, model_name='bert-base-cased', fine_tune_layers=-1, binary_mode=False):
super(BERTBackBone, self).__init__(n_class=n_class, binary_mode=binary_mode)
self.model_name = model_name
self.config = AutoConfig.from_pretrained(model_name, num_labels=self.n_class, output_hidden_states=True)
self.model = AutoModel.from_pretrained(model_name, config=self.config)
if fine_tune_layers >= 0:
for param in self.model.base_model.embeddings.parameters(): param.requires_grad = False
if fine_tune_layers > 0:
n_layers = len(self.model.base_model.encoder.layer)
for layer in self.model.base_model.encoder.layer[:n_layers - fine_tune_layers]:
for param in layer.parameters():
param.requires_grad = False
@abstractmethod
def forward(self, batch: Dict, return_features: Optional[bool] = False):
pass
""" backbone for classification """
class LogReg(BackBone):
def __init__(self, n_class, input_size, binary_mode=False, **kwargs):
super(LogReg, self).__init__(n_class=n_class, binary_mode=binary_mode)
self.linear = nn.Linear(input_size, self.n_class)
def forward(self, batch, return_features=False):
x = batch['features'].to(self.get_device())
x = self.linear(x)
return x
class MLP(BackBone):
def __init__(self, n_class, input_size, n_hidden_layers=1, hidden_size=100, dropout=0.0, binary_mode=False, **kwargs):
super(MLP, self).__init__(n_class=n_class, binary_mode=binary_mode)
layers = [nn.Linear(input_size, hidden_size), nn.ReLU(), nn.Dropout(p=dropout)]
for i in range(n_hidden_layers - 1):
layers.extend([nn.Linear(hidden_size, hidden_size), nn.ReLU(), nn.Dropout(p=dropout)])
self.fcs = nn.Sequential(*layers)
self.last_layer = nn.Linear(hidden_size, self.n_class)
self.hidden_size = hidden_size
def forward(self, batch, return_features=False):
x = batch['features'].to(self.get_device())
h = self.fcs(x)
logits = self.last_layer(h)
if return_features:
return logits, h
else:
return logits
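# --- Hedged smoke test (random tensors, illustrative sizes; not part of the library) ----
# The backbones expect a batch dict; for MLP only the 'features' entry is read.
def _mlp_smoke_test():
    model = MLP(n_class=3, input_size=16, n_hidden_layers=2, hidden_size=32)
    batch = {'features': torch.randn(4, 16)}
    logits, features = model(batch, return_features=True)
    return logits.shape, features.shape  # expected: (4, 3) and (4, 32)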
""" torchvision for image classification """
class ImageClassifier(BackBone):
def __init__(self, n_class, model_name='resnet18', binary_mode=False, **kwargs):
super(ImageClassifier, self).__init__(n_class=n_class, binary_mode=binary_mode)
pretrained_model = getattr(torchvision.models, model_name)(pretrained=False)
self.model = nn.Sequential(*list(pretrained_model.children())[:-1])
# pretrained_model = getattr(torchvision.models, model_name)(pretrained=load_pretrained)
# self.model = nn.Sequential(*list(pretrained_model.children())[:-1])
# if load_pretrained and (not finetune_pretrained):
# for param in self.model.parameters():
# param.requires_grad = False
self.hidden_size = pretrained_model.fc.in_features
self.fc = nn.Linear(self.hidden_size, n_class)
def forward(self, batch, return_features=False):
h = self.model(batch['image'].to(self.get_device()))
h = torch.flatten(h, 1)
logits = self.fc(h)
if return_features:
return logits, h
else:
return logits
""" BERT for text classification """
#######################################################################################################################
class BertTextClassifier(BERTBackBone):
"""
Bert with a MLP on top for text classification
"""
def __init__(self, n_class, model_name='bert-base-cased', fine_tune_layers=-1, max_tokens=512, binary_mode=False, **kwargs):
super(BertTextClassifier, self).__init__(n_class=n_class, model_name=model_name, fine_tune_layers=fine_tune_layers, binary_mode=binary_mode)
self.dropout = nn.Dropout(self.config.hidden_dropout_prob)
self.classifier = nn.Linear(self.config.hidden_size, self.config.num_labels)
self.max_tokens = max_tokens
self.hidden_size = self.config.hidden_size
def forward(self, batch, return_features=False): # inputs: [batch, t]
device = self.get_device()
outputs = self.model(input_ids=batch["input_ids"].to(device), attention_mask=batch['mask'].to(device))
h = self.dropout(outputs.pooler_output)
output = self.classifier(h)
if return_features:
return output, h
else:
return output
""" BERT for relation classification """
#######################################################################################################################
class FClayer(nn.Module):
def __init__(self, input_dim, hidden_size=100, dropout=0., activation=True):
super(FClayer, self).__init__()
self.dropout = nn.Dropout(dropout)
self.linear = nn.Linear(input_dim, hidden_size)
self.tanh = nn.Tanh()
self.activation = activation
def forward(self, x):
x = self.dropout(x)
x = self.linear(x)
if self.activation:
return self.tanh(x)
else:
return x
class BertRelationClassifier(BERTBackBone):
"""
BERT with a MLP on top for relation classification
"""
def __init__(self, n_class, model_name='bert-base-cased', fine_tune_layers=-1, binary_mode=False, **kwargs):
super(BertRelationClassifier, self).__init__(n_class=n_class, model_name=model_name, fine_tune_layers=fine_tune_layers, binary_mode=binary_mode)
self.fc_cls = FClayer(self.config.hidden_size, self.config.hidden_size, dropout=self.config.hidden_dropout_prob)
self.fc_e1 = FClayer(self.config.hidden_size, self.config.hidden_size, dropout=self.config.hidden_dropout_prob)
self.fc_e2 = FClayer(self.config.hidden_size, self.config.hidden_size, dropout=self.config.hidden_dropout_prob)
self.output = FClayer(self.config.hidden_size * 3, self.n_class, dropout=self.config.hidden_dropout_prob, activation=False)
self.hidden_size = self.config.hidden_size * 3
@staticmethod
def entity_average(hidden_output, e_mask):
"""
Average the entity hidden state vectors (H_i ~ H_j)
:param hidden_output: [batch_size, j-i+1, dim]
:param e_mask: [batch_size, max_seq_len]
e.g. e_mask[0] == [0, 0, 0, 1, 1, 1, 0, 0, ... 0]
:return: [batch_size, dim]
"""
e_mask_unsqueeze = e_mask.unsqueeze(1) # [b, 1, j-i+1]
length_tensor = (e_mask != 0).sum(dim=1).unsqueeze(1) # [batch_size, 1]
sum_vector = torch.bmm(e_mask_unsqueeze.float(), hidden_output).squeeze(1) # [b, 1, j-i+1] * [b, j-i+1, dim] = [b, 1, dim] -> [b, dim]
avg_vector = sum_vector.float() / length_tensor.float() # broadcasting
return avg_vector
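    # Illustrative example (not from the original file): if e_mask[b] = [0, 1, 1, 0, ..., 0],
    # the returned row for sample b is (H_1 + H_2) / 2, i.e. the mean hidden state over the
    # tokens selected by the entity mask.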
def forward(self, batch, return_features=False):
device = self.get_device()
outputs = self.model(input_ids=batch["input_ids"].to(device), attention_mask=batch['mask'].to(device))
bert_out = outputs.last_hidden_state
cls_embs = self.fc_cls(outputs.pooler_output)
ent1_avg = self.fc_e1(self.entity_average(bert_out, batch['e1_mask'].to(device)))
ent2_avg = self.fc_e2(self.entity_average(bert_out, batch['e2_mask'].to(device)))
h = torch.cat([cls_embs, ent1_avg, ent2_avg], dim=-1)
output = self.output(h)
if return_features:
return output, h
else:
return output
""" for sequence tagging """
class CRFTagger(BackBone):
def __init__(self, n_class, use_crf):
super(CRFTagger, self).__init__(n_class=n_class)
self.use_crf = use_crf
if self.use_crf:
self.crf = CRF(n_class)
def calculate_loss(self, batch, batch_label):
device = self.get_device()
outs = self.get_features(batch)
mask = batch['mask'].to(device)
batch_size, seq_len, _ = outs.shape
batch_label = batch_label[:, :seq_len].to(device)
if self.use_crf:
total_loss = self.crf.neg_log_likelihood_loss(outs, mask, batch_label)
total_loss = total_loss / batch_size
else:
outs = outs.view(batch_size * seq_len, -1)
mask = mask.reshape(batch_size * seq_len).bool()
batch_label = batch_label.reshape(batch_size * seq_len)
score = F.log_softmax(outs, 1)
total_loss = F.nll_loss(score[mask], batch_label[mask])
return total_loss
def forward(self, batch):
device = self.get_device()
outs = self.get_features(batch)
mask = batch['mask'].to(device)
if self.use_crf:
scores, tag_seq = self.crf(outs, mask)
else:
batch_size, seq_len, _ = outs.shape
outs = outs.view(batch_size * seq_len, -1)
_, tag = torch.max(outs, 1)
tag = tag.view(batch_size, seq_len)
tag_seq = [[tt for tt, mm in zip(t, m) if mm] for t, m in zip(tag.tolist(), mask.tolist())]
return tag_seq
@abstractmethod
def get_features(self, batch):
pass
class LSTMSeqTagger(CRFTagger):
def __init__(self,
n_class,
word_vocab_size,
char_vocab_size,
use_crf,
dropout,
word_embedding,
word_emb_dim,
word_hidden_dim,
word_feature_extractor,
n_word_hidden_layer,
use_char,
char_embedding,
char_emb_dim,
char_hidden_dim,
char_feature_extractor,
**kwargs):
super(LSTMSeqTagger, self).__init__(n_class=n_class, use_crf=use_crf)
if use_crf:
n_class += 2
self.word_hidden = WordSequence(
word_vocab_size=word_vocab_size,
char_vocab_size=char_vocab_size,
dropout=dropout,
word_embedding=word_embedding,
word_emb_dim=word_emb_dim,
word_hidden_dim=word_hidden_dim,
word_feature_extractor=word_feature_extractor,
n_word_hidden_layer=n_word_hidden_layer,
use_char=use_char,
char_embedding=char_embedding,
char_emb_dim=char_emb_dim,
char_hidden_dim=char_hidden_dim,
char_feature_extractor=char_feature_extractor
)
self.classifier = nn.Linear(word_hidden_dim, n_class)
def get_features(self, batch):
device = self.get_device()
word_inputs = batch['word'].to(device)
word_seq_lengths = batch['word_length']
char_inputs = batch['char'].to(device)
char_seq_lengths = batch['char_length']
char_inputs = char_inputs.flatten(0, 1)
char_seq_lengths = char_seq_lengths.flatten()
feature_out = self.word_hidden(word_inputs, word_seq_lengths, char_inputs, char_seq_lengths)
outs = self.classifier(feature_out)
return outs
class BertSeqTagger(CRFTagger):
"""
BERT for sequence tagging
"""
def __init__(self, n_class, model_name='bert-base-cased', fine_tune_layers=-1, use_crf=True, **kwargs):
super(BertSeqTagger, self).__init__(n_class=n_class, use_crf=use_crf)
self.model_name = model_name
config = AutoConfig.from_pretrained(self.model_name, output_hidden_states=True)
self.model = AutoModel.from_pretrained(self.model_name, config=config)
self.config = config
if fine_tune_layers >= 0:
for param in self.model.base_model.embeddings.parameters(): param.requires_grad = False
if fine_tune_layers > 0:
n_layers = len(self.model.base_model.encoder.layer)
for layer in self.model.base_model.encoder.layer[:n_layers - fine_tune_layers]:
for param in layer.parameters():
param.requires_grad = False
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.use_crf = use_crf
if self.use_crf:
self.classifier = nn.Linear(config.hidden_size, n_class + 2) # consider <START> and <END> token
else:
self.classifier = nn.Linear(config.hidden_size, n_class + 1)
def get_features(self, batch):
device = self.get_device()
outputs = self.model(input_ids=batch["input_ids"].to(device), attention_mask=batch['attention_mask'].to(device))
outs = self.classifier(self.dropout(outputs.last_hidden_state))
if self.use_crf:
return outs
else:
return outs[:, :, :-1]
START_TAG = -2
STOP_TAG = -1
class CRF(BackBone):
def __init__(self, n_class, batch_mode=True):
super(CRF, self).__init__(n_class=n_class)
# Matrix of transition parameters. Entry i,j is the score of transitioning from i to j.
self.n_class = n_class + 2
self.batch_mode = batch_mode
# # We add 2 here, because of START_TAG and STOP_TAG
# # transitions (f_tag_size, t_tag_size), transition value from f_tag to t_tag
init_transitions = torch.randn(self.n_class, self.n_class)
self.START_TAG = -2
self.STOP_TAG = -1
init_transitions[:, self.START_TAG] = -1e5
init_transitions[self.STOP_TAG, :] = -1e5
self.transitions = nn.Parameter(init_transitions, requires_grad=True)
self.start_id = nn.Parameter(torch.LongTensor([self.START_TAG]), requires_grad=False)
self.stop_id = nn.Parameter(torch.LongTensor([self.STOP_TAG]), requires_grad=False)
def _score_sentence_batch(self, feats, tags, mask, transitions=None):
# Gives the score of a provided tag sequence
# tags is ground_truth, a list of ints, length is len(sentence)
# feats is a 2D tensor, len(sentence) * n_class
if transitions is None:
transitions = self.transitions
batch_size = tags.size(0)
seq_len = mask.long().sum(1)
r_batch = torch.arange(batch_size)
pad_start_tags = torch.cat([self.start_id.expand(batch_size, 1), tags], -1)
pad_stop_tags = torch.cat([tags, self.stop_id.expand(batch_size, 1)], -1)
pad_stop_tags[r_batch, seq_len] = self.stop_id
t = transitions[pad_start_tags, pad_stop_tags]
t_score = torch.sum(t.cumsum(1)[r_batch, seq_len])
f_score = torch.sum(torch.gather(feats, -1, tags.unsqueeze(2)).squeeze(2).masked_select(mask.bool()))
score = t_score + f_score
return score
def _score_sentence(self, feats, tags, transitions=None):
# Gives the score of a provided tag sequence
# tags is ground_truth, a list of ints, length is len(sentence)
# feats is a 2D tensor, len(sentence) * n_class
if transitions is None:
transitions = self.transitions
pad_start_tags = torch.cat([self.start_id, tags])
pad_stop_tags = torch.cat([tags, self.stop_id])
r = torch.arange(feats.size(0))
score = torch.sum(transitions[pad_start_tags, pad_stop_tags]) + torch.sum(feats[r, tags])
return score
def _forward_alg_batch(self, feats, mask, transitions=None):
# calculate in log domain
# feats is len(sentence) * n_class
if transitions is None:
transitions = self.transitions
device = self.get_device()
batch_size, max_seq_len, target_size = feats.shape
alpha = torch.full((batch_size, 1, target_size), -10000.0, device=device)
alpha[:, 0, self.START_TAG] = 0.0
mask = mask.bool()
for i in range(max_seq_len):
feat = feats[:, i, :]
mask_i = mask[:, i]
alpha = torch.where(mask_i.view(-1, 1, 1), torch.logsumexp(alpha.transpose(1, 2) + feat.unsqueeze(1) + transitions, dim=1, keepdim=True), alpha)
last = torch.logsumexp(alpha.transpose(1, 2) + 0 + transitions[:, [self.STOP_TAG]], dim=1)
score = torch.sum(last)
return score
def _forward_alg(self, feats, transitions=None):
# calculate in log domain
# feats is len(sentence) * n_class
if transitions is None:
transitions = self.transitions
device = self.get_device()
alpha = torch.full((1, self.n_class), -10000.0, device=device)
alpha[0][self.START_TAG] = 0.0
for feat in feats:
alpha = torch.logsumexp(alpha.T + feat.unsqueeze(0) + transitions, dim=0, keepdim=True)
return torch.logsumexp(alpha.T + 0 + transitions[:, [self.STOP_TAG]], dim=0)[0]
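    # The recursion above runs the CRF forward algorithm in log space:
    #   alpha_t(j) = logsumexp_i( alpha_{t-1}(i) + feat_t(j) + transitions[i, j] )
    # and the returned value is the log partition function log Z of the sequence.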
def viterbi_decode_batch(self, feats, mask, transitions=None):
if transitions is None:
transitions = self.transitions
device = self.get_device()
batch_size, max_seq_len, target_size = feats.shape
backtrace = torch.zeros((batch_size, max_seq_len, target_size)).long()
alpha = torch.full((batch_size, 1, target_size), -10000.0, device=device)
alpha[:, 0, self.START_TAG] = 0.0
mask = mask.bool()
for i in range(max_seq_len):
feat = feats[:, i, :]
mask_i = mask[:, i]
smat = (alpha.transpose(1, 2) + feat.unsqueeze(1) + transitions) # (n_class, n_class)
alpha = torch.where(mask_i.view(-1, 1, 1), torch.logsumexp(smat, dim=1, keepdim=True), alpha)
backtrace[:, i, :] = smat.argmax(1)
# backtrack
smat = alpha.transpose(1, 2) + 0 + transitions[:, [self.STOP_TAG]]
best_tag_ids = smat.argmax(1).long()
seq_len = mask.long().sum(1)
best_paths = []
for backtrace_i, best_tag_id, l in zip(backtrace, best_tag_ids, seq_len):
best_path = [best_tag_id.item()]
for bptrs_t in reversed(backtrace_i[1:l]): # ignore START_TAG
best_tag_id = bptrs_t[best_tag_id].item()
best_path.append(best_tag_id)
best_paths.append(best_path[::-1])
return torch.logsumexp(smat, dim=1).squeeze().tolist(), best_paths
def viterbi_decode(self, feats, transitions=None):
if transitions is None:
transitions = self.transitions
device = self.get_device()
backtrace = []
alpha = torch.full((1, self.n_class), -10000.0, device=device)
alpha[0][self.START_TAG] = 0
for feat in feats:
smat = (alpha.T + feat.unsqueeze(0) + transitions) # (n_class, n_class)
backtrace.append(smat.argmax(0)) # column_max
alpha = torch.logsumexp(smat, dim=0, keepdim=True)
# backtrack
smat = alpha.T + 0 + transitions[:, [self.STOP_TAG]]
best_tag_id = smat.flatten().argmax().item()
best_path = [best_tag_id]
for bptrs_t in reversed(backtrace[1:]): # ignore START_TAG
best_tag_id = bptrs_t[best_tag_id].item()
best_path.append(best_tag_id)
return torch.logsumexp(smat, dim=0).item(), best_path[::-1]
def neg_log_likelihood_loss(self, feats, mask, tags, transitions=None):
# sentence, tags is a list of ints
# features is a 2D tensor, len(sentence) * self.n_class
if self.batch_mode:
nll_loss = self._forward_alg_batch(feats, mask, transitions) - self._score_sentence_batch(feats, tags, mask, transitions)
else:
nll_loss = 0.0
batch_size = len(feats)
for i in range(batch_size):
length = mask[i].long().sum()
feat_i = feats[i][:length]
tags_i = tags[i][:length]
forward_score = self._forward_alg(feat_i, transitions)
gold_score = self._score_sentence(feat_i, tags_i, transitions)
nll_loss += forward_score - gold_score
return nll_loss
def forward(self, feats, mask):
# viterbi to get tag_seq
if self.batch_mode:
score, tags = self.viterbi_decode_batch(feats, mask)
else:
tags = []
scores = []
batch_size = len(feats)
for i in range(batch_size):
length = mask[i].long().sum()
feat_i = feats[i][:length]
score, tag_seq = self.viterbi_decode(feat_i)
tags.append(tag_seq)
scores.append(score)
            score = scores  # return the per-sample scores, not just the score of the last sample
        return score, tags
class MultiCRF(CRF):
def __init__(self, n_class, n_source, batch_mode=True):
super(MultiCRF, self).__init__(n_class=n_class, batch_mode=batch_mode)
self.n_source = n_source
init_transitions = torch.randn(n_source, self.n_class, self.n_class)
init_transitions[:, :, self.START_TAG] = -1e5
init_transitions[:, self.STOP_TAG, :] = -1e5
self.transitions = nn.Parameter(init_transitions, requires_grad=True)
def neg_log_likelihood_loss(self, feats, mask, tags, idx=None, attn_weight=None):
if attn_weight is None:
assert idx is not None
transitions = self.transitions[idx]
return super().neg_log_likelihood_loss(feats, mask, tags, transitions)
else:
assert attn_weight is not None, 'weight should not be None in Phase 2!'
transitions_l = torch.tensordot(attn_weight, self.transitions, dims=([1], [0]))
nll_loss = self._forward_alg_batch_w_transitions(feats, mask, transitions_l) - \
self._score_sentence_w_transitions(feats, tags, mask, transitions_l)
return nll_loss
def _score_sentence_w_transitions(self, feats, tags, mask, transitions):
batch_size = tags.size(0)
seq_len = mask.long().sum(1)
r_batch = torch.arange(batch_size)
pad_start_tags = torch.cat([self.start_id.expand(batch_size, 1), tags], -1)
pad_stop_tags = torch.cat([tags, self.stop_id.expand(batch_size, 1)], -1)
pad_stop_tags[r_batch, seq_len] = self.stop_id
t = transitions[r_batch.view(-1, 1), pad_start_tags, pad_stop_tags]
t_score = torch.sum(t.cumsum(1)[r_batch, seq_len])
f_score = torch.sum(torch.gather(feats, -1, tags.unsqueeze(2)).squeeze(2).masked_select(mask.bool()))
score = t_score + f_score
return score
def _forward_alg_batch_w_transitions(self, feats, mask, transitions):
device = self.get_device()
batch_size, max_seq_len, target_size = feats.shape
alpha = torch.full((batch_size, 1, target_size), -10000.0, device=device)
alpha[:, 0, self.START_TAG] = 0.0
mask = mask.bool()
for i in range(max_seq_len):
feat = feats[:, i, :]
mask_i = mask[:, i]
alpha = torch.where(mask_i.view(-1, 1, 1), torch.logsumexp(alpha.transpose(1, 2) + feat.unsqueeze(1) + transitions, dim=1, keepdim=True), alpha)
last = torch.logsumexp(alpha.transpose(1, 2) + 0 + transitions[:, :, [self.STOP_TAG]], dim=1)
score = torch.sum(last)
return score
def viterbi_decode_w_transitions(self, feats, mask, transitions):
device = self.get_device()
batch_size, max_seq_len, target_size = feats.shape
backtrace = torch.zeros((batch_size, max_seq_len, target_size)).long()
alpha = torch.full((batch_size, 1, target_size), -10000.0, device=device)
alpha[:, 0, self.START_TAG] = 0.0
mask = mask.bool()
for i in range(max_seq_len):
feat = feats[:, i, :]
mask_i = mask[:, i]
smat = (alpha.transpose(1, 2) + feat.unsqueeze(1) + transitions) # (n_class, n_class)
alpha = torch.where(mask_i.view(-1, 1, 1), torch.logsumexp(smat, dim=1, keepdim=True), alpha)
backtrace[:, i, :] = smat.argmax(1)
# backtrack
smat = alpha.transpose(1, 2) + 0 + transitions[:, :, [self.STOP_TAG]]
best_tag_ids = smat.argmax(1).long()
seq_len = mask.long().sum(1)
best_paths = []
for backtrace_i, best_tag_id, l in zip(backtrace, best_tag_ids, seq_len):
best_path = [best_tag_id.item()]
for bptrs_t in reversed(backtrace_i[1:l]): # ignore START_TAG
best_tag_id = bptrs_t[best_tag_id].item()
best_path.append(best_tag_id)
best_paths.append(best_path[::-1])
return torch.logsumexp(smat, dim=1).squeeze().tolist(), best_paths
def forward(self, feats, mask, attn_weight):
transitions_l = torch.tensordot(attn_weight, self.transitions, dims=([1], [0]))
return self.viterbi_decode_w_transitions(feats, mask, transitions_l)
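# Minimal usage sketch (illustrative, not part of the original module): an MLP backbone
# applied to a batch of pre-extracted features, following the forward() signatures above.
#   backbone = MLP(n_class=3, input_size=16, n_hidden_layers=2, hidden_size=32)
#   batch = {'features': torch.randn(8, 16)}
#   logits, feats = backbone(batch, return_features=True)  # shapes: (8, 3) and (8, 32)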
|
StarcoderdataPython
|
3532382
|
from collections import defaultdict
# Input
N = int(input())
A = list(map(int, input().split()))
# dp[s]: number of prefixes i with A_1 + ... + A_i = s (the empty prefix gives dp[0] = 1)
dp = defaultdict(int, {0: 1})
# Running prefix sum
s = 0
# Answer
ans = 0
# For each i, count earlier indices j < i with A_1 + ... + A_i = A_1 + ... + A_j,
# i.e. the number of zero-sum subarrays ending at i
for a in A:
    s += a
    ans += dp[s]
    dp[s] += 1
# Output
print(ans)
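# Worked example (illustrative, not part of the original submission):
# A = [1, 3, -4, 2, 2] gives prefix sums 0, 1, 4, 0, 2, 4.
# Equal prefix sums occur at positions {0, 3} and {2, 5}, so ans = 2,
# matching the zero-sum subarrays [1, 3, -4] and [-4, 2, 2].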
|
StarcoderdataPython
|
8015810
|
from keras.layers import Dense, Input
from keras.models import Model
from keras.utils import np_utils
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
iris = datasets.load_iris()
X, y = iris.data, iris.target
def create_nn() -> Model:
"""
    Build, train and evaluate a small feed-forward classifier on the Iris dataset.
"""
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=0
)
y_train_one_hot = np_utils.to_categorical(y_train)
y_test_one_hot = np_utils.to_categorical(y_test)
inp = Input(shape=(4,))
x = Dense(16, activation="sigmoid")(inp)
out = Dense(3, activation="softmax")(x)
model = Model(inputs=inp, outputs=out)
model.compile(
optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"]
)
model.fit(X_train, y_train_one_hot, epochs=100, batch_size=1, verbose=0)
loss, accuracy = model.evaluate(X_test, y_test_one_hot, verbose=0)
print("Accuracy = {:.2f}".format(accuracy))
return model
if __name__ == "__main__":
create_nn()
|
StarcoderdataPython
|
5183882
|
"""
MIT License
Copyright (c) 2021 AkshuAgarwal
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import logging
import traceback
from typing import Union, Optional
from discord import (
Client,
AutoShardedClient,
VoiceChannel,
Invite,
)
from discord.http import Route
from discord.ext.commands import Bot, AutoShardedBot, BotMissingPermissions
from .errors import (
DCActivityException,
APIException,
InvalidChannel,
InvalidApplicationID,
)
__all__ = ('DCApplication', 'DCActivity', )
log = logging.getLogger(__name__)
class DCApplication:
"""Available Voice Channel Target Application's IDs."""
betrayal = 773336526917861400
chess = 832012586023256104
fishing = 814288819477020702
poker = 755827207812677713
youtube = 755600276941176913
class DCActivity:
"""Represents DCActivity Connection class.
This class is used to interact with Discord API to create Voice
Channel Invite Links to use Discord's Beta Voice Channel Activities features.
Parameters
-----------
bot: Union[:class:`.Client`, :class:`.AutoShardedClient`, :class:`.Bot`, :class:`.AutoShardedBot`]
The Main Bot class of the Bot.
Raises
-------
:exc:`TypeError`
Invalid class type passed in bot parameter.
"""
def __init__(
self,
bot: Union[Client, AutoShardedClient, Bot, AutoShardedBot]
):
if isinstance(bot, (Client, AutoShardedClient, Bot, AutoShardedBot)):
self.bot = bot
log.info(f'Created DCActivity object with {bot} as bot instance.')
else:
raise TypeError(
'Invalid Client/Bot object parameter passed. '
'Should be discord.Client/AutoShardedClient/Bot/AutoShardedBot type')
self._applications: dict = {
'betrayal': DCApplication.betrayal,
'chess': DCApplication.chess,
'fishing': DCApplication.fishing,
'poker': DCApplication.poker,
'youtube': DCApplication.youtube,
}
@property
def applications(self):
return self._applications
async def _create_invite_object(self, response: dict) -> Invite:
"""Internal Method to create a discord.Invite object from a response JSON."""
if 'errors' in response:
_exc = "\n".join(f"{exc['code']}: {exc['message']}" for exc in response['errors']['_errors'])
raise DCActivityException(
                f'Error occurred while creating Invite.\n {_exc}')
elif isinstance(response['code'], int) and int(response['code']) == 0:
raise DCActivityException(
                f'Error occurred while creating Invite.\n Code: {response["code"]}, Message: {response["message"]}')
return Invite(state=self.bot._connection, data=response)
async def create_invite(
self,
voice_channel: Union[VoiceChannel, int],
application: Union[str, int],
*,
max_age: Optional[int] = 86400,
max_uses: Optional[int] = 0,
) -> Invite:
"""|coro|
Retrieves an Invite Link with Voice Channel Activities for the VoiceChannel passed.
Parameters
-----------
voice_channel: Union[:class:`int`, :class:`.VoiceChannel`]
The Voice Channel to create Voice Channel Activity Invite Link for.
application: Union[:class:`str`, :class:`int`, :class:`.DCApplication`]
The Activity Type to create Invite Link for.
max_age: Optional[:class:`int`]
How long the invite should last in seconds. If it’s 0 then the invite doesn’t expire. Should be between 0 to 604800 seconds (7 days). Defaults to 86400 (24 Hours).
max_uses: Optional[:class:`int`]
How many uses the invite could be used for. If it’s 0 then there are unlimited uses. Should be between 0 to 100. Defaults to 0.
Raises
-------
:exc:`TypeError`
Invalid class type passed in voice_channel or application.
:exc:`ValueError`
Any Value passed is Invalid/Not Acceptable.
:exc:`.InvalidChannel`
Voice Channel passed is Invalid.
:exc:`.BotMissingPermissions`
Bot is missing permissions to create invites.
:exc:`.InvalidApplicationID`
Application ID passed is Invalid.
:exc:`.APIException`
API is overloaded while creating Invite.
:exc:`.DCActivityException`
            Creating the Invite link failed.
Returns
--------
:class:`.Invite`
The Invite that was Created."""
if isinstance(voice_channel, VoiceChannel):
_vc_id = voice_channel.id
elif isinstance(voice_channel, int):
_vc_id = voice_channel
else:
raise TypeError(
'voice_channel parameter must be integer or '
f'discord.VoiceChannel type and not "{type(voice_channel).__name__}"'
)
if isinstance(application, str):
if application.lower().replace(' ', '') in self._applications:
_app_id: int = self._applications[
application.lower().replace(' ', '')]
else:
raise ValueError('Invalid application type passed. '
f'Should be one from {"/".join(i for i in self._applications.keys())}.')
elif isinstance(application, int):
_app_id: int = application
else:
raise TypeError(
'application parameter must be string or integer '
f'and not "{type(application).__name__}"')
if max_uses < 0 or max_uses > 100:
raise ValueError(
'max_uses is limited in the range 0 to 100. '
'Choose between the given range.')
if max_age < 0 or max_age > 604800:
raise ValueError(
'max_age is limited in the range 0 to 604800 seconds. '
'Choose between the given range.')
payload = {
'max_age': max_age,
'max_uses': max_uses,
'target_application_id': str(_app_id),
'target_type': 2,
'temporary': False,
'validate': None
}
try:
response = await self.bot.http.request(
Route('POST', f'/channels/{_vc_id}/invites'), json=payload
)
log.debug(f'Create invite link for target_application_id: {payload["target_application_id"]}')
except Exception as e:
if '10003' in str(e):
raise InvalidChannel('Invalid Channel/ID passed.')
elif '50013' in str(e):
raise BotMissingPermissions(['create_instant_invite'])
elif 'target_application_id' in str(e):
raise InvalidApplicationID(f'Invalid Application ID ({_app_id}) passed.')
elif '130000' in str(e):
                log.warning('API Resource overloaded.')
raise APIException(
'API resource is currently overloaded. '
'Try again a little later.')
else:
                log.debug(f'Exception occurred on application: {application}; Exception: {e}')
traceback.print_exc()
raise DCActivityException(
                    'Some exception occurred while creating invite.\n'
f'Exception: {e}'
)
return await self._create_invite_object(response)
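# Minimal usage sketch (illustrative, not part of the original module):
#   dcactivity = DCActivity(bot)
#   invite = await dcactivity.create_invite(voice_channel, 'youtube', max_age=3600)
#   await ctx.send(invite.url)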
|
StarcoderdataPython
|
3218871
|
# Generated by Django 3.0.6 on 2020-07-30 20:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0008_auto_20200730_1452'),
]
operations = [
migrations.AlterField(
model_name='fish',
name='description',
field=models.CharField(blank=True, default='M', max_length=1),
),
]
|
StarcoderdataPython
|
186812
|
<reponame>BananaLoaf/restpass<gh_stars>1-10
from restpass.generator import Generator
CHECK = ["y", "eL", "7w1", "uwYO", "1HcR8", "jbGAV4", "KrOoKrg", "gBzUnbWk", "xkFn85JHz", "Sj4WS93zPX"]
if __name__ == "__main__":
phrase = "Hello, World!"
generator = Generator(phrase)
generator.set_rules(digits=True, lowercase=True, uppercase=True)
for i in range(1, 11):
result = generator.generate(i)
assert result == CHECK[i - 1]
print(f"{i} - {result}")
result = generator.generate(10)
assert result == "Sj4WS93zPX"
print("No salt:", result) # No salt
# Sj4WS93zPX
generator.set_salt(b"Some salt")
result = generator.generate(10)
assert result == "0mIGvFXpoH"
print("Some salt:", result)
# 0mIGvFXpoH
generator.set_salt(b"Some other salt")
result = generator.generate(10)
assert result == "iOkyf1VXOJ"
print("Some other salt:", result)
# iOkyf1VXOJ
generator.set_salt(b"Some other salt", b"And a little more salt", b"Overwatch players")
result = generator.generate(10)
assert result == "IqCOWdHxgo"
print("Some other salt, and a little more salt:", result)
# IqCOWdHxgo
|
StarcoderdataPython
|
11224668
|
"""A large crowd-sourced dataset for developing natural language interfaces for relational databases"""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
_CITATION = """\
@article{zhongSeq2SQL2017,
author = {<NAME> and
<NAME> and
<NAME>},
title = {Seq2SQL: Generating Structured Queries from Natural Language using
Reinforcement Learning},
journal = {CoRR},
volume = {abs/1709.00103},
year = {2017}
}
"""
_DESCRIPTION = """\
A large crowd-sourced dataset for developing natural language interfaces for relational databases
"""
_DATA_URL = "https://github.com/salesforce/WikiSQL/raw/master/data.tar.bz2"
_AGG_OPS = ["", "MAX", "MIN", "COUNT", "SUM", "AVG"]
_COND_OPS = ["=", ">", "<", "OP"]
class WikiSQL(nlp.GeneratorBasedBuilder):
"""WikiSQL: A large crowd-sourced dataset for developing natural language interfaces for relational databases"""
VERSION = nlp.Version("0.1.0")
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features(
{
"phase": nlp.Value("int32"),
"question": nlp.Value("string"),
"table": {
"header": nlp.features.Sequence(nlp.Value("string")),
"page_title": nlp.Value("string"),
"page_id": nlp.Value("string"),
"types": nlp.features.Sequence(nlp.Value("string")),
"id": nlp.Value("string"),
"section_title": nlp.Value("string"),
"caption": nlp.Value("string"),
"rows": nlp.features.Sequence(nlp.features.Sequence(nlp.Value("string"))),
"name": nlp.Value("string"),
},
"sql": {
"human_readable": nlp.Value("string"),
"sel": nlp.Value("int32"),
"agg": nlp.Value("int32"),
"conds": nlp.features.Sequence(
{
"column_index": nlp.Value("int32"),
"operator_index": nlp.Value("int32"),
"condition": nlp.Value("string"),
}
),
},
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://github.com/salesforce/WikiSQL",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
dl_dir = dl_manager.download_and_extract(_DATA_URL)
dl_dir = os.path.join(dl_dir, "data")
return [
nlp.SplitGenerator(
name=nlp.Split.TEST,
gen_kwargs={
"main_filepath": os.path.join(dl_dir, "test.jsonl"),
"tables_filepath": os.path.join(dl_dir, "test.tables.jsonl"),
},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
gen_kwargs={
"main_filepath": os.path.join(dl_dir, "dev.jsonl"),
"tables_filepath": os.path.join(dl_dir, "dev.tables.jsonl"),
},
),
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
gen_kwargs={
"main_filepath": os.path.join(dl_dir, "train.jsonl"),
"tables_filepath": os.path.join(dl_dir, "train.tables.jsonl"),
},
),
]
def _convert_to_human_readable(self, sel, agg, columns, conditions):
"""Make SQL query string. Based on https://github.com/salesforce/WikiSQL/blob/c2ed4f9b22db1cc2721805d53e6e76e07e2ccbdc/lib/query.py#L10"""
rep = "SELECT {agg} {sel} FROM table".format(
agg=_AGG_OPS[agg], sel=columns[sel] if columns is not None else "col{}".format(sel)
)
if conditions:
rep += " WHERE " + " AND ".join(["{} {} {}".format(columns[i], _COND_OPS[o], v) for i, o, v in conditions])
return " ".join(rep.split())
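    # Illustrative example (not from the original file): with columns ["Player", "Team"],
    # sel=0, agg=3 and conds=[[1, 0, "Arsenal"]] this returns
    # "SELECT COUNT Player FROM table WHERE Team = Arsenal".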
def _generate_examples(self, main_filepath, tables_filepath):
"""Yields examples."""
# Build dictionary to table_ids:tables
with open(tables_filepath) as f:
tables = [json.loads(line) for line in f]
id_to_tables = {x["id"]: x for x in tables}
with open(main_filepath) as f:
for idx, line in enumerate(f):
row = json.loads(line)
row["table"] = id_to_tables[row["table_id"]]
del row["table_id"]
# Handle missing data
row["table"]["page_title"] = row["table"].get("page_title", "")
row["table"]["section_title"] = row["table"].get("section_title", "")
row["table"]["caption"] = row["table"].get("caption", "")
row["table"]["name"] = row["table"].get("name", "")
row["table"]["page_id"] = str(row["table"].get("page_id", ""))
# Fix row types
row["table"]["rows"] = [[str(e) for e in r] for r in row["table"]["rows"]]
# Get human-readable version
row["sql"]["human_readable"] = self._convert_to_human_readable(
row["sql"]["sel"], row["sql"]["agg"], row["table"]["header"], row["sql"]["conds"],
)
# Restructure sql->conds
# - wikiSQL provides a tuple [column_index, operator_index, condition]
# as 'condition' can have 2 types (float or str) we convert to dict
for i in range(len(row["sql"]["conds"])):
row["sql"]["conds"][i] = {
"column_index": row["sql"]["conds"][i][0],
"operator_index": row["sql"]["conds"][i][1],
"condition": str(row["sql"]["conds"][i][2]),
}
yield idx, row
|
StarcoderdataPython
|
6622892
|
<reponame>adborden/WeVoteBase
# import_export_open_civic_data/models.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from django.db import models
# https://github.com/opencivicdata/python-opencivicdata-django
# There are models for the ocd data types
# Other Open Civic Data identifiers that refer to the same division -- for example, those that refer to other
# political divisions whose boundaries are defined to be coterminous with this one.
# For example, ocd-division/country:us/state:wy will include an alias of ocd-division/country:us/state:wy/cd:1,
# since Wyoming has only one Congressional district.
#
# Division Identifiers here:
# Master CSV files with ocd-division-ids
# https://github.com/opencivicdata/ocd-division-ids/tree/master/identifiers
# https://raw.githubusercontent.com/opencivicdata/ocd-division-ids/master/identifiers/country-us.csv
# id,name,sameAs,sameAsNote,validThrough,census_geoid,census_geoid_12,census_geoid_14,openstates_district,placeholder_id,sch_dist_stateid,state_id
# ocd-division/country:us,United States,,,,,,,,,,
# ocd-division/country:us/court_of_appeals:1,United States Court of Appeals for 1st Circuit,,,,,,,,,,
# ocd-division/country:us/court_of_appeals:1/district_court:maine,United States District Court for District of Maine,,,,,,,,,,
# TODO create importer and table to cache this data
### Pulling out geographic divisions
# country / state /
# cd # congressional district, uses census_geoid ex/ ocd-division/country:us/state:ca/cd:12
# circuit_court
# county
# council_district
# school_district
# precinct
# parish
# council_district
# school_district
# precinct
# ward
# council_district
# school_district
# precinct
# place - uses census_geoid
# sldl # State legislature district, lower
# sldu # State legislature district, upper
# country / territory /
# municipio
# sldl # State legislature district, lower
# sldu # State legislature district, upper
|
StarcoderdataPython
|
8126920
|
<filename>trees-and-graphs/reconstruct-a-binary-tree-from-a-preorder-traversal-with-markers.py
# 9.13 in Elements of Programming Interviews in Python (Sep 15, 2016)
# Design an algorithm for reconstructing a binary tree from a preorder traversal
# visit sequence that uses null to mark empty children.
import unittest
# time complexity O(len(sequence))
# space complexity O(len(sequence))
class BinaryTreeNode():
def __init__(self, data=None, left=None, right=None):
self.data = data
self.left = left
self.right = right
def reconstruct_preorder(sequence):
def reconstruct_preorder_helper(sequence_iter):
node = next(sequence_iter)
if node == None:
return None
left_subtree = reconstruct_preorder_helper(sequence_iter)
right_subtree = reconstruct_preorder_helper(sequence_iter)
return BinaryTreeNode(node, left_subtree, right_subtree)
return reconstruct_preorder_helper(iter(sequence))
class Test(unittest.TestCase):
def test_reconstruct_preorder(self):
sequence = ['H', 'B', 'F', None, None, 'E', 'A', None, None, None, \
'C', None, 'D', None, 'G', 'I', None, None, None]
tree = reconstruct_preorder(sequence)
def preorder_w_markers(tree):
sequence = []
preorder_w_markers_helper(tree, sequence)
return sequence
def preorder_w_markers_helper(tree, sequence):
if tree:
sequence.append(tree.data)
if tree.left == None:
sequence.append(None)
else:
preorder_w_markers_helper(tree.left, sequence)
if tree.right == None:
sequence.append(None)
else:
preorder_w_markers_helper(tree.right, sequence)
sequence_2 = preorder_w_markers(tree)
        self.assertEqual(sequence, sequence_2)
if __name__ == "__main__":
unittest.main()
|
StarcoderdataPython
|
6703808
|
import datetime as dt
import pytest
from note_clerk import planning
@pytest.mark.parametrize(
"date, quarter",
[
(dt.datetime(2020, 1, 1), dt.datetime(2020, 1, 1)),
(dt.datetime(2020, 1, 2), dt.datetime(2020, 1, 1)),
(dt.datetime(2020, 4, 1), dt.datetime(2020, 4, 1)),
(dt.datetime(2020, 4, 2), dt.datetime(2020, 4, 1)),
(dt.datetime(2020, 5, 2), dt.datetime(2020, 4, 1)),
(dt.datetime(2020, 6, 2), dt.datetime(2020, 4, 1)),
(dt.datetime(2020, 7, 2), dt.datetime(2020, 7, 1)),
(dt.datetime(2020, 8, 2), dt.datetime(2020, 7, 1)),
(dt.datetime(2020, 9, 2), dt.datetime(2020, 7, 1)),
(dt.datetime(2020, 10, 2), dt.datetime(2020, 10, 1)),
(dt.datetime(2020, 11, 2), dt.datetime(2020, 10, 1)),
(dt.datetime(2020, 12, 2), dt.datetime(2020, 10, 1)),
],
)
def test_quarter_start(date: dt.datetime, quarter: dt.datetime) -> None:
adjusted = planning.quarter_start(date)
assert adjusted == quarter
def print_with_header(header: str, text: str) -> None:
line = "*" * (len(header) + 4)
print(f"{line}\n* {header} *\n{line}\n{text}")
|
StarcoderdataPython
|
8121914
|
<reponame>vepakom/SplitLearning_Inference
from algos.simba_algo import SimbaDefence
class SplitInference(SimbaDefence):
def __init__(self, config, utils) -> None:
super(SplitInference, self).__init__(utils)
self.initialize(config)
def initialize(self, config):
self.client_model = self.init_client_model(config)
self.put_on_gpus()
self.utils.register_model("client_model", self.client_model)
self.optim = self.init_optim(config, self.client_model)
def forward(self, items):
x = items["x"]
self.z = self.client_model(x)
# z will be detached to prevent any grad flow from the client
z = self.z.detach()
z.requires_grad = True
return z
def backward(self, items):
server_grads = items["server_grads"]
self.optim.zero_grad()
self.z.backward(server_grads)
self.optim.step()
|
StarcoderdataPython
|
8094550
|
<gh_stars>10-100
# from . import core # DONT import core. this project module should be relatively independent
from buildercore import utils, config
from kids.cache import cache
from . import files
import copy
import logging
from functools import reduce
LOG = logging.getLogger(__name__)
#
# project data utilities
#
def set_project_alt(pdata, env, altkey):
"non-destructive update of given project data with the specified alternative configuration."
    assert env in ['vagrant', 'aws', 'gcp'], "'env' must be one of 'vagrant', 'aws' or 'gcp'"
env_key = env + '-alt'
assert altkey in pdata[env_key], "project has no alternative config %r. Available: %s" % (altkey, list(pdata[env_key].keys()))
pdata_copy = copy.deepcopy(pdata) # don't modify the data given to us
pdata_copy[env] = pdata[env_key][altkey]
return pdata_copy
def find_project(project_location_triple):
"given a triple of (protocol, hostname, path) returns a map of {org => project data}"
plt = project_location_triple
assert utils.iterable(plt), "given triple must be a collection of three values"
assert len(project_location_triple) == 3, "triple must contain three values. got: %r" % project_location_triple
protocol, hostname, path = plt
fnmap = {
# 'file': OrgFileProjects,
'file': files.projects_from_file,
# 'ssh': RemoteBuilderProjects,
# 'https': RemoteBuilderProjects,
}
    if protocol not in fnmap:
LOG.info("unhandled protocol %r for %r" % (protocol, plt))
return {} # OrderedDict({})
return fnmap[protocol](path, hostname)
@cache
def _project_map(project_locations_list=None):
"""returns a single map of all projects and their data"""
def merge(orderedDict1, orderedDict2):
orderedDict1.update(orderedDict2)
return orderedDict1
project_locations_list = config.app()['project-locations']
# ll: {'dummy-project1': {'lax': {'aws': ..., 'vagrant': ..., 'salt': ...}, 'metrics': {...}},
# 'dummy-project2': {'example': {}}}
data = map(find_project, project_locations_list)
opm = reduce(merge, data)
# ll: [{'lax': {'aws': ..., 'vagrant': ..., 'salt': ...}, 'metrics': {...}}], {'example': {}}]
data = opm.values()
# ll: {'lax': {...}, 'metrics': {...}, 'example': {...}}
return reduce(merge, data)
def project_map(project_locations_list=None):
"""returns a deepcopy of the cached `_project_map` results.
    `cfngen.build_context` is one of probably many functions that modify the project data, unintentionally changing it for all subsequent accesses, including during tests.
    This approach should be safer: it avoids the speed problem of re-parsing the project files, at the cost of a deepcopy."""
return utils.deepcopy(_project_map(project_locations_list))
def project_list():
"returns a single list of projects, ignoring organization and project data"
return list(project_map().keys())
def project_data(pname):
"returns the data for a single project."
data = project_map()
try:
return data[pname]
except KeyError:
raise ValueError("unknown project %r, known projects %r" % (pname, list(data.keys())))
#
#
#
def filtered_projects(filterfn, *args, **kwargs):
"returns a dict of projects filtered by given filterfn)"
return utils.dictfilter(filterfn, project_map(*args, **kwargs))
def branch_deployable_projects(*args, **kwargs):
"returns a pair of (defaults, dict of projects with a repo)"
return filtered_projects(lambda pname, pdata: 'repo' in pdata, *args, **kwargs)
def projects_with_formulas(*args, **kwargs):
return filtered_projects(lambda pname, pdata: pdata.get('formula-repo'), *args, **kwargs)
def aws_projects(*args, **kwargs):
return filtered_projects(lambda pname, pdata: 'aws' in pdata, *args, **kwargs)
def ec2_projects(*args, **kwargs):
return filtered_projects(lambda pname, pdata: pdata.get('aws', {}).get('ec2'), *args, **kwargs)
#
#
#
def project_formulas():
def fn(pname, pdata):
return [pdata.get('formula-repo')] + pdata.get('formula-dependencies', [])
return utils.dictmap(fn, project_map())
#
#
#
def known_formulas():
"a simple list of all known project formulas"
return utils.lfilter(None, utils.unique(utils.shallow_flatten(project_formulas().values())))
|
StarcoderdataPython
|
4851098
|
#python
import pyodbc #for mssql connection with python
#connect db
conn = pyodbc.connect('Driver={SQL Server};'
'Server=USBLRVDEEPAK1;'
'Database=dbTemp;'
'Trusted_Connection=yes;')
#get table names
def list_of_tables(conn,dbName):
cursor = conn.cursor()
statement = "SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE = 'BASE TABLE' AND TABLE_CATALOG='" + dbName +"'"
tables = cursor.execute(statement)
return [table[0] for table in tables]
#pipe seperated
def preprocess(values,flag=False):
st = ''
for val in values:
if flag:
st += str(val[0])+'|'
else:
st += str(val)+'|'
return st[:-1]
#read the rows
def read(conn,table,folderName):
cursor = conn.cursor()
statement = "select * from "+table
rows = cursor.execute(statement)
filename = folderName+'/'+table+'.txt'
file = open(filename,'w')
file.write(preprocess(cursor.description,True)+'\n')
for row in rows:
file.write(preprocess(row)+'\n')
file.close()
#generate for all TABLES
def generateForAll(conn,dbName,folderName):
tables = list_of_tables(conn,dbName)
#construct
for table in tables:
read(conn,table,folderName)
#generate for one
def generateForParticular(conn, TableName, folderName):
    read(conn, TableName, folderName)
# Create a folder named 'tables' before running; the exported files are written there
generateForAll(conn,'dbTemp','tables')
'''
T = no of tables
R = no of rows of each table
Time complexity : O(TxR)
Space complexity : O(T)
'''
|
StarcoderdataPython
|
9605441
|
<reponame>LIIR-KULeuven/CLDR_CLNER_models<filename>CLDR/final_run/main.py<gh_stars>10-100
import json
import sys
import random
import os
import shutil
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.nn.modules.module import Module
from torch.nn.parameter import Parameter
import configparser
import math
from utils_ import *
from loss import *
from train import *
parser = configparser.ConfigParser()
parser.read("./../../configs/train_RE_final_run.conf")
CHARACTER_BERT_PATH = parser.get("config", "characterBERT_path")
sys.path.append(CHARACTER_BERT_PATH)
from utils.character_cnn import CharacterIndexer
from modeling.character_bert import CharacterBertModel
# Read the given arguments
SPLIT_NUM = parser.get("config", "split_num")
EPOCHS = int(parser.get("config", "epochs"))
BATCH_SIZE = int(parser.get("config", "batch_size"))
NEG_SAMPLES = int(parser.get("config", "neg_samples"))
PATH_OUT = parser.get("config", "path_out")
PATH_OUT = PATH_OUT + 'split_' + str(SPLIT_NUM) + '/'
# Create the output directory if it doesn't exist.
if not os.path.exists(PATH_OUT):
os.makedirs(PATH_OUT)
# For the dataloader
ADJ_WEIGHT = float(parser.get("config", "adj_weight"))
PATH_IN_GENERAL = parser.get("config", "path_in_general")
PATH_IN_GRAPH = parser.get("config", "path_in_graph")
class GraphConvolution(nn.Module):
# GCN layer based on https://arxiv.org/abs/1609.02907
def __init__(self, in_features, out_features, bias=True, init='xavier'):
super(GraphConvolution, self).__init__()
self.in_features = in_features
self.out_features = out_features
# Initialize a matrix for the weights
self.weight = Parameter(torch.FloatTensor(in_features, out_features))
# If a bias vector will be included then initialize it.
if bias:
self.bias = Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter('bias', None)
# Initialization of the weights.
if init == 'uniform':
print('Uniform Initialization')
self.reset_parameters_uniform()
elif init == 'xavier':
print('Xavier Initialization')
self.reset_parameters_xavier()
elif init == 'kaiming':
print('Kaiming Initialization')
self.reset_parameters_kaiming()
else:
raise NotImplementedError
def reset_parameters_uniform(self):
stdv = 1. / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def reset_parameters_xavier(self):
        # Xavier (Glorot) initialization using a normal distribution
nn.init.xavier_normal_(self.weight.data, gain=0.02)
if self.bias is not None:
nn.init.constant_(self.bias.data, 0.0)
def reset_parameters_kaiming(self):
nn.init.kaiming_normal_(self.weight.data, a=0, mode='fan_in')
if self.bias is not None:
nn.init.constant_(self.bias.data, 0.0)
def forward(self, input, adj):
support = torch.mm(input, self.weight)
output = torch.spmm(adj, support)
if self.bias is not None:
return output + self.bias
else:
return output
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features) + ' ->' + str(self.out_features) + ')'
class Model_RE(nn.Module):
def __init__(self, nfeat, nhid1, device, init_gc_weights, characterBERT_path):
super(Model_RE, self).__init__()
self.device = device
# Graph part
self.gc1 = GraphConvolution(nfeat, nhid1, init=init_gc_weights)
self.activation_function = nn.ReLU()
# Text encoder part
self.characterBERT_path = characterBERT_path + 'pretrained-models/medical_character_bert/'
self.characterBERT = CharacterBertModel.from_pretrained(self.characterBERT_path)
# Freeze the first 6 encoding layers and the initial embedding layer
modules = [self.characterBERT.embeddings, *self.characterBERT.encoder.layer[:6]]
for module in modules:
for param in module.parameters():
param.requires_grad = False
def graph_forward(self, x, adj):
x = x.to(self.device)
adj = adj.to(self.device)
x = self.gc1(x, adj)
x = self.activation_function(x)
x_conc = torch.flatten(x)
x_conc_un = x_conc.unsqueeze(0)
return x_conc_un
def forward(self, x, adj, sent_id, mask, indexes_of_pairs):
'''
x: the feature matrix for the GCN part for the different graphs
adj: the normalized adjacency matrix
sent_id: the encoded sentence (containing [CLS], [SEP] and [PAD] tokens)
        mask: the masking of the sentence (indication of a true or padded token)
        indexes_of_pairs: for each relation, the token index pairs used to build its
            representation (the first pair is the correct one, the rest are negatives)
        '''
# Perform the forward pass on the GCN part
graph_out = []
for r_id in x:
tmp_r = []
for g in r_id:
tmp_r.append(self.graph_forward(g, adj))
# Create one tensor for the output of the GCN layer.
# It has the concatenated output of each graph.
tmp_r_tensor = torch.cat(tmp_r, 0)
graph_out.append(tmp_r_tensor)
# Perform the forward pass on the Text Encoder part
# - 1: a tensor with the embeddings of the final layer for each token
# - 2: a tensor with the average embeddings (all tokens considered) of the final layer
output = self.characterBERT(sent_id, attention_mask=mask)
# Isolate the representations which are related to the relations in the text.
sent_out = []
for r_id in indexes_of_pairs:
tmp_r_sent = []
for pair in r_id:
selected_tokens = output[0][0][pair]
# Create one representation for each relation using concatenation.
relation_representation = torch.flatten(selected_tokens)
relation_representation = self.activation_function(relation_representation)
relation_representation_un = relation_representation.unsqueeze(0)
tmp_r_sent.append(relation_representation_un)
# Create one tensor for the selection of the tokens.
# It has the concatenated "relation representations" based on encoder output tokens.
# The first representation is the correct one and the rest are wrong (negative).
tmp_r_sent_tensor = torch.cat(tmp_r_sent, 0)
sent_out.append(tmp_r_sent_tensor)
return graph_out, sent_out, output[0]
def prepare_dataloaders():
# Read the file with the CV splits
with open('../../cv_splits.json') as json_file:
cv_splits = json.load(json_file)
# Find the path files for training
train_files = []
for f in cv_splits['split_' + SPLIT_NUM]['train set']:
#train_files.append('../../../Iteration_3/data/files/' + f + '.json')
train_files.append(f + '.json')
# Define the data loaders
dataset_train = ADE_Dataset(filenames=train_files,
path_in_general=PATH_IN_GENERAL,
path_in_graphs=PATH_IN_GRAPH,
number_of_negative_adj=NEG_SAMPLES,
adj_weight=ADJ_WEIGHT)
# Dataloaders
dataloader_train = DataLoader(dataset_train,
batch_size=BATCH_SIZE,
shuffle=True,
collate_fn=my_collate)
return dataloader_train
if __name__ == "__main__":
# Define the running device by checking if a GPU is available
if torch.cuda.is_available():
device = torch.device("cuda")
else:
device = torch.device("cpu")
# Define the model
model = Model_RE(nfeat=768,
nhid1=768,
device=device,
init_gc_weights='kaiming',
characterBERT_path = CHARACTER_BERT_PATH)
model.to(device)
# Initialize the optimizer
optimizer = optim.Adam(model.parameters(), lr=0.00001, weight_decay=0.000001)
# Initialize the loss function
loss_InfoNCE = InfoNCE_loss_vectorized(temperature=0.1)
dataloader_train = prepare_dataloaders()
# Train the model
model_trained, losses_dict = train(model = model,
optimizer = optimizer,
loss_graph_text = loss_InfoNCE,
train_loader = dataloader_train,
epochs = EPOCHS,
checkpoint_path = PATH_OUT,
device = device)
# Save the losses
with open(PATH_OUT + 'losses.json', 'w') as fp:
json.dump(losses_dict, fp)
|
StarcoderdataPython
|
9782142
|
<reponame>forsvarir/python-refactoring-exercises
from enum import Enum
import random
import time
PAUSE_PERIOD = 0
EVEN_BONUS = 10
ODD_PENALTY = 5
MINIMUM_SCORE = 0
class Player:
def __init__(self, name, score = 0):
self.name = name
self.score = score
def add_dice(score1, score2):
return score1 + score2
def rolled_double(total, roller):
even_score = rolled_even(total)
double_score = even_score + roller()
print("Amazing, you rolled a double! Your new score is", double_score)
return double_score
def rolled_even(total):
print("You rolled an even total! You get an extra 10 points!")
new_total = total + EVEN_BONUS
print("Your new total is",new_total)
return new_total
def rolled_low_odd(total):
rolled_odd(total)
print("You cannot go below 0, your new total is 0")
return MINIMUM_SCORE
def rolled_odd(total):
print("You rolled an odd total, you lose 5 points")
new_total = total - ODD_PENALTY
print("Your new total is", new_total)
return new_total
def calculate_score(score1, score2, roller = lambda : random_roller()):
dice_total = add_dice(score1, score2)
if(score1 == score2):
return rolled_double(dice_total, roller)
if (dice_total % 2 == 0):
return rolled_even(dice_total)
if (dice_total < ODD_PENALTY):
return rolled_low_odd(dice_total)
return rolled_odd(dice_total)
def pause():
time.sleep(PAUSE_PERIOD)
def random_roller():
return random.randint(1,6)
def play_round(name, roller = lambda : random_roller()):
dice_1 = roller()
dice_2 = roller()
print(name + " has rolled " + str(dice_1) + " and " + str(dice_2))
pause()
print(name + " has rolled a total of " + str(add_dice(dice_1, dice_2)))
return calculate_score(dice_1, dice_2, roller)
def is_valid_name(names, name):
return name in names
def is_invalid_name(names, name):
return not is_valid_name(names, name)
def read_all_lines_from_file(filename):
with open(filename, 'r') as source_file:
return source_file.readlines()
def strip_newlines(items):
return list(map(lambda s: s.rstrip('\n'), items))
def update_scores_file(players):
with open("scores.txt", "a") as source_file:
for player in players:
source_file.write(player.name + ", " + str(player.score) + "\n")
def evaluate_winner(player1, player2, roller = lambda: random_roller()):
winner = ""
print(player1.name, "has a score of", player1.score, "and", player2.name, "has a score of", player2.score)
if player1.score > player2.score:
return player1
if player1.score < player2.score:
return player2
while True:
print("It's a draw!")
p1Roll = roller()
p2Roll = roller()
print(player1.name, "rolled", p1Roll, "and", player2.name, "rolled", p2Roll)
if p1Roll > p2Roll:
return player1
if p1Roll < p2Roll:
return player2
def ask_for_name(name):
return input(name + ", what is your name?: ")
def play_game(whitelist):
players = [Player(ask_for_name("Player 1")),Player(ask_for_name("Player 2"))]
if any(is_invalid_name(whitelist, player.name) for player in players):
print("Incorrect names")
exit()
for round_number in range(1, 7):
print("Round",round_number)
pause()
for player in players:
player.score += play_round(player.name)
pause()
winner = evaluate_winner(players[0], players[1])
print(winner.name + " has won!")
update_scores_file(players)
if __name__ == "__main__":
play_game(strip_newlines(read_all_lines_from_file("whitelist.txt")))
|
StarcoderdataPython
|
12843633
|
import ipaddress
# =============================================== DEFAULT CONFIGURATION ================================================
# Default port to bind the translator's unicast server socket to.
DEFAULT_UNICAST_SRV_PORT = 9001
# Default address space to pick multicast destination addresses (groups) from for the translated unicast streams.
DEFAULT_MULTICAST_ADDR_SPACE = ipaddress.IPv4Network('172.16.58.3/8')
# Default port to use when forwarding payload received on the translator's unicast server socket as multicast.
DEFAULT_MULTICAST_PORT = 9002
# URL to use when submitting stream information to the Multicast Menu
MULTICASTMENU_ADD_URL = 'https://multicastmenu.herokuapp.com/add/'
# Email address to use when submitting stream information to the Multicast Menu. Lenny has OK'ed using his email address
# until we have a group email.
MULTICASTMENU_EMAIL = '<EMAIL>'
# Number of worker threads dedicated to submitting stream information to the Multicast Menu.
MULTICASTMENU_THREADS = 10
# ======================================================================================================================
|
StarcoderdataPython
|
6470148
|
<reponame>m2u/m2u
"""Commands for scene events tracking in maya.
Scene tracking observes if files are opened, closed or Maya exits.
That may be important for deleting and recreating callbacks automatically.
When Maya exits, we will try to disconnect from the Editor and save
settings.
"""
import logging
import pymel.api as mapi
import m2u
_lg = logging.getLogger(__name__)
def create_maya_exit_tracker():
mapi.MSceneMessage.addCallback(mapi.MSceneMessage.kMayaExiting,
_on_maya_exiting)
def _on_maya_exiting(data):
m2u.core.editor.disconnect()
m2u.core.settings.save_config()
# Register the exit-tracker when loading this module:
create_maya_exit_tracker()
|
StarcoderdataPython
|
3427992
|
<filename>muninn/muninn/settings/prod.py
from .base import *
DEBUG = False
|
StarcoderdataPython
|
1745685
|
#
# Copyright (C) 2007 <NAME>
# All rights reserved.
# For license terms see the file COPYING.txt.
#
from __future__ import print_function
import unittest, os, shutil, errno, sys, difflib, cgi, re
from roundup.admin import AdminTool
from . import db_test_base
from .test_mysql import skip_mysql
from .test_postgresql import skip_postgresql
class AdminTest(object):
backend = None
def setUp(self):
self.dirname = '_test_admin'
def tearDown(self):
try:
shutil.rmtree(self.dirname)
except OSError as error:
if error.errno not in (errno.ENOENT, errno.ESRCH): raise
def testInit(self):
import sys
self.admin=AdminTool()
sys.argv=['main', '-i', '_test_admin', 'install', 'classic', self.backend]
ret = self.admin.main()
print(ret)
self.assertTrue(ret == 0)
self.assertTrue(os.path.isfile(self.dirname + "/config.ini"))
self.assertTrue(os.path.isfile(self.dirname + "/schema.py"))
def testInitWithConfig_ini(self):
import sys
from roundup.configuration import CoreConfig
self.admin=AdminTool()
sys.argv=['main', '-i', '_test_admin', 'install', 'classic', self.backend]
# create a config_ini.ini file in classic template
templates=self.admin.listTemplates()
config_ini_content = "[mail]\n# comment\ndebug = SendMail.LOG\n"
config_ini_path = templates['classic']['path'] + '/config_ini.ini'
config_ini_file = open(config_ini_path, "w")
config_ini_file.write(config_ini_content)
config_ini_file.close()
try:
ret = self.admin.main()
finally:
try:
# ignore file not found
os.remove(config_ini_path)
except OSError as e: # FileNotFound exception under py3
if e.errno == 2:
pass
else:
raise
print(ret)
self.assertTrue(ret == 0)
self.assertTrue(os.path.isfile(self.dirname + "/config.ini"))
self.assertTrue(os.path.isfile(self.dirname + "/schema.py"))
config=CoreConfig(self.dirname)
self.assertEqual(config['MAIL_DEBUG'], self.dirname + "/SendMail.LOG")
class anydbmAdminTest(AdminTest, unittest.TestCase):
backend = 'anydbm'
@skip_mysql
class mysqlAdminTest(AdminTest, unittest.TestCase):
backend = 'mysql'
class sqliteAdminTest(AdminTest, unittest.TestCase):
backend = 'sqlite'
@skip_postgresql
class postgresqlAdminTest(AdminTest, unittest.TestCase):
backend = 'postgresql'
|
StarcoderdataPython
|
6557172
|
# Suppose you have a bunch of markdown files in a root directory called RD1,
# and you list all these file links in another file called F2, now
# you want to check whether all the files in RD1 are listed in F2.
import os
# Use this filter_list when you want to skip checking some files
# filter_list = ['<file-name1.xx>','<file-name2.xx>',...]
# Walks files in a given folder (e.g. RD1) and get all files' paths and filenames.
for root, dirs, files in os.walk("<absolute-path-of-RD1>", topdown=True):
for name in files:
if '.md' in name: # Check all markdown files
# if '.md' in name and name not in filter_list: # Check all .md files except those in filter_list
pin = 0 # Use a marker to differentiate file names
# Find if the name string is in F2
with open("<absolute-path-of-F2>",'r') as foo:
for line in foo.readlines():
if name in line:
pin = 1
if pin == 0:
# Print the names of all files that are not in F2
print(name, "not in F2")
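# A variant sketch (an assumption, not part of the original snippet): reading F2 once
# into memory avoids re-opening it for every markdown file found under RD1.
def find_unlisted_markdown_files(rd1_path, f2_path):
    with open(f2_path, 'r') as foo:
        listing = foo.read()
    missing = []
    for root, dirs, files in os.walk(rd1_path, topdown=True):
        for name in files:
            if name.endswith('.md') and name not in listing:
                missing.append(name)
    return missing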
|
StarcoderdataPython
|
3563457
|
import networkx as nx
from covariant_compositional_networks_tf2.CCN_Model import CCN_Model
import numpy as np
import tensorflow as tf
from ordered_set import OrderedSet
from covariant_compositional_networks_tf2.CCN_Model import CCN_Model
channels_in = 5
feature_vector_shape = [1]
k = 2
model = CCN_Model(optimizer= tf.keras.optimizers.Adam(lr = 0.005), loss=tf.losses.binary_crossentropy, nonlinearity=tf.nn.relu,
feature_vector_shape=feature_vector_shape, num_layers=1, k=k, batch_update_size=60, l1_reg=0.004, save_every=2,
channels_in=[channels_in, 40])
def randomNPGraph(n, p, diagonal=True, undirected=True):
adjM = np.random.binomial(1, p, (n, n))
if diagonal:
for i in range(len(adjM)):
adjM[i, i] = 1
if undirected:
xy = np.mgrid[0:n:1, 0:n:1].reshape(2, -1).T.reshape(n, n, 2)
adjM = np.where(xy[..., 1] > xy[..., 0], adjM, adjM.T)
return adjM
def randomGraphColoring(n, m, max_color=None):
if max_color is None:
max_color = m
coloring = np.zeros((n, max_color))
indices = list((np.arange(n), np.random.randint(m, size=n)))
coloring[tuple(indices)] = 1
return coloring
def checkGraphColoringError(adjM, coloring):
neighbours = [np.where(adjM[i] == 1) for i in range(len(adjM))]
errors = np.array(
[[np.sum(coloring[i] * coloring[j]) if i != j and j in neighbours[i][0] else 0 for j in range(len(adjM))] for i
in range(len(adjM))])
sum_of_errors = np.sum(errors) / 2
return sum_of_errors
def checkIfGraphConnected(adjM):
G = nx.from_numpy_matrix(adjM)
return nx.is_connected(G)
def generateGraphColoring(size, n_range, m_range, p_range):
m_max = m_range[1]-1
graphs = []
while True:
n = np.random.randint(n_range[0], n_range[1])
m = np.random.randint(m_range[0], m_range[1])
p = np.random.uniform(p_range[0], p_range[1])
# print("n: " + str(n) +", m: " +str(m)+ ", p: " + str(p))
NPGraph = randomNPGraph(n, p)
connected = checkIfGraphConnected(NPGraph)
if not connected:
continue
coloring = randomGraphColoring(n, m, max_color=m_max)
coloringError = checkGraphColoringError(NPGraph, coloring)
coloringError = 0.0 if coloringError ==0 else 1
parts = [OrderedSet([i]) for i in range(len(NPGraph))]
graph = [NPGraph, coloring, coloringError, parts]
graphs.append(graph)
if len(graphs) >= size:
break
return graphs
# n = 7 # nodes
# m = 4 # colors
# p = 0.4 # edge probability
# NPGraph = randomNPGraph(n, p)
# coloring = randomGraphColoring(n, m, max_color=None)
#
# connected = checkIfGraphConnected(NPGraph)
# coloringError = checkGraphColoringError(NPGraph, coloring)
# print(coloring)
# print(connected)
# print(coloringError)
data_size = 600
graphs = list(zip(*generateGraphColoring(data_size, (3, 7), (channels_in, channels_in+1), (0.2, 0.5))))
graphsValid = list(zip(*generateGraphColoring(250, (3, 7), (channels_in, channels_in+1), (0.2, 0.5))))
Xval, Yval = model.createTensors(graphsValid[1], graphsValid[2])
model.add_valid(Xval, Yval, graphsValid[0], graphsValid[3])
#print(graphs)
uq=np.unique(graphs[2], return_counts=True)
print(np.unique(graphs[2], return_counts=True))
classW = data_size/(2 * uq[1])
classWdict = {clazz:weight for clazz, weight in zip(uq[0], classW)}
model.class_weights = classWdict
print(classWdict)
adjM = graphs[0]
X = graphs[1]
Y = graphs[2]
parts = graphs[3]
X,Y = model.createTensors(X,Y)
model.fit(X, Y, adjM, parts, 1000)
|
StarcoderdataPython
|
3309557
|
<gh_stars>1-10
# Author:柠檬班-木森
# E-mail:<EMAIL>
import json
import os
import unittest
import time
import copy
from jinja2 import Environment, FileSystemLoader
from concurrent.futures.thread import ThreadPoolExecutor
from apin.core.initEvn import log
from apin.core.testResult import ReRunResult
from apin.core.resultPush import DingTalk, WeiXin, SendEmail
class TestRunner():
"""unittest运行程序"""
def __init__(self, suite: unittest.TestSuite,
filename="reports.html",
report_dir=".",
title='测试报告',
tester='测试员',
desc="XX项目测试生成的报告",
templates=1,
no_report=False
):
"""
        Initialize the test case runner.
        :param suites: the test suite to run
        :param filename: report file name
        :param report_dir: directory the report file is written to
        :param title: test suite title
        :param templates: pass 1, 2 or 3 to pick the report style template
        :param tester: name of the tester
        :param no_report: do not generate an HTML report and return the results as json data instead; a report is generated by default, set True to skip it
"""
if not isinstance(suite, unittest.TestSuite):
raise TypeError("suites 不是测试套件")
if not isinstance(filename, str):
raise TypeError("filename is not str")
if not filename.endswith(".html"):
filename = filename + ".html"
self.suite = suite
self.filename = filename
self.title = title
self.tester = tester
self.desc = desc
self.templates = templates
self.report_dir = report_dir
self.result = []
self.starttime = time.time()
self.no_report = no_report
def __classification_suite(self):
"""
        Split the cases in the suite into multiple test suites, one per test-case class, and pack them into a list.
:return: list-->[suite,suite,suite.....]
"""
suites_list = []
def wrapper(suite):
for item in suite:
if isinstance(item, unittest.TestCase):
suites_list.append(suite)
break
else:
wrapper(item)
wrapper(copy.deepcopy(self.suite))
return suites_list
def __get_reports(self,thread_count):
"""
        Generate the report and return the aggregated test results.
        :return: a dictionary containing the test results
        """
        # summary of the test results
test_result = {
"success": 0,
"all": 0,
"fail": 0,
"skip": 0,
"error": 0,
"results": [],
"testClass": [],
}
        # merge the per-suite results into the summary
for res in self.result:
for item in test_result:
test_result[item] += res.fields[item]
test_result['thread_count'] =thread_count
test_result['runtime'] = '{:.2f} S'.format(time.time() - self.starttime)
test_result["begin_time"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.starttime))
test_result["title"] = self.title
test_result["tester"] = self.tester
test_result['desc'] = self.desc
if test_result['all'] != 0:
test_result['pass_rate'] = '{:.2f}'.format(test_result['success'] / test_result['all'] * 100)
else:
test_result['pass_rate'] = 0
log.info("用例运行完毕,结果如下:\n共运行:{}条 "
"\n通过:{}条"
"\n失败:{}条"
"\n错误:{}条"
"\n运行时间:{}".format(
test_result['all'], test_result['success'], test_result['fail'], test_result['error'],
test_result['runtime']
))
self.test_result = test_result
        # decide whether a test report should be generated
if os.path.isdir(self.report_dir):
pass
else:
os.mkdir(self.report_dir)
if self.no_report:
return self.__get_results(test_result)
log.info("正在生成测试报告中......")
        # load the historical execution data
        test_result['history'] = self.__handle_history_data(test_result)
        # load the report template
template_path = os.path.join(os.path.dirname(__file__), '../templates/reports')
env = Environment(loader=FileSystemLoader(template_path))
if self.templates == 2:
template = env.get_template('templates2.html')
elif self.templates == 3:
template = env.get_template('templates3.html')
else:
template = env.get_template('templates1.html')
file_path = os.path.join(self.report_dir, self.filename)
        # render the report template
        res = template.render(test_result)
        # write the report to a file
with open(file_path, 'wb') as f:
f.write(res.encode('utf8'))
log.info("测试报告已经生成,报告路径为:{}".format(file_path))
self.email_conent = {"file": os.path.abspath(file_path),
"content": env.get_template('templates03.html').render(test_result)
}
return {'success': test_result['success'],
'all': test_result['all'],
'fail': test_result['fail'],
'skip': test_result['skip'],
'error': test_result['error'],
'runtime': test_result['runtime'],
'begin_time': test_result['begin_time'],
'tester': test_result['tester'],
'pass_rate': test_result['pass_rate'],
'report': file_path,
"thread_count":thread_count
}
def __handle_history_data(self, test_result):
"""
        Process the historical run data.
:return:
"""
try:
with open(os.path.join(self.report_dir, 'history.json'), 'r', encoding='utf-8') as f:
history = json.load(f)
except FileNotFoundError as e:
history = []
history.append({'success': test_result['success'],
'all': test_result['all'],
'fail': test_result['fail'],
'skip': test_result['skip'],
'error': test_result['error'],
'runtime': test_result['runtime'],
'begin_time': test_result['begin_time'],
'pass_rate': test_result['pass_rate'],
})
with open(os.path.join(self.report_dir, 'history.json'), 'w', encoding='utf-8') as f:
json.dump(history, f, ensure_ascii=True)
return history
def __get_notice_content(self):
"""获取通知的内容"""
template_path = os.path.join(os.path.dirname(__file__), '../templates/reports')
env = Environment(loader=FileSystemLoader(template_path))
res_text = env.get_template('dingtalk.md').render(self.test_result)
return res_text
def __get_results(self, test_result):
"""返回测试结果"""
results = []
for case in test_result.get('results'):
results.append({k: v for k, v in case.__dict__.items() if not k.startswith('_')})
test_result['results'] = results
return test_result
def run(self, thread_count=1, rerun=0, interval=2):
"""
        Run the suite, optionally on multiple threads.
        Note: if several test classes share a global variable, resource races may cause errors.
        :param thread_count: number of threads, defaults to 1
        :return: the test run result
        """
        # split the test suite into one suite per test-case class
suites = self.__classification_suite()
with ThreadPoolExecutor(max_workers=thread_count) as ts:
for i in suites:
res = ReRunResult(count=rerun, interval=interval)
self.result.append(res)
ts.submit(i.run, result=res).add_done_callback(res.stopTestRun)
ts.shutdown(wait=True)
result = self.__get_reports(thread_count)
return result
def send_email(self, host, port, user, password, to_addrs, is_file=True):
"""
        Send the report as an e-mail attachment.
        :param host: str, SMTP server address
        :param port: int, SMTP server port
        :param user: str, mailbox account
        :param password: str, mailbox password
        :param to_addrs: str (a single recipient) or list (a list of recipients)
:return:
"""
sm = SendEmail(host=host, port=port, user=user, password=password)
if is_file:
filename = self.email_conent["file"]
else:
filename = None
content = self.email_conent["content"]
sm.send_email(subject=self.title, content=content, filename=filename, to_addrs=to_addrs)
def get_except_info(self):
"""
        Collect the error messages of errored and failed test cases.
:return:
"""
except_info = []
num = 0
for i in self.result:
for texts in i.failures:
t, content = texts
num += 1
except_info.append("*{}、用例【{}】执行失败*,\n失败信息如下:".format(num, t._testMethodDoc))
except_info.append(content)
for texts in i.errors:
num += 1
t, content = texts
except_info.append("*{}、用例【{}】执行错误*,\n错误信息如下:".format(num, t._testMethodDoc))
except_info.append(content)
except_str = "\n".join(except_info)
return except_str
def dingtalk_notice(self, url, key=None, secret=None, atMobiles=None, isatall=False, except_info=False):
"""
        DingTalk notification.
        :param url: the Webhook URL of the DingTalk group robot
        :param key: (optional, str) if the robot's security settings use a keyword, pass the matching keyword
        :param secret: (optional, str) if the robot's security settings use a signature, pass the matching secret
        :param atMobiles: (optional, list) phone numbers to @-mention in the notification, e.g. [137xxx, 188xxx]
        :param isatall: whether to @ everyone, defaults to False; set True to @ everyone
        :param except_info: whether to send details of failed cases, defaults to False; set True to include them
        :return: {"errcode":0,"errmsg":"ok"} on success, {"errcode":<error code>,"errmsg":<reason>} on failure
"""
if not url:
raise ValueError("url(钉钉群机器人的Webhook地址)不能为空")
res_text = self.__get_notice_content()
if except_info:
res_text += '\n ### 未通过用例详情:\n'
res_text += self.get_except_info()
data = {
"msgtype": "markdown",
"markdown": {
"title": '{}({})'.format(self.title, key),
"text": res_text
},
"at": {
"atMobiles": atMobiles,
"isAtAll": isatall
}
}
ding = DingTalk(url=url, data=data, secret=secret)
response = ding.send_info()
return response.json()
def weixin_notice(self, chatid, access_token=None, corpid=None, corpsecret=None):
"""
        Push the test results to a WeCom (enterprise WeChat) group; at least one of access_token or (corpid, corpsecret) must be passed.
        Either pass access_token directly, or pass (corpid, corpsecret) in its place.
        :param chatid: WeCom group chat ID
        :param access_token: credential for calling the WeCom API
        :param corpid: enterprise ID
        :param corpsecret: application credential secret
:return:
"""
        # build the notification content
res_text = self.__get_notice_content()
data = {
"chatid": chatid,
"msgtype": "markdown",
"markdown": {
"content": res_text
}
}
wx = WeiXin(access_token=access_token, corpid=corpid, corpsecret=corpsecret)
response = wx.send_info(data=data)
return response
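# Minimal usage sketch (an assumption, not part of the original module): build a suite,
# run it on a couple of worker threads and read the summary dict returned by run().
if __name__ == '__main__':
    demo_suite = unittest.defaultTestLoader.discover('tests', pattern='test_*.py')
    runner = TestRunner(demo_suite, filename='reports.html', report_dir='./reports',
                        title='Demo report', tester='CI', templates=1)
    summary = runner.run(thread_count=2, rerun=1, interval=2)
    print(summary['pass_rate'], summary['report'])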
|
StarcoderdataPython
|
5190607
|
<reponame>DryptoBZX/contractsV2
#!/usr/bin/python3
import pytest
from brownie import Contract, network
from helpers import setupLoanPool
def test_getTokens(Constants, bzx, accounts, TokenRegistry):
setupLoanPool(Constants, bzx, accounts[1], accounts[2])
setupLoanPool(Constants, bzx, accounts[3], accounts[4])
setupLoanPool(Constants, bzx, accounts[3], accounts[5]) # this will overrider asset account[4]
trproxy = accounts[0].deploy(TokenRegistry, bzx.address)
tr = Contract.from_abi("tr", address=trproxy.address, abi=TokenRegistry.abi, owner=accounts[0])
print("accounts", accounts)
print("loanPoolToUnderlying", bzx.loanPoolToUnderlying(accounts[1]))
tokenList = tr.getTokens(0, 10)
print(tokenList)
assert(tokenList[0][0] == accounts[1])
assert(tokenList[0][1] == accounts[2])
assert(tokenList[1][0] == accounts[3])
assert(tokenList[1][1] == accounts[5])
assert (len(tokenList) == 2)
|
StarcoderdataPython
|
5049209
|
from asg.intermediate_lang import *
from asg.entities import *
from lark import Lark, Transformer
import os
from asg.grammar import *
class SExpressionTransformer(Transformer):
"""
Transform the parsed tree to an SExpressionList object. Also handles strings and literals.
"""
def string(self, string):
if len(string) > 0:
return string[0].value
else:
return ""
def literal(self, literal):
return Literal(literal[0].value)
def number(self, num):
if "." in num[0].value:
return float(num[0].value)
else:
return int(num[0].value)
def list(self, l):
return Atom(l[0], l[1:])
def symbol_to_il(symbol):
name = ""
pin_locations = {}
inputs = []
outputs = []
properties = []
ul_corner = Point(0, 0)
lr_corner = Point(0, 0)
for prop in symbol.children:
if type(prop) != Atom:
continue
if prop.name == "property":
property_key = prop.children[0]
property_value = prop.children[1]
property_id = -1
property_location = [0, 0, 0]
property_effects = []
for c in prop.children:
if type(c) != Atom:
continue
if c.name == "id":
property_id = c.children[0]
if c.name == "at":
property_location = c.children
if c.name == "effects":
property_effects = c.children
properties.append(
LibraryProperty(
property_key,
property_value,
property_id,
property_location,
property_effects,
)
)
if "Value" in prop.children:
name = prop.children[1]
if prop.name == "symbol":
for element in prop.children:
if type(element) != Atom:
continue
if element.name == "pin":
pin_name = -1
pin_location = Point(0, 0)
pin_type = element.children[0]
for attr in element.children:
if type(attr) != Atom:
continue
if attr.name == "at":
pin_location = Point(attr.children[0], -attr.children[1])
if attr.name == "name":
pin_name = attr.children[0]
if pin_type == "input":
inputs.append(pin_name)
else:
outputs.append(pin_name)
if pin_location.x < ul_corner.x:
ul_corner.x = pin_location.x
if pin_location.y < ul_corner.y:
ul_corner.y = pin_location.y
if pin_location.x > lr_corner.x:
lr_corner.x = pin_location.x
if pin_location.y > lr_corner.y:
lr_corner.y = pin_location.y
pin_locations[pin_name] = pin_location
if name == "":
raise Exception(f"Invalid symbol {symbol.children[0]}")
bounding_box = BoundingBox(ul_corner, lr_corner)
bounding_box.expand(2, 0.1)
return LibrarySymbol(
name,
symbol.children[0],
pin_locations,
inputs,
outputs,
symbol,
properties,
bounding_box,
)
def s_expression_to_il(library_file):
"""
Convert an s-expression file to an internal representation
:param library_file: A file descriptor
:return:
"""
# Get paths relative to the location of this file, not the root of the module
script_dir = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(script_dir, "s_expression.lark")) as grammar:
parser = Lark(
grammar.read() + "\n",
parser="lalr",
transformer=SExpressionTransformer(),
start="list",
)
parsed = parser.parse(library_file.read())
symbols = [child for child in parsed.children if child.name == "symbol"]
res = LibraryIL()
for symbol in symbols:
symbol_il = symbol_to_il(symbol)
res.symbols[symbol_il.name] = symbol_il
return res
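# Minimal usage sketch (an assumption, not part of the original module): parse a symbol
# library file into the intermediate representation built above.
# with open("library.kicad_sym") as library_file:
#     library_il = s_expression_to_il(library_file)
#     print(list(library_il.symbols.keys()))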
|
StarcoderdataPython
|
368972
|
<filename>Dragon/python/dragon/vm/onnx/frontend.py
# ------------------------------------------------------------
# Copyright (c) 2017-present, SeetaTech, Co.,Ltd.
#
# Licensed under the BSD 2-Clause License.
# You should have received a copy of the BSD 2-Clause License
# along with the software. If not, See,
#
# <https://opensource.org/licenses/BSD-2-Clause>
#
# Codes are based on:
#
# <https://github.com/pytorch/pytorch/blob/master/caffe2/python/onnx/frontend.py>
#
# ------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from collections import defaultdict
from onnx import (checker, mapping, numpy_helper, GraphProto, OperatorSetIdProto)
from onnx.helper import make_tensor_value_info, make_model, printable_graph
from dragon.vm.onnx.helper import \
(extract_initializer, extract_leaf_tensors,
native_run_graph, fetch_initializer,)
from dragon.vm.onnx.nodes.factory import get_nodes_def
class DragonFrontend(object):
"""This class help dragon to convert
internal protocols to ONNX protocols.
"""
target_opset_version = 9
@staticmethod
def _extract_value_info(tensor):
return make_tensor_value_info(
name=tensor.name,
elem_type=tensor.data_type,
shape=tensor.dims)
@staticmethod
def _ssa_rewrite(op_def, shapes, ssa_names, ssa_outputs):
inputs, outputs = [], []
for e in op_def.input:
inputs.append(ssa_names[e] if e in ssa_names else e)
for e in op_def.output:
outputs.append(e + '/Version_{}'.format(
ssa_outputs[e]) if ssa_outputs[e] > 0 else e)
ssa_outputs[e] += 1
ssa_names[e] = outputs[-1]
shapes[outputs[-1]] = shapes[e][:]
op_def.ClearField('input')
op_def.ClearField('output')
op_def.input.extend(inputs)
op_def.output.extend(outputs)
return op_def, shapes, ssa_names, ssa_outputs
@classmethod
def graph_def_to_onnx_graph(
cls,
graph_def,
init_func=None,
constants=None,
value_info=None,
graph_name=None,
verbose=True,
enforce_no_running=False,
):
if value_info is None: value_info = {}
if not isinstance(value_info, dict):
raise ValueError(
'Please pass value_info as a '
'name -> (type, shape) dictionary')
leaf_tensors = extract_leaf_tensors(graph_def)
initializer = extract_initializer(graph_def)
# Check whether we have got type shape info of all input
missing = (leaf_tensors - set(value_info.keys()) - initializer)
if missing:
raise RuntimeError('Could not find value info of inputs: {}'.format(
', '.join(missing)))
# Check if value_info contains the types/shapes of all the blobs, in
# which case we don't need to infer them by running the net.
run_native_graph = False
for op in graph_def.op:
for name in itertools.chain(op.input, op.output):
if name not in value_info:
run_native_graph = True
break
ws = None
# Get the value info of outputs and initializer
if run_native_graph and not enforce_no_running:
inputs = {}
for name, (elem_type, shape) in value_info.items():
inputs[name] = np.random.randn(*shape).astype(
mapping.TENSOR_TYPE_TO_NP_TYPE[elem_type])
ws, outputs, initializer = native_run_graph(
graph_def, inputs, initializer, init_func)
if enforce_no_running:
            # In some cases (e.g. PyTorch), the graph has already been run and
            # the outputs are already in ``value_info``
import dragon.core.workspace as ws
initializer = fetch_initializer(initializer)
# Prepare to make the graph
onnx_graph = GraphProto()
onnx_graph.name = graph_name if graph_name else graph_def.name
# Initializer should also be included in the inputs
value_info.update({
init.name: (init.data_type, init.dims)
for init in initializer})
# Add initializer
onnx_graph.initializer.extend(initializer)
# Add inputs
onnx_graph.input.extend(
make_tensor_value_info(
name=name,
elem_type=value_info[name][0],
shape=value_info[name][1])
for name in leaf_tensors)
# Add outputs
onnx_graph.output.extend(
make_tensor_value_info(
name=name,
elem_type=value_info[name][0],
shape=value_info[name][1])
for name in set(graph_def.output))
# Add constants
if constants is not None:
for k, v in constants.items():
onnx_graph.initializer.extend(
[numpy_helper.from_array(v, name=k)])
# Add nodes
shapes, ssa_names, ssa_outputs = {}, {}, defaultdict(int)
for op in graph_def.op:
# Get the shape of inputs and outputs
for name in itertools.chain(op.input, op.output):
if ws and ws.HasTensor(name):
blob = ws.FetchTensor(name)
if hasattr(blob, 'shape'):
shapes[name] = blob.shape
else:
shapes[name] = value_info[name][1]
# SSA rewritten
op, shapes, ssa_names, ssa_outputs = \
cls._ssa_rewrite(op, shapes, ssa_names, ssa_outputs)
# Try to translate op => nodes
nodes, const_tensors = get_nodes_def(op, shapes, ws)
# Directly convert outputs as const tensors if necessary
if None in nodes:
const_tensors = [
numpy_helper.from_array(
ws.FetchTensor(name), name=name)
for name in op.output]
else:
onnx_graph.node.extend(nodes)
# Add const tensors
if const_tensors is not None:
onnx_graph.initializer.extend(const_tensors)
onnx_graph.input.extend([
cls._extract_value_info(tensor)
for tensor in const_tensors])
if verbose: print(printable_graph(onnx_graph))
return onnx_graph
@classmethod
def graph_def_to_onnx_model(
cls,
graph_def,
init_func=None,
constants=None,
value_info=None,
graph_name=None,
verbose=True,
enforce_no_running=False,
):
opset_id = OperatorSetIdProto()
opset_id.domain = '' # ONNX default domain
opset_id.version = cls.target_opset_version
model = make_model(
cls.graph_def_to_onnx_graph(
graph_def,
init_func,
constants,
value_info,
graph_name,
verbose,
enforce_no_running,
),
opset_imports=[opset_id], # current supported opset version
producer_name='onnx-dragon', # producer name
)
checker.check_model(model)
return model
graph_def_to_onnx_graph = DragonFrontend.graph_def_to_onnx_graph
graph_def_to_onnx_model = DragonFrontend.graph_def_to_onnx_model
|
StarcoderdataPython
|
8149229
|
import unittest
from mock import patch
class _Success(str):
@property
def failed(self):
return False
class TestVagrantVersion(unittest.TestCase):
def test_vagrant_version_1_3_0(self):
with patch('fabtools.vagrant.local') as mock_local:
mock_local.return_value = _Success("Vagrant version 1.3.0\n")
from fabtools.vagrant import version
self.assertEqual(version(), (1, 3, 0))
def test_vagrant_version_1_3_1(self):
with patch('fabtools.vagrant.local') as mock_local:
mock_local.return_value = _Success("Vagrant v1.3.1\n")
from fabtools.vagrant import version
self.assertEqual(version(), (1, 3, 1))
def test_vagrant_version_1_4_3(self):
with patch('fabtools.vagrant.local') as mock_local:
mock_local.return_value = _Success("Vagrant 1.4.3\n")
from fabtools.vagrant import version
self.assertEqual(version(), (1, 4, 3))
def test_vagrant_version_1_5_0_dev(self):
with patch('fabtools.vagrant.local') as mock_local:
mock_local.return_value = _Success("Vagrant 1.5.0.dev\n")
from fabtools.vagrant import version
self.assertEqual(version(), (1, 5, 0, 'dev'))
|
StarcoderdataPython
|
6554665
|
<gh_stars>1-10
from pyopenproject.api_connection.exceptions.request_exception import RequestError
from pyopenproject.api_connection.requests.post_request import PostRequest
from pyopenproject.business.exception.business_error import BusinessError
from pyopenproject.business.services.command.user.user_command import UserCommand
from pyopenproject.model import user as u
class Lock(UserCommand):
def __init__(self, connection, user):
super().__init__(connection)
self.user = user
def execute(self):
try:
json_obj = PostRequest(connection=self.connection,
context=f"{self.CONTEXT}/{self.user.id}/lock",
headers={"Content-Type": "application/hal+json"}).execute()
return u.User(json_obj)
except RequestError as re:
raise BusinessError(f"Error locking user by id: {self.user.id}") from re
|
StarcoderdataPython
|
11251249
|
import math, random, sys, numpy
import pygame
from pygame.locals import *
pygame.init()
# define display surface
ScrW = 1000
ScrH = 700
HW, HH = ScrW / 2, ScrH / 2
win = pygame.display.set_mode((ScrW, ScrH))
pygame.display.set_caption("BommerMan")
vol = 5
FPS = 10
clock = pygame.time.Clock()
# define some colors
black = (0, 0, 0, 255)
white = (255, 255, 255, 255)
CENTER_HANDLE = 4
class spritesheet:
def __init__(self, filename, cols, rows):
self.sheet = pygame.image.load(filename).convert_alpha()
self.cols = cols
self.rows = rows
self.totalCellCount = cols * rows
self.rect = self.sheet.get_rect()
w = self.cellWidth = self.rect.width // cols
h = self.cellHeight = self.rect.height // rows
hw, hh = self.cellCenter = (w // 2, h // 2)
self.cells = list([(i % cols * w, i // cols * h, w, h) for i in range(self.totalCellCount)])
self.handle = list([(0, 0), (-hw, 0), (-w, 0), (0, -hh), (-hw, -hh), (-w, -hh), (0, -h), (-hw, -h), (-w, -h),])
def draw(self, surface, cellIndex, x, y, handle = 0):
surface.blit(self.sheet, (x + self.handle[handle][0], y + self.handle[handle][1]), self.cells[cellIndex])
def spriteSize (self):
return self.cellWidth, self.cellHeight, self.totalCellCount , self.cols , self.rows
class movement:
def __init__ (self, spritesheet, intx = HW, inty = HH):
self.size = spritesheet.spriteSize()
self.x = intx
self.y = inty
self.index = numpy.arange(self.size[3]*self.size[4]).reshape(self.size[4], self.size[3])
self.i = 0
self.j = 0
def xyWalk (self, spritesheet, frames):
self.key = pygame.key.get_pressed()
if self.key [pygame.K_LEFT]:
self.i = 1
self.x -= vol
elif self.key [pygame.K_RIGHT]:
self.i = 3
self.x += vol
elif self.key [pygame.K_UP]:
self.i = 0
self.y -= vol
elif self.key [pygame.K_DOWN]:
self.i = 2
self.y += vol
else :
self.i = 2
self.j = 0
self.j += 1
if self.j == frames: self.j = 0
        if self.x > ScrW - self.size[0]: self.x = ScrW - self.size[0]
        if self.x < 0: self.x = 0
        if self.y > ScrH - self.size[1]: self.y = ScrH - self.size[1]
        if self.y < 0: self.y = 0
        # Incomplete side-scrolling logic: it references names that are never defined in
        # this file (startScrollingPosX, stagePosX, stageWidth, W, bg, bgWidth), so it is
        # kept commented out to keep the walk routine runnable.
        # if self.x < HW: circlePosX = self.x
        # elif self.x > stageWidth - startScrollingPosX: circlePosX = self.x - self.size[0] + W
        # else:
        #     circlePosX = startScrollingPosX
        #     stagePosX += - vol
        #     self.rel_x = stagePosX % 2000
        #     spritesheet.draw(win, 0, self.rel_x - 2000, 0)
        #     # DS.blit(bg, (self.rel_x - bgWidth, 0))
        #     if self.rel_x < W:
        #         spritesheet.draw(win, 0, self.rel_x, 0)
        #         # DS.blit(bg, (self.rel_x, 0))
        spritesheet.draw(win, self.index[self.i][self.j], self.x, self.y, 0)
pygame.display.update()
# exit the program
def events():
for event in pygame.event.get():
if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
pygame.quit()
sys.exit(0)
def main():
#s = spritesheet(".\Movement.png", 12, 2)
b = spritesheet(".\Background.jpg",1,1)
s = spritesheet(".\FBI.png", 9, 4)
m = movement(s)
w = movement(b)
while True:
events()
m.xyWalk(s, 9)
clock.tick(FPS)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
4980134
|
<filename>dbmanage/bkrs/script/backupscripts/mysqlbackup.py<gh_stars>1-10
#!/usr/bin/python
import sys
import string
import shutil
import getopt
import os
import os.path
import syslog
import errno
import logging
import tempfile
import datetime
import subprocess
import readline
import json
from operator import itemgetter
"""
-----------------------------------------------------------------------------
A script to backup mysql databases through the mysqldump utility.
Use the -h or the --help flag to get a listing of options.
Program: Mysql Database Backups
Author: <NAME>
Date: April 28, 2013
Revision: 1.0
Revision | Author | Comment
-----------------------------------------------------------------------------
20130428-1.0 <NAME> Initial creation of script.
-----------------------------------------------------------------------------
"""
def rlinput(prompt, prefill=''):
readline.set_startup_hook(lambda: readline.insert_text(prefill))
try:
return raw_input(prompt)
finally:
readline.set_startup_hook()
def format_date(raw_date):
return "%s-%s-%s %s:%s:%s" % (raw_date[0:4], raw_date[4:6],
raw_date[6:8], raw_date[8:10], raw_date[10:12], raw_date[12:14])
class MysqlBackup:
def __init__(self, keep=90, databases=None, store=None, user="root",
                 password=None, host=None):
self.host = host
self.keep = keep
self.databases = databases
self.store = store
self.user = user
self.password = password
self.host = host
def run_command(self, command=None, shell=False, ignore_errors=False,
ignore_codes=None, get_output=False, path="."):
p = subprocess.Popen([command], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, cwd=path)
out, err = p.communicate()
result = p.returncode
if result and not ignore_errors and (not ignore_codes or result in set(ignore_codes)):
raise BaseException(str(command) + " " + str(result))
def get_databases(self):
if self.databases != None:
return [s.strip() for s in self.databases.strip().split(",")]
list_cmd = "mysql -u" + self.user
if self.host != None:
list_cmd += " -h " + self.host
if self.password != None:
list_cmd += " -p" + self.password
list_cmd += " --silent -N -e 'show databases'"
databases = os.popen(list_cmd).readlines()
return [s.strip() for s in databases]
def restore(self):
dbbackup_path = self.store + os.sep
backups = sorted(os.listdir(dbbackup_path), reverse=True)
# show available options
k = 1
options = {}
prev_date = ""
databases = ""
filenames = ""
print "Available backups to restore:"
for i in range(len(backups)):
data = backups[i].split(".")
date = data[0]
if not prev_date:
prev_date = date
if (date != prev_date):
print "["+str(k)+"]", "(%s) %s" % (format_date(prev_date), databases)
options[k] = {
"date": prev_date,
"databases": databases,
"filenames": filenames
}
k += 1
prev_date = date
databases = ""
filenames = ""
databases += ("" if databases == "" else ",") + data[1]
filenames += ("" if filenames == "" else ",") + backups[i]
print "["+str(k)+"]", "(%s) %s" % (format_date(prev_date), databases)
options[k] = {
"date": prev_date,
"databases": databases,
"filenames": filenames
}
# get the selection
user_input = -1
max_option = len(options.keys())
while True:
user_input = int(raw_input("\nSelect backup: "))
if (user_input < 1) or (max_option < user_input):
print "Error: The value should be between 1 and", max_option
else:
break
# get the databases to restore
date = format_date(options[user_input]["date"])
filenames = options[user_input]["filenames"]
selected_databases = rlinput("Databases to restore: ", options[user_input]["databases"])
databases = ",".join(filter(lambda db: db in selected_databases, self.get_databases()))
if databases == "":
print "Error: The selected databases doesn't match any created databases."
sys.exit()
# ask for confirmation
print "The databases \"%s\" are going to be restored using the version dated \"%s\"" % (databases, date)
confirmation = rlinput("Continue? [Y/n] ", "Y")
if confirmation != "Y":
print "Aborted."
sys.exit()
# expand the filenames of the databases
databases = databases.split(",")
filenames = filter(lambda fln: reduce(lambda x,y: x or y,
map(lambda dbn: dbn in fln, databases)),
filenames.split(","))
# restore the databases
print
for filename in filenames:
db = filename.split(".")[1]
restore_cmd = "gunzip < " + dbbackup_path + filename + \
" | mysql -u " + self.user
if self.host != None:
restore_cmd += " -h " + "'" + self.host + "'"
if self.password != None:
restore_cmd += " -p" + self.password
restore_cmd += " " + db
print "Restoring \"" + db + "\"...",
sys.stdout.flush()
logging.info("Restore db, %s from %s." % (db, dbbackup_path + filename))
self.run_command(restore_cmd)
print "done"
print "Restore complete!"
def backup(self):
padding = len(str(self.keep))
backups = []
# remove files older than keep days
cutdate = datetime.datetime.now() - datetime.timedelta(days=self.keep)
for backup_file in os.listdir(self.store):
bparts = backup_file.split(".")
if bparts[0].isdigit():
dumpdate = datetime.datetime.strptime(bparts[0], "%Y%m%d%H%M%S")
if dumpdate < cutdate:
os.remove(os.path.join(self.store, backup_file))
# get the current date and timestamp and the zero backup name
tstamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
dbs = self.get_databases()
skip = ["information_schema", "performance_schema", "test"]
for db in dbs:
if db in skip:
continue
dbbackup_name = string.join([tstamp, db, "sql"], ".")
dbbackup_path = self.store + os.sep + dbbackup_name
dump_cmd = "mysqldump -u " + self.user
if self.host != None:
dump_cmd += " -h " + "'" + self.host + "'"
if self.password != None:
dump_cmd += " -p" + self.password
dump_cmd += " -e --opt -c " + db + " | gzip > " + dbbackup_path + ".gz"
logging.info("Dump db, %s to %s." % (db, dbbackup_path))
os.popen(dump_cmd)
"""
Prints out the usage for the command line.
"""
def usage():
usage = ["mysqlbackup.py [-hkdbups]\n"]
usage.append(" [-h | --help] prints this help and usage message\n")
usage.append(" [-k | --keep] number of days to keep backups before deleting\n")
usage.append(" [-d | --databases] a comma separated list of databases\n")
usage.append(" [-t | --store] directory locally to store the backups\n")
usage.append(" [-u | --user] the database user\n")
usage.append(" [-p | --password] the database password\n")
usage.append(" [-s | --host] the database server hostname\n")
usage.append(" [-o | --options] the json file to load the options from instead of using command line\n")
usage.append(" [-r | --restore] enables restore mode\n")
message = string.join(usage)
print message
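# Example (an assumption, not taken from the original script) of an options file for the
# -o/--options flag: keys mirror the long option names and are merged with any flags
# given on the command line, command-line values taking precedence, e.g.
#
#   {"user": "root", "password": "secret", "store": "/var/backups/mysql", "keep": "30"}
#
# ./mysqlbackup.py -o /etc/mysqlbackup-options.json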
"""
Main method that starts up the backup.
"""
def main(argv):
# set the default values
pid_file = tempfile.gettempdir() + os.sep + "mysqlbackup.pid"
keep = 90
databases = None
user = None
password = None
host = None
store = None
options = None
restore = False
try:
# process the command line options
st = "hn:k:d:t:u:p:s:o:r"
lt = ["help", "keep=", "databases=", "store=", "user=", "password=",
"host=", "options=", "restore"]
opts, args = getopt.getopt(argv, st, lt)
# if no arguments print usage
if len(argv) == 0:
usage()
sys.exit()
# detect if loading options from file and load the json
vals = {}
fopts = None
for opt, arg in opts:
vals[opt] = arg
if ("-o" in vals.keys()) or ("--options" in vals.keys()):
opt = "-o" if "-o" in vals.keys() else "--options"
with open(vals[opt], 'r') as content_file:
fopts = json.load(content_file)
# merge with opts
opts_keys = map(lambda val: val[0], opts)
if fopts:
for key in fopts.keys():
prefix = ""
if key in st.split(":"):
prefix = "-"
elif key in map(lambda t: t[:-1] if t[-1] == "=" else t, lt):
prefix = "--"
else:
continue
if prefix+key not in opts_keys:
opts.append((prefix+key, fopts[key]))
# loop through all of the command line options and set the appropriate
# values, overriding defaults
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-k", "--keep"):
keep = int(arg)
elif opt in ("-d", "--databases"):
databases = arg
elif opt in ("-t", "--store"):
store = arg
elif opt in ("-u", "--user"):
user = arg
elif opt in ("-p", "--password"):
password = arg
elif opt in ("-s", "--host"):
host = arg
elif opt in ("-r", "--restore"):
restore = True
except getopt.GetoptError, msg:
logging.warning(msg)
# if an error happens print the usage and exit with an error
usage()
sys.exit(errno.EIO)
# check options are set correctly
if user == None or store == None:
logging.warning("Backup store directory (-t) and user (-u) are required")
usage()
sys.exit(errno.EPERM)
# process backup, catch any errors, and perform cleanup
try:
# another backup can't already be running, if pid file doesn't exist, then
# create it
if os.path.exists(pid_file):
logging.warning("Backup running, %s pid exists, exiting." % pid_file)
sys.exit(errno.EBUSY)
else:
pid = str(os.getpid())
f = open(pid_file, "w")
f.write("%s\n" % pid)
f.close()
# create the backup object and call its backup method
mysql_backup = MysqlBackup(keep, databases, store, user, password, host)
if restore:
mysql_backup.restore()
else:
mysql_backup.backup()
except(Exception):
logging.exception("Mysql backups failed.")
finally:
os.remove(pid_file)
# if we are running the script from the command line, run the main function
if __name__ == "__main__":
main(sys.argv[1:])
|
StarcoderdataPython
|
8055810
|
from scipy import optimize
import numpy as np
from matplotlib import pyplot as plt
import scipy.integrate as integrate
def curve(x, t):
period = 2 * np.pi / x[1]
if isinstance(t, float):
t = np.array((t,))
y = np.ndarray((t.shape[0],))
for i in range(t.shape[0]):
if t[i] < (period / 4):
y[i] = x[0] * np.sin(t[i] * x[1])
elif t[i] > (period / 4) and t[i] < (1 - (period / 4)):
y[i] = x[0]
elif t[i] > (1 - (period / 4)) and t[i] < (1 + (period / 4)):
y[i] = x[0] * np.sin((t[i] - 1 - period / 2) * x[1])
elif t[i] > (1 + (period / 4)) and t[i] < (2 - (period / 4)):
y[i] = - x[0]
else:
y[i] = x[0] * np.sin((t[i] - 2) * x[1])
return y
def curve_integral(x, t):
integ = np.ndarray((t.shape[0]-1,))
for i in range(t.shape[0]-1):
integ[i] = integrate.quad(lambda t: curve(x, t), 0, t[i+1])[0]
return integ
def generate_up_and_down_bow_target(ns, bow_speed=10, bow_acceleration=0.5):
def objective_function(x, t):
y = curve(x, t)
integ = curve_integral(x,t)
moy = (integ[int(n_points/2)-2] + integ[int(n_points/2)+2])/2
return np.array((bow_acceleration-x[0], bow_speed-x[1], y[-1], y[0], (moy-0.48)*1000))
n_points = 200
t = np.linspace(0, 2, n_points)
x_opt = optimize.least_squares(lambda x: objective_function(x, t), x0=np.array((1, 8))) # x[0] = amplitude et x[1]= 2 pi/ period
return t, x_opt.x
if __name__ == "__main__":
t, x = generate_up_and_down_bow_target(0)
n_points = 200
t = np.linspace(0, 2, n_points)
plt.plot(t, curve(x, t))
plt.plot(t[:-1], curve_integral(x, t), color="red")
n_points = 50
t = np.linspace(0, 2, n_points)
plt.plot(t, curve(x, t), 'k.')
plt.plot(t[:-1], curve_integral(x, t), '.m')
plt.show()
|
StarcoderdataPython
|
1978002
|
<reponame>OneGneissGuy/detrend-ec
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 6 12:38:52 2018
script to read in conductivity data and correct for drift due to evaporation
@author: jsaracen
"""
import numpy as np
import pandas as pd
from scipy.signal import detrend
input_data_file = 'sc1000_data.csv'
#read the csv file into a pandas dataframe
data = pd.read_csv(input_data_file, index_col=[0])
#set the index to be a datetime index for time series operations
data.index = pd.to_datetime(data.index)
experiment_time = data.index[0] - data.index[-1]
hours = int(abs(experiment_time.total_seconds()/3600))
data.index = np.arange(1,hours+2)
data.index.name = u'Hour of Experiment'
# linearly detrend the data for the effects of evaporation
detrended = data.apply(detrend, type='linear')
# save the starting initial conductivity values (no evaporation)
inital_values = data.iloc[0]
# Add the intial value
detrended_plus_initial = detrended + inital_values
#save the output file to the same location as the input data
detrended_plus_initial.to_csv(input_data_file.replace('.csv',
'_detrended.csv'))
#make some figures
ylab = u'Conductivity in microsiemens per centimeter (µS/cm)'
ax = data.plot.line(marker='o')
ax.set_ylabel(ylab)
ax.set_title('Raw')
ax = detrended_plus_initial.plot.line(marker='o')
ax.set_ylabel(ylab)
ax.set_title('Detrended')
|
StarcoderdataPython
|
11143
|
""" shell sort tests module """
import unittest
import random
from sort import shell
from tests import helper
class ShellSortTests(unittest.TestCase):
""" shell sort unit tests class """
max = 100
arr = []
def setUp(self):
""" setting up for the test """
self.arr = random.sample(range(self.max), self.max)
def test_null_input(self):
""" should raise when input array is None """
# arrange
inp = None
# act
with self.assertRaises(TypeError) as ex:
shell.sort(inp)
# assert
self.assertEqual("'NoneType' object is not iterable", str(ex.exception))
def test_empty_input(self):
""" should return [] when input array is empty """
# arrange
inp = []
# act
res = shell.sort(inp)
# assert
self.assertEqual(len(inp), len(res))
def test_sort_a_given_array(self):
""" should sort a given array """
# act
res = shell.sort(self.arr[:])
# assert
self.assertTrue(helper.is_sorted(res))
|
StarcoderdataPython
|
1811223
|
import json
from policy_storage import Policy_Storage
def validate_access_policies(resource_id, user_name):
mongo = Policy_Storage('mongodb')
data = mongo.get_policy_from_resource_id(str(resource_id))
operations = ['AND', 'OR']
if isinstance(data, list):
for i in range(0, len(data)):
if data[i]['config']['resource_id'] == resource_id:
for operation in operations:
if operation in data[i]['config']['rules'][0].keys():
exist_operation = operation
break
if len(data[i]['config']['rules'][0][exist_operation]) == 1:
if data[i]['config']['rules'][0][exist_operation][0]['EQUAL']['user_name'] == user_name:
return True
else:
for j in range(0, len(data[i]['config']['rules'][0][exist_operation])):
if data[i]['config']['rules'][0][exist_operation][j]['EQUAL']['user_name'] == user_name:
return True
return False
def validate_policy_language(policy):
operation = ['AND', 'OR', 'XOR', 'NOT', 'LESS', 'LESSEQUAL', 'GREATER', 'GREATEREQUAL', 'EQUAL']
for i in range(0, len(policy)):
keys_dict = []
for key in policy[i].keys():
keys_dict.append(key)
for key in keys_dict:
if key in operation:
if isinstance(policy[i][key], list):
result = validate_policy_language(policy[i][key])
if result == False:
return False
if key == "NOT" and isinstance(policy[i][key], dict):
for keys in policy[i][key]:
if keys in operation:
result = validate_policy_language(policy[i][key][keys])
if result == False:
return False
else:
return False
else:
return False
return True
def validate_complete_policies(resource_id, action, dict_request_values):
mongo = Policy_Storage('mongodb')
data = mongo.get_policy_from_resource_id(str(resource_id))
decisions = {}
if isinstance(data, list):
for i in range(0, len(data)):
try:
if data[i]['config']['resource_id'] == resource_id and data[i]['config']['action'] == action and "delegate" not in data[i]['config']:
result = validate_all_acces_policies(data[i]['config']['rules'], dict_request_values)
decisions[i] = [result, None]
elif "delegate" in data[i]['config']:
decisions[i] = [None, data[i]['config']['delegate']]
except KeyError:
decisions[i] = [False, None]
return decisions
def validate_all_acces_policies(data, dict_request_values):
policy = data
aux_list = []
aux_list_result = []
for i in range(0, len(policy)):
for key in policy[i].keys():
aux_list.append(key)
operations = []
values_operations = []
list_values = []
values_operations = []
if isinstance(policy[i][key], list):
for j in range(0, len(policy[i][key])):
for k, v in policy[i][key][j].items():
aux_dict = {}
aux_dict[k] = v
aux = []
aux = validate_multiples_conditions(aux_dict, dict_request_values)
values_operations = values_operations + aux
list_values = conditions_validator(key, values_operations)
aux_list_result = aux_list_result + list_values
else:
operations.append(key)
values_operations.append(policy[i][key])
list_values = validate_operations(operations, values_operations, dict_request_values)
aux_list_result = aux_list_result + list_values
permit_acces = True
for i in range(0, len(aux_list)):
if aux_list_result[i] is False:
permit_acces = False
return permit_acces
def validate_operations(operations, values_operations, dict_request_values):
list_values = []
for i in range(0, len(operations)):
for key in values_operations[i].keys():
if key in dict_request_values:
if operations[i] == 'LESS':
if dict_request_values[key] < values_operations[i][key]:
list_values.append(True)
else:
list_values.append(False)
elif operations[i] == 'LESSEQUAL':
if dict_request_values[key] <= values_operations[i][key]:
list_values.append(True)
else:
list_values.append(False)
elif operations[i] == 'GREATER':
if dict_request_values[key] > values_operations[i][key]:
list_values.append(True)
else:
list_values.append(False)
elif operations[i] == 'GREATEREQUAL':
if dict_request_values[key] >= values_operations[i][key]:
list_values.append(True)
else:
list_values.append(False)
elif operations[i] == 'EQUAL':
if key == 'emails' or key == 'groups':
valid = False
for row in range(0, len(dict_request_values[key])):
if dict_request_values[key][row]['value'] == values_operations[i][key][row]['value']:
valid = True
break
if valid is True:
list_values.append(True)
else:
list_values.append(False)
else:
if dict_request_values[key] == values_operations[i][key]:
list_values.append(True)
else:
list_values.append(False)
else:
a = False
for k in dict_request_values:
if 'User' in str(k):
try:
a = dict_request_values[k][key]
except KeyError:
pass
list_values.append(a)
return list_values
def validate_multiples_conditions(policy_row, dict_request_values):
operations = []
operations_conditions = []
values_operations = []
list_values = []
for key3 in policy_row.keys():
if ('AND' == key3) or ('NOT' == key3) or ('OR' == key3) or ('XOR' == key3):
operations_conditions.append(key3)
if isinstance(policy_row[key3], list):
operations = []
values_operations = []
list_values = []
for i in range(0, len(policy_row[key3])):
for key2 in policy_row[key3][i].keys():
if ('AND' != key2) and ('NOT' != key2) and ('OR' != key2) and ('XOR' != key2):
operations.append(key2)
values_operations.append(policy_row[key3][i][key2])
aux_list_result = validate_operations(operations, values_operations, dict_request_values)
list_values = conditions_validator(operations_conditions[0], aux_list_result)
else:
operations_conditions.append(key2)
list_values = validate_multiples_conditions(policy_row[key3][i][key2], dict_request_values)
return list_values
else:
list_values = []
for key in policy_row.keys():
if ('AND' != key) and ('NOT' != key) and ('OR' != key) and ('XOR' != key):
aux = []
operations.append(key)
values_operations.append(policy_row[key])
aux = validate_operations(operations, values_operations, dict_request_values)
list_values = list_values + aux
else:
aux_list_result = []
aux = []
aux_list_result = validate_multiples_conditions(policy_row[key], dict_request_values)
aux = conditions_validator(operations_conditions[0], aux_list_result)
list_values = list_values + aux
return list_values
def conditions_validator(key, values):
aux_list_result = []
if key == 'AND':
istrue = True
for k in range(0, len(values)):
if values[k] is False:
istrue = False
aux_list_result.append(istrue)
elif key == 'OR':
istrue = False
for k in range(0, len(values)):
if values[k] is True:
istrue = True
aux_list_result.append(istrue)
elif key == 'XOR':
count = 0
for k in range(0, len(values)):
if values[k] is True:
count = count + 1
if count == 1:
aux_list_result.append(True)
else:
aux_list_result.append(False)
elif key == 'NOT':
for k in range(0, len(values)):
if values[k] is True:
aux_list_result.append(False)
else:
aux_list_result.append(True)
return aux_list_result
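# Minimal sketch (an assumption, not part of the original module) of the nested rule
# layout that validate_policy_language accepts: boolean operators wrap lists of
# comparison clauses.
if __name__ == "__main__":
    example_rules = [{"OR": [{"EQUAL": {"user_name": "alice"}},
                             {"GREATER": {"clearance": 3}}]}]
    print(validate_policy_language(example_rules))  # expected output: True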
|
StarcoderdataPython
|
3458730
|
<gh_stars>100-1000
import torch
import os
from im2mesh.utils.io import save_mesh
import time
from im2mesh.utils.onet_generator import Generator3D as Generator3DONet
class Generator3D(object):
''' Generator class for Occupancy Networks 4D.
It provides functions to generate the final mesh as well refining options.
Args:
model (nn.Module): trained Occupancy Network model
device (device): pytorch device
points_batch_size (int): batch size for points evaluation
threshold (float): threshold value
refinement_step (int): number of refinement steps
resolution0 (int): start resolution for MISE
upsampling steps (int): number of upsampling steps
with_normals (bool): whether normals should be estimated
padding (float): how much padding should be used for MISE
sample (bool): whether z should be sampled
simplify_nfaces (int): number of faces the mesh should be simplified to
n_time_steps (int): number of time steps to generate
        only_end_time_points (bool): whether to only generate the end time points
'''
def __init__(self, model, device=None, points_batch_size=100000,
threshold=0.5, refinement_step=0,
resolution0=16, upsampling_steps=3,
with_normals=False, padding=0.1,
sample=False, simplify_nfaces=None, n_time_steps=17,
only_end_time_points=False, **kwargs):
self.n_time_steps = n_time_steps
self.only_end_time_points = only_end_time_points
self.onet_generator = Generator3DONet(
model, device=device,
points_batch_size=points_batch_size,
threshold=threshold, refinement_step=refinement_step,
resolution0=resolution0, upsampling_steps=upsampling_steps,
with_normals=with_normals, padding=padding,
sample=sample,
simplify_nfaces=simplify_nfaces)
def generate_mesh_t0(self, z=None, c_t=None, data=None, stats_dict={}):
''' Generates mesh at first time step.
Args:
z (tensor): latent code z
c_t (tensor): latent conditioned temporal code c_t
data (dict): data dictionary
stats_dict (dict): statistics dictionary
'''
t = torch.tensor([0.]).view(1, 1).to(self.onet_generator.device)
kwargs = {'t': t}
mesh = self.onet_generator.generate_from_latent(
z, c_t, stats_dict=stats_dict, **kwargs)
return mesh
def get_time_steps(self):
''' Return time steps values.
'''
n_steps = self.n_time_steps
device = self.onet_generator.device
if self.only_end_time_points:
t = torch.tensor([0., 1.]).to(device)
else:
t = (torch.arange(1, n_steps).float() / (n_steps - 1)).to(device)
return t
def generate_meshes_t(self, z=None, c_t=None, data=None, stats_dict={}):
''' Generates meshes at time steps > 0.
Args:
z (tensor): latent code z
c_t (tensor): latent conditioned temporal code c_t
data (dict): data dictionary
stats_dict (dict): statistics dictionary
'''
t = self.get_time_steps()
meshes = []
for i, t_v in enumerate(t):
kwargs = {'t': t_v.view(1, 1)}
stats_dict_i = {}
mesh = self.onet_generator.generate_from_latent(
z, c_t, stats_dict=stats_dict_i, **kwargs)
meshes.append(mesh)
for k, v in stats_dict_i.items():
stats_dict[k] += v
return meshes
def export_mesh(self, mesh, model_folder, modelname, start_idx=0, n_id=1):
''' Exports a mesh.
Args:
mesh(trimesh): mesh to export
model_folder (str): model folder
model_name (str): name of the model
start_idx (int): start id of sequence
n_id (int): number of mesh in the sequence (e.g. 1 -> start)
'''
out_path = os.path.join(
model_folder, '%s_%04d_%04d.off' % (modelname, start_idx, n_id))
save_mesh(mesh, out_path)
return out_path
def export_meshes_t(self, meshes, model_folder, modelname, start_idx=0,
start_id_seq=2):
''' Exports meshes.
Args:
meshes (list): list of meshes to export
model_folder (str): model folder
model_name (str): name of the model
start_idx (int): start id of sequence
start_id_seq (int): start number of first mesh in the sequence
'''
out_files = []
for i, m in enumerate(meshes):
out_file = self.export_mesh(
m, model_folder, modelname, start_idx, n_id=start_id_seq + i)
out_files.append(out_file)
return out_files
def export(self, meshes, mesh_dir, modelname, start_idx=0, start_id_seq=1):
''' Exports a list of meshes.
Args:
meshes (list): list of meshes to export
model_folder (str): model folder
model_name (str): name of the model
start_idx (int): start id of sequence
start_id_seq (int): start number of first mesh in the sequence
'''
model_folder = os.path.join(mesh_dir, modelname, '%05d' % start_idx)
if not os.path.isdir(model_folder):
os.makedirs(model_folder)
return self.export_meshes_t(
meshes, model_folder, modelname, start_idx=0, start_id_seq=1)
def generate(self, data, return_stats=True, **kwargs):
''' Generates meshes for input data.
Args:
data (dict): data dictionary
return_stats (bool): whether to return statistics
'''
self.onet_generator.model.eval()
stats_dict = {}
device = self.onet_generator.device
inputs = data.get('inputs', torch.empty(1, 0)).to(device)
meshes = []
with torch.no_grad():
t0 = time.time()
c_t = self.onet_generator.model.encode_inputs(inputs)
# Only for testing
z = self.onet_generator.model.get_z_from_prior(
(1,), sample=self.onet_generator.sample).to(device)
stats_dict['time (encode inputs)'] = time.time() - t0
# Generate and save first mesh
mesh_t0 = self.generate_mesh_t0(
z, c_t, data, stats_dict=stats_dict)
meshes.append(mesh_t0)
# Generate and save later time steps
meshes_t = self.generate_meshes_t(
z=z, c_t=c_t, data=data, stats_dict=stats_dict)
meshes.extend(meshes_t)
return meshes, stats_dict
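# Minimal usage sketch (an assumption, not part of the original module): wrap a trained
# Occupancy Network 4D model and export the generated mesh sequence for one batch.
# generator = Generator3D(model, device=device, n_time_steps=17)
# meshes, stats_dict = generator.generate(batch)
# generator.export(meshes, mesh_dir='out/meshes', modelname='sample', start_idx=0)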
|
StarcoderdataPython
|
11221860
|
<gh_stars>0
# -*- coding: utf-8 -*-
import re
"""
Created on Tue Jun 2 11:46:18 2020
class format city
@author: Rizilip
"""
## landmarks
# mansion tours
def formatString(f,m):
##
string = f.strip()
name = ""
method = m
match = re.findall("[\w+\s+ ..\w+\s+]+", string)
match = str(match)
if method == "indeedCity":
for character in match:
if character.isalpha() :
name += character
if character == ", ":
name += ",%2C+"
if character == " ":
name += "+"
if method == "indeedTask":
for character in match:
if character.isalpha() or character.isnumeric():
name += character
if character == " ":
name += "+"
if character == "\+":
name += character
if character == " ":
pass# name += "+"
return name
""" if method == 1:
for character in match:
if character.isalpha() :
name += character
if character == ", ":
name += "-"
if character == " ":
name += "-"
if method == 2:
for character in match:
if character.isalpha() :
name += character
if character == ", ":
# name += "+"
pass
if character == " ":
name += "+"
if method == 3:
for character in match:
if character.isalpha() :
name += character
if character == ",":
name += ", "
if character == " ":
name += character
name = "{:>20s}".format(name)
if method == 4:
for character in match:
if character.isalpha() or character == " ":
name += character
name = str(name)
chunks = name.split()
name = chunks[-1] + "/"
name += chunks[0]
if chunks[1] != chunks[-1]:
name+="_" + chunks[1]
if method == "indeed":
for character in match:
if character.isalpha() :
name += character
if character == ", ":
name += "%2C+"
pass
if character == " ":
name += "+"
"""
#Sacramento%2C+CA
|
StarcoderdataPython
|
3560116
|
<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Read the information from the Excel workbooks and write it into JSON files
from collections import OrderedDict
from pyexcel_xls import get_data
from pyexcel_xls import save_data
import json
map_filename='editor\\地图信息.xlsx'
current_sheet=1
while True:
xls_data=get_data(map_filename)
messages=xls_data[str(current_sheet)]
save_filename='informations\\maps\\'+str(current_sheet)+'.json'
with open(save_filename,'w') as f:
json.dump(messages,f)
current_sheet+=1
if current_sheet==7:
break
monster_filename='editor\怪物信息.xlsx'
current_sheet=1
while True:
xls_data=get_data(monster_filename)
messages=xls_data[str(current_sheet)]
del messages[0]
save_filename='informations\\monsters\\'+str(current_sheet)+'.json'
with open(save_filename,'w') as f:
json.dump(messages,f)
current_sheet+=1
if current_sheet==7:
break
print('finished')
|
StarcoderdataPython
|
1918427
|
import os
import shutil
from cement.utils import fs
from cement.utils.misc import rando as _rando
import pytest
@pytest.fixture(scope="function")
def tmp(request):
t = fs.Tmp()
yield t
# cleanup
if os.path.exists(t.dir) and t.cleanup is True:
shutil.rmtree(t.dir)
@pytest.fixture(scope="function")
def key(request):
yield _rando()
@pytest.fixture(scope="function")
def rando(request):
yield _rando()
|
StarcoderdataPython
|
5142423
|
import pandas as pd
import numpy as np
from rdkit import Chem
from scipy import stats
import pubchempy as pcp
df = pd.read_excel("../2_bbb_all_complete_CID_out_smiles_fixed_updated.xlsx")
df = df[~df["logBB"].isna()]
df["logBB"] = df["logBB"].astype(float)
# remove molecules with logBB <= -9
df = df[df["logBB"] > -9]
# a dictionary to host inchi keys and isomeric smiles
for idx, row in df.iterrows():
mol = Chem.MolFromSmiles(row["smiles_fixed_rdkit"])
df.loc[idx, "Inchi"] = Chem.inchi.MolToInchi(mol)
df.to_excel("regression_inchi.xlsx", index=None, engine="openpyxl")
df = pd.read_excel("regression_inchi.xlsx")
# generate a dictionary to host all the inchi and isomeric smiles (or canonical smiles if isomeric smiles is not available)
def append_value(dict_obj, key, value):
if key in dict_obj:
if not isinstance(dict_obj[key], list):
dict_obj[key] = [dict_obj[key]]
dict_obj[key].append(value)
else:
dict_obj[key] = value
return dict_obj
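# e.g. append_value({"a": 1}, "a", 2) -> {"a": [1, 2]}, while append_value({}, "b", 3) -> {"b": 3}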
inchi_smi_dict = {inchi:[] for inchi in df["Inchi"].to_list()}
for idx, row in df.iterrows():
inchi_smi_dict = append_value(inchi_smi_dict, row["Inchi"], row["smiles_fixed_rdkit"])
# examine how many InChI keys have more than one isomeric SMILES
counter = 0
for key, value in inchi_smi_dict.items():
if len(value) >= 2:
counter += 1
print(counter)
# keep only non-redundant isomeric SMILES in inchi_smi_dict
# manually inspect InChIs with more than one non-redundant SMILES
inchi_smi_dict = {inchi: set(smi) for inchi, smi in inchi_smi_dict.items()}
counter = 0
for key, value in inchi_smi_dict.items():
if len(value) >= 2:
print(key, value)
# the same InChI may map to more than one SMILES string, 12 in total
# but they are just resonance structures, so use InChI as the identifier
###########################################################################
df = pd.read_excel("regression_inchi.xlsx")
# smiles fixing with 02_clean_smiles_chembl_way_20210214.py
#########################################################################
df_unique = df.drop_duplicates(subset="Inchi", keep="first").reset_index(drop=True)
# df_duplicated = df.drop_duplicates(subset="Inchi", keep=False).reset_index(drop=True)
# df_unique["logBB"] = [[] for _ in np.arange(df_unique.shape[0])]
df_unique["logBB"] = ""
df_unique["compound_name"] = ""
df_unique["CID"] = ""
df_unique["new_name"] = ""
df_unique["iupac_name"] = ""
df_unique["reference"] = ""
df_unique["NO."] = ""
df["logBB"] = df["logBB"].astype(float)
# append compound_name, CID, logBB, new_name, iupac_name to the df_unique
# for idx_unique, row_unique in df_unique.iterrows():
# for idx, row in df.iterrows():
# if row["Inchi"] == row_unique["Inchi"]:
# # logBB
# df_unique.loc[idx_unique, "logBB"] = df_unique.loc[idx_unique, "logBB"] + "|" + str(row["logBB"])
# # compound_name
# df_unique.loc[idx_unique, "compound_name"] = df_unique.loc[idx_unique, "compound_name"] + "|" + str(row["compound_name"])
# # CID
# df_unique.loc[idx_unique, "CID"] = df_unique.loc[idx_unique, "CID"] + "|" + str(row["CID"])
# # new_name
# df_unique.loc[idx_unique, "new_name"] = df_unique.loc[idx_unique, "new_name"] + "|" + str(row["new_name"])
# # iupac_name
# df_unique.loc[idx_unique, "iupac_name"] = df_unique.loc[idx_unique, "iupac_name"] + "|" + str(row["iupac_name"])
# df_unique.to_excel("tmp.xlsx", index=None, engine="openpyxl")
# a more efficient way
for idx_unique, row_unique in df_unique.iterrows():
inchi_unique = row_unique["Inchi"]
df_inchi_matching = df[df["Inchi"] == inchi_unique].reset_index(drop=True)
for _, row_matching in df_inchi_matching.iterrows():
# logBB
# df_unique.loc[idx_unique, "logBB"] = df_unique.loc[idx_unique, "logBB"] + str(row_matching["logBB"]) + "|"
df_unique.loc[idx_unique, "logBB"] = df_unique.loc[idx_unique, "logBB"] + str(round(row_matching["logBB"], 2)) + "|"
# compound_name
df_unique.loc[idx_unique, "compound_name"] = df_unique.loc[idx_unique, "compound_name"] + str(row_matching["compound_name"]) + "|"
# CID
df_unique.loc[idx_unique, "CID"] = df_unique.loc[idx_unique, "CID"] + str(row_matching["CID"]) + "|"
# new_name
df_unique.loc[idx_unique, "new_name"] = df_unique.loc[idx_unique, "new_name"] + str(row_matching["new_name"]) + "|"
# iupac_name
df_unique.loc[idx_unique, "iupac_name"] = df_unique.loc[idx_unique, "iupac_name"] + str(row_matching["iupac_name"]) + "|"
# reference
df_unique.loc[idx_unique, "reference"] = df_unique.loc[idx_unique, "reference"] + str(row_matching["reference"]) + "|"
# original NO.
df_unique.loc[idx_unique, "NO."] = df_unique.loc[idx_unique, "NO."] + str(row_matching["NO."]) + "|"
df_unique.to_excel("regression_logBB_combined.xlsx", index=None, engine="openpyxl")
##################################################
# preprocess logBB data
from copy import deepcopy
df = pd.read_excel("regression_logBB_combined.xlsx")
# df_bak = deepcopy(df)
# filter out molecules where max(logBB) - min(logBB) > 1
counter = 0
for idx, row in df.iterrows():
logBB_values = [float(logBB) for logBB in row["logBB"].strip("|").split("|")]
if max(logBB_values) - min(logBB_values) > 1:
counter += 1
df.loc[idx, "logBB"] = np.nan
df = df.dropna(subset=["logBB"]).reset_index(drop=True)
df["std"] = np.nan
df["group"] = ""
for idx, row in df.iterrows():
    # round logBB values to the fewest decimal places reported, since that is the precision most of the data holds
logBB_values = [logBB for logBB in row["logBB"].strip("|").split("|")]
# find the minimum decimal places
decimal_places = min([logBB[::-1].find('.') for logBB in logBB_values])
logBB_values = [round(float(logBB), decimal_places) for logBB in logBB_values]
# set logBB values if there is only one
if len(logBB_values) == 1:
df.loc[idx, "logBB"] = logBB_values[0]
df.loc[idx, "group"] = "A"
df.loc[idx, "std"] = 0
else:
mean_logBB = np.multiply(np.ones(len(logBB_values)),
np.average(logBB_values))
mean_logBB = np.around(mean_logBB, decimals=decimal_places)
# set logBB values if all the values are the same or within 5% difference
if np.allclose(np.array(logBB_values), mean_logBB, atol=0, rtol=0.05):
df.loc[idx, "logBB"] = mean_logBB[0]
df.loc[idx, "group"] = "B"
df.loc[idx, "std"] = np.std(logBB_values)
else:
# if less than 3 values, use average value
if len(logBB_values) < 3:
df.loc[idx, "logBB"] = mean_logBB[0]
df.loc[idx, "group"] = "C"
df.loc[idx, "std"] = np.std(logBB_values)
# if more than 3 values, use mode
else:
                # not using stats.mode() because it cannot handle the situation when two mode values are available
# stats.mode(logBB_values)[0]
values, counts = np.unique(logBB_values, return_counts=True)
sorted_idx = np.argsort(counts)[::-1]
values_sorted = values[sorted_idx]
counts_sorted = counts[sorted_idx]
# when there is only one number of maximum counts
if counts_sorted[0] > counts_sorted[1]:
df.loc[idx, "logBB"] = values_sorted[0]
df.loc[idx, "group"] = "D"
df.loc[idx, "std"] = np.std(logBB_values)
# when there are more than one maximum counts, they are equal
else:
# more than 3 unique values
if len(values_sorted) >= 3:
# when there are two mode numbers
                        # counts_sorted[0] == counts_sorted[1] necessarily holds here because counts_sorted is in descending order
                        # the first 3 counts are the same
if counts_sorted[1] == counts_sorted[2]:
df.loc[idx, "logBB"] = sum(values_sorted[:3]) / 3
df.loc[idx, "group"] = "dropped_E"
df.loc[idx, "std"] = np.std(logBB_values)
# the first 2 counts are the same
else:
df.loc[idx, "logBB"] = sum(values_sorted[:2]) / 2
df.loc[idx, "group"] = "dropped_F"
df.loc[idx, "std"] = np.std(logBB_values)
                        # as counts_sorted is in descending order, counts_sorted[0] will not be less than counts_sorted[1]
# counts_sorted[0] == counts_sorted[1] and counts_sorted[0] == counts_sorted[2]
# when there are two unique count values
else:
# these two unique values are the same
if counts_sorted[0] == counts_sorted[1]:
df.loc[idx, "logBB"] = mean_logBB[0]
df.loc[idx, "group"] = "dropped_G"
df.loc[idx, "std"] = np.std(logBB_values)
# the first one is greater than the second one
else:
df.loc[idx, "logBB"] = values_sorted[0]
df.loc[idx, "group"] = "dropped_H"
df.loc[idx, "std"] = np.std(logBB_values)
#iupac name
for idx, row in df.iterrows():
iupac_names = [name.lower() for name in row["iupac_name"].strip("|").split("|")
if name != "nan" if not name.isdigit() if len(name) != 1]
if len(iupac_names) >= 1:
df.loc[idx, "iupac_name"] = iupac_names[0].lstrip()
else:
df.loc[idx, "iupac_name"] = ""
# deal with compound_name, new_name
df["new_compound_name"] = ""
for idx, row in df.iterrows():
# new_compound_name
compound_names = [name.lower() for name in row["compound_name"].strip("|").split("|")
if name != "nan" if not name.isdigit() if len(name) != 1]
new_names = [name.lower() for name in row["new_name"].strip("|").split("|")
if name != "nan" if not name.isdigit() if len(name) != 1]
# these names found in pubchem come first
names = list(set(new_names + compound_names))
# when compound_names list is not empty
if names != []:
df.loc[idx, "new_compound_name"] = names[0].lstrip()
else:
df.loc[idx, "new_compound_name"] = row["iupac_name"]
# deal with CID
# for idx, row in df.iterrows():
# cids = list(set([int(float(cid)) for cid in row["CID"].strip("|").split("|") if cid != "nan"]))
# if len(cids) != 0:
# df.loc[idx, "CID"] = cids[0]
# else:
# df.loc[idx, "CID"] = ""
# deal with smiles and CID
# df["smiles_fixed_rdkit"] = df["smiles_fixed_rdkit"].astype(str)
# df["CID"] = df["CID"].astype(str)
# for idx, row in df.iterrows():
# # smiles_list = [smi.lower() for smi in row["smiles_fixed_rdkit"].strip("|").split("|")
# # if smi != "nan" if not smi.isdigit() if len(smi) != 1]
# smiles_list = [smi for smi in row["smiles_fixed_rdkit"].strip("|").split("|")
# if smi != "nan" if not smi.isdigit()]
# smiles_list = list(set(smiles_list))
# cids = list(set([int(float(cid)) for cid in row["CID"].strip("|").split("|") if cid != "nan"]))
# if len(smiles_list) >= 1:
# # df.loc[idx, "smiles_fixed_rdkit"] = smiles_list[0].lstrip()
# # get new CID from the smiles if CID is none
# # else: use old CID
# if len(cids) == 0:
# ## try to get CID until using up the smiles
# # flag to indicate if we found new CID and smiles
# flag = False
# for smi in smiles_list:
# try:
# # because can get an error with
# # O=[SH](O)(c1ccc2cc[nH]c2c1)N1CCCC1CCN1CCC(Oc2cccc(Cl)c2)CC1
# compound = pcp.get_compounds(identifier=smi, namespace="smiles")
# cid_new = compound[0].cid
# if cid_new is not None:
# flag = True
# break
# except:
# print("error found when searching pubchem")
# if flag is True:
# df.loc[idx, "smiles_fixed_rdkit"] = smi
# df.loc[idx, "CID"] = cid_new
# else:
# df.loc[idx, "smiles_fixed_rdkit"] = smiles_list[0]
# df.loc[idx, "CID"] = ""
# else:
# # use old CIDs
# df.loc[idx, "smiles_fixed_rdkit"] = smiles_list[0]
# if len(cids) >= 1:
# df.loc[idx, "CID"] = cids[0]
# else:
# df.loc[idx, "CID"] = ""
###########################################################
df["CID"] = df["CID"].fillna("")
df["CID"] = df["CID"].astype(str)
# deal with CID
df["CID"] = df["CID"].astype(str)
for idx, row in df.iterrows():
# no need to deal with CID for regression data again
if pd.isnull(row["logBB"]):
cids = list(set([int(float(cid)) for cid in row["CID"].strip("|").split("|") if cid != "nan"]))
if len(cids) != 0:
df.loc[idx, "CID"] = cids[0]
else:
df.loc[idx, "CID"] = ""
# deal with SMILES
df["smiles_fixed_rdkit"] = df["smiles_fixed_rdkit"].astype(str)
for idx, row in df.iterrows():
smi_strings = list(set([smi for smi in row["smiles_fixed_rdkit"].strip("|").split("|") if smi != "nan"]))
    if len(smi_strings) != 0:
df.loc[idx, "smiles_fixed_rdkit"] = smi_strings[0]
else:
df.loc[idx, "smiles_fixed_rdkit"] = ""
df = df.sort_values(by=["group", "logBB"])
df.to_excel("regression_clean_done.xlsx", index=None, engine="openpyxl")
# clean the data manually
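# Added illustration (not part of the original pipeline): the tie-aware "mode"
# selection used above, isolated as a helper. stats.mode() reports a single value
# even when several values share the highest count, which is why np.unique with
# return_counts is used instead. The helper name pick_mode is hypothetical.
def pick_mode(values):
    vals, counts = np.unique(values, return_counts=True)
    order = np.argsort(counts)[::-1]
    vals_sorted, counts_sorted = vals[order], counts[order]
    # a single clear winner: its count strictly exceeds the runner-up's
    if len(counts_sorted) == 1 or counts_sorted[0] > counts_sorted[1]:
        return vals_sorted[0]
    # otherwise return every tied value so the caller can average or drop the row
    return list(vals_sorted[counts_sorted == counts_sorted[0]])
# e.g. pick_mode([0.1, 0.1, 0.3]) -> 0.1, while pick_mode([0.1, 0.1, 0.3, 0.3]) returns both tied values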
|
StarcoderdataPython
|
5042328
|
from typing import List
import autofit as af
import autogalaxy as ag
from autogalaxy.aggregator.abstract import AbstractAgg
from autolens.lens.ray_tracing import Tracer
def _tracer_from(fit: af.Fit, galaxies: List[ag.Galaxy]) -> Tracer:
"""
Returns a `Tracer` object from a PyAutoFit database `Fit` object and an instance of galaxies from a non-linear
search model-fit.
This function adds the `hyper_model_image` and `hyper_galaxy_image_path_dict` to the galaxies before constructing
the `Tracer`, if they were used.
Parameters
----------
fit
A PyAutoFit database Fit object containing the generators of the results of PyAutoGalaxy model-fits.
galaxies
A list of galaxies corresponding to a sample of a non-linear search and model-fit.
Returns
-------
Tracer
The tracer computed via an instance of galaxies.
"""
hyper_model_image = fit.value(name="hyper_model_image")
hyper_galaxy_image_path_dict = fit.value(name="hyper_galaxy_image_path_dict")
galaxies_with_hyper = []
if hyper_galaxy_image_path_dict is not None:
galaxy_path_list = [
gal[0] for gal in fit.instance.path_instance_tuples_for_class(ag.Galaxy)
]
for (galaxy_path, galaxy) in zip(galaxy_path_list, galaxies):
if galaxy_path in hyper_galaxy_image_path_dict:
galaxy.hyper_model_image = hyper_model_image
galaxy.hyper_galaxy_image = hyper_galaxy_image_path_dict[galaxy_path]
galaxies_with_hyper.append(galaxy)
return Tracer.from_galaxies(galaxies=galaxies_with_hyper)
return Tracer.from_galaxies(galaxies=galaxies)
class TracerAgg(AbstractAgg):
"""
Wraps a PyAutoFit aggregator in order to create generators of tracers corresponding to the results of a non-linear
search model-fit.
"""
def make_object_for_gen(self, fit, galaxies) -> Tracer:
"""
Creates a `Tracer` object from a `ModelInstance` that contains the galaxies of a sample from a non-linear
search.
Parameters
----------
fit
A PyAutoFit database Fit object containing the generators of the results of PyAutoGalaxy model-fits.
galaxies
A list of galaxies corresponding to a sample of a non-linear search and model-fit.
Returns
-------
Tracer
A tracer whose galaxies are a sample of a PyAutoFit non-linear search.
"""
return _tracer_from(fit=fit, galaxies=galaxies)
|
StarcoderdataPython
|
3592514
|
<reponame>zhouyijiaren/commons<filename>src/python/twitter/common/log/tracer.py
# ==================================================================================================
# Copyright 2012 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
from contextlib import contextmanager
import os
import sys
import threading
import time
__all__ = ('Tracer',)
class Trace(object):
__slots__ = ('msg', 'verbosity', 'parent', 'children', '_clock', '_start', '_stop')
def __init__(self, msg, parent=None, verbosity=1, clock=time):
self.msg = msg
self.verbosity = verbosity
self.parent = parent
if parent is not None:
parent.children.append(self)
self.children = []
self._clock = clock
self._start = self._clock.time()
self._stop = None
def stop(self):
self._stop = self._clock.time()
def duration(self):
assert self._stop is not None
return self._stop - self._start
class Tracer(object):
"""
A multi-threaded tracer.
"""
@classmethod
def env_filter(cls, env_variable):
def predicate(verbosity):
try:
env_verbosity = int(os.environ.get(env_variable, -1))
except ValueError:
env_verbosity = -1
return verbosity <= env_verbosity
return predicate
def __init__(self, predicate=None, output=sys.stderr, clock=time):
"""
If predicate specified, it should take a "verbosity" integer and determine whether
or not to log, e.g.
def predicate(verbosity):
try:
return verbosity < int(os.environ.get('APP_VERBOSITY', 0))
except ValueError:
return False
output defaults to sys.stderr, but can take any file-like object.
"""
self._predicate = predicate or (lambda verbosity: True)
self._length = None
self._output = output
self._isatty = getattr(output, 'isatty', False) and output.isatty()
self._lock = threading.RLock()
self._local = threading.local()
self._clock = clock
def should_log(self, V):
return self._predicate(V)
def log(self, msg, V=0, end='\n'):
if not self.should_log(V):
return
if not self._isatty and end == '\r':
# force newlines if we're not a tty
end = '\n'
trailing_whitespace = ''
with self._lock:
if self._length and self._length > len(msg):
trailing_whitespace = ' ' * (self._length - len(msg))
self._output.write(msg + trailing_whitespace + end)
self._output.flush()
self._length = len(msg) if end == '\r' else 0
def print_trace_snippet(self):
parent = self._local.parent
parent_verbosity = parent.verbosity
if not self.should_log(parent_verbosity):
return
traces = []
while parent:
if self.should_log(parent.verbosity):
traces.append(parent.msg)
parent = parent.parent
self.log(' :: '.join(reversed(traces)), V=parent_verbosity, end='\r')
def print_trace(self, indent=0, node=None):
node = node or self._local.parent
with self._lock:
self.log(' ' * indent + ('%s: %.1fms' % (node.msg, 1000.0 * node.duration())),
V=node.verbosity)
for child in node.children:
self.print_trace(indent=indent + 2, node=child)
@contextmanager
def timed(self, msg, V=0):
if getattr(self._local, 'parent', None) is None:
self._local.parent = Trace(msg, verbosity=V, clock=self._clock)
else:
parent = self._local.parent
self._local.parent = Trace(msg, parent=parent, verbosity=V, clock=self._clock)
self.print_trace_snippet()
yield
self._local.parent.stop()
if self._local.parent.parent is not None:
self._local.parent = self._local.parent.parent
else:
self.print_trace()
self._local.parent = None
def main(args):
import random
tracer = Tracer(output=open(args[0], 'w')) if len(args) > 0 else Tracer()
def process(name):
with tracer.timed(name):
with tracer.timed('acquiring'):
with tracer.timed('downloading'):
time.sleep(3 * random.random())
if random.random() > 0.66:
tracer.log('%s failed downloading!' % name)
return
with tracer.timed('unpacking'):
time.sleep(1 * random.random())
with tracer.timed('configuring'):
time.sleep(0.5 * random.random())
with tracer.timed('building'):
time.sleep(5.0 * random.random())
if random.random() > 0.66:
tracer.log('%s failed building!' % name)
return
with tracer.timed('installing'):
time.sleep(2.0 * random.random())
workers = [threading.Thread(target=process, args=('worker %d' % k,)) for k in range(5)]
for worker in workers:
worker.start()
for worker in workers:
worker.join()
if __name__ == '__main__':
main(sys.argv[1:])
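# Added usage sketch (not part of the original module): wiring Tracer.env_filter
# so verbosity is driven by an environment variable. APP_VERBOSITY and
# demo_env_filtered_tracer are assumed names, used only for illustration.
def demo_env_filtered_tracer():
    tracer = Tracer(predicate=Tracer.env_filter('APP_VERBOSITY'))
    # traces with V <= APP_VERBOSITY are printed; others are silently skipped
    with tracer.timed('demo step', V=1):
        time.sleep(0.1)
    tracer.log('demo done', V=1)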
|
StarcoderdataPython
|