max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---|
uoftscrapers/scrapers/buildings/__init__.py | kshvmdn/uoft-scrapers | 49 | 12797151 | <filename>uoftscrapers/scrapers/buildings/__init__.py
from ..utils import Scraper, LayersScraper
from bs4 import BeautifulSoup
from collections import OrderedDict
from decimal import *
import os
import re
class Buildings:
"""A scraper for UofT's buildings.
UofT Map is located at http://map.utoronto.ca/.
"""
host = 'http://map.utoronto.ca/'
campuses = ['utsg', 'utm', 'utsc']
@staticmethod
def scrape(location='.'):
"""Update the local JSON files for this scraper."""
Scraper.logger.info('Buildings initialized.')
for campus in Buildings.campuses:
data = Buildings.get_map_json(campus)
regions = Buildings.get_regions_json(campus)['buildings']
for building in data['buildings']:
_id = building['id']
code = building['code']
name = building['title']
short_name = LayersScraper.get_value(building, 'short_name')
lat = LayersScraper.get_value(building, 'lat', True)
lng = LayersScraper.get_value(building, 'lng', True)
street = ' '.join(filter(None,
LayersScraper.get_value(building, 'street').split(' ')))
city = LayersScraper.get_value(building, 'city')
province = LayersScraper.get_value(building, 'province')
country = LayersScraper.get_value(building, 'country')
postal = LayersScraper.get_value(building, 'postal')
polygon = []
for region in regions:
if region['id'] == _id:
lat_lng = region['center_point']
if lat_lng:
lat_lng = lat_lng[1:-2].split(', ')
if len(lat_lng) == 2:
lat = float(lat_lng[0])
lng = float(lat_lng[1])
polygon = region['points']
doc = OrderedDict([
('id', _id),
('code', code),
('name', name),
('short_name', short_name),
('campus', campus.upper()),
('address', OrderedDict([
('street', street),
('city', city),
('province', province),
('country', country),
('postal', postal)
])),
('lat', lat),
('lng', lng),
('polygon', polygon)
])
Scraper.save_json(doc, location, _id)
Scraper.logger.info('Buildings completed.')
@staticmethod
def get_map_json(campus):
"""Retrieve the JSON structure from host."""
Scraper.logger.info('Scraping %s.' % campus.upper())
Scraper.get(Buildings.host)
headers = {'Referer': Buildings.host}
data = Scraper.get('%s%s%s' % (
Buildings.host,
'data/map/',
campus
), headers=headers, json=True)
return data
@staticmethod
def get_regions_json(campus):
"""Retrieve the JSON structure from host."""
Scraper.get(Buildings.host)
headers = {'Referer': Buildings.host}
data = Scraper.get('%s%s%s' % (
Buildings.host,
'data/regions/',
campus
), headers=headers, json=True)
return data
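A minimal usage sketch for the scraper above (hypothetical output path; it assumes the package layout shown in the filename and that `Scraper.save_json` writes one JSON file per building id):

```python
# Hypothetical invocation: scrape UTSG, UTM and UTSC buildings into ./output.
from uoftscrapers.scrapers.buildings import Buildings

Buildings.scrape(location='./output')  # one <building id>.json per building via Scraper.save_json
```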
| 3.046875 | 3 |
character/models/__init__.py | SamusChief/myth-caster-api | 0 | 12797152 | <gh_stars>0
""" Models for Characters """
from .ancestry import Ancestry, SubAncestry
from .background import Background
from .character import Character, ClassAndLevel, InventoryAdventuringGear, \
InventoryArmor, InventoryTool, InventoryWeapon, InventoryWondrousItem, SkillProficiency
from .character_class import CharacterClass, Archetype, FeaturesAtLevel, \
SpellsKnownAtLevel, SpellSlotsAtLevel
from .feature import Feature
| 1.023438 | 1 |
i3-wm/myScript/files/get-schedule-text.py | Mirasire/dotfiles | 5 | 12797153 | <reponame>Mirasire/dotfiles
#!/bin/python
# feature: html -> text
# regex
import re
#import BeautifulSoup
from bs4 import BeautifulSoup
def sigleOrDoubel(str0):
num=2;
if str0.find('单')!=-1:
num=1
elif str0.find('每')!=-1:
num=0
return num
if __name__ == "__main__":
#dealwith the printf
soup = BeautifulSoup(open("schedule.html","r",encoding='GBK'))
#print(soup.prettify())
tbody=soup.find_all("tbody")
all_tr=tbody[0].find_all('tr')
class_list=[[] for i in range(10)]
for tr in all_tr[0:len(all_tr)-1]:
all_td=tr.find_all('td')
num=0
for td in all_td:
info=td.contents;
if num==0:
timeinfo=info[2].split('-')
if tr['class'][0]=='odd':
for i in range(1,8):
if len(class_list[i])!=0 and class_list[i][-1][0]==1:
class_list[i][-1][0]=0
class_list[i][-1][1][1]=timeinfo[1]
elif len(info)==7:
#0=1-16, 1=single(1-16), 2=double(1-16)
#[flag,time_info,class_name,class_location,week_range,SigleorDouble]
class_list[num].append([1,timeinfo,info[0][0:info[0].find('(',1)],info[4],re.match(r'[0-9]-[0-9][0-9]',info[6]).group(),sigleOrDoubel(info[6])])
num+=1
print("class_info=[",end='')
for i in class_list:
if len(i)>=1:
print(i,('' if(i==class_list[5]) else ','),end='')
num+=1
print("]")
| 2.84375 | 3 |
src/tools/ncbi/ncbi_entrez_download_fasta.py | uct-cbio/galaxy-tools | 0 | 12797154 | <gh_stars>0
#!/usr/bin/python
# Retrieve FASTA sequence from NCBI based on an Entrez query.
# to start a new download
# entrez_download_fasta.py -m <EMAIL> -d nucleotide -e "Cypripedioideae[Orgn] AND matK[Gene]" -f out.fasta -i download_info.txt
# to continue a previous download
# entrez_download_fasta.py -m <EMAIL> -d nucleotide -c -w "NCID_1_38065753_192.168.127.12_9001_1300878409_78339627" -q 1 -s 11 -n 38 -f out_continue.fasta -i info_continue.txt
import sys
import os
import string
import re
from optparse import OptionParser
from Bio import Entrez
def main():
usage = "usage: %prog -m EMAIL -d DATABASE -e ENTREZ_QUERY -c -w WEB_ENV -q QUERY_KEY -s START_POS -n NR_SEQ_QUERY -f FASTA_FILE -i INFO_FILE"
parser = OptionParser(usage=usage)
parser.add_option("-m", "--email", dest="email", help="Email address. Need to provide this when doing an Entrez search and fetch")
parser.add_option("-d", "--database", dest="database", help="Database e.g. nucleotide")
parser.add_option("-e", "--entrez_query", dest="entrez_query", help="Entrez query e.g. \"Bacteria\"[Organism] OR \"Archaea\"[Organism] OR prokaryotes[All Fields] not \"Escherichia coli\"[Organism]")
parser.add_option("-c", "--continue_download", action="store_true", dest="continue_download", default=False, help="If flag is specified program will continue a previous download. User need to provide the WEB_ENV, QUERY_KEY and SEQ_START")
parser.add_option("-w", "--web_env", dest="web_env", help="Please provide the previous web_env.")
parser.add_option("-q", "--query_key", dest="query_key", help="Please provide the previous query_key.")
parser.add_option("-s", "--start_pos", dest="start_pos", help="Please provide position of the sequence to start downloading from. E.g. the position where the previous download failed.")
parser.add_option("-n", "--nr_seq_query", dest="nr_seq_query", help="Please provide the number of sequences found in the original query.")
parser.add_option("-f", "--fasta_file", dest="fasta_file", help="FASTA output file")
parser.add_option("-i", "--info_file", dest="info_file", help="Information related to the download. Contains the web_env and query_key for the search.")
(options, args) = parser.parse_args()
if not options.continue_download:
print "Start a new download..."
if not options.email:
print "Please specify an email address. Need to provide this when doing an Entrez search or fetch (-m EMAIL)"
return - 1
if not options.database:
print "Please specify the database to fetch info from (-d DATABASE)"
return - 2
if not options.entrez_query:
print "Please specify an entrez query (-e ENTREZ_QUERY)"
return - 3
if not options.fasta_file:
print "Please specify the FASTA output file (-f FASTA_FILE)"
return - 4
if not options.info_file:
print "Please specify the download info file (-i INFO_FILE)"
return - 5
        # Need to do some checking on the length of the arguments provided. Currently not working because we have 2 options:
        # (1) Start new download and (2) Continue previous download. Need to handle these separately. Sort it out later. Not too crucial.
else:
print "Continue a previous download..."
if not options.email:
print "Please specify an email address. Need to provide this when doing an Entrez search or fetch (-m EMAIL)"
return - 6
if not options.database:
print "Please specify the database to fetch info from (-d DATABASE)"
return - 7
if not options.web_env:
print "Please specify the previous web_env (-w WEB_ENV)"
return - 8
if not options.query_key:
print "Please specify the previous query_key (-q QUERY_KEY)"
return - 9
if not options.start_pos:
print "Please specify the position of the sequence to start downloading from (-s START_POS)"
return - 10
if not options.nr_seq_query:
print "Please specify the number of sequences in original query (-n NR_SEQ_QUERY)"
return - 11
if not options.fasta_file:
print "Please specify the FASTA output file (-f FASTA_FILE)"
return - 12
if not options.info_file:
print "Please specify the download info file (-i INFO_FILE)"
return - 13
if (len(args) > 0):
print "Too many arguments"
return - 14
BATCH_SIZE = 100
# Input strings generated by browser/galaxy needs to be replaced
mapped_chars = { '>' :'__gt__',
'<' :'__lt__',
'\'' :'__sq__',
'"' :'__dq__',
'[' :'__ob__',
']' :'__cb__',
'{' :'__oc__',
'}' :'__cc__',
'@' :'__at__',
}
# Start a new download
if(not options.continue_download):
email = options.email
database = options.database
entrez_query = options.entrez_query
fasta_file = options.fasta_file
info_file = options.info_file
for key, value in mapped_chars.items():
database = database.replace(value, key)
entrez_query = entrez_query.replace(value, key)
email = email.replace(value, key)
Entrez.email = email
# Open info_file for writing
info_file_fd = open(info_file, "w")
info_file_fd.write('Email address: %s\n' % Entrez.email)
info_file_fd.write('Database: %s\n' % database)
info_file_fd.write('Entrez query: %s\n' % entrez_query)
try:
handle = Entrez.esearch(db=database,term=entrez_query, usehistory='y')
results = Entrez.read(handle)
handle.close()
except Exception, e:
info_file_fd.write( "Error raised when trying do an Entrez.esearch: %s\n" % str(e))
info_file_fd.close()
sys.exit( "Error raised! Exiting now!")
gi_list = results["IdList"]
nr_seq_query = int(results["Count"])
# assert count == len(gi_list) # do not do this test gi_list it is always 20
# Get web_env and query_key from search results
web_env = results["WebEnv"]
query_key = results["QueryKey"]
# Write query specific info to info_file
info_file_fd.write('Number of sequences in query: %d\n' % nr_seq_query)
        info_file_fd.write('Number of sequences to be downloaded: %d\n' % nr_seq_query)
info_file_fd.write('Download sequences from position: %d\n' % 0)
info_file_fd.write('web_env: %s\n' % web_env)
info_file_fd.write('query_key: %s\n'% query_key)
info_file_fd.write('Downloading sequences in batches of %d\n' % BATCH_SIZE)
info_file_fd.close()
        # Now retrieve the FASTA sequences in batches of BATCH_SIZE
fasta_file_fd = open(fasta_file, "w")
for start in range(0,nr_seq_query, BATCH_SIZE):
end = min(nr_seq_query, start + BATCH_SIZE)
print "Dowloading sequence %i to %i" % (start+1, end)
try:
fetch_handle = Entrez.efetch(db=database, rettype="fasta", retstart=start, retmax=BATCH_SIZE, webenv=web_env, query_key=query_key)
data = fetch_handle.read()
except Exception, e:
info_file_fd = open(info_file, "a")
info_file_fd.write( "Error raised when trying do an Entrez.efind: %s\n" % str(e))
info_file_fd.close()
sys.exit( "Error raised! Exiting now!")
fetch_handle.close()
fasta_file_fd.write(data)
info_file_fd = open(info_file, "a")
info_file_fd.write('Downloaded sequence %i to %i\n' % (start+1, end))
info_file_fd.close()
fasta_file_fd.close()
else: # Continue a previous download
email = options.email
database = options.database
web_env = options.web_env
query_key = options.query_key
start_pos = int(options.start_pos); # should check if start position is a integer
nr_seq_query = int(options.nr_seq_query); # should check if nr_seq_query is a integer
fasta_file = options.fasta_file
info_file = options.info_file
for key, value in mapped_chars.items():
database = database.replace(value, key)
email = email.replace(value, key)
web_env = web_env.replace(value, key)
query_key = query_key.replace(value, key)
Entrez.email = email
# Open info_file for writing
info_file_fd = open(info_file, "w")
info_file_fd.write('Email address: %s\n' % Entrez.email)
info_file_fd.write('Database: %s\n' % database)
# Write query specific info to info_file
info_file_fd.write('Number of sequences in original query: %d\n' % nr_seq_query)
        info_file_fd.write('Number of sequences to be downloaded: %d\n' % (int(nr_seq_query) - int(start_pos) + 1))
info_file_fd.write('Download sequences from position: %d\n' % start_pos)
info_file_fd.write('web_env: %s\n' % web_env)
info_file_fd.write('query_key: %s\n' % query_key)
info_file_fd.write('Downloading sequences in batches of %d\n' % BATCH_SIZE)
info_file_fd.close()
info_file_fd = open(info_file, "a")
# Now retrieve the FASTA sequences in batches of BATCH_SIZE
fasta_file_fd = open(fasta_file, "w")
for start in range(start_pos - 1,nr_seq_query, BATCH_SIZE):
end = min(nr_seq_query, start + BATCH_SIZE)
print "Dowloading sequence %i to %i" % (start+1, end)
try:
fetch_handle = Entrez.efetch(db=database, rettype="fasta", retstart=start, retmax=BATCH_SIZE, webenv=web_env, query_key=query_key)
data = fetch_handle.read()
except Exception, e:
info_file_fd = open(info_file, "a")
info_file_fd.write( "Error raised when trying do an Entrez.efind: %s\n" % str(e))
info_file_fd.write( "Retrying...")
info_file_fd.close()
# sys.exit( "Error raised! Exiting now!") # do not exit anymore
fetch_handle.close()
fasta_file_fd.write(data)
info_file_fd = open(info_file, "a")
info_file_fd.write('Downloaded sequence %i to %i\n' % (start+1, end))
info_file_fd.close()
fasta_file_fd.close()
if __name__ == "__main__":
sys.exit(main())
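The script above is essentially Biopython's NCBI history-server pattern: one `Entrez.esearch` with `usehistory='y'`, then repeated `Entrez.efetch` calls against the returned `WebEnv`/`QueryKey`. A condensed sketch of that core loop (the email address and query are placeholders):

```python
# Condensed sketch of the search-then-batched-fetch pattern used in the script above.
from Bio import Entrez

Entrez.email = "you@example.org"  # placeholder
search = Entrez.read(Entrez.esearch(db="nucleotide",
                                    term="Cypripedioideae[Orgn] AND matK[Gene]",
                                    usehistory="y"))
count, webenv, query_key = int(search["Count"]), search["WebEnv"], search["QueryKey"]

with open("out.fasta", "w") as out:
    for start in range(0, count, 100):  # batches of 100, as in the script
        handle = Entrez.efetch(db="nucleotide", rettype="fasta", retmode="text",
                               retstart=start, retmax=100,
                               webenv=webenv, query_key=query_key)
        out.write(handle.read())
        handle.close()
```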
| 2.71875 | 3 |
__init__.py | karttur/geoimagine02-ancillary | 0 | 12797155 | """
ancillary
==========================================
Package belonging to Karttur´s GeoImagine Framework.
Author
------
<NAME> (<EMAIL>)
"""
from .version import __version__, VERSION, metadataD
from .ancillary import ProcessAncillary
from .searchjson import SearchJsonTandemX, UnZipJsonTandemX
__all__ = ['ProcessAncillary']
| 0.96875 | 1 |
examples/fractal.py | Lnk2past/copperhead | 12 | 12797156 | <filename>examples/fractal.py
import sys
import copperhead as cpp
extra_compile_args = "'/std:c++14'" if sys.version.split('[')[1].startswith('MSC') else "'-std=c++14'"
config = {
'extra_compile_args': extra_compile_args
}
mandelbrot_cpp = r'''
#include <cmath>
#include <complex>
#include <fstream>
inline int mandelbrot(const std::complex<double> &c)
{
const int max_iter {100};
int i {0};
std::complex<double> z {0.0, 0.0};
while (std::abs(z) < 2.0 && i < max_iter)
{
z = z * z + c;
++i;
}
return max_iter - i;
}
void compute(std::string filename, double x, double y, double h)
{
const auto n {std::lround(2.5 / h)};
std::ofstream f(filename);
for (long yidx {0}; yidx < n; ++yidx)
{
for (long xidx {0}; xidx < n; ++xidx)
{
f << mandelbrot(std::complex<double>(x, y)) << " ";
x += h;
}
f << "\n";
y -= h;
x -= 2.5;
}
}'''
compute = cpp.generate('compute', 'void(std::string, double, double, double)', mandelbrot_cpp, config=config)
compute('fractal.dat', -2.0, 1.25, 0.005)
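A possible follow-up, not part of the original example: the whitespace-separated grid written to `fractal.dat` can be rendered with matplotlib. The colormap and extent below are illustrative choices.

```python
# Hypothetical visualization of the grid produced by compute() above.
import numpy as np
import matplotlib.pyplot as plt

grid = np.loadtxt('fractal.dat')  # 2D array of (max_iter - iterations) values
plt.imshow(grid, cmap='magma', extent=(-2.0, 0.5, -1.25, 1.25))
plt.colorbar(label='100 - iterations')
plt.savefig('fractal.png', dpi=150)
```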
| 2.859375 | 3 |
src/data_analysis/background_subtractors.py | otimgren/centrex-data-analysis | 0 | 12797157 | """
Objects used for subtracting background from camera images
"""
from abc import ABC, abstractmethod
from dataclasses import dataclass
import numpy as np
import pandas as pd
class BackgroundSubtractor(ABC):
"""
Subtracts background from an image
"""
@abstractmethod
def subtract_background(self, image: np.ndarray) -> np.ndarray:
"""
Subtracts background from an image
"""
...
class AcquiredBackgroundSubtractor(BackgroundSubtractor):
"""
Subtracts a background that is based on multiple images stored in an hdf dataset.
"""
def __init__(self, background_dset: pd.DataFrame) -> None:
super().__init__()
self.background_dset = background_dset
# Calculate background image
self.calculate_mean_background(background_dset)
def subtract_background(self, image: np.ndarray) -> np.ndarray:
# Subtract background
image_bs = image - self.mean_background
# Return background subtracted image
return image_bs
def calculate_mean_background(self, df: pd.DataFrame) -> None:
"""
Calculates the mean of the background images
"""
data = self.background_dset["CameraData"]
self.mean_background = np.nanmean(np.array(list(data)), axis=0)
class FittedBackgroundSubtractor(BackgroundSubtractor):
"""
Subtracts a background based on fit around the image.
"""
# todo
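A minimal usage sketch for the acquired-background path above. The DataFrame layout (a `"CameraData"` column holding 2D arrays) follows `calculate_mean_background`; the random frames are purely illustrative.

```python
# Illustrative use of AcquiredBackgroundSubtractor with synthetic camera frames.
import numpy as np
import pandas as pd

frames = [np.random.poisson(10, size=(64, 64)).astype(float) for _ in range(20)]
background_dset = pd.DataFrame({"CameraData": frames})

subtractor = AcquiredBackgroundSubtractor(background_dset)
signal_image = np.random.poisson(10, size=(64, 64)).astype(float) + 5.0
cleaned = subtractor.subtract_background(signal_image)  # roughly the +5.0 offset should remain
```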
| 3.5 | 4 |
bokeh/plotting.py | csaid/bokeh | 1 | 12797158 | <reponame>csaid/bokeh
from __future__ import print_function
from functools import wraps
import itertools
import time
import logging
import os
import uuid
import warnings
from . import browserlib
from . import _glyph_functions as gf
from .document import Document
from .embed import notebook_div, file_html, autoload_server
from .objects import Axis, ColumnDataSource, Glyph, Grid, GridPlot, Legend
from .palettes import brewer
from .plotting_helpers import (
get_default_color, get_default_alpha, _handle_1d_data_args, _list_attr_splat
)
from .resources import Resources
from .session import Cloud, DEFAULT_SERVER_URL, Session
logger = logging.getLogger(__name__)
_default_document = Document()
_default_session = None
_default_file = None
_default_notebook = None
def curdoc():
''' Return the current document.
Returns:
doc : the current default document object.
'''
try:
"""This is used when we need to call the plotting API from within
the server, within a request context. (Applets do this for example)
in this case you still want the API to work but you don't want
to use the global module level document
"""
from flask import request
doc = request.bokeh_server_document
logger.debug("returning config from flask request")
return doc
except (ImportError, RuntimeError, AttributeError):
return _default_document
def curplot():
''' Return the current default plot object.
Returns:
plot : the current default plot (or None)
'''
return curdoc().curplot()
def cursession():
''' Return the current session, if there is one.
Returns:
session : the current default session object (or None)
'''
return _default_session
def hold(value=True):
''' Set or clear the plot hold status on the current document.
This is a convenience function that acts on the current document, and is equivalent to curdoc().hold(...)
Args:
value (bool, optional) : whether hold should be turned on or off (default: True)
Returns:
None
'''
curdoc().hold(value)
def figure(**kwargs):
''' Activate a new figure for plotting.
All subsequent plotting operations will affect the new figure.
This function accepts all plot style keyword parameters.
Returns:
None
'''
curdoc().figure(**kwargs)
def output_server(docname, session=None, url="default", name=None):
""" Cause plotting commands to automatically persist plots to a Bokeh server.
Can use explicitly provided Session for persistence, or the default
session.
Args:
docname (str) : name of document to push on Bokeh server
An existing documents with the same name will be overwritten.
session (Session, optional) : An explicit session to use (default: None)
If session is None, use the default session
        url (str, optional) : URL of the Bokeh server (default: "default")
if url is "default" use session.DEFAULT_SERVER_URL
name (str, optional) :
if name is None, use the server URL as the name
Additional keyword arguments like **username**, **userapikey**,
and **base_url** can also be supplied.
Returns:
None
.. note:: Generally, this should be called at the beginning of an
interactive session or the top of a script.
    .. note:: Calling this function will replace any existing default Server session
"""
global _default_session
if url == "default":
url = DEFAULT_SERVER_URL
if name is None:
name = url
if not session:
if not _default_session:
_default_session = Session(name=name, root_url=url)
session = _default_session
session.use_doc(docname)
session.load_document(curdoc())
def output_cloud(docname):
""" Cause plotting commands to automatically persist plots to the Bokeh
cloud server.
Args:
docname (str) : name of document to push on Bokeh server
An existing documents with the same name will be overwritten.
.. note:: Generally, this should be called at the beginning of an
interactive session or the top of a script.
    .. note:: Calling this function will replace any existing default Server session
"""
output_server(docname, session=Cloud())
def output_notebook(url=None, docname=None, session=None, name=None):
if session or url or name:
if docname is None:
docname = "IPython Session at %s" % time.ctime()
output_server(docname, url=url, session=session, name=name)
else:
from . import load_notebook
load_notebook()
global _default_notebook
_default_notebook = True
def output_file(filename, title="Bokeh Plot", autosave=True, mode="inline", root_dir=None):
""" Outputs to a static HTML file.
.. note:: This file will be overwritten each time show() or save() is invoked.
Args:
autosave (bool, optional) : whether to automatically save (default: True)
If **autosave** is True, then every time plot() or one of the other
visual functions is called, this causes the file to be saved. If it
is False, then the file is only saved upon calling show().
        mode (str, optional) : how to include BokehJS (default: "inline")
**mode** can be 'inline', 'cdn', 'relative(-dev)' or 'absolute(-dev)'.
In the 'relative(-dev)' case, **root_dir** can be specified to indicate the
base directory from which the path to the various static files should be
computed.
.. note:: Generally, this should be called at the beginning of an
interactive session or the top of a script.
"""
global _default_file
_default_file = {
'filename' : filename,
'resources' : Resources(mode=mode, root_dir=root_dir, minified=False),
'autosave' : autosave,
'title' : title,
}
if os.path.isfile(filename):
print("Session output file '%s' already exists, will be overwritten." % filename)
def show(browser=None, new="tab", url=None):
""" 'shows' the current plot, by auto-raising the window or tab
displaying the current plot (for file/server output modes) or displaying
it in an output cell (IPython notebook).
Args:
browser (str, optional) : browser to show with (default: None)
For systems that support it, the **browser** argument allows specifying
which browser to display in, e.g. "safari", "firefox", "opera",
"windows-default". (See the webbrowser module documentation in the
standard lib for more details.)
new (str, optional) : new file output mode (default: "tab")
For file-based output, opens or raises the browser window
showing the current output file. If **new** is 'tab', then
opens a new tab. If **new** is 'window', then opens a new window.
"""
filename = _default_file['filename'] if _default_file else None
session = cursession()
notebook = _default_notebook
# Map our string argument to the webbrowser.open argument
new_param = {'tab': 2, 'window': 1}[new]
controller = browserlib.get_browser_controller(browser=browser)
plot = curplot()
if not plot:
warnings.warn("No current plot to show. Use renderer functions (circle, rect, etc.) to create a current plot (see http://bokeh.pydata.org/index.html)")
return
if notebook and session:
import IPython.core.displaypub as displaypub
push(session=session)
snippet = autoload_server(plot, cursession())
displaypub.publish_display_data('bokeh', {'text/html': snippet})
elif notebook:
import IPython.core.displaypub as displaypub
displaypub.publish_display_data('bokeh', {'text/html': notebook_div(plot)})
elif session:
push()
if url:
controller.open(url, new=new_param)
else:
controller.open(session.object_link(curdoc()._plotcontext))
elif filename:
save(filename)
controller.open("file://" + os.path.abspath(filename), new=new_param)
def save(filename=None, resources=None):
""" Updates the file with the data for the current document.
If a filename is supplied, or output_file(...) has been called, this will
save the plot to the given filename.
Args:
filename (str, optional) : filename to save document under (default: None)
if `filename` is None, the current output_file(...) filename is used if present
resources (Resources, optional) : BokehJS resource config to use
if `resources` is None, the current default resource config is used
Returns:
None
"""
if filename is None and _default_file:
filename = _default_file['filename']
if resources is None and _default_file:
resources = _default_file['resources']
if not filename:
warnings.warn("save() called but no filename was supplied and output_file(...) was never called, nothing saved")
return
if not resources:
warnings.warn("save() called but no resources was supplied and output_file(...) was never called, nothing saved")
return
if not curplot():
warnings.warn("No current plot to save. Use renderer functions (circle, rect, etc.) to create a current plot (see http://bokeh.pydata.org/index.html)")
return
html = file_html(curdoc(), resources, _default_file['title'])
with open(filename, "w") as f:
f.write(html)
def push(session=None, document=None):
""" Updates the server with the data for the current document.
Args:
        session (Session, optional) : the Session to push the document to (default: None)
            if `session` is None, the current output_server(...) session is used if present
document (Document, optional) : BokehJS document to push
if `document` is None, the current default document is pushed
Returns:
None
"""
if not session:
session = cursession()
if not document:
document = curdoc()
if session:
return session.store_document(curdoc())
else:
warnings.warn("push() called but no session was supplied and output_server(...) was never called, nothing pushed")
def _doc_wrap(func):
@wraps(func)
def wrapper(*args, **kwargs):
retval = func(curdoc(), *args, **kwargs)
if cursession() and curdoc()._autostore:
push()
if _default_file and _default_file['autosave']:
save()
return retval
wrapper.__doc__ += "\nThis is a convenience function that acts on the current document, and is equivalent to curdoc().%s(...)" % func.__name__
return wrapper
annular_wedge = _doc_wrap(gf.annular_wedge)
annulus = _doc_wrap(gf.annulus)
arc = _doc_wrap(gf.arc)
asterisk = _doc_wrap(gf.asterisk)
bezier = _doc_wrap(gf.bezier)
circle = _doc_wrap(gf.circle)
circle_cross = _doc_wrap(gf.circle_cross)
circle_x = _doc_wrap(gf.circle_x)
cross = _doc_wrap(gf.cross)
diamond = _doc_wrap(gf.diamond)
diamond_cross = _doc_wrap(gf.diamond_cross)
image = _doc_wrap(gf.image)
image_rgba = _doc_wrap(gf.image_rgba)
image_url = _doc_wrap(gf.image_url)
inverted_triangle = _doc_wrap(gf.inverted_triangle)
line = _doc_wrap(gf.line)
multi_line = _doc_wrap(gf.multi_line)
oval = _doc_wrap(gf.oval)
patch = _doc_wrap(gf.patch)
patches = _doc_wrap(gf.patches)
quad = _doc_wrap(gf.quad)
quadratic = _doc_wrap(gf.quadratic)
ray = _doc_wrap(gf.ray)
rect = _doc_wrap(gf.rect)
segment = _doc_wrap(gf.segment)
square = _doc_wrap(gf.square)
square_cross = _doc_wrap(gf.square_cross)
square_x = _doc_wrap(gf.square_x)
text = _doc_wrap(gf.text)
triangle = _doc_wrap(gf.triangle)
wedge = _doc_wrap(gf.wedge)
x = _doc_wrap(gf.x)
_marker_types = {
"asterisk": asterisk,
"circle": circle,
"circle_cross": circle_cross,
"circle_x": circle_x,
"cross": cross,
"diamond": diamond,
"diamond_cross": diamond_cross,
"inverted_triangle": inverted_triangle,
"square": square,
"square_x": square_x,
"square_cross": square_cross,
"triangle": triangle,
"x": x,
"*": asterisk,
"+": cross,
"o": circle,
"ox": circle_x,
"o+": circle_cross,
}
def markers():
""" Prints a list of valid marker types for scatter()
Returns:
None
"""
print(list(sorted(_marker_types.keys())))
_color_fields = set(["color", "fill_color", "line_color"])
_alpha_fields = set(["alpha", "fill_alpha", "line_alpha"])
def scatter(*args, **kwargs):
""" Creates a scatter plot of the given x and y items.
Args:
*args : The data to plot. Can be of several forms:
(X, Y)
Two 1D arrays or iterables
(XNAME, YNAME)
Two bokeh DataSource/ColumnsRef
marker (str, optional): a valid marker_type, defaults to "circle"
color (color value, optional): shorthand to set both fill and line color
All the :ref:`userguide_line_properties` and :ref:`userguide_fill_properties` are
also accepted as keyword parameters.
Examples:
>>> scatter([1,2,3],[4,5,6], fill_color="red")
>>> scatter("data1", "data2", source=data_source, ...)
"""
ds = kwargs.get("source", None)
names, datasource = _handle_1d_data_args(args, datasource=ds)
kwargs["source"] = datasource
markertype = kwargs.get("marker", "circle")
# TODO: How to handle this? Just call curplot()?
if not len(_color_fields.intersection(set(kwargs.keys()))):
kwargs['color'] = get_default_color()
if not len(_alpha_fields.intersection(set(kwargs.keys()))):
kwargs['alpha'] = get_default_alpha()
if markertype not in _marker_types:
raise ValueError("Invalid marker type '%s'. Use markers() to see a list of valid marker types." % markertype)
return _marker_types[markertype](*args, **kwargs)
def gridplot(plot_arrangement, name=None):
""" Generate a plot that arranges several subplots into a grid.
Args:
plot_arrangement (list[:class:`Plot <bokeh.objects.Plot>`]) : plots to arrange in a grid
name (str) : name for this plot
.. note:: `plot_arrangement` can be nested, e.g [[p1, p2], [p3, p4]]
Returns:
grid_plot: the current :class:`GridPlot <bokeh.objects.GridPlot>`
"""
grid = GridPlot(children=plot_arrangement)
if name:
grid._id = name
# Walk the plot_arrangement and remove them from the plotcontext,
# so they don't show up twice
subplots = itertools.chain.from_iterable(plot_arrangement)
curdoc().get_context().children = list(set(curdoc().get_context().children) - set(subplots))
curdoc().add(grid)
curdoc()._current_plot = grid # TODO (bev) don't use private attrs
if _default_session:
push()
if _default_file and _default_file['autosave']:
save()
return grid
def xaxis():
""" Get the current axis objects
Returns:
Returns axis object or splattable list of axis objects on the current plot
"""
p = curplot()
if p is None:
return None
axis = [obj for obj in p.renderers if isinstance(obj, Axis) and obj.dimension==0]
return _list_attr_splat(axis)
def yaxis():
""" Get the current `y` axis object(s)
Returns:
Returns y-axis object or splattable list of y-axis objects on the current plot
"""
p = curplot()
if p is None:
return None
axis = [obj for obj in p.renderers if isinstance(obj, Axis) and obj.dimension==1]
return _list_attr_splat(axis)
def axis():
""" Get the current `x` axis object(s)
Returns:
Returns x-axis object or splattable list of x-axis objects on the current plot
"""
return _list_attr_splat(xaxis() + yaxis())
def legend():
""" Get the current :class:`legend <bokeh.objects.Legend>` object(s)
Returns:
Returns legend object or splattable list of legend objects on the current plot
"""
p = curplot()
if p is None:
return None
legends = [obj for obj in p.renderers if isinstance(obj, Legend)]
return _list_attr_splat(legends)
def xgrid():
""" Get the current `x` :class:`grid <bokeh.objects.Grid>` object(s)
Returns:
Returns legend object or splattable list of legend objects on the current plot
"""
p = curplot()
if p is None:
return None
grid = [obj for obj in p.renderers if isinstance(obj, Grid) and obj.dimension==0]
return _list_attr_splat(grid)
def ygrid():
""" Get the current `y` :class:`grid <bokeh.objects.Grid>` object(s)
Returns:
Returns y-grid object or splattable list of y-grid objects on the current plot
"""
p = curplot()
if p is None:
return None
grid = [obj for obj in p.renderers if isinstance(obj, Grid) and obj.dimension==1]
return _list_attr_splat(grid)
def grid():
""" Get the current :class:`grid <bokeh.objects.Grid>` object(s)
Returns:
Returns grid object or splattable list of grid objects on the current plot
"""
return _list_attr_splat(xgrid() + ygrid())
def load_object(obj):
"""updates object from the server
"""
cursession().load_object(obj, curdoc())
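A short end-to-end sketch using the module-level API defined above (data values and the output filename are illustrative):

```python
# Hypothetical workflow: two plots written to a standalone HTML file and arranged in a grid.
from bokeh.plotting import output_file, figure, scatter, line, gridplot, curplot, show

output_file("example.html", title="Demo")

figure()
scatter([1, 2, 3, 4], [4, 6, 5, 7], marker="o", color="navy")
p1 = curplot()

figure()
line([1, 2, 3, 4], [1, 4, 9, 16], color="firebrick")
p2 = curplot()

gridplot([[p1, p2]])
show()
```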
| 2.671875 | 3 |
auton_survival/__init__.py | PotosnakW/auton-survival | 15 | 12797159 | <reponame>PotosnakW/auton-survival
r'''
[](https://travis-ci.org/autonlab/DeepSurvivalMachines)
[](https://codecov.io/gh/autonlab/DeepSurvivalMachines)
[](https://opensource.org/licenses/MIT)
[](https://github.com/autonlab/auton-survival)
<img align=right style="align:right;" src="https://ndownloader.figshare.com/files/34052981" width=30%>
<br>
The `auton-survival` Package
---------------------------
The python package `auton-survival` is repository of reusable utilities for projects
involving censored Time-to-Event Data. `auton-survival` provides flexible APIs
allowing rapid experimentation including dataset preprocessing, regression,
counterfactual estimation, clustering and phenotyping and propensity adjusted evaluation.
**For complete details on** `auton-survival` **see**:
<h3>• <a href="https://www.cs.cmu.edu/~chiragn/papers/auton_survival.pdf">White Paper</a> • <a href="https://autonlab.github.io/auton-survival/">Documentation</a> • <a href="https://nbviewer.org/github/autonlab/auton-survival/tree/master/examples/">Demo Notebooks</a></h3>
What is Survival Analysis?
--------------------------
**Survival Analysis** involves estimating when an event of interest, \( T \)
would take place given some features or covariates \( X \). In statistics
and ML these scenarios are modelled as regression to estimate the conditional
survival distribution, \( \mathbb{P}(T>t|X) \). As compared to typical
regression problems, Survival Analysis differs in two major ways:
* The Event distribution, \( T \) has positive support ie.
\( T \in [0, \infty) \).
* There is presence of censoring ie. a large number of instances of data are
lost to follow up.
Survival Regression
-------------------
#### `auton_survival.models`
Training a Deep Cox Proportional Hazards Model with `auton-survival`
```python
from auton_survival import datasets, preprocessing, models
# Load the SUPPORT Dataset
outcomes, features = datasets.load_dataset("SUPPORT")
# Preprocess (Impute and Scale) the features
features = preprocessing.Preprocessor().fit_transform(features)
# Train a Deep Cox Proportional Hazards (DCPH) model
model = models.cph.DeepCoxPH(layers=[100])
model.fit(features, outcomes.time, outcomes.event)
# Predict risk at specific time horizons.
predictions = model.predict_risk(features, t=[8, 12, 16])
```
#### `auton_survival.estimators`
This module provides a wrapper `auton_survival.estimators.SurvivalModel` to model
survival datasets with standard survival (time-to-event) analysis methods.
The use of the wrapper allows a simple standard interface for multiple different
survival regression methods.
`auton_survival.estimators` also provides convenient wrappers around other popular
python survival analysis packages to experiment with Random Survival Forests and
Weibull Accelerated Failure Time regression models.
```python
from auton_survival import estimators
# Train a Deep Survival Machines model using the SurvivalModel class.
model = estimators.SurvivalModel(model='dsm')
model.fit(features, outcomes)
# Predict risk at time horizons.
predictions = model.predict_risk(features, times=[8, 12, 16])
```
#### `auton_survival.experiments`
Modules to perform standard survival analysis experiments. This module
provides a top-level interface to run `auton-survival` style experiments
of survival analysis, involving cross-validation style experiments with
multiple different survival analysis models
```python
# auton-survival Style Cross Validation Experiment.
from auton_survival.experiments import SurvivalRegressionCV
# Define the Hyperparameter grid to perform Cross Validation
hyperparam_grid = {'n_estimators' : [50, 100], 'max_depth' : [3, 5],
'max_features' : ['sqrt', 'log2']}
# Train a RSF model with cross-validation using the SurvivalRegressionCV class
model = SurvivalRegressionCV(model='rsf', cv_folds=5, hyperparam_grid=hyperparam_grid)
model.fit(features, outcomes)
```
Phenotyping and Knowledge Discovery
-----------------------------------
#### `auton_survival.phenotyping`
`auton_survival.phenotyping` allows extraction of latent clusters or subgroups
of patients that demonstrate similar outcomes. In the context of this package,
we refer to this task as **phenotyping**. `auton_survival.phenotyping` allows:
- **Unsupervised Phenotyping**: Involves first performing dimensionality
reduction on the input covariates \( x \) followed by the use of a clustering
algorithm on this representation.
```python
from auton_survival.phenotyping import ClusteringPhenotyper
# Dimensionality reduction using Principal Component Analysis (PCA) to 8 dimensions.
dim_red_method, n_components = 'pca', 8
# We use a Gaussian Mixture Model (GMM) with 3 components and diagonal covariance.
clustering_method, n_clusters = 'gmm', 3
# Initialize the phenotyper with the above hyperparameters.
phenotyper = ClusteringPhenotyper(clustering_method=clustering_method,
dim_red_method=dim_red_method,
n_components=n_components,
n_clusters=n_clusters)
# Fit and infer the phenogroups.
phenotypes = phenotyper.fit_phenotype(features)
# Plot the phenogroup specific Kaplan-Meier survival estimate.
auton_survival.reporting.plot_kaplanmeier(outcomes, phenotypes)
```
- **Factual Phenotyping**: Involves the use of structured latent variable
models, `auton_survival.models.dcm.DeepCoxMixtures` or
`auton_survival.models.dsm.DeepSurvivalMachines` to recover phenogroups that
demonstrate differential observed survival rates.
- **Counterfactual Phenotyping**: Involves learning phenotypes that demonstrate
heterogenous treatment effects. That is, the learnt phenogroups have differential
response to a specific intervention. Relies on the specially designed
`auton_survival.models.cmhe.DeepCoxMixturesHeterogenousEffects` latent variable model.
Dataset Loading and Preprocessing
---------------------------------
Helper functions to load and preprocess various time-to-event datasets like the
popular `SUPPORT`, `FRAMINGHAM` and `PBC` datasets for survival analysis.
#### `auton_survival.datasets`
```python
# Load the SUPPORT Dataset
from auton_survival import datasets
features, outcomes = datasets.load_dataset('SUPPORT')
```
#### `auton_survival.preprocessing`
This module provides a flexible API to perform imputation and data
normalization for downstream machine learning models. The module has
3 distinct classes, `Scaler`, `Imputer` and `Preprocessor`. The `Preprocessor`
class is a composite transform that does both Imputing ***and*** Scaling with
a single function call.
```python
# Preprocessing loaded Datasets
from auton_survival import datasets
features, outcomes = datasets.load_topcat()
from auton_survival.preprocessing import Preprocessor
features = Preprocessor().fit_transform(features,
cat_feats=['GENDER', 'ETHNICITY', 'SMOKE'],
num_feats=['height', 'weight'])
# The `cat_feats` and `num_feats` lists would contain all the categorical and
# numerical features in the dataset.
```
Evaluation and Reporting
-------------------------
#### `auton_survival.metrics`
Helper functions to generate standard reports for common Survival Analysis tasks.
Citing and References
----------------------
Please cite the following if you use `auton-survival`:
[auton-survival: an Open-Source Package for Regression,
Counterfactual Estimation, Evaluation and Phenotyping
with Censored Time-to-Event Data (2022)](https://arxiv.org/abs/2204.07276)</a>
```
@article{nagpal2022autonsurvival,
url = {https://arxiv.org/abs/2204.07276},
author = {<NAME> <NAME> <NAME>},
title = {auton-survival: an Open-Source Package for Regression,
Counterfactual Estimation, Evaluation and Phenotyping with
Censored Time-to-Event Data},
publisher = {arXiv},
year = {2022},
}
```
Additionally, models and methods in `auton_survival` come from the following papers.
Please cite the individual papers if you employ them in your research:
[1] [Deep Survival Machines:
Fully Parametric Survival Regression and
Representation Learning for Censored Data with Competing Risks."
IEEE Journal of Biomedical and Health Informatics (2021)](https://arxiv.org/abs/2003.01176)</a>
```
@article{nagpal2021dsm,
title={Deep survival machines: Fully parametric survival regression and representation learning for censored data with competing risks},
author={<NAME> and <NAME> <NAME>},
journal={IEEE Journal of Biomedical and Health Informatics},
volume={25},
number={8},
pages={3163--3175},
year={2021},
publisher={IEEE}
}
```
[2] [Deep Parametric Time-to-Event Regression with Time-Varying Covariates. AAAI
Spring Symposium (2021)](http://proceedings.mlr.press/v146/nagpal21a.html)</a>
```
@InProceedings{pmlr-v146-nagpal21a,
title={Deep Parametric Time-to-Event Regression with Time-Varying Covariates},
author={<NAME> <NAME> <NAME>},
booktitle={Proceedings of AAAI Spring Symposium on Survival Prediction - Algorithms, Challenges, and Applications 2021},
series={Proceedings of Machine Learning Research},
publisher={PMLR},
}
```
[3] [Deep Cox Mixtures for Survival Regression. Conference on Machine Learning for
Healthcare (2021)](https://arxiv.org/abs/2101.06536)</a>
```
@inproceedings{nagpal2021dcm,
title={Deep Cox mixtures for survival regression},
author={<NAME> Yadlowsky, <NAME> Rostamzadeh, Negar and <NAME>},
booktitle={Machine Learning for Healthcare Conference},
pages={674--708},
year={2021},
organization={PMLR}
}
```
[4] [Counterfactual Phenotyping with Censored Time-to-Events (2022)](https://arxiv.org/abs/2202.11089)</a>
```
@article{nagpal2022counterfactual,
title={Counterfactual Phenotyping with Censored Time-to-Events},
author={<NAME> <NAME> <NAME> <NAME>},
journal={arXiv preprint arXiv:2202.11089},
year={2022}
}
```
## Installation
```console
foo@bar:~$ git clone https://github.com/autonlab/auton_survival
foo@bar:~$ pip install -r requirements.txt
```
Compatibility
-------------
`auton-survival` requires `python` 3.5+ and `pytorch` 1.1+.
To evaluate performance using standard metrics
`auton-survival` requires `scikit-survival`.
Contributing
------------
`auton-survival` is [on GitHub]. Bug reports and pull requests are welcome.
[on GitHub]: https://github.com/autonlab/auton-survival
License
-------
MIT License
Copyright (c) 2022 Car<NAME> University, [Auton Lab](http://autonlab.org)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
<img align="right" height ="120px" src="https://www.cs.cmu.edu/~chiragn/cmu_logo.jpeg">
<img align="right" height ="110px" src="https://www.cs.cmu.edu/~chiragn/auton_logo.png">
<br><br><br><br><br>
'''
__version__ = "0.1.0"
from .models.dsm import DeepSurvivalMachines
from .models.dcm import DeepCoxMixtures
from .models.cph import DeepCoxPH, DeepRecurrentCoxPH
from .models.cmhe import DeepCoxMixturesHeterogenousEffects
| 1.367188 | 1 |
src/packModules/filewrite.py | PauloHenriqueRCS/InterPy | 0 | 12797160 | <filename>src/packModules/filewrite.py
def filewrite(outcontent, filename):
    filecontent = None
    try:
        filecontent = open("outFiles/outcontent.txt", mode="a", encoding="utf-8")
        filecontent.write("\n\n\n=========={}==========\n".format(filename))
        filecontent.write("\n".join(str(el) for el in outcontent))
    except IOError as identifier:
        print(str(identifier))
    finally:
        # Guard against the open() call itself failing, which would leave
        # `filecontent` unbound and raise a NameError here.
        if filecontent is not None:
            filecontent.close()
| 3.203125 | 3 |
api/models.py | szczesnymateo/words-world-api | 0 | 12797161 | from fractions import Fraction
from django.db import models
from django.contrib.auth.models import User
from django.dispatch import receiver
from django.db.models.signals import post_save
@receiver(post_save, sender=User)
def create_blank_statistics(sender, instance=None, created=False, **kwargs):
if created:
Statistic.objects.create(user=instance)
class Language(models.Model):
name = models.CharField(max_length=32)
users = models.ManyToManyField(User, related_name='selected_languages', blank=True)
language_code = models.CharField(max_length=32, null=True, blank=True)
def __str__(self):
return self.name
def __eq__(self, other):
if isinstance(other, Language):
return self.name == other.name
else:
return False
class Achievement(models.Model):
LEVEL_CHOICES = (
("1", "Bronze"),
("2", "Silver"),
("3", "Gold"),
("4", "Diamond"),
)
condition = models.TextField(max_length=2048)
name = models.CharField(max_length=128)
font_awesome_icon = models.TextField(max_length=2048)
users = models.ManyToManyField(User, related_name="achievements", blank=True)
level = models.CharField(max_length=1, choices=LEVEL_CHOICES)
score = models.IntegerField()
def __str__(self):
return str(self.name)
def try_award_to(self, user):
has_achievement = self in user.achievements.all()
if has_achievement:
return False
condition_result = eval(str(self.condition))
if condition_result:
user.achievements.add(self)
return True
else:
return False
class UserFollowing(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='following')
following = models.ForeignKey(User, on_delete=models.CASCADE, related_name='followed_by')
class Statistic(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='statistics')
correctly_swiped_taboo_cards = models.IntegerField(default=0)
swiped_taboo_cards = models.IntegerField(default=0)
correctly_ans_flashcards = models.IntegerField(default=0)
ans_flashcards = models.IntegerField(default=0)
translated_words = models.IntegerField(default=0)
@property
def taboo_efficiency(self):
        if self.swiped_taboo_cards != 0:
return round(Fraction(self.correctly_swiped_taboo_cards, self.swiped_taboo_cards), 2)
else:
return 0
class TabooCard(models.Model):
key_word = models.CharField(max_length=128)
black_list = models.CharField(max_length=2048)
owner = models.ForeignKey(User, on_delete=models.DO_NOTHING, related_name='cards')
language = models.ForeignKey(Language, on_delete=models.DO_NOTHING, related_name='cards')
times_shown = models.IntegerField(default=0)
answered_correctly = models.IntegerField(default=0)
@property
def difficulty(self):
        if self.times_shown == 0:
return "NOT ENOUGH STATS"
ratio = Fraction(self.answered_correctly, self.times_shown)
if 0 <= ratio < 0.25:
return "INSANE"
elif 0.25 <= ratio < 0.5:
return "HARD"
elif 0.5 <= ratio < 0.75:
return "MEDIUM"
elif 0.75 <= ratio:
return "EASY"
@property
def card_efficiency(self):
        if self.times_shown != 0:
return round(Fraction(self.answered_correctly, self.times_shown), 2)
else:
return 0
def __str__(self):
return str(self.pk) + ' | ' + str(self.key_word) + ' | ' + str(self.language.language_code)
@receiver(post_save, sender=Statistic)
@receiver(post_save, sender=UserFollowing)
@receiver(post_save, sender=User)
def trigger_achievements_after_statistics_save(sender, instance=None, created=False, **kwargs):
if isinstance(instance, User):
if not created:
grant_achievements(instance)
else:
grant_achievements(instance.user)
def grant_achievements(user):
for achievement in Achievement.objects.all():
achievement.try_award_to(user)
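A sketch of how an `Achievement.condition` string interacts with `try_award_to` above: the string is passed to `eval` with `user` in scope, so it can reference the related `statistics` record. The achievement fields and the threshold below are illustrative, and `some_user` is a placeholder `User` instance.

```python
# Hypothetical condition: award once the user has swiped at least 100 taboo cards.
achievement = Achievement.objects.create(
    name="Card Shark",
    condition="user.statistics.swiped_taboo_cards >= 100",  # eval'd with `user` in scope
    font_awesome_icon="fa-trophy",
    level="1",
    score=10,
)
achievement.try_award_to(some_user)  # adds the achievement if the condition evaluates truthy
```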
| 2.09375 | 2 |
heterocl/compute_api.py | Kins1ley/tvm | 0 | 12797162 | <filename>heterocl/compute_api.py
"""Compute APIs in HeteroCL"""
#pylint: disable=no-member, redefined-builtin, too-many-arguments, missing-docstring
import numbers
from collections import OrderedDict
from tvm import expr_hcl as _expr, stmt as _stmt
from tvm.tir import IterVar as _IterVar
from util import get_index, get_name, make_for, CastRemover
from tensor import Scalar, Tensor, TensorSlice
from schedule import Stage
from debug import APIError
from module import Module
##############################################################################
# Helper classes and functions
##############################################################################
class ReplaceReturn(CastRemover):
"""Replace all Return statement with a Store statement.
Attributes
----------
buffer_var : Var
The buffer variable of the Store statement
dtype : Type
The data type of the Store statement
index : Expr
The index of the Store statement
"""
def __init__(self, buffer_var, dtype, index):
self.buffer_var = buffer_var
self.dtype = dtype
self.index = index
def mutate_KerenlDef(self, node):
"""Omit the KernelDef statement
We do not need to replace the Return statement inside.
"""
#pylint: disable=no-self-use
return node
def mutate_Return(self, node):
"""Replace the Return statement with a Store statement
"""
return _make.Store(self.buffer_var, _make.Cast(self.dtype, node.value), self.index)
def process_fcompute(fcompute, shape):
"""Pre-process the fcompute field of an API.
"""
# check API correctness
if not callable(fcompute):
raise APIError("The construction rule must be callable")
# prepare the iteration variables
args = [] # list of arguments' names
nargs = 0 # number of arguments
if isinstance(fcompute, Module):
args = fcompute.arg_names
nargs = len(args)
else:
args = list(fcompute.__code__.co_varnames)
nargs = fcompute.__code__.co_argcount
# automatically create argument names
if nargs < len(shape):
for i in range(nargs, len(shape)):
args.append("args" + str(i))
elif nargs > len(shape):
raise APIError("The number of arguments exceeds the number of dimensions")
return args, len(shape)
def compute_body(name,
lambda_ivs,
fcompute,
shape=(),
dtype=None,
tensor=None,
attrs=OrderedDict()):
"""Create a stage and perform the computation.
If `tensor` is `None`, no tensor is returned.
Parameters
----------
name : str
The name of the stage
lambda_ivs : list of IterVar
A list contains the iteration variables in the lambda function if
exists
fcompute : callable
The computation rule
shape : tuple, optional
The output shape or the iteration domain
dtype : Type, optional
The data type of the output/updated tensor
tensor : Tensor, optional
The tensor to be updated. Create a new one if it is `None`
Returns
-------
Tensor or None
"""
var_list = [i.var for i in lambda_ivs]
return_tensor = True if tensor is None else False
with Stage(name, dtype, shape) as stage:
if not return_tensor:
stage.input_stages.add(tensor.last_update)
else:
tensor = Tensor(shape, stage._dtype, name, stage._buf)
buffer_var = tensor._buf.data
dtype = tensor.dtype
shape = tensor.shape
stage.stmt_stack.append([])
ret = fcompute(*var_list)
print(dir(ret))
print(dir(ret.a))
print(dir(ret.b))
stage.lhs_tensors.add(tensor)
for t in stage.lhs_tensors:
t.last_update = stage
stmt = None
if ret is None:
# replace all hcl.return_ with Store stmt
indices = lambda_ivs
index, _, _ = get_index(shape, indices, 0)
stmt = stage.pop_stmt()
stmt = ReplaceReturn(buffer_var, dtype, index).mutate(stmt)
stmt = make_for(indices, stmt, 0)
elif isinstance(ret, (TensorSlice, Scalar, _expr.Expr, numbers.Number)):
indices = lambda_ivs
index, _, _ = get_index(shape, indices, 0)
stage.emit(_make.Store(buffer_var, _make.Cast(dtype, ret), index))
stmt = make_for(indices, stage.pop_stmt(), 0)
elif isinstance(ret, Tensor): # reduction
ret_ivs = [_IterVar((0, ret.shape[i]), ret.name+"_i" + str(i), 0)
for i in range(0, len(ret.shape))]
non_reduce_ivs = []
indices = []
rid = 0
for iv in lambda_ivs:
if iv.var.name[0] == "_":
indices.append(ret_ivs[rid])
rid += 1
else:
indices.append(iv)
non_reduce_ivs.append(iv)
if rid != len(ret.shape):
raise APIError("Incorrect number of reduction axes in lambda arguments")
index, _, _ = get_index(shape, indices, 0)
st = _make.Store(buffer_var, _make.Cast(dtype, ret[tuple(ret_ivs)]), index)
stage.emit(make_for(ret_ivs, st, 0))
stmt = stage.pop_stmt()
stage.input_stages.remove(stage)
if non_reduce_ivs:
stmt = make_for(non_reduce_ivs, stmt, 0)
else:
raise APIError("Unknown return type of the computation rule")
# add attributes to the loop
if isinstance(stmt, _stmt.For):
stmt = _make.For(stmt.loop_var,
stmt.min, stmt.extent,
0, 0, stmt.body,
list(attrs.keys()),
list(attrs.values()))
stage.emit(stmt)
stage.axis_list = indices + stage.axis_list
if return_tensor:
tensor._tensor = stage._op
return tensor
return None
##############################################################################
# APIs exposed to users
##############################################################################
def compute(shape, fcompute, name=None, dtype=None, attrs=OrderedDict()):
"""Construct a new tensor based on the shape and the compute function.
The API **returns a new tensor**. The shape must be a tuple. The number of
elements in the tuple decides the dimension of the returned tensor. The
second field `fcompute` defines the construction rule of the returned
tensor, which must be callable. The number of arguments should match the
dimension defined by `shape`, which *we do not check*. This, however,
provides users more programming flexibility.
The compute function specifies how we calculate each element of the
returned tensor. It can contain other HeteroCL APIs, even imperative DSL.
Parameters
----------
shape : tuple
The shape of the returned tensor
fcompute : callable
The construction rule for the returned tensor
name : str, optional
The name of the returned tensor
dtype : Type, optional
The data type of the placeholder
Returns
-------
Tensor
Examples
--------
.. code-block:: python
# example 1.1 - anonymous lambda function
A = hcl.compute((10, 10), lambda x, y: x+y)
# equivalent code
for x in range(0, 10):
for y in range(0, 10):
A[x][y] = x + y
# example 1.2 - explicit function
def addition(x, y):
return x+y
A = hcl.compute((10, 10), addition)
# example 1.3 - imperative function definition
@hcl.def_([(), ()])
def addition(x, y):
hcl.return_(x+y)
A = hcl.compute((10, 10), addition)
# example 2 - undetermined arguments
def compute_tanh(X):
return hcl.compute(X.shape, lambda *args: hcl.tanh(X[args]))
A = hcl.placeholder((10, 10))
B = hcl.placeholder((10, 10, 10))
tA = compute_tanh(A)
tB = compute_tanh(B)
# example 3 - mixed-paradigm programming
def return_max(x, y):
with hcl.if_(x > y):
hcl.return_(x)
with hcl.else_:
hcl.return_(y)
A = hcl.compute((10, 10), return_max)
"""
# check API correctness
if not isinstance(shape, tuple):
raise APIError("The shape of compute API must be a tuple")
# properties for the returned tensor
shape = CastRemover().mutate(shape)
name = get_name("compute", name)
# prepare the iteration variables
args, nargs = process_fcompute(fcompute, shape)
lambda_ivs = [_IterVar((0, shape[n]), args[n], 0) for n in range(0, nargs)]
# call the helper function that returns a new tensor
tensor = compute_body(name, lambda_ivs, fcompute, shape, dtype, attrs=attrs)
    return tensor
| 2.1875 | 2 |
build/lib/Kronos_heureka_code/Zeit/Uhrzeit/Stunde/Stunde.py | heureka-code/Kronos-heureka-code | 0 | 12797163 | from Kronos_heureka_code.__VergleichsStamm import VergleichsStammZahl
from Kronos_heureka_code.Zeit.Uhrzeit.Stunde.StundeException import *
class Stunde(VergleichsStammZahl):
def __init__(self, stunde: int):
self.__stunde: [int, None] = None
self.stunde = stunde
pass
def __repr__(self):
return f"<Stunde {self.__str__()}>"
def __str__(self):
return str(self.stunde).zfill(2) if self.stunde else "00"
def __int__(self):
return self.stunde
@property
def stunde(self):
return self.__stunde
@stunde.setter
def stunde(self, stunde: int):
if type(stunde) != int:
raise StundeKeineGanzeZahl(stunde)
if stunde < 0:
raise StundeZuKlein(stunde)
if stunde > 60:
raise StundeZuGross(stunde)
self.__stunde = stunde
pass
pass
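A small usage sketch of the class above (zero-padded formatting via `__str__` and range validation in the setter, with the upper bound as written in the class):

```python
# Hypothetical usage of Stunde: formatting and validation.
h = Stunde(7)
print(h)        # -> "07"
print(int(h))   # -> 7

try:
    Stunde(99)  # above the bound checked in the setter
except StundeZuGross:
    print("hour out of range")
```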
| 2.453125 | 2 |
toolcall/auth_backend.py | thebjorn/toolcall | 0 | 12797164 | <gh_stars>0
# -*- coding: utf-8 -*-
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.models import User
class DKSSOBlindTrustAuthenticator(ModelBackend):
def authenticate(self, username=None, password=<PASSWORD>, **kw):
if not kw.get('sso_login'):
return None
return User.objects.get(username=username)
| 2.140625 | 2 |
src/modules/single.py | szachovy/MLstats | 1 | 12797165 |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import scipy
from statsmodels import robust
class Singular_description(object):
'''
Display statistics from every numerical column in data set.
Base class for Mutual description instance.
Outcomes are represented from the beggining (after hoover),
in each histogram plot in the page.
Class covers the most general feature statistics used in data analysis.
'''
def __init__(self):
# Handled by cursor in common.py file in `Mutual_description`
self.column = ""
def histogram(self, plot_number):
# Generate histogram and save as a static file
# size and ticks are adjusted with accordance to display size
sns.set_style("whitegrid")
fig, ax = plt.subplots()
fig.set_size_inches(12, 12)
ax=sns.distplot(self.dataset.iloc[:, [plot_number]], rug=True, color='k')
fig.patch.set_alpha(0.0)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
fig.savefig('static/plot{}.png'.format(plot_number + 1), dpi=fig.dpi)
# return fig
# plt.show()
def measurement(self):
# call for measurement category of the feature
# possible outcomes are:
# -- quantitive continous
# -- quantitive discrete categorical
# -- quantitive discrete numerical
if self.dataset[self.column].dtypes == 'float64':
for value in self.dataset[self.column].values:
if float(value) != int(value):
return 'quantitive continous'
if len(pd.unique(self.dataset[self.column])) == 2:
return 'quantitive discrete categorical'
else:
return 'quantitive discrete numerical'
def average(self):
# TODO: remove
return np.average(self.dataset[self.column])
def expected_value(self):
# call for expected value from feature distribution
return np.mean(self.dataset[self.column])
def median(self):
# call for median from feature distribution
return np.median(self.dataset[self.column])
def mode(self):
# call for mode from feature distribution
return scipy.stats.mode(self.dataset[self.column])
def standard_deviation(self):
# call for standard deviation from feature distribution
return np.std(self.dataset[self.column])
def absolute_deviation_from_mean(self):
# call for absolute deviation from mean from feature distribution
return np.mean(np.absolute(self.dataset[self.column] - np.mean(self.dataset[self.column])))
def absolute_deviation_from_median(self):
# call for mode from feature distribution
return scipy.stats.median_absolute_deviation(self.dataset[self.column])
def quarter_deviation(self):
# call for quarter devaition from feature distribution
q75, q25 = np.percentile(self.dataset[self.column], [75 ,25])
return (q75 - q25)
def coefficient_of_variation(self):
# call for coefficient of variation from feature distribution
return scipy.stats.variation(self.dataset[self.column])
def gini_coefficient(self):
# call for gini coefficient from feature distribution
# TODO: refactorize
mad = np.abs(np.subtract.outer(self.dataset[self.column], self.dataset[self.column])).mean()
rmad = mad/np.mean(self.dataset[self.column])
return 0.5 * rmad
def asymmetry_factor(self):
# call for asymmetry factor from feature distribution
return scipy.stats.skew(self.dataset[self.column])
def entropy(self):
# call for entropy from feature distribution
return scipy.stats.entropy(self.dataset[self.column])
| 3.078125 | 3 |
paragraph_encoder/train_para_encoder.py | rajarshd/Multi-Step-Reasoning | 122 | 12797166 | <filename>paragraph_encoder/train_para_encoder.py
import torch
import numpy as np
import json
import os
import pickle
import sys
import logging
import shutil
from tqdm import tqdm
from torch.autograd import Variable
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data.sampler import RandomSampler
import config
from model import utils, data, vector
from model.retriever import LSTMRetriever
from multi_corpus import MultiCorpus
from torch.utils.data.sampler import SequentialSampler, RandomSampler
import math
logger = logging.getLogger()
global_timer = utils.Timer()
stats = {'timer': global_timer, 'epoch': 0, 'best_valid': 0, 'best_verified_valid': 0, 'best_acc': 0, 'best_verified_acc': 0}
def make_data_loader(args, corpus, train_time=False):
dataset = data.MultiCorpusDataset(
args,
corpus,
args.word_dict,
args.feature_dict,
single_answer=False,
para_mode=args.para_mode,
train_time=train_time
)
sampler = SequentialSampler(dataset) if not train_time else RandomSampler(dataset)
loader = torch.utils.data.DataLoader(
dataset,
batch_size=args.batch_size,
sampler=sampler,
num_workers=args.data_workers,
collate_fn=vector.batchify(args, args.para_mode, train_time=train_time),
pin_memory=True
)
return loader
def init_from_checkpoint(args):
logger.info('Loading model from saved checkpoint {}'.format(args.pretrained))
model = torch.load(args.pretrained)
word_dict = model['word_dict']
feature_dict = model['feature_dict']
args.vocab_size = len(word_dict)
args.embedding_dim_orig = args.embedding_dim
args.word_dict = word_dict
args.feature_dict = feature_dict
ret = LSTMRetriever(args, word_dict, feature_dict)
# load saved param values
ret.model.load_state_dict(model['state_dict']['para_clf'])
optimizer = None
parameters = ret.get_trainable_params()
if args.optimizer == 'sgd':
optimizer = optim.SGD(parameters, args.learning_rate,
momentum=args.momentum,
weight_decay=args.weight_decay)
elif args.optimizer == 'adamax':
optimizer = optim.Adamax(parameters,
weight_decay=args.weight_decay)
elif args.optimizer == 'nag':
optimizer = NAG(parameters, args.learning_rate, momentum=args.momentum,
weight_decay=args.weight_decay)
else:
raise RuntimeError('Unsupported optimizer: %s' % args.optimizer)
optimizer.load_state_dict(model['state_dict']['optimizer'])
logger.info('Model loaded...')
return ret, optimizer, word_dict, feature_dict
def init_from_scratch(args, train_exs):
logger.info('Initializing model from scratch')
word_dict = feature_dict = None
# create or get vocab
word_dict = utils.build_word_dict(args, train_exs)
if word_dict is not None:
args.vocab_size = len(word_dict)
args.embedding_dim_orig = args.embedding_dim
args.word_dict = word_dict
args.feature_dict = feature_dict
ret = LSTMRetriever(args, word_dict, feature_dict)
# --------------------------------------------------------------------------
# TRAIN/VALID LOOP
# --------------------------------------------------------------------------
# train
parameters = ret.get_trainable_params()
optimizer = None
if parameters is not None and len(parameters) > 0:
if args.optimizer == 'sgd':
optimizer = optim.SGD(parameters, args.learning_rate,
momentum=args.momentum,
weight_decay=args.weight_decay)
elif args.optimizer == 'adamax':
optimizer = optim.Adamax(parameters,
weight_decay=args.weight_decay)
elif args.optimizer == 'nag':
optimizer = NAG(parameters, args.learning_rate, momentum=args.momentum,
weight_decay=args.weight_decay)
else:
raise RuntimeError('Unsupported optimizer: %s' % args.optimizer)
else:
pass
return ret, optimizer, word_dict, feature_dict
def train_binary_classification(args, ret_model, optimizer, train_loader, verified_dev_loader=None):
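    # One training pass for the paragraph retriever: each paragraph is scored against its
    # question and optimised with binary cross-entropy, where the label means "the answer
    # occurs in this paragraph at least once" (y_num_occurrences > 0).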
args.train_time = True
para_loss = utils.AverageMeter()
ret_model.model.train()
for idx, ex in enumerate(train_loader):
if ex is None:
continue
        inputs = [e if e is None or type(e) != type(ex[0]) else Variable(e.cuda(non_blocking=True))
for e in ex[:]]
ret_input = [*inputs[:4]]
scores, _, _ = ret_model.score_paras(*ret_input)
y_num_occurrences = Variable(ex[-2])
labels = (y_num_occurrences > 0).float()
labels = labels.cuda()
# BCE logits loss
batch_para_loss = F.binary_cross_entropy_with_logits(scores.squeeze(1), labels)
optimizer.zero_grad()
batch_para_loss.backward()
torch.nn.utils.clip_grad_norm(ret_model.get_trainable_params(),
2.0)
optimizer.step()
para_loss.update(batch_para_loss.data.item())
if math.isnan(para_loss.avg):
import pdb
pdb.set_trace()
if idx % 25 == 0 and idx > 0:
logger.info('Epoch = {} | iter={}/{} | para loss = {:2.4f}'.format(
stats['epoch'],
idx, len(train_loader),
para_loss.avg))
para_loss.reset()
def eval_binary_classification(args, ret_model, corpus, dev_loader, verified_dev_loader=None, save_scores = True):
total_exs = 0
args.train_time = False
ret_model.model.eval()
accuracy = 0.0
for idx, ex in enumerate(tqdm(dev_loader)):
if ex is None:
raise BrokenPipeError
        inputs = [e if e is None or type(e) != type(ex[0]) else Variable(e.cuda(non_blocking=True))
for e in ex[:]]
ret_input = [*inputs[:4]]
total_exs += ex[0].size(0)
scores, _, _ = ret_model.score_paras(*ret_input)
scores = F.sigmoid(scores)
y_num_occurrences = Variable(ex[-2])
labels = (y_num_occurrences > 0).float()
labels = labels.data.numpy()
scores = scores.cpu().data.numpy()
scores = scores.reshape((-1))
if save_scores:
for i, pid in enumerate(ex[-1]):
corpus.paragraphs[pid].model_score = scores[i]
scores = scores > 0.5
a = scores == labels
accuracy += a.sum()
logger.info('Eval accuracy = {} '.format(accuracy/total_exs))
top1 = get_topk(corpus)
return top1
def print_vectors(args, para_vectors, question_vectors, corpus, train=False, test=False):
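    # Dumps the dense representations used for retrieval: one vector per question and per
    # paragraph, a qid -> row-index map, cumulative paragraph counts per question, and the
    # indices of answer-bearing paragraphs, written as .npy/.json files under save_dir.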
all_question_vectors = []
all_para_vectors = []
qid2idx = {}
cum_num_lens = []
all_correct_ans = {}
cum_num_len = 0
for question_i, qid in enumerate(corpus.questions):
labels = []
all_question_vectors.append(question_vectors[qid])
qid2idx[qid] = question_i
cum_num_len += len(corpus.questions[qid].pids)
cum_num_lens.append(cum_num_len)
for para_i, pid in enumerate(corpus.questions[qid].pids):
if corpus.paragraphs[pid].ans_occurance > 0:
labels.append(para_i)
all_para_vectors.append(para_vectors[pid])
all_correct_ans[qid] = labels
all_para_vectors = np.stack(all_para_vectors)
all_question_vectors = np.stack(all_question_vectors)
assert all_para_vectors.shape[0] == cum_num_lens[-1]
assert all_question_vectors.shape[0] == len(cum_num_lens)
assert all_question_vectors.shape[0] == len(qid2idx)
assert all_question_vectors.shape[0] == len(all_correct_ans)
## saving code
if train:
OUT_DIR = os.path.join(args.save_dir, args.src, args.domain, "train/")
else:
if args.is_test == 0:
OUT_DIR = os.path.join(args.save_dir, args.src, args.domain, "dev/")
else:
OUT_DIR = os.path.join(args.save_dir, args.src, args.domain, "test/")
logger.info("Printing vectors at {}".format(OUT_DIR))
if not os.path.exists(OUT_DIR):
os.makedirs(OUT_DIR)
else:
shutil.rmtree(OUT_DIR, ignore_errors=True)
os.makedirs(OUT_DIR)
json.dump(qid2idx, open(OUT_DIR + 'map.json', 'w'))
json.dump(all_correct_ans, open(OUT_DIR + 'correct_paras.json', 'w'))
all_cumlen = np.array(cum_num_lens)
np.save(OUT_DIR + "document", all_para_vectors)
np.save(OUT_DIR + "question", all_question_vectors)
np.save(OUT_DIR + "all_cumlen", cum_num_lens)
def save_vectors(args, ret_model, corpus, data_loader, verified_dev_loader=None, save_scores = True, train=False, test=False):
total_exs = 0
args.train_time = False
ret_model.model.eval()
para_vectors = {}
question_vectors = {}
for idx, ex in enumerate(tqdm(data_loader)):
if ex is None:
raise BrokenPipeError
        inputs = [e if e is None or type(e) != type(ex[0]) else Variable(e.cuda(non_blocking=True))
for e in ex[:]]
ret_input = [*inputs[:4]]
total_exs += ex[0].size(0)
scores, doc, ques = ret_model.score_paras(*ret_input)
scores = scores.cpu().data.numpy()
scores = scores.reshape((-1))
if save_scores:
for i, pid in enumerate(ex[-1]):
para_vectors[pid] = doc[i]
for i, qid in enumerate([corpus.paragraphs[pid].qid for pid in ex[-1]]):
if qid not in question_vectors:
question_vectors[qid] = ques[i]
for i, pid in enumerate(ex[-1]):
corpus.paragraphs[pid].model_score = scores[i]
get_topk(corpus)
print_vectors(args, para_vectors, question_vectors, corpus, train, test)
def get_topk(corpus):
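    # Retrieval accuracy: fraction of questions for which at least one of the top-1/3/5
    # paragraphs (ranked by model_score) actually contains the answer (ans_occurance > 0).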
top1 = 0
top3 = 0
top5 = 0
for qid in corpus.questions:
para_scores = [(corpus.paragraphs[pid].model_score,corpus.paragraphs[pid].ans_occurance ) for pid in corpus.questions[qid].pids]
sorted_para_scores = sorted(para_scores, key=lambda x: x[0], reverse=True)
if sorted_para_scores[0][1] > 0:
top1 += 1
if sum([ans[1] for ans in sorted_para_scores[:3]]) > 0:
top3 += 1
if sum([ans[1] for ans in sorted_para_scores[:5]]) > 0:
top5 += 1
top1 = top1/len(corpus.questions)
top3 = top3/len(corpus.questions)
top5 = top5/len(corpus.questions)
logger.info('top1 = {}, top3 = {}, top5 = {} '.format(top1, top3 ,top5 ))
return top1
def get_topk_tfidf(corpus):
top1 = 0
top3 = 0
top5 = 0
for qid in corpus.questions:
para_scores = [(corpus.paragraphs[pid].tfidf_score, corpus.paragraphs[pid].ans_occurance) for pid in
corpus.questions[qid].pids]
sorted_para_scores = sorted(para_scores, key=lambda x: x[0])
# import pdb
# pdb.set_trace()
if sorted_para_scores[0][1] > 0:
top1 += 1
if sum([ans[1] for ans in sorted_para_scores[:3]]) > 0:
top3 += 1
if sum([ans[1] for ans in sorted_para_scores[:5]]) > 0:
top5 += 1
logger.info(
'top1 = {}, top3 = {}, top5 = {} '.format(top1 / len(corpus.questions), top3 / len(corpus.questions),
top5 / len(corpus.questions)))
def run_predictions(args, data_loader, model, eval_on_train_set=False):
args.train_time = False
top_1 = 0
top_3 = 0
top_5 = 0
total_num_questions = 0
map_counter = 0
cum_num_lens = []
qid2idx = {}
sum_num_paras = 0
all_correct_answers = {}
for ex_counter, ex in tqdm(enumerate(data_loader)):
ret_input = [*ex]
y_num_occurrences = ex[3]
labels = (y_num_occurrences > 0)
try:
topk_paras, docs, ques = model.return_topk(5,*ret_input)
except RuntimeError:
import pdb
pdb.set_trace()
num_paras = ex[1]
qids = ex[-1]
if args.save_para_clf_output:
docs = docs.cpu().data.numpy()
ques = ques.cpu().data.numpy()
if ex_counter == 0:
documents = docs
questions = ques
else:
documents = np.concatenate([documents, docs])
questions = np.concatenate([questions, ques])
### create map and cum_num_lens
for i, qid in enumerate(qids):
qid2idx[qid] = map_counter
sum_num_paras += num_paras[i]
cum_num_lens.append(sum_num_paras)
all_correct_answers[map_counter] = []
st = sum(num_paras[:i])
for j in range(num_paras[i]):
if labels[st+j] == 1:
all_correct_answers[map_counter].append(j)
### Test case:
assert len(all_correct_answers[map_counter]) == sum(labels.data.numpy()[st: st + num_paras[i]])
map_counter += 1
counter = 0
for q_counter, ranked_para_ids in enumerate(topk_paras):
total_num_questions += 1
for i, no_paras in enumerate(ranked_para_ids):
if labels[counter + no_paras ] ==1:
if i <= 4:
top_5 += 1
if i <= 2:
top_3 += 1
if i <= 0:
top_1 += 1
break
counter += num_paras[q_counter]
logger.info('Accuracy of para classifier when evaluated on the annotated dev set.')
logger.info('top-1: {:2.4f}, top-3: {:2.4f}, top-5: {:2.4f}'.format(
(top_1 * 1.0 / total_num_questions),
(top_3 * 1.0 / total_num_questions),
(top_5 * 1.0 / total_num_questions)))
## saving code
if args.save_para_clf_output:
if eval_on_train_set:
OUT_DIR = "/iesl/canvas/sdhuliawala/vectors_web/train/"
else:
OUT_DIR = "/iesl/canvas/sdhuliawala/vectors_web/dev/"
if not os.path.exists(OUT_DIR):
os.mkdir(OUT_DIR)
else:
shutil.rmtree(OUT_DIR, ignore_errors=True)
os.mkdir(OUT_DIR)
#Test cases
assert cum_num_lens[-1] == documents.shape[0]
assert questions.shape[0] == documents.shape[0]
assert len(cum_num_lens) == len(qid2idx)
assert len(cum_num_lens) == len(all_correct_answers)
json.dump(qid2idx, open(OUT_DIR + 'map.json', 'w'))
json.dump(all_correct_answers, open(OUT_DIR + 'correct_paras.json', 'w'))
all_cumlen = np.array(cum_num_lens)
np.save(OUT_DIR + "document", documents)
np.save(OUT_DIR + "question", questions)
np.save(OUT_DIR + "all_cumlen", all_cumlen)
return (top_1 * 1.0 / total_num_questions), (top_3 * 1.0 / total_num_questions), (top_5 * 1.0 / total_num_questions)
def save(args, model, optimizer, filename, epoch=None):
params = {
'state_dict': {
'para_clf': model.state_dict(),
'optimizer': optimizer.state_dict()
},
'word_dict': args.word_dict,
'feature_dict': args.feature_dict
}
args.word_dict = None
args.feature_dict = None
params['config'] = vars(args)
if epoch:
params['epoch'] = epoch
try:
torch.save(params, filename)
# bad hack for not saving dictionary twice
args.word_dict = params['word_dict']
args.feature_dict = params['feature_dict']
except BaseException:
logger.warn('[ WARN: Saving failed... continuing anyway. ]')
# ------------------------------------------------------------------------------
# Main.
# ------------------------------------------------------------------------------
def main(args):
# PRINT CONFIG
logger.info('-' * 100)
logger.info('CONFIG:\n%s' % json.dumps(vars(args), indent=4, sort_keys=True))
# small can't test
if args.small == 1:
args.test = 0
if args.small == 1:
args.train_file_name = args.train_file_name + "_small"
args.dev_file_name = args.dev_file_name + "_small"
if args.test == 1:
args.test_file_name = args.test_file_name + "_small"
args.train_file_name = args.train_file_name + ".pkl"
args.dev_file_name = args.dev_file_name + ".pkl"
if args.test == 1:
args.test_file_name = args.test_file_name + ".pkl"
logger.info("Loading pickle files")
fin = open(os.path.join(args.data_dir, args.src, "data", args.domain, args.train_file_name), "rb")
all_train_exs = pickle.load(fin)
fin.close()
fin = open(os.path.join(args.data_dir, args.src, "data", args.domain, args.dev_file_name), "rb")
all_dev_exs = pickle.load(fin)
fin.close()
if args.test == 1:
fin = open(os.path.join(args.data_dir, args.src, "data", args.domain, args.test_file_name), "rb")
all_test_exs = pickle.load(fin)
fin.close()
logger.info("Loading done!")
logger.info("Num train examples {}".format(len(all_train_exs.paragraphs)))
logger.info("Num dev examples {}".format(len(all_dev_exs.paragraphs)))
if args.test == 1:
logger.info("Num test examples {}".format(len(all_test_exs.paragraphs)))
if args.pretrained is None:
ret_model, optimizer, word_dict, feature_dict = init_from_scratch(args, all_train_exs)
else:
ret_model, optimizer, word_dict, feature_dict = init_from_checkpoint(args)
# make data loader
logger.info("Making data loaders...")
if word_dict == None:
args.word_dict = utils.build_word_dict(args, (all_train_exs, all_dev_exs))
word_dict = args.word_dict
train_loader = make_data_loader(args, all_train_exs, train_time=False) if args.eval_only else make_data_loader(args, all_train_exs, train_time=True)
dev_loader = make_data_loader(args, all_dev_exs)
if args.test:
test_loader = make_data_loader(args, all_test_exs)
if args.eval_only:
logger.info("Saving dev paragraph vectors")
save_vectors(args, ret_model, all_dev_exs, dev_loader, verified_dev_loader=None)
logger.info("Saving train paragraph vectors")
save_vectors(args, ret_model, all_train_exs, train_loader, verified_dev_loader=None, train=True)
if args.test:
args.is_test = 1
logger.info("Saving test paragraph vectors")
save_vectors(args, ret_model, all_test_exs, test_loader, verified_dev_loader=None)
else:
get_topk_tfidf(all_dev_exs)
for epoch in range(args.num_epochs):
stats['epoch'] = epoch
train_binary_classification(args, ret_model, optimizer, train_loader, verified_dev_loader=None)
logger.info('checkpointing model at {}'.format(args.model_file))
## check pointing##
save(args, ret_model.model, optimizer, args.model_file+".ckpt", epoch=stats['epoch'])
logger.info("Evaluating on the full dev set....")
top1 = eval_binary_classification(args, ret_model, all_dev_exs, dev_loader, verified_dev_loader=None)
if stats['best_acc'] < top1:
stats['best_acc'] = top1
logger.info('Best accuracy {}'.format(stats['best_acc']))
logger.info('Saving model at {}'.format(args.model_file))
logger.info("Logs saved at {}".format(args.log_file))
save(args, ret_model.model, optimizer, args.model_file, epoch=stats['epoch'])
if __name__ == '__main__':
# MODEL
logger.info('-' * 100)
# Parse cmdline args and setup environment
args = config.get_args()
# Set cuda
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
torch.cuda.set_device(args.gpu)
# Set random state
np.random.seed(args.random_seed)
torch.manual_seed(args.random_seed)
if args.cuda:
torch.cuda.manual_seed(args.random_seed)
# Set logging
logger.setLevel(logging.INFO)
fmt = logging.Formatter('%(asctime)s: %(message)s', '%m/%d/%Y %I:%M:%S %p')
console = logging.StreamHandler()
console.setFormatter(fmt)
logger.addHandler(console)
if args.log_file:
if args.checkpoint:
logfile = logging.FileHandler(args.log_file, 'a')
else:
logfile = logging.FileHandler(args.log_file, 'w')
logfile.setFormatter(fmt)
logger.addHandler(logfile)
logger.info('[ COMMAND: %s ]' % ' '.join(sys.argv))
# Run!
main(args) | 2.375 | 2 |
PRS/PRS_extract_phenotypes.py | yochaiedlitz/T2DM_UKB_predictions | 1 | 12797167 | <reponame>yochaiedlitz/T2DM_UKB_predictions
import pandas
import os
import numpy as np
import sys
from pysnptools.snpreader.bed import Bed
import subprocess
cleanDataPath='/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/cleanData/'
rawDataPath='/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/rawData'
pheno_fn_bac =os.path.join(cleanDataPath,'noMissingKakiPhenotypesWithCovariates_nodfukim.phenotypes')
#pheno_fn_bac =os.path.join(cleanDataPath,'allChipPhenotypes_nodfukimWith5PCair.phenotypes')
pheno_fn_bacDic=os.path.join(cleanDataPath,'dicNoMissingKakiPhenotypesWithCovariates_nodfukim.phenotypes')
pheno_fn_bacAllPNP=os.path.join(rawDataPath,'allPNPPhenotypes.phenotypes')
iidsNoSharedEnv='/net/mraid08/export/genie/Microbiome/Analyses/PNPChip/cleanData/PNP_autosomal_clean2_nodfukim_NoCouples.txt'
PNP_16S_DIR = '/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/16S'
glycemicStatusPath='/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/glycemic_status.csv'
def extract(*args,**kwargs):
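    # Builds a phenotype DataFrame for the PNP chip cohort. Positional flags select column
    # groups (bacterial taxa levels, covariates, blood panel, glucose, FFQ items, drugs,
    # meals, ...) and filtering behaviour (keep_related, keep_household, keep_sterile,
    # permute, ...); kwargs 'taxa', 'threshold' and 'ratio' control which taxa columns
    # survive the presence filter.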
known_args = ['dic', 'all_bac', 's', 'g','f','o','c','p','otu', 'all_non_bac', 'covars', 'blood', 'glucose', 'ffq', 'antropo',
's_stats_pheno', 'fid', 'keep_household', 'no_log', 'keep_related', 'keep_sterile', '16s', '-9',
'covars_noPCs', 'PCs', 'lactose','include_allPNP','IsGenotek','permute','meals','other','drugs',
'calories','bloodType','questionnaires','keep_missingCovars','activity','activityTypesFreq','cereals', 'delivery','dressSweetners','drinks','fruits','hunger',
'legumes','meatProducts','pastry','qualityOfLiving', 'smoking','sweets','vegetables','womenOnlyQuestions',
'genotek_only', 'swab_only']
ffq_args = ['activity','activityTypesFreq','cereals', 'delivery','dressSweetners','drinks','fruits','hunger',
'legumes','meatProducts','pastry','qualityOfLiving', 'smoking','sweets','vegetables','womenOnlyQuestions']
drug_args=['D.lipid', 'D.All', 'D.Psychiatric', 'D.pain', 'D.CVD', 'D.GI','D.Thyroid', 'D.NSAID','D.Contraception']
meals=['Vodka or Arak', 'Avocado', 'Parsley', 'Coated peanuts',
'Sugar', 'Smoked Salmon', 'Melon', 'Roll', 'Whipped cream',
'Coconut milk', 'Pretzels', 'Kohlrabi', 'Eggplant Salad',
'Cooked green beans', 'Cooked mushrooms', 'Watermelon',
'Grilled cheese', 'Bissli', 'Pullet', 'Hummus',
'Chinese Chicken Noodles', 'Shakshouka', 'Tahini',
'Chicken breast', 'Steak', 'Light Bread',
'Wholemeal Crackers', 'Sugar Free Gum', 'Hamburger',
'Dark Beer', 'Cooked beets', 'Almonds', 'Falafel', 'Noodles',
'Jachnun', 'Turkey', 'Sushi', 'Brazil nuts', 'Orange', 'Rice',
'Diet Fruit Drink', 'Corn schnitzel', 'Cappuccino',
'Low fat Milk', 'Pickled cucumber', 'Soymilk',
'Dates', 'Croissant', 'Biscuit', 'Potato chips',
'White Cheese', 'French fries', 'Wholemeal Bread', 'Tuna Salad',
'Chocolate spread', 'Kebab', 'Rice crackers', 'Wafers',
'Lettuce', 'Rice Noodles', 'Lentils', 'Mutton',
'Wholemeal Noodles', 'Green Tea', 'Schnitzel', 'Brown Sugar',
'Peanuts', 'Mayonnaise', 'Persimmon', 'Apple juice',
'Stuffed Peppers', 'Egg', 'Pear', 'Peas', 'Pecan',
'Cooked cauliflower', 'Cooked Sweet potato', 'Butter',
'Omelette', 'Coated Wafers', 'Boiled corn', 'Chicken drumstick',
'Pita', 'Pasta Bolognese', 'Chicken Meatballs', 'Burekas',
'Carrots', 'Tofu', 'Wholemeal Pita', 'Sunflower seeds',
'Coriander', 'Ciabatta', 'Tomato sauce', 'Heavy cream',
'Banana', 'Kif Kef', 'Mustard', 'Coke', 'Vegetable Soup',
'Sausages', 'Pancake', 'Pasta', 'Sauteed vegetables', 'Plum',
'Goat Milk Yogurt', 'Orange juice', 'Potatoes', 'Halva',
'Yellow pepper', 'Mango', 'Lasagna', 'Popcorn', 'Hummus Salad',
'Tilapia', 'Pizza', 'Fried cauliflower', 'Roasted eggplant',
'Baguette', 'Lentil Soup', 'Tzfatit Cheese', 'Nectarine',
'Chicken legs', 'Nuts', 'Goat Cheese', 'Jam', 'Feta Cheese',
'Mandarin', 'Pesto', 'Sugar substitute', 'Cheesecake',
'Raisins', 'Chocolate', 'Quinoa', 'Cooked broccoli',
'Beef Cholent', 'Cracker', 'Chocolate Cookies', 'White beans',
'Cooked zucchini', 'Sweet potato', 'Wine', 'Cookies',
'Challah', 'Spelled', 'Honey', 'Green beans', 'Milk',
'Peanut Butter', 'Cooked carrots', 'Lemon', 'Salty Cookies',
'Beef', 'Meatballs', 'Hamburger sandwich', 'Chicken thighs',
'Granola', 'Beet', 'Couscous', 'Beet Salad',
'Chocolate Mousse Cake', 'Sweet Roll', 'Danish', 'Coffee',
'Pasta Salad', 'Cuba', 'Chicken Liver', 'Sweet Challah',
'Minced meat', 'Chocolate cake', 'Diet Coke', 'Dried dates',
'Carrot Cake', 'Doritos', 'Israeli couscous', 'Pistachio',
'Date honey', 'Vinaigrette', 'Bamba', 'Dark Chocolate',
'Turkey Shawarma', 'Olive oil', #u'Parmesan\xc2\xa0cheese',
'Guacamole', 'Coleslaw', 'Americano', 'Pesek Zman snack',
'Green onions', 'Mushrooms', 'Lemon juice', 'Canned Tuna Fish',
'Vegetable Salad', 'Fried eggplant', 'Salmon', 'Cashew',
'Jewish donut', 'Rugelach', 'Cake', 'Ravioli', 'Tomatoes',
'Wholemeal Light Bread', 'Marble Cake', 'Brown Rice',
'Cold cut', 'Gilthead Bream', 'Garlic', 'Grapes',
'Chocolate Chip Cookies', 'Cucumber', 'Mung Bean', 'Ketchup',
'Sweet Yogurt', 'Bread', 'Onion', 'Cream Cheese',
'Chicken soup', 'Wholemeal Roll', 'Canned corn', 'Salty Cheese',
'Melawach', 'White cake', 'Apple', 'Lettuce Salad', 'Cereals',
'Yellow Cheese', 'Tea', 'Beer', 'Mozzarella Cheese',
'Fried onions', 'Ice cream', 'Cream Cake', 'Green cabbage',
'Olives', 'Balsamic vinegar', 'Peach', 'Light Yellow Cheese',
'Red pepper', 'Bagel', 'Entrecote', 'Cottage cheese', 'Oil',
'Natural Yogurt', 'Walnuts', 'Edamame', 'Majadra', 'Oatmeal',
'Soy sauce', 'Strawberry', 'Pastrami', 'Lemonade',
'Pasta with tomato sauce', 'Chicken']#removed: u'Soda water',u'Water', u'Salt',
known_args+= ffq_args
known_args+= drug_args
known_kwargs = ['ratio', 'threshold','taxa']
for arg in args: assert arg in known_args, 'unkown arg: %s'%(arg)
for kwarg in list(kwargs.keys()): assert kwarg in known_kwargs, 'unkown kwarg: %s'%(kwarg)
if ('16s' in args): assert 'dic' not in args, '16s and dic are mutually exclusive'
if ('taxa' in list(kwargs.keys())): assert len(set(['all_bac','s', 'g','f','o','c','p','otu']).intersection(set(args)))==0, \
'taxa is mutual exclusive with all_bac,s,g,f,o,c,p,otu'
if 'include_allPNP' in args: assert 'dic' not in args, 'include_allPNP does not support dicotomize bacteria'
if 'IsGenotek' in args: assert 'covars' not in args, 'IsGenotek and covars are mutually exclusive'
if 'otu' in args: assert '16s' in args
if 'dic' in args:
pheno =pandas.read_csv(pheno_fn_bacDic,sep='\t')
pheno.set_index('IID', inplace=True, drop=True)
pheno_nodic =pandas.read_csv(pheno_fn_bac,sep='\t')
pheno_nodic.set_index('IID', inplace=True, drop=True)
pheno_s = pheno_nodic[[c for c in pheno_nodic.columns if c[:2]=='s_']]
pheno_g = pheno_nodic[[c for c in pheno_nodic.columns if c[:2]=='g_']]
else:
if 'include_allPNP' in args:
pheno =pandas.read_csv(pheno_fn_bacAllPNP,sep='\t')
else:
pheno =pandas.read_csv(pheno_fn_bac,sep='\t')
pheno.set_index('IID', inplace=True, drop=True)
if 'include_allPNP'in args:
status, output = subprocess.getstatusoutput("cut -f 1 %s -d ' ' | cut -f 1 -d '_'"%os.path.join(rawDataPath,'tmp','dfukim.txt'))
pheno =pheno[~pheno.index.isin([int(dafook) for dafook in output.split('\n')])]
if ('16s' in args):
pheno = pheno[[c for c in pheno if c[:2] not in ('s_', 'g_', 'f_', 'o_', 'c_', 'p_')]]
for taxa_level in ['otu', 'species', 'genus', 'family', 'order', 'class', 'phylum']:
df_taxa = pandas.read_csv(os.path.join(PNP_16S_DIR, taxa_level+'.txt'), sep='\t', index_col=0)
df_taxa[df_taxa<1e-3] = 1e-4
df_taxa = np.log10(df_taxa)
pheno = pheno.merge(df_taxa, left_index=True, right_index=True)
pheno_s = pheno[[c for c in pheno.columns if c[:2]=='s_']]
pheno_g = pheno[[c for c in pheno.columns if c[:2]=='g_']]
### for c in pheno:
### if (c[:2] not in ['c_', 'g_', 'o_', 's_', 'k_', 'p_', 'f_']): print c
alpha_diversity_s = (pheno_s>pheno_s.min().min()).sum(axis=1)
alpha_diversity_g = (pheno_g>pheno_g.min().min()).sum(axis=1)
pheno.loc[pheno.Hips==-9, 'WHR'] = np.nan
pheno.loc[pheno.Waist==-9, 'WHR'] = np.nan
pheno['LDLCholesterol'] = pheno['Cholesterol,total'] - pheno['HDLCholesterol'] - 2*pheno['Triglycerides']
if 'genotek_only' in args:
pheno = pheno.loc[pheno['IsGenotek']==1]
if 'swab_only' in args:
pheno = pheno.loc[pheno['IsGenotek']==0]
mb_columns = []
if 'taxa' in kwargs:
if kwargs['taxa'][0]=='*':
kwargs['taxa']=[initial+kwargs['taxa'][1:] for initial in ('s_', 'g_', 'f_', 'o_', 'c_', 'p_')]
elif kwargs['taxa'][1]=='_':
kwargs['taxa']=[kwargs['taxa']]
for taxa in kwargs['taxa']:
taxadf=pheno.filter(regex=(taxa))
mb_columns += taxadf.columns.values.tolist()
if 'all_bac' in args:
args=list(args)+['s','g','f','o','c','p']
if 's' in args:
mb_columns += [c for c in pheno.columns if c[:2]=='s_' ]
if 'g' in args:
mb_columns += [c for c in pheno.columns if c[:2]=='g_' ]
if 'f' in args:
mb_columns += [c for c in pheno.columns if c[:2]=='f_' ]
if 'o' in args:
mb_columns += [c for c in pheno.columns if c[:2]=='o_' ]
if 'c' in args:
mb_columns += [c for c in pheno.columns if c[:2]=='c_' ]
if 'p' in args:
mb_columns += [c for c in pheno.columns if c[:2]=='p_' ]
if 'otu' in args:
mb_columns += [c for c in pheno.columns if c[:4]=='OTU_' ]
if 'no_log' in args:
assert 'dic' not in args, 'dic and no_log are mutually exclusive'
pheno[mb_columns] = 10**pheno[mb_columns]
if 'all_non_bac' in args:
args=list(args)+['covars','blood','glucose','ffq','antropo']
mb_columns += ['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek']
if 'include_allPNP' not in args or ('PCs') in args:
mb_columns += [c for c in pheno.columns if c[:2]=='PC']
if 'lactose' in args:
mb_columns += ['lactose']
if 'blood' in args:
# mb_columns += ['ALT','Albumin','AST','Basophils%','Calcium','Chloride','Cholesterol,total','Creatinine',
# 'CRP(WIDERANGE)','CRPhs','Eosinophils%','HCT','HDLCholesterol','Hemoglobin','HbA1C%','Lymphocytes%',
# 'MCH','MCHC','MCV','MPV','Monocytes%','Neutrophils%','Phosphorus','Platelets','Potassium','RBC',
# 'RDW','Sodium','TSH','WBC','AlkalinePhosphatase','GGT','LDH','Iron','LDLCholesterol','Magnesium',
# 'Triglycerides','TotalProtein','TotalBilirubin','Urea']
mb_columns += ['ALT','Albumin','AST','Basophils%','Calcium','Chloride','Cholesterol,total','Creatinine',
'CRP(WIDERANGE)','CRPhs','Eosinophils%','HCT','HDLCholesterol','Hemoglobin','HbA1C%','Lymphocytes%',
'MCH','MCHC','MCV','MPV','Monocytes%','Neutrophils%','Phosphorus','Platelets','Potassium','RBC',
'RDW','Sodium','TSH','WBC','LDLCholesterol']
if 'glucose' in args:
mb_columns += ['95P_Glucose','Glucose_Noise','Max_Glucose','Median_Glucose','WakeupGlucose',
'MeanGlucoseResponse','MeanBreadResponse','MeanBreadButterResponse']
if 'ffq' in args:
mb_columns += ['Alcoholic_Drinks_Freq','Cigarretes_per_day','Coffee_Freq','Start_smoking_age']
if 'antropo' in args:
mb_columns += ['BMI','BPdia','BPsys','HeartRate','Height','Hips','WHR','Waist']
if 's_stats_pheno' in args:
s_stats=['BMI','Cholesterol,total','WakeupGlucose','Albumin','Creatinine','HbA1C%','Height','Hips','Waist','WHR','HDLCholesterol'] #, 'Triglycerides', 'LDLCholesterol']
mb_columns+=s_stats
mb_columns=list(set(mb_columns))
if 'fid' in args:
mb_columns = ['FID']+mb_columns
########################FFQ START#####################
if 'questionnaires' in args:
args=list(args)+ffq_args
mb_columns_extra=[]
if 'activity' in args:
mb_columns_extra += ['Work activity','Physical activity - mins','Physical activity - freq']
if 'activityTypesFreq' in args:
mb_columns_extra += ['T1Activity kind','Type 1 activity - freq','T2Activity kind',
'Type 2 activity - freq','T3Activity kind','Type 3 activity - freq']
if 'bloodType' in args:
mb_columns_extra += ['Blood A','Blood B','Blood RH-']
if 'cereals' in args:
mb_columns_extra += ['Cornflakes Freq','Granola or Bernflaks Freq','Cooked Cereal such as Oatmeal Porridge Freq',
'Rice Freq','Couscous, Burgul, Mamaliga, Groats Freq', 'Potatoes Boiled, Baked, Mashed, Potatoes Salad Freq',
'Fries Freq', 'Pasta or Flakes Freq']
if 'delivery' in args:
mb_columns_extra += ['C-Section','Home delivery','Was breastfed']
if 'dressSweetners' in args:
mb_columns_extra += ['Oil as an addition for Salads or Stews Freq','Mayonnaise Including Light Freq',
'Thousand Island Dressing, Garlic Dressing Freq', 'Honey, Jam, fruit syrup, Maple syrup Freq',
'White or Brown Sugar Freq', 'Artificial Sweeteners Freq',]
if 'drinks' in args:
mb_columns_extra += ['Nectar, Cider Freq', 'Diet Juice Freq', 'Juice Freq', 'Diet Soda Freq',
'Regular Sodas with Sugar Freq', 'Decaffeinated Coffee Freq', 'Coffee Freq', 'Herbal Tea Freq',
'Green Tea Freq', 'Regular Tea Freq', 'Beer Freq', 'Sweet Dry Wine, Cocktails Freq', 'Alcoholic Drinks Freq']
if 'fruits' in args:
mb_columns_extra += ['Mandarin or Clementine Freq', 'Orange or Grapefruit Freq', 'Orange or Grapefruit Juice Freq',
'Apple Freq', 'Apricot Fresh or Dry, or Loquat Freq', 'Grapes or Raisins Freq', 'Banana Freq',
'Melon Freq', 'Kiwi or Strawberries Freq', 'Mango Freq', 'Peach, Nectarine, Plum Freq',
'Pear Fresh, Cooked or Canned Freq','Persimmon Freq', 'Watermelon Freq', 'Dried Fruits Freq', 'Fruit Salad Freq']
if 'hunger' in args:
mb_columns_extra += ['General Hunger','Morning Hunger', 'Midday Hunger', 'Evening Hunger']
if 'legumes' in args:
mb_columns_extra += ['Falafel in Pita Bread Freq', 'Cooked Legumes Freq', 'Processed Meat Free Products Freq']
if 'meatProducts' in args:
mb_columns_extra += ['Egg Recipes Freq', 'Egg, Hard Boiled or Soft Freq', 'Schnitzel Turkey or Chicken Freq',
'Chicken or Turkey With Skin Freq', 'Chicken or Turkey Without Skin Freq', 'Sausages Freq',
'Sausages such as Salami Freq', 'Pastrami or Smoked Turkey Breast Freq',
'Turkey Meatballs, Beef, Chicken Freq', 'Shish Kebab in Pita Bread Freq',
'Falafel in Pita version 2 Freq','Processed Meat Products Freq','Beef, Veal, Lamb, Pork, Steak, Golash Freq',
'Mixed Meat Dishes as Moussaka, Hamin, Cuba Freq', 'Mixed Chicken or Turkey Dishes Freq',
'Beef or Chicken Soup Freq', 'Internal Organs Freq', 'Fish Cooked, Baked or Grilled Freq', 'Fried Fish Freq',
'Canned Tuna or Tuna Salad Freq', 'Fish (not Tuna) Pickled, Dried, Smoked, Canned Freq']
if 'pastry' in args:
mb_columns_extra += ['Ordinary Bread or Challah Freq', 'Light Bread Freq', 'Wholemeal or Rye Bread Freq', 'Baguette Freq',
'Roll or Bageles Freq', 'Pita Freq', 'Saltine Crackers or Matzah Freq', 'Wholemeal Crackers Freq',
'Small Burekas Freq', 'Jachnun, Mlawah, Kubana, Cigars Freq', 'Pizza Freq']
if 'qualityOfLiving' in args:
mb_columns_extra += ['Stress','Sleep quality']
if 'smoking' in args:
mb_columns_extra += ['Currently smokes','Ever smoked']
if 'sweets' in args:
mb_columns_extra += ['Milk or Dark Chocolate Freq', 'Salty Snacks Freq', 'Cheese Cakes or Cream Cakes Freq',
'Yeast Cakes and Cookies as Rogallach, Croissant or Donut Freq', 'Cake, Torte Cakes, Chocolate Cake Freq',
'Fruit Pie or Cake Freq', 'Coated or Stuffed Cookies, Waffles or Biscuits Freq',
'Simple Cookies or Biscuits Freq', 'Ice Cream or Popsicle which contains Dairy Freq',
'Popsicle Without Dairy Freq', 'Black or White Grains, Watermelon Seeds Freq',
'Nuts, almonds, pistachios Freq','Peanuts Freq']
if 'vegetables' in args:
mb_columns_extra += ['Tomato Freq','Cooked Tomatoes, Tomato Sauce, Tomato Soup Freq', 'Red Pepper Freq', 'Green Pepper Freq',
'Cucumber Freq', 'Zucchini or Eggplant Freq','Peas, Green Beans or Okra Cooked Freq',
'Cauliflower or Broccoli Freq','Sweet Potato Freq', 'Brussels Sprouts, Green or Red Cabbage Freq',
'Lettuce Freq','Carrots, Fresh or Cooked, Carrot Juice Freq', 'Corn Freq',
'Parsley, Celery, Fennel, Dill, Cilantro, Green Onion Freq',
'Fresh Vegetable Salad Without Dressing or Oil Freq', 'Fresh Vegetable Salad With Dressing or Oil Freq',
'Avocado Freq','Lemon Freq', 'Onion Freq', 'Garlic Freq', 'Vegetable Soup Freq', 'Hummus Salad Freq',
'Tahini Salad Freq', 'Cooked Vegetable Salads Freq', 'Pickled Vegetables Freq', 'Olives Freq']
if 'womenOnlyQuestions' in args:
mb_columns_extra += ['Is pregnant','Is breastfeeding','Is after birth', 'Taking contraceptives', 'Regular period',
'Irregular period', 'No period','Hormonal replacment', 'Past breastfeeding']
if 'other' in args:
#AddingIrisGlucose
df_glucose = pandas.read_csv(glycemicStatusPath).set_index('RegNum')
pheno = df_glucose.merge(pheno, left_index=True, right_index=True,how='right')
mb_columns +=['median_Without_BMI_ALT_Overall','WakeupGlucose','BMI','VegeterianScale']
pheno.loc[pheno['VegeterianScale']<0, 'VegeterianScale']=np.nan
if 'drugs' in args:
mb_columns+=drug_args
else:
for arg in drug_args:
if arg in args:
mb_columns += [arg]
mb_columns_extra=[val.replace(' ','_') for val in mb_columns_extra]
mb_columns+=mb_columns_extra
if 'meals' in args:
mealsColumns=[val.replace(' ','_') for val in meals]
#Correct by total calories
pheno.loc[:,mealsColumns]=pheno[mealsColumns][pheno[mealsColumns]!=-9].div(pheno['Calories_kcal_Total'].values,axis=0)
pheno.replace(np.nan, 0,inplace=True)
mb_columns += mealsColumns
########################FFQ END#####################
#for c in pheno: print c
mb_columns=list(set(mb_columns))
pheno= pheno[mb_columns]
if 'threshold' not in kwargs:
threshold = -4
else:
threshold=kwargs['threshold']
if 'ratio' in kwargs:
ratio=kwargs['ratio']
mb_columns = [c for c in pheno.columns if c[:2] in ['s_','g_','f_','o_','c_','p_']]
other_columns = [c for c in pheno.columns if c[:2] not in ['s_','g_','f_','o_','c_','p_']]
if 'dic' in args:
presence=((pheno[mb_columns]>threshold +1e-5)&(pheno[mb_columns]!=0)).astype(int).sum()
else:
presence=(pheno[mb_columns]>threshold +1e-5).astype(int).sum()
presence=presence[presence > len(presence)*ratio].index.values.tolist()
pheno=pheno[other_columns+presence]
if ('keep_related' not in args):
#bed = Bed(os.path.join(cleanDataPath, 'PNP_autosomal_clean2_nodfukim_norelated'), count_A1=True)#.read()
df_fam_no_related = pandas.read_csv(os.path.join(cleanDataPath, 'PNP_autosomal_clean2_nodfukim_norelated.fam'), delim_whitespace=True, index_col=0, header=None)
df_fam = pandas.read_csv(os.path.join(cleanDataPath, 'PNP_autosomal_clean2_nodfukim.fam'), delim_whitespace=True, index_col=0, header=None)
df_related=df_fam[~df_fam.index.isin(df_fam_no_related.index)]
pheno=pheno[(~pheno.index.isin(df_related.index))]
if ('keep_sterile') not in args:
if '16s' in args: sterile_individuals = alpha_diversity_g[alpha_diversity_g < 4].index
else: sterile_individuals = alpha_diversity_s[alpha_diversity_s < 15].index
pheno=pheno[~pheno.index.isin(sterile_individuals)]
if 'keep_household' not in args:
#noSharedEnvIID=pandas.read_csv(iidsNoSharedEnv,usecols=[0],header=None,sep='\t')
#pheno=pheno[pheno.index.isin(noSharedEnvIID[0].astype(int).values)]
#new code that decides which individuals to remove on the fly
import ForPaper.VertexCut as vc
df_household = pandas.read_csv(os.path.join(cleanDataPath, 'EnvironmentBlock.txt'), delim_whitespace=True)
df_household = df_household[[c for c in df_household.columns if int(c) in pheno.index]]
df_household = df_household[df_household.index.isin(pheno.index)]
remove_inds = df_household.index[vc.VertexCut().work(df_household.values, 0.5)]
pheno=pheno[~pheno.index.isin(remove_inds)]
if 'keep_missingCovars' not in args:
#One participant 244624 has no 'Calories_kcal','Carbs_g','Fat_g','Protain_g'
#3 participant 86356,762339,805175 have no 'Age','Gender'
# if set(['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek'])<=set(pheno.columns.values):
keep_inds=pheno.loc[:,['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek']].replace(-9, np.nan).dropna().index.values
beforeNumParticpants=pheno.shape[0]
pheno=pheno.loc[keep_inds]
afterNumParticpants=pheno.shape[0]
if beforeNumParticpants-afterNumParticpants>0:
pass
#print "Removing participants with missing covars!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
#print beforeNumParticpants-afterNumParticpants
#print "805175 has no FFQ!!!!! that is why we remove him"
features_to_drop=[]
if ('IsGenotek' not in args) and ('covars' not in args) and ('covars_noPCs' not in args):
features_to_drop += ['IsGenotek']
if ('covars' not in args) and ('covars_noPCs' not in args) and ('other' not in args):
if 'calories' not in args:
features_to_drop +=['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek']
else:
features_to_drop +=['Age','Gender']
if ('include_allPNP' not in args) and ('PCs' not in args):
features_to_drop+=['PC1','PC2','PC3','PC4','PC5']
pheno=pheno.drop(features_to_drop,axis=1)
if ('-9' not in args):
pheno.replace(-9, np.nan, inplace=True)
if 'permute' in args:
pheno=pandas.DataFrame(pheno.values[np.random.permutation(pheno.shape[0])],index=pheno.index,columns=pheno.columns)
return pheno
if __name__=="__main__":
    pheno = extract('dic', 'covars', 'keep_household', "pastry", ratio=0.2)  # 'all_bac'; used by the checks at the end of this block
phenoAll = extract('s','include_allPNP','covars')#'include_allPNP','keep_household','ffq','keep_related')#'include_allPNP',
print(phenoAll.shape)
print(phenoAll.columns)
phenoAll = extract('s','include_allPNP')
print(phenoAll.shape)
print(phenoAll.columns)
phenoChip = extract('keep_household','s','keep_related')
print(phenoChip.shape)
print(phenoChip.columns)
# print "Only in chip:"
# print set(phenoChip.index.values)-set(phenoAll.index.values)
# print len(set(phenoChip.index)-set(phenoAll.index))
print(pheno.columns.values.tolist())
print(pheno.shape)
sum=0
for participant in pheno[['Age','Gender']].index.values:
# if np.isnan(pheno.loc[participant,'Calories_kcal']) or \
# np.isnan(pheno.loc[participant,'Carbs_g']) or \
# np.isnan(pheno.loc[participant,'Fat_g']) or \
# np.isnan(pheno.loc[participant,'Protain_g']):
# sum+=1
# print participant
# print pheno.loc[participant,['Calories_kcal','Carbs_g','Fat_g','Protain_g','Protain_g']]
# print sum
if np.isnan(pheno.loc[participant,'Age']) or np.isnan(pheno.loc[participant,'Gender']) :
print("Participant %s, age %s, gender %s" %(participant,pheno.loc[participant,'Age'],pheno.loc[participant,'Gender']))
# print pheno[['median_Without_BMI_ALT_Overall']]
| 2.015625 | 2 |
ebel/validate.py | e-bel/ebel | 1 | 12797168 | <filename>ebel/validate.py
"""Collect of methods used for validating a BEL file."""
import os
import re
import csv
import difflib
import logging
from typing import Iterable, Union, Optional
from textwrap import fill
import numpy as np
import pandas as pd
import ebel.database
from ebel.parser import check_bel_script_line_by_line, check_bel_script, bel_to_json
logger = logging.getLogger(__name__)
def validate_bel_file(bel_script_path: str,
force_new_db: bool = False,
line_by_line: bool = False,
reports: Union[Iterable[str], str] = None,
bel_version: str = '2_1',
tree: bool = False,
sqlalchemy_connection_str: str = None,
json_file: bool = True,
force_json: bool = False,):
"""Validate BEL script for correct syntax following eBNF grammar.
Parameters
----------
bel_script_path: str
        Path to BEL file or directory containing BEL files.
force_new_db: bool
Delete current database of namespaces/values and generate a new one. Defaults to False.
line_by_line: bool
TODO: Write this.
reports: Iterable[str] or str
List of file paths to write reports to. Multiple formats of the report can be generated at once. Acceptable
formats include: CSV, TSV, TXT, XLS, XLSX, JSON, HTML, MD
bel_version: {'1', '2', '2_1'}
        Which BEL grammar version should be used for validating the BEL file. Currently available are 1.0, 2.0, and 2.1.
Defaults to the most recent version.
tree: bool
Generates a tree of relationships derived from the BEL file. Defaults to False.
sqlalchemy_connection_str: str
        Path to SQLite database to be used for storing/looking up used namespaces and values. If None given, it uses
the generated e(BE:L) database (default).
json_file: bool
If True, generates a JSON file that can be used for importing BEL relationships into an e(BE:L) generated
OrientDB database. Only creates the JSON file when there are no grammar or syntax errors. Defaults to True.
force_json: bool
If True, will create an importable JSON file even if there are namespace/value errors. Defaults to False.
Returns
-------
dict
Dictionary of file paths and results for each BEL file processed.
Examples
--------
Task: Validate BEL script `my.bel` for BEL syntax 2.0, create error
reports in Markdown and JSON format. In case of no errors create a JSON file
for the import of BEL network into Cytoscape:
> ebel validate my.bel -v 2 -r error_report.md,error_report.json
"""
validation_results = dict()
if bel_script_path.startswith('"') and bel_script_path.endswith('"'):
bel_script_path = bel_script_path[1:-1]
if reports and reports.startswith('"') and reports.endswith('"'):
reports = reports[1:-1]
if line_by_line:
# TODO: This is perhaps not working
result = check_bel_script_line_by_line(bel_script_path,
error_report_file_path=reports,
bel_version=bel_version)
if reports:
logger.info("Wrote report to %s\n" % reports)
else:
logger.info("\n".join([x.to_string() for x in result]) + "\n")
else:
if sqlalchemy_connection_str:
ebel.database.set_connection(sqlalchemy_connection_str)
bel_files = _create_list_bel_files(bel_path=bel_script_path)
validation_results['bel_files_checked'] = bel_files
for bel_file in bel_files:
# Create dict to be filled for individual BEL files.
validation_results[bel_file] = dict()
logger.info(f"Processing {bel_file}")
result = check_bel_script(
bel_script_path=bel_file,
force_new_db=force_new_db,
bel_version=bel_version,
)
if json_file:
if not result['errors'] or force_json:
json_file = _write_odb_json(bel_path=bel_file, results=result, bel_version=bel_version)
validation_results[bel_file]['json'] = json_file
if tree:
if result['errors']:
logger.error("Tree can not be printed because errors still exists\n")
else:
logger.debug(result['tree'])
validation_results[bel_file]['tree'] = result['tree']
if result['warnings'] and reports:
report_paths = _write_report(reports, result, report_type='warnings')
validation_results[bel_file]['reports'] = report_paths
elif result['errors']:
if not reports:
logger.info('\n'.join([x.to_string() for x in result['errors']]) + "\n")
else:
                    _write_report(reports, result, report_type='errors')
    return validation_results
def repair_bel_file(bel_script_path: str, new_file_path: Optional[str] = None):
"""Repair a BEL document.
Parameters
----------
bel_script_path : str
Path to the BEL file.
new_file_path : str (optional)
Export repaired version of file to new path.
"""
# if evidence:
# regular expression for missing continuous line (\ at the end of line)
with open(bel_script_path, "r", encoding="utf-8") as belfile:
content = belfile.read()
new_content = content
for regex_pattern in re.findall(r'\n((SET\s+(DOCUMENT\s+Description|Evidence|SupportingText)'
r'\s*=\s*)"(((?<=\\)"|[^"])+)"\s*\n*)',
content):
if regex_pattern[2].startswith("DOCUMENT"):
new_prefix = "SET DOCUMENT Description = "
else:
new_prefix = "SET Support = "
new_evidence_text = re.sub(r"(\\?[\r\n]+)|\\ ", " ", regex_pattern[3].strip())
new_evidence_text = re.sub(r"\s{2,}", " ", new_evidence_text)
new_evidence_text = re.sub(r'(\\)(\w)', r'\g<2>', new_evidence_text)
new_evidence_text = fill(new_evidence_text, break_long_words=False).replace("\n", " \\\n")
new_evidence = new_prefix + '"' + new_evidence_text + '"\n\n'
new_content = new_content.replace(regex_pattern[0], new_evidence)
if content != new_content:
if new_file_path:
with open(new_file_path + ".diff2repaired", "w") as new_file:
new_file.write('\n'.join(list(difflib.ndiff(content.split("\n"), new_content.split("\n")))))
else:
with open(bel_script_path, "w") as output_file:
output_file.write(new_content)
def _write_odb_json(bel_path: str, results: dict, bel_version: str) -> str:
json_path = bel_path + ".json"
if int(bel_version[0]) > 1:
json_tree = bel_to_json(results['tree'])
open(json_path, "w").write(json_tree)
return json_path
def _create_list_bel_files(bel_path: str) -> list:
"""Export all BEL files in directory as list. If single file is passed, returns a list with that path."""
if os.path.isdir(bel_path):
bel_files = []
for file in os.listdir(bel_path):
if file.endswith(".bel"):
bel_file_path = os.path.join(bel_path, file)
bel_files.append(bel_file_path)
else:
bel_files = [bel_path]
return bel_files
def _write_report(reports: Union[Iterable[str], str], result: dict, report_type: str) -> list:
"""Write report in different types depending on the file name suffix in reports.
Parameters
----------
reports : Iterable[str] or str
List of report formats or comma separated list of report file names.
result : dict
        Return value of the check_bel_script method.
report_type : str
`report_type` could be 'warnings' or 'errors'.
Returns
-------
list
List of file paths for the reports written.
"""
# TODO: report_type options should be constants
errors_or_warns_as_list_of_dicts = [x.to_dict() for x in result[report_type]]
columns = [report_type[:-1] + "_class", "url", "keyword", "entry", "line_number", "column", "hint"]
df = pd.DataFrame(data=errors_or_warns_as_list_of_dicts, columns=columns)
df.index += 1
if isinstance(reports, str):
reports = reports.split(",")
for report in reports:
if report.endswith('.csv'):
df.to_csv(report)
if report.endswith('.xls'):
df.to_excel(report)
if report.endswith('.xlsx'):
df.to_excel(report, engine='xlsxwriter')
if report.endswith('.tsv'):
df.to_csv(report, sep='\t')
if report.endswith('.json'):
df.to_json(report)
if report.endswith('.txt'):
open(report, "w").write(df.to_string())
if report.endswith('.html'):
df.to_html(report)
if report.endswith('.md'):
cols = df.columns
df2 = pd.DataFrame([['---', ] * len(cols)], columns=cols)
            if df.hint.dtype == str:  # np.str is just an alias for the builtin str and was removed in NumPy 1.24
                df.hint = df.hint.str.replace(r'\|', '|')
            if df.entry.dtype == str:
                df.entry = df.entry.str.replace(r'\|', '|')
df.url = [("[url](" + str(x) + ")" if not pd.isna(x) else '') for x in df.url]
url_template = "[%s](" + report.split(".bel.")[0] + ".bel?expanded=true&viewer=simple#L%s)"
df.line_number = [url_template % (x, x) for x in df.line_number]
df3 = pd.concat([df2, df])
df3.to_csv(report, sep="|", index=False, quoting=csv.QUOTE_NONE, escapechar="\\")
return reports
| 2.796875 | 3 |
pipestat/const.py | pepkit/pipestat | 1 | 12797169 | import os
from sqlalchemy.dialects.postgresql.json import JSONB
from sqlalchemy.types import ARRAY, JSON, Boolean, Float, Integer, String
PKG_NAME = "pipestat"
LOCK_PREFIX = "lock."
REPORT_CMD = "report"
INSPECT_CMD = "inspect"
REMOVE_CMD = "remove"
RETRIEVE_CMD = "retrieve"
STATUS_CMD = "status"
SUBPARSER_MSGS = {
REPORT_CMD: "Report a result.",
INSPECT_CMD: "Inspect a database.",
REMOVE_CMD: "Remove a result.",
RETRIEVE_CMD: "Retrieve a result.",
STATUS_CMD: "Manage pipeline status.",
}
STATUS_GET_CMD = "get"
STATUS_SET_CMD = "set"
STATUS_SUBPARSER_MESSAGES = {
STATUS_SET_CMD: "Set status.",
STATUS_GET_CMD: "Get status.",
}
DOC_URL = "http://pipestat.databio.org/en/latest/db_config/"
# DB config keys
CFG_DATABASE_KEY = "database"
CFG_NAME_KEY = "name"
CFG_HOST_KEY = "host"
CFG_PORT_KEY = "port"
CFG_PASSWORD_KEY = "password"
CFG_USER_KEY = "user"
CFG_DIALECT_KEY = "dialect" # sqlite, mysql, postgresql, oracle, or mssql
CFG_DRIVER_KEY = "driver"
DB_CREDENTIALS = [
CFG_HOST_KEY,
CFG_PORT_KEY,
CFG_PASSWORD_KEY,
CFG_USER_KEY,
CFG_NAME_KEY,
CFG_DIALECT_KEY,
CFG_DRIVER_KEY,
]
# object attribute names
DB_ONLY_KEY = "_database_only"
CONFIG_KEY = "_config"
SCHEMA_KEY = "_schema"
STATUS_KEY = "_status"
STATUS_SCHEMA_KEY = "_status_schema"
STATUS_SCHEMA_SOURCE_KEY = "_status_schema_source"
STATUS_FILE_DIR = "_status_file_dir"
RES_SCHEMAS_KEY = "_result_schemas"
DB_BASE_KEY = "_declarative_base"
DB_ORMS_KEY = "_orms"
DATA_KEY = "_data"
NAME_KEY = "_name"
FILE_KEY = "_file"
RECORD_ID_KEY = "_record_id"
DB_SESSION_KEY = "_db_session"
DB_SCOPED_SESSION_KEY = "_db_scoped_session"
DB_ENGINE_KEY = "_db_engine"
HIGHLIGHTED_KEY = "_highlighted"
DB_COLUMN_KEY = "db_column"
DB_RELATIONSHIP_KEY = "relationship"
DB_RELATIONSHIP_NAME_KEY = "name"
DB_RELATIONSHIP_TABLE_KEY = "table"
DB_RELATIONSHIP_COL_KEY = "column"
DB_RELATIONSHIP_BACKREF_KEY = "backref"
DB_RELATIONSHIP_ELEMENTS = [
DB_RELATIONSHIP_BACKREF_KEY,
DB_RELATIONSHIP_COL_KEY,
DB_RELATIONSHIP_NAME_KEY,
DB_RELATIONSHIP_TABLE_KEY,
]
# schema keys
SCHEMA_PROP_KEY = "properties"
SCHEMA_TYPE_KEY = "type"
SCHEMA_DESC_KEY = "description"
# DB column names
ID = "id"
RECORD_ID = "record_identifier"
STATUS = "status"
RESERVED_COLNAMES = [ID, RECORD_ID]
CANONICAL_TYPES = {
"image": {
"type": "object",
"properties": {
"path": {"type": "string"},
"thumbnail_path": {"type": "string"},
"title": {"type": "string"},
},
"required": ["path", "thumbnail_path", "title"],
},
"file": {
"type": "object",
"properties": {
"path": {"type": "string"},
"title": {"type": "string"},
},
"required": ["path", "title"],
},
}
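# A result value satisfying the "image" schema above would look like (illustrative):
#   {"path": "plots/qc.pdf", "thumbnail_path": "plots/qc.png", "title": "QC summary"}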
ENV_VARS = {
"namespace": "PIPESTAT_NAMESPACE",
"config": "PIPESTAT_CONFIG",
"results_file": "PIPESTAT_RESULTS_FILE",
"schema": "PIPESTAT_RESULTS_SCHEMA",
"status_schema": "PIPESTAT_SATUS_SCHEMA",
"record_identifier": "PIPESTAT_RECORD_ID",
}
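# These let CLI options fall back to the environment, e.g. (illustrative):
#   export PIPESTAT_NAMESPACE=my_pipeline
#   export PIPESTAT_RESULTS_FILE=results.yaml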
CLASSES_BY_TYPE = {
"number": float,
"integer": int,
"object": dict,
"image": dict,
"file": dict,
"string": str,
"array": list,
"boolean": bool,
}
SQL_CLASSES_BY_TYPE = {
"number": Float,
"integer": Integer,
"object": JSONB,
"image": JSONB,
"file": JSONB,
"string": String(500),
"array": JSONB,
"boolean": Boolean,
}
CFG_SCHEMA = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "schemas", "pipestat_config_schema.yaml"
)
STATUS_SCHEMA = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "schemas", "status_schema.yaml"
)
STATUS_TABLE_SCHEMA = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "schemas", "status_table_schema.yaml"
)
| 2.359375 | 2 |
src/server/endpoints/__init__.py | lee14257/delphi-epidata | 0 | 12797170 | from . import (
afhsb,
cdc,
covid_hosp_facility_lookup,
covid_hosp_facility,
covid_hosp_state_timeseries,
covidcast_nowcast,
covidcast_meta,
covidcast,
delphi,
dengue_nowcast,
dengue_sensors,
ecdc_ili,
flusurv,
fluview_clinicial,
fluview_meta,
fluview,
gft,
ght,
ilinet,
kcdc_ili,
meta_afhsb,
meta_norostat,
meta,
nidss_dengue,
nidss_flu,
norostat,
nowcast,
paho_dengue,
quidel,
sensors,
twitter,
wiki,
signal_dashboard_status,
signal_dashboard_coverage,
)
endpoints = [
afhsb,
cdc,
covid_hosp_facility_lookup,
covid_hosp_facility,
covid_hosp_state_timeseries,
covidcast_nowcast,
covidcast_meta,
covidcast,
delphi,
dengue_nowcast,
dengue_sensors,
ecdc_ili,
flusurv,
fluview_clinicial,
fluview_meta,
fluview,
gft,
ght,
ilinet,
kcdc_ili,
meta_afhsb,
meta_norostat,
meta,
nidss_dengue,
nidss_flu,
norostat,
nowcast,
paho_dengue,
quidel,
sensors,
twitter,
wiki,
signal_dashboard_status,
signal_dashboard_coverage,
]
__all__ = ["endpoints"]
| 1.070313 | 1 |
fwla-center-folder.py | FWLA/fwla-center-folder | 0 | 12797171 | import logging
import random
import requests
import board
import neopixel
import smbus2
from apscheduler.schedulers.blocking import BlockingScheduler
class LedController:
def reset(self):
pass
def set(self, id):
pass
class LoggingLedController(LedController):
def reset(self):
logging.info('Reset')
def set(self, id):
logging.info('set {}'.format(id))
# Controller for I2C connected LEDs
class I2CLedController(LoggingLedController):
def __init__(self):
self.bus = smbus2.SMBus(1)
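        # Register layout (0x00/0x01 as direction registers set to all-outputs, 0x14/0x15
        # as output latches) matches an MCP23017-style I2C port expander at address 0x20;
        # this identification is an assumption based on the addresses used here.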
self.bus.write_byte_data(0x20, 0x00, 0x00)
self.bus.write_byte_data(0x20, 0x01, 0x00)
def reset(self):
super(I2CLedController, self).reset()
self.bus.write_byte_data(0x20, 0x14, 0x00)
self.bus.write_byte_data(0x20, 0x15, 0x00)
def set(self, id):
super(I2CLedController, self).set(id)
register = 0x14
        if id // 8 > 0:  # integer division: LEDs 8-15 sit on the second output register
            register = 0x15
        bitmask = 1 << (id % 8)  # raise only the bit for the requested LED
self.bus.write_byte_data(0x20, register, bitmask)
# Controller for WS2812 connected LEDs
class WS2812LedController(LedController):
def __init__(self, color):
self._color = color
self._pixels = neopixel.NeoPixel(board.D18, 144, auto_write=False)
self._pixels.fill((0, 0, 0))
self._pixels.show()
def reset(self):
super(WS2812LedController, self).reset()
self._pixels.fill((0, 0, 0))
self._pixels.show()
def set(self, id):
super(WS2812LedController, self).set(id)
self._pixels.fill((0, 0, 0))
self._pixels[id] = self._color
self._pixels.show()
# BASIC OPTIONS
logging.basicConfig(level=logging.INFO)
TEST_ENV = 'http://192.168.0.199:8080/v1/display'
PROD_ENV = 'http://10.24.6.35/api/v1/display'
url = TEST_ENV
color = (0, 0, 255)
controller = WS2812LedController(color)
def job():
address = get_active_address()
if address < 0:
controller.reset()
else:
controller.set(address)
def get_mock_address():
return random.randint(-1, 100)
def get_active_address():
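    # Expected response shape, inferred from the checks below (values illustrative):
    #   {"state": "OPERATION",
    #    "operation": {"realEstate": {"folderAddress": "42", ...}, ...}}
    # Anything else (wrong state, missing keys, request errors) maps to -1.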
try:
r = requests.get(url, timeout=2)
data = r.json()
if (data['state'] != 'OPERATION'):
logging.debug('Not operation state.')
return -1
if 'operation' not in data:
logging.debug('No operation.')
return -1
operation = data['operation']
if 'realEstate' not in operation:
logging.debug('No realEstate.')
return -1
realEstate = operation['realEstate']
if 'folderAddress' not in realEstate:
logging.debug('No folderAddress.')
return -1
folderAddress = int(realEstate['folderAddress'])
return folderAddress
except Exception as e:
logging.warn('Exception when getting data.')
logging.warn(e)
return -1
def init():
logging.info('Starting process.')
scheduler = BlockingScheduler()
scheduler.add_job(job, 'interval', seconds=5)
try:
scheduler.start()
except (KeyboardInterrupt):
controller.reset()
logging.info('Stopping process.')
if __name__ == "__main__":
init()
| 2.640625 | 3 |
dolfyn/test/test_read_adp.py | aidanbharath/dolfyn | 0 | 12797172 | <reponame>aidanbharath/dolfyn
from dolfyn.main import read_example as read
import dolfyn.test.base as tb
import sys
load = tb.load_tdata
save = tb.save_tdata
dat_rdi = load('RDI_test01.h5')
dat_rdi_i = load('RDI_test01_rotate_beam2inst.h5')
dat_awac = load('AWAC_test01.h5')
dat_awac_ud = load('AWAC_test01_ud.h5')
dat_sig = load('BenchFile01.h5')
dat_sigi = load('Sig1000_IMU.h5')
dat_sigi_ud = load('Sig1000_IMU_ud.h5')
dat_wr1 = load('winriver01.h5')
dat_wr2 = load('winriver02.h5')
def test_read(make_data=False):
td_rdi = read('RDI_test01.000') # This uses the built-in declination!
td_sig = read('BenchFile01.ad2cp')
td_sigi = read('Sig1000_IMU.ad2cp', userdata=False)
td_sigi_ud = read('Sig1000_IMU.ad2cp')
td_awac = read('AWAC_test01.wpr', userdata=False)
td_awac_ud = read('AWAC_test01.wpr')
td_wr1 = read('winriver01.PD0')
td_wr2 = read('winriver02.PD0')
if make_data:
save(td_rdi, 'RDI_test01.h5')
save(td_sig, 'BenchFile01.h5')
save(td_sigi, 'Sig1000_IMU.h5')
save(td_sigi_ud, 'Sig1000_IMU_ud.h5')
save(td_awac, 'AWAC_test01.h5')
save(td_awac_ud, 'AWAC_test01_ud.h5')
save(td_wr1, 'winriver01.h5')
save(td_wr2, 'winriver02.h5')
return
if sys.version_info.major == 2:
# This is a HACK for Py2
        # for some reason a very small number of the values in temp_mag
# are not the same for py2?
# !CLEANUP!
# BUG that's loading different data??!
td_sigi.pop('sys.temp_mag')
dat_sigi_tmp = dat_sigi.copy()
dat_sigi_tmp.pop('sys.temp_mag')
td_sigi_ud.pop('sys.temp_mag')
dat_sigi_ud_tmp = dat_sigi_ud.copy()
dat_sigi_ud_tmp.pop('sys.temp_mag')
else:
dat_sigi_tmp = dat_sigi
dat_sigi_ud_tmp = dat_sigi_ud
def msg(infile):
testfile = infile.split('.')[0] + '.h5'
return ("The output of read('{}') does not match '{}'."
.format(infile, testfile))
for dat1, dat2, msg in [
(td_rdi, dat_rdi,
msg('RDI_test01.000')),
(td_sig, dat_sig,
msg('BenchFile01.ad2cp')),
(td_sigi, dat_sigi_tmp,
msg('Sig1000_IMU.ad2cp')),
(td_sigi_ud, dat_sigi_ud_tmp,
msg('Sig1000_IMU_ud.ad2cp')),
(td_awac, dat_awac,
msg('AWAC_test01.wpr')),
(td_awac_ud, dat_awac_ud,
msg('AWAC_test01.wpr+userdata')),
(td_wr1, dat_wr1,
msg('winriver01.PD0')),
(td_wr2, dat_wr2,
msg('winriver02.PD0')),
]:
yield tb.data_equiv, dat1, dat2, msg
if __name__ == '__main__':
for func, dat1, dat2, msg in test_read():
func(dat1, dat2, msg)
| 2.265625 | 2 |
api/hpcpm/api/resources/endpoints/nodes/computation_node/SoftLimit.py | tomix86/hpcpm | 2 | 12797173 | from flask_restful import Resource, request, abort
from flask_restful_swagger import swagger
from hpcpm.api import log
from hpcpm.api.helpers.database import database
from hpcpm.api.helpers.utils import abort_when_not_int, abort_when_node_not_found
from hpcpm.api.helpers.constants import COMPUTATION_NODE_PARAM_NAME, COMPUTATION_NODE_NOT_FOUND_RESPONSE, \
COMPUTATION_NODE_FETCHED_RESPONSE, DEVICE_IDENTIFIER_PARAM, DEVICE_SOFT_LIMIT_PARAM, \
DEVICE_SOFT_LIMIT_SET_RESPONSE, DEVICE_NOT_FOUND_RESPONSE, \
NODE_AND_DEVICE_PARAMS, DEVICE_SOFT_LIMIT_SET_RESPONSE_FAILURE
class SoftLimit(Resource):
@swagger.operation(
notes='This endpoint is used for setting soft limit for given device.',
nickname='/nodes/computation_node/<string:name>/<string:device_id>/soft_limit',
parameters=[
COMPUTATION_NODE_PARAM_NAME,
DEVICE_IDENTIFIER_PARAM,
DEVICE_SOFT_LIMIT_PARAM
],
responseMessages=[
DEVICE_SOFT_LIMIT_SET_RESPONSE,
DEVICE_SOFT_LIMIT_SET_RESPONSE_FAILURE,
COMPUTATION_NODE_NOT_FOUND_RESPONSE
]
)
def put(self, name, device_id):
soft_limit = request.args.get('soft_limit')
abort_when_not_int(soft_limit)
computation_node = abort_when_node_not_found(name)
if int(soft_limit) < 0:
            log.error(str.format('Number is negative: {}', soft_limit))
abort(400)
if not any(d['id'] == device_id for d in computation_node['backend_info']['devices']):
log.error('There is no such device: %s', device_id)
abort(404)
limit_info = {
'name': name,
'device_id': device_id,
'soft_limit': soft_limit
}
upsert_result = database.replace_soft_limit_for_device(name, device_id, limit_info)
if upsert_result.modified_count:
log.info('Power limit for device %s:%s was already set in a database to %s', name, device_id, soft_limit)
log.info('Stored power limit info %s', limit_info)
else:
log.info('Stored power limit info %s on id %s', limit_info, upsert_result.upserted_id)
return 'Soft limit successfully set', 201
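    # Illustrative request shape (node and device names are hypothetical):
    #   PUT /nodes/computation_node/node1/GPU0/soft_limit?soft_limit=150
    # which stores {'name': 'node1', 'device_id': 'GPU0', 'soft_limit': '150'} via the database helper.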
@swagger.operation(
notes='This endpoint is used for getting soft limit information from database',
nickname='/nodes/computation_node/<string:name>/<string:device_id>/soft_limit',
parameters=NODE_AND_DEVICE_PARAMS,
responseMessages=[
COMPUTATION_NODE_FETCHED_RESPONSE,
DEVICE_NOT_FOUND_RESPONSE
]
)
def get(self, name, device_id):
result = database.get_soft_limit_for_device(name, device_id)
if not result:
log.info('No such device %s:%s', name, device_id)
abort(404)
log.info('Successfully get device %s:%s soft limit info: %s', name, device_id, result)
return result, 200
@swagger.operation(
notes='This endpoint is used for removing soft limit information from database and device',
nickname='/nodes/computation_node/<string:name>/<string:device_id>/soft_limit',
parameters=NODE_AND_DEVICE_PARAMS,
responseMessages=[
COMPUTATION_NODE_FETCHED_RESPONSE,
DEVICE_NOT_FOUND_RESPONSE,
]
)
def delete(self, name, device_id):
result = database.delete_soft_limit_info(name, device_id)
if not result:
log.info('No such device %s:%s', name, device_id)
abort(404)
log.info('Successfully removed soft limit for device %s:%s soft limit info: %s', name, device_id,
result)
return result, 200
| 2.328125 | 2 |
orchestra/tests/workflows/test_dir/load_sample_data.py | code-review-doctor/orchestra | 444 | 12797174 | <reponame>code-review-doctor/orchestra
def load(workflow_version):
""" Dummy loading function. """
pass
| 1.039063 | 1 |
deepfake_multiple.py | andyboyad/first-order-model | 0 | 12797175 | import imageio
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from skimage.transform import resize
from IPython.display import HTML
import warnings
import sys
import os
from demo import load_checkpoints
from demo import make_animation
from skimage import img_as_ubyte
warnings.filterwarnings("ignore")
if len(sys.argv) < 6:
print("Usage: deepfake_multiple.py <source name> <template name> <final_vid_name> <rows> <columns> <no shuffle arg>")
sys.exit()
source_folder = os.path.join(os.curdir, "resources", "combos", sys.argv[1])
image_folder = os.path.join(os.curdir, "resources", "combos", sys.argv[1], "images")
template_video = os.path.join(os.curdir, "resources", "combos", sys.argv[1], sys.argv[2])
template_video_name = sys.argv[2]
gen_vid_folder = os.path.join(os.curdir, "resources", "combos", sys.argv[1], "gen")
final_vid = os.path.join(os.curdir, "resources", "combos", sys.argv[1], sys.argv[3])
final_vid_name = sys.argv[3]
x = int(sys.argv[4])
y = int(sys.argv[5])
shuffle = ""
if len(sys.argv) > 6:
print("SHOULD NOT CREATE SHUFFLE")
shuffle="noshuffle"
list_images = os.listdir(image_folder)
driving_video = imageio.mimread(template_video)
driving_video = [resize(frame, (256, 256))[..., :3] for frame in driving_video]
generator, kp_detector = load_checkpoints(config_path='config/vox-256.yaml',
checkpoint_path='vox-cpk.pth.tar')
for image in list_images:
image_path = os.path.join(image_folder, image)
source_image = imageio.imread(image_path)
source_image = resize(source_image, (256, 256))[..., :3]
gen_vid_name = image.split(".")[0]
gen_vid_name = f"{gen_vid_name}_gen.mp4"
gen_vid = os.path.join(gen_vid_folder, gen_vid_name)
if not os.path.exists(gen_vid):
predictions = make_animation(source_image, driving_video, generator, kp_detector, relative=True)
imageio.mimsave(gen_vid, [img_as_ubyte(frame) for frame in predictions])
combiner = os.path.join(os.curdir, "resources", "combos", "createcombo.py")
os.system(f"python3 {combiner} {source_folder} {template_video_name} {final_vid_name} {x} {y} {shuffle}")
sys.exit()
#Resize image and video to 256x256
#save resulting video
#predictions2 = make_animation(source_image, driving_video, generator, kp_detector, relative=False, adapt_movement_scale=True)
#imageio.mimsave("testing.mp4", [img_as_ubyte(frame) for frame in predictions2])
#os.system(f"python3 {createvid} {template_video} {gen_vid} {final_vid}")
#print(f"VIDEO GENERATED: {final_vid}") | 2.28125 | 2 |
integreat_cms/api/v3/events.py | Integreat/cms-v2 | 21 | 12797176 | <reponame>Integreat/cms-v2
"""
This module includes functions related to the event API endpoint.
"""
from copy import deepcopy
from datetime import timedelta
from django.conf import settings
from django.http import JsonResponse
from django.utils import timezone
from django.utils.html import strip_tags
from ...cms.models.events.event_translation import EventTranslation
from ...cms.utils.slug_utils import generate_unique_slug
from ..decorators import json_response
from .locations import transform_poi
def transform_event(event):
"""
Function to create a JSON from a single event object.
:param event: The event which should be converted
:type event: ~integreat_cms.cms.models.events.event.Event
:return: data necessary for API
:rtype: dict
"""
return {
"id": event.id,
"start_date": event.start_date,
"end_date": event.end_date,
"all_day": event.is_all_day,
"start_time": event.start_time,
"end_time": event.end_time,
"recurrence_id": event.recurrence_rule.id if event.recurrence_rule else None,
"timezone": settings.CURRENT_TIME_ZONE,
}
def transform_event_translation(event_translation):
"""
Function to create a JSON from a single event_translation object.
:param event_translation: The event translation object which should be converted
:type event_translation: ~integreat_cms.cms.models.events.event_translation.EventTranslation
:return: data necessary for API
:rtype: dict
"""
event = event_translation.event
if event.location:
location_translation = (
event.location.get_public_translation(event_translation.language.slug)
or event.location.best_translation
)
else:
location_translation = None
absolute_url = event_translation.get_absolute_url()
return {
"id": event_translation.id,
"url": settings.BASE_URL + absolute_url,
"path": absolute_url,
"title": event_translation.title,
"modified_gmt": event_translation.last_updated.strftime("%Y-%m-%d %H:%M:%S"),
"excerpt": strip_tags(event_translation.content),
"content": event_translation.content,
"available_languages": event_translation.available_languages,
"thumbnail": event.icon.url if event.icon else None,
"location": transform_poi(event.location, location_translation),
"event": transform_event(event),
"hash": None,
}
def transform_event_recurrences(event_translation, today):
"""
Yield all future recurrences of the event.
:param event_translation: The event translation object which should be converted
:type event_translation: ~integreat_cms.cms.models.events.event_translation.EventTranslation
:param today: The first date at which event may be yielded
:type today: ~datetime.date
:return: An iterator over all future recurrences up to ``settings.API_EVENTS_MAX_TIME_SPAN_DAYS``
:rtype: Iterator[:class:`~datetime.date`]
"""
event = event_translation.event
recurrence_rule = event.recurrence_rule
if not recurrence_rule:
return
# In order to avoid unnecessary computations, check if any future event
# may be valid and return early if that is not the case
if (
recurrence_rule.recurrence_end_date
and recurrence_rule.recurrence_end_date < today
):
return
event_length = event.end_date - event.start_date
start_date = event.start_date
event_translation.id = None
# Store language and slug for usage in loop
current_language = event_translation.language
current_slug = event_translation.slug
# Calculate all recurrences of this event
for recurrence_date in recurrence_rule.iter_after(start_date):
if recurrence_date - max(start_date, today) > timedelta(
days=settings.API_EVENTS_MAX_TIME_SPAN_DAYS
):
break
if recurrence_date < today or recurrence_date == start_date:
continue
# Create all temporary translations of this recurrence
recurrence_translations = {}
if event.region.fallback_translations_enabled:
languages = event.region.active_languages
else:
languages = event.public_languages
for language in languages:
# Create copy in memory to make sure original translation is not affected by changes
event_translation = deepcopy(event_translation)
# Fake the requested language
event_translation.language = language
event_translation.slug = generate_unique_slug(
**{
"slug": f"{current_slug}-{recurrence_date}",
"manager": EventTranslation.objects,
"object_instance": event_translation,
"foreign_model": "event",
"region": event.region,
"language": language,
}
)
# Reset id to make sure id does not conflict with existing event translation
event_translation.event.id = None
# Set date to recurrence date
event_translation.event.start_date = recurrence_date
event_translation.event.end_date = recurrence_date + event_length
# Clear cached property in case url with different language was already calculated before
try:
del event_translation.url_prefix
except AttributeError:
pass
recurrence_translations[language.slug] = event_translation
# Set the prefetched public translations to make sure the recurrence translations are correctly listed in available languages
for recurrence_translation in recurrence_translations.values():
recurrence_translation.event.prefetched_public_translations_by_language_slug = (
recurrence_translations
)
# Update translation object with the one with prefetched temporary translations
event_translation = recurrence_translations[current_language.slug]
# Clear cached property in case available languages with different recurrence was already calculated before
try:
del event_translation.available_languages
except AttributeError:
pass
yield transform_event_translation(event_translation)
@json_response
# pylint: disable=unused-argument
def events(request, region_slug, language_slug):
"""
List all events of the region and transform result into JSON
:param request: The current request
:type request: ~django.http.HttpRequest
:param region_slug: The slug of the requested region
:type region_slug: str
:param language_slug: The slug of the requested language
:type language_slug: str
:return: JSON object according to APIv3 events endpoint definition
:rtype: ~django.http.JsonResponse
"""
region = request.region
# Throw a 404 error when the language does not exist or is disabled
region.get_language_or_404(language_slug, only_active=True)
result = []
now = timezone.now().date()
for event in region.events.prefetch_public_translations().filter(archived=False):
event_translation = event.get_public_translation(language_slug)
if event_translation:
if event.end_date >= now:
result.append(transform_event_translation(event_translation))
for future_event in transform_event_recurrences(event_translation, now):
result.append(future_event)
return JsonResponse(
result, safe=False
) # Turn off Safe-Mode to allow serializing arrays
| 2.34375 | 2 |
groupon_data.py | AndrewSLowe/yipitdata | 1 | 12797177 | import pandas as pd
file_name = r'/Users/andrewlowe/yipitdata/Q4_2013_Groupon_North_America_Data_XLSX.xlsx'
df = pd.read_excel(file_name)
# Q4_13_NA dataframe
Q4_13_NA = df
# (138534, 7) There are 138534 items in the dataframe.
Q4_13_NA.shape
Q4_13_NA.describe()
Q4_13_NA.describe(exclude='number')
# Filter to deals in the "Local" segment and check how many there are
segment = Q4_13_NA[(Q4_13_NA['Segment'] == 'Local')]
segment.shape
| 2.78125 | 3 |
codes/synthetic.py | facebookresearch/metamulti | 1 | 12797178 | <reponame>facebookresearch/metamulti
#!/usr/bin/env python3
"""
Plot the subpopulation deviations for a range of synthetic toy examples.
Copyright (c) Meta Platforms, Inc. and affiliates.
This script creates a directory, "unweighted", in the working directory if the
directory does not already exist, then creates many files there. The filenames
are "synth####.pdf", "synth####.txt", "reverse####.pdf", "reverse####.jpg",
"randwalk####.pdf", and "randwalk####.txt", where "####" ranges through the
powers of 2 from 0002 to 4096. Each pdf file plots the cumulative differences
between the subpopulation and the full population, controlling for the
specified number of covariates. The corresponding txt files report metrics
about the plots. The files named "reverse####.pdf" and "reverse####.txt"
condition on the covariates in the reverse order from those named
"synth####.pdf" and "synth####.txt". The files named "randwalk####.pdf" and
"randwalk####.txt" use the same distribution of responses for the subpopulation
as for the full population.
The data consists of a full population of 1,000 individual members and a
subpopulation of 100 subselected uniformly at random from the full population.
Each member of the full population consists of p independent and identically
distributed draws from the uniform distribution over the interval (0, 1),
where p is the number of covariates. We condition on all the covariates.
We generate the responses via the following procedure (which consists of only a
single stage for the files whose names begin "randwalk...", but consists of two
separate stages for the files whose names begin "synth..." or "reverse..."): we
collect together the covariates for all the members into a 1000 x p matrix x,
construct the p x 1 vector v whose entries are independent and identically
distributed draws from the standard normal distribution, and finally then apply
the Heaviside function to every entry of "centered" (= x-0.5) applied to v
(the Heaviside function is also known as the unit step function, and takes
the value 0 for negative arguments and the value 1 for positive arguments).
The result is a 1000 x 1 vector of 0s and 1s whose entries are the responses
for the corresponding members of the full population. That concludes the first
stage of the procedure. For the files whose names begin "synth..." or begin
"reverse...", we set the responses for all members of the subpopulation to 1,
as the second stage of the procedure.
This source code is licensed under the MIT license found in the LICENSE file in
the root directory of this source tree.
"""
import math
import numpy as np
from numpy.random import default_rng
import os
from hilbertcurve.hilbertcurve import HilbertCurve
from subpop import cumulative
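# A minimal illustrative sketch (not executed below) of the response generation
# described in the module docstring, using float covariates; the names here are
# hypothetical and the actual script below works on discretized integer covariates:
#
#     rng = default_rng(0)
#     x = rng.random((1000, p))                 # covariates in (0, 1)
#     v = rng.standard_normal(p)                # random direction
#     r = ((x - 0.5) @ v > 0).astype(float)     # Heaviside((x - 0.5) @ v)
#     r[subpop_inds] = 1                        # second stage for "synth"/"reverse"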
# Set the number of examples.
m = 1000
# Set the size of the subpopulation.
n = 100
# Set the number of bits in the discretization (mantissa).
precision = 64
# Determine the data type from precision.
if precision == 8:
dtype = np.uint8
elif precision == 16:
dtype = np.uint16
elif precision == 32:
dtype = np.uint32
elif precision == 64:
dtype = np.uint64
else:
raise TypeError(f'There is no support for precision = {precision}.')
# Create a directory as needed.
dir = 'unweighted'
try:
os.mkdir(dir)
except FileExistsError:
pass
dir += '/'
# Consider both the original ordering of covariates and the reverse ordering,
# as well as a complete lack of significant deviation in the responses
# for the subpopulation.
for (reverse, diff) in [(False, True), (True, True), (False, False)]:
print(f'reverse = {reverse}')
print(f'diff = {diff}')
pmax = 12
# Set the number (p) of covariates.
for p in [2**k for k in range(1, pmax + 1)]:
print(f'p = {p}')
# Set up the random number generator.
rng = default_rng(seed=543216789)
# Generate a random permutation for the indices of the subpopulation.
inds = rng.permutation((m))[:n]
# Generate data at random.
x = rng.integers(2**precision - 1, size=(m, p), dtype=dtype)
if reverse:
x = x[:, ::-1]
# Perform the Hilbert mapping from p dimensions to one dimension.
hc = HilbertCurve(precision, p)
ints = hc.distances_from_points(x)
assert np.unique(ints).size == x.shape[0]
# Sort according to the scores.
perm = np.argsort(ints)
x = x[perm, :]
invperm = np.arange(len(perm))
invperm[perm] = np.arange(len(perm))
inds = invperm[inds]
inds = np.sort(inds)
# Construct scores for plotting.
imin = np.min(ints)
imax = np.max(ints)
s = (np.sort(ints) - imin) / (imax - imin)
# Ensure uniqueness even after roundoff errors.
eps = np.finfo(np.float64).eps
s = s + np.arange(0, s.size * eps, eps)
s = s.astype(np.float64)
# Form a random direction.
w = rng.standard_normal(size=(p))
w /= np.linalg.norm(w, ord=2)
if reverse:
w = w[::-1]
# Generate responses based on the random direction and membership
# in the subpopulation.
centered = x.astype(np.float64) - 2**(precision - 1)
r = (np.sign(centered @ w) + 1) / 2
if diff:
r[inds] = 1
# Pad with zeros the number in the filename so that every filename
# has the same number of characters for its length.
max_digits = math.ceil(pmax * math.log(2) / math.log(10))
if reverse and diff:
name = 'reverse'
elif diff:
name = 'synth'
else:
name = 'randwalk'
filename = dir + name + str(p).zfill(max_digits) + '.pdf'
# Construct the graph of cumulative differences.
majorticks = 10
minorticks = 100
kuiper, kolmogorov_smirnov, lenscale = cumulative(
r, s, inds, majorticks, minorticks, filename=filename)
# Save metrics in a text file.
filename = filename[:-4] + '.txt'
with open(filename, 'w') as f:
f.write('m:\n')
f.write(f'{len(s)}\n')
f.write('n:\n')
f.write(f'{len(inds)}\n')
f.write('lenscale:\n')
f.write(f'{lenscale}\n')
f.write('Kuiper:\n')
f.write(f'{kuiper:.4}\n')
f.write('Kolmogorov-Smirnov:\n')
f.write(f'{kolmogorov_smirnov:.4}\n')
f.write('Kuiper / lenscale:\n')
f.write(f'{(kuiper / lenscale):.4}\n')
f.write('Kolmogorov-Smirnov / lenscale:\n')
f.write(f'{(kolmogorov_smirnov / lenscale):.4}\n')
| 2.9375 | 3 |
tests/unit/agent/test_remote_agent.py | DEX-Company/ocean-py | 0 | 12797179 | <filename>tests/unit/agent/test_remote_agent.py
"""
Test RemoteAgent Unit
"""
import pytest
import requests
import secrets
from starfish.agent.remote_agent import RemoteAgent
from starfish.exceptions import StarfishConnectionError
from starfish.middleware.agent.remote_agent_adapter import RemoteAgentAdapter
from starfish.network.ddo import DDO
def test_remote_agent_set_http_client():
ddo = DDO.create('http://localhost:3030')
agent = RemoteAgent(ddo)
assert(agent.http_client)
new_client = object()
agent.http_client = new_client
assert(agent.http_client)
assert(isinstance(agent.http_client, object))
def test_remote_agent_get_adapter():
ddo = DDO.create('http://localhost:3030')
agent = RemoteAgent(ddo)
assert(agent.adapter)
assert(isinstance(agent.adapter, RemoteAgentAdapter))
def test_remote_agent_get_meta_list():
ddo = DDO.create('http://localhost')
agent = RemoteAgent(ddo)
with pytest.raises(StarfishConnectionError):
result = agent.get_metadata_list()
def test_remote_agent_is_metadata_match():
filter = {
'name': 'test'
}
metadata = {
'name': 'test',
'more_data': 'test_data'
}
assert(RemoteAgent.is_metadata_match(filter, metadata))
metadata = {}
for index in range(0, 100):
name = f'name_{index}'
metadata[name] = secrets.token_hex(64)
filter = {}
for index in range(10, secrets.randbelow(60)):
name = f'name_{index}'
filter[name] = metadata[name]
assert(RemoteAgent.is_metadata_match(filter, metadata))
filter['new_value'] = 'cannot match'
assert(not RemoteAgent.is_metadata_match(filter, metadata))
| 2.296875 | 2 |
examples/typical_script.py | Zulko/pompei | 46 | 12797180 | <reponame>Zulko/pompei
"""
This is a typical script to reconstruct one frame of a movie using a mosaic
of other frames with the Python package Pompei. It generates this picture of
general Maximus in Gladiator using 1100+ frames of the movie.
http://i.imgur.com/Eoglcof.jpg
This script goes in five steps:
1. Extract one frame every 5 second of the movie. Compute their 'signatures'
2. Extract one special frame (the one to be reconstructed) from the movie.
3. Split this frame into subregions and compute the signature of each region.
4. Run an algorithm to find (using the signatures) wich frames of the movie
match best with the different regions of the picture to reconstruct.
The algorithm also ensures that many different frames are used.
5. Assemble the selected best-matching frames into one big picture and save.
The code is well commented to palliate the lack of documentation. For more,
see the functions' docstrings.
"""
from pompei import (movie_to_folder,
get_image_signatures_from_folder,
compute_signatures_in_image,
find_best_matches,
best_matches_to_image)
# When comparing the frames of the movie to the regions of the picture to
# reconstruct, each frame and each region will be reduced to Nh x Nw
# zones from which the mean colors are computed. Here we choose 3 x 3.
# The resulting set of 9 colors is called the signature of the region/frame.
signatures_nh=3
signatures_nw=3
### STEP 1 - EXTRACTING THE FRAMES OF THE MOVIE
# For this example we treat gladiator. The result is this mosaic
# http://i.imgur.com/Eoglcof.jpg
foldername = "gladiator" # name of the folder for the frame pictures
filename = 'gladiator.flv' # the video file, from a legally-bought DVD
# The next call extracts the frames from the movie. At the same time it computes
# the signatures of the frames and store them in file gladiator/signatures.txt
# It's pretty long (5 minutes) and should only be done once, then you can
# comment it out if you want to fine-tune the parameters in the next lines.
image_folder_signatures = movie_to_folder(filename, foldername,
fps=1.0/5, # take one frame every 5 seconds
resize_factor=0.2, # downsize all frames of a factor 1/5
signatures_nh=signatures_nh,
signatures_nw=signatures_nw,
subclip=(5*60,-10*60)) # cut 5-10 minutes to avoid credits.
# Get the signatures of each frame, already computed at the previous step.
image_folder_signatures = get_image_signatures_from_folder(foldername)
### STEP 2 - READING THE IMAGE TO BE RECONSTRUCTED
# Now we load the image to reconstruct. This could be any image but out of
# simplicity we choose one frame frame of the movie, so that it will have the
# same dimensions as the frames that will compose it.
# We take the scene just before "My name is Maximus...".
import moviepy.editor as mpy
image = mpy.VideoFileClip(filename).get_frame('01:26:43.00') # a numpy array.
### STEP 3 - SPLIT THE IMAGE AND COMPUTE THE SIGNATURES OF THE REGIONS
nh = nw = 60
image_signatures = compute_signatures_in_image(image, signatures_nh,
signatures_nw, nh, nw)
### STEP 4 - FIND THE BEST-MATCHING FRAMES. OPTIMIZE.
# This step is quite quick because we work with signatures (i.e. reduced
# version of the images.
# The algorithm first attributes to each region of the final picture the movie
# frame that matches best. Some frames will be used more than once.
# Then, goal=5 means that the algorithm will iteratively diversify the frames
# used until the most used frames is used 5 times or less.
# npasses=3000 tells the algorithm to give up after 3000 iterations if it
# cannot reach its goal of 5. Choosing a lower npasses (like npasses=100) can be
# good sometimes to avoid over-diversification.
best_matches = find_best_matches(image_signatures, image_folder_signatures,
npasses=3000,goal=5)
### STEP 5 - ASSEMBLE THE FRAMES INTO ONE BIG PNG FILE
# This produces the final picture: gladiator.png
# This will take long and produce a heavy PNG (50Mo) which can then be
# downsized by converting it to JPEG.
best_matches_to_image("%s.png"%foldername, best_matches, foldername) | 3.359375 | 3 |
DataGrooming/maf2vcf.py | rschenck/DPhilRotation1Part2 | 1 | 12797181 | '''
Script to convert a MAF to a vcf4.2 file using python >=3.6.
Created by <NAME>
8 March 2018
'''
import os
import sys
from optparse import OptionParser
import subprocess
from functools import wraps
import datetime
import time
import numpy as np
def OptionParsing():
usage = 'usage: %prog -i <*.maf> -o <directory> -r <ref.fa>'
parser = OptionParser(usage)
parser.add_option('-i', '--input_maf', dest="maf", default=None, help=".maf file to be converted.")
parser.add_option('-o', '--output_dir', dest="outDir", default=None, help="Output directory for .vcf file")
parser.add_option('-r', '--ref_genome', dest="refGenome", default="/Users/schencro/Desktop/Bioinformatics_Tools/Ref_Genomes/Ensembl/GRCh37.75/GRCh37.75.fa", help="Reference genome to be used for maf2vcf conversion.")
parser.add_option('-s', '--spotCheckMaf', dest='spotcheck', default=False, action='store_true', help="Use this flag to verify reference matching to maf file. Default=False")
parser.add_option('-v', '--verbose', dest='verbose', default=False, action='store_true', help="Use this flag to turn on verbose mode. Default=False")
(options, args) = parser.parse_args()
if options.maf is None or options.outDir is None or options.refGenome is None:
print("ERROR: Please include arguments for maf file, output directory, and reference genome (single fasta file).")
sys.exit()
else:
pass
return (options, parser)
def fn_timer(function):
'''
Use this as a wrapper at the top of any function you want to get run time information about.
:param function: Function of interest.
:return: A function to wrap around a function.
'''
@wraps(function)
def function_timer(*args, **kwargs):
t0 = time.time()
result = function(*args, **kwargs)
t1 = time.time()
print ("INFO: Total time running %s: %s minutes" %
(function.__name__, str(round((t1-t0)/60.,2)))
)
return result
return function_timer
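# Illustrative usage: decorating a function with @fn_timer (as done for ProcessFile
# further below) prints its total runtime in minutes once the call returns.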
def UpdateProgressGetN(fileName):
if fileName[len(fileName)-1]=="z":
cmd = "gzip -cd %s | wc -l" % (fileName)
pipe = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout
else:
cmd = "wc -l %s" % (fileName)
pipe = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout
return(int(pipe.read().decode("utf-8").lstrip(" ").split(" ")[0]))
def UpdateProgress(i, n, DisplayText):
'''
Prints a progress bar where appropriate.
:param i: Current Step
:param n: Total number of steps.
:param DisplayText: A string that you want to print out that is informative.
:return: None
'''
sys.stdout.write('\r')
j = (i + 1) / n
sys.stdout.write("[%-20s] %d%%\t INFO: %s" % ('=' * int(20 * j), 100 * j, DisplayText))
sys.stdout.flush()
def SamtoolsFaidx(refGenome, genomicPos, ref='', check=True):
'''
Obtain reference sequence and perform check if needed.
:param check: Whether or not to throw error if the provided reference matches
:param refGenome: Reference Fasta file
:param genomicPos: Genomic Position of interest.
:param ref: Reference sequence to compare to fetched sequence.
:return: Fetched reference sequence.
'''
proc = subprocess.Popen(['samtools','faidx',refGenome, genomicPos], stdout=subprocess.PIPE)
proc.wait()
outInfo = proc.stdout.readlines()
refSeq = ''.join([line.decode('utf-8').rstrip('\n') for line in outInfo[1:]])
if check:
if refSeq == ref:
return(True)
else:
print('ERROR: May not be proper reference genome')
print('ERROR: Improper reference. Found %s at %s. Reference genome shows %s' % (ref, genomicPos, refSeq))
sys.exit()
return(None)
else:
return(refSeq)
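# Illustrative usage (region and bases are hypothetical):
#   SamtoolsFaidx(ref_fa, "1:100-102", check=False) returns the fetched bases, e.g. "GAC"
#   SamtoolsFaidx(ref_fa, "1:100-102", ref="GAC", check=True) returns True, or exits on mismatch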
def SpotCheckProperReference(mafFile, Options, fileLength):
'''
Randomly samples the file to ensure proper reference file is used. Random sampling is employed to ensure proper
reference is used. Will spot check 2% of a file of more than 200 variants.
:param mafFile: Input mafFile object (opened)
:param Options: Parser Options
:param fileLength: Length of the file being read
:return: None
'''
print("INFO: Verifying maf file.")
if fileLength > 200:
n=0.02
else:
n=1.
a = np.arange(fileLength)
np.random.shuffle(a)
a = list(a[:int(fileLength*n)])
i = 0
count = 0
for line in mafFile:
if i != 0 and line.startswith('Hugo_Symbol Chromosome Start_position') == False:
# checkIt = len([k for k in a if k==i])
# if checkIt==1:
UpdateProgress(count, len(a), "INFO: Verifying maf file")
count+=1
line = line.rstrip('\n').split('\t')
genomicPos = line[1] + ":" + line[2] + "-" + line[3]
ref = line[7]
mutType = line[5]
variantClass = line[6]
if variantClass != "INS" and variantClass != "TNP" and variantClass !="ONP":
toContinue = SamtoolsFaidx(Options.refGenome, genomicPos, ref)
if count == len(a):
print('')
return(toContinue)
# else:
# print(checkIt)
# print(line)
# print([k for k in a])
# sys.exit("Problem here")
elif i != 0 and line.startswith('Hugo_Symbol Chromosome Start_position') == False:
print("")
print("ERROR: No header found in maf file.")
elif line.startswith('Hugo_Symbol Chromosome Start_position') == True:
toContinue = True
else:
            sys.exit("ERROR: Unexpected line while verifying the maf header.")
i+=1
print('')
return(toContinue)
def processSNP(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options):
ref = line[7]
tAllele1 = line[8] # Normal Allele
tAllele2 = line[9] # Alt Allele
QUAL = line[42]
if QUAL == 'None' or QUAL == 'NA' or QUAL == '':
QUAL = '.'
if ref == tAllele1:
altAllele = tAllele1
refAllele = tAllele2
else:
altAllele = tAllele2
refAllele = tAllele1
ref_reads = line[39]
alt_reads = line[38]
reportedVAF = line[28]
# Get phasing information and determine reads for vaf==1
if ref_reads == 'NA' or alt_reads == 'NA' and reportedVAF == '1':
GT = "1/1" # Appears to be homozygous for alternative allele (germline unlikely since it is called w.r.t normal?)
vaf = reportedVAF # Sets VAF equal to 1
if ref_reads == 'NA':
ref_reads = '.'
total_reads = alt_reads
else:
alt_reads = '.'
total_reads = ref_reads
sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf])
# Tossing these very strange mutations within the MAF file.
elif ref_reads == 'NA' or alt_reads == 'NA' and reportedVAF == 'NA':
with open(errorFile, 'a') as errerOut:
errerOut.write('\t'.join(line)+'\n')
if Options.verbose:
print("WARNING: %s" % '\t'.join(line))
return(None)
# Simple SNV cases
else:
total_reads = str(int(ref_reads) + int(alt_reads))
vaf = repr(round(int(alt_reads) / float(total_reads), 4))
        # group the strand check so a "-" strand alone does not bypass the VAF condition
        if vaf != '1.' and (strand == "+" or strand == "-"):
GT="0|1"
else:
GT="0/1"
sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf])
# Last check for interesting but unresolved MAF line
if (ref != tAllele1 and ref != tAllele2) or (strand != '+' and strand != '-'):
with open(errorFile, 'a') as errerOut:
errerOut.write('\t'.join(line)+'\n')
if Options.verbose:
print("WARNING: %s" % '\t'.join(line))
return(None)
# Create INFO field
INFO = "MAF_Hugo_Symbol=" + line[0] + ";MAF_ref_context=" + line[15].upper() + ";MAF_Genome_Change=" + line[14] + ";MAF_Variant_Type=" + variantType + ";MAF_Variant_Classification=" + mutType +";DCC_Project_Code=" + line[44]
# Normal variant field if anything
if line[41]=="NA":
normalGenotype = ".:.,.:.:."
else:
normalGenotype = ".:.,.:.:%s"%(line[41])
# Final vcf line out
lineOut = [chrom, pos, rsid, refAllele, altAllele, QUAL, '.', INFO, "GT:AD:DP:VF", normalGenotype, sampleField]
return(lineOut)
def processDEL(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options):
ref = line[7]
tAllele1 = line[8] # Normal Allele Typically
tAllele2 = line[9] # Alt Allele Typically
QUAL = line[42]
if QUAL == 'None' or QUAL == 'NA' or QUAL == '':
QUAL = '.'
if ref == tAllele1:
altAllele = tAllele1
refAllele = tAllele2
else:
altAllele = tAllele2
refAllele = tAllele1
# Obtain the reference sequence + 1 preceding base for the DEL
refAnchorPos = str(int(pos)-1) # Fetch the base that precedes the deletion.
refSeq = SamtoolsFaidx(Options.refGenome, chrom + ":" + refAnchorPos + "-" + line[3], check=False)
if refSeq[1:] != altAllele:
print("ERROR: Deletion alternative allele does not match reference sequence. %s" % ('\t'.join(line)))
sys.exit()
# VCF reference is the preceding base plus the reported deletion in the MAF file.
vcfRef = refSeq
# VCF has base directly preceding the deletion as the alternative base and the variant pos
vcfAlt=refSeq[0]
vcfPos=refAnchorPos
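    # Illustrative values (bases are hypothetical): if the anchor base at refAnchorPos is "G"
    # and the deleted bases are "AC", then refSeq == "GAC" and the VCF record becomes
    # POS=refAnchorPos, REF="GAC", ALT="G".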
# Get read information
iref_reads = line[37]
ialt_reads = line[36]
ref_reads = line[39]
alt_reads = line[38]
reportedVAF = line[28]
i_t_vaf = line[43]
# Get phasing information and determine reads for vaf==1
if (ref_reads != 'NA' or iref_reads!='NA') and (alt_reads != 'NA' or ialt_reads!='NA'):
GT="0/1"
ref_reads = [read for read in [ref_reads, iref_reads] if read != "NA"][0]
alt_reads = [read for read in [alt_reads, ialt_reads] if read != "NA"][0]
total_reads = str(int(ref_reads) + int(alt_reads))
vaf = str(int(alt_reads)/float(total_reads))
elif i_t_vaf!="" and i_t_vaf!="NA" and ref_reads == 'NA' and iref_reads=='NA' and alt_reads == 'NA' and ialt_reads=='NA':
vaf=i_t_vaf
GT="./."
ref_reads = '.'
alt_reads = '.'
total_reads = '.'
elif (i_t_vaf=="" or i_t_vaf=="NA") and ref_reads == 'NA' and iref_reads=='NA' and alt_reads == 'NA' and ialt_reads=='NA':
GT='./.'
ref_reads='.'
alt_reads='.'
total_reads='.'
vaf='.'
else:
sys.exit("ERROR: Problem processing DEL %s"%('\t'.join(line)))
sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf])
# Create INFO field
INFO = "MAF_Hugo_Symbol=" + line[0] + ";MAF_ref_context=" + line[15].upper() + ";MAF_Genome_Change=" + line[
14] + ";MAF_Variant_Type=" + variantType + ";MAF_Variant_Classification=" + mutType + ";DCC_Project_Code=" + \
line[44]
# Normal variant field if anything
if line[41] == "NA":
normalGenotype = ".:.,.:.:."
else:
normalGenotype = ".:.,.:.:%s" % (line[41])
lineOut = [chrom, vcfPos, rsid, vcfRef, vcfAlt, QUAL, '.', INFO, "GT:AD:DP:VF", normalGenotype, sampleField]
return(lineOut)
def processINS(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options):
ref = line[7]
tAllele1 = line[8] # Normal Allele Typically
tAllele2 = line[9] # Alt Allele Typically
QUAL = line[42]
if QUAL == 'None' or QUAL == 'NA' or QUAL == '':
QUAL = '.'
if tAllele1 == '-':
altAllele = tAllele2
else:
altAllele = tAllele1
# Obtain the reference sequence + 1 preceding base for the DEL
refAnchorPos = str(int(pos) - 1) # Fetch the base that precedes the deletion.
refSeq = SamtoolsFaidx(Options.refGenome, chrom + ":" + refAnchorPos + "-" + line[3], check=False)
# VCF reference is the preceding base in the insertion in MAF
vcfRef = refSeq[0]
# VCF has base directly preceding the deletion as the alternative base and the variant pos
vcfAlt = refSeq[0]+altAllele
vcfPos = refAnchorPos
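    # Illustrative values (bases are hypothetical): with anchor base "G" fetched at
    # refAnchorPos and inserted bases "TT", the VCF record becomes POS=refAnchorPos,
    # REF="G", ALT="GTT".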
# Get read information
iref_reads = line[37]
ialt_reads = line[36]
ref_reads = line[39]
alt_reads = line[38]
reportedVAF = line[28]
i_t_vaf = line[43]
# Get phasing information and determine reads for vaf==1
if (ref_reads != 'NA' or iref_reads != 'NA') and (alt_reads != 'NA' or ialt_reads != 'NA'):
GT = "0/1"
ref_reads = [read for read in [ref_reads, iref_reads] if read != "NA"][0]
alt_reads = [read for read in [alt_reads, ialt_reads] if read != "NA"][0]
total_reads = str(int(ref_reads) + int(alt_reads))
vaf = str(int(alt_reads) / float(total_reads))
elif i_t_vaf != "" and i_t_vaf != "NA" and ref_reads == 'NA' and iref_reads == 'NA' and alt_reads == 'NA' and ialt_reads == 'NA':
vaf = i_t_vaf
GT = "./."
ref_reads = '.'
alt_reads = '.'
total_reads = '.'
elif (
i_t_vaf == "" or i_t_vaf == "NA") and ref_reads == 'NA' and iref_reads == 'NA' and alt_reads == 'NA' and ialt_reads == 'NA':
GT = './.'
ref_reads = '.'
alt_reads = '.'
total_reads = '.'
vaf = '.'
else:
sys.exit("ERROR: Problem processing INS %s" % ('\t'.join(line)))
sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf])
# Create INFO field
INFO = "MAF_Hugo_Symbol=" + line[0] + ";MAF_ref_context=" + line[15].upper() + ";MAF_Genome_Change=" + line[
14] + ";MAF_Variant_Type=" + variantType + ";MAF_Variant_Classification=" + mutType + ";DCC_Project_Code=" + \
line[44]
# Normal variant field if anything
if line[41] == "NA":
normalGenotype = ".:.,.:.:."
else:
normalGenotype = ".:.,.:.:%s" % (line[41])
lineOut = [chrom, vcfPos, rsid, vcfRef, vcfAlt, QUAL, '.', INFO, "GT:AD:DP:VF", normalGenotype, sampleField]
return (lineOut)
def CreateVCFLine(line, errorFile, Options):
line = line.rstrip('\n').split('\t')
# Genomic Position
chrom, pos, id = line[1], line[2], line[10]
# Get rs ID
rsid = line[10]
if rsid == '':
rsid = '.'
elif rsid.startswith("rs") == False:
if Options.verbose:
print("ERROR: %s"%(line))
sys.exit("ERROR: Problem in id column")
# Strand Information
strand = line[4]
# Variant Classification/Type (Type is SNP, INS, DEL, etc.)
mutType = line[5]
variantType = line[6]
# Create proper vcf formatted information
if mutType == '':
mutType = '.'
if variantType == '':
variantType = '.'
# Determine type of variant to continue processing.
linetowrite = None
if variantType=="SNP":
linetowrite = processSNP(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options)
elif variantType=="DEL":
linetowrite = processDEL(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options)
elif variantType=="INS":
linetowrite = processINS(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options)
elif variantType=="TNP" or variantType=="ONP":
with open(errorFile, 'a') as errerOut:
errerOut.write('\t'.join(line)+'\n')
    else: # This may seem duplicitous, but I explicitly want to know as much of what I'm choosing to filter out as possible...
if Options.verbose:
print("WARNING: Malformed MAF entry. %s"%('\t'.join(line)))
print('')
with open(errorFile, 'a') as errerOut:
errerOut.write('\t'.join(line)+'\n')
# print(line)
# sys.exit("ERROR: Malformed MAF entry.")
return(linetowrite)
def CreateHeader(ioObject, Options, tumorID, normalID):
now = datetime.datetime.now()
ioObject.write("##fileformat=VCFv4.2\n")
ioObject.write("##fileDate=%s\n"%(now.date()))
ioObject.write("##source=maf2vcf.py\n")
ioObject.write("##reference=%s\n"%(Options.refGenome))
ioObject.write("##sampleColumns=Normal.Tumor\n")
ioObject.write("##INFO=<ID=MAF_Hugo_Symbol,Number=1,Type=String,Description=\"HUGO Symbol in original MAF file.\">\n")
ioObject.write("##INFO=<ID=MAF_ref_context,Number=1,Type=String,Description=\"Reference context in original MAF file.\">\n")
ioObject.write("##INFO=<ID=MAF_Genome_Change,Number=1,Type=String,Description=\"Genome change in original MAF file.\">\n")
ioObject.write("##INFO=<ID=MAF_Variant_Type,Number=1,Type=String,Description=\"Variant type (SNP,INS,DEL) in original MAF file.\">\n")
ioObject.write("##INFO=<ID=MAF_Variant_Classification,Number=1,Type=String,Description=\"Variant Classification (if SNP) in original MAF file.\">\n")
ioObject.write("##INFO=<ID=DCC_Project_Code,Number=1,Type=String,Description=\"DCC Project Code in original MAF file.\">\n")
ioObject.write("##FORMAT=<ID=GT,Number=1,Type=String,Description=\"Genotype\">\n")
ioObject.write("##FORMAT=<ID=AD,Number=2,Type=Integer,Description=\"Allelic depths of REF and ALT(s) in the order listed\">\n")
ioObject.write("##FORMAT=<ID=DP,Number=1,Type=Integer,Description=\"Total read depth across this site\">\n")
ioObject.write("##FORMAT=<ID=VF,Number=1,Type=Float,Description=\"Variant Allele Frequency.\">\n")
ioObject.write("#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t%s\t%s\n"%(normalID,tumorID))
@fn_timer
def ProcessFile(Options):
n = UpdateProgressGetN(Options.maf)
if Options.spotcheck:
with open(Options.maf, 'r') as inFile:
SpotCheckProperReference(inFile, Options, n)
with open(Options.maf,'r') as inFile:
i = 0
for line in inFile:
if i == 1:
toPullIDs = line.rstrip('\n').split('\t')
break
else:
header = line
i+=1
tumorID = toPullIDs[12]
normalID = toPullIDs[13]
count = 0
i = 0
with open(Options.maf, 'r') as inFile:
with open(Options.outDir + Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.maf','.vcf'), 'w') as outVCF:
errorFile = Options.outDir + Options.maf.split('/')[len(Options.maf.split('/')) - 1].replace('.maf', '.ignoredSNVs.maf')
with open(errorFile, 'w') as errorOut:
errorOut.write(header)
CreateHeader(outVCF, Options, tumorID, normalID)
for line in inFile:
UpdateProgress(i, n, "Processing Maf File")
if line.startswith('Hugo_Symbol Chromosome Start_position'):
count+=1
i += 1
else:
i += 1
linetoWrite = CreateVCFLine(line, errorFile, Options)
if linetoWrite is not None:
outVCF.write('\t'.join(linetoWrite)+'\n')
print('')
print("INFO: Sorting vcf file.")
vcfFile = Options.outDir + Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.maf','.vcf')
vcfFileSorted = Options.outDir + Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.head.maf','.sorted.vcf.gz')
os.system("cat %s | awk '$1 ~ /^#/ {print $0;next} {print $0 | \"LC_ALL=C sort -k1,1 -k2,2n\"}' | gzip > %s"%(vcfFile, vcfFileSorted))
os.system("rm %s"%(vcfFile))
os.system("gzip %s"%(errorFile))
def main():
print("INFO: Processing MAF file.")
FilePath = os.path.dirname(os.path.abspath(__file__))
(Options, Parser) = OptionParsing()
ProcessFile(Options)
if __name__=="__main__":
main() | 2.75 | 3 |
make_cfg.py | kangwonlee/reposetman | 0 | 12797182 | <gh_stars>0
cfg_template = r"""[18pfd_{i:02d}]
list = 18pfd_wk{i:02d}.txt
folder = repository\wk{i:02d}
count_commits = True
run_all = True
pound_count = True
after = 2018-08-27 12:52
"""
for i in range(5, 10+1):
if 8 != i:
print(cfg_template.format(**{'i': i}))
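# Illustrative output for i=5, derived from the template above: a "[18pfd_05]" section
# with list = 18pfd_wk05.txt and folder = repository\wk05; section 08 is skipped.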
| 2.1875 | 2 |
test/test_finalize.py | sbacchio/tuneit | 0 | 12797183 | from tuneit.graph import visualize
from tuneit.tunable import *
from tuneit.variable import *
from tuneit.tunable import Tunable
from tuneit.finalize import finalize
from pytest import raises
def test_finalize():
with raises(TypeError):
finalize(1)
a = variable(range(10), default=2)
assert finalize(a)[finalize(a).value] == finalize(a).value
c = variable(range(10))
b = finalize(a * a + c)
assert set(b.variables) == set([finalize(a).key, finalize(c).key])
assert b.tunable_variables == b.variables
assert b.compute() == 4
assert b.fixed_variables == b.variables
assert not b.tunable_variables
assert len(b.functions) == 2
assert not b.depends_on(1)
assert b.depends_on(a)
assert b.depends_on(finalize(a).value)
b = b.copy(reset=True)
assert b.tunable_variables == b.variables
assert finalize(a).value.fixed
d = b.copy()
assert d.compute() == 4
assert b.tunable_variables == b.variables
assert d.fixed_variables == b.variables
b.fix("a")
b.fix(finalize(c).value, 1)
assert b.compute() == 5
assert b.fixed_variables == b.variables
with raises(KeyError):
b.fix("foo")
a = variable(range(10), uid=True)
with raises(KeyError):
finalize(a * b).fix("a")
| 2.234375 | 2 |
etoLib/etoLib/Attic/s3_func.py | tonybutzer/eto-draft | 0 | 12797184 | <gh_stars>0
def s3_hello(person_name):
print('Hello There Person:', person_name)
def s3_push_delete_local(local_file, bucket, bucket_filepath):
print('def s3_push_delete_local(local_file, bucket, bucket_filepath):')
| 2.078125 | 2 |
openmdao/utils/tests/test_options_dictionary_feature.py | hwangjt/blue | 0 | 12797185 | <filename>openmdao/utils/tests/test_options_dictionary_feature.py
from openmdao.api import OptionsDictionary, ExplicitComponent
import unittest
from six import PY3, assertRegex
import numpy as np
from openmdao.devtools.testutil import assert_rel_error
class VectorDoublingComp(ExplicitComponent):
def initialize(self):
self.metadata.declare('size', type_=int)
def setup(self):
size = self.metadata['size']
self.add_input('x', shape=size)
self.add_output('y', shape=size)
self.declare_partials('y', 'x', val=2., rows=np.arange(size), cols=np.arange(size))
def compute(self, inputs, outputs):
outputs['y'] = 2 * inputs['x']
class LinearCombinationComp(ExplicitComponent):
def initialize(self):
self.metadata.declare('a', default=1., type_=(int, float))
self.metadata.declare('b', default=1., type_=(int, float))
def setup(self):
self.add_input('x')
self.add_output('y')
self.declare_partials('y', 'x', val=self.metadata['a'])
def compute(self, inputs, outputs):
outputs['y'] = self.metadata['a'] * inputs['x'] + self.metadata['b']
class UnitaryFunctionComp(ExplicitComponent):
def initialize(self):
from types import FunctionType
self.metadata.declare('func', values=('exp', 'cos', 'sin'), type_=FunctionType)
def setup(self):
self.add_input('x')
self.add_output('y')
self.declare_partials('y', 'x', method='fd')
def compute(self, inputs, outputs):
func = self.metadata['func']
if func == 'exp':
outputs['y'] = np.exp(inputs['x'])
elif func == 'cos':
outputs['y'] = np.cos(inputs['x'])
elif func == 'sin':
outputs['y'] = np.sin(inputs['x'])
else:
outputs['y'] = func(inputs['x'])
class TestOptionsDictionaryFeature(unittest.TestCase):
def test_simple(self):
from openmdao.api import Problem
prob = Problem()
prob.model = VectorDoublingComp(size=3)
prob.setup()
prob['x'] = [1., 2., 3.]
prob.run_model()
assert_rel_error(self, prob['y'], [2., 4., 6.])
def test_with_default(self):
from openmdao.api import Problem
prob = Problem()
prob.model = LinearCombinationComp(a=2.)
prob.setup()
prob['x'] = 3
prob.run_model()
self.assertEqual(prob['y'], 7.)
def test_values_and_types(self):
from openmdao.api import Problem
prob = Problem()
prob.model = UnitaryFunctionComp(func='cos')
prob.setup()
prob['x'] = 0.
prob.run_model()
self.assertEqual(prob['y'], 1.)
def myfunc(x):
return x ** 2 + 2
prob = Problem()
prob.model = UnitaryFunctionComp(func=myfunc)
prob.setup()
prob['x'] = 2.
prob.run_model()
self.assertEqual(prob['y'], 6.)
if __name__ == "__main__":
unittest.main()
| 2.265625 | 2 |
azure_monitor/examples/metrics/standard.py | yao-cqc/opentelemetry-azure-monitor-python | 13 | 12797186 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# pylint: disable=import-error
# pylint: disable=no-member
# pylint: disable=no-name-in-module
import time
import requests
from opentelemetry import metrics
from opentelemetry.instrumentation.requests import RequestsInstrumentor
from opentelemetry.sdk.metrics import MeterProvider
from azure_monitor import AzureMonitorMetricsExporter
# Use the default sdk implementation
metrics.set_meter_provider(MeterProvider(stateful=False))
# Track telemetry from the requests library
RequestsInstrumentor().instrument()
meter = RequestsInstrumentor().meter
exporter = AzureMonitorMetricsExporter(
connection_string="InstrumentationKey=<INSTRUMENTATION KEY HERE>"
)
# Export standard metrics from requests library to Azure Monitor
metrics.get_meter_provider().start_pipeline(meter, exporter, 5)
for x in range(10):
for y in range(10):
requests.get("http://example.com")
time.sleep(2)
time.sleep(5)
input("Press any key to exit...")
| 2.296875 | 2 |
python/fabric/fabfile.py | cuongnb14/cookbook | 0 | 12797187 | """
Fabfile template for python3
"""
# -*- coding: utf-8 -*-
from __future__ import print_function
from slackclient import SlackClient
from fabric.api import cd, env, task, run, settings, local
from fabfile_config import *
import traceback
from fabric.contrib.files import exists
LAST_CID_FILE = "last_commit_id.txt"
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
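# Illustrative behaviour of the metaclass above: repeated construction returns the same
# object, e.g. FabSlack() is FabSlack() evaluates to True for the class below.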
class FabSlack(metaclass=Singleton):
sc = SlackClient(SLACK_API_KEY)
def send(self, **kargs):
try:
self.sc.api_call(
"chat.postMessage",
channel="#log-info",
username='Deployment',
# as_user=True,
icon_emoji=":gear:",
**kargs
)
except Exception:
traceback.print_exc()
sc = FabSlack()
@task
def test(target_host):
pass
@task
def set_host(target_host='dev'):
"""Set host before deploy,
    NOTE: please configure the ssh config file on your local machine first.
    Eg use: `fab set_host:dev deploy`
    :param target_host: string
"""
env.use_ssh_config = True
env.hosts = [target_host]
@task
def deploy():
try:
target_host = env.hosts[0]
except IndexError:
target_host = 'dev'
with cd(HOST_API[target_host]['dir']):
do_deploy()
def run_cmd(cmd, target_host=None, local_capture=True):
"""
    Run cmd on the local machine or a remote host and either return the output or print it to the terminal screen
    :param string cmd: Command to run
    :param string target_host: local or remote host name
    :param bool local_capture: If True, return the output and do not print anything to the terminal; if False, print the output to the terminal
    :return: Output string if local_capture=True, otherwise nothing
"""
result = ''
with settings(warn_only=True):
fn = "local" if target_host == 'local' else "run"
if fn == 'local':
result = local(cmd, local_capture) # Do not print to terminal and get the output
else:
result = run(cmd, warn_only=True, pty=False)
if result.failed:
print(result.stdout)
attachments = [{
"title": 'Command: {}'.format(result.command),
"color": "danger",
"pretext": 'Detail: {}'.format(result),
"mrkdwn_in": ["text", "pretext"]
}]
sc.send(attachments=attachments, text="Deploy to *{}* error".format(env.hosts[0]))
raise SystemExit()
else:
return result
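# Illustrative usage (commands are hypothetical): run_cmd("git rev-parse HEAD") executes on
# the current remote host, while run_cmd("ls", target_host='local') runs on the local machine.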
def do_deploy():
if not exists("{}/{}".format(HOST_API[env.hosts[0]]['dir'], LAST_CID_FILE)):
save_last_commit()
run_cmd("git pull")
run_testing()
restart_api()
send_commit_applied()
save_last_commit()
def run_testing():
pass
def restart_api():
pass
def get_current_commit():
return run_cmd("git rev-parse HEAD")
def save_last_commit():
run_cmd("git rev-parse HEAD > {}".format(LAST_CID_FILE))
def get_last_commit():
return run_cmd("cat {}".format(LAST_CID_FILE))
def get_git_logs(last_commit_id, current_commit_id):
return run_cmd("git log {}...{} --oneline --pretty=format:'%s'".format(last_commit_id, current_commit_id))
def send_commit_applied():
last_commit_id = get_last_commit()
current_commit_id = get_current_commit()
commit_applied = get_git_logs(last_commit_id, current_commit_id)
if commit_applied:
commit_applied = "••• " + commit_applied
commit_applied = commit_applied.replace("\n", "\n••• ")
attachments = [
{
"color": "good",
"title": "Commit applied:",
"text": commit_applied,
},
]
sc.send(attachments=attachments, text="Deploy to *{}* success".format(env.hosts[0]))
| 2.109375 | 2 |
worker/ttypes.py | apioo/fusio-worker-python | 0 | 12797188 | <filename>worker/ttypes.py
#
# Autogenerated by Thrift Compiler (0.14.2)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
from thrift.transport import TTransport
all_structs = []
class Message(object):
"""
Attributes:
- success
- message
"""
def __init__(self, success=None, message=None,):
self.success = success
self.message = message
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.BOOL:
self.success = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.message = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('Message')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 1)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 2)
oprot.writeString(self.message.encode('utf-8') if sys.version_info[0] == 2 else self.message)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Connection(object):
"""
Attributes:
- name
- type
- config
"""
def __init__(self, name=None, type=None, config=None,):
self.name = name
self.type = type
self.config = config
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.type = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.MAP:
self.config = {}
(_ktype1, _vtype2, _size0) = iprot.readMapBegin()
for _i4 in range(_size0):
_key5 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
_val6 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
self.config[_key5] = _val6
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('Connection')
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name)
oprot.writeFieldEnd()
if self.type is not None:
oprot.writeFieldBegin('type', TType.STRING, 2)
oprot.writeString(self.type.encode('utf-8') if sys.version_info[0] == 2 else self.type)
oprot.writeFieldEnd()
if self.config is not None:
oprot.writeFieldBegin('config', TType.MAP, 3)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.config))
for kiter7, viter8 in self.config.items():
oprot.writeString(kiter7.encode('utf-8') if sys.version_info[0] == 2 else kiter7)
oprot.writeString(viter8.encode('utf-8') if sys.version_info[0] == 2 else viter8)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Action(object):
"""
Attributes:
- name
- code
"""
def __init__(self, name=None, code=None,):
self.name = name
self.code = code
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.code = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('Action')
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name)
oprot.writeFieldEnd()
if self.code is not None:
oprot.writeFieldBegin('code', TType.STRING, 2)
oprot.writeString(self.code.encode('utf-8') if sys.version_info[0] == 2 else self.code)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Execute(object):
"""
Attributes:
- action
- request
- context
"""
def __init__(self, action=None, request=None, context=None,):
self.action = action
self.request = request
self.context = context
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.action = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.request = Request()
self.request.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.context = Context()
self.context.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('Execute')
if self.action is not None:
oprot.writeFieldBegin('action', TType.STRING, 1)
oprot.writeString(self.action.encode('utf-8') if sys.version_info[0] == 2 else self.action)
oprot.writeFieldEnd()
if self.request is not None:
oprot.writeFieldBegin('request', TType.STRUCT, 2)
self.request.write(oprot)
oprot.writeFieldEnd()
if self.context is not None:
oprot.writeFieldBegin('context', TType.STRUCT, 3)
self.context.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Request(object):
"""
Attributes:
- http
- rpc
"""
def __init__(self, http=None, rpc=None,):
self.http = http
self.rpc = rpc
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.http = HttpRequest()
self.http.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.rpc = RpcRequest()
self.rpc.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('Request')
if self.http is not None:
oprot.writeFieldBegin('http', TType.STRUCT, 1)
self.http.write(oprot)
oprot.writeFieldEnd()
if self.rpc is not None:
oprot.writeFieldBegin('rpc', TType.STRUCT, 2)
self.rpc.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class HttpRequest(object):
"""
Attributes:
- method
- headers
- uriFragments
- parameters
- body
"""
def __init__(self, method=None, headers=None, uriFragments=None, parameters=None, body=None,):
self.method = method
self.headers = headers
self.uriFragments = uriFragments
self.parameters = parameters
self.body = body
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.method = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.MAP:
self.headers = {}
(_ktype10, _vtype11, _size9) = iprot.readMapBegin()
for _i13 in range(_size9):
_key14 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
_val15 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
self.headers[_key14] = _val15
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.MAP:
self.uriFragments = {}
(_ktype17, _vtype18, _size16) = iprot.readMapBegin()
for _i20 in range(_size16):
_key21 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
_val22 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
self.uriFragments[_key21] = _val22
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.MAP:
self.parameters = {}
(_ktype24, _vtype25, _size23) = iprot.readMapBegin()
for _i27 in range(_size23):
_key28 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
_val29 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
self.parameters[_key28] = _val29
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.body = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('HttpRequest')
if self.method is not None:
oprot.writeFieldBegin('method', TType.STRING, 1)
oprot.writeString(self.method.encode('utf-8') if sys.version_info[0] == 2 else self.method)
oprot.writeFieldEnd()
if self.headers is not None:
oprot.writeFieldBegin('headers', TType.MAP, 2)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.headers))
for kiter30, viter31 in self.headers.items():
oprot.writeString(kiter30.encode('utf-8') if sys.version_info[0] == 2 else kiter30)
oprot.writeString(viter31.encode('utf-8') if sys.version_info[0] == 2 else viter31)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.uriFragments is not None:
oprot.writeFieldBegin('uriFragments', TType.MAP, 3)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.uriFragments))
for kiter32, viter33 in self.uriFragments.items():
oprot.writeString(kiter32.encode('utf-8') if sys.version_info[0] == 2 else kiter32)
oprot.writeString(viter33.encode('utf-8') if sys.version_info[0] == 2 else viter33)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.parameters is not None:
oprot.writeFieldBegin('parameters', TType.MAP, 4)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters))
for kiter34, viter35 in self.parameters.items():
oprot.writeString(kiter34.encode('utf-8') if sys.version_info[0] == 2 else kiter34)
oprot.writeString(viter35.encode('utf-8') if sys.version_info[0] == 2 else viter35)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.body is not None:
oprot.writeFieldBegin('body', TType.STRING, 5)
oprot.writeString(self.body.encode('utf-8') if sys.version_info[0] == 2 else self.body)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class RpcRequest(object):
"""
Attributes:
- arguments
"""
def __init__(self, arguments=None,):
self.arguments = arguments
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.arguments = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('RpcRequest')
if self.arguments is not None:
oprot.writeFieldBegin('arguments', TType.STRING, 1)
oprot.writeString(self.arguments.encode('utf-8') if sys.version_info[0] == 2 else self.arguments)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Context(object):
"""
Attributes:
- routeId
- baseUrl
- app
- user
"""
def __init__(self, routeId=None, baseUrl=None, app=None, user=None,):
self.routeId = routeId
self.baseUrl = baseUrl
self.app = app
self.user = user
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.routeId = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.baseUrl = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.app = App()
self.app.read(iprot)
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
self.user = User()
self.user.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('Context')
if self.routeId is not None:
oprot.writeFieldBegin('routeId', TType.I64, 1)
oprot.writeI64(self.routeId)
oprot.writeFieldEnd()
if self.baseUrl is not None:
oprot.writeFieldBegin('baseUrl', TType.STRING, 2)
oprot.writeString(self.baseUrl.encode('utf-8') if sys.version_info[0] == 2 else self.baseUrl)
oprot.writeFieldEnd()
if self.app is not None:
oprot.writeFieldBegin('app', TType.STRUCT, 3)
self.app.write(oprot)
oprot.writeFieldEnd()
if self.user is not None:
oprot.writeFieldBegin('user', TType.STRUCT, 4)
self.user.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class App(object):
"""
Attributes:
- id
- userId
- status
- name
- url
- appKey
- scopes
- parameters
"""
def __init__(self, id=None, userId=None, status=None, name=None, url=None, appKey=None, scopes=None, parameters=None,):
self.id = id
self.userId = userId
self.status = status
self.name = name
self.url = url
self.appKey = appKey
self.scopes = scopes
self.parameters = parameters
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.id = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I64:
self.userId = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.status = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.name = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.url = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRING:
self.appKey = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.LIST:
self.scopes = []
(_etype39, _size36) = iprot.readListBegin()
for _i40 in range(_size36):
_elem41 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
self.scopes.append(_elem41)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.LIST:
self.parameters = []
(_etype45, _size42) = iprot.readListBegin()
for _i46 in range(_size42):
_elem47 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
self.parameters.append(_elem47)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('App')
if self.id is not None:
oprot.writeFieldBegin('id', TType.I64, 1)
oprot.writeI64(self.id)
oprot.writeFieldEnd()
if self.userId is not None:
oprot.writeFieldBegin('userId', TType.I64, 2)
oprot.writeI64(self.userId)
oprot.writeFieldEnd()
if self.status is not None:
oprot.writeFieldBegin('status', TType.I32, 3)
oprot.writeI32(self.status)
oprot.writeFieldEnd()
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 4)
oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name)
oprot.writeFieldEnd()
if self.url is not None:
oprot.writeFieldBegin('url', TType.STRING, 5)
oprot.writeString(self.url.encode('utf-8') if sys.version_info[0] == 2 else self.url)
oprot.writeFieldEnd()
if self.appKey is not None:
oprot.writeFieldBegin('appKey', TType.STRING, 6)
oprot.writeString(self.appKey.encode('utf-8') if sys.version_info[0] == 2 else self.appKey)
oprot.writeFieldEnd()
if self.scopes is not None:
oprot.writeFieldBegin('scopes', TType.LIST, 7)
oprot.writeListBegin(TType.STRING, len(self.scopes))
for iter48 in self.scopes:
oprot.writeString(iter48.encode('utf-8') if sys.version_info[0] == 2 else iter48)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.parameters is not None:
oprot.writeFieldBegin('parameters', TType.LIST, 8)
oprot.writeListBegin(TType.STRING, len(self.parameters))
for iter49 in self.parameters:
oprot.writeString(iter49.encode('utf-8') if sys.version_info[0] == 2 else iter49)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class User(object):
"""
Attributes:
- id
- roleId
- categoryId
- status
- name
- email
- points
"""
def __init__(self, id=None, roleId=None, categoryId=None, status=None, name=None, email=None, points=None,):
self.id = id
self.roleId = roleId
self.categoryId = categoryId
self.status = status
self.name = name
self.email = email
self.points = points
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.id = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I64:
self.roleId = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.categoryId = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.status = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.name = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRING:
self.email = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.I32:
self.points = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('User')
if self.id is not None:
oprot.writeFieldBegin('id', TType.I64, 1)
oprot.writeI64(self.id)
oprot.writeFieldEnd()
if self.roleId is not None:
oprot.writeFieldBegin('roleId', TType.I64, 2)
oprot.writeI64(self.roleId)
oprot.writeFieldEnd()
if self.categoryId is not None:
oprot.writeFieldBegin('categoryId', TType.I64, 3)
oprot.writeI64(self.categoryId)
oprot.writeFieldEnd()
if self.status is not None:
oprot.writeFieldBegin('status', TType.I32, 4)
oprot.writeI32(self.status)
oprot.writeFieldEnd()
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 5)
oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name)
oprot.writeFieldEnd()
if self.email is not None:
oprot.writeFieldBegin('email', TType.STRING, 6)
oprot.writeString(self.email.encode('utf-8') if sys.version_info[0] == 2 else self.email)
oprot.writeFieldEnd()
if self.points is not None:
oprot.writeFieldBegin('points', TType.I32, 7)
oprot.writeI32(self.points)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Result(object):
"""
Attributes:
- response
- events
- logs
"""
def __init__(self, response=None, events=None, logs=None,):
self.response = response
self.events = events
self.logs = logs
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.response = Response()
self.response.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.events = []
(_etype53, _size50) = iprot.readListBegin()
for _i54 in range(_size50):
_elem55 = Event()
_elem55.read(iprot)
self.events.append(_elem55)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.logs = []
(_etype59, _size56) = iprot.readListBegin()
for _i60 in range(_size56):
_elem61 = Log()
_elem61.read(iprot)
self.logs.append(_elem61)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('Result')
if self.response is not None:
oprot.writeFieldBegin('response', TType.STRUCT, 1)
self.response.write(oprot)
oprot.writeFieldEnd()
if self.events is not None:
oprot.writeFieldBegin('events', TType.LIST, 2)
oprot.writeListBegin(TType.STRUCT, len(self.events))
for iter62 in self.events:
iter62.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.logs is not None:
oprot.writeFieldBegin('logs', TType.LIST, 3)
oprot.writeListBegin(TType.STRUCT, len(self.logs))
for iter63 in self.logs:
iter63.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Response(object):
"""
Attributes:
- statusCode
- headers
- body
"""
def __init__(self, statusCode=None, headers=None, body=None,):
self.statusCode = statusCode
self.headers = headers
self.body = body
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.statusCode = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.MAP:
self.headers = {}
(_ktype65, _vtype66, _size64) = iprot.readMapBegin()
for _i68 in range(_size64):
_key69 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
_val70 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
self.headers[_key69] = _val70
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.body = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('Response')
if self.statusCode is not None:
oprot.writeFieldBegin('statusCode', TType.I32, 1)
oprot.writeI32(self.statusCode)
oprot.writeFieldEnd()
if self.headers is not None:
oprot.writeFieldBegin('headers', TType.MAP, 2)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.headers))
for kiter71, viter72 in self.headers.items():
oprot.writeString(kiter71.encode('utf-8') if sys.version_info[0] == 2 else kiter71)
oprot.writeString(viter72.encode('utf-8') if sys.version_info[0] == 2 else viter72)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.body is not None:
oprot.writeFieldBegin('body', TType.STRING, 3)
oprot.writeString(self.body.encode('utf-8') if sys.version_info[0] == 2 else self.body)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Event(object):
"""
Attributes:
- eventName
- data
"""
def __init__(self, eventName=None, data=None,):
self.eventName = eventName
self.data = data
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.eventName = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.data = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('Event')
if self.eventName is not None:
oprot.writeFieldBegin('eventName', TType.STRING, 1)
oprot.writeString(self.eventName.encode('utf-8') if sys.version_info[0] == 2 else self.eventName)
oprot.writeFieldEnd()
if self.data is not None:
oprot.writeFieldBegin('data', TType.STRING, 2)
oprot.writeString(self.data.encode('utf-8') if sys.version_info[0] == 2 else self.data)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Log(object):
"""
Attributes:
- level
- message
"""
def __init__(self, level=None, message=None,):
self.level = level
self.message = message
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.level = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.message = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('Log')
if self.level is not None:
oprot.writeFieldBegin('level', TType.STRING, 1)
oprot.writeString(self.level.encode('utf-8') if sys.version_info[0] == 2 else self.level)
oprot.writeFieldEnd()
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 2)
oprot.writeString(self.message.encode('utf-8') if sys.version_info[0] == 2 else self.message)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(Message)
Message.thrift_spec = (
None, # 0
(1, TType.BOOL, 'success', None, None, ), # 1
(2, TType.STRING, 'message', 'UTF8', None, ), # 2
)
all_structs.append(Connection)
Connection.thrift_spec = (
None, # 0
(1, TType.STRING, 'name', 'UTF8', None, ), # 1
(2, TType.STRING, 'type', 'UTF8', None, ), # 2
(3, TType.MAP, 'config', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 3
)
all_structs.append(Action)
Action.thrift_spec = (
None, # 0
(1, TType.STRING, 'name', 'UTF8', None, ), # 1
(2, TType.STRING, 'code', 'UTF8', None, ), # 2
)
all_structs.append(Execute)
Execute.thrift_spec = (
None, # 0
(1, TType.STRING, 'action', 'UTF8', None, ), # 1
(2, TType.STRUCT, 'request', [Request, None], None, ), # 2
(3, TType.STRUCT, 'context', [Context, None], None, ), # 3
)
all_structs.append(Request)
Request.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'http', [HttpRequest, None], None, ), # 1
(2, TType.STRUCT, 'rpc', [RpcRequest, None], None, ), # 2
)
all_structs.append(HttpRequest)
HttpRequest.thrift_spec = (
None, # 0
(1, TType.STRING, 'method', 'UTF8', None, ), # 1
(2, TType.MAP, 'headers', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 2
(3, TType.MAP, 'uriFragments', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 3
(4, TType.MAP, 'parameters', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 4
(5, TType.STRING, 'body', 'UTF8', None, ), # 5
)
all_structs.append(RpcRequest)
RpcRequest.thrift_spec = (
None, # 0
(1, TType.STRING, 'arguments', 'UTF8', None, ), # 1
)
all_structs.append(Context)
Context.thrift_spec = (
None, # 0
(1, TType.I64, 'routeId', None, None, ), # 1
(2, TType.STRING, 'baseUrl', 'UTF8', None, ), # 2
(3, TType.STRUCT, 'app', [App, None], None, ), # 3
(4, TType.STRUCT, 'user', [User, None], None, ), # 4
)
all_structs.append(App)
App.thrift_spec = (
None, # 0
(1, TType.I64, 'id', None, None, ), # 1
(2, TType.I64, 'userId', None, None, ), # 2
(3, TType.I32, 'status', None, None, ), # 3
(4, TType.STRING, 'name', 'UTF8', None, ), # 4
(5, TType.STRING, 'url', 'UTF8', None, ), # 5
(6, TType.STRING, 'appKey', 'UTF8', None, ), # 6
(7, TType.LIST, 'scopes', (TType.STRING, 'UTF8', False), None, ), # 7
(8, TType.LIST, 'parameters', (TType.STRING, 'UTF8', False), None, ), # 8
)
all_structs.append(User)
User.thrift_spec = (
None, # 0
(1, TType.I64, 'id', None, None, ), # 1
(2, TType.I64, 'roleId', None, None, ), # 2
(3, TType.I64, 'categoryId', None, None, ), # 3
(4, TType.I32, 'status', None, None, ), # 4
(5, TType.STRING, 'name', 'UTF8', None, ), # 5
(6, TType.STRING, 'email', 'UTF8', None, ), # 6
(7, TType.I32, 'points', None, None, ), # 7
)
all_structs.append(Result)
Result.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'response', [Response, None], None, ), # 1
(2, TType.LIST, 'events', (TType.STRUCT, [Event, None], False), None, ), # 2
(3, TType.LIST, 'logs', (TType.STRUCT, [Log, None], False), None, ), # 3
)
all_structs.append(Response)
Response.thrift_spec = (
None, # 0
(1, TType.I32, 'statusCode', None, None, ), # 1
(2, TType.MAP, 'headers', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 2
(3, TType.STRING, 'body', 'UTF8', None, ), # 3
)
all_structs.append(Event)
Event.thrift_spec = (
None, # 0
(1, TType.STRING, 'eventName', 'UTF8', None, ), # 1
(2, TType.STRING, 'data', 'UTF8', None, ), # 2
)
all_structs.append(Log)
Log.thrift_spec = (
None, # 0
(1, TType.STRING, 'level', 'UTF8', None, ), # 1
(2, TType.STRING, 'message', 'UTF8', None, ), # 2
)
fix_spec(all_structs)
del all_structs
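# Illustrative usage sketch: the structs above serialize themselves with any
# Thrift protocol, for example a binary protocol over an in-memory buffer.
#
#     from thrift.protocol import TBinaryProtocol
#     buf = TTransport.TMemoryBuffer()
#     Message(success=True, message='ok').write(TBinaryProtocol.TBinaryProtocol(buf))
#     payload = buf.getvalue()
#
#     decoded = Message()
#     decoded.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(payload)))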
| 2.046875 | 2 |
monotonic_gru.py | arghosh/MNSS | 1 | 12797189 | import torch
import torch.nn as nn
import math
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class MonotonicGruCell(nn.Module):
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
"""
For each element in the input sequence, each layer computes the following
function:
MonotonicGru Math
\begin{array}{ll}
r_t = \sigma(W_{ir} x_t + b_{ir} + W_{hr} h_{(t-1)} + b_{hr}) \\
z_t = \sigma(W_{iz} x_t + b_{iz} + W_{hz} h_{(t-1)} + b_{hz}) \\
n_t = \tanh(W_{in} x_t + b_{in} + (W_{hn}(r_t* h_{(t-1)})+ b_{hn})) \\
h_t = (1 - z_t) * n_t + h_{(t-1)}
\end{array}
"""
self.input_size = input_size
self.hidden_size = hidden_size
self.i2h = nn.Linear(input_size, 3 * hidden_size, bias=bias)
self.h2h = nn.Linear(hidden_size, 3 * hidden_size, bias=bias)
self.reset_parameters()
def reset_parameters(self):
std = 1.0 / math.sqrt(self.hidden_size)
for w in self.parameters():
w.data.uniform_(-std, std)
def forward(self, x, hidden=None):
# x is B, input_size
if hidden is None:
hidden = torch.zeros(x.size(0), self.hidden_size).to(device)
gi = self.i2h(x) # B, 3H
gh = self.h2h(hidden) # B, 3H
i_r, i_i, i_n = gi.chunk(3, 1)
h_r, h_i, h_n = gh.chunk(3, 1)
resetgate_tmp = i_r + h_r
inputgate_tmp = i_i + h_i
sigmoid = nn.Sigmoid()
resetgate = sigmoid(resetgate_tmp)
inputgate = sigmoid(inputgate_tmp)
hr = self.h2h(hidden * resetgate)
_, _, h_n = hr.chunk(3, 1)
newgate = sigmoid(i_n + h_n)
hy = hidden + (1.-hidden) * inputgate * newgate
return hy
class MonotonicGru(nn.Module):
def __init__(self, input_size, hidden_size, bias=True, num_layers=1, batch_first=False, dropout=0.0):
super().__init__()
self.cell = MonotonicGruCell(
input_size=input_size, hidden_size=hidden_size, bias=True)
self.batch_first = batch_first
def forward(self, input_, lengths, hidden=None):
        # input_ is of dimensionality (T, B, input_size, ...)
        # lengths is B,
dim = 1 if self.batch_first else 0
outputs = []
for x in torch.unbind(input_, dim=dim): # x dim is B, I
hidden = self.cell(x, hidden)
outputs.append(hidden.clone())
hidden_states = torch.stack(outputs) # T, B, H
last_states = []
for idx, l in enumerate(lengths):
last_states.append(hidden_states[l-1, idx, :])
last_states = torch.stack(last_states)
return hidden_states, last_states
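if __name__ == "__main__":
    # Minimal usage sketch with toy dimensions (T=5 steps, B=3 sequences,
    # 4 input features, hidden size 8); `lengths` holds the true length of
    # each sequence so that `last_states` picks the right time step.
    T, B, I, H = 5, 3, 4, 8
    model = MonotonicGru(input_size=I, hidden_size=H).to(device)
    inputs = torch.rand(T, B, I).to(device)
    lengths = [5, 3, 4]
    hidden_states, last_states = model(inputs, lengths)
    print(hidden_states.shape, last_states.shape)  # (T, B, H) and (B, H)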
| 2.875 | 3 |
tests/app/signals.py | systemallica/django-snitch | 16 | 12797190 | <gh_stars>10-100
import datetime
from django.db.models.signals import post_save
from django.dispatch import receiver
from django_celery_beat.models import DAYS
from snitch.schedules.models import Schedule
from tests.app.events import DUMMY_EVENT, EVERY_HOUR
from tests.app.models import OtherStuff
@receiver(post_save, sender=OtherStuff)
def post_save_other_stuff(sender, instance, created, **kwargs):
"""Creates the schedules for other stuff."""
if created:
        # send a dummy event in 2 days
schedule = Schedule(
actor=instance,
verb=DUMMY_EVENT,
limit=1,
every=2,
period=DAYS,
start_time=instance.created + datetime.timedelta(days=2),
)
schedule.save()
        # send a dummy event every hour, but with a cron-style schedule
schedule = Schedule(
actor=instance, verb=EVERY_HOUR, minute=instance.created.minute, hour="*/1"
)
schedule.save()
| 2.234375 | 2 |
Editor/Noriter/Noriter/UI/SideWindow.py | RangHo/pini-engine | 0 | 12797191 | <filename>Editor/Noriter/Noriter/UI/SideWindow.py
from PySide import QtCore,QtGui
from Noriter.UI.Layout import *
from Noriter.UI import NoriterWindow as nWin
class SideWindow (nWin.NoriterWindow, QtGui.QWidget):
def __init__(self):
super(SideWindow, self).__init__()
self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
@LayoutGUI
def GUI(self):
pass
| 1.78125 | 2 |
addons/mrp_byproduct/wizard/change_production_qty.py | jjiege/odoo | 0 | 12797192 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, models
class ChangeProductionQty(models.TransientModel):
_inherit = 'change.production.qty'
@api.model
def _update_product_to_produce(self, prod, qty, old_qty):
modification = super(ChangeProductionQty, self)._update_product_to_produce(prod, qty, old_qty)
for sub_product_line in prod.bom_id.sub_products:
move = prod.move_finished_ids.filtered(lambda x: x.subproduct_id == sub_product_line and x.state not in ('done', 'cancel'))
if move:
product_uom_factor = prod.product_uom_id._compute_quantity(prod.product_qty - prod.qty_produced, prod.bom_id.product_uom_id)
qty1 = sub_product_line.product_qty
qty1 *= product_uom_factor / prod.bom_id.product_qty
modification[move[0]] = (qty1, move[0].product_uom_qty)
move[0].write({'product_uom_qty': qty1})
else:
move = prod._create_byproduct_move(sub_product_line)
modification[move] = (move.product_uom_qty, 0)
return modification
| 2.03125 | 2 |
ife/features/tests/test_colourfulness.py | Collonville/ImageFeatureExtractor | 2 | 12797193 | <filename>ife/features/tests/test_colourfulness.py
import unittest
class TestColoufulness(unittest.TestCase):
def test_colourfulness(self) -> None:
pass
| 1.40625 | 1 |
src/data_utils.py | lrgr/sigma | 14 | 12797194 | import numpy as np
from pomegranate import *
import json
################################################################################
# LOGGING
################################################################################
import logging
# Logging format
FORMAT = '%(asctime)s SigMa %(levelname)-10s: %(message)s'
logging.basicConfig(format=FORMAT)
def get_logger(verbosity=logging.INFO):
'''
Returns logger object
'''
logger = logging.getLogger(__name__)
logger.setLevel(verbosity)
return logger
################################################################################
# UTILS
################################################################################
def sample_and_noise(model, noise_dist, n_seqs, seqs_len):
noise_change_dist = DiscreteDistribution(dict(zip(range(96), [1.0 / 96] * 96)))
seqs = []
noised_seqs = []
for i in range(n_seqs):
seq = np.array(model.sample(seqs_len))
seqs.append(seq)
noised_seq = seq.copy()
hits = noise_dist.sample(seqs_len)
for j, hit in enumerate(hits):
if hit == 0:
noised_seq[j] = noise_change_dist.sample()
noised_seqs.append(noised_seq)
return seqs, noised_seqs
def get_emissions(file='data/emissions_for_breast_cancer'):
return np.load(file + '.npy')
def sample_uniform_between_a_b(n_states, a=0.0, b=1.0):
return (b - a) * np.random.sample(n_states) + a
def random_seqs_from_json(file_name, n_seqs=10):
seqs = []
seqs_names = []
json_file = json.load(open(file_name))
samples = json_file[u'samples']
samples_to_seq = json_file[u'sampleToSequence']
samples = np.random.permutation(samples)
for i in range(n_seqs):
seqs.append(samples_to_seq[samples[i]])
seqs_names.append(samples[i])
return seqs, seqs_names
def to_json(file_name, dict_to_save):
with open(file_name + '.json', 'w') as fp:
json.dump(dict_to_save, fp)
def full_sample_to_chromosomes_seqs(sample, dists_sample):
np_sample = np.array(sample)
starting_chromosome_idxs = np.where(np.array(dists_sample) >= 1e100)[0]
return np.split(np_sample, starting_chromosome_idxs)[1:]
def load_json(file_name):
return json.load(open(file_name))
def get_split_sequences(file_name, sample_numbers=None):
json_file = json.load(open(file_name))
samples = json_file[u'samples']
samples_to_seq = json_file[u'sampleToSequence']
samples_dists = json_file[u'sampleToPrevMutDists']
out_seqs = []
out_names = []
if sample_numbers is None:
sample_numbers = range(len(samples))
for i in sample_numbers:
n = samples[i]
out_names.append(n)
out_seqs.append(full_sample_to_chromosomes_seqs(samples_to_seq[n], samples_dists[n]))
return zip(out_names, out_seqs)
def get_full_sequences(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'):
json_file = json.load(open(file_name))
samples = json_file[u'samples']
samples_to_seq = json_file[u'sampleToSequence']
out_seqs = []
out_names = []
for n in samples:
out_names.append(n)
out_seqs.append(samples_to_seq[n])
return zip(out_names, out_seqs)
def get_count_sequences_as_mat(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'):
json_file = json.load(open(file_name))
samples = json_file[u'samples']
samples_to_seq = json_file[u'sampleToSequence']
# finding num_object + counting
num_objects = 0
samples_objects = []
samples_counts = []
for sample in samples:
objects, counts = np.unique(samples_to_seq[sample], return_counts=True)
samples_objects.append(objects)
samples_counts.append(counts)
num_objects = max(num_objects, np.max(objects))
num_objects += 1
count_mat = np.zeros((len(samples), num_objects))
for i in range(len(samples)):
count_mat[i, samples_objects[i]] = samples_counts[i]
return count_mat
def get_samples_names(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'):
json_file = json.load(open(file_name))
samples = json_file[u'samples']
return samples
def get_split_sequences_by_threshold(file_name, threshold, sample_numbers=None):
json_file = json.load(open(file_name))
samples = json_file[u'samples']
samples_to_seq = json_file[u'sampleToSequence']
samples_dists = json_file[u'sampleToPrevMutDists']
out_seqs = []
out_names = []
if sample_numbers is None:
sample_numbers = range(len(samples))
for i in sample_numbers:
n = samples[i]
out_names.append(n)
out_seqs.append(full_sample_to_chromosomes_seqs_by_threshold(samples_to_seq[n], samples_dists[n], threshold))
return zip(out_names, out_seqs)
def full_sample_to_chromosomes_seqs_by_threshold(sample, dists_sample, threshold):
np_sample = np.array(sample)
np_dists = np.array(dists_sample)
starting_chromosome_idxs = np.where(np_dists >= 1e100)[0]
chromosomes = np.split(np_sample, starting_chromosome_idxs)[1:]
chromosomes_dists = np.split(np_dists, starting_chromosome_idxs)[1:]
out = []
for i in range(len(chromosomes)):
chromosome = chromosomes[i]
chromosome_dists = chromosomes_dists[i]
starting_seqs_idxs = np.where(chromosome_dists >= threshold)[0]
seqs = np.split(chromosome, starting_seqs_idxs)[1:]
out.append(seqs)
return out
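# Worked example: with dists_sample = [1e100, 1, 5e6, 1e100, 2] and
# threshold = 1e6, the 1e100 sentinels split the sample into two chromosomes
# (items 0-2 and items 3-4); the first chromosome is then split again before
# item 2, whose distance (5e6) reaches the threshold, so the result is
# [[items 0-1], [item 2]] for the first chromosome and [[items 3-4]] for the second.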
def seqs_to_seq(seqs):
out = []
for seq in seqs:
out.extend(seq)
return np.array(out)
def seqs_to_seq_of_prefix(seqs):
out = []
for seq in seqs:
out.append(seq[0])
return np.array(out)
def sample_indices_not_in_dir(dir_path):
import os
samples_in_dir = [f[:-5] for f in os.listdir(dir_path)]
samples = get_samples_names()
missing_indices = []
for i in range(len(samples)):
if samples[i] not in samples_in_dir:
missing_indices.append(i)
return missing_indices
| 2.171875 | 2 |
a.py | kavindu23110/groupproject | 0 | 12797195 | import serial
import time
import sys,ast
c = ' '.join(sys.argv[1:])
num = c.replace("[", "").replace("]", "").split(",")
message = num.pop()
class TextMessage:
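    # The methods below drive a GSM modem attached to a serial port using
    # standard AT commands: ATZ resets the modem, AT+CMGF=1 selects text mode,
    # AT+CMGS="<recipient>" opens a message, and Ctrl+Z (chr(26)) terminates
    # and sends it. The port name ('COM7') and baud rate are hardware-specific.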
# def __init__(self):
# self.recipient = recipient
# self.content = message
def connectPhone(self):
self.ser = serial.Serial('COM7', 9600, timeout=5, xonxoff = False, rtscts = False, bytesize = serial.EIGHTBITS, parity = serial.PARITY_NONE, stopbits = serial.STOPBITS_ONE)
time.sleep(1)
def sendMessage(self,recipient, message):
self.ser.write('ATZ\r'.encode())
time.sleep(0.5)
self.ser.write('AT+CMGF=1\r'.encode())
time.sleep(0.5)
self.ser.write(('''AT+CMGS="''' + recipient + '''"\r''').encode())
time.sleep(0.5)
self.ser.write((message + "\r").encode())
time.sleep(0.5)
self.ser.write(chr(26).encode())
time.sleep(0.5)
def disconnectPhone(self):
self.ser.close()
sms = TextMessage()
sms.connectPhone()
for numbers in num:
print(numbers)
sms.sendMessage(numbers,message)
#time.sleep(0.5)
sms.disconnectPhone()
print ("1")
| 2.78125 | 3 |
service.py | ramrathi/baritone | 3 | 12797196 | from flask import Flask
import baritone
import json
app = Flask(__name__)
@app.route('/')
def hello():
print("Hello from terminal")
return "Hello world"
@app.route('/youtube/<link>')
def youtube(link):
print("ENTERED")
url = 'https://www.youtube.com/watch?v='+link
print(url)
result,status = (baritone.pipeline(url,'youtube'))
convert = {
'url': url,
'text': result,
'converted':status
}
return json.dumps(convert)
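# Example request (assuming Flask's default port 5000):
#   GET http://localhost:5000/youtube/<video-id>
# returns a JSON object with the reconstructed YouTube URL, the transcribed
# text and the conversion status flag.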
if __name__ == '__main__':
print("Starting server")
app.run(host='0.0.0.0') | 2.5625 | 3 |
test/unit/models/rulesets.py | sumanshil/pagerduty-api-python-client | 0 | 12797197 | <gh_stars>0
# Copyright (c) PagerDuty.
# See LICENSE for details.
import json
import unittest
import os.path
import requests_mock
from pypd import Rulesets
class IntegrationTestCase(unittest.TestCase):
def setUp(self):
self.base_url = 'https://api.pagerduty.com'
self.api_key = 'FAUX_API_KEY'
self.limit = 25
base_path = os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
'data')
path = os.path.join(base_path, 'sample_rulesets.json')
with open(path) as f:
self.rulesets_data = json.load(f)
self.rulesetid = "0e84de00-9511-4380-9f4f-a7b568bb49a0"
self.rulesets = list(filter(
lambda s: s['id'] == self.rulesetid,
self.rulesets_data["rulesets"],
))[0]
self.ruleset_data = {
'ruleset': self.rulesets,
}
path = os.path.join(base_path, 'sample_rules.json')
with open(path) as f:
self.rules_data = json.load(f)
@requests_mock.Mocker()
def test_fetch_a_ruleset(self, m):
# setup mocked request uris
service_url = '{0}/rulesets/{1}'.format(
self.base_url,
self.rulesetid,
)
m.register_uri(
'GET',
service_url,
json=self.ruleset_data,
complete_qs=False
)
rulesets = Rulesets.fetch(self.rulesetid, api_key=self.api_key)
data = rulesets.get_ruleset(self.rulesetid)
self.assertEqual(len(data["routing_keys"]), 1)
self.assertEqual(data["routing_keys"][0], "<KEY>")
@requests_mock.Mocker()
def test_fetch_all_rulesets(self, m):
# setup mocked request uris
service_url = '{0}/rulesets'.format(
self.base_url
)
m.register_uri(
'GET',
service_url,
json=self.rulesets_data,
complete_qs=False
)
rulesets = Rulesets._fetch_all(api_key=self.api_key)
data = rulesets[0].get_rulesets()
self.assertEqual(len(data[0]["routing_keys"]), 1)
self.assertEqual(data[0]["routing_keys"][0], "<KEY>") | 2.25 | 2 |
ievv_auth/ievv_api_key/apps.py | appressoas/ievv_auth | 0 | 12797198 | <reponame>appressoas/ievv_auth
from django.apps import AppConfig
class IevvApiKeyConfig(AppConfig):
name = 'ievv_auth.ievv_api_key'
| 1.242188 | 1 |
pylon/aws/_bases.py | ch41rmn/pylon-oss | 3 | 12797199 | <reponame>ch41rmn/pylon-oss
"""Some base classes for AWS"""
class BaseMixin:
def __init__(self, name: str):
self.name = name
def __repr__(self):
return '<{module}.{cls} {name}>'.format(
module=self.__class__.__module__,
cls=self.__class__.__name__,
name=self.name
)
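# Example: repr(BaseMixin('my-queue')) yields '<pylon.aws._bases.BaseMixin my-queue>'
# when the class is imported from the pylon.aws._bases module.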
| 2.03125 | 2 |
03-DataWranglingWithMongoDB/P02-WrangleOpenStreetMapData/handler.py | ccampguilhem/Udacity-DataAnalyst | 1 | 12797200 | <gh_stars>1-10
import xml.sax
from collections import Counter, defaultdict
"""
Custom handler for parsing OpenStreetMap XML files.
While parsing the XML file, the handler keeps track of:
- tags count
- tags ancestors
It is possible to register callback functions for start or end events.
The callbacks for the start event will be called with the following arguments:
- stack
- locator
The callbacks for the end event will be called with the following arguments:
- element name
- element children
- locator
The return value of callbacks is ignored by the handler class.
This makes it possible to enhance the parser with 'on the fly' data quality audits or exports.
"""
class OpenStreetMapXmlHandler(xml.sax.ContentHandler):
def __init__(self):
"""
Constructor.
This class is intended to be used as a context manager.
        The object's state keeps a trace of the stack while parsing, which makes it possible to collect
        information from children. The stack entry is destroyed when the end event occurs, which keeps
        memory usage low while parsing.
        The _stack internal variable stores tuples of:
- element unique identifier
- element name (as provided by start event)
- element attributes (as provided by start event)
"""
        xml.sax.ContentHandler.__init__(self)  # super() fails here when ContentHandler is an old-style class (Python 2)
def __enter__(self):
"""
Context manager entry point.
"""
        self._id = 0 #unique identifier incremented at each new element
self._stack = [ ] #current stack of element being read
self._element_tags = Counter() #counter of element tags
self._element_ancestors = defaultdict(set) #collection of ancestors per tag
self._start_callbacks = [ ] #start event callbacks
self._end_callbacks = [ ] #end event callbacks
self._children = { } #children elements of elements being read
return self
def __exit__(self, *args):
"""
Context manager exit point.
"""
pass
def startElement(self, name, attrs):
"""
Method invoked when starting to read an element in XML dataset.
        This method is part of the xml.sax.ContentHandler interface and is overridden here.
- name: tag of element being read
- attrs: element attributes
"""
#Get identifier for current element
identifier = self._requestUniqueIdentifier()
        # Does the element have a parent? If yes, get its id.
try:
parent_tuple = self._stack[-1]
if parent_tuple[1] == 'osm':
#We ignore osm element as it has too many children
parent = None
else:
parent = parent_tuple[0]
except IndexError:
parent = None
        # Use the current stack to build the ancestor path
ancestor = ".".join([s[1] for s in self._stack])
self._element_ancestors[name].add(ancestor)
#Update tag counter
self._element_tags[name] += 1
#Update parent children (if any)
if parent is not None:
self._children[parent].append((name, attrs))
#Initialisation of own children
self._children[identifier] = [ ]
#Update stack
self._stack.append((identifier, name, attrs))
#Use registered callbacks
for callback in self._start_callbacks:
callback(self._stack, self._locator)
def endElement(self, name):
"""
Method invoked when ending to read an element in XML dataset.
        This method is part of the xml.sax.ContentHandler interface and is overridden here.
- name: tag of element being read
"""
#Get identifier
identifier = self._stack[-1][0]
#Use registered callbacks before element is cleaned
for callback in self._end_callbacks:
callback(name, self._children[identifier], self._locator)
#Cleaning
identifier, name, attrs = self._stack.pop(-1)
del self._children[identifier]
def getTagsCount(self):
"""
        Get a dictionary with tags count.
        - return: dictionary where keys are tags and values are counts
"""
return dict(self._element_tags)
def getTagsAncestors(self):
"""
        Get a dictionary with tags ancestors.
        - return: dictionary where keys are tags and values are the set of all different ancestor paths
"""
return dict(self._element_ancestors)
def registerStartEventCallback(self, func):
"""
Register a callback for start event.
        Note that the return value of the callback is ignored. Any exception raised by the callback is not caught by the handler,
so you should take care of catching all exceptions within the callback itself.
- func: a callable object taking stack and locator as arguments.
"""
self._start_callbacks.append(func)
def registerEndEventCallback(self, func):
"""
Register a callback for end event.
        Note that the return value of the callback is ignored. Any exception raised by the callback is not caught by the handler,
so you should take care of catching all exceptions within the callback itself.
- func: a callable object taking element name, element children and locator as arguments.
"""
self._end_callbacks.append(func)
def clearCallbacks(self):
"""
Remove all registered callbacks.
"""
self._end_callbacks = [ ]
self._start_callbacks = [ ]
def _requestUniqueIdentifier(self):
"""
Return a unique identifier used at parsing time.
- return: identifier
"""
self._id += 1
return self._id | 3.15625 | 3 |
da-assistant/stack_build.py | nc6/daml | 0 | 12797201 | #!/usr/bin/env python3
# Copyright (c) 2019 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from subprocess import call, STDOUT
from shutil import copyfile
import sys
import os
import fileinput
ORIGINAL_FIX_VERSION_HS = "gen-source/Version.hs.template"
GENERATED_VERSION_HS = "DA/Sdk/Cli/Version.hs"
ORIGINAL_MOCKSERVER_HS = "gen-source/Mockserver.hs.template"
GENERATED_MOCKSERVER_HS = "Mockserver.hs"
def main(version=None):
if version is None:
version = "HEAD"
print("Stack Builder started...")
try:
basedir = os.path.dirname(os.path.realpath(__file__))
gen_vsn_hs = "%s/%s" % (basedir, GENERATED_VERSION_HS)
print("Generating %s..." % GENERATED_VERSION_HS)
copyfile("%s/%s" % (basedir, ORIGINAL_FIX_VERSION_HS), gen_vsn_hs)
replace_template_var(gen_vsn_hs, "<VERSION-VAR>", version)
print("Generating %s..." % GENERATED_MOCKSERVER_HS)
copyfile("%s/%s" % (basedir, ORIGINAL_MOCKSERVER_HS), "%s/%s" % (basedir, GENERATED_MOCKSERVER_HS))
print("Running stack build...")
call(["stack", "build"], stderr=sys.stderr, stdout=sys.stdout)
finally:
try:
print("Removing generated files...")
            os.remove(gen_vsn_hs)
            os.remove("%s/%s" % (basedir, GENERATED_MOCKSERVER_HS))
except OSError:
pass
def replace_template_var(template_file, var, value):
with fileinput.FileInput(template_file, inplace=True, backup='.bak') as file:
for line in file:
print(line.replace(var, value), end='')
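# Example: replace_template_var(path, "<VERSION-VAR>", "1.2.3") rewrites the
# file at `path` in place, keeping a '.bak' backup next to it.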
if __name__== "__main__":
if len(sys.argv) > 1:
version = sys.argv[1]
else:
version = None
main(version) | 1.960938 | 2 |
carrus.py | caos21/Grodi | 2 | 12797202 | <reponame>caos21/Grodi
# Copyright 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
""" This module contains the classes functions and helpers to compute
the plasma.
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2019"
__credits__ = ["<NAME>"]
__license__ = "Apache 2.0"
__version__ = "0.0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Beta"
import numpy as np
import scipy.constants as const
from scipy.integrate import solve_ivp
PI = const.pi
KE = 1.0/(4.0*PI*const.epsilon_0)
INVKE = 1.0/KE
KB = const.Boltzmann
QE = const.elementary_charge
ME = const.electron_mass
def coulomb_floatpotential(qcharge, radius):
""" Floating potential
"""
return KE*qcharge/radius
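# Quick sanity check (illustrative numbers): for a particle of radius 10 nm
# carrying a single electron charge, coulomb_floatpotential(-QE, 10e-9)
# evaluates to roughly -0.14 V.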
def particle_potenergy(radius, zcharge):
""" Nanoparticle potential energy
"""
return -(KE*zcharge*QE**2)/radius
def tunnel(rtaff, radius, zcharge):
""" Tunneling probability
"""
prefac1 = -2./const.hbar
prefac2 = np.sqrt(2.*ME*particle_potenergy(rtaff, zcharge))
return np.exp(prefac1*prefac2*(rtaff*np.arccos(np.sqrt(radius/rtaff))
-np.sqrt(radius*(rtaff-radius))))
class TunnelFrequency:
""" Computes electron tunnel frequency
"""
def __init__(self, plasmasystem):
self.psys = plasmasystem
self.eaffinity = 4.05*const.elementary_charge
def __call__(self, zcharge, radius):
return self.ptunnel(zcharge, radius)
def rt_affinity(self, radius, zcharge):
""" Computes rt_affinity to particle to escape
"""
ainfinity = self.eaffinity
ainf = ainfinity * INVKE/QE**2
rtaff = zcharge/(zcharge/radius + ainf - (5.0/(8.0*radius)))
if np.isscalar(rtaff):
if rtaff < 0:
return 1000000.0
else:
rtaff[rtaff < 0] = 1000000.0
return rtaff
def ptunnel(self, zcharge, radius):
""" Tunnel frequency
"""
prefac1 = (-zcharge)*np.sqrt(2.*const.Boltzmann*self.psys.temperature/ME)*(0.5/radius)
rtaff = self.rt_affinity(radius, zcharge)
return prefac1*tunnel(rtaff, radius, zcharge)
class CollisionFrequency:
""" Stores and computes collision frequencies
"""
def __init__(self, plasmasystem, grid_data):
self.psys = plasmasystem
self.gdata = grid_data
self.tfrequency = TunnelFrequency(self.psys)
self.rmesh, self.qmesh = np.meshgrid(self.gdata.dpivots*0.5e-9,
self.gdata.qpivots*QE, indexing='ij')
self.rmesh, self.zmesh = np.meshgrid(self.gdata.dpivots*0.5e-9,
self.gdata.qpivots, indexing='ij')
self.rmesh2 = self.rmesh**2
self.phid = coulomb_floatpotential(self.qmesh, self.rmesh)
self.ion_velocity = 0.0
def compute_collisionfreq(self, energy, edensity, idensity, efreq, ifreq, tfreq):
""" Compute collision frequencies OML theory and Tunnel frequency
"""
kte = (2.0/3.0)*energy*QE
efreqfactor = 4.0 * PI * edensity * np.sqrt(kte/(2.0*PI*ME))
ion_energy_from_temperature = (3.0/2.0) * KB * self.psys.ion_temperature
ion_energy = (ion_energy_from_temperature
+ 0.5*self.psys.armass*self.ion_velocity*self.ion_velocity)
kti = (2.0/3.0)*ion_energy
ifreqfactor = 4.0 * PI * idensity * np.sqrt(kti/(2.0*PI*self.psys.armass))
efreq.fill(0)
ifreq.fill(0)
tfreq.fill(0)
gdata = self.gdata
rmesh2 = self.rmesh2
phid = self.phid
efreq[:, gdata.qpivots < 0] = (efreqfactor * rmesh2[:, gdata.qpivots < 0]
* np.exp(QE*phid[:, gdata.qpivots < 0]/kte))
efreq[:, gdata.qpivots >= 0] = (efreqfactor * rmesh2[:, gdata.qpivots >= 0]
* (1.0 + QE*phid[:, gdata.qpivots >= 0]/kte))
ifreq[:, gdata.qpivots <= 0] = (ifreqfactor * rmesh2[:, gdata.qpivots <= 0]
* (1.0 - QE*phid[:, gdata.qpivots <= 0]/kti))
ifreq[:, gdata.qpivots > 0] = (ifreqfactor * rmesh2[:, gdata.qpivots > 0]
* np.exp(-QE*phid[:, gdata.qpivots > 0]/kti))
for i, diam in enumerate(gdata.dpivots):
for j, zcharge in enumerate(gdata.qpivots[gdata.qpivots < 0]):
tfreq[i][j] = self.tfrequency(zcharge, 0.5e-9*diam)
for i, diam in enumerate(gdata.dpivots):
for j, zcharge in enumerate(gdata.qpivots[gdata.qpivots < 0]):
if (tfreq[i][j] > 1e6*ifreq[i][j]) and (ifreq[i][j] > efreq[i][j]):
tfreq[i][j] = 1e6*ifreq[i][j]
class Charging:
""" Compute nanoparticle charging rate
"""
def __init__(self, collision_frequency, grid_data):
"""
"""
self.coll = collision_frequency
self.grid_data = grid_data
self.nvols = self.grid_data.nvols
self.nchrgs = self.grid_data.nchrgs
self.efreq = np.zeros((self.nvols, self.nchrgs))
self.ifreq = np.zeros((self.nvols, self.nchrgs))
self.tfreq = np.zeros((self.nvols, self.nchrgs))
def compute_freqs(self, energy, edensity, idensity):
""" Compute frequencies
"""
self.coll.compute_collisionfreq(energy, edensity, idensity,
self.efreq, self.ifreq, self.tfreq)
def compute_plasmacharging(time, delta_t, grid_data, pchem,
growth_data, charging, plasma_sys):
""" Solve the plasma densities
"""
with_tunnel = plasma_sys.with_tunnel
nel = pchem.past_plasmadensity[0]
nar = pchem.past_plasmadensity[1]
npdensity = growth_data.next_density
ion_loss = np.sum(npdensity*charging.ifreq)/nar
electron_loss = np.sum(npdensity*charging.efreq)/nel
energy_loss = np.sum(charging.coll.phid*npdensity*charging.efreq)/nel
tunnel_gain = with_tunnel*np.sum(npdensity*charging.tfreq)
energy_gain = with_tunnel*np.sum(charging.coll.phid*npdensity*charging.tfreq)
pchem.density_sourcedrain = np.array([electron_loss, ion_loss, 0.0, 0.0,
tunnel_gain, energy_gain, energy_loss])
nano_qdens = np.sum(npdensity*grid_data.qpivots)
pchem.nano_qdens = nano_qdens
nano_qdens_rate = np.sum(growth_data.qrate2d*grid_data.qpivots)
pchem.nano_qdens_rate = nano_qdens_rate
plasma_sys = pchem.get_system()
sol = solve_ivp(plasma_sys, [time, time+delta_t], pchem.past_plasmadensity,
method='BDF', dense_output=False, t_eval=[time, time+delta_t])
pchem.next_plasmadensity = np.nan_to_num(sol.y.T[-1])
# quasineutrality
pchem.next_plasmadensity[3] = (pchem.next_plasmadensity[0]-pchem.nano_qdens
-pchem.next_plasmadensity[1])
| 1.757813 | 2 |
doc/helloworld/src/helloworld/app.py | ukris/typhoonae.buildout | 0 | 12797203 | <gh_stars>0
# -*- coding: utf-8 -*-
#
# Copyright 2009 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple hello world application."""
import google.appengine.api.xmpp
import google.appengine.ext.webapp
import google.appengine.ext.webapp.template
import logging
import wsgiref.handlers
class HelloWorldRequestHandler(google.appengine.ext.webapp.RequestHandler):
"""Simple request handler."""
def get(self):
"""Handles get."""
index = google.appengine.ext.webapp.template.render('index.html', {})
self.response.out.write(index)
class XMPPHandler(google.appengine.ext.webapp.RequestHandler):
"""Handles XMPP messages."""
def post(self):
"""Handles post."""
message = google.appengine.api.xmpp.Message(self.request.POST)
logging.info("Received XMPP message: %s" % message.body)
if message.body[0:5].lower() == 'hello':
message.reply("Hi, %s!" % message.sender)
class InviteHandler(google.appengine.ext.webapp.RequestHandler):
"""Invites one to a XMPP chat."""
def post(self):
"""Handles post."""
jid = self.request.get('jid')
if google.appengine.api.xmpp.get_presence(jid):
google.appengine.api.xmpp.send_invite(jid)
self.redirect('/')
app = google.appengine.ext.webapp.WSGIApplication([
('/_ah/xmpp/message/chat/', XMPPHandler),
('/invite', InviteHandler),
('/.*', HelloWorldRequestHandler),
], debug=True)
def main():
"""The main function."""
wsgiref.handlers.CGIHandler().run(app)
if __name__ == '__main__':
main()
| 2.40625 | 2 |
pygipo/management/commands/__init__.py | felixhummel/pygipo | 0 | 12797204 | # vim: set fileencoding=utf-8 filetype=python :
import logging
log = logging.getLogger(__name__) | 1.382813 | 1 |
__init__.py | halibot-extra/mastermind | 0 | 12797205 | <gh_stars>0
from .mastermind import MastermindModule
Default = MastermindModule
| 1.148438 | 1 |
0557_reverse_words_in_a_string_III.py | hotternative/leetcode | 0 | 12797206 | """
Given a string s, reverse the order of characters in each word within a sentence
while still preserving whitespace and initial word order.
1 <= s.length <= 5 * 10**4
s contains printable ASCII characters.
s does not contain any leading or trailing spaces.
There is at least one word in s.
All the words in s are separated by a single space.
"""
import unittest
class Solution:
def reverseWords(self, s: str) -> str:
# Runtime: 36 ms, Memory Usage: 14.7 MB
return ' '.join(w[::-1] for w in s.split(' '))
def reverseWords_two_pointers(self, s: str) -> str:
# Runtime: 148 ms, Memory Usage: 15.2 MB
s = list(s)
i = j = k = 0 # i: start of a word, j: current head location, k: end of a word
while j < len(s):
if s[j] == ' ':
k -= 1
while i < k:
s[k], s[i] = s[i], s[k]
i += 1
k -= 1
i = k = j + 1
elif j == len(s) - 1:
k = j
while i < k:
s[k], s[i] = s[i], s[k]
i += 1
k -= 1
else:
k = j + 1
j += 1
return ''.join(s)
class TestSolution(unittest.TestCase):
methods_to_test = [
func for func in dir(Solution) if callable(getattr(Solution, func)) and not func.startswith('__')]
def test1(self):
s = "Let's take LeetCode contest"
sol = Solution()
expected_output = "s'teL ekat edoCteeL tsetnoc"
for method in TestSolution.methods_to_test:
method_to_test = getattr(sol, method)
actual_output = method_to_test(s)
assert actual_output == expected_output
def test2(self):
s = "<NAME>"
sol = Solution()
expected_output = "doG gniD"
for method in TestSolution.methods_to_test:
method_to_test = getattr(sol, method)
actual_output = method_to_test(s)
assert actual_output == expected_output
| 3.84375 | 4 |
stelspec/core.py | behrouzz/stelspec | 0 | 12797207 | <filename>stelspec/core.py
"""
Module core
===========
This module retrieves data from ELODIE/SOPHIE archive. It has two classes,
Elodie and Sophie, both could be constructed by passing an object name.
Help on Elodie FITS files:
http://atlas.obs-hp.fr/elodie/500/download.html
Help on Sophie FITS files:
http://atlas.obs-hp.fr/sophie/spec_help.html
"""
import numpy as np
import pandas as pd
import requests
from urllib.request import urlretrieve
from .columns import desc_el_ccf, desc_el_spec, desc_so_ccf, desc_so_spec
def _get_df(url_, col_dc, int_cols, float_cols):
url = url_ + str(list(col_dc.keys())).replace("'", "").replace(" ", "")[1:-1]
req = requests.request('GET', url)
r = req.content.decode('utf-8')
lines = r.splitlines()
valid_lines = [i for i in lines if i[0]!='#']
cols = valid_lines[0].split(' ')
data_lines = [i.split('\t') for i in valid_lines[1:]]
df = pd.DataFrame(data_lines, columns=cols)
for i in df.columns:
df.loc[df[i]=='', i] = np.nan
df[float_cols] = df[float_cols].astype(float)
df[int_cols] = df[int_cols].astype(int)
return url, df
class Elodie:
def __init__(self, obj):
"""
Elodie class
Parameters
----------
obj (str) : object name
Methods
-------
ccf : return Cross-Correlation Functions table
        spec : return Spectra table
"""
self.obj = obj
self.BASE = 'http://atlas.obs-hp.fr/elodie/fE.cgi?'
def ccf(self):
"""
Elodie Cross-Correlation Functions table
"""
url_ = self.BASE + f'n=e501&o={self.obj}&ob=jdb&a=csv&&d='
int_cols = ['datenuit']
float_cols = ['jdb','exptim','sn','vfit','sigfit','ampfit','ctefit']
url, df = _get_df(url_, desc_el_ccf, int_cols, float_cols)
print(url.replace('a=csv', 'a=htab'))
return df
def spec(self):
"""
Elodie Spectra table
"""
url_ = self.BASE + f'o={self.obj}&a=csv&d='
int_cols = ['dataset']
float_cols = ['exptime','sn','vfit','sigfit','ampfit']
url, df = _get_df(url_, desc_el_spec, int_cols, float_cols)
print(url.replace('a=csv', 'a=htab'))
return df
def get_spec(self, dataset, imanum, path=None, s1d=True):
BASE = 'http://atlas.obs-hp.fr/elodie/E.cgi?'
s1 = '&z=s1d' if s1d else ''
PAR1 = f'&c=i&o=elodie:{dataset}/{imanum}'
PAR2 = s1 + '&a=mime:application/x-fits'
url = BASE + PAR1+ PAR2
sp_typ = 's1d_' if s1d else 's2d_'
filename = sp_typ + f'elodie_{dataset}_{imanum}.fits'
path = '' if path is None else path
urlretrieve(url, path+filename)
class Sophie:
def __init__(self, obj):
"""
Sophie class
Parameters
----------
obj (str) : object name
Methods
-------
ccf : return Cross-Correlation Functions table
        spec : return Spectra table
"""
self.obj = obj
self.BASE = 'http://atlas.obs-hp.fr/sophie/sophie.cgi?'
def ccf(self):
"""
Sophie Cross-Correlation Functions table
"""
url_ = self.BASE + f'n=sophiecc&ob=bjd&a=csv&o={self.obj}&d='
int_cols = ['seq','sseq','slen','nexp','expno','ccf_offline','maxcpp','lines']
float_cols = ['bjd','rv','err','dvrms','fwhm','span','contrast','sn26']
url, df = _get_df(url_, desc_so_ccf, int_cols, float_cols)
print(url.replace('a=csv', 'a=htab'))
return df
def spec(self):
"""
Sophie Spectra table
"""
url_ = self.BASE + f'n=sophie&a=csv&ob=bjd&c=o&o={self.obj}&d='
int_cols = ['seq','sseq','slen','nexp','expno']
float_cols = ['bjd','sn26','exptime']
url, df = _get_df(url_, desc_so_spec, int_cols, float_cols)
print(url.replace('a=csv', 'a=htab'))
return df
def get_spec(self, seq, path=None, s1d=True):
s1d = 's1d' if s1d==True else 'e2ds'
url = self.BASE + f'c=i&a=mime:application/fits&o=sophie:[{s1d},{seq}]'
filename = f'sophie_[{s1d},{seq}].fits'
path = '' if path is None else path
urlretrieve(url, path+filename)
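# Minimal usage sketch (requires network access; the object name below is only
# an example, not a value taken from the archive):
#
#   from stelspec.core import Elodie
#   el = Elodie('HD217014')
#   ccf_table = el.ccf()     # Cross-Correlation Functions table
#   spec_table = el.spec()   # Spectra table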
| 2.734375 | 3 |
ondewo/survey/survey_pb2_grpc.py | ondewo/ondewo-survey-client-python | 0 | 12797208 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from ondewo.survey import survey_pb2 as ondewo_dot_survey_dot_survey__pb2
class SurveysStub(object):
"""///// Services ///////
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.CreateSurvey = channel.unary_unary(
'/ondewo.survey.Surveys/CreateSurvey',
request_serializer=ondewo_dot_survey_dot_survey__pb2.CreateSurveyRequest.SerializeToString,
response_deserializer=ondewo_dot_survey_dot_survey__pb2.Survey.FromString,
)
self.GetSurvey = channel.unary_unary(
'/ondewo.survey.Surveys/GetSurvey',
request_serializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyRequest.SerializeToString,
response_deserializer=ondewo_dot_survey_dot_survey__pb2.Survey.FromString,
)
self.UpdateSurvey = channel.unary_unary(
'/ondewo.survey.Surveys/UpdateSurvey',
request_serializer=ondewo_dot_survey_dot_survey__pb2.UpdateSurveyRequest.SerializeToString,
response_deserializer=ondewo_dot_survey_dot_survey__pb2.Survey.FromString,
)
self.DeleteSurvey = channel.unary_unary(
'/ondewo.survey.Surveys/DeleteSurvey',
request_serializer=ondewo_dot_survey_dot_survey__pb2.DeleteSurveyRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.ListSurveys = channel.unary_unary(
'/ondewo.survey.Surveys/ListSurveys',
request_serializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysRequest.SerializeToString,
response_deserializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysResponse.FromString,
)
self.GetSurveyAnswers = channel.unary_unary(
'/ondewo.survey.Surveys/GetSurveyAnswers',
request_serializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyAnswersRequest.SerializeToString,
response_deserializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString,
)
self.GetAllSurveyAnswers = channel.unary_unary(
'/ondewo.survey.Surveys/GetAllSurveyAnswers',
request_serializer=ondewo_dot_survey_dot_survey__pb2.GetAllSurveyAnswersRequest.SerializeToString,
response_deserializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString,
)
self.CreateAgentSurvey = channel.unary_unary(
'/ondewo.survey.Surveys/CreateAgentSurvey',
request_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString,
response_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString,
)
self.UpdateAgentSurvey = channel.unary_unary(
'/ondewo.survey.Surveys/UpdateAgentSurvey',
request_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString,
response_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString,
)
self.DeleteAgentSurvey = channel.unary_unary(
'/ondewo.survey.Surveys/DeleteAgentSurvey',
request_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
class SurveysServicer(object):
"""///// Services ///////
"""
def CreateSurvey(self, request, context):
"""Create a Survey and an empty NLU Agent for it
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetSurvey(self, request, context):
"""Retrieve a Survey message from the Database and return it
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateSurvey(self, request, context):
"""Update an existing Survey message from the Database and return it
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteSurvey(self, request, context):
"""Delete a survey and its associated agent (if existent)
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListSurveys(self, request, context):
"""Returns the list of all surveys in the server
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetSurveyAnswers(self, request, context):
"""Retrieve answers to survey questions collected in interactions with a survey agent for a specific session
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetAllSurveyAnswers(self, request, context):
"""Retrieve all answers to survey questions collected in interactions with a survey agent in any session
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateAgentSurvey(self, request, context):
"""Populate and configures an NLU Agent from a Survey
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateAgentSurvey(self, request, context):
"""Update an NLU agent from a survey
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteAgentSurvey(self, request, context):
"""Deletes all data of an NLU agent associated to a survey
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_SurveysServicer_to_server(servicer, server):
rpc_method_handlers = {
'CreateSurvey': grpc.unary_unary_rpc_method_handler(
servicer.CreateSurvey,
request_deserializer=ondewo_dot_survey_dot_survey__pb2.CreateSurveyRequest.FromString,
response_serializer=ondewo_dot_survey_dot_survey__pb2.Survey.SerializeToString,
),
'GetSurvey': grpc.unary_unary_rpc_method_handler(
servicer.GetSurvey,
request_deserializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyRequest.FromString,
response_serializer=ondewo_dot_survey_dot_survey__pb2.Survey.SerializeToString,
),
'UpdateSurvey': grpc.unary_unary_rpc_method_handler(
servicer.UpdateSurvey,
request_deserializer=ondewo_dot_survey_dot_survey__pb2.UpdateSurveyRequest.FromString,
response_serializer=ondewo_dot_survey_dot_survey__pb2.Survey.SerializeToString,
),
'DeleteSurvey': grpc.unary_unary_rpc_method_handler(
servicer.DeleteSurvey,
request_deserializer=ondewo_dot_survey_dot_survey__pb2.DeleteSurveyRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'ListSurveys': grpc.unary_unary_rpc_method_handler(
servicer.ListSurveys,
request_deserializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysRequest.FromString,
response_serializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysResponse.SerializeToString,
),
'GetSurveyAnswers': grpc.unary_unary_rpc_method_handler(
servicer.GetSurveyAnswers,
request_deserializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyAnswersRequest.FromString,
response_serializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.SerializeToString,
),
'GetAllSurveyAnswers': grpc.unary_unary_rpc_method_handler(
servicer.GetAllSurveyAnswers,
request_deserializer=ondewo_dot_survey_dot_survey__pb2.GetAllSurveyAnswersRequest.FromString,
response_serializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.SerializeToString,
),
'CreateAgentSurvey': grpc.unary_unary_rpc_method_handler(
servicer.CreateAgentSurvey,
request_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.FromString,
response_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.SerializeToString,
),
'UpdateAgentSurvey': grpc.unary_unary_rpc_method_handler(
servicer.UpdateAgentSurvey,
request_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.FromString,
response_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.SerializeToString,
),
'DeleteAgentSurvey': grpc.unary_unary_rpc_method_handler(
servicer.DeleteAgentSurvey,
request_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'ondewo.survey.Surveys', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Surveys(object):
"""///// Services ///////
"""
@staticmethod
def CreateSurvey(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/CreateSurvey',
ondewo_dot_survey_dot_survey__pb2.CreateSurveyRequest.SerializeToString,
ondewo_dot_survey_dot_survey__pb2.Survey.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetSurvey(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/GetSurvey',
ondewo_dot_survey_dot_survey__pb2.GetSurveyRequest.SerializeToString,
ondewo_dot_survey_dot_survey__pb2.Survey.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def UpdateSurvey(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/UpdateSurvey',
ondewo_dot_survey_dot_survey__pb2.UpdateSurveyRequest.SerializeToString,
ondewo_dot_survey_dot_survey__pb2.Survey.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DeleteSurvey(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/DeleteSurvey',
ondewo_dot_survey_dot_survey__pb2.DeleteSurveyRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListSurveys(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/ListSurveys',
ondewo_dot_survey_dot_survey__pb2.ListSurveysRequest.SerializeToString,
ondewo_dot_survey_dot_survey__pb2.ListSurveysResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetSurveyAnswers(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/GetSurveyAnswers',
ondewo_dot_survey_dot_survey__pb2.GetSurveyAnswersRequest.SerializeToString,
ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetAllSurveyAnswers(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/GetAllSurveyAnswers',
ondewo_dot_survey_dot_survey__pb2.GetAllSurveyAnswersRequest.SerializeToString,
ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CreateAgentSurvey(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/CreateAgentSurvey',
ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString,
ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def UpdateAgentSurvey(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/UpdateAgentSurvey',
ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString,
ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DeleteAgentSurvey(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/DeleteAgentSurvey',
ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| 1.859375 | 2 |
sqpyte/opcode.py | hpi-swa-lab/SQPyte | 3 | 12797209 | from rpython.rlib.unroll import unrolling_iterable
dual_implementation_opcodes = [
'Add_Subtract_Multiply_Divide_Remainder',
'AggFinal',
'AggStep',
'Affinity',
'Cast',
'CollSeq',
'Compare',
'Copy',
'EndCoroutine',
'Function',
'Gosub',
'Goto',
'IdxLE_IdxGT_IdxLT_IdxGE',
'IdxRowid',
'IfPos',
'IfZero',
'If_IfNot',
'InitCoroutine',
'Integer',
'IsNull',
'Jump',
'MakeRecord',
'Move',
'MustBeInt',
'Ne_Eq_Gt_Le_Lt_Ge',
'Next',
'NextIfOpen',
'NotExists',
'NotNull',
'Null',
'Once',
'OpenRead_OpenWrite',
'Real',
'RealAffinity',
'ResultRow',
'Return',
'SCopy',
'Seek',
'SeekLT_SeekLE_SeekGE_SeekGT',
'Sequence',
'Variable',
'Yield',
]
unrolling_dual_implementation_opcodes = unrolling_iterable(dual_implementation_opcodes)
class OpcodeDefaults(object):
OpenRead_OpenWrite = False
Cast = False
OpcodeDefaults = OpcodeDefaults()
for op in dual_implementation_opcodes:
if not hasattr(OpcodeDefaults, op):
setattr(OpcodeDefaults, op, True)
class OpcodeStatus(object):
_immutable_fields_ = ["frozen", "use_flag_cache"] + dual_implementation_opcodes
def __init__(self, use_flag_cache):
self.use_flag_cache = use_flag_cache
self.frozen = False
for op in unrolling_dual_implementation_opcodes:
setattr(self, op, getattr(OpcodeDefaults, op))
def set_use_translated(self, op, value):
if self.frozen:
raise TypeError("too late to change")
if self.use_flag_cache:
raise TypeError("can't change if flag cache is used")
for whichop in unrolling_dual_implementation_opcodes:
if whichop == op:
setattr(self, whichop, value)
if whichop == "Compare":
self.Jump = value
elif whichop == "Jump":
self.Compare = value
elif whichop == "AggStep":
self.AggFinal = value
elif whichop == "AggFinal":
self.AggStep = value
def freeze(self):
if not self.frozen:
self.frozen = True
def disable_from_cmdline(self, s):
if s == "all":
for op in unrolling_dual_implementation_opcodes:
setattr(self, op, False)
return
specs = s.split(":")
for spec in specs:
if spec:
self.set_use_translated(spec, False)
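# Example (sketch): turning off selected opcodes from a command-line option value.
#
#   status = OpcodeStatus(use_flag_cache=False)
#   status.disable_from_cmdline("Goto:Compare")   # disabling Compare also disables Jump
#   status.freeze()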
| 2.25 | 2 |
src/cities.py | codefortallahassee/TEMP_REST_API_Cities | 0 | 12797210 | <filename>src/cities.py
# requires
# pip install airtable
# pip install airtable-python-wrapper
import json
import airtable
from airtable import Airtable
key_file = open("../resources/key")
BASE_ID = key_file.readline().rstrip()  # found in url of API documentation for table
key_file.close()
CITIES_TABLE = "USCities"
def create_cities_object():
return Airtable(BASE_ID, CITIES_TABLE)
def airtable_call():
airtable_object = Airtable(BASE_ID, CITIES_TABLE)
records = airtable_object.get_all()
cities_list = []
for record in records:
od = {'Id': []} # Original Dictionary
try:
od['Id'] = record['Cities']['GEO.id']
except KeyError:
continue # every record needs an ID, skip if there is not one
try:
od['Name'] = record['Cities']['GEO.display-label']
except KeyError:
pass
cities_list.append(od)
with open("output.json", "w") as f:
        json.dump(cities_list, f, indent=2)
return cities_list
| 3.171875 | 3 |
newsdb_report/newsdb.py | stricklandrw/udacity-ipnd | 0 | 12797211 | <gh_stars>0
#!/usr/bin/env python
"""Database article and errors report generator."""
import datetime
import psycopg2
import bleach
DBNAME = "news"
TOP_ARTICLES = "3"
def get_article_counts(parameters):
"""Return all posts from the 'database', most recent first."""
"""Open PostGRES database session and returns a new connection instance"""
db = psycopg2.connect(database=DBNAME)
"""Sets up interaction with the database to perform operations"""
c = db.cursor()
"""Pull article titles and hit counts from article_hits database view"""
"""Example - ('Candidate is jerk, alleges rival', 338647L)"""
sql = '''SELECT * FROM article_hits limit (%s);'''
data = (bleach.clean(parameters),)
"""Execute sends command to the database"""
c.execute(sql, data)
"""Returns all information from executed command"""
print """\r\nWhat are the most popular three articles of all time?\r\n"""
for row in c.fetchall():
title = row[0]
count = row[1]
print '''"''' + str(title) + '''" - ''' + str(count) + """ views"""
"""Closes database connection"""
db.close()
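# For reference, get_article_counts("3") prints lines shaped like:
#   "Candidate is jerk, alleges rival" - 338647 views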
def popular_authors():
"""Return all posts from the 'database', most recent first."""
"""Open PostGRES database session and returns a new connection instance"""
db = psycopg2.connect(database=DBNAME)
"""Sets up interaction with the database to perform operations"""
c = db.cursor()
"""Pull correlated data of authors and the sums of hits against all their
articles from article_hits database view"""
"""Example - ('<NAME>', Decimal('507594'))"""
sql = """SELECT authors.name, SUM( article_hits.hits )
FROM authors, articles, article_hits
WHERE article_hits.article_title = articles.title
AND authors.id = articles.author
GROUP BY authors.name
ORDER BY sum desc;"""
"""Execute sends command to the database"""
c.execute(sql)
"""Return all info from executed command & formats for report view"""
print """\r\nWho are the most popular article authors of all time?\r\n"""
for row in c.fetchall():
author = row[0]
count = row[1]
print str(author) + """ - """ + str(count) + """ views"""
"""Closes database connection"""
db.close()
def error_report():
"""Return all posts from the 'database', most recent first."""
"""Open PostGRES database session and returns a new connection instance"""
db = psycopg2.connect(database=DBNAME)
"""Sets up interaction with the database to perform operations"""
c = db.cursor()
"""Pull days from errors view of database with more than 1% error"""
"""Ex. - ('July 17, 2016', Decimal('97.7'))"""
sql = """SELECT to_char(date, 'FMMonth FMDD, FMYYYY') as day, percent_good
FROM errors
WHERE percent_good < '99'
ORDER BY date;"""
"""Execute sends command to the database"""
c.execute(sql)
"""Returns all info from executed command and formats for report viewing"""
print "\r\nOn which days did more than 1% of requests lead to errors?\r\n"
for row in c.fetchall():
date = row[0]
error_percent = 100 - row[1]
print str(date) + """ - """ + str(error_percent) + """% errors"""
"""Closes database connection"""
db.close()
def report():
"""Generate a report of the most popular 3 articles of all time."""
get_article_counts(TOP_ARTICLES)
"""Generate a report of the most popular authors of all time."""
popular_authors()
"""Generate a report of the day(s) with more than 1% errors requests."""
error_report()
report()
| 3.21875 | 3 |
control/init.py | KentStateRobotics/Robot_Control | 0 | 12797212 | <filename>control/init.py
#Kent State University - RMC team
#<NAME> 2018
#
#Starts program
import control
SOCK_PORT = 4242
HTTP_PORT = 80
robotContoler = control.control(HTTP_PORT, SOCK_PORT)
| 1.796875 | 2 |
herokuexample/run_glm.py | baogorek/HerokuExample | 0 | 12797213 | import os
import datetime
import psycopg2
import numpy as np
import pandas as pd
#import statsmodels.api as sm
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod.families import Binomial
from statsmodels.genmod.families.links import probit
DATABASE_URL = os.environ['DATABASE_URL']
conn = psycopg2.connect(DATABASE_URL, sslmode='require')
cur = conn.cursor() # cursor needed to perform db ops
cur.execute("SELECT * FROM Iris;")
iris_df = pd.DataFrame(cur.fetchall())
X = np.array(iris_df.iloc[:, 0:4])
y = np.array(iris_df.iloc[:, 4] == 'virginica', dtype=int)
weight = np.ones(150)
probit_link = probit()
bin_family = Binomial(probit_link)
my_glm = GLM(y, X, freq_weights = weight, family = bin_family)
my_glm_fit = my_glm.fit()
theta = my_glm_fit.params
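# theta holds one probit coefficient per predictor column of X (four here,
# since no intercept column was added before fitting).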
current_dt = datetime.datetime.now()
cur.execute("INSERT INTO scores VALUES (%s, %s, %s, %s, %s)",
(str(current_dt), theta[0], theta[1], theta[2], theta[3]))
conn.commit()
cur.close()
conn.close()
| 2.453125 | 2 |
quadpy/e3r/_stroud_secrest.py | whzup/quadpy | 0 | 12797214 | import numpy
from sympy import Rational as frac
from sympy import pi, sqrt
from ..helpers import article, fsd, pm, pm_roll, untangle
from ._helpers import E3rScheme
citation = article(
authors=["<NAME>", "<NAME>"],
title="Approximate integration formulas for certain spherically symmetric regions",
journal="Math. Comp.",
volume="17",
year="1963",
pages="105-135",
url="https://doi.org/10.1090/S0025-5718-1963-0161473-0",
)
def stroud_secrest_07():
nu, xi = [sqrt(15 - p_m * 3 * sqrt(5)) for p_m in [+1, -1]]
A = frac(3, 5)
B = frac(1, 30)
data = [(A, numpy.array([[0, 0, 0]])), (B, pm_roll(3, [xi, nu]))]
points, weights = untangle(data)
weights *= 8 * pi
return E3rScheme("Stroud-Secrest VII", weights, points, 5, citation)
def stroud_secrest_08():
nu = sqrt(30)
eta = sqrt(10)
A = frac(3, 5)
B = frac(2, 75)
C = frac(3, 100)
data = [(A, numpy.array([[0, 0, 0]])), (B, fsd(3, (nu, 1))), (C, pm(3, eta))]
points, weights = untangle(data)
weights *= 8 * pi
return E3rScheme("Stroud-Secrest VIII", weights, points, 5, citation)
def stroud_secrest_09():
eta = sqrt(10)
xi, nu = [sqrt(15 - p_m * 5 * sqrt(5)) for p_m in [+1, -1]]
A = frac(3, 5)
B = frac(1, 50)
data = [(A, numpy.array([[0, 0, 0]])), (B, pm(3, eta)), (B, pm_roll(3, [xi, nu]))]
points, weights = untangle(data)
weights *= 8 * pi
return E3rScheme("Stroud-Secrest IX", weights, points, 5, citation)
def stroud_secrest_10():
sqrt130 = sqrt(130)
nu = sqrt((720 - 24 * sqrt130) / 11)
xi = sqrt(288 + 24 * sqrt130)
eta = sqrt((-216 + 24 * sqrt130) / 7)
A = (5175 - 13 * sqrt130) / 8820
B = (3870 + 283 * sqrt130) / 493920
C = (3204 - 281 * sqrt130) / 197568
# ERR in Stroud's book: 917568 vs. 197568
D = (4239 + 373 * sqrt130) / 197568
data = [
(A, numpy.array([[0, 0, 0]])),
(B, fsd(3, (nu, 1))),
(C, fsd(3, (xi, 2))),
(D, pm(3, eta)),
]
points, weights = untangle(data)
weights *= 8 * pi
return E3rScheme("Stroud-Secrest X", weights, points, 7, citation)
def stroud_secrest_11():
sqrt5 = sqrt(5)
sqrt39 = sqrt(39)
sqrt195 = sqrt(195)
nu, xi = [
sqrt(-50 + p_m * 10 * sqrt5 + 10 * sqrt39 - p_m * 2 * sqrt195)
for p_m in [+1, -1]
]
eta = sqrt(36 + 4 * sqrt39)
mu, lmbda = [
sqrt(54 + p_m * 18 * sqrt5 + 6 * sqrt39 + p_m * 2 * sqrt195) for p_m in [+1, -1]
]
A = (1725 - 26 * sqrt39) / 2940
B = (1065 + 171 * sqrt39) / 54880
C = (297 - 47 * sqrt39) / 32928
data = [
(A, numpy.array([[0, 0, 0]])),
(B, pm_roll(3, [xi, nu])),
(C, pm(3, eta)),
(C, pm_roll(3, [lmbda, mu])),
]
points, weights = untangle(data)
weights *= 8 * pi
return E3rScheme("Stroud-Secrest XI", weights, points, 7, citation)
| 2.4375 | 2 |
src/filter_structs.py | mimirblockchainsolutions/w3aio | 3 | 12797215 | <gh_stars>1-10
from .types import Types
import logging
log = logging.getLogger(__name__)
"""
{
"addressFilter":<address> OPTIONAL,
"bnFilter":<uint256> OPTIONAL,
"abiFilters": OPTIONAL
[
{
name STRING REQUIRED
rvalue HEXSTR OR INT REQUIRED
type STRING REQUIRED
op STRING REQUIRED
}
]
}
"""
#all and logic mapped
class EventFilter(object):
__slots__=["_abiFilters","_bnFilter","_addressFilter","_callback"]
def __init__(self,fltr,callback):
self._callback = callback
bf = fltr.get("bnFilter",None)
self._bnFilter = BNFilter(bf) if bf else None
af = fltr.get("addressFilter",None)
self._addressFilter = AddressFilter(af) if af else None
self._abiFilters = [ABIFilter(f) for f in fltr.get("abiFilters",[])]
@property
def topic(self):
return self._topic
async def test(self, event_log, event):
if self._abiFilters:
abitest = all([filtr.test(event) for filtr in self._abiFilters])
else:
abitest = True
addrtest = self._addressFilter.test(event_log.address) if self._addressFilter else True
bntest = self._bnFilter.test(event_log.blockNumber) if self._bnFilter else True
return all([abitest,addrtest,bntest])
@property
def callback(self):
return self._callback
class AddressFilter(object):
__slots__=["_value"]
def __init__(self,value):
self._value = value
def test(self,rvalue):
return self._value == rvalue
class BNFilter(object):
__slots__=["_rvalue","_op"]
def __init__(self,rvalue,op):
self._rvalue = rvalue
Types.checkUint256(self._rvalue)
self._op = op
assert SafeOP.ops(self._op)
def test(self,event_log):
test = getattr(SafeOP,self._op)(event_log.blockNumber, self._rvalue)
assert type(test) is bool
return test
class ABIFilter(object):
__slots__=["_rvalue","_op","_name","_type"]
def __init__(self,fltr):
self._name = fltr["name"]
type_factory = getattr(Types,fltr["type"])
self._rvalue = type_factory(fltr["rvalue"])
self._type_factory = type_factory
self._op = fltr["op"]
assert SafeOP.ops(self._op)
def test(self,event):
lvalue = event[self._name]
lvalue = self._type_factory(lvalue)
        result = getattr(SafeOP, self._op)(lvalue, self._rvalue)
return result
class SafeOP(object):
slots = ['_ops']
@staticmethod
def ops(op):
return op in ['eq','gt','lt','ge','le','ne',]
@staticmethod
def eq(a,b):
return a == b
@staticmethod
def gt(a,b):
return a > b
@staticmethod
def lt(a,b):
return not SafeOP.gt(a,b)
@staticmethod
def ge(a,b):
return SafeOP.gt(a,b) or SafeOP.eq(a,b)
@staticmethod
def le(a,b):
return SafeOP.lt(a,b) or SafeOP.eq(a,b)
@staticmethod
def ne(a,b):
return not SafeOP.eq(a,b)
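# Construction sketch based on the filter structure documented at the top of
# this module. Field values are illustrative only: the "type" string must name
# a converter available on Types (not shown here), and `my_callback` stands for
# whatever callable you want invoked for matching events.
#
#   flt = EventFilter(
#       {
#           "addressFilter": "0xdeadbeef...",
#           "abiFilters": [
#               {"name": "value", "rvalue": 10**18, "type": "uint256", "op": "ge"},
#           ],
#       },
#       my_callback,
#   )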
| 2.46875 | 2 |
update_dd.py | alanmitchell/update-degree-days | 0 | 12797216 | <reponame>alanmitchell/update-degree-days<gh_stars>0
#!/usr/local/bin/python3.6
"""Script that adds monthly heating degree day values to a pickled
Pandas DataFrame with the path 'data/degree_days.pkl' (compression = 'bz2').
It also saves the DataFrame as a CSV file at 'data/degree_days.csv'. The
new degree-day information comes from the AHFC BMON site, https://bms.ahfc.us .
This script is typically run from a Cron job that schedules the script to
run on the first day of the month so that the prior month's degree days will
be available. Don't run the script late in the month or a partial month
may be prematurely added to the DataFrame because it satisfies the
MIN_COVERAGE check described below.
This script assumes the pickled DataFrame already exists and has the
following format:
month hdd60 hdd65
station
PAED 2018-02-01 1257.648675 1397.648675
PAED 2018-03-01 1028.027773 1183.027773
The index is the National Weather Service 4-letter station code. The
'month' column is a first-of-the-month date identifying the month whose
degree-days are shown. 'hdd60' and 'hdd65' are the heating degree-day
values: the first is base 60 degree F values and the second is base 65
deg F values.
This script will acquire temperature data from the AHFC BMON site in order
to calculate the degree-days for the most recent months not already
present in the DataFrame. All stations found in the index of the DataFrame
will be updated. The script assumes that the BMON sensor ID for a
weather station's temperature data is the 4-letter station code with '_temp'
appended, e.g. 'PAMR_temp'.
The MIN_COVERAGE constant in the script controls the minimum amount of data
coverage a month must have before being included. Missing data is filled
in with the average value for the rest of the hours that do have data.
-----------------------------------
NOTES ON UTILIZING THE DATA
To read this DataFrame back into a Python script, you can excecute the
following if the DataFrame is available on a local drive:
import pandas as pd
df = pd.read_pickle('degree_days.pkl', compression='bz2')
If the file is located on a web server, you can read it with the following
code:
import pandas as pd
import requests
from io import BytesIO
b = requests.get('http://ahfc.webfactional.com/data/degree_days.pkl').content
d = pd.read_pickle(BytesIO(b), compression='bz2')
Once you have a DataFrame, you can extract that portion of the DataFrame that
applies to one site by:
df_one_site = df.loc['PAMR']
or
df_one_site = df.query("station == 'PAMR'")
(slower than above technique)
To extract one site with a subset of the months:
df_one_site = df.query("station == 'PAMR' and month >= '2018-01-01'")
"""
from os.path import dirname, join, realpath
import sys
from datetime import datetime, timedelta
import pandas as pd
import requests
# Minimum fraction of the hours in a month that must have data in order
# to include the month.
MIN_COVERAGE = 0.7
print('\nScript Start: {}'.format(datetime.now().ctime()))
# path to this directory
APP_PATH = dirname(realpath(__file__))
# URL to the AHFC BMON site API
BMON_URL = 'https://bms.ahfc.us/api/v1/readings/{}/'
def dd_for_site(stn, start_date):
"""Returns a Pandas Dataframe of monthly heating degree-day values for
'stn' (a NWS weather site code). Degree days start in the month
that 'start_date' (Python date/time object)
falls in and continue through the end of available
data. In the returned DataFrame, the index
has a timestamp for each month returned, that being the first day
of the month. The columns of the DataFrame are "hdd65" and "hdd60"
to designate base 65 F degree-days and base 60 F degree-days.
Temperature data used to calculate degree-days comes from the AHFC
BMON site.
Missing hours are assumed to not deviate from the average of the
data present. The column 'coverage' indicates the fraction of
the months hours that actually have data.
"""
# get beginning of month
st_dt_1 = start_date.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
params = {
'start_ts': st_dt_1.strftime('%Y-%m-%d'),
'averaging': '1H'
}
sensor_id = '{}_temp'.format(stn)
resp = requests.get(BMON_URL.format(sensor_id), params=params).json()
if resp['status']=='success':
df = pd.DataFrame(resp['data']['readings'], columns=['ts', 'temp'])
df.set_index('ts', inplace=True)
df.index = pd.to_datetime(df.index)
# calculate the percentage of each month that has data
dfc = df.resample('1M').count()
dfc['total_hours'] = [i.day * 24 for i in dfc.index] # index is last day of the month
dfc['coverage'] = dfc.temp / dfc.total_hours
# Now back to the main dataframe to calc degree-days
df['hdd60'] = [(60.0 - x)/24.0 if x<60.0 else 0.0 for x in df.temp]
df['hdd65'] = [(65.0 - x)/24.0 if x<65.0 else 0.0 for x in df.temp]
df.drop(['temp'], axis=1, inplace=True)
dfm = df.resample('1M').mean()
dfm['coverage'] = dfc.coverage
dfm['hdd60'] = dfm.hdd60 * dfc.total_hours
dfm['hdd65'] = dfm.hdd65 * dfc.total_hours
# Convert index timestamps to beginning of the month
mos = [datetime(d.year, d.month, 1) for d in dfm.index]
dfm.index = mos
dfm.index.name = 'month'
else:
raise ValueError(str(resp['data']))
return dfm
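# Example call (sketch; needs the BMON API to be reachable and assumes the
# 'PAMR_temp' sensor exists, as described in the module docstring):
#
#   df_pamr = dd_for_site('PAMR', datetime(2023, 1, 1))
#   print(df_pamr[['hdd65', 'coverage']].head())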
if __name__ == '__main__':
df_exist = pd.read_pickle(join(APP_PATH, 'data/degree_days.pkl'), compression='bz2')
# list of new DataFrames to add to the existing one
new_dfs = []
for stn in df_exist.index.unique():
print('Processing {}: '.format(stn), end='')
try:
# get last month present for this station
last_mo = df_exist.loc[stn].month.max()
# get a date in the following month
next_mo = last_mo + timedelta(days=32) # could be a DST change in there; add 32 days to be safe
# get degree days for missing months
df_new = dd_for_site(stn, next_mo).query('coverage > @MIN_COVERAGE').copy()
if len(df_new):
# put this DataFrame in a form that can be concatenated to the existing one
df_new.reset_index(inplace=True)
df_new.index = [stn] * len(df_new)
df_new.index.name = 'station'
df_new.drop(columns=['coverage'], inplace=True)
# add it to the list of new DataFrames to eventually add to the
# degree-day DataFrame
new_dfs.append(df_new)
print('{} new months'.format(len(df_new)))
else:
print()
except:
print('{}: {}'.format(*sys.exc_info()[:2]))
# Create a new DataFrame that combines the existing data with the new.
df_final = pd.concat([df_exist] + new_dfs)
# get it sorted by station and month
df_final.reset_index(inplace=True)
df_final.sort_values(['station', 'month'], inplace=True)
df_final.set_index('station', inplace=True)
# Save the DataFrame as a compressed pickle and a CSV file.
df_final.to_pickle(join(APP_PATH, 'data/degree_days.pkl'), compression='bz2', protocol=4)
df_final.to_csv(join(APP_PATH, 'data/degree_days.csv'))
| 3.359375 | 3 |
src/Negamax.py | jrinder42/Checkers-AI | 0 | 12797217 | <filename>src/Negamax.py
from copy import deepcopy
import cPickle
from Board import Actions
class Negamax_AB(Actions):
BEST_MOVE = None
def __init__(self):
Actions.__init__(self)
def __deepcopy__(self, memodict={}): # faster than built-in
g = cPickle.loads(cPickle.dumps(self, -1))
return g
def game_over(self, board):
white_pieces = self.get_white_pieces(board)
black_pieces = self.get_black_pieces(board)
if not white_pieces:
print "Black has won"
return True
elif not black_pieces:
print "White has won"
return True
return False
def negamax(self, board, depth, alpha, beta, color):
if depth <= 0 or self.game_over(board):
return color * self.evaluate(board)
moves = self.generate_black_moves(board)
for move in moves:
child = deepcopy(board)
self.move_black(child, *move)
score = -self.negamax(child, depth - 1, -beta, -alpha, -color)
if score >= alpha:
alpha = score
self.BEST_MOVE = move
if alpha >= beta:
break
return alpha
def evaluate(self, board): # for AI
        # sum(my pieces) - sum(opponent pieces)
return len(self.get_black_pieces(board)) - len(self.get_white_pieces(board))
class Negamax(Actions):
BEST_MOVE = None
def __init__(self):
Actions.__init__(self)
def __deepcopy__(self, memodict={}): # faster than built-in
g = cPickle.loads(cPickle.dumps(self, -1))
return g
def game_over(self, board):
white_pieces = self.get_white_pieces(board)
black_pieces = self.get_black_pieces(board)
if not white_pieces:
print "Black has won"
return True
elif not black_pieces:
print "White has won"
return True
return False
def negamax(self, board, depth, color):
if depth == 0 or self.game_over(board):
return color * self.evaluate(board)
v = float('-inf')
moves = self.generate_black_moves(board)
self.BEST_MOVE = moves[0]
for move in moves:
child = deepcopy(board)
self.move_black(child, *move)
            score = -self.negamax(child, depth - 1, -color)
            if score > v:
                v = score
                self.BEST_MOVE = move
return v
def evaluate(self, board): # for AI
        # sum(my pieces) - sum(opponent pieces)
return len(self.get_black_pieces(board)) - len(self.get_white_pieces(board))
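# Usage sketch (the board object and move format come from Board.Actions, so
# the names below are placeholders):
#
#   ai = Negamax_AB()
#   ai.negamax(board, depth=4, alpha=float('-inf'), beta=float('inf'), color=1)
#   best_move = ai.BEST_MOVE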
| 2.9375 | 3 |
leetcode_python/Heap/ugly-number-ii.py | yennanliu/CS_basics | 18 | 12797218 | # V0
# V1
# https://blog.csdn.net/coder_orz/article/details/51317748
# IDEA : "unly number" : a number is an ugly number
# if all its prime factors are within [2, 3, 5].
# e.g. 6, 8 are ugly numbers, while 14 is not
# please note that 1 is ugly number as well
# IDEA : ITERATION
class Solution(object):
def isUgly(self, num):
"""
:type num: int
:rtype: bool
"""
if num <= 0:
return False
for i in [2, 3, 5]:
while num%i == 0:
num = num / i
return True if num == 1 else False
# V1'
# https://blog.csdn.net/coder_orz/article/details/51317748
# IDEA : RECURSION
class Solution(object):
def isUgly(self, num):
"""
:type num: int
:rtype: bool
"""
if num <= 0:
return False
if num == 1:
return True
if num % 2 == 0:
return self.isUgly(num/2)
elif num % 3 == 0:
return self.isUgly(num/3)
elif num % 5 == 0:
return self.isUgly(num/5)
else:
return False
# V1''
# https://blog.csdn.net/coder_orz/article/details/51317748
class Solution(object):
def isUgly(self, num):
"""
:type num: int
:rtype: bool
"""
return num > 0 == 30**30 % num
# V2
# Time: O(n)
# Space: O(1)
import heapq
class Solution(object):
# @param {integer} n
# @return {integer}
def nthUglyNumber(self, n):
ugly_number = 0
heap = []
heapq.heappush(heap, 1)
for _ in range(n):
ugly_number = heapq.heappop(heap)
if ugly_number % 2 == 0:
heapq.heappush(heap, ugly_number * 2)
elif ugly_number % 3 == 0:
heapq.heappush(heap, ugly_number * 2)
heapq.heappush(heap, ugly_number * 3)
else:
heapq.heappush(heap, ugly_number * 2)
heapq.heappush(heap, ugly_number * 3)
heapq.heappush(heap, ugly_number * 5)
return ugly_number
def nthUglyNumber2(self, n):
ugly = [1]
i2 = i3 = i5 = 0
while len(ugly) < n:
while ugly[i2] * 2 <= ugly[-1]: i2 += 1
while ugly[i3] * 3 <= ugly[-1]: i3 += 1
while ugly[i5] * 5 <= ugly[-1]: i5 += 1
ugly.append(min(ugly[i2] * 2, ugly[i3] * 3, ugly[i5] * 5))
return ugly[-1]
def nthUglyNumber3(self, n):
q2, q3, q5 = [2], [3], [5]
ugly = 1
for u in heapq.merge(q2, q3, q5):
if n == 1:
return ugly
if u > ugly:
ugly = u
n -= 1
q2 += 2 * u,
q3 += 3 * u,
q5 += 5 * u,
class Solution2(object):
ugly = sorted(2**a * 3**b * 5**c
for a in range(32) for b in range(20) for c in range(14))
def nthUglyNumber(self, n):
return self.ugly[n-1]
| 4 | 4 |
bin/reverse-geocode.py | straup/buildingequalsyes | 1 | 12797219 | #!/usr/bin/env python
import sys
import shapely
import sqlite3
import urllib2
import json
import time
import reverse_geoplanet
from shapely.geometry import Polygon
from shapely.geometry import LineString
def munge(path, reversegeo_endpoint) :
#
rg = reverse_geoplanet.reverse_geoplanet(reversegeo_endpoint)
dbconn = sqlite3.connect(path)
dbcurs = dbconn.cursor()
dbcurs.execute("SELECT COUNT(id) AS count FROM ways")
row = dbcurs.fetchone()
count = row[0]
offset = 0
limit = 5000
while offset < count :
sql = "SELECT * FROM ways LIMIT %s, %s" % (offset, limit)
print "%s (%s)" % (sql, count)
dbcurs.execute(sql)
for row in dbcurs.fetchall():
way_id, lat, lon, woeid, nodes, tags = row
if lat and lon:
pass
# continue
if woeid > 0:
continue
nodes = nodes.split(',')
points = []
for node_id in nodes:
dbcurs.execute("SELECT * FROM nodes WHERE id=?", (node_id, ))
node = dbcurs.fetchone()
points.append((node[2], node[1]))
center = None
if len(points) == 2:
line = LineString(points)
center = line.centroid
else :
points.append(points[0])
poly = Polygon(points)
center = poly.centroid
if not center:
print "no centroid for way %s" % way_id
print poly
continue
lat = center.y
lon = center.x
woeid = 0
geo = rg.reverse_geocode(lat, lon)
if geo:
woeid = geo['woeid']
print "[%s] update %s lat: %s, lon: %s, woeid: %s" % (offset, way_id, lat, lon, woeid)
dbcurs.execute("UPDATE ways SET lat=?, lon=?, woeid=? WHERE id=?", (lat, lon, woeid, way_id))
dbconn.commit()
time.sleep(2)
offset += limit
return
if __name__ == '__main__' :
path = sys.argv[1]
reversegeo = sys.argv[2]
munge(path, reversegeo)
| 2.71875 | 3 |
workflow/src/crawler.py | showyou/docker-chatbot-niko | 0 | 12797220 | <filename>workflow/src/crawler.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import json
import datetime
from sqlalchemy import and_
import random
# returns something like /home/*/hama_db
#exec_path = os.path.abspath(os.path.dirname(__file__)).rsplit("/",1)[0]
exec_path = "."
conf_path = exec_path+"/common/config.json"
ng_char_path = exec_path+"/common/ng_char.json"
sys.path.insert(0,exec_path)
from common import auth_api, model
import tweepy
# List of trend texts that should not be stored
g_ngTrend = [
"オフパコ",
"フルチン"
]
# Left empty here because the real list is read from a file
g_ng_char = []
dbSession = None
def read_json(fileName):
file = open(fileName,'r')
a = json.loads(file.read())
file.close()
return a
# True if the trend is an NG word or contains an NG phrase, otherwise False
def is_ng_trend(trend):
if trend in g_ngTrend: return True
for ng_char in g_ng_char:
if ng_char in trend:
return True
return False
"""
The text is acceptable = True
Duplicated, an RT, or the like = False
"""
def check_text(text, dbSession):
if( is_ng_trend(text) ): return False
#jTime = created_at + datetime.timedelta(hours = 9)
query = dbSession.query(model.Trend).filter(
model.Trend.text == text
)
if( query.count() > 0 ): return False
    # TODO: add part-of-speech filtering around here
t = model.Trend()
t.text = text
#t.datetime = jTime
dbSession.add(t)
return True
def main():
    # Fetch posts from Twitter and store them in the DB
userdata = read_json(conf_path)
    global g_ng_char
    g_ng_char = read_json(ng_char_path)
tw = auth_api.connect(userdata["consumer_token"],
userdata["consumer_secret"], exec_path+"/common/")
#print tw.rate_limit_status()
dbSession = model.startSession(userdata)
page_number = 0
update_flag = True
while update_flag:
update_flag = False
page_number += 1
if page_number > 1: break
#l = tw.home_timeline(page = page_number, count=10)
        # Tokyo coordinates hard-coded
woeid = tw.trends_closest(35.652832, 139.839478)[0]['woeid']
trends_place = tw.trends_place(woeid)
l = trends_place[0]['trends']
for s in l:
trend = s['name']
if trend.startswith("#"): trend = trend[1:]
#print(trend)
update_flag = check_text(trend,
dbSession)
if(not(update_flag)): continue
if(random.randint(0,1)):
text = "な、なによ……! ニコだって" + trend +\
"くらいできるんだから!!"
else:
text = trend + "と言えば?\nニコニー♪\nかわいい" +\
trend +"と言えば?\nニコニー♪"
try:
tw.update_status(text)
print("trend "+trend)
except tweepy.TweepError:
pass
#print("flag: ", update_flag)
if update_flag: break
dbSession.commit()
if __name__ == "__main__":
main()
| 2.421875 | 2 |
scripts/upload_questions.py | thecrux4020/telegram-bot | 9 | 12797221 | import boto3
from boto3.dynamodb.types import TypeDeserializer, TypeSerializer
import json
import logging
DYNAMODB_TABLE_NAME = "quizzes_questions"
def setup_logging():
""" Basic logging setup """
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
return logging
def load_questions():
with open("questions.json", "r") as f:
questions = json.loads(f.read())
return questions
def check_limitations(question):
if not "explanation" in question:
raise KeyError(f"explanation key not found in question id: {question['question_id']}")
elif not "question" in question:
raise KeyError(f"question key not found in question id: {question['question_id']}")
elif not "options" in question:
raise KeyError(f"options key not found in question id: {question['question_id']}")
elif not "correct_option" in question:
raise KeyError(f"correct_option key not found in question id: {question['question_id']}")
if len(question["explanation"]) > 200:
raise ValueError("explanation value is greater than 200 chars")
if len(question["question"]) > 255:
raise ValueError("question value is greater than 255 chars")
if len(question["options"]) > 10:
raise ValueError("options array is greater than 10")
for option in question["options"]:
if len(option) > 100:
raise ValueError(f"option: {option} is grater than 100 chars")
def serialize(question, type_serializer = TypeSerializer()):
question = {k: type_serializer.serialize(v) for k,v in question.items()}
return question
def upload_to_dynamo(client, question):
raw_question = serialize(question)
client.put_item(
TableName=DYNAMODB_TABLE_NAME,
Item=raw_question
)
def main():
client_dynamo = boto3.client('dynamodb')
logger = setup_logging()
logger.info("loadding questions from questions.json")
questions = load_questions()
logger.info("start processing questions")
for question in questions:
logger.info(f"check limitations for question id: {question['question_id']} ")
check_limitations(question)
logger.info(f"Limitation check pass, start uploading to dynamodb")
upload_to_dynamo(client_dynamo, question)
if __name__ == "__main__":
main() | 2.453125 | 2 |
assignment1/cs231n/classifiers/linear_svm.py | ivanistheone/cs231n | 1 | 12797222 | <gh_stars>1-10
from builtins import range
import numpy as np
from random import shuffle
from past.builtins import xrange
def svm_loss_naive(W, X, y, reg):
"""
Structured SVM loss function, naive implementation (with loops).
Inputs have dimension D, there are C classes, and we operate on minibatches
of N examples.
Inputs:
- W: A numpy array of shape (D, C) containing weights.
- X: A numpy array of shape (N, D) containing a minibatch of data.
- y: A numpy array of shape (N,) containing training labels; y[i] = c means
that X[i] has label c, where 0 <= c < C.
- reg: (float) regularization strength
Returns a tuple of:
- loss as single float
- gradient with respect to weights W; an array of same shape as W
"""
dW = np.zeros(W.shape) # 3073 x 10 zeros
# compute the loss and the gradient
num_classes = W.shape[1]
num_train = X.shape[0]
loss = 0.0
for i in range(num_train):
scores = X[i].dot(W)
correct_class_score = scores[y[i]]
for j in range(num_classes):
if j == y[i]:
continue
margin = scores[j] - correct_class_score + 1 # note delta = 1
if margin > 0:
loss += margin
dW[:,j] += 1*X[i]
dW[:,y[i]] += -1*X[i]
# Right now the loss is a sum over all training examples, but we want it
# to be an average instead so we divide by num_train.
loss /= num_train
# 1/N factor in front
dW /= num_train
# Add regularization to the loss.
loss += reg * np.sum(W * W)
#############################################################################
# TODO: #
# Compute the gradient of the loss function and store it dW. #
# Rather than first computing the loss and then computing the derivative, #
# it may be simpler to compute the derivative at the same time that the #
# loss is being computed. As a result you may need to modify some of the #
# code above to compute the gradient. #
#############################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
    dW += 2 * reg * W  # gradient of the regularization term reg * sum(W*W)
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
return loss, dW
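
# --- Illustrative sketch (editor addition, not part of the assignment code) ---
# A quick numeric sanity check of the analytic gradient above on tiny random
# data; the shapes (D=5, C=3, N=4) and the reg value are arbitrary examples.
def _check_svm_gradient_naive(h=1e-5):
    np.random.seed(0)
    W = np.random.randn(5, 3) * 0.01
    X = np.random.randn(4, 5)
    y = np.array([0, 1, 2, 1])
    _, dW = svm_loss_naive(W, X, y, reg=0.1)
    # central-difference estimate for one entry of W
    i, j = 2, 1
    W[i, j] += h
    loss_hi, _ = svm_loss_naive(W, X, y, reg=0.1)
    W[i, j] -= 2 * h
    loss_lo, _ = svm_loss_naive(W, X, y, reg=0.1)
    W[i, j] += h
    numeric = (loss_hi - loss_lo) / (2 * h)
    return numeric, dW[i, j]  # the two values should be close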
def svm_loss_vectorized(W, X, y, reg):
"""
Structured SVM loss function, vectorized implementation.
Inputs and outputs are the same as svm_loss_naive.
"""
num_train = X.shape[0]
#############################################################################
# TODO: #
# Implement a vectorized version of the structured SVM loss, storing the #
# result in loss. #
#############################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
S = X.dot(W) # scores (N,C)
    # build mask selecting only the correct classes (one-hot encoding of y_i)
mask = np.eye(W.shape[1], dtype=bool)[y]
# correct scores which we'll be subtracting from all other
correct_scores_vec = np.sum(np.where(mask, S, 0), axis=1)
correct_scores = correct_scores_vec[:,np.newaxis] # broadcasting-ready vec
# compute margins
M = S - correct_scores + 1 # margins (N,C)
M[mask] = 0
    pM = np.where(M>0, M, 0) # positive margins
# compute loss
loss = 1.0/num_train * np.sum(pM) + reg * np.sum(W * W)
    # margin contributions + regularization
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
#############################################################################
# TODO: #
# Implement a vectorized version of the gradient for the structured SVM #
# loss, storing the result in dW. #
# #
# Hint: Instead of computing the gradient from scratch, it may be easier #
# to reuse some of the intermediate values that you used to compute the #
# loss. #
#############################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
# We'll use dpM to store two contributions that tells us which rows of X we
# should to include in the calculation of dW = X.T.dot(dpM)
dpM = np.zeros((X.shape[0], W.shape[1])) # N x C zeros
    # first contribution: every active margin adds +X[i] to its (incorrect) class column
    pMactive = np.where(M>0, 1, 0)
    dpM += pMactive
    # second contribution: the correct class column gets -X[i] times the number of active margins
    dpM[mask] = -1*np.sum(pMactive, axis=1)
    # gradient: data term averaged over the batch plus the regularization term
    dW = 1.0/num_train * X.T.dot(dpM) + 2 * reg * W
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
return loss, dW
| 3.140625 | 3 |
turq/examples.py | XiaoboHe/turq | 45 | 12797223 | import pkgutil
import xml.etree.ElementTree
import docutils.core
def load_pairs():
# Load pairs of "example ID, rules code" for the test suite.
rst_code = _load_rst()
xml_code = docutils.core.publish_string(rst_code, writer_name='xml')
tree = xml.etree.ElementTree.fromstring(xml_code)
parsed = []
for section in tree.findall('./section'):
slug = section.get('ids').replace('-', '_')
for i, block in enumerate(section.findall('./literal_block'), start=1):
parsed.append(('%s_%d' % (slug, i), block.text))
return parsed
def load_html(initial_header_level):
# Render an HTML fragment ready for inclusion into a page.
rst_code = _load_rst()
parts = docutils.core.publish_parts(
rst_code, writer_name='html',
settings_overrides={'initial_header_level': initial_header_level})
return parts['fragment']
def _load_rst():
return pkgutil.get_data('turq', 'examples.rst')
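
# --- Illustrative sketch (editor addition, not part of the original module) ---
# Minimal use of the two loaders above, e.g. from a test or a page handler.
def _example_usage():
    pairs = load_pairs()                       # [('section_slug_1', '<rules code>'), ...]
    html = load_html(initial_header_level=2)   # fragment ready for embedding
    return pairs, html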
| 2.3125 | 2 |
easy/python3/c0050_204_count-primes/00_leetcode_0050.py | drunkwater/leetcode | 0 | 12797224 | <gh_stars>0
# DRUNKWATER TEMPLATE(add description and prototypes)
# Question Title and Description on leetcode.com
# Function Declaration and Function Prototypes on leetcode.com
#204. Count Primes
#Description:
#Count the number of prime numbers less than a non-negative number, n.
#Credits:
#Special thanks to @mithmatt for adding this problem and creating all test cases.
#class Solution:
# def countPrimes(self, n):
# """
# :type n: int
# :rtype: int
# """
# Time Is Money | 3.09375 | 3 |
forum/migrations/0001_initial.py | ghcis/forum | 0 | 12797225 | # Generated by Django 3.2.7 on 2021-09-11 13:54
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Thread',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=128, verbose_name='标题')),
('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='发布日期')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='用户')),
],
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='发布日期')),
('content', models.TextField(verbose_name='内容')),
('upvotes', models.IntegerField(default=0)),
('downvotes', models.IntegerField(default=0)),
('relpy_to', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='forum.post', verbose_name='回复')),
('thread', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='forum.thread')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='用户')),
],
),
]
| 1.828125 | 2 |
src/EcmsApi/tables/_base.py | Poseidon-Dev/ecms-api | 0 | 12797226 | <reponame>Poseidon-Dev/ecms-api<filename>src/EcmsApi/tables/_base.py
import os
__all__ = ['TableMixin']
class TableMixin:
"""
Table Mixin class to get quick properties from the table
"""
NAMESPACE = os.getenv('ECMS_HOST')
FORIEGN_KEYS = {}
def __init__(self):
self.TABLE = self.__class__
self.TABLE_NAME = self.__class__.__name__
@property
def namespace(self):
return self.NAMESPACE
@property
def table(self):
        return self.TABLE.__name__  # TABLE is already the class itself, not an instance
@property
def id(self):
return f'{self.TABLE_NAME}ID'
@property
def cols(self):
return [{k: v} for k, v in self.TABLE.__dict__.items() if '__' not in k]
@property
def column_names(self):
return [col for cols in self.cols for col, _ in cols.items()]
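
# --- Illustrative sketch (editor addition, not part of the original module) ---
# A hypothetical table class mixing in TableMixin; the column names and values
# below are made up purely to show what the properties above return.
class _ExampleTable(TableMixin):
    COMPANYNO = 'int'
    DIVISIONNO = 'int'

# _ExampleTable().id           -> '_ExampleTableID'
# _ExampleTable().column_names -> ['COMPANYNO', 'DIVISIONNO']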
| 2.1875 | 2 |
Hashtable/1124. Longest Well-Performing Interval.py | viewv/leetcode | 2 | 12797227 | class Solution:
def longestWPI(self, hours: List[int]) -> int:
        # res maps each prefix "score" to the first index at which it occurs
        res = dict()
        s = 0    # running score: +1 for a tiring day (> 8 hours), -1 otherwise
        ans = 0
        for i, c in enumerate(hours):
            s += 1 if c > 8 else -1
            if s > 0:
                # the whole prefix [0, i] is a well-performing interval
                ans = i + 1
            if s not in res:
                res[s] = i
            if s - 1 in res:
                # hours[res[s-1]+1 .. i] has exactly one more tiring day than non-tiring days
                ans = max(ans, i-res[s-1])
return ans
| 2.875 | 3 |
pymyenergi/ct.py | CJNE/pymyenergi | 5 | 12797228 | <reponame>CJNE/pymyenergi
class CT:
def __init__(self, name, value) -> None:
self._name = name
self._value = value
@property
def name(self):
"""Name of CT clamp"""
return self._name
@property
def power(self):
"""Power reading of CT clamp in W"""
return self._value
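
# --- Illustrative sketch (editor addition, not part of the original module) ---
# Minimal usage of the CT wrapper; the clamp name and reading are made up.
def _example_ct():
    ct = CT("Grid", 1500)
    return ct.name, ct.power  # -> ("Grid", 1500)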
| 2.59375 | 3 |
ch5/exercises/ans5_6.py | chunhua2017/pythonprogrammingdemo | 4 | 12797229 | # 先确认在VSCode的Settings中,勾选“Terminal:Excute In File Dir”
# 在当前文件夹下将hello_world.txt文件复制为hello_world_bak.txt
src = r'hello_world.txt'
dst = r'hello_world_bak.txt'
import shutil
shutil.copyfile(src, dst) | 1.929688 | 2 |
apps/vehicle_counting/worker/worker.py | microsoft/nxs | 5 | 12797230 | <filename>apps/vehicle_counting/worker/worker.py
import os
import cv2
from nxs_libs.db import NxsDbFactory, NxsDbType
from apps.vehicle_counting.app_types.app_request import (
InDbTrackingAppRequest,
RequestStatus,
)
from apps.vehicle_counting.worker.utils import *
DB_TASKS_COLLECTION_NAME = "tasks"
DB_COUNTS_COLLECTION_NAME = "counts"
DB_LOGS_COLLECTION_NAME = "logs"
STORAGE_LOGS_DIR_PATH = "logs"
def main():
import argparse
parser = argparse.ArgumentParser(description="Vehicle Counting App")
parser.add_argument("--video_uuid", type=str)
parser.add_argument("--nxs_url", type=str)
parser.add_argument("--nxs_api_key", type=str)
parser.add_argument(
"--object_detector_uuid",
type=str,
default="bbff897256c9431eb19a2ad311749b39",
)
parser.add_argument(
"--tracker_uuid",
type=str,
default="451ffc2ee1594fe2a6ace17fca5117ab",
)
parser.add_argument("--blobstore_conn_str", type=str)
parser.add_argument("--blobstore_container", type=str)
parser.add_argument("--cosmosdb_conn_str", type=str)
parser.add_argument("--cosmosdb_db_name", type=str)
parser.add_argument(
"--debug", default=False, type=lambda x: (str(x).lower() == "true")
)
args = parser.parse_args()
args.video_uuid = os.environ["VIDEO_UUID"]
args.nxs_url = os.environ["NXS_URL"]
args.nxs_api_key = os.environ["NXS_API_KEY"]
args.blobstore_conn_str = os.environ["BLOBSTORE_CONN_STR"]
args.blobstore_container = os.environ["BLOBSTORE_CONTAINER"]
args.cosmosdb_conn_str = os.environ["COSMOSDB_URL"]
args.cosmosdb_db_name = os.environ["COSMOSDB_NAME"]
try:
db_client = NxsDbFactory.create_db(
NxsDbType.MONGODB,
uri=args.cosmosdb_conn_str,
db_name=args.cosmosdb_db_name,
)
db_client.update(
DB_TASKS_COLLECTION_NAME,
{
"video_uuid": args.video_uuid,
"zone": "global",
},
{"status": RequestStatus.RUNNING},
)
video_info = InDbTrackingAppRequest(
**db_client.query(
DB_TASKS_COLLECTION_NAME, {"video_uuid": args.video_uuid}
)[0]
)
if video_info.skip_frames is None:
video_info.skip_frames = 3
if video_info.count_interval_secs is None:
video_info.count_interval_secs = 900 # 15 mins
INFER_URL = f"{args.nxs_url}/api/v2/tasks/tensors/infer"
OBJECT_DETECTOR_UUID = args.object_detector_uuid
TRACKER_UUID = args.tracker_uuid
cap = cv2.VideoCapture(video_info.video_url)
frame_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
frame_rate = int(round(cap.get(cv2.CAP_PROP_FPS)))
cap.release()
rois = []
lines = []
for region in video_info.regions:
points = []
for p in region.roi.points:
points.append(
NxsPoint(
int(p.x * frame_width),
int(p.y * frame_height),
)
)
rois.append(NxsRoi(points=points))
line = region.line
lines.append(
NxsLine(
p0=NxsPoint(
x=int(line.p0.x * frame_width),
y=int(line.p0.y * frame_height),
),
p1=NxsPoint(
x=int(line.p1.x * frame_width),
y=int(line.p1.y * frame_height),
),
)
)
if ".m3u8" not in video_info.video_url:
from apps.vehicle_counting.worker.online_worker import (
OnlineVehicleTrackingApp,
)
app = OnlineVehicleTrackingApp(
video_uuid=video_info.video_uuid,
frame_width=frame_width,
frame_height=frame_height,
frame_rate=frame_rate,
nxs_infer_url=INFER_URL,
nxs_api_key=args.nxs_api_key,
detector_uuid=OBJECT_DETECTOR_UUID,
tracker_uuid=TRACKER_UUID,
video_url=video_info.video_url,
rois=rois,
lines=lines,
tracking_classes=video_info.tracking_classes,
visualize=False,
collect_logs=args.debug,
skip_frame=video_info.skip_frames,
blobstore_conn_str=args.blobstore_conn_str,
blobstore_container_name=args.blobstore_container,
cosmosdb_conn_str=args.cosmosdb_conn_str,
cosmosdb_db_name=args.cosmosdb_db_name,
counting_report_interval_secs=video_info.count_interval_secs,
job_duration=video_info.job_duration,
)
else:
from apps.vehicle_counting.worker.offline_worker import (
OfflineVehicleTrackingApp,
)
app = OfflineVehicleTrackingApp(
video_uuid=video_info.video_uuid,
frame_width=frame_width,
frame_height=frame_height,
frame_rate=frame_rate,
nxs_infer_url=INFER_URL,
nxs_api_key=args.nxs_api_key,
detector_uuid=OBJECT_DETECTOR_UUID,
tracker_uuid=TRACKER_UUID,
video_url=video_info.video_url,
rois=rois,
lines=lines,
tracking_classes=video_info.tracking_classes,
visualize=False,
collect_logs=args.debug,
skip_frame=video_info.skip_frames,
blobstore_conn_str=args.blobstore_conn_str,
blobstore_container_name=args.blobstore_container,
cosmosdb_conn_str=args.cosmosdb_conn_str,
cosmosdb_db_name=args.cosmosdb_db_name,
counting_report_interval_secs=video_info.count_interval_secs,
job_duration=video_info.job_duration,
)
app.run_tracking()
db_client = NxsDbFactory.create_db(
NxsDbType.MONGODB,
uri=args.cosmosdb_conn_str,
db_name=args.cosmosdb_db_name,
)
if app.job_completed:
db_client.update(
DB_TASKS_COLLECTION_NAME,
{
"video_uuid": args.video_uuid,
"zone": "global",
},
{"status": RequestStatus.COMPLETED, "error": ""},
)
else:
db_client.update(
DB_TASKS_COLLECTION_NAME,
{
"video_uuid": args.video_uuid,
"zone": "global",
},
{"status": RequestStatus.FAILED, "error": "stream ended"},
)
except Exception as e:
print(e)
db_client = NxsDbFactory.create_db(
NxsDbType.MONGODB,
uri=args.cosmosdb_conn_str,
db_name=args.cosmosdb_db_name,
)
db_client.update(
DB_TASKS_COLLECTION_NAME,
{
"video_uuid": args.video_uuid,
"zone": "global",
},
{"status": RequestStatus.FAILED, "error": str(e)},
)
if __name__ == "__main__":
main()
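
# --- Illustrative note (editor addition, not part of the original worker) ---
# main() reads its runtime configuration from environment variables; a
# hypothetical local run might export placeholders like the following
# (none of these values are project defaults):
#
#   VIDEO_UUID=demo-video-0001
#   NXS_URL=https://nxs.example.com
#   NXS_API_KEY=<api key>
#   BLOBSTORE_CONN_STR=<azure blob connection string>
#   BLOBSTORE_CONTAINER=vehicle-counting
#   COSMOSDB_URL=<mongodb connection string>
#   COSMOSDB_NAME=vehiclecounting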
| 2.28125 | 2 |
pyadlml/plot/feature_importance.py | tcsvn/pyadlml | 4 | 12797231 | class FeatureImportance(object):
def __init__(self, md, test_x, test_z):
self._skater_model, self._skater_interpreter = _create_skater_stuff(md, test_x, test_z)
def save_plot_feature_importance(self, file_path):
fig, ax = self._skater_interpreter.feature_importance.plot_feature_importance(
self._skater_model,
ascending=True,
ax=None,
progressbar=False,
# model-scoring: difference in log_loss or MAE of training_labels
            # given perturbations. Note this very rarely makes any significant
# differences
method='model-scoring')
            # cross entropy or f1 ('f1', 'cross_entropy')
#scorer_type='cross_entropy') # type: Figure, axes
#scorer_type='f1') # type: Figure, axes
import matplotlib.pyplot as plt
plt.tight_layout()
fig.savefig(file_path, dpi=fig.dpi)
plt.close(fig)
def _create_skater_stuff(mdl, test_x, test_z):
from skater.model import InMemoryModel
from skater.core.explanations import Interpretation
from hassbrain_algorithm.benchmark.interpretation import ModelWrapper
from hassbrain_algorithm.benchmark.interpretation import _boolean2str
wrapped_model = ModelWrapper(mdl)
class_names = mdl.get_state_lbl_lst()
feature_names = mdl.get_obs_lbl_lst()
# this has to be done in order for skater to recognize the values as categorical and not numerical
test_x = _boolean2str(test_x)
# create interpretation
interpreter = Interpretation(test_x,
#class_names=class_names,
feature_names=feature_names)
# create model
# supports classifiers with or without probability scores
examples = test_x[:10]
skater_model = InMemoryModel(wrapped_model.predict,
#target_names=class_names,
feature_names=feature_names,
model_type='classifier',
unique_values=class_names,
probability=False,
examples=examples)
interpreter.load_data(test_x,
training_labels=test_z,
feature_names=feature_names)
    # TODO: flag for deletion (3 lines below)
    # check whether this can safely be deleted
tmp = interpreter.data_set.feature_info
for key, val in tmp.items():
val['numeric'] = False
return skater_model, interpreter
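
# --- Illustrative sketch (editor addition, not part of the original module) ---
# Hypothetical usage: `trained_model`, `test_x` and `test_z` stand in for a
# trained hassbrain model and its held-out observations/labels.
def _example_plot(trained_model, test_x, test_z):
    fi = FeatureImportance(trained_model, test_x, test_z)
    fi.save_plot_feature_importance("/tmp/feature_importance.png")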
| 2.640625 | 3 |
app/search.py | S4G4R/tv-tracker | 0 | 12797232 | <gh_stars>0
import tmdbsimple as tmdb
def search_movie(title):
"""
Connects to API to search for a specific movie by title.
"""
search = tmdb.Search()
response = search.movie(query=title)
return search.results
def search_tv(title):
"""
Connects to API to search for a specific tv show by title.
"""
search = tmdb.Search()
response = search.tv(query=title)
return search.results
def search_by_id(id, type):
"""
Connects to API to search for a specific movie or show by id.
"""
if type == 'tv':
result = tmdb.TV(id)
    else:
result = tmdb.Movies(id)
return result.info()
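
# --- Illustrative sketch (editor addition, not part of the original module) ---
# Assumes tmdb.API_KEY is configured elsewhere in the app; the titles and the
# id below are just examples.
def _example_searches():
    movies = search_movie("Inception")
    shows = search_tv("Breaking Bad")
    details = search_by_id(1396, "tv")   # example id for a TV show
    return movies, shows, details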
| 2.703125 | 3 |
setup.py | JakobHavtorn/scholarscrape | 0 | 12797233 | <filename>setup.py
from setuptools import find_packages, setup
# Read requirements files
requirements_file = "requirements.txt"
with open(requirements_file) as buffer:
requirements = buffer.read().splitlines()
requirements = list(set(requirements))
requirements_string = "\n ".join(requirements)
print(f"Found the following requirements to be installed from {requirements_file}:\n {requirements_string}")
# Collect packages
packages = find_packages(exclude=("tests", "experiments"))
print("Found the following packages to be created:\n {}".format("\n ".join(packages)))
# Get long description from README
with open("README.md", "r") as readme:
long_description = readme.read()
setup(
name="scholarscrape",
version="1.0.0",
packages=packages,
python_requires=">=3.10.0",
install_requires=requirements,
setup_requires=[],
ext_modules=[],
url="https://github.com/JakobHavtorn/scholarscrape",
author="<NAME>",
description="Interfacing with Semanticscholar API for greatness",
long_description=long_description,
long_description_content_type="text/markdown",
)
| 1.992188 | 2 |
delivery_addresses/tests/test_delivery_address.py | stanwood/traidoo-api | 3 | 12797234 | <reponame>stanwood/traidoo-api<filename>delivery_addresses/tests/test_delivery_address.py
import random
import pytest
from model_bakery import baker
from delivery_addresses.models import DeliveryAddress
pytestmark = pytest.mark.django_db
def test_get_own_delivery_addresses(client_seller, seller, buyer):
baker.make_recipe("delivery_addresses.delivery_address", user=buyer)
address_1, address_2 = baker.make_recipe(
"delivery_addresses.delivery_address", user=seller, _quantity=2
)
response = client_seller.get("/delivery_addresses")
assert response.json() == [
{
"city": address_1.city,
"companyName": address_1.company_name,
"id": address_1.id,
"street": address_1.street,
"zip": address_1.zip,
},
{
"city": address_2.city,
"companyName": address_2.company_name,
"id": address_2.id,
"street": address_2.street,
"zip": address_2.zip,
},
]
def test_get_own_delivery_address(client_seller, seller):
address = baker.make_recipe("delivery_addresses.delivery_address", user=seller)
response = client_seller.get(f"/delivery_addresses/{address.id}")
assert response.json() == {
"city": address.city,
"companyName": address.company_name,
"id": address.id,
"street": address.street,
"zip": address.zip,
}
def test_get_someone_else_delivery_address(client_seller, buyer):
address = baker.make_recipe("delivery_addresses.delivery_address", user=buyer)
response = client_seller.get(f"/delivery_addresses/{address.id}")
assert response.status_code == 404
def test_add_delivery_address(client_seller, seller, faker):
data = {
"city": faker.city(),
"companyName": faker.company(),
"street": faker.street_address(),
"zip": faker.zipcode(),
}
response = client_seller.post("/delivery_addresses", data)
assert response.json() == {
"id": DeliveryAddress.objects.first().id,
"companyName": data["companyName"],
"street": data["street"],
"zip": data["zip"],
"city": data["city"],
}
def test_edit_delivery_address(client_seller, seller, faker):
address = baker.make_recipe("delivery_addresses.delivery_address", user=seller)
data = {
"city": faker.city(),
"companyName": faker.company(),
"street": faker.street_address(),
"zip": faker.zipcode(),
}
response = client_seller.put(f"/delivery_addresses/{address.id}", data)
assert response.json() == {
"id": address.id,
"companyName": data["companyName"],
"street": data["street"],
"zip": data["zip"],
"city": data["city"],
}
address.refresh_from_db()
assert address.city == data["city"]
assert address.company_name == data["companyName"]
assert address.street == data["street"]
assert address.zip == data["zip"]
def test_edit_someone_else_delivery_address(client_buyer, seller, buyer, faker):
address = baker.make_recipe("delivery_addresses.delivery_address", user=seller)
data = {
"city": faker.city(),
"companyName": faker.company(),
"street": faker.street_address(),
"zip": faker.zipcode(),
}
response = client_buyer.put(f"/delivery_addresses/{address.id}", data)
assert response.status_code == 404
def test_partially_edit_delivery_address(client_seller, seller, faker):
address = baker.make_recipe("delivery_addresses.delivery_address", user=seller)
data = {
"city": faker.city(),
"companyName": faker.company(),
"street": faker.street_address(),
"zip": faker.zipcode(),
}
for key, value in data.items():
response = client_seller.patch(f"/delivery_addresses/{address.id}", data)
assert response.json()[key] == value
def test_partially_edit_someone_else_delivery_address(
client_buyer, seller, buyer, faker
):
address = baker.make_recipe("delivery_addresses.delivery_address", user=seller)
response = client_buyer.patch(
f"/delivery_addresses/{address.id}", {"city": faker.city()}
)
assert response.status_code == 404
def test_delete_delivery_address(client_seller, seller, faker):
address = baker.make_recipe("delivery_addresses.delivery_address", user=seller)
response = client_seller.delete(f"/delivery_addresses/{address.id}")
with pytest.raises(DeliveryAddress.DoesNotExist):
address.refresh_from_db()
def test_delete_delivery_address_when_in_use(client_seller, seller, faker):
address = baker.make_recipe("delivery_addresses.delivery_address", user=seller)
cart = baker.make_recipe("carts.cart", delivery_address=address, user=seller)
response = client_seller.delete(f"/delivery_addresses/{address.id}")
assert response.status_code == 400
assert response.json() == {
"message": "Cannot be deleted due to protected related entities.",
"code": "protected_error",
}
def test_delete_someone_else_delivery_address(client_seller, buyer, faker):
address = baker.make_recipe("delivery_addresses.delivery_address", user=buyer)
response = client_seller.delete(f"/delivery_addresses/{address.id}")
assert response.status_code == 404
assert not address.refresh_from_db()
| 2.265625 | 2 |
frame/noneatable/non_eatable.py | Rosikobu/snake-reloaded | 0 | 12797235 | import pygame
import random
from pygame.math import Vector2
#from .config import xSize, ySize, cell_size, cell_number
from .loc_conf import xSize, ySize, cell_number, cell_size
class NonEatable():
def __init__(self, screen, ip1,ip2,ip3,ip4):
        # Load the textures
self._load_texture(ip1,ip2,ip3,ip4)
        # Random coordinates for the food
xPos1 = random.randint(0,cell_number - 2)
yPos1 = random.randint(0,cell_number - 2)
while(not self.is_start_pos_ok(xPos1,yPos1)):
xPos1 = random.randint(0,cell_number - 2)
yPos1 = random.randint(0,cell_number - 2)
xPos2 = xPos1
yPos2 = yPos1 + 1
xPos3 = xPos1 + 1
yPos3 = yPos1
xPos4 = xPos1 + 1
yPos4 = yPos1 + 1
self.lis = [Vector2(xPos1,yPos1),Vector2(xPos2,yPos2),Vector2(xPos3,yPos3),Vector2(xPos4,yPos4)]
self.pyScreen = screen
def is_start_pos_ok(self,xPos1,yPos1):
if(xPos1 == 6 and yPos1 == 10):
return False
if(xPos1 == 7 and yPos1 == 10):
return False
if(xPos1 == 8 and yPos1 == 10):
return False
return True
def _load_texture(self, ip1,ip2,ip3,ip4):
        ''' Load the textures '''
self.ft1 = pygame.image.load(ip1).convert_alpha()
self.ft2 = pygame.image.load(ip2).convert_alpha()
self.ft3 = pygame.image.load(ip3).convert_alpha()
self.ft4 = pygame.image.load(ip4).convert_alpha()
def draw_barrier(self):
food_obj1 = pygame.Rect(int(self.lis[0].x*cell_size),int(self.lis[0].y*cell_size),cell_size,cell_size)
food_obj2 = pygame.Rect(int(self.lis[1].x*cell_size),int(self.lis[1].y*cell_size),cell_size,cell_size)
food_obj3 = pygame.Rect(int(self.lis[2].x*cell_size),int(self.lis[2].y*cell_size),cell_size,cell_size)
food_obj4 = pygame.Rect(int(self.lis[3].x*cell_size),int(self.lis[3].y*cell_size),cell_size,cell_size)
self.pyScreen.blit(self.ft1, food_obj1)
self.pyScreen.blit(self.ft2, food_obj2)
self.pyScreen.blit(self.ft3, food_obj3)
self.pyScreen.blit(self.ft4, food_obj4)
def change_position(self):
xPos1 = random.randint(0,cell_number - 2)
yPos1 = random.randint(0,cell_number - 2)
self.lis = [Vector2(xPos1,yPos1),Vector2(xPos1,yPos1+1),Vector2(xPos1+1,yPos1),Vector2(xPos1+1,yPos1+1)]
| 2.640625 | 3 |
main.py | aaryanDhakal22/scamn-t | 0 | 12797236 | import pyautogui
from time import sleep
from random import choice
sleep(3)
names = open("names.txt","r").readlines()
names = [i[:-1] for i in names]
passwords = open("pass.txt",'r').readlines()
passwords = [i[:-1] for i in passwords if passwords.index(i) % 2 == 0]
for i in range(100):
print("hehe :) ")
pyautogui.click()
nametotake = names[i+100]
passtotake = choice(passwords)
if len(passtotake) < 8:
passtotake = nametotake[:nametotake.index('.')] +passtotake
pyautogui.write(nametotake)
pyautogui.press("TAB")
pyautogui.write(passtotake)
pyautogui.press("ENTER")
sleep(1)
with pyautogui.hold('alt'):
pyautogui.press('left')
print(nametotake)
print(passtotake)
print("Done\n\n")
sleep(6) | 3.203125 | 3 |
roster/client.py | trustedhousesitters/roster-python | 1 | 12797237 | <filename>roster/client.py<gh_stars>1-10
import time
import sys
import os
import signal
import random
import threading
from urlparse import urlparse
from datetime import datetime, timedelta
from registry import NewRegistry
from boto.dynamodb2.layer1 import DynamoDBConnection
from boto.dynamodb2 import connect_to_region
from boto.dynamodb2.items import Item
from boto.dynamodb2.exceptions import ItemNotFound
HEARTBEAT_INTERVAL = 1 # 1second
TTL = 5
class Service(object):
def __init__(self, Name, Endpoint, Expiry=None, stopHeartbeat=False, *args, **kwargs):
self.Name = Name.get('S') if isinstance(Name, dict) else Name
self.Endpoint = Endpoint.get('S') if isinstance(Endpoint, dict) else Endpoint
self.Expiry = int(Expiry.get('N')) if isinstance(Expiry, dict) else Expiry # unix timestamp
self.stopHeartbeat = int(stopHeartbeat.get('N')) if isinstance(stopHeartbeat, dict) else stopHeartbeat
# Unregister the service
def Unregister(self):
self.stopHeartbeat = True
class ClientConfig(object):
def __init__(self, registry_name='', *args, **kwargs):
self.registry_name = registry_name
self.region = ''
endpoint = os.getenv('DYNAMODB_ENDPOINT', '')
if endpoint != '':
self.endpoint = endpoint
self.endpoint_data = urlparse(self.endpoint)
def SetRegion(self, region):
"""
Set region
"""
self.region = region
def GetRegistryName(self):
"""
Get registry table name
"""
return self.registry_name or 'roster'
def GetHashKey(self):
return self.name
def GetRangeKey(self):
return self.endpoint
def GetConnection(self):
# Environment var
if self.region == '':
self.region = os.getenv('AWS_REGION', '')
# Default
if self.region == '':
self.region = "us-west-2"
aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID', '')
aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY', '')
if self.endpoint:
return DynamoDBConnection(
host=self.endpoint_data.hostname,
port=self.endpoint_data.port,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
is_secure=False
)
else:
return connect_to_region(self.region,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key
)
class CleanExit(object):
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
if exc_type is KeyboardInterrupt:
return True
return exc_type is None
class Client(object):
def __init__(self, svc, config, registry):
self.svc = svc
self.config = config
self.registry = registry
@classmethod
def new(cls, *args, **kwargs):
config = ClientConfig(*args, **kwargs)
svc = config.GetConnection()
registry = NewRegistry(svc, config.GetRegistryName())
return Client(svc=svc, config=config, registry=registry)
# Register the service in the registry
def Register(self, name, endpoint):
# Check whether the registry has been previously created. If not create before registration.
if not self.registry.Exists():
table, err = self.registry.Create()
if err:
return None, err
# Create Service
self.service = Service(Name=name, Endpoint=endpoint)
# Ensure call heartbeat at least once
self.heartbeat()
# Start heartbeat check
t = threading.Thread(target=heartbeat_check, args=(self,))
t.daemon = True # causes the thread to terminate when the main process ends.
t.start()
return self.service, None
# Heartbeat function - updates expiry
def heartbeat(self, terminate=False):
# if self.service.stopHeartbeat:
# return
# Update service Expiry based on TTL and current time
self.service.Expiry = int(time.mktime(datetime.now().timetuple()))
if not terminate:
self.service.Expiry += TTL
table = self.registry.Table()
item_info = {
'Name': self.service.Name,
'Endpoint': self.service.Endpoint
}
if table.has_item(**item_info):
item = table.get_item(**item_info)
else:
item = Item(table, self.service.__dict__)
item['Expiry'] = self.service.Expiry
item.save()
# Query the registry for named service
def Discover(self, name):
now = int(time.mktime(datetime.now().timetuple()))
items = self.svc.scan(
self.registry.name,
filter_expression = 'Expiry > :ExpiryVal AND #N = :NameVal',
expression_attribute_names = {
'#N': 'Name'
},
expression_attribute_values = {
':NameVal': {
'S': name
},
':ExpiryVal': {
'N': str(now)
}
}
)
# Randomly select one of the available endpoints (in effect load balancing between available endpoints)
count = items.get('Count')
if count == 0:
return None, Exception('roster: No matching service found')
else:
return Service(**items['Items'][random.randint(0, count - 1)]), None
# Returns the non loopback local IP of the host the client is running on
def get_local_ip(self):
import socket
try:
for ip in socket.gethostbyname_ex(socket.gethostname())[2]:
if not ip.startswith("127."):
return ip, ''
except Exception:
pass
return '', Exception("roster: No non loopback local IP address could be found")
# Heartbeat function - updates expiry
def heartbeat_check(client):
# with CleanExit():
while True:
if client.service.stopHeartbeat:
client.heartbeat(terminate=True)
break
time.sleep(HEARTBEAT_INTERVAL)
client.heartbeat()
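
# --- Illustrative sketch (editor addition, not part of the original module) ---
# Hypothetical register/discover round trip; the service name and endpoint are
# made up, and AWS credentials/region are assumed to come from the environment
# variables read in ClientConfig.GetConnection().
def _example_usage():
    client = Client.new(registry_name="roster")
    service, err = client.Register("web", "10.0.0.5:8080")
    if err is not None:
        return None, err
    found, err = client.Discover("web")
    service.Unregister()  # stops the background heartbeat
    return found, err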
| 2.234375 | 2 |
nvflare/private/fed/client/admin_msg_sender.py | ArnovanHilten/NVFlare | 2 | 12797238 | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is the FLAdmin Client to send the request message to the admin server."""
import threading
from multiprocessing.dummy import Pool as ThreadPool
import grpc
import nvflare.private.fed.protos.admin_pb2 as admin_msg
import nvflare.private.fed.protos.admin_pb2_grpc as admin_service
from nvflare.private.admin_defs import Message
from nvflare.private.fed.utils.messageproto import message_to_proto, proto_to_message
from .admin import Sender
lock = threading.Lock()
class AdminMessageSender(Sender):
"""AdminMessageSender to send the request message to the admin server."""
def __init__(
self,
client_name,
root_cert=None,
ssl_cert=None,
private_key=None,
server_args=None,
secure=False,
is_multi_gpu=False,
rank=0,
):
"""To init the AdminMessageSender.
Args:
client_name: client name
root_cert: root certificate
ssl_cert: SSL certificate
private_key: private key
server_args: server args
secure: True/False
is_multi_gpu: True/False
rank: local process rank
"""
self.client_name = client_name
self.root_cert = root_cert
self.ssl_cert = ssl_cert
self.private_key = private_key
self.secure = secure
self.servers = server_args
self.multi_gpu = is_multi_gpu
self.rank = rank
self.pool = ThreadPool(len(self.servers))
def send_reply(self, message: Message):
"""Call to send the request message.
Args:
message: request message
"""
if self.rank == 0:
# self.send_client_reply(message)
for taskname in tuple(self.servers):
self._send_client_reply(message, taskname)
def _send_client_reply(self, message, taskname):
try:
with self._set_up_channel(self.servers[taskname]) as channel:
stub = admin_service.AdminCommunicatingStub(channel)
reply = admin_msg.Reply()
reply.client_name = self.client_name
reply.message.CopyFrom(message_to_proto(message))
# reply.message = message_to_proto(message)
stub.SendReply(reply)
except BaseException:
pass
def retrieve_requests(self) -> [Message]:
"""Send the message to retrieve pending requests from the Server.
Returns: list of messages.
"""
messages = []
if self.rank == 0:
items = self.pool.map(self._retrieve_client_requests, tuple(self.servers))
for item in items:
messages.extend(item)
return messages
def _retrieve_client_requests(self, taskname):
try:
message_list = []
with self._set_up_channel(self.servers[taskname]) as channel:
stub = admin_service.AdminCommunicatingStub(channel)
client = admin_msg.Client()
client.client_name = self.client_name
messages = stub.Retrieve(client)
for i in messages.message:
message_list.append(proto_to_message(i))
        except Exception:
            # connection errors are ignored; return whatever was collected
            pass
return message_list
def send_result(self, message: Message):
"""Send the processor results to server.
Args:
message: message
"""
if self.rank == 0:
for taskname in tuple(self.servers):
try:
with self._set_up_channel(self.servers[taskname]) as channel:
stub = admin_service.AdminCommunicatingStub(channel)
reply = admin_msg.Reply()
reply.client_name = self.client_name
reply.message.CopyFrom(message_to_proto(message))
stub.SendResult(reply)
except BaseException:
pass
def _set_up_channel(self, channel_dict):
"""Connect client to the server.
Args:
channel_dict: grpc channel parameters
Returns: an initialised grpc channel
"""
if self.secure:
with open(self.root_cert, "rb") as f:
trusted_certs = f.read()
with open(self.private_key, "rb") as f:
private_key = f.read()
with open(self.ssl_cert, "rb") as f:
certificate_chain = f.read()
call_credentials = grpc.metadata_call_credentials(
lambda context, callback: callback((("x-custom-token", self.client_name),), None)
)
credentials = grpc.ssl_channel_credentials(
certificate_chain=certificate_chain, private_key=private_key, root_certificates=trusted_certs
)
composite_credentials = grpc.composite_channel_credentials(credentials, call_credentials)
channel = grpc.secure_channel(**channel_dict, credentials=composite_credentials)
else:
channel = grpc.insecure_channel(**channel_dict)
return channel
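
# --- Illustrative sketch (editor addition, not part of the original module) ---
# Hypothetical construction of the sender; the client name, target address and
# server_args layout are placeholders that mirror how self.servers is used in
# _set_up_channel() above (insecure mode, so no certificates are needed).
def _example_sender():
    sender = AdminMessageSender(
        client_name="site-1",
        server_args={"server": {"target": "localhost:8003", "options": []}},
        secure=False,
    )
    return sender.retrieve_requests()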
| 2.015625 | 2 |
src/pandalchemy/generate_code.py | Dogeek/pandalchemy | 3 | 12797239 | <filename>src/pandalchemy/generate_code.py
from io import StringIO
from sqlalchemy import create_engine
from sqlalchemy import MetaData
from sqlacodegen.codegen import CodeGenerator
from importlib import import_module
def main():
connection_string = 'sqlite:///chinook.db'
engine = create_engine(connection_string)
code_name = 'chinook_models_nojoined.py'
generate_code_file(code_name, engine, nojoined=True)
import_code(code_name)
def generate_code(engine, **kwargs):
""" CodeGenerator.__init__(self, metadata, noindexes=False, noconstraints=False,
nojoined=False, noinflect=False, nobackrefs=False,
flask=False, ignore_cols=None, noclasses=False, nocomments=False)
"""
metadata = MetaData()
metadata.reflect(bind=engine)
codegen = CodeGenerator(metadata, **kwargs)
sio = StringIO()
codegen.render(sio)
return sio.getvalue()
def generate_file(file_name, text):
with open(file_name, 'w') as text_file:
text_file.write(text)
def generate_code_file(file_name, engine, **kwargs):
generate_file(file_name, generate_code(engine, **kwargs))
def import_code(file_name):
    # import_module expects a module name, so strip a trailing ".py" if present
    module_name = file_name[:-3] if file_name.endswith('.py') else file_name
    return import_module(module_name)
if __name__ == '__main__':
main()
| 2.734375 | 3 |
benchmarks/launch_benchmark.py | s1113950/models | 0 | 12797240 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: EPL-2.0
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import signal
import subprocess
import sys
from argparse import ArgumentParser
from common import base_benchmark_util
class LaunchBenchmark(base_benchmark_util.BaseBenchmarkUtil):
"""Launches benchmarking job based on the specified args """
def main(self):
args, unknown = self.parse_args(sys.argv[1:])
try:
self.validate_args(args)
except (IOError, ValueError) as e:
print("\nError: {}".format(e))
sys.exit(1)
self.run_docker_container(args)
def parse_args(self, args):
super(LaunchBenchmark, self).define_args()
# Additional args that are only used with the launch script
arg_parser = ArgumentParser(
parents=[self._common_arg_parser],
description="Parse args for benchmark interface")
arg_parser.add_argument(
"--docker-image", help="Specify the docker image/tag to use",
dest="docker_image", default=None, required=True)
arg_parser.add_argument(
"--debug", help="Launches debug mode which doesn't execute "
"start.sh", action="store_true")
return arg_parser.parse_known_args(args)
def validate_args(self, args):
"""validate the args"""
# validate the shared args first
super(LaunchBenchmark, self).validate_args(args)
# Check for spaces in docker image
if ' ' in args.docker_image:
raise ValueError("docker image string "
"should not have whitespace(s)")
# validate that we support this framework by checking folder names
benchmark_dir = os.path.dirname(os.path.realpath(__file__))
if glob.glob("{}/*/{}".format(benchmark_dir, args.framework)) == []:
raise ValueError("The specified framework is not supported: {}".
format(args.framework))
# if neither benchmark_only or accuracy_only are specified, then enable
# benchmark_only as the default
if not args.benchmark_only and not args.accuracy_only:
args.benchmark_only = True
def run_docker_container(self, args):
"""
Runs a docker container with the specified image and environment
variables to start running the benchmarking job.
"""
benchmark_scripts = os.path.dirname(os.path.realpath(__file__))
intelai_models = os.path.join(benchmark_scripts, os.pardir, "models")
if args.model_name:
# find the path to the model's benchmarks folder
search_path = os.path.join(
benchmark_scripts, "*", args.framework, args.model_name,
args.mode, args.precision)
matches = glob.glob(search_path)
if len(matches) > 1:
# we should never get more than one match
raise ValueError("Found multiple model locations for {} {} {}"
.format(args.framework,
args.model_name,
args.precision))
elif len(matches) == 0:
raise ValueError("No model was found for {} {} {}"
.format(args.framework,
args.model_name,
args.precision))
# use the benchmarks directory path to find the use case
dir_list = matches[0].split("/")
# find the last occurrence of framework in the list
framework_index = len(dir_list) - 1 - dir_list[::-1].index(
args.framework)
# grab the use case name from the path
use_case = str(dir_list[framework_index - 1])
# find the intelai_optimized model directory
optimized_model_dir = os.path.join(
benchmark_scripts, os.pardir, "models", use_case,
args.framework, args.model_name)
# if we find an optimized model, then we will use that path
if os.path.isdir(intelai_models):
intelai_models = optimized_model_dir
mount_benchmark = "/workspace/benchmarks"
mount_external_models_source = "/workspace/models"
mount_intelai_models = "/workspace/intelai_models"
workspace = os.path.join(mount_benchmark, "common", args.framework)
mount_output_dir = False
output_dir = os.path.join(workspace, 'logs')
if args.output_dir != "/models/benchmarks/common/tensorflow/logs":
# we don't need to mount log dir otherwise since default is workspace folder
mount_output_dir = True
output_dir = args.output_dir
in_graph_dir = os.path.dirname(args.input_graph) if args.input_graph \
else ""
in_graph_filename = os.path.basename(args.input_graph) if \
args.input_graph else ""
env_vars = ["--env", "DATASET_LOCATION_VOL={}".format(args.data_location),
"--env", "CHECKPOINT_DIRECTORY_VOL={}".format(args.checkpoint),
"--env", "EXTERNAL_MODELS_SOURCE_DIRECTORY={}".format(args.model_source_dir),
"--env", "INTELAI_MODELS={}".format(intelai_models),
"--env", "BENCHMARK_SCRIPTS={}".format(benchmark_scripts),
"--env", "SOCKET_ID={}".format(args.socket_id),
"--env", "MODEL_NAME={}".format(args.model_name),
"--env", "MODE={}".format(args.mode),
"--env", "PRECISION={}".format(args.precision),
"--env", "VERBOSE={}".format(args.verbose),
"--env", "BATCH_SIZE={}".format(args.batch_size),
"--env", "WORKSPACE={}".format(workspace),
"--env", "IN_GRAPH=/in_graph/{}".format(in_graph_filename),
"--env", "MOUNT_BENCHMARK={}".format(mount_benchmark),
"--env", "MOUNT_EXTERNAL_MODELS_SOURCE={}".format(mount_external_models_source),
"--env", "MOUNT_INTELAI_MODELS_SOURCE={}".format(mount_intelai_models),
"--env", "USE_CASE={}".format(use_case),
"--env", "FRAMEWORK={}".format(args.framework),
"--env", "NUM_CORES={}".format(args.num_cores),
"--env", "NUM_INTER_THREADS={}".format(args.num_inter_threads),
"--env", "NUM_INTRA_THREADS={}".format(args.num_intra_threads),
"--env", "DATASET_LOCATION=/dataset",
"--env", "CHECKPOINT_DIRECTORY=/checkpoints",
"--env", "BENCHMARK_ONLY={}".format(args.benchmark_only),
"--env", "ACCURACY_ONLY={}".format(args.accuracy_only),
"--env", "OUTPUT_RESULTS={}".format(args.output_results),
"--env", "NOINSTALL=False",
"--env", "OUTPUT_DIR={}".format(output_dir)]
# by default we will install, user needs to set NOINSTALL=True
# manually after they get into `--debug` mode
# since they need to run one time without this flag
# to get stuff installed
# Add custom model args as env vars
for custom_arg in args.model_args:
if "=" not in custom_arg:
raise ValueError("Expected model args in the format "
"`name=value` but received: {}".
format(custom_arg))
env_vars.append("--env")
env_vars.append("{}".format(custom_arg))
# Add proxy to env variables if any set on host
for environment_proxy_setting in [
"http_proxy",
"ftp_proxy",
"https_proxy",
"no_proxy",
]:
if not os.environ.get(environment_proxy_setting):
continue
env_vars.append("--env")
env_vars.append("{}={}".format(
environment_proxy_setting,
os.environ.get(environment_proxy_setting)
))
volume_mounts = ["--volume", "{}:{}".format(benchmark_scripts, mount_benchmark),
"--volume", "{}:{}".format(args.model_source_dir, mount_external_models_source),
"--volume", "{}:{}".format(intelai_models, mount_intelai_models),
"--volume", "{}:/dataset".format(args.data_location),
"--volume", "{}:/checkpoints".format(args.checkpoint),
"--volume", "{}:/in_graph".format(in_graph_dir)]
if mount_output_dir:
volume_mounts.extend([
"--volume", "{}:{}".format(output_dir, output_dir)])
docker_run_cmd = ["docker", "run"]
# only use -it when debugging, otherwise we might get TTY error
if args.debug:
docker_run_cmd.append("-it")
docker_run_cmd = docker_run_cmd + env_vars + volume_mounts + [
"--privileged", "-u", "root:root", "-w",
workspace, args.docker_image, "/bin/bash"]
if not args.debug:
docker_run_cmd.append("start.sh")
if args.verbose:
print("Docker run command:\n{}".format(docker_run_cmd))
self._run_docker_cmd(docker_run_cmd)
def _run_docker_cmd(self, docker_run_cmd):
"""runs docker proc and exits on ctrl c"""
p = subprocess.Popen(docker_run_cmd, preexec_fn=os.setsid)
try:
p.communicate()
except KeyboardInterrupt:
os.killpg(os.getpgid(p.pid), signal.SIGKILL)
if __name__ == "__main__":
util = LaunchBenchmark()
util.main()
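
# --- Illustrative note (editor addition, not part of the original script) ---
# A hypothetical invocation; the framework, model and image names are examples,
# and the exact flag spellings for the shared args live in base_benchmark_util.
#
#   python launch_benchmark.py \
#       --framework tensorflow --model-name resnet50 \
#       --mode inference --precision fp32 \
#       --batch-size 128 \
#       --docker-image intel-optimized-tf:latest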
| 2.25 | 2 |
apps/api/tests/test_segmenting.py | IT2901-24-2018/orm | 2 | 12797241 | import unittest
from apps.api.segmenter.road_segmenter import geometry_to_list
from apps.data.road_segmenting.road_fetcher import vegnet_to_geojson
from apps.data.road_segmenting.road_filter import filter_road
from vapi.constants import MAX_SEGMENT_LENGTH, MIN_COORDINATES_LENGTH
from api.segmenter.calculate_distance import calculate_road_length_simple
from api.segmenter.road_segmenter import segment_network, split_segment
def convert(road):
road = filter_road(road)
road["the_geom"] = geometry_to_list(road["the_geom"])
return road
class TestSegmenting(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.kommune = 5001
cls.vegref = "kg"
cls.max_segment_distance = MAX_SEGMENT_LENGTH
cls.min_coordinates_length = MIN_COORDINATES_LENGTH
network = vegnet_to_geojson(cls.kommune, cls.vegref)
cls.count, cls.road_net = network[0], network[1]["features"]
# Apparently the setUpClass is a bit funky and the road_net does not stay filtered after setUpClass is run,
        # so instead it is done in each test function that needs it, rather than here.
road_net_list = []
for road in cls.road_net:
road_net_list.append(filter_road(road))
cls.road_net_segmented = segment_network(road_net_list, cls.max_segment_distance, cls.min_coordinates_length)
def setUp(self):
"""
Needs to be here for the tests to run
"""
pass
def test_road_segmenter_list(self):
"""
The road_segmenter function should return a list
:return: Nothing
"""
self.assertIsInstance(self.road_net_segmented, list, "The road segmenter did not return a list")
def test_road_segmenter_list_elements(self):
"""
Every element in the split segments should be a dict
:return: Nothing
"""
error_message = "Not all elements in the split list are of type dict \n"
for segment in self.road_net_segmented:
self.assertIsInstance(segment, dict, error_message)
def test_split_segment_geometry_len(self):
"""
Given a list of roads segments, the split segments should always have a length
of 2 or more
:return: Nothing
"""
error_message = "Segment has less than " + str(self.min_coordinates_length) + " GPS coordinates"
for segment in self.road_net_segmented:
# coordinates are split by commas, so the count of commas+1 should be the same as the count of coordinates
coordinates_amount = segment["the_geom"].count(",")
self.assertGreaterEqual(coordinates_amount+1, self.min_coordinates_length, error_message)
def test_road_filter(self):
"""
The road_filter function should return a string, otherwise segmentation will crash in later stages
:return: Nothing
"""
for road in self.road_net:
road = filter_road(road)
self.assertIsInstance(road["the_geom"], str, "road_filter should turn geometry into a string")
def test_geometry_conversion(self):
"""
The geometry_to_list function should return a dictionary containing coordinates as a list,
otherwise the segmenter can't split segments
:return: Nothing
"""
for road in self.road_net:
road = convert(road)
self.assertIsInstance(road["the_geom"], dict, "geometry_to_list should return a "
"dictionary")
self.assertIsInstance(road["the_geom"]["coordinates"], list, "geometry_to_list should return a turn the "
"coordinates into a list")
def test_calculate_road_length(self):
"""
The total distance of the segmented road should be similar to the length before segmentation, within
a margin given by the variable "margin"
:return: Nothing
"""
margin = 3
for road in self.road_net:
road = convert(road)
length_before = calculate_road_length_simple(road["the_geom"]["coordinates"])
road_segmented = split_segment(road, self.max_segment_distance, [], self.min_coordinates_length)
length_after = 0
for segment in road_segmented:
length_after += calculate_road_length_simple(segment["the_geom"]["coordinates"])
self.assertLess(abs(length_after - length_before), margin, "The difference between the original "
"length and the segmented length is "
"too large")
def test_split_segment_chaining(self):
"""
Every connected segment should start with the end gps point of the previous segment
:return: Nothing
"""
for road in self.road_net:
road = convert(road)
road_segmented = split_segment(road, self.max_segment_distance, [], self.min_coordinates_length)
for i in range(1, len(road_segmented)):
curr_segment = road_segmented[i]
prev_segment = road_segmented[i-1]
end_coordinate = len(prev_segment["the_geom"]["coordinates"])-1
self.assertEqual(curr_segment["the_geom"]["coordinates"][0],
prev_segment["the_geom"]["coordinates"][end_coordinate],
"Segments are not correctly chained")
def test_split_segment_negative_length(self):
"""
No road segments should have a negative road length
:return: Nothing
"""
for segment in self.road_net_segmented:
self.assertGreater(segment["stretchdistance"], 0, "Stretchdistance must be of at least 1 meter")
def test_duplicate_segments(self):
"""
Test if there are multiple segments with the same coordinates
"""
length = len(self.road_net_segmented)-1
for i in range(length):
road = self.road_net_segmented[i]["the_geom"]
for x in range(i+1, length):
other_road = self.road_net_segmented[x]["the_geom"]
self.assertNotEqual(road, other_road, "Duplicate segment geometry coordinates")
def test_missing_coordinates(self):
"""
All original coordinates should still be present after segmenting road network
:return: Nothing
"""
for road in self.road_net:
road = convert(road)
coordinates_original = road["the_geom"]["coordinates"]
road_segmented = split_segment(road, self.max_segment_distance, [], self.min_coordinates_length)
coordinates_segmented = []
for segment in road_segmented:
coordinates_segmented.extend(segment["the_geom"]["coordinates"])
for coordinate in coordinates_original:
self.assertTrue(coordinate in coordinates_segmented, "Missing coordinate after segmenting")
def test_over_and_undersegmenting(self):
"""
The segmenter should only run on segments that are over the limit in length, it should never segment something
shorter than that. In other words the segmented road should still be only one segment
:return: Nothing
"""
i = 0
for road in self.road_net:
i += 1
converted_road = convert(road)
road_coords_length = len(converted_road["the_geom"]["coordinates"])
road_distance = calculate_road_length_simple(converted_road["the_geom"]["coordinates"])
road_segmented = segment_network([filter_road(road)], self.max_segment_distance,
self.min_coordinates_length)
road_segmented_length = len(road_segmented)
if road_distance < self.max_segment_distance:
self.assertTrue(road_segmented_length == 1, "This road was segmented, but should not have been.")
elif road_coords_length >= 2*self.min_coordinates_length and road_distance > self.max_segment_distance:
self.assertTrue(road_segmented_length > 1, ("This road should have been segmented, but was not. "
"Stretchdistance:", road_distance, "Coordinates:",
converted_road["the_geom"]["coordinates"], i))
if __name__ == "__main__":
unittest.main()
| 2.90625 | 3 |
python/trinsic/proto/services/trustregistry/v1/__init__.py | musaib072/sdk | 6 | 12797242 | <reponame>musaib072/sdk
# Generated by the protocol buffer compiler. DO NOT EDIT!
# sources: services/trust-registry/v1/trust-registry.proto
# plugin: python-betterproto
from dataclasses import dataclass
from typing import AsyncIterator, Dict
import betterproto
from betterproto.grpc.grpclib_server import ServiceBase
import grpclib
class RegistrationStatus(betterproto.Enum):
CURRENT = 0
EXPIRED = 1
TERMINATED = 2
REVOKED = 3
NOT_FOUND = 10
@dataclass(eq=False, repr=False)
class AddFrameworkRequest(betterproto.Message):
governance_framework: "GovernanceFramework" = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class AddFrameworkResponse(betterproto.Message):
status: "__common_v1__.ResponseStatus" = betterproto.enum_field(1)
@dataclass(eq=False, repr=False)
class RemoveFrameworkRequest(betterproto.Message):
governance_framework: "GovernanceFramework" = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class RemoveFrameworkResponse(betterproto.Message):
status: "__common_v1__.ResponseStatus" = betterproto.enum_field(1)
@dataclass(eq=False, repr=False)
class SearchRegistryRequest(betterproto.Message):
query: str = betterproto.string_field(1)
continuation_token: str = betterproto.string_field(2)
options: "__common_v1__.RequestOptions" = betterproto.message_field(5)
@dataclass(eq=False, repr=False)
class SearchRegistryResponse(betterproto.Message):
items_json: str = betterproto.string_field(1)
has_more: bool = betterproto.bool_field(2)
count: int = betterproto.int32_field(3)
continuation_token: str = betterproto.string_field(4)
@dataclass(eq=False, repr=False)
class GovernanceFramework(betterproto.Message):
governance_framework_uri: str = betterproto.string_field(1)
trust_registry_uri: str = betterproto.string_field(2)
description: str = betterproto.string_field(3)
@dataclass(eq=False, repr=False)
class RegisterIssuerRequest(betterproto.Message):
did_uri: str = betterproto.string_field(1, group="authority")
x509_cert: str = betterproto.string_field(2, group="authority")
credential_type_uri: str = betterproto.string_field(10)
valid_from_utc: int = betterproto.uint64_field(11)
valid_until_utc: int = betterproto.uint64_field(12)
governance_framework_uri: str = betterproto.string_field(20)
@dataclass(eq=False, repr=False)
class RegisterIssuerResponse(betterproto.Message):
status: "__common_v1__.ResponseStatus" = betterproto.enum_field(1)
@dataclass(eq=False, repr=False)
class RegisterVerifierRequest(betterproto.Message):
did_uri: str = betterproto.string_field(1, group="authority")
x509_cert: str = betterproto.string_field(2, group="authority")
presentation_type_uri: str = betterproto.string_field(10)
valid_from_utc: int = betterproto.uint64_field(11)
valid_until_utc: int = betterproto.uint64_field(12)
governance_framework_uri: str = betterproto.string_field(20)
@dataclass(eq=False, repr=False)
class RegisterVerifierResponse(betterproto.Message):
status: "__common_v1__.ResponseStatus" = betterproto.enum_field(1)
@dataclass(eq=False, repr=False)
class UnregisterIssuerRequest(betterproto.Message):
did_uri: str = betterproto.string_field(1, group="authority")
x509_cert: str = betterproto.string_field(2, group="authority")
credential_type_uri: str = betterproto.string_field(10)
governance_framework_uri: str = betterproto.string_field(20)
@dataclass(eq=False, repr=False)
class UnregisterIssuerResponse(betterproto.Message):
status: "__common_v1__.ResponseStatus" = betterproto.enum_field(1)
@dataclass(eq=False, repr=False)
class UnregisterVerifierRequest(betterproto.Message):
did_uri: str = betterproto.string_field(1, group="authority")
x509_cert: str = betterproto.string_field(2, group="authority")
presentation_type_uri: str = betterproto.string_field(10)
governance_framework_uri: str = betterproto.string_field(20)
@dataclass(eq=False, repr=False)
class UnregisterVerifierResponse(betterproto.Message):
status: "__common_v1__.ResponseStatus" = betterproto.enum_field(1)
@dataclass(eq=False, repr=False)
class CheckIssuerStatusRequest(betterproto.Message):
governance_framework_uri: str = betterproto.string_field(1)
did_uri: str = betterproto.string_field(2, group="member")
x509_cert: str = betterproto.string_field(3, group="member")
credential_type_uri: str = betterproto.string_field(4)
@dataclass(eq=False, repr=False)
class CheckIssuerStatusResponse(betterproto.Message):
status: "RegistrationStatus" = betterproto.enum_field(1)
@dataclass(eq=False, repr=False)
class CheckVerifierStatusRequest(betterproto.Message):
governance_framework_uri: str = betterproto.string_field(1)
did_uri: str = betterproto.string_field(2, group="member")
x509_cert: str = betterproto.string_field(3, group="member")
presentation_type_uri: str = betterproto.string_field(4)
@dataclass(eq=False, repr=False)
class CheckVerifierStatusResponse(betterproto.Message):
status: "RegistrationStatus" = betterproto.enum_field(1)
@dataclass(eq=False, repr=False)
class FetchDataRequest(betterproto.Message):
governance_framework_uri: str = betterproto.string_field(1)
query: str = betterproto.string_field(2)
@dataclass(eq=False, repr=False)
class FetchDataResponse(betterproto.Message):
response_json: str = betterproto.string_field(1)
has_more_results: bool = betterproto.bool_field(2)
continuation_token: str = betterproto.string_field(3)
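# Betterproto-generated async client stub: each method below builds the request
# message from its keyword arguments and issues the corresponding gRPC call.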
class TrustRegistryStub(betterproto.ServiceStub):
async def add_framework(
self, *, governance_framework: "GovernanceFramework" = None
) -> "AddFrameworkResponse":
request = AddFrameworkRequest()
if governance_framework is not None:
request.governance_framework = governance_framework
return await self._unary_unary(
"/services.trustregistry.v1.TrustRegistry/AddFramework",
request,
AddFrameworkResponse,
)
async def remove_framework(
self, *, governance_framework: "GovernanceFramework" = None
) -> "RemoveFrameworkResponse":
request = RemoveFrameworkRequest()
if governance_framework is not None:
request.governance_framework = governance_framework
return await self._unary_unary(
"/services.trustregistry.v1.TrustRegistry/RemoveFramework",
request,
RemoveFrameworkResponse,
)
async def search_registry(
self,
*,
query: str = "",
continuation_token: str = "",
options: "__common_v1__.RequestOptions" = None,
) -> "SearchRegistryResponse":
request = SearchRegistryRequest()
request.query = query
request.continuation_token = continuation_token
if options is not None:
request.options = options
return await self._unary_unary(
"/services.trustregistry.v1.TrustRegistry/SearchRegistry",
request,
SearchRegistryResponse,
)
async def register_issuer(
self,
*,
did_uri: str = "",
x509_cert: str = "",
credential_type_uri: str = "",
valid_from_utc: int = 0,
valid_until_utc: int = 0,
governance_framework_uri: str = "",
) -> "RegisterIssuerResponse":
request = RegisterIssuerRequest()
if did_uri:
request.did_uri = did_uri
if x509_cert:
request.x509_cert = x509_cert
request.credential_type_uri = credential_type_uri
request.valid_from_utc = valid_from_utc
request.valid_until_utc = valid_until_utc
request.governance_framework_uri = governance_framework_uri
return await self._unary_unary(
"/services.trustregistry.v1.TrustRegistry/RegisterIssuer",
request,
RegisterIssuerResponse,
)
async def register_verifier(
self,
*,
did_uri: str = "",
x509_cert: str = "",
presentation_type_uri: str = "",
valid_from_utc: int = 0,
valid_until_utc: int = 0,
governance_framework_uri: str = "",
) -> "RegisterVerifierResponse":
request = RegisterVerifierRequest()
if did_uri:
request.did_uri = did_uri
if x509_cert:
request.x509_cert = x509_cert
request.presentation_type_uri = presentation_type_uri
request.valid_from_utc = valid_from_utc
request.valid_until_utc = valid_until_utc
request.governance_framework_uri = governance_framework_uri
return await self._unary_unary(
"/services.trustregistry.v1.TrustRegistry/RegisterVerifier",
request,
RegisterVerifierResponse,
)
async def unregister_issuer(
self,
*,
did_uri: str = "",
x509_cert: str = "",
credential_type_uri: str = "",
governance_framework_uri: str = "",
) -> "UnregisterIssuerResponse":
request = UnregisterIssuerRequest()
if did_uri:
request.did_uri = did_uri
if x509_cert:
request.x509_cert = x509_cert
request.credential_type_uri = credential_type_uri
request.governance_framework_uri = governance_framework_uri
return await self._unary_unary(
"/services.trustregistry.v1.TrustRegistry/UnregisterIssuer",
request,
UnregisterIssuerResponse,
)
async def unregister_verifier(
self,
*,
did_uri: str = "",
x509_cert: str = "",
presentation_type_uri: str = "",
governance_framework_uri: str = "",
) -> "UnregisterVerifierResponse":
request = UnregisterVerifierRequest()
if did_uri:
request.did_uri = did_uri
if x509_cert:
request.x509_cert = x509_cert
request.presentation_type_uri = presentation_type_uri
request.governance_framework_uri = governance_framework_uri
return await self._unary_unary(
"/services.trustregistry.v1.TrustRegistry/UnregisterVerifier",
request,
UnregisterVerifierResponse,
)
async def check_issuer_status(
self,
*,
governance_framework_uri: str = "",
did_uri: str = "",
x509_cert: str = "",
credential_type_uri: str = "",
) -> "CheckIssuerStatusResponse":
request = CheckIssuerStatusRequest()
request.governance_framework_uri = governance_framework_uri
if did_uri:
request.did_uri = did_uri
if x509_cert:
request.x509_cert = x509_cert
request.credential_type_uri = credential_type_uri
return await self._unary_unary(
"/services.trustregistry.v1.TrustRegistry/CheckIssuerStatus",
request,
CheckIssuerStatusResponse,
)
async def check_verifier_status(
self,
*,
governance_framework_uri: str = "",
did_uri: str = "",
x509_cert: str = "",
presentation_type_uri: str = "",
) -> "CheckVerifierStatusResponse":
request = CheckVerifierStatusRequest()
request.governance_framework_uri = governance_framework_uri
if did_uri:
request.did_uri = did_uri
if x509_cert:
request.x509_cert = x509_cert
request.presentation_type_uri = presentation_type_uri
return await self._unary_unary(
"/services.trustregistry.v1.TrustRegistry/CheckVerifierStatus",
request,
CheckVerifierStatusResponse,
)
async def fetch_data(
self, *, governance_framework_uri: str = "", query: str = ""
) -> AsyncIterator["FetchDataResponse"]:
request = FetchDataRequest()
request.governance_framework_uri = governance_framework_uri
request.query = query
async for response in self._unary_stream(
"/services.trustregistry.v1.TrustRegistry/FetchData",
request,
FetchDataResponse,
):
yield response
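# Server-side base class: subclass it and override the handler methods below;
# the __rpc_* wrappers unpack incoming messages and send back the responses.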
class TrustRegistryBase(ServiceBase):
async def add_framework(
self, governance_framework: "GovernanceFramework"
) -> "AddFrameworkResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def remove_framework(
self, governance_framework: "GovernanceFramework"
) -> "RemoveFrameworkResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def search_registry(
self,
query: str,
continuation_token: str,
options: "__common_v1__.RequestOptions",
) -> "SearchRegistryResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def register_issuer(
self,
did_uri: str,
x509_cert: str,
credential_type_uri: str,
valid_from_utc: int,
valid_until_utc: int,
governance_framework_uri: str,
) -> "RegisterIssuerResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def register_verifier(
self,
did_uri: str,
x509_cert: str,
presentation_type_uri: str,
valid_from_utc: int,
valid_until_utc: int,
governance_framework_uri: str,
) -> "RegisterVerifierResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def unregister_issuer(
self,
did_uri: str,
x509_cert: str,
credential_type_uri: str,
governance_framework_uri: str,
) -> "UnregisterIssuerResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def unregister_verifier(
self,
did_uri: str,
x509_cert: str,
presentation_type_uri: str,
governance_framework_uri: str,
) -> "UnregisterVerifierResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def check_issuer_status(
self,
governance_framework_uri: str,
did_uri: str,
x509_cert: str,
credential_type_uri: str,
) -> "CheckIssuerStatusResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def check_verifier_status(
self,
governance_framework_uri: str,
did_uri: str,
x509_cert: str,
presentation_type_uri: str,
) -> "CheckVerifierStatusResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def fetch_data(
self, governance_framework_uri: str, query: str
) -> AsyncIterator["FetchDataResponse"]:
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def __rpc_add_framework(self, stream: grpclib.server.Stream) -> None:
request = await stream.recv_message()
request_kwargs = {
"governance_framework": request.governance_framework,
}
response = await self.add_framework(**request_kwargs)
await stream.send_message(response)
async def __rpc_remove_framework(self, stream: grpclib.server.Stream) -> None:
request = await stream.recv_message()
request_kwargs = {
"governance_framework": request.governance_framework,
}
response = await self.remove_framework(**request_kwargs)
await stream.send_message(response)
async def __rpc_search_registry(self, stream: grpclib.server.Stream) -> None:
request = await stream.recv_message()
request_kwargs = {
"query": request.query,
"continuation_token": request.continuation_token,
"options": request.options,
}
response = await self.search_registry(**request_kwargs)
await stream.send_message(response)
async def __rpc_register_issuer(self, stream: grpclib.server.Stream) -> None:
request = await stream.recv_message()
request_kwargs = {
"did_uri": request.did_uri,
"x509_cert": request.x509_cert,
"credential_type_uri": request.credential_type_uri,
"valid_from_utc": request.valid_from_utc,
"valid_until_utc": request.valid_until_utc,
"governance_framework_uri": request.governance_framework_uri,
}
response = await self.register_issuer(**request_kwargs)
await stream.send_message(response)
async def __rpc_register_verifier(self, stream: grpclib.server.Stream) -> None:
request = await stream.recv_message()
request_kwargs = {
"did_uri": request.did_uri,
"x509_cert": request.x509_cert,
"presentation_type_uri": request.presentation_type_uri,
"valid_from_utc": request.valid_from_utc,
"valid_until_utc": request.valid_until_utc,
"governance_framework_uri": request.governance_framework_uri,
}
response = await self.register_verifier(**request_kwargs)
await stream.send_message(response)
async def __rpc_unregister_issuer(self, stream: grpclib.server.Stream) -> None:
request = await stream.recv_message()
request_kwargs = {
"did_uri": request.did_uri,
"x509_cert": request.x509_cert,
"credential_type_uri": request.credential_type_uri,
"governance_framework_uri": request.governance_framework_uri,
}
response = await self.unregister_issuer(**request_kwargs)
await stream.send_message(response)
async def __rpc_unregister_verifier(self, stream: grpclib.server.Stream) -> None:
request = await stream.recv_message()
request_kwargs = {
"did_uri": request.did_uri,
"x509_cert": request.x509_cert,
"presentation_type_uri": request.presentation_type_uri,
"governance_framework_uri": request.governance_framework_uri,
}
response = await self.unregister_verifier(**request_kwargs)
await stream.send_message(response)
async def __rpc_check_issuer_status(self, stream: grpclib.server.Stream) -> None:
request = await stream.recv_message()
request_kwargs = {
"governance_framework_uri": request.governance_framework_uri,
"did_uri": request.did_uri,
"x509_cert": request.x509_cert,
"credential_type_uri": request.credential_type_uri,
}
response = await self.check_issuer_status(**request_kwargs)
await stream.send_message(response)
async def __rpc_check_verifier_status(self, stream: grpclib.server.Stream) -> None:
request = await stream.recv_message()
request_kwargs = {
"governance_framework_uri": request.governance_framework_uri,
"did_uri": request.did_uri,
"x509_cert": request.x509_cert,
"presentation_type_uri": request.presentation_type_uri,
}
response = await self.check_verifier_status(**request_kwargs)
await stream.send_message(response)
async def __rpc_fetch_data(self, stream: grpclib.server.Stream) -> None:
request = await stream.recv_message()
request_kwargs = {
"governance_framework_uri": request.governance_framework_uri,
"query": request.query,
}
await self._call_rpc_handler_server_stream(
self.fetch_data,
stream,
request_kwargs,
)
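    # Route table used by the grpclib server: maps fully-qualified RPC names to
    # their handlers together with cardinality and request/response types.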
def __mapping__(self) -> Dict[str, grpclib.const.Handler]:
return {
"/services.trustregistry.v1.TrustRegistry/AddFramework": grpclib.const.Handler(
self.__rpc_add_framework,
grpclib.const.Cardinality.UNARY_UNARY,
AddFrameworkRequest,
AddFrameworkResponse,
),
"/services.trustregistry.v1.TrustRegistry/RemoveFramework": grpclib.const.Handler(
self.__rpc_remove_framework,
grpclib.const.Cardinality.UNARY_UNARY,
RemoveFrameworkRequest,
RemoveFrameworkResponse,
),
"/services.trustregistry.v1.TrustRegistry/SearchRegistry": grpclib.const.Handler(
self.__rpc_search_registry,
grpclib.const.Cardinality.UNARY_UNARY,
SearchRegistryRequest,
SearchRegistryResponse,
),
"/services.trustregistry.v1.TrustRegistry/RegisterIssuer": grpclib.const.Handler(
self.__rpc_register_issuer,
grpclib.const.Cardinality.UNARY_UNARY,
RegisterIssuerRequest,
RegisterIssuerResponse,
),
"/services.trustregistry.v1.TrustRegistry/RegisterVerifier": grpclib.const.Handler(
self.__rpc_register_verifier,
grpclib.const.Cardinality.UNARY_UNARY,
RegisterVerifierRequest,
RegisterVerifierResponse,
),
"/services.trustregistry.v1.TrustRegistry/UnregisterIssuer": grpclib.const.Handler(
self.__rpc_unregister_issuer,
grpclib.const.Cardinality.UNARY_UNARY,
UnregisterIssuerRequest,
UnregisterIssuerResponse,
),
"/services.trustregistry.v1.TrustRegistry/UnregisterVerifier": grpclib.const.Handler(
self.__rpc_unregister_verifier,
grpclib.const.Cardinality.UNARY_UNARY,
UnregisterVerifierRequest,
UnregisterVerifierResponse,
),
"/services.trustregistry.v1.TrustRegistry/CheckIssuerStatus": grpclib.const.Handler(
self.__rpc_check_issuer_status,
grpclib.const.Cardinality.UNARY_UNARY,
CheckIssuerStatusRequest,
CheckIssuerStatusResponse,
),
"/services.trustregistry.v1.TrustRegistry/CheckVerifierStatus": grpclib.const.Handler(
self.__rpc_check_verifier_status,
grpclib.const.Cardinality.UNARY_UNARY,
CheckVerifierStatusRequest,
CheckVerifierStatusResponse,
),
"/services.trustregistry.v1.TrustRegistry/FetchData": grpclib.const.Handler(
self.__rpc_fetch_data,
grpclib.const.Cardinality.UNARY_STREAM,
FetchDataRequest,
FetchDataResponse,
),
}
from ...common import v1 as __common_v1__
| 1.585938 | 2 |
modes.py | GreensCH/PYNQ-HLS-ImageProcessPlatform | 0 | 12797243 | <filename>modes.py
# MVT pattern
# Model layer
class User(object):
    def __init__(self, uname, pwd):
        self.uname = uname
        self.pwd = pwd
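# Example usage (illustrative): user = User("alice", "secret")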
| 2.375 | 2 |
metrics/compute.py | XinyiYS/CML-RewardDistribution | 1 | 12797244 | import numpy as np
import pickle
import os
from pathlib import Path
from metrics.class_imbalance import get_classes, class_proportion
from metrics.phi_div import average_dkl
from metrics.wasserstein import wasserstein_2
def compute_metrics(ds,
split,
inv_temp,
num_parties,
num_classes,
alpha,
lengthscale,
party_datasets,
party_labels,
reference_dataset,
candidate_datasets,
candidate_labels,
rewards,
deltas,
mus):
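    """Compute and pickle post-allocation metrics: average DKL and
    Wasserstein-2 distance of every party's dataset to the reference dataset
    (before and after appending its rewards), plus the class proportions of
    the rewards."""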
print("Computing metrics")
party_datasets_with_rewards = []
for i in range(num_parties):
party_datasets_with_rewards.append(np.concatenate([party_datasets[i], rewards[i]], axis=0))
print("Length of rewards: {}".format([len(r) for r in rewards]))
print("alpha:\n{}".format(alpha))
print("Calculating average DKLs before")
dkls_before = average_dkl(party_datasets, reference_dataset)
print(dkls_before)
print("Calculating average DKLs after")
dkls_after = average_dkl(party_datasets_with_rewards, reference_dataset)
print(dkls_after)
print("Correlation coefficient with alpha: \n{}".format(np.corrcoef(alpha, dkls_after)[0, 1]))
class_props = []
for result in rewards:
class_props.append(
class_proportion(get_classes(np.array(result), candidate_datasets[0], candidate_labels), num_classes))
print("Class proportions and class imbalance of rewards: {}".format(class_props))
print("Calculating Wasserstein-2 before")
wass_before = [wasserstein_2(party_datasets[i], reference_dataset) for i in range(num_parties)]
wass_after = [wasserstein_2(np.concatenate([party_datasets[i], np.array(rewards[i])], axis=0), reference_dataset)
for i in range(num_parties)]
print("Wasserstein-2 before: \n{}".format(wass_before))
print("Wasserstein-2 after: \n{}".format(wass_after))
print("Correlation coefficient with alpha: \n{}".format(np.corrcoef(alpha, wass_after)[0, 1]))
    # Save metrics
Path(os.getcwd() + '/data/metrics').mkdir(parents=True, exist_ok=True)
pickle.dump((party_datasets, party_labels, reference_dataset, candidate_datasets, candidate_labels,
rewards, deltas, mus, alpha, lengthscale, class_props, wass_before, wass_after, dkls_before, dkls_after),
open("data/metrics/metrics-{}-{}-{}.p".format(ds, split, inv_temp), 'wb'))
| 2.640625 | 3 |
self_supervised/loss/loss_intrin_selfsup.py | fgitmichael/SelfSupevisedSkillDiscovery | 0 | 12797245 | <filename>self_supervised/loss/loss_intrin_selfsup.py
import torch
import torch.nn.functional as F
from self_supervised.network.mode_latent_model import ModeLatentNetworkWithEncoder
import rlkit.torch.pytorch_util as ptu
def reconstruction_based_rewards(
mode_latent_model: ModeLatentNetworkWithEncoder,
obs_seq: torch.Tensor,
action_seq: torch.Tensor,
skill_seq: torch.Tensor,
) -> torch.Tensor:
"""
Args:
mode_latent_model : latent variable model
obs_seq : (N, S, obs_dim) tensor
action_seq : (N, S, action_dim) tensor
skill_seq : (N, S, skill_dim) tensor
    Returns:
        rewards             : (N, S, 1) tensor of intrinsic rewards
    The overall goal for the SAC agent is a high reconstruction error of the
    mode latent model, i.e. a low log-likelihood (ll). Transitions whose
    observations have a large gradient w.r.t. ll influence the reconstruction
    the most; they should be changed further and are therefore given a low
    reward.
"""
batch_dim = 0
seq_dim = -2
data_dim = -1
batch_size = obs_seq.size(batch_dim)
seq_len = obs_seq.size(seq_dim)
obs_seq.requires_grad = True
posterior, features_seq = mode_latent_model.sample_mode_posterior_with_features(
obs_seq=obs_seq
)
action_recon = mode_latent_model.reconstruct_action(
features_seq=features_seq,
mode_sample=posterior['samples']
)
ll = action_recon['dists'].log_prob(action_seq).mean(dim=0).sum()
mse = F.mse_loss(action_recon['samples'], action_seq)
ll.backward()
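    # Gradient of the log-likelihood w.r.t. each observation, summed over the
    # feature dimension: transitions that influence the reconstruction most get
    # the largest magnitudes and hence the lowest (most negative) reward below.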
gradients_per_transition = obs_seq.grad.sum(dim=data_dim, keepdim=True)
assert gradients_per_transition.shape == torch.Size((batch_size, seq_len, 1))
return -torch.abs(gradients_per_transition)
| 2.046875 | 2 |
django_request_user/__init__.py | aiakos/aiakos | 4 | 12797246 | <filename>django_request_user/__init__.py
default_app_config = 'django_request_user.apps.DjangoRequestUserConfig'
| 1.171875 | 1 |
src/orders/migrations/0003_auto_20200207_1825.py | saurabhsuryawanshi/ecommerce | 0 | 12797247 | # Generated by Django 2.0 on 2020-02-07 12:55
from django.db import migrations, models
import django.db.models.deletion
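# Adds optional (nullable) billing and shipping address foreign keys to Order.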
class Migration(migrations.Migration):
dependencies = [
('addresses', '0001_initial'),
('orders', '0002_auto_20200204_1253'),
]
operations = [
migrations.AddField(
model_name='order',
name='billing_address',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='addresses.Address'),
),
migrations.AddField(
model_name='order',
name='shipping_address',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='shipping_address', to='addresses.Address'),
),
]
| 1.664063 | 2 |
GUI/printer/Pillow-2.7.0/Tests/test_file_libtiff.py | y-gupta/rfid-auth-system | 5 | 12797248 | from helper import unittest, PillowTestCase, hopper, py3
import os
import io
from PIL import Image, TiffImagePlugin
class LibTiffTestCase(PillowTestCase):
def setUp(self):
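        # Skip the whole libtiff suite when Pillow was built without libtiff.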
codecs = dir(Image.core)
if "libtiff_encoder" not in codecs or "libtiff_decoder" not in codecs:
self.skipTest("tiff support not available")
def _assert_noerr(self, im):
"""Helper tests that assert basic sanity about the g4 tiff reading"""
# 1 bit
self.assertEqual(im.mode, "1")
# Does the data actually load
im.load()
im.getdata()
try:
self.assertEqual(im._compression, 'group4')
except:
print("No _compression")
print (dir(im))
# can we write it back out, in a different form.
out = self.tempfile("temp.png")
im.save(out)
class TestFileLibTiff(LibTiffTestCase):
def test_g4_tiff(self):
"""Test the ordinary file path load path"""
file = "Tests/images/hopper_g4_500.tif"
im = Image.open(file)
self.assertEqual(im.size, (500, 500))
self._assert_noerr(im)
def test_g4_large(self):
file = "Tests/images/pport_g4.tif"
im = Image.open(file)
self._assert_noerr(im)
def test_g4_tiff_file(self):
"""Testing the string load path"""
file = "Tests/images/hopper_g4_500.tif"
with open(file, 'rb') as f:
im = Image.open(f)
self.assertEqual(im.size, (500, 500))
self._assert_noerr(im)
def test_g4_tiff_bytesio(self):
"""Testing the stringio loading code path"""
file = "Tests/images/hopper_g4_500.tif"
s = io.BytesIO()
with open(file, 'rb') as f:
s.write(f.read())
s.seek(0)
im = Image.open(s)
self.assertEqual(im.size, (500, 500))
self._assert_noerr(im)
def test_g4_eq_png(self):
""" Checking that we're actually getting the data that we expect"""
png = Image.open('Tests/images/hopper_bw_500.png')
g4 = Image.open('Tests/images/hopper_g4_500.tif')
self.assert_image_equal(g4, png)
# see https://github.com/python-pillow/Pillow/issues/279
def test_g4_fillorder_eq_png(self):
""" Checking that we're actually getting the data that we expect"""
png = Image.open('Tests/images/g4-fillorder-test.png')
g4 = Image.open('Tests/images/g4-fillorder-test.tif')
self.assert_image_equal(g4, png)
def test_g4_write(self):
"""Checking to see that the saved image is the same as what we wrote"""
file = "Tests/images/hopper_g4_500.tif"
orig = Image.open(file)
out = self.tempfile("temp.tif")
rot = orig.transpose(Image.ROTATE_90)
self.assertEqual(rot.size, (500, 500))
rot.save(out)
reread = Image.open(out)
self.assertEqual(reread.size, (500, 500))
self._assert_noerr(reread)
self.assert_image_equal(reread, rot)
self.assertEqual(reread.info['compression'], 'group4')
self.assertEqual(reread.info['compression'], orig.info['compression'])
self.assertNotEqual(orig.tobytes(), reread.tobytes())
def test_adobe_deflate_tiff(self):
file = "Tests/images/tiff_adobe_deflate.tif"
im = Image.open(file)
self.assertEqual(im.mode, "RGB")
self.assertEqual(im.size, (278, 374))
self.assertEqual(
im.tile[0][:3], ('tiff_adobe_deflate', (0, 0, 278, 374), 0))
im.load()
def test_write_metadata(self):
""" Test metadata writing through libtiff """
img = Image.open('Tests/images/hopper_g4.tif')
f = self.tempfile('temp.tiff')
img.save(f, tiffinfo=img.tag)
loaded = Image.open(f)
original = img.tag.named()
reloaded = loaded.tag.named()
# PhotometricInterpretation is set from SAVE_INFO,
# not the original image.
ignored = [
'StripByteCounts', 'RowsPerStrip',
'PageNumber', 'PhotometricInterpretation']
for tag, value in reloaded.items():
if tag not in ignored:
if tag.endswith('Resolution'):
val = original[tag]
self.assert_almost_equal(
val[0][0]/val[0][1], value[0][0]/value[0][1],
msg="%s didn't roundtrip" % tag)
else:
self.assertEqual(
original[tag], value, "%s didn't roundtrip" % tag)
for tag, value in original.items():
if tag not in ignored:
if tag.endswith('Resolution'):
val = reloaded[tag]
self.assert_almost_equal(
val[0][0]/val[0][1], value[0][0]/value[0][1],
msg="%s didn't roundtrip" % tag)
else:
self.assertEqual(
value, reloaded[tag], "%s didn't roundtrip" % tag)
def test_g3_compression(self):
i = Image.open('Tests/images/hopper_g4_500.tif')
out = self.tempfile("temp.tif")
i.save(out, compression='group3')
reread = Image.open(out)
self.assertEqual(reread.info['compression'], 'group3')
self.assert_image_equal(reread, i)
def test_little_endian(self):
im = Image.open('Tests/images/16bit.deflate.tif')
self.assertEqual(im.getpixel((0, 0)), 480)
self.assertEqual(im.mode, 'I;16')
b = im.tobytes()
# Bytes are in image native order (little endian)
if py3:
self.assertEqual(b[0], ord(b'\xe0'))
self.assertEqual(b[1], ord(b'\x01'))
else:
self.assertEqual(b[0], b'\xe0')
self.assertEqual(b[1], b'\x01')
out = self.tempfile("temp.tif")
# out = "temp.le.tif"
im.save(out)
reread = Image.open(out)
self.assertEqual(reread.info['compression'], im.info['compression'])
self.assertEqual(reread.getpixel((0, 0)), 480)
# UNDONE - libtiff defaults to writing in native endian, so
# on big endian, we'll get back mode = 'I;16B' here.
def test_big_endian(self):
im = Image.open('Tests/images/16bit.MM.deflate.tif')
self.assertEqual(im.getpixel((0, 0)), 480)
self.assertEqual(im.mode, 'I;16B')
b = im.tobytes()
# Bytes are in image native order (big endian)
if py3:
self.assertEqual(b[0], ord(b'\x01'))
self.assertEqual(b[1], ord(b'\xe0'))
else:
self.assertEqual(b[0], b'\x01')
self.assertEqual(b[1], b'\xe0')
out = self.tempfile("temp.tif")
im.save(out)
reread = Image.open(out)
self.assertEqual(reread.info['compression'], im.info['compression'])
self.assertEqual(reread.getpixel((0, 0)), 480)
def test_g4_string_info(self):
"""Tests String data in info directory"""
file = "Tests/images/hopper_g4_500.tif"
orig = Image.open(file)
out = self.tempfile("temp.tif")
orig.tag[269] = 'temp.tif'
orig.save(out)
reread = Image.open(out)
self.assertEqual('temp.tif', reread.tag[269])
def test_12bit_rawmode(self):
""" Are we generating the same interpretation
of the image as Imagemagick is? """
TiffImagePlugin.READ_LIBTIFF = True
# Image.DEBUG = True
im = Image.open('Tests/images/12bit.cropped.tif')
im.load()
TiffImagePlugin.READ_LIBTIFF = False
# to make the target --
# convert 12bit.cropped.tif -depth 16 tmp.tif
# convert tmp.tif -evaluate RightShift 4 12in16bit2.tif
# imagemagick will auto scale so that a 12bit FFF is 16bit FFF0,
# so we need to unshift so that the integer values are the same.
im2 = Image.open('Tests/images/12in16bit.tif')
if Image.DEBUG:
print (im.getpixel((0, 0)))
print (im.getpixel((0, 1)))
print (im.getpixel((0, 2)))
print (im2.getpixel((0, 0)))
print (im2.getpixel((0, 1)))
print (im2.getpixel((0, 2)))
self.assert_image_equal(im, im2)
def test_blur(self):
# test case from irc, how to do blur on b/w image
# and save to compressed tif.
from PIL import ImageFilter
out = self.tempfile('temp.tif')
im = Image.open('Tests/images/pport_g4.tif')
im = im.convert('L')
im = im.filter(ImageFilter.GaussianBlur(4))
im.save(out, compression='tiff_adobe_deflate')
im2 = Image.open(out)
im2.load()
self.assert_image_equal(im, im2)
def test_compressions(self):
im = hopper('RGB')
out = self.tempfile('temp.tif')
for compression in ('packbits', 'tiff_lzw'):
im.save(out, compression=compression)
im2 = Image.open(out)
self.assert_image_equal(im, im2)
im.save(out, compression='jpeg')
im2 = Image.open(out)
self.assert_image_similar(im, im2, 30)
def test_cmyk_save(self):
im = hopper('CMYK')
out = self.tempfile('temp.tif')
im.save(out, compression='tiff_adobe_deflate')
im2 = Image.open(out)
self.assert_image_equal(im, im2)
def xtest_bw_compression_w_rgb(self):
""" This test passes, but when running all tests causes a failure due
to output on stderr from the error thrown by libtiff. We need to
capture that but not now"""
im = hopper('RGB')
out = self.tempfile('temp.tif')
self.assertRaises(
IOError, lambda: im.save(out, compression='tiff_ccitt'))
self.assertRaises(IOError, lambda: im.save(out, compression='group3'))
self.assertRaises(IOError, lambda: im.save(out, compression='group4'))
def test_fp_leak(self):
im = Image.open("Tests/images/hopper_g4_500.tif")
fn = im.fp.fileno()
os.fstat(fn)
im.load() # this should close it.
self.assertRaises(OSError, lambda: os.fstat(fn))
im = None # this should force even more closed.
self.assertRaises(OSError, lambda: os.fstat(fn))
self.assertRaises(OSError, lambda: os.close(fn))
def test_multipage(self):
# issue #862
TiffImagePlugin.READ_LIBTIFF = True
im = Image.open('Tests/images/multipage.tiff')
# file is a multipage tiff, 10x10 green, 10x10 red, 20x20 blue
im.seek(0)
self.assertEqual(im.size, (10, 10))
self.assertEqual(im.convert('RGB').getpixel((0, 0)), (0, 128, 0))
self.assertTrue(im.tag.next)
im.seek(1)
self.assertEqual(im.size, (10, 10))
self.assertEqual(im.convert('RGB').getpixel((0, 0)), (255, 0, 0))
self.assertTrue(im.tag.next)
im.seek(2)
self.assertFalse(im.tag.next)
self.assertEqual(im.size, (20, 20))
self.assertEqual(im.convert('RGB').getpixel((0, 0)), (0, 0, 255))
TiffImagePlugin.READ_LIBTIFF = False
def test__next(self):
TiffImagePlugin.READ_LIBTIFF = True
im = Image.open('Tests/images/hopper.tif')
self.assertFalse(im.tag.next)
im.load()
self.assertFalse(im.tag.next)
def test_4bit(self):
# Arrange
test_file = "Tests/images/hopper_gray_4bpp.tif"
original = hopper("L")
# Act
TiffImagePlugin.READ_LIBTIFF = True
im = Image.open(test_file)
TiffImagePlugin.READ_LIBTIFF = False
# Assert
self.assertEqual(im.size, (128, 128))
self.assertEqual(im.mode, "L")
self.assert_image_similar(im, original, 7.3)
def test_save_bytesio(self):
# PR 1011
# Test TIFF saving to io.BytesIO() object.
TiffImagePlugin.WRITE_LIBTIFF = True
TiffImagePlugin.READ_LIBTIFF = True
# Generate test image
pilim = hopper()
def save_bytesio(compression=None):
buffer_io = io.BytesIO()
pilim.save(buffer_io, format="tiff", compression=compression)
buffer_io.seek(0)
pilim_load = Image.open(buffer_io)
self.assert_image_similar(pilim, pilim_load, 0)
# save_bytesio()
save_bytesio('raw')
save_bytesio("packbits")
save_bytesio("tiff_lzw")
TiffImagePlugin.WRITE_LIBTIFF = False
TiffImagePlugin.READ_LIBTIFF = False
if __name__ == '__main__':
unittest.main()
# End of file
| 2.28125 | 2 |
tests/test_pyspark_hb_app_processor.py | ChinmaySKulkarni/hb-data | 0 | 12797249 | <filename>tests/test_pyspark_hb_app_processor.py<gh_stars>0
import unittest
from unittest.mock import Mock
from app_processor.pyspark_hb_app_processor import parse_configs, get_or_generate_spark_session
class TestAppProcessor(unittest.TestCase):
def test_parse_configs(self):
conf = parse_configs("test-conf.yml")
self.assertEqual('value1', conf['key1'])
self.assertEqual('value2', conf['key2'])
self.assertEqual(2, len(conf))
def test_get_or_generate_spark_session(self):
test_map = {'key1': 'val1', 'key2': 'val2'}
test_master_url = "TEST1"
test_app_name = "TEST_APP"
mock_spark_session_builder = Mock()
# we have to do this since we use the builder pattern when creating the SparkSession
attrs = {'master.return_value': mock_spark_session_builder, 'appName.return_value': mock_spark_session_builder}
mock_spark_session_builder.configure_mock(**attrs)
get_or_generate_spark_session(mock_spark_session_builder, test_map, test_master_url, test_app_name)
mock_spark_session_builder.master.assert_called_with(test_master_url)
mock_spark_session_builder.appName.assert_called_with(test_app_name)
self.assertEqual(len(test_map), mock_spark_session_builder.config.call_count)
mock_spark_session_builder.getOrCreate.assert_called_once()
if __name__ == '__main__':
unittest.main()
| 2.59375 | 3 |
main.py | saber-khakbiz/DataScrapting-IR-Post | 0 | 12797250 | from stdiomask import getpass
from cowsay import daemon, ghostbusters, kitty
from check_validation_ID_Post import check_validation
from driver_chrome import *
from DataScrapting import *
tracking_ID = getpass("Enter your post ID (24 digits): ")
check_validation(tracking_ID)
URL = f"https://tracking.post.ir/?id={tracking_ID}&client=app"
driver = driverChomre(URL)
page_source = PageSource(driver)
soup = mining(page_source)
warning = soup.warning()
security = soup.security()
if warning is None and security is None:
dst_lst = soup.FindAll()
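    # The scraped cells alternate label/value; pair them up and reverse the
    # list before printing (presumably so events appear oldest-first).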
new_lst = [(i.text, j.text) for i,j in zip(
[i for i in dst_lst if not dst_lst.index(i)%2],
[i for i in dst_lst if dst_lst.index(i)%2]
)
]
new_lst.reverse()
print("\n*******************************************************************")
for i,dst in enumerate(new_lst):
print(f"\t\t\t{i+1}\n")
print(f"{dst[0]}\n")
print(f"{dst[1]}")
print("========================================================================")
elif warning is not None:
ghostbusters(f"\n {warning.text}")
else:
daemon("از سمت شما ترافیک بالایی سمت سرویس های ما ارسال می شود! لطفا چند دقیقه دیگر امتحان کنید.") | 2.34375 | 2 |