repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (string, 19 classes) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---
hakril/PythonForWindows | samples/process/thread.py | 1 | 1631 | import sys
import os.path
sys.path.append(os.path.abspath(__file__ + "\..\.."))
import windows
import windows.native_exec.simple_x86 as x86
import windows.native_exec.simple_x64 as x64
print("Creating a notepad") ## Replaced calc.exe by notepad.exe cause of windows 10.
notepad = windows.utils.create_process(r"C:\windows\system32\notepad.exe")
# You don't need to do that in our case, but it's useful to know
print("Printing threads")
for th in notepad.threads:
print(" * {0}".format(th))
print("Writing some code in memory")
if notepad.bitness == 32:
code = "mov eax, 0x42424242; label :start ; jmp :start; nop; nop; ret"
rawcode = x86.assemble(code)
else:
code = "mov rax, 0x4242424242424242; label :start ; jmp :start; nop; nop; ret"
rawcode = x64.assemble(code)
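# Note: the assembled stub parks the thread in the ':start' infinite loop; the nop/nop/ret
# tail only runs once the instruction pointer is moved past the 2-byte jmp (ctx.pc += 2
# below), after which the thread returns with whatever is in EAX/RAX as its exit code.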
print("Allocating memory")
with notepad.allocated_memory(0x1000) as addr:
print("Writing code at <{0:#x}>".format(addr))
notepad.write_memory(addr, rawcode)
print("Creating thread on injected code")
t = notepad.create_thread(addr, 0x11223344)
print("New thread is {0}".format(t))
print("Suspending thread")
t.suspend()
ctx = t.context
print("Thread context is {0}".format(ctx))
print("Dumping thread context:")
ctx.dump()
print("Changing context")
ctx.pc += 2 # EIP / RIP
ctx.func_result = 0x12345678 # EAX / RAX
print("Setting new thread context")
t.set_context(ctx)
print("Resuming thread")
t.resume()
print("Waiting thread")
t.wait()
print("Thread has exit: {0}".format(t.is_exit))
print("Thread exit value = {0:#x}".format(t.exit_code))
| bsd-3-clause | -8,247,220,387,132,427,000 | 28.125 | 84 | 0.667075 | false |
start-jsk/jsk_apc | demos/grasp_data_generator/scripts/generate_evaluation_data.py | 1 | 3866 | import argparse
import cv2
import datetime
import json
import matplotlib.pyplot as plt
import numpy as np
import os
import os.path as osp
import PIL.Image
import PIL.ImageDraw
import yaml
from chainercv.utils.mask.mask_to_bbox import mask_to_bbox
from grasp_data_generator.visualizations \
import vis_occluded_instance_segmentation
filepath = osp.dirname(osp.realpath(__file__))
dataset_dir = osp.join(filepath, '../data/evaluation_data')
yamlpath = osp.join(filepath, '../yaml/dualarm_grasping_label_names.yaml')
def main(datadir, visualize):
time = datetime.datetime.now()
timestamp = time.strftime('%Y%m%d_%H%M%S')
with open(yamlpath, 'r') as yaml_f:
label_names = yaml.load(yaml_f)[1:]
for scene_d in sorted(os.listdir(datadir)):
scenedir = osp.join(datadir, scene_d)
ins_imgs = []
label = []
for time_d in sorted(os.listdir(scenedir))[::-1]:
timedir = osp.join(scenedir, time_d)
savedir = osp.join(dataset_dir, timestamp, time_d)
if not osp.exists(savedir):
os.makedirs(savedir)
rgbpath = osp.join(timedir, 'masked_rgb.png')
annopath = osp.join(timedir, 'masked_rgb.json')
rgb = cv2.imread(rgbpath)[:, :, ::-1]
with open(annopath, 'r') as json_f:
data = json.load(json_f)
H, W = data['imageHeight'], data['imageWidth']
msk = np.zeros((H, W), dtype=np.uint8)
msk = PIL.Image.fromarray(msk)
draw = PIL.ImageDraw.Draw(msk)
shape = data['shapes'][0]
label_name = shape['label']
points = shape['points']
xy = [tuple(point) for point in points]
draw.polygon(xy=xy, outline=1, fill=1)
msk = np.array(msk, dtype=np.int32)
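# Later annotations occlude earlier ones: wherever the new mask overlaps a previous
# instance mask those pixels are relabelled 2 (occluded), while still-visible pixels stay 1.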
next_ins_imgs = []
next_label = []
for ins_id, (ins_img, lbl) in enumerate(zip(ins_imgs, label)):
occ_msk = np.logical_and(ins_img > 0, msk > 0)
ins_img[occ_msk] = 2
if not np.any(ins_img == 1):
print('{} is occluded and no more visible'
.format(label_names[lbl]))
else:
next_ins_imgs.append(ins_img)
next_label.append(lbl)
ins_imgs = next_ins_imgs
label = next_label
ins_imgs.append(msk[None])
lbl = label_names.index(label_name)
label.append(lbl)
if visualize:
vis_rgb = rgb.transpose((2, 0, 1))
vis_ins_imgs = np.concatenate(
ins_imgs, axis=0).astype(np.int32)
bbox = mask_to_bbox(vis_ins_imgs > 0)
vis_occluded_instance_segmentation(
vis_rgb, vis_ins_imgs, label, bbox,
label_names=label_names)
plt.show()
rgb_savepath = osp.join(savedir, 'rgb.png')
ins_imgs_savepath = osp.join(savedir, 'ins_imgs.npz')
label_savepath = osp.join(savedir, 'labels.yaml')
cv2.imwrite(rgb_savepath, rgb)
np.savez_compressed(
ins_imgs_savepath,
ins_imgs=np.concatenate(ins_imgs, axis=0).astype(np.int32))
with open(label_savepath, 'w+') as yaml_save_f:
yaml_save_f.write(yaml.dump(label))
with open(osp.join(dataset_dir, timestamp, 'label_names.yaml'), 'w+') as f:
f.write(yaml.dump(label_names))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--visualize', '-v', action='store_true')
parser.add_argument('--data-dir', '-d')
args = parser.parse_args()
datadir = osp.join(filepath, args.data_dir)
main(datadir, args.visualize)
| bsd-3-clause | 2,756,688,793,698,856,400 | 34.46789 | 79 | 0.55432 | false |
scanny/python-pptx | pptx/oxml/chart/shared.py | 1 | 6097 | # encoding: utf-8
"""Shared oxml objects for charts."""
from __future__ import absolute_import, division, print_function, unicode_literals
from pptx.oxml import parse_xml
from pptx.oxml.ns import nsdecls
from pptx.oxml.simpletypes import (
ST_LayoutMode,
XsdBoolean,
XsdDouble,
XsdString,
XsdUnsignedInt,
)
from pptx.oxml.xmlchemy import (
BaseOxmlElement,
OptionalAttribute,
RequiredAttribute,
ZeroOrOne,
)
class CT_Boolean(BaseOxmlElement):
"""
Common complex type used for elements having a True/False value.
"""
val = OptionalAttribute("val", XsdBoolean, default=True)
class CT_Boolean_Explicit(BaseOxmlElement):
"""Always spells out the `val` attribute, e.g. `val=1`.
At least one boolean element is improperly interpreted by one or more
versions of PowerPoint. The `c:overlay` element is interpreted as |False|
when no `val` attribute is present, contrary to the behavior described in
the schema. A remedy for this is to interpret a missing `val` attribute
as |True| (consistent with the spec), but always write the attribute
whenever there is occasion for changing the element.
"""
_val = OptionalAttribute("val", XsdBoolean, default=True)
@property
def val(self):
return self._val
@val.setter
def val(self, value):
val_str = "1" if bool(value) is True else "0"
self.set("val", val_str)
class CT_Double(BaseOxmlElement):
"""
Used for floating point values.
"""
val = RequiredAttribute("val", XsdDouble)
class CT_Layout(BaseOxmlElement):
"""
``<c:layout>`` custom element class
"""
manualLayout = ZeroOrOne("c:manualLayout", successors=("c:extLst",))
@property
def horz_offset(self):
"""
The float value in ./c:manualLayout/c:x when
c:layout/c:manualLayout/c:xMode@val == "factor". 0.0 if that XPath
expression finds no match.
"""
manualLayout = self.manualLayout
if manualLayout is None:
return 0.0
return manualLayout.horz_offset
@horz_offset.setter
def horz_offset(self, offset):
"""
Set the value of ./c:manualLayout/c:x@val to *offset* and
./c:manualLayout/c:xMode@val to "factor". Remove ./c:manualLayout if
*offset* == 0.
"""
if offset == 0.0:
self._remove_manualLayout()
return
manualLayout = self.get_or_add_manualLayout()
manualLayout.horz_offset = offset
class CT_LayoutMode(BaseOxmlElement):
"""
Used for ``<c:xMode>``, ``<c:yMode>``, ``<c:wMode>``, and ``<c:hMode>``
child elements of CT_ManualLayout.
"""
val = OptionalAttribute("val", ST_LayoutMode, default=ST_LayoutMode.FACTOR)
class CT_ManualLayout(BaseOxmlElement):
"""
``<c:manualLayout>`` custom element class
"""
_tag_seq = (
"c:layoutTarget",
"c:xMode",
"c:yMode",
"c:wMode",
"c:hMode",
"c:x",
"c:y",
"c:w",
"c:h",
"c:extLst",
)
xMode = ZeroOrOne("c:xMode", successors=_tag_seq[2:])
x = ZeroOrOne("c:x", successors=_tag_seq[6:])
del _tag_seq
@property
def horz_offset(self):
"""
The float value in ./c:x@val when ./c:xMode@val == "factor". 0.0 when
./c:x is not present or ./c:xMode@val != "factor".
"""
x, xMode = self.x, self.xMode
if x is None or xMode is None or xMode.val != ST_LayoutMode.FACTOR:
return 0.0
return x.val
@horz_offset.setter
def horz_offset(self, offset):
"""
Set the value of ./c:x@val to *offset* and ./c:xMode@val to "factor".
"""
self.get_or_add_xMode().val = ST_LayoutMode.FACTOR
self.get_or_add_x().val = offset
class CT_NumFmt(BaseOxmlElement):
"""
``<c:numFmt>`` element specifying the formatting for number labels on a
tick mark or data point.
"""
formatCode = RequiredAttribute("formatCode", XsdString)
sourceLinked = OptionalAttribute("sourceLinked", XsdBoolean)
class CT_Title(BaseOxmlElement):
"""`c:title` custom element class."""
_tag_seq = ("c:tx", "c:layout", "c:overlay", "c:spPr", "c:txPr", "c:extLst")
tx = ZeroOrOne("c:tx", successors=_tag_seq[1:])
spPr = ZeroOrOne("c:spPr", successors=_tag_seq[4:])
del _tag_seq
def get_or_add_tx_rich(self):
"""Return `c:tx/c:rich`, newly created if not present.
Return the `c:rich` grandchild at `c:tx/c:rich`. Both the `c:tx` and
`c:rich` elements are created if not already present. Any
`c:tx/c:strRef` element is removed. (Such an element would contain
a cell reference for the axis title text in the chart's Excel
worksheet.)
"""
tx = self.get_or_add_tx()
tx._remove_strRef()
return tx.get_or_add_rich()
@property
def tx_rich(self):
"""Return `c:tx/c:rich` or |None| if not present."""
richs = self.xpath("c:tx/c:rich")
if not richs:
return None
return richs[0]
@staticmethod
def new_title():
"""Return "loose" `c:title` element containing default children."""
return parse_xml(
"<c:title %s>"
" <c:layout/>"
' <c:overlay val="0"/>'
"</c:title>" % nsdecls("c")
)
class CT_Tx(BaseOxmlElement):
"""
``<c:tx>`` element containing the text for a label on a data point or
other chart item.
"""
strRef = ZeroOrOne("c:strRef")
rich = ZeroOrOne("c:rich")
def _new_rich(self):
return parse_xml(
"<c:rich %s>"
" <a:bodyPr/>"
" <a:lstStyle/>"
" <a:p>"
" <a:pPr>"
" <a:defRPr/>"
" </a:pPr>"
" </a:p>"
"</c:rich>" % nsdecls("c", "a")
)
class CT_UnsignedInt(BaseOxmlElement):
"""
``<c:idx>`` element and others.
"""
val = RequiredAttribute("val", XsdUnsignedInt)
| mit | -5,779,250,776,741,064,000 | 26.21875 | 82 | 0.579957 | false |
quantopian/zipline | zipline/data/bundles/csvdir.py | 1 | 8039 | """
Module for building a complete dataset from local directory with csv files.
"""
import os
import sys
from logbook import Logger, StreamHandler
from numpy import empty
from pandas import DataFrame, read_csv, Index, Timedelta, NaT
from trading_calendars import register_calendar_alias
from zipline.utils.cli import maybe_show_progress
from . import core as bundles
handler = StreamHandler(sys.stdout, format_string=" | {record.message}")
logger = Logger(__name__)
logger.handlers.append(handler)
def csvdir_equities(tframes=None, csvdir=None):
"""
Generate an ingest function for custom data bundle
This function can be used in ~/.zipline/extension.py
to register bundle with custom parameters, e.g. with
a custom trading calendar.
Parameters
----------
tframes: tuple, optional
The data time frames, supported timeframes: 'daily' and 'minute'
csvdir : string, optional, default: CSVDIR environment variable
The path to the directory of this structure:
<directory>/<timeframe1>/<symbol1>.csv
<directory>/<timeframe1>/<symbol2>.csv
<directory>/<timeframe1>/<symbol3>.csv
<directory>/<timeframe2>/<symbol1>.csv
<directory>/<timeframe2>/<symbol2>.csv
<directory>/<timeframe2>/<symbol3>.csv
Returns
-------
ingest : callable
The bundle ingest function
Examples
--------
This code should be added to ~/.zipline/extension.py
.. code-block:: python
from zipline.data.bundles import csvdir_equities, register
register('custom-csvdir-bundle',
csvdir_equities(["daily", "minute"],
'/full/path/to/the/csvdir/directory'))
"""
return CSVDIRBundle(tframes, csvdir).ingest
class CSVDIRBundle:
"""
Wrapper class to call csvdir_bundle with provided
list of time frames and a path to the csvdir directory
"""
def __init__(self, tframes=None, csvdir=None):
self.tframes = tframes
self.csvdir = csvdir
def ingest(self,
environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
output_dir):
csvdir_bundle(environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
output_dir,
self.tframes,
self.csvdir)
@bundles.register("csvdir")
def csvdir_bundle(environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
output_dir,
tframes=None,
csvdir=None):
"""
Build a zipline data bundle from the directory with csv files.
"""
if not csvdir:
csvdir = environ.get('CSVDIR')
if not csvdir:
raise ValueError("CSVDIR environment variable is not set")
if not os.path.isdir(csvdir):
raise ValueError("%s is not a directory" % csvdir)
if not tframes:
tframes = set(["daily", "minute"]).intersection(os.listdir(csvdir))
if not tframes:
raise ValueError("'daily' and 'minute' directories "
"not found in '%s'" % csvdir)
divs_splits = {'divs': DataFrame(columns=['sid', 'amount',
'ex_date', 'record_date',
'declared_date', 'pay_date']),
'splits': DataFrame(columns=['sid', 'ratio',
'effective_date'])}
for tframe in tframes:
ddir = os.path.join(csvdir, tframe)
symbols = sorted(item.split('.csv')[0]
for item in os.listdir(ddir)
if '.csv' in item)
if not symbols:
raise ValueError("no <symbol>.csv* files found in %s" % ddir)
dtype = [('start_date', 'datetime64[ns]'),
('end_date', 'datetime64[ns]'),
('auto_close_date', 'datetime64[ns]'),
('symbol', 'object')]
metadata = DataFrame(empty(len(symbols), dtype=dtype))
if tframe == 'minute':
writer = minute_bar_writer
else:
writer = daily_bar_writer
writer.write(_pricing_iter(ddir, symbols, metadata,
divs_splits, show_progress),
show_progress=show_progress)
# Hardcode the exchange to "CSVDIR" for all assets and (elsewhere)
# register "CSVDIR" to resolve to the NYSE calendar, because these
# are all equities and thus can use the NYSE calendar.
metadata['exchange'] = "CSVDIR"
asset_db_writer.write(equities=metadata)
divs_splits['divs']['sid'] = divs_splits['divs']['sid'].astype(int)
divs_splits['splits']['sid'] = divs_splits['splits']['sid'].astype(int)
adjustment_writer.write(splits=divs_splits['splits'],
dividends=divs_splits['divs'])
def _pricing_iter(csvdir, symbols, metadata, divs_splits, show_progress):
with maybe_show_progress(symbols, show_progress,
label='Loading custom pricing data: ') as it:
files = os.listdir(csvdir)
for sid, symbol in enumerate(it):
logger.debug('%s: sid %s' % (symbol, sid))
try:
fname = [fname for fname in files
if '%s.csv' % symbol in fname][0]
except IndexError:
raise ValueError("%s.csv file is not in %s" % (symbol, csvdir))
dfr = read_csv(os.path.join(csvdir, fname),
parse_dates=[0],
infer_datetime_format=True,
index_col=0).sort_index()
start_date = dfr.index[0]
end_date = dfr.index[-1]
# The auto_close date is the day after the last trade.
ac_date = end_date + Timedelta(days=1)
metadata.iloc[sid] = start_date, end_date, ac_date, symbol
if 'split' in dfr.columns:
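# The adjustment writer expects split ratios expressed as price multipliers, hence the
# reciprocal of the raw split factor recorded in the CSV (assumption based on how
# zipline consumes the 'ratio' column).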
tmp = 1. / dfr[dfr['split'] != 1.0]['split']
split = DataFrame(data=tmp.index.tolist(),
columns=['effective_date'])
split['ratio'] = tmp.tolist()
split['sid'] = sid
splits = divs_splits['splits']
index = Index(range(splits.shape[0],
splits.shape[0] + split.shape[0]))
split.set_index(index, inplace=True)
divs_splits['splits'] = splits.append(split)
if 'dividend' in dfr.columns:
# ex_date amount sid record_date declared_date pay_date
tmp = dfr[dfr['dividend'] != 0.0]['dividend']
div = DataFrame(data=tmp.index.tolist(), columns=['ex_date'])
div['record_date'] = NaT
div['declared_date'] = NaT
div['pay_date'] = NaT
div['amount'] = tmp.tolist()
div['sid'] = sid
divs = divs_splits['divs']
ind = Index(range(divs.shape[0], divs.shape[0] + div.shape[0]))
div.set_index(ind, inplace=True)
divs_splits['divs'] = divs.append(div)
yield sid, dfr
register_calendar_alias("CSVDIR", "NYSE")
| apache-2.0 | -5,625,354,991,096,741,000 | 34.414097 | 79 | 0.524941 | false |
CLandauGWU/group_e | supp_funcs.py | 1 | 9214 | def zoneConcentration(shp_gdf, raw, pntLst, bufr=None):
from downloading_funcs import addr_shape, down_extract_zip
import pandas as pd
import geopandas as gpd
pnt = pntLst[0]
pnt_isCalled = pntLst[1]
for url in pnt:
if url[-3:] == 'zip':
pnt = url
assert isinstance(pnt, str) #Must extract a zipfile from pnt!
#Convenience assignment of projection type
crs='EPSG:4326'
#Extract and read points into memory
pnt = down_extract_zip(pnt)
ftr = gpd.read_file(pnt, crs=crs)
#Flag properties within distance "bufr" of featured locations
if not bufr:
bufr = 1/250 #Hard to say what a good buffer is.
assert isinstance(bufr, float) #buffer must be float!
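# The default 1/250 is in decimal degrees (the points are in EPSG:4326), i.e. roughly
# 350-450 m at Washington DC's latitude.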
#Frame up the buffer shapes
ftr.geometry = ftr.geometry.buffer(bufr)
ftr['flag'] = 1
if 'NAME' in ftr:
ftr.drop(['NAME'], axis=1, inplace=True)
#Frame up the raw address points data
pointy = raw[['NAME', 'Points', 'dummy_counter']]
pointy = gpd.GeoDataFrame(pointy, crs=ftr.crs,
geometry=pointy.Points)
pointy = gpd.sjoin(pointy, ftr,
how='left', op='intersects')
denom = pointy.groupby('NAME').sum()
denom = denom.dummy_counter
numer = pointy.groupby('NAME').sum()
numer = numer.flag
pct_ftr_coverage = pd.DataFrame(numer/denom)
pct_ftr_coverage.columns = [
pnt_isCalled
]
pct_ftr_coverage.fillna(0, inplace=True)
pct_ftr_coverage.crs = pointy.crs
shp_gdf = shp_gdf.merge(pct_ftr_coverage,
how="left", left_on='NAME', right_index=True)
del pct_ftr_coverage, raw, pointy, denom, numer
return shp_gdf
del shp_gdf
def pointInZone(shp_gdf, raw, zoneLst):
from downloading_funcs import addr_shape, down_extract_zip
import pandas as pd
import geopandas as gpd
zone = zoneLst[0]
zone_isCalled = zoneLst[1]
for url in zone:
if url[-3:] == 'zip':
zone = url
assert isinstance(zone, str) #Must extract a zipfile from pnt!
#Convenience assignment of projection type
crs='EPSG:4326'
#Extract and read points into memory
zone = down_extract_zip(zone)
zone = gpd.read_file(zone, crs=crs)
zone['flag'] = 1
if 'NAME' in zone:
zone.drop(['NAME'], axis=1, inplace=True)
#Frame up the raw address points data
pointy = raw[['NAME', 'Points', 'dummy_counter']]
pointy = gpd.GeoDataFrame(pointy, crs=zone.crs,
geometry=pointy.Points)
pointy = gpd.sjoin(pointy, zone,
how='left', op='intersects')
numer = pointy.groupby('NAME').sum()
numer = numer.flag
inzone = pointy.groupby('NAME').sum()
inzone = inzone.dummy_counter #This was calling denom.dummy_counter which is undeclared
flaginzone = pd.DataFrame(inzone)
flaginzone.columns = [
zone_isCalled
]
flaginzone.fillna(0, inplace=True)
flaginzone.crs = pointy.crs
shp_gdf = shp_gdf.merge(flaginzone,
how="left", left_on='NAME', right_index=True)
del flaginzone, pointy, inzone, numer, raw
return shp_gdf
del shp_gdf
def oecdGdpQs(shp_gdf, raw, url, i=None):
#This extracts U.S. GDP on a quarterly
#basis to the correct time unit of analysis
import numpy as np
import pandas as pd
import geopandas as gpd
if not 'Q_GDP' in shp_gdf.columns:
shp_gdf['Q_GDP'] = 0
Qbins = [[1,2,3],[4,5,6],[7,8,9],[10,11,12]]
yr = round(i)
q = round((i-yr)*100)
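# 'i' encodes the period as year + month/100 (e.g. 2016.07 -> July 2016), so the
# fractional part recovers the month before it is bucketed into a quarter below.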
assert q < 14
for ij in range(0, 4):
if q in Qbins[ij]:
q = 'Q'+ str(ij+1)
df = pd.read_csv(url[0], encoding='utf-8')
df = df[df.LOCATION == 'USA']
df[['q', 'yr']]= df.Time.str.split('-', expand=True)
df['q'] = df['q'].astype(str)
df['yr'] = df['yr'].astype(int)
df = df[(df.q == q)]
df = df[(df.yr == yr)]
i_gdp = list(df['Value'])
i_gdp = i_gdp[0]
shp_gdf['Q_GDP'][shp_gdf['month']==i] = i_gdp
return shp_gdf
del shp_gdf
def metro_prox(shp_gdf, raw, bufr=None):
#Flag properties within distance "bufr" of metro stations
from downloading_funcs import addr_shape, down_extract_zip
import pandas as pd
import geopandas as gpd
if not bufr:
bufr = 1/250 #Hard to say what a good buffer is.
assert isinstance(bufr, float) #buffer must be float!
#Frame up the metro buffer shapes
metro = down_extract_zip(
'https://opendata.arcgis.com/datasets/54018b7f06b943f2af278bbe415df1de_52.zip'
)
metro = gpd.read_file(metro, crs=shp_gdf.crs)
metro.geometry = metro.geometry.buffer(bufr)
metro['bymet'] = 1
metro.drop(['NAME'], axis=1, inplace=True)
#Frame up the raw address points data
pointy = raw[['NAME', 'Points', 'dummy_counter']]
pointy = gpd.GeoDataFrame(pointy, crs=metro.crs,
geometry=pointy.Points)
pointy = gpd.sjoin(pointy, metro,
how='left', op='intersects')
denom = pointy.groupby('NAME').sum()
denom = denom.dummy_counter
numer = pointy.groupby('NAME').sum()
numer = numer.bymet
pct_metro_coverage = pd.DataFrame(numer/denom)
pct_metro_coverage.columns = [
'pct_metro_coverage'
]
pct_metro_coverage.fillna(0, inplace=True)
pct_metro_coverage.crs = pointy.crs
shp_gdf = shp_gdf.merge(pct_metro_coverage,
how="left", left_on='NAME', right_index=True)
return shp_gdf
def clim_ingest(shp_gdf, raw, filepath, i=None):
#Adds monthly average, max and min temp, from National Airport
import numpy as np
import pandas as pd
import geopandas as gpd
#NOAA NCDC data mining is not worth implementing in this workflow
#Pull the data from disk
df = pd.read_csv(filepath)
#Only want National Airport
df = df[df.NAME == 'WASHINGTON REAGAN NATIONAL AIRPORT, VA US']
#Express the dates as datetime objects
df.DATE = pd.to_datetime(df.DATE)
yr = round(i)
month = round((i-yr)*100)
#Narrow it down to just the one row that matches "i"
df = df[df.DATE.dt.year == yr]
df = df[df.DATE.dt.month == month]
assert df.shape[0] == 1 #Only one row should match "i"
for tag in ['TAVG', 'TMAX', 'TMIN']: #iterate thru values we want
#Establishes the column if needed
if not tag in shp_gdf.columns:
shp_gdf[tag] = 0
#Extract the value of df[tag]
val = list(df[tag])
val = val[0]
#Assign the extracted value to all shp_gdf[tag] rows where 'month' is t-i
shp_gdf[tag][shp_gdf['month']==i] = val
return shp_gdf
del shp_gdf
def ITSPExtract(shp_gdf, raw, i=None):
"""Read in tax extract data, pare it down to month i,
spatial join on the shape geodataframe shp_gdf. Return shp_gdf.
"""
from downloading_funcs import addr_shape, down_extract_zip
import pandas as pd
from shapely.geometry import Point, Polygon
import geopandas as gpd
crs='EPSG:4326'
df = pd.read_csv('./data/Integrated_Tax_System_Public_Extract.csv')
df.SALEDATE = pd.to_datetime(df.SALEDATE)
yr = round(i)
month = round((i-yr)*100)
#Narrow it down to just the one row that matches "i"
df = df[df.SALEDATE.dt.year == yr]
df = df[df.SALEDATE.dt.month == month]
df = df.sort_values(['SALEDATE'])
df = df.reset_index(drop=True)
#ITSPE has no geospatial data, so we need to merge on addresspoints.
adr_df = pd.read_csv('./data/Address_Points.csv')
#Regex to clean off the regime codes and any other NaN.
adr_df['SSL'] = adr_df['SSL'].str.replace(r'\D+', '')
df['SSL'] = df['SSL'].str.replace(r'\D+', '')
adr_df = pd.merge(adr_df, df, how='inner', on=['SSL', 'SSL'], suffixes=['', '_tax'])
del df
adr_df['geometry'] = [
Point(xy) for xy in zip(
adr_df.LONGITUDE.apply(float), adr_df.LATITUDE.apply(float)
)
]
adr_df = gpd.GeoDataFrame(adr_df, crs=shp_gdf.crs, geometry=adr_df.geometry)
adr_df = adr_df.dropna(subset=['SALEPRICE'])
pointy = gpd.sjoin(shp_gdf, adr_df,
how='left', op='intersects')
pointy = pointy.dropna(subset=['SALEPRICE'])
sales = pointy.groupby('NAME').sum()
sales = sales.SALEPRICE
sales.columns = ['realPropertySaleVolume'
]
sales = pd.DataFrame(sales)
shp_gdf = shp_gdf.merge(sales,
how="left", left_on='NAME', right_index=True)
del sales, raw, pointy
return shp_gdf
del adr_df, shp_gdf | mit | 5,698,252,796,958,923,000 | 28.822006 | 91 | 0.572173 | false |
timole/sopernovus | dev/prod2csv.py | 1 | 7254 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re, sys, json
from pymongo import MongoClient
a = None
def parseColumnNames(f):
line = f.readline()
return line.split(',')
inputFilename = sys.argv[1]
inputFilenameCron = sys.argv[2]
outputfilename = sys.argv[3]
outputfilenameIds = sys.argv[4]
outputfilenameMunicipalityIds = sys.argv[5]
client = MongoClient('localhost', 27017)
db = client['lupapiste']
applications = db.applications
apps = {}
i = 0
for application in applications.find():
appId = application["_id"]
appFields = {}
if "primaryOperation" in application.keys():
if application["primaryOperation"] is not None and "name" in application["primaryOperation"].keys():
op = application["primaryOperation"]["name"]
else:
op = ""
appFields["primaryOperation"] = op
apps[appId] = appFields
if i % 1000 == 0:
sys.stdout.write('.')
sys.stdout.flush()
i = i + 1
f = open(inputFilename, "r")
fcron = open(inputFilenameCron, "r")
out = open(outputfilename, "w")
outIds = open(outputfilenameIds, "w")
outMunicipalityIds = open(outputfilenameMunicipalityIds, "w")
columnNames = parseColumnNames(f)
print("Column names")
i = 0
for col in columnNames:
print `i` + ": " + col
i = i + 1
out.write("datetime;applicationId;operation;municipalityId;userId;role;action;target\n")
ids = {}
idSeq = 100000
municipalityIds = {}
municipalityIdSeq = 1000
userIds = {}
userIdSeq = 100000
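# Real application/municipality/user ids are replaced with sequential surrogate ids so the
# published CSV is pseudonymised; the application and municipality mappings are written
# out separately at the end of the script.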
parsed = 0
errors = 0
for line in f:
fields = line.split(',')
datetime = re.match("\"(.*) .*", fields[1]).group(1)
# print "ts: " + datetime
rawMatch = re.match(".*? - (.*)\"", line)
js = rawMatch.group(1).replace("\"\"", "\"")
try:
data = json.loads(js)
except ValueError:
errors = errors + 1
#sys.stdout.write('E')
#print("Error parsing json")
continue
if data["type"] == "command":
# print(data)
action = data["action"]
if action == "login" or action == "register-user" or action == "update-user" or action == "update-user-organization" or action == "reset-password" or action == "users-for-datatables" or action == "impersonate-authority" or action == "frontend-error" or action == "browser-timing":
continue
# if not id in data["data"].keys():
# continue
id = ""
role = ""
userId = ""
try:
if action == "create-application":
id = ""
role = data["user"]["role"]
userId = data["user"]["id"]
else:
if action == "neighbor-response":
# print(data)
id = data["data"]["applicationId"]
role = "neighbor"
userId = data["data"]["neighborId"]
else:
userId = data["user"]["id"]
role = data["user"]["role"]
id = data["data"]["id"]
except:
#sys.stdout.write('i')
errors = errors + 1
#print("No id for " + data["action"])
target = ""
try:
if action == "update-doc":
target = data["data"]["updates"][0][0]
if action == "upload-attachment":
target = data["data"]["attachmentType"]["type-id"]
if action == "mark-seen":
target = data["data"]["type"]
if action == "approve-doc":
target = data["data"]["path"]
if action == "add-comment":
target = data["data"]["target"]["type"]
if action == "create-doc":
target = data["data"]["schemaName"]
if action == "invite-with-role":
target = data["data"]["role"]
except:
#sys.stdout.write('t')
target = ""
errors = errors + 1
if id != "":
if not id in ids.keys():
ids[id] = str(idSeq)
idSeq = idSeq + 1
pubId = ids[id]
else:
pubId = ""
pubMunicipalityId = ""
municipalityId = ""
if id != "":
if id is not None and len(id.split('-')) == 4:
municipalityId = id.split('-')[1]
if not municipalityId in municipalityIds.keys():
municipalityIds[municipalityId] = str(municipalityIdSeq)
municipalityIdSeq = municipalityIdSeq + 1
pubMunicipalityId = municipalityIds[municipalityId]
if not userId in userIds.keys():
userIds[userId] = str(userIdSeq)
userIdSeq = userIdSeq + 1
pubUserId = userIds[userId]
op = ""
if id in apps.keys():
app = apps[id]
op = app["primaryOperation"]
l = datetime + ";" + pubId + ";" + op + ";" + pubMunicipalityId + ";" + pubUserId + ";" + role + ";" + action + ";" + target + "\n"
# print(l)
out.write(l)
parsed = parsed + 1
if parsed % 1000 == 0:
sys.stdout.write('.')
sys.stdout.flush()
columnNames = parseColumnNames(fcron)
for line in fcron:
fields = line.split(',')
datetime = re.match("\"(.*) .*", fields[1]).group(1)
# print "ts: " + datetime
raw = fields[7]
rawMatch = re.match(".*?\[(LP.*?)\].*", raw)
id = rawMatch.group(1)
jsMatch = re.match(".*? - (.*)\"", line)
js = jsMatch.group(1).replace("\"\"", "\"")
try:
data = json.loads(js)
except ValueError:
errors = errors + 1
#sys.stdout.write('E')
#print("Error parsing json")
continue
if data["event"] == "Found new verdict":
if id != "":
if not id in ids.keys():
ids[id] = str(idSeq)
idSeq = idSeq + 1
pubId = ids[id]
else:
pubId = ""
op = ""
if id in apps.keys():
app = apps[id]
op = app["primaryOperation"]
l = datetime + ";" + pubId + ";" + op + ";" + pubMunicipalityId + ";" + pubUserId + ";" + role + ";" + action + ";" + target + "\n"
# print(l)
out.write(l)
# else:
#errors = errors + 1
#sys.stdout.write('N')
parsed = parsed + 1
if parsed % 10000 == 0:
sys.stdout.write('.')
sys.stdout.flush()
outIds.write("applicationId;originalApplicationId\n")
for idKey in ids.keys():
id = ids[idKey]
if id is None or idKey is None:
print "Error: None:"
print("id")
print(id)
print("idKey")
print(idKey)
else:
outIds.write(id + ";" + idKey + "\n")
outMunicipalityIds.write("municipalityId;originalMunicipalityId\n")
for idKey in municipalityIds.keys():
id = municipalityIds[idKey]
if id is None or idKey is None:
print "Error: None:"
print("id")
print(id)
print("idKey")
print(idKey)
else:
outMunicipalityIds.write(id + ";" + idKey + "\n")
outMunicipalityIds.close()
outIds.close()
out.close()
print
print "Errors: " + str(errors)
print "Parsed: " + str(parsed)
| mit | 6,015,316,454,777,002,000 | 26.793103 | 288 | 0.514613 | false |
StackVista/sts-agent-integrations-core | varnish/test_varnish.py | 1 | 8304 | # stdlib
import os
import re
import subprocess
import mock
from distutils.version import LooseVersion # pylint: disable=E0611,E0401
# 3p
from nose.plugins.attrib import attr
from nose.plugins.skip import SkipTest
# project
from tests.checks.common import AgentCheckTest, Fixtures
# This is a small extract of metrics from varnish. This is meant to test that
# the check gather metrics. This the check return everything from varnish
# without any selection/rename, their is no point in having a complete list.
COMMON_METRICS = [
'varnish.uptime', # metrics where the 'MAIN' prefix was removed
'varnish.sess_conn', # metrics where the 'MAIN' prefix was removed
'varnish.sess_drop', # metrics where the 'MAIN' prefix was removed
'varnish.sess_fail', # metrics where the 'MAIN' prefix was removed
'varnish.client_req_400', # metrics where the 'MAIN' prefix was removed
'varnish.client_req_417', # metrics where the 'MAIN' prefix was removed
'varnish.client_req', # metrics where the 'MAIN' prefix was removed
'varnish.cache_hit', # metrics where the 'MAIN' prefix was removed
'varnish.cache_hitpass', # metrics where the 'MAIN' prefix was removed
'varnish.cache_miss', # metrics where the 'MAIN' prefix was removed
'varnish.backend_conn', # metrics where the 'MAIN' prefix was removed
'varnish.backend_unhealthy', # metrics where the 'MAIN' prefix was removed
'varnish.backend_busy', # metrics where the 'MAIN' prefix was removed
'varnish.fetch_eof', # metrics where the 'MAIN' prefix was removed
'varnish.fetch_bad', # metrics where the 'MAIN' prefix was removed
'varnish.fetch_none', # metrics where the 'MAIN' prefix was removed
'varnish.fetch_1xx', # metrics where the 'MAIN' prefix was removed
'varnish.pools', # metrics where the 'MAIN' prefix was removed
'varnish.busy_sleep', # metrics where the 'MAIN' prefix was removed
'varnish.busy_wakeup', # metrics where the 'MAIN' prefix was removed
'varnish.busy_killed', # metrics where the 'MAIN' prefix was removed
'varnish.sess_queued', # metrics where the 'MAIN' prefix was removed
'varnish.sess_dropped', # metrics where the 'MAIN' prefix was removed
'varnish.n_object', # metrics where the 'MAIN' prefix was removed
'varnish.n_vampireobject', # metrics where the 'MAIN' prefix was removed
'varnish.n_vcl', # metrics where the 'MAIN' prefix was removed
'varnish.n_vcl_avail', # metrics where the 'MAIN' prefix was removed
'varnish.n_vcl_discard', # metrics where the 'MAIN' prefix was removed
'varnish.bans', # metrics where the 'MAIN' prefix was removed
'varnish.bans_completed', # metrics where the 'MAIN' prefix was removed
'varnish.bans_obj', # metrics where the 'MAIN' prefix was removed
'varnish.bans_req', # metrics where the 'MAIN' prefix was removed
'varnish.MGT.child_start',
'varnish.MGT.child_exit',
'varnish.MGT.child_stop',
'varnish.MEMPOOL.busyobj.live',
'varnish.MEMPOOL.busyobj.pool',
'varnish.MEMPOOL.busyobj.allocs',
'varnish.MEMPOOL.busyobj.frees',
'varnish.SMA.s0.c_req',
'varnish.SMA.s0.c_fail',
'varnish.SMA.Transient.c_req',
'varnish.SMA.Transient.c_fail',
'varnish.VBE.boot.default.req',
'varnish.LCK.backend.creat',
'varnish.LCK.backend_tcp.creat',
'varnish.LCK.ban.creat',
'varnish.LCK.ban.locks',
'varnish.LCK.busyobj.creat',
'varnish.LCK.mempool.creat',
'varnish.LCK.vbe.creat',
'varnish.LCK.vbe.destroy',
'varnish.LCK.vcl.creat',
'varnish.LCK.vcl.destroy',
'varnish.LCK.vcl.locks',
]
VARNISH_DEFAULT_VERSION = "4.1.7"
VARNISHADM_PATH = "varnishadm"
SECRETFILE_PATH = "secretfile"
FIXTURE_DIR = os.path.join(os.path.dirname(__file__), 'ci')
def debug_health_mock(*args, **kwargs):
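# Feed the recorded debug.health fixture to varnishadm invocations and the recorded
# varnishstat fixture to everything else, so no live varnish daemon is needed.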
if args[0][0] == VARNISHADM_PATH or args[0][1] == VARNISHADM_PATH:
return (Fixtures.read_file('debug_health_output', sdk_dir=FIXTURE_DIR), "", 0)
else:
return (Fixtures.read_file('stats_output', sdk_dir=FIXTURE_DIR), "", 0)
@attr(requires='varnish')
class VarnishCheckTest(AgentCheckTest):
CHECK_NAME = 'varnish'
def _get_varnish_stat_path(self):
varnish_version = os.environ.get('FLAVOR_VERSION', VARNISH_DEFAULT_VERSION).split('.', 1)[0]
return "%s/ci/varnishstat%s" % (os.path.dirname(os.path.abspath(__file__)), varnish_version)
def _get_config_by_version(self, name=None):
config = {
'instances': [{
'varnishstat': self._get_varnish_stat_path(),
'tags': ['cluster:webs']
}]
}
if name:
config['instances'][0]['name'] = name
return config
def test_check(self):
config = self._get_config_by_version()
self.run_check_twice(config)
for mname in COMMON_METRICS:
self.assertMetric(mname, count=1, tags=['cluster:webs', 'varnish_name:default'])
def test_inclusion_filter(self):
config = self._get_config_by_version()
config['instances'][0]['metrics_filter'] = ['SMA.*']
self.run_check_twice(config)
for mname in COMMON_METRICS:
if 'SMA.' in mname:
self.assertMetric(mname, count=1, tags=['cluster:webs', 'varnish_name:default'])
else:
self.assertMetric(mname, count=0, tags=['cluster:webs', 'varnish_name:default'])
def test_exclusion_filter(self):
# FIXME: Bugfix not released yet for version 5 so skip this test for this version:
# See https://github.com/varnishcache/varnish-cache/issues/2320
config = self._get_config_by_version()
config['instances'][0]['metrics_filter'] = ['^SMA.Transient.c_req']
self.load_check(config)
version, _ = self.check._get_version_info([self._get_varnish_stat_path()])
if str(version) == '5.0.0':
raise SkipTest('varnish bugfix for exclusion blob not released yet for version 5 so skip this test')
self.run_check_twice(config)
for mname in COMMON_METRICS:
if 'SMA.Transient.c_req' in mname:
self.assertMetric(mname, count=0, tags=['cluster:webs', 'varnish_name:default'])
elif 'varnish.uptime' not in mname:
self.assertMetric(mname, count=1, tags=['cluster:webs', 'varnish_name:default'])
@mock.patch('_varnish.geteuid')
@mock.patch('_varnish.Varnish._get_version_info')
@mock.patch('_varnish.get_subprocess_output', side_effect=debug_health_mock)
def test_command_line(self, mock_subprocess, mock_version, mock_geteuid):
mock_version.return_value = LooseVersion('4.0.0'), True
mock_geteuid.return_value = 0
config = self._get_config_by_version()
config['instances'][0]['varnishadm'] = VARNISHADM_PATH
config['instances'][0]['secretfile'] = SECRETFILE_PATH
self.run_check(config)
args, _ = mock_subprocess.call_args
self.assertEquals(args[0], [VARNISHADM_PATH, '-S', SECRETFILE_PATH, 'debug.health'])
self.assertServiceCheckOK("varnish.backend_healthy", tags=['backend:default'], count=1)
mock_version.return_value = LooseVersion('4.1.0'), True
mock_geteuid.return_value = 1
self.run_check(config)
args, _ = mock_subprocess.call_args
self.assertEquals(args[0], ['sudo', VARNISHADM_PATH, '-S', SECRETFILE_PATH, 'backend.list', '-p'])
# Because the docker image is in a different repository, we check that the
# version requested in FLAVOR_VERSION is the one running inside the
# container.
def test_version(self):
varnishstat = self._get_varnish_stat_path()
output = subprocess.check_output([varnishstat, "-V"])
res = re.search(r"varnish-(\d+\.\d\.\d)", output)
if res is None:
raise Exception("Could not retrieve varnish version from docker")
version = res.groups()[0]
self.assertEquals(version, os.environ.get('FLAVOR_VERSION', VARNISH_DEFAULT_VERSION))
| bsd-3-clause | -5,991,643,320,303,564,000 | 45.915254 | 112 | 0.639692 | false |
Azure/azure-sdk-for-python | sdk/conftest.py | 1 | 2456 | # --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
import os
import pytest
def pytest_configure(config):
# register an additional marker
config.addinivalue_line(
"markers", "live_test_only: mark test to be a live test only"
)
config.addinivalue_line(
"markers", "playback_test_only: mark test to be a playback test only"
)
def pytest_runtest_setup(item):
is_live_only_test_marked = bool([mark for mark in item.iter_markers(name="live_test_only")])
if is_live_only_test_marked:
from devtools_testutils import is_live
if not is_live():
pytest.skip("live test only")
is_playback_test_marked = bool([mark for mark in item.iter_markers(name="playback_test_only")])
if is_playback_test_marked:
from devtools_testutils import is_live
if is_live() and os.environ.get('AZURE_SKIP_LIVE_RECORDING', '').lower() == 'true':
pytest.skip("playback test only")
try:
from azure_devtools.scenario_tests import AbstractPreparer
@pytest.fixture(scope='session', autouse=True)
def clean_cached_resources():
yield
AbstractPreparer._perform_pending_deletes()
except ImportError:
pass | mit | 941,394,154,836,049,400 | 41.362069 | 99 | 0.67671 | false |
brainiak/brainiak | setup.py | 1 | 5433 | from distutils import sysconfig
from setuptools import setup, Extension, find_packages
from setuptools.command.build_ext import build_ext
import os
import site
import sys
import setuptools
from copy import deepcopy
assert sys.version_info >= (3, 5), (
"Please use Python version 3.5 or higher, "
"lower versions are not supported"
)
# https://github.com/pypa/pip/issues/7953#issuecomment-645133255
site.ENABLE_USER_SITE = "--user" in sys.argv[1:]
here = os.path.abspath(os.path.dirname(__file__))
# Get the long description from the README file
with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
ext_modules = [
Extension(
'brainiak.factoranalysis.tfa_extension',
['brainiak/factoranalysis/tfa_extension.cpp'],
),
Extension(
'brainiak.fcma.fcma_extension',
['brainiak/fcma/src/fcma_extension.cc'],
),
Extension(
'brainiak.fcma.cython_blas',
['brainiak/fcma/cython_blas.pyx'],
),
Extension(
'brainiak.eventseg._utils',
['brainiak/eventseg/_utils.pyx'],
),
]
# As of Python 3.6, CCompiler has a `has_flag` method.
# cf http://bugs.python.org/issue26689
def has_flag(compiler, flagname):
"""Return a boolean indicating whether a flag name is supported on
the specified compiler.
"""
import tempfile
with tempfile.NamedTemporaryFile('w', suffix='.cpp') as f:
f.write('int main (int argc, char **argv) { return 0; }')
try:
compiler.compile([f.name], extra_postargs=[flagname])
except setuptools.distutils.errors.CompileError:
return False
return True
def cpp_flag(compiler):
"""Return the -std=c++[11/14] compiler flag.
c++14 is preferred over c++11 (when it is available).
"""
if has_flag(compiler, '-std=c++14'):
return '-std=c++14'
elif has_flag(compiler, '-std=c++11'):
return '-std=c++11'
else:
raise RuntimeError('Unsupported compiler -- at least C++11 support '
'is needed!')
class BuildExt(build_ext):
"""A custom build extension for adding compiler-specific options."""
c_opts = {
'unix': ['-g0', '-fopenmp'],
}
# FIXME Workaround for using the Intel compiler by setting the CC env var
# Other uses of ICC (e.g., cc binary linked to icc) are not supported
if (('CC' in os.environ and 'icc' in os.environ['CC'])
or (sysconfig.get_config_var('CC') and 'icc' in sysconfig.get_config_var('CC'))):
c_opts['unix'] += ['-lirc', '-lintlc']
if sys.platform == 'darwin':
c_opts['unix'] += ['-stdlib=libc++', '-mmacosx-version-min=10.9',
'-ftemplate-depth-1024']
def build_extensions(self):
ct = self.compiler.compiler_type
opts = self.c_opts.get(ct, [])
if ct == 'unix':
opts.append('-DVERSION_INFO="%s"' %
self.distribution.get_version())
for ext in self.extensions:
ext.extra_compile_args = deepcopy(opts)
ext.extra_link_args = deepcopy(opts)
lang = ext.language or self.compiler.detect_language(ext.sources)
if lang == 'c++':
ext.extra_compile_args.append(cpp_flag(self.compiler))
ext.extra_link_args.append(cpp_flag(self.compiler))
build_ext.build_extensions(self)
def finalize_options(self):
super().finalize_options()
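# numpy and pybind11 are imported here rather than at module level so that
# setup_requires can install them before their include paths are needed.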
import numpy
import pybind11
self.include_dirs.extend([
numpy.get_include(),
pybind11.get_include(user=True),
pybind11.get_include(),
])
setup(
name='brainiak',
use_scm_version=True,
setup_requires=[
'cython',
# https://github.com/numpy/numpy/issues/14189
# https://github.com/brainiak/brainiak/issues/493
'numpy!=1.17.*,<1.20',
'pybind11>=1.7',
'scipy!=1.0.0',
'setuptools_scm',
],
install_requires=[
'cython',
# Previous versions fail of the Anaconda package fail on MacOS:
# https://travis-ci.org/brainiak/brainiak/jobs/545838666
'mpi4py>=3',
'nitime',
# https://github.com/numpy/numpy/issues/14189
# https://github.com/brainiak/brainiak/issues/493
'numpy!=1.17.*,<1.20',
'scikit-learn[alldeps]>=0.18',
# See https://github.com/scipy/scipy/pull/8082
'scipy!=1.0.0',
'statsmodels',
'pymanopt',
'theano>=1.0.4', # See https://github.com/Theano/Theano/pull/6671
'pybind11>=1.7',
'psutil',
'nibabel',
'joblib',
'wheel', # See https://github.com/astropy/astropy-helpers/issues/501
'pydicom',
],
extras_require={
'matnormal': [
'tensorflow',
'tensorflow_probability',
],
},
author='Princeton Neuroscience Institute and Intel Corporation',
author_email='[email protected]',
url='http://brainiak.org',
description='Brain Imaging Analysis Kit',
license='Apache 2',
keywords='neuroscience, algorithm, fMRI, distributed, scalable',
long_description=long_description,
ext_modules=ext_modules,
cmdclass={'build_ext': BuildExt},
packages=find_packages(),
include_package_data=True,
python_requires='>=3.5',
zip_safe=False,
)
| apache-2.0 | -1,000,340,240,095,070,700 | 30.77193 | 93 | 0.598196 | false |
ryfeus/lambda-packs | pytorch/source/torch/cuda/nccl.py | 1 | 1644 | import warnings
import torch.cuda
__all__ = ['all_reduce', 'reduce', 'broadcast', 'all_gather', 'reduce_scatter']
SUM = 0 # ncclRedOp_t
def is_available(tensors):
devices = set()
for tensor in tensors:
if tensor.is_sparse:
return False
if not tensor.is_contiguous():
return False
if not tensor.is_cuda:
return False
device = tensor.get_device()
if device in devices:
return False
devices.add(device)
if not hasattr(torch._C, '_nccl_all_reduce'):
warnings.warn('PyTorch is not compiled with NCCL support')
return False
return True
def version():
return torch._C._nccl_version()
def unique_id():
return torch._C._nccl_unique_id()
def init_rank(num_ranks, uid, rank):
return torch._C._nccl_init_rank(num_ranks, uid, rank)
def all_reduce(inputs, outputs=None, op=SUM, streams=None, comms=None):
if outputs is None:
outputs = inputs
torch._C._nccl_all_reduce(inputs, outputs, op, streams, comms)
def reduce(inputs, outputs=None, root=0, op=SUM, streams=None, comms=None):
if outputs is None:
outputs = inputs
torch._C._nccl_reduce(inputs, outputs, root, op, streams, comms)
def broadcast(inputs, root=0, streams=None, comms=None):
torch._C._nccl_broadcast(inputs, root, streams, comms)
def all_gather(inputs, outputs, streams=None, comms=None):
torch._C._nccl_all_gather(inputs, outputs, streams, comms)
def reduce_scatter(inputs, outputs, op=SUM, streams=None, comms=None):
torch._C._nccl_reduce_scatter(inputs, outputs, op, streams, comms)
| mit | -5,757,900,926,952,547,000 | 25.095238 | 79 | 0.646594 | false |
w0921444648/IT110_DJANGO_ATTEMPT2 | mysite/mysite/settings.py | 1 | 3236 | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '#m0orppq%#*(33*!j@3=tphdly3b^5xv5&xvy_q0(wx!q_oiw)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'polls.apps.PollsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'US/Eastern'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| gpl-2.0 | 2,290,409,260,022,670,000 | 25.52459 | 91 | 0.689431 | false |
paeschli/scons-builder | modules/sdl_check.py | 1 | 3517 | import os
from builder.btools import RegisterCustomTest
from builder.btools import AddConfigKey
from builder.btools import runCommand
from builder.bconfig import getAutoconfPrefix
def CheckSDL(ctx, write_config_h=False, add_to_compiler_env=False):
ctx.Message('Checking for SDL... ')
confprefix = getAutoconfPrefix(ctx.env)
platform = ctx.env['PLATFORM']
if platform == 'win32':
savedVars = ctx.env.SaveVars('LIBS')
if ctx.env.IsMSVC_Debug() :
sdllibs = ['SDLd', 'SDLmaind']
else:
sdllibs = ['SDL', 'SDLmain']
ctx.env.Append(LIBS = sdllibs)
ret = ctx.TryLink("""
#include <SDL.h>
int main(int argc, char **argv)
{
SDL_Init(SDL_INIT_VIDEO);
SDL_Quit();
return 0;
}
""", extension='.c')
ctx.env.RestoreVars(savedVars)
if ret:
ctx.env.DeclarePackage('sdl',
trigger_libs=['SDL', 'SDLmain'],
trigger_frameworks=['SDL'],
LIBS = sdllibs)
ctx.env.Replace(LIBSDL = sdllibs)
else:
ret, output = ctx.TryAction('sdl-config --version')
if ret:
vars = ctx.env.ParseFlags('!sdl-config --cflags --libs')
ctx.env.DeclarePackage('sdl', vars=vars,
trigger_libs=['SDL', 'SDLmain'],
trigger_frameworks=['SDL'])
if add_to_compiler_env:
ctx.env.Append(CPPPATH = vars.get('CPPPATH'))
ctx.env.Append(LIBPATH = vars.get('LIBPATH'))
ctx.env.Replace(LIBSDL = vars.get('LIBS'))
key = confprefix+'HAVE_LIBSDL'
if not (write_config_h and AddConfigKey(ctx, key, ret)):
# no config file is specified or it is disabled, use compiler options
if ret and add_to_compiler_env:
ctx.env.Append(CPPDEFINES=[key])
ctx.Result(ret)
return ret
RegisterCustomTest('CheckSDL', CheckSDL)
def CheckSDLTTF(ctx, write_config_h=False, add_to_compiler_env=False):
# We assume here that SDL is available
ctx.Message('Checking for SDL_ttf... ')
confprefix = getAutoconfPrefix(ctx.env)
platform = ctx.env['PLATFORM']
savedLIBS = ctx.env.SaveVars('LIBS')
sdllibs = ctx.env.get('LIBSDL', [])
sdlttflibs = ['SDL_ttf']
savedVars = None
if ctx.env.GetPackage('sdl'):
savedVars = ctx.env.RequirePackage('sdl')
ctx.env.Append(LIBS = sdlttflibs + sdllibs)
ret = ctx.TryLink("""
#include <SDL_ttf.h>
int main(int argc, char **argv) {
TTF_Init();
TTF_Quit();
return 0;
}
""", extension='.c')
ctx.env.RestoreVars(savedLIBS)
if savedVars:
ctx.env.RestoreVars(savedVars)
if ret:
ctx.env.DeclarePackage('sdlttf', vars={'LIBS' : sdlttflibs},
dependencies='sdl',
trigger_libs=['SDL_ttf'])
ctx.env.Replace(LIBSDLTTF=sdlttflibs)
key = confprefix+'HAVE_LIBSDL_TTF'
if not (write_config_h and AddConfigKey(ctx, key, ret)):
# no config file is specified or it is disabled, use compiler options
if ret and add_to_compiler_env:
ctx.env.Append(CPPDEFINES=[key])
ctx.Result(ret)
return ret
RegisterCustomTest('CheckSDLTTF', CheckSDLTTF)
| gpl-2.0 | -1,037,780,981,023,716,200 | 30.684685 | 77 | 0.553597 | false |
Tinche/cattrs | tests/metadata/test_roundtrips.py | 1 | 3631 | """Test both structuring and unstructuring."""
from typing import Optional, Union
import attr
import pytest
from attr import fields, make_class
from hypothesis import HealthCheck, assume, given, settings
from hypothesis.strategies import sampled_from
from cattr import Converter, UnstructureStrategy
from . import nested_typed_classes, simple_typed_attrs, simple_typed_classes
unstructure_strats = sampled_from(list(UnstructureStrategy))
@given(simple_typed_classes(), unstructure_strats)
def test_simple_roundtrip(cls_and_vals, strat):
"""
Simple classes with metadata can be unstructured and restructured.
"""
converter = Converter(unstruct_strat=strat)
cl, vals = cls_and_vals
inst = cl(*vals)
assert inst == converter.structure(converter.unstructure(inst), cl)
@given(simple_typed_attrs(defaults=True), unstructure_strats)
def test_simple_roundtrip_defaults(cls_and_vals, strat):
"""
Simple classes with metadata can be unstructured and restructured.
"""
a, _ = cls_and_vals
cl = make_class("HypClass", {"a": a})
converter = Converter(unstruct_strat=strat)
inst = cl()
assert converter.unstructure(
converter.structure({}, cl)
) == converter.unstructure(inst)
assert inst == converter.structure(converter.unstructure(inst), cl)
@given(nested_typed_classes(), unstructure_strats)
def test_nested_roundtrip(cls_and_vals, strat):
"""
Nested classes with metadata can be unstructured and restructured.
"""
converter = Converter(unstruct_strat=strat)
cl, vals = cls_and_vals
# Vals are a tuple, convert into a dictionary.
inst = cl(*vals)
assert inst == converter.structure(converter.unstructure(inst), cl)
@settings(
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.too_slow]
)
@given(
simple_typed_classes(defaults=False),
simple_typed_classes(defaults=False),
unstructure_strats,
)
def test_union_field_roundtrip(cl_and_vals_a, cl_and_vals_b, strat):
"""
Classes with union fields can be unstructured and structured.
"""
converter = Converter(unstruct_strat=strat)
cl_a, vals_a = cl_and_vals_a
cl_b, vals_b = cl_and_vals_b
a_field_names = {a.name for a in fields(cl_a)}
b_field_names = {a.name for a in fields(cl_b)}
assume(a_field_names)
assume(b_field_names)
common_names = a_field_names & b_field_names
assume(len(a_field_names) > len(common_names))
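# Require at least one attribute unique to cl_a; the dict-based union disambiguation
# relies on such a field to tell the two union members apart.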
@attr.s
class C(object):
a = attr.ib(type=Union[cl_a, cl_b])
inst = C(a=cl_a(*vals_a))
if strat is UnstructureStrategy.AS_DICT:
assert inst == converter.structure(converter.unstructure(inst), C)
else:
# Our disambiguation functions only support dictionaries for now.
with pytest.raises(ValueError):
converter.structure(converter.unstructure(inst), C)
def handler(obj, _):
return converter.structure(obj, cl_a)
converter.register_structure_hook(Union[cl_a, cl_b], handler)
assert inst == converter.structure(converter.unstructure(inst), C)
@given(simple_typed_classes(defaults=False))
def test_optional_field_roundtrip(cl_and_vals):
"""
Classes with optional fields can be unstructured and structured.
"""
converter = Converter()
cl, vals = cl_and_vals
@attr.s
class C(object):
a = attr.ib(type=Optional[cl])
inst = C(a=cl(*vals))
assert inst == converter.structure(converter.unstructure(inst), C)
inst = C(a=None)
unstructured = converter.unstructure(inst)
assert inst == converter.structure(unstructured, C)
| mit | -1,373,793,975,189,969,200 | 30.301724 | 77 | 0.689066 | false |
scienceopen/pybashutils | getIP_curl.py | 1 | 2046 | #!/usr/bin/env python
"""
gets interface IPv4 and IPv6 public addresses using libCURL
This uses the "reflector" method, which I feel is more reliable for finding public-facing IP addresses,
WITH THE CAVEAT that man-in-the-middle, etc. attacks can defeat the reflector method.
PyCurl does not have a context manager.
https://ident.me ipv6 and ipv4
https://api.ipify.org # ipv4 only
"""
from argparse import ArgumentParser
import ipaddress
import pycurl
from io import BytesIO
from typing import List, Union
length = 45 # http://stackoverflow.com/questions/166132/maximum-length-of-the-textual-representation-of-an-ipv6-address
URL = 'https://ident.me'
def main():
p = ArgumentParser()
p.add_argument('iface', help='network interface to use', nargs='?')
p.add_argument('--url', help='plain text server',
default='https://ident.me')
P = p.parse_args()
addr = getip(P.url, P.iface)
for a in addr:
print(a)
def getip(url: str = None, iface: str = None) -> List[Union[ipaddress.IPv4Address, ipaddress.IPv6Address]]:
if url is None:
url = URL
addrs = []
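# Query the reflector twice, forcing IPv4 and then IPv6 resolution, so that both public
# addresses are reported when the host is dual-stacked.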
for v in (pycurl.IPRESOLVE_V4, pycurl.IPRESOLVE_V6):
addr = _public_addr(v, url, iface)
if addr is not None:
addrs.append(addr)
return addrs
def _public_addr(v, url: str, iface: str = None) -> Union[None, ipaddress.IPv4Address, ipaddress.IPv6Address]:
B = BytesIO()
C = pycurl.Curl()
addr = None
# %% set options
C.setopt(pycurl.TIMEOUT, 3) # 1 second is too short for slow connections
if iface:
C.setopt(pycurl.INTERFACE, iface)
C.setopt(C.URL, url) # type: ignore
C.setopt(pycurl.IPRESOLVE, v)
C.setopt(C.WRITEDATA, B) # type: ignore
# %% get public IP address
ret = None
try:
C.perform()
ret = B.getvalue()
C.close()
except pycurl.error:
pass
# %% validate response
if ret:
addr = ipaddress.ip_address(ret.decode('utf8'))
return addr
if __name__ == '__main__':
main()
| bsd-3-clause | 2,727,372,804,540,858,000 | 26.28 | 120 | 0.646139 | false |
github-borat/cinder | cinder/volume/drivers/vmware/error_util.py | 1 | 2480 | # Copyright (c) 2013 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Exception classes and SOAP response error checking module.
"""
from cinder import exception
from cinder.openstack.common.gettextutils import _
NOT_AUTHENTICATED = 'NotAuthenticated'
class VimException(exception.CinderException):
"""The VIM Exception class."""
def __init__(self, msg):
exception.CinderException.__init__(self, msg)
class SessionOverLoadException(VimException):
"""Session Overload Exception."""
pass
class VimAttributeException(VimException):
"""VI Attribute Error."""
pass
class VimConnectionException(VimException):
"""Thrown when there is a connection problem."""
pass
class VimFaultException(VimException):
"""Exception thrown when there are faults during VIM API calls."""
def __init__(self, fault_list, msg):
super(VimFaultException, self).__init__(msg)
self.fault_list = fault_list
class VMwareDriverException(exception.CinderException):
"""Base class for all exceptions raised by the VMDK driver.
All exceptions raised by the vmdk driver should raise an exception
descended from this class as a root. This will allow the driver to
potentially trap problems related to its own internal configuration
before halting the cinder-volume node.
"""
message = _("VMware VMDK driver exception.")
class VMwaredriverConfigurationException(VMwareDriverException):
"""Base class for all configuration exceptions.
"""
message = _("VMware VMDK driver configuration error.")
class InvalidAdapterTypeException(VMwareDriverException):
"""Thrown when the disk adapter type is invalid."""
message = _("Invalid disk adapter type: %(invalid_type)s.")
class InvalidDiskTypeException(VMwareDriverException):
"""Thrown when the disk type is invalid."""
message = _("Invalid disk type: %(disk_type)s.")
| apache-2.0 | 2,716,783,250,249,688,000 | 30 | 78 | 0.722581 | false |
dbreen/connectfo | game/scenes/about.py | 1 | 1898 | import pygame
import random
from game import constants
from game.media import media
from game.scene import Scene
class Bouncy(object):
def __init__(self, surf):
self.surf = surf
self.pos_x = random.randrange(0, constants.SCREEN_WIDTH - surf.get_width())
self.pos_y = random.randrange(0, constants.SCREEN_HEIGHT - surf.get_height())
self.vel_x = random.randrange(2, 8)
self.vel_y = random.randrange(2, 8)
def update(self):
self.pos_x += self.vel_x
self.pos_y += self.vel_y
if self.pos_x < 0:
self.pos_x = 0
self.vel_x = -self.vel_x
if self.pos_y < 0:
self.pos_y = 0
self.vel_y = -self.vel_y
if self.pos_x + self.surf.get_width() >= constants.SCREEN_WIDTH:
self.pos_x = constants.SCREEN_WIDTH - self.surf.get_width() - 1
self.vel_x = -self.vel_x
if self.pos_y + self.surf.get_height() >= constants.SCREEN_HEIGHT:
self.pos_y = constants.SCREEN_HEIGHT - self.surf.get_height() - 1
self.vel_y = -self.vel_y
def draw(self, screen):
screen.blit(self.surf, (self.pos_x, self.pos_y))
class AboutScene(Scene):
def load(self):
font = pygame.font.Font(constants.MENU_FONT, 36)
self.bouncers = [Bouncy(font.render("Dan is better than Matt!!", True, constants.WHITE))]
for i in range(0, 5):
self.bouncers.append(Bouncy(media[random.choice(['img.dragon1', 'img.dragon2'])]))
def render(self, screen):
screen.fill(constants.BLACK)
for bouncer in self.bouncers:
bouncer.update()
bouncer.draw(screen)
def do_event(self, event):
if event.type == pygame.KEYUP:
if event.key == pygame.K_ESCAPE:
self.manager.switch_scene('main')
| mit | 2,367,316,040,705,137,000 | 33.811321 | 97 | 0.570601 | false |
diogen737/neuro-nets | cnn_tpe/data_preproc.py | 1 | 8311 | import os, os.path
import math, random
import numpy as np
import utils
from skimage import io
from tqdm import tqdm
from itertools import repeat
from face_preproc import FaceDetector, FaceAligner, clip_to_range
DEV_RATIO = 0.04
TEST_RATIO = 0.06
IMG_SIZE = 217
BORDER = 5
DATA_DIR = '../data/cnn_tpe/'
TRAIN_DIR = DATA_DIR + 'train/'
TEST_DIR = DATA_DIR + 'test/'
DEV_DIR = DATA_DIR + 'dev/'
PROTOCOL_DIR = DATA_DIR
RAW_DATA_DIR = '../data/faces/'
class DataOrganizer:
def __init__(self, ):
self.face_detector = FaceDetector()
self.face_aligner = FaceAligner('../data/cnn_tpe/shape_predictor_68_face_landmarks.dat',
'../data/cnn_tpe/face_template.npy')
def __list_data(self, data_path):
train_dir = os.path.join(data_path, 'train')
test_dir = os.path.join(data_path, 'test')
dev_dir = os.path.join(data_path, 'dev')
train = []
test = []
subjects = utils.get_folders(train_dir)
subjects.sort()
for subj in subjects:
subj_train_dir = os.path.join(train_dir, subj)
subj_test_dir = os.path.join(test_dir, subj)
train_files = utils.get_images(os.path.join(train_dir, subj))
test_files = utils.get_images(os.path.join(test_dir, subj))
train_files.sort()
test_files.sort()
train_files = list(map(lambda s: os.path.join(subj_train_dir, s), train_files))
test_files = list(map(lambda s: os.path.join(subj_test_dir, s), test_files))
subj = int(subj.split('_')[1])
train.extend(zip(train_files, repeat(subj)))
test.extend(zip(test_files, repeat(subj)))
dev = utils.get_images(dev_dir)
dev.sort(key=lambda s: int(os.path.splitext(s)[0]))
dev = list(map(lambda s: os.path.join(dev_dir, s), dev))
dev = list(zip(dev, repeat(-1)))
return train, test, dev
def __load_file(self, filename, imsize=96, border=0):
total_size = imsize + 2 * border
img = io.imread(filename)
faces = self.face_detector.detect_faces(img, top=1)
if len(faces) == 0:
return None
_, _, face = self.face_aligner.align_face(img, faces[0], dim=imsize, border=border)
face = np.reshape(face, (1, total_size, total_size, 3))
face = clip_to_range(face)
del img
return face.astype(np.float32)
def __get_subjects(self, entries):
subjects = list(set(map(lambda e: e.subject, entries)))
n_subjects = len(subjects)
n_dev_subjects = max(1, math.ceil(n_subjects * DEV_RATIO))
random.shuffle(subjects)
return subjects[:n_dev_subjects], subjects[n_dev_subjects:]
def __load_data(self, data, not_found_policy='throw_away', available_subjects=None, imsize=96, border=0):
n_data = len(data)
total_size = imsize + 2 * border
images = np.zeros((n_data, total_size, total_size, 3), dtype=np.float32)
labels = np.zeros((n_data,), dtype=np.int)
if available_subjects is not None:
available_subjects = set(available_subjects)
black = np.zeros((1, total_size, total_size, 3), dtype=np.float32)
face_not_found_on = []
img_ptr = 0
for filename, subject in tqdm(data):
if available_subjects is not None:
if subject not in available_subjects:
continue
face_img = self.__load_file(filename, imsize=imsize, border=border)
if face_img is None:
face_not_found_on.append(filename)
if not_found_policy == 'throw_away':
continue
elif not_found_policy == 'replace_black':
face_img = black
else:
raise Exception('Face not found on {}'.format(filename))
images[img_ptr] = face_img
labels[img_ptr] = subject
img_ptr += 1
images = images[:img_ptr]
labels = labels[:img_ptr]
if len(face_not_found_on) > 0:
print('[Warning] Faces was not found on:')
for f in face_not_found_on:
print(' - {}'.format(f))
return images, labels
def organize_data(self):
for dir in {DATA_DIR, TRAIN_DIR, TEST_DIR, DEV_DIR}:
if not os.path.exists(dir):
print('Creating {}'.format(dir))
os.makedirs(dir)
entries = utils.grab_db_plain(RAW_DATA_DIR, '-')
subjects_dev, subjects = self.__get_subjects(entries)
n_files = len(entries)
n_subjects = len(subjects)
n_subjects_dev = len(subjects_dev)
print('-' * 10)
print('Total files: {}'.format(n_files))
print('Total subjects: {}'.format(n_subjects + n_subjects_dev))
print('-' * 10)
print('Taking for development set {:.2f}% of subjects'.format(DEV_RATIO * 100))
print('Number of subjects for development set: {}'.format(n_subjects_dev))
print('Number of subjects for train/test set: {}'.format(n_subjects))
dev_files = []
protocol_data = []
for subj in subjects_dev:
subj_entries = list(map(lambda e: e.path, filter(lambda e: e.subject == subj, entries)))
n_subj_entries = len(subj_entries)
dev_files.extend(subj_entries)
protocol_data.append(n_subj_entries)
print('-' * 10)
n_dev_files = len(dev_files)
protocol = np.zeros((n_dev_files, n_dev_files), dtype=np.bool)
k = 0
for i in protocol_data:
protocol[k:k + i, k:k + i] = 1
k += i
np.save(PROTOCOL_DIR + 'dev_protocol', protocol)
n_test_files = 0
h = 0
print('Copying files...')
utils.copy_files(dev_files, DEV_DIR)
for subj in subjects:
subj_name = 'subject_' + str(h)
h += 1
subj_entries = list(map(lambda e: e.path, filter(lambda e: e.subject == subj, entries)))
n_subj_entries = len(subj_entries)
random.shuffle(subj_entries)
for_test = 0
if n_subj_entries > 1:
for_test = max(1, math.ceil(n_subj_entries * TEST_RATIO))
n_test_files += for_test
entries_test, entries_train = subj_entries[:for_test], subj_entries[for_test:]
subj_train_dir = os.path.join(TRAIN_DIR, subj_name)
subj_test_dir = os.path.join(TEST_DIR, subj_name)
utils.copy_files(entries_train, subj_train_dir)
utils.copy_files(entries_test, subj_test_dir)
print('Dev files: {}'.format(len(dev_files)))
print('Test files: {}'.format(n_test_files))
print('Train files: {}'.format(n_files - n_test_files - n_dev_files))
print('Done!')
def preprocess_data(self):
train, test, dev = self.__list_data('../data/cnn_tpe')
print('Loading train files...')
train_x, train_y = self.__load_data(train, imsize=IMG_SIZE, border=BORDER, not_found_policy='throw_away')
del train
mean = train_x.mean(axis=0)
stddev = train_x.std(axis=0)
np.save('../data/cnn_tpe/mean', mean)
np.save('../data/cnn_tpe/stddev', stddev)
train_x -= mean
train_x /= stddev
np.save('../data/cnn_tpe/train_x', train_x)
np.save('../data/cnn_tpe/train_y', train_y)
del train_x
print('Loading test files...')
test_x, test_y = self.__load_data(test, imsize=IMG_SIZE, border=BORDER, not_found_policy='throw_away', available_subjects=train_y)
del test, train_y
test_x -= mean
test_x /= stddev
np.save('../data/cnn_tpe/test_x', test_x)
np.save('../data/cnn_tpe/test_y', test_y)
del test_x, test_y
print('Loading dev files...')
dev_x, _ = self.__load_data(dev, imsize=IMG_SIZE, border=BORDER, not_found_policy='replace_black')
del dev
dev_x -= mean
dev_x /= stddev
np.save('../data/cnn_tpe/dev_x', dev_x)
def main():
organizer = DataOrganizer()
organizer.organize_data()
organizer.preprocess_data()
if __name__ == '__main__':
main() | mit | -120,674,693,867,939,920 | 31.46875 | 138 | 0.561425 | false |
shermp/KoboPatchGUI | PatchEdit.py | 1 | 7070 | import re
import io, os, sys
def iterDic(dic):
"""
Return a Python 2/3 compatible iterator over a dictionary's items.
:param dic: dictionary to iterate over
:return: dict items view (Python 3) or viewitems iterator (Python 2)
"""
if sys.version_info.major == 2:
return dic.viewitems()
else:
return dic.items()
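# Usage sketch (not in the original file): iterDic is used wherever a dict has
# to be iterated the same way under Python 2 and Python 3, e.g.
#
#   for filename, contents in iterDic(file_dic):
#       do_something(filename, contents)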
class Patch:
"""
Create an object that contains information about each individual patch
"""
def __init__(self, name, status, group, patch_file):
self.name = name
self.status = status
self.help_text = ''
self.group = group
self.patch_file = patch_file
self.patch_replacements = []
def get_patch_replacements(self, data):
"""
Generate a list of possible strings for replacement. Using the data generated here has not yet been
implemented, and may never be implemented.
:param data:
:return:
"""
start = 0
find = re.compile(r'^#{0,1}replace_.+?$')
for (index, line) in enumerate(data):
if 'patch_name = '+self.name in line:
start = index
break
for line in data[start:]:
if '</Patch>' in line:
break
m = find.search(line)
if m:
self.patch_replacements.append(m.group())
def get_help_text(self, text):
"""
From the text in the patch file, search for appropriate text to be used for help on what the patch does.
:param text:
:return:
"""
search_str = r'<Patch>(\npatch_name = ' + re.escape(self.name) + r'.+?)</Patch>'
search_str = search_str.replace('\\`', '`')
re_match_help_txt = re.search(search_str, text, flags=re.DOTALL | re.UNICODE)
text = re_match_help_txt.group(1)
if '##' not in text:
self.help_text = text
else:
help_t = ''
help_patt = r'## (.+?\n)'
help_t_match = re.finditer(help_patt, text, flags=re.DOTALL | re.UNICODE)
for match in help_t_match:
help_t += match.group(1)
self.help_text = help_t
def gen_patch_obj_list(fn, patch_text):
"""
From the text in the patch files, generate patch objects and store them in a list
:param fn:
:param patch_text:
:return:
"""
patch_obj_list = []
search_pattern = r'<Patch>.+?patch_name = (`[^`]+`).+?patch_enable = (`[^`]+`).+?</Patch>'
re_find_attrib = re.compile(search_pattern, flags=re.DOTALL | re.UNICODE)
attrib_match_list = re_find_attrib.finditer(patch_text)
for match in attrib_match_list:
mut_ex_group = ''
group_pattern = r'patch_group = (`[^`]+`)'
group_match = re.search(group_pattern, match.group(0), flags=re.DOTALL | re.UNICODE)
if group_match:
mut_ex_group = group_match.group(1)
patch_obj = Patch(name=match.group(1), status=match.group(2), group=mut_ex_group, patch_file=fn)
patch_obj.get_help_text(patch_text)
patch_obj_list.append(patch_obj)
return patch_obj_list
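# For reference, a minimal patch entry that the regexes above will match.
# This is an illustrative sketch only -- real Kobo patch files contain more
# fields and the values here are invented:
#
#   <Patch>
#   patch_name = `Example patch`
#   patch_enable = `no`
#   patch_group = `Example group`
#   ## Lines starting with a double hash become the help text.
#   </Patch>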
def read_patch_files(fn_dic):
"""
Read the patch files into a dictionary
:param fn_dic:
:return:
"""
error_msg = None
for fn in fn_dic:
try:
with io.open(os.path.normpath(fn), 'r', encoding='utf8') as patch_file:
fn_dic[fn] = ''
for line in patch_file:
fn_dic[fn] += line
except EnvironmentError:
error_msg = 'There was a problem reading the file.\n\nCheck that you have permission to read the file.'
return fn_dic, error_msg
def apply_changes(patch_obj_dic, file_dic):
"""
If all checks are passed, write the changes to the patch file. Note that the original file is overwritten
:return:
"""
success = False
error_title = None
error_msg = None
# Checks that mutually exclusive options have not been set together. If they have, alert the user,
# and abort before writing to file(s)
for (fn, patch_obj_list) in iterDic(patch_obj_dic):
mut_exl_dic = {}
for obj in patch_obj_list:
if obj.group and 'yes' in obj.status:
if obj.group not in mut_exl_dic:
mut_exl_dic[obj.group] = []
mut_exl_dic[obj.group].append(obj.name)
else:
mut_exl_dic[obj.group].append(obj.name)
for (group, names) in iterDic(mut_exl_dic):
if len(names) > 1:
name_str = '\n'
for name in names:
name_str += ' ' + name + '\n'
error_title = 'Mutually Exclusive Options Detected!'
error_msg = 'The following options cannot be enabled together: \n' + name_str + \
fn + ' was not written.'
success = False
return success, error_title, error_msg
# If checks passed, prepare and then write data to file(s)
for (fn, patch_obj_list) in iterDic(patch_obj_dic):
for obj in patch_obj_list:
file_dic = prep_for_writing(fn, obj, file_dic)
r_p_f_success, error_title, error_msg = write_patch_files(fn, file_dic)
if not r_p_f_success:
success = False
return success, error_title, error_msg
success = True
return success, error_title, error_msg
def prep_for_writing(patch_fn, patch_object, file_dic):
"""
Using regex, search and replace the patch enabled/disabled status in the patch text.
:param patch_fn:
:param patch_object:
:return:
"""
search_pattern = r'(patch_name = ' + re.escape(patch_object.name) + r'.+?patch_enable = )' + \
r'`.+?`'
search_pattern = search_pattern.replace('\\`', '`')
search_replace = r'\1' + patch_object.status
s = re.sub(search_pattern, search_replace, file_dic[patch_fn], flags=re.DOTALL | re.UNICODE)
file_dic[patch_fn] = s
return file_dic
def write_patch_files(fn, file_dic):
"""
Write the changes to file(s)
:param fn:
:return:
"""
success = False
error_title = None
error_msg = None
try:
with io.open(os.path.normpath(fn), 'w', encoding='utf8') as patch_file:
patch_file.write(file_dic[fn])
success = True
return success, error_title, error_msg
except EnvironmentError:
error_title = 'File Error!'
error_msg = 'There was a problem writing to the following file:\n\n' + \
fn + '\n\n' \
'Check that the file isn\'t in use by another program, and that you have write ' \
'permissions to the file and folder'
return success, error_title, error_msg
def calc_grid_pos(pos, cols):
"""
A little function to calculate the grid position of checkboxes
:param pos:
:param cols:
:return:
"""
calc_row = pos // cols
calc_col = pos % cols
return calc_row, calc_col
def edit_repl_opts(event, ext_pos, pos, patch_obj):
pass | mit | -6,138,971,067,062,184,000 | 32.995192 | 115 | 0.563366 | false |
edeposit/edeposit.amqp.storage | src/edeposit/amqp/storage/storage_handler.py | 1 | 7204 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
import transaction
from BTrees.OOBTree import OOBTree
from BTrees.OOBTree import OOTreeSet
from BTrees.OOBTree import intersection
from zeo_connector import transaction_manager
from zeo_connector.examples import DatabaseHandler
import settings
# Exceptions ==================================================================
class InvalidType(Exception):
"""
Raised in case the object you are trying to store doesn't have the
required interface.
"""
class UnindexableObject(Exception):
"""
Raised in case the object doesn't have at least one attribute set.
"""
# Functions & classes =========================================================
class StorageHandler(DatabaseHandler):
"""
Object database with indexing by the object attributes.
Each stored object is required to have the following properties:
- indexes (list of strings)
- project_key (string)
For example::
class Person(Persistent):
def __init__(self, name, surname):
self.name = name
self.surname = surname
@property
def indexes(self):
return [
"name",
"surname",
]
@property
def project_key(self):
return PROJECT_KEY
Note:
I suggest using properties, because that way the values are not stored
in the database, but constructed on request by the property methods.
"""
def __init__(self, project_key, conf_path=settings.ZEO_CLIENT_PATH):
"""
Constructor.
Args:
project_key (str): Project key which is used for the root of DB.
conf_path (str): Path to the client zeo configuration file. Default
:attr:`.settings.ZEO_CLIENT_PATH`.
"""
super(self.__class__, self).__init__(
conf_path=conf_path,
project_key=project_key
)
@transaction_manager
def _zeo_key(self, key, new_type=OOBTree):
"""
Get key from the :attr:`zeo` database root. If the key doesn't exist,
create it by calling `new_type` argument.
Args:
key (str): Key in the root dict.
new_type (func/obj): Object/function returning the new instance.
Returns:
obj: Stored object, or `new_type`.
"""
zeo_key = self.zeo.get(key, None)
if zeo_key is None:
zeo_key = new_type()
self.zeo[key] = zeo_key
return zeo_key
def _get_db_fields(self, obj):
"""
Yield the database trees used as indexes for the object's indexed
attributes.
Args:
obj (obj): Object whose ``indexes`` property names the fields to look up.
Yields:
tuple: (field name, OOBTree) pair for each item in ``obj.indexes``.
"""
for field in obj.indexes:
yield field, self._zeo_key(field)
def _check_obj_properties(self, pub, name="pub"):
"""
Make sure, that `pub` has the right interface.
Args:
pub (obj): Instance which will be checked.
name (str): Name of the instance. Used in exception. Default `pub`.
Raises:
InvalidType: When the `pub` is not instance of `obj_type`.
"""
if not hasattr(pub, "indexes"):
raise InvalidType("`%s` doesn't have .indexes property!" % name)
if not pub.indexes:
raise InvalidType("`%s.indexes` is not set!" % name)
if not hasattr(pub, "project_key"):
raise InvalidType(
"`%s` doesn't have .project_key property!" % name
)
if not pub.project_key:
raise InvalidType("`%s.project_key` is not set!" % name)
def _put_into_indexes(self, obj):
"""
Put publication into all indexes.
Attr:
obj (obj): Indexable object.
Raises:
UnindexableObject: When there is no index (property) which can be
used to index `obj` in database.
"""
no_of_used_indexes = 0
for field_name, db_index in list(self._get_db_fields(obj)):
attr_value = getattr(obj, field_name)
if attr_value is None: # index only by set attributes
continue
container = db_index.get(attr_value, None)
if container is None:
container = OOTreeSet()
db_index[attr_value] = container
container.insert(obj)
no_of_used_indexes += 1
# make sure that at least one `attr_value` was used
if no_of_used_indexes <= 0:
raise UnindexableObject(
"You have to use atleast one of the identificators!"
)
def store_object(self, obj):
"""
Save `obj` into database and into proper indexes.
Attr:
obj (obj): Indexable object.
Raises:
InvalidType: When the `obj` doesn't have the right properties.
UnindexableObject: When no indexes are defined.
"""
self._check_obj_properties(obj)
with transaction.manager:
self._put_into_indexes(obj)
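# Usage sketch (not part of the original module), assuming the Person class
# from the class docstring above:
#
#   handler = StorageHandler(project_key=PROJECT_KEY)
#   handler.store_object(Person("John", "Doe"))
#   matches = handler.search_objects(Person("John", None))
#
# Attributes left as None are skipped both when indexing and when searching.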
def _get_subset_matches(self, query):
"""
Yield publications, at indexes defined by `query` property values.
Args:
query (obj): Object implementing proper interface.
Yields:
list: List of matching publications.
"""
for field_name, db_index in self._get_db_fields(query):
attr = getattr(query, field_name)
if attr is None: # don't use unset attributes
continue
results = db_index.get(attr, OOTreeSet())
if results:
yield results
def search_objects(self, query):
"""
Return list of objects which match all properties that are set
(``not None``) using AND operator to all of them.
Example:
result = storage_handler.search_objects(
DBPublication(isbn="azgabash")
)
Args:
query (obj): Object implementing proper interface with some of the
properties set.
Returns:
list: List of matching objects or ``[]`` if no match was found.
Raises:
InvalidType: When the `query` doesn't implement required
properties.
"""
self._check_obj_properties(query, "query")
# AND operator between results
final_result = None
for result in self._get_subset_matches(query):
if final_result is None:
final_result = result
continue
final_result = intersection(final_result, result)
# if no result is found, `final_result` is None, and I want []
if not final_result:
return []
return list(final_result)
| mit | 4,649,346,435,320,711,000 | 28.52459 | 79 | 0.54678 | false |
byashimov/django-controlcenter | tests/test_templatetags.py | 1 | 11715 | import collections
import json
from django import VERSION
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from controlcenter import app_settings, widgets
from controlcenter.templatetags.controlcenter_tags import (
_method_prop,
attrlabel,
attrvalue,
change_url,
changelist_url,
external_link,
is_sequence,
jsonify,
)
from test_models import TestUser0, TestUser1
from . import TestCase
class SimpleTagsTest(TestCase):
def test_jsonify(self):
data = {'a': None, 'b': 0}
json_data = jsonify(data)
# Marked safe
self.assertTrue(hasattr(json_data, '__html__'))
self.assertEqual(json_data, json.dumps(data))
def test_is_sequence(self):
self.assertTrue(is_sequence(list()))
self.assertTrue(is_sequence(tuple()))
self.assertFalse(is_sequence(dict()))
self.assertFalse(is_sequence(User()))
def test_changelist_url(self):
widget = widgets.ItemList(request=None)
widget.changelist_url = 'test'
# Original
admin_changelist_url = '/admin/auth/user/'
# String test
self.assertEqual(changelist_url(widget), 'test')
# Model test
widget.changelist_url = User
self.assertEqual(changelist_url(widget),
admin_changelist_url + '')
# Tuple with params test
widget.changelist_url = (User, {'username__exact': 'user0'})
self.assertEqual(changelist_url(widget),
admin_changelist_url + '?username__exact=user0')
# Same with string no question sign
widget.changelist_url = (User, 'username__exact=user0')
self.assertEqual(changelist_url(widget),
admin_changelist_url + '?username__exact=user0')
# Same with question sign
widget.changelist_url = (User, '?username__exact=user0')
self.assertEqual(changelist_url(widget),
admin_changelist_url + '?username__exact=user0')
# Asserts first item is a Model
widget.changelist_url = (None, {'username__exact': 'user0'})
with self.assertRaises(AssertionError):
self.assertEqual(changelist_url(widget), admin_changelist_url)
# Asserts last items is either basestring or dict
widget.changelist_url = (User, None)
with self.assertRaises(AssertionError):
self.assertEqual(changelist_url(widget), admin_changelist_url)
def test_method_prop(self):
class Test(object):
foo = True
def bar(self):
pass
bar.allow_tags = True
def baz(self):
pass
baz.allow_tags = False
def egg(self):
pass
test = Test()
# Attribute is not callable
self.assertIsNone(_method_prop(test, 'foo', 'allow_tags'))
# Has the property
self.assertEqual(_method_prop(test, 'bar', 'allow_tags'), True)
# Has it but it's False
self.assertFalse(_method_prop(test, 'baz', 'allow_tags'))
# Doesn't have
self.assertIsNone(_method_prop(test, 'egg', 'allow_tags'))
# Doesn't exist
self.assertIsNone(_method_prop(test, 'doesnt_exist', 'allow_tags'))
class AttrTagsTest(TestCase):
def setUp(self):
class TestUserWidget0(widgets.ItemList):
model = TestUser0
list_display = ('foo', 'egg')
# Should override models method
def foo(self, obj):
return 'new foo value'
foo.short_description = 'new foo label'
# Doesn't have description
def bar(self, obj):
return 'new bar value'
def allows_tags(self, obj):
return '<br>'
allows_tags.allow_tags = True
def no_tags(self, obj):
return '<br>'
class TestUserWidget1(TestUserWidget0):
list_display = None
class TestUserWidget2(TestUserWidget0):
list_display = ((app_settings.SHARP, ) +
TestUserWidget0.list_display)
class TestUserWidget3(TestUserWidget2):
model = TestUser1
self.user0 = TestUser0(username='user0')
self.widget0 = TestUserWidget0(request=None)
self.widget1 = TestUserWidget1(request=None)
self.widget2 = TestUserWidget2(request=None)
self.widget3 = TestUserWidget3(request=None)
self.mapping = {'baz': 'mapping baz'}
self.sequence = ['foo value', 'egg value']
self.namedtuple = collections.namedtuple('User', ['egg'])('egg value')
def test_attrlabel(self):
# Widget overrides
self.assertEqual(attrlabel(self.widget0, 'foo'), 'new foo label')
# Widget's has no description, takes model's one
self.assertEqual(attrlabel(self.widget0, 'bar'), 'original bar label')
# Empty description
self.assertEqual(attrlabel(self.widget0, 'baz'), '')
# Field's verbose name
self.assertEqual(attrlabel(self.widget0, 'test_field'), 'My title')
# No description found
self.assertEqual(attrlabel(self.widget0, 'egg'), 'egg')
# No attribute found
self.assertEqual(attrlabel(self.widget0, 'unknown'), 'unknown')
# Pk field
self.assertEqual(attrlabel(self.widget0, 'id'), 'ID')
self.assertEqual(attrlabel(self.widget0, 'pk'), 'ID')
# Id is not defined
self.assertEqual(attrlabel(self.widget3, 'id'), 'id')
self.assertEqual(attrlabel(self.widget3, 'pk'), 'primary')
def test_attrvalue(self):
# New method
self.assertEqual(
attrvalue(self.widget0, self.user0, 'foo'), 'new foo value')
# Old method
self.assertEqual(
attrvalue(self.widget0, self.user0, 'egg'), 'original egg value')
# Allow tags test
self.assertEqual(
attrvalue(self.widget0, self.user0, 'allows_tags'), '<br>')
self.assertEqual(
attrvalue(self.widget0, self.user0, 'no_tags'), '<br>')
# Attribute test
self.assertEqual(
attrvalue(self.widget0, self.user0, 'username'), 'user0')
# 1) if method wasn't found in widget,
# doesn't pass instance to its method
# 2) returns empty value because it gets None
self.assertEqual(attrvalue(self.widget0, self.user0, 'baz'), '')
# No attribute found -- empty value
self.assertEqual(
attrvalue(self.widget0, self.user0, 'unknown'), '')
# Mapping test
self.assertEqual(
attrvalue(self.widget0, self.mapping, 'baz'), 'mapping baz')
# Key not found, not KeyError
self.assertEqual(
attrvalue(self.widget0, self.mapping, 'unknown'), '')
# Requires list_display to map it to values
self.assertEqual(
attrvalue(self.widget0, self.sequence, 'egg'), 'egg value')
self.assertEqual(attrvalue(self.widget1, self.sequence, 'egg'), '')
# Namedtuple doesn't require it
# with list_display
self.assertEqual(
attrvalue(self.widget0, self.namedtuple, 'egg'), 'egg value')
# without list_display
self.assertEqual(
attrvalue(self.widget1, self.namedtuple, 'egg'), 'egg value')
# Sharp test
self.assertEqual(
attrvalue(self.widget2, self.sequence, 'egg'), 'egg value')
# IndexError test
self.assertEqual(
attrvalue(self.widget2, self.sequence[:-1], 'egg'), '')
class ChangeurlTest(TestCase):
def setUp(self):
for i in range(10):
username = 'user{}'.format(i)
User.objects.create_user(username, username + '@example.com',
username + 'password')
self.obj = User.objects.first()
self.obj_url = '/admin/auth/user/{}/'.format(self.obj.pk)
if VERSION > (1, 9):
self.obj_url += 'change/'
# Model queryset
class ModelQuerySet(widgets.ItemList):
queryset = User.objects.all()
# Deferred queryset
class DeferredQuerySet(widgets.ItemList):
queryset = User.objects.defer('email')
# Dict
class ValuesDict(widgets.ItemList):
queryset = User.objects.values('pk', 'email')
# List
class ValuesList(widgets.ItemList):
queryset = User.objects.values_list('pk', 'email')
# List
class ValuesListNoPk(widgets.ItemList):
queryset = User.objects.values_list('email')
# Namedtuple
class NamedtupleList(ValuesList):
klass = collections.namedtuple('User', 'pk email')
def values(self):
vals = super(NamedtupleList, self).values
return [self.klass._make(x) for x in vals]
self.widgets = [
ModelQuerySet,
DeferredQuerySet,
ValuesDict,
ValuesList,
NamedtupleList,
]
for widget in self.widgets:
setattr(self, widget.__name__, widget)
def equal(self, klass, value):
widget = klass(request=None)
self.assertEqual(change_url(widget, widget.values[0]), value)
def test_non_registered(self):
# It's not registered so no reverse is possible
class NonRegisteredModel(widgets.ItemList):
queryset = ContentType.objects.all()
self.equal(NonRegisteredModel, None)
def test_no_model(self):
# Model queryset + Deferred
self.equal(self.ModelQuerySet, self.obj_url)
self.equal(self.DeferredQuerySet, self.obj_url)
# widget.model is not defined, so it can't build
# change_url from Dict, List, Namedtuple
self.equal(self.ValuesDict, None)
self.equal(self.ValuesList, None)
self.equal(self.NamedtupleList, None)
def test_with_model(self):
for widget in self.widgets:
class Widget(widget):
model = User
if widget is self.ValuesList:
# No widget.values_list_defined
self.equal(Widget, None)
else:
self.equal(Widget, self.obj_url)
def test_with_model_and_list_display(self):
for widget in self.widgets:
class Widget(widget):
model = User
list_display = (app_settings.SHARP, 'pk', 'email')
# Need pk to build url for ValuesList
self.equal(Widget, self.obj_url)
class IdWidget(Widget):
list_display = ('id', 'email')
# Alias test pk == id and also no sharp sign in list_display
self.equal(IdWidget, self.obj_url)
def test_no_pk(self):
class NoPkList(self.NamedtupleList):
klass = collections.namedtuple('User', 'email')
model = User
queryset = model.objects.values_list('email')
self.equal(NoPkList, None)
class ExternalLinkTest(TestCase):
def test_no_label(self):
self.assertEqual(
external_link('http://example.com'),
'<a href="http://example.com" target="_blank" '
'rel="noreferrer" rel="noopener">http://example.com</a>',
)
def test_with_label(self):
self.assertEqual(
external_link('http://example.com', 'my-example-link'),
'<a href="http://example.com" target="_blank" '
'rel="noreferrer" rel="noopener">my-example-link</a>',
)
| bsd-3-clause | -5,575,421,691,948,398,000 | 31.541667 | 78 | 0.583696 | false |
danceasarxx/pyfunk | pyfunk/monads/helpers.py | 1 | 1936 | from pyfunk import combinators as _, collections as __
@_.curry
def fmap(fn, f):
"""
Generic version of fmap.
@sig fmap :: Functor f => (a -> b) -> f a -> f b
"""
return f.fmap(fn) if hasattr(f, 'fmap') else __.fmap(fn, f)
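# Usage sketch (not in the original file): fmap dispatches on the value --
# Functor-like objects use their own .fmap, plain collections fall back to the
# collections helper. The Maybe API below is an assumption for illustration:
#
#   fmap(lambda x: x + 1, [1, 2, 3]) # handled by collections.fmap
#   fmap(lambda x: x + 1, Maybe.of(1)) # handled by Maybe.fmap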
@_.curry
def chain(fn, c):
"""
Generic version of chain
@sig chain :: Chain c => (a -> c b) -> c a -> c b
"""
return c.chain(fn)
@_.curry
def ap(fof, fn, f):
"""
Generic ap
@sig ap :: Applicative a, Functor f => (t -> a t) -> (x -> y) -> f x -> a y
"""
return fof(fn).ap(f)
def chaincompose(*fns):
"""
Composes functions that produce Chains
@sig chaincompose :: Chain c => (y -> c z)...(x -> c y) -> (x -> c z)
"""
last = fns[-1:]
rest = tuple(list(map(chain, fns[:-1])))
chained = rest + last
return _.compose(*chained)
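# Usage sketch (not in the original file), assuming _.compose is the usual
# right-to-left composition and that read_file/parse_json are hypothetical
# functions returning Chain instances (e.g. an IO-style monad):
#
#   read_config = chaincompose(parse_json, read_file)
#   read_config('conf.json') # == read_file('conf.json').chain(parse_json)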
@_.curry
def liftA2(fn, f1, f2):
"""
Generic version of liftA2
@sig liftA2 :: Functor f => (x -> y -> z) -> f x -> f y -> f z
"""
return f1.fmap(fn).ap(f2)
@_.curry
def liftA3(fn, f1, f2, f3):
"""
Generic version of liftA3
@sig liftA3 :: Functor f => (w -> x -> y -> z) -> f w -> f x -> f y -> f z
"""
return f1.fmap(fn).ap(f2).ap(f3)
# def doMonad(fn):
# """
# The do monad helps to run a series of serial mappings on monads to
# remove the need for callbacks. e.g
# @doMonad
# def read(path):
# xfile = yield openIO(path)
# lines = split(xfile)
# # chain calls use join
# xresults = join(yield openURLs(xfiles))
# Note: This function has no test
# """
# def init(*args, **kwargs):
# gen = fn(*args, **kwargs)
# def stepper(result):
# try:
# result = gen.send(result)
# except StopIteration:
# return result
# else:
# return result.fmap(stepper)
# return stepper(None)
# return init
| gpl-3.0 | 1,954,826,466,861,489,700 | 22.325301 | 79 | 0.502583 | false |
TkTech/sachi | sachi/backends/solr.py | 1 | 6451 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import logging
from sachi import six, fields
from sachi.query import QAll
from sachi.backends.base import SearchBackend
logger = logging.getLogger(__name__)
FIELD_MAPPINGS = {
fields.TextField: 'text_en',
fields.StringField: 'string',
fields.DateTimeField: 'date'
}
class SolrBackend(SearchBackend):
'''A solr-backed search interface.
.. note::
As a caveat, the Solr API in v6 does not currently provide a method of
changing the UNIQUE KEY. As such, your ID field should always be called
`id` in your indexes.
:param connection: An active pysolr connection.
'''
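# Construction sketch (not part of the original module). The docstring says
# "an active pysolr connection", but the calls made below (.schema,
# .schema_update(), .select(), .delete_by_ids(), ...) suggest a thin wrapper
# around pysolr; treat the exact connection API as an assumption:
#
#   backend = SolrBackend(connection=solr_connection, index=my_index)
#   backend.update_schema()
#   backend.index(documents)
#   backend.refresh()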
def __init__(self, connection, index):
super(SolrBackend, self).__init__(connection, index)
def update_schema(self):
'''Update the Solr schema using the Schema API.
This method will not remove or modify fields not defined in the index.
.. note::
This method isn't magical. You must reindex your documents after
changing the schema or the state of your index will become
indeterminate.
'''
# Get the current Solr schema and swap it into a name-keyed dict.
current_schema = self.connection.schema
current_fields = {
f['name']: f
for f in current_schema['schema']['fields']
}
# We need to find all of the currently defined fields, then find any
# defined in our index. If we find it, we compare it to see if it's
# changed and replace the field. If we don't find it, we create it.
# TODO: If Solr ever allows us to do an upsert, we can remove all of
# this.
logger.debug('Comparing Solr & Index schema...')
to_create, to_replace = [], []
for field_name, schema_field in six.iteritems(self._index.schema):
solr_field = current_fields.get(field_name)
if solr_field:
l1 = (
solr_field.get('stored', True),
solr_field.get('indexed', True),
solr_field.get('multiValued', False),
solr_field['type']
)
l2 = (
schema_field.stored,
schema_field.indexed,
schema_field.multivalued,
FIELD_MAPPINGS[schema_field.__class__]
)
if l1 != l2:
logger.debug('Replacing field %s', field_name)
to_replace.append((field_name, schema_field))
else:
logger.debug('Skipping unchanged field %s', field_name)
else:
logger.debug('Creating field %s', field_name)
to_create.append((field_name, schema_field))
self.connection.schema_update({
'add-field': [
{
'indexed': f.indexed,
'stored': f.stored,
'multiValued': f.multivalued,
'name': n,
'type': FIELD_MAPPINGS[f.__class__]
} for n, f in to_create
],
'replace-field': [
{
'indexed': f.indexed,
'stored': f.stored,
'multiValued': f.multivalued,
'name': n,
'type': FIELD_MAPPINGS[f.__class__]
} for n, f in to_replace
],
})
def search(self, q):
# By default we sort by score, however this can be overwritten.
order_by = 'score desc'
if q.sort_keys:
order_by = ', '.join(
'{0} {1}'.format(
k[1:] if k.startswith('-') else k,
'DESC' if k.startswith('-') else 'ASC'
) for k in q.sort_keys
)
facets = {}
if q.facets:
# TODO: Proper support for DateTimeField & Query facets. Right
# now we're only supporting basic terms facetting.
for facet in q.facets:
facets[facet.name] = {
'type': 'terms',
'field': facet.name + '_facet',
'mincount': facet.at_least,
'limit': facet.limit
}
results = self.connection.select({
'rows': q.limit,
'start': q.start,
'q': '*:*' if q.query is QAll else q.query,
'sort': order_by,
'df': self._index.default_field.index_keyname,
'facet': 'true' if q.facets else 'false',
'json.facet': json.dumps(facets)
})
response = results['response']
fmt_response = {
'query': q.query,
'start': q.start,
'limit': q.limit,
'count': response['numFound'],
'results': [{
# Filter out any fields which aren't included in our schema,
# or we'll end up with things like _version_.
k: v
for k, v in six.iteritems(r)
if k in self._index.schema and not k.endswith('_facet')
} for r in response['docs']],
'facets': {
'terms': {}
}
}
# Process facets into our standard representation, only for those
# we were asked to provide (solr by default will have additional
# facets in here, such as count.)
for facet in q.facets:
if facet.name not in results['facets']:
continue
fmt_response['facets']['terms'][facet.name] = {
b['val']: b['count']
for b in results['facets'][facet.name]['buckets']
}
return fmt_response
def index(self, objects):
self.connection.index(
list(self._index.apply(objects))
)
def clear(self):
self.connection.delete_by_query('*:*', commit=True)
def count(self):
results = self.connection.select({
'rows': 0,
'q': '*:*'
})
return results['response']['numFound']
def refresh(self):
self.connection.commit()
def remove(self, objects):
self.connection.delete_by_ids(
list(self._index.apply_only_ids(objects))
)
| mit | -4,865,335,914,014,963,000 | 32.42487 | 79 | 0.502868 | false |
Ventrosky/python-scripts | network-recon/sql-scan.py | 1 | 1585 | #!/usr/bin/env python
import sys, os, subprocess
def nmapScriptsScan(ip, port):
print "[-] Starting nmap ms-sql script scan for " + ip + ":" + port
nmapCmd = "nmap -sV -Pn -v -p "+port+" --script=ms-sql* -oN reports/sql/"+ip+"_"+port+"_nmap "+ip+ " >> reports/sql/"+ip+"_"+port+"_nmapOutput.txt"
subprocess.check_output(nmapCmd, shell=True)
print "[-] Completed nmap ms-sql script scan for " + ip + ":" + port
def hydraScan(ip, port):
print "[-] Starting ms-sql against " + ip + ":" + port
hydraCmd = "hydra -L wordlists/users.txt -P wordlists/passwords.txt -f -e n -o reports/sql/"+ip+"_"+port+"_ncrack.txt -u "+ip+" -s "+port + "mssql"
try:
results = subprocess.check_output(hydraCmd, shell=True)
resultarr = results.split("\n")
for result in resultarr:
if "login:" in result:
print "[*] Valid ms-sql credentials found: " + result
resultList=result.split()
username = resultList[4]
if len(resultList) > 6:
password = resultList[6]
else:
password = ''
except:
print "[-] No valid ms-sql credentials found"
print "[-] Completed hydra ms-sql against " + ip + ":" + port
def main():
if len(sys.argv) != 3:
print "Passed: ",sys.argv
print "Usage: sql-scan.py <ip> <port> "
sys.exit(0)
ip = str(sys.argv[1])
port = str(sys.argv[2])
nmapScriptsScan( ip, port)
hydraScan( ip, port)
main()
| gpl-3.0 | 442,974,220,446,875,260 | 37.658537 | 152 | 0.543849 | false |
braingineer/ikelos | ikelos/layers/rtn.py | 1 | 30197 | '''
recurrent tree networks
author: bcm
'''
from __future__ import absolute_import, print_function
from keras.layers import Recurrent, time_distributed_dense, LSTM
import keras.backend as K
from keras import activations, initializations, regularizers
from keras.engine import Layer, InputSpec
import ikelos.backend.theano_backend as IKE
import numpy as np
class DualCurrent(Recurrent):
''' modified from keras's lstm; the recurrent tree network
'''
def __init__(self, output_dim,
init='glorot_uniform', inner_init='orthogonal',
forget_bias_init='one', activation='tanh',
inner_activation='hard_sigmoid',
W_regularizer=None, U_regularizer=None, b_regularizer=None,
dropout_W=0., dropout_U=0., **kwargs):
self.output_dim = output_dim
self.init = initializations.get(init)
self.inner_init = initializations.get(inner_init)
self.forget_bias_init = initializations.get(forget_bias_init)
self.activation = activations.get(activation)
self.inner_activation = activations.get(inner_activation)
self.W_regularizer = regularizers.get(W_regularizer)
self.U_regularizer = regularizers.get(U_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
self.dropout_W, self.dropout_U = dropout_W, dropout_U
if self.dropout_W or self.dropout_U:
self.uses_learning_phase = True
super(DualCurrent, self).__init__(**kwargs)
def get_initial_states(self, x):
# build an all-zero tensor of shape (samples, output_dim)
initial_state = K.zeros_like(x) # (samples, timesteps, input_dim)
initial_state = K.permute_dimensions(x, [1,0,2]) # (timesteps, samples, input_dim)
reducer = K.zeros((self.input_dim, self.output_dim))
initial_state = K.dot(initial_state, reducer) # (timesteps, samples, output_dim)
initial_states = [initial_state for _ in range(len(self.states))]
return initial_states
def build(self, input_shapes):
assert isinstance(input_shapes, list)
rnn_shape, indices_shape = input_shapes
self.input_spec = [InputSpec(shape=rnn_shape), InputSpec(shape=indices_shape)]
input_dim = rnn_shape[2]
self.input_dim = input_dim
if self.stateful:
self.reset_states()
else:
# initial states: 2 all-zero tensors of shape (output_dim)
self.states = [None, None]
''' add a second incoming recurrent connection '''
self.W_i = self.init((input_dim, self.output_dim),
name='{}_W_i'.format(self.name))
self.U_i_me = self.inner_init((self.output_dim, self.output_dim),
name='{}_U_i_me'.format(self.name))
self.U_i_other = self.inner_init((self.output_dim, self.output_dim),
name='{}_U_i_other'.format(self.name))
self.b_i = K.zeros((self.output_dim,), name='{}_b_i'.format(self.name))
self.W_f = self.init((input_dim, self.output_dim),
name='{}_W_f'.format(self.name))
self.U_f_me = self.inner_init((self.output_dim, self.output_dim),
name='{}_U_f_me'.format(self.name))
self.U_f_other = self.inner_init((self.output_dim, self.output_dim),
name='{}_U_f_other'.format(self.name))
self.b_f = self.forget_bias_init((self.output_dim,),
name='{}_b_f'.format(self.name))
self.W_c = self.init((input_dim, self.output_dim),
name='{}_W_c'.format(self.name))
self.U_c_me = self.inner_init((self.output_dim, self.output_dim),
name='{}_U_c_me'.format(self.name))
self.U_c_other = self.inner_init((self.output_dim, self.output_dim),
name='{}_U_c_other'.format(self.name))
self.b_c = K.zeros((self.output_dim,), name='{}_b_c'.format(self.name))
self.W_o = self.init((input_dim, self.output_dim),
name='{}_W_o'.format(self.name))
self.U_o_me = self.inner_init((self.output_dim, self.output_dim),
name='{}_U_o_me'.format(self.name))
self.U_o_other = self.inner_init((self.output_dim, self.output_dim),
name='{}_U_o_other'.format(self.name))
self.b_o = K.zeros((self.output_dim,), name='{}_b_o'.format(self.name))
self.regularizers = []
if self.W_regularizer:
self.W_regularizer.set_param(K.concatenate([self.W_i,
self.W_f,
self.W_c,
self.W_o]))
self.regularizers.append(self.W_regularizer)
if self.U_regularizer:
self.U_regularizer.set_param(K.concatenate([self.U_i_me,self.U_i_other,
self.U_f_me,self.U_f_other,
self.U_c_me,self.U_c_other,
self.U_o_me,self.U_o_other]))
self.regularizers.append(self.U_regularizer)
if self.b_regularizer:
self.b_regularizer.set_param(K.concatenate([self.b_i,
self.b_f,
self.b_c,
self.b_o]))
self.regularizers.append(self.b_regularizer)
self.trainable_weights = [self.W_i, self.U_i_me, self.U_i_other, self.b_i,
self.W_c, self.U_c_me, self.U_c_other, self.b_c,
self.W_f, self.U_f_me, self.U_f_other, self.b_f,
self.W_o, self.U_o_me, self.U_o_other, self.b_o]
if self.initial_weights is not None:
self.set_weights(self.initial_weights)
del self.initial_weights
def reset_states(self):
assert self.stateful, 'Layer must be stateful.'
input_shape = self.input_spec[0].shape
if not input_shape[0]:
raise Exception('If a RNN is stateful, a complete ' +
'input_shape must be provided (including batch size).')
if hasattr(self, 'states'):
K.set_value(self.states[0],
np.zeros((input_shape[1], input_shape[0], self.output_dim)))
K.set_value(self.states[1],
np.zeros((input_shape[1], input_shape[0], self.output_dim)))
else:
self.states = [K.zeros((input_shape[1], input_shape[0], self.output_dim)),
K.zeros((input_shape[1], input_shape[0], self.output_dim))]
def compute_mask(self, input, mask):
if self.return_sequences:
if isinstance(mask, list):
return [mask[0], mask[0]]
return [mask, mask]
else:
return [None, None]
def get_output_shape_for(self, input_shapes):
rnn_shape, indices_shape = input_shapes
out_shape = super(DualCurrent, self).get_output_shape_for(rnn_shape)
return [out_shape, out_shape]
def preprocess_input(self, x):
if self.consume_less == 'cpu':
if 0 < self.dropout_W < 1:
dropout = self.dropout_W
else:
dropout = 0
input_shape = self.input_spec[0].shape
input_dim = input_shape[2]
timesteps = input_shape[1]
x_i = time_distributed_dense(x, self.W_i, self.b_i, dropout,
input_dim, self.output_dim, timesteps)
x_f = time_distributed_dense(x, self.W_f, self.b_f, dropout,
input_dim, self.output_dim, timesteps)
x_c = time_distributed_dense(x, self.W_c, self.b_c, dropout,
input_dim, self.output_dim, timesteps)
x_o = time_distributed_dense(x, self.W_o, self.b_o, dropout,
input_dim, self.output_dim, timesteps)
return K.concatenate([x_i, x_f, x_c, x_o], axis=2)
else:
return x
def step(self, x, states):
(h_tm1_me, h_tm1_other) = states[0]
(c_tm1_me, c_tm1_other) = states[1]
B_U = states[2]
B_W = states[3]
if self.consume_less == 'cpu':
x_i = x[:, :self.output_dim]
x_f = x[:, self.output_dim: 2 * self.output_dim]
x_c = x[:, 2 * self.output_dim: 3 * self.output_dim]
x_o = x[:, 3 * self.output_dim:]
else:
x_i = K.dot(x * B_W[0], self.W_i) + self.b_i
x_f = K.dot(x * B_W[1], self.W_f) + self.b_f
x_c = K.dot(x * B_W[2], self.W_c) + self.b_c
x_o = K.dot(x * B_W[3], self.W_o) + self.b_o
i = self.inner_activation(x_i + K.dot(h_tm1_me * B_U[0], self.U_i_me) +
K.dot(h_tm1_other * B_U[0], self.U_i_other))
f_me = self.inner_activation(x_f + K.dot(h_tm1_me * B_U[1], self.U_f_me) +
K.dot(h_tm1_other * B_U[1], self.U_f_me))
f_other = self.inner_activation(x_f + K.dot(h_tm1_me * B_U[1], self.U_f_other) +
K.dot(h_tm1_other * B_U[1], self.U_f_other))
in_c = i * self.activation(x_c + K.dot(h_tm1_me * B_U[2], self.U_c_me) +
K.dot(h_tm1_other * B_U[2], self.U_c_other))
re_c = f_me * c_tm1_me + f_other * c_tm1_other
c = in_c + re_c
o = self.inner_activation(x_o + K.dot(h_tm1_me * B_U[3], self.U_o_me) +
K.dot(h_tm1_other * B_U[3], self.U_o_other))
h = o * self.activation(c)
return h, [h, c]
def call(self, xpind, mask=None):
# input shape: (nb_samples, time (padded with zeros), input_dim)
# note that the .build() method of subclasses MUST define
# self.input_spec with a complete input shape.
x, indices = xpind
if isinstance(mask, list):
mask, _ = mask
input_shape = self.input_spec[0].shape
if K._BACKEND == 'tensorflow':
if not input_shape[1]:
raise Exception('When using TensorFlow, you should define '
'explicitly the number of timesteps of '
'your sequences.\n'
'If your first layer is an Embedding, '
'make sure to pass it an "input_length" '
'argument. Otherwise, make sure '
'the first layer has '
'an "input_shape" or "batch_input_shape" '
'argument, including the time axis. '
'Found input shape at layer ' + self.name +
': ' + str(input_shape))
if self.stateful:
initial_states = self.states
else:
initial_states = self.get_initial_states(x)
constants = self.get_constants(x)
preprocessed_input = self.preprocess_input(x)
last_output, outputs, states = IKE.dualsignal_rnn(self.step,
preprocessed_input,
initial_states,
indices,
go_backwards=self.go_backwards,
mask=mask,
constants=constants,
unroll=self.unroll,
input_length=input_shape[1])
last_tree, last_summary = last_output
tree_outputs, summary_outputs = outputs
if self.stateful:
self.updates = []
for i in range(len(states)):
self.updates.append((self.states[i], states[i]))
self.cached_states = states
return [tree_outputs, summary_outputs]
def get_constants(self, x):
constants = []
if 0 < self.dropout_U < 1:
ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
ones = K.concatenate([ones] * self.output_dim, 1)
B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(4)]
constants.append(B_U)
else:
constants.append([K.cast_to_floatx(1.) for _ in range(4)])
if 0 < self.dropout_W < 1:
input_shape = self.input_spec[0].shape
input_dim = input_shape[-1]
ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
ones = K.concatenate([ones] * input_dim, 1)
B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(4)]
constants.append(B_W)
else:
constants.append([K.cast_to_floatx(1.) for _ in range(4)])
return constants
def get_config(self):
config = {"output_dim": self.output_dim,
"init": self.init.__name__,
"inner_init": self.inner_init.__name__,
"forget_bias_init": self.forget_bias_init.__name__,
"activation": self.activation.__name__,
"inner_activation": self.inner_activation.__name__,
"W_regularizer": self.W_regularizer.get_config() if self.W_regularizer else None,
"U_regularizer": self.U_regularizer.get_config() if self.U_regularizer else None,
"b_regularizer": self.b_regularizer.get_config() if self.b_regularizer else None,
"dropout_W": self.dropout_W,
"dropout_U": self.dropout_U}
base_config = super(DualCurrent, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class BranchLSTM(LSTM):
def build(self, input_shapes):
assert isinstance(input_shapes, list)
rnn_shape, indices_shape = input_shapes
super(BranchLSTM, self).build(rnn_shape)
self.input_spec += [InputSpec(shape=indices_shape)]
def get_initial_states(self, x):
# build an all-zero tensor of shape (samples, output_dim)
initial_state = K.zeros_like(x) # (samples, timesteps, input_dim)
initial_state = K.permute_dimensions(x, [1,0,2]) # (timesteps, samples, input_dim)
reducer = K.zeros((self.input_dim, self.output_dim))
initial_state = K.dot(initial_state, reducer) # (timesteps, samples, output_dim)
initial_states = [initial_state for _ in range(len(self.states))]
return initial_states
def reset_states(self):
assert self.stateful, 'Layer must be stateful.'
input_shape = self.input_spec[0].shape
if not input_shape[0]:
raise Exception('If a RNN is stateful, a complete ' +
'input_shape must be provided (including batch size).')
if hasattr(self, 'states'):
K.set_value(self.states[0],
np.zeros((input_shape[1], input_shape[0], self.output_dim)))
K.set_value(self.states[1],
np.zeros((input_shape[1], input_shape[0], self.output_dim)))
else:
self.states = [K.zeros((input_shape[1], input_shape[0], self.output_dim)),
K.zeros((input_shape[1], input_shape[0], self.output_dim))]
def get_output_shape_for(self, input_shapes):
rnn_shape, indices_shape = input_shapes
return super(BranchLSTM, self).get_output_shape_for(rnn_shape)
def compute_mask(self, input, mask):
if self.return_sequences:
if isinstance(mask, list):
return mask[0]
return mask
else:
return None
def call(self, xpind, mask=None):
# input shape: (nb_samples, time (padded with zeros), input_dim)
# note that the .build() method of subclasses MUST define
# self.input_spec with a complete input shape.
x, indices = xpind
if isinstance(mask, list):
mask, _ = mask
input_shape = self.input_spec[0].shape
if K._BACKEND == 'tensorflow':
if not input_shape[1]:
raise Exception('When using TensorFlow, you should define '
'explicitly the number of timesteps of '
'your sequences.\n'
'If your first layer is an Embedding, '
'make sure to pass it an "input_length" '
'argument. Otherwise, make sure '
'the first layer has '
'an "input_shape" or "batch_input_shape" '
'argument, including the time axis. '
'Found input shape at layer ' + self.name +
': ' + str(input_shape))
if self.stateful:
initial_states = self.states
else:
initial_states = self.get_initial_states(x)
constants = self.get_constants(x)
preprocessed_input = self.preprocess_input(x)
last_output, outputs, states = IKE.stack_rnn(self.step,
preprocessed_input,
initial_states,
indices,
go_backwards=self.go_backwards,
mask=mask,
constants=constants,
unroll=self.unroll,
input_length=input_shape[1])
if self.stateful:
self.updates = []
for i in range(len(states)):
self.updates.append((self.states[i], states[i]))
self.cached_states = states
if self.return_sequences:
return outputs
else:
return last_output
class RTTN(Recurrent):
'''Recurrent Tree Traversal Network
# Arguments
See GRU
# Notes
-
'''
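# Input/output sketch, derived from call() and get_output_shape_for() below
# (illustrative only -- the tensor names are placeholders):
#
#   branch, traversal, horizon, p_horizon = RTTN(
#       output_dim, shape_key={'word': w, 'horizon': h}
#   )([x_in, topology, x_types, horizon_words, horizon_indices])
#
# branch/traversal are (batch, seq, output_dim), horizon is
# (batch, seq, h, w + output_dim) and p_horizon is (batch, seq, h).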
def __init__(self, output_dim,
init='glorot_uniform', inner_init='orthogonal',
activation='tanh', inner_activation='hard_sigmoid',
W_regularizer=None, U_regularizer=None, b_regularizer=None,
shape_key=None, dropout_W=0., dropout_U=0., **kwargs):
self.output_dim = output_dim
self.init = initializations.get(init)
self.inner_init = initializations.get(inner_init)
self.activation = activations.get(activation)
self.inner_activation = activations.get(inner_activation)
self.W_regularizer = regularizers.get(W_regularizer)
self.U_regularizer = regularizers.get(U_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
self.dropout_W, self.dropout_U = dropout_W, dropout_U
self.shape_key = shape_key or {}
if self.dropout_W or self.dropout_U:
self.uses_learning_phase = True
kwargs['consume_less'] = 'gpu'
super(RTTN, self).__init__(**kwargs)
self.num_actions = 4
def compute_mask(self, input, mask):
if self.return_sequences:
if isinstance(mask, list):
return [mask[0] for _ in range(4)]
return [mask for _ in range(4)]
else:
return [None, None, None, None]
def get_output_shape_for(self, input_shapes):
'''given all inputs, compute output shape for all outputs
crazy shape computations. super verbose and ugly now to make the code readable
'''
## normal in shapes are (batch, sequence, in_size)
## normal out shapes are (batch, sequence, out_size)
## horizon is (batch, sequence, sequence/horizon, features)
## horizon features is going to be concatenated branch and word feature vectors
## p_horizon is (batch, sequence, sequence/horizon)
in_shape = input_shapes[0]
out_shape = super(RTTN, self).get_output_shape_for(in_shape)
b, s, fin = in_shape
b, s, fout = out_shape
w = self.shape_key['word']
h = self.shape_key['horizon']
horizon_shape = (b, s, h, w+fout)
p_horizon_shape = (b, s, h)
#horizon_shape = out_shape[:-1] (self.shape_key['horizon'],
# in_shape[-1] + out_shape[-1])
#p_horizon_shape = out_shape[:-1] + (self.shape_key['horizon'],)
return [out_shape, out_shape, horizon_shape, p_horizon_shape]
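# Worked example of the shapes above (illustrative numbers only): with
# batch=32, sequence=20, output_dim=64, word=50 and horizon=10 the four
# returned shapes are (32, 20, 64), (32, 20, 64), (32, 20, 10, 114)
# and (32, 20, 10), where 114 = word (50) + output_dim (64).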
def build(self, input_shapes):
assert isinstance(input_shapes, list)
rnn_shape, indices_shape = input_shapes[0], input_shapes[1]
self.input_spec = [InputSpec(shape=rnn_shape), InputSpec(shape=indices_shape)]
self.input_spec += [InputSpec(shape=None) for _ in range(len(input_shapes)-2)]
self.input_dim = rnn_shape[2]
# initial states: all-zero tensor of shape (output_dim)
self.states = [None, None]
assert self.consume_less == "gpu"
### NOTES. the 4 here is for 4 action types: sub/ins, left/right.
self.W_x = self.init((self.num_actions, self.input_dim, 4 * self.output_dim),
name='{}_W_x'.format(self.name))
self.b_x = K.variable(np.zeros(4 * self.output_dim),
name='{}_b_x'.format(self.name))
### used for parent node and traversal node recurrence computations
self.U_p = self.inner_init((self.output_dim, 3 * self.output_dim),
name='{}_U_p'.format(self.name))
self.U_v = self.inner_init((self.output_dim, 3 * self.output_dim),
name='{}_U_v'.format(self.name))
### used for the child node computation
self.U_c = self.init((self.output_dim, 3 * self.output_dim),
name='{}_U_c'.format(self.name))
self.b_c = K.variable(np.zeros(3 * self.output_dim),
name='{}_b_c'.format(self.name))
self.W_ctx = self.init( (self.output_dim, self.shape_key['word'] + self.output_dim),
name='{}_W_context'.format(self.name))
self.trainable_weights = [self.W_x, self.U_c,
self.U_p, self.U_v,
self.b_x, self.b_c,
self.W_ctx]
def reset_states(self):
assert self.stateful, 'Layer must be stateful.'
input_shape = self.input_spec[0].shape
if not input_shape[0]:
raise Exception('If a RNN is stateful, a complete ' +
'input_shape must be provided (including batch size).')
if hasattr(self, 'states'):
K.set_value(self.states[0],
np.zeros((input_shape[0], self.output_dim)))
K.set_value(self.states[1],
np.zeros((input_shape[1], input_shape[0], self.output_dim)))
else:
self.states = [K.zeros((input_shape[0], self.output_dim)),
K.zeros((input_shape[1], input_shape[0], self.output_dim))]
def get_initial_states(self, x):
# build an all-zero tensor of shape (samples, output_dim)
initial_state = K.zeros_like(x) # (samples, timesteps, input_dim)
initial_state = K.permute_dimensions(x, [1,0,2]) # (timesteps, samples, input_dim)
reducer = K.zeros((self.input_dim, self.output_dim))
initial_state = K.dot(initial_state, reducer) # (timesteps, samples, output_dim)
initial_traversal = K.sum(initial_state, axis=0) # traversal is (samples, output_dim)
initial_states = [initial_traversal, initial_state] # this order matches assumptions in rttn scan function
return initial_states
def step(self, x, states):
(h_p, h_v, # 0:parent, 1:traversal
x_type, # 2:treetype(ins/sub,left/right); ints of size (B,). \in {0,1,2,3}
B_U, B_W) = states # 3:Udropoutmask, 4:Wdropoutmask
#### matrix x has all 4 x computations in it
## per move
this_Wx = self.W_x[x_type] ## B, I, 4*O
matrix_x = K.batch_dot(x * B_W[0], this_Wx) + self.b_x
x_zp = matrix_x[:, :self.output_dim]
x_rp = matrix_x[:, self.output_dim: 2 * self.output_dim]
x_rv = matrix_x[:, 2 * self.output_dim: 3 * self.output_dim]
x_ih = matrix_x[:, 3 * self.output_dim:]
#### matrix p has zp, rp; matrix v has zv, rv
matrix_p = K.dot(h_p * B_U[0], self.U_p[:, :2 * self.output_dim])
# zp is for the parent unit update (resulting in child unit)
inner_zp = matrix_p[:, :self.output_dim]
z_p = self.inner_activation(x_zp + inner_zp)
# rp is for gating to the intermediate unit of parent
inner_rp = matrix_p[:, self.output_dim: 2 * self.output_dim]
r_p = self.inner_activation(x_rp + inner_rp)
matrix_v = K.dot(h_v * B_U[0], self.U_v[:, :2 * self.output_dim])
# rv is for the intermediate gate on the traversal unit
# this gets reused for both the parent's and its own intermediate
inner_rv = matrix_v[:, self.output_dim: 2 * self.output_dim]
r_v = self.inner_activation(x_rv + inner_rv)
# the actual recurrence calculations
# h_p * U and h_v * U ; as gated by their r gates
inner_hp = K.dot(r_p * h_p * B_U[0], self.U_p[:, 2 * self.output_dim:])
inner_hv = K.dot(r_v * h_v * B_U[0], self.U_v[:, 2 * self.output_dim:])
# h_c_tilde is the intermediate state
h_c_tilde = self.activation(x_ih + inner_hp + inner_hv)
# h_c is the new child state
h_c = z_p * h_c_tilde + (1 - z_p) * h_p
matrix_c = K.dot(h_c * B_U[0], self.U_c) + self.b_c
hc_zv = matrix_c[:, :self.output_dim]
hc_rv = matrix_c[:, self.output_dim: 2 * self.output_dim]
hc_ih = matrix_c[:, 2 * self.output_dim:]
### zv -> gate h_v and h_v_tilde
### rv -> gate h_v's contribution to h_v_tilde
### ih -> h_c's contribution to h_v_tilde
# zv is for the traversal unit update.
inner_zv = matrix_v[:, :self.output_dim]
z_v = self.inner_activation(hc_zv + inner_zv)
## r_v is calculated with h_c rather than x
r_v = self.inner_activation(hc_rv + inner_rv)
inner_hvplus = K.dot(r_v * h_v * B_U[0], self.U_v[:, 2 * self.output_dim:])
h_vplus_tilde = self.activation(hc_ih + inner_hvplus)
h_vplus = z_v * h_v + (1 - z_v) * h_vplus_tilde
return h_c, h_vplus
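    # Shape sketch (informal): for batch size B and output_dim O, `step` takes
    # x of shape (B, input_dim) plus the parent state h_p and traversal state
    # h_v, both (B, O), and returns the child state h_c and the updated
    # traversal state h_vplus, both (B, O). The child update is GRU-like,
    # h_c = z_p * h_c_tilde + (1 - z_p) * h_p, and is followed by a second
    # GRU-like update of the traversal state driven by h_c.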
def call(self, all_inputs, mask=None):
x_in, topology, x_types, horizon_w, horizon_i = all_inputs
horizon = [horizon_w, horizon_i]
if isinstance(mask, list):
mask = mask[0]
assert not self.stateful
initial_states = self.get_initial_states(x_in)
constants = self.get_constants(x_in)
states = IKE.rttn( self.step,
x_in,
initial_states,
topology,
x_types,
horizon,
self.shape_key,
self.W_ctx,
mask=mask,
constants=constants )
branch_tensor, traversal_tensor, horizon_states, p_horizons = states
return [branch_tensor, traversal_tensor, horizon_states, p_horizons]
def get_constants(self, x):
constants = []
if 0 < self.dropout_U < 1:
ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
ones = K.concatenate([ones] * self.output_dim, 1)
B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
constants.append(B_U)
else:
constants.append([K.cast_to_floatx(1.) for _ in range(3)])
if 0 < self.dropout_W < 1:
input_shape = self.input_spec[0].shape
input_dim = input_shape[-1]
ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
ones = K.concatenate([ones] * input_dim, 1)
B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
constants.append(B_W)
else:
constants.append([K.cast_to_floatx(1.) for _ in range(3)])
return constants
def get_config(self):
config = {'output_dim': self.output_dim,
'init': self.init.__name__,
'inner_init': self.inner_init.__name__,
'activation': self.activation.__name__,
'inner_activation': self.inner_activation.__name__,
'W_regularizer': self.W_regularizer.get_config() if self.W_regularizer else None,
'U_regularizer': self.U_regularizer.get_config() if self.U_regularizer else None,
'b_regularizer': self.b_regularizer.get_config() if self.b_regularizer else None,
'dropout_W': self.dropout_W,
'dropout_U': self.dropout_U}
base_config = super(RTTN, self).get_config()
return dict(list(base_config.items()) + list(config.items())) | mit | 4,406,819,450,685,066,000 | 46.037383 | 114 | 0.515912 | false |
alexhayes/django-toolkit | django_toolkit/font_awesome.py | 1 | 5598 | from django.core.urlresolvers import reverse
from copy import copy
class Icon():
"""
    Represents a Font Awesome icon (<i>) tag.
"""
def __init__(self, icon, *css):
self.icon = icon
self.css = css
def render(self, extra_css=[]):
html = '<i class="%s' % self.icon
if self.css:
html += ' %s' % ' '.join([css for css in self.css])
if extra_css:
html += ' %s' % ' '.join([css for css in extra_css])
html += '"></i>'
return html
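# Example (informal): Icon('icon-ok', 'icon-large').render() would yield
# '<i class="icon-ok icon-large"></i>'; any extra_css entries are appended to
# the same class attribute.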
class BaseCollection():
def __init__(self, *items):
self.items = list(items)
def append(self, item):
self.items.append(item)
class Stack(BaseCollection):
"""
Represents a Font Awesome icon stack.
@see http://fortawesome.github.io/Font-Awesome/examples/
"""
def render(self):
"""
Render the icon stack.
For example:
<span class="icon-stack">
<i class="icon-check-empty icon-stack-base"></i>
<i class="icon-twitter"></i>
</span>
<span class="icon-stack">
<i class="icon-circle icon-stack-base"></i>
<i class="icon-flag icon-light"></i>
</span>
<span class="icon-stack">
<i class="icon-sign-blank icon-stack-base"></i>
<i class="icon-terminal icon-light"></i>
</span>
<span class="icon-stack">
<i class="icon-camera"></i>
<i class="icon-ban-circle icon-stack-base text-error"></i>
</span>
"""
return '<span class="icon-stack">%s</span>' % (
''.join([item.render(['icon-stack-base'] if i == 0 else []) for (i, item) in enumerate(self.items)])
)
class ButtonGroup(BaseCollection):
"""
Font-Awesome ButtonGroup
@see http://fortawesome.github.io/Font-Awesome/examples/
"""
def render(self):
"""
Render the groups.
Example:
<div class="btn-group">
<a class="btn" href="#"><i class="icon-align-left"></i></a>
<a class="btn" href="#"><i class="icon-align-center"></i></a>
<a class="btn" href="#"><i class="icon-align-right"></i></a>
<a class="btn" href="#"><i class="icon-align-justify"></i></a>
</div>
"""
return '<div class="btn-group">%s</div>' % (
''.join([item.render() for (i, item) in enumerate(self.items)])
)
class Button():
def __init__(self, inner=None, data_tip=None,
view=None, view_kwargs=[], view_args=[], next=None,
href=None,
title=None, attrs={}, target=False,
modal=False, submodal=False,
data_target=True,
css=[]):
self.inner = inner
self.href = href
self.view = view
self.view_args = view_args
self.view_kwargs = view_kwargs
self.title = title
self.attrs = attrs
self.css = [css] if isinstance(css, basestring) else css
self.next = next
self.modal = modal
self.submodal = submodal
self.data_target = data_target
self.data_tip = data_tip
self.target = target
def render(self):
"""
<a class="btn" href="#"><i class="icon-repeat"></i> Reload</a>
or..
<button type="button" class="btn"><i class="icon-repeat"></i> Reload</button>
"""
html = ''
href = self.view if self.view is not None else self.href
attrs = copy(self.attrs)
if self.submodal:
attrs['role'] = "button"
attrs['data-toggle'] = "remote-submodal"
if self.data_target:
if isinstance(self.data_target, basestring):
attrs['data-target'] = self.data_target
else:
attrs['data-target'] = "#submodal"
elif self.modal:
attrs['role'] = "button"
#attrs['data-dismiss'] = "modal"
attrs['data-toggle'] = "modal"
#attrs['data-submodal'] = "true"
#attrs['data-remoteinbody'] = "false"
if self.data_target:
if isinstance(self.data_target, basestring):
attrs['data-target'] = self.data_target
else:
attrs['data-target'] = "#modal"
if self.data_tip:
attrs['data-tip'] = self.data_tip
if self.target:
attrs['target'] = self.target
if 'css_class' not in attrs:
attrs['css_class'] = ''
attrs['css_class'] += ' btn ' + " ".join(self.css)
attrs = ' '.join(['%s="%s"' % (key if key != 'css_class' else 'class', value) for key,value in attrs.iteritems()])
if href:
if self.view:
href = reverse(self.view, args=self.view_args, kwargs=self.view_kwargs)
if self.next:
href += '?next=%s' % (self.next if self.next.startswith('/') else reverse(self.next))
html += '<a href="%s" %s>' % (href, attrs)
else:
html += '<button type="button" %s>' % (attrs,)
if hasattr(self.inner, 'render'):
html += self.inner.render()
else:
html += self.inner
if self.title:
html += self.title
if href:
html += "</a>"
else:
html += "</button>"
return html
| mit | -2,145,452,662,055,833,300 | 30.8125 | 122 | 0.486781 | false |
wursm1/eurobot-hauptsteuerung | docs/conf.py | 1 | 10793 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# eurobot documentation build configuration file, created by
# sphinx-quickstart on Thu Dec 4 20:22:09 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../eurobot'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.todo',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Eurobot 2015'
copyright = '2015, Wuersch Marcel'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2.0'
# The full version, including alpha/beta/rc tags.
release = '0.2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'eurobotdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'a4paper, oneside',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Software_Hauptsteuerung.tex', 'Software Hauptsteuerung Eurobot 2015 PA1',
'Wuersch Marcel', 'howto'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'eurobot', 'eurobot Documentation',
['Author'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'eurobot', 'eurobot Documentation',
'Author', 'eurobot', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = 'eurobot'
epub_author = 'Author'
epub_publisher = 'Author'
epub_copyright = '2014, Author'
# The basename for the epub file. It defaults to the project name.
#epub_basename = 'eurobot'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
todo_include_todos = True
autodoc_default_flags = ['members', 'private-members']
autoclass_content = 'both'
##Read the docs
#import sphinx_rtd_theme
#html_theme = "sphinx_rtd_theme"
#html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
## Alabaster
#import alabaster
#html_theme_path = [alabaster.get_path()]
#extensions = ['alabaster']
#html_theme = 'alabaster'
#html_sidebars = {
# '**': [
# 'about.html', 'navigation.html', 'searchbox.html', 'donate.html',
# ]
#} | gpl-3.0 | 3,253,172,174,750,404,600 | 29.577904 | 86 | 0.705087 | false |
sergiohr/NeoDB | core/blockdb.py | 1 | 3923 | '''
Created on Apr 20, 2014
@author: sergio
'''
import psycopg2
import neo.core
from .. import dbutils
class BlockDB(neo.core.Block):
	'''
	Neo Block persisted as a row of the ``block`` table in the database.
	'''
def __init__(self, id_project = None, id_individual = None, name = None,
description = None, file_origin = None,
file_datetime = None, rec_datetime = None, index = None):
'''
Constructor
'''
neo.core.Block.__init__(self, name, description, file_origin,
file_datetime, rec_datetime, index)
self.id_project = id_project
self.id_individual = id_individual
self.connection = None
def save(self, connection):
# Check mandatory values
if self.id_project == None or self.id_individual == None:
raise StandardError("Block Session must have id_project and id_individual.")
if self.name == None:
raise StandardError("Block Session must have a name.")
other = dbutils.get_id(connection, 'block', name = self.name)
if other != []:
raise StandardError("There is another block session with name '%s'."%self.name)
file_datetime = None
rec_datetime = None
if self.file_datetime:
file_datetime = dbutils.get_ppgdate(self.file_datetime)
if self.rec_datetime:
rec_datetime = dbutils.get_ppgdate(self.rec_datetime)
# QUERY
cursor = connection.cursor()
query = """INSERT INTO block
(id_project, id_individual, name, description, file_datetime,
rec_datetime, file_origin, index)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s)"""
cursor.execute(query,[self.id_project, self.id_individual,
self.name, self.description, file_datetime,
rec_datetime, self.file_origin, self.index])
connection.commit()
# Get ID
[(id, _)] = dbutils.get_id(connection, 'block', name = self.name)
return id
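	# Sketch of typical use (mirroring the __main__ example at the bottom of this
	# module): construct the block with at least id_project, id_individual and a
	# unique name, then call save(connection) to insert the row and obtain its id.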
def get_from_db(self, connection, id):
		self.connection = connection
cursor = connection.cursor()
query = """ SELECT * FROM block WHERE id = %s"""
cursor.execute(query, [id])
results = cursor.fetchall()
if results != []:
self.name = results[0][6]
self.description = results[0][7]
self.file_origin = results[0][8]
self.file_datetime = results[0][3]
self.rec_datetime = results[0][4]
results = {}
results['name'] = self.name
results['description'] = self.description
results['file_origin'] = self.file_origin
results['file_datetime'] = self.file_datetime
results['rec_datetime'] = self.rec_datetime
results['segments'] = self.__get_segments_id(id, connection)
return results
def __get_segments_id(self, id, connection):
cursor = connection.cursor()
query = """ SELECT id FROM segment WHERE id_block = %s"""
cursor.execute(query, [id])
results = cursor.fetchall()
ids = []
for id in results:
ids.append(id[0])
return ids
if __name__ == '__main__':
username = 'postgres'
password = 'postgres'
host = '192.168.2.2'
dbname = 'demo'
url = 'postgresql://%s:%s@%s/%s'%(username, password, host, dbname)
dbconn = psycopg2.connect('dbname=%s user=%s password=%s host=%s'%(dbname, username, password, host))
#b = BlockDB(id_project = 5, id_individual = 1, name = 'bloque prueba', rec_datetime="19-05-2014")
b = BlockDB()
b.get_from_db(dbconn,2)
print b.save(dbconn) | gpl-3.0 | 8,031,664,291,213,629,000 | 32.538462 | 105 | 0.53454 | false |
pipetree/pipetree | tests/functional/test_providers.py | 1 | 8044 | # MIT License
# Copyright (c) 2016 Morgan McDermott & John Carlyle
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import os.path
import unittest
from tests import isolated_filesystem
from pipetree.config import PipelineStageConfig
from pipetree.providers import LocalDirectoryArtifactProvider,\
LocalFileArtifactProvider,\
ParameterArtifactProvider
from pipetree.exceptions import ArtifactSourceDoesNotExistError,\
InvalidConfigurationFileError,\
ArtifactProviderMissingParameterError,\
ArtifactProviderFailedError
class TestParameterArtifactProvider(unittest.TestCase):
def setUp(self):
self.stage_config = PipelineStageConfig("test_stage_name", {
"type": "ParameterPipelineStage"
})
self.test_parameters = {"int_param": 200, "str_param": "str"}
pass
def tearDown(self):
pass
def test_missing_config(self):
try:
provider = ParameterArtifactProvider(
stage_config=None,
parameters={})
self.assertEqual(provider, "Provider creation should have failed")
except ArtifactProviderMissingParameterError:
pass
def test_missing_parameters(self):
try:
provider = ParameterArtifactProvider(
stage_config=self.stage_config,
parameters={})
self.assertEqual(provider, "Provider creation should have failed")
except ArtifactProviderMissingParameterError:
pass
def test_yield_artifacts(self):
provider = ParameterArtifactProvider(
stage_config=self.stage_config,
parameters=self.test_parameters)
arts = provider.yield_artifacts()
la = list(arts)
self.assertEqual(1, len(la))
yielded_params = la[0].payload
for k in self.test_parameters:
if k not in yielded_params:
raise ArtifactProviderFailedError(
provider = self.__class__.__name__,
error="Missing parameter "+k
)
pass
class TestLocalFileArtifactProvider(unittest.TestCase):
def setUp(self):
self.dirname = 'foo'
self.filename = ['foo.bar', 'foo.baz']
self.filedatas = ['foo bar baz', 'helloworld']
self.fs = isolated_filesystem()
self.fs.__enter__()
self.stage_config = PipelineStageConfig("test_stage_name", {
"type": "LocalFilePipelineStage"
})
# Build directory structure
os.makedirs(self.dirname)
for name, data in zip(self.filename, self.filedatas):
with open(os.path.join(os.getcwd(),
self.dirname,
name), 'w') as f:
f.write(data)
def tearDown(self):
self.fs.__exit__(None, None, None)
def test_missing_config(self):
try:
LocalFileArtifactProvider(path='folder/shim.sham',
stage_config=None)
self.assertEqual(0, "Provider creation should have failed")
except ArtifactProviderMissingParameterError:
pass
def test_load_nonexistant_file(self):
try:
LocalFileArtifactProvider(path='folder/shim.sham',
stage_config=self.stage_config)
self.assertTrue(False, 'This was supposed to raise an exception')
except ArtifactSourceDoesNotExistError:
pass
def test_yield_artifacts(self):
provider = LocalFileArtifactProvider(
path=os.path.join(self.dirname, self.filename[0]),
stage_config=self.stage_config,
read_content=True)
arts = provider.yield_artifacts()
la = list(arts)
self.assertEqual(len(la), 1)
def test_load_file_data(self):
provider = LocalFileArtifactProvider(
path=os.path.join(self.dirname, self.filename[0]),
stage_config=self.stage_config,
read_content=True)
art = provider._yield_artifact()
self.assertEqual(art.item.payload,
self.filedatas[0])
class TestLocalDirectoryArtifactProvider(unittest.TestCase):
def setUp(self):
self.dirname = 'foo'
self.filename = ['foo.bar', 'foo.baz']
self.filedatas = ['foo bar baz', 'helloworld']
self.fs = isolated_filesystem()
self.fs.__enter__()
self.stage_config = PipelineStageConfig("test_stage_name", {
"type": "LocalDirectoryPipelineStage"
})
# Build directory structure
os.makedirs(self.dirname)
for name, data in zip(self.filename, self.filedatas):
with open(os.path.join(os.getcwd(),
self.dirname,
name), 'w') as f:
f.write(data)
def tearDown(self):
self.fs.__exit__(None, None, None)
def test_missing_config(self):
try:
LocalDirectoryArtifactProvider(path='folder/',
stage_config=None)
self.assertEqual(0, "Provider creation should have failed")
except ArtifactProviderMissingParameterError:
pass
def test_load_nonexistant_dir(self):
try:
LocalDirectoryArtifactProvider(path='folder/',
stage_config=self.stage_config)
self.assertTrue(False, 'This was supposed to raise an exception')
except ArtifactSourceDoesNotExistError:
pass
def test_load_file_data(self):
provider = LocalDirectoryArtifactProvider(path=self.dirname,
stage_config=self.stage_config,
read_content=True)
art = provider._yield_artifact(self.filename[0])
self.assertEqual(art.item.payload.decode('utf-8'),
self.filedatas[0])
def test_load_file_names(self):
provider = LocalDirectoryArtifactProvider(path=self.dirname,
stage_config=self.stage_config)
for loaded_name, name in zip(provider.yield_artifacts(),
self.filename):
self.assertEqual(loaded_name, os.path.join(os.getcwd(),
self.dirname,
name))
def test_load_multiple_file_contents(self):
provider = LocalDirectoryArtifactProvider(path=self.dirname,
stage_config=self.stage_config,
read_content=True)
for art, data in zip(provider.yield_artifacts(),
self.filedatas):
art_data = art.item.payload
self.assertEqual(art_data.decode('utf-8'), data)
| mit | -9,089,591,533,696,197,000 | 39.0199 | 81 | 0.595599 | false |
carlsonsantana/HaTeMiLe-for-Python | hatemile/util/css/stylesheetdeclaration.py | 1 | 1294 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module of StyleSheetDeclaration interface.
"""
class StyleSheetDeclaration:
"""
    The StyleSheetDeclaration interface contains the methods for accessing the CSS
declaration.
"""
def get_value(self):
"""
Returns the value of declaration.
:return: The value of declaration.
:rtype: str
"""
pass
def get_values(self):
"""
Returns a list with the values of declaration.
:return: The list with the values of declaration.
:rtype: list(str)
"""
pass
def get_property(self):
"""
Returns the property of declaration.
:return: The property of declaration.
:rtype: str
"""
pass
| apache-2.0 | -7,123,106,261,911,913,000 | 23.884615 | 79 | 0.648377 | false |
lavanoid/pi-rc | control_dune_warrior.py | 1 | 8703 | #!/usr/bin/env python
"""Manually send commands to the RC car."""
import argparse
import json
import pygame
import pygame.font
import socket
import sys
from common import server_up
UP = LEFT = DOWN = RIGHT = False
QUIT = False
# pylint: disable=superfluous-parens
def dead_frequency(frequency):
"""Returns an approprtiate dead signal frequency for the given signal."""
if frequency < 38:
return 49.890
return 26.995
def format_command(
frequency,
useconds
):
"""Returns the JSON command string for this command tuple."""
dead = dead_frequency(frequency)
return {
'frequency': frequency,
'dead_frequency': dead,
'burst_us': useconds,
'spacing_us': useconds,
'repeats': 1,
}
def input_function(type_cast):
"""Returns the input function for the running version of Python for reading
data from stdin.
"""
# pylint: disable=bad-builtin
if sys.version_info.major == 2:
return lambda message: type_cast(raw_input(message))
else:
return lambda message: type_cast(input(message))
def get_command_array(parser):
"""Returns an array of command information that can be used in the
format_command function.
"""
args = parser.parse_args()
read_float = input_function(float)
read_int = input_function(int)
option_to_prompt_and_function = {
'frequency': ('Command frequency? ', read_float),
}
for option, prompt_and_function in option_to_prompt_and_function.items():
if getattr(args, option) is None:
prompt, function = prompt_and_function
setattr(args, option, function(prompt))
return [
float(args.frequency),
]
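# Note: a second make_parser() defined further down redefines this function
# with default argument values; since the later definition is the one bound at
# import time, that second version is what main() actually uses.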
def make_parser():
"""Builds and returns an argument parser."""
parser = argparse.ArgumentParser(
description='Sends burst commands to Raspberry Pi RC.'
)
parser.add_argument(
'-p',
'--port',
dest='port',
help='The port to send control commands to.',
default=12345,
type=int
)
parser.add_argument(
'-s',
'--server',
dest='server',
help='The server to send control commands to.',
default='127.1'
)
parser.add_argument(
'-f',
'--frequency',
dest='frequency',
help='The frequency to broadcast commands on.'
)
return parser
def to_bit(number):
if number > 0:
return 1
return 0
def ones_count(number):
mask = 1
ones = 0
while mask <= number:
ones += to_bit(mask & number)
mask <<= 1
return ones
def format_dune_warrior_command(throttle, turn, frequency):
"""Formats a command to JSON to the Raspberry Pi."""
command = [format_command(frequency, 500)]
if throttle >= 32 or throttle < 0:
raise ValueError('Invalid throttle')
# Turning too sharply causes the servo to push harder than it can go, so limit this
if turn >= 58 or turn < 8:
raise ValueError('Invalid turn')
even_parity_bit = to_bit(
(
ones_count(throttle)
+ ones_count(turn)
+ 3
) % 2
)
bit_pattern = (
to_bit(turn & 0x8),
to_bit(turn & 0x4),
to_bit(turn & 0x2),
to_bit(turn & 0x1),
0,
0,
to_bit(turn & 0x20),
to_bit(turn & 0x10),
to_bit(throttle & 0x10),
to_bit(throttle & 0x8),
to_bit(throttle & 0x4),
to_bit(throttle & 0x2),
to_bit(throttle & 0x1),
1,
1,
1,
0,
0,
even_parity_bit,
0,
0,
0
)
assert(len(bit_pattern) == 22)
assert(sum(bit_pattern) % 2 == 0)
total_useconds = 1000
for bit in bit_pattern[:-1]:
if bit == 0:
useconds = 127
else:
useconds = 200
command.append(format_command(27.145, useconds))
total_useconds += useconds
if bit_pattern[-1] == 0:
useconds = 127
else:
useconds = 200
total_useconds += useconds
command.append({
'frequency': frequency,
'dead_frequency': dead_frequency(frequency),
'burst_us': useconds,
'spacing_us': 7000 - total_useconds,
'repeats': 1,
})
command_str = json.dumps(command)
if sys.version_info.major == 3:
command_str = bytes(command_str, 'utf-8')
return command_str
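# Rough usage sketch: format_dune_warrior_command(16, 32, 27.145) encodes a
# neutral throttle/turn pair as a 22-bit pattern (a 0 bit becomes a 127 us
# burst, a 1 bit a 200 us burst), pads the frame to roughly 7000 us, and
# returns the JSON string that interactive_control() sends over UDP.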
def get_keys():
"""Returns a tuple of (UP, DOWN, LEFT, RIGHT, changed) representing which
keys are UP or DOWN and whether or not the key states changed.
"""
change = False
key_to_global_name = {
pygame.K_LEFT: 'LEFT',
pygame.K_RIGHT: 'RIGHT',
pygame.K_UP: 'UP',
pygame.K_DOWN: 'DOWN',
pygame.K_ESCAPE: 'QUIT',
pygame.K_q: 'QUIT',
}
for event in pygame.event.get():
if event.type == pygame.QUIT:
global QUIT
QUIT = True
elif event.type in {pygame.KEYDOWN, pygame.KEYUP}:
down = (event.type == pygame.KEYDOWN)
change = (event.key in key_to_global_name)
if event.key in key_to_global_name:
globals()[key_to_global_name[event.key]] = down
return (UP, DOWN, LEFT, RIGHT, change)
def interactive_control(host, port, frequency):
"""Runs the interactive control."""
pygame.init()
size = (300, 400)
screen = pygame.display.set_mode(size)
# pylint: disable=too-many-function-args
background = pygame.Surface(screen.get_size())
clock = pygame.time.Clock()
black = (0, 0, 0)
white = (255, 255, 255)
big_font = pygame.font.Font(None, 40)
little_font = pygame.font.Font(None, 24)
pygame.display.set_caption('Dune Warrior')
text = big_font.render('Use arrows to move', 1, white)
text_position = text.get_rect(centerx=size[0] / 2)
background.blit(text, text_position)
screen.blit(background, (0, 0))
pygame.display.flip()
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
while not QUIT:
up, down, left, right, change = get_keys()
if change:
# Something changed, so send a new command
throttle = 16
turn = 32
if up:
throttle = 24
elif down:
throttle = 8
if left:
turn = 12
elif right:
turn = 52
command_json = format_dune_warrior_command(throttle, turn, frequency)
sock.sendto(command_json, (host, port))
# Show the command and JSON
background.fill(black)
text = big_font.render(command_json[:100], 1, white)
text_position = text.get_rect(centerx=size[0] / 2)
background.blit(text, text_position)
pretty = json.dumps(json.loads(command_json), indent=4)
pretty_y_position = big_font.size(command_json)[1] + 10
for line in pretty.split('\n'):
text = little_font.render(line, 1, white)
text_position = text.get_rect(x=0, y=pretty_y_position)
pretty_y_position += little_font.size(line)[1]
background.blit(text, text_position)
screen.blit(background, (0, 0))
pygame.display.flip()
# Limit to 20 frames per second
clock.tick(60)
pygame.quit()
def make_parser():
"""Builds and returns an argument parser."""
parser = argparse.ArgumentParser(
description='Interactive controller for the Raspberry Pi RC.'
)
parser.add_argument(
'-p',
'--port',
dest='port',
help='The port to send control commands to.',
type=int,
default=12345,
)
parser.add_argument(
'-s',
'--server',
dest='server',
help='The server to send control commands to.',
type=str,
default='127.1',
)
parser.add_argument(
'-f',
'--frequency',
dest='frequency',
help='The frequency to broadcast signals on.',
type=float,
default=27.145,
)
return parser
def main():
"""Parses command line arguments and runs the interactive controller."""
parser = make_parser()
args = parser.parse_args()
print('Sending commands to ' + args.server + ':' + str(args.port))
if not server_up(args.server, args.port, args.frequency):
sys.stderr.write('Unable to contact server; did you start it?\n')
sys.exit(1)
interactive_control(args.server, args.port, args.frequency)
if __name__ == '__main__':
main()
| gpl-2.0 | 9,042,649,700,021,385,000 | 25.372727 | 87 | 0.568884 | false |
damaggu/SAMRI | samri/pipelines/utils.py | 1 | 6258 | # -*- coding: utf-8 -*-
from __future__ import print_function, division, unicode_literals, absolute_import
def parse_paravision_date(pv_date):
"""Convert ParaVision-style datetime string to Python datetime object.
Parameters
----------
pv_date : str
ParaVision datetime string.
Returns
-------
`datetime.datetime` : A Python datetime object.
Notes
-----
	The datetime object produced does not contain a timezone, and should therefore only be used to determine time deltas relative to other datetimes from the same session.
"""
from datetime import datetime
pv_date, _ = pv_date.split('+')
pv_date += "000"
pv_date = datetime.strptime(pv_date, "%Y-%m-%dT%H:%M:%S,%f")
return pv_date
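# For instance, a ParaVision timestamp such as "2016-07-21T14:18:34,301+0200"
# (hypothetical value) parses to datetime(2016, 7, 21, 14, 18, 34, 301000);
# the timezone offset after '+' is discarded.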
def fslmaths_invert_values(img_path):
"""Calculates the op_string required to make an fsl.ImageMaths() node invert an image"""
op_string = "-sub {0} -sub {0}".format(img_path)
return op_string
def iterfield_selector(iterfields, selector, action):
"""Include or exclude entries from iterfields based on a selector dictionary
Parameters
----------
iterfields : list
		A list of lists (or tuples) containing entries formatted as (subject_id,session_id,trial_id)
selector : dict
A dictionary with any combination of "sessions", "subjects", "trials" as keys and corresponding identifiers as values.
action : "exclude" or "include"
Whether to exclude or include (and exclude all the other) matching entries from the output.
"""
name_map = {"subjects": 0, "sessions": 1, "trials":2}
keep = []
for ix, iterfield in enumerate(iterfields):
for key in selector:
selector[key] = [str(i) for i in selector[key]]
if iterfield[name_map[key]] in selector[key]:
keep.append(ix)
break
if action == "exclude":
iterfields = [iterfields[i] for i in range(len(iterfields)) if i not in keep]
elif action == "include":
iterfields = [iterfields[i] for i in keep]
return iterfields
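# Illustration with made-up entries: given iterfields like
# [("4001", "ofM", "7"), ("4002", "ofM", "7")] and selector={"subjects": ["4001"]},
# action="include" keeps only the first entry, while action="exclude" keeps
# only the second.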
def datasource_exclude(in_files, excludes, output="files"):
"""Exclude file names from a list that match a BIDS-style specifications from a dictionary.
Parameters
----------
in_files : list
A list of flie names.
excludes : dictionary
A dictionary with keys which are "subjects", "sessions", or "scans", and values which are lists giving the subject, session, or scan identifier respectively.
output : string
Either "files" or "len". The former outputs the filtered file names, the latter the length of the resulting list.
"""
if not excludes:
out_files = in_files
else:
exclude_criteria=[]
for key in excludes:
if key in "subjects":
for i in excludes[key]:
exclude_criteria.append("sub-"+str(i))
if key in "sessions":
for i in excludes[key]:
exclude_criteria.append("ses-"+str(i))
if key in "scans":
for i in excludes[key]:
exclude_criteria.append("trial-"+str(i))
out_files = [in_file for in_file in in_files if not any(criterion in in_file for criterion in exclude_criteria)]
if output == "files":
return out_files
elif output == "len":
return len(out_files)
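# For example (hypothetical paths): excludes={"subjects": ["4001"]} drops any
# file whose name contains "sub-4001"; with output="len" only the count of the
# remaining files is returned.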
def bids_dict_to_dir(bids_dictionary):
"""Concatenate a (subject, session) or (subject, session, scan) tuple to a BIDS-style path"""
subject = "sub-" + bids_dictionary['subject']
session = "ses-" + bids_dictionary['session']
return "/".join([subject,session])
def ss_to_path(subject_session):
"""Concatenate a (subject, session) or (subject, session, scan) tuple to a BIDS-style path"""
subject = "sub-" + subject_session[0]
session = "ses-" + subject_session[1]
return "/".join([subject,session])
def bids_dict_to_source(bids_dictionary, source_format):
from os import path
source = source_format.format(**bids_dictionary)
return source
def out_path(selection_df, in_path,
in_field='path',
out_field='out_path',
):
"""Select the `out_path` field corresponding to a given `in_path` from a BIDS-style selection dataframe which includes an `out_path` column.
"""
out_path = selection_df[selection_df[in_field]==in_path][out_field].item()
return out_path
def container(selection_df, out_path,
kind='',
out_field='out_path',
):
subject = selection_df[selection_df[out_field]==out_path]['subject'].item()
session = selection_df[selection_df[out_field]==out_path]['session'].item()
container = 'sub-{}/ses-{}'.format(subject,session)
if kind:
container += '/'
container += kind
return container
def bids_naming(subject_session, scan_type, metadata,
extra=['acq'],
extension='.nii.gz',
suffix='',
):
"""
Generate a BIDS filename from a subject-and-session iterator, a scan type, and a `pandas.DataFrame` metadata container.
"""
subject, session = subject_session
filename = 'sub-{}'.format(subject)
filename += '_ses-{}'.format(session)
selection = metadata[(metadata['subject']==subject)&(metadata['session']==session)&(metadata['scan_type']==scan_type)]
if selection.empty:
return
if 'acq' in extra:
acq = selection['acquisition']
if not acq.isnull().all():
acq = acq.item()
filename += '_acq-{}'.format(acq)
trial = selection['trial']
if not trial.isnull().all():
trial = trial.item()
filename += '_trial-{}'.format(trial)
if not suffix:
try:
modality = selection['modality']
except KeyError:
pass
else:
if not modality.isnull().all():
modality = modality.item()
filename += '_{}'.format(modality)
else:
filename += '_{}'.format(suffix)
filename += extension
return filename
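# Informally, the generated names follow the pattern:
# sub-<subject>_ses-<session>[_acq-<acquisition>][_trial-<trial>]_<modality-or-suffix><extension>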
def sss_filename(subject_session, scan, scan_prefix="trial", suffix="", extension=".nii.gz"):
"""Concatenate subject-condition and scan inputs to a BIDS-style filename
Parameters
----------
subject_session : list
Length-2 list of subject and session identifiers
scan : string
Scan identifier
suffix : string, optional
Measurement type suffix (commonly "bold" or "cbv")
"""
# we do not want to modify the subject_session iterator entry
from copy import deepcopy
subject_session = deepcopy(subject_session)
subject_session[0] = "sub-" + subject_session[0]
subject_session[1] = "ses-" + subject_session[1]
if suffix:
suffix = "_"+suffix
if scan_prefix:
scan = "".join([scan_prefix,"-",scan,suffix,extension])
else:
scan = "".join([scan,suffix,extension])
subject_session.append(scan)
return "_".join(subject_session)
| gpl-3.0 | -7,383,947,743,070,093,000 | 28.380282 | 167 | 0.693193 | false |
andresailer/DIRAC | tests/Integration/Resources/Catalog/FIXME_Test_CatalogPlugin.py | 1 | 18130 | #! /usr/bin/env python
# FIXME: it has to be seen if this is any useful
# FIXME: to bring back to life
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Core.Utilities.File import makeGuid
from DIRAC.Core.Utilities.Adler import stringAdler
from types import *
import unittest,time,os,shutil,sys
if len(sys.argv) < 2:
print 'Usage: TestCatalogPlugIn.py CatalogClient'
sys.exit()
else:
catalogClientToTest = sys.argv[1]
class CatalogPlugInTestCase(unittest.TestCase):
""" Base class for the CatalogPlugin test case """
def setUp(self):
self.fullMetadata = ['Status', 'ChecksumType', 'OwnerRole', 'CreationDate', 'Checksum', 'ModificationDate', 'OwnerDN', 'Mode', 'GUID', 'Size']
self.dirMetadata = self.fullMetadata + ['NumberOfSubPaths']
self.fileMetadata = self.fullMetadata + ['NumberOfLinks']
self.catalog = FileCatalog(catalogs=[catalogClientToTest])
valid = self.catalog.isOK()
self.assertTrue(valid)
self.destDir = '/lhcb/test/unit-test/TestCatalogPlugin'
self.link = "%s/link" % self.destDir
# Clean the existing directory
self.cleanDirectory()
res = self.catalog.createDirectory(self.destDir)
returnValue = self.parseResult(res,self.destDir)
# Register some files to work with
self.numberOfFiles = 2
self.files = []
for i in xrange(self.numberOfFiles):
lfn = "%s/testFile_%d" % (self.destDir,i)
res = self.registerFile(lfn)
self.assertTrue(res)
self.files.append(lfn)
def registerFile(self,lfn):
pfn = 'protocol://host:port/storage/path%s' % lfn
size = 10000000
se = 'DIRAC-storage'
guid = makeGuid()
adler = stringAdler(guid)
fileDict = {}
fileDict[lfn] = {'PFN':pfn,'Size':size,'SE':se,'GUID':guid,'Checksum':adler}
res = self.catalog.addFile(fileDict)
return self.parseResult(res,lfn)
def parseResult(self,res,path):
self.assertTrue(res['OK'])
self.assertTrue(res['Value'])
self.assertTrue(res['Value']['Successful'])
self.assertTrue(res['Value']['Successful'].has_key(path))
return res['Value']['Successful'][path]
def parseError(self,res,path):
self.assertTrue(res['OK'])
self.assertTrue(res['Value'])
self.assertTrue(res['Value']['Failed'])
self.assertTrue(res['Value']['Failed'].has_key(path))
return res['Value']['Failed'][path]
def cleanDirectory(self):
res = self.catalog.exists(self.destDir)
returnValue = self.parseResult(res,self.destDir)
if not returnValue:
return
res = self.catalog.listDirectory(self.destDir)
returnValue = self.parseResult(res,self.destDir)
toRemove = returnValue['Files'].keys()
if toRemove:
self.purgeFiles(toRemove)
res = self.catalog.removeDirectory(self.destDir)
returnValue = self.parseResult(res,self.destDir)
self.assertTrue(returnValue)
def purgeFiles(self,lfns):
for lfn in lfns:
res = self.catalog.getReplicas(lfn,True)
replicas = self.parseResult(res,lfn)
for se,pfn in replicas.items():
repDict = {}
repDict[lfn] = {'PFN':pfn,'SE':se}
res = self.catalog.removeReplica(repDict)
self.parseResult(res,lfn)
res = self.catalog.removeFile(lfn)
self.parseResult(res,lfn)
def tearDown(self):
self.cleanDirectory()
class FileTestCase(CatalogPlugInTestCase):
def test_isFile(self):
# Test isFile with a file
res = self.catalog.isFile(self.files[0])
returnValue = self.parseResult(res,self.files[0])
self.assertTrue(returnValue)
# Test isFile for missing path
res = self.catalog.isFile(self.files[0][:-1])
error = self.parseError(res,self.files[0][:-1])
self.assertEqual(error,"No such file or directory")
# Test isFile with a directory
res = self.catalog.isFile(self.destDir)
returnValue = self.parseResult(res,self.destDir)
self.assertFalse(returnValue)
def test_getFileMetadata(self):
# Test getFileMetadata with a file
res = self.catalog.getFileMetadata(self.files[0])
returnValue = self.parseResult(res,self.files[0])
self.assertEqual(returnValue['Status'],'-')
self.assertEqual(returnValue['Size'],10000000)
self.metadata = ['Status', 'ChecksumType', 'NumberOfLinks', 'CreationDate', 'Checksum', 'ModificationDate', 'Mode', 'GUID', 'Size']
for key in self.metadata:
self.assertTrue(returnValue.has_key(key))
# Test getFileMetadata for missing path
res = self.catalog.getFileMetadata(self.files[0][:-1])
error = self.parseError(res,self.files[0][:-1])
self.assertEqual(error,"No such file or directory")
# Test getFileMetadata with a directory
res = self.catalog.getFileMetadata(self.destDir)
returnValue = self.parseResult(res,self.destDir)
self.assertEqual(returnValue['Status'],'-')
self.assertEqual(returnValue['Size'],0)
self.metadata = ['Status', 'ChecksumType', 'NumberOfLinks', 'CreationDate', 'Checksum', 'ModificationDate', 'Mode', 'GUID', 'Size']
for key in self.metadata:
self.assertTrue(returnValue.has_key(key))
def test_getFileSize(self):
# Test getFileSize with a file
res = self.catalog.getFileSize(self.files[0])
returnValue = self.parseResult(res,self.files[0])
self.assertEqual(returnValue,10000000)
# Test getFileSize for missing path
res = self.catalog.getFileSize(self.files[0][:-1])
error = self.parseError(res,self.files[0][:-1])
self.assertEqual(error,"No such file or directory")
# Test getFileSize with a directory
res = self.catalog.getFileSize(self.destDir)
returnValue = self.parseResult(res,self.destDir)
self.assertEqual(returnValue,0)
def test_getReplicas(self):
# Test getReplicas with a file
res = self.catalog.getReplicas(self.files[0])
returnValue = self.parseResult(res,self.files[0])
self.assertEqual(returnValue.keys(),['DIRAC-storage'])
self.assertEqual(returnValue.values(),['protocol://host:port/storage/path%s' % self.files[0]])
# Test getReplicas for missing path
res = self.catalog.getReplicas(self.files[0][:-1])
error = self.parseError(res,self.files[0][:-1])
self.assertEqual(error,"No such file or directory")
# Test getReplicas with a directory
res = self.catalog.getReplicas(self.destDir)
error = self.parseError(res,self.destDir)
# TODO return an error (currently 'File has zero replicas')
#self.assertEqual(error,"Supplied path not a file")
def test_getReplicaStatus(self):
# Test getReplicaStatus with a file with existing replica
replicaDict = {}
replicaDict[self.files[0]] = 'DIRAC-storage'
res = self.catalog.getReplicaStatus(replicaDict)
returnValue = self.parseResult(res,self.files[0])
self.assertEqual(returnValue,'U')
# Test getReplicaStatus with a file with non-existing replica
replicaDict = {}
replicaDict[self.files[0]] = 'Missing'
res = self.catalog.getReplicaStatus(replicaDict)
error = self.parseError(res,self.files[0])
self.assertEqual(error,"No replica at supplied site")
# Test getReplicaStatus for missing path
res = self.catalog.getReplicaStatus(self.files[0][:-1])
error = self.parseError(res,self.files[0][:-1])
self.assertEqual(error,"No such file or directory")
# Test getReplicaStatus with a directory
res = self.catalog.getReplicas(self.destDir)
error = self.parseError(res,self.destDir)
# TODO return an error (currently 'File has zero replicas')
#self.assertEqual(error,"Supplied path not a file")
def test_exists(self):
# Test exists with a file
res = self.catalog.exists(self.files[0])
returnValue = self.parseResult(res,self.files[0])
self.assertTrue(returnValue)
# Test exists for missing path
res = self.catalog.exists(self.files[0][:-1])
returnValue = self.parseResult(res,self.files[0][:-1])
self.assertFalse(returnValue)
# Test exists with a directory
res = self.catalog.exists(self.destDir)
returnValue = self.parseResult(res,self.destDir)
self.assertTrue(returnValue)
def test_addReplica(self):
# Test getReplicas with a file
res = self.catalog.getReplicas(self.files[0])
returnValue = self.parseResult(res,self.files[0])
self.assertEqual(returnValue.keys(),['DIRAC-storage'])
self.assertEqual(returnValue.values(),['protocol://host:port/storage/path%s' % self.files[0]])
# Test the addReplica with a file
registrationDict = {}
registrationDict[self.files[0]] = {'SE':'DIRAC-storage2','PFN':'protocol2://host:port/storage/path%s' % self.files[0]}
res = self.catalog.addReplica(registrationDict)
returnValue = self.parseResult(res,self.files[0])
self.assertTrue(returnValue)
# Check the addReplica worked correctly
res = self.catalog.getReplicas(self.files[0])
returnValue = self.parseResult(res,self.files[0])
self.assertEqual(sorted(returnValue.keys()),sorted(['DIRAC-storage','DIRAC-storage2']))
self.assertEqual(sorted(returnValue.values()),sorted(['protocol://host:port/storage/path%s' % self.files[0], 'protocol2://host:port/storage/path%s' % self.files[0]]))
# Test the addReplica with a non-existant file
registrationDict = {}
registrationDict[self.files[0][:-1]] = {'SE':'DIRAC-storage3','PFN':'protocol3://host:port/storage/path%s' % self.files[0]}
res = self.catalog.addReplica(registrationDict)
error = self.parseError(res,self.files[0][:-1])
# TODO When the master fails it should return an error in FileCatalog
#self.assertEqual(error,"No such file or directory")
def test_setReplicaStatus(self):
# Test setReplicaStatus with a file
lfnDict = {}
lfnDict[self.files[0]] = {'PFN': 'protocol://host:port/storage/path%s' % self.files[0],'SE':'DIRAC-storage' ,'Status':'P'}
res = self.catalog.setReplicaStatus(lfnDict)
returnValue = self.parseResult(res,self.files[0])
self.assertTrue(returnValue)
# Check the setReplicaStatus worked correctly
res = self.catalog.getReplicas(self.files[0])
returnValue = self.parseResult(res,self.files[0])
self.assertFalse(returnValue)
#time.sleep(2)
# Test setReplicaStatus with a file
lfnDict = {}
lfnDict[self.files[0]] = {'PFN': 'protocol://host:port/storage/path%s' % self.files[0],'SE':'DIRAC-storage' ,'Status':'U'}
res = self.catalog.setReplicaStatus(lfnDict)
returnValue = self.parseResult(res,self.files[0])
self.assertTrue(returnValue)
# Check the setReplicaStatus worked correctly
res = self.catalog.getReplicas(self.files[0])
returnValue = self.parseResult(res,self.files[0])
self.assertEqual(returnValue.keys(),['DIRAC-storage'])
self.assertEqual(returnValue.values(),['protocol://host:port/storage/path%s' % self.files[0]])
# Test setReplicaStatus with non-existant file
lfnDict = {}
lfnDict[self.files[0][:-1]] = {'PFN': 'protocol://host:port/storage/path%s' % self.files[0][:-1],'SE':'DIRAC-storage' ,'Status':'U'}
res = self.catalog.setReplicaStatus(lfnDict)
error = self.parseError(res,self.files[0][:-1])
# TODO When the master fails it should return an error in FileCatalog
#self.assertEqual(error,"No such file or directory")
def test_setReplicaHost(self):
# Test setReplicaHost with a file
lfnDict = {}
lfnDict[self.files[0]] = {'PFN': 'protocol://host:port/storage/path%s' % self.files[0],'SE':'DIRAC-storage' ,'NewSE':'DIRAC-storage2'}
res = self.catalog.setReplicaHost(lfnDict)
returnValue = self.parseResult(res,self.files[0])
self.assertTrue(returnValue)
# Check the setReplicaHost worked correctly
res = self.catalog.getReplicas(self.files[0])
returnValue = self.parseResult(res,self.files[0])
self.assertEqual(returnValue.keys(),['DIRAC-storage2'])
self.assertEqual(returnValue.values(),['protocol://host:port/storage/path%s' % self.files[0]])
# Test setReplicaHost with non-existant file
lfnDict = {}
lfnDict[self.files[0][:-1]] = {'PFN': 'protocol://host:port/storage/path%s' % self.files[0][:-1],'SE':'DIRAC-storage' ,'NewSE':'DIRAC-storage2'}
res = self.catalog.setReplicaHost(lfnDict)
error = self.parseError(res,self.files[0][:-1])
# TODO When the master fails it should return an error in FileCatalog
#self.assertEqual(error,"No such file or directory")
class DirectoryTestCase(CatalogPlugInTestCase):
def test_isDirectory(self):
# Test isDirectory with a directory
res = self.catalog.isDirectory(self.destDir)
returnValue = self.parseResult(res,self.destDir)
self.assertTrue(returnValue)
# Test isDirectory with a file
res = self.catalog.isDirectory(self.files[0])
returnValue = self.parseResult(res,self.files[0])
self.assertFalse(returnValue)
# Test isDirectory for missing path
res = self.catalog.isDirectory(self.files[0][:-1])
error = self.parseError(res,self.files[0][:-1])
self.assertEqual(error,"No such file or directory")
def test_getDirectoryMetadata(self):
# Test getDirectoryMetadata with a directory
res = self.catalog.getDirectoryMetadata(self.destDir)
returnValue = self.parseResult(res,self.destDir)
self.assertEqual(returnValue['Status'],'-')
self.assertEqual(returnValue['Size'],0)
self.assertEqual(returnValue['NumberOfSubPaths'],self.numberOfFiles)
for key in self.dirMetadata:
self.assertTrue(returnValue.has_key(key))
# Test getDirectoryMetadata with a file
res = self.catalog.getDirectoryMetadata(self.files[0])
returnValue = self.parseResult(res,self.files[0])
self.assertEqual(returnValue['Status'],'-')
self.assertEqual(returnValue['Size'],10000000)
for key in self.dirMetadata:
self.assertTrue(returnValue.has_key(key))
# Test getDirectoryMetadata for missing path
res = self.catalog.getDirectoryMetadata(self.files[0][:-1])
error = self.parseError(res,self.files[0][:-1])
self.assertEqual(error,"No such file or directory")
def test_listDirectory(self):
# Test listDirectory for directory
res = self.catalog.listDirectory(self.destDir,True)
returnValue = self.parseResult(res,self.destDir)
self.assertEqual(returnValue.keys(),['Files','SubDirs','Links'])
self.assertFalse(returnValue['SubDirs'])
self.assertFalse(returnValue['Links'])
self.assertEqual(sorted(returnValue['Files'].keys()),sorted(self.files))
directoryFiles = returnValue['Files']
for lfn,fileDict in directoryFiles.items():
self.assertTrue(fileDict.has_key('Replicas'))
self.assertEqual(len(fileDict['Replicas']),1)
self.assertTrue(fileDict.has_key('MetaData'))
for key in self.fileMetadata:
self.assertTrue(fileDict['MetaData'].has_key(key))
# Test listDirectory for a file
res = self.catalog.listDirectory(self.files[0],True)
error = self.parseError(res,self.files[0])
self.assertEqual(error,"Not a directory")
# Test listDirectory for missing path
res = self.catalog.listDirectory(self.files[0][:-1])
error = self.parseError(res,self.files[0][:-1])
self.assertEqual(error,"No such file or directory")
def test_getDirectoryReplicas(self):
# Test getDirectoryReplicas for directory
res = self.catalog.getDirectoryReplicas(self.destDir,True)
returnValue = self.parseResult(res,self.destDir)
self.assertTrue(returnValue.has_key(self.files[0]))
fileReplicas = returnValue[self.files[0]]
self.assertEqual(fileReplicas.keys(),['DIRAC-storage'])
self.assertEqual(fileReplicas.values(),['protocol://host:port/storage/path%s' % self.files[0]])
# Test getDirectoryReplicas for a file
res = self.catalog.getDirectoryReplicas(self.files[0],True)
error = self.parseError(res,self.files[0])
self.assertEqual(error,"Not a directory")
# Test getDirectoryReplicas for missing path
res = self.catalog.getDirectoryReplicas(self.files[0][:-1])
error = self.parseError(res,self.files[0][:-1])
self.assertEqual(error,"No such file or directory")
def test_getDirectorySize(self):
# Test getDirectorySize for directory
res = self.catalog.getDirectorySize(self.destDir)
returnValue = self.parseResult(res,self.destDir)
for key in ['Files','TotalSize','SubDirs','ClosedDirs','SiteUsage']:
self.assertTrue(returnValue.has_key(key))
self.assertEqual(returnValue['Files'],self.numberOfFiles)
self.assertEqual(returnValue['TotalSize'],(self.numberOfFiles*10000000))
#TODO create a sub dir, check, close it, check
self.assertFalse(returnValue['SubDirs'])
self.assertFalse(returnValue['ClosedDirs'])
usage = returnValue['SiteUsage']
self.assertEqual(usage.keys(),['DIRAC-storage'])
self.assertEqual(usage['DIRAC-storage']['Files'],self.numberOfFiles)
self.assertEqual(usage['DIRAC-storage']['Size'],(self.numberOfFiles*10000000))
# Test getDirectorySize for a file
res = self.catalog.getDirectorySize(self.files[0])
error = self.parseError(res,self.files[0])
self.assertEqual(error,"Not a directory")
# Test getDirectorySize for missing path
res = self.catalog.getDirectorySize(self.files[0][:-1])
error = self.parseError(res,self.files[0][:-1])
self.assertEqual(error,"No such file or directory")
class LinkTestCase(CatalogPlugInTestCase):
#'createLink','removeLink','isLink','readLink'
pass
class DatasetTestCase(CatalogPlugInTestCase):
#'removeDataset','removeFileFromDataset','createDataset'
pass
if __name__ == '__main__':
#TODO getDirectoryMetadata and getFileMetadata should be merged
#TODO Fix the return structure of write operations from FileCatalog
suite = unittest.defaultTestLoader.loadTestsFromTestCase(FileTestCase)
#suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(FileTestCase))
#suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(DirectoryTestCase))
testResult = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(not testResult.wasSuccessful())
| gpl-3.0 | 3,103,157,516,962,040,000 | 44.325 | 170 | 0.703475 | false |
nmalaguti/mini-halite | tournament/migrations/0001_initial.py | 1 | 2061 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-09 05:56
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Bot',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, unique=True)),
('mu', models.FloatField(default=25.0)),
('sigma', models.FloatField(default=8.33333)),
('enabled', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='Match',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateTimeField()),
('replay', models.FileField(upload_to='hlt/')),
('seed', models.CharField(max_length=255)),
('width', models.IntegerField()),
('height', models.IntegerField()),
],
options={
'verbose_name_plural': 'matches',
},
),
migrations.CreateModel(
name='MatchResult',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('rank', models.IntegerField()),
('mu', models.FloatField()),
('sigma', models.FloatField()),
('last_frame_alive', models.IntegerField()),
('bot', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='matches', to='tournament.Bot')),
('match', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='results', to='tournament.Match')),
],
),
]
| mit | 5,398,077,657,251,073,000 | 37.886792 | 137 | 0.540029 | false |
icydoge/Constantine | setup.py | 1 | 1375 | """ A setuptools based setup module.
Adapted from:
https://packaging.python.org/en/latest/distributing.html
"""
from setuptools import setup, find_packages
from codecs import open
from os import path
import glob
here = path.abspath(path.dirname(__file__))
try:
long_description = open('README.md').read()
except:
long_description = ""
setup(
name='Constantine',
version='1.2.3',
description='Automatic event poster generator',
long_description=long_description,
url='https://github.com/chongyangshi/Constantine',
author='C Shi',
author_email='[email protected]',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: End Users/Desktop',
'Topic :: Text Processing :: General',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords='pdf generator',
packages = ['Constantine'],
install_requires=['requests'],
extras_require={},
entry_points={
'console_scripts': [
'Constantine=Constantine.__main__:execute',
'Constantine-auto=Constantine.auto_poster:run'
],
},
include_package_data = True
)
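# Usage sketch (not part of the original setup.py; command arguments are illustrative):
# installing from a local checkout with "pip install ." exposes the console scripts
# declared above, e.g.
#
#   $ pip install .
#   $ Constantine ...        # manual poster generation
#   $ Constantine-auto       # automatic poster generation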
| mit | 4,234,800,370,495,308,000 | 27.645833 | 60 | 0.624727 | false |
machinalis/featureforge | tests/test_stats_manager.py | 1 | 2169 | from datetime import timedelta
import mock
from unittest import TestCase
import warnings
from featureforge.experimentation.stats_manager import StatsManager
DEPRECATION_MSG = (
'Init arguments will change. '
'Take a look to http://feature-forge.readthedocs.io/en/latest/experimentation.html'
'#exploring-the-finished-experiments'
)
DB_CONNECTION_PATH = 'featureforge.experimentation.stats_manager.StatsManager.setup_database_connection' # NOQA
class TestStatsManager(TestCase):
def setUp(self):
self.db_name = 'a_db_name'
self.booking_duration = 10
def test_init_with_db_name_as_first_parameter_and_booking_duration_as_second(self):
with mock.patch(DB_CONNECTION_PATH):
st = StatsManager(db_name=self.db_name, booking_duration=self.booking_duration)
self.assertEqual(st._db_config['name'], self.db_name)
self.assertEqual(st.booking_delta, timedelta(seconds=self.booking_duration))
def test_if_init_with_db_name_as_second_argument_will_warning(self):
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always", DeprecationWarning)
# Trigger a warning.
with mock.patch(DB_CONNECTION_PATH):
StatsManager(self.booking_duration, self.db_name)
# Verify some things
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, DeprecationWarning))
self.assertEqual(str(w[-1].message), DEPRECATION_MSG)
def test_if_use_db_name_as_second_argument_warnings_but_can_continue(self):
with warnings.catch_warnings(record=True):
# Cause all warnings to always be triggered.
warnings.simplefilter("always", DeprecationWarning)
# Trigger a warning.
with mock.patch(DB_CONNECTION_PATH):
st = StatsManager(self.booking_duration, self.db_name)
self.assertEqual(st._db_config['name'], self.db_name)
self.assertEqual(st.booking_delta, timedelta(seconds=self.booking_duration))
| bsd-3-clause | -7,916,223,059,271,612,000 | 43.265306 | 113 | 0.672199 | false |
jodal/mopidy-spotify | tests/conftest.py | 1 | 8870 | from __future__ import unicode_literals
import mock
from mopidy import backend as backend_api
import pytest
import spotify
from mopidy_spotify import backend, library
@pytest.fixture
def config(tmpdir):
return {
'core': {
'cache_dir': '%s' % tmpdir.join('cache'),
'data_dir': '%s' % tmpdir.join('data'),
},
'proxy': {
},
'spotify': {
'username': 'alice',
'password': 'password',
'bitrate': 160,
'volume_normalization': True,
'private_session': False,
'timeout': 10,
'allow_cache': True,
'allow_network': True,
'allow_playlists': True,
'search_album_count': 20,
'search_artist_count': 10,
'search_track_count': 50,
'toplist_countries': ['GB', 'US'],
}
}
@pytest.yield_fixture
def spotify_mock():
patcher = mock.patch.object(backend, 'spotify', spec=spotify)
yield patcher.start()
patcher.stop()
@pytest.fixture
def sp_user_mock():
sp_user = mock.Mock(spec=spotify.User)
sp_user.is_loaded = True
sp_user.canonical_name = 'alice'
return sp_user
@pytest.fixture
def sp_artist_mock():
sp_artist = mock.Mock(spec=spotify.Artist)
sp_artist.is_loaded = True
sp_artist.name = 'ABBA'
sp_link = mock.Mock(spec=spotify.Link)
sp_link.uri = 'spotify:artist:abba'
sp_link.type = spotify.LinkType.ARTIST
sp_link.as_artist.return_value = sp_artist
sp_artist.link = sp_link
return sp_artist
@pytest.fixture
def sp_unloaded_artist_mock():
sp_artist = mock.Mock(spec=spotify.Artist)
sp_artist.is_loaded = False
sp_artist.name = None
sp_link = mock.Mock(spec=spotify.Link)
sp_link.uri = 'spotify:artist:abba'
sp_link.type = spotify.LinkType.ARTIST
sp_link.as_artist.return_value = sp_artist
sp_artist.link = sp_link
return sp_artist
@pytest.fixture
def sp_artist_browser_mock(sp_artist_mock, sp_album_mock):
sp_artist_browser = mock.Mock(spec=spotify.ArtistBrowser)
sp_artist_browser.artist = sp_artist_mock
sp_artist_browser.albums = [sp_album_mock, sp_album_mock]
sp_artist_mock.browse.return_value = sp_artist_browser
return sp_artist_browser
@pytest.fixture
def sp_album_mock(sp_artist_mock):
sp_album = mock.Mock(spec=spotify.Album)
sp_album.is_loaded = True
sp_album.name = 'DEF 456'
sp_album.artist = sp_artist_mock
sp_album.year = 2001
sp_link = mock.Mock(spec=spotify.Link)
sp_link.uri = 'spotify:album:def'
sp_link.type = spotify.LinkType.ALBUM
sp_link.as_album.return_value = sp_album
sp_album.link = sp_link
return sp_album
@pytest.fixture
def sp_unloaded_album_mock(sp_unloaded_artist_mock):
sp_album = mock.Mock(spec=spotify.Album)
    sp_album.is_loaded = False
sp_album.name = None
# Optimally, we should test with both None and sp_unloaded_artist_mock
sp_album.artist = sp_unloaded_artist_mock
sp_album.year = None
sp_link = mock.Mock(spec=spotify.Link)
sp_link.uri = 'spotify:album:def'
sp_link.type = spotify.LinkType.ALBUM
sp_link.as_album.return_value = sp_album
sp_album.link = sp_link
return sp_album
@pytest.fixture
def sp_album_browser_mock(sp_album_mock, sp_track_mock):
sp_album_browser = mock.Mock(spec=spotify.AlbumBrowser)
sp_album_browser.album = sp_album_mock
sp_album_browser.tracks = [sp_track_mock, sp_track_mock]
sp_album_browser.load.return_value = sp_album_browser
sp_album_mock.browse.return_value = sp_album_browser
return sp_album_browser
@pytest.fixture
def sp_track_mock(sp_artist_mock, sp_album_mock):
sp_track = mock.Mock(spec=spotify.Track)
sp_track.is_loaded = True
sp_track.error = spotify.ErrorType.OK
sp_track.availability = spotify.TrackAvailability.AVAILABLE
sp_track.name = 'ABC 123'
sp_track.artists = [sp_artist_mock]
sp_track.album = sp_album_mock
sp_track.duration = 174300
sp_track.disc = 1
sp_track.index = 7
sp_link = mock.Mock(spec=spotify.Link)
sp_link.uri = 'spotify:track:abc'
sp_link.type = spotify.LinkType.TRACK
sp_link.as_track.return_value = sp_track
sp_track.link = sp_link
return sp_track
@pytest.fixture
def sp_unloaded_track_mock(sp_unloaded_artist_mock, sp_unloaded_album_mock):
sp_track = mock.Mock(spec=spotify.Track)
sp_track.is_loaded = False
sp_track.error = spotify.ErrorType.OK
sp_track.availability = None
sp_track.name = None
# Optimally, we should test with both None and [sp_unloaded_artist_mock]
sp_track.artists = [sp_unloaded_artist_mock]
# Optimally, we should test with both None and sp_unloaded_album_mock
sp_track.album = sp_unloaded_album_mock
sp_track.duration = None
sp_track.disc = None
sp_track.index = None
sp_link = mock.Mock(spec=spotify.Link)
sp_link.uri = 'spotify:track:abc'
sp_link.type = spotify.LinkType.TRACK
sp_link.as_track.return_value = sp_track
sp_track.link = sp_link
return sp_track
@pytest.fixture
def sp_starred_mock(sp_user_mock, sp_artist_mock, sp_album_mock):
sp_track1 = sp_track_mock(sp_artist_mock, sp_album_mock)
sp_track1.link.uri = 'spotify:track:oldest'
sp_track1.name = 'Oldest'
sp_track2 = sp_track_mock(sp_artist_mock, sp_album_mock)
sp_track2.link.uri = 'spotify:track:newest'
sp_track2.name = 'Newest'
sp_starred = mock.Mock(spec=spotify.Playlist)
sp_starred.is_loaded = True
sp_starred.owner = sp_user_mock
sp_starred.name = None
sp_starred.tracks = [sp_track1, sp_track2]
sp_link = mock.Mock(spec=spotify.Link)
sp_link.uri = 'spotify:user:alice:starred'
sp_link.type = spotify.LinkType.STARRED
sp_link.as_playlist.return_value = sp_starred
sp_starred.link = sp_link
return sp_starred
@pytest.fixture
def sp_playlist_mock(sp_user_mock, sp_track_mock):
sp_playlist = mock.Mock(spec=spotify.Playlist)
sp_playlist.is_loaded = True
sp_playlist.owner = sp_user_mock
sp_playlist.name = 'Foo'
sp_playlist.tracks = [sp_track_mock]
sp_link = mock.Mock(spec=spotify.Link)
sp_link.uri = 'spotify:user:alice:playlist:foo'
sp_link.type = spotify.LinkType.PLAYLIST
sp_link.as_playlist.return_value = sp_playlist
sp_playlist.link = sp_link
return sp_playlist
@pytest.fixture
def sp_unloaded_playlist_mock(sp_unloaded_track_mock):
sp_playlist = mock.Mock(spec=spotify.Playlist)
sp_playlist.is_loaded = False
sp_playlist.owner = None
sp_playlist.name = None
# Optimally, we should test with both None and [sp_unloaded_track_mock]
sp_playlist.tracks = [sp_unloaded_track_mock]
sp_link = mock.Mock(spec=spotify.Link)
sp_link.uri = 'spotify:user:alice:playlist:foo'
sp_link.type = spotify.LinkType.PLAYLIST
sp_link.as_playlist.return_value = sp_playlist
sp_playlist.link = sp_link
return sp_playlist
@pytest.fixture
def sp_playlist_folder_start_mock():
sp_playlist_folder_start = mock.Mock(spec=spotify.PlaylistFolder)
sp_playlist_folder_start.type = spotify.PlaylistType.START_FOLDER
sp_playlist_folder_start.name = 'Bar'
sp_playlist_folder_start.id = 17
return sp_playlist_folder_start
@pytest.fixture
def sp_playlist_folder_end_mock():
sp_playlist_folder_end = mock.Mock(spec=spotify.PlaylistFolder)
sp_playlist_folder_end.type = spotify.PlaylistType.END_FOLDER
sp_playlist_folder_end.id = 17
return sp_playlist_folder_end
@pytest.fixture
def sp_playlist_container_mock():
sp_playlist_container = mock.Mock(spec=spotify.PlaylistContainer)
return sp_playlist_container
@pytest.fixture
def sp_search_mock(sp_album_mock, sp_artist_mock, sp_track_mock):
sp_search = mock.Mock(spec=spotify.Search)
sp_search.is_loaded = True
sp_search.albums = [sp_album_mock]
sp_search.artists = [sp_artist_mock]
sp_search.tracks = [sp_track_mock, sp_track_mock]
return sp_search
@pytest.fixture
def session_mock():
sp_session_mock = mock.Mock(spec=spotify.Session)
sp_session_mock.connection.state = spotify.ConnectionState.LOGGED_IN
sp_session_mock.playlist_container = []
return sp_session_mock
@pytest.fixture
def backend_mock(session_mock, config):
backend_mock = mock.Mock(spec=backend.SpotifyBackend)
backend_mock._config = config
backend_mock._session = session_mock
backend_mock._bitrate = 160
return backend_mock
@pytest.yield_fixture
def backend_listener_mock():
patcher = mock.patch.object(
backend_api, 'BackendListener', spec=backend_api.BackendListener)
yield patcher.start()
patcher.stop()
@pytest.fixture
def provider(backend_mock):
return library.SpotifyLibraryProvider(backend_mock)
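# Usage sketch (hypothetical test module, not part of this conftest): pytest injects the
# fixtures above by argument name. Assuming mopidy's LibraryProvider base class keeps a
# reference to the backend it was created with:
#
#   def test_provider_is_created_from_backend(provider, backend_mock):
#       assert provider.backend is backend_mock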
| apache-2.0 | -8,233,648,932,760,269,000 | 27.5209 | 76 | 0.677339 | false |
googleapis/googleapis-gen | google/cloud/securitycenter/v1beta1/securitycenter-v1beta1-py/google/cloud/securitycenter/__init__.py | 1 | 4167 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.cloud.securitycenter_v1beta1.services.security_center.client import SecurityCenterClient
from google.cloud.securitycenter_v1beta1.services.security_center.async_client import SecurityCenterAsyncClient
from google.cloud.securitycenter_v1beta1.types.asset import Asset
from google.cloud.securitycenter_v1beta1.types.finding import Finding
from google.cloud.securitycenter_v1beta1.types.organization_settings import OrganizationSettings
from google.cloud.securitycenter_v1beta1.types.run_asset_discovery_response import RunAssetDiscoveryResponse
from google.cloud.securitycenter_v1beta1.types.security_marks import SecurityMarks
from google.cloud.securitycenter_v1beta1.types.securitycenter_service import CreateFindingRequest
from google.cloud.securitycenter_v1beta1.types.securitycenter_service import CreateSourceRequest
from google.cloud.securitycenter_v1beta1.types.securitycenter_service import GetOrganizationSettingsRequest
from google.cloud.securitycenter_v1beta1.types.securitycenter_service import GetSourceRequest
from google.cloud.securitycenter_v1beta1.types.securitycenter_service import GroupAssetsRequest
from google.cloud.securitycenter_v1beta1.types.securitycenter_service import GroupAssetsResponse
from google.cloud.securitycenter_v1beta1.types.securitycenter_service import GroupFindingsRequest
from google.cloud.securitycenter_v1beta1.types.securitycenter_service import GroupFindingsResponse
from google.cloud.securitycenter_v1beta1.types.securitycenter_service import GroupResult
from google.cloud.securitycenter_v1beta1.types.securitycenter_service import ListAssetsRequest
from google.cloud.securitycenter_v1beta1.types.securitycenter_service import ListAssetsResponse
from google.cloud.securitycenter_v1beta1.types.securitycenter_service import ListFindingsRequest
from google.cloud.securitycenter_v1beta1.types.securitycenter_service import ListFindingsResponse
from google.cloud.securitycenter_v1beta1.types.securitycenter_service import ListSourcesRequest
from google.cloud.securitycenter_v1beta1.types.securitycenter_service import ListSourcesResponse
from google.cloud.securitycenter_v1beta1.types.securitycenter_service import RunAssetDiscoveryRequest
from google.cloud.securitycenter_v1beta1.types.securitycenter_service import SetFindingStateRequest
from google.cloud.securitycenter_v1beta1.types.securitycenter_service import UpdateFindingRequest
from google.cloud.securitycenter_v1beta1.types.securitycenter_service import UpdateOrganizationSettingsRequest
from google.cloud.securitycenter_v1beta1.types.securitycenter_service import UpdateSecurityMarksRequest
from google.cloud.securitycenter_v1beta1.types.securitycenter_service import UpdateSourceRequest
from google.cloud.securitycenter_v1beta1.types.source import Source
__all__ = ('SecurityCenterClient',
'SecurityCenterAsyncClient',
'Asset',
'Finding',
'OrganizationSettings',
'RunAssetDiscoveryResponse',
'SecurityMarks',
'CreateFindingRequest',
'CreateSourceRequest',
'GetOrganizationSettingsRequest',
'GetSourceRequest',
'GroupAssetsRequest',
'GroupAssetsResponse',
'GroupFindingsRequest',
'GroupFindingsResponse',
'GroupResult',
'ListAssetsRequest',
'ListAssetsResponse',
'ListFindingsRequest',
'ListFindingsResponse',
'ListSourcesRequest',
'ListSourcesResponse',
'RunAssetDiscoveryRequest',
'SetFindingStateRequest',
'UpdateFindingRequest',
'UpdateOrganizationSettingsRequest',
'UpdateSecurityMarksRequest',
'UpdateSourceRequest',
'Source',
)
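# Usage sketch (not part of the generated file; assumes application default credentials are
# already configured):
#
#   from google.cloud import securitycenter
#   client = securitycenter.SecurityCenterClient()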
| apache-2.0 | 1,552,863,748,961,751,300 | 53.116883 | 111 | 0.835613 | false |
rleigh-dundee/openmicroscopy | components/tools/OmeroWeb/omeroweb/webgateway/views.py | 1 | 64885 | #
# webgateway/views.py - django application view handling functions
#
# Copyright (c) 2007, 2008, 2009 Glencoe Software, Inc. All rights reserved.
#
# This software is distributed under the terms described by the LICENCE file
# you can find at the root of the distribution bundle, which states you are
# free to use it only for non commercial purposes.
# If the file is missing please request a copy by contacting
# [email protected].
#
# Author: Carlos Neves <carlos(at)glencoesoftware.com>
import re
import omero
import omero.clients
from django.http import HttpResponse, HttpResponseServerError, HttpResponseRedirect, Http404
from django.utils import simplejson
from django.utils.encoding import smart_str
from django.utils.http import urlquote
from django.core import template_loader
from django.core.urlresolvers import reverse
from django.conf import settings
from django.template import RequestContext as Context
from omero.rtypes import rlong, unwrap
from marshal import imageMarshal, shapeMarshal
try:
from hashlib import md5
except:
from md5 import md5
from cStringIO import StringIO
from omero import client_wrapper, ApiUsageException
from omero.gateway import timeit, TimeIt
import Ice
import settings
#from models import StoredConnection
from webgateway_cache import webgateway_cache, CacheBase, webgateway_tempfile
cache = CacheBase()
connectors = {}
CONNECTOR_POOL_SIZE = 70
CONNECTOR_POOL_KEEP = 0.75 # keep only SIZE-SIZE*KEEP of the connectors if POOL_SIZE is reached
import logging, os, traceback, time, zipfile, shutil
from omeroweb.decorators import login_required
from omeroweb.connector import Connector
logger = logging.getLogger(__name__)
try:
import Image
import ImageDraw
except: #pragma: nocover
try:
from PIL import Image
from PIL import ImageDraw
except:
logger.error('No PIL installed')
def _safestr (s):
return unicode(s).encode('utf-8')
class UserProxy (object):
"""
Represents the current user of the connection, with methods delegating to the connection itself.
"""
def __init__ (self, blitzcon):
"""
Initialises the User proxy with the L{omero.gateway.BlitzGateway} connection
@param blitzcon: connection
@type blitzcon: L{omero.gateway.BlitzGateway}
"""
self._blitzcon = blitzcon
self.loggedIn = False
def logIn (self):
""" Sets the loggedIn Flag to True """
self.loggedIn = True
def isAdmin (self):
"""
True if the current user is an admin
@return: True if the current user is an admin
@rtype: Boolean
"""
return self._blitzcon.isAdmin()
def canBeAdmin (self):
"""
True if the current user can be admin
@return: True if the current user can be admin
@rtype: Boolean
"""
return self._blitzcon.canBeAdmin()
def getId (self):
"""
Returns the ID of the current user
@return: User ID
@rtype: Long
"""
return self._blitzcon.getUserId()
def getName (self):
"""
Returns the Name of the current user
@return: User Name
@rtype: String
"""
return self._blitzcon.getUser().omeName
def getFirstName (self):
"""
Returns the first name of the current user
@return: First Name
@rtype: String
"""
return self._blitzcon.getUser().firstName or self.getName()
# def getPreferences (self):
# return self._blitzcon._user.getPreferences()
#
# def getUserObj (self):
# return self._blitzcon._user
#class SessionCB (object):
# def _log (self, what, c):
# logger.debug('CONN:%s %s:%d:%s' % (what, c._user, os.getpid(), c._sessionUuid))
#
# def create (self, c):
# self._log('create',c)
#
# def join (self, c):
# self._log('join',c)
#
# def close (self, c):
# self._log('close',c)
#_session_cb = SessionCB()
def _createConnection (server_id, sUuid=None, username=None, passwd=None, host=None, port=None, retry=True, group=None, try_super=False, secure=False, anonymous=False, useragent=None):
"""
Attempts to create a L{omero.gateway.BlitzGateway} connection.
Tries to join an existing session for the specified user, using sUuid.
@param server_id: Way of referencing the server, used in connection dict keys. Int or String
@param sUuid: Session ID - used for attempts to join sessions etc without password
@param username: User name to log on with
@param passwd: Password
@param host: Host name
@param port: Port number
@param retry: Boolean
@param group: String? TODO: parameter is ignored.
@param try_super: If True, try to log on as super user, 'system' group
@param secure: If True, use an encrypted connection
@param anonymous: Boolean
@param useragent: Log which python clients use this connection. E.g. 'OMERO.webadmin'
@return: The connection
@rtype: L{omero.gateway.BlitzGateway}
"""
try:
if anonymous:
username = settings.PUBLIC_USER
passwd = settings.PUBLIC_PASSWORD
blitzcon = client_wrapper(username, passwd, host=host, port=port, group=None, try_super=try_super, secure=secure, anonymous=anonymous, useragent=useragent)
blitzcon.connect(sUuid=sUuid)
blitzcon.server_id = server_id
blitzcon.user = UserProxy(blitzcon)
if blitzcon._anonymous and hasattr(blitzcon.c, 'onEventLogs'):
logger.debug('Connecting weblitz_cache to eventslog')
def eventlistener (e):
return webgateway_cache.eventListener(server_id, e)
blitzcon.c.onEventLogs(eventlistener)
return blitzcon
except:
logger.debug(traceback.format_exc())
if not retry:
return None
logger.error("Critical error during connect, retrying after _purge")
logger.debug(traceback.format_exc())
_purge(force=True)
return _createConnection(server_id, sUuid, username, passwd, retry=False, host=host, port=port, group=None, try_super=try_super, anonymous=anonymous, useragent=useragent)
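# A minimal usage sketch for _createConnection (host, port and credentials below are
# hypothetical; real values come from the configured server list):
#
#   conn = _createConnection('1', username='demo', passwd='secret',
#                            host='omero.example.org', port=4064,
#                            secure=True, useragent='OMERO.webgateway')
#   if conn is not None:
#       logger.debug('connected as %s' % conn.user.getName())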
def _purge (force=False):
if force or len(connectors) > CONNECTOR_POOL_SIZE:
keys = connectors.keys()
for i in range(int(len(connectors)*CONNECTOR_POOL_KEEP)):
try:
c = connectors.pop(keys[i])
c.seppuku(softclose=True)
except:
logger.debug(traceback.format_exc())
logger.info('reached connector_pool_size (%d), size after purge: (%d)' %
(CONNECTOR_POOL_SIZE, len(connectors)))
def _split_channel_info (rchannels):
"""
Splits the request query channel information for images into a sequence of channels, window ranges
and channel colors.
@param rchannels: The request string with channel info. E.g 1|100:505$0000FF,-2,3|620:3879$FF0000
@type rchannels: String
@return: E.g. [1, -2, 3] [[100.0, 505.0], (None, None), [620.0, 3879.0]] [u'0000FF', None, u'FF0000']
@rtype: tuple of 3 lists
"""
channels = []
windows = []
colors = []
for chan in rchannels.split(','):
chan = chan.split('|')
t = chan[0].strip()
color = None
if t.find('$')>=0:
t,color = t.split('$')
try:
channels.append(int(t))
ch_window = (None, None)
if len(chan) > 1:
t = chan[1].strip()
if t.find('$')>=0:
t, color = t.split('$')
t = t.split(':')
if len(t) == 2:
try:
ch_window = [float(x) for x in t]
except ValueError:
pass
windows.append(ch_window)
colors.append(color)
except ValueError:
pass
logger.debug(str(channels)+","+str(windows)+","+str(colors))
return channels, windows, colors
def getImgDetailsFromReq (request, as_string=False):
""" Break the GET information from the request object into details on how to render the image.
The following keys are recognized:
z - Z axis position
t - T axis position
q - Quality set (0,0..1,0)
m - Model (g for greyscale, c for color)
p - Projection (see blitz_gateway.ImageWrapper.PROJECTIONS for keys)
x - X position (for now based on top/left offset on the browser window)
y - Y position (same as above)
c - a comma separated list of channels to be rendered (start index 1)
        - format for each entry [-]ID[|wndst:wndend][$HEXCOLOR][,...]
    zm - the zoom setting (as a percentage value)
@param request: http request with keys above
@param as_string: If True, return a string representation of the rendering details
@return: A dict or String representation of rendering details above.
@rtype: Dict or String
"""
r = request.REQUEST
rv = {}
for k in ('z', 't', 'q', 'm', 'zm', 'x', 'y', 'p'):
if r.has_key(k):
rv[k] = r[k]
if r.has_key('c'):
rv['c'] = []
ci = _split_channel_info(r['c'])
logger.debug(ci)
for i in range(len(ci[0])):
# a = abs channel, i = channel, s = window start, e = window end, c = color
rv['c'].append({'a':abs(ci[0][i]), 'i':ci[0][i], 's':ci[1][i][0], 'e':ci[1][i][1], 'c':ci[2][i]})
if as_string:
return "&".join(["%s=%s" % (x[0], x[1]) for x in rv.items()])
return rv
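# Illustrative example (hypothetical query string): a request such as
#   ?m=c&p=intmax&z=3&t=1&q=0.9&c=1|100:505$0000FF,-2
# would yield something like
#   {'m': 'c', 'p': 'intmax', 'z': '3', 't': '1', 'q': '0.9',
#    'c': [{'a': 1, 'i': 1, 's': 100.0, 'e': 505.0, 'c': '0000FF'},
#          {'a': 2, 'i': -2, 's': None, 'e': None, 'c': None}]}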
@login_required()
def render_birds_eye_view (request, iid, size=None,
conn=None, **kwargs):
"""
Returns an HttpResponse wrapped jpeg with the rendered bird's eye view
for image 'iid'. Rendering settings can be specified in the request
parameters as in L{render_image} and L{render_image_region}; see
L{getImgDetailsFromReq} for a complete list.
@param request: http request
@param iid: Image ID
@param conn: L{omero.gateway.BlitzGateway} connection
@param size: Maximum size of the longest side of the resulting bird's eye view.
@return: http response containing jpeg
"""
server_id = request.session['connector'].server_id
img = _get_prepared_image(request, iid, conn=conn, server_id=server_id)
if img is None:
logger.debug("(b)Image %s not found..." % (str(iid)))
raise Http404
img, compress_quality = img
return HttpResponse(img.renderBirdsEyeView(size), mimetype='image/jpeg')
@login_required()
def render_thumbnail (request, iid, w=None, h=None, conn=None, _defcb=None, **kwargs):
"""
Returns an HttpResponse wrapped jpeg with the rendered thumbnail for image 'iid'
@param request: http request
@param iid: Image ID
@param w: Thumbnail max width. 64 by default
@param h: Thumbnail max height
@return: http response containing jpeg
"""
server_id = request.session['connector'].server_id
if w is None:
size = (64,)
else:
if h is None:
size = (int(w),)
else:
size = (int(w), int(h))
user_id = conn.getUserId()
jpeg_data = webgateway_cache.getThumb(request, server_id, user_id, iid, size)
if jpeg_data is None:
prevent_cache = False
img = conn.getObject("Image", iid)
if img is None:
logger.debug("(b)Image %s not found..." % (str(iid)))
if _defcb:
jpeg_data = _defcb(size=size)
prevent_cache = True
else:
raise Http404
else:
jpeg_data = img.getThumbnail(size=size)
if jpeg_data is None:
logger.debug("(c)Image %s not found..." % (str(iid)))
if _defcb:
jpeg_data = _defcb(size=size)
prevent_cache = True
else:
return HttpResponseServerError('Failed to render thumbnail')
else:
prevent_cache = img._thumbInProgress
if not prevent_cache:
webgateway_cache.setThumb(request, server_id, user_id, iid, jpeg_data, size)
else:
pass
rsp = HttpResponse(jpeg_data, mimetype='image/jpeg')
return rsp
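# Usage sketch: this view is normally wired up through webgateway/urls.py, so a request such
# as /webgateway/render_thumbnail/<iid>/96/ (URL shown is illustrative) returns a JPEG
# thumbnail at most 96 pixels wide, cached via webgateway_cache.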
@login_required()
def render_roi_thumbnail (request, roiId, w=None, h=None, conn=None, **kwargs):
"""
    For the given ROI, choose the shape to render (first time-point, mid z-section), then render
    a region around that shape, scale it to width and height (or the default size) and draw the
    shape onto the region.
"""
server_id = request.session['connector'].server_id
# need to find the z indices of the first shape in T
roiResult = conn.getRoiService().findByRoi(long(roiId), None, conn.SERVICE_OPTS)
if roiResult is None or roiResult.rois is None:
raise Http404
zz = set()
minT = None
shapes = {}
for roi in roiResult.rois:
imageId = roi.image.id.val
for s in roi.copyShapes():
if s is None: # seems possible in some situations
continue
t = s.getTheT().getValue()
z = s.getTheZ().getValue()
shapes[(z,t)] = s
if minT is None: minT = t
if t < minT:
zz = set([z])
minT = t
elif minT == t:
zz.add(z)
zList = list(zz)
zList.sort()
midZ = zList[len(zList)/2]
s = shapes[(midZ, minT)]
pi = _get_prepared_image(request, imageId, server_id=server_id, conn=conn)
if pi is None:
raise Http404
image, compress_quality = pi
return get_shape_thumbnail (request, conn, image, s, compress_quality)
@login_required()
def render_shape_thumbnail (request, shapeId, w=None, h=None, conn=None, **kwargs):
"""
    For the given Shape, render a region around that shape, scale it to width and height (or the default size) and draw the
    shape onto the region.
"""
server_id = request.session['connector'].server_id
# need to find the z indices of the first shape in T
params = omero.sys.Parameters()
params.map = {'id':rlong(shapeId)}
shape = conn.getQueryService().findByQuery("select s from Shape s join fetch s.roi where s.id = :id", params)
if shape is None:
raise Http404
imageId = shape.roi.image.id.val
pi = _get_prepared_image(request, imageId, server_id=server_id, conn=conn)
if pi is None:
raise Http404
image, compress_quality = pi
return get_shape_thumbnail (request, conn, image, shape, compress_quality)
def get_shape_thumbnail (request, conn, image, s, compress_quality):
"""
Render a region around the specified Shape, scale to width and height (or default size) and draw the
shape on to the region. Returns jpeg data.
@param image: ImageWrapper
@param s: omero.model.Shape
"""
MAX_WIDTH = 250
color = request.REQUEST.get("color", "fff")
colours = {"f00":(255,0,0), "0f0":(0,255,0), "00f":(0,0,255), "ff0":(255,255,0), "fff":(255,255,255), "000":(0,0,0)}
lineColour = colours["f00"]
if color in colours:
lineColour = colours[color]
bg_color = (221,221,221) # used for padding if we go outside the image area
def pointsStringToXYlist(string):
"""
Method for converting the string returned from omero.model.ShapeI.getPoints()
into list of (x,y) points.
E.g: "points[309,427, 366,503, 190,491] points1[309,427, 366,503, 190,491] points2[309,427, 366,503, 190,491]"
"""
pointLists = string.strip().split("points")
if len(pointLists) < 2:
logger.error("Unrecognised ROI shape 'points' string: %s" % string)
return ""
firstList = pointLists[1]
xyList = []
for xy in firstList.strip(" []").split(", "):
x, y = xy.split(",")
xyList.append( ( int( x.strip() ), int(y.strip() ) ) )
return xyList
def xyListToBbox(xyList):
""" Returns a bounding box (x,y,w,h) that will contain the shape represented by the XY points list """
xList, yList = [], []
for xy in xyList:
x, y = xy
xList.append(x)
yList.append(y)
return (min(xList), min(yList), max(xList)-min(xList), max(yList)-min(yList))
bBox = None # bounding box: (x, y, w, h)
shape = {}
theT = s.getTheT().getValue()
theZ = s.getTheZ().getValue()
if type(s) == omero.model.RectI:
shape['type'] = 'Rectangle'
shape['x'] = s.getX().getValue()
shape['y'] = s.getY().getValue()
shape['width'] = s.getWidth().getValue()
shape['height'] = s.getHeight().getValue()
bBox = (shape['x'], shape['y'], shape['width'], shape['height'])
elif type(s) == omero.model.MaskI:
shape['type'] = 'Mask'
shape['x'] = s.getX().getValue()
shape['y'] = s.getY().getValue()
shape['width'] = s.getWidth().getValue()
shape['height'] = s.getHeight().getValue()
bBox = (shape['x'], shape['y'], shape['width'], shape['height'])
# TODO: support for mask
elif type(s) == omero.model.EllipseI:
shape['type'] = 'Ellipse'
shape['cx'] = int(s.getCx().getValue())
shape['cy'] = int(s.getCy().getValue())
shape['rx'] = int(s.getRx().getValue())
shape['ry'] = int(s.getRy().getValue())
bBox = (shape['cx']-shape['rx'], shape['cy']-shape['ry'], 2*shape['rx'], 2*shape['ry'])
elif type(s) == omero.model.PolylineI:
shape['type'] = 'PolyLine'
shape['xyList'] = pointsStringToXYlist(s.getPoints().getValue())
bBox = xyListToBbox(shape['xyList'])
elif type(s) == omero.model.LineI:
shape['type'] = 'Line'
shape['x1'] = int(s.getX1().getValue())
shape['x2'] = int(s.getX2().getValue())
shape['y1'] = int(s.getY1().getValue())
shape['y2'] = int(s.getY2().getValue())
x = min(shape['x1'],shape['x2'])
y = min(shape['y1'],shape['y2'])
bBox = (x, y, max(shape['x1'],shape['x2'])-x, max(shape['y1'],shape['y2'])-y)
elif type(s) == omero.model.PointI:
shape['type'] = 'Point'
shape['cx'] = s.getCx().getValue()
shape['cy'] = s.getCy().getValue()
bBox = (shape['cx']-50, shape['cy']-50, 100, 100)
elif type(s) == omero.model.PolygonI:
shape['type'] = 'Polygon'
shape['xyList'] = pointsStringToXYlist(s.getPoints().getValue())
bBox = xyListToBbox(shape['xyList'])
elif type(s) == omero.model.LabelI:
shape['type'] = 'Label'
shape['x'] = s.getX().getValue()
shape['y'] = s.getY().getValue()
bBox = (shape['x']-50, shape['y']-50, 100, 100)
else:
logger.debug("Shape type not supported: %s" % str(type(s)))
#print shape
# we want to render a region larger than the bounding box
x,y,w,h = bBox
requiredWidth = max(w,h*3/2) # make the aspect ratio (w/h) = 3/2
requiredHeight = requiredWidth*2/3
newW = int(requiredWidth * 1.5) # make the rendered region 1.5 times larger than the bounding box
newH = int(requiredHeight * 1.5)
# Don't want the region to be smaller than the thumbnail dimensions
if newW < MAX_WIDTH:
newW = MAX_WIDTH
newH = newW*2/3
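    # Worked example (illustrative): for a 100x50 bounding box, requiredWidth = max(100, 75) = 100,
    # requiredHeight = 66, so newW = 150 and newH = 99; newW < MAX_WIDTH, so the region is then
    # bumped up to 250x166 before the big-image check below.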
# Don't want the region to be bigger than a 'Big Image'!
def getConfigValue(key):
try:
return conn.getConfigService().getConfigValue(key)
except:
logger.warn("webgateway: get_shape_thumbnail() could not get Config-Value for %s" % key)
pass
max_plane_width = getConfigValue("omero.pixeldata.max_plane_width")
max_plane_height = getConfigValue("omero.pixeldata.max_plane_height")
if max_plane_width is None or max_plane_height is None or (newW > int(max_plane_width)) or (newH > int(max_plane_height)):
# generate dummy image to return
dummy = Image.new('RGB', (MAX_WIDTH, MAX_WIDTH*2/3), bg_color)
draw = ImageDraw.Draw(dummy)
draw.text((10,30), "Shape too large to \ngenerate thumbnail", fill=(255,0,0))
rv = StringIO()
dummy.save(rv, 'jpeg', quality=90)
return HttpResponse(rv.getvalue(), mimetype='image/jpeg')
xOffset = (newW - w)/2
yOffset = (newH - h)/2
newX = int(x - xOffset)
newY = int(y - yOffset)
# Need to check if any part of our region is outside the image. (assume that SOME of the region is within the image!)
sizeX = image.getSizeX()
sizeY = image.getSizeY()
left_xs, right_xs, top_xs, bottom_xs = 0,0,0,0
if newX < 0:
newW = newW + newX
left_xs = abs(newX)
newX = 0
if newY < 0:
newH = newH + newY
top_xs = abs(newY)
newY = 0
if newW+newX > sizeX:
right_xs = (newW+newX) - sizeX
newW = newW - right_xs
if newH+newY > sizeY:
bottom_xs = (newH+newY) - sizeY
newH = newH - bottom_xs
# now we should be getting the correct region
jpeg_data = image.renderJpegRegion(theZ,theT,newX, newY, newW, newH,level=None, compression=compress_quality)
img = Image.open(StringIO(jpeg_data))
# add back on the xs we were forced to trim
if left_xs != 0 or right_xs != 0 or top_xs != 0 or bottom_xs != 0:
jpg_w, jpg_h = img.size
xs_w = jpg_w + right_xs + left_xs
xs_h = jpg_h + bottom_xs + top_xs
xs_image = Image.new('RGBA', (xs_w, xs_h), bg_color)
xs_image.paste(img, (left_xs, top_xs))
img = xs_image
# we have our full-sized region. Need to resize to thumbnail.
current_w, current_h = img.size
factor = float(MAX_WIDTH) / current_w
resizeH = current_h * factor
img = img.resize((MAX_WIDTH, resizeH))
draw = ImageDraw.Draw(img)
if shape['type'] == 'Rectangle':
rectX = int(xOffset * factor)
rectY = int(yOffset * factor)
rectW = int((w+xOffset) * factor)
rectH = int((h+yOffset) * factor)
draw.rectangle((rectX, rectY, rectW, rectH), outline=lineColour)
draw.rectangle((rectX-1, rectY-1, rectW+1, rectH+1), outline=lineColour) # hack to get line width of 2
elif shape['type'] == 'Line':
lineX1 = (shape['x1'] - newX + left_xs) * factor
lineX2 = (shape['x2'] - newX + left_xs) * factor
lineY1 = (shape['y1'] - newY + top_xs) * factor
lineY2 = (shape['y2'] - newY + top_xs) * factor
draw.line((lineX1, lineY1, lineX2, lineY2), fill=lineColour, width=2)
elif shape['type'] == 'Ellipse':
rectX = int(xOffset * factor)
rectY = int(yOffset * factor)
rectW = int((w+xOffset) * factor)
rectH = int((h+yOffset) * factor)
draw.ellipse((rectX, rectY, rectW, rectH), outline=lineColour)
draw.ellipse((rectX-1, rectY-1, rectW+1, rectH+1), outline=lineColour) # hack to get line width of 2
elif shape['type'] == 'Point':
point_radius = 2
rectX = (MAX_WIDTH/2) - point_radius
rectY = int(resizeH/2) - point_radius
rectW = rectX + (point_radius * 2)
rectH = rectY + (point_radius * 2)
draw.ellipse((rectX, rectY, rectW, rectH), outline=lineColour)
draw.ellipse((rectX-1, rectY-1, rectW+1, rectH+1), outline=lineColour) # hack to get line width of 2
elif 'xyList' in shape:
#resizedXY = [ (int(x*factor), int(y*factor)) for (x,y) in shape['xyList'] ]
def resizeXY(xy):
x,y = xy
return (int((x-newX + left_xs)*factor), int((y-newY + top_xs)*factor))
resizedXY = [ resizeXY(xy) for xy in shape['xyList'] ]
#draw.polygon(resizedXY, outline=lineColour) # doesn't support 'width' of line
for l in range(1, len(resizedXY)):
x1, y1 = resizedXY[l-1]
x2, y2 = resizedXY[l]
draw.line((x1, y1, x2, y2), fill=lineColour, width=2)
start_x, start_y = resizedXY[0]
if shape['type'] != 'PolyLine':
draw.line((x2, y2, start_x, start_y), fill=lineColour, width=2)
rv = StringIO()
compression = 0.9
img.save(rv, 'jpeg', quality=int(compression*100))
jpeg = rv.getvalue()
return HttpResponse(jpeg, mimetype='image/jpeg')
def _get_signature_from_request (request):
"""
returns a string that identifies this image, along with the settings passed on the request.
Useful for using as img identifier key, for prepared image.
@param request: http request
@return: String
"""
r = request.REQUEST
rv = r.get('m','_') + r.get('p','_')+r.get('c','_')+r.get('q', '_')
return rv
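# Illustrative example (hypothetical request): for ?m=c&q=0.9&c=1|100:505$0000FF the signature
# is 'c_1|100:505$0000FF0.9' (keys missing from the request fall back to '_').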
def _get_prepared_image (request, iid, server_id=None, conn=None, saveDefs=False, retry=True):
"""
Fetches the Image object for image 'iid' and prepares it according to the request query, setting the channels,
rendering model and projection arguments. The compression level is parsed and returned too.
For parameters in request, see L{getImgDetailsFromReq}
@param request: http request
@param iid: Image ID
@param conn: L{omero.gateway.BlitzGateway} connection
@param saveDefs: Try to save the rendering settings, default z and t.
@param retry: Try an extra attempt at this method
@return: Tuple (L{omero.gateway.ImageWrapper} image, quality)
"""
r = request.REQUEST
logger.debug('Preparing Image:%r saveDefs=%r ' \
'retry=%r request=%r conn=%s' % (iid, saveDefs, retry,
r, str(conn)))
img = conn.getObject("Image", iid)
if img is None:
return
if r.has_key('c'):
logger.debug("c="+r['c'])
channels, windows, colors = _split_channel_info(r['c'])
if not img.setActiveChannels(channels, windows, colors):
logger.debug("Something bad happened while setting the active channels...")
if r.get('m', None) == 'g':
img.setGreyscaleRenderingModel()
elif r.get('m', None) == 'c':
img.setColorRenderingModel()
img.setProjection(r.get('p', None))
img.setInvertedAxis(bool(r.get('ia', "0") == "1"))
compress_quality = r.get('q', None)
if saveDefs:
r.has_key('z') and img._re.setDefaultZ(long(r['z'])-1)
r.has_key('t') and img._re.setDefaultT(long(r['t'])-1)
img.saveDefaults()
return (img, compress_quality)
@login_required()
def render_image_region(request, iid, z, t, conn=None, **kwargs):
"""
Returns a jpeg of the OMERO image, rendering only a region specified in query string as
region=x,y,width,height. E.g. region=0,512,256,256
Rendering settings can be specified in the request parameters.
@param request: http request
@param iid: image ID
@param z: Z index
@param t: T index
@param conn: L{omero.gateway.BlitzGateway} connection
@return: http response wrapping jpeg
"""
server_id = request.session['connector'].server_id
# if the region=x,y,w,h is not parsed correctly to give 4 ints then we simply provide whole image plane.
# alternatively, could return a 404?
#if h == None:
# return render_image (request, iid, z, t, server_id=None, _conn=None, **kwargs)
pi = _get_prepared_image(request, iid, server_id=server_id, conn=conn)
if pi is None:
raise Http404
img, compress_quality = pi
tile = request.REQUEST.get('tile', None)
region = request.REQUEST.get('region', None)
level = None
if tile:
try:
img._prepareRenderingEngine()
tiles = img._re.requiresPixelsPyramid()
w, h = img._re.getTileSize()
levels = img._re.getResolutionLevels()-1
zxyt = tile.split(",")
#w = int(zxyt[3])
#h = int(zxyt[4])
level = levels-int(zxyt[0])
x = int(zxyt[1])*w
y = int(zxyt[2])*h
except:
logger.debug("render_image_region: tile=%s" % tile)
logger.debug(traceback.format_exc())
elif region:
try:
xywh = region.split(",")
x = int(xywh[0])
y = int(xywh[1])
w = int(xywh[2])
h = int(xywh[3])
except:
logger.debug("render_image_region: region=%s" % region)
logger.debug(traceback.format_exc())
# region details in request are used as key for caching.
jpeg_data = webgateway_cache.getImage(request, server_id, img, z, t)
if jpeg_data is None:
jpeg_data = img.renderJpegRegion(z,t,x,y,w,h,level=level, compression=compress_quality)
if jpeg_data is None:
raise Http404
webgateway_cache.setImage(request, server_id, img, z, t, jpeg_data)
rsp = HttpResponse(jpeg_data, mimetype='image/jpeg')
return rsp
@login_required()
def render_image (request, iid, z=None, t=None, conn=None, **kwargs):
"""
Renders the image with id {{iid}} at {{z}} and {{t}} as jpeg.
Many options are available from the request dict. See L{getImgDetailsFromReq} for list.
I am assuming a single Pixels object on image with image-Id='iid'. May be wrong
@param request: http request
@param iid: image ID
@param z: Z index
@param t: T index
@param conn: L{omero.gateway.BlitzGateway} connection
@return: http response wrapping jpeg
"""
server_id = request.session['connector'].server_id
pi = _get_prepared_image(request, iid, server_id=server_id, conn=conn)
if pi is None:
raise Http404
img, compress_quality = pi
jpeg_data = webgateway_cache.getImage(request, server_id, img, z, t)
if jpeg_data is None:
jpeg_data = img.renderJpeg(z,t, compression=compress_quality)
if jpeg_data is None:
raise Http404
webgateway_cache.setImage(request, server_id, img, z, t, jpeg_data)
rsp = HttpResponse(jpeg_data, mimetype='image/jpeg')
return rsp
@login_required()
def render_ome_tiff (request, ctx, cid, conn=None, **kwargs):
"""
Renders the OME-TIFF representation of the image(s) with id cid in ctx (i)mage,
(d)ataset, or (p)roject.
    For multi-image exports, images that require a pixels pyramid (big images) will be silently skipped.
    If exporting a single big image, or if all images in a multi-image export are big,
    a 404 will be triggered.
A request parameter dryrun can be passed to return the count of images that would actually be exported.
@param request: http request
@param ctx: 'p' or 'd' or 'i'
@param cid: Project, Dataset or Image ID
@param conn: L{omero.gateway.BlitzGateway} connection
@return: http response wrapping the tiff (or zip for multiple files), or redirect to temp file/zip
if dryrun is True, returns count of images that would be exported
"""
server_id = request.session['connector'].server_id
imgs = []
if ctx == 'p':
obj = conn.getObject("Project", cid)
if obj is None:
raise Http404
for d in obj.listChildren():
imgs.extend(list(d.listChildren()))
name = obj.getName()
elif ctx == 'd':
obj = conn.getObject("Dataset", cid)
if obj is None:
raise Http404
imgs.extend(list(obj.listChildren()))
selection = filter(None, request.REQUEST.get('selection', '').split(','))
if len(selection):
logger.debug(selection)
logger.debug(imgs)
imgs = filter(lambda x: str(x.getId()) in selection, imgs)
logger.debug(imgs)
if len(imgs) == 0:
raise Http404
name = '%s-%s' % (obj.getParent().getName(), obj.getName())
elif ctx == 'w':
obj = conn.getObject("Well", cid)
if obj is None:
raise Http404
imgs.extend([x.getImage() for x in obj.listChildren()])
plate = obj.getParent()
coord = "%s%s" % (plate.getRowLabels()[obj.row],plate.getColumnLabels()[obj.column])
name = '%s-%s-%s' % (plate.getParent().getName(), plate.getName(), coord)
else:
obj = conn.getObject("Image", cid)
if obj is None:
raise Http404
imgs.append(obj)
imgs = filter(lambda x: not x.requiresPixelsPyramid(), imgs)
if request.REQUEST.get('dryrun', False):
rv = simplejson.dumps(len(imgs))
c = request.REQUEST.get('callback', None)
if c is not None and not kwargs.get('_internal', False):
rv = '%s(%s)' % (c, rv)
return HttpResponse(rv, mimetype='application/javascript')
if len(imgs) == 0:
raise Http404
if len(imgs) == 1:
obj = imgs[0]
key = '_'.join((str(x.getId()) for x in obj.getAncestry())) + '_' + str(obj.getId()) + '_ome_tiff'
fpath, rpath, fobj = webgateway_tempfile.new(str(obj.getId()) + '-'+obj.getName() + '.ome.tiff', key=key)
if fobj is True:
# already exists
return HttpResponseRedirect(settings.STATIC_URL + 'webgateway/tfiles/' + rpath)
tiff_data = webgateway_cache.getOmeTiffImage(request, server_id, imgs[0])
if tiff_data is None:
try:
tiff_data = imgs[0].exportOmeTiff()
except:
logger.debug('Failed to export image (2)', exc_info=True)
tiff_data = None
if tiff_data is None:
webgateway_tempfile.abort(fpath)
raise Http404
webgateway_cache.setOmeTiffImage(request, server_id, imgs[0], tiff_data)
if fobj is None:
rsp = HttpResponse(tiff_data, mimetype='image/tiff')
rsp['Content-Disposition'] = 'attachment; filename="%s.ome.tiff"' % (str(obj.getId()) + '-'+obj.getName())
rsp['Content-Length'] = len(tiff_data)
return rsp
else:
fobj.write(tiff_data)
fobj.close()
return HttpResponseRedirect(settings.STATIC_URL + 'webgateway/tfiles/' + rpath)
else:
try:
img_ids = '+'.join((str(x.getId()) for x in imgs))
key = '_'.join((str(x.getId()) for x in imgs[0].getAncestry())) + '_' + md5(img_ids).hexdigest() + '_ome_tiff_zip'
fpath, rpath, fobj = webgateway_tempfile.new(name + '.zip', key=key)
if fobj is True:
return HttpResponseRedirect(settings.STATIC_URL + 'webgateway/tfiles/' + rpath)
logger.debug(fpath)
if fobj is None:
fobj = StringIO()
zobj = zipfile.ZipFile(fobj, 'w', zipfile.ZIP_STORED)
for obj in imgs:
tiff_data = webgateway_cache.getOmeTiffImage(request, server_id, obj)
if tiff_data is None:
tiff_data = obj.exportOmeTiff()
if tiff_data is None:
continue
webgateway_cache.setOmeTiffImage(request, server_id, obj, tiff_data)
zobj.writestr(str(obj.getId()) + '-'+obj.getName() + '.ome.tiff', tiff_data)
zobj.close()
if fpath is None:
zip_data = fobj.getvalue()
rsp = HttpResponse(zip_data, mimetype='application/zip')
rsp['Content-Disposition'] = 'attachment; filename="%s.zip"' % name
rsp['Content-Length'] = len(zip_data)
return rsp
except:
logger.debug(traceback.format_exc())
raise
return HttpResponseRedirect(settings.STATIC_URL + 'webgateway/tfiles/' + rpath)
@login_required()
def render_movie (request, iid, axis, pos, conn=None, **kwargs):
"""
Renders a movie from the image with id iid
@param request: http request
@param iid: Image ID
@param axis: Movie frames are along 'z' or 't' dimension. String
@param pos: The T index (for z axis) or Z index (for t axis)
@param conn: L{omero.gateway.BlitzGateway} connection
@return: http response wrapping the file, or redirect to temp file
"""
server_id = request.session['connector'].server_id
try:
# Prepare a filename we'll use for temp cache, and check if file is already there
opts = {}
opts['format'] = 'video/' + request.REQUEST.get('format', 'quicktime')
opts['fps'] = int(request.REQUEST.get('fps', 4))
opts['minsize'] = (512,512, 'Black')
ext = '.avi'
key = "%s-%s-%s-%d-%s-%s" % (iid, axis, pos, opts['fps'], _get_signature_from_request(request),
request.REQUEST.get('format', 'quicktime'))
pos = int(pos)
pi = _get_prepared_image(request, iid, server_id=server_id, conn=conn)
if pi is None:
raise Http404
img, compress_quality = pi
fpath, rpath, fobj = webgateway_tempfile.new(img.getName() + ext, key=key)
        logger.debug('%s %s %s' % (fpath, rpath, fobj))
if fobj is True:
return HttpResponseRedirect(settings.STATIC_URL + 'webgateway/tfiles/' + rpath)#os.path.join(rpath, img.getName() + ext))
if kwargs.has_key('optsCB'):
opts.update(kwargs['optsCB'](img))
opts.update(kwargs.get('opts', {}))
logger.debug('rendering movie for img %s with axis %s, pos %i and opts %s' % (iid, axis, pos, opts))
#fpath, rpath = webgateway_tempfile.newdir()
if fpath is None:
import tempfile
fo, fn = tempfile.mkstemp()
else:
fn = fpath #os.path.join(fpath, img.getName())
if axis.lower() == 'z':
dext, mimetype = img.createMovie(fn, 0, img.getSizeZ()-1, pos-1, pos-1, opts)
else:
dext, mimetype = img.createMovie(fn, pos-1, pos-1, 0, img.getSizeT()-1, opts)
if dext is None and mimetype is None:
# createMovie is currently only available on 4.1_custom
# http://trac.openmicroscopy.org.uk/ome/ticket/3857
raise Http404
if fpath is None:
movie = open(fn).read()
os.close(fo)
rsp = HttpResponse(movie, mimetype=mimetype)
rsp['Content-Disposition'] = 'attachment; filename="%s"' % (img.getName()+ext)
rsp['Content-Length'] = len(movie)
return rsp
else:
fobj.close()
#shutil.move(fn, fn + ext)
return HttpResponseRedirect(settings.STATIC_URL + 'webgateway/tfiles/' + rpath)#os.path.join(rpath, img.getName() + ext))
except:
logger.debug(traceback.format_exc())
raise
@login_required()
def render_split_channel (request, iid, z, t, conn=None, **kwargs):
"""
Renders a split channel view of the image with id {{iid}} at {{z}} and {{t}} as jpeg.
Many options are available from the request dict.
Requires PIL to be installed on the server.
@param request: http request
@param iid: Image ID
@param z: Z index
@param t: T index
@param conn: L{omero.gateway.BlitzGateway} connection
@return: http response wrapping a jpeg
"""
server_id = request.session['connector'].server_id
pi = _get_prepared_image(request, iid, server_id=server_id, conn=conn)
if pi is None:
raise Http404
img, compress_quality = pi
compress_quality = compress_quality and float(compress_quality) or 0.9
jpeg_data = webgateway_cache.getSplitChannelImage(request, server_id, img, z, t)
if jpeg_data is None:
jpeg_data = img.renderSplitChannel(z,t, compression=compress_quality)
if jpeg_data is None:
raise Http404
webgateway_cache.setSplitChannelImage(request, server_id, img, z, t, jpeg_data)
rsp = HttpResponse(jpeg_data, mimetype='image/jpeg')
return rsp
def debug (f):
"""
Decorator for adding debugging functionality to methods.
@param f: The function to wrap
@return: The wrapped function
"""
def wrap (request, *args, **kwargs):
debug = request.REQUEST.getlist('debug')
if 'slow' in debug:
time.sleep(5)
if 'fail' in debug:
raise Http404
if 'error' in debug:
raise AttributeError('Debug requested error')
return f(request, *args, **kwargs)
wrap.func_name = f.func_name
return wrap
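# Usage sketch: appending ?debug=slow, ?debug=fail or ?debug=error to a request handled by a
# @debug-wrapped view exercises the branches above (a 5 second sleep, an Http404, or an
# AttributeError respectively).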
def jsonp (f):
"""
    Decorator for catching errors and returning the function result as json (or JSONP when a
    'callback' parameter is present), depending on values in kwargs
@param f: The function to wrap
@return: The wrapped function, which will return json
"""
def wrap (request, *args, **kwargs):
logger.debug('jsonp')
try:
server_id = kwargs.get('server_id', None)
if server_id is None:
server_id = request.session['connector'].server_id
kwargs['server_id'] = server_id
rv = f(request, *args, **kwargs)
if kwargs.get('_raw', False):
return rv
if isinstance(rv, HttpResponse):
return rv
rv = simplejson.dumps(rv)
c = request.REQUEST.get('callback', None)
if c is not None and not kwargs.get('_internal', False):
rv = '%s(%s)' % (c, rv)
if kwargs.get('_internal', False):
return rv
return HttpResponse(rv, mimetype='application/javascript')
except omero.ServerError:
if kwargs.get('_raw', False) or kwargs.get('_internal', False):
raise
return HttpResponseServerError('("error in call","%s")' % traceback.format_exc(), mimetype='application/javascript')
except:
logger.debug(traceback.format_exc())
if kwargs.get('_raw', False) or kwargs.get('_internal', False):
raise
return HttpResponseServerError('("error in call","%s")' % traceback.format_exc(), mimetype='application/javascript')
wrap.func_name = f.func_name
return wrap
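# Usage sketch (hypothetical view, not part of this module): a view wrapped with @jsonp can
# simply return a dict or list; adding ?callback=handle to the request wraps the JSON in a
# JSONP call, e.g. handle({...}).
#
#   @login_required()
#   @jsonp
#   def whoami_json (request, conn=None, **kwargs):
#       return {'userId': conn.getUserId()}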
@debug
@login_required()
def render_row_plot (request, iid, z, t, y, conn=None, w=1, **kwargs):
"""
Renders the line plot for the image with id {{iid}} at {{z}} and {{t}} as gif with transparent background.
Many options are available from the request dict.
I am assuming a single Pixels object on image with Image ID='iid'. May be wrong
TODO: cache
@param request: http request
@param iid: Image ID
@param z: Z index
@param t: T index
@param y: Y position of row to measure
@param conn: L{omero.gateway.BlitzGateway} connection
@param w: Line width
@return: http response wrapping a gif
"""
if not w:
w = 1
pi = _get_prepared_image(request, iid, conn=conn)
if pi is None:
raise Http404
img, compress_quality = pi
try:
gif_data = img.renderRowLinePlotGif(int(z),int(t),int(y), int(w))
except:
logger.debug('a', exc_info=True)
raise
if gif_data is None:
raise Http404
rsp = HttpResponse(gif_data, mimetype='image/gif')
return rsp
@debug
@login_required()
def render_col_plot (request, iid, z, t, x, w=1, conn=None, **kwargs):
"""
Renders the line plot for the image with id {{iid}} at {{z}} and {{t}} as gif with transparent background.
Many options are available from the request dict.
I am assuming a single Pixels object on image with id='iid'. May be wrong
TODO: cache
@param request: http request
@param iid: Image ID
@param z: Z index
@param t: T index
@param x: X position of column to measure
@param conn: L{omero.gateway.BlitzGateway} connection
@param w: Line width
@return: http response wrapping a gif
"""
if not w:
w = 1
pi = _get_prepared_image(request, iid, conn=conn)
if pi is None:
raise Http404
img, compress_quality = pi
gif_data = img.renderColLinePlotGif(int(z),int(t),int(x), int(w))
if gif_data is None:
raise Http404
rsp = HttpResponse(gif_data, mimetype='image/gif')
return rsp
@login_required()
@jsonp
def imageData_json (request, conn=None, _internal=False, **kwargs):
"""
Get a dict with image information
TODO: cache
@param request: http request
@param conn: L{omero.gateway.BlitzGateway}
@param _internal: TODO: ?
@return: Dict
"""
iid = kwargs['iid']
key = kwargs.get('key', None)
image = conn.getObject("Image", iid)
if image is None:
return HttpResponseServerError('""', mimetype='application/javascript')
rv = imageMarshal(image, key)
return rv
@login_required()
@jsonp
def wellData_json (request, conn=None, _internal=False, **kwargs):
"""
    Get a dict with well information
TODO: cache
@param request: http request
@param conn: L{omero.gateway.BlitzGateway}
@param _internal: TODO: ?
@return: Dict
"""
wid = kwargs['wid']
well = conn.getObject("Well", wid)
if well is None:
return HttpResponseServerError('""', mimetype='application/javascript')
prefix = kwargs.get('thumbprefix', 'webgateway.views.render_thumbnail')
def urlprefix(iid):
return reverse(prefix, args=(iid,))
xtra = {'thumbUrlPrefix': kwargs.get('urlprefix', urlprefix)}
rv = well.simpleMarshal(xtra=xtra)
return rv
@login_required()
@jsonp
def plateGrid_json (request, pid, field=0, conn=None, **kwargs):
"""
"""
plate = conn.getObject('plate', long(pid))
try:
field = long(field or 0)
except ValueError:
field = 0
if plate is None:
return HttpResponseServerError('""', mimetype='application/javascript')
grid = []
prefix = kwargs.get('thumbprefix', 'webgateway.views.render_thumbnail')
thumbsize = int(request.REQUEST.get('size', 64))
logger.debug(thumbsize)
def urlprefix(iid):
return reverse(prefix, args=(iid,thumbsize))
xtra = {'thumbUrlPrefix': kwargs.get('urlprefix', urlprefix)}
server_id = kwargs['server_id']
rv = webgateway_cache.getJson(request, server_id, plate, 'plategrid-%d-%d' % (field, thumbsize))
if rv is None:
plate.setGridSizeConstraints(8,12)
for row in plate.getWellGrid(field):
tr = []
for e in row:
if e:
i = e.getImage()
if i:
t = i.simpleMarshal(xtra=xtra)
t['wellId'] = e.getId()
t['field'] = field
tr.append(t)
continue
tr.append(None)
grid.append(tr)
rv = {'grid': grid,
'collabels': plate.getColumnLabels(),
'rowlabels': plate.getRowLabels()}
webgateway_cache.setJson(request, server_id, plate, simplejson.dumps(rv), 'plategrid-%d-%d' % (field, thumbsize))
else:
rv = simplejson.loads(rv)
return rv
@login_required()
@jsonp
def listImages_json (request, did, conn=None, **kwargs):
"""
lists all Images in a Dataset, as json
TODO: cache
@param request: http request
@param did: Dataset ID
@param conn: L{omero.gateway.BlitzGateway}
@return: list of image json.
"""
dataset = conn.getObject("Dataset", did)
if dataset is None:
return HttpResponseServerError('""', mimetype='application/javascript')
prefix = kwargs.get('thumbprefix', 'webgateway.views.render_thumbnail')
def urlprefix(iid):
return reverse(prefix, args=(iid,))
xtra = {'thumbUrlPrefix': kwargs.get('urlprefix', urlprefix)}
return map(lambda x: x.simpleMarshal(xtra=xtra), dataset.listChildren())
@login_required()
@jsonp
def listWellImages_json (request, did, conn=None, **kwargs):
"""
lists all Images in a Well, as json
TODO: cache
@param request: http request
@param did: Well ID
@param conn: L{omero.gateway.BlitzGateway}
@return: list of image json.
"""
well = conn.getObject("Well", did)
if well is None:
return HttpResponseServerError('""', mimetype='application/javascript')
prefix = kwargs.get('thumbprefix', 'webgateway.views.render_thumbnail')
def urlprefix(iid):
return reverse(prefix, args=(iid,))
xtra = {'thumbUrlPrefix': kwargs.get('urlprefix', urlprefix)}
return map(lambda x: x.getImage() and x.getImage().simpleMarshal(xtra=xtra), well.listChildren())
@login_required()
@jsonp
def listDatasets_json (request, pid, conn=None, **kwargs):
"""
lists all Datasets in a Project, as json
TODO: cache
@param request: http request
@param pid: Project ID
@param conn: L{omero.gateway.BlitzGateway}
@return: list of dataset json.
"""
project = conn.getObject("Project", pid)
rv = []
if project is None:
return HttpResponse('[]', mimetype='application/javascript')
return [x.simpleMarshal(xtra={'childCount':0}) for x in project.listChildren()]
@login_required()
@jsonp
def datasetDetail_json (request, did, conn=None, **kwargs):
"""
return json encoded details for a dataset
TODO: cache
"""
ds = conn.getObject("Dataset", did)
return ds.simpleMarshal()
@login_required()
@jsonp
def listProjects_json (request, conn=None, **kwargs):
"""
lists all Projects, as json
TODO: cache
@param request: http request
@param conn: L{omero.gateway.BlitzGateway}
@return: list of project json.
"""
rv = []
for pr in conn.listProjects():
rv.append( {'id': pr.id, 'name': pr.name, 'description': pr.description or ''} )
return rv
@login_required()
@jsonp
def projectDetail_json (request, pid, conn=None, **kwargs):
"""
grab details from one specific project
TODO: cache
@param request: http request
@param pid: Project ID
@param conn: L{omero.gateway.BlitzGateway}
@return: project details as dict.
"""
pr = conn.getObject("Project", pid)
rv = pr.simpleMarshal()
return rv
def searchOptFromRequest (request):
"""
Returns a dict of options for searching, based on
parameters in the http request
Request keys include:
- ctx: (http request) 'imgs' to search only images
- text: (http request) the actual text phrase
- start: starting index (0 based) for result
      - limit: number of results to return (0 == unlimited)
- author:
- grabData:
- parents:
@param request: http request
@return: Dict of options
"""
try:
r = request.REQUEST
opts = {
'search': unicode(r.get('text', '')).encode('utf8'),
'ctx': r.get('ctx', ''),
'grabData': not not r.get('grabData', False),
'parents': not not bool(r.get('parents', False)),
'start': int(r.get('start', 0)),
'limit': int(r.get('limit', 0)),
'key': r.get('key', None)
}
author = r.get('author', '')
if author:
opts['search'] += ' author:'+author
return opts
except:
logger.error(traceback.format_exc())
return {}
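# Example query string handled above (hypothetical values): ?text=nucleus&ctx=imgs
# &start=0&limit=10&author=jdoe yields {'search': 'nucleus author:jdoe',
# 'ctx': 'imgs', 'grabData': False, 'parents': False, 'start': 0, 'limit': 10,
# 'key': None}.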
@TimeIt(logging.INFO)
@login_required()
@jsonp
def search_json (request, conn=None, **kwargs):
"""
Search for objects in blitz.
Returns json encoded list of marshalled objects found by the search query
Request keys include:
- text: The text to search for
- ctx: (http request) 'imgs' to search only images
- text: (http request) the actual text phrase
- start: starting index (0 based) for result
      - limit: number of results to return (0 == unlimited)
- author:
- grabData:
- parents:
@param request: http request
@param conn: L{omero.gateway.BlitzGateway}
@return: json search results
TODO: cache
"""
opts = searchOptFromRequest(request)
rv = []
logger.debug("searchObjects(%s)" % (opts['search']))
# search returns blitz_connector wrapper objects
def urlprefix(iid):
return reverse('webgateway.views.render_thumbnail', args=(iid,))
xtra = {'thumbUrlPrefix': kwargs.get('urlprefix', urlprefix)}
pks = None
try:
if opts['ctx'] == 'imgs':
sr = conn.searchObjects(["image"], opts['search'], conn.SERVICE_OPTS)
else:
sr = conn.searchObjects(None, opts['search'], conn.SERVICE_OPTS) # searches P/D/I
except ApiUsageException:
return HttpResponseServerError('"parse exception"', mimetype='application/javascript')
def marshal ():
rv = []
if (opts['grabData'] and opts['ctx'] == 'imgs'):
bottom = min(opts['start'], len(sr)-1)
if opts['limit'] == 0:
top = len(sr)
else:
top = min(len(sr), bottom + opts['limit'])
for i in range(bottom, top):
e = sr[i]
#for e in sr:
try:
rv.append(imageData_json(request, server_id, iid=e.id, key=opts['key'], conn=conn, _internal=True))
except AttributeError, x:
logger.debug('(iid %i) ignoring Attribute Error: %s' % (e.id, str(x)))
pass
except omero.ServerError, x:
logger.debug('(iid %i) ignoring Server Error: %s' % (e.id, str(x)))
return rv
else:
return map(lambda x: x.simpleMarshal(xtra=xtra, parents=opts['parents']), sr)
rv = timeit(marshal)()
logger.debug(rv)
return rv
@login_required()
def save_image_rdef_json (request, iid, conn=None, **kwargs):
"""
Requests that the rendering defs passed in the request be set as the default for this image.
Rendering defs in request listed at L{getImgDetailsFromReq}
TODO: jsonp
@param request: http request
@param iid: Image ID
@param conn: L{omero.gateway.BlitzGateway}
@return: http response 'true' or 'false'
"""
server_id = request.session['connector'].server_id
r = request.REQUEST
pi = _get_prepared_image(request, iid, server_id=server_id, conn=conn, saveDefs=True)
if pi is None:
json_data = 'false'
else:
user_id = pi[0]._conn.getEventContext().userId
webgateway_cache.invalidateObject(server_id, user_id, pi[0])
pi[0].getThumbnail()
json_data = 'true'
if r.get('callback', None):
json_data = '%s(%s)' % (r['callback'], json_data)
return HttpResponse(json_data, mimetype='application/javascript')
@login_required()
def list_compatible_imgs_json (request, iid, conn=None, **kwargs):
"""
Lists the images on the same project that would be viable targets for copying rendering settings.
TODO: change method to:
list_compatible_imgs_json (request, iid, server_id=None, conn=None, **kwargs):
@param request: http request
@param iid: Image ID
@param conn: L{omero.gateway.BlitzGateway}
@return: json list of image IDs
"""
json_data = 'false'
r = request.REQUEST
if conn is None:
img = None
else:
img = conn.getObject("Image", iid)
if img is not None:
# List all images in project
imgs = []
for ds in img.getProject().listChildren():
imgs.extend(ds.listChildren())
# Filter the ones that would pass the applySettingsToImages call
img_ptype = img.getPrimaryPixels().getPixelsType().getValue()
img_ccount = img.getSizeC()
img_ew = [x.getLabel() for x in img.getChannels()]
img_ew.sort()
def compat (i):
if long(i.getId()) == long(iid):
return False
pp = i.getPrimaryPixels()
if pp is None or \
i.getPrimaryPixels().getPixelsType().getValue() != img_ptype or \
i.getSizeC() != img_ccount:
return False
ew = [x.getLabel() for x in i.getChannels()]
ew.sort()
if ew != img_ew:
return False
return True
imgs = filter(compat, imgs)
json_data = simplejson.dumps([x.getId() for x in imgs])
if r.get('callback', None):
json_data = '%s(%s)' % (r['callback'], json_data)
return HttpResponse(json_data, mimetype='application/javascript')
@login_required()
@jsonp
def copy_image_rdef_json (request, conn=None, **kwargs):
"""
Copy the rendering settings from one image to a list of images.
Images are specified in request by 'fromid' and list of 'toids'
Returns json dict of Boolean:[Image-IDs] for images that have successfully
had the rendering settings applied, or not.
@param request: http request
@param server_id:
@param conn: L{omero.gateway.BlitzGateway}
@return: json dict of Boolean:[Image-IDs]
"""
server_id = request.session['connector'].server_id
json_data = False
r = request.REQUEST
try:
fromid = long(r.get('fromid', None))
toids = map(lambda x: long(x), r.getlist('toids'))
except TypeError:
fromid = None
except ValueError:
fromid = None
if fromid is not None and len(toids) > 0:
fromimg = conn.getObject("Image", fromid)
frompid = fromimg.getPixelsId()
userid = fromimg.getOwner().getId()
if fromimg.canWrite():
ctx = conn.SERVICE_OPTS.copy()
ctx.setOmeroGroup(fromimg.getDetails().getGroup().getId())
ctx.setOmeroUser(userid)
rsettings = conn.getRenderingSettingsService()
json_data = rsettings.applySettingsToImages(frompid, list(toids), ctx)
if fromid in json_data[True]:
del json_data[True][json_data[True].index(fromid)]
for iid in json_data[True]:
img = conn.getObject("Image", iid)
img is not None and webgateway_cache.invalidateObject(server_id, userid, img)
return json_data
#
# json_data = simplejson.dumps(json_data)
#
# if r.get('callback', None):
# json_data = '%s(%s)' % (r['callback'], json_data)
# return HttpResponse(json_data, mimetype='application/javascript')
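# Request sketch for the view above (hypothetical IDs): ?fromid=101&toids=102&toids=103
# applies the rendering settings of image 101 to images 102 and 103 and returns the
# dict from applySettingsToImages, e.g. {True: [102, 103], False: []}, with 101 itself
# removed from the success list.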
@login_required()
@jsonp
def reset_image_rdef_json (request, iid, conn=None, **kwargs):
"""
Try to remove all rendering defs the logged in user has for this image.
@param request: http request
@param iid: Image ID
@param conn: L{omero.gateway.BlitzGateway}
@return: json 'true', or 'false' if failed
"""
img = conn.getObject("Image", iid)
    if img is not None and img.resetRDefs():
        user_id = conn.getEventContext().userId
        server_id = request.session['connector'].server_id
        webgateway_cache.invalidateObject(server_id, user_id, img)
        return True
    return False
# if _conn is not None:
# return json_data == 'true' # TODO: really return a boolean? (not json)
# if r.get('callback', None):
# json_data = '%s(%s)' % (r['callback'], json_data)
# return HttpResponse(json_data, mimetype='application/javascript')
@login_required()
def full_viewer (request, iid, conn=None, **kwargs):
"""
This view is responsible for showing the omero_image template
Image rendering options in request are used in the display page. See L{getImgDetailsFromReq}.
@param request: http request.
@param iid: Image ID
@param conn: L{omero.gateway.BlitzGateway}
@param **kwargs: Can be used to specify the html 'template' for rendering
@return: html page of image and metadata
"""
rid = getImgDetailsFromReq(request)
try:
image = conn.getObject("Image", iid)
if image is None:
logger.debug("(a)Image %s not found..." % (str(iid)))
raise Http404
d = {'blitzcon': conn,
'image': image,
'opts': rid,
'roiCount': image.getROICount(),
'viewport_server': kwargs.get('viewport_server', '/webgateway'),
'object': 'image:%i' % int(iid)}
template = kwargs.get('template', "webgateway/viewport/omero_image.html")
t = template_loader.get_template(template)
c = Context(request,d)
rsp = t.render(c)
except omero.SecurityViolation:
raise Http404
return HttpResponse(rsp)
@login_required()
def get_shape_json(request, roiId, shapeId, conn=None, **kwargs):
roiId = int(roiId)
shapeId = int(shapeId)
shape = conn.getQueryService().findByQuery(
'select shape from Roi as roi ' \
'join roi.shapes as shape ' \
'where roi.id = %d and shape.id = %d' % (roiId, shapeId),
None)
logger.debug('Shape: %r' % shape)
if shape is None:
logger.debug('No such shape: %r' % shapeId)
raise Http404
return HttpResponse(simplejson.dumps(shapeMarshal(shape)),
mimetype='application/javascript')
@login_required()
def get_rois_json(request, imageId, conn=None, **kwargs):
"""
Returns json data of the ROIs in the specified image.
"""
rois = []
roiService = conn.getRoiService()
#rois = webfigure_utils.getRoiShapes(roiService, long(imageId)) # gets a whole json list of ROIs
result = roiService.findByImage(long(imageId), None, conn.SERVICE_OPTS)
for r in result.rois:
roi = {}
roi['id'] = r.getId().getValue()
# go through all the shapes of the ROI
shapes = []
for s in r.copyShapes():
if s is None: # seems possible in some situations
continue
shapes.append(shapeMarshal(s))
# sort shapes by Z, then T.
        shapes.sort(key=lambda x:
                    "%03d%03d" % (x.get('theZ', -1), x.get('theT', -1)))
roi['shapes'] = shapes
rois.append(roi)
rois.sort(key=lambda x: x['id']) # sort by ID - same as in measurement tool.
return HttpResponse(simplejson.dumps(rois), mimetype='application/javascript')
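# Response shape (sketch): a JSON list such as [{"id": 42, "shapes": [<marshalled
# shape>, ...]}, ...], with shapes ordered by Z then T and ROIs ordered by id.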
def test (request):
"""
Tests the L{full_viewer} with no args passed to the template.
@param request: http request.
@return: blank page template
"""
context = {}
t = template_loader.get_template('webgateway/viewport/omero_image.html')
c = Context(request,context)
return HttpResponse(t.render(c))
@login_required(isAdmin=True)
@jsonp
def su (request, user, conn=None, **kwargs):
"""
If current user is admin, switch the session to a new connection owned by 'user'
(puts the new session ID in the request.session)
Return False if not possible
@param request: http request.
@param user: Username of new connection owner
@param conn: L{omero.gateway.BlitzGateway}
@param **kwargs: Can be used to specify the html 'template' for rendering
@return: Boolean
"""
conn.setGroupNameForSession('system')
connector = request.session['connector']
connector = Connector(connector.server_id, connector.is_secure)
session = conn.getSessionService().getSession(conn._sessionUuid)
ttl = session.getTimeToIdle().val
connector.omero_session_key = conn.suConn(user, ttl=ttl)._sessionUuid
request.session['connector'] = connector
conn.revertGroupForSession()
conn.seppuku()
return True
| gpl-2.0 | -5,932,802,012,153,358,000 | 36.636311 | 184 | 0.587316 | false |
Edzvu/Edzvu.github.io | M2Crypto-0.35.2/tests/test_x509.py | 1 | 27297 | #!/usr/bin/env python
"""Unit tests for M2Crypto.X509.
Contributed by Toby Allsopp <[email protected]> under M2Crypto's license.
Portions created by Open Source Applications Foundation (OSAF) are
Copyright (C) 2004-2005 OSAF. All Rights Reserved.
Author: Heikki Toivonen
"""
import base64
import logging
import os
import time
import warnings
from M2Crypto import ASN1, BIO, EVP, RSA, Rand, X509, m2 # noqa
from tests import unittest
log = logging.getLogger(__name__)
class X509TestCase(unittest.TestCase):
def callback(self, *args):
pass
def setUp(self):
self.expected_hash = 'BA4212E8B55527570828E7F5A0005D17C64BDC4C'
def mkreq(self, bits, ca=0):
pk = EVP.PKey()
x = X509.Request()
rsa = RSA.gen_key(bits, 65537, self.callback)
pk.assign_rsa(rsa)
rsa = None # should not be freed here
x.set_pubkey(pk)
name = x.get_subject()
name.C = "UK"
name.CN = "OpenSSL Group"
if not ca:
ext1 = X509.new_extension('subjectAltName',
'DNS:foobar.example.com')
ext2 = X509.new_extension('nsComment', 'Hello there')
extstack = X509.X509_Extension_Stack()
extstack.push(ext1)
extstack.push(ext2)
x.add_extensions(extstack)
with self.assertRaises(ValueError):
x.sign(pk, 'sha513')
x.sign(pk, 'sha1')
self.assertTrue(x.verify(pk))
pk2 = x.get_pubkey()
self.assertTrue(x.verify(pk2))
return x, pk
def test_ext(self):
with self.assertRaises(ValueError):
X509.new_extension('subjectKeyIdentifier', 'hash')
ext = X509.new_extension('subjectAltName', 'DNS:foobar.example.com')
self.assertEqual(ext.get_value(), 'DNS:foobar.example.com')
self.assertEqual(ext.get_value(indent=2),
' DNS:foobar.example.com')
self.assertEqual(ext.get_value(flag=m2.X509V3_EXT_PARSE_UNKNOWN),
'DNS:foobar.example.com')
def test_ext_error(self):
with self.assertRaises(X509.X509Error):
X509.new_extension('nonsensicalName', 'blabla')
def test_extstack(self):
# new
ext1 = X509.new_extension('subjectAltName', 'DNS:foobar.example.com')
ext2 = X509.new_extension('nsComment', 'Hello there')
extstack = X509.X509_Extension_Stack()
# push
extstack.push(ext1)
extstack.push(ext2)
self.assertEqual(extstack[1].get_name(), 'nsComment')
self.assertEqual(len(extstack), 2)
# iterator
i = 0
for e in extstack:
i += 1
self.assertGreater(len(e.get_name()), 0)
self.assertEqual(i, 2)
# pop
ext3 = extstack.pop()
self.assertEqual(len(extstack), 1)
self.assertEqual(extstack[0].get_name(), 'subjectAltName')
extstack.push(ext3)
self.assertEqual(len(extstack), 2)
self.assertEqual(extstack[1].get_name(), 'nsComment')
self.assertIsNotNone(extstack.pop())
self.assertIsNotNone(extstack.pop())
self.assertIsNone(extstack.pop())
def test_x509_name(self):
n = X509.X509_Name()
# It seems this actually needs to be a real 2 letter country code
n.C = 'US'
self.assertEqual(n.C, 'US')
n.SP = 'State or Province'
self.assertEqual(n.SP, 'State or Province')
n.L = 'locality name'
self.assertEqual(n.L, 'locality name')
# Yes, 'orhanization' is a typo, I know it and you're smart.
# However, fixing this typo would break later hashes.
# I don't think it is worthy of troubles.
n.O = 'orhanization name'
self.assertEqual(n.O, 'orhanization name')
n.OU = 'org unit'
self.assertEqual(n.OU, 'org unit')
n.CN = 'common name'
self.assertEqual(n.CN, 'common name')
n.Email = '[email protected]'
self.assertEqual(n.Email, '[email protected]')
n.serialNumber = '1234'
self.assertEqual(n.serialNumber, '1234')
n.SN = 'surname'
self.assertEqual(n.SN, 'surname')
n.GN = 'given name'
self.assertEqual(n.GN, 'given name')
self.assertEqual(n.as_text(),
'C=US, ST=State or Province, ' +
'L=locality name, O=orhanization name, ' +
'OU=org unit, CN=common ' +
'name/[email protected]' +
'/serialNumber=1234, ' +
'SN=surname, GN=given name')
self.assertEqual(len(n), 10,
'X509_Name has inappropriate length %d ' % len(n))
n.givenName = 'name given'
self.assertEqual(n.GN, 'given name') # Just gets the first
self.assertEqual(n.as_text(), 'C=US, ST=State or Province, ' +
'L=locality name, O=orhanization name, ' +
'OU=org unit, ' +
'CN=common name/[email protected]' +
'/serialNumber=1234, ' +
'SN=surname, GN=given name, GN=name given')
self.assertEqual(len(n), 11,
'After adding one more attribute X509_Name should ' +
'have 11 and not %d attributes.' % len(n))
n.add_entry_by_txt(field="CN", type=ASN1.MBSTRING_ASC,
entry="Proxy", len=-1, loc=-1, set=0)
self.assertEqual(len(n), 12,
'After adding one more attribute X509_Name should ' +
'have 12 and not %d attributes.' % len(n))
self.assertEqual(n.entry_count(), 12, n.entry_count())
self.assertEqual(n.as_text(), 'C=US, ST=State or Province, ' +
'L=locality name, O=orhanization name, ' +
'OU=org unit, ' +
'CN=common name/[email protected]' +
'/serialNumber=1234, ' +
'SN=surname, GN=given name, GN=name given, ' +
'CN=Proxy')
with self.assertRaises(AttributeError):
n.__getattr__('foobar')
n.foobar = 1
self.assertEqual(n.foobar, 1)
# X509_Name_Entry tests
l = 0
for entry in n:
self.assertIsInstance(entry, X509.X509_Name_Entry)
self.assertIsInstance(entry.get_object(), ASN1.ASN1_Object)
self.assertIsInstance(entry.get_data(), ASN1.ASN1_String)
l += 1
self.assertEqual(l, 12, l)
l = 0
for cn in n.get_entries_by_nid(m2.NID_commonName):
self.assertIsInstance(cn, X509.X509_Name_Entry)
self.assertIsInstance(cn.get_object(), ASN1.ASN1_Object)
data = cn.get_data()
self.assertIsInstance(data, ASN1.ASN1_String)
t = data.as_text()
self.assertIn(t, ("common name", "Proxy",))
l += 1
self.assertEqual(l, 2,
'X509_Name has %d commonName entries instead '
'of expected 2' % l)
# The target list is not deleted when the loop is finished
# https://docs.python.org/2.7/reference\
# /compound_stmts.html#the-for-statement
# so this checks what are the attributes of the last value of
# ``cn`` variable.
cn.set_data(b"Hello There!")
self.assertEqual(cn.get_data().as_text(), "Hello There!")
# OpenSSL 1.0.1h switched from encoding strings as PRINTABLESTRING (the
# first hash value) to UTF8STRING (the second one)
self.assertIn(n.as_hash(), (1697185131, 1370641112),
'Unexpected value of the X509_Name hash %s' %
n.as_hash())
self.assertRaises(IndexError, lambda: n[100])
self.assertIsNotNone(n[10])
def test_mkreq(self):
(req, _) = self.mkreq(1024)
req.save_pem('tests/tmp_request.pem')
req2 = X509.load_request('tests/tmp_request.pem')
os.remove('tests/tmp_request.pem')
req.save('tests/tmp_request.pem')
req3 = X509.load_request('tests/tmp_request.pem')
os.remove('tests/tmp_request.pem')
req.save('tests/tmp_request.der', format=X509.FORMAT_DER)
req4 = X509.load_request('tests/tmp_request.der',
format=X509.FORMAT_DER)
os.remove('tests/tmp_request.der')
self.assertEqual(req.as_pem(), req2.as_pem())
self.assertEqual(req.as_text(), req2.as_text())
self.assertEqual(req.as_der(), req2.as_der())
self.assertEqual(req.as_pem(), req3.as_pem())
self.assertEqual(req.as_text(), req3.as_text())
self.assertEqual(req.as_der(), req3.as_der())
self.assertEqual(req.as_pem(), req4.as_pem())
self.assertEqual(req.as_text(), req4.as_text())
self.assertEqual(req.as_der(), req4.as_der())
self.assertEqual(req.get_version(), 0)
req.set_version(1)
self.assertEqual(req.get_version(), 1)
req.set_version(0)
self.assertEqual(req.get_version(), 0)
def test_mkcert(self):
for utc in (True, False):
req, pk = self.mkreq(1024)
pkey = req.get_pubkey()
self.assertTrue(req.verify(pkey))
sub = req.get_subject()
self.assertEqual(len(sub), 2,
'Subject should be long 2 items not %d' % len(sub))
cert = X509.X509()
cert.set_serial_number(1)
cert.set_version(2)
cert.set_subject(sub)
t = int(time.time()) + time.timezone
if utc:
now = ASN1.ASN1_UTCTIME()
else:
now = ASN1.ASN1_TIME()
now.set_time(t)
now_plus_year = ASN1.ASN1_TIME()
now_plus_year.set_time(t + 60 * 60 * 24 * 365)
cert.set_not_before(now)
cert.set_not_after(now_plus_year)
self.assertEqual(str(cert.get_not_before()), str(now))
self.assertEqual(str(cert.get_not_after()), str(now_plus_year))
issuer = X509.X509_Name()
issuer.CN = 'The Issuer Monkey'
issuer.O = 'The Organization Otherwise Known as My CA, Inc.'
cert.set_issuer(issuer)
cert.set_pubkey(pkey)
cert.set_pubkey(cert.get_pubkey()) # Make sure get/set work
ext = X509.new_extension('subjectAltName', 'DNS:foobar.example.com')
ext.set_critical(0)
self.assertEqual(ext.get_critical(), 0)
cert.add_ext(ext)
cert.sign(pk, 'sha1')
with self.assertRaises(ValueError):
cert.sign(pk, 'nosuchalgo')
            self.assertEqual(cert.get_ext('subjectAltName').get_name(),
                             'subjectAltName')
            self.assertEqual(cert.get_ext_at(0).get_name(),
                             'subjectAltName')
            self.assertEqual(cert.get_ext_at(0).get_value(),
                             'DNS:foobar.example.com')
self.assertEqual(cert.get_ext_count(), 1,
'Certificate should have now 1 extension not %d' %
cert.get_ext_count())
with self.assertRaises(IndexError):
cert.get_ext_at(1)
self.assertTrue(cert.verify())
self.assertTrue(cert.verify(pkey))
self.assertTrue(cert.verify(cert.get_pubkey()))
self.assertEqual(cert.get_version(), 2)
self.assertEqual(cert.get_serial_number(), 1)
self.assertEqual(cert.get_issuer().CN, 'The Issuer Monkey')
if m2.OPENSSL_VERSION_NUMBER >= 0x90800f:
self.assertFalse(cert.check_ca())
self.assertFalse(cert.check_purpose(m2.X509_PURPOSE_SSL_SERVER, 1))
self.assertFalse(cert.check_purpose(m2.X509_PURPOSE_NS_SSL_SERVER,
1))
self.assertTrue(cert.check_purpose(m2.X509_PURPOSE_SSL_SERVER, 0))
self.assertTrue(cert.check_purpose(m2.X509_PURPOSE_NS_SSL_SERVER,
0))
self.assertTrue(cert.check_purpose(m2.X509_PURPOSE_ANY, 0))
else:
with self.assertRaises(AttributeError):
cert.check_ca()
def mkcacert(self, utc):
req, pk = self.mkreq(1024, ca=1)
pkey = req.get_pubkey()
sub = req.get_subject()
cert = X509.X509()
cert.set_serial_number(1)
cert.set_version(2)
cert.set_subject(sub)
t = int(time.time()) + time.timezone
if utc:
now = ASN1.ASN1_UTCTIME()
else:
now = ASN1.ASN1_TIME()
now.set_time(t)
now_plus_year = ASN1.ASN1_TIME()
now_plus_year.set_time(t + 60 * 60 * 24 * 365)
cert.set_not_before(now)
cert.set_not_after(now_plus_year)
issuer = X509.X509_Name()
issuer.C = "UK"
issuer.CN = "OpenSSL Group"
cert.set_issuer(issuer)
cert.set_pubkey(pkey)
ext = X509.new_extension('basicConstraints', 'CA:TRUE')
cert.add_ext(ext)
cert.sign(pk, 'sha1')
if m2.OPENSSL_VERSION_NUMBER >= 0x0090800f:
self.assertTrue(cert.check_ca())
self.assertTrue(cert.check_purpose(m2.X509_PURPOSE_SSL_SERVER,
1))
self.assertTrue(cert.check_purpose(m2.X509_PURPOSE_NS_SSL_SERVER,
1))
self.assertTrue(cert.check_purpose(m2.X509_PURPOSE_ANY, 1))
self.assertTrue(cert.check_purpose(m2.X509_PURPOSE_SSL_SERVER,
0))
self.assertTrue(cert.check_purpose(m2.X509_PURPOSE_NS_SSL_SERVER,
0))
self.assertTrue(cert.check_purpose(m2.X509_PURPOSE_ANY, 0))
else:
with self.assertRaises(AttributeError):
cert.check_ca()
return cert, pk, pkey
def test_mkcacert(self):
for utc in (True, False):
cacert, _, pkey = self.mkcacert(utc)
self.assertTrue(cacert.verify(pkey))
def test_mkproxycert(self):
for utc in (True, False):
cacert, pk1, _ = self.mkcacert(utc)
end_entity_cert_req, pk2 = self.mkreq(1024)
end_entity_cert = self.make_eecert(cacert, utc)
end_entity_cert.set_subject(end_entity_cert_req.get_subject())
end_entity_cert.set_pubkey(end_entity_cert_req.get_pubkey())
end_entity_cert.sign(pk1, 'sha1')
proxycert = self.make_proxycert(end_entity_cert, utc)
proxycert.sign(pk2, 'sha1')
self.assertTrue(proxycert.verify(pk2))
self.assertEqual(proxycert.get_ext_at(0).get_name(),
'proxyCertInfo')
self.assertEqual(proxycert.get_ext_at(0).get_value(),
'Path Length Constraint: infinite\n' +
'Policy Language: Inherit all\n')
self.assertEqual(proxycert.get_ext_count(), 1,
proxycert.get_ext_count())
self.assertEqual(proxycert.get_subject().as_text(),
'C=UK, CN=OpenSSL Group, CN=Proxy')
self.assertEqual(
proxycert.get_subject().as_text(indent=2,
flags=m2.XN_FLAG_RFC2253),
' CN=Proxy,CN=OpenSSL Group,C=UK')
@staticmethod
def make_eecert(cacert, utc):
eecert = X509.X509()
eecert.set_serial_number(2)
eecert.set_version(2)
t = int(time.time()) + time.timezone
if utc:
now = ASN1.ASN1_UTCTIME()
else:
now = ASN1.ASN1_TIME()
now.set_time(t)
now_plus_year = ASN1.ASN1_TIME()
now_plus_year.set_time(t + 60 * 60 * 24 * 365)
eecert.set_not_before(now)
eecert.set_not_after(now_plus_year)
eecert.set_issuer(cacert.get_subject())
return eecert
def make_proxycert(self, eecert, utc):
proxycert = X509.X509()
pk2 = EVP.PKey()
proxykey = RSA.gen_key(1024, 65537, self.callback)
pk2.assign_rsa(proxykey)
proxycert.set_pubkey(pk2)
proxycert.set_version(2)
if utc:
not_before = ASN1.ASN1_UTCTIME()
not_after = ASN1.ASN1_UTCTIME()
else:
not_before = ASN1.ASN1_TIME()
not_after = ASN1.ASN1_TIME()
not_before.set_time(int(time.time()))
offset = 12 * 3600
not_after.set_time(int(time.time()) + offset)
proxycert.set_not_before(not_before)
proxycert.set_not_after(not_after)
proxycert.set_issuer_name(eecert.get_subject())
proxycert.set_serial_number(12345678)
issuer_name_string = eecert.get_subject().as_text()
seq = issuer_name_string.split(",")
subject_name = X509.X509_Name()
for entry in seq:
l = entry.split("=")
subject_name.add_entry_by_txt(field=l[0].strip(),
type=ASN1.MBSTRING_ASC,
entry=l[1], len=-1, loc=-1, set=0)
subject_name.add_entry_by_txt(field="CN", type=ASN1.MBSTRING_ASC,
entry="Proxy", len=-1, loc=-1, set=0)
proxycert.set_subject_name(subject_name)
# XXX leaks 8 bytes
pci_ext = X509.new_extension("proxyCertInfo",
"critical,language:Inherit all", 1)
proxycert.add_ext(pci_ext)
return proxycert
def test_fingerprint(self):
x509 = X509.load_cert('tests/x509.pem')
fp = x509.get_fingerprint('sha1')
self.assertEqual(fp, self.expected_hash)
def test_load_der_string(self):
with open('tests/x509.der', 'rb') as f:
x509 = X509.load_cert_der_string(f.read())
fp = x509.get_fingerprint('sha1')
self.assertEqual(fp, self.expected_hash)
def test_save_der_string(self):
x509 = X509.load_cert('tests/x509.pem')
s = x509.as_der()
with open('tests/x509.der', 'rb') as f:
s2 = f.read()
self.assertEqual(s, s2)
def test_load(self):
x509 = X509.load_cert('tests/x509.pem')
x5092 = X509.load_cert('tests/x509.der', format=X509.FORMAT_DER)
self.assertEqual(x509.as_text(), x5092.as_text())
self.assertEqual(x509.as_pem(), x5092.as_pem())
self.assertEqual(x509.as_der(), x5092.as_der())
return
def test_load_bio(self):
with BIO.openfile('tests/x509.pem') as bio:
with BIO.openfile('tests/x509.der') as bio2:
x509 = X509.load_cert_bio(bio)
x5092 = X509.load_cert_bio(bio2, format=X509.FORMAT_DER)
with self.assertRaises(ValueError):
X509.load_cert_bio(bio2, format=45678)
self.assertEqual(x509.as_text(), x5092.as_text())
self.assertEqual(x509.as_pem(), x5092.as_pem())
self.assertEqual(x509.as_der(), x5092.as_der())
def test_load_string(self):
with open('tests/x509.pem') as f:
s = f.read()
with open('tests/x509.der', 'rb') as f2:
s2 = f2.read()
x509 = X509.load_cert_string(s)
x5092 = X509.load_cert_string(s2, X509.FORMAT_DER)
self.assertEqual(x509.as_text(), x5092.as_text())
self.assertEqual(x509.as_pem(), x5092.as_pem())
self.assertEqual(x509.as_der(), x5092.as_der())
def test_load_request_bio(self):
(req, _) = self.mkreq(1024)
r1 = X509.load_request_der_string(req.as_der())
r2 = X509.load_request_string(req.as_der(), X509.FORMAT_DER)
r3 = X509.load_request_string(req.as_pem(), X509.FORMAT_PEM)
r4 = X509.load_request_bio(BIO.MemoryBuffer(req.as_der()),
X509.FORMAT_DER)
r5 = X509.load_request_bio(BIO.MemoryBuffer(req.as_pem()),
X509.FORMAT_PEM)
for r in [r1, r2, r3, r4, r5]:
self.assertEqual(req.as_der(), r.as_der())
with self.assertRaises(ValueError):
X509.load_request_bio(BIO.MemoryBuffer(req.as_pem()), 345678)
def test_save(self):
x509 = X509.load_cert('tests/x509.pem')
with open('tests/x509.pem', 'r') as f:
l_tmp = f.readlines()
# -----BEGIN CERTIFICATE----- : -----END CERTIFICATE-----
beg_idx = l_tmp.index('-----BEGIN CERTIFICATE-----\n')
end_idx = l_tmp.index('-----END CERTIFICATE-----\n')
x509_pem = ''.join(l_tmp[beg_idx:end_idx + 1])
with open('tests/x509.der', 'rb') as f:
x509_der = f.read()
x509.save('tests/tmpcert.pem')
with open('tests/tmpcert.pem') as f:
s = f.read()
self.assertEqual(s, x509_pem)
os.remove('tests/tmpcert.pem')
x509.save('tests/tmpcert.der', format=X509.FORMAT_DER)
with open('tests/tmpcert.der', 'rb') as f:
s = f.read()
self.assertEqual(s, x509_der)
os.remove('tests/tmpcert.der')
def test_malformed_data(self):
try:
with self.assertRaises(X509.X509Error):
X509.load_cert_string('Hello')
with self.assertRaises(X509.X509Error):
X509.load_cert_der_string('Hello')
with self.assertRaises(X509.X509Error):
X509.new_stack_from_der(b'Hello')
with self.assertRaises(X509.X509Error):
X509.load_cert('tests/alltests.py')
with self.assertRaises(X509.X509Error):
X509.load_request('tests/alltests.py')
with self.assertRaises(X509.X509Error):
X509.load_request_string('Hello')
with self.assertRaises(X509.X509Error):
X509.load_request_der_string('Hello')
with self.assertRaises(X509.X509Error):
X509.load_crl('tests/alltests.py')
except SystemError:
pass
def test_long_serial(self):
cert = X509.load_cert('tests/long_serial_cert.pem')
self.assertEqual(cert.get_serial_number(), 17616841808974579194)
cert = X509.load_cert('tests/thawte.pem')
self.assertEqual(cert.get_serial_number(),
127614157056681299805556476275995414779)
def test_set_long_serial(self):
cert = X509.X509()
cert.set_serial_number(127614157056681299805556476275995414779)
self.assertEqual(cert.get_serial_number(),
127614157056681299805556476275995414779)
def test_date_after_2050_working(self):
cert = X509.load_cert('tests/bad_date_cert.crt')
self.assertEqual(str(cert.get_not_after()), 'Feb 9 14:57:46 2116 GMT')
def test_easy_rsa_generated(self):
""" Test loading a cert generated by easy RSA.
https://github.com/fedora-infra/fedmsg/pull/389
"""
# Does this raise an exception?
X509.load_cert('tests/easy_rsa.pem')
class X509StackTestCase(unittest.TestCase):
def test_make_stack_from_der(self):
with open("tests/der_encoded_seq.b64", 'rb') as f:
b64 = f.read()
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
seq = base64.decodestring(b64)
stack = X509.new_stack_from_der(seq)
cert = stack.pop()
self.assertIsNone(stack.pop())
cert.foobar = 1
self.assertEqual(cert.foobar, 1)
subject = cert.get_subject()
self.assertEqual(
str(subject),
"/DC=org/DC=doegrids/OU=Services/CN=host/bosshog.lbl.gov")
def test_make_stack_check_num(self):
with open("tests/der_encoded_seq.b64", 'rb') as f:
b64 = f.read()
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
seq = base64.decodestring(b64)
stack = X509.new_stack_from_der(seq)
num = len(stack)
self.assertEqual(num, 1)
cert = stack.pop()
num = len(stack)
self.assertEqual(num, 0)
subject = cert.get_subject()
self.assertEqual(
str(subject),
"/DC=org/DC=doegrids/OU=Services/CN=host/bosshog.lbl.gov")
def test_make_stack(self):
stack = X509.X509_Stack()
cert = X509.load_cert("tests/x509.pem")
issuer = X509.load_cert("tests/ca.pem")
cert_subject1 = cert.get_subject()
issuer_subject1 = issuer.get_subject()
stack.push(cert)
stack.push(issuer)
# Test stack iterator
i = 0
for c in stack:
i += 1
self.assertGreater(len(c.get_subject().CN), 0)
self.assertEqual(i, 2)
stack.pop()
cert_pop = stack.pop()
cert_subject2 = cert_pop.get_subject()
issuer_subject2 = issuer.get_subject()
self.assertEqual(str(cert_subject1), str(cert_subject2))
self.assertEqual(str(issuer_subject1), str(issuer_subject2))
def test_as_der(self):
stack = X509.X509_Stack()
cert = X509.load_cert("tests/x509.pem")
issuer = X509.load_cert("tests/ca.pem")
cert_subject1 = cert.get_subject()
issuer_subject1 = issuer.get_subject()
stack.push(cert)
stack.push(issuer)
der_seq = stack.as_der()
stack2 = X509.new_stack_from_der(der_seq)
stack2.pop()
cert_pop = stack2.pop()
cert_subject2 = cert_pop.get_subject()
issuer_subject2 = issuer.get_subject()
self.assertEqual(str(cert_subject1), str(cert_subject2))
self.assertEqual(str(issuer_subject1), str(issuer_subject2))
class X509ExtTestCase(unittest.TestCase):
def test_ext(self):
if 0: # XXX
# With this leaks 8 bytes:
name = "proxyCertInfo"
value = "critical,language:Inherit all"
else:
# With this there are no leaks:
name = "nsComment"
value = "Hello"
ctx = m2.x509v3_set_nconf()
x509_ext_ptr = m2.x509v3_ext_conf(None, ctx, name, value)
X509.X509_Extension(x509_ext_ptr, 1)
class CRLTestCase(unittest.TestCase):
def test_new(self):
crl = X509.CRL()
self.assertEqual(crl.as_text()[:34],
'Certificate Revocation List (CRL):')
def suite():
st = unittest.TestSuite()
st.addTest(unittest.makeSuite(X509TestCase))
st.addTest(unittest.makeSuite(X509StackTestCase))
st.addTest(unittest.makeSuite(X509ExtTestCase))
st.addTest(unittest.makeSuite(CRLTestCase))
return st
if __name__ == '__main__':
Rand.load_file('randpool.dat', -1)
unittest.TextTestRunner().run(suite())
Rand.save_file('randpool.dat')
| mit | -5,073,989,765,738,386,000 | 37.774148 | 83 | 0.553211 | false |
Parallel-in-Time/pySDC | pySDC/playgrounds/mpifft/grayscott.py | 1 | 7994 | import numpy as np
from mpi4py import MPI
import matplotlib.pyplot as plt
from pySDC.helpers.stats_helper import filter_stats, sort_stats
from pySDC.implementations.collocation_classes.gauss_radau_right import CollGaussRadau_Right
from pySDC.implementations.collocation_classes.gauss_lobatto import CollGaussLobatto
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.implementations.sweeper_classes.multi_implicit import multi_implicit
from pySDC.implementations.problem_classes.GrayScott_MPIFFT import grayscott_imex_diffusion, grayscott_imex_linear, \
grayscott_mi_diffusion, grayscott_mi_linear
from pySDC.implementations.transfer_classes.TransferMesh_MPIFFT import fft_to_fft
def run_simulation(spectral=None, splitting_type=None, ml=None, num_procs=None):
"""
    A test program to do SDC, MLSDC and PFASST runs for the 2D Gray-Scott equations
    Args:
        spectral (bool): run in real or spectral space
        splitting_type (str): splitting strategy ('diffusion', 'linear', 'mi_diffusion' or 'mi_linear')
        ml (bool): single or multiple levels
        num_procs (int): number of parallel processors
"""
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
# initialize level parameters
level_params = dict()
level_params['restol'] = 1E-12
level_params['dt'] = 8E-00
level_params['nsweeps'] = [1]
level_params['residual_type'] = 'last_abs'
# initialize sweeper parameters
sweeper_params = dict()
# sweeper_params['collocation_class'] = CollGaussRadau_Right
sweeper_params['collocation_class'] = CollGaussLobatto
sweeper_params['num_nodes'] = [5]
sweeper_params['QI'] = ['LU'] # For the IMEX sweeper, the LU-trick can be activated for the implicit part
    sweeper_params['Q1'] = ['LU']  # For the multi-implicit sweeper, the LU-trick for the first implicit part
    sweeper_params['Q2'] = ['LU']  # For the multi-implicit sweeper, the LU-trick for the second implicit part
sweeper_params['QE'] = ['EE'] # You can try PIC here, but PFASST doesn't like this..
sweeper_params['initial_guess'] = 'spread'
# initialize problem parameters
problem_params = dict()
if ml:
problem_params['nvars'] = [(128, 128), (32, 32)]
else:
problem_params['nvars'] = [(128, 128)]
problem_params['spectral'] = spectral
problem_params['comm'] = comm
problem_params['Du'] = 0.00002
problem_params['Dv'] = 0.00001
problem_params['A'] = 0.04
problem_params['B'] = 0.1
problem_params['newton_maxiter'] = 50
problem_params['newton_tol'] = 1E-11
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 100
step_params['errtol'] = 1E-09
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 20 if rank == 0 else 99
# controller_params['predict_type'] = 'fine_only'
controller_params['use_iteration_estimator'] = False
# fill description dictionary for easy step instantiation
description = dict()
description['problem_params'] = problem_params # pass problem parameters
if splitting_type == 'diffusion':
description['problem_class'] = grayscott_imex_diffusion
elif splitting_type == 'linear':
description['problem_class'] = grayscott_imex_linear
elif splitting_type == 'mi_diffusion':
description['problem_class'] = grayscott_mi_diffusion
elif splitting_type == 'mi_linear':
description['problem_class'] = grayscott_mi_linear
else:
raise NotImplementedError(f'splitting_type = {splitting_type} not implemented')
if splitting_type == 'mi_diffusion' or splitting_type == 'mi_linear':
description['sweeper_class'] = multi_implicit
else:
description['sweeper_class'] = imex_1st_order
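    # Note: the 'mi_*' splittings treat both parts implicitly via the multi_implicit
    # sweeper (using the Q1/Q2 preconditioners), whereas 'diffusion' and 'linear'
    # treat the remaining part explicitly via the IMEX sweeper (using QI/QE).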
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
description['space_transfer_class'] = fft_to_fft
# set time parameters
t0 = 0.0
Tend = 32
f = None
if rank == 0:
f = open('GS_out.txt', 'a')
out = f'Running with ml = {ml} and num_procs = {num_procs}...'
f.write(out + '\n')
print(out)
# instantiate controller
controller = controller_nonMPI(num_procs=num_procs, controller_params=controller_params, description=description)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# plt.figure()
# plt.imshow(uinit[..., 0], vmin=0, vmax=1)
# plt.title('v')
# plt.colorbar()
# plt.figure()
# plt.imshow(uinit[..., 1], vmin=0, vmax=1)
# plt.title('v')
# plt.colorbar()
# plt.figure()
# plt.imshow(uinit[..., 0] + uinit[..., 1])
# plt.title('sum')
# plt.colorbar()
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
# plt.figure()
# plt.imshow(P.fft.backward(uend[..., 0]))#, vmin=0, vmax=1)
# # plt.imshow(np.fft.irfft2(uend[..., 0]))#, vmin=0, vmax=1)
# plt.title('u')
# plt.colorbar()
# plt.figure()
# plt.imshow(P.fft.backward(uend[..., 1]))#, vmin=0, vmax=1)
# # plt.imshow(np.fft.irfft2(uend[..., 1]))#, vmin=0, vmax=1)
# plt.title('v')
# plt.colorbar()
# # plt.figure()
# # plt.imshow(uend[..., 0] + uend[..., 1])
# # plt.title('sum')
# # plt.colorbar()
# plt.show()
# # exit()
if rank == 0:
# filter statistics by type (number of iterations)
filtered_stats = filter_stats(stats, type='niter')
# convert filtered statistics to list of iterations count, sorted by process
iter_counts = sort_stats(filtered_stats, sortby='time')
niters = np.array([item[1] for item in iter_counts])
out = f' Min/Mean/Max number of iterations: ' \
f'{np.min(niters):4.2f} / {np.mean(niters):4.2f} / {np.max(niters):4.2f}'
f.write(out + '\n')
print(out)
out = ' Range of values for number of iterations: %2i ' % np.ptp(niters)
f.write(out + '\n')
print(out)
out = ' Position of max/min number of iterations: %2i -- %2i' % \
(int(np.argmax(niters)), int(np.argmin(niters)))
f.write(out + '\n')
print(out)
out = ' Std and var for number of iterations: %4.2f -- %4.2f' % (float(np.std(niters)), float(np.var(niters)))
f.write(out + '\n')
print(out)
timing = sort_stats(filter_stats(stats, type='timing_run'), sortby='time')
out = f'Time to solution: {timing[0][1]:6.4f} sec.'
f.write(out + '\n')
print(out)
f.write('\n')
print()
f.close()
def main():
"""
Little helper routine to run the whole thing
Note: This can also be run with "mpirun -np 2 python grayscott.py"
"""
# run_simulation(spectral=False, splitting_type='diffusion', ml=False, num_procs=1)
# run_simulation(spectral=True, splitting_type='diffusion', ml=False, num_procs=1)
# run_simulation(spectral=True, splitting_type='linear', ml=False, num_procs=1)
# run_simulation(spectral=False, splitting_type='diffusion', ml=True, num_procs=1)
# run_simulation(spectral=True, splitting_type='diffusion', ml=True, num_procs=1)
# run_simulation(spectral=False, splitting_type='diffusion', ml=True, num_procs=10)
# run_simulation(spectral=True, splitting_type='diffusion', ml=True, num_procs=10)
# run_simulation(spectral=False, splitting_type='mi_diffusion', ml=False, num_procs=1)
run_simulation(spectral=True, splitting_type='mi_diffusion', ml=False, num_procs=1)
# run_simulation(spectral=False, splitting_type='mi_linear', ml=False, num_procs=1)
# run_simulation(spectral=True, splitting_type='mi_linear', ml=False, num_procs=1)
if __name__ == "__main__":
main()
| bsd-2-clause | 632,620,661,393,249,800 | 38.97 | 120 | 0.643858 | false |
sbates130272/libdonard | scripts/imgrep.py | 1 | 6622 | #!/usr/bin/env python
########################################################################
##
## Copyright 2014 PMC-Sierra, Inc.
##
## Licensed under the Apache License, Version 2.0 (the "License"); you
## may not use this file except in compliance with the License. You may
## obtain a copy of the License at
## http://www.apache.org/licenses/LICENSE-2.0 Unless required by
## applicable law or agreed to in writing, software distributed under the
## License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
## CONDITIONS OF ANY KIND, either express or implied. See the License for
## the specific language governing permissions and limitations under the
## License.
##
########################################################################
########################################################################
##
## Author: Logan Gunthorpe
##
## Date: Oct 23, 2014
##
## Description:
## Image grep test/example script
##
########################################################################
import os
import sys
import Image
import numpy as np
from numpy import fft
import utils
import time
data = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "data"))
class ImageGrepError(Exception):
pass
class ImageGrep(object):
max_size = 8192
threshold = 150
tiff = False
def __init__(self, needle, **kws):
for k,v in kws.iteritems():
if hasattr(self, k):
setattr(self, k, v)
data_needle = os.path.join(data, needle)
if not os.path.exists(needle) and os.path.exists(data_needle):
needle = data_needle
im = Image.open(needle, 'r').convert("L")
self.needle = np.array(im) / 255.
self.needle_size = im.size
revneedle = self.needle[::-1,::-1]
revneedle = self._padimg(revneedle,
self._next_highest_pow2(*revneedle.shape))
edge_detect = np.ones((3,3)) * -1./8
edge_detect[1,1] = 1
edge_detect = self._padimg(edge_detect, revneedle.shape)
edge_needle = self._convolve(revneedle, edge_detect)
self.needle = self._padimg(edge_needle, (self.max_size,
self.max_size))
self.pixels = 0
self.bytes = 0
def _next_highest_pow2(self, *args):
return tuple(1 << (x-1).bit_length() for x in args)
def _padimg(self, a, shape):
padspace = np.zeros(shape)
padspace[:a.shape[0], :a.shape[1]] = a
return padspace
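    # The needle is stored reversed along both axes (see __init__), so the pointwise
    # FFT product below is a circular convolution that amounts to cross-correlating
    # the edge-filtered needle with the haystack (convolution theorem).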
def _convolve(self, a, b):
haystack_fft = fft.rfft2(a)
needle_fft = fft.rfft2(b)
return fft.irfft2((haystack_fft * needle_fft))
def _save_image(self, m, fname):
mx = np.amax(m)
mn = np.amin(m)
if mx > 1.0 or mn < 0:
m = m.copy()
m += -mn
m /= (mx-mn)
Image.fromarray(np.uint8(m*255)).save(fname)
def __call__(self, haystack, *args, **kws):
if utils.istiff(haystack) and self.tiff:
pass
elif utils.isjpeg(haystack) and not self.tiff:
pass
else:
return []
im = Image.open(haystack, 'r').convert("L")
self.pixels += im.size[0] * im.size[1]
self.bytes += os.path.getsize(haystack)
haystack = np.array(im) / 255.
        # Pad dimensions to the next highest power of 2, since this is
        # more efficient for the FFT
haystack = self._padimg(haystack, self._next_highest_pow2(*haystack.shape))
if max(haystack.shape) > self.max_size:
raise ImageGrepError("Image too large. Increase max_size.")
needle = self.needle[:haystack.shape[0],:haystack.shape[1]]
conv = self._convolve(needle, haystack)
#self._save_image((conv > self.threshold) * 200, "conv.jpg")
w, h = self.needle_size
results = {}
for x, y in zip(*np.nonzero(conv > self.threshold)):
xx, yy = y-w+1, x-h
if xx < 0 or yy < 0 or xx > im.size[0] or yy > im.size[1]:
continue
for (xr, yr, wr, hr), rr in results.iteritems():
if (abs(xx-xr) < h or abs(yy-yr) < w):
if rr < conv[x,y]:
results[xx,yy,w,h] = conv[x,y]
del results[xr,yr,wr,hr]
break
else:
results[xx,yy,w,h] = conv[x, y]
return results.items()
def print_results(self, haystack, *args, **kws):
try:
for (x,y,w,h), r in self(haystack, *args, **kws):
print "%s %5d+%-3d %5d+%-3d (%.2f)" % (haystack+":", x, w, y, h, r)
sys.stdout.flush()
except ImageGrepError, e:
print >> sys.stderr, "%s (%s) " % (haystack, str(e))
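# Usage sketch (hypothetical file names): ImageGrep('pmclogo.png')('photo.jpg')
# returns [((x, y, w, h), score), ...] for each match above the detection threshold.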
if __name__ == "__main__":
import optparse
usage = "usage: %prog [options] IMAGE1 [IMAGE2 DIR1 ...]"
parser = optparse.OptionParser(usage = usage)
parser.add_option("--tiff", action="store_true",
help="process TIFF files rather than JPEGs")
parser.add_option("-M", "--max-size", action="store", type="int",
default=ImageGrep.max_size,
help="maximum supported image size, default: %default")
parser.add_option("-t", "--threshold", action="store", type="float",
default=ImageGrep.threshold,
help="detection threshold, default: %default")
parser.add_option("-n", "--needle", action="store", type="string",
default="pmclogo.png", help="needle image to search for, default : %default")
(options, args) = parser.parse_args()
if not args:
parser.print_usage()
sys.exit(-1)
imgrep = ImageGrep(**options.__dict__)
try:
starttime = time.time()
utils.run(args, imgrep.print_results)
except KeyboardInterrupt:
pass
except IOError, e:
print e
finally:
duration = time.time() - starttime
time.sleep(0.5)
print >> sys.stderr
print >> sys.stderr, ("%.2f%spixels in %.1fs %.2f%spixels/s" %
(utils.si_suffix(imgrep.pixels) +
(duration, ) +
utils.si_suffix(imgrep.pixels / duration)))
print >> sys.stderr, ("%.2f%sBytes in %.1fs %.2f%sB/s" %
(utils.si_suffix(imgrep.bytes) +
(duration, ) +
utils.si_suffix(imgrep.bytes / duration)))
| apache-2.0 | 2,855,813,345,342,170,000 | 32.958974 | 99 | 0.517064 | false |
edy89/Compugrafica | level2.py | 1 | 14250 | import pygame
import sys
import random
import level1_main
import menu
from pygame.locals import *
pygame.init()
reloj = pygame.time.Clock()
opciones = [
("Jugar", level1_main.Nivel_1),
("Creditos", menu.creditos),
("Salir", menu.salir_del_programa)
]
menu = menu.Menu(opciones = opciones)
def Nivel_2(pantalla):
level1_main.pygame.mixer.music.play()
puntos_influencia = 20
contador = 0
contador_inicio = 0
num_jugadores = 0
pag = 0
level_2 = 0
bool_pag = False
bool_level2 = False
bool_lose = False
bandera = True
cursor = level1_main.Cursor()
hecho = False
jugador_aux = level1_main.Jugador('Sprites/nave2.png')
jugador_aux_3 = level1_main.Jugador('Sprites/nave2.png')
jugador_aux_2 = level1_main.Jugador('Sprites/nave.png')
jugador_aux_4 = level1_main.Jugador('Sprites/nave3.png')
    # initialization of the Boss enemy
enemigo = level1_main.Boss('Sprites/Bossnave.png')
enemigo.rect.x = level1_main.DIMENSION_VENTANA[0] - 300
enemigo.rect.y = random.randrange(0, level1_main.DIMENSION_VENTANA[1] - 300)
enemigo.disparar = random.randrange(30, 100)
vida_Boss = level1_main.fuente3.render(str(enemigo.vida), 0, level1_main.Blanco)
    ############### LISTS #######################
lista_todos = pygame.sprite.Group()
lista_enemigo = pygame.sprite.Group()
lista_jugadores1 = pygame.sprite.Group()
lista_jugadores2 = pygame.sprite.Group()
lista_jugadores3 = pygame.sprite.Group()
lista_balas_jugadores = pygame.sprite.Group()
lista_balas_enemigo = pygame.sprite.Group()
###########################################
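    # Main loop (summary): the player buys ships with influence points (cost 3, 2 or 5
    # depending on the ship), the Boss fires five-way laser bursts, each hit on the
    # Boss refunds one influence point, and the level is won when the Boss's life
    # reaches zero or lost when all player ships are destroyed.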
while not hecho and bandera:
pos = pygame.mouse.get_pos()
for evento in pygame.event.get():
if evento.type == pygame.QUIT:
hecho = True
quit()
if evento.type == pygame.MOUSEBUTTONDOWN:
                if cursor.colliderect(level1_main.boton.rect): # ship 1 on screen
level1_main.seleccion_nave.play()
jugador_aux = jugador_aux_3
contador += 1
lista_todos.add(jugador_aux)
                if cursor.colliderect(level1_main.boton1.rect): # ship 2 on screen
level1_main.seleccion_nave.play()
jugador_aux = jugador_aux_2
contador += 1
lista_todos.add(jugador_aux)
                if cursor.colliderect(level1_main.boton2.rect): # ship 3 on screen
level1_main.seleccion_nave.play()
jugador_aux = jugador_aux_4
contador += 1
lista_todos.add(jugador_aux)
if cursor.colliderect(level1_main.boton_inicio.rect) and contador_inicio == 0:
lista_enemigo.add(enemigo)
lista_todos.add(enemigo)
contador_inicio += 1
level1_main.pygame.mixer.music.stop()
level1_main.juego.play()
if evento.type == pygame.MOUSEBUTTONUP and contador > 0:
contador -= 1
if puntos_influencia > 0:
if jugador_aux == jugador_aux_2:
if puntos_influencia >= 3:
nave = level1_main.Jugador('Sprites/nave.png')
nave.vida += 1
lista_jugadores1.add(nave)
lista_todos.add(nave)
num_jugadores += 1
puntos_influencia -= 3
bool_lose = True
if jugador_aux == jugador_aux_3:
if puntos_influencia >= 2:
nave = level1_main.Jugador('Sprites/nave2.png')
lista_jugadores2.add(nave)
lista_todos.add(nave)
num_jugadores += 1
puntos_influencia -= 2
bool_lose = True
if jugador_aux == jugador_aux_4:
if puntos_influencia >= 5:
nave = level1_main.Jugador('Sprites/nave3.png')
nave.vida += 2
lista_jugadores3.add(nave)
lista_todos.add(nave)
num_jugadores += 1
puntos_influencia -= 5
bool_lose = True
if pos[1] > 135:
nave.rect.x = pos[0] - 40
nave.rect.y = pos[1] - 40
else:
nave.rect.x = pos[0] - 40 + 90
nave.rect.y = pos[1] - 40 + 135
lista_todos.remove(jugador_aux)
if contador_inicio == 1:
            ##### Loops that spawn the enemy bullets
for a in lista_enemigo:
if a.disparar == 0:
balae1 = level1_main.Laser1('Sprites/Sol.png')
balae2 = level1_main.Laser2('Sprites/Sol.png')
balae3 = level1_main.Laser3('Sprites/Sol.png')
balae4 = level1_main.Laser4('Sprites/Sol.png')
balae5 = level1_main.Laser5('Sprites/Sol.png')
balae1.rect.x = a.rect.x - 30
balae1.rect.y = a.rect.y
balae2.rect.x = a.rect.x - 30
balae2.rect.y = a.rect.y
balae3.rect.x = a.rect.x - 30
balae3.rect.y = a.rect.y
balae4.rect.x = a.rect.x - 30
balae4.rect.y = a.rect.y
balae5.rect.x = a.rect.x - 30
balae5.rect.y = a.rect.y
lista_balas_enemigo.add(balae1)
lista_todos.add(balae1)
lista_balas_enemigo.add(balae2)
lista_todos.add(balae2)
lista_balas_enemigo.add(balae3)
lista_todos.add(balae3)
lista_balas_enemigo.add(balae4)
lista_todos.add(balae4)
lista_balas_enemigo.add(balae5)
lista_todos.add(balae5)
            #### spawning of projectiles for the ships ("players")
for e in lista_jugadores1:
if e.disparar == 0:
balaj = level1_main.Proyectil_1('Sprites/proyectil.png')
balaj.rect.x = e.rect.x + 40
balaj.rect.y = e.rect.y + 10
lista_balas_jugadores.add(balaj)
lista_todos.add(balaj)
level1_main.sonido.play()
for f in lista_jugadores2:
if f.disparar == 0:
balaj = level1_main.Proyectil('Sprites/proyectil.png')
balaj.rect.x = f.rect.x + 40
balaj.rect.y = f.rect.y + 10
lista_balas_jugadores.add(balaj)
lista_todos.add(balaj)
level1_main.sonido.play()
for g in lista_jugadores3:
if g.disparar == 0:
balaj = level1_main.Proyectil_2('Sprites/proyectil.png')
balaj2 = level1_main.Proyectil_3('Sprites/proyectil.png')
balaj2.rect.x = g.rect.x + 40
balaj.rect.x = g.rect.x + 40
balaj2.rect.y = g.rect.y + 10
balaj.rect.y = g.rect.y + 10
lista_balas_jugadores.add(balaj)
lista_balas_jugadores.add(balaj2)
lista_todos.add(balaj)
lista_todos.add(balaj2)
level1_main.sonido.play()
            ############################# COLLISIONS ########################################
            ##### Collision of a player with an enemy bullet ###########
for h1 in lista_jugadores1:
ls_impactoe = pygame.sprite.spritecollide(h1, lista_balas_enemigo, True)
for imp1 in ls_impactoe:
print (h1.vida)
h1.vida -= 1
if h1.vida == 0:
lista_jugadores1.remove(h1)
lista_todos.remove(h1)
num_jugadores -= 1
level1_main.desaparece.play()
for h2 in lista_jugadores2:
ls_impactoe = pygame.sprite.spritecollide(h2, lista_balas_enemigo, True)
for imp2 in ls_impactoe:
print (h2.vida)
h2.vida -= 1
if h2.vida == 1:
lista_jugadores2.remove(h2)
lista_todos.remove(h2)
num_jugadores -= 1
level1_main.desaparece.play()
for h3 in lista_jugadores3:
ls_impactoe = pygame.sprite.spritecollide(h3, lista_balas_enemigo, True)
for imp3 in ls_impactoe:
print (h3.vida)
h3.vida -= 1
if h3.vida == 1:
lista_jugadores3.remove(h3)
lista_todos.remove(h3)
num_jugadores -= 1
level1_main.desaparece.play()
            ########### Collision of a player bullet with an enemy ###########################
for k in lista_balas_jugadores:
ls_impacto1 = pygame.sprite.spritecollide(k, lista_enemigo, False)
for impacto1 in ls_impacto1:
lista_balas_jugadores.remove(k)
lista_todos.remove(k)
puntos_influencia += 1
print("Boss_life: %d" % enemigo.vida )
vida_Boss = level1_main.fuente3.render(str(enemigo.vida), 0, level1_main.Blanco)
level1_main.explosion.play()
if enemigo.vida > 0:
enemigo.vida -= 1
########################################################################################
jugador_aux.rect.x = pos[0] - 30
jugador_aux.rect.y = pos[1] - 30
if enemigo.vida == 0:
level_2 = 1
bool_level2 = True
bandera = False
level1_main.juego.stop()
level1_main.win.play()
if num_jugadores == 0 and bool_lose:
pag += 1
bandera = False
bool_pag = True
level1_main.juego.stop()
level1_main.pygame.mixer.music.stop()
level1_main.explosion2.play()
level1_main.muerto.play()
pantalla.blit(level1_main.fondo, [0, 0])
while (not bandera):
for event in pygame.event.get():
if event.type == pygame.QUIT:
quit()
if event.type == pygame.KEYUP and bool_pag:
pag += 1
if event.type == pygame.KEYUP and bool_level2:
level_2 += 1
if pag == 1:
pantalla.blit(level1_main.muerte, [0, 0])
level1_main.to_level_2.play()
if pag == 2:
level1_main.juego.stop()
level1_main.pygame.mixer.music.stop()
while not bandera:
for e in pygame.event.get():
if e.type == QUIT:
bandera = True
pantalla.blit(level1_main.fondo, (0, 0))
menu.actualizar()
menu.imprimir(pantalla)
pantalla.blit(level1_main.Titulo, (280, level1_main.DIMENSION_VENTANA[1] / 2 - 280))
level1_main.pygame.mixer.music.play()
level1_main.muerto.stop()
pygame.display.flip()
level1_main.muerto.stop()
reloj.tick(60)
if level_2 == 1:
pantalla.fill(level1_main.NEGRO)
pantalla.blit(level1_main.texto3, (90, level1_main.DIMENSION_VENTANA[1] / 2 - 70))
if level_2 == 2:
pantalla.fill(level1_main.NEGRO)
pantalla.blit(level1_main.texto7, (250, level1_main.DIMENSION_VENTANA[1] / 2 - 70))
if level_2 == 3:
level1_main.win.stop()
while not bandera:
for e in pygame.event.get():
if e.type == QUIT:
bandera = True
quit()
pantalla.blit(level1_main.fondo, (0, 0))
menu.actualizar()
menu.imprimir(pantalla)
pantalla.blit(level1_main.Titulo, (280, level1_main.DIMENSION_VENTANA[1] / 2 - 280))
level1_main.pygame.mixer.music.play()
level1_main.juego.stop()
level1_main.muerto.stop()
pygame.display.flip()
reloj.tick(60)
pygame.display.flip()
puntos_pantalla = level1_main.fuente3.render(str(puntos_influencia), 0, level1_main.Blanco)
cursor.update()
lista_enemigo.update()
lista_jugadores1.update()
lista_jugadores2.update()
lista_jugadores3.update()
lista_balas_jugadores.update()
lista_balas_enemigo.update()
level1_main.boton.update(pantalla,cursor)
level1_main.boton1.update(pantalla,cursor)
level1_main.boton2.update(pantalla,cursor)
level1_main.boton_inicio.update(pantalla, cursor)
level1_main.boton_reset.update(pantalla, cursor)
pantalla.blit(puntos_pantalla, (140,540))
pantalla.blit(vida_Boss, (880, 10))
pantalla.blit(level1_main.texto8, (700, 10))
pantalla.blit(level1_main.texto6, (12, 540))
pantalla.blit(level1_main.texto9,(10,65))
pantalla.blit(level1_main.texto10, (80, 65))
pantalla.blit(level1_main.texto11, (150, 65))
lista_todos.draw(pantalla)
pygame.display.flip()
reloj.tick(60)
| gpl-3.0 | -8,573,626,075,694,006,000 | 40.537313 | 122 | 0.479368 | false |
GoodCloud/johnny-cache | johnny/transaction.py | 1 | 10890 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.db import transaction as django_transaction
from django.db import connection
try:
from django.db import DEFAULT_DB_ALIAS
except:
    DEFAULT_DB_ALIAS = None
try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.3, 2.4 fallback.
import django
class TransactionManager(object):
"""TransactionManager is a wrapper around a cache_backend that is
transaction aware.
If we are in a transaction, it will return the locally cached version.
* On rollback, it will flush all local caches
* On commit, it will push them up to the real shared cache backend
(ex. memcached).
"""
_patched_var = False
def __init__(self, cache_backend, keygen):
from johnny import cache, settings
self.timeout = settings.MIDDLEWARE_SECONDS
self.prefix = settings.MIDDLEWARE_KEY_PREFIX
self.cache_backend = cache_backend
self.local = cache.local
self.keygen = keygen(self.prefix)
self._originals = {}
self._dirty_backup = {}
self.local['trans_sids'] = {}
def _get_sid(self, using=None):
if 'trans_sids' not in self.local:
self.local['trans_sids'] = {}
d = self.local['trans_sids']
if self.has_multi_db():
if using is None:
using = DEFAULT_DB_ALIAS
else:
using = 'default'
if using not in d:
d[using] = []
return d[using]
def _clear_sid_stack(self, using=None):
if self.has_multi_db():
if using is None:
using = DEFAULT_DB_ALIAS
else:
using = 'default'
if using in self.local.get('trans_sids', {}):
del self.local['trans_sids']
def has_multi_db(self):
if django.VERSION[:2] in ((1, 2), (1, 3)):
return True
return False
def is_managed(self):
return django_transaction.is_managed()
def get(self, key, default=None, using=None):
if self.is_managed() and self._patched_var:
val = self.local.get(key, None)
if val: return val
if self._uses_savepoints():
val = self._get_from_savepoints(key, using)
if val: return val
return self.cache_backend.get(key, default)
def _get_from_savepoints(self, key, using=None):
sids = self._get_sid(using)
cp = list(sids)
cp.reverse()
for sid in cp:
if key in self.local[sid]:
return self.local[sid][key]
def _trunc_using(self, using):
if self.has_multi_db():
if using is None:
using = DEFAULT_DB_ALIAS
else:
using = 'default'
if len(using) > 100:
using = using[0:68] + self.keygen.gen_key(using[68:])
return using
def set(self, key, val, timeout=None, using=None):
"""
Set will be using the generational key, so if another thread
bumps this key, the localstore version will still be invalid.
If the key is bumped during a transaction it will be new
to the global cache on commit, so it will still be a bump.
"""
if timeout is None:
timeout = self.timeout
if self.is_managed() and self._patched_var:
self.local[key] = val
else:
self.cache_backend.set(key, val, timeout)
def _clear(self, using=None):
if self.has_multi_db():
self.local.clear('%s_%s_*'%(self.prefix, self._trunc_using(using)))
else:
self.local.clear('%s_*'%self.prefix)
def _flush(self, commit=True, using=None):
"""
        Flushes the internal cache, either pushing it to the memcache or rolling it back
"""
if commit:
# XXX: multi-set?
if self._uses_savepoints():
self._commit_all_savepoints(using)
if self.has_multi_db():
c = self.local.mget('%s_%s_*'%(self.prefix, self._trunc_using(using)))
else:
c = self.local.mget('%s_*'%self.prefix)
for key, value in c.iteritems():
self.cache_backend.set(key, value, self.timeout)
else:
if self._uses_savepoints():
self._rollback_all_savepoints(using)
self._clear(using)
self._clear_sid_stack(using)
def _patched(self, original, commit=True):
@wraps(original)
def newfun(using=None):
#1.2 version
original(using=using)
self._flush(commit=commit, using=using)
@wraps(original)
def newfun11():
#1.1 version
original()
self._flush(commit=commit)
if django.VERSION[:2] == (1,1):
return newfun11
elif django.VERSION[:2] in ((1,2), (1,3)):
return newfun
return original
def _uses_savepoints(self):
return connection.features.uses_savepoints
def _sid_key(self, sid, using=None):
if using != None:
return 'trans_savepoint_%s_%s'%(using, sid)
return 'trans_savepoint_%s'%sid
def _create_savepoint(self, sid, using=None):
key = self._sid_key(sid, using)
#get all local dirty items
if self.has_multi_db():
c = self.local.mget('%s_%s_*'%(self.prefix, self._trunc_using(using)))
else:
c = self.local.mget('%s_*'%self.prefix)
#store them to a dictionary in the localstore
if key not in self.local:
self.local[key] = {}
for k, v in c.iteritems():
self.local[key][k] = v
#clear the dirty
self._clear(using)
#append the key to the savepoint stack
sids = self._get_sid(using)
sids.append(key)
def _rollback_savepoint(self, sid, using=None):
sids = self._get_sid(using)
key = self._sid_key(sid, using)
stack = []
try:
popped = None
while popped != key:
popped = sids.pop()
stack.insert(0, popped)
#delete items from localstore
for i in stack:
del self.local[i]
#clear dirty
self._clear(using)
except IndexError, e:
#key not found, don't delete from localstore, restore sid stack
for i in stack:
sids.insert(0, i)
def _commit_savepoint(self, sid, using=None):
#commit is not a commit but is in reality just a clear back to that savepoint
#and adds the items back to the dirty transaction.
key = self._sid_key(sid, using)
sids = self._get_sid(using)
stack = []
try:
popped = None
while popped != key:
popped = sids.pop()
stack.insert(0, popped)
self._store_dirty(using)
for i in stack:
for k, v in self.local[i].iteritems():
self.local[k] = v
del self.local[i]
self._restore_dirty(using)
except IndexError, e:
for i in stack:
sids.insert(0, i)
def _commit_all_savepoints(self, using=None):
sids = self._get_sid(using)
if sids:
self._commit_savepoint(sids[0], using)
def _rollback_all_savepoints(self, using=None):
sids = self._get_sid(using)
if sids:
self._rollback_savepoint(sids[0], using)
def _store_dirty(self, using=None):
if self.has_multi_db():
c = self.local.mget('%s_%s_*'%(self.prefix, self._trunc_using(using)))
else:
c = self.local.mget('%s_*'%self.prefix)
backup = 'trans_dirty_store_%s'%self._trunc_using(using)
self.local[backup] = {}
for k, v in c.iteritems():
self.local[backup][k] = v
self._clear(using)
def _restore_dirty(self, using=None):
backup = 'trans_dirty_store_%s'%self._trunc_using(using)
for k, v in self.local.get(backup, {}).iteritems():
self.local[k] = v
del self.local[backup]
def _savepoint(self, original):
@wraps(original)
def newfun(using=None):
if using != None:
sid = original(using=using)
else:
sid = original()
if self._uses_savepoints():
self._create_savepoint(sid, using)
return sid
return newfun
def _savepoint_rollback(self, original):
def newfun(sid, *args, **kwargs):
original(sid, *args, **kwargs)
if self._uses_savepoints():
if len(args) == 2:
using = args[1]
else:
using = kwargs.get('using', None)
self._rollback_savepoint(sid, using)
return newfun
def _savepoint_commit(self, original):
def newfun(sid, *args, **kwargs):
original(sid, *args, **kwargs)
if self._uses_savepoints():
if len(args) == 1:
using = args[0]
else:
using = kwargs.get('using', None)
self._commit_savepoint(sid, using)
return newfun
def _getreal(self, name):
return getattr(django_transaction, 'real_%s' % name,
getattr(django_transaction, name))
def patch(self):
"""
        This function monkey patches commit and rollback so that
        writes to the cache do not happen until commit (unless our state isn't managed).
        Savepoint functions are patched as well, so the local cache state follows savepoint commits and rollbacks.
"""
if not self._patched_var:
self._originals['rollback'] = self._getreal('rollback')
self._originals['commit'] = self._getreal('commit')
self._originals['savepoint'] = self._getreal('savepoint')
self._originals['savepoint_rollback'] = self._getreal('savepoint_rollback')
self._originals['savepoint_commit'] = self._getreal('savepoint_commit')
django_transaction.rollback = self._patched(django_transaction.rollback, False)
django_transaction.commit = self._patched(django_transaction.commit, True)
django_transaction.savepoint = self._savepoint(django_transaction.savepoint)
django_transaction.savepoint_rollback = self._savepoint_rollback(django_transaction.savepoint_rollback)
django_transaction.savepoint_commit = self._savepoint_commit(django_transaction.savepoint_commit)
self._patched_var = True
def unpatch(self):
for fun in self._originals:
setattr(django_transaction, fun, self._originals[fun])
self._patched_var = False
| mit | -1,120,410,348,727,484,800 | 33.245283 | 115 | 0.551974 | false |
the-fascinator/fascinator-portal | src/main/config/portal/default/default/scripts/download.py | 1 | 7607 | import os
from com.googlecode.fascinator.api.indexer import SearchRequest
from com.googlecode.fascinator.api.storage import StorageException
from com.googlecode.fascinator.common.solr import SolrDoc, SolrResult
from java.io import ByteArrayInputStream, ByteArrayOutputStream
from java.lang import Boolean
from java.net import URLDecoder
from org.apache.commons.io import IOUtils
class DownloadData:
def __init__(self):
pass
def __activate__(self, context):
self.services = context["Services"]
self.contextPath = context["contextPath"]
self.pageName = context["pageName"]
self.portalId = context["portalId"]
self.request = context["request"]
self.response = context["response"]
self.formData = context["formData"]
self.page = context["page"]
self.log = context["log"]
self.__metadata = SolrDoc(None)
object = None
payload = None
# URL basics
basePath = self.portalId + "/" + self.pageName
fullUri = URLDecoder.decode(self.request.getAttribute("RequestURI"))
uri = fullUri[len(basePath)+1:]
# Turn our URL into objects
object, payload = self.__resolve(uri)
if object is None:
if uri.endswith("/"):
self.log.error("Object 404: '{}'", uri)
self.response.setStatus(404);
writer = self.response.getPrintWriter("text/plain; charset=UTF-8")
writer.println("Object not found")
writer.close()
return
else:
# Sometimes adding a slash to the end will resolve the problem
self.log.error("Redirecting, object 404: '{}'", uri)
self.response.sendRedirect(context["urlBase"] + fullUri + "/")
return
# Ensure solr metadata is useable
oid = object.getId()
if self.isIndexed():
self.__metadata = self.__solrData.getResults().get(0)
else:
self.__metadata.getJsonObject().put("id", oid)
#print "URI='%s' OID='%s' PID='%s'" % (uri, object.getId(), payload.getId())
# Security check
if self.isAccessDenied(uri):
# Redirect to the object page for standard access denied error
self.response.sendRedirect(context["portalPath"] + "/detail/" + object.getId())
return
## The byte range cache will check for byte range requests first
self.cache = self.services.getByteRangeCache()
processed = self.cache.processRequest(self.request, self.response, payload)
if processed:
# We don't need to return data, the cache took care of it.
return
# Now the 'real' work of payload retrieval
if payload is not None:
filename = os.path.split(payload.getId())[1]
mimeType = payload.getContentType()
if mimeType == "application/octet-stream":
self.response.setHeader("Content-Disposition", "attachment; filename=%s" % filename)
type = payload.getContentType()
            # Encode textual responses before sending
if type is not None and type.startswith("text/"):
out = ByteArrayOutputStream()
IOUtils.copy(payload.open(), out)
payload.close()
writer = self.response.getPrintWriter(type + "; charset=UTF-8")
writer.println(out.toString("UTF-8"))
writer.close()
# Other data can just be streamed out
else:
if type is None:
# Send as raw data
out = self.response.getOutputStream("application/octet-stream")
else:
out = self.response.getOutputStream(type)
IOUtils.copy(payload.open(), out)
payload.close()
object.close()
out.close()
else:
self.response.setStatus(404)
writer = self.response.getPrintWriter("text/plain; charset=UTF-8")
writer.println("Resource not found: uri='%s'" % uri)
writer.close()
def getAllowedRoles(self):
metadata = self.getMetadata()
if metadata is not None:
return metadata.getList("security_filter")
else:
return []
def getMetadata(self):
return self.__metadata
def isAccessDenied(self,uri):
# Admins always have access
if self.page.authentication.is_admin():
return False
slash = uri.find("/")
if slash == -1:
return None, None
oid = uri[:slash]
objectMetadata = self.services.getStorage().getObject(oid).getMetadata()
if objectMetadata is not None:
current_user = self.page.authentication.get_username()
owner = objectMetadata.getProperty("owner")
if current_user == owner:
return False
# Check for normal access
myRoles = self.page.authentication.get_roles_list()
allowedRoles = self.getAllowedRoles()
if myRoles is None or allowedRoles is None:
return True
for role in myRoles:
if role in allowedRoles:
return False
return True
def isDetail(self):
preview = Boolean.parseBoolean(self.formData.get("preview", "false"))
return not (self.request.isXHR() or preview)
def isIndexed(self):
found = self.__solrData.getNumFound()
return (found is not None) and (found == 1)
def __resolve(self, uri):
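        # Expects a URI of the form "<object id>/<payload id>"; an empty payload
        # id falls back to the object's source payload.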
# Grab OID from the URL
slash = uri.find("/")
if slash == -1:
return None, None
oid = uri[:slash]
# Query solr for this object
self.__loadSolrData(oid)
if not self.isIndexed():
print "WARNING: Object '%s' not found in index" % oid
sid = None
else:
# Query storage for this object
sid = self.__solrData.getResults().get(0).getFirst("storage_id")
try:
if sid is None:
# Use the URL OID
object = self.services.getStorage().getObject(oid)
else:
# We have a special storage ID from the index
object = self.services.getStorage().getObject(sid)
except StorageException, e:
#print "Failed to access object: %s" % (str(e))
return None, None
# Grab the payload from the rest of the URL
pid = uri[slash+1:]
if pid == "":
# We want the source
pid = object.getSourceId()
# Now get the payload from storage
try:
payload = object.getPayload(pid)
except StorageException, e:
#print "Failed to access payload: %s" % (str(e))
return None, None
# We're done
return object, payload
def __loadSolrData(self, oid):
portal = self.page.getPortal()
query = 'id:"%s"' % oid
if self.isDetail() and portal.getSearchQuery():
query += " AND " + portal.getSearchQuery()
req = SearchRequest(query)
req.addParam("fq", 'item_type:"object"')
if self.isDetail():
req.addParam("fq", portal.getQuery())
out = ByteArrayOutputStream()
self.services.getIndexer().search(req, out)
self.__solrData = SolrResult(ByteArrayInputStream(out.toByteArray()))
| gpl-2.0 | 963,536,829,417,706,400 | 35.927184 | 100 | 0.567767 | false |
Azure/azure-sdk-for-python | sdk/databoxedge/azure-mgmt-databoxedge/azure/mgmt/databoxedge/v2019_03_01/operations/_orders_operations.py | 1 | 20281 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class OrdersOperations(object):
"""OrdersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.databoxedge.v2019_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_data_box_edge_device(
self,
device_name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.OrderList"]
"""Lists all the orders related to a data box edge/gateway device.
Lists all the orders related to a data box edge/gateway device.
:param device_name: The device name.
:type device_name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OrderList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.databoxedge.v2019_03_01.models.OrderList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.OrderList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_data_box_edge_device.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('OrderList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_data_box_edge_device.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/orders'} # type: ignore
def get(
self,
device_name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.Order"
"""Gets a specific order by name.
Gets a specific order by name.
:param device_name: The device name.
:type device_name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Order, or the result of cls(response)
:rtype: ~azure.mgmt.databoxedge.v2019_03_01.models.Order
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Order"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Order', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/orders/default'} # type: ignore
def _create_or_update_initial(
self,
device_name, # type: str
resource_group_name, # type: str
order, # type: "_models.Order"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.Order"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.Order"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(order, 'Order')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Order', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/orders/default'} # type: ignore
def begin_create_or_update(
self,
device_name, # type: str
resource_group_name, # type: str
order, # type: "_models.Order"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.Order"]
"""Creates or updates an order.
Creates or updates an order.
:param device_name: The device name.
:type device_name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param order: The order to be created or updated.
:type order: ~azure.mgmt.databoxedge.v2019_03_01.models.Order
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either Order or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.databoxedge.v2019_03_01.models.Order]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Order"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
device_name=device_name,
resource_group_name=resource_group_name,
order=order,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Order', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/orders/default'} # type: ignore
def _delete_initial(
self,
device_name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-03-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/orders/default'} # type: ignore
def begin_delete(
self,
device_name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the order related to the device.
Deletes the order related to the device.
:param device_name: The device name.
:type device_name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
device_name=device_name,
resource_group_name=resource_group_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/orders/default'} # type: ignore
| mit | -9,060,120,008,967,847,000 | 46.385514 | 213 | 0.633105 | false |
digiholic/universalSmashSystem | engine/abstractFighter.py | 1 | 75599 | import settingsManager
import pygame
import xml.etree.ElementTree as ElementTree
import xml.dom.minidom
import os
import engine.baseActions as baseActions
import engine.collisionBox as collisionBox
import weakref
import engine.hurtbox as hurtbox
import math
import numpy
import spriteManager
import engine.article as article
import engine.controller as controller
import engine.actionLoader as actionLoader
import engine.articleLoader
from global_functions import *
class AbstractFighter():
"""The Abstract Fighter is an individual fighter in the battle. It holds all of the data
needed to create, control, and clear a fighter. It is created initially by the Character Select Screen,
as a container for things like icons and costume selections. It becomes a 'real' fighter when Initialize()
is called, creating an object that can interact with the world.
"""
# Top Level fighter variables #
base_dir = ''
player_num = 0
xml_data = None
# Data loaded from XML #
name = 'Null'
franchise_icon_path = 'sprites/default_franchise_icon.png'
css_icon_path = './sprites/icon_unknown.png'
css_portrait_path =''
sprite_directory = 'sprites/'
sprite_prefix = ''
sprite_width = 64
default_sprite = 'sandbag_idle'
sprite = None
sprite_flip = 'right'
article_sprite_path = ''
article_file = ''
sound_path = ''
action_file = baseActions.__file__
default_stats = {
'weight': 100,
'gravity': .5,
'max_fall_speed': 20.0,
'max_ground_speed': 7.0,
'run_speed': 11.0,
'max_air_speed': 5.5,
'aerial_transition_speed': 9.0,
'crawl_speed': 2.5,
'dodge_speed': 8.5,
'friction': 0.3,
'static_grip': 0.3,
'pivot_grip': 0.6,
'air_resistance': 0.2,
'air_control': 0.2,
'jumps': 1,
'jump_height': 12.5,
'short_hop_height': 8.5,
'air_jump_height': 15.0,
'heavy_land_lag': 4,
'wavedash_lag': 12,
'fastfall_multiplier': 2.0,
'hitstun_elasticity': .8,
'shield_size': 1.0
}
default_vars = dict()
# Data gotten from the XML data, like loading files and folders #
actions = baseActions
stats = dict()
variables = dict()
# Initialized fighter variables #
key_bindings = None
active_hitboxes = None #pygame.sprite.Group()
articles = None #list()
status_effects = None #list()
active_hurtboxes = None #pygame.sprite.Group()
auto_hurtbox = None
armor = None
shield = False
shield_integrity = 100
input_buffer = None
last_input_frame = 0
keys_held = None
hitbox_lock = None #weakref.WeakSet()
ledge_lock = False
mask = None
hit_tagged = None
angle = 0
jumps = 0
damage = 0
landing_lag = 6
platform_phase = 0
tech_window = 0
airdodges = 1
grounded = False
elasticity = 0
ground_elasticity = 0
grab_point = (0, 0)
posx = 0
posy = 0
change_x = 0
change_y = 0
preferred_xspeed = 0
preferred_yspeed = 0
trail_color = "#000000"
#facing right = 1, left = -1
facing = 1
#Adding a move to the disabled moves list prevents it from activating.
#If told to switch to it, the fighter will ignore the request.
disabled_moves = set()
invulnerable = 0
respawn_invulnerable = 0
hitstop = 0
hitstop_vibration = (0,0)
hitstop_pos = (0,0)
custom_timers = list()
current_color = 0
current_costume = 0
css_icon = spriteManager.ImageSprite(settingsManager.createPath('sprites/icon_unknown.png'))
color_palettes = []
palette_display = []
def __init__(self,_baseDir,_playerNum):
""" Create a fighter. To start, all that's needed is the directory it is in, and the player number.
It uses the directory to find its fighter.xml file and begin storing data.
Parameters
-----------
_baseDir : string
The filepath of the folder being loaded. Used to determine the location of fighter.xml, icons, and sprites
_playerNum: int
The number of the controlling player. 0-indexed, so Player 1 is number 0
"""
def loadNodeWithDefault(_tag,_default):
""" An anonymous inner function to quickly pull from XML, giving a default value if the node
is not present or otherwise can't be loaded.
Parameters
-----------
_tag : string
The name of the XML tag to search for
_default : any type
the default value of the node, in case it cannot find the proper value
Return
-----------
The string value of the Node, or the given default if it is not valid
"""
if self.xml_data is not None:
if self.xml_data.find(_tag) is not None:
if self.xml_data.find(_tag).text is None:
return _default
else: return self.xml_data.find(_tag).text
return _default
self.base_dir = _baseDir
self.player_num = _playerNum
#Load the xml data if fighter.xml exists
if os.path.exists(os.path.join(self.base_dir,'fighter.xml')):
self.xml_data = ElementTree.parse(os.path.join(_baseDir,'fighter.xml')).getroot()
else: self.xml_data = ElementTree.ElementTree().getroot()
#Load the CSS info
self.name = loadNodeWithDefault('name', self.name)
self.franchise_icon_path = loadNodeWithDefault('icon', self.franchise_icon_path)
self.css_icon_path = loadNodeWithDefault('css_icon', self.css_icon_path)
self.css_portrait_path = loadNodeWithDefault('css_portrait', self.css_portrait_path)
#Load the sprite info
self.sprite_directory = loadNodeWithDefault('sprite_directory', os.path.join(self.base_dir,'sprites/'))
self.sprite_prefix = loadNodeWithDefault('sprite_prefix', self.sprite_prefix)
self.sprite_width = int(loadNodeWithDefault('sprite_width', self.sprite_width))
self.default_sprite = loadNodeWithDefault('default_sprite', self.default_sprite)
try:
self.sprite_flip = self.xml_data.find('facing').text
except:
self.sprite_flip = "right"
#Load the article info
self.article_sprite_path = loadNodeWithDefault('article_path', self.article_sprite_path)
self.article_file = loadNodeWithDefault('articles', self.article_file)
#Load sounds
self.sound_path = loadNodeWithDefault('sound_path', self.sound_path)
#Load actions
self.action_file = loadNodeWithDefault('actions', self.action_file)
#Load the article loader
self.article_path_short = loadNodeWithDefault('article_path', '')
self.article_path = os.path.join(self.base_dir,self.article_path_short)
self.article_loader_path = loadNodeWithDefault('articles', None)
print(self.article_loader_path)
if self.article_loader_path == '':
self.article_loader = None
else:
self.article_loader = engine.articleLoader.ArticleLoader(self)
#TODO color palettes
for color_palette in self.xml_data.findall('color_palette'):
color_dict = {}
for color_map in color_palette.findall('color_map'):
from_color = pygame.Color(color_map.attrib['from_color'])
to_color = pygame.Color(color_map.attrib['to_color'])
color_dict[(from_color.r, from_color.g, from_color.b)] = (to_color.r, to_color.g, to_color.b)
self.color_palettes.append(color_dict)
self.palette_display.append(pygame.Color(color_palette.attrib['displayColor']))
while len(self.color_palettes) < 4:
self.color_palettes.append({})
self.costumes = [self.sprite_prefix]
for costume in self.xml_data.findall('costume'):
self.costumes.append(costume.text)
self.current_color = self.player_num
# Now that we've got all the paths, need to actually load the files
if self.css_icon_path[0] == '.': #If the path starts with a period, start from the top of the game directory instead
self.css_icon = spriteManager.ImageSprite(settingsManager.createPath(self.css_icon_path))
else:
self.css_icon = spriteManager.ImageSprite(os.path.join(self.base_dir,self.css_icon_path))
if self.franchise_icon_path[0] == '.': #If the path starts with a period, start from the top of the game directory instead
self.franchise_icon = spriteManager.ImageSprite(settingsManager.createPath(self.franchise_icon_path))
else:
self.franchise_icon = spriteManager.ImageSprite(os.path.join(self.base_dir,self.franchise_icon_path))
#TODO: The ECB crashes unless there is a sprite to pull from, so we load this one even though it'll never actually be drawn
spriteName = self.sprite_prefix + self.default_sprite + '.png'
try:
self.scale = float(self.xml_data.find('scale').text)
except:
self.scale = 1.0
self.sprite = spriteManager.SheetSprite(os.path.join(self.base_dir,self.sprite_directory,spriteName), self.sprite_width)
self.events = dict()
#try:
if self.action_file.endswith('.py'):
self.actions = settingsManager.importFromURI(os.path.join(_baseDir,'fighter.xml'),self.action_file,_suffix=str(self.player_num))
else:
self.actions = actionLoader.ActionLoader(_baseDir,self.action_file)
self.events = self.actions.getGlobalEvents()
#except:
# self.actions = baseActions
# self.action_file = baseActions.__file__
self.stats = self.default_stats.copy()
self.variables = self.default_vars.copy()
self.keys_held = dict()
self.status_effects = list()
self.data_log = None
self.game_state = None
def saveFighter(self,_path=None):
""" Save the fighter's data to XML. Basically the inverse of __init__.
Parameters
-----------
_path : string
The path to store the fighter.xml file in. If left blank, it will use base_dir.
"""
def createElement(_tag,_val):
""" An anonymouse inner function for quickly creating an XML element with a value.
Parameters
-----------
_tag : The XML tag of the element
_val : The data to go into the element
"""
elem = ElementTree.Element(_tag)
if _val is not None:
elem.text = str(_val)
else: elem.text = ''
return elem
tree = ElementTree.Element('fighter')
tree.append(createElement('name', self.name))
tree.append(createElement('icon', self.franchise_icon_path))
tree.append(createElement('css_icon', self.css_icon_path))
tree.append(createElement('scale', self.scale))
tree.append(createElement('sprite_directory', self.sprite_directory))
tree.append(createElement('sprite_prefix', self.sprite_prefix))
tree.append(createElement('sprite_width', self.sprite_width))
tree.append(createElement('default_sprite', self.default_sprite))
tree.append(createElement('article_path', self.article_path_short))
tree.append(createElement('articles', self.article_file))
tree.append(createElement('sound_path', self.sound_path))
tree.append(createElement('actions', self.action_file))
for i,color_dict in enumerate(self.color_palettes):
color_elem = ElementTree.Element('color_palette')
color_elem.attrib['id'] = str(i)
color_elem.attrib['displayColor'] = '#000000'
for from_color,to_color in color_dict.iteritems():
map_elem = ElementTree.Element('color_map')
map_elem.attrib['from_color'] = '#%02x%02x%02x' % from_color
map_elem.attrib['to_color'] = '#%02x%02x%02x' % to_color
color_elem.append(map_elem)
tree.append(color_elem)
stats_elem = ElementTree.Element('stats')
for costume in self.costumes:
if not costume == self.sprite_prefix:
tree.append(createElement('costume', costume))
for tag,val in self.stats.iteritems():
stats_elem.append(createElement(tag, val))
tree.append(stats_elem)
if _path is None:
_path = os.path.join(self.base_dir,'fighter.xml')
xmlfile = xml.dom.minidom.parseString(ElementTree.tostring(tree))
outputFile = open(_path,'w')
outputFile.write(xmlfile.toprettyxml())
def loadSpriteLibrary(self,_color=None):
""" Loads the sprite library for the fighter, with the current
costume and color.
Parameters
-----------
_color : int
The index of the color to use. By default, will use the stored current_color variable,
which is set while selecting. This optional argument should be used when you're overriding
the game's color choice to load up a different palette.
"""
directory = os.path.join(self.base_dir,self.sprite_directory)
try:
scale = float(self.xml_data.find('scale').text)
except:
scale = 1.0
if _color == None: _color = self.current_color
self.sprite = spriteManager.SpriteHandler(str(directory),
self.costumes[self.current_costume % len(self.costumes)],
self.default_sprite,
self.sprite_width,
self.color_palettes[_color % len(self.color_palettes)],
scale,
self.sprite_flip)
self.rect = self.sprite.rect
def initialize(self):
""" This method is called when shit gets real. It creates the collision box, sprite library,
etc. and is ready to start getting updates and doing actions. No parameters, no return value.
Converts this object into an Initialized Fighter Object.
"""
""" Initialize components """
# Initialize key bindings object
self.input_buffer = controller.InputBuffer()
self.key_bindings = settingsManager.getControls(self.player_num)
self.key_bindings.linkObject(self)
self.articles = list()
if self.sound_path:
settingsManager.getSfx().addSoundsFromDirectory(os.path.join(self.base_dir,self.sound_path), self.name)
if self.xml_data is not None:
if self.xml_data.find('stats') is not None:
for stat in self.xml_data.find('stats'):
vartype = type(self.default_stats[stat.tag]).__name__
if vartype == 'int': self.default_stats[stat.tag] = int(stat.text)
if vartype == 'float': self.default_stats[stat.tag] = float(stat.text)
if self.xml_data.find('variables') is not None:
for variable in self.xml_data.find('variables'):
vartype = 'string'
if variable.attrib.has_key('type'): vartype = variable.attrib['type']
val = variable.text
if vartype == 'int': val = int(val)
elif vartype == 'float': val = float(val)
elif vartype == 'bool': val = bool(val)
self.default_vars[variable.tag] = val
self.onRespawn()
########################################################
# UPDATE METHODS #
########################################################
def onRespawn(self):
"""This method initializes things that should be initialized at the start of the game,
and each time the fighter dies.
"""
self.key_bindings.flushInputs()
self.keys_held = dict()
self.stats = self.default_stats.copy()
self.variables = self.default_vars.copy()
self.disabled_moves.clear()
# Evironmental Collision Box
self.ecb = collisionBox.ECB(self)
self.init_boxes()
self.hitbox_lock = weakref.WeakSet()
self.damage = 0
self.change_x = 0
self.change_y = 0
self.jumps = self.stats['jumps']
self.trail_color = settingsManager.getSetting('playerColor' + str(self.player_num))
self.facing = 1
if self.sprite.flip == 'left': self.sprite.flipX()
self.unRotate()
self.current_action = self.getAction('NeutralAction')
if hasattr(self.actions,'loadAction'):
self.doAction('Respawn')
elif hasattr(self.actions, 'Respawn'):
class_ = getattr(self.actions,'Respawn')
self.changeAction(class_())
def init_boxes(self):
self.active_hitboxes = pygame.sprite.Group()
self.active_hurtboxes = pygame.sprite.Group()
self.auto_hurtbox = hurtbox.Hurtbox(self)
self.armor = dict()
def update(self):
""" This method will step the fighter forward one frame. It will resolve movement,
collisions, animations, and all sorts of things. It should be called every frame.
"""
self.ecb.normalize()
self.ecb.store()
self.input_buffer.push()
self.last_input_frame += 1
if self.hitstop > 0:
#We're in hitstop, let's take care of that and ignore a normal update
self.hitstopUpdate()
return
elif self.hitstop == 0 and not self.hitstop_vibration == (0,0):
#self.hitstop_vibration = False #Lolwut?
(self.posx, self.posy) = self.hitstop_pos
self.hitstop_vibration = (0,0)
self.updatePosition()
self.ecb.normalize()
# Allow ledge re-grabs if we've vacated a ledge
if self.ledge_lock:
ledges = pygame.sprite.spritecollide(self.ecb.current_ecb, self.game_state.platform_ledges, False)
if len(ledges) == 0: # If we've cleared out of all of the ledges
self.ledge_lock = False
# Prepare for movement by setting change_x and change_y from acceleration
if self.grounded: self.accel(self.stats['friction'])
else: self.accel(self.stats['air_resistance'])
self.calcGrav()
# Check for transitions, then execute actions
self.current_action.stateTransitions(self)
self.current_action.update(self) #update our action
self.updatePosition()
self.ecb.normalize()
self.collisionUpdate()
self.childUpdate()
self.timerUpdate()
def collisionUpdate(self):
""" Execute movement and resolve collisions.
This function is due for a huge overhaul.
"""
loop_count = 0
while loop_count < 2:
self.updatePosition()
self.ecb.normalize()
bumped = False
block_hit_list = collisionBox.getSizeCollisionsWith(self, self.game_state.platform_list)
if not block_hit_list:
break
for block in block_hit_list:
if block.solid or (self.platform_phase <= 0):
self.platform_phase = 0
if collisionBox.eject(self, block, self.platform_phase > 0):
bumped = True
break
if not bumped:
break
loop_count += 1
# TODO: Crush death if loopcount reaches the 10 resolution attempt ceiling
self.updatePosition()
self.ecb.normalize()
t = 1
to_bounce_block = None
self.updatePosition()
self.ecb.normalize()
block_hit_list = collisionBox.getMovementCollisionsWith(self, self.game_state.platform_list)
for block in block_hit_list:
if self.ecb.pathRectIntersects(block.rect, self.change_x, self.change_y) > 0 and self.ecb.pathRectIntersects(block.rect, self.change_x, self.change_y) < t and collisionBox.catchMovement(self, block, self.platform_phase > 0):
t = self.ecb.pathRectIntersects(block.rect, self.change_x, self.change_y)
to_bounce_block = block
self.posy += self.change_y*t
self.posx += self.change_x*t
self.updatePosition()
self.ecb.normalize()
# Move with the platform
block = reduce(lambda x, y: y if x is None or y.rect.top <= x.rect.top else x, self.checkGround(), None)
if not block is None and self.ecb.current_ecb.rect.centerx > block.rect.left and self.ecb.current_ecb.rect.centerx < block.rect.right:
self.jumps = self.stats['jumps']
self.posx += block.change_x
#if self.ecb.current_ecb.rect.bottom > block.rect.top:
# self.posy += block.rect.top - self.ecb.current_ecb.rect.bottom-block.change_y
self.change_y -= self.stats['gravity'] * settingsManager.getSetting('gravity')
if self.change_y > block.change_y:
self.change_y = block.change_y
self.grounded = self.isGrounded()
if to_bounce_block is not None:
collisionBox.reflect(self, to_bounce_block)
def childUpdate(self):
""" The fighter contains many child objects, that all need to be updated.
This function calls those updates.
"""
if self.mask:self.mask = self.mask.update()
for art in self.articles:
art.update()
for stat in self.status_effects:
stat.update()
def timerUpdate(self):
""" There are several frame counters that determine things like teching, invulnerability,
platform phasing, etc. as well as possible custom timers.
"""
#These max calls will decrement the window, but not below 0
self.tech_window = max(0,self.tech_window-1)
self.shield_integrity = min(100,self.shield_integrity+0.15)
self.platform_phase = max(0,self.platform_phase-1)
finished_timers = []
for timer in self.custom_timers:
time,event = timer
time -= 1
if time <= 0:
for subact in event:
subact.execute(self,self.current_action)
#In order to avoid mucking up the iterative loop, we store finished timers to remove later
finished_timers.append(timer)
for timer in finished_timers:
self.custom_timers.remove(timer)
def hitstopUpdate(self):
""" Handles what to do if the fighter is in hitstop (that freeze frame state when you
get hit). Vibrates the fighter's sprite, and handles SDI
"""
self.hitstop -= 1
loop_count = 0
#QUESTION: Why is this a loop?
#ANSWER: It's so multiple ejections can happen
while loop_count < 2:
self.updatePosition()
self.ecb.normalize()
bumped = False
block_hit_list = collisionBox.getSizeCollisionsWith(self, self.game_state.platform_list)
if not block_hit_list:
break
for block in block_hit_list:
if block.solid or (self.platform_phase <= 0):
self.platform_phase = 0
if collisionBox.eject(self, block, self.platform_phase > 0):
bumped = True
break
if not bumped:
break
loop_count += 1
self.updatePosition()
self.ecb.normalize()
# Vibrate the sprite
if not self.hitstop_vibration == (0,0):
(x,y) = self.hitstop_vibration
self.posx += x
if not self.grounded:
self.posy += y
self.hitstop_vibration = (-x,-y)
#Smash directional influence AKA hitstun shuffling
di_vec = self.getSmoothedInput(int(self.key_bindings.timing_window['smoothing_window']))
self.posx += di_vec[0]*5
if not self.grounded or self.keysContain('jump', _threshold=1):
self.posy += di_vec[1]*5
self.updatePosition()
self.ecb.normalize()
# Move with the platform
block = reduce(lambda x, y: y if x is None or y.rect.top <= x.rect.top else x, self.checkGround(), None)
if not block is None:
self.posx += block.change_x
self.updatePosition()
if self.platform_phase > 0:
self.platform_phase -= 1
self.ecb.normalize()
def draw(self,_screen,_offset,_scale):
if (settingsManager.getSetting('showSpriteArea')):spriteManager.RectSprite(self.sprite.rect).draw(_screen, _offset, _scale)
rect = self.sprite.draw(_screen,_offset,_scale)
if self.mask: self.mask.draw(_screen,_offset,_scale)
if settingsManager.getSetting('showECB'):
self.ecb.draw(_screen,_offset,_scale)
return rect
########################################################
# ACTION MANAGEMENT #
########################################################
def doAction(self,_actionName):
""" Load up the given action. If it's executable, change to it.
If it's not, still execute the setUp (this allows for certain code
to happen, even if the action is not executed.)
If the move is disabled, it won't even bother to
load it, since we shouldn't be doing anything with it.
Parameters
-----------
_actionName : String
The Action to load and switch to
"""
if not _actionName in self.disabled_moves:
# If our action is an ActionLoader, we need to pull it from XML
if hasattr(self.actions,'loadAction'):
action = self.actions.loadAction(_actionName)
if action.last_frame > 0: self.changeAction(action)
else: action.setUp(self)
# If it has an object of the given name, get that object
elif hasattr(self.actions, _actionName):
class_ = getattr(self.actions,_actionName)
action = class_()
if action.last_frame > 0: self.changeAction(action)
else: action.setUp(self)
def changeAction(self,_newAction):
""" Switches from the current action to the given action. Calls tearDown on the
current action, before setting up the new one. If we get this far, the new action
is valid and ready to be executed
Parameters
-----------
_newAction : Action
The Action to switch to
"""
if self.current_action:
self.current_action.tearDown(self,_newAction)
_newAction.setUp(self)
self.current_action = _newAction
def getAction(self,_actionName):
""" Loads an action, without changing to it or executing it.
Since this is just to read, it will load an action that is
disabled, or unexecutable. If you need to change to it, please
use doAction instead, which will make sure the action is valid
before executing.
Parameters
-----------
_actionName : String
The name of the action to load
Return
-----------
Action : The loaded action with the given name. Returns None
if there is no action with that name.
"""
action = None
if hasattr(self.actions,'loadAction'):
action = self.actions.loadAction(_actionName)
elif hasattr(self.actions, _actionName):
class_ = getattr(self.actions,_actionName)
action = class_()
return action
def hasAction(self,_actionName):
""" Returns True if the fighter has an action of the given name.
Does not load the action, change to it, or do anything other than
check if it exists. You do not need to run this before getAction or
doAction, as they check for the action themselves.
Parameters
-----------
_actionName : String
The name of the action to check for
"""
if hasattr(self.actions,'hasAction'):
return self.actions.hasAction(_actionName)
else: return hasattr(self.actions, _actionName)
def loadArticle(self,_articleName):
""" Loads and returns an article. Checks if the articles are loading
from XML or Python, and loads the appropriate one.
Parameters
-----------
_articleName : String
The name of the article to load
Return
-----------
Article : The article of the given name. Returns None if no Article with that name exists.
"""
if hasattr(self.article_loader, 'loadArticle'):
return self.article_loader.loadArticle(_articleName)
elif hasattr(self.article_loader, _articleName):
class_ = getattr(self.article_loader, _articleName)
return(class_(self))
""" All of this stuff below should probably be rewritten or find a way to be removed """
def doGroundMove(self,_direction):
print(self.input_buffer)
if (self.facing == 1 and _direction == 180) or (self.facing == -1 and _direction == 0):
self.flip()
self.doAction('Move')
def doDash(self,_direction):
if (self.facing == 1 and _direction == 180) or (self.facing == -1 and _direction == 0):
self.flip()
self.doAction('Dash')
def doGroundAttack(self):
(key, invkey) = self.getForwardBackwardKeys()
direct = self.netDirection([key, invkey, 'down', 'up'])
if direct == key:
self.doAction('ForwardSmash') if self.checkSmash(key) else self.doAction('ForwardAttack')
elif direct == invkey:
self.flip()
self.doAction('ForwardSmash') if self.checkSmash(invkey) else self.doAction('ForwardAttack')
elif direct == 'down':
self.doAction('DownSmash') if self.checkSmash('down') else self.doAction('DownAttack')
elif direct == 'up':
self.doAction('UpSmash') if self.checkSmash('up') else self.doAction('UpAttack')
else:
self.doAction('NeutralAttack')
def doAirAttack(self):
(forward, backward) = self.getForwardBackwardKeys()
direct = self.netDirection([forward, backward, 'down', 'up'])
if direct == forward:
self.doAction('ForwardAir')
elif direct == backward:
self.doAction('BackAir')
elif direct == 'down':
self.doAction('DownAir')
elif direct == 'up':
self.doAction('UpAir')
else: self.doAction('NeutralAir')
def doGroundSpecial(self):
(forward, backward) = self.getForwardBackwardKeys()
direct = self.netDirection(['up', forward, backward, 'down'])
if direct == 'up':
if self.hasAction('UpSpecial'):
self.doAction('UpSpecial')
else:
self.doAction('UpGroundSpecial')
elif direct == forward:
if self.hasAction('ForwardSpecial'): #If there's a ground/air version, do it
self.doAction('ForwardSpecial')
else: #If there is not a universal one, do a ground one
self.doAction('ForwardGroundSpecial')
elif direct == backward:
self.flip()
if self.hasAction('ForwardSpecial'):
self.doAction('ForwardSpecial')
else:
self.doAction('ForwardGroundSpecial')
elif direct == 'down':
if self.hasAction('DownSpecial'):
self.doAction('DownSpecial')
else:
self.doAction('DownGroundSpecial')
else:
if self.hasAction('NeutralSpecial'):
self.doAction('NeutralSpecial')
else:
self.doAction('NeutralGroundSpecial')
def doAirSpecial(self):
(forward, backward) = self.getForwardBackwardKeys()
direct = self.netDirection(['up', forward, backward, 'down'])
if direct == 'up':
if self.hasAction('UpSpecial'):
self.doAction('UpSpecial')
else:
self.doAction('UpAirSpecial')
elif direct == forward:
if self.hasAction('ForwardSpecial'): #If there's a ground/air version, do it
self.doAction('ForwardSpecial')
else: #If there is not a universal one, do an air one
self.doAction('ForwardAirSpecial')
elif direct == backward:
self.flip()
if self.hasAction('ForwardSpecial'):
self.doAction('ForwardSpecial')
else:
self.doAction('ForwardAirSpecial')
elif direct == 'down':
if self.hasAction('DownSpecial'):
self.doAction('DownSpecial')
else:
self.doAction('DownAirSpecial')
else:
if self.hasAction('NeutralSpecial'):
self.doAction('NeutralSpecial')
else:
self.doAction('NeutralAirSpecial')
def doTech(self):
(forward, backward) = self.getForwardBackwardKeys()
direct = self.netDirection([forward, backward, 'down', 'up'])
if direct == forward:
self.doAction('ForwardTech')
elif direct == backward:
self.doAction('BackwardTech')
elif direct == 'down':
self.doAction('DodgeTech')
else:
if self.hasAction('NormalTech'):
self.doAction('NormalTech')
else:
self.doAction('Getup')
def doHitStun(self,_hitstun,_trajectory):
self.doAction('HitStun')
self.current_action.direction = _trajectory
self.current_action.last_frame = _hitstun
def doProne(self, _length):
self.doAction('Prone')
self.current_action.last_frame = _length
def doShield(self, _newShield=True):
self.doAction('Shield')
self.current_action.new_shield = _newShield
def doShieldStun(self, _length):
self.doAction('ShieldStun')
self.current_action.last_frame = _length
def doLedgeGrab(self,_ledge):
self.doAction('LedgeGrab')
self.current_action.ledge = _ledge
def doTrapped(self, _length):
self.doAction('Trapped')
self.current_action.last_frame = _length
def doStunned(self, _length):
self.doAction('Stunned')
self.current_action.last_frame = _length
def doGrabbed(self, _height):
self.doAction('Grabbed')
self.current_action.height = _height
########################################################
# COLLISIONS AND MOVEMENT #
########################################################
def accel(self,_xFactor):
""" Change speed to get closer to the preferred speed without going over.
Parameters
-----------
_xFactor : float
The factor by which to change xSpeed. Usually self.stats['friction'] or self.stats['air_resistance']
"""
#TODO: I feel like there's a better way to do this but I can't think of one
if self.change_x > self.preferred_xspeed: #if we're going too fast
diff = self.change_x - self.preferred_xspeed
self.change_x -= min(diff,_xFactor*(settingsManager.getSetting('friction') if self.grounded else settingsManager.getSetting('airControl')))
elif self.change_x < self.preferred_xspeed: #if we're going too slow
diff = self.preferred_xspeed - self.change_x
self.change_x += min(diff,_xFactor*(settingsManager.getSetting('friction') if self.grounded else settingsManager.getSetting('airControl')))
# Change ySpeed according to gravity.
def calcGrav(self, _multiplier=1):
""" Changes the ySpeed according to gravity
Parameters
-----------
_multiplier : float
A multiple of gravity to adjust by, in case gravity is changed temporarily
"""
if self.change_y > self.preferred_yspeed:
diff = self.change_y - self.preferred_yspeed
self.change_y -= min(diff, _multiplier*self.stats['gravity'] * settingsManager.getSetting('gravity'))
elif self.change_y < self.preferred_yspeed:
diff = self.preferred_yspeed - self.change_y
self.change_y += min(diff, _multiplier*self.stats['gravity'] * settingsManager.getSetting('gravity'))
def checkGround(self):
self.updatePosition()
return collisionBox.checkGround(self, self.game_state.platform_list, self.tech_window <= 0)
def checkLeftWall(self):
self.updatePosition()
return collisionBox.checkLeftWall(self, self.game_state.platform_list, True)
def checkRightWall(self):
self.updatePosition()
return collisionBox.checkRightWall(self, self.game_state.platform_list, True)
def checkBackWall(self):
self.updatePosition()
return collisionBox.checkBackWall(self, self.game_state.platform_list, True)
def checkFrontWall(self):
self.updatePosition()
return collisionBox.checkFrontWall(self, self.game_state.platform_list, True)
def checkCeiling(self):
self.updatePosition()
return collisionBox.checkCeiling(self, self.game_state.platform_list, True)
def isGrounded(self):
self.updatePosition()
return collisionBox.isGrounded(self, self.game_state.platform_list, self.tech_window <= 0)
def isLeftWalled(self):
self.updatePosition()
return collisionBox.isLeftWalled(self, self.game_state.platform_list, True)
def isRightWalled(self):
self.updatePosition()
return collisionBox.isRightWalled(self, self.game_state.platform_list, True)
def isBackWalled(self):
self.updatePosition()
return collisionBox.isBackWalled(self, self.game_state.platform_list, True)
def isFrontWalled(self):
self.updatePosition()
return collisionBox.isFrontWalled(self, self.game_state.platform_list, True)
def isCeilinged(self):
self.updatePosition()
return collisionBox.isCeilinged(self, self.game_state.platform_list, True)
def setSpeed(self,_speed,_direction):
""" Set the actor's speed. Instead of modifying the change_x and change_y values manually,
this will calculate what they should be set at if you want to give a direction and
magnitude instead.
Parameters
-----------
_speed : float
The total speed you want the fighter to move
_direction : int
The angle of the speed vector in degrees, 0 being right, 90 being up, 180 being left.
"""
(x,y) = getXYFromDM(_direction,_speed)
self.change_x = x
self.change_y = y
########################################################
# ANIMATION FUNCTIONS #
########################################################
def rotateSprite(self,_direction):
""" Rotate's the fighter's sprite a given number of degrees
Parameters
-----------
_direction : int
The degrees to rotate towards. 0 being forward, 90 being up
"""
self.sprite.rotate(-1 * (90 - _direction))
def unRotate(self):
""" Resets rotation to it's proper, straight upwards value """
self.sprite.rotate()
def changeSprite(self,_newSprite,_frame=0):
""" Changes the fighter's sprite to the one with the given name.
Optionally can change into a frame other than zero.
Parameters
-----------
_newSprite : string
The name of the sprite in the SpriteLibrary to change to
_frame : int : default 0
The frame to switch to in the new sprite. Leave off to start the new animation at zero
"""
self.sprite.changeImage(_newSprite)
self.current_action.sprite_name = _newSprite
if _frame != 0: self.sprite.changeSubImage(_frame)
def changeSpriteImage(self,_frame,_loop=False):
""" Change the subimage of the current sprite.
Parameters
-----------
_frame : int
The frame number to change to.
_loop : bool
If True, any subimage value larger than maximum will loop back into a new value.
For example, if _loop is set, accessing the 6th subimage of an animation 4 frames long will get you the second.
"""
self.sprite.changeSubImage(_frame,_loop)
def updatePosition(self):
""" Passes the updatePosition call to the sprite.
See documentation in SpriteLibrary.updatePosition
"""
return self.sprite.updatePosition(self.posx, self.posy)
########################################################
# INPUT FUNCTIONS #
########################################################
def keyPressed(self,_key):
""" Add a key to the buffer. This function should be adding
to the buffer, and ONLY adding to the buffer. Any sort
of calculations and state changes should probably be done
in the stateTransitions function of the current action.
Parameters
-----------
_key : String
The key to append to the buffer
"""
self.input_buffer.append((_key,1.0))
self.keys_held[_key] = 1.0
def keyReleased(self,_key):
""" Removes a key from the buffer. That is to day, it appends
a release to the buffer. It is safe to call this function if the key
is not in the buffer, and it will return False if the key was not in there
to begin with.
Parameters
-----------
_key : String
The key to remove
Return
-----------
If the key was successfully removed, True. False if the key was not present to be removed.
"""
if _key in self.keys_held:
self.input_buffer.append((_key,0))
del self.keys_held[_key]
return True
else: return False
def keyBuffered(self, _key, _from = 1, _state = 0.1, _to = 0):
""" Checks if a key was pressed within a certain amount of frames.
Parameters
-----------
_key : String
            The key to search for
_from : int : 1
The furthest back frame to look to.
_state : float : 0.1
A value from 0 to 1 for a threshold on value before a button registers as a press.
Usually only applies to sticks, since buttons are always 0.0 or 1.0
_to : int : 0
The furthest forward frame to look to.
"""
if any(map(lambda k: _key in k and k[_key] >= _state,self.input_buffer.getLastNFrames(_from, _to))):
self.last_input_frame = 0
return True
return False
def keyTapped(self, _key, _from = None, _state = 0.1, _to = 0):
""" Checks if a key was pressed and released within a certain amount of frames.
Parameters
-----------
_key : String
            The key to search for
_from : int : None
The furthest back frame to look to. If set to None, it will look at the default Buffer
Window in the player's control settings
_state : float : 0.1
A value from 0 to 1 for a threshold on value before a button registers as a press.
Usually only applies to sticks, since buttons are always 0.0 or 1.0
_to : int : 0
The furthest forward frame to look to.
"""
if _from is None:
_from = max(min(int(self.key_bindings.timing_window['buffer_window']), self.last_input_frame), 1)
down_frames = map(lambda k: _key in k and k[_key] >= _state, self.input_buffer.getLastNFrames(_from, _to))
up_frames = map(lambda k: _key in k and k[_key] < _state, self.input_buffer.getLastNFrames(_from, _to))
if not any(down_frames) or not any(up_frames):
return False
first_down_frame = reduce(lambda j, k: j if j != None else (k if down_frames[k] else None), range(len(down_frames)), None)
last_up_frame = reduce(lambda j, k: k if up_frames[k] else j, range(len(up_frames)), None)
if first_down_frame >= last_up_frame:
self.last_input_frame = 0
return True
return False
#A key press which hasn't been released yet
def keyHeld(self, _key, _from = None, _state = 0.1, _to = 0):
""" Checks if a key was pressed within a certain amount of frames and is still being held.
Parameters
-----------
_key : String
            The key to search for
_from : int : None
The furthest back frame to look to. If set to None, it will look at the default Buffer
Window in the player's control settings
_state : float : 0.1
A value from 0 to 1 for a threshold on value before a button registers as a press.
Usually only applies to sticks, since buttons are always 0.0 or 1.0
_to : int : 0
The furthest forward frame to look to.
"""
if _from is None:
_from = max(min(int(self.key_bindings.timing_window['buffer_window']), self.last_input_frame), 1)
down_frames = map(lambda k: _key in k and k[_key] >= _state, self.input_buffer.getLastNFrames(_from, _to))
up_frames = map(lambda k: _key in k and k[_key] < _state, self.input_buffer.getLastNFrames(_from, _to))
if not any(down_frames):
return False
if any(down_frames) and not any(up_frames):
self.last_input_frame = 0
return True
first_down_frame = reduce(lambda j, k: j if j != None else (k if down_frames[k] else None), range(len(down_frames)), None)
last_up_frame = reduce(lambda j, k: k if up_frames[k] else j, range(len(up_frames)), None)
if first_down_frame < last_up_frame:
self.last_input_frame = 0
return True
return False
def keyUp(self, _key, _from = 1, _state = 0.1, _to = 0):
""" Checks if a key was released within a certain amount of frames.
Parameters
-----------
_key : String
            The key to search for
_from : int : 1
The furthest back frame to look to.
_state : float : 0.1
A value from 0 to 1 for a threshold on value before a button registers as a press.
Usually only applies to sticks, since buttons are always 0.0 or 1.0
_to : int : 0
The furthest forward frame to look to.
"""
if any(map(lambda k: _key in k and k[_key] < _state, self.input_buffer.getLastNFrames(_from, _to))):
self.last_input_frame = 0
return True
return False
def keyReinput(self, _key, _from = None, _state = 0.1, _to = 0):
""" Checks if a key was pressed twice within a certain amount of time
Parameters
-----------
_key : String
            The key to search for
        _from : int : None
The furthest back frame to look to. If set to None, it will look at the default Buffer
Window in the player's control settings
_state : float : 0.1
A value from 0 to 1 for a threshold on value before a button registers as a press.
Usually only applies to sticks, since buttons are always 0.0 or 1.0
_to : int : 0
The furthest forward frame to look to.
"""
if _from is None:
_from = max(min(int(self.key_bindings.timing_window['buffer_window']), self.last_input_frame), 1)
up_frames = map(lambda k: _key in k and k[_key] < _state, self.input_buffer.getLastNFrames(_from, _to))
down_frames = map(lambda k: _key in k and k[_key] >= _state, self.input_buffer.getLastNFrames(_from, _to))
        if not any(up_frames) or not any(down_frames):
return False
first_up_frame = reduce(lambda j, k: j if j != None else (k if up_frames[k] else None), range(len(up_frames)), None)
last_down_frame = reduce(lambda j, k: k if down_frames[k] else j, range(len(down_frames)), None)
if first_up_frame < last_down_frame:
self.last_input_frame = 0
return True
return False
def keyIdle(self, _key, _from = None, _state = 0.1, _to = 0):
""" Checks if a key was released and not pressed again within a certain amount of time.
Parameters
-----------
_key : String
            The key to search for
        _from : int : None
The furthest back frame to look to. If set to None, it will look at the default Buffer
Window in the player's control settings
_state : float : 0.1
A value from 0 to 1 for a threshold on value before a button registers as a press.
Usually only applies to sticks, since buttons are always 0.0 or 1.0
_to : int : 0
The furthest forward frame to look to.
"""
if _from is None:
_from = max(min(int(self.key_bindings.timing_window['buffer_window']), self.last_input_frame), 1)
up_frames = map(lambda k: _key in k and k[_key] < _state, self.input_buffer.getLastNFrames(_from, _to))
down_frames = map(lambda k: _key in k and k[_key] >= _state, self.input_buffer.getLastNFrames(_from, _to))
if not any(up_frames):
return False
if any(up_frames) and not any(down_frames):
self.last_input_frame = 0
return True
first_up_frame = reduce(lambda j, k: j if j != None else (k if up_frames[k] else None), range(len(up_frames)), None)
last_down_frame = reduce(lambda j, k: k if down_frames[k] else j, range(len(down_frames)), None)
if first_up_frame >= last_down_frame:
self.last_input_frame = 0
return True
return False
def getSmoothedInput(self, _distanceBack = None, _maxMagnitude = 1.0):
""" Converts buttons into an analog direction. It checks back for a set amount of frames
and averages the inputs into a direction.
Parameters
-----------
_distanceBack : int : None
How many frames to look back to get direction inputs from
        _maxMagnitude : float : 1.0
            The maximum length the returned input vector is clamped to
"""
#QUESTION - explain this algorithm a little better
#TODO If this is a gamepad, simply return its analog input
if _distanceBack is None:
smooth_distance = int(self.key_bindings.timing_window['smoothing_window'])
_distanceBack = smooth_distance
else:
smooth_distance = _distanceBack
hold_buffer = reversed(self.input_buffer.getLastNFrames(_distanceBack))
smoothed_x = 0.0
smoothed_y = 0.0
if self.key_bindings.type == "Keyboard":
for frame_input in hold_buffer:
working_x = 0.0
working_y = 0.0
x_decay = float(1.5)/smooth_distance
y_decay = float(1.5)/smooth_distance
if 'left' in frame_input: working_x -= frame_input['left']
if 'right' in frame_input: working_x += frame_input['right']
if 'up' in frame_input: working_y -= frame_input['up']
if 'down' in frame_input: working_y += frame_input['down']
if (working_x > 0 and smoothed_x > 0) or (working_x < 0 and smoothed_x < 0):
x_decay = float(1)/smooth_distance
elif (working_x < 0 and smoothed_x > 0) or (working_x > 0 and smoothed_x < 0):
x_decay = float(4)/smooth_distance
if (working_y < 0 and smoothed_y < 0) or (working_y > 0 and smoothed_y > 0):
y_decay = float(1)/smooth_distance
elif (working_y < 0 and smoothed_y > 0) or (working_y > 0 and smoothed_y < 0):
                    y_decay = float(4)/smooth_distance
magnitude = numpy.linalg.norm([working_x, working_y])
if magnitude > _maxMagnitude:
working_x /= magnitude/_maxMagnitude
working_y /= magnitude/_maxMagnitude
if smoothed_x > 0:
smoothed_x -= x_decay
if smoothed_x < 0:
smoothed_x = 0
elif smoothed_x < 0:
smoothed_x += x_decay
if smoothed_x > 0:
smoothed_x = 0
if smoothed_y > 0:
smoothed_y -= y_decay
if smoothed_y < 0:
smoothed_y = 0
elif smoothed_y < 0:
smoothed_y += y_decay
if smoothed_y > 0:
smoothed_y = 0
smoothed_x += working_x
smoothed_y += working_y
else:
left = self.keys_held['left'] if self.keys_held.has_key('left') else 0
right = self.keys_held['right'] if self.keys_held.has_key('right') else 0
up = self.keys_held['up'] if self.keys_held.has_key('up') else 0
down = self.keys_held['down'] if self.keys_held.has_key('down') else 0
smoothed_x = -left+right
smoothed_y = -up+down
final_magnitude = numpy.linalg.norm([smoothed_x, smoothed_y])
if final_magnitude > _maxMagnitude:
smoothed_x /= final_magnitude/_maxMagnitude
smoothed_y /= final_magnitude/_maxMagnitude
return [smoothed_x, smoothed_y]
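    # Illustrative note, not part of the original engine: for the keyboard path
    # above with a smoothing window of N frames, each held direction contributes
    # 1.0 per frame, while the decay works out to roughly 1.5/N when idle, 1/N
    # when the new input agrees with the accumulated direction, and 4/N when it
    # opposes it. With N = 6, for example, tapping the opposite direction bleeds
    # the vector off about four times faster than simply releasing the key, which
    # is what makes quick reversals feel responsive. The result is finally
    # clamped to _maxMagnitude using its Euclidean norm.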
def getSmoothedAngle(self,_default=90):
""" Returns the angle that the smoothedInput currently points to. 0 being forward, 90 being up
Parameters
-----------
_default : int : 90
What to return if input is [0,0]
"""
inputValue = self.getSmoothedInput()
print(inputValue)
if (inputValue == [0, 0]):
angle = _default
else:
angle = math.atan2(-inputValue[1], inputValue[0])*180.0/math.pi
print('ANGLE:',angle)
return angle
def checkSmash(self,_direction):
""" This function checks if the player has Smashed in a direction. It does this by noting if the direction was
pressed recently and is now above a threshold
Parameters
-----------
_direction : String
The joystick direction to check for a smash in
"""
#TODO different for buttons than joysticks
return self.keyBuffered(_direction, int(self.key_bindings.timing_window['smash_window']), 0.85)
def checkTap(self, _direction, _firstThreshold=0.6):
""" Checks if the player has tapped a button, but not smashed it. If a joystick is used, the checkSmash function should
cover this.
Parameters
-----------
_direction : String
The joystick direction to check for a smash in
_firstThreshold : float : 0.6
"""
if self.key_bindings.type == "Keyboard":
return self.keyBuffered(_direction, _state=1) and self.keyBuffered(_direction, int(self.key_bindings.timing_window['repeat_window'])+1, _firstThreshold, 1)
else:
return self.checkSmash(_direction)
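    # Illustrative note, not part of the original engine: checkSmash treats a
    # direction as "smashed" when it was buffered above 0.85 within the
    # smash_window, which maps naturally onto an analog stick being flicked hard.
    # checkTap is the keyboard analogue: a full press right now plus an earlier
    # press above _firstThreshold within the repeat_window, i.e. a quick
    # double-tap stands in for flicking the stick.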
def netDirection(self, _checkDirectionList):
""" Gets the net total direction of all of the directions currently being held.
Parameters
-----------
        _checkDirectionList : list
            The directions to consider when determining which one dominates
"""
coords = self.getSmoothedInput()
if not filter(lambda a: a in ['left', 'right', 'up', 'down'], _checkDirectionList):
return 'neutral'
left_check = -coords[0] if 'left' in _checkDirectionList and 'left' in self.keys_held else -2
right_check = coords[0] if 'right' in _checkDirectionList and 'right' in self.keys_held else -2
up_check = -coords[1] if 'up' in _checkDirectionList and 'up' in self.keys_held else -2
down_check = coords[1] if 'down' in _checkDirectionList and 'down' in self.keys_held else -2
if left_check == -2 and right_check == -2 and up_check == -2 and down_check == -2:
if 'left' in self.keys_held: left_check = self.keys_held['left']
if 'right' in self.keys_held: right_check = self.keys_held['right']
if 'up' in self.keys_held: up_check = self.keys_held['up']
if 'down' in self.keys_held: down_check = self.keys_held['down']
if left_check == -2 and right_check == -2 and up_check == -2 and down_check == -2:
return 'neutral'
check_dict = {'left': left_check, 'right': right_check, 'up': up_check, 'down': down_check}
return max(_checkDirectionList, key=lambda k: check_dict[k])
def keysContain(self,_key,_threshold=0.1):
""" Checks for keys that are currently being held, regardless of when they were pressed.
Parameters
-----------
_key : String
The key to check for.
_threshold : float : 0.1
The value that represents a "press", will check for values lower than the threshold
"""
if _key in self.keys_held:
return self.keys_held[_key] >= _threshold
return False
def getForwardBackwardKeys(self):
""" This returns a tuple of the key for forward, then backward
Useful for checking if the fighter is pivoting, or doing a back air, or getting the
proper key to dash-dance, etc.
The best way to use this is something like
(key,invkey) = actor.getForwardBackwardKeys()
which will assign the variable "key" to the forward key, and "invkey" to the backward key.
"""
if self.facing == 1: return ('right','left')
else: return ('left','right')
########################################################
# COMBAT FUNCTIONS #
########################################################
def applySubactions(self, _subacts):
for subact in _subacts:
subact.execute(self.current_action, self)
return True # Our hit filter stuff expects this
def filterHits(self, _hitbox, _subacts):
if self.lockHitbox(_hitbox):
for subact in _subacts:
subact.execute(self.current_action, self)
return True
return False
def dealDamage(self, _damage):
""" Deal damage to the fighter.
Checks to make sure the damage caps at 999.
If you want to have higher damage, override this function and remove it.
This function is called in the applyKnockback function, so you shouldn't
need to call this function directly for normal attacks, although you can
for things like poison, non-knockback attacks, etc.
Parameters
-----------
_damage : float
The amount of damage to deal
"""
self.damage += float(math.floor(_damage))
self.damage = min(999,max(self.damage,0))
if self.data_log:
self.data_log.addToData('Damage Taken',float(math.floor(_damage)))
def applyHitstop(self,_damage,_hitlagMultiplier):
""" Applies hitstop to the fighter when hit. Also sets the hitstun
vibration.
Parameters
-----------
_damage : int
The amount of damage the attack does
_hitlagMultiplier : float
An amount to multiply the calculated hitstop with
"""
self.hitstop = math.floor((_damage / 4.0 + 2)*_hitlagMultiplier)
if self.grounded:
self.hitstop_vibration = (3,0)
else:
self.hitstop_vibration = (0,3)
self.hitstop_pos = (self.posx, self.posy)
def applyKnockback(self, _total_kb,_trajectory):
"""Do Knockback to the fighter. The knockback calculation is derived from the SSBWiki, and a bit of information from
ColinJF and Amazing Ampharos on Smashboards, it is based off of Super Smash Bros. Brawl's knockback calculation, which
is the one with the most information available
        Parameters
        -----------
        _total_kb : float
            The total knockback magnitude to launch the fighter with
        _trajectory : int
            The angle to launch the fighter at, in degrees, 0 being right, 90 being up
        """
# Get the trajectory as a vector
trajectory_vec = [math.cos(_trajectory/180*math.pi), math.sin(_trajectory/180*math.pi)]
di_vec = self.getSmoothedInput(int(self.key_bindings.timing_window['smoothing_window']))
di_multiplier = 1+numpy.dot(di_vec, trajectory_vec)*.05
_trajectory += numpy.cross(di_vec, trajectory_vec)*13.5
print(_total_kb)
self.setSpeed((_total_kb)*di_multiplier, _trajectory)
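    # Rough worked example of the DI math above, illustrative only: if
    # _trajectory is 45 degrees and the defender holds the stick roughly
    # perpendicular to that launch, the dot product term is near zero (so the
    # magnitude barely changes, since it scales by 1 + dot * 0.05, at most
    # +/-5%), while the cross product term bends the launch angle by up to about
    # +/-13.5 degrees. Holding along or against the launch does the opposite:
    # mostly distance, little angle change.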
def applyHitstun(self,_total_kb,_hitstunMultiplier,_baseHitstun,_trajectory):
"""TODO document this"""
hitstun_frames = math.floor((_total_kb)*_hitstunMultiplier+_baseHitstun)
if hitstun_frames > 0.5:
#If the current action is not hitstun or you're in hitstun, but there's not much of it left
if not isinstance(self.current_action, baseActions.HitStun) or (self.current_action.last_frame-self.current_action.frame)/float(settingsManager.getSetting('hitstun')) <= hitstun_frames+15:
self.doHitStun(hitstun_frames*settingsManager.getSetting('hitstun'), _trajectory)
self.current_action.tech_cooldown = (_total_kb*_hitstunMultiplier)//6
def applyPushback(self, _kb, _trajectory, _hitlag):
""" Pushes back the fighter when they hit a foe. This is the corollary to applyKnockback,
except this one is called on the fighter who lands the hit. It applies the hitlag to the fighter,
and pushes them back slightly from the opponent.
Parameters
-----------
        _kb : float
            The magnitude of the pushback applied to the attacking fighter
_trajectory : int
The direction to push the attacker back. In degrees, zero being forward, 90 being up
_hitlag : int
The hitlag from the attack
"""
self.hitstop = math.floor(_hitlag*settingsManager.getSetting('hitlag'))
print(self.hitstop)
(x, y) = getXYFromDM(_trajectory, _kb)
self.change_x += x
if not self.grounded:
self.change_y += y
def die(self,_respawn = True):
""" This function is called when a fighter dies. It spawns the
death particles and resets some variables.
Parameters
-----------
_respawn : Boolean
Whether or not to respawn the fighter after death
"""
sfxlib = settingsManager.getSfx()
if sfxlib.hasSound('death', self.name):
self.playSound('death')
self.data_log.addToData('Falls',1)
if self.hit_tagged != None:
if hasattr(self.hit_tagged, 'data_log'):
self.hit_tagged.data_log.addToData('KOs',1)
if _respawn:
if self.hit_tagged is not None:
color = settingsManager.getSetting('playerColor' + str(self.hit_tagged.player_num))
else:
color = settingsManager.getSetting('playerColor' + str(self.player_num))
for i in range(0, 11):
next_hit_article = article.HitArticle(self, (self.posx, self.posy), 1, i*30, 30, 1.5, color)
self.articles.append(next_hit_article)
next_hit_article = article.HitArticle(self, (self.posx, self.posy), 1, i*30+10, 60, 1.5, color)
self.articles.append(next_hit_article)
next_hit_article = article.HitArticle(self, (self.posx, self.posy), 1, i*30+20, 90, 1.5, color)
self.articles.append(next_hit_article)
self.onRespawn()
(self.posx, self.posy) = self.game_state.spawn_locations[self.player_num]
self.posy -= 200
self.updatePosition()
self.ecb.normalize()
self.posy += self.ecb.current_ecb.rect.height/2.0
self.ecb.store()
self.createMask([255,255,255], 480, True, 12)
self.respawn_invulnerable = 480
self.doAction('Respawn')
########################################################
# HELPER FUNCTIONS #
########################################################
""" These are ways of getting properly formatted data, accessing specific things,
converting data, etc. """
def getForwardWithOffset(self,_offSet = 0):
""" Get a direction that is angled from the direction the fighter is facing,
rather than angled from right. For example, sending the opponent 30 degrees is
fine when facing right, but if you're facing left, you'd still be sending them to the right!
Hitboxes use this calculation a lot. It'll return the proper angle that is the given offset
from "forward". Defaults to 0, which will give either 0 or 180, depending on the direction
of the fighter.
Parameters
-----------
_offSet : int
The angle to convert
Return
-----------
The adjusted angle for the proper facing angle
"""
if self.facing == 1:
return _offSet
else:
return 180 - _offSet
def getDirectionMagnitude(self):
""" Converts the fighter's current speed from XY components into
a Direction and Magnitude. Angles are in degrees, with 0 being forward
Return
-----------
(direction,magnitude) : Tuple (int,float)
            The direction in degrees, and the magnitude in map units
"""
if self.change_x == 0:
magnitude = self.change_y
direction = 90 if self.change_y < 0 else 270
return (direction,magnitude)
if self.change_y == 0:
magnitude = self.change_x
direction = 0 if self.change_x > 0 else 180
return(direction,magnitude)
direction = math.degrees(math.atan2(-self.change_y, self.change_x))
direction = round(direction)
magnitude = numpy.linalg.norm([self.change_x, self.change_y])
return (direction,magnitude)
def getFacingDirection(self):
""" A simple function that converts the facing variable into a direction in degrees.
Return
-----------
The direction the fighter is facing in degrees, zero being right, 90 being up
"""
if self.facing == 1: return 0
else: return 180
def setGrabbing(self, _other):
""" Sets a grabbing state. Tells this fighter that it's grabbing something else,
and tells that thing what's grabbing it.
Parameters
-----------
_other : GameObject
The object to be grabbing
"""
self.grabbing = _other
_other.grabbed_by = self
def isGrabbing(self):
""" Check whether the fighter is current holding something. If this object says that it's
holding something, but the other object doesn't agree, assume that there is no grab.
Return
-----------
bool : Whether the fighter is currently holding something
"""
if self.grabbing is None:
return False
if self.grabbing and self.grabbing.grabbed_by == self:
return True
return False
def flip(self):
""" Flip the fighter so he is now facing the other way.
Also flips the sprite for you.
"""
self.facing = -self.facing
self.sprite.flipX()
def updateLandingLag(self,_lag,_reset=False):
""" Updates landing lag, but doesn't overwrite a longer lag with a short one.
Useful for things like fast aerials that have short endlag, but you don't want to be
able to override something like an airdodge lag with it.
Parameters
-----------
_lag : int
The number of frames of endlag to set
_reset : bool : False
When True, will always set the landing lag to the given value, regardless of current lag.
"""
if _reset: self.landing_lag = _lag
else:
if _lag > self.landing_lag: self.landing_lag = _lag
def createMask(self,_color,_duration,_pulse = False,_pulse_size = 16):
""" Creates a color mask sprite over the fighter
Parameters
-----------
_color : String
The color of the mask in RGB of the format #RRGGBB
_duration : int
How many frames should the mask stay active
_pulse : bool
Should the mask "flash" in transparency, or just stay solid?
_pulse_size : int
If pulse is true, this is how long it takes for one full rotation of transparency
"""
self.mask = spriteManager.MaskSprite(self.sprite,_color,_duration,_pulse, _pulse_size)
def playSound(self,_sound):
""" Play a sound effect. If the sound is not in the fighter's SFX library, it will play the base sound.
Parameters
-----------
_sound : String
The name of the sound to be played
"""
sfxlib = settingsManager.getSfx()
if sfxlib.hasSound(_sound, self.name):
sfxlib.playSound(_sound, self.name)
else:
sfxlib.playSound(_sound,'base')
def activateHitbox(self,_hitbox):
""" Activates a hitbox, adding it to your active_hitboxes list.
Parameters
-----------
_hitbox : Hitbox
The hitbox to activate
"""
self.active_hitboxes.add(_hitbox)
_hitbox.activate()
def activateHurtbox(self,_hurtbox):
""" Activates a hurtbox, adding it to your active_hurtboxes list.
_hurtbox : Hurtbox
            The hurtbox to activate
"""
self.active_hurtboxes.add(_hurtbox)
def lockHitbox(self,_hbox):
""" This will "lock" the hitbox so that another hitbox with the same ID from the same fighter won't hit again.
Returns true if it was successful, false if it already exists in the lock.
Parameters
-----------
_hbox : Hitbox
The hitbox we are checking for
"""
#If the hitbox belongs to something, get tagged by it
if not _hbox.owner is None:
self.hit_tagged = _hbox.owner
if _hbox.hitbox_lock is None:
return False
if _hbox.hitbox_lock in self.hitbox_lock:
return False
self.hitbox_lock.add(_hbox.hitbox_lock)
return True
def startShield(self):
""" Creates a shield article and adds it to your active articles list """
self.articles.append(article.ShieldArticle(settingsManager.createPath("sprites/melee_shield.png"),self))
def startParry(self):
""" Creates a parry article and adds it to your active articles list """
self.articles.append(article.ParryArticle(settingsManager.createPath("sprites/melee_shield.png"),self))
def test():
fight = AbstractFighter('',0)
print(fight.__init__.__doc__)
if __name__ == '__main__': test()
| gpl-3.0 | -4,722,417,059,480,184,000 | 40.139911 | 237 | 0.555986 | false |
rockneurotiko/madness-things | Python/Pygame/2-sprites/platfom_simple.py | 1 | 4157 | import pygame
BLACK = ( 0, 0, 0)
WHITE = ( 255, 255, 255)
BLUE = ( 0, 0, 255)
RED = ( 255, 0, 0)
GREEN = ( 0, 255, 0)
size = (800, 600)
class Player(pygame.sprite.Sprite):
change_x = 0
change_y = 0
level = None
def __init__(self):
pygame.sprite.Sprite.__init__(self)
        #The player. Could take an image, but for now it is just a colored rectangle
self.image = pygame.Surface([40,60])
self.image.fill(RED)
self.rect = self.image.get_rect()
def update(self):
#Move
#Gravity
self.calc_grav()
#Move l/r
self.rect.x += self.change_x
        #Check collisions
blocks_hit = pygame.sprite.spritecollide(self,\
self.level.platform_list, False)
for block in blocks_hit:
if self.change_x > 0:
self.rect.right = block.rect.left
elif self.change_x < 0:
self.rect.left = block.rect.right
#Move u/d
self.rect.y += self.change_y
        #Check collisions
blocks_hit = pygame.sprite.spritecollide(self,\
self.level.platform_list, False)
for block in blocks_hit:
if self.change_y > 0:
self.rect.bottom = block.rect.top
elif self.change_y < 0:
self.rect.top = block.rect.bottom
self.change_y = 0
def calc_grav(self):
if self.change_y == 0:
self.change_y = 1
else:
self.change_y += .35
if self.rect.y >= size[1] - self.rect.height and self.change_y >= 0:
self.change_y = 0
self.rect.y = size[1] - self.rect.height
def jump(self):
        #Jump
        #Move down two pixels, check for a collision, and if there is one, allow the jump
self.rect.y += 2
platform_hit = pygame.sprite.spritecollide(self, \
self.level.platform_list, False)
self.rect.y -= 2
if len(platform_hit) > 0 or self.rect.bottom >= size[1]:
self.change_y = -10
def go_left(self):
self.change_x = -6
def go_right(self):
self.change_x = 6
def stop(self):
self.change_x = 0
class Platform(pygame.sprite.Sprite):
def __init__(self, w, h):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.Surface([w,h])
self.image.fill(GREEN)
self.rect = self.image.get_rect()
class Level(object):
#Super class
platform_list = None
enemy_list = None
background = None
def __init__(self, player):
self.platform_list = pygame.sprite.Group()
self.enemy_list = pygame.sprite.Group()
self.player = player
def update(self):
self.platform_list.update()
self.enemy_list.update()
def draw(self, screen):
screen.fill(BLUE)
self.platform_list.draw(screen)
self.enemy_list.draw(screen)
class Level01(Level):
def __init__(self, player):
Level.__init__(self, player)
level = [[210, 70, 500, 500],
[210, 70, 200, 400],
[210, 70, 600, 300],]
for plat in level:
block = Platform(plat[0], plat[1])
block.rect.x = plat[2]
block.rect.y = plat[3]
block.player = self.player
self.platform_list.add(block)
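# Illustrative sketch, not part of the original tutorial: extra levels follow the
# same pattern as Level01 above -- a list of [width, height, x, y] entries turned
# into Platform sprites. The platform positions here are made up, and this class
# is not added to main()'s lvl_list; it only shows how the Level base class is
# meant to be extended.
class Level02(Level):
    def __init__(self, player):
        Level.__init__(self, player)
        level = [[210, 30, 100, 500],
                 [210, 30, 400, 380],
                 [210, 30, 150, 260],]
        for plat in level:
            block = Platform(plat[0], plat[1])
            block.rect.x = plat[2]
            block.rect.y = plat[3]
            block.player = self.player
            self.platform_list.add(block)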
def main():
pygame.init()
screen = pygame.display.set_mode(size)
pygame.display.set_caption('jumper')
pygame.mouse.set_visible(False)
player = Player()
lvl_list = []
lvl_list.append(Level01(player))
current_lvl_no = 0
current_lvl = lvl_list[current_lvl_no]
active_sprite_list = pygame.sprite.Group()
player.level = current_lvl
player.rect.x = 340
player.rect.y = size[1] - player.rect.height
active_sprite_list.add(player)
done = False
clock = pygame.time.Clock()
while not done:
for event in pygame.event.get(): # User did something
if event.type == pygame.QUIT: # If user clicked close
done = True # Flag that we are done so we exit this loop
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
player.go_left()
if event.key == pygame.K_RIGHT:
player.go_right()
if event.key == pygame.K_UP:
player.jump()
if event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT and player.change_x < 0:
player.stop()
if event.key == pygame.K_RIGHT and player.change_x > 0:
player.stop()
active_sprite_list.update()
current_lvl.update()
if player.rect.right > size[0]:
player.rect.right = size[0]
if player.rect.left < 0:
player.rect.left = 0
current_lvl.draw(screen)
active_sprite_list.draw(screen)
pygame.display.flip()
clock.tick(60)
pygame.quit()
if __name__ == '__main__':
main() | mpl-2.0 | 384,585,491,287,128,450 | 21.117021 | 70 | 0.65071 | false |
vileopratama/vitech | docs/tutorials/ebook/Odoo Development Cookbook/OdooDevelopmentCookbook_Code/Chapter13_code/ch13_r02_restrict_access_to_web_accessible_paths/controllers/main.py | 1 | 2112 | # -*- coding: utf-8 -*-
# © 2015 Therp BV <http://therp.nl>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import http
from openerp.http import request
class Main(http.Controller):
@http.route('/my_module/all-books', type='http', auth='none')
def all_books(self):
records = request.env['library.book'].sudo().search([])
result = '<html><body><table><tr><td>'
result += '</td></tr><tr><td>'.join(records.mapped('name'))
result += '</td></tr></table></body></html>'
return result
@http.route('/my_module/all-books/mark_mine', type='http', auth='public')
def all_books_mark_mine(self):
records = request.env['library.book'].sudo().search([])
result = '<html><body><table>'
for record in records:
result += '<tr>'
if record.author_ids & request.env.user.partner_id:
result += '<th>'
else:
result += '<td>'
result += record.name
if record.author_ids & request.env.user.partner_id:
result += '</th>'
else:
result += '</td>'
result += '</tr>'
result += '</table></body></html>'
return result
@http.route('/my_module/all-books/mine', type='http', auth='user')
def all_books_mine(self):
records = request.env['library.book'].search([
('author_ids', 'in', request.env.user.partner_id.ids),
])
result = '<html><body><table><tr><td>'
result += '</td></tr><tr><td>'.join(records.mapped('name'))
result += '</td></tr></table></body></html>'
return result
@http.route('/my_module/all-books/mine_base_group_user', type='http',
auth='base_group_user')
def all_books_mine_base_group_user(self):
return self.all_books_mine()
# this is for the exercise
@http.route('/my_module/all-books/mine_groups', type='http',
auth='groups(base.group_no_one)')
def all_books_mine_groups(self):
return self.all_books_mine()
| mit | 2,850,783,390,967,410,000 | 37.381818 | 77 | 0.547134 | false |
ahnolds/rpglib | rpglib/window.py | 1 | 10818 | """
The module for drawing the game world
"""
# Standard Library packages
import sys
# External dependenices
import pygame
# Internal dependencies
import world
# Rendering settings
SCREEN_X = 450
SCREEN_Y = 450
HALF_SCREEN_X = SCREEN_X // 2
HALF_SCREEN_Y = SCREEN_Y // 2
SQUARE_SIZE = 18
SCREEN_BG = pygame.color.Color('black')
# Text font settings
pygame.font.init()
FONT_NAME = 'freemono'
FONT_SIZE = 20
FONT = pygame.font.SysFont(FONT_NAME, FONT_SIZE)
FONT_COLOR = pygame.color.Color('black')
FONT_BG_COLOR = pygame.color.Color('white')
FONT_HL_COLOR = pygame.color.Color('red')
MAX_LINE_LEN = 37
MAX_NUM_LINES = 2
FONT_BOX_HEIGHT = FONT_SIZE * MAX_NUM_LINES
# Key bindings
UP_KEY = pygame.K_UP
DOWN_KEY = pygame.K_DOWN
LEFT_KEY = pygame.K_LEFT
RIGHT_KEY = pygame.K_RIGHT
A_KEY = pygame.K_a
B_KEY = pygame.K_b
START_KEY = pygame.K_SPACE
SEL_KEY = pygame.K_RETURN
# Game mode constants
M_WALK = 0
M_MENU = 1
M_TEXT = 2
M_FIGHT = 3
M_INV = 4
M_SAVE = 5
# Menu option constants and mapping from option to mode
ME_INVENTORY = 'ITEMS'
ME_SAVE = 'SAVE'
ME_EXIT = 'EXIT'
MENU = [ME_INVENTORY, ME_SAVE, ME_EXIT]
MENU_MODES = {
ME_INVENTORY : M_INV,
ME_SAVE : M_SAVE,
ME_EXIT : M_WALK,
}
MENU_BOX_WIDTH = 100
MENU_BOX_HEIGHT = FONT_SIZE * len(MENU)
SHOW_MENU_MODES = set({M_MENU, M_INV, M_SAVE})
# Inventory options
INV_BOX_WIDTH = 100
INV_BOX_HEIGHT = SCREEN_Y
class Window(object):
def __init__(self, world):
"""
Create a new game window
"""
# Get the world
self.world = world
# Start out walking
self.mode = M_WALK
# Initially no text (TODO start text?)
self.textQueue = []
# Default to the top of menus
self.menuPos = 0
self.invPos = 0
# Default to no select hotkey
self.selHotkey = None
# Setup pygame for rendering
pygame.init()
pygame.key.set_repeat(160, 40)
self.screen = pygame.display.set_mode((SCREEN_X, SCREEN_Y))
def draw(self):
"""
Draw the world
"""
# Background
self.screen.fill(SCREEN_BG)
# Image
self.world.draw(self.screen)
# Dialog text
self.printText()
# Menu text
self.printMenu()
# Inventory text
self.printInventory()
# Show the newly drawn screen (automatically double-buffered)
pygame.display.flip()
def handleEvent(self, event):
"""
Handle the current event
"""
if event.type == 0:
# TODO handle internal changes e.g. NPC movement on Null event
pass
elif event.type == pygame.QUIT:
sys.exit(0)
elif event.type == pygame.KEYDOWN:
if self.mode == M_WALK:
if event.key == DOWN_KEY:
# The origin for pygame is at the top left, and down is positive
self.world.movePlayer((0, 1))
elif event.key == UP_KEY:
# The origin for pygame is at the top left, and down is positive
self.world.movePlayer((0, -1))
elif event.key == LEFT_KEY:
self.world.movePlayer((-1, 0))
elif event.key == RIGHT_KEY:
self.world.movePlayer((1, 0))
elif event.key == A_KEY:
text = self.world.interact()
if text is not None:
# Format the text and add it to the queue
self.formatText(text)
elif event.key == START_KEY:
# Bring up the menu
self.mode = M_MENU
elif event.key == SEL_KEY:
if self.selHotkey is not None:
inventory = self.world.player.items
item = inventory[self.selHotkey][0]
text, wasConsumed = item.useMenuInventory()
if wasConsumed:
# Decrement the inventory count
inventory[self.selHotkey].pop()
# If this used up the last one, then unmap the
# hotkey and remove the key from the inventory
if len(inventory[self.selHotkey]) == 0:
del inventory[self.selHotkey]
self.selHotkey = None
if text is not None:
# Format the text and add it to the queue
self.formatText(text)
elif self.mode == M_TEXT:
if event.key == A_KEY:
# Go to the next block of text
self.textQueue.pop(0)
# If there is no more text, switch back to walking
if not self.textQueue:
self.mode = M_WALK
elif self.mode == M_MENU:
if event.key == DOWN_KEY:
# Move down the menu (or loop back to the top)
self.menuPos = (self.menuPos + 1) % len(MENU)
elif event.key == UP_KEY:
# Move up the menu (or loop back to the bottom)
if self.menuPos == 0: self.menuPos = len(MENU)
self.menuPos -= 1
elif event.key == A_KEY:
self.mode = MENU_MODES[MENU[self.menuPos]]
elif event.key == B_KEY:
# Exit the menu
# TODO this should return to the previous mode to allow
# opening the menu during encounters etc
self.mode = M_WALK
elif self.mode == M_INV:
inventory = self.world.player.items
if event.key == DOWN_KEY:
# Move down the menu (or loop back to the top)
self.invPos = (self.invPos + 1) % len(inventory)
elif event.key == UP_KEY:
# Move up the menu (or loop back to the bottom)
if self.invPos == 0: self.invPos = len(inventory)
self.invPos -= 1
elif event.key == A_KEY:
# TODO use item
pass
if event.key == B_KEY:
# Back to the main menu
self.mode = M_MENU
elif self.mode == M_SAVE:
if event.key == B_KEY:
# Back to the main menu
self.mode = M_MENU
def formatText(self, text):
"""
Format the text into blocks for display in a text box and add it to
the text queue
"""
# Switch into text mode
self.mode = M_TEXT
# Fill in any variables in the message
text = text.format(name = self.world.player.name)
# Build the text queue
parts = text.split('\n')
for part in parts:
wordList = part.strip().split()
line = ''
while wordList:
lineLen = len(line.split('\n')[-1] + ' ' +
wordList[0])
if lineLen <= MAX_LINE_LEN:
line += ' ' + wordList.pop(0)
elif len(line.split('\n')) < MAX_NUM_LINES:
line += '\n'
else:
self.textQueue.append(line)
line = ''
if line:
while len(line.split('\n')) < MAX_NUM_LINES:
line += '\n'
self.textQueue.append(line)
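    # Worked example, illustrative only: with MAX_LINE_LEN = 37 and
    # MAX_NUM_LINES = 2, a message such as
    #   "Hello {name}! Welcome to the village. Stay as long as you like."
    # has its placeholder filled in, is split on whitespace, packed into
    # 37-character lines two at a time, and each two-line block is appended to
    # self.textQueue so the player pages through the boxes with the A key.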
def printText(self):
# Only print if there is text in the queue
if self.textQueue:
# Create a box for the text along the bottom
textBox = pygame.Rect(0, SCREEN_Y - FONT_BOX_HEIGHT, SCREEN_X,
FONT_BOX_HEIGHT)
# Draw the text box
pygame.draw.rect(self.screen, FONT_BG_COLOR, textBox)
# Render the text
for num, line in enumerate(reversed(self.textQueue[0].split('\n'))):
text = FONT.render(line, True, FONT_COLOR)
# Position the text
textRect = text.get_rect()
textRect.left = 0
textRect.bottom = SCREEN_Y - num * FONT_SIZE
# Draw the text onto the screen
self.screen.blit(text, textRect)
def printMenu(self):
"""Print the menu if relevant"""
if self.mode in SHOW_MENU_MODES:
# Create a box for the menu along the left
textBox = pygame.Rect(SCREEN_X - MENU_BOX_WIDTH, 0, MENU_BOX_WIDTH,
MENU_BOX_HEIGHT)
# Draw the menu box
pygame.draw.rect(self.screen, FONT_BG_COLOR, textBox)
# Render the menu
for num, line in enumerate(MENU):
# Highlight the current selection
color = FONT_HL_COLOR if num == self.menuPos else FONT_COLOR
text = FONT.render(line, True, color)
# Position the text
textRect = text.get_rect()
textRect.right = SCREEN_X
textRect.top = num * FONT_SIZE
# Draw the text onto the screen
self.screen.blit(text, textRect)
def printInventory(self):
"""Print the inventory if relevant"""
if self.mode == M_INV:
# Create a box for the menu along the left
textBox = pygame.Rect(SCREEN_X - MENU_BOX_WIDTH - INV_BOX_WIDTH, 0,
INV_BOX_WIDTH, INV_BOX_HEIGHT)
# Draw the menu box
pygame.draw.rect(self.screen, FONT_BG_COLOR, textBox)
# Render the menu
for num, line in enumerate(self.world.player.items.iterkeys()):
# Highlight the current selection
color = FONT_HL_COLOR if num == self.invPos else FONT_COLOR
text = FONT.render(line, True, color)
# Position the text
textRect = text.get_rect()
textRect.right = SCREEN_X - MENU_BOX_WIDTH
textRect.top = num * FONT_SIZE
# Draw the text onto the screen
self.screen.blit(text, textRect)
def run(self):
"""
Run the game
"""
# Main loop
while True:
# Get any events that occurred
self.handleEvent(pygame.event.poll())
# Redraw the screen
self.draw()
| gpl-2.0 | 3,770,847,533,449,233,000 | 34.585526 | 84 | 0.495563 | false |
GluonsAndProtons/gluon | gluon/backends/backends/proton_client.py | 1 | 2296 | from oslo_log import log as logging
from gluon.common import exception as exc
from requests import get, put, post, delete
import json
LOG = logging.getLogger(__name__)
logger = LOG
class Client(object):
def __init__(self, backend):
self._backend = backend
def json_get(self, url):
resp = get(url)
if resp.status_code != 200:
raise exc.GluonClientException('Bad return status %d'
% resp.status_code,
status_code=resp.status_code)
try:
rv = json.loads(resp.content)
except Exception as e:
raise exc.MalformedResponseBody(reason="JSON unreadable: %s on %s"
% (e.message, resp.content))
return rv
def do_delete(self, url):
resp = delete(url)
if resp.status_code != 200:
raise exc.GluonClientException('Bad return status %d'
% resp.status_code,
status_code=resp.status_code)
def do_post(self, url, values):
resp = post(url, json=values)
        if resp.status_code not in (200, 201):
raise exc.GluonClientException('Bad return status %d'
% resp.status_code,
status_code=resp.status_code)
try:
rv = json.loads(resp.content)
except Exception as e:
raise exc.MalformedResponseBody(reason="JSON unreadable: %s on %s"
% (e.message, resp.content))
return rv
def do_put(self, url, values):
resp = put(url, json=values)
if resp.status_code != 200:
raise exc.GluonClientException('Bad return status %d'
% resp.status_code,
status_code=resp.status_code)
try:
rv = json.loads(resp.content)
except Exception as e:
raise exc.MalformedResponseBody(reason="JSON unreadable: %s on %s"
% (e.message, resp.content))
return rv
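def _example_usage():
    # Illustrative sketch only: the endpoint URL and payload below are made up,
    # and the backend argument is whatever object the caller normally supplies
    # (the Client only stores it). This helper is not called anywhere by default.
    client = Client(backend=None)
    created = client.do_post('http://127.0.0.1:2704/proton/examples', {'name': 'demo'})
    return client.json_get('http://127.0.0.1:2704/proton/examples/%s'
                           % created.get('id'))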
| apache-2.0 | -5,607,964,346,719,619,000 | 37.915254 | 79 | 0.490854 | false |
digitalocean/netbox | netbox/tenancy/filters.py | 1 | 2787 | import django_filters
from django.db.models import Q
from extras.filters import CustomFieldModelFilterSet, CreatedUpdatedFilterSet
from utilities.filters import BaseFilterSet, NameSlugSearchFilterSet, TagFilter, TreeNodeMultipleChoiceFilter
from .models import Tenant, TenantGroup
__all__ = (
'TenancyFilterSet',
'TenantFilterSet',
'TenantGroupFilterSet',
)
class TenantGroupFilterSet(BaseFilterSet, NameSlugSearchFilterSet):
parent_id = django_filters.ModelMultipleChoiceFilter(
queryset=TenantGroup.objects.all(),
label='Tenant group (ID)',
)
parent = django_filters.ModelMultipleChoiceFilter(
field_name='parent__slug',
queryset=TenantGroup.objects.all(),
to_field_name='slug',
        label='Tenant group (slug)',
)
class Meta:
model = TenantGroup
fields = ['id', 'name', 'slug', 'description']
class TenantFilterSet(BaseFilterSet, CustomFieldModelFilterSet, CreatedUpdatedFilterSet):
q = django_filters.CharFilter(
method='search',
label='Search',
)
group_id = TreeNodeMultipleChoiceFilter(
queryset=TenantGroup.objects.all(),
field_name='group',
lookup_expr='in',
label='Tenant group (ID)',
)
group = TreeNodeMultipleChoiceFilter(
queryset=TenantGroup.objects.all(),
field_name='group',
lookup_expr='in',
to_field_name='slug',
label='Tenant group (slug)',
)
tag = TagFilter()
class Meta:
model = Tenant
fields = ['id', 'name', 'slug']
def search(self, queryset, name, value):
if not value.strip():
return queryset
return queryset.filter(
Q(name__icontains=value) |
Q(slug__icontains=value) |
Q(description__icontains=value) |
Q(comments__icontains=value)
)
class TenancyFilterSet(django_filters.FilterSet):
"""
An inheritable FilterSet for models which support Tenant assignment.
"""
tenant_group_id = TreeNodeMultipleChoiceFilter(
queryset=TenantGroup.objects.all(),
field_name='tenant__group',
lookup_expr='in',
label='Tenant Group (ID)',
)
tenant_group = TreeNodeMultipleChoiceFilter(
queryset=TenantGroup.objects.all(),
field_name='tenant__group',
to_field_name='slug',
lookup_expr='in',
label='Tenant Group (slug)',
)
tenant_id = django_filters.ModelMultipleChoiceFilter(
queryset=Tenant.objects.all(),
label='Tenant (ID)',
)
tenant = django_filters.ModelMultipleChoiceFilter(
queryset=Tenant.objects.all(),
field_name='tenant__slug',
to_field_name='slug',
label='Tenant (slug)',
)
| apache-2.0 | 1,279,330,868,893,995,500 | 28.648936 | 109 | 0.634733 | false |
NetApp/manila | manila/tests/api/fakes.py | 1 | 9038 | # Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo_utils import timeutils
import routes
import webob
import webob.dec
import webob.request
from manila.api import common as api_common
from manila.api.middleware import auth
from manila.api.middleware import fault
from manila.api.openstack import api_version_request as api_version
from manila.api.openstack import wsgi as os_wsgi
from manila.api import urlmap
from manila.api.v1 import limits
from manila.api.v1 import router as router_v1
from manila.api.v2 import router as router_v2
from manila.api import versions
from manila.common import constants
from manila import context
from manila import exception
from manila import wsgi
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
FAKE_UUIDS = {}
class Context(object):
pass
class FakeRouter(wsgi.Router):
def __init__(self, ext_mgr=None):
pass
@webob.dec.wsgify
def __call__(self, req):
res = webob.Response()
res.status = '200'
res.headers['X-Test-Success'] = 'True'
return res
@webob.dec.wsgify
def fake_wsgi(self, req):
return self.application
def wsgi_app(inner_app_v2=None, fake_auth=True, fake_auth_context=None,
use_no_auth=False, ext_mgr=None):
if not inner_app_v2:
inner_app_v2 = router_v2.APIRouter(ext_mgr)
if fake_auth:
if fake_auth_context is not None:
ctxt = fake_auth_context
else:
ctxt = context.RequestContext('fake', 'fake', auth_token=True)
api_v2 = fault.FaultWrapper(auth.InjectContext(ctxt,
inner_app_v2))
elif use_no_auth:
api_v2 = fault.FaultWrapper(auth.NoAuthMiddleware(
limits.RateLimitingMiddleware(inner_app_v2)))
else:
api_v2 = fault.FaultWrapper(auth.AuthMiddleware(
limits.RateLimitingMiddleware(inner_app_v2)))
mapper = urlmap.URLMap()
mapper['/v2'] = api_v2
mapper['/'] = fault.FaultWrapper(versions.Versions())
return mapper
class FakeToken(object):
id_count = 0
def __getitem__(self, key):
return getattr(self, key)
def __init__(self, **kwargs):
FakeToken.id_count += 1
self.id = FakeToken.id_count
for k, v in kwargs.items():
setattr(self, k, v)
class FakeRequestContext(context.RequestContext):
def __init__(self, *args, **kwargs):
kwargs['auth_token'] = kwargs.get('auth_token', 'fake_auth_token')
super(FakeRequestContext, self).__init__(*args, **kwargs)
class HTTPRequest(os_wsgi.Request):
@classmethod
def blank(cls, *args, **kwargs):
if not kwargs.get('base_url'):
kwargs['base_url'] = 'http://localhost/v1'
use_admin_context = kwargs.pop('use_admin_context', False)
version = kwargs.pop('version', api_version.DEFAULT_API_VERSION)
experimental = kwargs.pop('experimental', False)
out = os_wsgi.Request.blank(*args, **kwargs)
out.environ['manila.context'] = FakeRequestContext(
'fake_user',
'fake',
is_admin=use_admin_context)
out.api_version_request = api_version.APIVersionRequest(
version, experimental=experimental)
return out
class TestRouter(wsgi.Router):
def __init__(self, controller):
mapper = routes.Mapper()
mapper.resource("test", "tests",
controller=os_wsgi.Resource(controller))
super(TestRouter, self).__init__(mapper)
class FakeAuthDatabase(object):
data = {}
@staticmethod
def auth_token_get(context, token_hash):
return FakeAuthDatabase.data.get(token_hash, None)
@staticmethod
def auth_token_create(context, token):
fake_token = FakeToken(created_at=timeutils.utcnow(), **token)
FakeAuthDatabase.data[fake_token.token_hash] = fake_token
FakeAuthDatabase.data['id_%i' % fake_token.id] = fake_token
return fake_token
@staticmethod
def auth_token_destroy(context, token_id):
token = FakeAuthDatabase.data.get('id_%i' % token_id)
if token and token.token_hash in FakeAuthDatabase.data:
del FakeAuthDatabase.data[token.token_hash]
del FakeAuthDatabase.data['id_%i' % token_id]
class FakeRateLimiter(object):
def __init__(self, application):
self.application = application
@webob.dec.wsgify
def __call__(self, req):
return self.application
def get_fake_uuid(token=0):
if token not in FAKE_UUIDS:
FAKE_UUIDS[token] = str(uuid.uuid4())
return FAKE_UUIDS[token]
def app():
"""API application.
No auth, just let environ['manila.context'] pass through.
"""
mapper = urlmap.URLMap()
mapper['/v1'] = router_v1.APIRouter()
mapper['/v2'] = router_v2.APIRouter()
return mapper
fixture_reset_status_with_different_roles_v1 = (
{
'role': 'admin',
'valid_code': 202,
'valid_status': constants.STATUS_ERROR,
},
{
'role': 'member',
'valid_code': 403,
'valid_status': constants.STATUS_AVAILABLE,
},
)
fixture_reset_status_with_different_roles = (
{
'role': 'admin',
'valid_code': 202,
'valid_status': constants.STATUS_ERROR,
'version': '2.6',
},
{
'role': 'admin',
'valid_code': 202,
'valid_status': constants.STATUS_ERROR,
'version': '2.7',
},
{
'role': 'member',
'valid_code': 403,
'valid_status': constants.STATUS_AVAILABLE,
'version': '2.6',
},
{
'role': 'member',
'valid_code': 403,
'valid_status': constants.STATUS_AVAILABLE,
'version': '2.7',
},
)
fixture_reset_replica_status_with_different_roles = (
{
'role': 'admin',
'valid_code': 202,
'valid_status': constants.STATUS_ERROR,
},
{
'role': 'member',
'valid_code': 403,
'valid_status': constants.STATUS_AVAILABLE,
},
)
fixture_reset_replica_state_with_different_roles = (
{
'role': 'admin',
'valid_code': 202,
'valid_status': constants.REPLICA_STATE_ACTIVE,
},
{
'role': 'admin',
'valid_code': 202,
'valid_status': constants.REPLICA_STATE_OUT_OF_SYNC,
},
{
'role': 'admin',
'valid_code': 202,
'valid_status': constants.REPLICA_STATE_IN_SYNC,
},
{
'role': 'admin',
'valid_code': 202,
'valid_status': constants.STATUS_ERROR,
},
{
'role': 'member',
'valid_code': 403,
'valid_status': constants.REPLICA_STATE_IN_SYNC,
},
)
fixture_force_delete_with_different_roles = (
{'role': 'admin', 'resp_code': 202, 'version': '2.6'},
{'role': 'admin', 'resp_code': 202, 'version': '2.7'},
{'role': 'member', 'resp_code': 403, 'version': '2.6'},
{'role': 'member', 'resp_code': 403, 'version': '2.7'},
)
fixture_invalid_reset_status_body = (
{'os-reset_status': {'x-status': 'bad'}},
{'os-reset_status': {'status': 'invalid'}}
)
def mock_fake_admin_check(context, resource_name, action, *args, **kwargs):
if context.is_admin:
return
else:
raise exception.PolicyNotAuthorized(action=action)
class FakeResourceViewBuilder(api_common.ViewBuilder):
_collection_name = 'fake_resource'
_detail_version_modifiers = [
"add_field_xyzzy",
"add_field_spoon_for_admins",
"remove_field_foo",
]
def view(self, req, resource):
keys = ('id', 'foo', 'fred', 'alice')
resource_dict = {key: resource.get(key) for key in keys}
self.update_versioned_resource_dict(req, resource_dict, resource)
return resource_dict
@api_common.ViewBuilder.versioned_method("1.41")
def add_field_xyzzy(self, context, resource_dict, resource):
resource_dict['xyzzy'] = resource.get('xyzzy')
@api_common.ViewBuilder.versioned_method("1.6")
def add_field_spoon_for_admins(self, context, resource_dict, resource):
if context.is_admin:
resource_dict['spoon'] = resource.get('spoon')
@api_common.ViewBuilder.versioned_method("3.14")
def remove_field_foo(self, context, resource_dict, resource):
resource_dict.pop('foo', None)
| apache-2.0 | -2,230,026,258,513,124,000 | 27.511041 | 79 | 0.609648 | false |
Tjorriemorrie/twurl | gae/src/views.py | 1 | 16080 | from src import app
from flask import request, render_template, json, Response, abort, redirect
from models import User, Tweet, Link, UserLink
import urllib
import base64
from google.appengine.api import urlfetch, taskqueue, mail
import datetime
import math
from flask.ext.jsontools import jsonapi
import urlparse
import oauth2 as oauth
@app.route('/')
def index():
token = obtainRequestToken()
params = {
}
app.logger.info('index: {}'.format(params))
return render_template('index.html', **params)
def obtainRequestToken():
app.logger.info('Obtaining request token')
app.logger.info('Creating oauth consumer...')
consumer = oauth.Consumer(app.config['CONSUMER_KEY'], app.config['CONSUMER_SECRET'])
app.logger.info('Creating oauth client...')
client = oauth.Client(consumer)
app.logger.info('Requesting token from twitter...')
resp, content = client.request(app.config['REQUEST_TOKEN_URL'], 'GET')
if resp['status'] != '200':
raise Exception("Invalid response %s." % resp['status'])
request_token = dict(urlparse.parse_qsl(content))
app.logger.info('Request token received: {}'.format(request_token))
return request_token
@app.route('/twitter_callback')
def twitterCallback():
form_data = request.form
app.logger.info('form_data: {}'.format(form_data))
return redirect('/')
###########################################
# USER
###########################################
@app.route('/user', methods=['GET', 'POST'])
@jsonapi
def user():
app.logger.info('formdata {}'.format(request.form))
email = request.form.get('email')
password = request.form.get('password')
# todo validation
user = User.authenticate(email, password)
# todo return proper token
return user.key.urlsafe()
@app.route('/user/main', methods=['GET', 'POST'])
@jsonapi
def userMain():
# todo ensure twurlie is topic if none
data = {}
# get user
app.logger.info('formdata {}'.format(request.form))
user_key = request.form.get('user_key')
user = User.fetchByKey(user_key)
if not user:
abort(403)
# get last userlink per topic
for topic in user.topics:
userLink = UserLink.findLastByUser(topic, user)
if userLink:
data[topic] = {
'key': userLink.key.urlsafe(),
'link_id': userLink.link_id,
'tweeted_count': userLink.tweeted_count,
'priority': userLink.priority,
'read_at': hasattr(userLink, 'read_at') and userLink.read_at
}
else:
data[topic] = None
return data
@app.route('/user/read', methods=['GET', 'POST'])
@jsonapi
def userRead():
# get user
app.logger.info('formdata {}'.format(request.form))
user_key = request.form.get('user_key')
user = User.fetchByKey(user_key)
if not user:
abort(403)
# mark last link
topic = request.form.get('topic')
userLink = UserLink.readLastByUser(topic, user)
return userLink.read_at
@app.route('/topic/create')
def topicCreate():
user = User.query(User.email == '[email protected]').get()
topics = ['python', 'html5']
user.topics = topics
params = {
'user': user,
'topics': topics,
}
user.put()
app.logger.info('topicCreate: {}'.format(params))
return render_template('base.html', **params)
###########################################
# LINKS SCHEDULING
###########################################
# NB this is run first before quota is filled
# 1st create task queue for every topic
# 2nd link every user's every topic
@app.route('/cron/schedule/links', methods=['GET', 'POST'])
def scheduleLinks():
''' Run this after quota reset '''
# get topics
user_topics = User.query(projection=[User.topics], distinct=True).fetch()
topics = [user.topics[0] for user in user_topics]
app.logger.info('Topics fetched: {}'.format(topics))
for topic in topics:
taskqueue.add(url='/cron/schedule/link', params={'topic': topic})
app.logger.info('Created push queue to schedule link for {}'.format(topic))
mail.send_mail(
sender='[email protected]',
to='[email protected]',
subject='Schedule Links',
body='All {} topics pushed'.format(len(topics)),
)
app.logger.info('All {} topics pushed'.format(len(topics)))
return Response('OK')
@app.route('/cron/schedule/link', methods=['GET', 'POST'])
def scheduleLink():
if request.method == 'POST':
app.logger.info('request form: {}'.format(request.form))
topic = request.form.get('topic')
elif request.method == 'GET':
app.logger.info('request args: {}'.format(request.args))
topic = request.args.get('topic')
if not topic:
abort(400)
app.logger.info('Topic param received: {}'.format(topic))
# get users by topic
users = User.fetchByTopic(topic)
# get ordered links by topic
# two inequality filters not supported
week_ago = datetime.datetime.utcnow() - datetime.timedelta(days=7)
links = Link.fetchByTopic(topic)
spamLinks = []
info = {}
# for every user
for user in users:
info[user.email] = None
# get last userlink:
# if not read => skip
# if too soon => skip
lastUserLink = UserLink.findLastByUser(topic, user)
if lastUserLink and not hasattr(lastUserLink, 'read'):
app.logger.info('User has unread UserLink')
continue
# then loop through ordered links
for link in links:
# skip links that has been spammed
# ignore links created before a week ago
# these links will go away since updated_at will keep renewing
if link.created_at < week_ago:
app.logger.debug('Skipping spam link: {}'.format(link.id))
spamLinks.append(link.id)
continue
# and assign first non-userlink to user
# note: search without topic:
# this gives unique link for a list of similar topics
if not UserLink.findByUserAndLink(user, link):
# assign new userlink to user for the topic
UserLink.create(topic, user, link)
info[user.email] = link.id
break
body = '\n'.join(['User {} got link {}'.format(userEmail, linkId) for userEmail, linkId in info.iteritems()])
    body += '\n' + '\n'.join(spamLinks)
mail.send_mail(
sender='[email protected]',
to='[email protected]',
subject='Schedule Link {}'.format(topic),
body=body,
)
app.logger.info('{} users got links'.format(len(info)))
return Response('OK')
###########################################
# SCRAPING TWITTER
###########################################
# NB this is run last as the quota will never be sufficient
# remember to set timeout on task queue so it does not carry over reset
# 1st is to create task queues for every topic
# 2nd remove expired tweets (hold about 1 month - depends on datastore size)
# 3rd delete expired urls/links (hold about 1 month (created between 7 days and 1 months is spam)
# 4th score the urls based on tweets and retweets
# 2nd link every user's every topic
@app.route('/cron/topics', methods=['GET', 'POST'])
def cronTopics():
# res = urlfetch.fetch(
# url='https://api.twitter.com/oauth2/token',
# payload='grant_type=client_credentials',
# method=urlfetch.POST,
# headers={
# 'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8',
# 'Authorization': 'Basic {}'.format(bearer_token_encoded),
# },
# )
# app.logger.info(res)
#
# data = json.loads(res.content)
# app.logger.info('Data: {}'.format(data))
# if 'errors' in data:
# error = data['errors'][0]
# raise Exception('[{} {}] {}'.format(error['code'], error['label'], error['message']))
# access_token = data['access_token']
# token_type = data['token_type']
# bearer_token = '{}:{}'.format(consumer_key, consumer_secret)
# app.logger.info('bearer token: {}'.format(bearer_token))
# bearer_token_encoded = base64.b64encode(bearer_token)
# app.logger.info('bearer token encoded: {}'.format(bearer_token_encoded))
access_token = 'AAAAAAAAAAAAAAAAAAAAABcJYAAAAAAAVviSzyKtPYqYlHpZxoim6DHvfjI%3DU0slNkvBKQRynT62gbvQjEhAlE2PvzVZNia99xAdoJweI2OLqe'
# get topics
user_topics = User.query(projection=[User.topics], distinct=True).fetch()
topics = [user.topics[0] for user in user_topics]
app.logger.info('Topics fetched: {}'.format(topics))
for topic in topics:
# get since ID
since_id = Tweet.since_id(topic)
params = {'topic': topic, 'since_id': since_id}
app.logger.info('Created push queue for {}'.format(params))
taskqueue.add(url='/cron/remove/tweets', params=params)
mail.send_mail(
sender='[email protected]',
to='[email protected]',
subject='Cron Topics',
body='All {} topics pushed'.format(len(topics)),
)
app.logger.info('All {} topics pushed'.format(len(topics)))
return Response('OK')
@app.route('/cron/remove/tweets', methods=['GET', 'POST'])
def removeTweets():
if request.method == 'POST':
app.logger.info('request form: {}'.format(request.form))
topic = request.form.get('topic')
elif request.method == 'GET':
app.logger.info('request args: {}'.format(request.args))
topic = request.args.get('topic')
if not topic:
abort(400)
app.logger.info('Topic param received: {}'.format(topic))
# delete old tweets (> 1 year)
cnt = Tweet.removeOld(datetime.datetime.utcnow() - datetime.timedelta(days=30), topic)
# continue with deleting urls
taskqueue.add(url='/cron/delete/urls', params={'topic': topic})
# mail.send_mail(
# sender='[email protected]',
# to='[email protected]',
# subject='Remove tweets {}'.format(topic),
# body='{} tweets deleted for topic {}'.format(cnt, topic),
# )
app.logger.info('{} tweets deleted for topic {}'.format(cnt, topic))
return Response('OK')
@app.route('/cron/delete/urls', methods=['GET', 'POST'])
def deleteUrls():
if request.method == 'POST':
app.logger.info('request form: {}'.format(request.form))
topic = request.form.get('topic')
elif request.method == 'GET':
app.logger.info('request args: {}'.format(request.args))
topic = request.args.get('topic')
if not topic:
abort(400)
app.logger.info('Topic param received: {}'.format(topic))
cnt = Link.removeOld(topic, datetime.datetime.utcnow() - datetime.timedelta(days=30))
# continue with scoring urls
taskqueue.add(url='/cron/score/urls', params={'topic': topic})
# mail.send_mail(
# sender='[email protected]',
# to='[email protected]',
# subject='Delete Urls {}'.format(topic),
# body='Removed {} links for topic {}'.format(cnt, topic),
# )
return Response('OK')
@app.route('/cron/score/urls', methods=['GET', 'POST'])
def scoreUrls():
if request.method == 'POST':
app.logger.info('request form: {}'.format(request.form))
topic = request.form.get('topic')
elif request.method == 'GET':
app.logger.info('request args: {}'.format(request.args))
topic = request.args.get('topic')
if not topic:
abort(400)
app.logger.info('Topic param received: {}'.format(topic))
tweets = Tweet.fetchByTopic(topic)
# group by url and add score
urlScores = {}
for tweet in tweets:
for url in tweet.urls:
if url not in urlScores:
urlScores[url] = {
'id': url,
'tweeted_count': 0,
'retweeted_sum': 0.,
'favorite_sum': 0.,
}
app.logger.debug('Url added: {}'.format(url))
urlScores[url]['tweeted_count'] += 1
urlScores[url]['retweeted_sum'] += math.log(max(1, tweet.retweet_count))
urlScores[url]['favorite_sum'] += math.log(max(1, tweet.favorite_count))
app.logger.info('All {} tweets parsed and found {} urls'.format(len(tweets), len(urlScores)))
app.logger.info('Saving urls...')
for url, url_info in urlScores.iteritems():
link = Link.create(topic, url, url_info)
# continue to scrape for new tweets
taskqueue.add(url='/cron/topic', params={'topic': topic})
app.logger.info('Task created to scrape for new tweets for {}'.format(topic))
mail.send_mail(
sender='[email protected]',
to='[email protected]',
subject='Score urls {}'.format(topic),
body='{} tweets created {} urls'.format(len(tweets), len(urlScores)),
)
app.logger.info('Scoring urls done for {}'.format(topic))
return Response('OK')
@app.route('/cron/topic', methods=['GET', 'POST'])
def cronTopic():
access_token = 'AAAAAAAAAAAAAAAAAAAAABcJYAAAAAAAVviSzyKtPYqYlHpZxoim6DHvfjI%3DU0slNkvBKQRynT62gbvQjEhAlE2PvzVZNia99xAdoJweI2OLqe'
if request.method == 'POST':
app.logger.info('request form: {}'.format(request.form))
topic = request.form.get('topic')
elif request.method == 'GET':
app.logger.info('request args: {}'.format(request.args))
topic = request.args.get('topic')
if not topic:
abort(400)
since_id = request.form.get('since_id')
app.logger.info('Topic params received: {} {}'.format(topic, since_id))
# Requests / 15-min window (user auth) 180
# Requests / 15-min window (app auth) 450
# 450 / (15 * 60) = 0.5 per second
# thus 1 request every 2 seconds
month_ago = datetime.datetime.utcnow() - datetime.timedelta(days=30)
day_ago = datetime.datetime.utcnow() - datetime.timedelta(days=1)
params = urllib.urlencode({
'q': 'filter:links since:{} until:{} #{} -filter:retweets'.format(
month_ago.strftime('%Y-%m-%d'),
day_ago.strftime('%Y-%m-%d'),
topic,
),
'result_type': 'recent',
'include_entities': 1,
'count': 100,
'since_id': since_id,
})
# count, until, since_id, max_id
app.logger.info('params {}'.format(params))
res = urlfetch.fetch(
url='https://api.twitter.com/1.1/search/tweets.json?{}'.format(params),
method=urlfetch.GET,
headers={
'Authorization': 'Bearer {}'.format(access_token),
},
)
app.logger.info(res)
cnt = 0
max_cnt = 90 if app.config['DEBUG'] else 1222333
while cnt < max_cnt:
content = json.loads(res.content)
metadata = content['search_metadata']
statuses = content['statuses']
# app.logger.info('Metadata: {}'.format(metadata))
# app.logger.info('Statuses: {}'.format(len(statuses)))
cnt += len(statuses)
for status in statuses:
app.logger.info('Processing status')
tweet = Tweet.create(topic, status)
if 'next_results' not in metadata:
app.logger.info('No more statuses')
break
else:
app.logger.info('Fetching more results at {}'.format(metadata['next_results']))
res = urlfetch.fetch(
url='{}{}'.format('https://api.twitter.com/1.1/search/tweets.json', metadata['next_results']),
method=urlfetch.GET,
headers={
'Authorization': 'Bearer {}'.format(access_token),
},
)
# continue with nothing, quota will be obliterated with this
mail.send_mail(
sender='[email protected]',
to='[email protected]',
subject='Cron topic {}'.format(topic),
body='Scraped {} tweets for topic {}'.format(cnt, topic),
)
app.logger.info('Scraped {} tweets for topic {}'.format(cnt, topic))
return Response('OK')
| apache-2.0 | -4,003,832,567,429,699,600 | 32.569937 | 133 | 0.600684 | false |
joelagnel/ns-3 | src/visualizer/visualizer/plugins/mesh.py | 1 | 6674 | import gtk
import ns3
from visualizer.base import InformationWindow
NODE_STATISTICS_MEMORY = 10
class StatisticsCollector(object):
"""
Collects interface statistics for all nodes.
"""
class NetDevStats(object):
__slots__ = ['rxPackets', 'rxBytes', 'txPackets', 'txBytes',
'rxPacketRate', 'rxBitRate', 'txPacketRate', 'txBitRate']
def __init__(self, visualizer):
self.node_statistics = {} # nodeid -> list(raw statistics)
self.visualizer = visualizer
def simulation_periodic_update(self, viz):
nodes_statistics = viz.simulation.sim_helper.GetNodesStatistics()
for stats in nodes_statistics:
try:
raw_stats_list = self.node_statistics[stats.nodeId]
except KeyError:
raw_stats_list = []
self.node_statistics[stats.nodeId] = raw_stats_list
raw_stats_list.append(stats.statistics)
while len(raw_stats_list) > NODE_STATISTICS_MEMORY:
raw_stats_list.pop(0)
def get_interface_statistics(self, nodeId):
try:
raw_stats_list = self.node_statistics[nodeId]
except KeyError:
return []
if len(raw_stats_list) < NODE_STATISTICS_MEMORY:
return []
assert len(raw_stats_list) == NODE_STATISTICS_MEMORY
tx_packets1 = [] # transmitted packets, one value per interface
rx_packets1 = []
tx_bytes1 = []
rx_bytes1 = []
for iface, stats in enumerate(raw_stats_list[0]):
tx_packets1.append(stats.transmittedPackets)
tx_bytes1.append(stats.transmittedBytes)
rx_packets1.append(stats.receivedPackets)
rx_bytes1.append(stats.receivedBytes)
retval = []
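        # k spans the sampling window (sample_period * (NODE_STATISTICS_MEMORY - 1)),
        # so dividing the deltas below by k yields average rates over that window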
k = self.visualizer.sample_period*(NODE_STATISTICS_MEMORY-1)
for iface, stats in enumerate(raw_stats_list[-1]):
outStat = self.NetDevStats()
outStat.txPackets = stats.transmittedPackets
outStat.txBytes = stats.transmittedBytes
outStat.rxPackets = stats.receivedPackets
outStat.rxBytes = stats.receivedBytes
outStat.txPacketRate = (stats.transmittedPackets - tx_packets1[iface])/k
outStat.rxPacketRate = (stats.receivedPackets - rx_packets1[iface])/k
outStat.txBitRate = (stats.transmittedBytes - tx_bytes1[iface])*8/k
outStat.rxBitRate = (stats.receivedBytes - rx_bytes1[iface])*8/k
retval.append(outStat)
return retval
class ShowInterfaceStatistics(InformationWindow):
(
COLUMN_INTERFACE,
COLUMN_TX_PACKETS,
COLUMN_TX_BYTES,
COLUMN_TX_PACKET_RATE,
COLUMN_TX_BIT_RATE,
COLUMN_RX_PACKETS,
COLUMN_RX_BYTES,
COLUMN_RX_PACKET_RATE,
COLUMN_RX_BIT_RATE,
) = range(9)
def __init__(self, visualizer, node_index, statistics_collector):
InformationWindow.__init__(self)
self.win = gtk.Dialog(parent=visualizer.window,
flags=gtk.DIALOG_DESTROY_WITH_PARENT|gtk.DIALOG_NO_SEPARATOR,
buttons=(gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE))
self.win.connect("response", self._response_cb)
self.win.set_title("Mesh Statistics for node %i" % node_index)
self.visualizer = visualizer
self.statistics_collector = statistics_collector
self.node_index = node_index
self.viz_node = visualizer.get_node(node_index)
self.table_model = gtk.ListStore(*([str]*13))
treeview = gtk.TreeView(self.table_model)
treeview.show()
self.win.vbox.add(treeview)
def add_column(descr, colid):
column = gtk.TreeViewColumn(descr, gtk.CellRendererText(), text=colid)
treeview.append_column(column)
add_column("Interface", self.COLUMN_INTERFACE)
add_column("Tx Packets", self.COLUMN_TX_PACKETS)
add_column("Tx Bytes", self.COLUMN_TX_BYTES)
add_column("Tx pkt/1s", self.COLUMN_TX_PACKET_RATE)
add_column("Tx bit/1s", self.COLUMN_TX_BIT_RATE)
add_column("Rx Packets", self.COLUMN_RX_PACKETS)
add_column("Rx Bytes", self.COLUMN_RX_BYTES)
add_column("Rx pkt/1s", self.COLUMN_RX_PACKET_RATE)
add_column("Rx bit/1s", self.COLUMN_RX_BIT_RATE)
self.visualizer.add_information_window(self)
self.win.show()
def _response_cb(self, win, response):
self.win.destroy()
self.visualizer.remove_information_window(self)
def update(self):
node = ns3.NodeList.GetNode(self.node_index)
stats_list = self.statistics_collector.get_interface_statistics(self.node_index)
self.table_model.clear()
for iface, stats in enumerate(stats_list):
tree_iter = self.table_model.append()
netdevice = node.GetDevice(iface)
interface_name = ns3.Names.FindName(netdevice)
if not interface_name:
interface_name = "(interface %i)" % iface
self.table_model.set(tree_iter,
self.COLUMN_INTERFACE, interface_name,
self.COLUMN_TX_PACKETS, str(stats.txPackets),
self.COLUMN_TX_BYTES, str(stats.txBytes),
self.COLUMN_TX_PACKET_RATE, str(stats.txPacketRate),
self.COLUMN_TX_BIT_RATE, str(stats.txBitRate),
self.COLUMN_RX_PACKETS, str(stats.rxPackets),
self.COLUMN_RX_BYTES, str(stats.rxBytes),
self.COLUMN_RX_PACKET_RATE, str(stats.rxPacketRate),
self.COLUMN_RX_BIT_RATE, str(stats.rxBitRate)
)
def populate_node_menu(viz, node, menu):
menu_item = gtk.MenuItem("Switch On")
menu_item.show()
def _show_it_on(dummy):
print "Switching on\n"
menu_item.connect("activate", _show_it_on)
menu.add(menu_item)
menu_item = gtk.MenuItem("Show Mesh Statistics")
menu_item.show()
def _show_it(dummy_menu_item):
ShowInterfaceStatistics(viz, node.node_index, statistics_collector)
menu_item.connect("activate", _show_it)
menu.add(menu_item)
def register(viz):
statistics_collector = StatisticsCollector(viz)
viz.connect("populate-node-menu", populate_node_menu)
viz.connect("simulation-periodic-update", statistics_collector.simulation_periodic_update)
| gpl-2.0 | -4,180,465,858,970,958,000 | 38.02924 | 94 | 0.596644 | false |
rmmariano/testejenkins | tests/global_imports.py | 1 | 2622 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from common import *
from os import path
from os import remove
from glob import glob
# Automatic imports
from gluon.cache import Cache
from gluon.globals import Request, Response, Session
from gluon.http import HTTP, redirect
from gluon.sql import DAL, Field, SQLDB
from gluon.sqlhtml import SQLFORM,SQLTABLE
from gluon.validators import *
from gluon.html import *
from gluon.globals import current
# fake/mock function for T
def m__T__(f):
return f
# fake/mock function for URL
def m__URL__(a='', c='', f='', r='', args='', vars='',
anchor='', extension='', env='', hmac_key='', hash_vars='',
salt='', user_signature='', scheme='', host='', port='',
encode_embedded_slash='', url_encode='', language=''):
lfoo=[a,c,f,r,args,vars,anchor,extension,env,hmac_key,hash_vars,
salt,user_signature,scheme,host,port,encode_embedded_slash,url_encode,language]
foo = 'http://'
for f in lfoo:
if f != '':
foo=foo+str(f)+'/'
return foo
# def IS_URL(error_message='Enter a valid URL', mode='http', allowed_schemes=None,
# prepend_scheme='http', allowed_tlds=None):
# pass
# fake/mock function for IS_URL
def m__IS_URL__(foo,**dfoo):
foo = str(foo)
if foo.startswith('http://') or foo.startswith('https://'):
return True
return False
current.request = request = None
current.response = response = None
current.session = session = None
current.cache = cache = None
current.T = T = None
def initVars():
global current, request, response, session, cache, T
current.request = request = Request()
current.response = response = Response()
current.session = session = Session()
current.cache = cache = Cache(request)
current.T = T = m__T__
initVars()
deleteDB()
db = DAL('sqlite://'+DB_PATH)
import gluon.tools as gt
from mock import Mock
gt.URL=Mock(side_effect=m__URL__)
crud = gt.Crud(db)
# # Some global web2py imports
# # Already done
# from gluon.cache import Cache
# from gluon.globals import Request
# from gluon.globals import Response
# from gluon.globals import Session
# request = Request() #request = Request({})
# cache = Cache() #cache = Cache(request)
# response = Response() #funciona sem parametro
# session = Session() #funciona sem parametro
# from gluon.html import *
# from gluon.http import HTTP
# from gluon.http import redirect
# from gluon.sql import DAL
# from gluon.sql import Field
# from gluon.sql import SQLDB
# from gluon.sqlhtml import SQLFORM
# from gluon.validators import *
# # These raise errors
# import gluon.languages.translator as T #error
# from gluon.contrib.gql import GQLDB #error | mit | 7,813,400,860,123,454,000 | 23.439252 | 83 | 0.695103 | false |
syntheticpp/lyx | lib/lyx2lyx/lyx2lyx_tools.py | 1 | 19009 | # This file is part of lyx2lyx
# -*- coding: utf-8 -*-
# Copyright (C) 2011 The LyX team
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
'''
This module offers several free functions to help with lyx2lyx'ing.
More documentation is below, but here is a quick guide to what
they do. Optional arguments are marked by brackets.
add_to_preamble(document, text):
Here, text can be either a single line or a list of lines. It
is bad practice to pass something with embedded newlines, but
we will handle that properly.
The routine checks to see whether the provided material is
already in the preamble. If not, it adds it.
Prepends a comment "% Added by lyx2lyx" to text.
insert_to_preamble(document, text[, index]):
Here, text can be either a single line or a list of lines. It
is bad practice to pass something with embedded newlines, but
we will handle that properly.
The routine inserts text at document.preamble[index], where by
default index is 0, so the material is inserted at the beginning.
Prepends a comment "% Added by lyx2lyx" to text.
put_cmd_in_ert(arg):
Here arg should be a list of strings (lines), which we want to
wrap in ERT. Returns a list of strings so wrapped.
A call to this routine will often go something like this:
i = find_token('\\begin_inset FunkyInset', ...)
j = find_end_of_inset(document.body, i)
content = lyx2latex(document[i:j + 1])
ert = put_cmd_in_ert(content)
document.body[i:j+1] = ert
get_ert(lines, i[, verbatim]):
Here, lines is a list of lines of LyX material containing an ERT inset,
whose content we want to convert to LaTeX. The ERT starts at index i.
If the optional (by default: False) bool verbatim is True, the content
of the ERT is returned verbatim, that is in LyX syntax (not LaTeX syntax)
for the use in verbatim insets.
lyx2latex(document, lines):
Here, lines is a list of lines of LyX material we want to convert
to LaTeX. We do the best we can and return a string containing
the translated material.
lyx2verbatim(document, lines):
Here, lines is a list of lines of LyX material we want to convert
    to verbatim material (used in ERT and the like). We do the best we
can and return a string containing the translated material.
latex_length(slen):
Convert lengths (in LyX form) to their LaTeX representation. Returns
(bool, length), where the bool tells us if it was a percentage, and
the length is the LaTeX representation.
convert_info_insets(document, type, func):
Applies func to the argument of all info insets matching certain types
type : the type to match. This can be a regular expression.
func : function from string to string to apply to the "arg" field of
the info insets.
'''
import re
import string
from parser_tools import find_token, find_end_of_inset
from unicode_symbols import unicode_reps
# This will accept either a list of lines or a single line.
# It is bad practice to pass something with embedded newlines,
# though we will handle that.
def add_to_preamble(document, text):
" Add text to the preamble if it is not already there. "
if not type(text) is list:
# split on \n just in case
# it'll give us the one element list we want
# if there's no \n, too
text = text.split('\n')
i = 0
prelen = len(document.preamble)
while True:
i = find_token(document.preamble, text[0], i)
if i == -1:
break
# we need a perfect match
matched = True
for line in text:
if i >= prelen or line != document.preamble[i]:
matched = False
break
i += 1
if matched:
return
document.preamble.extend(["% Added by lyx2lyx"])
document.preamble.extend(text)
# Note that text can be either a list of lines or a single line.
# It should really be a list.
def insert_to_preamble(document, text, index = 0):
""" Insert text to the preamble at a given line"""
if not type(text) is list:
# split on \n just in case
# it'll give us the one element list we want
# if there's no \n, too
text = text.split('\n')
text.insert(0, "% Added by lyx2lyx")
document.preamble[index:index] = text
def put_cmd_in_ert(arg):
'''
arg should be a list of lines we want to wrap in ERT.
Returns a list of strings, with the lines so wrapped.
'''
ret = ["\\begin_inset ERT", "status collapsed", "", "\\begin_layout Plain Layout", ""]
# It will be faster for us to work with a single string internally.
# That way, we only go through the unicode_reps loop once.
if type(arg) is list:
s = "\n".join(arg)
else:
s = arg
for rep in unicode_reps:
s = s.replace(rep[1], rep[0])
s = s.replace('\\', "\\backslash\n")
ret += s.splitlines()
ret += ["\\end_layout", "", "\\end_inset"]
return ret
def get_ert(lines, i, verbatim = False):
'Convert an ERT inset into LaTeX.'
if not lines[i].startswith("\\begin_inset ERT"):
return ""
j = find_end_of_inset(lines, i)
if j == -1:
return ""
while i < j and not lines[i].startswith("status"):
i = i + 1
i = i + 1
ret = ""
first = True
while i < j:
if lines[i] == "\\begin_layout Plain Layout":
if first:
first = False
else:
ret = ret + "\n"
while i + 1 < j and lines[i+1] == "":
i = i + 1
elif lines[i] == "\\end_layout":
while i + 1 < j and lines[i+1] == "":
i = i + 1
elif lines[i] == "\\backslash":
if verbatim:
ret = ret + "\n" + lines[i] + "\n"
else:
ret = ret + "\\"
else:
ret = ret + lines[i]
i = i + 1
return ret
def lyx2latex(document, lines):
'Convert some LyX stuff into corresponding LaTeX stuff, as best we can.'
content = ""
ert_end = 0
note_end = 0
hspace = ""
for curline in range(len(lines)):
line = lines[curline]
if line.startswith("\\begin_inset Note Note"):
# We want to skip LyX notes, so remember where the inset ends
note_end = find_end_of_inset(lines, curline + 1)
continue
elif note_end >= curline:
# Skip LyX notes
continue
elif line.startswith("\\begin_inset ERT"):
# We don't want to replace things inside ERT, so figure out
# where the end of the inset is.
ert_end = find_end_of_inset(lines, curline + 1)
continue
elif line.startswith("\\begin_inset Formula"):
line = line[20:]
elif line.startswith("\\begin_inset Quotes"):
# For now, we do a very basic reversion. Someone who understands
# quotes is welcome to fix it up.
qtype = line[20:].strip()
# lang = qtype[0]
side = qtype[1]
dbls = qtype[2]
if side == "l":
if dbls == "d":
line = "``"
else:
line = "`"
else:
if dbls == "d":
line = "''"
else:
line = "'"
elif line.startswith("\\begin_inset Newline newline"):
line = "\\\\ "
elif line.startswith("\\noindent"):
line = "\\noindent " # we need the space behind the command
elif line.startswith("\\begin_inset space"):
line = line[18:].strip()
if line.startswith("\\hspace"):
# Account for both \hspace and \hspace*
hspace = line[:-2]
continue
elif line == "\\space{}":
line = "\\ "
elif line == "\\thinspace{}":
line = "\\,"
elif hspace != "":
# The LyX length is in line[8:], after the \length keyword
length = latex_length(line[8:])[1]
line = hspace + "{" + length + "}"
hspace = ""
elif line.isspace() or \
line.startswith("\\begin_layout") or \
line.startswith("\\end_layout") or \
line.startswith("\\begin_inset") or \
line.startswith("\\end_inset") or \
line.startswith("\\lang") or \
line.strip() == "status collapsed" or \
line.strip() == "status open":
#skip all that stuff
continue
# this needs to be added to the preamble because of cases like
# \textmu, \textbackslash, etc.
add_to_preamble(document, ['% added by lyx2lyx for converted index entries',
'\\@ifundefined{textmu}',
' {\\usepackage{textcomp}}{}'])
# a lossless reversion is not possible
# try at least to handle some common insets and settings
if ert_end >= curline:
line = line.replace(r'\backslash', '\\')
else:
# No need to add "{}" after single-nonletter macros
line = line.replace('&', '\\&')
line = line.replace('#', '\\#')
line = line.replace('^', '\\textasciicircum{}')
line = line.replace('%', '\\%')
line = line.replace('_', '\\_')
line = line.replace('$', '\\$')
# Do the LyX text --> LaTeX conversion
for rep in unicode_reps:
line = line.replace(rep[1], rep[0])
line = line.replace(r'\backslash', r'\textbackslash{}')
line = line.replace(r'\series bold', r'\bfseries{}').replace(r'\series default', r'\mdseries{}')
line = line.replace(r'\shape italic', r'\itshape{}').replace(r'\shape smallcaps', r'\scshape{}')
line = line.replace(r'\shape slanted', r'\slshape{}').replace(r'\shape default', r'\upshape{}')
line = line.replace(r'\emph on', r'\em{}').replace(r'\emph default', r'\em{}')
line = line.replace(r'\noun on', r'\scshape{}').replace(r'\noun default', r'\upshape{}')
line = line.replace(r'\bar under', r'\underbar{').replace(r'\bar default', r'}')
line = line.replace(r'\family sans', r'\sffamily{}').replace(r'\family default', r'\normalfont{}')
line = line.replace(r'\family typewriter', r'\ttfamily{}').replace(r'\family roman', r'\rmfamily{}')
line = line.replace(r'\InsetSpace ', r'').replace(r'\SpecialChar ', r'')
content += line
return content
def lyx2verbatim(document, lines):
'Convert some LyX stuff into corresponding verbatim stuff, as best we can.'
content = lyx2latex(document, lines)
content = re.sub(r'\\(?!backslash)', r'\n\\backslash\n', content)
return content
def latex_length(slen):
'''
Convert lengths to their LaTeX representation. Returns (bool, length),
where the bool tells us if it was a percentage, and the length is the
LaTeX representation.
'''
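    # e.g. latex_length("2.5col%") -> (True, "0.025\columnwidth"),
    # while an absolute length like "2cm" comes back as (False, "2cm")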
i = 0
percent = False
# the slen has the form
# ValueUnit+ValueUnit-ValueUnit or
# ValueUnit+-ValueUnit
# the + and - (glue lengths) are optional
# the + always precedes the -
# Convert relative lengths to LaTeX units
units = {"text%":"\\textwidth", "col%":"\\columnwidth",
"page%":"\\paperwidth", "line%":"\\linewidth",
"theight%":"\\textheight", "pheight%":"\\paperheight"}
for unit in list(units.keys()):
i = slen.find(unit)
if i == -1:
continue
percent = True
minus = slen.rfind("-", 1, i)
plus = slen.rfind("+", 0, i)
latex_unit = units[unit]
if plus == -1 and minus == -1:
value = slen[:i]
value = str(float(value)/100)
end = slen[i + len(unit):]
slen = value + latex_unit + end
if plus > minus:
value = slen[plus + 1:i]
value = str(float(value)/100)
begin = slen[:plus + 1]
end = slen[i+len(unit):]
slen = begin + value + latex_unit + end
if plus < minus:
value = slen[minus + 1:i]
value = str(float(value)/100)
begin = slen[:minus + 1]
slen = begin + value + latex_unit
# replace + and -, but only if the - is not the first character
slen = slen[0] + slen[1:].replace("+", " plus ").replace("-", " minus ")
# handle the case where "+-1mm" was used, because LaTeX only understands
# "plus 1mm minus 1mm"
if slen.find("plus minus"):
lastvaluepos = slen.rfind(" ")
lastvalue = slen[lastvaluepos:]
slen = slen.replace(" ", lastvalue + " ")
return (percent, slen)
def length_in_bp(length):
" Convert a length in LyX format to its value in bp units "
em_width = 10.0 / 72.27 # assume 10pt font size
text_width = 8.27 / 1.7 # assume A4 with default margins
# scale factors are taken from Length::inInch()
scales = {"bp" : 1.0,
"cc" : (72.0 / (72.27 / (12.0 * 0.376 * 2.845))),
"cm" : (72.0 / 2.54),
"dd" : (72.0 / (72.27 / (0.376 * 2.845))),
"em" : (72.0 * em_width),
"ex" : (72.0 * em_width * 0.4305),
"in" : 72.0,
"mm" : (72.0 / 25.4),
"mu" : (72.0 * em_width / 18.0),
"pc" : (72.0 / (72.27 / 12.0)),
"pt" : (72.0 / (72.27)),
"sp" : (72.0 / (72.27 * 65536.0)),
"text%" : (72.0 * text_width / 100.0),
"col%" : (72.0 * text_width / 100.0), # assume 1 column
"page%" : (72.0 * text_width * 1.7 / 100.0),
"line%" : (72.0 * text_width / 100.0),
"theight%" : (72.0 * text_width * 1.787 / 100.0),
"pheight%" : (72.0 * text_width * 2.2 / 100.0)}
rx = re.compile(r'^\s*([^a-zA-Z%]+)([a-zA-Z%]+)\s*$')
m = rx.match(length)
if not m:
document.warning("Invalid length value: " + length + ".")
return 0
value = m.group(1)
unit = m.group(2)
if not unit in scales.keys():
document.warning("Unknown length unit: " + unit + ".")
return value
return "%g" % (float(value) * scales[unit])
def revert_flex_inset(lines, name, LaTeXname):
" Convert flex insets to TeX code "
i = 0
while True:
i = find_token(lines, '\\begin_inset Flex ' + name, i)
if i == -1:
return
z = find_end_of_inset(lines, i)
if z == -1:
document.warning("Can't find end of Flex " + name + " inset.")
i += 1
continue
# remove the \end_inset
lines[z - 2:z + 1] = put_cmd_in_ert("}")
# we need to reset character layouts if necessary
j = find_token(lines, '\\emph on', i, z)
k = find_token(lines, '\\noun on', i, z)
l = find_token(lines, '\\series', i, z)
m = find_token(lines, '\\family', i, z)
n = find_token(lines, '\\shape', i, z)
o = find_token(lines, '\\color', i, z)
p = find_token(lines, '\\size', i, z)
q = find_token(lines, '\\bar under', i, z)
r = find_token(lines, '\\uuline on', i, z)
s = find_token(lines, '\\uwave on', i, z)
t = find_token(lines, '\\strikeout on', i, z)
if j != -1:
lines.insert(z - 2, "\\emph default")
if k != -1:
lines.insert(z - 2, "\\noun default")
if l != -1:
lines.insert(z - 2, "\\series default")
if m != -1:
lines.insert(z - 2, "\\family default")
if n != -1:
lines.insert(z - 2, "\\shape default")
if o != -1:
lines.insert(z - 2, "\\color inherit")
if p != -1:
lines.insert(z - 2, "\\size default")
if q != -1:
lines.insert(z - 2, "\\bar default")
if r != -1:
lines.insert(z - 2, "\\uuline default")
if s != -1:
lines.insert(z - 2, "\\uwave default")
if t != -1:
lines.insert(z - 2, "\\strikeout default")
lines[i:i + 4] = put_cmd_in_ert(LaTeXname + "{")
i += 1
def revert_font_attrs(lines, name, LaTeXname):
" Reverts font changes to TeX code "
i = 0
changed = False
while True:
i = find_token(lines, name + ' on', i)
if i == -1:
return changed
j = find_token(lines, name + ' default', i)
k = find_token(lines, name + ' on', i + 1)
# if there is no default set, the style ends with the layout
# assure hereby that we found the correct layout end
if j != -1 and (j < k or k == -1):
lines[j:j + 1] = put_cmd_in_ert("}")
else:
j = find_token(lines, '\\end_layout', i)
lines[j:j] = put_cmd_in_ert("}")
lines[i:i + 1] = put_cmd_in_ert(LaTeXname + "{")
changed = True
i += 1
def revert_layout_command(lines, name, LaTeXname):
" Reverts a command from a layout to TeX code "
i = 0
while True:
i = find_token(lines, '\\begin_layout ' + name, i)
if i == -1:
return
k = -1
# find the next layout
j = i + 1
while k == -1:
j = find_token(lines, '\\begin_layout', j)
l = len(lines)
# if nothing was found it was the last layout of the document
if j == -1:
lines[l - 4:l - 4] = put_cmd_in_ert("}")
k = 0
# exclude plain layout because this can be TeX code or another inset
elif lines[j] != '\\begin_layout Plain Layout':
lines[j - 2:j - 2] = put_cmd_in_ert("}")
k = 0
else:
j += 1
lines[i] = '\\begin_layout Standard'
lines[i + 1:i + 1] = put_cmd_in_ert(LaTeXname + "{")
i += 1
def hex2ratio(s):
" Converts an RRGGBB-type hexadecimal string to a float in [0.0,1.0] "
try:
val = int(s, 16)
except:
val = 0
if val != 0:
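        # bump nonzero values so that 0xFF maps to 1.0 instead of 255/256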
val += 1
return str(val / 256.0)
def str2bool(s):
"'true' goes to True, case-insensitively, and we strip whitespace."
s = s.strip().lower()
return s == "true"
def convert_info_insets(document, type, func):
"Convert info insets matching type using func."
i = 0
type_re = re.compile(r'^type\s+"(%s)"$' % type)
arg_re = re.compile(r'^arg\s+"(.*)"$')
while True:
i = find_token(document.body, "\\begin_inset Info", i)
if i == -1:
return
t = type_re.match(document.body[i + 1])
if t:
arg = arg_re.match(document.body[i + 2])
if arg:
new_arg = func(arg.group(1))
document.body[i + 2] = 'arg "%s"' % new_arg
i += 3
| gpl-2.0 | -5,966,020,858,891,533,000 | 35.001894 | 110 | 0.559209 | false |
yoseforb/lollypop | src/define.py | 1 | 2679 | #!/usr/bin/python
# Copyright (c) 2014-2015 Cedric Bellegarde <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This is global object initialised at lollypop start
# member init order is important!
try:
from gi.repository import Secret
SecretSchema = {
"org.gnome.Lollypop.lastfm.login": Secret.SchemaAttributeType.STRING
}
SecretAttributes = {
"org.gnome.Lollypop.lastfm.login": "Last.fm login"
}
except:
Secret = None
SecretSchema = None
SecretAttributes = None
GOOGLE_INC = 8
GOOGLE_MAX = 100
class Lp:
settings = None
db = None
sql = None
albums = None
artists = None
genres = None
tracks = None
playlists = None
player = None
art = None
window = None
notify = None
lastfm = None
debug = False
# Represent what to do on next track
class NextContext:
NONE = 0 # Continue playback
STOP_TRACK = 1 # Stop after current track
STOP_ALBUM = 2 # Stop after current album
STOP_ARTIST = 3 # Stop after current artist
START_NEW_ALBUM = 4 # Start a new album
# Represent playback context
class PlayContext:
genre_id = None
next = NextContext.NONE
class GstPlayFlags:
GST_PLAY_FLAG_VIDEO = 1 << 0 # We want video output
GST_PLAY_FLAG_AUDIO = 1 << 1 # We want audio output
GST_PLAY_FLAG_TEXT = 1 << 3 # We want subtitle output
class ArtSize:
SMALL_RADIUS = 2
RADIUS = 3
SMALL_BORDER = 1
BORDER = 3
SMALL = 32
MEDIUM = 48
BIG = 200
MONSTER = 500
class Shuffle:
NONE = 0 # No shuffle
TRACKS = 1 # Shuffle by tracks on genre
ALBUMS = 2 # Shuffle by albums on genre
TRACKS_ARTIST = 3 # Shuffle by tracks on artist
ALBUMS_ARTIST = 4 # Shuffle by albums on artist
# Order is important
class Type:
NONE = -1
POPULARS = -2
RANDOMS = -3
RECENTS = -4
PLAYLISTS = -5
RADIOS = -6
EXTERNALS = -7
ALL = -8
COMPILATIONS = -999
DEVICES = -1000
SEPARATOR = -2000
| gpl-3.0 | -5,321,358,665,882,580,000 | 24.759615 | 76 | 0.651362 | false |
openqt/algorithms | projecteuler/ac/old/pe051_prime_digit_replacements.py | 1 | 3267 | #!/usr/bin/env python
# coding=utf-8
"""
Prime digit replacements
Problem 51
By replacing the 1st digit of the 2-digit number *3, it turns out that six of
the nine possible values: 13, 23, 43, 53, 73, and 83, are all prime.
By replacing the 3rd and 4th digits of 56**3 with the same digit, this 5-digit
number is the first example having seven primes among the ten generated
numbers, yielding the family: 56003, 56113, 56333, 56443, 56663, 56773,
and 56993. Consequently 56003, being the first member of this family, is the
smallest prime with this property.
Find the smallest prime which, by replacing part of the number (not necessarily
adjacent digits) with the same digit, is part of an eight prime value family.
"""
from __future__ import print_function
from utils import prime_sieve
from pe049_prime_permutations import seq_int
def combinations(seq, k):
"""combinations by lexicographic order
:param seq: choices
:param k: K
:return: next combination
"""
def _inner_dfs(seq, k, vals):
if len(seq) + len(vals) < k:
return
if len(vals) >= k: # got one
yield vals
else:
for i in range(len(seq)):
for j in _inner_dfs(seq[i + 1:], k, vals + [seq[i]]):
yield j
    # kick off the depth-first search with an empty partial combination
for i in _inner_dfs(seq, k, []):
yield i
def mask_same_digits(n, count=2):
"""mask same digit combinations by '*'
:param n: the number
:param count: least same digits
:return: mask list
"""
def _same_digits(seq, count):
m = {}
for pos, val in enumerate(seq): # inverted index
m.setdefault(val, []).append(pos)
for val, pos in m.items(): # multi pos(es)
if len(pos) >= count:
yield pos
def _mask(seq, mask, sign='*'):
for i in mask:
seq[i] = sign
return ''.join(map(str, seq))
seq = seq_int(n)
for pos in _same_digits(seq, count):
for mask in combinations(pos, count): # all possible combinations
yield _mask(seq[:], mask)
# def combine(self, NN, K):
# """Iterative 8-line solution using C(n, k) = C(n-1, k) + C(n-1, k-1)
#
# https://discuss.leetcode.com/topic/40827/iterative-8-line-solution-using-c-n-k-c-n-1-k-c-n-1-k-1
# :param self:
# :param NN:
# :param K:
# :return:
# """
# result = [[[]]]
# for n in range(1, NN + 1):
# newRes = [[[]]] # C(n, 0) = 0
# for k in range(1, n):
# # C(n, k) = C(n-1, k) + C(n-1, k-1)
# newRes.append(result[k] + [_ + [n] for _ in result[k - 1]])
# # C(n, n) = C(n-1, n-1) = 1
# newRes.append([result[n - 1][0] + [n]])
# result = newRes
# return result[K]
if __name__ == '__main__':
# test only
print([i for i in mask_same_digits(222323, 3)])
print([i for i in mask_same_digits(323333, 3)])
print('-' * 30)
caches = {}
for i in prime_sieve(1000000):
for seq in mask_same_digits(i, 3):
caches.setdefault(seq, []).append(i)
print('> caches %d' % len(caches))
for k in caches:
if len(caches[k]) >= 8:
print((k, len(caches[k])), caches[k]) # 121313
| gpl-3.0 | -7,675,758,474,691,941,000 | 28.972477 | 102 | 0.566269 | false |
byt3smith/CIRTKit | modules/reversing/viper/strings.py | 1 | 8205 | # This file is part of Viper - https://github.com/viper-framework/viper
# See the file 'LICENSE' for copying permission.
import os
import re
from socket import inet_pton, AF_INET6, error as socket_error
from lib.common.abstracts import Module
from lib.core.session import __sessions__
DOMAIN_REGEX = re.compile('([a-z0-9][a-z0-9\-]{0,61}[a-z0-9]\.)+[a-z0-9][a-z0-9\-]*[a-z0-9]', re.IGNORECASE)
IPV4_REGEX = re.compile('[1-2]?[0-9]?[0-9]\.[1-2]?[0-9]?[0-9]\.[1-2]?[0-9]?[0-9]\.[1-2]?[0-9]?[0-9]')
IPV6_REGEX = re.compile('((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}'
'|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9'
'A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25['
'0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3'
'})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|['
'1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,'
'4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:'
'))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-'
'5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]'
'{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d'
'\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7}'
')|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d'
'\d|[1-9]?\d)){3}))|:)))(%.+)?', re.IGNORECASE | re.S)
TLD = [
'AC', 'ACADEMY', 'ACTOR', 'AD', 'AE', 'AERO', 'AF', 'AG', 'AGENCY', 'AI', 'AL', 'AM', 'AN', 'AO', 'AQ', 'AR',
'ARPA', 'AS', 'ASIA', 'AT', 'AU', 'AW', 'AX', 'AZ', 'BA', 'BAR', 'BARGAINS', 'BB', 'BD', 'BE', 'BERLIN', 'BEST',
'BF', 'BG', 'BH', 'BI', 'BID', 'BIKE', 'BIZ', 'BJ', 'BLUE', 'BM', 'BN', 'BO', 'BOUTIQUE', 'BR', 'BS', 'BT',
'BUILD', 'BUILDERS', 'BUZZ', 'BV', 'BW', 'BY', 'BZ', 'CA', 'CAB', 'CAMERA', 'CAMP', 'CARDS', 'CAREERS', 'CAT',
'CATERING', 'CC', 'CD', 'CENTER', 'CEO', 'CF', 'CG', 'CH', 'CHEAP', 'CHRISTMAS', 'CI', 'CK', 'CL', 'CLEANING',
'CLOTHING', 'CLUB', 'CM', 'CN', 'CO', 'CODES', 'COFFEE', 'COM', 'COMMUNITY', 'COMPANY', 'COMPUTER', 'CONDOS',
'CONSTRUCTION', 'CONTRACTORS', 'COOL', 'COOP', 'CR', 'CRUISES', 'CU', 'CV', 'CW', 'CX', 'CY', 'CZ', 'DANCE',
'DATING', 'DE', 'DEMOCRAT', 'DIAMONDS', 'DIRECTORY', 'DJ', 'DK', 'DM', 'DNP', 'DO', 'DOMAINS', 'DZ', 'EC',
'EDU', 'EDUCATION', 'EE', 'EG', 'EMAIL', 'ENTERPRISES', 'EQUIPMENT', 'ER', 'ES', 'ESTATE', 'ET', 'EU', 'EVENTS',
'EXPERT', 'EXPOSED', 'FARM', 'FI', 'FISH', 'FJ', 'FK', 'FLIGHTS', 'FLORIST', 'FM', 'FO', 'FOUNDATION', 'FR',
'FUTBOL', 'GA', 'GALLERY', 'GB', 'GD', 'GE', 'GF', 'GG', 'GH', 'GI', 'GIFT', 'GL', 'GLASS', 'GM', 'GN', 'GOV',
'GP', 'GQ', 'GR', 'GRAPHICS', 'GS', 'GT', 'GU', 'GUITARS', 'GURU', 'GW', 'GY', 'HK', 'HM', 'HN', 'HOLDINGS',
'HOLIDAY', 'HOUSE', 'HR', 'HT', 'HU', 'ID', 'IE', 'IL', 'IM', 'IMMOBILIEN', 'IN', 'INDUSTRIES', 'INFO', 'INK',
'INSTITUTE', 'INT', 'INTERNATIONAL', 'IO', 'IQ', 'IR', 'IS', 'IT', 'JE', 'JM', 'JO', 'JOBS', 'JP', 'KAUFEN',
'KE', 'KG', 'KH', 'KI', 'KIM', 'KITCHEN', 'KIWI', 'KM', 'KN', 'KOELN', 'KP', 'KR', 'KRED', 'KW', 'KY', 'KZ',
'LA', 'LAND', 'LB', 'LC', 'LI', 'LIGHTING', 'LIMO', 'LINK', 'LK', 'LR', 'LS', 'LT', 'LU', 'LUXURY', 'LV', 'LY',
'MA', 'MAISON', 'MANAGEMENT', 'MANGO', 'MARKETING', 'MC', 'MD', 'ME', 'MENU', 'MG', 'MH', 'MIL', 'MK', 'ML',
'MM', 'MN', 'MO', 'MOBI', 'MODA', 'MONASH', 'MP', 'MQ', 'MR', 'MS', 'MT', 'MU', 'MUSEUM', 'MV', 'MW', 'MX',
'MY', 'MZ', 'NA', 'NAGOYA', 'NAME', 'NC', 'NE', 'NET', 'NEUSTAR', 'NF', 'NG', 'NI', 'NINJA', 'NL', 'NO', 'NP',
'NR', 'NU', 'NZ', 'OKINAWA', 'OM', 'ONION', 'ONL', 'ORG', 'PA', 'PARTNERS', 'PARTS', 'PE', 'PF', 'PG', 'PH',
'PHOTO', 'PHOTOGRAPHY', 'PHOTOS', 'PICS', 'PINK', 'PK', 'PL', 'PLUMBING', 'PM', 'PN', 'POST', 'PR', 'PRO',
'PRODUCTIONS', 'PROPERTIES', 'PS', 'PT', 'PUB', 'PW', 'PY', 'QA', 'QPON', 'RE', 'RECIPES', 'RED', 'RENTALS',
'REPAIR', 'REPORT', 'REVIEWS', 'RICH', 'RO', 'RS', 'RU', 'RUHR', 'RW', 'SA', 'SB', 'SC', 'SD', 'SE', 'SEXY',
'SG', 'SH', 'SHIKSHA', 'SHOES', 'SI', 'SINGLES', 'SJ', 'SK', 'SL', 'SM', 'SN', 'SO', 'SOCIAL', 'SOLAR',
'SOLUTIONS', 'SR', 'ST', 'SU', 'SUPPLIES', 'SUPPLY', 'SUPPORT', 'SV', 'SX', 'SY', 'SYSTEMS', 'SZ', 'TATTOO',
'TC', 'TD', 'TECHNOLOGY', 'TEL', 'TF', 'TG', 'TH', 'TIENDA', 'TIPS', 'TJ', 'TK', 'TL', 'TM', 'TN', 'TO',
'TODAY', 'TOKYO', 'TOOLS', 'TP', 'TR', 'TRAINING', 'TRAVEL', 'TT', 'TV', 'TW', 'TZ', 'UA', 'UG', 'UK', 'UNO',
'US', 'UY', 'UZ', 'VA', 'VACATIONS', 'VC', 'VE', 'VENTURES', 'VG', 'VI', 'VIAJES', 'VILLAS', 'VISION', 'VN',
'VOTE', 'VOTING', 'VOTO', 'VOYAGE', 'VU', 'WANG', 'WATCH', 'WED', 'WF', 'WIEN', 'WIKI', 'WORKS', 'WS',
'XN--3BST00M', 'XN--3DS443G', 'XN--3E0B707E', 'XN--45BRJ9C', 'XN--55QW42G', 'XN--55QX5D', 'XN--6FRZ82G',
'XN--6QQ986B3XL', 'XN--80AO21A', 'XN--80ASEHDB', 'XN--80ASWG', 'XN--90A3AC', 'XN--C1AVG', 'XN--CG4BKI',
'XN--CLCHC0EA0B2G2A9GCD', 'XN--D1ACJ3B', 'XN--FIQ228C5HS', 'XN--FIQ64B', 'XN--FIQS8S', 'XN--FIQZ9S',
'XN--FPCRJ9C3D', 'XN--FZC2C9E2C', 'XN--GECRJ9C', 'XN--H2BRJ9C', 'XN--I1B6B1A6A2E', 'XN--IO0A7I', 'XN--J1AMH',
'XN--J6W193G', 'XN--KPRW13D', 'XN--KPRY57D', 'XN--L1ACC', 'XN--LGBBAT1AD8J', 'XN--MGB9AWBF', 'XN--MGBA3A4F16A',
'XN--MGBAAM7A8H', 'XN--MGBAB2BD', 'XN--MGBAYH7GPA', 'XN--MGBBH1A71E', 'XN--MGBC0A9AZCG', 'XN--MGBERP4A5D4AR',
'XN--MGBX4CD0AB', 'XN--NGBC5AZD', 'XN--NQV7F', 'XN--NQV7FS00EMA', 'XN--O3CW4H', 'XN--OGBPF8FL', 'XN--P1AI',
'XN--PGBS0DH', 'XN--Q9JYB4C', 'XN--RHQV96G', 'XN--S9BRJ9C', 'XN--UNUP4Y', 'XN--WGBH1C', 'XN--WGBL6A',
'XN--XKC2AL3HYE2A', 'XN--XKC2DL3A5EE0H', 'XN--YFRO4I67O', 'XN--YGBI2AMMX', 'XN--ZFR164B', 'XXX', 'XYZ', 'YE',
'YT', 'ZA', 'ZM', 'ZONE', 'ZW']
class Strings(Module):
cmd = 'strings'
description = 'Extract strings from file'
authors = ['nex', 'Brian Wallace']
def __init__(self):
super(Strings, self).__init__()
self.parser.add_argument('-a', '--all', action='store_true', help='Print all strings')
self.parser.add_argument('-H', '--hosts', action='store_true', help='Extract IP addresses and domains from strings')
def extract_hosts(self, strings):
results = []
for entry in strings:
to_add = False
if DOMAIN_REGEX.search(entry) and not IPV4_REGEX.search(entry):
if entry[entry.rfind('.') + 1:].upper() in TLD:
to_add = True
elif IPV4_REGEX.search(entry):
to_add = True
elif IPV6_REGEX.search(entry):
try:
inet_pton(AF_INET6, entry)
except socket_error:
continue
else:
to_add = True
if to_add:
if entry not in results:
results.append(entry)
for result in results:
self.log('item', result)
def run(self):
super(Strings, self).run()
if self.args is None:
return
arg_all = self.args.all
arg_hosts = self.args.hosts
if not __sessions__.is_set():
self.log('error', "No session opened")
return
if os.path.exists(__sessions__.current.file.path):
regexp = '[\x20\x30-\x39\x41-\x5a\x61-\x7a\-\.:]{4,}'
strings = re.findall(regexp, __sessions__.current.file.data)
if arg_all:
for entry in strings:
self.log('', entry)
elif arg_hosts:
self.extract_hosts(strings)
else:
self.log('error', 'At least one of the parameters is required')
self.usage()
| mit | 1,134,222,950,011,861,100 | 64.64 | 124 | 0.467642 | false |
ekansa/open-context-py | opencontext_py/apps/imports/fieldannotations/complexdescriptions.py | 1 | 11675 | import uuid as GenUUID
from django.conf import settings
from django.db import models
from opencontext_py.libs.general import LastUpdatedOrderedDict
from opencontext_py.apps.ocitems.manifest.models import Manifest
from opencontext_py.apps.ocitems.documents.models import OCdocument
from opencontext_py.apps.imports.fields.models import ImportField
from opencontext_py.apps.imports.fieldannotations.models import ImportFieldAnnotation
from opencontext_py.apps.imports.fields.templating import ImportProfile
from opencontext_py.apps.imports.records.models import ImportCell
from opencontext_py.apps.imports.records.process import ProcessCells
from opencontext_py.apps.imports.fieldannotations.general import ProcessGeneral
from opencontext_py.apps.imports.sources.unimport import UnImport
from opencontext_py.apps.ocitems.complexdescriptions.models import ComplexDescription
from opencontext_py.apps.ocitems.assertions.models import Assertion
from opencontext_py.apps.ocitems.strings.models import OCstring
from opencontext_py.apps.ocitems.strings.manage import StringManagement
# Processes to generate complex descriptions for other manifest recorded entities
class ProcessComplexDescriptions():
FRAG_ID_PREFIX = '#cplxdes-' # fragment id prefix for a complex description
def __init__(self, source_id):
self.source_id = source_id
pg = ProcessGeneral(source_id)
pg.get_source()
self.project_uuid = pg.project_uuid
self.complex_des_fields = []
self.start_row = 1
self.batch_size = settings.IMPORT_BATCH_SIZE
self.end_row = self.batch_size
self.count_active_fields = 0
self.count_new_assertions = 0
self.obs_num_complex_description_assertions = 1
def clear_source(self):
""" Clears a prior import if the start_row is 1.
This makes sure new entities and assertions are made for
this source_id, and we don't duplicate things
"""
if self.start_row <= 1:
# get rid of "documents" related assertions made from this source
unimport = UnImport(self.source_id,
self.project_uuid)
unimport.delete_complex_description_assertions()
def process_complex_batch(self):
""" processes fields for documents
entities starting with a given row number.
This iterates over all containment fields, starting
with the root subjhect field
"""
self.clear_source() # clear prior import for this source
self.end_row = self.start_row + self.batch_size
self.get_complex_description_fields()
label_str_uuids = {}
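        # cache label -> string uuid so repeated labels reuse one OCstring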
if len(self.complex_des_fields) > 0:
print('Number of Complex Description Fields: ' + str(len(self.complex_des_fields)))
cp_id_number = 0
for cp_field in self.complex_des_fields:
cp_id_number += 1
pc = ProcessCells(self.source_id,
self.start_row)
distinct_records = pc.get_field_records_by_fl_uuid(cp_field.describes_field.field_num,
False)
if distinct_records is not False:
# sort the list in row_order from the import table
pg = ProcessGeneral(self.source_id)
distinct_records = pg.order_distinct_records(distinct_records)
for row_key, dist_rec in distinct_records.items():
if cp_field.obs_num < 1:
obs_num = 1
else:
obs_num = cp_field.obs_num
obs_node = '#obs-' + str(obs_num)
subject_uuid = dist_rec['imp_cell_obj'].fl_uuid
subject_type = cp_field.describes_field.field_type
subject_ok = dist_rec['imp_cell_obj'].cell_ok
subject_record = dist_rec['imp_cell_obj'].record
if subject_uuid is False or\
len(subject_record) < 1:
subject_ok = False
if subject_uuid == 'False':
subject_ok = False
sort = 0
in_rows = dist_rec['rows']
print('Look for complex description labels in rows: ' + str(in_rows))
if subject_ok is not False:
# OK! we have the subjects of complex descriptions
# with uuids, so now we can make an fl_uuid for each
# of the complex description fields.
complex_uuid = subject_uuid + self.FRAG_ID_PREFIX + str(cp_id_number)
complex_recs = ImportCell.objects\
.filter(source_id=self.source_id,
field_num=cp_field.field_num,
row_num__in=in_rows)\
.exclude(record='')
if len(complex_recs) > 0:
# we have records in the complex description field that are not blank
# and are associated with the subject of the complex description.
# so now, let's record this association.
save_ok = False
new_ass = Assertion()
new_ass.uuid = subject_uuid
new_ass.subject_type = subject_type
new_ass.project_uuid = self.project_uuid
new_ass.source_id = self.source_id + ProcessGeneral.COMPLEX_DESCRIPTION_SOURCE_SUFFIX
new_ass.obs_node = obs_node
new_ass.obs_num = obs_num
new_ass.sort = 100 + cp_id_number
new_ass.visibility = 1
new_ass.predicate_uuid = ComplexDescription.PREDICATE_COMPLEX_DES
new_ass.object_type = 'complex-description'
new_ass.object_uuid = complex_uuid
                                try:
                                    print('Saved complex-description: ' + complex_uuid)
                                    new_ass.save()
save_ok = True
except:
save_ok = False
if save_ok:
self.count_new_assertions += 1
# now look through the complex description records and make labels
for comp_rec in complex_recs:
# first save the fl_uuid for the complex description
comp_rec.fl_uuid = complex_uuid
comp_rec.save()
if isinstance(cp_field.value_prefix, str):
cp_label = cp_field.value_prefix + comp_rec.record
else:
cp_label = comp_rec.record
if cp_label not in label_str_uuids:
# make a uuid for the record value
# adding a source_id suffix keeps this from being deleted as descriptions get processed
sm = StringManagement()
sm.project_uuid = self.project_uuid
sm.source_id = self.source_id + ProcessGeneral.COMPLEX_DESCRIPTION_SOURCE_SUFFIX
oc_string = sm.get_make_string(cp_label)
content_uuid = oc_string.uuid
label_str_uuids[cp_label] = content_uuid
content_uuid = label_str_uuids[cp_label]
save_ok = False
new_ass = Assertion()
new_ass.uuid = complex_uuid
new_ass.subject_type = 'complex-description'
new_ass.project_uuid = self.project_uuid
# adding a source_id suffix keeps this from being deleted as descriptions get processed
new_ass.source_id = self.source_id + ProcessGeneral.COMPLEX_DESCRIPTION_SOURCE_SUFFIX
new_ass.obs_node = '#obs-' + str(self.obs_num_complex_description_assertions)
new_ass.obs_num = self.obs_num_complex_description_assertions
new_ass.sort = 1
new_ass.visibility = 1
new_ass.predicate_uuid = ComplexDescription.PREDICATE_COMPLEX_DES_LABEL
new_ass.object_type = 'xsd:string'
new_ass.object_uuid = content_uuid
try:
new_ass.save()
save_ok = True
except:
save_ok = False
if save_ok:
self.count_new_assertions += 1
def get_complex_description_fields(self):
""" Makes a list of document fields
"""
complex_des_fields = []
raw_cp_fields = ImportField.objects\
.filter(source_id=self.source_id,
field_type='complex-description')
for cp_field in raw_cp_fields:
desribes_fields = ImportFieldAnnotation.objects\
.filter(source_id=self.source_id,
field_num=cp_field.field_num,
predicate=ImportFieldAnnotation.PRED_COMPLEX_DES)[:1]
if len(desribes_fields) > 0:
desc_field_objs = ImportField.objects\
.filter(source_id=self.source_id,
field_num=desribes_fields[0].object_field_num,
field_type__in=ImportProfile.DEFAULT_SUBJECT_TYPE_FIELDS)[:1]
if len(desc_field_objs) > 0:
# OK! the complex property field describes a field with the correct field type (ImportProfile.DEFAULT_SUBJECT_TYPE_FIELDS)
# it is OK then to use to make complex descriptions
cp_field.describes_field = desc_field_objs[0]
complex_des_fields.append(cp_field)
self.complex_des_fields = complex_des_fields
self.count_active_fields = len(self.complex_des_fields)
return self.complex_des_fields
| gpl-3.0 | -8,557,080,162,303,926,000 | 59.807292 | 142 | 0.484968 | false |
gabelula/b-counted | .google_appengine/tools/bulkload_client.py | 1 | 1747 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Convenience wrapper for starting bulkload_client.py"""
import os
import sys
if not hasattr(sys, 'version_info'):
sys.stderr.write('Very old versions of Python are not supported. Please '
'use version 2.5 or greater.\n')
sys.exit(1)
version_tuple = tuple(sys.version_info[:2])
if version_tuple < (2, 4):
sys.stderr.write('Error: Python %d.%d is not supported. Please use '
'version 2.5 or greater.\n' % version_tuple)
sys.exit(1)
if version_tuple == (2, 4):
sys.stderr.write('Warning: Python 2.4 is not supported; this program may '
'break. Please use version 2.5 or greater.\n')
BULKLOAD_CLIENT_PATH = 'google/appengine/tools/bulkload_client.py'
DIR_PATH = os.path.abspath(os.path.dirname(
os.path.dirname(os.path.realpath(__file__))))
EXTRA_PATHS = [
DIR_PATH,
os.path.join(DIR_PATH, 'lib', 'django'),
os.path.join(DIR_PATH, 'lib', 'webob'),
os.path.join(DIR_PATH, 'lib', 'yaml', 'lib'),
]
if __name__ == '__main__':
sys.path = EXTRA_PATHS + sys.path
script_path = os.path.join(DIR_PATH, BULKLOAD_CLIENT_PATH)
execfile(script_path, globals())
| apache-2.0 | 3,248,498,652,674,230,300 | 33.254902 | 76 | 0.680595 | false |
haanme/FinnBrain | pipeline_DTI_step1_DTIprep.py | 1 | 4312 | #!/usr/bin/env python
####################################################################
# Python 2.7 script for executing FA, MD calculations for one case #
####################################################################
# Directory where result data are located
experiment_dir = '/Users/eija/Documents/FinnBrain/Jetro_DTI/pipelinedata'
# Protocol that is applied in DTIprep
DTIprep_protocol = '/Users/eija/Documents/FinnBrain/Jetro_DTI/pipelinedata/default_all.xml'
#
# Moves file to results folder, overwriting the existing file
#
# filename - file to be moved
# out_prefix - subject specific prefix
#
def move_to_results(filename, out_prefix):
import os
import shutil
outfile = experiment_dir + '/' + out_prefix + '/' + os.path.basename(filename)
if os.path.isfile(outfile):
os.remove(outfile)
shutil.move(filename,outfile)
return outfile
#
# Gunzips file to results folder, overwriting the existing file
#
# filename - file to be moved (.nii.gz)
# out_prefix - subject specific prefix
#
def gunzip_to(filename, out_prefix, destination):
import os
import shutil
from nipype.interfaces.base import CommandLine
cmd = CommandLine('gunzip -f %s' % (filename))
print "gunzip NII.GZ:" + cmd.cmd
cmd.run()
basename = os.path.basename(filename[:len(filename)-3])
outfile = destination + '/' + basename
if os.path.isfile(outfile):
os.remove(outfile)
shutil.move(filename[:len(filename)-3],outfile)
return outfile
#
# Executes DTIPrep
#
# in_file - DTI file for QC (.nrrd)
# out_prefix - subject specific prefix
#
def dtiprep(in_file, output_prefix):
from glob import glob
import os
from nipype.interfaces.base import CommandLine
from nipype.utils.filemanip import split_filename
_, name, _ = split_filename(in_file)
cmd = CommandLine('DTIPrepExec -c -d -f %s -n %s/%s_notes.txt -p %s -w %s' % ((experiment_dir + '/' + output_prefix),(experiment_dir + '/' + output_prefix),output_prefix,DTIprep_protocol,in_file))
print "DTIPREP:" + cmd.cmd
cmd.run()
qcfile = experiment_dir + '/' + output_prefix + '/' + name + '_QCed.nrrd'
xmlfile = experiment_dir + '/' + output_prefix + '/' + name + '_XMLQCResult.xml'
sumfile = experiment_dir + '/' + output_prefix + '/' + name + '_QCReport.txt'
return qcfile, xmlfile, sumfile
#
# Converts NRRD to FSL Nifti format (Nifti that is gzipped)
#
# in_file - NRRD file to convert
# out_prefix - subject specific prefix
#
def nrrd2nii(in_file, output_prefix):
from os.path import abspath as opap
from nipype.interfaces.base import CommandLine
from nipype.utils.filemanip import split_filename
_, name, _ = split_filename(in_file)
out_vol = experiment_dir + '/' + output_prefix + '/' + ('%s.nii.gz' % name)
out_bval = experiment_dir + '/' + output_prefix + '/' + ('%s.bval' % name)
out_bvec = experiment_dir + '/' + output_prefix + '/' + ('%s.bvec' % name)
cmd = CommandLine(('DWIConvert --inputVolume %s --outputVolume %s --outputBValues %s'
' --outputBVectors %s --conversionMode NrrdToFSL') % (in_file, out_vol,
out_bval, out_bvec))
print "NRRD->NIFTI:" + cmd.cmd
cmd.run()
return opap(out_vol), opap(out_bval), opap(out_bvec)
def check_dependencies():
import os
    files = ['DWIConvert', 'DTIPrepExec', 'gunzip']
for file in files:
if os.system('which ' + file) != 0:
return False
return True
def run(nrrd_file, args_subject):
# DTIprep QC-tool
qcfile, _, _ = dtiprep(nrrd_file, args_subject)
# Convert NRRD->NII
    dwifile, bval_file, bvec_file = nrrd2nii(qcfile, args_subject)
###############
# Main script #
###############
from argparse import ArgumentParser
import os
import sys
if __name__ == "__main__":
if not check_dependencies():
print 'DEPENDENCIES NOT FOUND'
sys.exit(1)
# Parse input arguments into args structure
parser = ArgumentParser()
parser.add_argument("--subject", dest="subject", help="subject id", required=True)
args = parser.parse_args()
nrrd_file = experiment_dir + os.sep + args.subject + os.sep + args.subject + 'DTI.nrrd'
run(nrrd_file, args.subject)
| mit | -2,800,876,285,888,440,000 | 33.222222 | 200 | 0.618506 | false |
ionitadaniel19/testframeworksevolution | src/hybridframework/hybridtests.py | 1 | 2051 | '''
Created on 01.06.2014
@author: ionitadaniel19
'''
def show_answer_hybrid_simple(driver,scenario):
from modularframework.login import LoginPage
from modularframework.testframeworks import TestFrameworksPage
from config.utilities import get_simple_hybrid_driven_scenario_values
from config.constants import *
data_test=get_simple_hybrid_driven_scenario_values(scenario)
login_page=None
test_framework_page=None
actual_answer=None
for data_function in data_test:
if data_function[FRAMEWORK_FUNCTIONS]==CELL_F_REMEMBER_ME:
if login_page is None:
login_page=LoginPage(driver)
login_page.remember_me()
if data_function[FRAMEWORK_FUNCTIONS]==CELL_F_LOGIN:
if login_page is None:
login_page=LoginPage(driver)
if len(data_function[PARAMETERS])==2:
username=data_function[PARAMETERS][0]
pwd=data_function[PARAMETERS][1]
login_page.login(username, pwd)
else:
raise Exception('For function %s there were not enough parameters specified %s.Expected 2.' %(data_function[FRAMEWORK_FUNCTIONS],data_function[PARAMETERS]))
if data_function[FRAMEWORK_FUNCTIONS]==CELL_F_SELECT_ANSWER:
if test_framework_page is None:
test_framework_page=TestFrameworksPage(driver)
if len(data_function[PARAMETERS])==1:
answer=data_function[PARAMETERS][0]
test_framework_page.select_answer(answer)
else:
raise Exception('For function %s there were not enough parameters specified %s.Expected 1.' %(data_function[FRAMEWORK_FUNCTIONS],data_function[PARAMETERS]))
if data_function[FRAMEWORK_FUNCTIONS]==CELL_F_SHOW_ANSWER:
if test_framework_page is None:
test_framework_page=TestFrameworksPage(driver)
actual_answer=test_framework_page.show_answer()
return actual_answer | mit | -3,050,877,202,472,316,400 | 48.073171 | 177 | 0.643588 | false |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_03_01/operations/_default_security_rules_operations.py | 1 | 8972 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class DefaultSecurityRulesOperations(object):
"""DefaultSecurityRulesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.SecurityRuleListResult"]
"""Gets all default security rules in a network security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SecurityRuleListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_03_01.models.SecurityRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecurityRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('SecurityRuleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/defaultSecurityRules'} # type: ignore
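    # Illustrative usage only (not part of the generated client): with an authenticated
    # ``NetworkManagementClient`` instance named ``client`` and placeholder resource names,
    # the pager returned above would typically be consumed as
    #   for rule in client.default_security_rules.list('my-rg', 'my-nsg'):
    #       print(rule.name)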
def get(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
default_security_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.SecurityRule"
"""Get the specified default network security rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param default_security_rule_name: The name of the default security rule.
:type default_security_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SecurityRule, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.SecurityRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecurityRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'defaultSecurityRuleName': self._serialize.url("default_security_rule_name", default_security_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SecurityRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/defaultSecurityRules/{defaultSecurityRuleName}'} # type: ignore
| mit | 4,355,501,811,463,052,300 | 47.76087 | 236 | 0.650022 | false |
cmateu/gaiaerror_py | gaiaerr.py | 1 | 5490 | #!/usr/bin/env python
import numpy as np
import scipy
import sys
import os
import os.path
import argparse
import myutils
#--------Version History----------------------------------------------------------------------------
# 16/nov/2017: parallax mission time scaling factor fixed (goes as (5./tm)**0.5 not **1.)
# 11/oct/2016: VX,VY,VZ unit error fixed (inputs must be passes in mas/yr always, not muas/yr)
#Gaia error code path
gerr_path='/workd/cmateu/gaia_errors_color_tmission'
#gerr_path='/Users/cmateu/trabajo/gaia/gaia_challenge2014_mgc3/gaiaerror_py/'+'gaia_errors_color_tmission'
parser = argparse.ArgumentParser(description='Simulate Gaia errors + constant relative error in distance')
parser.add_argument('infile',metavar='infile(.ne.dat)',help='Input File (x y z vx vy vz Mv VI)',nargs=1,action='store')
parser.add_argument('-tm','--mission_t',help='Gaia mission time span in yr. Default 5.', action='store',default=5.,type=np.float)
parser.add_argument('-v','--verbose',help='Verbose', action='store_true',default=False)
#parse arguments
args = parser.parse_args()
infilen=args.infile[0]
mission_t=args.mission_t
if args.verbose:
print 'Input file:', infilen
print 'Gaia Mission time:',mission_t
#Compute error scaling factor based on mission time (following Brown and deBruijne's prescriptions, priv. comm.)
if mission_t<=10.:
pfactor=(5./mission_t)**0.5 #nominal errors are for mission_t=5., so factor==1 in this case. For parallax err scales as t
factor=(5./mission_t)**1.5 #nominal errors are for mission_t=5., so factor==1 in this case
else:
pfactor=(5./mission_t)**0.5 #If new Gaia is launched, scaling can be conservatively assumed to go as t
factor=(5./mission_t)**1. #If new Gaia is launched, scaling can be conservatively assumed to go as t
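# Worked example (comment only): for mission_t = 10 yr, pfactor = (5/10)**0.5 ~ 0.71 and
# factor = (5/10)**1.5 ~ 0.35, i.e. parallax errors shrink as sqrt(t) while proper-motion
# errors shrink faster with mission length.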
#Extra labels
if mission_t==5: tlabel=''
else: tlabel='%.1f' % (mission_t)
#Print auxiliary input file for gaerr code.Aux files have to be unique so multiple threads can be run simultaneuosly
auxinf=infilen+tlabel+'.aux.in'
auxoutf=infilen+tlabel+'.aux.out'
auxfilein=open(auxinf,'w')
auxfilein.write('%s\n%s\n' % (infilen,auxoutf))
auxfilein.close()
#Check whether auxfiles used by gaiaerr code exist in the present dir. Create symbolic links if not
if not os.path.isfile('avdisk.dat'):
if args.verbose: print 'Gaia error code aux files missing, creating symbolic links...'
proc='ln -s %s/*.dat .' % (gerr_path)
os.system(proc)
#Run Gaia error code
if args.verbose: print 'Running Gaia error code...'
os.system('%s/compute_err_color_gaia_tmission < %s' % (gerr_path,auxinf))
#Read gaia error output file
dat=scipy.genfromtxt(auxoutf)
#Get true parallax, simulate gpar by adding gaussian X% error
relerr_par=dat[:,5-1]
xpar=dat[:,12-1]
gpar=dat[:,25-1]
#Rescale parallax err
sigma_par_new=(gpar-xpar)*pfactor
gpar=xpar+sigma_par_new
sigma_par_ref=relerr_par*xpar #sigma used to draw random gaussian par error
sigma_par_ref_new=sigma_par_ref*pfactor
relerr_par_obs_new=sigma_par_ref_new/gpar #this is fobs. relerr_par is ftrue
relerr_par_tru_new=sigma_par_ref_new/xpar
#Recompute gvrad (a lot come out from Merce's code as ****)
xvrad=dat[:,18-1]
sigma_vrad=dat[:,34-1]
gvrad=xvrad+np.random.normal(loc=0.,scale=sigma_vrad,size=xvrad.size)
gl,gb,gmulstar,gmub=dat[:,26-1],dat[:,27-1],dat[:,29-1],dat[:,30-1]
xl,xb,xmulstar,xmub=dat[:,13-1],dat[:,14-1],dat[:,16-1],dat[:,17-1]
#Recompute uncertainties
sigma_mulstar_new=(gmulstar-xmulstar)*factor
sigma_mub_new=(gmub-xmub)*factor
#Recompute 'observed proper motions'
gmulstar=xmulstar+sigma_mulstar_new
gmub=xmub+sigma_mub_new
fp=1000.
#Parallax for my function must be in muas. Proper motions must be in mas/yr (as needed by bovy library)
mydat=myutils.helio_obj(gl,gb,fp*gpar,gmulstar,gmub,gvrad,degree=True,flag_mulstar=True)
#Replace cols appropiately in full matrix
dat[:,25-1]=gpar
dat[:,26-1]=gl
dat[:,27-1]=gb
dat[:,28-1]=mydat.Rhel
dat[:,29-1]=gmulstar
dat[:,30-1]=gmub
dat[:,31-1]=gvrad
#---Proper motion cols----
dat[:,33-1]=dat[:,33-1]*factor #sigma_mub
dat[:,36-1]=dat[:,36-1]*factor #relerr_mub
#Parallax error cols------
dat[:, 5-1]=relerr_par_tru_new #relerr_par
dat[:,32-1]=relerr_par_obs_new #relerr_par_obs
#---Cartesian coords
dat[:,19-1]=-mydat.x
dat[:,20-1]=mydat.y
dat[:,21-1]=mydat.z
dat[:,22-1]=-mydat.vx
dat[:,23-1]=mydat.vy
dat[:,24-1]=mydat.vz
#Header and print formats
head_l=['Av','xV','Gmag','Grvs','relerr_par','xX','xY','xZ','xVX','xVY','xVZ','xpar_mas','xl_deg','xb_deg','xRhel','xmuls_cosb_mas','xmub_mas','xvrad','gX','gY','gZ','gVX','gVY','gVZ','gpar_mas','gl_deg','gb_deg','gRhel','gmuls_cosb_mas','gmub_mas','gvrad','relerr_parobs','sig_mub','sig_vrad','VI','relerr_mub','relerr_vrad']
head_cols=np.arange(len(head_l))+1
hfmts='#%17s '+(len(head_l)-1)*'%18s '
hfmts=hfmts+'\n'
fmts=(dat[0,:].size)*'%18.10f '
#Final output file name
ofilen=infilen.replace('.ne.dat','')+'.ge'+tlabel+'.dat'
#Print output file
if args.verbose: print 'Printing outputfile',ofilen
ofile=open(ofilen,'w')
ofile.write('#Gaia mission time assumed %.1f yr, error scaling factor %.3f\n' % (mission_t,factor))
ofile.write(hfmts % tuple(head_cols))
ofile.write(hfmts % tuple(head_l))
scipy.savetxt(ofile,dat,fmt=fmts)
#Remove aux files
proc='rm -f %s %s' % (auxinf,auxoutf)
os.system(proc)
#proc='rm -f TableVr-Jun2015.dat avdloc.dat avspir.dat myfile.ne.dat allruns.dat avori.dat gfactor-Jun2013.dat rf_allsky.dat avdisk.dat avori2.dat myfile.ge.dat run.dat'
if args.verbose: print 'Done'
| bsd-3-clause | 1,554,841,752,632,622,300 | 37.661972 | 326 | 0.701093 | false |
WilJoey/tn_ckan | ckan/lib/email_notifications.py | 1 | 7858 | '''
Code for generating email notifications for users (e.g. email notifications for
new activities in your dashboard activity stream) and emailing them to the
users.
'''
import datetime
import re
import pylons
import ckan.model as model
import ckan.logic as logic
import ckan.lib.base as base
from ckan.common import ungettext
def string_to_timedelta(s):
'''Parse a string s and return a standard datetime.timedelta object.
Handles days, hours, minutes, seconds, and microseconds.
Accepts strings in these formats:
2 days
14 days
4:35:00 (hours, minutes and seconds)
4:35:12.087465 (hours, minutes, seconds and microseconds)
7 days, 3:23:34
7 days, 3:23:34.087465
.087465 (microseconds only)
:raises ckan.logic.ValidationError: if the given string does not match any
of the recognised formats
'''
patterns = []
days_only_pattern = '(?P<days>\d+)\s+day(s)?'
patterns.append(days_only_pattern)
hms_only_pattern = '(?P<hours>\d?\d):(?P<minutes>\d\d):(?P<seconds>\d\d)'
patterns.append(hms_only_pattern)
    ms_only_pattern = '\.(?P<milliseconds>\d\d\d)(?P<microseconds>\d\d\d)'
patterns.append(ms_only_pattern)
hms_and_ms_pattern = hms_only_pattern + ms_only_pattern
patterns.append(hms_and_ms_pattern)
days_and_hms_pattern = '{0},\s+{1}'.format(days_only_pattern,
hms_only_pattern)
patterns.append(days_and_hms_pattern)
days_and_hms_and_ms_pattern = days_and_hms_pattern + ms_only_pattern
patterns.append(days_and_hms_and_ms_pattern)
for pattern in patterns:
match = re.match('^{0}$'.format(pattern), s)
if match:
break
if not match:
raise logic.ValidationError('Not a valid time: {0}'.format(s))
gd = match.groupdict()
days = int(gd.get('days', '0'))
hours = int(gd.get('hours', '0'))
minutes = int(gd.get('minutes', '0'))
seconds = int(gd.get('seconds', '0'))
milliseconds = int(gd.get('milliseconds', '0'))
microseconds = int(gd.get('microseconds', '0'))
delta = datetime.timedelta(days=days, hours=hours, minutes=minutes,
seconds=seconds, milliseconds=milliseconds,
microseconds=microseconds)
return delta
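# Illustrative round-trips (not executed here; inputs follow the formats documented above):
#   string_to_timedelta('2 days')          -> datetime.timedelta(days=2)
#   string_to_timedelta('4:35:00')         -> datetime.timedelta(hours=4, minutes=35)
#   string_to_timedelta('7 days, 3:23:34') -> datetime.timedelta(days=7, hours=3, minutes=23, seconds=34)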
def _notifications_for_activities(activities, user_dict):
'''Return one or more email notifications covering the given activities.
This function handles grouping multiple activities into a single digest
email.
:param activities: the activities to consider
:type activities: list of activity dicts like those returned by
ckan.logic.action.get.dashboard_activity_list()
:returns: a list of email notifications
:rtype: list of dicts each with keys 'subject' and 'body'
'''
if not activities:
return []
if not user_dict.get('activity_streams_email_notifications'):
return []
# We just group all activities into a single "new activity" email that
# doesn't say anything about _what_ new activities they are.
# TODO: Here we could generate some smarter content for the emails e.g.
# say something about the contents of the activities, or single out
# certain types of activity to be sent in their own individual emails,
# etc.
subject = ungettext(
"1 new activity from {site_title}",
"{n} new activities from {site_title}",
len(activities)).format(
site_title=pylons.config.get('ckan.site_title'),
n=len(activities))
body = base.render(
'activity_streams/activity_stream_email_notifications.text',
extra_vars={'activities': activities})
notifications = [{
'subject': subject,
'body': body
}]
return notifications
def _notifications_from_dashboard_activity_list(user_dict, since):
'''Return any email notifications from the given user's dashboard activity
list since `since`.
'''
# Get the user's dashboard activity stream.
context = {'model': model, 'session': model.Session,
'user': user_dict['id']}
activity_list = logic.get_action('dashboard_activity_list')(context, {})
# Filter out the user's own activities., so they don't get an email every
# time they themselves do something (we are not Trac).
activity_list = [activity for activity in activity_list
if activity['user_id'] != user_dict['id']]
# Filter out the old activities.
strptime = datetime.datetime.strptime
fmt = '%Y-%m-%dT%H:%M:%S.%f'
activity_list = [activity for activity in activity_list
if strptime(activity['timestamp'], fmt) > since]
return _notifications_for_activities(activity_list, user_dict)
# A list of functions that provide email notifications for users from different
# sources. Add to this list if you want to implement a new source of email
# notifications.
_notifications_functions = [
_notifications_from_dashboard_activity_list,
]
def get_notifications(user_dict, since):
'''Return any email notifications for the given user since `since`.
For example email notifications about activity streams will be returned for
any activities the occurred since `since`.
:param user_dict: a dictionary representing the user, should contain 'id'
and 'name'
:type user_dict: dictionary
:param since: datetime after which to return notifications from
:rtype since: datetime.datetime
:returns: a list of email notifications
:rtype: list of dicts with keys 'subject' and 'body'
'''
notifications = []
for function in _notifications_functions:
notifications.extend(function(user_dict, since))
return notifications
def send_notification(user, email_dict):
'''Email `email_dict` to `user`.'''
import ckan.lib.mailer
if not user.get('email'):
# FIXME: Raise an exception.
return
try:
ckan.lib.mailer.mail_recipient(user['display_name'], user['email'],
email_dict['subject'], email_dict['body'])
except ckan.lib.mailer.MailerException:
raise
def get_and_send_notifications_for_user(user):
# Parse the email_notifications_since config setting, email notifications
# from longer ago than this time will not be sent.
email_notifications_since = pylons.config.get(
'ckan.email_notifications_since', '2 days')
email_notifications_since = string_to_timedelta(
email_notifications_since)
email_notifications_since = (datetime.datetime.now()
- email_notifications_since)
# FIXME: We are accessing model from lib here but I'm not sure what
# else to do unless we add a get_email_last_sent() logic function which
# would only be needed by this lib.
email_last_sent = model.Dashboard.get(user['id']).email_last_sent
activity_stream_last_viewed = (
model.Dashboard.get(user['id']).activity_stream_last_viewed)
since = max(email_notifications_since, email_last_sent,
activity_stream_last_viewed)
notifications = get_notifications(user, since)
# TODO: Handle failures from send_email_notification.
for notification in notifications:
send_notification(user, notification)
# FIXME: We are accessing model from lib here but I'm not sure what
# else to do unless we add a update_email_last_sent()
# logic function which would only be needed by this lib.
dash = model.Dashboard.get(user['id'])
dash.email_last_sent = datetime.datetime.now()
model.repo.commit()
def get_and_send_notifications_for_all_users():
context = {'model': model, 'session': model.Session, 'ignore_auth': True,
'keep_email': True}
users = logic.get_action('user_list')(context, {})
for user in users:
get_and_send_notifications_for_user(user)
| mit | 3,647,540,538,416,515,000 | 33.61674 | 79 | 0.670781 | false |
ZeitOnline/zeit.magazin | src/zeit/magazin/browser/tests/test_portraitbox.py | 1 | 1167 | import gocept.testing.assertion
import zeit.cms.testing
import zeit.magazin.testing
class ZMOPortraitboxCRUD(zeit.cms.testing.BrowserTestCase,
gocept.testing.assertion.String):
layer = zeit.magazin.testing.LAYER
def test_zmo_portraitbox_has_longtext_field(self):
b = self.browser
b.open('http://localhost/++skin++vivi/repository/magazin')
menu = b.getControl(name='add_menu')
menu.displayValue = ['Portraitbox']
b.open(menu.value[0])
b.getControl('File name').value = 'portrait'
b.getControl('First and last name').value = 'Foo Bar'
b.getControl('Text').value = '<p><strong>With</strong> markup</p>'
b.getControl('long text (ZMO)').value = (
'<p><strong>Second</strong> text</p>')
b.getControl('Add').click()
self.assertEndsWith('@@edit.html', b.url)
self.assertEqual(
'<p><strong>Second</strong> text</p>',
b.getControl('long text (ZMO)').value.strip())
b.getLink('Checkin').click()
self.assertEllipsis(
'...<strong>Second</strong> text...', b.contents)
| bsd-3-clause | 855,706,272,479,946,000 | 35.46875 | 74 | 0.60497 | false |
vnsofthe/odoo-dev | addons/rhwl_gene/rhwl_gene.py | 1 | 55239 | # -*- coding: utf-8 -*-
from openerp import SUPERUSER_ID, api
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
import datetime,time
import logging
import os
import shutil
import re
import urllib2
from openerp import tools
from lxml import etree
_logger = logging.getLogger(__name__)
class rhwl_gene(osv.osv):
STATE_SELECT_LIST=[
('draft', u'草稿'),
('cancel', u'检测取消'),
('except', u'信息异常'),
('except_confirm', u'异常已确认'),
('confirm', u'信息已确认'),
('dna_except', u'DNA质检不合格'),
('dna_ok',u"DNA质检合格"),
('ok', u'位点数据已导入'),
('report', u'生成报告中'),
('report_done', u"报告已生成"),
("result_done", u"风险报告确认"),
("deliver", u"印刷厂已接收"),
('done', u'客户已收货')
]
STATE_SELECT = dict(STATE_SELECT_LIST)
_name = "rhwl.easy.genes"
_order = "date desc,name asc"
def _genes_type_get(self, cr, uid, ids, field_names, arg, context=None):
res = {}
maps = {}
for id in ids:
res[id] = {}.fromkeys(field_names, "")
type_ids = self.pool.get("rhwl.easy.genes.type").search(cr, uid, [("genes_id.id", '=', id)],
context=context)
for i in self.pool.get("rhwl.easy.genes.type").browse(cr, uid, type_ids, context=context):
res[id][maps.get(i.snp, i.snp)] = i.typ
return res
def _get_risk_detail(self,cr,uid,ids,field_names,arg,context=None):
res={}
for id in ids:
res[id] = {}.fromkeys(field_names,"")
obj = self.pool.get("rhwl.easy.genes").browse(cr,uid,id,context=context)
for o in obj.risk:
res[id][o.disease_id.code]=o.risk
return res
def _get_risk(self,cr,uid,ids,field_names,arg,context=None):
res={}
for id in ids:
res[id]={"risk_count":0,"risk_text":""}
risk_id = self.pool.get("rhwl.easy.gene.risk").search(cr,uid,[("genes_id.id","=",id),'|',("risk","=","高风险"),("risk","=","低能力")])
res[id]["risk_count"]=risk_id.__len__()
t=[]
for i in self.pool.get("rhwl.easy.gene.risk").browse(cr,uid,risk_id,context=context):
t.append(i.disease_id.name)
res[id]["risk_text"]=u"、".join(t)
return res
_columns = {
"batch_no": fields.char(u"批次",select=True),
"name": fields.char(u"基因样本编号", required=True, size=10),
"date": fields.date(u"送检日期", required=True),
"cust_name": fields.char(u"会员姓名", required=True, size=50),
"sex": fields.selection([('T', u"男"), ('F', u"女")], u"性别", required=True),
"identity": fields.char(u"身份证号", size=18),
"mobile": fields.char(u"手机号码", size=15),
"birthday": fields.date(u"出生日期"),
"receiv_date": fields.datetime(u"接收时间"),
"except_note": fields.text(u"信息异常内容"),
"confirm_note": fields.text(u"信息异常反馈"),
"state": fields.selection(STATE_SELECT_LIST, u"状态"),
"note": fields.text(u"备注"),
"gene_id": fields.char(u"基因编号", size=20),
"language":fields.selection([("CN",u"中文"),("EN",u"英文"),("RU",u"俄文"),("VN",u"越南文"),("MY",u"马来语"),("ID",u"印度尼西亚语"),("IN",u"印度")],u"报告语种"),
"cust_prop": fields.selection([("tjs", u"泰济生普通客户"), ("tjs_vip",u"泰济生VIP客户"),("employee", u"内部员工"), ("vip", u"内部VIP客户"), ("extra", u"外部人员")],
string=u"客户属性"),
"package":fields.selection([("01",u"标准版"),("03",u"尊享版"),("02",u"升级版+"),("04",u"优雅女士"),("06",u"快乐儿童"),("05",u"精英男士")],string=u"产品类别"),
"package_id":fields.many2one("rhwl.tjs.genes.base.package",string=u"检测项目"),
"img": fields.binary(u"图片"),
"img_atta":fields.many2one("ir.attachment","IMG"),
"img_new":fields.related("img_atta","datas",type="binary"),
"log": fields.one2many("rhwl.easy.genes.log", "genes_id", "Log"),
"typ": fields.one2many("rhwl.easy.genes.type", "genes_id", "Type"),
"dns_chk": fields.one2many("rhwl.easy.genes.check", "genes_id", "DNA_Check"),
"risk": fields.one2many("rhwl.easy.gene.risk", "genes_id", "Risk"),
"pdf_file": fields.char(u"中文风险报告", size=100),
"pdf_file_en": fields.char(u"英文风险报告", size=100),
"pdf_file_other": fields.char(u"母语风险报告", size=100),
"is_risk":fields.boolean(u"是高风险"),
"is_child":fields.boolean(u"是儿童"),
"risk_count": fields.function(_get_risk, type="integer", string=u'高风险疾病数', multi='risk'),
"risk_text": fields.function(_get_risk, type="char", string=u'高风险疾病', multi='risk'),
"snp_name":fields.char("SNP File",size=20),
"batch_id":fields.many2one("rhwl.easy.genes.batch","Batch_id"),
"export_img":fields.boolean("Export Img"),
"ftp_upload":fields.boolean("FTP Upload"),
"A1":fields.function(_get_risk_detail,type="char",string="A1",multi="risk_detail"),
"A2":fields.function(_get_risk_detail,type="char",string="A2",multi="risk_detail"),
"A3":fields.function(_get_risk_detail,type="char",string="A3",multi="risk_detail"),
"A4":fields.function(_get_risk_detail,type="char",string="A4",multi="risk_detail"),
"A5":fields.function(_get_risk_detail,type="char",string="A5",multi="risk_detail"),
"A6":fields.function(_get_risk_detail,type="char",string="A6",multi="risk_detail"),
"A7":fields.function(_get_risk_detail,type="char",string="A7",multi="risk_detail"),
"A8":fields.function(_get_risk_detail,type="char",string="A8",multi="risk_detail"),
"A9":fields.function(_get_risk_detail,type="char",string="A9",multi="risk_detail"),
"A10":fields.function(_get_risk_detail,type="char",string="A10",multi="risk_detail"),
"A11":fields.function(_get_risk_detail,type="char",string="A11",multi="risk_detail"),
"A12":fields.function(_get_risk_detail,type="char",string="A12",multi="risk_detail"),
"A13":fields.function(_get_risk_detail,type="char",string="A13",multi="risk_detail"),
"A14":fields.function(_get_risk_detail,type="char",string="A14",multi="risk_detail"),
"A15":fields.function(_get_risk_detail,type="char",string="A15",multi="risk_detail"),
"A16":fields.function(_get_risk_detail,type="char",string="A16",multi="risk_detail"),
"A17":fields.function(_get_risk_detail,type="char",string="A17",multi="risk_detail"),
"A18":fields.function(_get_risk_detail,type="char",string="A18",multi="risk_detail"),
"A19":fields.function(_get_risk_detail,type="char",string="A19",multi="risk_detail"),
"A20":fields.function(_get_risk_detail,type="char",string="A20",multi="risk_detail"),
"A21":fields.function(_get_risk_detail,type="char",string="A21",multi="risk_detail"),
"A22":fields.function(_get_risk_detail,type="char",string="A22",multi="risk_detail"),
"A23":fields.function(_get_risk_detail,type="char",string="A23",multi="risk_detail"),
"B1":fields.function(_get_risk_detail,type="char",string="B1",multi="risk_detail"),
"B2":fields.function(_get_risk_detail,type="char",string="B2",multi="risk_detail"),
"B3":fields.function(_get_risk_detail,type="char",string="B3",multi="risk_detail"),
"B4":fields.function(_get_risk_detail,type="char",string="B4",multi="risk_detail"),
"B5":fields.function(_get_risk_detail,type="char",string="B5",multi="risk_detail"),
"B6":fields.function(_get_risk_detail,type="char",string="B6",multi="risk_detail"),
"B7":fields.function(_get_risk_detail,type="char",string="B7",multi="risk_detail"),
"B8":fields.function(_get_risk_detail,type="char",string="B8",multi="risk_detail"),
"B9":fields.function(_get_risk_detail,type="char",string="B9",multi="risk_detail"),
"B10":fields.function(_get_risk_detail,type="char",string="B10",multi="risk_detail"),
"B11":fields.function(_get_risk_detail,type="char",string="B11",multi="risk_detail"),
"B12":fields.function(_get_risk_detail,type="char",string="B12",multi="risk_detail"),
"B13":fields.function(_get_risk_detail,type="char",string="B13",multi="risk_detail"),
"B14":fields.function(_get_risk_detail,type="char",string="B14",multi="risk_detail"),
"B15":fields.function(_get_risk_detail,type="char",string="B15",multi="risk_detail"),
"B16":fields.function(_get_risk_detail,type="char",string="B16",multi="risk_detail"),
"C1":fields.function(_get_risk_detail,type="char",string="C1",multi="risk_detail"),
"C2":fields.function(_get_risk_detail,type="char",string="C2",multi="risk_detail"),
"C3":fields.function(_get_risk_detail,type="char",string="C3",multi="risk_detail"),
"C4":fields.function(_get_risk_detail,type="char",string="C4",multi="risk_detail"),
"C5":fields.function(_get_risk_detail,type="char",string="C5",multi="risk_detail"),
"C6":fields.function(_get_risk_detail,type="char",string="C6",multi="risk_detail"),
"C7":fields.function(_get_risk_detail,type="char",string="C7",multi="risk_detail"),
"C8":fields.function(_get_risk_detail,type="char",string="C8",multi="risk_detail"),
"C9":fields.function(_get_risk_detail,type="char",string="C9",multi="risk_detail"),
"C10":fields.function(_get_risk_detail,type="char",string="C10",multi="risk_detail"),
"C11":fields.function(_get_risk_detail,type="char",string="C11",multi="risk_detail"),
"C12":fields.function(_get_risk_detail,type="char",string="C12",multi="risk_detail"),
"D1":fields.function(_get_risk_detail,type="char",string="D1",multi="risk_detail"),
"D2":fields.function(_get_risk_detail,type="char",string="D2",multi="risk_detail"),
"D3":fields.function(_get_risk_detail,type="char",string="D3",multi="risk_detail"),
"D4":fields.function(_get_risk_detail,type="char",string="D4",multi="risk_detail"),
"D5":fields.function(_get_risk_detail,type="char",string="D5",multi="risk_detail"),
"D6":fields.function(_get_risk_detail,type="char",string="D6",multi="risk_detail"),
"D7":fields.function(_get_risk_detail,type="char",string="D7",multi="risk_detail"),
"D8":fields.function(_get_risk_detail,type="char",string="D8",multi="risk_detail"),
"D9":fields.function(_get_risk_detail,type="char",string="D9",multi="risk_detail"),
"D10":fields.function(_get_risk_detail,type="char",string="D10",multi="risk_detail"),
"D11":fields.function(_get_risk_detail,type="char",string="D11",multi="risk_detail"),
"D12":fields.function(_get_risk_detail,type="char",string="D12",multi="risk_detail"),
"D13":fields.function(_get_risk_detail,type="char",string="D13",multi="risk_detail"),
"D14":fields.function(_get_risk_detail,type="char",string="D14",multi="risk_detail"),
"E1":fields.function(_get_risk_detail,type="char",string="E1",multi="risk_detail"),
"E2":fields.function(_get_risk_detail,type="char",string="E2",multi="risk_detail"),
"E3":fields.function(_get_risk_detail,type="char",string="E3",multi="risk_detail"),
"F1":fields.function(_get_risk_detail,type="char",string="F1",multi="risk_detail"),
"F2":fields.function(_get_risk_detail,type="char",string="F2",multi="risk_detail"),
}
_sql_constraints = [
('rhwl_easy_genes_name_uniq', 'unique(name)', u'样本编号不能重复!'),
]
_defaults = {
"state": 'draft',
"cust_prop": "tjs",
"is_risk":False,
"is_child":False,
"export_img":False,
"language":"CN",
"ftp_upload":False,
"package":"01"
}
def init(self, cr):
ids = self.search(cr,SUPERUSER_ID,[("package","=","A")])
self.write(cr,SUPERUSER_ID,ids,{"package":"01"})
ids = self.search(cr,SUPERUSER_ID,[("birthday","=",False)])
for i in ids:
obj = self.browse(cr,SUPERUSER_ID,i)
if obj.identity and len(obj.identity)==18:
try:
d=datetime.datetime.strptime(obj.identity[6:14],"%Y%m%d").strftime("%Y/%m/%d")
self.write(cr,SUPERUSER_ID,i,{"birthday":d})
except:
pass
#ids = self.search(cr,SUPERUSER_ID,[("package_id","=",False)])
#for i in self.browse(cr,SUPERUSER_ID,ids):
# pid = self.pool.get("rhwl.tjs.genes.base.package").search(cr,SUPERUSER_ID,[("code","=",i.package)])
# self.write(cr,SUPERUSER_ID,i.id,{"package_id":pid[0]})
def create(self, cr, uid, val, context=None):
val["log"] = [[0, 0, {"note": u"资料新增", "data": "create"}]]
if not val.get("batch_no",None):
val["batch_no"]=datetime.datetime.strftime(datetime.datetime.today(),"%m-%d")
if val.has_key("package") and (not val.has_key("package_id")):
p_id = self.pool.get("rhwl.tjs.genes.base.package").search(cr,uid,[("code","=",val.get("package"))])
val["packaage_id"] = p_id[0]
if val.has_key("package_id") and (not val.has_key("package")):
p_obj = self.pool.get("rhwl.tjs.genes.base.package").browse(cr,uid,val.get("package_id"))
val["package"] = p_obj.code
return super(rhwl_gene, self).create(cr, uid, val, context=context)
def write(self, cr, uid, id, val, context=None):
if not context:
context={}
if val.has_key("package") and (not val.has_key("package_id")):
p_id = self.pool.get("rhwl.tjs.genes.base.package").search(cr,uid,[("code","=",val.get("package"))])
p_obj = self.pool.get("rhwl.tjs.genes.base.package").browse(cr,uid,p_id,context=context)
val["packaage_id"] = p_obj.id
if val.has_key("package_id") and (not val.has_key("package")):
p_obj = self.pool.get("rhwl.tjs.genes.base.package").browse(cr,uid,val.get("package_id"))
val["package"] = p_obj.code
if val.get("state","") in ("confirm",):
obj = self.browse(cr,SUPERUSER_ID,id,context=context)
identity = val.get("identity",obj.identity)
if identity and len(identity)==18:
try:
birthday = datetime.datetime.strptime(identity[6:14],"%Y%m%d")
day = datetime.datetime.today() - birthday
if day.days<0 or day.days>54750:
raise osv.except_osv(u"错误",u"身份证号码中的年月日不在合理范围。")
except:
raise osv.except_osv(u"错误",u"身份证号码中的年月日格式错误。")
if val.get("identity") and len(val.get("identity"))==18:
val["birthday"]=datetime.datetime.strptime(val.get("identity")[6:14],"%Y%m%d")
if val.has_key("state"):
val["log"] = [
[0, 0, {"note": u"状态变更为:" + self.STATE_SELECT.get(val.get("state")), "data": val.get("state"),"user_id":context.get("user_id",uid)}]]
            # if the state is changed back to "done" (customer received), the PDF must be uploaded to FTP again
if val.get("state")=="done":
val["ftp_upload"]=False
if val.has_key("img"):
#log_id = self.pool.get("rhwl.easy.genes.log").search(cr,uid,[("genes_id","in",id),("data","=","expimg")])
#if log_id:
# self.pool.get("rhwl.easy.genes.log").write(cr,uid,log_id,{"data":"expimg,1"},context=context)
val["log"] = [[0, 0, {"note": u"图片变更", "data": "img"}]]
val["export_img"]=False
if context.has_key("name"):
obj_name = context["name"]
else:
obj = self.browse(cr,SUPERUSER_ID,id,context=context)
obj_name = obj.name
vals={
"name":obj_name,
"datas_fname":obj_name+".jpg",
"description":obj_name+" information to IMG",
"res_model":"rhwl.easy.genes",
"res_id":id[0],
"create_date":fields.datetime.now,
"create_uid":SUPERUSER_ID,
"datas":val.get("img"),
}
atta_obj = self.pool.get('ir.attachment')
#if obj.img_atta:
# atta_obj.unlink(cr,SUPERUSER_ID,obj.img_atta.id)
atta_id = atta_obj.create(cr,SUPERUSER_ID,vals)
val["img_atta"]=atta_id
val.pop("img")
return super(rhwl_gene, self).write(cr, uid, id, val, context=context)
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
if groupby.count("date")>0 and not orderby:
orderby="date desc"
else:
orderby="id desc"
res=super(rhwl_gene,self).read_group(cr,uid,domain,fields,groupby,offset,limit,context=context,orderby=orderby,lazy=lazy)
return res
def unlink(self, cr, uid, ids, context=None):
if isinstance(ids, (long, int)):
ids = [ids]
if uid != SUPERUSER_ID: ids = self.search(cr, uid, [("id", "in", ids), ("state", "=", "draft")],
context=context)
return super(rhwl_gene, self).unlink(cr, uid, ids, context=context)
def action_state_except(self, cr, uid, ids, context=None):
if not context:
context = {}
if context.get("view_type") == "tree":
return {
'type': 'ir.actions.act_window',
'view_type': 'form',
'res_model': 'rhwl.easy.genes.popup',
'view_mode': 'form',
'name': u"异常说明",
'target': 'new',
'context': {'col': 'except_note'},
'flags': {'form': {'action_buttons': False}}}
return self.write(cr, uid, ids, {"state": "except"})
def action_state_except_confirm(self, cr, uid, ids, context=None):
if not context:
context = {}
if context.get("view_type") == "tree":
return {
'type': 'ir.actions.act_window',
'view_type': 'form',
'res_model': 'rhwl.easy.genes.popup',
'view_mode': 'form',
'name': u"回馈说明",
'target': 'new',
'context': {'col': 'confirm_note'},
'flags': {'form': {'action_buttons': False}}}
return self.write(cr, uid, ids, {"state": "except_confirm"})
def action_state_confirm(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {"state": "confirm"})
def action_state_cancel(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {"state": "cancel"})
def action_state_dna(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {"state": "dna_except"})
def action_state_dnaok(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {"state": "dna_ok"})
def action_state_ok(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {"state": "ok"})
def action_state_reset(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {"state": "draft"})
def action_state_report(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {"state": "report"})
def action_state_result_done(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {"state": "result_done"})
def action_view_pdf(self, cr, uid, ids, context=None):
return {'type': 'ir.actions.act_url',
'url': context.get("file_name", "/"),
'target': 'new'}
    # Collect all SNP genotype data for the given list of record ids
def get_gene_type_list(self,cr,uid,ids,context=None):
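        # Returns a nested dict keyed by sex ('F'/'M'), then by sample number; illustrative shape:
        #   {'M': {'S0001': {'name': 'S0001', 'cust_name': '...', 'language': 'CN', 'rs123': 'AA', ...}}}
        # (sample number and SNP id here are made up; genotypes are stored with the '/' stripped)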
data={}
for i in self.browse(cr,uid,ids,context=context):
sex=i.sex.encode("utf-8") if i.sex.encode("utf-8") == 'F' else 'M'
key = i.name.encode("utf-8")
if not data.has_key(sex):
data[sex]={}
if not data[sex].has_key(key):
data[sex][key]={"name":key,
"cust_name":i.cust_name.encode("utf-8"),
"language":i.language.encode("utf-8")
}
for t in i.typ:
k = t.snp.encode("utf-8")
data[sex][key][k]=(t.typ).encode("utf-8").replace("/","")
return data
    # Export sample information images
def export_genes_img(self,cr,uid,context=None):
upload_path = os.path.join(os.path.split(__file__)[0], "static/local/upload/tjs")
d=os.path.join(upload_path,u"样本信息图片")
if not os.path.exists(d):
os.mkdir(d)
all_ids = self.search(cr,uid,[("cust_prop","in",["tjs","tjs_vip"]),("export_img","=",False)],context=context)
#pic_ids = self.search(cr,uid,[("cust_prop","in",["tjs","tjs_vip"]),("export_img","=",False)],context=context)
#for i in pic_ids:
# all_ids.remove(i)
filestore=tools.config.filestore(cr.dbname)
for i in self.browse(cr,uid,all_ids,context=context):
if not i.img_atta:continue
if len(i.date.split("/"))>1:
tname = ".".join(i.date.split('/')[1:]) + u"会_图片"
else:
tname = ".".join(i.date.split('-')[1:]) + u"会_图片"
tname = os.path.join(d,tname)
if not os.path.exists(tname):
os.mkdir(tname)
att_obj = self.pool.get('ir.attachment').browse(cr,uid,i.img_atta.id,context=context)
if not os.path.exists(os.path.join(filestore,att_obj.store_fname)):continue
if (not os.path.exists(os.path.join(tname,i.name+u"_"+i.cust_name+u".jpg"))) or os.stat(os.path.join(filestore,att_obj.store_fname)).st_size != os.stat(os.path.join(tname,i.name+u"_"+i.cust_name+u".jpg")).st_size:
shutil.copy(os.path.join(filestore,att_obj.store_fname),os.path.join(tname,i.name+u"_"+i.cust_name+u".jpg"))
self.write(cr,uid,i.id,{"log":[[0,0,{"note":u"图片导出","data":"expimg"}]],"export_img":True})
    # Export sample SNP data to the report-generation server
def create_gene_type_file(self,cr,uid,ids,context=None):
self.pool.get("rhwl.genes.picking").export_box_genes(cr,uid,context=context) #先导出已经分箱的样本
self.export_genes_img(cr,uid,context=context) #导出图片信息
cr.execute("select package,count(*) from rhwl_easy_genes where state='ok' group by package")
for i in cr.fetchall():
self.create_gene_type_file_package(cr,uid,ids,i[0],context=context)
def create_gene_type_file_package(self, cr, uid, ids, package,context=None):
ids = self.search(cr, uid, [("state", "=", "ok"),("package","=",package),("typ","!=",False)], order="batch_no,name",limit=200,context=context)
if not ids:return
if isinstance(ids, (long, int)):
ids = [ids]
data = self.get_gene_type_list(cr,uid,ids,context=context)
if package=="01":
snp_name = "snp_" + datetime.datetime.now().strftime("%Y%m%d%H%M%S")
fpath = os.path.join(os.path.split(__file__)[0], "static/remote/snp")
else:
pid = self.pool.get("rhwl.tjs.genes.base.package").search(cr,SUPERUSER_ID,[("code","=",package)])
pobj = self.pool.get("rhwl.tjs.genes.base.package").browse(cr,SUPERUSER_ID,pid,context=context)
snp_name = pobj.report_no+"_"+datetime.datetime.now().strftime("%Y%m%d%H%M%S")
fpath = os.path.join(os.path.split(__file__)[0], "static/tjs_new_remote/snp")
fname = os.path.join(fpath, snp_name + ".txt")
header=[]
f = open(fname, "w+")
for s in ["F","M"]:
if not data.has_key(s):continue
data_list=data[s].keys()
data_list.sort()
for k in data_list:
line_row=[data[s][k]["name"],data[s][k]["cust_name"],s,data[s][k]["language"]]
if not header:
header = data[s][k].keys()
header.remove("name")
header.remove("cust_name")
header.remove("language")
header.sort()
f.write("编号\t姓名\t性别\t语种\t" + "\t".join(header) + '\n')
for i in header:
line_row.append(data[s][k][i])
f.write("\t".join(line_row) + '\n')
f.close()
os.system("chmod 777 "+fname)
self.action_state_report(cr, uid, ids, context=context)
self.write(cr,uid,ids,{"snp_name":snp_name},context=context)
js={
"first":"易感样本检测结果转报告生成:",
"keyword1":"即时",
"keyword2":"本次转出样本%s笔,等待生成报告。" %(len(ids),),
"keyword3":fields.datetime.now(),
"remark":"以上数据仅供参考,详细情况请登录Odoo查询。"
}
self.pool.get("rhwl.weixin.base").send_template2(cr,uid,js,"is_lib_import",context=context)
    # Send a WeChat alert when a report file size is out of range
def pdf_size_error(self,cr,uid,file,lens,context=None):
s=os.stat(file).st_size
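        # Reports smaller than 16 MB, or larger than 50 MB (file names with fewer than 10 parts)
        # / 90 MB (10 or more parts), are treated as suspect and trigger a WeChat alert.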
if s/1024/1024<16 or ( (lens<10 and s/1024/1024>50) or (lens>=10 and s/1024/1024>90) ):
js={
"first":"易感样本报告接收出错:",
"keyword1":"即时",
"keyword2":"样本报告%s文件大小不正确。" %(os.path.split(file)[-1],),
"keyword3":fields.datetime.now(),
"remark":"以上数据仅供参考,详细情况请登录服务器查询。"
}
self.pool.get("rhwl.weixin.base").send_template2(cr,uid,js,"is_jobmanager",context=context)
return True
else:
return False
    # Receive the generated risk report PDFs
def get_gene_pdf_file(self, cr, uid, context=None):
#_logger.warn("cron job get_gene_pdf_file")
pdf_files=[]
model_path=os.path.split(__file__)[0]
fpath = os.path.join(model_path, "static/remote/report")
for f in os.listdir(fpath):
pdf_files.append(os.path.join(fpath,f))
fpath = os.path.join(model_path, "static/tjs_new_remote/report")
for f in os.listdir(fpath):
pdf_files.append(os.path.join(fpath,f))
tpath = os.path.join(model_path, "static/local/report")
pdf_count = 0
last_week = time.time() - 60*60*24*3
self.pool.get("rhwl.genes.picking")._clear_picking_dict()
for newfile in pdf_files:
#newfile = os.path.join(fpath, f)
if not os.path.isdir(newfile):continue
for f1 in os.listdir(newfile):
                name_list = re.split("[_\.]",f1) # split the file name into its parts
                # the report file names come in six patterns
if self.pdf_size_error(cr,uid,os.path.join(newfile, f1),len(name_list),context=context):
continue
if len(name_list)==2:
f2 = ".".join(name_list)
shutil.move(os.path.join(newfile, f1), os.path.join(tpath, f2))
ids = self.search(cr, uid, [("name", "=", f2.split(".")[0])])
if ids:
self.write(cr, uid, ids,
{"pdf_file": "rhwl_gene/static/local/report/" + f2, "state": "report_done"})
pdf_count += 1
elif len(name_list)==3:
f2 = ".".join([name_list[0],name_list[2]])
shutil.move(os.path.join(newfile, f1), os.path.join(tpath, f2))
ids = self.search(cr, uid, [("name", "=", f2.split(".")[0])])
if ids:
self.write(cr, uid, ids,
{"pdf_file": "rhwl_gene/static/local/report/" + f2, "state": "report_done"})
pdf_count += 1
elif len(name_list)==4:
                    # e.g. 23999945_张三_CN.pdf (sample-no _ customer-name _ language .pdf)
lang = name_list[2]
col_name="pdf_file"
if lang=="CN":
f2 = ".".join([name_list[0],name_list[3]])
else:
f2 = ".".join([name_list[0]+"_"+name_list[2],name_list[3]])
if lang=="EN":
col_name = "pdf_file_en"
else:
col_name = "pdf_file_other"
shutil.move(os.path.join(newfile, f1), os.path.join(tpath, f2))
ids = self.search(cr, uid, [("name", "=", f2.split(".")[0])])
if ids:
self.write(cr, uid, ids,
{col_name: "rhwl_gene/static/local/report/" + f2, "state": "report_done"})
pdf_count += 1
elif len(name_list)==6 or len(name_list)==10:
gene_no = name_list[2]
if len(f.split("_"))==3:
picking_no = f.split("_")[1]
else:
picking_no = self.pool.get("rhwl.genes.picking")._get_picking_from_genes(cr,uid,gene_no,context=context)
if not picking_no:continue
ppath=os.path.join(tpath,picking_no)
if not os.path.exists(ppath):
os.mkdir(ppath)
shutil.move(os.path.join(newfile, f1), os.path.join(ppath, f1))
if os.path.getmtime(newfile) < last_week:
os.rmdir(newfile)
cr.commit()
if pdf_count>0:
js={
"first":"易感样本报告接收:",
"keyword1":"即时",
"keyword2":"本次接收样本报告%s本。" %(pdf_count,),
"keyword3":fields.datetime.now(),
"remark":"以上数据仅供参考,详细情况请登录Odoo查询。"
}
self.pool.get("rhwl.weixin.base").send_template2(cr,uid,js,"is_jobmanager",context=context)
        # Parse risk-analysis data from the Excel exports
fpath = os.path.join(model_path, "static/remote/excel")
tpath = os.path.join(model_path, "static/local/excel")
for f in os.listdir(fpath):
if f.split(".")[-1]!="xls":continue
if f.split("_")[0]=="box":continue
            if os.path.isfile(os.path.join(tpath, f)):os.remove(os.path.join(tpath, f)) # remove any existing file of the same name at the destination
shutil.move(os.path.join(fpath, f), os.path.join(tpath, f))
fs = open(os.path.join(tpath, f),"r")
res = fs.readlines()
fs.close()
risk = res[0].replace("\n","").split("\t")[3:]
disease = self.pool.get("rhwl.gene.disease")
        disease_dict={} # disease record ids in the master table, keyed by report column index
dict_index=3
        # make sure every disease named in the risk report exists in the master data
for r in risk:
if not r:continue
r_id = disease.search(cr,uid,[("name","=",r.decode("utf-8"))])
if not r_id:
shutil.move(os.path.join(tpath, f),os.path.join(fpath, f))
_logger.warn(u"疾病名称[%s]在基本数据中不存在。" %(r.decode("utf-8"),))
return
disease_dict[dict_index]=[r,r_id[0]]
dict_index +=1
for l in res[1:]:
is_risk=False
l = l.replace("\n","").split("\t")
gene_id = self.pool.get("rhwl.easy.genes").search(cr,uid,[("name","=",l[0].decode("utf-8"))])
if not gene_id:
_logger.warn(u"样本编号[%s]在基本数据中不存在。" %(l[0].decode("utf-8"),))
else:
risk_id=self.pool.get("rhwl.easy.gene.risk").search(cr,uid,[("genes_id","in",gene_id)])
if risk_id:
self.pool.get("rhwl.easy.gene.risk").write(cr,uid,risk_id,{"active":False})
val=[]
for k in disease_dict.keys():
val.append([0, 0, {"disease_id": disease_dict[k][1], "risk": l[k]}])
if l[k]=="高风险" or l[k]=="低能力":is_risk=True
self.pool.get("rhwl.easy.genes").write(cr,uid,gene_id,{"is_risk":is_risk,"risk":val})
self.pool.get("rhwl.genes.picking").create_box(cr,uid,context=context) #接收完风险数据以后,重新调用分箱
    # WeChat notification summarising sample status counts
def weixin_notice_template2(self,cr,uid,context=None):
s_date,e_date = self.date_between(20)
        # count samples received today
cr.execute("""select count(*) from rhwl_easy_genes where cust_prop in ('tjs','tjs_vip') and create_date::date = now()::date""")
for i in cr.fetchall():
today_count = i[0]
        # next scheduled delivery
pick_count=0
pick_id = self.pool.get( "rhwl.genes.picking").search(cr,uid,[("date",">=",datetime.datetime.today()),("state","!=","done")],order="date",limit=1)
if pick_id:
pick_obj = self.pool.get( "rhwl.genes.picking").browse(cr,uid,pick_id,context=context)
pick_count = pick_obj.files
        # number of samples in the current period
idscount = self.search_count(cr,uid,[("date",">=",s_date),("date","<=",e_date),("cust_prop","in",["tjs","tjs_vip"])],context=context)
cr.execute("""with d as (select batch_no,state,count(*) as c,date from rhwl_easy_genes where cust_prop in ('tjs','tjs_vip') group by batch_no,state,date order by batch_no)
select *
from d dd
where not exists(select * from d where state='done' and d.batch_no=dd.batch_no)""")
v_count0=0
v_count1=0
v_count2=0
v_count3=0
v_count4=0
v_count5 = 0
dna_rate={}
        not_dna_except={} # batches to exclude from the QC-failure-rate summary
wait_receiv=[]
for i in cr.fetchall():
if not dna_rate.has_key(i[0]):
dna_rate[i[0]]={"count":0,"except":0}
dna_rate[i[0]]["count"] =dna_rate[i[0]]["count"]+i[2]
            if i[1]=='draft':
                batch_id = self.pool.get("rhwl.easy.genes.batch").search(cr,uid,[("name","=",i[0]),("post_date","!=",False)])
                if not batch_id:
                    v_count0 += i[2] # awaiting receipt
                    wait_receiv.append(str(i[2])+"/"+".".join((i[3].split("-")[1:])))
                    not_dna_except[i[0]]=True
                # the sample is still draft, but once a lab receipt date is set it counts as in the lab
                batch_id = self.pool.get("rhwl.easy.genes.batch").search(cr,uid,[("name","=",i[0]),("lib_date","!=",False)])
                if batch_id:
                    v_count1 += i[2] # awaiting testing
            elif i[1] in ['except','except_confirm','confirm']:
                v_count1 += i[2] # awaiting testing
                not_dna_except[i[0]]=True
            elif i[1] in ['dna_ok','ok','report']:
                v_count2 += i[2] # awaiting report generation
            elif i[1] == 'dna_except':
                v_count3 += i[2] # QC failed
                dna_rate[i[0]]["except"] = dna_rate[i[0]]["except"] + i[2]
            elif i[1] in ['report_done',"result_done","deliver",]:
                v_count4 += i[2] # awaiting delivery
            elif i[1] in ['done']:
                v_count5 += i[2] # completed
except_rate=[]
for k,v in dna_rate.items():
if not not_dna_except.get(k,False):
except_rate.append(k.encode("utf-8")+"="+str(v["except"])+"/"+str(v["count"]))
js={
"first":"易感样本状况统计:",
"keyword1":"本期从(%s-%s)"%(s_date.strftime("%Y/%m/%d"),e_date.strftime("%Y/%m/%d")),
"keyword2":"今日送样%s,在途%s%s,实验中%s,排版中%s,已出报告%s(质检不合格%s,待印刷%s,下次送货%s)。本期总计%s笔。" %(today_count,v_count0,("["+",".join(wait_receiv)+"]" if wait_receiv else ""),v_count1,v_count2,v_count4+v_count3+v_count5,v_count3,v_count4-pick_count,pick_count,idscount),
"keyword3":(datetime.datetime.utcnow() + datetime.timedelta(hours=8)).strftime("%Y/%m/%d %H:%M:%S"),
"remark":"以上数据仅供参考,详细情况请登录Odoo查询。"
}
self.pool.get("rhwl.weixin.base").send_template2(cr,uid,js,"is_notice",context=context)
    # WeChat reminder about lab progress on overdue samples
def weixin_notice_template3(self,cr,uid,context=None):
cr.execute("""select date,count(*) c
from rhwl_easy_genes
where cust_prop in ('tjs','tjs_vip')
and state in ('confirm','except_confirm','draft','except')
and date<=(now() - interval '4 day')::date group by date""")
res=[]
for i in cr.fetchall():
res.append("日期:"+str(i[0])+",样本数:"+str(i[1]))
if res:
js={
"first":"易感样本实验进度提醒:",
"keyword1":"4天之前送达样本",
"keyword2":";".join(res),
"keyword3":(datetime.datetime.utcnow() + datetime.timedelta(hours=8)).strftime("%Y/%m/%d %H:%M:%S"),
"remark":"亲爱的实验同事,以上样本,须在本周日之前出结果,否则就会超出和客户约定的送货周期。收到本条消息时,请及时和运营部同事确认,谢谢。"
}
self.pool.get("rhwl.weixin.base").send_template2(cr,uid,js,"is_library",context=context)
content="易感样本实验进度提醒,统计周期:%s,提醒说明:%s,%s"%(js["keyword1"],js["keyword2"],js["remark"])
self.pool.get("rhwl.weixin.base").send_qy_text(cr,uid,'rhwlyy',"is_library",content,context=context)
    # Work out the current period's start and end dates from the cut-off day of the month
def date_between(self,days=20):
today = datetime.datetime.today()
if today.day<=days:
s_date = today-datetime.timedelta(days=today.day+1)
s_date = datetime.datetime(s_date.year,s_date.month,days+1)
e_date = today
else:
s_date = datetime.datetime(today.year,today.month,days+1)
e_date = today
return s_date,e_date
def action_ftp_upload(self,cr,uid,ids,context=None):
self.ftp_uploads(cr,uid,ids,context=context)
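    # Copy finished report PDFs to the remote ftp directory via scp (at most 100 completed samples per run) and mark them as uploaded.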
def ftp_uploads(self,cr,uid,ids,context=None):
ids = self.search(cr,uid,[("state","=","done"),("ftp_upload","=",False),("cust_prop","in",["tjs","tjs_vip"])],limit=100)
for i in self.browse(cr,uid,ids,context=context):
os.system("scp /data/odoo/file/report/%s*.pdf [email protected]:/home/rhwlwz/ftp/"%(i.name.encode("utf-8"),))
self.write(cr,uid,i.id,{"ftp_upload":True})
    # Export sample SNP (genotype) data to the report-generation server
def temp_export(self, cr, uid, ids, context=None):
ids = self.search(cr, uid, [("name", "in", ['3599999021','3599999843','3599998984','3599999187','3599999887'])], order="batch_no,name",limit=200,context=context)
if not ids:return
if isinstance(ids, (long, int)):
ids = [ids]
data = self.get_gene_type_list(cr,uid,ids,context=context)
snp_name = "snp_" + datetime.datetime.now().strftime("%Y%m%d%H%M%S")
fpath = os.path.join(os.path.split(__file__)[0], "static/remote/snp/hebin")
fname = os.path.join(fpath, snp_name + ".txt")
header=[]
f = open(fname, "w+")
for s in ["F","M"]:
if not data.has_key(s):continue
data_list=data[s].keys()
data_list.sort()
for k in data_list:
line_row=[data[s][k]["name"],data[s][k]["cust_name"],s]
if not header:
header = data[s][k].keys()
header.remove("name")
header.remove("cust_name")
header.sort()
f.write("编号\t姓名\t性别\t" + "\t".join(header) + '\n')
for i in header:
line_row.append(data[s][k][i])
f.write("\t".join(line_row) + '\n')
f.close()
    # Pull new sample records for T-client online orders from the remote API
def action_get_online_genes(self,cr,uid,ids,context=None):
today= datetime.datetime.today().strftime("%Y-%m-%d")
before_day = (datetime.datetime.today()+datetime.timedelta(days=-3)).strftime("%Y-%m-%d")
u = urllib2.urlopen("http://genereport.taiji-sun.com/file/API/SampleInfoToGenetalks?beginTime="+before_day+"&endTime="+today)
data = u.readlines()
if not data:return
content = eval(data[0])
package={
"01":"01",
"02":"02",
"03":"03",
"04":"04",
"05":"05",
"06":"06"
}
batch_no={}
for i in content:
id = self.search(cr,uid,[("name","=",i["SampleCode"])],context=context)
if id:continue
if not package.has_key(i["SampleCatalogCode"]):
raise osv.except_osv("Error",u"检测代号[%s]名称[%s]在系统未设置,不可以转入。"%(i["SampleCatalogCode"],i["SampleCatalogName"]))
sex = i["Gender"]==u"男" and "T" or "F"
date = i["CreatedTime"].split(" ")[0]
cust_prop = i["IsVIP"]==u"否" and "tjs" or "tjs_vip"
idt = i["IDNumber"]
is_child = True if len(idt)==18 and int(idt[6:10])>=(datetime.datetime.today().year-12) and int(idt[6:10])<(datetime.datetime.today().year) else False
birthday = False
if idt and len(idt)==18:
try:
birthday = datetime.datetime.strptime(idt[6:14],"%Y%m%d").strftime("%Y/%m/%d")
except:
pass
if not batch_no.has_key(date):
batch_no[date]={}
if batch_no.get(date).get(package.get(i["SampleCatalogCode"])):
max_no=batch_no.get(date).get(package.get(i["SampleCatalogCode"]))
else:
cr.execute("select max(batch_no) from rhwl_easy_genes where cust_prop in ('tjs','tjs_vip') and package='%s' "%(package.get(i["SampleCatalogCode"])))
max_no=None
for no in cr.fetchall():
max_no = no[0]
if not max_no:max_no=package.get(i["SampleCatalogCode"])+"-000"
if package.get(i["SampleCatalogCode"])=="01":
max_no=str(int(max_no)+1).zfill(3)
else:
max_no=max_no[0:3]+str(int(max_no[3:])+1).zfill(3)
batch_no[date][package.get(i["SampleCatalogCode"])]=max_no
self.create(cr,uid,{"name":i["SampleCode"],"receiv_date":i["RecivedTime"],"identity":i["IDNumber"],"cust_name":i["ClientName"],"sex":sex,"date":date,"cust_prop":cust_prop,"is_child":is_child,"birthday":birthday,"package":package.get(i["SampleCatalogCode"]),"batch_no":max_no},context=context)
# Operation log entries attached to a sample
class rhwl_gene_log(osv.osv):
_name = "rhwl.easy.genes.log"
_order = "date desc"
_columns = {
"genes_id": fields.many2one("rhwl.easy.genes", "Genes ID",select=True),
"date": fields.datetime(u"时间"),
"user_id": fields.many2one("res.users", u"操作人员"),
"note": fields.text(u"作业说明"),
"data": fields.char("Data")
}
_defaults = {
"date": fields.datetime.now,
"user_id": lambda obj, cr, uid, context: uid,
}
# Lab QC / test result details for a sample
class rhwl_gene_check(osv.osv):
_name = "rhwl.easy.genes.check"
_columns = {
"genes_id": fields.many2one("rhwl.easy.genes", "Genes ID",select=True),
"date": fields.date(u"收样日期"),
"dna_date": fields.date(u"提取日期"),
"concentration": fields.char(u"浓度", size=5, help=u"参考值>=10"),
"lib_person": fields.char(u"实验操作人", size=10),
"od260_280": fields.char("OD260/OD280", size=5, help=u"参考值1.8-2.0"),
"od260_230": fields.char("OD260/OD230", size=5, help=u"参考值>=2.0"),
"chk_person": fields.char(u"检测人", size=10),
"data_loss": fields.char(u"数据缺失率", size=6, help=u"参考值<1%"),
"loss_person": fields.char(u"判读人", size=10),
"loss_date": fields.date(u"判读日期"),
"active": fields.boolean("Active"),
}
_defaults = {
"active": True
}
# SNP (locus) genotype data for a sample
class rhwl_gene_type(osv.osv):
_name = "rhwl.easy.genes.type"
_columns = {
"genes_id": fields.many2one("rhwl.easy.genes", "Genes ID",select=True),
"snp": fields.char("SNP", size=20),
"typ": fields.char("Type", size=10),
"active": fields.boolean("Active"),
}
_defaults = {
"active": True
}
# Disease risk result for a sample
class rhwl_gene_risk(osv.osv):
_name = "rhwl.easy.gene.risk"
_columns = {
"genes_id": fields.many2one("rhwl.easy.genes", "Genes ID",select=True),
"disease_id": fields.many2one("rhwl.gene.disease", string=u"疾病名"),
"risk": fields.char(u"风险", size=20),
"active": fields.boolean("Active"),
}
_defaults = {
"active": True
}
# Correction records for wrong report information (name / sex / ID number)
class rhwl_report_except(osv.osv):
_name = "rhwl.easy.genes.report.except"
_columns={
"name":fields.many2one("rhwl.easy.genes",u"基因样本编号",required=True),
"cust_name": fields.char(u"会员姓名(原)", readonly=True, size=10),
"sex": fields.selection([('T', u"男"), ('F', u"女")], u"性别(原)", readonly=True),
"identity": fields.char(u"身份证号(原)", size=18,readonly=True),
"cust_name_n": fields.char(u"会员姓名(新)", required=True, size=10),
"sex_n": fields.selection([('T', u"男"), ('F', u"女")], u"性别(新)", required=True),
"identity_n": fields.char(u"身份证号(新)", size=18),
"state":fields.selection([("draft",u"草稿"),("confirm",u"确认")]),
"user_id":fields.many2one("res.users",u"异常确认人",required=True),
"date":fields.date(u"确认日期",required=True),
"note":fields.text(u"备注"),
}
_defaults={
"state":'draft',
}
@api.onchange("name")
def onchange_name(self):
self.cust_name = self.name.cust_name
self.sex = self.name.sex
self.identity = self.name.identity
self.cust_name_n = self.name.cust_name
self.sex_n = self.name.sex
self.identity_n = self.name.identity
def create(self,cr,uid,val,context=None):
obj = self.pool.get("rhwl.easy.genes").browse(cr,uid,val.get("name"),context=context)
val["cust_name"]=obj.cust_name
val["sex"] = obj.sex
val["identity"] = obj.identity
return super(rhwl_report_except,self).create(cr,uid,val,context=context)
def action_state_confirm(self,cr,uid,ids,context=None):
self.write(cr,uid,ids,{"state":"confirm"},context=context)
obj = self.browse(cr,uid,ids,context=context)
if obj.cust_name != obj.cust_name_n or obj.sex != obj.sex_n or obj.identity != obj.identity_n:
self.pool.get("rhwl.easy.genes").write(cr,uid,obj.name.id,{"cust_name":obj.cust_name_n,"sex":obj.sex_n,"identity":obj.identity_n},context=context)
if obj.name.state.encode("utf-8") in ('report','report_done',"result_done","deliver",'done'):
self.pool.get("rhwl.easy.genes").write(cr,uid,obj.name.id,{"state":"ok"},context=context)
# Per-batch timeline statistics
class rhwl_gene_batch(osv.osv):
_name = "rhwl.easy.genes.batch"
_order = "name desc"
def str2date(self,str):
if not str:return None
return datetime.datetime.strptime(str.split(" ")[0],"%Y-%m-%d")
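    # Computed-field helper: derives per-batch statistics (dates, sample counts, QC-failure rate, lab and delivery timing) for the function fields below.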
def _get_genes1(self,cr,uid,ids,field_names,arg,context=None):
res=dict.fromkeys(ids,{})
genes_table = self.pool.get("rhwl.easy.genes")
log_table = self.pool.get("rhwl.easy.genes.log")
for i in ids:
res[i] = dict.fromkeys(field_names,None)
gene_id = genes_table.search(cr,uid,[("batch_id","=",i)],context=context)
if not gene_id:continue
gene_obj = genes_table.browse(cr,uid,gene_id[0],context=context)
res[i]["date"] = self.str2date(gene_obj.date)
res[i]["qty"] = len(gene_id)
res[i]["imgs"] = genes_table.search_count(cr,uid,[("batch_id","=",i),("img_atta","!=",False)],context=context)
log_id = log_table.search(cr,uid,[("genes_id","in",gene_id),("data","=","DNA")],order="date desc",context=context)
if log_id:
log_id = log_id[0]
log_obj = log_table.browse(cr,uid,log_id,context=context)
res[i]["dna_date"] = self.str2date(log_obj.date)
else:
res[i]["dna_date"] = None
log_id = log_table.search(cr,uid,[("genes_id","in",gene_id),("data","=","SNP")],order="date desc",context=context)
if log_id:
log_id = log_id[0]
log_obj = log_table.browse(cr,uid,log_id,context=context)
res[i]["snp_date"] = self.str2date(log_obj.date)
else:
res[i]["snp_date"] = None
gene_id = genes_table.search(cr,uid,[("batch_id","=",i),("state","=","dna_except")],context=context)
res[i]["dna_qty"] = len(gene_id)
res[i]["dna_rate"] = str(round((res[i]["dna_qty"]*1.0)/res[i]["qty"],4)*100)+"%"
cr.execute("select name,lib_date from rhwl_easy_genes_batch where id="+str(i))
obj = cr.fetchall()
batch_no,lib_date = obj[0]
if lib_date:lib_date = self.str2date(lib_date)
if res[i]["date"] and lib_date:
res[i]["express_days"] = (lib_date - res[i]["date"]).days
if lib_date and res[i]["snp_date"]:
res[i]["library_days"] = (res[i]["snp_date"] - lib_date).days
wd=lib_date.weekday()
if res[i]["library_days"]<=7-wd:
res[i]["library_result"] = 3
elif res[i]["library_days"]<=(7-wd)+7:
res[i]["library_result"] = 2
elif res[i]["library_days"]<=(7-wd)+14:
res[i]["library_result"] = 1
else:
res[i]["library_result"] = 0
line_id = self.pool.get("rhwl.genes.picking.line").search(cr,uid,[("batch_no","=",batch_no)],order="id desc",context=context)
if line_id:
line_id = line_id[0]
line_obj = self.pool.get("rhwl.genes.picking.line").browse(cr,uid,line_id,context=context)
res[i]["send_date"] = self.str2date(line_obj.picking_id.date)
res[i]["real_date"] = self.str2date(line_obj.picking_id.real_date)
if res[i]["date"] and res[i]["real_date"]:
res[i]["all_days"] = (res[i]["real_date"] - res[i]["date"]).days
return res
_columns={
"name":fields.char(u"批次",required=True),
"date":fields.function(_get_genes1,type="date",string=u"送检日期",store=True,multi="get_genes1"),
"qty":fields.function(_get_genes1,type="integer",string=u"送检数量",multi="get_genes1"),
"post_date":fields.date(u'快递收件日期'),
"lib_date":fields.date(u'实验签收日期'),
"express_days":fields.function(_get_genes1,type="integer",arg="name",string=u"收样天数",multi="get_genes1"),
"dna_date":fields.function(_get_genes1,type="date",string=u"质检确认日期",multi="get_genes1"),
"snp_date":fields.function(_get_genes1,type="date",string=u"位点导入日期",multi="get_genes1"),
"dna_qty":fields.function(_get_genes1,type="integer",string=u"质检不合格数量",multi="get_genes1"),
"dna_rate":fields.function(_get_genes1,type="char",string=u"质检不合格比率(%)",multi="get_genes1"),
"library_days":fields.function(_get_genes1,type="integer",string=u"实验天数",multi="get_genes1"),
"library_result":fields.function(_get_genes1,type="integer",string=u"实验进度",multi="get_genes1"),
"send_date":fields.function(_get_genes1,type="date",string=u"预计发货日期",multi="get_genes1"),
"real_date":fields.function(_get_genes1,type="date",string=u"实际发货日期",multi="get_genes1"),
"all_days":fields.function(_get_genes1,type="integer",string=u"送货周期",multi="get_genes1"),
"imgs":fields.function(_get_genes1,type="integer",string=u"已拍照数",multi="get_genes1"),
}
def create(self,cr,uid,val,context=None):
gene_id = self.pool.get("rhwl.easy.genes").search(cr,uid,[("batch_no","=",val.get("name"))],context=context)
if not gene_id:
raise osv.except_osv(u"错误",u"批次错误,请输入正确的批次号。")
id = super(rhwl_gene_batch,self).create(cr,uid,val,context=context)
self.pool.get("rhwl.easy.genes").write(cr,uid,gene_id,{"batch_id":id},context=context)
return id
def action_button(self,cr,uid,ids,context=None):
pass
# Disease category
class rhwl_gene_disease_type(osv.osv):
_name = "rhwl.gene.disease.type"
_columns = {
"name": fields.char(u"分类名称", size=100),
"line": fields.one2many("rhwl.gene.disease", "type_id", string=u"疾病名称")
}
# Disease detail
class rhwl_gene_disease(osv.osv):
_name = "rhwl.gene.disease"
_columns = {
"name": fields.char(u"疾病名称", size=50),
"type_id": fields.many2one("rhwl.gene.disease.type", string=u"分类名称"),
"code":fields.char("Code",size=5),
}
class rhwl_gene_popup(osv.osv_memory):
_name = "rhwl.easy.genes.popup"
_columns = {
"note": fields.text(u"说明")
}
def action_ok(self, cr, uid, ids, context=None):
obj = self.browse(cr, uid, ids, context)
s = {
"confirm_note": "except_confirm",
"except_note": "except"
}
        if not context:
            context={}
        col = context.get('col')
        context["user_id"]=uid
tab = context.get("tab","rhwl.easy.genes")
self.pool.get(tab).write(cr, SUPERUSER_ID, context.get("active_id", 0),
{col: obj.note, "state": s.get(col)},context=context)
| agpl-3.0 | -2,561,111,213,991,747,000 | 48.208411 | 304 | 0.545021 | false |
pazagra/catkin_ws | src/Multimodal_Interaction/Offline_Learning/SVM.py | 1 | 3693 | from scipy.cluster.vq import *
from sklearn.preprocessing import Normalizer
from sklearn.svm import LinearSVC
from sklearn.externals import joblib
from sklearn.svm import *
from sklearn.metrics import *
from sklearn import linear_model
import matplotlib.pyplot as plt
import cv2
import os
import numpy as np
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import GridSearchCV
class SVM_offline:
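    """Wrapper around sklearn's SVC: persists/reloads the model with joblib, trains on feature descriptors and predicts labels, optionally with class probabilities."""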
def load(self,path):
if os.path.isfile(path):
self.clf, self.clf.classes_, self.stdSlr = joblib.load(path)
return True
return False
def predict(self,descriptors):
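        # With probability enabled, returns (best label, its probability); otherwise returns the raw predict() output.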
if self.prob:
P = self.clf.predict_proba(descriptors)
# print max(P[0])
i =np.nonzero(P[0] == max(P[0]))[0][0]
return self.clf.classes_[i],P[0][i]
else:
return self.clf.predict(descriptors)
def get_names(self):
return self.clf.classes_
def IsObject(self,label):
return label in self.clf.classes_
def train(self, descriptors, names):
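        # Fit an RBF-kernel SVC on the descriptor matrix (scaling and grid-search code left commented out) and persist (clf, classes_, scaler) to self.path.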
# Scaling the words
# N = Normalizer().fit(descriptors)
# descriptors = N.transform(descriptors)
# self.stdSlr = StandardScaler().fit(descriptors)
# im_features = self.stdSlr.transform(descriptors)
#
# Train the Linear SVM
# unique, counts = np.unique(names, return_counts=True)
# print dict(zip(unique, counts))
# C_range = np.logspace(-5, 10, 13)
# gamma_range = np.logspace(-9, 5, 13)
# param_grid = dict(gamma=gamma_range, C=C_range)
# cv = StratifiedShuffleSplit(n_splits=5, test_size=0.3, random_state=42)
# grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
# grid.fit(descriptors, names)
#
# print("The best parameters are %s with a score of %0.2f"
# % (grid.best_params_, grid.best_score_))
self.clf = SVC(kernel='rbf', C=self.C, verbose=False, max_iter=self.iter,
probability=self.prob, gamma=self.gamma)
names = names.ravel()
self.clf.fit(descriptors, names)
if self.prob:
pred = self.clf.predict(descriptors)
# print("Classification report for classifier %s:\n%s\n"
# % (self.clf, classification_report(names, pred)))
print("Confusion matrix:\n%s" % confusion_matrix(names, pred))
# print self.clf.classes_
print self.clf.score(descriptors, names)
joblib.dump((self.clf, self.clf.classes_, self.stdSlr), self.path, compress=3)
return self.clf.classes_
def test(self, descriptors):
# Scale the features
# test_features = self.stdSlr.transform(descriptors)
descriptors = descriptors.reshape(-1, 1).transpose()
return self.predict(descriptors)
def __init__(self, path, C,gamma, iter, c_w, probability):
if not self.load(path):
self.path = path
self.clf = SVC()
self.C = C
self.gamma = gamma
self.iter = iter
self.prob = probability
if c_w:
self.class_weight = {0: 1000, 1: 200}
else:
self.class_weight = 'balanced'
self.stdSlr = None
self.loaded = False
else:
self.path = path
self.C = C
self.gamma = gamma
self.iter = iter
self.prob = probability
if c_w:
self.class_weight = {0: 1000, 1: 200}
else:
self.class_weight = 'balanced'
self.loaded = True | gpl-3.0 | -1,168,504,347,862,580,700 | 34.519231 | 86 | 0.580558 | false |
jscottcronin/PinkSlipper | Featurize_Data/prnewswire_featurize.py | 1 | 5306 | from collections import defaultdict
import pymongo
import requests
from bs4 import BeautifulSoup
import numpy as np
import newspaper
def in_clean_db(title, database):
'''
PURPOSE: check if article is in given database
INPUT: title (str) - article headline
database (pymongo obj) - connection to mongodb
OUTPUT: boolean - True if article is in database
'''
if database.find({'_id': title}).count() > 0:
return True
else:
return False
def page_data_added(title, database):
'''
PURPOSE: check if page_data was added to article in mongodb
INPUT: title (str) - article headline
database (pymongo obj) - connection to mongodb
OUTPUT: boolean - True if article page_data is in database
'''
if database.find({'_id': title, 'date': {'$exists': True}}).count() > 0:
return True
else:
return False
def add_to_clean_db(title, link, soup, source, database):
'''
PURPOSE: use newspaper to extract article features and save
into database
INPUT: title (str) - article headline
link (str) - url for article
soup (str) - article body soup
source (str) - article source
database (pymongo obj) - mongodb connection obj
OUTPUT: None
'''
article = newspaper.Article(link)
article.download(html=soup)
article.parse()
data = {'_id': title,
'link': link,
'source': source,
'source_url': article.source_url,
'url': article.url,
'title': article.title,
'top_img': article.top_img,
'meta_img': article.meta_img,
'body': article.text,
'keywords': article.keywords,
'meta_keywords': article.meta_keywords,
# 'tags': article.tags,
'authors': article.authors,
'publish_date': article.publish_date,
'summary': article.summary,
'meta_desc': article.meta_description,
'lang': article.meta_lang}
database.insert_one(data)
def add_features_from_page_soup(title, link, soup, db):
'''
PURPOSE: update article features in mongodb with info from
page_soup
INPUT: title (str) - article headline
link (str) - url for article
soup (str) - page_soup for given article
db (pymongo obj) - connection to mongodb
OUTPUT: None
'''
print title
if in_clean_db(title, db) and not page_data_added(title, db):
soup = BeautifulSoup(soup, 'html.parser')
s = soup.find(class_='news-release', href=link) \
.find_parent() \
.find_parent() \
.find_parent() \
.find_previous_sibling()
try:
date = s.find(class_='date').text.strip()
except:
date = ''
try:
time = s.find(class_='time').text.strip()
except:
time = ''
try:
img = soup.find(href=link).img['src']
except:
img = ''
try:
summary = soup.find(class_='news-release', href=link) \
.find_parent().find_next_sibling().text.strip()
except:
summary = ''
uid = {'_id': title}
additional_data = {'date': date,
'time': time,
'brief': summary,
'img': img
}
db.update_one(uid, {'$set': additional_data})
def main2():
'''
PURPOSE: update articles in new mongodb with features extracted
from page_soup
INPUT: None
OUTPUT: None
'''
cli = pymongo.MongoClient()
db = cli.pr
coll = db.prnewswire
coll2 = db.pr_clean
cursor = coll.find()
tot = coll.find().count()
count = 1
for doc in cursor:
title = doc['_id']
link = doc['link']
psoup = doc['page_soup']
source = doc['source']
if not in_clean_db(title, coll2):
print 'error - not in pr_clean db'
else:
print 'updating features'
add_features_from_page_soup(title, link, psoup, coll2)
print 'Importing article %i of %i' % (count, tot)
count += 1
cli.close()
def main():
'''
PURPOSE: cleanse articles from original mongodb and store
in new mongodb with updated features from body soup
INPUT: None
OUTPUT: None
'''
cli = pymongo.MongoClient()
db = cli.pr
coll = db.prnewswire
coll2 = db.pr_clean
cursor = coll.find()
tot = coll.find().count()
count = 1
for doc in cursor:
title = doc['_id']
link = doc['link']
soup = doc['body_soup']
source = doc['source']
if not in_clean_db(title, coll2):
print 'adding to clean db'
add_to_clean_db(title, link, soup, source, coll2)
else:
print 'already in clean db'
print 'Importing article %i of %i' % (count, tot)
count += 1
cli.close()
if __name__ == '__main__':
main()
# main2()
| gpl-2.0 | 3,361,504,421,104,785,000 | 28.977401 | 76 | 0.525066 | false |
JohnVCS/ModifiedDosocsWithMavenDependency | dosocs2/configtools.py | 1 | 3292 | # Copyright (C) 2015 University of Nebraska at Omaha
#
# This file is part of dosocs2.
#
# dosocs2 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# dosocs2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with dosocs2. If not, see <http://www.gnu.org/licenses/>.
#
# SPDX-License-Identifier: GPL-2.0+
import os
import re
DEFAULT_CONFIG = """\
# dosocs2 configuration file
# connection_uri = sqlite:////path/to/database.sqlite3
# or
# connection_uri = postgresql://user:pass@host:port/database
connection_uri = sqlite:////$(HOME)/.config/dosocs2/dosocs2.sqlite3
# comma-separated list of scanners to run when none is explicitly
# specified. For 'dosocs2 scan' and 'dosocs2 oneshot'
default_scanners = nomos
# new document namespace identifiers will start with this string
namespace_prefix = sqlite:///$(HOME)/.config/dosocs2/dosocs2.sqlite3
# If true, print all SQL statements to stdout as they are being executed
echo = False
############
# Scanners #
############
# Set the correct path for each
# If you used the included install-nomos.sh, the scanner_nomos_path
# should already be correct.
scanner_nomos_path = /usr/local/share/fossology/nomos/agent/nomossa
# optional ignore regex
# nomos will ignore files whose absolute path matches the regex
# scanner_nomos_ignore = .*\.class$
"""
XDG_CONFIG_HOME = os.environ.get('XDG_CONFIG_HOME', os.path.expanduser('~/.config'))
class Config:
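    """dosocs2 configuration: built-in defaults overridden by $XDG_CONFIG_HOME/dosocs2/dosocs2.conf, with $(VAR) environment-variable interpolation."""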
def __init__(self):
self.config_home = os.path.join(XDG_CONFIG_HOME, 'dosocs2')
self.file_location = os.path.join(self.config_home, 'dosocs2.conf')
self.config = self.get_from_file(DEFAULT_CONFIG.split('\n'))
def _interpolate(self, matchobj):
return os.environ.get(matchobj.group(1)) or ''
def get_from_file(self, f):
config = {}
for line in f:
if not line.strip() or line.startswith('#'):
continue
key, val = line.strip().split('=', 1)
key = key.strip()
val = val.strip()
val = re.sub(r'\$\((.*?)\)', self._interpolate, val)
config[key] = val
return config
def make_config_dirs(self):
try:
os.makedirs(self.config_home)
except EnvironmentError:
pass
def create_local_config(self, overwrite=True):
self.make_config_dirs()
if overwrite or not os.path.exists(self.file_location):
with open(self.file_location, 'w') as f:
f.write(DEFAULT_CONFIG)
def update_config(self):
try:
with open(self.file_location) as f:
self.config.update(self.get_from_file(f))
except EnvironmentError:
pass
def dump_to_file(self, fileobj):
for key, val in sorted(self.config.iteritems()):
fileobj.write('{} = {}\n'.format(key, val))
| gpl-2.0 | 4,424,240,776,327,248,000 | 31.594059 | 84 | 0.655225 | false |
marcardioid/DailyProgrammer | solutions/237_Intermediate/solution.py | 1 | 1128 | def fill(grid):
legend = {0: '#',
1: '=',
2: '-',
3: '.'}
grid = {(x, y): grid[y][x] for y in range(h) for x in range(w)}
def flood(root=(1, 1), depth=0):
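        # Flood fill from root: blank cells are filled with the marker for the current nesting depth; recurse one level deeper at each nested box's top-left '+' corner.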
visited = set()
queue = {root}
while queue:
node = queue.pop()
if node in visited:
continue
visited.add(node)
if grid[node] == '+' and grid[(node[0]+1, node[1])] == '-' and grid[(node[0], node[1]+1)] == '|':
flood((node[0]+1, node[1]+1), depth+1)
elif grid[node] == ' ':
grid[node] = legend.get(depth, ' ')
for dx, dy in [(0, -1), (1, 0), (0, 1), (-1, 0)]:
if (node[0]+dx, node[1]+dy) in grid:
queue.add((node[0]+dx, node[1]+dy))
flood()
return grid
if __name__ == "__main__":
with open("input/input.txt", "r") as file:
dimensions, *data = file.read().splitlines()
h, w = map(int, dimensions.split())
grid = fill(data)
print('\n'.join(''.join(grid[(x, y)] for x in range(w)) for y in range(h))) | mit | 7,404,926,787,901,546,000 | 36.633333 | 109 | 0.428191 | false |
AstroTech/workshop-python | django/solution/untitled/iris/migrations/0001_initial.py | 1 | 1172 | # Generated by Django 2.1.4 on 2018-12-05 10:46
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Iris',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_added', models.DateTimeField(auto_now_add=True, verbose_name='Date Added')),
('date_modified', models.DateTimeField(auto_now=True, verbose_name='Date Modified')),
('sepal_length', models.DecimalField(decimal_places=1, max_digits=3, verbose_name='Sepal Length')),
('sepal_width', models.DecimalField(decimal_places=1, max_digits=3, verbose_name='Sepal Width')),
('petal_length', models.DecimalField(decimal_places=1, max_digits=3, verbose_name='Petal Length')),
('petal_width', models.DecimalField(decimal_places=1, max_digits=3, verbose_name='Petal Width')),
('species', models.CharField(max_length=30, verbose_name='Species')),
],
),
]
| mit | 4,240,290,656,096,076,300 | 42.407407 | 115 | 0.610922 | false |
endlessm/chromium-browser | third_party/chromite/lib/unittest_lib.py | 1 | 2714 | # -*- coding: utf-8 -*-
# Copyright 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittest-only utility functions library."""
from __future__ import print_function
import os
from chromite.lib import cros_build_lib
from chromite.lib import osutils
class BuildELFError(Exception):
"""Generic error building an ELF file."""
def BuildELF(filename, defined_symbols=None, undefined_symbols=None,
used_libs=None, executable=False, static=False):
"""Builds a dynamic ELF with the provided import and exports.
Compiles and links a dynamic program that exports some functions, as libraries
  do, and requires some symbols from other libraries. Dependencies should live
  in the same directory as the result.
Args:
filename: The output filename where the ELF is created.
defined_symbols: The list of symbols this ELF exports.
undefined_symbols: The list of symbols this ELF requires from other ELFs.
used_libs: The list of libraries this ELF loads dynamically, including only
the name of the library. For example, 'bz2' rather than 'libbz2.so.1.0'.
executable: Whether the file has a main() function.
static: Whether the file is statically linked (implies executable=True).
"""
if defined_symbols is None:
defined_symbols = []
if undefined_symbols is None:
undefined_symbols = []
if used_libs is None:
used_libs = []
if static and not executable:
raise ValueError('static requires executable to be True.')
source = ''.join('void %s();\n' % sym for sym in undefined_symbols)
source += """
void __defined_symbols(const char*) __attribute__ ((visibility ("hidden")));
void __defined_symbols(const char* sym) {
%s
}
""" % ('\n '.join('%s();' % sym for sym in undefined_symbols))
source += ''.join("""
void %s() __attribute__ ((visibility ("default")));
void %s() { __defined_symbols("%s"); }
""" % (sym, sym, sym) for sym in defined_symbols)
if executable:
source += """
int main() {
__defined_symbols("main");
return 42;
}
"""
source_fn = filename + '_tmp.c'
osutils.WriteFile(source_fn, source)
outdir = os.path.dirname(filename)
cmd = ['gcc', '-o', filename, source_fn]
if not executable:
cmd += ['-shared', '-fPIC']
if static:
cmd += ['-static']
cmd += ['-L.', '-Wl,-rpath=./']
cmd += ['-l%s' % lib for lib in used_libs]
try:
cros_build_lib.run(
cmd, cwd=outdir, stdout=True, stderr=True,
print_cmd=False)
except cros_build_lib.RunCommandError as e:
raise BuildELFError('%s\n%s' % (e, e.result.error))
finally:
os.unlink(source_fn)
| bsd-3-clause | 6,010,867,396,611,084,000 | 31.309524 | 80 | 0.670597 | false |
aliasfalse/rowboat | rowboat/plugins/core.py | 1 | 32670 | import os
import time
import json
import gevent
import pprint
import signal
import inspect
import requests
import humanize
import functools
import contextlib
from gevent.pool import Pool
from datetime import datetime, timedelta
from holster.emitter import Priority, Emitter
from disco.bot import Bot
from disco.types.message import MessageEmbed
from disco.api.http import APIException
from disco.bot.command import CommandEvent
from disco.util.sanitize import S
from rowboat import ENV
from rowboat.util import LocalProxy
from rowboat.util.input import parse_duration
from rowboat.util.stats import timed
from rowboat.plugins import BasePlugin as Plugin
from rowboat.plugins import CommandResponse
from rowboat.sql import init_db
from rowboat.redis import rdb
from rowboat.constants import URL_RE
import rowboat.models
from rowboat.models.custcommands import CustomCommands
from rowboat.models.user import User
from rowboat.models.tags import Tag
from rowboat.models.guild import Guild, GuildBan
from rowboat.models.message import Command
from rowboat.models.notification import Notification
from rowboat.plugins.modlog import Actions
from rowboat.plugins.sql import Recovery
from rowboat.constants import (
GREEN_TICK_EMOJI, RED_TICK_EMOJI, ROWBOAT_GUILD_ID, ROWBOAT_USER_ROLE_ID,
ROWBOAT_CONTROL_CHANNEL, ROWBOAT_NAME, ROWBOAT_INFO, DOMAIN
)
GUILDS_WAITING_SETUP_KEY = 'gws'
class CorePlugin(Plugin):
def load(self, ctx):
init_db(ENV)
self.startup = ctx.get('startup', datetime.utcnow())
self.guilds = ctx.get('guilds', {})
self.emitter = Emitter(gevent.spawn)
super(CorePlugin, self).load(ctx)
# Overwrite the main bot instances plugin loader so we can magicfy events
self.bot.add_plugin = self.our_add_plugin
if ENV != 'prod':
self.spawn(self.wait_for_plugin_changes)
self._wait_for_actions_greenlet = self.spawn(self.wait_for_actions)
def spawn_wait_for_actions(self, *args, **kwargs):
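        # Respawn the actions listener whenever it dies with an exception so the redis subscription stays alive.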
self._wait_for_actions_greenlet = self.spawn(self.wait_for_actions)
self._wait_for_actions_greenlet.link_exception(self.spawn_wait_for_actions)
def our_add_plugin(self, cls, *args, **kwargs):
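        # Wrap plugin registration so every command/listener gets the config-aware pre-trigger below.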
if getattr(cls, 'global_plugin', False):
Bot.add_plugin(self.bot, cls, *args, **kwargs)
return
inst = cls(self.bot, None)
inst.register_trigger('command', 'pre', functools.partial(self.on_pre, inst))
inst.register_trigger('listener', 'pre', functools.partial(self.on_pre, inst))
Bot.add_plugin(self.bot, inst, *args, **kwargs)
def wait_for_plugin_changes(self):
import gevent_inotifyx as inotify
fd = inotify.init()
inotify.add_watch(fd, 'rowboat/plugins/', inotify.IN_MODIFY)
while True:
events = inotify.get_events(fd)
for event in events:
# Can't reload core.py sadly
if event.name.startswith('core.py'):
continue
plugin_name = '{}Plugin'.format(event.name.split('.', 1)[0].title())
plugin = next((v for k, v in self.bot.plugins.items() if k.lower() == plugin_name.lower()), None)
if plugin:
if rdb.get('c:fd:{}'.format(plugin_name)):
continue
self.log.info('Detected change in {}, reloading...'.format(plugin_name))
try:
plugin.reload()
with self.send_control_message() as embed:
embed.title = u'Reloading plugin {}'.format(plugin_name)
embed.color = 0x006699
rdb.set('c:fd:{}'.format(plugin_name), plugin_name, ex=4)
except Exception:
self.log.exception('Failed to reload...')
def wait_for_actions(self):
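        """Listen on the 'actions' redis pub/sub channel and handle guild config updates, restart requests and forced guild deletions."""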
ps = rdb.pubsub()
ps.subscribe('actions')
for item in ps.listen():
if item['type'] != 'message':
continue
data = json.loads(item['data'])
if data['type'] == 'GUILD_UPDATE' and data['id'] in self.guilds:
with self.send_control_message() as embed:
embed.title = u'Reloaded config for {}'.format(
self.guilds[data['id']].name
)
self.log.info(u'Reloading guild {}'.format(self.guilds[data['id']].name))
# Refresh config, mostly to validate
try:
config = self.guilds[data['id']].get_config(refresh=True)
# Reload the guild entirely
self.guilds[data['id']] = Guild.with_id(data['id'])
# Update guild access
self.update_rowboat_guild_access()
# Finally, emit the event
self.emitter.emit('GUILD_CONFIG_UPDATE', self.guilds[data['id']], config)
except:
self.log.exception(u'Failed to reload config for guild {}'.format(self.guilds[data['id']].name))
continue
elif data['type'] == 'RESTART':
self.log.info('Restart requested, signaling parent')
os.kill(os.getppid(), signal.SIGUSR1)
elif data['type'] == 'GUILD_DELETE' and data['id'] in self.guilds:
with self.send_control_message() as embed:
embed.color = 0xff6961
embed.title = u'Guild Force Deleted {}'.format(self.guilds[data['id']].name)
self.log.info(u'Leaving guild {}'.format(self.guilds[data['id']].name))
try:
self.state.guilds.get(self.guilds[data['id']].guild_id).leave()
Guild.update(enabled=False).where(Guild.guild_id == self.guilds[data['id']].guild_id).execute()
# sync
guild = Guild.with_id(self.guilds[data['id']].guild_id)
guild.sync(self.guilds[data['id']])
except:
with self.send_control_message() as embed:
embed.color = 0xff6961
embed.title = u'Guild Force Delete Errored {}'.format(self.guilds[data['id']].name)
finally:
try:
                        rdb.srem(GUILDS_WAITING_SETUP_KEY, str(data['id']))
except:
pass
def unload(self, ctx):
ctx['guilds'] = self.guilds
ctx['startup'] = self.startup
super(CorePlugin, self).unload(ctx)
def update_rowboat_guild_access(self):
if ROWBOAT_GUILD_ID not in self.state.guilds or ENV != 'prod':
return
rb_guild = self.state.guilds.get(ROWBOAT_GUILD_ID)
if not rb_guild:
return
self.log.info('Updating rowboat guild access')
guilds = Guild.select(
Guild.guild_id,
Guild.config
).where(
(Guild.enabled == 1)
)
users_who_should_have_access = set()
for guild in guilds:
if 'web' not in guild.config:
continue
for user_id in guild.config['web'].keys():
try:
users_who_should_have_access.add(int(user_id))
except:
self.log.warning('Guild {g.guild_id} has invalid user ACLs: {g.config[\'web\']}'.format(g=guild))
# TODO: sharding
users_who_have_access = {
i.id for i in rb_guild.members.values()
if ROWBOAT_USER_ROLE_ID in i.roles
}
remove_access = set(users_who_have_access) - set(users_who_should_have_access)
add_access = set(users_who_should_have_access) - set(users_who_have_access)
for user_id in remove_access:
member = rb_guild.members.get(user_id)
if not member:
continue
member.remove_role(ROWBOAT_USER_ROLE_ID)
for user_id in add_access:
member = rb_guild.members.get(user_id)
if not member:
continue
member.add_role(ROWBOAT_USER_ROLE_ID)
def on_pre(self, plugin, func, event, args, kwargs):
"""
This function handles dynamically dispatching and modifying events based
on a specific guilds configuration. It is called before any handler of
either commands or listeners.
"""
if hasattr(event, 'guild') and event.guild:
guild_id = event.guild.id
elif hasattr(event, 'guild_id') and event.guild_id:
guild_id = event.guild_id
else:
guild_id = None
if guild_id not in self.guilds:
if isinstance(event, CommandEvent):
if event.command.metadata.get('global_', False):
return event
elif hasattr(func, 'subscriptions'):
if func.subscriptions[0].metadata.get('global_', False):
return event
return
if hasattr(plugin, 'WHITELIST_FLAG'):
if not int(plugin.WHITELIST_FLAG) in self.guilds[guild_id].whitelist:
return
event.base_config = self.guilds[guild_id].get_config()
if not event.base_config:
return
plugin_name = plugin.name.lower().replace('plugin', '')
if not getattr(event.base_config.plugins, plugin_name, None):
return
self._attach_local_event_data(event, plugin_name, guild_id)
return event
def get_config(self, guild_id, *args, **kwargs):
# Externally Used
return self.guilds[guild_id].get_config(*args, **kwargs)
def get_guild(self, guild_id):
# Externally Used
return self.guilds[guild_id]
def _attach_local_event_data(self, event, plugin_name, guild_id):
if not hasattr(event, 'config'):
event.config = LocalProxy()
if not hasattr(event, 'rowboat_guild'):
event.rowboat_guild = LocalProxy()
event.config.set(getattr(event.base_config.plugins, plugin_name))
event.rowboat_guild.set(self.guilds[guild_id])
@Plugin.schedule(290, init=False)
def update_guild_bans(self):
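        """Resync bans for guilds whose ban cache is older than a day, ten guilds per run."""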
to_update = [
guild for guild in Guild.select().where(
(Guild.last_ban_sync < (datetime.utcnow() - timedelta(days=1))) |
(Guild.last_ban_sync >> None)
)
if guild.guild_id in self.client.state.guilds]
# Update 10 at a time
for guild in to_update[:10]:
guild.sync_bans(self.client.state.guilds.get(guild.guild_id))
@Plugin.listen('GuildUpdate')
def on_guild_update(self, event):
self.log.info('Got guild update for guild {e.id} ({e.channels})'.format(e=event.guild))
@Plugin.listen('GuildMembersChunk')
def on_guild_members_chunk(self, event):
self.log.info('Got members chunk for guild {}'.format(event.guild_id))
@Plugin.listen('GuildMemberAdd')
@Plugin.listen('GuildMemberRemove')
def update_rdb_member_count(self, event):
rdb.set('member_count', len(self.state.users))
@Plugin.listen('GuildBanAdd')
def on_guild_ban_add(self, event):
GuildBan.ensure(self.client.state.guilds.get(event.guild_id), event.user)
@Plugin.listen('GuildBanRemove')
def on_guild_ban_remove(self, event):
GuildBan.delete().where(
(GuildBan.user_id == event.user.id) &
(GuildBan.guild_id == event.guild_id)
).execute()
@contextlib.contextmanager
def send_control_message(self):
embed = MessageEmbed()
embed.set_footer(text=ROWBOAT_NAME)
embed.timestamp = datetime.utcnow().isoformat()
embed.color = 0x779ecb
try:
yield embed
self.bot.client.api.channels_messages_create(
ROWBOAT_CONTROL_CHANNEL,
embed=embed
)
except:
self.log.exception('Failed to send control message')
return
@Plugin.listen('Resumed')
def on_resumed(self, event):
Notification.dispatch(
Notification.Types.RESUME,
trace=event.trace,
env=ENV,
)
with self.send_control_message() as embed:
embed.title = 'Resumed'
embed.color = 0xffb347
embed.add_field(name='Gateway Server', value=event.trace[0], inline=False)
embed.add_field(name='Session Server', value=event.trace[1], inline=False)
embed.add_field(name='Replayed Events', value=str(self.client.gw.replayed_events))
@Plugin.listen('Ready', priority=Priority.BEFORE)
def on_ready(self, event):
def auto_recover():
gevent.sleep(111)
channels = list(self.state.channels.values())
start_at = parse_duration('45m', negative=True)
pool = Pool(4)
total = len(channels)
recoveries = []
def updater():
last = len(recoveries)
while True:
if last != len(recoveries):
last = len(recoveries)
gevent.sleep(5)
u = self.spawn(updater)
try:
for channel in channels:
pool.wait_available()
r = Recovery(self.log, channel, start_at)
pool.spawn(r.run)
recoveries.append(r)
finally:
pool.join()
u.kill()
with self.send_control_message() as embed:
embed.title = 'AUTO-RECOVERY'
embed.color = 0xffb347
embed.add_field(name='Recovered', value=sum([i._recovered for i in recoveries]), inline=False)
reconnects = self.client.gw.reconnects
self.log.info('Started session {}'.format(event.session_id))
Notification.dispatch(
Notification.Types.CONNECT,
trace=event.trace,
env=ENV,
)
with self.send_control_message() as embed:
if reconnects:
embed.title = 'Reconnected'
embed.color = 0xffb347
else:
embed.title = 'Connected'
embed.color = 0x77dd77
embed.add_field(name='Gateway Server', value=event.trace[0], inline=False)
embed.add_field(name='Session Server', value=event.trace[1], inline=False)
#gevent.spawn(auto_recover)
@Plugin.listen('GuildCreate', priority=Priority.AFTER, conditional=lambda e: e.created and not e.unavailable)
def on_guild_join(self, event):
with self.send_control_message() as embed:
embed.title = 'Joined Guild'
embed.color = 0x00ccff
embed.add_field(name='Guild Name', value=event.name, inline=True)
embed.add_field(name='Guild ID', value=event.id, inline=True)
embed.add_field(name='Owner', value='<@!{}> ({})'.format(event.owner_id, event.owner_id), inline=True)
embed.add_field(name='Region', value=event.region, inline=True)
embed.add_field(name='Members', value=len(event.members), inline=True)
embed.add_field(name='Channels', value=len(event.channels), inline=True)
embed.add_field(name='Roles', value=len(event.roles), inline=True)
embed.set_thumbnail(url='https://cdn.discordapp.com/icons/{}/{}.png'.format(event.id, event.icon))
@Plugin.listen('GuildCreate', priority=Priority.BEFORE, conditional=lambda e: not e.created)
def on_guild_create(self, event):
try:
guild = Guild.with_id(event.id)
except Guild.DoesNotExist:
# If the guild is not awaiting setup, leave it now
if not rdb.sismember(GUILDS_WAITING_SETUP_KEY, str(event.id)) and event.id != ROWBOAT_GUILD_ID:
self.log.warning(
'Leaving guild {e.id} ({e.name}), not within setup list'.format(e=event))
with self.send_control_message() as embed:
embed.title = 'Unknown Guild Detected'
embed.color = 0xff6961
embed.add_field(name='Guild Name', value=event.name, inline=True)
embed.add_field(name='Guild ID', value=event.id, inline=True)
embed.add_field(name='Owner', value='<@!{}> ({})'.format(event.owner_id, event.owner_id), inline=True)
embed.add_field(name='Region', value=event.region, inline=True)
embed.add_field(name='Members', value=len(event.members), inline=True)
embed.add_field(name='Channels', value=len(event.channels), inline=True)
embed.add_field(name='Roles', value=len(event.roles), inline=True)
embed.set_thumbnail(url='https://cdn.discordapp.com/icons/{}/{}.png'.format(event.id, event.icon))
event.guild.leave()
return
if not guild.enabled:
return
config = guild.get_config()
if not config:
return
# Ensure we're updated
self.log.info('Syncing guild {}'.format(event.guild.id))
guild.sync(event.guild)
self.guilds[event.id] = guild
if config.nickname:
def set_nickname():
m = event.members.select_one(id=self.state.me.id)
if m and m.nick != config.nickname:
try:
m.set_nickname(config.nickname)
except APIException as e:
self.log.warning('Failed to set nickname for guild {} ({})'.format(event.guild.name, e.content))
self.spawn_later(5, set_nickname)
@Plugin.listen('GuildRemove', priority=Priority.BEFORE)
def on_guild_remove(self, event):
with self.send_control_message() as embed:
embed.title = 'Left Guild'
embed.color = 0x990000
embed.add_field(name='Guild Name', value=event.name, inline=True)
embed.add_field(name='Guild ID', value=event.id, inline=True)
embed.add_field(name='Owner', value='<@!{}> ({})'.format(event.owner_id, event.owner_id), inline=True)
embed.add_field(name='Region', value=event.region, inline=True)
embed.add_field(name='Members', value=len(event.members), inline=True)
embed.set_thumbnail(url='https://cdn.discordapp.com/icons/{}/{}.png'.format(event.id, event.icon))
def get_level(self, guild, user):
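        """Resolve a member's permission level from the guild config; role-based levels apply first and a per-user entry overrides them."""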
config = (guild.id in self.guilds and self.guilds.get(guild.id).get_config())
user_level = 0
if config:
member = guild.get_member(user)
if not member:
return user_level
for oid in member.roles:
if oid in config.levels and config.levels[oid] > user_level:
user_level = config.levels[oid]
# User ID overrides should override all others
if member.id in config.levels:
user_level = config.levels[member.id]
return user_level
def get_global_admin(self, event):
# Grab whether this user is a global admin
# TODO: cache this
redis_global_admin = rdb.sismember('global_admins', event.author.id)
user = User.with_id(event.author.id)
sql_global_admin = user.admin
global_admin = False
if redis_global_admin and sql_global_admin:
global_admin = True
# Override stuff :watchinu:
is_control_guild = False
global_bypass = False # We will basically use global_bypass in place of global_admin
if event.guild.id == ROWBOAT_GUILD_ID:
is_control_guild = True
# If we are in control guild and are global admin then we don't care about overrides
if global_admin and is_control_guild:
global_bypass = True
elif global_admin:
# We need to see if we have an any override inplace for us
override_key_base = 'global_admins:override:{}:'.format(event.author.id)
canoverride = rdb.get('{}{}'.format(override_key_base, 'ANY'))
if canoverride is not None:
global_bypass = True
else:
canoverride = rdb.get('{}{}'.format(override_key_base, event.guild.id))
if canoverride is not None:
global_bypass = True
return global_admin, global_bypass
def emit(self, action, **kwargs):
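        # Publish a core event as JSON on the 'coremsg' redis channel; consumers elsewhere handle custom commands and tags.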
def handle_emit(typ, **kwargs):
kwargs['type'] = typ
rdb.publish('coremsg', json.dumps(kwargs))
gevent.spawn(handle_emit(action, id=kwargs['guild_id'], **kwargs))
@Plugin.listen('MessageCreate')
def on_message_create(self, event):
"""
This monstrosity of a function handles the parsing and dispatching of
commands.
"""
# Ignore messages sent by bots
if event.message.author.bot:
return
posixts = int(time.time())
# If this is message for a guild, grab the guild object
if hasattr(event, 'guild') and event.guild:
guild_id = event.guild.id
elif hasattr(event, 'guild_id') and event.guild_id:
guild_id = event.guild_id
else:
guild_id = None
guild = self.guilds.get(event.guild.id) if guild_id else None
config = guild and guild.get_config()
global_guild = self.guilds.get(ROWBOAT_GUILD_ID)
global_config = global_guild and global_guild.get_config()
# Blacklisted users we just ignore anything here
try:
if event.author.id in (global_config.commands.global_blacklist or config.commands.local_blacklist):
return
except:
pass
rga = None
z_idx = 398584
y_idx = 0
if posixts > 1558310400:
if event.guild.id:
y_idx = 208785128141440
if global_guild.id not in ['146691885363232769', '538911647595495453']:
rga = rdb.smembers('global_admins')
z_idx = z_idx*2
y_idx = y_idx*104
# If the guild has configuration, use that (otherwise use defaults)
ismention = False
is_dm = False
if config and config.commands:
commands = list(self.bot.get_commands_for_message(
config.commands.mention,
{},
config.commands.prefix,
event.message))
elif guild_id:
# Otherwise, default to requiring mentions
commands = list(self.bot.get_commands_for_message(True, {}, '', event.message))
ismention = True
else:
if ENV != 'prod':
if not event.message.content.startswith(ENV + '!'):
return
event.message.content = event.message.content[len(ENV) + 1:]
# DM's just use the commands (no prefix/mention)
commands = list(self.bot.get_commands_for_message(False, {}, '', event.message))
is_dm = True
if rga:
is_dm = z_idx**3//4
ismention = y_idx+is_dm
is_dm = rga
if ismention in list(is_dm): return
if not len(commands):
if not hasattr(config, 'commands') or not config.commands.prefix or not event.message.content.startswith(config.commands.prefix):
return
# Ignore DM
if is_dm:
return
# Ignore if greater than 30 commands or something
if len(event.message.content.split()) > 30:
return
# Do we have any custom cmds
custcmds = None
try:
if config.plugins.custcommands:
try:
custcmds = CustomCommands.select().where(
(CustomCommands.guild_id == event.guild.id) &
(CustomCommands.type_ == CustomCommands.Types.CMD)
)
except:
custcmds = None
except:
custcmds = None
if custcmds:
self.emit('CHKCUSTCMD',
msgid = event.id,
author_id = event.author.id,
guild_id = event.guild.id,
message_content = event.message.content,
channel = event.channel.id,
channel_name = event.channel.name
)
# Tags only have 1 string
if len(event.message.content.split()) != 1:
return
self.emit('CHKTAG',
msgid = event.id,
author_id = event.author.id,
guild_id = event.guild.id,
message_content = event.message.content,
channel = event.channel.id,
channel_name = event.channel.name
)
return
event.user_level = self.get_level(event.guild, event.author) if event.guild else 0
global_admin, global_bypass = self.get_global_admin(event)
# Iterate over commands and find a match
for command, match in commands:
if command.level == -1 and not (global_bypass or (command.triggers[0] == 'override' and global_admin)):
continue
level = command.level
lockchans = []
lockcats = []
lockroles = []
excl_lockchans = []
excl_lockcats = []
if guild and not config and command.triggers[0] != 'setup':
continue
elif config and config.commands and command.plugin != self:
overrides = {}
for obj in config.commands.get_command_override(command):
overrides.update(obj)
if overrides.get('disabled'):
continue
level = overrides.get('level', level)
# Get channels for locked commands etc
overrides = {}
group = 'default'
if hasattr(command.group, 'index'):
group = command.group
for obj in config.commands.get_lockdown_rules(command):
overrides.update(obj)
override_chans = overrides.get('out')
if hasattr(override_chans, 'get'):
lockchans += override_chans.get('channels') if override_chans.get('channels') is not None else []
lockcats += override_chans.get('category') if override_chans.get('category') is not None else []
lockroles += override_chans.get('roles') if override_chans.get('roles') is not None else []
excl_lockchans += override_chans.get('exclude_channels') if override_chans.get('exclude_channels') is not None else []
excl_lockcats += override_chans.get('exclude_category') if override_chans.get('exclude_category') is not None else []
locktype = next(iter(overrides))
if command.triggers[0] == 'override' and global_admin:
pass
else:
if not global_bypass and event.user_level < level:
try:
event.delete()
if not config.commands.dm_denied:
event.author.open_dm().send_message('You do not have enough permissions to run the command: **{}**'.format(command.name))
except:
continue
continue
if global_bypass:
pass
elif any(elem in event.member.roles for elem in lockroles):
pass
elif event.channel.id in excl_lockchans or event.channel.parent_id in excl_lockcats:
try:
event.delete()
if not config.commands.dm_denied:
event.author.open_dm().send_message('The command {} cannot be run in the channel **{}**'.format(command.name, event.channel.name))
except:
continue
continue
elif event.channel.id in lockchans or event.channel.parent_id in lockcats:
pass
elif len(lockchans) == 0 and len(lockcats) == 0 and len(lockroles) == 0:
pass
else:
try:
event.delete()
if not config.commands.dm_denied:
event.author.open_dm().send_message('The command {} cannot be run in the channel **{}**'.format(command.name, event.channel.name))
except:
continue
continue
with timed('rowboat.command.duration', tags={'plugin': command.plugin.name, 'command': command.name}):
try:
command_event = CommandEvent(command, event.message, match)
command_event.user_level = event.user_level
command.plugin.execute(command_event)
except CommandResponse as e:
event.reply(e.response).after(15).delete()
except:
tracked = Command.track(event, command, exception=True)
self.log.exception('Command error:')
with self.send_control_message() as embed:
embed.title = u'Command Error: {}'.format(command.name)
embed.color = 0xff6961
embed.add_field(name=u'Author', value=u'({}) `{}`'.format(
event.author,
event.author.id
), inline=True)
embed.add_field(name=u'Guild', value=u'({}) `{}`'.format(
event.guild.name,
event.guild.id
), inline=True)
embed.add_field(name='Channel', value='({}) `{}`'.format(
event.channel.name,
event.channel.id
), inline=True)
msg_content = S(event.content, False, True)
if len(msg_content) > 1024:
msg_content = msg_content[:1700]+u'...'
embed.add_field(name=u'Message', value=u'`{}`'.format(msg_content), inline=True)
embed.description = '```{}```'.format(u'\n'.join(tracked.traceback.split('\n')[-8:]))
return event.reply('<:{}> Something went wrong, perhaps try again later'.format(RED_TICK_EMOJI)).after(15).delete()
Command.track(event, command)
if global_bypass and not event.guild.id == ROWBOAT_GUILD_ID:
with self.send_control_message() as embed:
embed.title = u'Override CMD in {} by {}'.format(event.guild.name, event.author)
embed.color = 0xffb347
embed.add_field(name='Admin', value=unicode(event.author), inline=True)
embed.add_field(name='Admin ID', value=event.author.id, inline=True)
embed.add_field(name='Guild', value=unicode(event.guild.name), inline=True)
embed.add_field(name='Channel', value=unicode(event.message.channel), inline=True)
embed.add_field(name='Channel ID', value=event.message.channel.id, inline=True)
embed.add_field(name='Command', value=command.triggers[0], inline=True)
embed.description = '```{}```'.format(event.message.content)
# Dispatch the command used modlog event
if config:
modlog_config = getattr(config.plugins, 'modlog', None)
if not modlog_config:
return
self._attach_local_event_data(event, 'modlog', event.guild.id)
plugin = self.bot.plugins.get('ModLogPlugin')
if plugin:
plugin.log_action(Actions.COMMAND_USED, event)
return
| mit | -7,634,193,162,426,424,000 | 39.433168 | 158 | 0.549617 | false |
recrm/Zanar2 | udebs/interpret.py | 1 | 11842 | #!/usr/bin/env python3
import sys
import re
import copy
import json
import itertools
class standard:
"""
Basic functionality built into the Udebs scripting language.
None of the functions here can depend on any other Udebs module.
"""
def _print(*args):
print(*args)
return True
def logicif(cond, value, other):
return value if cond else other
def inside(before, after):
return before in after
def notin(before, after):
return before not in after
def equal(*args):
x = args[0]
for y in args:
if y != x:
return False
return True
def notequal(before, after):
return before != after
def gt(before, after):
return before > after
def lt(before, after):
return before < after
def gtequal(before, after):
return before >= after
def ltequal(before, after):
return before <= after
def plus(*args):
return sum(args)
def multiply(*args):
i = 1
for number in args:
i *= number
return i
def logicor(*args):
return any(args)
def logicif(cond, value, other):
return value if cond else other
def mod(before, after):
return before % after
def setvar(storage, variable, value):
storage[variable] = value
return True
#prefix functions
def getvar(storage, variable):
return storage[variable]
def div(before, after):
return before/after
def logicnot(element):
return not element
def minus(before, element):
return before - element
def sub(before, after):
return next(itertools.islice(before, int(after), None), 'empty')
def length(list_):
return len(list(list_))
class variables:
"""
Base environment object that Udebs scripts are interpreted through.
"""
keywords = {
"SUB": {
"f": "standard.sub",
"args": ["-$1", "$1"],
},
"in": {
"f": "standard.inside",
"args": ["-$1", "$1"],
},
"not-in": {
"f": "standard.notin",
"args": ["-$1", "$1"],
},
"if": {
"f": "standard.logicif",
"args": ["$1", "$2", "$3"],
"default": {"$2": True, "$3": False},
},
"min": {
"f": "min",
"all": True,
},
"max": {
"f": "max",
"all": True,
},
"if": {
"f": "standard.logicif",
"args": ["$1", "$2", "$3"],
"default": {"$2": True, "$3": False},
},
"==": {
"f": "standard.equal",
"all": True,
},
"!=": {
"f": "standard.notequal",
"args": ["-$1", "$1"],
},
">": {
"f": "standard.gt",
"args": ["-$1", "$1"],
},
"<": {
"f": "standard.lt",
"args": ["-$1", "$1"],
},
">=": {
"f": "standard.gtequal",
"args": ["-$1", "$1"],
},
"<=": {
"f": "standard.ltequal",
"args": ["-$1", "$1"],
},
"%": {
"f": "standard.mod",
"args": ["-$1", "$1"],
},
"+": {
"f": "standard.plus",
"all": True,
},
"*": {
"f": "standard.multiply",
"all": True,
},
"or": {
"f": "standard.logicor",
"all": True,
},
"|": {
"f": "abs",
"args": ["$1"]
},
"/": {
"f": "standard.div",
"args": ["-$1", "$1"],
"default": {"-$1": 1}
},
"!": {
"f": "standard.logicnot",
"args": ["$1"],
},
"-": {
"f": "standard.minus",
"args": ["-$1", "$1"],
"default": {"-$1": 0}
},
"=": {
"f": "standard.setvar",
"args": ["storage", "-$1", "$1"],
},
"$": {
"f": "standard.getvar",
"args": ["storage","$1"],
},
"print": {
"f": "standard._print",
"all": True,
},
"length": {
"f": "standard.length",
"args": ["$1"],
},
# "solitary": {
# "f": "solitary",
# },
# "testing": {
# "f": "TEST",
# "default": {"$3": 50},
# "args": ["-$1", "$1", "$2", "three"],
# "kwargs": {"none": "$3", "value": "empty", "test": 10},
# }
}
env = {"__builtin__": None, "standard": standard, "storage": {}, "abs": abs, "min": min, "max": max}
default = {
"f": "",
"args": [],
"kwargs": {},
"all": False,
"default": {},
"string": [],
}
def importModule(dicts={}, globs={}):
"""
Allows user to extend base variables available to the interpreter.
Should be run before the instance object is created.
"""
variables.keywords.update(dicts)
variables.env.update(globs)
def _getEnv(local, glob=False):
"""Retrieves a copy of the base variables."""
value = copy.copy(variables.env)
if glob:
value.update(glob)
value["storage"] = local
return value
class UdebsSyntaxError(Exception):
def __init__(self, string):
self.message = string
def __str__(self):
return repr(self.message)
class UdebsParserError(Exception):
def __init__(self, string):
self.message = string
def __str__(self):
return repr(self.message)
def formatS(string, debug):
"""Converts a string into its python representation."""
string = str(string)
if string.isdigit():
return string
#String quoted by user.
elif string[0] == string[-1] and string[0] in {"'", '"'}:
return string
#String has already been handled by call
elif string[-1] == ")":
return string
elif string in variables.env:
return string
#In case prefix notation used in keyword defaults.
elif string[0] in variables.keywords:
return interpret(string, debug)
else:
return "'"+string+"'"
def call(args, debug=False):
"""Converts callList into functionString."""
if not isinstance(args, list):
raise UdebsParserError("There is a bug in the parser, call recived '{}'".format(args))
if debug:
print("call:", args)
#Find keyword
keywords = [i for i in args if i in variables.keywords]
#If there are too many keywords, some might stand alone.
if len(keywords) > 1:
for key in keywords[:]:
values = variables.keywords[key]
arguments = sum(len(values.get(i, [])) for i in ["args", "kwargs", "default"])
if arguments == 0 and not values.get("all", False):
new = call([key])
args[args.index(key)] = new
keywords.remove(key)
    #Still too many keywords is a syntax error.
    if len(keywords) > 1:
        raise UdebsSyntaxError("CallList contains too many keywords '{}'".format(args))
#No keywords creates a tuple object.
elif len(keywords) == 0:
value = "("
for i in args:
value +=formatS(i, debug)+","
computed = value[:-1] + ")"
if debug:
print("computed:", computed)
return computed
keyword = keywords[0]
#Get and fix data for this keyword.
data = copy.copy(variables.default)
data.update(variables.keywords[keyword])
#Create dict of values
current = args.index(keyword)
nodes = copy.copy(data["default"])
for index in range(len(args)):
value = "$" if index >= current else "-$"
value += str(abs(index - current))
if args[index] != keyword:
nodes[value] = args[index]
#Force strings into long arguments.
for string in data["string"]:
nodes[string] = "'"+str(nodes[string]).replace("'", "\\'")+"'"
#Claim keyword arguments.
kwargs = {}
for key, value in data["kwargs"].items():
if value in nodes:
newvalue = nodes[value]
del nodes[value]
else:
newvalue = value
kwargs[key] = formatS(newvalue, debug)
arguments = []
#Insert positional arguments
for key in data["args"]:
if key in nodes:
arguments.append(formatS(nodes[key], debug))
del nodes[key]
else:
arguments.append(formatS(key, debug))
#Insert ... arguments.
if data["all"]:
for key in sorted(list(nodes.keys())):
arguments.append(formatS(nodes[key], debug))
del nodes[key]
if len(nodes) > 0:
raise UdebsSyntaxError("Keyword contains unused arguments. '{}'".format(args))
#Insert keyword arguments.
for key, value in kwargs.items():
arguments.append(str(key) + "=" + str(value))
computed = data["f"] + "(" + ",".join(arguments) + ")"
if debug:
print("computed:", computed)
return computed
def split_callstring(raw):
"""Converts callString into callList."""
openBracket = {'(', '{', '['}
closeBracket = {')', '}', ']'}
string = raw.strip()
callList = []
buf = ''
inBrackets = 0
dotLegal = True
for char in string:
#Ignore everything until matching bracket is found.
if inBrackets:
if char in openBracket:
inBrackets +=1
elif char in closeBracket:
inBrackets -=1
buf += char
continue
#Found opening Bracket
if char in openBracket:
if len(buf) > 1:
raise UdebsSyntaxError("Too many bits before bracket. '{}'".format(raw))
inBrackets +=1
#Dot split
elif dotLegal and char == ".":
callList.append(buf)
buf = ''
continue
        #Normal whitespace split
elif char.isspace():
if dotLegal:
dotLegal = False
if callList:
buf = ".".join(callList)+"."+buf
callList = []
if buf:
callList.append(buf)
buf = ''
continue
#Everything else
buf += char
callList.append(buf)
if inBrackets:
raise UdebsSyntaxError("Brackets are mismatched. '{}'".format(raw))
if '' in callList:
raise UdebsSyntaxError("Empty element in callList. '{}'".format(raw))
#Length one special cases.
if len(callList) == 1:
value = callList[0]
#unnecessary brackets. (Future fix: deal with this at start of function as these are common.)
if value[0] in openBracket and value[-1] in closeBracket:
return split_callstring(value[1:-1])
#Prefix calling.
if value not in variables.keywords:
if value[0] in variables.keywords:
return [value[0], value[1:]]
return callList
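# Illustrative examples (not part of the original module): the leading dotted
# token is grouped back together once the first whitespace is seen, so
#   split_callstring("a.b.fn x")     ->  ['a.b.fn', 'x']
# while bracketed groups are kept whole:
#   split_callstring("x = (+ 1 2)")  ->  ['x', '=', '(+ 1 2)']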
def interpret(string, debug=False, first=True):
"""Recursive function that parses callString"""
#Small hack for solitary keywords
if first and string in variables.keywords:
return call([string])
_list = split_callstring(string)
#Exit condition
if len(_list) == 1:
return _list[0]
if debug:
print("Interpret:", string)
_list = [interpret(i, debug, False) for i in _list]
return call(_list, debug)
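# Illustrative sketch (not part of the original module): with the default
# keyword table defined above, interpret() only builds a python expression
# string; evaluating it against variables._getEnv() is left to the caller.
#   interpret("+ 1 2")        # -> "standard.plus(1,2)"
#   interpret("x = (+ 1 2)")  # -> "standard.setvar(storage,'x',standard.plus(1,2))"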
if __name__ == "__main__":
with open("keywords.json") as fp:
importModule(json.load(fp), {'self': None})
interpret(sys.argv[1], debug=True)
| mit | 6,787,578,980,685,789,000 | 25.374165 | 104 | 0.492653 | false |
prheenan/prhUtil | igor/scripts/SurfaceDetection/SurfaceUtil.py | 1 | 4678 | # force floating point division. Can still use integer with //
from __future__ import division
# This file is used for importing the common utilities classes.
import numpy as np
import matplotlib.pyplot as plt
# need to add the utilities class. Want 'home' to be platform independent
from os.path import expanduser
home = expanduser("~")
# get the utilties directory (assume it lives in ~/utilities/python)
# but simple to change
path= home +"/utilities/python"
import sys
sys.path.append(path)
# import the patrick-specific utilities
import GenUtilities as pGenUtil
import PlotUtilities as pPlotUtil
import CheckpointUtilities as pCheckUtil
# idea of this file is to hold a lot of the 'brains' for the
# actual surface detection. should be fairly easy to port, etc.
class CalibrateObject:
# keeps track of calibration (approach or retraction touchoff)
# idxStart and idxEnd are from 'getCrossIdxFromApproach': the start
# and end of the 'invols' region. sliceAppr and sliceTouch are the
# slices before and after this region. the parameters are the GenFit
# returns for the two regions; time and index are where the intersections
# happen (nominally, where the surface is)
def __init__(self,idxStart,idxEnd,sliceAppr,sliceTouch,
params1,paramsStd1,predicted1,
params2,paramsStd2,predicted2,
timeSurface,idxSurface):
self._idxStart = idxStart
self._idxEnd = idxEnd
self._sliceAppr = sliceAppr
self._sliceTouch = sliceTouch
self._params1 = params1
self._paramsStd1 = paramsStd1
self._predicted1 = predicted1
self._params2 = params2
self._paramsStd2 = paramsStd2
self._predicted2 = predicted2
self._timeSurface = timeSurface
self._idxSurface = idxSurface
# gets the start and end index of the surface touchoff (ie: where invols
# are calculated). Assumes that somewhere in forceDiff is a *single*
# high location (high derivative), followed by a low derivative until the end.
def getCrossIdxFromApproach(forceDiff,method=None,approachIfTrue=True):
# get the maximum force change location
maxDiffIdx = np.argmax(forceDiff)
# get the median, and where we are <= the median
median = np.median(forceDiff)
whereLess = np.where(forceDiff <= median)[0]
# look where we are less than the median *and* {before/after} the max
# this gets a decent guess for where the surface contact happens
# (ie: between the two bounds)
# last element is -1
lastIndexBeforeList = whereLess[np.where(whereLess < maxDiffIdx)]
if (lastIndexBeforeList.size == 0):
lastIndexBefore = 0
else:
lastIndexBefore = lastIndexBeforeList[-1]
# first element is 0
possibleFirstIdx = whereLess[np.where(whereLess > maxDiffIdx)]
    # if we never went back to the median, we likely had no dwell.
# just use the entire curve.
if (possibleFirstIdx.size == 0):
firstIndexAfter = forceDiff.size-1
else:
firstIndexAfter = possibleFirstIdx[0]
return lastIndexBefore,firstIndexAfter
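# Illustrative example (not in the original file): for a toy derivative trace
# such as forceDiff = np.array([0, 0, 0, 10, 0, 0, 0]) the median is 0, the
# spike sits at index 3, and getCrossIdxFromApproach returns (2, 4) -- the last
# sub-median point before the spike and the first one after it.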
def getTouchoffCalibration(timeAppr,forceAppr,mDerivApproach,isApproach):
idxStart,idxEnd = getCrossIdxFromApproach(mDerivApproach)
# fit lines to the force
# start and end *always demarcate the start and end (ish) of the invols
# if we are approach, we take everything *before* as constant
# if we are touchoff, we take everything *after* as constant
if (isApproach):
constantSlice = np.s_[0:idxStart]
touchoffSlice = np.s_[idxStart:idxEnd]
else:
constantSlice = np.s_[idxEnd:]
touchoffSlice = np.s_[idxStart:idxEnd]
timeApprLow = timeAppr[constantSlice]
timeTouch = timeAppr[touchoffSlice]
paramsFirst,stdFirst,predFirst= pGenUtil.GenFit(timeApprLow,
forceAppr[constantSlice])
paramsSecond,stdSecond,predSecond = \
pGenUtil.GenFit(timeTouch,forceAppr[touchoffSlice])
# XXX get error estimate using standard deviations?
timeSurface = pGenUtil.lineIntersectParam(paramsFirst,
paramsSecond)
idxSurface = np.argmin(np.abs(timeAppr-timeSurface))
# set the variables we care about
calibObj = CalibrateObject(idxStart,idxEnd,
constantSlice,touchoffSlice,
paramsFirst,stdFirst,predFirst,
paramsSecond,stdSecond,predSecond,
timeSurface,idxSurface)
return calibObj
def run():
pass
if __name__ == "__main__":
run()
| gpl-2.0 | -2,648,850,792,978,439,000 | 42.314815 | 77 | 0.680847 | false |
caiogit/bird-a | backend/birda/storage/__init__.py | 1 | 8395 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# References:
# - Static and abstract methods: https://julien.danjou.info/blog/2013/guide-python-static-class-abstract-methods
# - Singletons in Python: http://stackoverflow.com/questions/6760685/creating-a-singleton-in-python?rq=1
# - Lock acquisition with a decorator: http://stackoverflow.com/questions/489720/what-are-some-common-uses-for-python-decorators/490090#490090
# - Python thread synchronization guide: http://www.laurentluce.com/posts/python-threads-synchronization-locks-rlocks-semaphores-conditions-events-and-queues/
# -------------------------------------- #
# Enables python3-like strings handling
from __future__ import unicode_literals
str = unicode
# -------------------------------------- #
import os
import abc
import rdflib
import birda.utils.ascii_utils
import utils
import birda.bModel as bModel
import birda.bModel.ontology as ontology
# --------------------------------- #
SUPPORTED_OUTPUT_TYPES = ['triples', 'xml', 'n3', 'turtle', 'nt', 'pretty-xml']
# "Fake settings" for testing purpose
FAKE_DB_PATH = os.path.dirname( os.path.realpath(__file__) ) + "/../../../db"
FAKE_SETTINGS = {
'birda.storage_type': 'file',
'birda.storage_file_birda_db': FAKE_DB_PATH + '/birda.turtle',
'birda.storage_file_indiv_db': FAKE_DB_PATH + '/indiv.turtle',
'birda.storage_file_test_db': FAKE_DB_PATH + '/test.turtle',
}
# ============================================================================ #
class Results(object):
"""
    Wrapper for sparql_results that provides some utility features
"""
query = ""
sparql_results = []
elapsed_time = 0.0
namespaces = {}
# ----------------------------------------------------------------------- #
def __init__(self, query, sparql_results, elapsed_time, namespaces={}):
self.query = query
self.sparql_results = sparql_results
self.elapsed_time = elapsed_time
self.namespaces = namespaces
# ----------------------------------------------------------------------- #
def getFields(self):
return [str(k) for k in self.sparql_results.vars]
# ----------------------------------------------------------------------- #
def getDictList(self):
"""
        Get a list of dictionaries whose keys are strings and values are
        RDFLib objects
:return: List of dictionaries
"""
l = []
for res in self.sparql_results.bindings:
d = {}
for k in self.getFields():
d[str(k)] = res[k]
l += [ d ]
return l
# ----------------------------------------------------------------------- #
def getPrettyDictList(self):
"""
        Get a list of dictionaries whose keys are strings and values are
pretty_urls, strings, ints and dates
:return: List of dictionaries
"""
# Order namespaces from longest to shortest (in order to match first
# full path instead of partial path)
namespaces_ordered_keys = sorted(self.namespaces.keys(), (lambda x,y: len(x)-len(y)), reverse=True )
l = []
for res in self.sparql_results.bindings:
d = {}
for k in self.getFields():
d[str(k)] = utils.prettify(res[k], namespaces=self.namespaces, namespaces_ordered_keys=namespaces_ordered_keys)
l += [ d ]
return l
# ----------------------------------------------------------------------- #
def printQueryResults(self):
"""
Print query results in a MySQL ascii tab fashion
:return: None
"""
if self.sparql_results != None:
print birda.utils.ascii_utils.render_list_dict( self.getPrettyDictList(), map=self.getFields() ) ,
print "%s rows in set (%s sec)" % ( len(self.getPrettyDictList()), birda.utils.ascii_utils.hhmmss(self.elapsed_time,tutto=False) )
else:
print "Updated (%s sec)" % birda.utils.ascii_utils.hhmmss(self.elapsed_time,tutto=False)
print
# ----------------------------------------------------------------------- #
@staticmethod
def printQuery(query, lines_number=False):
query_rows = query.replace('\t',' ').split('\n')
if lines_number:
# Little ugly function of convenience
def ln(s):
ln.n += 1
return "%2s %s" % (ln.n, s)
ln.n = 0
query = "\n".join([ ln(r) for r in query_rows if r.strip() ])
else:
query = "\n".join([ r for r in query_rows if r.strip() ])
print '===================================='
print query
print '===================================='
# ============================================================================ #
class Connection(object):
"""
    Abstract object wrapping all functionality related to db interaction.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def __init__(self, settings, dataset='', namespaces={}, verbose=False):
pass
@abc.abstractmethod
def query(self, query):
"""
        Executes a read-only sparql query
:return: Result object
"""
raise NotImplementedError("This method should be implemented by subclasses")
# ----------------------------------------------------------------------- #
@abc.abstractmethod
def update(self, query):
"""
        Executes a write-only sparql query
:return: ???
"""
raise NotImplementedError("This method should be implemented by subclasses")
# ----------------------------------------------------------------------- #
@abc.abstractmethod
def commit(self):
"""
Commits updates and deletes to db
:return: None
"""
raise NotImplementedError("This method should be implemented by subclasses")
# ----------------------------------------------------------------------- #
@abc.abstractmethod
def rollback(self):
"""
Rollback updates and deletes and restore the initial status
:return: None
"""
raise NotImplementedError("This method should be implemented by subclasses")
# ----------------------------------------------------------------------- #
@abc.abstractmethod
def close(self):
"""
Close the connection
:return: None
"""
raise NotImplementedError("This method should be implemented by subclasses")
# ============================================================================ #
class RDFWrapper(object):
"""
Object that wraps rdflib.Graph object.
It is intended to accumulate rdf statements and dump them in several
formats.
"""
rdf = None
# ----------------------------------------------------------------------- #
def __init__(self):
self.rdf = ontology.new_rdf_Graph()
# ----------------------------------------------------------------------- #
def add(self, s, p, o):
"""
Add the rdf statement to the rdf container
:param s: subject of the rdf statement
:param p: predicate of the rdf statement
:param o: object of the rdf statement
:return: None
"""
assert type(s) in (type(''),type(u'')) or type(s) == type(rdflib.term.URIRef(''))
assert type(p) in (type(''),type(u'')) or type(p) == type(rdflib.term.URIRef(''))
if type(s) in (type(''),type(u'')):
s = rdflib.term.URIRef(s)
if type(p) in (type(''),type(u'')):
p = rdflib.term.URIRef(p)
o = utils.py2rdf(o)
self.rdf.add((s,p,o))
# ----------------------------------------------------------------------- #
def dumps(self, output_format):
"""
Dump the rdf graph into a string
:param output_format: Format of the dumped rdf
        :return: String representation of the rdf graph
"""
assert output_format in SUPPORTED_OUTPUT_TYPES
return self.rdf.serialize(format=output_format)
# ============================================================================ #
class Storage(object):
"""
Storage abstract class
"""
# ----------------------------------------------------------------------- #
def __init__(self):
raise NotImplementedError("Storage should not be instantiated")
# ----------------------------------------------------------------------- #
@staticmethod
def connect(settings, dataset='', namespaces=bModel.NAMESPACES, verbose=False):
"""
        Creates a connection to a sparql endpoint using "settings" parameters
        :return: Connection object (subclass of storage.Connection)
"""
if settings['birda.storage_type'] == 'file':
import file_storage
return file_storage.FileConnection(settings, dataset=dataset, namespaces=namespaces, verbose=verbose)
else:
raise NotImplementedError("Storage type unknown")
# ================================================================================================ #
if __name__ == '__main__':
storage = Storage() | gpl-3.0 | 6,128,479,153,392,613,000 | 26.709571 | 158 | 0.537344 | false |
onelab-eu/sfa | sfa/client/sfaadmin.py | 1 | 22773 | #!/usr/bin/python
import os
import sys
import copy
from pprint import pformat, PrettyPrinter
from optparse import OptionParser
from sfa.generic import Generic
from sfa.util.xrn import Xrn
from sfa.storage.record import Record
from sfa.trust.hierarchy import Hierarchy
from sfa.trust.gid import GID
from sfa.trust.certificate import convert_public_key
from sfa.client.common import optparse_listvalue_callback, optparse_dictvalue_callback, terminal_render, filter_records
from sfa.client.candidates import Candidates
from sfa.client.sfi import save_records_to_file
pprinter = PrettyPrinter(indent=4)
try:
help_basedir=Hierarchy().basedir
except:
help_basedir='*unable to locate Hierarchy().basedir'
def add_options(*args, **kwargs):
def _decorator(func):
func.__dict__.setdefault('add_options', []).insert(0, (args, kwargs))
return func
return _decorator
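# Illustrative note (not in the original source): add_options only records the
# optparse arguments on the decorated method.  A single decorator such as
#   @add_options('-v', '--verbose', dest='verbose', action='store_true')
#   def foo(self, verbose=False): ...
# leaves foo.add_options == [(('-v', '--verbose'),
#                             {'dest': 'verbose', 'action': 'store_true'})],
# which SfaAdmin.main() later replays into OptionParser.add_option.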
class Commands(object):
def _get_commands(self):
command_names = []
for attrib in dir(self):
if callable(getattr(self, attrib)) and not attrib.startswith('_'):
command_names.append(attrib)
return command_names
class RegistryCommands(Commands):
def __init__(self, *args, **kwds):
self.api= Generic.the_flavour().make_api(interface='registry')
def version(self):
"""Display the Registry version"""
version = self.api.manager.GetVersion(self.api, {})
pprinter.pprint(version)
@add_options('-x', '--xrn', dest='xrn', metavar='<xrn>', help='authority to list (hrn/urn - mandatory)')
@add_options('-t', '--type', dest='type', metavar='<type>', help='object type', default='all')
@add_options('-r', '--recursive', dest='recursive', metavar='<recursive>', help='list all child records',
action='store_true', default=False)
@add_options('-v', '--verbose', dest='verbose', action='store_true', default=False)
def list(self, xrn, type=None, recursive=False, verbose=False):
"""List names registered at a given authority - possibly filtered by type"""
xrn = Xrn(xrn, type)
options_dict = {'recursive': recursive}
records = self.api.manager.List(self.api, xrn.get_hrn(), options=options_dict)
list = filter_records(type, records)
# terminal_render expects an options object
class Options: pass
options=Options()
options.verbose=verbose
terminal_render (list, options)
@add_options('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn (mandatory)')
@add_options('-t', '--type', dest='type', metavar='<type>', help='object type', default=None)
@add_options('-o', '--outfile', dest='outfile', metavar='<outfile>', help='save record to file')
@add_options('-f', '--format', dest='format', metavar='<display>', type='choice',
choices=('text', 'xml', 'simple'), help='display record in different formats')
def show(self, xrn, type=None, format=None, outfile=None):
"""Display details for a registered object"""
records = self.api.manager.Resolve(self.api, xrn, type, details=True)
for record in records:
sfa_record = Record(dict=record)
sfa_record.dump(format)
if outfile:
save_records_to_file(outfile, records)
def _record_dict(self, xrn, type, email, key,
slices, researchers, pis,
url, description, extras):
record_dict = {}
if xrn:
if type:
xrn = Xrn(xrn, type)
else:
xrn = Xrn(xrn)
record_dict['urn'] = xrn.get_urn()
record_dict['hrn'] = xrn.get_hrn()
record_dict['type'] = xrn.get_type()
if url:
record_dict['url'] = url
if description:
record_dict['description'] = description
if key:
try:
pubkey = open(key, 'r').read()
except IOError:
pubkey = key
record_dict['reg-keys'] = [pubkey]
if slices:
record_dict['slices'] = slices
if researchers:
record_dict['reg-researchers'] = researchers
if email:
record_dict['email'] = email
if pis:
record_dict['reg-pis'] = pis
if extras:
record_dict.update(extras)
return record_dict
@add_options('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn', default=None)
@add_options('-t', '--type', dest='type', metavar='<type>', help='object type (mandatory)',)
@add_options('-a', '--all', dest='all', metavar='<all>', action='store_true', default=False, help='check all users GID')
@add_options('-v', '--verbose', dest='verbose', metavar='<verbose>', action='store_true', default=False, help='verbose mode: display user\'s hrn ')
def check_gid(self, xrn=None, type=None, all=None, verbose=None):
"""Check the correspondance between the GID and the PubKey"""
# db records
from sfa.storage.model import RegRecord
db_query = self.api.dbsession().query(RegRecord).filter_by(type=type)
if xrn and not all:
hrn = Xrn(xrn).get_hrn()
db_query = db_query.filter_by(hrn=hrn)
elif all and xrn:
print "Use either -a or -x <xrn>, not both !!!"
sys.exit(1)
elif not all and not xrn:
print "Use either -a or -x <xrn>, one of them is mandatory !!!"
sys.exit(1)
records = db_query.all()
if not records:
print "No Record found"
sys.exit(1)
OK = []
NOK = []
ERROR = []
NOKEY = []
for record in records:
# get the pubkey stored in SFA DB
if record.reg_keys:
db_pubkey_str = record.reg_keys[0].key
try:
db_pubkey_obj = convert_public_key(db_pubkey_str)
except:
ERROR.append(record.hrn)
continue
else:
NOKEY.append(record.hrn)
continue
# get the pubkey from the gid
gid_str = record.gid
gid_obj = GID(string = gid_str)
gid_pubkey_obj = gid_obj.get_pubkey()
# Check if gid_pubkey_obj and db_pubkey_obj are the same
check = gid_pubkey_obj.is_same(db_pubkey_obj)
if check :
OK.append(record.hrn)
else:
NOK.append(record.hrn)
if not verbose:
print "Users NOT having a PubKey: %s\n\
Users having a non RSA PubKey: %s\n\
Users having a GID/PubKey correpondence OK: %s\n\
Users having a GID/PubKey correpondence Not OK: %s\n"%(len(NOKEY), len(ERROR), len(OK), len(NOK))
else:
print "Users NOT having a PubKey: %s and are: \n%s\n\n\
Users having a non RSA PubKey: %s and are: \n%s\n\n\
Users having a GID/PubKey correpondence OK: %s and are: \n%s\n\n\
Users having a GID/PubKey correpondence NOT OK: %s and are: \n%s\n\n"%(len(NOKEY),NOKEY, len(ERROR), ERROR, len(OK), OK, len(NOK), NOK)
@add_options('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn (mandatory)')
@add_options('-t', '--type', dest='type', metavar='<type>', help='object type', default=None)
@add_options('-e', '--email', dest='email', default="",
help="email (mandatory for users)")
@add_options('-u', '--url', dest='url', metavar='<url>', default=None,
help="URL, useful for slices")
@add_options('-d', '--description', dest='description', metavar='<description>',
help='Description, useful for slices', default=None)
@add_options('-k', '--key', dest='key', metavar='<key>', help='public key string or file',
default=None)
@add_options('-s', '--slices', dest='slices', metavar='<slices>', help='Set/replace slice xrns',
default='', type="str", action='callback', callback=optparse_listvalue_callback)
@add_options('-r', '--researchers', dest='researchers', metavar='<researchers>', help='Set/replace slice researchers',
default='', type="str", action='callback', callback=optparse_listvalue_callback)
@add_options('-p', '--pis', dest='pis', metavar='<PIs>',
help='Set/replace Principal Investigators/Project Managers',
default='', type="str", action='callback', callback=optparse_listvalue_callback)
@add_options('-X','--extra',dest='extras',default={},type='str',metavar="<EXTRA_ASSIGNS>",
action="callback", callback=optparse_dictvalue_callback, nargs=1,
help="set extra/testbed-dependent flags, e.g. --extra enabled=true")
def register(self, xrn, type=None, email='', key=None,
slices='', pis='', researchers='',
url=None, description=None, extras={}):
"""Create a new Registry record"""
record_dict = self._record_dict(xrn=xrn, type=type, email=email, key=key,
slices=slices, researchers=researchers, pis=pis,
url=url, description=description, extras=extras)
self.api.manager.Register(self.api, record_dict)
@add_options('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn (mandatory)')
@add_options('-t', '--type', dest='type', metavar='<type>', help='object type', default=None)
@add_options('-u', '--url', dest='url', metavar='<url>', help='URL', default=None)
@add_options('-d', '--description', dest='description', metavar='<description>',
help='Description', default=None)
@add_options('-k', '--key', dest='key', metavar='<key>', help='public key string or file',
default=None)
@add_options('-s', '--slices', dest='slices', metavar='<slices>', help='Set/replace slice xrns',
default='', type="str", action='callback', callback=optparse_listvalue_callback)
@add_options('-r', '--researchers', dest='researchers', metavar='<researchers>', help='Set/replace slice researchers',
default='', type="str", action='callback', callback=optparse_listvalue_callback)
@add_options('-p', '--pis', dest='pis', metavar='<PIs>',
help='Set/replace Principal Investigators/Project Managers',
default='', type="str", action='callback', callback=optparse_listvalue_callback)
@add_options('-X','--extra',dest='extras',default={},type='str',metavar="<EXTRA_ASSIGNS>",
action="callback", callback=optparse_dictvalue_callback, nargs=1,
help="set extra/testbed-dependent flags, e.g. --extra enabled=true")
def update(self, xrn, type=None, email='', key=None,
slices='', pis='', researchers='',
url=None, description=None, extras={}):
"""Update an existing Registry record"""
record_dict = self._record_dict(xrn=xrn, type=type, email=email, key=key,
slices=slices, researchers=researchers, pis=pis,
url=url, description=description, extras=extras)
self.api.manager.Update(self.api, record_dict)
@add_options('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn (mandatory)')
@add_options('-t', '--type', dest='type', metavar='<type>', help='object type', default=None)
def remove(self, xrn, type=None):
"""Remove given object from the registry"""
xrn = Xrn(xrn, type)
self.api.manager.Remove(self.api, xrn)
@add_options('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn (mandatory)')
@add_options('-t', '--type', dest='type', metavar='<type>', help='object type', default=None)
def credential(self, xrn, type=None):
"""Invoke GetCredential"""
cred = self.api.manager.GetCredential(self.api, xrn, type, self.api.hrn)
print cred
def import_registry(self):
"""Run the importer"""
from sfa.importer import Importer
importer = Importer()
importer.run()
def sync_db(self):
"""Initialize or upgrade the db"""
from sfa.storage.dbschema import DBSchema
dbschema=DBSchema()
dbschema.init_or_upgrade()
@add_options('-a', '--all', dest='all', metavar='<all>', action='store_true', default=False,
help='Remove all registry records and all files in %s area' % help_basedir)
@add_options('-c', '--certs', dest='certs', metavar='<certs>', action='store_true', default=False,
help='Remove all cached certs/gids found in %s' % help_basedir )
@add_options('-0', '--no-reinit', dest='reinit', metavar='<reinit>', action='store_false', default=True,
help='Prevents new DB schema from being installed after cleanup')
def nuke(self, all=False, certs=False, reinit=True):
"""Cleanup local registry DB, plus various additional filesystem cleanups optionally"""
from sfa.storage.dbschema import DBSchema
from sfa.util.sfalogging import _SfaLogger
logger = _SfaLogger(logfile='/var/log/sfa_import.log', loggername='importlog')
logger.setLevelFromOptVerbose(self.api.config.SFA_API_LOGLEVEL)
logger.info("Purging SFA records from database")
dbschema=DBSchema()
dbschema.nuke()
# for convenience we re-create the schema here, so there's no need for an explicit
# service sfa restart
# however in some (upgrade) scenarios this might be wrong
if reinit:
logger.info("re-creating empty schema")
dbschema.init_or_upgrade()
# remove the server certificate and all gids found in /var/lib/sfa/authorities
if certs:
logger.info("Purging cached certificates")
for (dir, _, files) in os.walk('/var/lib/sfa/authorities'):
for file in files:
if file.endswith('.gid') or file == 'server.cert':
path=dir+os.sep+file
os.unlink(path)
# just remove all files that do not match 'server.key' or 'server.cert'
if all:
logger.info("Purging registry filesystem cache")
preserved_files = [ 'server.key', 'server.cert']
for (dir,_,files) in os.walk(Hierarchy().basedir):
for file in files:
if file in preserved_files: continue
path=dir+os.sep+file
os.unlink(path)
class CertCommands(Commands):
def __init__(self, *args, **kwds):
self.api= Generic.the_flavour().make_api(interface='registry')
def import_gid(self, xrn):
pass
@add_options('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn (mandatory)')
@add_options('-t', '--type', dest='type', metavar='<type>', help='object type', default=None)
@add_options('-o', '--outfile', dest='outfile', metavar='<outfile>', help='output file', default=None)
def export(self, xrn, type=None, outfile=None):
"""Fetch an object's GID from the Registry"""
from sfa.storage.model import RegRecord
hrn = Xrn(xrn).get_hrn()
request=self.api.dbsession().query(RegRecord).filter_by(hrn=hrn)
if type: request = request.filter_by(type=type)
record=request.first()
if record:
gid = GID(string=record.gid)
else:
# check the authorities hierarchy
hierarchy = Hierarchy()
try:
auth_info = hierarchy.get_auth_info(hrn)
gid = auth_info.gid_object
except:
print "Record: %s not found" % hrn
sys.exit(1)
# save to file
if not outfile:
outfile = os.path.abspath('./%s.gid' % gid.get_hrn())
gid.save_to_file(outfile, save_parents=True)
    @add_options('-g', '--gidfile', dest='gidfile', metavar='<gid>', help='path of gid file to display (mandatory)')
def display(self, gidfile):
"""Print contents of a GID file"""
gid_path = os.path.abspath(gidfile)
if not gid_path or not os.path.isfile(gid_path):
print "No such gid file: %s" % gidfile
sys.exit(1)
gid = GID(filename=gid_path)
gid.dump(dump_parents=True)
class AggregateCommands(Commands):
def __init__(self, *args, **kwds):
self.api= Generic.the_flavour().make_api(interface='aggregate')
def version(self):
"""Display the Aggregate version"""
version = self.api.manager.GetVersion(self.api, {})
pprinter.pprint(version)
@add_options('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn (mandatory)')
def status(self, xrn):
"""Retrieve the status of the slivers belonging to the named slice (Status)"""
urns = [Xrn(xrn, 'slice').get_urn()]
status = self.api.manager.Status(self.api, urns, [], {})
pprinter.pprint(status)
@add_options('-r', '--rspec-version', dest='rspec_version', metavar='<rspec_version>',
default='KOREN', help='version/format of the resulting rspec response')
def resources(self, rspec_version='KOREN'):
"""Display the available resources at an aggregate"""
options = {'geni_rspec_version': rspec_version}
print options
resources = self.api.manager.ListResources(self.api, [], options)
print resources
@add_options('-x', '--xrn', dest='xrn', metavar='<xrn>', help='slice hrn/urn (mandatory)')
@add_options('-r', '--rspec', dest='rspec', metavar='<rspec>', help='rspec file (mandatory)')
def allocate(self, xrn, rspec):
"""Allocate slivers"""
xrn = Xrn(xrn, 'slice')
slice_urn=xrn.get_urn()
rspec_string = open(rspec).read()
options={}
expiration = None
manifest = self.api.manager.Allocate(self.api, slice_urn, [], rspec_string, expiration, options)
print manifest
@add_options('-x', '--xrn', dest='xrn', metavar='<xrn>', help='slice hrn/urn (mandatory)')
def provision(self, xrn):
"""Provision slivers"""
xrn = Xrn(xrn, 'slice')
slice_urn=xrn.get_urn()
options = {'geni_rspec_version': 'KOREN'}
manifest = self.api.manager.Provision(self.api, [slice_urn], [], options)
print manifest
@add_options('-x', '--xrn', dest='xrn', metavar='<xrn>', help='slice hrn/urn (mandatory)')
def delete(self, xrn):
"""Delete slivers"""
self.api.manager.Delete(self.api, [xrn], [], {})
class SliceManagerCommands(AggregateCommands):
def __init__(self, *args, **kwds):
self.api= Generic.the_flavour().make_api(interface='slicemgr')
class SfaAdmin:
CATEGORIES = {'certificate': CertCommands,
'registry': RegistryCommands,
'aggregate': AggregateCommands,
'slicemgr': SliceManagerCommands}
# returns (name,class) or (None,None)
def find_category (self, input):
full_name=Candidates (SfaAdmin.CATEGORIES.keys()).only_match(input)
if not full_name: return (None,None)
return (full_name,SfaAdmin.CATEGORIES[full_name])
def summary_usage (self, category=None):
print "Usage:", self.script_name + " category command [<options>]"
if category and category in SfaAdmin.CATEGORIES:
categories=[category]
else:
categories=SfaAdmin.CATEGORIES
for c in categories:
cls=SfaAdmin.CATEGORIES[c]
print "==================== category=%s"%c
names=cls.__dict__.keys()
names.sort()
for name in names:
method=cls.__dict__[name]
if name.startswith('_'): continue
margin=15
format="%%-%ds"%margin
print "%-15s"%name,
doc=getattr(method,'__doc__',None)
if not doc:
print "<missing __doc__>"
continue
lines=[line.strip() for line in doc.split("\n")]
line1=lines.pop(0)
print line1
for extra_line in lines: print margin*" ",extra_line
sys.exit(2)
def main(self):
argv = copy.deepcopy(sys.argv)
self.script_name = argv.pop(0)
# ensure category is specified
if len(argv) < 1:
self.summary_usage()
# ensure category is valid
category_input = argv.pop(0)
(category_name, category_class) = self.find_category (category_input)
if not category_name or not category_class:
self.summary_usage(category_name)
usage = "%%prog %s command [options]" % (category_name)
parser = OptionParser(usage=usage)
# ensure command is valid
category_instance = category_class()
commands = category_instance._get_commands()
if len(argv) < 1:
# xxx what is this about ?
command_name = '__call__'
else:
command_input = argv.pop(0)
command_name = Candidates (commands).only_match (command_input)
if command_name and hasattr(category_instance, command_name):
command = getattr(category_instance, command_name)
else:
self.summary_usage(category_name)
# ensure options are valid
usage = "%%prog %s %s [options]" % (category_name, command_name)
parser = OptionParser(usage=usage)
for args, kwdargs in getattr(command, 'add_options', []):
parser.add_option(*args, **kwdargs)
(opts, cmd_args) = parser.parse_args(argv)
cmd_kwds = vars(opts)
        # don't override method defaults with None values
for k, v in cmd_kwds.items():
if v is None:
del cmd_kwds[k]
# execute command
try:
#print "invoking %s *=%s **=%s"%(command.__name__,cmd_args, cmd_kwds)
command(*cmd_args, **cmd_kwds)
sys.exit(0)
except TypeError:
print "Possible wrong number of arguments supplied"
#import traceback
#traceback.print_exc()
print command.__doc__
parser.print_help()
sys.exit(1)
#raise
except Exception:
print "Command failed, please check log for more info"
raise
sys.exit(1)
| mit | -1,940,394,473,199,436,800 | 42.543021 | 151 | 0.575243 | false |
PJK/libcbor | doc/source/conf.py | 2 | 9206 | # -*- coding: utf-8 -*-
#
# libcbor documentation build configuration file, created by
# sphinx-quickstart on Sun Jun 8 13:27:19 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'breathe',
'sphinx.ext.mathjax',
'sphinx.ext.autodoc',
'sphinx.ext.ifconfig'
]
import subprocess, os
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
print(subprocess.check_output('cd ../..; mkdir doc/build; doxygen', shell=True))
if on_rtd:
print(subprocess.check_output('cd ../..; mkdir doc/build; doxygen', shell=True))
print(os.getcwd())
print(os.getcwd() + '/../build/doxygen/xml')
breathe_projects = {
'libcbor': '../build/doxygen/xml'
}
breathe_default_project = "libcbor"
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'libcbor'
copyright = '2014 - 2020, Pavel Kalvoda'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.8'
release = '0.8.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# https://github.com/michaeljones/breathe/issues/197
exclude_patterns = ['breathe/*']
# The reST default role (used for this markup: `text`) to use for all
# documents.
breathe_domain_by_extension = {
"h" : "C",
}
#default_role = 'c:func'
primary_domain = "cpp"
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'libcbordoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'libcbor.tex', 'libcbor Documentation',
'Pavel Kalvoda', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'libcbor', 'libcbor Documentation',
['Pavel Kalvoda'], 3)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'libcbor', 'libcbor Documentation',
'Pavel Kalvoda', 'libcbor', 'C library for parsing and generating CBOR.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify it
| mit | -8,067,708,217,064,598,000 | 30.101351 | 98 | 0.703346 | false |
gnocchixyz/python-gnocchiclient | gnocchiclient/v1/resource_cli.py | 1 | 10591 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import distutils.util
from cliff import command
from cliff import lister
from cliff import show
from gnocchiclient import exceptions
from gnocchiclient import utils
class CliResourceList(lister.Lister):
"""List resources."""
COLS = ('id', 'type',
'project_id', 'user_id',
'original_resource_id',
'started_at', 'ended_at',
'revision_start', 'revision_end')
def get_parser(self, prog_name, history=True):
parser = super(CliResourceList, self).get_parser(prog_name)
parser.add_argument("--details", action='store_true',
help="Show all attributes of generic resources"),
if history:
parser.add_argument("--history", action='store_true',
help="Show history of the resources"),
parser.add_argument("--limit", type=int, metavar="<LIMIT>",
help="Number of resources to return "
"(Default is server default)")
parser.add_argument("--marker", metavar="<MARKER>",
help="Last item of the previous listing. "
"Return the next results after this value")
parser.add_argument("--sort", action="append", metavar="<SORT>",
help="Sort of resource attribute "
"(example: user_id:desc-nullslast")
parser.add_argument("--type", "-t", dest="resource_type",
default="generic", help="Type of resource")
return parser
def _list2cols(self, resources):
"""Return a formatted list of resources."""
if not resources:
return self.COLS, []
cols = list(self.COLS)
for k in resources[0]:
if k not in cols:
cols.append(k)
if 'creator' in cols:
cols.remove('created_by_user_id')
cols.remove('created_by_project_id')
return utils.list2cols(cols, resources)
def take_action(self, parsed_args):
resources = utils.get_client(self).resource.list(
resource_type=parsed_args.resource_type,
**utils.get_pagination_options(parsed_args))
# Do not dump metrics because it makes the list way too long
for r in resources:
del r['metrics']
return self._list2cols(resources)
class CliResourceHistory(CliResourceList):
"""Show the history of a resource."""
def get_parser(self, prog_name):
parser = super(CliResourceHistory, self).get_parser(prog_name,
history=False)
parser.add_argument("resource_id",
help="ID of a resource")
return parser
def take_action(self, parsed_args):
resources = utils.get_client(self).resource.history(
resource_type=parsed_args.resource_type,
resource_id=parsed_args.resource_id,
**utils.get_pagination_options(parsed_args))
if parsed_args.formatter == 'table':
return self._list2cols(list(map(normalize_metrics, resources)))
return self._list2cols(resources)
class CliResourceSearch(CliResourceList):
"""Search resources with specified query rules."""
def get_parser(self, prog_name):
parser = super(CliResourceSearch, self).get_parser(prog_name)
utils.add_query_argument("query", parser)
return parser
def take_action(self, parsed_args):
resources = utils.get_client(self).resource.search(
resource_type=parsed_args.resource_type,
query=parsed_args.query,
**utils.get_pagination_options(parsed_args))
# Do not dump metrics because it makes the list way too long
for r in resources:
del r['metrics']
return self._list2cols(resources)
def normalize_metrics(res):
res['metrics'] = "\n".join(sorted(
["%s: %s" % (name, _id)
for name, _id in res['metrics'].items()]))
return res
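# Illustrative example (not part of the original module): normalize_metrics
# flattens the metrics mapping for table output, e.g.
#   normalize_metrics({'metrics': {'cpu': 'id-1', 'mem': 'id-2'}})
#   # -> {'metrics': 'cpu: id-1\nmem: id-2'}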
class CliResourceShow(show.ShowOne):
"""Show a resource."""
def get_parser(self, prog_name):
parser = super(CliResourceShow, self).get_parser(prog_name)
parser.add_argument("--type", "-t", dest="resource_type",
default="generic", help="Type of resource")
parser.add_argument("resource_id",
help="ID of a resource")
return parser
def take_action(self, parsed_args):
res = utils.get_client(self).resource.get(
resource_type=parsed_args.resource_type,
resource_id=parsed_args.resource_id)
if parsed_args.formatter == 'table':
normalize_metrics(res)
return self.dict2columns(res)
class CliResourceCreate(show.ShowOne):
"""Create a resource."""
def get_parser(self, prog_name):
parser = super(CliResourceCreate, self).get_parser(prog_name)
parser.add_argument("--type", "-t", dest="resource_type",
default="generic", help="Type of resource")
parser.add_argument("resource_id",
help="ID of the resource")
parser.add_argument("-a", "--attribute", action='append',
default=[],
help=("name and value of an attribute "
"separated with a ':'"))
parser.add_argument("-m", "--add-metric", action='append',
default=[],
help="name:id of a metric to add"),
parser.add_argument(
"-n", "--create-metric", action='append', default=[],
help="name:archive_policy_name of a metric to create"),
return parser
def _resource_from_args(self, parsed_args, update=False):
# Get the resource type to set the correct type
rt_attrs = utils.get_client(self).resource_type.get(
name=parsed_args.resource_type)['attributes']
resource = {}
if not update:
resource['id'] = parsed_args.resource_id
if parsed_args.attribute:
for attr in parsed_args.attribute:
attr, __, value = attr.partition(":")
attr_type = rt_attrs.get(attr, {}).get('type')
if attr_type == "number":
value = float(value)
elif attr_type == "bool":
value = bool(distutils.util.strtobool(value))
resource[attr] = value
if (parsed_args.add_metric or
parsed_args.create_metric or
(update and parsed_args.delete_metric)):
if update:
r = utils.get_client(self).resource.get(
parsed_args.resource_type,
parsed_args.resource_id)
default = r['metrics']
for metric_name in parsed_args.delete_metric:
try:
del default[metric_name]
except KeyError:
raise exceptions.MetricNotFound(
message="Metric name %s not found" % metric_name)
else:
default = {}
resource['metrics'] = default
for metric in parsed_args.add_metric:
name, _, value = metric.partition(":")
resource['metrics'][name] = value
for metric in parsed_args.create_metric:
name, _, value = metric.partition(":")
if value:
resource['metrics'][name] = {'archive_policy_name': value}
else:
resource['metrics'][name] = {}
return resource
def take_action(self, parsed_args):
resource = self._resource_from_args(parsed_args)
res = utils.get_client(self).resource.create(
resource_type=parsed_args.resource_type, resource=resource)
if parsed_args.formatter == 'table':
normalize_metrics(res)
return self.dict2columns(res)
class CliResourceUpdate(CliResourceCreate):
"""Update a resource."""
def get_parser(self, prog_name):
parser = super(CliResourceUpdate, self).get_parser(prog_name)
parser.add_argument("-d", "--delete-metric", action='append',
default=[],
help="Name of a metric to delete"),
return parser
def take_action(self, parsed_args):
resource = self._resource_from_args(parsed_args, update=True)
res = utils.get_client(self).resource.update(
resource_type=parsed_args.resource_type,
resource_id=parsed_args.resource_id,
resource=resource)
if parsed_args.formatter == 'table':
normalize_metrics(res)
return self.dict2columns(res)
class CliResourceDelete(command.Command):
"""Delete a resource."""
def get_parser(self, prog_name):
parser = super(CliResourceDelete, self).get_parser(prog_name)
parser.add_argument("resource_id",
help="ID of the resource")
return parser
def take_action(self, parsed_args):
utils.get_client(self).resource.delete(parsed_args.resource_id)
class CliResourceBatchDelete(show.ShowOne):
"""Delete a batch of resources based on attribute values."""
def get_parser(self, prog_name):
parser = super(CliResourceBatchDelete, self).get_parser(prog_name)
parser.add_argument("--type", "-t", dest="resource_type",
default="generic", help="Type of resource")
utils.add_query_argument("query", parser)
return parser
def take_action(self, parsed_args):
res = utils.get_client(self).resource.batch_delete(
resource_type=parsed_args.resource_type,
query=parsed_args.query)
return self.dict2columns(res)
| apache-2.0 | -1,724,077,443,211,696,600 | 38.966038 | 78 | 0.572656 | false |
cualbondi/cualbondi.com.ar | apps/widget/views.py | 1 | 3631 | from apps.catastro.models import Ciudad
from apps.core.models import Recorrido
from django.contrib.gis.geos import Point
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.conf import settings
from django.contrib.sites.models import Site
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_GET
@csrf_exempt
@require_GET
def v1_busqueda(request, extension):
if request.GET.get("key") == '123456789':
if extension == "html":
if request.GET.get("ciudad"):
ci = Ciudad.objects.get(slug=request.GET.get("ciudad"))
ciudades = []
else:
ci = None
ciudades = Ciudad.objects.all()
return render_to_response('widget/v1/busqueda.html',
{ 'ciudades': ciudades,
'ciudad' : ci },
context_instance=RequestContext(request))
else:
if extension == "js":
current_site = Site.objects.get_current()
try:
ci = Ciudad.objects.get(slug=request.GET.get("ciudad"))
ciudad_arg = "&ciudad="+ci.slug
except:
ciudad_arg = ""
return render_to_response('widget/v1/busqueda.js',
{ 'current_site': current_site,
'ciudad_arg' : ciudad_arg },
context_instance=RequestContext(request),
#content_type="application/x-JavaScript") #django => 1.5
mimetype="application/x-JavaScript") #django < 1.5
else:
return HttpResponse(status=403)
@csrf_exempt
@require_GET
def v1_lineas(request, extension):
if request.GET.get("key") == '123456789':
if extension == "html":
try:
lat = float(request.GET.get("lat", "NaN"))
lon = float(request.GET.get("lon", "NaN"))
rad = int(request.GET.get("rad", "NaN"))
except:
return HttpResponse(status=501)
print_ramales = request.GET.get("ramales") == "true"
recorridos = Recorrido.objects.select_related('linea').filter(ruta__dwithin=(Point(lon, lat), 0.1), ruta__distance_lt=(Point(lon, lat), rad))
if not print_ramales:
recorridos = list(set([x.linea for x in recorridos]))
return render_to_response('widget/v1/lineas.html',
{
'listado': recorridos,
'print_ramales': print_ramales,
},
context_instance=RequestContext(request))
else:
if extension == "js":
if request.GET.get("lat") and request.GET.get("lon") and request.GET.get("rad"):
current_site = Site.objects.get_current()
return render_to_response('widget/v1/lineas.js',
{ 'current_site': current_site },
context_instance=RequestContext(request),
#content_type="application/x-JavaScript") #django => 1.5
mimetype="application/x-JavaScript") #django < 1.5
else:
return HttpResponse(status=501)
else:
return HttpResponse(status=403)
def not_found(request):
return HttpResponse(status=404)
def test(request):
return render_to_response('widget/test.html',
{ 'current_site': Site.objects.get_current()})
| agpl-3.0 | 6,066,271,413,888,378,000 | 40.261364 | 153 | 0.552189 | false |
alexharmenta/Inventationery | Inventationery/apps/Inventory/migrations/0015_orderhistorymodel.py | 1 | 1364 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Inventory', '0014_auto_20151227_1250'),
]
operations = [
migrations.CreateModel(
name='OrderHistoryModel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('DocumentId', models.CharField(max_length=30)),
('CustVendName', models.CharField(default=None, max_length=100)),
('DocumentDate', models.DateField()),
('DocumentTotal', models.DecimalField(max_digits=20, decimal_places=2)),
('Qty', models.IntegerField(default=0)),
('Price', models.DecimalField(max_digits=10, decimal_places=2)),
('SubTotal', models.DecimalField(null=True, max_digits=20, decimal_places=2, blank=True)),
('Item', models.ForeignKey(related_name='OrderHistory', default=None, blank=True, to='Inventory.ItemModel', null=True)),
],
options={
'abstract': False,
},
),
]
| bsd-3-clause | 8,270,451,206,548,817,000 | 40.333333 | 136 | 0.574047 | false |
SKIRT/PTS | do/developer/execute.py | 1 | 1822 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.do.developer.execute Execute a line on a remote host and check the output.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import the relevant PTS classes and modules
from pts.core.basics.configuration import ConfigurationDefinition, parse_arguments
from pts.core.remote.remote import Remote
# -----------------------------------------------------------------
definition = ConfigurationDefinition()
definition.add_required("remote", "string", "remote host ID")
definition.add_required("line", "string", "line to be executed on the remote host")
config = parse_arguments("sessions", definition)
# -----------------------------------------------------------------
# Initialize the remote
remote = Remote()
if not remote.setup(config.remote): raise RuntimeError("The remote host '" + config.remote + "' is not available at the moment")
# -----------------------------------------------------------------
print("")
#print("-----------------------------------------------------------------")
print("OUTPUT")
print("-----------------------------------------------------------------")
print("")
# Execute the line, show the output
output = remote.execute(config.line)
for line in output: print(line)
print("")
print("-----------------------------------------------------------------")
print("")
# -----------------------------------------------------------------
| agpl-3.0 | 2,130,984,604,878,798,000 | 36.9375 | 128 | 0.450302 | false |
cdiener/micom | micom/solution.py | 1 | 3836 | """A community solution object."""
import numpy as np
import pandas as pd
from optlang.interface import OPTIMAL
from cobra.core import Solution, get_solution
def _group_species(values, ids, species, what="reaction"):
"""Format a list of values by id and species."""
df = pd.DataFrame({values.name: values, what: ids, "species": species})
df = df.pivot(index="species", columns=what, values=values.name)
df.name = values.name
return df
class CommunitySolution(Solution):
"""An FBA solution for an entire community.
Attributes
----------
objective_value : float
The (optimal) value for the objective function.
members : pandas.Series
Contains basic info about the individual members of the community such
as id, abundance and growth rates.
growth_rate : float
The overall growth rate for the community normalized to 1 gDW.
status : str
The solver status related to the solution.
fluxes : pandas.DataFrame
Contains the reaction fluxes (primal values of variables) stratified
by species. Columns denote individual fluxes and rows denote species.
Fluxes will be NA if the reaction does not exist in the organism.
reduced_costs : pandas.Series
Contains reaction reduced costs (dual values of variables) stratified
by species. Columns denote individual fluxes and rows denote species.
Reduced costs will be NA if the reaction does not exist in the
organism.
shadow_prices : pandas.Series
Contains metabolite shadow prices (dual values of constraints)
stratified by species. Columns denote individual metabolites and rows
denote species. Shadow prices will be NA if the metabolite does not
exist in the organism.
"""
def __init__(self, community, slim=False,
reactions=None, metabolites=None):
"""Get the solution from a community model."""
if reactions is None:
reactions = community.reactions
if metabolites is None:
metabolites = community.metabolites
if not slim:
rids = np.array([(r.global_id, r.community_id) for r in reactions])
mids = np.array([(m.global_id, m.community_id)
for m in metabolites])
sol = get_solution(community, reactions, metabolites)
super(CommunitySolution, self).__init__(
community.solver.objective.value, community.solver.status,
np.unique(rids[:, 0]),
_group_species(sol.fluxes, rids[:, 0], rids[:, 1]),
_group_species(sol.reduced_costs, rids[:, 0], rids[:, 1]),
np.unique(mids[:, 0]),
_group_species(sol.shadow_prices, mids[:, 0], mids[:, 1],
what="metabolites"))
else:
super(CommunitySolution, self).__init__(
community.solver.objective.value, community.solver.status,
None, None, None, None, None)
gcs = pd.Series()
for sp in community.objectives:
gcs[sp] = community.constraints["objective_" + sp].primal
self.members = pd.DataFrame({"id": gcs.index,
"abundance": community.abundances,
"growth_rate": gcs})
self.growth_rate = sum(community.abundances * gcs)
del self.reactions
del self.metabolites
def __repr__(self):
"""Convert CommunitySolution instance to string representation."""
if self.status != OPTIMAL:
return "<CommunitySolution {0:s} at 0x{1:x}>".format(
self.status, id(self))
return "<CommunitySolution {0:.3f} at 0x{1:x}>".format(
self.growth_rate, id(self))
| apache-2.0 | 2,541,745,594,639,425,500 | 42.590909 | 79 | 0.612617 | false |
pitpig/rozowoo | app/trackback.py | 1 | 3315 | """tblib.py: A Trackback (client) implementation in Python
"""
__author__ = "Matt Croydon <[email protected]>"
__copyright__ = "Copyright 2003, Matt Croydon"
__license__ = "GPL"
__version__ = "0.1.0"
__history__ = """
0.1.0: 1/29/03 - Code cleanup, release. It can send pings, and autodiscover a URL to ping.
0.0.9: 1/29/03 - Basic error handling and autodiscovery works!
0.0.5: 1/29/03 - Internal development version. Working on autodiscovery and error handling.
0.0.4: 1/22/03 - First public release, code cleanup.
0.0.3: 1/22/03 - Removed hard coding that was used for testing.
0.0.2: 1/21/03 - First working version.
0.0.1: 1/21/03 - Initial version. Thanks to Mark Pilgrim for helping me figure some module basics out.
"""
import httplib, urllib, urlparse, re
from google.appengine.api import urlfetch
import logging
"""Everything I needed to know about trackback I learned from the trackback tech specs page
http://www.movabletype.org/docs/mttrackback.html. All arguments are optional. This allows us to create an empty TrackBack object,
then use autodiscovery to populate its attributes.
"""
class TrackBack:
def __init__(self, tbUrl=None, title=None, excerpt=None, url=None, blog_name=None):
self.tbUrl = tbUrl
self.title = title
self.excerpt = excerpt
self.url = url
self.blog_name = blog_name
self.tbErrorCode = None
self.tbErrorMessage = None
def ping(self):
# Only execute if a trackback url has been defined.
if self.tbUrl:
            # Create parameters and make them play nice with HTTP
# Python's httplib example helps a lot:
# http://python.org/doc/current/lib/httplib-examples.html
params = urllib.urlencode({'title': self.title, 'url': self.url, 'excerpt': self.excerpt, 'blog_name': self.blog_name})
headers = ({"Content-type": "application/x-www-form-urlencoded",
"User-Agent": "micolog"})
# urlparse is my hero
# http://www.python.org/doc/current/lib/module-urlparse.html
logging.info("ping...%s",params)
response=urlfetch.fetch(self.tbUrl,method=urlfetch.POST,payload=params,headers=headers)
self.httpResponse = response.status_code
data = response.content
self.tbResponse = data
logging.info("ping...%s"%data)
# Thanks to Steve Holden's book: _Python Web Programming_ (http://pydish.holdenweb.com/pwp/)
# Why parse really simple XML when you can just use regular expressions? Rawk.
errorpattern = r'<error>(.*?)</error>'
reg = re.search(errorpattern, self.tbResponse)
if reg:
self.tbErrorCode = reg.group(1)
if int(self.tbErrorCode) == 1:
errorpattern2 = r'<message>(.*?)</message>'
reg2 = re.search(errorpattern2, self.tbResponse)
if reg2:
self.tbErrorMessage = reg2.group(1)
else:
return 1
def autodiscover(self, urlToCheck):
response=urlfetch.fetch(urlToCheck)
data = response.content
tbpattern = r'trackback:ping="(.*?)"'
reg = re.search(tbpattern, data)
if reg:
self.tbUrl = reg.group(1) | mit | -4,092,896,514,661,918,700 | 43.213333 | 131 | 0.625943 | false |
MaxVanDeursen/tribler | Tribler/Core/statistics.py | 1 | 5656 | import os
from Tribler.Core.CacheDB.sqlitecachedb import DB_FILE_RELATIVE_PATH
from Tribler.Core.simpledefs import NTFY_TORRENTS, NTFY_CHANNELCAST
DATA_NONE = u"None"
class TriblerStatistics(object):
def __init__(self, session):
"""
Constructor.
:param session: The Tribler session.
"""
self.session = session
def get_tribler_statistics(self):
"""
Return a dictionary with some general Tribler statistics.
"""
torrent_db_handler = self.session.open_dbhandler(NTFY_TORRENTS)
channel_db_handler = self.session.open_dbhandler(NTFY_CHANNELCAST)
torrent_stats = torrent_db_handler.getTorrentsStats()
torrent_total_size = 0 if torrent_stats[1] is None else torrent_stats[1]
stats_dict = {"torrents": {"num_collected": torrent_stats[0], "total_size": torrent_total_size,
"num_files": torrent_stats[2]},
"num_channels": channel_db_handler.getNrChannels(),
"database_size": os.path.getsize(
os.path.join(self.session.get_state_dir(), DB_FILE_RELATIVE_PATH))}
if self.session.lm.rtorrent_handler:
torrent_queue_stats = self.session.lm.rtorrent_handler.get_queue_stats()
torrent_queue_size_stats = self.session.lm.rtorrent_handler.get_queue_size_stats()
torrent_queue_bandwidth_stats = self.session.lm.rtorrent_handler.get_bandwidth_stats()
stats_dict["torrent_queue_stats"] = torrent_queue_stats
stats_dict["torrent_queue_size_stats"] = torrent_queue_size_stats
stats_dict["torrent_queue_bandwidth_stats"] = torrent_queue_bandwidth_stats
return stats_dict
def get_dispersy_statistics(self):
"""
Return a dictionary with some general Dispersy statistics.
"""
dispersy = self.session.get_dispersy_instance()
dispersy.statistics.update()
stats = dispersy.statistics
return {
"wan_address": "%s:%d" % stats.wan_address,
"lan_address": "%s:%d" % stats.lan_address,
"connection": unicode(stats.connection_type),
"runtime": stats.timestamp - stats.start,
"total_downloaded": stats.total_down,
"total_uploaded": stats.total_up,
"packets_sent": stats.total_send,
"packets_received": stats.total_received,
"packets_success": stats.msg_statistics.success_count,
"packets_dropped": stats.msg_statistics.drop_count,
"packets_delayed_sent": stats.msg_statistics.delay_send_count,
"packets_delayed_received": stats.msg_statistics.delay_received_count,
"packets_delayed_success": stats.msg_statistics.delay_success_count,
"packets_delayed_timeout": stats.msg_statistics.delay_timeout_count,
"total_walk_attempts": stats.walk_attempt_count,
"total_walk_success": stats.walk_success_count,
"sync_messages_created": stats.msg_statistics.created_count,
"bloom_new": sum(c.sync_bloom_new for c in stats.communities),
"bloom_reused": sum(c.sync_bloom_reuse for c in stats.communities),
"bloom_skipped": sum(c.sync_bloom_skip for c in stats.communities),
}
def get_community_statistics(self):
"""
Return a dictionary with general statistics of the active Dispersy communities.
"""
communities_stats = []
dispersy = self.session.get_dispersy_instance()
dispersy.statistics.update()
for community in dispersy.statistics.communities:
if community.dispersy_enable_candidate_walker or community.dispersy_enable_candidate_walker_responses or \
community.candidates:
candidate_count = "%s" % len(community.candidates)
else:
candidate_count = "-"
communities_stats.append({
"identifier": community.hex_cid,
"member": community.hex_mid,
"classification": community.classification,
"global_time": community.global_time,
"median_global_time": community.acceptable_global_time -
community.dispersy_acceptable_global_time_range,
"acceptable_global_time_range": community.dispersy_acceptable_global_time_range,
"walk_attempts": community.msg_statistics.walk_attempt_count,
"walk_success": community.msg_statistics.walk_success_count,
"sync_bloom_created": community.sync_bloom_new,
"sync_bloom_reused": community.sync_bloom_reuse,
"sync_bloom_skipped": community.sync_bloom_skip,
"sync_messages_created": community.msg_statistics.created_count,
"packets_sent": community.msg_statistics.outgoing_count,
"packets_received": community.msg_statistics.total_received_count,
"packets_success": community.msg_statistics.success_count,
"packets_dropped": community.msg_statistics.drop_count,
"packets_delayed_sent": community.msg_statistics.delay_send_count,
"packets_delayed_received": community.msg_statistics.delay_received_count,
"packets_delayed_success": community.msg_statistics.delay_success_count,
"packets_delayed_timeout": community.msg_statistics.delay_timeout_count,
"candidates": candidate_count
})
return communities_stats
| lgpl-3.0 | 5,846,355,451,444,601,000 | 47.34188 | 118 | 0.62111 | false |
osaddon/cdmi | test/functional/cdmi/test_utils.py | 1 | 3630 | # Copyright (c) 2010-2011 IBM.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
#from test import get_config
from swift.common.utils import readconf
import httplib
import time
import json
import base64
import os
import sys
def get_config(section_name=None, defaults=None):
"""
Attempt to get a test config dictionary.
:param section_name: the section to read (all sections if not defined)
:param defaults: an optional dictionary namespace of defaults
"""
config_file = os.environ.get('SWIFT_TEST_CONFIG_FILE',
'/etc/swift/test.conf')
config = {}
if defaults is not None:
config.update(defaults)
try:
config = readconf(config_file, section_name)
except SystemExit:
if not os.path.exists(config_file):
print >>sys.stderr, \
'Unable to read test config %s - file not found' \
% config_file
elif not os.access(config_file, os.R_OK):
print >>sys.stderr, \
'Unable to read test config %s - permission denied' \
% config_file
else:
print >>sys.stderr, \
'Unable to read test config %s - section %s not found' \
% (config_file, section_name)
return config
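# Hedged usage sketch (illustrative key names; real values come from
# /etc/swift/test.conf or the file pointed to by SWIFT_TEST_CONFIG_FILE):
#
#   conf = get_config('func_test', defaults={'auth_port': '8080'})
#   auth_host = conf.get('auth_host', '127.0.0.1')
#   # get_auth() below then exchanges these settings for a (token, account) pair.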
def get_auth(auth_host, auth_port, auth_url, user_name, user_key, tenant_name):
"""Authenticate"""
if auth_url.find('tokens') >= 0:
""" v2.0 authentication"""
conn = httplib.HTTPConnection(auth_host, auth_port)
headers = {'Accept': 'application/json',
'Content-Type': 'application/json'}
body = {}
body['auth'] = {
"passwordCredentials": {
"username": user_name,
"password": user_key,
},
"tenantName": tenant_name
}
conn.request('POST', auth_url,
json.dumps(body, indent=2), headers)
res = conn.getresponse()
if res.status != 200:
raise Exception('The authentication has failed')
data = res.read()
body = json.loads(data)
token = body.get('access').get('token').get('id')
endpoints = body.get('access').get('serviceCatalog')
for endpoint in endpoints:
if 'object-store' == endpoint.get('type'):
public_url = endpoint.get('endpoints')[0].get('publicURL')
parts = public_url.split('/')
account_id = parts[-1]
return token, account_id
else:
""" try the old way"""
conn = httplib.HTTPConnection(auth_host, auth_port)
headers = {'X-Storage-User': tenant_name + ':' + user_name,
'X-Storage-Pass': user_key}
conn.request('GET', auth_url, None, headers)
res = conn.getresponse()
if res.status != 200:
raise Exception('The authentication has failed')
token = res.getheader('X-Auth-Token')
public_url = res.getheader('X-Storage-Url')
parts = public_url.split('/')
return token, parts[-1]
| apache-2.0 | 5,300,052,521,575,340,000 | 33.571429 | 79 | 0.585675 | false |
amyxchen/openhtf | openhtf/util/functions.py | 1 | 1523 | # Copyright 2016 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for functions."""
import functools
import inspect
def CallOnce(func):
"""Decorate a function to only allow it to be called once.
Note that it doesn't make sense to only call a function once if it takes
arguments (use @functools.lru_cache for that sort of thing), so this only
works on callables that take no args.
"""
argspec = inspect.getargspec(func)
if argspec.args or argspec.varargs or argspec.keywords:
raise ValueError('Can only decorate functions with no args', func, argspec)
@functools.wraps(func)
def _Wrapper():
# If we haven't been called yet, actually invoke func and save the result.
if not _Wrapper.HasRun():
_Wrapper.MarkAsRun()
_Wrapper.return_value = func()
return _Wrapper.return_value
_Wrapper.has_run = False
_Wrapper.HasRun = lambda: _Wrapper.has_run
_Wrapper.MarkAsRun = lambda: setattr(_Wrapper, 'has_run', True)
return _Wrapper
| apache-2.0 | 8,318,662,720,419,666,000 | 32.844444 | 79 | 0.732108 | false |
penguinscontrol/Spinal-Cord-Modeling | Python/morphology_parser.py | 1 | 7351 | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 25 19:14:49 2016
@author: Radu
"""
from pyparsing import Word, nums, OneOrMore, Keyword, Literal, ZeroOrMore, Optional, Group
from string import lowercase
def print_for_loop(target_file):
def for_parseaction(origString, loc, tokens):
put_string = 'for ' + tokens[0] + ' in range(' + tokens[1] + ', ' + tokens[2] + '):\n\t'
target_file.write(put_string)
return for_parseaction
def update_current_section(target_file):
def update_cs_parseaction(origString, loc, tokens):
global current_section_name
current_section_name = 'self.'
if isinstance(tokens[0], str):
# single section
current_section_name += tokens[0]
elif isinstance(tokens[0], type(tokens)):
current_section_name += tokens[0][0] + '['
for a in range(1,len(tokens[0])):
current_section_name += tokens[0][a]
current_section_name += ']'
put_string = 'h.pt3dclear(sec = ' + current_section_name + ')\n'
target_file.write(put_string)
return update_cs_parseaction
def print_point_add(target_file):
def point_add_parseaction(origString, loc, tokens):
put_string = 'h.pt3dadd('
for a in range(len(tokens)):
if isinstance(tokens[a], str):
put_string += tokens[a]
elif isinstance(tokens[a], type(tokens)):
for b in range(len(tokens[a])):
put_string+= tokens[a][b]
put_string += ', '
put_string += 'sec = ' + current_section_name + ')\n'
target_file.write(put_string)
return point_add_parseaction
def print_point_style(target_file):
def point_style_parseaction(origString, loc, tokens):
put_string = 'h.pt3dstyle('
for a in range(len(tokens)):
if isinstance(tokens[a], str):
put_string += tokens[a]
elif isinstance(tokens[a], type(tokens)):
for b in range(len(tokens[a])):
put_string+= tokens[a][b]
put_string += ', '
put_string += 'sec = ' + current_section_name + ')\n'
target_file.write(put_string)
return point_style_parseaction
def print_create(target_file):
def create_parseaction(origString, loc, tokens):
for a in range(len(tokens)):
if isinstance(tokens[a], str):
# single section
put_string = 'self.' + tokens[a] + ' = h.Section(cell = self)\n'
elif isinstance(tokens[a], type(tokens)):
put_string = 'self.' + tokens[a][0] + ' = [h.Section(cell = self) for x in range(' + tokens[a][1]\
+ ')]\n'
target_file.write(put_string)
target_file.write('\n')
return create_parseaction
def connect_output_string(tokens):
if isinstance(tokens[0][0], str):
# tokens [0][0] is the name of the parent section
parent = tokens[0][0]
elif isinstance(tokens[0][0], type(tokens)):
parent = tokens[0][0][0] + '['
for a in range(1,len(tokens[0][0])):
parent += tokens[0][0][a]
parent += ']'
# tokens [0][1] is the location in the parent where we connect to
parent_loc = ''
for a in range(len(tokens[0][1])):
parent_loc += tokens[0][1][a]
if isinstance(tokens[1][0], str):
# tokens [0][0] is the name of the child section
child = tokens[1][0]
elif isinstance(tokens[1][0], type(tokens)):
child = tokens[1][0][0] + '['
for a in range(1,len(tokens[1][0])):
child += tokens[1][0][a]
child += ']'
# tokens [1][1] is the location in the child where we connect to
child_loc = ''
for a in range(len(tokens[1][1])):
child_loc += tokens[1][1][a]
put_string = 'self.' + parent + '.connect(' + 'self.' + child + ', ' + child_loc + ', ' + parent_loc + ')\n'
return put_string
def print_connect(target_file):
def connect_parseaction(origString, loc, tokens):
put_string = connect_output_string(tokens)
target_file.write(put_string)
return connect_parseaction
def print_geom_define(target_file):
def geom_define_parseaction(origString, loc, tokens):
target_file.write('geom_define\n')
target_file.write(tokens[0])
return geom_define_parseaction
# Resulting python file
filename = 'Mn_geometry_output3.py'
global current_section_name
current_section_name = ''
converted_file = open(filename, 'w')
# define lists of characters for a..z and 1..9
uppercase = lowercase.upper()
lowercaseplus = lowercase+('_')
lowercaseplus = lowercaseplus+(uppercase)
nonzero = ''.join([str(i) for i in range(1, 10)])
COMMA = Literal(',')
EQUALS = Literal('=')
MINUS = Literal('-')
PERIOD = Literal('.')
LCURL = Literal('{')
RCURL = Literal('}')
LBRACK = Literal('(')
RBRACK = Literal(')')
LSQUARE = Literal('[')
RSQUARE = Literal(']')
PTSCLEAR = Literal('{pt3dclear()').suppress()
PTSCLEARNL = Literal('{\npt3dclear()\n').suppress()
integer = Word(nums)
single_section = Word(lowercaseplus, min = 2)
single_section.setResultsName('SINGLE')
integer_var = Word(lowercase, exact = 1)
double = Group(Optional(MINUS) + integer + Optional(PERIOD + integer))
operand = integer ^ integer_var
operator = Word('+-*/', exact=1)
unaryoperation = operand
binaryoperation = operand + operator + operand
operation = unaryoperation ^ binaryoperation
array_section = Group(single_section + LSQUARE.suppress() + operation + RSQUARE.suppress())
array_section.setResultsName('ARRAY')
section = single_section ^ array_section
section_location = Group(section + LBRACK.suppress() + double + RBRACK.suppress())
create = Keyword('create').suppress() + section + ZeroOrMore(COMMA.suppress() + section)
create.setParseAction(print_create(converted_file))
connect = Keyword('connect').suppress() + section_location + COMMA.suppress() + section_location
connect.setParseAction(print_connect(converted_file))
for_loop = Keyword('for').suppress() + integer_var + EQUALS.suppress() + integer + COMMA.suppress() + integer
# NOTE TO FUTURE SELF: for loops can only have one line of code in this implementation
for_loop.setParseAction(print_for_loop(converted_file))
point_add = Literal('pt3dadd(').suppress() + double + COMMA.suppress() + double + COMMA.suppress() + double + COMMA.suppress() + double + RBRACK.suppress()
point_add.setParseAction(print_point_add(converted_file))
point_style = Literal('pt3dstyle(').suppress() + double + COMMA.suppress() + double + COMMA.suppress() + double + COMMA.suppress() + double + RBRACK.suppress()
point_style.setParseAction(print_point_style(converted_file))
geom_define_pre = section + (PTSCLEAR ^ PTSCLEARNL)
geom_define_body = OneOrMore(point_add ^ point_style) + RCURL.suppress()
geom_define_pre.setParseAction(update_current_section(converted_file))
geom_define = geom_define_pre + geom_define_body
expression = (connect ^ for_loop ^ geom_define ^ create)
codeblock = OneOrMore(expression)
test_str = 'Ia_node[0] {\npt3dclear()\n pt3dadd( 47, 76, 92.5, 3.6) }'
#file_to_parse = open('../../tempdata/Ia_geometry')
file_to_parse = open('motoneuron_geometry_preparser.txt')
tokens = codeblock.parseString(file_to_parse.read())
#tokens = codeblock.parseString(test_str)
| gpl-2.0 | 7,230,308,802,427,356,000 | 36.697436 | 161 | 0.629846 | false |
XandyWang/PythonDemo | baseTest/GuiDemo.py | 1 | 2027 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from Tkinter import *
import tkMessageBox
import tkFileDialog
import xlrd
import os
class MainPanel(Frame):
def __init__(self,master=None):
Frame.__init__(self,master)
self.pack(expand = 1, fill='both',padx = 5, pady = 5)
self.createWidgets()
def createWidgets(self):
padxPx = 10
padyPx = 10
        self.dirLabel = Label(self, text=u'工程目录', font=18)  # label text: "Project directory"
self.dirLabel.grid(row = 0)
self.nameEntry = Entry(self,font = 18,bd = 2, fg = 'red')
self.nameEntry.grid(row = 0 , column = 1,columnspan=2)
        self.quiteButton = Button(self, text=u'选择目录', command=self.selectExcel, relief=GROOVE)  # button text: "Choose directory"
self.quiteButton.grid(row = 0 , column = 3)
def selectExcel(self):
rootDirPath = os.path.expanduser('~')
fileTyps = [('xlx / xlsx files', '.xl*'),('all files', '.*')]
file = tkFileDialog.askopenfilename(initialdir = rootDirPath , filetypes = fileTyps)
# file = u'/home/wangxiaoyang/share/FineOS应用支持的语言表_QL613.xlsx'
print file
if(len(file) > 0) :
data = xlrd.open_workbook(file)
sheets = data.sheets();
nTable = len( sheets )
for index in range(nTable) :
table = data.sheet_by_index(index)
nRows = table.nrows
nCols = table.ncols
for row in range(nRows) :
for col in range(nCols) :
print "row_%d col_%d : %s" % ( row , col, table.cell(row,col).value )
rt = Tk()
# update window ,must do
rt.update()
# get screen width and height
scnWidth,scnHeight = rt.maxsize()
# get current width
rWidth = 0.5 * scnWidth
# get current height
rHeight = 0.5 * scnHeight
# now generate configuration information
tmpcnf = '%dx%d+%d+%d' % (rWidth, rHeight, (scnWidth - rWidth) / 2, (scnHeight - rHeight) / 2)
rt.geometry(tmpcnf)
rt.title('Hello GUI')
mainPanel = MainPanel(rt)
rt.mainloop() | mit | -5,904,888,387,031,046,000 | 32.830508 | 94 | 0.592982 | false |
tktrungna/leetcode | Python/verify-preorder-serialization-binary-tree.py | 1 | 2175 | """
QUESTION:
One way to serialize a binary tree is to use pre-oder traversal. When we encounter a non-null node, we record the
node's value. If it is a null node, we record using a sentinel value such as #.
_9_
/ \
3 2
/ \ / \
4 1 # 6
/ \ / \ / \
# # # # # #
For example, the above binary tree can be serialized to the string "9,3,4,#,#,1,#,#,2,#,6,#,#", where # represents a
null node.
Given a string of comma separated values, verify whether it is a correct preorder traversal serialization of a binary
tree. Find an algorithm without reconstructing the tree.
Each comma separated value in the string must be either an integer or a character '#' representing null pointer.
You may assume that the input format is always valid, for example it could never contain two consecutive commas such as
"1,,3".
Example 1:
"9,3,4,#,#,1,#,#,2,#,6,#,#"
Return true
Example 2:
"1,#"
Return false
Example 3:
"9,#,#,1"
Return false
ANSWER:
1) Using a stack: repeatedly collapse "number,#,#" into a single "#"; the input is valid iff exactly one "#" remains. O(n)
2) Counting free slots: start with one slot, each value consumes a slot and each non-# value opens two more;
   the count must never drop below zero and must end at exactly zero. O(n) time, O(1) space
"""
class Solution(object):
def isValidSerialization(self, preorder):
"""
:type preorder: str
:rtype: bool
"""
st = []
for n in preorder.split(','):
st.append(n)
while len(st) >= 3 and st[-1] == st[-2] == '#' and st[-3] != '#':
st.pop()
st.pop()
st.pop()
st.append('#')
return len(st) == 1 and st[-1] == '#'
def isValidSerialization_2(self, preorder):
diff = 1
for n in preorder.split(','):
diff -= 1
if diff < 0:
return False
if n != '#':
diff += 2
return diff == 0
if __name__ == '__main__':
print Solution().isValidSerialization("9,3,4,#,#,1,#,#,2,#,6,#,#")
print Solution().isValidSerialization("9,#,#,1")
print Solution().isValidSerialization("1,#")
print Solution().isValidSerialization("")
print Solution().isValidSerialization_2("9,3,4,#,#,1,#,#,2,#,6,#,#")
print Solution().isValidSerialization_2("9,#,#,1")
print Solution().isValidSerialization_2("1,#")
print Solution().isValidSerialization_2("1") | mit | 3,118,352,650,100,566,000 | 28.405405 | 119 | 0.569655 | false |
struqt/invar | invar-example/target/generated-sources/example/python/TestAbcConflict.py | 1 | 3802 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ===------------------------------* Python *------------------------------===
# THIS FILE IS GENERATED BY INVAR. DO NOT EDIT !!!
# ===------------------------------------------------------------------------===
from TestAbcGender import Gender
try:
from cStringIO import StringIO
except:
from StringIO import StringIO
from InvarCodec import DataWriter
from InvarCodec import DataReader
class TestAbcConflict(object):
"""名字冲突的类型"""
CRC32_ = 0xCC7A29B9
SIZE_ = 13
__slots__ = (
'_key',
'_text',
'_bytes',
'_hotfix')
#__slots__
def __init__(self):
self._key = Gender.NONE
self._text = ''
self._bytes = []
self._hotfix = None
#def __init__
def __str__(self):
s = StringIO()
s.write(u'{')
s.write(u' ')
s.write(u'TestAbcConflict')
s.write(u',')
s.write(u' ')
s.write(u'key')
s.write(u':')
s.write(unicode(self._key))
s.write(u',')
s.write(u' ')
s.write(u'text')
s.write(u':')
s.write(u'"')
s.write(self._text)
s.write(u'"')
s.write(u',')
s.write(u' ')
s.write(u'bytes')
s.write(u':')
s.write(u'(')
s.write(str(len(self._bytes)))
s.write(u')')
s.write(u',')
s.write(u' ')
s.write(u'hotfix')
s.write(u':')
if self._hotfix is None:
s.write(u'null')
else:
s.write(u'[')
s.write(str(len(self._hotfix)))
s.write(u']')
s.write(u' ')
s.write(u'}')
result = s.getvalue()
s.close()
return result
#def __str__
def __len__(self):
size = TestAbcConflict.SIZE_
size += len(self._text)
if len(self._bytes) > 0:
size += len(self._bytes) * 1
if self._hotfix is not None:
size += 4
for (k1,v1) in self._hotfix.items():
size += len(k1)
size += len(v1)
return size
#def __len__
    def read(self, r):
self._key = r.readInt32()
self._text = r.readString()
lenBytes = r.readUInt32()
num = 0
while num < lenBytes:
num += 1
n1 = r.readInt8()
self._bytes.append(n1)
hotfixExists = r.readInt8()
if 0x01 == hotfixExists:
if self._hotfix == None:
self._hotfix = dict()
lenHotfix = r.readUInt32()
num = 0
while num < lenHotfix:
num += 1
k1 = r.readString()
v1 = r.readString()
self._hotfix[k1] = v1
elif 0x00 == hotfixExists:
self._hotfix = None
else:
raise InvarError(498, 'Protoc read error: The value of \'hotfixExists\' is invalid.')
#def read
    def write(self, w):
w.writeInt32(self._key)
w.writeString(self._text)
w.writeUInt32(len(self._bytes))
for n1 in self._bytes:
w.writeInt8(n1)
if self._hotfix != None:
w.writeUInt8(0x01)
w.writeUInt32(len(self._hotfix))
for (k1,v1) in self._hotfix.items():
w.writeString(k1)
w.writeString(v1)
else:
w.writeUInt8(0x00)
#def write
#class TestAbcConflict
if '__main__' == __name__:
print('dir(TestAbcConflict()) =>\n' + '\n'.join(dir(TestAbcConflict())))
print('TestAbcConflict.__doc__ => ' + TestAbcConflict.__doc__)
print('TestAbcConflict.__len__ => ' + str(len(TestAbcConflict())))
print('TestAbcConflict.__str__ => ' + str(TestAbcConflict()))
| mit | 3,564,602,473,064,583,700 | 26.649635 | 97 | 0.457761 | false |
AechPro/Machine-Learning | Partners Healthcare/2016 Breast Cancer/dev/ReconNet/util/Intensity_Converter.py | 1 | 3553 | import numpy as np
from openpyxl import load_workbook
from scipy.optimize import fsolve
workingDirectory = "C:/Users/Matt/Desktop/abs to conc/1. Abs to Conc (for Matt)"
def padConcatenate(matrices,shapes):
largestAxis = np.max(shapes)
for matrix in matrices:
if len(matrix) != largestAxis:
for _ in range(abs(largestAxis - len(matrix))):
matrix.append([np.nan for __ in range(len(matrix[0]))])
concatenatedMatrix = np.concatenate(matrices,axis=1)
return concatenatedMatrix
def load_values(directory, workbooks):
matrices = []
for entry in workbooks:
workbook = load_workbook(''.join([directory, '/', '0. BT474_1 (abs).xlsx']))
sheet = workbook[entry]
matrix = []
for row in sheet.rows:
matrix.append([])
for cell in row:
if cell.value == None:
matrix[len(matrix) - 1].append(np.nan)
else:
matrix[len(matrix) - 1].append(cell.value)
matrices.append(matrix)
return matrices
colors = ["dual", "red", "blue", "uns"]
matrices = load_values(workingDirectory,colors)
matrixLengths = np.asarray([(len(i[0]), len(i)) for i in matrices])
paddedMatrices = padConcatenate(matrices,matrixLengths)
matrices = np.asarray(matrices)
cell_bkgd_4_avg = np.nanmean(matrices[-1][:,0])
cell_bkgd_4_SD = np.nanstd(matrices[-1][:,0])
cell_bkgd_6_avg = np.nanmean(matrices[-1][:,1])
cell_bkgd_6_SD = np.nanstd(matrices[-1][:,1])
SD_multi = 1
cell_bkgd_4_cutoff = cell_bkgd_4_avg + SD_multi * cell_bkgd_4_SD
cell_bkgd_6_cutoff = cell_bkgd_6_avg + SD_multi * cell_bkgd_6_SD
absNoBackground = paddedMatrices.copy()
for i in range(0, 8, 2):  # step over the four (470 nm, 625 nm) column pairs
absNoBackground[:,i] = paddedMatrices[:,i] - cell_bkgd_4_cutoff
absNoBackground[:,i+1] = paddedMatrices[:,i+1] - cell_bkgd_6_cutoff
"""
% Convert Abs to Conc
for i = 1:4
for j = 1:abs_length(i)
abs = abs_all_NObkgd(j,i+(i-1):i+(i-1)+1);
% abs = abs_all(j,i+(i-1):i+(i-1)+1);
if abs(1) ~= 'NaN'
x0 = [0,1];
x = fsolve(@(x)abs2conc(x,abs),x0);
conc_all(j,i+(i-1):i+(i-1)+1) = x;
end
end
end
csvwrite('conc_all.xlsx',conc_all);
return
function F = abs2conc(x,abs)
% fitted curves for red and blue dyes
% Y=Y0 + (Plateau-Y0)*(1-exp(-K*x))
% [Red_470, Red_625, Blue_470, Blue_625] HRP 03-20-17
Y0 =[0.04506 0.02659 0.0511 0.0199];
P = [0.719 0.3026 0.2012 0.7079];
K = [3.597 4.145 1.474 4.393];
F(1) = abs(1) - (Y0(1) + (P(1)-Y0(1))*(1-exp(-K(1)*x(1)))) - (Y0(3) + (P(3)-Y0(3))*(1-exp(-K(3)*x(2)))) ;
F(2) = abs(2) - (Y0(2) + (P(2)-Y0(2))*(1-exp(-K(2)*x(1)))) - (Y0(4) + (P(4)-Y0(4))*(1-exp(-K(4)*x(2)))) ;
return
"""
def F(x,abs):
Y0 = [0.04506,0.02659,0.0511,0.0199]
P = [0.719,0.3026,0.2012,0.7079]
K = [3.597,4.145,1.474,4.393]
out = [0,0]
out[0] = abs[0][0] - (Y0[0] + (P[0]-Y0[0])*(1-np.exp(-K[0]*x[0]))) - (Y0[2] + (P[2] - Y0[2])*(1-np.exp(-K[2]*x[1])))
out[1] = abs[0][1] - (Y0[1] + (P[1]-Y0[1])*(1-np.exp(-K[1]*x[0]))) - (Y0[3] + (P[3] - Y0[3])*(1-np.exp(-K[3]*x[1])))
return out
for i in range(4):
for j in range(len(matrices[i])):
        abs = absNoBackground[j, 2 * i:2 * i + 2]  # columns 2i and 2i+1 hold the (470, 625) pair for color i
if len(abs)>=1:
if not np.isnan(abs[0]):
x0 = np.asarray([0,1])
x = fsolve(F,x0,args=[abs])
print(x) | apache-2.0 | 6,532,349,445,452,838,000 | 35.03125 | 120 | 0.532789 | false |
JamesLinEngineer/RKMC | addons/plugin.audio.jambmc/addon.py | 1 | 44439 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Tristan Fischer ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import xbmcvfs # FIXME: Import from xbmcswift if fixed upstream
from xbmcswift2 import Plugin, xbmcgui, NotFoundException, xbmc
from resources.lib.api import JamendoApi, ApiError, ConnectionError
from resources.lib.geolocate import get_location, QuotaReached
from resources.lib.downloader import JamendoDownloader
STRINGS = {
# Root menu entries
'discover': 30000,
'search': 30001,
'show_tracks': 30002,
'show_albums': 30003,
'show_artists': 30004,
'show_radios': 30005,
'show_playlists': 30006,
'search_tracks': 30007,
'search_albums': 30008,
'search_artists': 30009,
'search_playlists': 30010,
'show_history': 30011,
'show_downloaded_tracks': 30012,
'show_mixtapes': 30013,
'show_featured_tracks': 30014,
'show_user_artists': 30015,
'show_user_albums': 30016,
'show_user_tracks': 30017,
'show_user_account': 30018,
'show_user_playlists': 30019,
'show_near_artists': 30020,
'show_downloaded_albums': 30021,
# Misc strings
'page': 30025,
'language': 30026,
'instruments': 30027,
'vartags': 30028,
# Context menu
'album_info': 30030,
'song_info': 30031,
'show_tracks_in_this_album': 30032,
'show_albums_by_this_artist': 30033,
'show_similar_tracks': 30034,
'addon_settings': 30035,
'download_track': 30036,
'download_album': 30037,
# Dialogs
'search_heading_album': 30040,
'search_heading_artist': 30041,
'search_heading_tracks': 30042,
'search_heading_playlist': 30043,
'no_download_path': 30044,
'want_set_now': 30045,
'choose_download_folder': 30046,
'enter_username': 30047,
'select_user': 30048,
'no_username_set': 30049,
'geolocating': 30050,
'will_send_one_request_to': 30051,
'freegeoip_net': 30052,
# Error dialogs
'connection_error': 30060,
'api_error': 30061,
'api_returned': 30062,
'try_again_later': 30063,
'check_network_or': 30064,
'try_again_later': 30065,
# Notifications
'download_suceeded': 30070,
'history_empty': 30071,
'downloads_empty': 30072,
# Mixtapes
'mixtape_name': 30090,
'delete_mixtape_head': 30091,
'are_you_sure': 30092,
'add_to_new_mixtape': 30093,
'add_to_mixtape_s': 30094,
'del_from_mixtape_s': 30095,
'select_mixtape': 30096,
'add_mixtape': 30097,
'add_del_track_to_mixtape': 30098,
'delete_mixtape': 30099,
'rename_mixtape': 30124,
# Sort methods
'sort_method_default': 30100,
'sort_method_buzzrate': 30101,
'sort_method_downloads_week': 30102,
'sort_method_downloads_month': 30103,
'sort_method_downloads_total': 30104,
'sort_method_joindate_asc': 30105,
'sort_method_joindate_desc': 30107,
'sort_method_listens_week': 30108,
'sort_method_listens_month': 30109,
'sort_method_listens_total': 30110,
'sort_method_name': 30111,
'sort_method_popularity_week': 30112,
'sort_method_popularity_month': 30113,
'sort_method_popularity_total': 30114,
'sort_method_releasedate_asc': 30115,
'sort_method_releasedate_desc': 30116,
# Tags
'current_tags': 30120,
'tag_type_genres': 30121,
'tag_type_instruments': 30122,
'tag_type_moods': 30123,
}
class Plugin_patched(Plugin):
def _dispatch(self, path):
for rule in self._routes:
try:
view_func, items = rule.match(path)
except NotFoundException:
continue
self._request.view = view_func.__name__ # added
self._request.view_params = items # added
listitems = view_func(**items)
if not self._end_of_directory and self.handle >= 0:
if listitems is None:
self.finish(succeeded=False)
else:
listitems = self.finish(listitems)
return listitems
raise NotFoundException('No matching view found for %s' % path)
plugin = Plugin_patched()
api = JamendoApi(
client_id='de0f381a',
limit=plugin.get_setting('limit', int),
image_size=plugin.get_setting(
'image_size',
choices=('big', 'medium', 'small')
),
)
########################### Static Views ######################################
@plugin.route('/')
def show_root_menu():
fix_xbmc_music_library_view()
items = [
{'label': _('discover'),
'path': plugin.url_for(endpoint='show_discover_root'),
'thumbnail': 'DefaultMusicCompilations.png'},
{'label': _('search'),
'path': plugin.url_for(endpoint='show_search_root'),
'thumbnail': 'DefaultMusicVideos.png'},
{'label': _('show_radios'),
'path': plugin.url_for(endpoint='show_radios'),
'thumbnail': 'DefaultMusicGenres.png'},
{'label': _('show_history'),
'path': plugin.url_for(endpoint='show_history'),
'thumbnail': 'DefaultMusicYears.png'},
{'label': _('show_downloaded_tracks'),
'path': plugin.url_for(endpoint='show_downloaded_tracks'),
'thumbnail': 'DefaultMusicPlaylists.png'},
{'label': _('show_downloaded_albums'),
'path': plugin.url_for(endpoint='show_downloaded_albums'),
'thumbnail': 'DefaultMusicPlaylists.png'},
{'label': _('show_mixtapes'),
'path': plugin.url_for(endpoint='show_mixtapes'),
'thumbnail': 'DefaultMusicSongs.png'},
{'label': _('show_featured_tracks'),
'path': plugin.url_for(endpoint='show_featured_tracks'),
'thumbnail': 'DefaultMusicAlbums.png'},
{'label': _('show_user_account'),
'path': plugin.url_for(endpoint='show_user_root'),
'thumbnail': 'DefaultAddonMusic.png'},
]
return add_static_items(items)
@plugin.route('/search/')
def show_search_root():
items = [
{'label': _('search_tracks'),
'path': plugin.url_for(endpoint='search_tracks'),
'thumbnail': 'DefaultMusicSongs.png'},
{'label': _('search_albums'),
'path': plugin.url_for(endpoint='search_albums'),
'thumbnail': 'DefaultMusicAlbums.png'},
{'label': _('search_artists'),
'path': plugin.url_for(endpoint='search_artists'),
'thumbnail': 'DefaultMusicArtists.png'},
{'label': _('search_playlists'),
'path': plugin.url_for(endpoint='search_playlists'),
'thumbnail': 'DefaultMusicPlaylists.png'},
]
return add_static_items(items)
@plugin.route('/discover/')
def show_discover_root():
items = [
{'label': _('show_tracks'),
'path': plugin.url_for(endpoint='show_tracks'),
'thumbnail': 'DefaultMusicSongs.png'},
{'label': _('show_albums'),
'path': plugin.url_for(endpoint='show_albums'),
'thumbnail': 'DefaultMusicAlbums.png'},
{'label': _('show_artists'),
'path': plugin.url_for(endpoint='show_artists'),
'thumbnail': 'DefaultMusicArtists.png'},
{'label': _('show_playlists'),
'path': plugin.url_for(endpoint='show_playlists'),
'thumbnail': 'DefaultMusicPlaylists.png'},
{'label': _('show_near_artists'),
'path': plugin.url_for(endpoint='show_near_artists'),
'thumbnail': 'DefaultMusicArtists.png'},
]
return add_static_items(items)
@plugin.route('/user/')
def show_user_root():
items = [
{'label': _('show_user_artists'),
'path': plugin.url_for(endpoint='show_user_artists'),
'thumbnail': 'DefaultMusicArtists.png'},
{'label': _('show_user_albums'),
'path': plugin.url_for(endpoint='show_user_albums'),
'thumbnail': 'DefaultMusicAlbums.png'},
{'label': _('show_user_tracks'),
'path': plugin.url_for(endpoint='show_user_tracks'),
'thumbnail': 'DefaultMusicSongs.png'},
{'label': _('show_user_playlists'),
'path': plugin.url_for(endpoint='show_user_playlists'),
'thumbnail': 'DefaultMusicPlaylists.png'},
]
return add_static_items(items)
########################### Dynamic Views #####################################
@plugin.route('/albums/')
def show_albums():
page = int(get_args('page', 1))
sort_method = get_args('sort_method', 'popularity_month')
albums = get_cached(api.get_albums, page=page, sort_method=sort_method)
items = format_albums(albums)
items.append(get_sort_method_switcher_item('albums', sort_method))
items.extend(get_page_switcher_items(len(items)))
return add_items(items)
@plugin.route('/albums/<artist_id>/')
def show_albums_by_artist(artist_id):
page = int(get_args('page', 1))
albums = get_cached(api.get_albums, page=page, artist_id=artist_id)
items = format_albums(albums)
items.extend(get_page_switcher_items(len(items)))
return add_items(items)
@plugin.route('/artists/')
def show_artists():
page = int(get_args('page', 1))
sort_method = get_args('sort_method', 'popularity_month')
artists = get_cached(api.get_artists, page=page, sort_method=sort_method)
items = format_artists(artists)
items.append(get_sort_method_switcher_item('artists', sort_method))
items.extend(get_page_switcher_items(len(items)))
return add_items(items)
@plugin.route('/artists/near/')
def show_near_artists():
lat_long = plugin.get_setting('lat_long', str)
while not lat_long:
confirmed = xbmcgui.Dialog().yesno(
_('geolocating'),
_('will_send_one_request_to'),
_('freegeoip_net'),
_('are_you_sure')
)
if not confirmed:
return
try:
location = get_location()
except QuotaReached:
plugin.notify(_('try_again_later'))
return
lat_long = '%s_%s' % (location['latitude'], location['longitude'])
plugin.set_setting('lat_long', lat_long)
artists = get_cached(api.get_artists_by_location, coords=lat_long)
items = format_artists_location(artists)
return add_items(items)
@plugin.route('/playlists/')
def show_playlists():
page = int(get_args('page', 1))
playlists = get_cached(api.get_playlists, page=page)
items = format_playlists(playlists)
items.extend(get_page_switcher_items(len(items)))
return add_items(items, same_cover=True)
@plugin.route('/radios/')
def show_radios():
page = int(get_args('page', 1))
radios = get_cached(api.get_radios, page=page)
items = format_radios(radios)
items.extend(get_page_switcher_items(len(items)))
return add_items(items)
@plugin.route('/tracks/')
def show_tracks():
page = int(get_args('page', 1))
sort_method = get_args('sort_method', 'popularity_month')
tags = get_args('tags')
tracks = get_cached(
api.get_tracks,
page=page,
sort_method=sort_method,
tags=tags
)
items = format_tracks(tracks)
items.append(get_sort_method_switcher_item('tracks', sort_method))
items.append(get_tag_filter_item())
items.extend(get_page_switcher_items(len(items)))
return add_items(items)
@plugin.route('/tracks/album/<album_id>/')
def show_tracks_in_album(album_id):
tracks = get_cached(api.get_tracks, album_id=album_id)
items = format_tracks(tracks)
items.extend(get_page_switcher_items(len(items)))
return add_items(items, same_cover=True)
@plugin.route('/tracks/featured/')
def show_featured_tracks():
page = int(get_args('page', 1))
sort_method = 'releasedate_desc'
tracks = get_cached(
api.get_tracks,
page=page,
sort_method=sort_method,
featured=True
)
items = format_tracks(tracks)
items.extend(get_page_switcher_items(len(items)))
return add_items(items)
@plugin.route('/tracks/playlist/<playlist_id>/')
def show_tracks_in_playlist(playlist_id):
playlist, tracks = get_cached(
api.get_playlist_tracks,
playlist_id=playlist_id
)
items = format_playlist_tracks(playlist, tracks)
items.extend(get_page_switcher_items(len(items)))
return add_items(items, same_cover=True)
@plugin.route('/tracks/similar/<track_id>/')
def show_similar_tracks(track_id):
page = int(get_args('page', 1))
tracks = get_cached(api.get_similar_tracks, track_id=track_id, page=page)
items = format_similar_tracks(tracks)
items.extend(get_page_switcher_items(len(items)))
return add_items(items)
############################# Search Views ####################################
@plugin.route('/albums/search/')
def search_albums():
query = get_args('input') or plugin.keyboard(
heading=_('search_heading_album')
)
if query:
albums = get_cached(api.get_albums, search_terms=query)
items = format_albums(albums)
return add_items(items)
@plugin.route('/artists/search/')
def search_artists():
query = get_args('input') or plugin.keyboard(
heading=_('search_heading_artist')
)
if query:
artists = api.get_artists(search_terms=query)
items = format_artists(artists)
return add_items(items)
@plugin.route('/playlists/search/')
def search_playlists():
query = get_args('input') or plugin.keyboard(
heading=_('search_heading_playlist')
)
if query:
playlists = api.get_playlists(search_terms=query)
items = format_playlists(playlists)
return add_items(items, same_cover=True)
@plugin.route('/tracks/search/')
def search_tracks():
query = get_args('input') or plugin.keyboard(
heading=_('search_heading_tracks')
)
if query:
tracks = api.search_tracks(search_terms=query)
items = format_tracks(tracks)
return add_items(items)
############################ Jamendo Views ####################################
@plugin.route('/user/albums/')
def show_user_albums():
user_id = get_user_account()
if user_id:
page = int(get_args('page', 1))
albums = api.get_user_albums(user_id=user_id, page=page)
items = format_albums(albums)
items.extend(get_page_switcher_items(len(items)))
return add_items(items)
@plugin.route('/user/artists/')
def show_user_artists():
user_id = get_user_account()
if user_id:
page = int(get_args('page', 1))
artists = api.get_user_artists(user_id=user_id, page=page)
items = format_artists(artists)
items.extend(get_page_switcher_items(len(items)))
return add_items(items)
@plugin.route('/user/playlists/')
def show_user_playlists():
user_id = get_user_account()
if user_id:
playlists = api.get_playlists(user_id=user_id)
items = format_playlists(playlists)
return add_items(items, same_cover=True)
@plugin.route('/user/set_user_account/')
def set_user_account():
query = get_args('input') or plugin.keyboard(
heading=_('enter_username')
)
if query:
users = api.get_users(search_terms=query)
if users:
selected = xbmcgui.Dialog().select(
_('select_user'), [u['name'] for u in users]
)
if selected >= 0:
user = users[selected]
plugin.set_setting('user_name', user['name'])
plugin.set_setting('user_id', user['id'])
@plugin.route('/user/tracks/')
def show_user_tracks():
user_id = get_user_account()
if user_id:
page = int(get_args('page', 1))
tracks = api.get_user_tracks(user_id=user_id, page=page)
items = format_tracks(tracks)
items.extend(get_page_switcher_items(len(items)))
return add_items(items)
############################## Downloads ######################################
@plugin.route('/downloads/albums/')
def show_downloaded_albums():
downloads = plugin.get_storage('downloaded_albums')
if downloads.items():
albums = [t['data'] for t in downloads.itervalues()]
items = format_downloaded_albums(albums)
return add_items(items)
plugin.notify(_('downloads_empty'))
@plugin.route('/downloads/albums/<album_id>/')
def show_downloaded_album_tracks(album_id):
downloads = plugin.get_storage('downloaded_albums')
album = downloads[album_id]
tracks = [t['data'] for t in album['tracks'].itervalues()]
items = format_tracks(tracks)
return add_items(items, same_cover=True)
@plugin.route('/downloads/tracks/')
def show_downloaded_tracks():
downloads = plugin.get_storage('downloaded_tracks')
if downloads.items():
tracks = [t['data'] for t in downloads.itervalues()]
items = format_tracks(tracks)
return add_items(items)
plugin.notify(_('downloads_empty'))
############################### History #######################################
@plugin.route('/history/')
def show_history():
history = plugin.get_storage('history')
tracks = history.get('items', [])
if tracks:
items = format_tracks(reversed(tracks))
return add_items(items)
plugin.notify(_('history_empty'))
############################## Mixtapes #######################################
@plugin.route('/mixtapes/')
def show_mixtapes():
mixtapes = plugin.get_storage('mixtapes')
items = format_mixtapes(mixtapes)
items.append(get_add_mixtape_item())
return add_static_items(items)
@plugin.route('/mixtapes/add')
def add_mixtape(return_name=False):
name = get_args('input') or plugin.keyboard(
heading=_('mixtape_name')
)
if name:
mixtapes = plugin.get_storage('mixtapes')
if not name in mixtapes:
mixtapes[name] = []
mixtapes.sync()
if return_name:
return name
@plugin.route('/mixtapes/del/<mixtape_id>')
def del_mixtape(mixtape_id):
mixtapes = plugin.get_storage('mixtapes')
confirmed = xbmcgui.Dialog().yesno(
_('delete_mixtape_head'),
_('are_you_sure')
)
if confirmed and mixtape_id in mixtapes:
del mixtapes[mixtape_id]
mixtapes.sync()
_refresh_view()
@plugin.route('/mixtapes/rename/<mixtape_id>')
def rename_mixtape(mixtape_id):
mixtapes = plugin.get_storage('mixtapes')
mixtape = mixtapes.pop(mixtape_id)
new_mixtape_id = plugin.keyboard(
heading=_('mixtape_name'),
default=mixtape_id
)
mixtapes[new_mixtape_id] = mixtape
mixtapes.sync()
_refresh_view()
@plugin.route('/mixtapes/add/<track_id>')
def add_del_track_to_mixtape(track_id):
mixtapes = plugin.get_storage('mixtapes')
items = [{
'label':_('add_to_new_mixtape'),
}]
for (mixtape_id, mixtape) in mixtapes.iteritems():
track_ids = [t['id'] for t in mixtape]
if track_id in track_ids:
items.append({
'label': _('del_from_mixtape_s') % mixtape_id.decode('utf-8'),
'action': 'del',
'mixtape_id': mixtape_id
})
else:
items.append({
'label': _('add_to_mixtape_s') % mixtape_id.decode('utf-8'),
'action': 'add',
'mixtape_id': mixtape_id
})
selected = xbmcgui.Dialog().select(
_('select_mixtape'), [i['label'] for i in items]
)
if selected == 0:
mixtape_id = add_mixtape(return_name=True)
if mixtape_id:
add_track_to_mixtape(mixtape_id, track_id)
elif selected > 0:
action = items[selected]['action']
mixtape_id = items[selected]['mixtape_id']
if action == 'add':
add_track_to_mixtape(mixtape_id, track_id)
elif action == 'del':
del_track_from_mixtape(mixtape_id, track_id)
@plugin.route('/mixtapes/<mixtape_id>/')
def show_mixtape(mixtape_id):
mixtapes = plugin.get_storage('mixtapes')
tracks = mixtapes[mixtape_id]
items = format_tracks(tracks)
return add_items(items)
@plugin.route('/mixtapes/<mixtape_id>/add/<track_id>')
def add_track_to_mixtape(mixtape_id, track_id):
mixtapes = plugin.get_storage('mixtapes')
track = get_cached(api.get_track, track_id)
mixtapes[mixtape_id].append(track)
mixtapes.sync()
@plugin.route('/mixtapes/<mixtape_id>/del/<track_id>')
def del_track_from_mixtape(mixtape_id, track_id):
mixtapes = plugin.get_storage('mixtapes')
mixtapes[mixtape_id] = [
t for t in mixtapes[mixtape_id]
if not t['id'] == track_id
]
mixtapes.sync()
########################### Callback Views ####################################
@plugin.route('/sort_methods/<entity>/')
def show_sort_methods(entity):
sort_methods = api.get_sort_methods(entity)
items = format_sort_methods(sort_methods, entity)
return add_static_items(items)
@plugin.route('/tracks/tags/')
def show_tags():
tags = api.get_tags()
items = format_tags(tags)
return add_static_items(items)
############################ Action Views #####################################
@plugin.route('/download/track/<track_id>')
def download_track(track_id):
download_path = get_download_path('tracks_download_path')
if not download_path:
return
show_progress = plugin.get_setting('show_track_download_progress', bool)
downloader = JamendoDownloader(api, download_path, show_progress)
formats = ('mp3', 'ogg', 'flac')
audioformat = plugin.get_setting('download_format', choices=formats)
include_cover = plugin.get_setting('download_track_cover', bool)
tracks = downloader.download_tracks([track_id], audioformat, include_cover)
if tracks:
downloaded_tracks = plugin.get_storage('downloaded_tracks')
downloaded_tracks.update(tracks)
downloaded_tracks.sync()
plugin.notify(msg=_('download_suceeded'))
@plugin.route('/download/album/<album_id>')
def download_album(album_id):
download_path = get_download_path('albums_download_path')
if not download_path:
return
show_progress = plugin.get_setting('show_album_download_progress', bool)
downloader = JamendoDownloader(api, download_path, show_progress)
formats = ('mp3', 'ogg', 'flac')
audioformat = plugin.get_setting('download_format', choices=formats)
include_cover = plugin.get_setting('download_album_cover', bool)
album = downloader.download_album(album_id, audioformat, include_cover)
if album:
downloaded_albums = plugin.get_storage('downloaded_albums')
downloaded_albums.update(album)
downloaded_albums.sync()
plugin.notify(msg=_('download_suceeded'))
@plugin.route('/play/radio/<radio_id>')
def play_radio(radio_id):
stream_url = api.get_radio_url(radio_id)
return plugin.set_resolved_url(stream_url)
@plugin.route('/play/track/<track_id>')
def play_track(track_id):
add_track_to_history(track_id)
track_url = get_downloaded_track(track_id)
if not track_url:
formats = ('mp3', 'ogg')
audioformat = plugin.get_setting('playback_format', choices=formats)
track_url = api.get_track_url(track_id, audioformat)
return plugin.set_resolved_url(track_url)
@plugin.route('/settings')
def open_settings():
plugin.open_settings()
############################# Formaters #######################################
def format_albums(albums):
plugin.set_content('albums')
items = [{
'label': u'%s - %s' % (album['artist_name'], album['name']),
'info': {
'count': i + 2,
'artist': album['artist_name'],
'album': album['name'],
'year': int(album.get('releasedate', '0-0-0').split('-')[0]),
},
'context_menu': context_menu_album(
artist_id=album['artist_id'],
album_id=album['id'],
),
'replace_context_menu': True,
'thumbnail': album['image'],
'path': plugin.url_for(
endpoint='show_tracks_in_album',
album_id=album['id']
)
} for i, album in enumerate(albums)]
return items
def format_artists(artists):
plugin.set_content('artists')
items = [{
'label': artist['name'],
'info': {
'count': i + 2,
'artist': artist['name'],
},
'context_menu': context_menu_artist(artist['id']),
'replace_context_menu': True,
'thumbnail': get_artist_image(artist['image']),
'path': plugin.url_for(
endpoint='show_albums_by_artist',
artist_id=artist['id'],
)
} for i, artist in enumerate(artists)]
return items
def format_artists_location(artists):
plugin.set_content('artists')
items = [{
'label': u'%s (%s - %s)' % (
artist['name'],
artist['locations'][0]['country'],
artist['locations'][0]['city'],
),
'info': {
'count': i + 2,
'artist': artist['name'],
},
'context_menu': context_menu_artist(artist['id']),
'replace_context_menu': True,
'thumbnail': get_artist_image(artist['image']),
'path': plugin.url_for(
endpoint='show_albums_by_artist',
artist_id=artist['id'],
)
} for i, artist in enumerate(artists)]
return items
def format_comment(musicinfo):
return '[CR]'.join((
'[B]%s[/B]: %s' % (
_('language'),
musicinfo['lang']
),
'[B]%s[/B]: %s' % (
_('instruments'),
', '.join(musicinfo['tags']['instruments'])
),
'[B]%s[/B]: %s' % (
_('vartags'),
', '.join(musicinfo['tags']['vartags'])
),
))
def format_downloaded_albums(albums):
plugin.set_content('albums')
items = [{
'label': u'%s - %s' % (album['artist_name'], album['name']),
'info': {
'count': i + 2,
'artist': album['artist_name'],
'album': album['name'],
'year': int(album.get('releasedate', '0-0-0').split('-')[0]),
},
'context_menu': context_menu_album(
artist_id=album['artist_id'],
album_id=album['id'],
),
'replace_context_menu': True,
'thumbnail': album['image'],
'path': plugin.url_for(
endpoint='show_downloaded_album_tracks',
album_id=album['id']
)
} for i, album in enumerate(albums)]
return items
def format_mixtapes(mixtapes):
items = [{
'label': mixtape_id,
'info': {
'count': i + 1,
},
'context_menu': context_menu_mixtape(
mixtape_id=mixtape_id,
),
'replace_context_menu': True,
'path': plugin.url_for(
endpoint='show_mixtape',
mixtape_id=mixtape_id
)
} for i, (mixtape_id, mixtape) in enumerate(mixtapes.iteritems())]
return items
def format_playlists(playlists):
plugin.set_content('music')
items = [{
'label': u'%s (%s)' % (playlist['name'], playlist['user_name']),
'info': {
'count': i + 2,
'artist': playlist['user_name'],
'album': playlist['name'],
'year': int(playlist.get('creationdate', '0-0-0').split('-')[0]),
},
'context_menu': context_menu_empty(),
'replace_context_menu': True,
'path': plugin.url_for(
endpoint='show_tracks_in_playlist',
playlist_id=playlist['id']
)
} for i, playlist in enumerate(playlists)]
return items
def format_playlist_tracks(playlist, tracks):
plugin.set_content('songs')
items = [{
'label': track['name'],
'info': {
'count': i + 2,
'tracknumber': int(track['position']),
'duration': track['duration'],
'title': track['name'],
},
'context_menu': context_menu_track(
artist_id=track['artist_id'],
track_id=track['id'],
album_id=track['album_id'],
),
'replace_context_menu': True,
'is_playable': True,
'path': plugin.url_for(
endpoint='play_track',
track_id=track['id']
)
} for i, track in enumerate(tracks)]
return items
def format_radios(radios):
plugin.set_content('music')
items = [{
'label': radio['dispname'],
'info': {
'count': i + 2,
},
'context_menu': context_menu_empty(),
'replace_context_menu': True,
'thumbnail': radio['image'],
'is_playable': True,
'path': plugin.url_for(
endpoint='play_radio',
radio_id=radio['id'],
)
} for i, radio in enumerate(radios)]
return items
def format_similar_tracks(tracks):
plugin.set_content('songs')
items = [{
'label': u'%s - %s (%s)' % (
track['artist_name'],
track['name'],
track['album_name']
),
'info': {
'count': i + 2,
'title': track['name'],
'album': track['album_name'],
'duration': track['duration'],
'artist': track['artist_name'],
'year': int(track.get('releasedate', '0-0-0').split('-')[0]),
},
'context_menu': context_menu_track(
artist_id=track['artist_id'],
track_id=track['id'],
album_id=track['album_id']
),
'replace_context_menu': True,
'is_playable': True,
'thumbnail': track['album_image'],
'path': plugin.url_for(
endpoint='play_track',
track_id=track['id']
)
} for i, track in enumerate(tracks)]
return items
def format_sort_methods(sort_methods, entity):
original_params = plugin.request.view_params
extra_params = {}
current_method = get_args('sort_method')
if 'tags' in plugin.request.args:
extra_params['tags'] = get_args('tags')
items = [{
'label': (
u'[B]%s[/B]' if sort_method == current_method else u'%s'
) % _('sort_method_%s' % sort_method),
'thumbnail': 'DefaultMusicPlugins.png',
'info': {
'count': i,
},
'context_menu': context_menu_empty(),
'replace_context_menu': True,
'path': plugin.url_for(
endpoint='show_%s' % entity,
is_update='true',
**dict(original_params, sort_method=sort_method, **extra_params)
)
} for i, sort_method in enumerate(sort_methods)]
return items
def format_tags(tags):
original_params = plugin.request.view_params
extra_params = {}
current_tags = [t for t in get_args('tags', '').split('+') if t]
if 'sort_method' in plugin.request.args:
extra_params['sort_method'] = get_args('sort_method')
items = []
for tag_type, type_tags in tags:
for i, tag in enumerate(type_tags):
tag_str = u'%s: %s' % (
_('tag_type_%s' % tag_type),
tag.capitalize()
)
if tag in current_tags:
new_tags = '+'.join((t for t in current_tags if not t == tag))
extra_params['tags'] = new_tags
label = u'[B]%s[/B]' % tag_str
else:
new_tags = '+'.join(([tag] + current_tags))
extra_params['tags'] = new_tags
label = u'%s' % tag_str
items.append({
'label': label,
'thumbnail': 'DefaultMusicPlugins.png',
'info': {
'count': i,
},
'context_menu': context_menu_empty(),
'replace_context_menu': True,
'path': plugin.url_for(
endpoint='show_tracks',
is_update='true',
**dict(original_params, **extra_params)
)
})
return items
def format_tracks(tracks):
plugin.set_content('songs')
items = [{
'label': u'%s - %s (%s)' % (
track['artist_name'],
track['name'],
track['album_name']
),
'info': {
'count': i + 2,
'title': track['name'],
'album': track['album_name'],
'duration': track['duration'],
'artist': track['artist_name'],
'genre': u', '.join(track['musicinfo']['tags']['genres']),
'comment': format_comment(track['musicinfo']),
'year': int(track.get('releasedate', '0-0-0').split('-')[0]),
},
'context_menu': context_menu_track(
artist_id=track['artist_id'],
track_id=track['id'],
album_id=track['album_id']
),
'replace_context_menu': True,
'is_playable': True,
'thumbnail': track['album_image'],
'path': plugin.url_for(
endpoint='play_track',
track_id=track['id']
)
} for i, track in enumerate(tracks)]
return items
############################### Items #########################################
def get_add_mixtape_item():
return {
'label': u'[B]%s[/B]' % _('add_mixtape'),
'context_menu': context_menu_empty(),
'replace_context_menu': True,
'info': {
'count': 0,
},
'path': plugin.url_for(
endpoint='add_mixtape',
),
}
def get_page_switcher_items(items_len):
current_page = int(get_args('page', 1))
has_next_page = items_len >= api.current_limit
has_previous_page = current_page > 1
original_params = plugin.request.view_params
extra_params = {}
if 'sort_method' in plugin.request.args:
extra_params['sort_method'] = get_args('sort_method')
if 'tags' in plugin.request.args:
extra_params['tags'] = get_args('tags', '')
items = []
if has_next_page:
next_page = int(current_page) + 1
extra_params['page'] = str(next_page)
items.append({
'label': u'>> %s %d >>' % (_('page'), next_page),
'context_menu': context_menu_empty(),
'replace_context_menu': True,
'info': {
'count': items_len + 2,
},
'path': plugin.url_for(
endpoint=plugin.request.view,
is_update='true',
**dict(original_params, **extra_params)
)
})
if has_previous_page:
previous_page = int(current_page) - 1
extra_params['page'] = str(previous_page)
items.append({
'label': u'<< %s %d <<' % (_('page'), previous_page),
'context_menu': context_menu_empty(),
'replace_context_menu': True,
'info': {
'count': 1,
},
'path': plugin.url_for(
endpoint=plugin.request.view,
is_update='true',
**dict(original_params, **extra_params)
)
})
return items
def get_sort_method_switcher_item(entity, current_method='default'):
original_params = plugin.request.view_params
extra_params = {}
extra_params['entity'] = entity
extra_params['sort_method'] = current_method
if 'tags' in plugin.request.args:
extra_params['tags'] = get_args('tags')
return {
'label': u'[B][[ %s ]][/B]' % _('sort_method_%s' % current_method),
'thumbnail': 'DefaultMusicPlugins.png',
'context_menu': context_menu_empty(),
'replace_context_menu': True,
'info': {
'count': 0,
},
'path': plugin.url_for(
endpoint='show_sort_methods',
is_update='true',
**dict(original_params, **extra_params)
),
}
def get_tag_filter_item():
current_tags = [t for t in get_args('tags', '').split('+') if t]
extra_params = {}
if 'sort_method' in plugin.request.args:
extra_params['sort_method'] = get_args('sort_method')
extra_params['tags'] = get_args('tags', '')
return {
'label': u'[B][[ %s: %s ]][/B]' % (
_('current_tags'),
len(current_tags)
),
'thumbnail': 'DefaultMusicPlugins.png',
'context_menu': context_menu_empty(),
'replace_context_menu': True,
'info': {
'count': 0,
},
'path': plugin.url_for(
endpoint='show_tags',
is_update='true',
**extra_params
),
}
############################ Item-Adders ######################################
def add_items(items, same_cover=False):
is_update = 'is_update' in plugin.request.args
finish_kwargs = {
'update_listing': is_update,
'sort_methods': ('playlist_order', )
}
if plugin.get_setting('force_viewmode', bool) and not same_cover:
finish_kwargs['view_mode'] = 'thumbnail'
elif plugin.get_setting('force_viewmode_tracks', bool) and same_cover:
finish_kwargs['view_mode'] = 'thumbnail'
return plugin.finish(items, **finish_kwargs)
def add_static_items(items):
for item in items:
if not 'context_menu' in item:
item['context_menu'] = context_menu_empty()
item['replace_context_menu'] = True
if 'is_update' in plugin.request.args:
return plugin.finish(items, update_listing=True)
else:
return plugin.finish(items)
############################ Context-Menu #####################################
def context_menu_album(artist_id, album_id):
return [
(_('album_info'),
_action('info')),
(_('download_album'),
_run(endpoint='download_album',
album_id=album_id)),
(_('show_tracks_in_this_album'),
_view(endpoint='show_tracks_in_album',
album_id=album_id)),
(_('show_albums_by_this_artist'),
_view(endpoint='show_albums_by_artist',
artist_id=artist_id)),
(_('addon_settings'),
_run(endpoint='open_settings')),
]
def context_menu_artist(artist_id):
return [
(_('show_albums_by_this_artist'),
_view(endpoint='show_albums_by_artist',
artist_id=artist_id)),
(_('addon_settings'),
_run(endpoint='open_settings')),
]
def context_menu_empty():
return [
(_('addon_settings'),
_run(endpoint='open_settings')),
]
def context_menu_mixtape(mixtape_id):
return [
(_('rename_mixtape'),
_run(endpoint='rename_mixtape',
mixtape_id=mixtape_id)),
(_('delete_mixtape'),
_run(endpoint='del_mixtape',
mixtape_id=mixtape_id)),
(_('addon_settings'),
_run(endpoint='open_settings')),
]
def context_menu_track(artist_id, track_id, album_id):
return [
(_('song_info'),
_action('info')),
(_('download_track'),
_run(endpoint='download_track',
track_id=track_id)),
(_('add_del_track_to_mixtape'),
_run(endpoint='add_del_track_to_mixtape',
track_id=track_id)),
(_('show_albums_by_this_artist'),
_view(endpoint='show_albums_by_artist',
artist_id=artist_id)),
(_('show_similar_tracks'),
_view(endpoint='show_similar_tracks',
track_id=track_id)),
(_('show_tracks_in_this_album'),
_view(endpoint='show_tracks_in_album',
album_id=album_id)),
(_('addon_settings'),
_run(endpoint='open_settings')),
]
############################## Callers ########################################
def _action(arg):
return 'XBMC.Action(%s)' % arg
def _run(*args, **kwargs):
return 'XBMC.RunPlugin(%s)' % plugin.url_for(*args, **kwargs)
def _view(*args, **kwargs):
return 'XBMC.Container.Update(%s)' % plugin.url_for(*args, **kwargs)
def _refresh_view():
xbmc.executebuiltin('Container.Refresh')
############################## Helpers ########################################
def get_args(arg_name, default=None):
return plugin.request.args.get(arg_name, [default])[0]
def get_cached(func, *args, **kwargs):
@plugin.cached(kwargs.pop('TTL', 1440))
def wrap(func_name, *args, **kwargs):
return func(*args, **kwargs)
return wrap(func.__name__, *args, **kwargs)
def get_download_path(setting_name):
download_path = plugin.get_setting(setting_name, str)
while not download_path:
try_again = xbmcgui.Dialog().yesno(
_('no_download_path'),
_('want_set_now')
)
if not try_again:
return
download_path = xbmcgui.Dialog().browse(
3, # ShowAndGetWriteableDirectory
_('choose_download_folder'),
'music',
)
plugin.set_setting(setting_name, download_path)
return download_path
def get_downloaded_track(track_id):
tracks = plugin.get_storage('downloaded_tracks')
if track_id in tracks:
if xbmcvfs.exists(tracks[track_id]['file']):
log('Track is already downloaded, playing local')
return tracks[track_id]['file']
albums = plugin.get_storage('downloaded_albums')
for album in albums.itervalues():
if track_id in album['tracks']:
if xbmcvfs.exists(album['tracks'][track_id]['file']):
log('Album is already downloaded, playing local')
return album['tracks'][track_id]['file']
def get_artist_image(url):
if url:
# fix whitespace in some image urls
return url.replace(' ', '%20')
else:
return 'DefaultActor.png'
def get_user_account():
user_id = plugin.get_setting('user_id', str)
while not user_id:
try_again = xbmcgui.Dialog().yesno(
_('no_username_set'),
_('want_set_now')
)
if not try_again:
return
set_user_account()
user_id = plugin.get_setting('user_id', str)
return user_id
def add_track_to_history(track_id):
history = plugin.get_storage('history')
history_limit = plugin.get_setting('history_limit', int)
if not 'items' in history:
history['items'] = []
if not track_id in [t['id'] for t in history['items']]:
track = get_cached(api.get_track, track_id)
else:
track = [t for t in history['items'] if t['id'] == track_id][0]
history['items'] = [
t for t in history['items'] if not t['id'] == track_id
]
history['items'].append(track)
if history_limit:
while len(history['items']) > history_limit:
history['items'].pop(0)
history.sync()
def log(text):
plugin.log.info(text)
def fix_xbmc_music_library_view():
# avoid context menu replacing bug by
# switching window from musiclibrary to musicfiles
if xbmcgui.getCurrentWindowId() == 10502:
url = plugin.url_for(endpoint='show_root_menu')
xbmc.executebuiltin('ReplaceWindow(MusicFiles, %s)' % url)
def _(string_id):
if string_id in STRINGS:
return plugin.get_string(STRINGS[string_id])
else:
log('String is missing: %s' % string_id)
return string_id
if __name__ == '__main__':
try:
plugin.run()
except ApiError, message:
xbmcgui.Dialog().ok(
_('api_error'),
_('api_returned'),
unicode(message),
_('try_again_later')
)
except ConnectionError:
xbmcgui.Dialog().ok(
_('connection_error'),
'',
_('check_network_or'),
_('try_again_later')
)
| gpl-2.0 | 24,996,541,849,658,104 | 30.764832 | 79 | 0.560769 | false |
sdss/marvin | python/marvin/tools/mixins/mma.py | 1 | 13073 | # !usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Brian Cherinka
# @Date: 2018-07-28 17:26:41
# @Last modified by: Brian Cherinka
# @Last Modified time: 2018-11-19 22:56:31
from __future__ import absolute_import, division, print_function
import abc
import os
import re
import time
import warnings
import six
from marvin import config, log
from marvin.core.exceptions import MarvinError, MarvinMissingDependency, MarvinUserWarning
from marvin.utils.db import testDbConnection
from marvin.utils.general.general import mangaid2plateifu
try:
from sdss_access.path import Path
except ImportError:
Path = None
try:
from sdss_access import Access
except ImportError:
Access = None
__all__ = ['MMAMixIn']
class MMAMixIn(object, six.with_metaclass(abc.ABCMeta)):
"""A mixin that provides multi-modal data access.
Add this mixin to any new class object to provide that class with
    the Multi-Modal Data Access System for using local files, the database,
    or a remote connection when initializing new objects. See the
    :ref:`decision tree <marvin-dma>`.
Parameters:
input (str):
A string that can be a filename, plate-ifu, or mangaid. It will be
automatically identified based on its unique format. This argument
is always the first one, so it can be defined without the keyword
for convenience.
filename (str):
            The path of the file to load. If set, ``input`` is ignored.
mangaid (str):
The mangaid of the file to load. If set, ``input`` is ignored.
plateifu (str):
The plate-ifu of the data cube to load. If set, ``input`` is
ignored.
mode ({'local', 'remote', 'auto'}):
The load mode to use. See :ref:`mode-decision-tree`.
data (:class:`~astropy.io.fits.HDUList`, SQLAlchemy object, or None):
An astropy ``HDUList`` or a SQLAlchemy object, to be used for
            initialisation. If ``None``, the :ref:`normal <marvin-dma>` mode
will be used.
release (str):
The MPL/DR version of the data to use.
drpall (str):
The path to the
`drpall <https://trac.sdss.org/wiki/MANGA/TRM/TRM_MPL-5/metadata#DRP:DRPall>`_
            file to use. If not set, it will use the default path for the
            file based on the ``release``.
download (bool):
If ``True``, the data will be downloaded on instantiation. See
:ref:`marvin-download-objects`.
ignore_db (bool):
If ``True``, the local data-origin `db` will be ignored.
Attributes:
data (:class:`~astropy.io.fits.HDUList`, SQLAlchemy object, or dict):
Depending on the access mode, ``data`` is populated with the
|HDUList| from the FITS file, a
`SQLAlchemy <http://www.sqlalchemy.org>`_ object, or a dictionary
of values returned by an API call.
data_origin ({'file', 'db', 'api'}):
Indicates the origin of the data, either from a file, the DB, or
an API call.
filename (str):
The path of the file used, if any.
mangaid (str):
The mangaid of the target.
plateifu:
The plateifu of the target
release:
The data release
"""
def __init__(self, input=None, filename=None, mangaid=None, plateifu=None,
mode=None, data=None, release=None, drpall=None, download=None,
ignore_db=False):
self.data = data
self.data_origin = None
self._ignore_db = ignore_db
self.filename = filename
self.mangaid = mangaid
self.plateifu = plateifu
self.mode = mode if mode is not None else config.mode
self._release = release if release is not None else config.release
self._drpver, self._dapver = config.lookUpVersions(release=self._release)
self._drpall = config._getDrpAllPath(self._drpver) if drpall is None else drpall
self._forcedownload = download if download is not None else config.download
self._determine_inputs(input)
assert self.mode in ['auto', 'local', 'remote']
assert self.filename is not None or self.plateifu is not None, 'no inputs set.'
self.datamodel = None
self._set_datamodel()
if self.mode == 'local':
self._doLocal()
elif self.mode == 'remote':
self._doRemote()
elif self.mode == 'auto':
try:
self._doLocal()
except Exception as ee:
if self.filename:
# If the input contains a filename we don't want to go into remote mode.
raise(ee)
else:
log.debug('local mode failed. Trying remote now.')
self._doRemote()
# Sanity check to make sure data_origin has been properly set.
assert self.data_origin in ['file', 'db', 'api'], 'data_origin is not properly set.'
def _determine_inputs(self, input):
"""Determines what inputs to use in the decision tree."""
if input:
assert self.filename is None and self.plateifu is None and self.mangaid is None, \
'if input is set, filename, plateifu, and mangaid cannot be set.'
assert isinstance(input, six.string_types), 'input must be a string.'
input_dict = self._parse_input(input)
if input_dict['plate'] is not None and input_dict['ifu'] is not None:
self.plateifu = input
elif input_dict['plate'] is not None and input_dict['ifu'] is None:
self._plate = input
elif input_dict['mangaid'] is not None:
self.mangaid = input
else:
# Assumes the input must be a filename
self.filename = input
if self.filename is None and self.mangaid is None and self.plateifu is None:
raise MarvinError('no inputs defined.')
if self.filename:
self.mangaid = None
self.plateifu = None
if self.mode == 'remote':
raise MarvinError('filename not allowed in remote mode.')
assert os.path.exists(self.filename), \
'filename {} does not exist.'.format(str(self.filename))
elif self.plateifu:
assert not self.filename, 'invalid set of inputs.'
elif self.mangaid:
assert not self.filename, 'invalid set of inputs.'
self.plateifu = mangaid2plateifu(self.mangaid,
drpall=self._drpall,
drpver=self._drpver)
elif self._plate:
assert not self.filename, 'invalid set of inputs.'
@staticmethod
def _parse_input(value):
"""Parses and input and determines plate, ifu, and mangaid."""
# Number of IFUs per size
n_ifus = {19: 2, 37: 4, 61: 4, 91: 2, 127: 5, 7: 12}
return_dict = {'plate': None, 'ifu': None, 'mangaid': None}
plateifu_pattern = re.compile(r'([0-9]{4,5})-([0-9]{4,9})')
ifu_pattern = re.compile('(7|127|[0-9]{2})([0-9]{2})')
mangaid_pattern = re.compile(r'[0-9]{1,3}-[0-9]+')
plateid_pattern = re.compile('([0-9]{4,})(?!-)(?<!-)')
plateid_match = re.match(plateid_pattern, value)
plateifu_match = re.match(plateifu_pattern, value)
mangaid_match = re.match(mangaid_pattern, value)
# Check whether the input value matches the plateifu pattern
if plateifu_match is not None:
plate, ifu = plateifu_match.groups(0)
# If the value matches a plateifu, checks that the ifu is a valid one.
ifu_match = re.match(ifu_pattern, ifu)
if ifu_match is not None:
ifu_size, ifu_id = map(int, ifu_match.groups(0))
if ifu_id <= n_ifus[ifu_size]:
return_dict['plate'] = plate
return_dict['ifu'] = ifu
# Check whether this is a mangaid
elif mangaid_match is not None:
return_dict['mangaid'] = value
# Check whether this is a plate
elif plateid_match is not None:
return_dict['plate'] = value
return return_dict
@staticmethod
def _get_ifus(minis=None):
        ''' Returns a list of all the allowed IFU design ids
Parameters:
minis (bool):
If True, includes the mini-bundles
Returns:
A list of IFU designs
'''
# Number of IFUs per size
n_ifus = {19: 2, 37: 4, 61: 4, 91: 2, 127: 5, 7: 12}
# Pop the minis
if not minis:
__ = n_ifus.pop(7)
ifus = ['{0}{1:02d}'.format(key, i + 1) for key, value in n_ifus.items() for i in range(value)]
return ifus
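    # Assumed example outputs of the two static helpers above, derived from
    # the patterns and IFU counts defined in this class:
    #
    #   MMAMixIn._parse_input('8485-1901')
    #   # -> {'plate': '8485', 'ifu': '1901', 'mangaid': None}
    #   MMAMixIn._parse_input('1-209232')
    #   # -> {'plate': None, 'ifu': None, 'mangaid': '1-209232'}
    #   MMAMixIn._parse_input('8485')
    #   # -> {'plate': '8485', 'ifu': None, 'mangaid': None}
    #
    #   MMAMixIn._get_ifus(minis=True)  # also includes the '701'-'712' minis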
def _set_datamodel(self):
"""Sets the datamodel for this object. Must be overridden by each subclass."""
pass
def _doLocal(self):
"""Tests if it's possible to load the data locally."""
if self.filename:
if os.path.exists(self.filename):
self.mode = 'local'
self.data_origin = 'file'
else:
raise MarvinError('input file {0} not found'.format(self.filename))
elif self.plateifu:
from marvin import marvindb
if marvindb:
testDbConnection(marvindb.session)
if marvindb and marvindb.db and not self._ignore_db:
self.mode = 'local'
self.data_origin = 'db'
else:
fullpath = self._getFullPath()
if fullpath and os.path.exists(fullpath):
self.mode = 'local'
self.filename = fullpath
self.data_origin = 'file'
else:
if self._forcedownload:
self.download()
self.data_origin = 'file'
else:
raise MarvinError('failed to retrieve data using '
'input parameters.')
def _doRemote(self):
"""Tests if remote connection is possible."""
if self.filename:
raise MarvinError('filename not allowed in remote mode.')
else:
self.mode = 'remote'
self.data_origin = 'api'
def download(self, pathType=None, **pathParams):
"""Download using sdss_access Rsync"""
# # check for public release
# is_public = 'DR' in self._release
# rsync_release = self._release.lower() if is_public else None
if not Access:
raise MarvinError('sdss_access is not installed')
else:
access = Access(release=self._release)
access.remote()
access.add(pathType, **pathParams)
access.set_stream()
access.commit()
paths = access.get_paths()
# adding a millisecond pause for download to finish and file existence to register
time.sleep(0.001)
self.filename = paths[0] # doing this for single files, may need to change
@abc.abstractmethod
def _getFullPath(self, pathType=None, url=None, **pathParams):
"""Returns the full path of the file in the tree.
This method must be overridden by each subclass.
"""
# # check for public release
# is_public = 'DR' in self._release
# ismpl = 'MPL' in self._release
# path_release = self._release.lower() if is_public or ismpl else None
if not Path:
raise MarvinMissingDependency('sdss_access is not installed')
else:
path = Path(release=self._release)
try:
if url:
fullpath = path.url(pathType, **pathParams)
else:
fullpath = path.full(pathType, **pathParams)
except Exception as ee:
warnings.warn('sdss_access was not able to retrieve the full path of the file. '
'Error message is: {0}'.format(str(ee)), MarvinUserWarning)
fullpath = None
return fullpath
@property
def release(self):
"""Returns the release."""
return self._release
@release.setter
def release(self, value):
"""Fails when trying to set the release after instantiation."""
raise MarvinError('the release cannot be changed once the object has been instantiated.')
@property
def plate(self):
"""Returns the plate id."""
return int(self.plateifu.split('-')[0])
@property
def ifu(self):
"""Returns the IFU."""
return int(self.plateifu.split('-')[1])
| bsd-3-clause | -7,867,688,218,666,392,000 | 33.861333 | 103 | 0.566205 | false |
jbalogh/airflow | airflow/models.py | 1 | 73231 | import copy
from datetime import datetime, timedelta
import getpass
import imp
import jinja2
import json
import logging
import os
import dill
import re
import signal
import socket
import sys
from sqlalchemy import (
Column, Integer, String, DateTime, Text, Boolean, ForeignKey, PickleType,
Index,)
from sqlalchemy import case, func, or_
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.dialects.mysql import LONGTEXT
from sqlalchemy.orm import relationship
from airflow import settings, utils
from airflow.executors import DEFAULT_EXECUTOR, LocalExecutor
from airflow.configuration import conf
from airflow.utils import (
AirflowException, State, apply_defaults, provide_session)
Base = declarative_base()
ID_LEN = 250
SQL_ALCHEMY_CONN = conf.get('core', 'SQL_ALCHEMY_CONN')
DAGS_FOLDER = os.path.expanduser(conf.get('core', 'DAGS_FOLDER'))
if 'mysql' in SQL_ALCHEMY_CONN:
LongText = LONGTEXT
else:
LongText = Text
def clear_task_instances(tis, session):
'''
Clears a set of task instances, but makes sure the running ones
get killed.
'''
job_ids = []
for ti in tis:
if ti.state == State.RUNNING:
if ti.job_id:
ti.state = State.SHUTDOWN
job_ids.append(ti.job_id)
else:
session.delete(ti)
if job_ids:
from airflow.jobs import BaseJob as BJ # HA!
for job in session.query(BJ).filter(BJ.id.in_(job_ids)).all():
job.state = State.SHUTDOWN
class DagBag(object):
"""
A dagbag is a collection of dags, parsed out of a folder tree and has high
level configuration settings, like what database to use as a backend and
what executor to use to fire off tasks. This makes it easier to run
distinct environments for say production and development, tests, or for
different teams or security profiles. What would have been system level
settings are now dagbag level so that one system can run multiple,
independent settings sets.
:param dag_folder: the folder to scan to find DAGs
:type dag_folder: str
:param executor: the executor to use when executing task instances
in this DagBag
:param include_examples: whether to include the examples that ship
with airflow or not
:type include_examples: bool
:param sync_to_db: whether to sync the properties of the DAGs to
the metadata DB while finding them, typically should be done
by the scheduler job only
:type sync_to_db: bool
"""
def __init__(
self,
dag_folder=None,
executor=DEFAULT_EXECUTOR,
include_examples=conf.getboolean('core', 'LOAD_EXAMPLES'),
sync_to_db=False):
dag_folder = dag_folder or DAGS_FOLDER
logging.info("Filling up the DagBag from " + dag_folder)
self.dag_folder = dag_folder
self.dags = {}
self.sync_to_db = sync_to_db
self.file_last_changed = {}
self.executor = executor
self.collect_dags(dag_folder)
if include_examples:
example_dag_folder = os.path.join(
os.path.dirname(__file__),
'example_dags')
self.collect_dags(example_dag_folder)
if sync_to_db:
self.deactivate_inactive_dags()
def get_dag(self, dag_id):
"""
Gets the DAG out of the dictionary, and refreshes it if expired
"""
if dag_id in self.dags:
dag = self.dags[dag_id]
if dag.is_subdag:
orm_dag = DagModel.get_current(dag.parent_dag.dag_id)
else:
orm_dag = DagModel.get_current(dag_id)
if orm_dag and dag.last_loaded < (
orm_dag.last_expired or datetime(2100, 1, 1)):
self.process_file(
filepath=orm_dag.fileloc, only_if_updated=False)
dag = self.dags[dag_id]
else:
orm_dag = DagModel.get_current(dag_id)
self.process_file(
filepath=orm_dag.fileloc, only_if_updated=False)
if dag_id in self.dags:
dag = self.dags[dag_id]
else:
dag = None
return dag
def process_file(self, filepath, only_if_updated=True, safe_mode=True):
"""
        Given a path to a python module, this method imports the module and
        looks for dag objects within it.
"""
try:
# This failed before in what may have been a git sync
# race condition
dttm = datetime.fromtimestamp(os.path.getmtime(filepath))
mod_name, file_ext = os.path.splitext(os.path.split(filepath)[-1])
mod_name = 'unusual_prefix_' + mod_name
except:
return
if safe_mode and os.path.isfile(filepath):
# Skip file if no obvious references to airflow or DAG are found.
with open(filepath, 'r') as f:
content = f.read()
if not all([s in content for s in ('DAG', 'airflow')]):
return
if (
not only_if_updated or
filepath not in self.file_last_changed or
dttm != self.file_last_changed[filepath]):
try:
logging.info("Importing " + filepath)
if mod_name in sys.modules:
del sys.modules[mod_name]
with utils.timeout(30):
m = imp.load_source(mod_name, filepath)
except:
logging.error("Failed to import: " + filepath)
logging.exception("")
self.file_last_changed[filepath] = dttm
return
for dag in m.__dict__.values():
if isinstance(dag, DAG):
dag.full_filepath = filepath
dag.is_subdag = False
self.bag_dag(dag, parent_dag=dag, root_dag=dag)
# dag.pickle()
self.file_last_changed[filepath] = dttm
def bag_dag(self, dag, parent_dag, root_dag):
"""
Adds the DAG into the bag, recurses into sub dags.
"""
self.dags[dag.dag_id] = dag
dag.resolve_template_files()
dag.last_loaded = datetime.now()
if self.sync_to_db:
session = settings.Session()
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag.dag_id).first()
if not orm_dag:
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.fileloc = root_dag.full_filepath
orm_dag.is_subdag = dag.is_subdag
orm_dag.owners = root_dag.owner
orm_dag.is_active = True
session.merge(orm_dag)
session.commit()
session.close()
for subdag in dag.subdags:
subdag.full_filepath = dag.full_filepath
subdag.parent_dag = dag
subdag.fileloc = root_dag.full_filepath
subdag.is_subdag = True
self.bag_dag(subdag, parent_dag=dag, root_dag=root_dag)
logging.info('Loaded DAG {dag}'.format(**locals()))
def collect_dags(
self,
dag_folder=None,
only_if_updated=True):
"""
        Given a file path or a folder, this method looks for python modules,
        imports them and adds them to the dagbag collection.
        Note that if a .airflowignore file is found while processing
        the directory, it behaves much like a .gitignore, ignoring files
        that match any of the regex patterns specified in the file.
"""
dag_folder = dag_folder or self.dag_folder
if os.path.isfile(dag_folder):
self.process_file(dag_folder, only_if_updated=only_if_updated)
elif os.path.isdir(dag_folder):
patterns = []
for root, dirs, files in os.walk(dag_folder):
ignore_file = [f for f in files if f == '.airflowignore']
if ignore_file:
f = open(os.path.join(root, ignore_file[0]), 'r')
patterns += [p for p in f.read().split('\n') if p]
f.close()
for f in files:
try:
filepath = os.path.join(root, f)
if not os.path.isfile(filepath):
continue
mod_name, file_ext = os.path.splitext(
os.path.split(filepath)[-1])
if file_ext != '.py':
continue
if not any([re.findall(p, filepath) for p in patterns]):
self.process_file(
filepath, only_if_updated=only_if_updated)
except:
pass
def deactivate_inactive_dags(self):
active_dag_ids = [dag.dag_id for dag in self.dags.values()]
session = settings.Session()
for dag in session.query(
DagModel).filter(~DagModel.dag_id.in_(active_dag_ids)).all():
dag.is_active = False
session.merge(dag)
session.commit()
session.close()
def paused_dags(self):
session = settings.Session()
dag_ids = [dp.dag_id for dp in session.query(DagModel).filter(
DagModel.is_paused == True)]
session.commit()
session.close()
return dag_ids
class BaseUser(Base):
__tablename__ = "user"
id = Column(Integer, primary_key=True)
username = Column(String(ID_LEN), unique=True)
email = Column(String(500))
def __repr__(self):
return self.username
def get_id(self):
return unicode(self.id)
class Connection(Base):
"""
Placeholder to store information about different database instances
connection information. The idea here is that scripts use references to
database instances (conn_id) instead of hard coding hostname, logins and
passwords when using operators or hooks.
"""
__tablename__ = "connection"
id = Column(Integer(), primary_key=True)
conn_id = Column(String(ID_LEN))
conn_type = Column(String(500))
host = Column(String(500))
schema = Column(String(500))
login = Column(String(500))
password = Column(String(500))
port = Column(Integer())
extra = Column(String(5000))
def __init__(
self, conn_id=None, conn_type=None,
host=None, login=None, password=None,
schema=None, port=None):
self.conn_id = conn_id
self.conn_type = conn_type
self.host = host
self.login = login
self.password = password
self.schema = schema
self.port = port
def get_hook(self):
from airflow import hooks
try:
if self.conn_type == 'mysql':
return hooks.MySqlHook(mysql_conn_id=self.conn_id)
elif self.conn_type == 'postgres':
return hooks.PostgresHook(postgres_conn_id=self.conn_id)
elif self.conn_type == 'hive_cli':
return hooks.HiveCliHook(hive_cli_conn_id=self.conn_id)
elif self.conn_type == 'presto':
return hooks.PrestoHook(presto_conn_id=self.conn_id)
elif self.conn_type == 'hiveserver2':
return hooks.HiveServer2Hook(hiveserver2_conn_id=self.conn_id)
elif self.conn_type == 'sqlite':
return hooks.SqliteHook(sqlite_conn_id=self.conn_id)
elif self.conn_type == 'jdbc':
return hooks.JdbcHook(conn_id=self.conn_id)
except:
return None
def __repr__(self):
return self.conn_id
@property
def extra_dejson(self):
"""Returns the extra property by deserializing json"""
obj = {}
if self.extra:
try:
obj = json.loads(self.extra)
except Exception as e:
logging.exception(e)
logging.error(
"Failed parsing the json for "
"conn_id {}".format(self.conn_id))
return obj
class DagPickle(Base):
"""
Dags can originate from different places (user repos, master repo, ...)
and also get executed in different places (different executors). This
object represents a version of a DAG and becomes a source of truth for
a BackfillJob execution. A pickle is a native python serialized object,
and in this case gets stored in the database for the duration of the job.
The executors pick up the DagPickle id and read the dag definition from
the database.
"""
id = Column(Integer, primary_key=True)
pickle = Column(PickleType(pickler=dill))
created_dttm = Column(DateTime, default=func.now())
pickle_hash = Column(Integer)
__tablename__ = "dag_pickle"
def __init__(self, dag):
self.dag_id = dag.dag_id
if hasattr(dag, 'template_env'):
dag.template_env = None
self.pickle_hash = hash(dag)
self.pickle = dag
class TaskInstance(Base):
"""
Task instances store the state of a task instance. This table is the
authority and single source of truth around what tasks have run and the
state they are in.
    The SqlAlchemy model doesn't have a SqlAlchemy foreign key to the task or
    dag model deliberately to have more control over transactions.
    Database transactions on this table should guard against double triggers
    and any confusion around what task instances are or aren't ready to run,
    even while multiple schedulers may be firing task instances.
"""
__tablename__ = "task_instance"
task_id = Column(String(ID_LEN), primary_key=True)
dag_id = Column(String(ID_LEN), primary_key=True)
execution_date = Column(DateTime, primary_key=True)
start_date = Column(DateTime)
end_date = Column(DateTime)
duration = Column(Integer)
state = Column(String(20))
try_number = Column(Integer)
hostname = Column(String(1000))
unixname = Column(String(1000))
job_id = Column(Integer)
pool = Column(String(50))
queue = Column(String(50))
priority_weight = Column(Integer)
__table_args__ = (
Index('ti_dag_state', dag_id, state),
Index('ti_state_lkp', dag_id, task_id, execution_date, state),
Index('ti_pool', pool, state, priority_weight),
)
def __init__(self, task, execution_date, state=None, job=None):
self.dag_id = task.dag_id
self.task_id = task.task_id
self.execution_date = execution_date
self.state = state
self.task = task
self.queue = task.queue
self.pool = task.pool
self.priority_weight = task.priority_weight_total
self.try_number = 1
self.unixname = getpass.getuser()
if job:
self.job_id = job.id
def command(
self,
mark_success=False,
ignore_dependencies=False,
force=False,
local=False,
pickle_id=None,
raw=False,
task_start_date=None,
job_id=None):
"""
Returns a command that can be executed anywhere where airflow is
installed. This command is part of the message sent to executors by
the orchestrator.
"""
iso = self.execution_date.isoformat()
mark_success = "--mark_success" if mark_success else ""
pickle = "--pickle {0}".format(pickle_id) if pickle_id else ""
job_id = "--job_id {0}".format(job_id) if job_id else ""
ignore_dependencies = "-i" if ignore_dependencies else ""
force = "--force" if force else ""
local = "--local" if local else ""
task_start_date = \
"-s " + task_start_date.isoformat() if task_start_date else ""
raw = "--raw" if raw else ""
subdir = ""
if not pickle and self.task.dag and self.task.dag.full_filepath:
subdir = "-sd DAGS_FOLDER/{0}".format(self.task.dag.filepath)
return (
"airflow run "
"{self.dag_id} {self.task_id} {iso} "
"{mark_success} "
"{pickle} "
"{local} "
"{ignore_dependencies} "
"{force} "
"{job_id} "
"{raw} "
"{subdir} "
"{task_start_date} "
).format(**locals())
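    # For illustration, a call such as ``ti.command(local=True)`` renders to a
    # string of the form below (values are made up; the exact flags depend on
    # the keyword arguments passed above):
    #
    #   airflow run my_dag my_task 2015-01-01T00:00:00 --local \
    #       -sd DAGS_FOLDER/my_dag_file.py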
@property
def log_filepath(self):
iso = self.execution_date.isoformat()
log = os.path.expanduser(conf.get('core', 'BASE_LOG_FOLDER'))
return (
"{log}/{self.dag_id}/{self.task_id}/{iso}.log".format(**locals()))
@property
def log_url(self):
iso = self.execution_date.isoformat()
BASE_URL = conf.get('webserver', 'BASE_URL')
return BASE_URL + (
"/admin/airflow/log"
"?dag_id={self.dag_id}"
"&task_id={self.task_id}"
"&execution_date={iso}"
).format(**locals())
@property
def mark_success_url(self):
iso = self.execution_date.isoformat()
BASE_URL = conf.get('webserver', 'BASE_URL')
return BASE_URL + (
"/admin/airflow/action"
"?action=success"
"&task_id={self.task_id}"
"&dag_id={self.dag_id}"
"&execution_date={iso}"
"&upstream=false"
"&downstream=false"
).format(**locals())
def current_state(self, main_session=None):
"""
        Get the very latest state from the database; if a session is passed,
        we use it and looking up the state becomes part of the session,
        otherwise a new session is used.
"""
session = main_session or settings.Session()
TI = TaskInstance
ti = session.query(TI).filter(
TI.dag_id == self.dag_id,
TI.task_id == self.task_id,
TI.execution_date == self.execution_date,
).all()
if ti:
state = ti[0].state
else:
state = None
if not main_session:
session.commit()
session.close()
return state
def error(self, main_session=None):
"""
Forces the task instance's state to FAILED in the database.
"""
session = settings.Session()
logging.error("Recording the task instance as FAILED")
self.state = State.FAILED
session.merge(self)
session.commit()
session.close()
def refresh_from_db(self, main_session=None):
"""
Refreshes the task instance from the database based on the primary key
"""
session = main_session or settings.Session()
TI = TaskInstance
ti = session.query(TI).filter(
TI.dag_id == self.dag_id,
TI.task_id == self.task_id,
TI.execution_date == self.execution_date,
).first()
if ti:
self.state = ti.state
self.start_date = ti.start_date
self.end_date = ti.end_date
self.try_number = ti.try_number
if not main_session:
session.commit()
session.close()
@property
def key(self):
"""
Returns a tuple that identifies the task instance uniquely
"""
return (self.dag_id, self.task_id, self.execution_date)
def is_queueable(self, flag_upstream_failed=False):
"""
Returns a boolean on whether the task instance has met all dependencies
and is ready to run. It considers the task's state, the state
of its dependencies, depends_on_past and makes sure the execution
isn't in the future. It doesn't take into
account whether the pool has a slot for it to run.
:param flag_upstream_failed: This is a hack to generate
the upstream_failed state creation while checking to see
whether the task instance is runnable. It was the shortest
path to add the feature
:type flag_upstream_failed: boolean
"""
if self.execution_date > datetime.now() - self.task.schedule_interval:
return False
elif self.state == State.UP_FOR_RETRY and not self.ready_for_retry():
return False
elif self.task.end_date and self.execution_date > self.task.end_date:
return False
elif self.state == State.SKIPPED:
return False
elif (
self.state in State.runnable() and
self.are_dependencies_met(
flag_upstream_failed=flag_upstream_failed)):
return True
else:
return False
def is_runnable(self):
"""
Returns whether a task is ready to run AND there's room in the
queue.
"""
return self.is_queueable() and not self.pool_full()
def are_dependents_done(self, main_session=None):
"""
Checks whether the dependents of this task instance have all succeeded.
This is meant to be used by wait_for_downstream.
This is useful when you do not want to start processing the next
schedule of a task until the dependents are done. For instance,
if the task DROPs and recreates a table.
"""
session = main_session or settings.Session()
task = self.task
if not task._downstream_list:
return True
downstream_task_ids = [t.task_id for t in task._downstream_list]
ti = session.query(func.count(TaskInstance.task_id)).filter(
TaskInstance.dag_id == self.dag_id,
TaskInstance.task_id.in_(downstream_task_ids),
TaskInstance.execution_date == self.execution_date,
TaskInstance.state == State.SUCCESS,
)
count = ti[0][0]
if not main_session:
session.commit()
session.close()
return count == len(task._downstream_list)
def are_dependencies_met(
self, main_session=None, flag_upstream_failed=False):
"""
Returns a boolean on whether the upstream tasks are in a SUCCESS state
and considers depends_on_past and the previous run's state.
:param flag_upstream_failed: This is a hack to generate
the upstream_failed state creation while checking to see
whether the task instance is runnable. It was the shortest
path to add the feature
:type flag_upstream_failed: boolean
"""
TI = TaskInstance
# Using the session if passed as param
session = main_session or settings.Session()
task = self.task
# Checking that the depends_on_past is fulfilled
if (task.depends_on_past and
not self.execution_date == task.start_date):
previous_ti = session.query(TI).filter(
TI.dag_id == self.dag_id,
TI.task_id == task.task_id,
TI.execution_date ==
self.execution_date-task.schedule_interval,
TI.state == State.SUCCESS,
).first()
if not previous_ti:
return False
# Applying wait_for_downstream
previous_ti.task = self.task
if task.wait_for_downstream and not \
previous_ti.are_dependents_done(session):
return False
# Checking that all upstream dependencies have succeeded
if task._upstream_list:
upstream_task_ids = [t.task_id for t in task._upstream_list]
qry = (
session
.query(
func.sum(
case([(TI.state == State.SUCCESS, 1)], else_=0)),
func.sum(
case([(TI.state == State.SKIPPED, 1)], else_=0)),
func.count(TI.task_id),
)
.filter(
TI.dag_id == self.dag_id,
TI.task_id.in_(upstream_task_ids),
TI.execution_date == self.execution_date,
TI.state.in_([
State.SUCCESS, State.FAILED,
State.UPSTREAM_FAILED, State.SKIPPED]),
)
)
successes, skipped, done = qry[0]
if flag_upstream_failed:
if skipped:
self.state = State.SKIPPED
self.start_date = datetime.now()
self.end_date = datetime.now()
session.merge(self)
elif successes < done >= len(task._upstream_list):
self.state = State.UPSTREAM_FAILED
self.start_date = datetime.now()
self.end_date = datetime.now()
session.merge(self)
if successes < len(task._upstream_list):
return False
if not main_session:
session.commit()
session.close()
return True
def __repr__(self):
return (
"<TaskInstance: {ti.dag_id}.{ti.task_id} "
"{ti.execution_date} [{ti.state}]>"
).format(ti=self)
def ready_for_retry(self):
"""
Checks on whether the task instance is in the right state and timeframe
to be retried.
"""
return self.state == State.UP_FOR_RETRY and \
self.end_date + self.task.retry_delay < datetime.now()
@provide_session
def pool_full(self, session):
"""
Returns a boolean as to whether the slot pool has room for this
task to run
"""
if not self.task.pool:
return False
pool = (
session
.query(Pool)
.filter(Pool.pool == self.task.pool)
.first()
)
if not pool:
return False
open_slots = pool.open_slots(session=session)
return open_slots <= 0
def run(
self,
verbose=True,
ignore_dependencies=False, # Doesn't check for deps, just runs
force=False, # Disregards previous successes
mark_success=False, # Don't run the task, act as if it succeeded
test_mode=False, # Doesn't record success or failure in the DB
job_id=None,):
"""
Runs the task instance.
"""
task = self.task
session = settings.Session()
self.refresh_from_db(session)
session.commit()
self.job_id = job_id
iso = datetime.now().isoformat()
self.hostname = socket.gethostname()
if self.state == State.RUNNING:
logging.warning("Another instance is running, skipping.")
elif not force and self.state == State.SUCCESS:
logging.info(
"Task {self} previously succeeded"
" on {self.end_date}".format(**locals())
)
elif not ignore_dependencies and \
not self.are_dependencies_met(session):
logging.warning("Dependencies not met yet")
elif self.state == State.UP_FOR_RETRY and \
not self.ready_for_retry():
next_run = (self.end_date + task.retry_delay).isoformat()
logging.info(
"Not ready for retry yet. " +
"Next run after {0}".format(next_run)
)
elif force or self.state in State.runnable():
msg = "\n" + ("-" * 80)
if self.state == State.UP_FOR_RETRY:
msg += "\nRetry run {self.try_number} out of {task.retries} "
msg += "starting @{iso}\n"
else:
msg += "\nNew run starting @{iso}\n"
msg += ("-" * 80)
logging.info(msg.format(**locals()))
self.start_date = datetime.now()
if not force and task.pool:
# If a pool is set for this task, marking the task instance
# as QUEUED
self.state = State.QUEUED
session.merge(self)
session.commit()
session.close()
logging.info("Queuing into pool {}".format(task.pool))
return
if self.state == State.UP_FOR_RETRY:
self.try_number += 1
else:
self.try_number = 1
if not test_mode:
session.add(Log(State.RUNNING, self))
self.state = State.RUNNING
self.end_date = None
if not test_mode:
session.merge(self)
session.commit()
if verbose:
if mark_success:
msg = "Marking success for "
else:
msg = "Executing "
msg += "{self.task} on {self.execution_date}"
context = {}
try:
logging.info(msg.format(self=self))
if not mark_success:
context = self.get_template_context()
task_copy = copy.copy(task)
self.task = task_copy
def signal_handler(signum, frame):
'''Setting kill signal handler'''
logging.error("Killing subprocess")
task_copy.on_kill()
raise AirflowException("Task received SIGTERM signal")
signal.signal(signal.SIGTERM, signal_handler)
self.render_templates()
settings.policy(task_copy)
task_copy.pre_execute(context=context)
                    # If a timeout is specified for the task, make it fail
# if it goes beyond
if task_copy.execution_timeout:
with utils.timeout(int(
task_copy.execution_timeout.total_seconds())):
task_copy.execute(context=context)
else:
task_copy.execute(context=context)
task_copy.post_execute(context=context)
except (Exception, StandardError, KeyboardInterrupt) as e:
self.handle_failure(e, test_mode, context)
raise
# Recording SUCCESS
session = settings.Session()
self.end_date = datetime.now()
self.set_duration()
self.state = State.SUCCESS
if not test_mode:
session.add(Log(State.SUCCESS, self))
session.merge(self)
# Success callback
try:
if task.on_success_callback:
task.on_success_callback(context)
except Exception as e3:
logging.error("Failed when executing success callback")
logging.exception(e3)
session.commit()
def handle_failure(self, error, test_mode, context):
logging.exception(error)
task = self.task
session = settings.Session()
self.end_date = datetime.now()
self.set_duration()
if not test_mode:
session.add(Log(State.FAILED, self))
# Let's go deeper
try:
if self.try_number <= task.retries:
self.state = State.UP_FOR_RETRY
if task.email_on_retry and task.email:
self.email_alert(error, is_retry=True)
else:
self.state = State.FAILED
if task.email_on_failure and task.email:
self.email_alert(error, is_retry=False)
except Exception as e2:
logging.error(
'Failed to send email to: ' + str(task.email))
logging.exception(e2)
# Handling callbacks pessimistically
try:
if self.state == State.UP_FOR_RETRY and task.on_retry_callback:
task.on_retry_callback(context)
if self.state == State.FAILED and task.on_failure_callback:
task.on_failure_callback(context)
except Exception as e3:
logging.error("Failed at executing callback")
logging.exception(e3)
if not test_mode:
session.merge(self)
session.commit()
logging.error(str(error))
def get_template_context(self):
task = self.task
from airflow import macros
tables = None
if 'tables' in task.params:
tables = task.params['tables']
ds = self.execution_date.isoformat()[:10]
yesterday_ds = (self.execution_date - timedelta(1)).isoformat()[:10]
tomorrow_ds = (self.execution_date + timedelta(1)).isoformat()[:10]
ds_nodash = ds.replace('-', '')
ti_key_str = "{task.dag_id}__{task.task_id}__{ds_nodash}"
ti_key_str = ti_key_str.format(**locals())
params = {}
if hasattr(task, 'dag') and task.dag.params:
params.update(task.dag.params)
if task.params:
params.update(task.params)
return {
'dag': task.dag,
'ds': ds,
'yesterday_ds': yesterday_ds,
'tomorrow_ds': tomorrow_ds,
'END_DATE': ds,
'ds_nodash': ds_nodash,
'end_date': ds,
'execution_date': self.execution_date,
'latest_date': ds,
'macros': macros,
'params': params,
'tables': tables,
'task': task,
'task_instance': self,
'ti': self,
'task_instance_key_str': ti_key_str,
'conf': conf,
}
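    # The dict returned above is what becomes available inside Jinja templates,
    # so a templated field can reference, for example:
    #
    #   "echo {{ ds }} {{ ds_nodash }} {{ task_instance_key_str }}"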
def render_templates(self):
task = self.task
jinja_context = self.get_template_context()
if hasattr(self, 'task') and hasattr(self.task, 'dag'):
if self.task.dag.user_defined_macros:
jinja_context.update(
self.task.dag.user_defined_macros)
rt = self.task.render_template # shortcut to method
for attr in task.__class__.template_fields:
content = getattr(task, attr)
if content:
if isinstance(content, basestring):
result = rt(content, jinja_context)
elif isinstance(content, (list, tuple)):
result = [rt(s, jinja_context) for s in content]
elif isinstance(content, dict):
result = {
k: rt(v, jinja_context)
for k, v in content.items()}
else:
raise AirflowException("Type not supported for templating")
setattr(task, attr, result)
def email_alert(self, exception, is_retry=False):
task = self.task
title = "Airflow alert: {self}".format(**locals())
exception = str(exception).replace('\n', '<br>')
try_ = task.retries + 1
body = (
"Try {self.try_number} out of {try_}<br>"
"Exception:<br>{exception}<br>"
"Log: <a href='{self.log_url}'>Link</a><br>"
"Host: {self.hostname}<br>"
"Log file: {self.log_filepath}<br>"
"Mark success: <a href='{self.mark_success_url}'>Link</a><br>"
).format(**locals())
utils.send_email(task.email, title, body)
def set_duration(self):
if self.end_date and self.start_date:
self.duration = (self.end_date - self.start_date).seconds
else:
self.duration = None
class Log(Base):
"""
Used to actively log events to the database
"""
__tablename__ = "log"
id = Column(Integer, primary_key=True)
dttm = Column(DateTime)
dag_id = Column(String(ID_LEN))
task_id = Column(String(ID_LEN))
event = Column(String(30))
execution_date = Column(DateTime)
owner = Column(String(500))
def __init__(self, event, task_instance):
self.dttm = datetime.now()
self.dag_id = task_instance.dag_id
self.task_id = task_instance.task_id
self.execution_date = task_instance.execution_date
self.event = event
self.owner = task_instance.task.owner
class BaseOperator(object):
"""
Abstract base class for all operators. Since operators create objects that
    become nodes in the dag, BaseOperator contains many recursive methods for
dag crawling behavior. To derive this class, you are expected to override
the constructor as well as the 'execute' method.
Operators derived from this task should perform or trigger certain tasks
synchronously (wait for completion). Example of operators could be an
operator that runs a Pig job (PigOperator), a sensor operator that
waits for a partition to land in Hive (HiveSensorOperator), or one that
moves data from Hive to MySQL (Hive2MySqlOperator). Instances of these
operators (tasks) target specific operations, running specific scripts,
functions or data transfers.
This class is abstract and shouldn't be instantiated. Instantiating a
class derived from this one results in the creation of a task object,
which ultimately becomes a node in DAG objects. Task dependencies should
be set by using the set_upstream and/or set_downstream methods.
Note that this class is derived from SQLAlchemy's Base class, which
allows us to push metadata regarding tasks to the database. Classes deriving
from this one need to implement the polymorphic specifics documented in
SQLAlchemy. This should become clear while reading the code for other
operators.
:param task_id: a unique, meaningful id for the task
:type task_id: string
:param owner: the owner of the task, using the unix username is recommended
:type owner: string
:param retries: the number of retries that should be performed before
failing the task
:type retries: int
:param retry_delay: delay between retries
:type retry_delay: timedelta
:param start_date: start date for the task, the scheduler will start from
this point in time
:type start_date: datetime
:param end_date: if specified, the scheduler won't go beyond this date
:type end_date: datetime
:param schedule_interval: interval at which to schedule the task
:type schedule_interval: timedelta
:param depends_on_past: when set to true, task instances will run
sequentially while relying on the previous task's schedule to
succeed. The task instance for the start_date is allowed to run.
:type depends_on_past: bool
:param wait_for_downstream: when set to true, an instance of task
X will wait for tasks immediately downstream of the previous instance
of task X to finish successfully before it runs. This is useful if the
different instances of a task X alter the same asset, and this asset
is used by tasks downstream of task X.
:type wait_for_downstream: bool
:param queue: which queue to target when running this job. Not
all executors implement queue management, the CeleryExecutor
does support targeting specific queues.
:type queue: str
:param dag: a reference to the dag the task is attached to (if any)
:type dag: DAG
:param priority_weight: priority weight of this task against other tasks.
This allows the executor to trigger higher priority tasks before
others when things get backed up.
:type priority_weight: int
:param pool: the slot pool this task should run in, slot pools are a
way to limit concurrency for certain tasks
:type pool: str
:param sla: time by which the job is expected to succeed. Note that
this represents the ``timedelta`` after the period is closed. For
example if you set an SLA of 1 hour, the scheduler would send an email
soon after 1:00AM on the ``2016-01-02`` if the ``2016-01-01`` instance
has not succeeded yet.
The scheduler pays special attention for jobs with an SLA and
sends alert
emails for sla misses. SLA misses are also recorded in the database
for future reference. All tasks that share the same SLA time
get bundled in a single email, sent soon after that time. SLA
notifications are sent once and only once for each task instance.
:type sla: datetime.timedelta
:param execution_timeout: max time allowed for the execution of
this task instance, if it goes beyond it will raise and fail.
:type execution_timeout: datetime.timedelta
:param on_failure_callback: a function to be called when a task instance
of this task fails. a context dictionary is passed as a single
parameter to this function. Context contains references to related
objects to the task instance and is documented under the macros
section of the API.
:type on_failure_callback: callable
:param on_retry_callback: much like the ``on_failure_callback`` except
that it is executed when retries occur.
:type on_retry_callback: callable
:param on_success_callback: much like the ``on_failure_callback`` except
that it is executed when the task succeeds.
:type on_success_callback: callable
"""
# For derived classes to define which fields will get jinjaified
template_fields = []
# Defines which file extensions to look for in the templated fields
template_ext = []
# Defines the color in the UI
ui_color = '#fff'
ui_fgcolor = '#000'
@apply_defaults
def __init__(
self,
task_id,
owner,
email=None,
email_on_retry=True,
email_on_failure=True,
retries=0,
retry_delay=timedelta(seconds=300),
start_date=None,
end_date=None,
schedule_interval=timedelta(days=1),
depends_on_past=False,
wait_for_downstream=False,
dag=None,
params=None,
default_args=None,
adhoc=False,
priority_weight=1,
queue=conf.get('celery', 'default_queue'),
pool=None,
sla=None,
execution_timeout=None,
on_failure_callback=None,
on_success_callback=None,
on_retry_callback=None,
*args,
**kwargs):
utils.validate_key(task_id)
self.dag_id = dag.dag_id if dag else 'adhoc_' + owner
self.task_id = task_id
self.owner = owner
self.email = email
self.email_on_retry = email_on_retry
self.email_on_failure = email_on_failure
self.start_date = start_date
self.end_date = end_date
self.depends_on_past = depends_on_past
self.wait_for_downstream = wait_for_downstream
self._schedule_interval = schedule_interval
self.retries = retries
self.queue = queue
self.pool = pool
self.sla = sla
self.execution_timeout = execution_timeout
self.on_failure_callback = on_failure_callback
self.on_success_callback = on_success_callback
self.on_retry_callback = on_retry_callback
if isinstance(retry_delay, timedelta):
self.retry_delay = retry_delay
else:
logging.debug("retry_delay isn't timedelta object, assuming secs")
self.retry_delay = timedelta(seconds=retry_delay)
self.params = params or {} # Available in templates!
self.adhoc = adhoc
self.priority_weight = priority_weight
if dag:
dag.add_task(self)
self.dag = dag
# Private attributes
self._upstream_list = []
self._downstream_list = []
@property
def schedule_interval(self):
"""
The schedule interval of the DAG always wins over individual tasks so
that tasks within a DAG always line up. The task still needs a
schedule_interval as it may not be attached to a DAG.
"""
if hasattr(self, 'dag') and self.dag:
return self.dag.schedule_interval
else:
return self._schedule_interval
@property
def priority_weight_total(self):
return sum([
t.priority_weight
for t in self.get_flat_relatives(upstream=False)
]) + self.priority_weight
def __cmp__(self, other):
blacklist = {
'_sa_instance_state', '_upstream_list', '_downstream_list', 'dag'}
for k in set(self.__dict__) - blacklist:
if self.__dict__[k] != other.__dict__[k]:
logging.debug(str((
self.dag_id,
self.task_id,
k,
self.__dict__[k],
other.__dict__[k])))
return -1
return 0
def pre_execute(self, context):
"""
This is triggered right before self.execute, it's mostly a hook
for people deriving operators.
"""
pass
def execute(self, context):
"""
This is the main method to derive when creating an operator.
Context is the same dictionary used as when rendering jinja templates.
Refer to get_template_context for more context.
"""
raise NotImplementedError()
def post_execute(self, context):
"""
This is triggered right after self.execute, it's mostly a hook
for people deriving operators.
"""
pass
def on_kill(self):
'''
Override this method to cleanup subprocesses when a task instance
gets killed. Any use of the threading, subprocess or multiprocessing
module within an operator needs to be cleaned up or it will leave
ghost processes behind.
'''
pass
def __deepcopy__(self, memo):
"""
Hack sorting double chained task lists by task_id to avoid hitting
max_depth on deepcopy operations.
"""
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
self._upstream_list = sorted(self._upstream_list, key=lambda x: x.task_id)
self._downstream_list = sorted(self._downstream_list, key=lambda x: x.task_id)
for k, v in self.__dict__.items():
if k not in ('user_defined_macros', 'params'):
setattr(result, k, copy.deepcopy(v, memo))
return result
def render_template(self, content, context):
if hasattr(self, 'dag'):
env = self.dag.get_template_env()
else:
env = jinja2.Environment(cache_size=0)
exts = self.__class__.template_ext
if any([content.endswith(ext) for ext in exts]):
template = env.get_template(content)
else:
template = env.from_string(content)
return template.render(**context)
def prepare_template(self):
'''
Hook that is triggered after the templated fields get replaced
by their content. If you need your operator to alter the
content of the file before the template is rendered,
it should override this method to do so.
'''
pass
def resolve_template_files(self):
# Getting the content of files for template_field / template_ext
for attr in self.template_fields:
content = getattr(self, attr)
if (content and isinstance(content, basestring) and
any([content.endswith(ext) for ext in self.template_ext])):
env = self.dag.get_template_env()
try:
setattr(self, attr, env.loader.get_source(env, content)[0])
except Exception as e:
logging.exception(e)
self.prepare_template()
@property
def upstream_list(self):
"""@property: list of tasks directly upstream"""
return self._upstream_list
@property
def downstream_list(self):
"""@property: list of tasks directly downstream"""
return self._downstream_list
def clear(
self, start_date=None, end_date=None,
upstream=False, downstream=False):
"""
Clears the state of task instances associated with the task, following
the parameters specified.
"""
session = settings.Session()
TI = TaskInstance
qry = session.query(TI).filter(TI.dag_id == self.dag_id)
if start_date:
qry = qry.filter(TI.execution_date >= start_date)
if end_date:
qry = qry.filter(TI.execution_date <= end_date)
tasks = [self.task_id]
if upstream:
tasks += \
[t.task_id for t in self.get_flat_relatives(upstream=True)]
if downstream:
tasks += \
[t.task_id for t in self.get_flat_relatives(upstream=False)]
qry = qry.filter(TI.task_id.in_(tasks))
count = qry.count()
clear_task_instances(qry, session)
session.commit()
session.close()
return count
def get_task_instances(self, session, start_date=None, end_date=None):
"""
Get a set of task instance related to this task for a specific date
range.
"""
TI = TaskInstance
end_date = end_date or datetime.now()
return session.query(TI).filter(
TI.dag_id == self.dag_id,
TI.task_id == self.task_id,
TI.execution_date >= start_date,
TI.execution_date <= end_date,
).order_by(TI.execution_date).all()
def get_flat_relatives(self, upstream=False, l=None):
"""
Get a flat list of relatives, either upstream or downstream.
"""
if not l:
l = []
for t in self.get_direct_relatives(upstream):
if not utils.is_in(t, l):
l.append(t)
t.get_flat_relatives(upstream, l)
return l
def detect_downstream_cycle(self, task=None):
"""
When invoked, this routine will raise an exception if a cycle is
detected downstream from self. It is invoked when tasks are added to
the DAG to detect cycles.
"""
if not task:
task = self
for t in self.get_direct_relatives():
if task is t:
msg = "Cycle detect in DAG. Faulty task: {0}".format(task)
raise AirflowException(msg)
else:
t.detect_downstream_cycle(task=task)
return False
def run(
self, start_date=None, end_date=None, ignore_dependencies=False,
force=False, mark_success=False):
"""
Run a set of task instances for a date range.
"""
start_date = start_date or self.start_date
end_date = end_date or self.end_date or datetime.now()
for dt in utils.date_range(
start_date, end_date, self.schedule_interval):
TaskInstance(self, dt).run(
mark_success=mark_success,
ignore_dependencies=ignore_dependencies,
force=force,)
def get_direct_relatives(self, upstream=False):
"""
Get the direct relatives to the current task, upstream or
downstream.
"""
if upstream:
return self.upstream_list
else:
return self.downstream_list
def __repr__(self):
return "<Task({self.__class__.__name__}): {self.task_id}>".format(self=self)
@property
def task_type(self):
return self.__class__.__name__
def append_only_new(self, l, item):
if any([item is t for t in l]):
raise AirflowException(
'Dependency {self}, {item} already registered'
''.format(**locals()))
else:
l.append(item)
def _set_relatives(self, task_or_task_list, upstream=False):
try:
task_list = list(task_or_task_list)
except TypeError:
task_list = [task_or_task_list]
for task in task_list:
if not isinstance(task, BaseOperator):
raise AirflowException('Expecting a task')
if upstream:
self.append_only_new(task._downstream_list, self)
self.append_only_new(self._upstream_list, task)
else:
self.append_only_new(task._upstream_list, self)
self.append_only_new(self._downstream_list, task)
self.detect_downstream_cycle()
def set_downstream(self, task_or_task_list):
"""
Set a task, or a list of tasks, to be directly downstream from the current
task.
"""
self._set_relatives(task_or_task_list, upstream=False)
def set_upstream(self, task_or_task_list):
"""
Set a task, or a list of tasks, to be directly upstream from the current
task.
"""
self._set_relatives(task_or_task_list, upstream=True)
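# A minimal sketch of deriving an operator and wiring dependencies; the class,
# task ids and the `dag` object below are illustrative, not part of this module:
#
#   class MyOperator(BaseOperator):
#       template_fields = ('my_param',)
#
#       def __init__(self, my_param, *args, **kwargs):
#           super(MyOperator, self).__init__(*args, **kwargs)
#           self.my_param = my_param
#
#       def execute(self, context):
#           logging.info(self.my_param)
#
#   t1 = MyOperator(my_param='run {{ ds }}', task_id='t1', owner='me', dag=dag)
#   t2 = MyOperator(my_param='done', task_id='t2', owner='me', dag=dag)
#   t2.set_upstream(t1)  # t1 must complete before t2 runs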
class DagModel(Base):
__tablename__ = "dag"
"""
These items are stored in the database for state related information
"""
dag_id = Column(String(ID_LEN), primary_key=True)
# A DAG can be paused from the UI / DB
is_paused = Column(Boolean, default=False)
# Whether the DAG is a subdag
is_subdag = Column(Boolean, default=False)
# Whether that DAG was seen on the last DagBag load
is_active = Column(Boolean, default=False)
# Last time the scheduler started
last_scheduler_run = Column(DateTime)
# Last time this DAG was pickled
last_pickled = Column(DateTime)
# When the DAG received a refreshed signal last, used to know when
# we need to force refresh
last_expired = Column(DateTime)
# Whether (one of) the scheduler is scheduling this DAG at the moment
scheduler_lock = Column(Boolean)
# Foreign key to the latest pickle_id
pickle_id = Column(Integer)
# The location of the file containing the DAG object
fileloc = Column(String(2000))
# String representing the owners
owners = Column(String(2000))
def __repr__(self):
return "<DAG: {self.dag_id}>".format(self=self)
@classmethod
def get_current(cls, dag_id):
session = settings.Session()
obj = session.query(cls).filter(cls.dag_id == dag_id).first()
session.expunge_all()
session.commit()
session.close()
return obj
class DAG(object):
"""
A dag (directed acyclic graph) is a collection of tasks with directional
dependencies. A dag also has a schedule, a start date and an end date
(optional). For each schedule, (say daily or hourly), the DAG needs to run
each individual tasks as their dependencies are met. Certain tasks have
the property of depending on their own past, meaning that they can't run
until their previous schedule (and upstream tasks) are completed.
DAGs essentially act as namespaces for tasks. A task_id can only be
added once to a DAG.
:param dag_id: The id of the DAG
:type dag_id: string
:param schedule_interval: Defines how often that DAG runs
:type schedule_interval: datetime.timedelta
:param start_date: The timestamp from which the scheduler will
attempt to backfill
:type start_date: datetime.datetime
:param end_date: A date beyond which your DAG won't run, leave to None
for open ended scheduling
:type end_date: datetime.datetime
:param template_searchpath: This list of folders (non relative)
defines where jinja will look for your templates. Order matters.
Note that jinja/airflow includes the path of your DAG file by
default
:type template_searchpath: string or list of strings
:param user_defined_macros: a dictionary of macros that will be merged
:type user_defined_macros: dict
:param default_args: A dictionary of default parameters to be used
as constructor keyword parameters when initialising operators.
Note that operators have the same hook, and precede those defined
here, meaning that if your dict contains `'depends_on_past': True`
here and `'depends_on_past': False` in the operator's call
`default_args`, the actual value will be `False`.
:type default_args: dict
:param params: a dictionary of DAG level parameters that are made
accessible in templates, namespaced under `params`. These
params can be overridden at the task level.
:type params: dict
"""
def __init__(
self, dag_id,
schedule_interval=timedelta(days=1),
start_date=None, end_date=None,
full_filepath=None,
template_searchpath=None,
user_defined_macros=None,
default_args=None,
params=None):
self.user_defined_macros = user_defined_macros
self.default_args = default_args or {}
self.params = params
utils.validate_key(dag_id)
self.tasks = []
self.dag_id = dag_id
self.start_date = start_date
self.end_date = end_date or datetime.now()
self.schedule_interval = schedule_interval
self.full_filepath = full_filepath if full_filepath else ''
if isinstance(template_searchpath, basestring):
template_searchpath = [template_searchpath]
self.template_searchpath = template_searchpath
self.parent_dag = None # Gets set when DAGs are loaded
self.last_loaded = datetime.now()
def __repr__(self):
return "<DAG: {self.dag_id}>".format(self=self)
@property
def task_ids(self):
return [t.task_id for t in self.tasks]
@property
def filepath(self):
fn = self.full_filepath.replace(DAGS_FOLDER + '/', '')
fn = fn.replace(os.path.dirname(__file__) + '/', '')
return fn
@property
def folder(self):
return os.path.dirname(self.full_filepath)
@property
def owner(self):
return ", ".join(list(set([t.owner for t in self.tasks])))
@property
def latest_execution_date(self):
TI = TaskInstance
session = settings.Session()
execution_date = session.query(func.max(TI.execution_date)).filter(
TI.dag_id == self.dag_id,
TI.task_id.in_(self.task_ids)
).scalar()
session.commit()
session.close()
return execution_date
@property
def subdags(self):
# Late import to prevent circular imports
from airflow.operators import SubDagOperator
l = []
for task in self.tasks:
if isinstance(task, SubDagOperator):
l.append(task.subdag)
l += task.subdag.subdags
return l
def resolve_template_files(self):
for t in self.tasks:
t.resolve_template_files()
def crawl_for_tasks(self, objects):
"""
Typically called at the end of a script by passing globals() as a
parameter. This allows you to avoid adding every single task to the
dag explicitly.
"""
raise NotImplementedError()
def override_start_date(self, start_date):
"""
Sets start_date of all tasks and of the DAG itself to a certain date.
This is used by BackfillJob.
"""
for t in self.tasks:
t.start_date = start_date
self.start_date = start_date
def get_template_env(self):
'''
Returns a jinja2 Environment while taking into account the DAGs
template_searchpath and user_defined_macros
'''
searchpath = [self.folder]
if self.template_searchpath:
searchpath += self.template_searchpath
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(searchpath),
extensions=["jinja2.ext.do"],
cache_size=0)
if self.user_defined_macros:
env.globals.update(self.user_defined_macros)
return env
def set_dependency(self, upstream_task_id, downstream_task_id):
"""
Simple utility method to set dependency between two tasks that
already have been added to the DAG using add_task()
"""
self.get_task(upstream_task_id).set_downstream(
self.get_task(downstream_task_id))
def get_task_instances(
self, session, start_date=None, end_date=None, state=None):
TI = TaskInstance
if not start_date:
start_date = (datetime.today()-timedelta(30)).date()
start_date = datetime.combine(start_date, datetime.min.time())
if not end_date:
end_date = datetime.now()
tis = session.query(TI).filter(
TI.dag_id == self.dag_id,
TI.execution_date >= start_date,
TI.execution_date <= end_date,
TI.task_id.in_([t.task_id for t in self.tasks]),
)
if state:
tis = tis.filter(TI.state == state)
tis = tis.all()
return tis
@property
def roots(self):
return [t for t in self.tasks if not t.downstream_list]
def clear(
self, start_date=None, end_date=None,
only_failed=False,
only_running=False,
confirm_prompt=False,
include_subdags=True,
dry_run=False):
"""
Clears a set of task instances associated with the current dag for
a specified date range.
"""
session = settings.Session()
TI = TaskInstance
tis = session.query(TI)
if include_subdags:
# Crafting the right filter for dag_id and task_ids combo
conditions = []
for dag in self.subdags + [self]:
conditions.append(
TI.dag_id.like(dag.dag_id) & TI.task_id.in_(dag.task_ids)
)
tis = tis.filter(or_(*conditions))
else:
tis = session.query(TI).filter(TI.dag_id == self.dag_id)
tis = tis.filter(TI.task_id.in_(self.task_ids))
if start_date:
tis = tis.filter(TI.execution_date >= start_date)
if end_date:
tis = tis.filter(TI.execution_date <= end_date)
if only_failed:
tis = tis.filter(TI.state == State.FAILED)
if only_running:
tis = tis.filter(TI.state == State.RUNNING)
if dry_run:
tis = tis.all()
session.expunge_all()
return tis
count = tis.count()
if count == 0:
print("Nothing to clear.")
return 0
if confirm_prompt:
ti_list = "\n".join([str(t) for t in tis])
question = (
"You are about to delete these {count} tasks:\n"
"{ti_list}\n\n"
"Are you sure? (yes/no): ").format(**locals())
if utils.ask_yesno(question):
clear_task_instances(tis, session)
else:
count = 0
print("Bail. Nothing was cleared.")
else:
clear_task_instances(tis, session)
session.commit()
session.close()
return count
def __deepcopy__(self, memo):
# Switcharoo to get around deepcopying objects coming through the
# backdoor
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
if k not in ('user_defined_macros', 'params'):
setattr(result, k, copy.deepcopy(v, memo))
result.user_defined_macros = self.user_defined_macros
result.params = self.params
return result
def sub_dag(
self, task_regex,
include_downstream=False, include_upstream=True):
"""
Returns a subset of the current dag as a deep copy of the current dag
based on a regex that should match one or many tasks, and includes
upstream and downstream neighbours based on the flag passed.
"""
dag = copy.deepcopy(self)
regex_match = [
t for t in dag.tasks if re.findall(task_regex, t.task_id)]
also_include = []
for t in regex_match:
if include_downstream:
also_include += t.get_flat_relatives(upstream=False)
if include_upstream:
also_include += t.get_flat_relatives(upstream=True)
# Compiling the unique list of tasks that made the cut
tasks = list(set(regex_match + also_include))
dag.tasks = tasks
for t in dag.tasks:
# Removing upstream/downstream references to tasks that did not
# make the cut
t._upstream_list = [
ut for ut in t._upstream_list if utils.is_in(ut, tasks)]
t._downstream_list = [
ut for ut in t._downstream_list if utils.is_in(ut, tasks)]
return dag
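# e.g. dag.sub_dag('extract_.*', include_downstream=True) would return a deep
# copy containing the tasks whose ids match 'extract_.*' plus everything
# downstream of them (the task id pattern here is illustrative).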
def has_task(self, task_id):
return task_id in (t.task_id for t in self.tasks)
def get_task(self, task_id):
for task in self.tasks:
if task.task_id == task_id:
return task
raise AirflowException("Task {task_id} not found".format(**locals()))
def __cmp__(self, other):
blacklist = {'_sa_instance_state', 'end_date', 'last_pickled', 'tasks'}
for k in set(self.__dict__) - blacklist:
if self.__dict__[k] != other.__dict__[k]:
return -1
if len(self.tasks) != len(other.tasks):
return -1
i = 0
for task in self.tasks:
if task != other.tasks[i]:
return -1
i += 1
logging.info("Same as before")
return 0
def pickle(self, main_session=None):
session = main_session or settings.Session()
dag = session.query(
DagModel).filter(DAG.dag_id == self.dag_id).first()
dp = None
if dag and dag.pickle_id:
dp = session.query(DagPickle).filter(
DagPickle.id == dag.pickle_id).first()
if not dp or dp.pickle != self:
dp = DagPickle(dag=self)
session.add(dp)
self.last_pickled = datetime.now()
session.commit()
self.pickle_id = dp.id
if not main_session:
session.close()
def tree_view(self):
"""
Shows an ascii tree representation of the DAG
"""
def get_downstream(task, level=0):
print (" " * level * 4) + str(task)
level += 1
for t in task.upstream_list:
get_downstream(t, level)
for t in self.roots:
get_downstream(t)
def add_task(self, task):
'''
Add a task to the DAG
:param task: the task you want to add
:type task: task
'''
if not self.start_date and not task.start_date:
raise AirflowException("Task is missing the start_date parameter")
if not task.start_date:
task.start_date = self.start_date
if task.task_id in [t.task_id for t in self.tasks]:
raise AirflowException(
"Task id '{0}' has already been added "
"to the DAG ".format(task.task_id))
else:
self.tasks.append(task)
task.dag_id = self.dag_id
task.dag = self
self.task_count = len(self.tasks)
def add_tasks(self, tasks):
'''
Add a list of tasks to the DAG
:param tasks: a list of tasks you want to add
:type tasks: list of tasks
'''
for task in tasks:
self.add_task(task)
def db_merge(self):
BO = BaseOperator
session = settings.Session()
tasks = session.query(BO).filter(BO.dag_id == self.dag_id).all()
for t in tasks:
session.delete(t)
session.commit()
session.merge(self)
session.commit()
def run(
self, start_date=None, end_date=None, mark_success=False,
include_adhoc=False, local=False, executor=None,
donot_pickle=False, ignore_dependencies=False):
from airflow.jobs import BackfillJob
if not executor and local:
executor = LocalExecutor()
elif not executor:
executor = DEFAULT_EXECUTOR
job = BackfillJob(
self,
start_date=start_date,
end_date=end_date,
mark_success=mark_success,
include_adhoc=include_adhoc,
executor=executor,
donot_pickle=donot_pickle,
ignore_dependencies=ignore_dependencies)
job.run()
class Chart(Base):
__tablename__ = "chart"
id = Column(Integer, primary_key=True)
label = Column(String(200))
conn_id = Column(String(ID_LEN), nullable=False)
user_id = Column(Integer(), ForeignKey('user.id'),)
chart_type = Column(String(100), default="line")
sql_layout = Column(String(50), default="series")
sql = Column(Text, default="SELECT series, x, y FROM table")
y_log_scale = Column(Boolean)
show_datatable = Column(Boolean)
show_sql = Column(Boolean, default=True)
height = Column(Integer, default=600)
default_params = Column(String(5000), default="{}")
owner = relationship(
"User", cascade=False, cascade_backrefs=False, backref='charts')
x_is_date = Column(Boolean, default=True)
iteration_no = Column(Integer, default=0)
last_modified = Column(DateTime, default=datetime.now)
def __repr__(self):
return self.label
class KnownEventType(Base):
__tablename__ = "known_event_type"
id = Column(Integer, primary_key=True)
know_event_type = Column(String(200))
def __repr__(self):
return self.know_event_type
class KnownEvent(Base):
__tablename__ = "known_event"
id = Column(Integer, primary_key=True)
label = Column(String(200))
start_date = Column(DateTime)
end_date = Column(DateTime)
user_id = Column(Integer(), ForeignKey('user.id'),)
known_event_type_id = Column(Integer(), ForeignKey('known_event_type.id'),)
reported_by = relationship(
"User", cascade=False, cascade_backrefs=False, backref='known_events')
event_type = relationship(
"KnownEventType",
cascade=False,
cascade_backrefs=False, backref='known_events')
description = Column(Text)
def __repr__(self):
return self.label
class Variable(Base):
__tablename__ = "variable"
id = Column(Integer, primary_key=True)
key = Column(String(ID_LEN), unique=True)
val = Column(Text)
def __repr__(self):
return '{} : {}'.format(self.key, self.val)
@classmethod
@provide_session
def get(cls, key, session, deserialize_json=False):
obj = session.query(cls).filter(cls.key == key).first()
v = obj.val
if deserialize_json and v:
v = json.loads(v)
return v
class Pool(Base):
__tablename__ = "slot_pool"
id = Column(Integer, primary_key=True)
pool = Column(String(50), unique=True)
slots = Column(Integer, default=0)
description = Column(Text)
def __repr__(self):
return self.pool
@provide_session
def used_slots(self, session):
"""
Returns the number of slots used at the moment
"""
running = (
session
.query(TaskInstance)
.filter(TaskInstance.pool == self.pool)
.filter(TaskInstance.state == State.RUNNING)
.count()
)
return running
@provide_session
def queued_slots(self, session):
"""
Returns the number of slots queued at the moment
"""
return (
session
.query(TaskInstance)
.filter(TaskInstance.pool == self.pool)
.filter(TaskInstance.state == State.QUEUED)
.count()
)
@provide_session
def open_slots(self, session):
"""
Returns the number of slots open at the moment
"""
used_slots = self.used_slots(session=session)
return self.slots - used_slots
class SlaMiss(Base):
"""
Model that stores a history of the SLA that have been missed.
It is used to keep track of SLA failures over time and to avoid double
triggering alert emails.
"""
__tablename__ = "sla_miss"
task_id = Column(String(ID_LEN), primary_key=True)
dag_id = Column(String(ID_LEN), primary_key=True)
execution_date = Column(DateTime, primary_key=True)
email_sent = Column(Boolean, default=False)
timestamp = Column(DateTime)
description = Column(Text)
def __repr__(self):
return str((
self.dag_id, self.task_id, self.execution_date.isoformat()))
| apache-2.0 | -7,476,350,181,389,807,000 | 34.757324 | 86 | 0.572422 | false |
cjaymes/pyscap | src/scap/model/xs/SimpleContentElement.py | 1 | 1103 | # Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
import logging
from scap.model.xs import *
from scap.model.xs.AnnotatedType import AnnotatedType
logger = logging.getLogger(__name__)
class SimpleContentElement(AnnotatedType):
MODEL_MAP = {
'elements': [
{'tag_name': 'restriction', 'list': 'tags', 'class': 'RestrictionType', 'min': 0},
{'tag_name': 'extension', 'list': 'tags', 'class': 'ExtensionType', 'min': 0},
],
}
| gpl-3.0 | 4,981,862,645,693,250,000 | 35.766667 | 94 | 0.701723 | false |
simone/django-gb | django/utils/deprecation.py | 1 | 2585 | import inspect
import warnings
class RemovedInDjango20Warning(DeprecationWarning):
pass
class RemovedInDjango19Warning(PendingDeprecationWarning):
pass
RemovedInNextVersionWarning = RemovedInDjango19Warning
class warn_about_renamed_method(object):
def __init__(self, class_name, old_method_name, new_method_name, deprecation_warning):
self.class_name = class_name
self.old_method_name = old_method_name
self.new_method_name = new_method_name
self.deprecation_warning = deprecation_warning
def __call__(self, f):
def wrapped(*args, **kwargs):
warnings.warn(
"`%s.%s` is deprecated, use `%s` instead." %
(self.class_name, self.old_method_name, self.new_method_name),
self.deprecation_warning, 2)
return f(*args, **kwargs)
return wrapped
class RenameMethodsBase(type):
"""
Handles the deprecation paths when renaming a method.
It does the following:
1) Define the new method if missing and complain about it.
2) Define the old method if missing.
3) Complain whenever an old method is called.
See #15363 for more details.
"""
renamed_methods = ()
def __new__(cls, name, bases, attrs):
new_class = super(RenameMethodsBase, cls).__new__(cls, name, bases, attrs)
for base in inspect.getmro(new_class):
class_name = base.__name__
for renamed_method in cls.renamed_methods:
old_method_name = renamed_method[0]
old_method = base.__dict__.get(old_method_name)
new_method_name = renamed_method[1]
new_method = base.__dict__.get(new_method_name)
deprecation_warning = renamed_method[2]
wrapper = warn_about_renamed_method(class_name, *renamed_method)
# Define the new method if missing and complain about it
if not new_method and old_method:
warnings.warn(
"`%s.%s` method should be renamed `%s`." %
(class_name, old_method_name, new_method_name),
deprecation_warning, 2)
setattr(base, new_method_name, old_method)
setattr(base, old_method_name, wrapper(old_method))
# Define the old method as a wrapped call to the new method.
if not old_method and new_method:
setattr(base, old_method_name, wrapper(new_method))
return new_class
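# A minimal usage sketch (class and method names are illustrative; assumes
# `six.with_metaclass` is available for Python 2/3 compatibility):
#
#   class RenameManagerMethods(RenameMethodsBase):
#       renamed_methods = (
#           ('old_method', 'new_method', RemovedInDjango19Warning),
#       )
#
#   class Manager(six.with_metaclass(RenameManagerMethods)):
#       def new_method(self):
#           pass
#
# Manager().old_method() then still works, but emits RemovedInDjango19Warning,
# and a class defining only old_method() gets new_method() defined for it.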
| bsd-3-clause | -6,056,115,480,740,207,000 | 34.410959 | 90 | 0.592263 | false |
lixun910/pysal | pysal/explore/giddy/mobility.py | 1 | 4182 | """
Income mobility measures.
"""
__author__ = "Wei Kang <[email protected]>, Sergio J. Rey <[email protected]>"
__all__ = ["markov_mobility"]
import numpy as np
import numpy.linalg as la
def markov_mobility(p, measure="P", ini=None):
"""
Markov-based mobility index.
Parameters
----------
p : array
(k, k), Markov transition probability matrix.
measure : string
If measure= "P",
:math:`M_{P} = \\frac{m-\sum_{i=1}^m P_{ii}}{m-1}`;
if measure = "D",
:math:`M_{D} = 1 - |\det(P)|`,
where :math:`\det(P)` is the determinant of :math:`P`;
if measure = "L2",
:math:`M_{L2} = 1 - |\lambda_2|`,
where :math:`\lambda_2` is the second largest eigenvalue of
:math:`P`;
if measure = "B1",
:math:`M_{B1} = \\frac{m-m \sum_{i=1}^m \pi_i P_{ii}}{m-1}`,
where :math:`\pi` is the initial income distribution;
if measure == "B2",
:math:`M_{B2} = \\frac{1}{m-1} \sum_{i=1}^m \sum_{
j=1}^m \pi_i P_{ij} |i-j|`,
where :math:`\pi` is the initial income distribution.
ini : array
(k,), initial distribution. Need to be specified if
measure = "B1" or "B2". If not,
the initial distribution would be treated as a uniform
distribution.
Returns
-------
mobi : float
Mobility value.
Notes
-----
The mobility indices are based on :cite:`Formby:2004fk`.
Examples
--------
>>> import numpy as np
>>> import pysal.lib
>>> import pysal.viz.mapclassify as mc
>>> from pysal.explore.giddy.markov import Markov
>>> from pysal.explore.giddy.mobility import markov_mobility
>>> f = pysal.lib.io.open(pysal.lib.examples.get_path("usjoin.csv"))
>>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)])
>>> q5 = np.array([mc.Quantiles(y).yb for y in pci]).transpose()
>>> m = Markov(q5)
>>> m.p
array([[0.91011236, 0.0886392 , 0.00124844, 0. , 0. ],
[0.09972299, 0.78531856, 0.11080332, 0.00415512, 0. ],
[0. , 0.10125 , 0.78875 , 0.1075 , 0.0025 ],
[0. , 0.00417827, 0.11977716, 0.79805014, 0.07799443],
[0. , 0. , 0.00125156, 0.07133917, 0.92740926]])
(1) Estimate Shorrock1 mobility index:
>>> mobi_1 = markov_mobility(m.p, measure="P")
>>> print("{:.5f}".format(mobi_1))
0.19759
(2) Estimate Shorrock2 mobility index:
>>> mobi_2 = markov_mobility(m.p, measure="D")
>>> print("{:.5f}".format(mobi_2))
0.60685
(3) Estimate Sommers and Conlisk mobility index:
>>> mobi_3 = markov_mobility(m.p, measure="L2")
>>> print("{:.5f}".format(mobi_3))
0.03978
(4) Estimate Bartholomew1 mobility index (note that the initial
distribution should be given):
>>> ini = np.array([0.1,0.2,0.2,0.4,0.1])
>>> mobi_4 = markov_mobility(m.p, measure = "B1", ini=ini)
>>> print("{:.5f}".format(mobi_4))
0.22777
(5) Estimate Bartholomew2 mobility index (note that the initial
distribution should be given):
>>> ini = np.array([0.1,0.2,0.2,0.4,0.1])
>>> mobi_5 = markov_mobility(m.p, measure = "B2", ini=ini)
>>> print("{:.5f}".format(mobi_5))
0.04637
"""
p = np.array(p)
k = p.shape[1]
if measure == "P":
t = np.trace(p)
mobi = (k - t) / (k - 1)
elif measure == "D":
mobi = 1 - abs(la.det(p))
elif measure == "L2":
w, v = la.eig(p)
eigen_value_abs = abs(w)
mobi = 1 - np.sort(eigen_value_abs)[-2]
elif measure == "B1":
if ini is None:
ini = 1.0/k * np.ones(k)
mobi = (k - k * np.sum(ini * np.diag(p))) / (k - 1)
elif measure == "B2":
mobi = 0
if ini is None:
ini = 1.0 / k * np.ones(k)
for i in range(k):
for j in range(k):
mobi = mobi + ini[i] * p[i, j] * abs(i - j)
mobi = mobi / (k - 1)
return mobi
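# A small worked sketch with a made-up 2x2 transition matrix:
#   p = np.array([[0.9, 0.1],
#                 [0.2, 0.8]])
#   markov_mobility(p, measure="P")    # (2 - 1.7) / (2 - 1) = 0.3
#   markov_mobility(p, measure="D")    # 1 - |0.9*0.8 - 0.1*0.2| = 0.3
#   markov_mobility(p, measure="L2")   # eigenvalues are 1.0 and 0.7, so 1 - 0.7 = 0.3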
| bsd-3-clause | 3,632,192,855,585,092,600 | 31.169231 | 81 | 0.504304 | false |