code | apis | extract_api
---|---|---|
from collections import defaultdict
from typing import Callable, Dict, Optional, TypeVar, Set, cast, Iterator
import functools
import dask
A = TypeVar("A")
B = TypeVar("B")
K = TypeVar("K")
U = TypeVar("U")
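# Note: the helpers below are plain module-level functions that take the target
# set as an explicit `self` argument, in the style of Scala's collection API,
# rather than methods on a set subclass.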
def map(self: Set[A], f: Callable[[A], B]) -> Set[B]:
"""
Builds a new set by applying a function to all elements of this set.
Args:
f: The function to apply to all elements.
Returns:
The new set.
"""
return cast(Set[B], type(self)(f(x) for x in self))
def filter(self: Set[A], p: Callable[[A], bool]) -> Set[A]:
"""
Selects all elements of this set which satisfy a predicate.
Args:
p: The predicate to satisfy.
Returns:
The filtered set.
"""
return type(self)(x for x in self if p(x))
def filter_not(self: Set[A], p: Callable[[A], bool]) -> Set[A]:
"""
Selects all elements of this set which do not satisfy a predicate.
Args:
p: The predicate to not satisfy.
Returns:
The filtered set.
"""
return type(self)(x for x in self if not p(x))
def flat_map(self: Set[A], f: Callable[[A], Set[B]]) -> Set[B]:
"""
Builds a new set by applying a function to all elements of this set and using the elements of the resulting collections.
Args:
f: The function to apply to all elements.
Returns:
The new set.
"""
return cast(Set[B], type(self)(y for x in self for y in f(x)))
def contains(self: Set[A], elem: A) -> bool:
"""
Tests whether this set contains a given value as element.
Args:
elem: The element to look for.
Returns:
True if the set contains the element, False otherwise.
"""
return elem in self
def foreach(self: Set[A], f: Callable[[A], U]) -> None:
"""
Apply f to each element of the set for its side effects.
Args:
f: The function to apply to all elements for its side effects.
"""
for x in self:
f(x)
def group_by(self: Set[A], f: Callable[[A], K]) -> Dict[K, Set[A]]:
"""
Partitions this set into a dict of sets according to some discriminator function.
Args:
f: The grouping function.
Returns:
A dictionary where elements are grouped according to the grouping function.
"""
# frozenset does not have `add`
d = defaultdict(set if isinstance(self, frozenset) else type(self))
for x in self:
k = f(x)
d[k].add(x)
return d
def is_empty(self: Set[A]) -> bool:
"""
Tests whether the set is empty.
Returns:
True if the set is empty, False otherwise.
"""
return len(self) == 0
def size(self: Set[A]) -> int:
"""
Computes the size of this set.
Returns:
The size of the set.
"""
return len(self)
def find(self: Set[A], p: Callable[[A], bool]) -> Optional[A]:
"""
Finds the first element of the set satisfying a predicate, if any.
Args:
p: The predicate to satisfy.
Returns:
The first element satisfying the predicate, otherwise None.
"""
for x in self:
if p(x):
return x
return None
def fold_left(self: Set[A], z: B, op: Callable[[B, A], B]) -> B:
"""
Applies a binary operator to a start value and all elements of this set, going left to right.
Note: might return different results for different runs, unless the underlying collection type is ordered or the operator is associative and commutative.
Args:
z: The start value.
op: The binary operation.
Returns:
The result of inserting op between consecutive elements of this set, going left to right with the start value z on the left:
op(...op(z, x_1), x_2, ..., x_n)
where x1, ..., xn are the elements of this set. Returns z if this set is empty.
"""
acc = z
for x in self:
acc = op(acc, x)
return acc
def fold_right(self: Set[A], z: B, op: Callable[[A, B], B]) -> B:
"""
Applies a binary operator to all elements of this set and a start value, going right to left.
Note: might return different results for different runs, unless the underlying collection type is ordered or the operator is associative and commutative.
Args:
z: The start value.
op: The binary operation.
Returns:
The result of inserting op between consecutive elements of this set, going right to left with the start value z on the right:
op(x_1, op(x_2, ... op(x_n, z)...))
where x1, ..., xn are the elements of this set. Returns z if this set is empty.
"""
acc = z
for x in self:
acc = op(x, acc)
return acc
def forall(self: Set[A], p: Callable[[A], bool]) -> bool:
"""
Tests whether a predicate holds for all elements of this set.
Args:
p: The predicate used to test elements.
Returns:
True if this set is empty or the given predicate p holds for all elements of this set, otherwise False.
"""
for x in self:
if not p(x):
return False
return True
def length(self: Set[A]) -> int:
"""
Returns the length (number of elements) of the set. `size` is an alias for length.
Returns:
The length of the set
"""
return len(self)
def to_iterator(self: Set[A]) -> Iterator[A]:
"""
Converts this set to an iterator.
Returns:
An iterator
"""
return (x for x in self)
# Parallel operations
def par_map(self: Set[A], f: Callable[[A], B]) -> Set[B]:
"""
Builds a new set by applying in parallel a function to all elements of this set.
Args:
f: The function to apply to all elements.
Returns:
The new set.
"""
return cast(Set[B], type(self)((dask.compute(*(dask.delayed(f)(x) for x in self)))))
def par_filter(self: Set[A], p: Callable[[A], bool]) -> Set[A]:
"""
Selects in parallel all elements of this set which satisfy a predicate.
Args:
p: The predicate to satisfy.
Returns:
The filtered set.
"""
preds = dask.compute(*(dask.delayed(p)(x) for x in self))
return type(self)(x for i, x in enumerate(self) if preds[i])
def par_filter_not(self: Set[A], p: Callable[[A], bool]) -> Set[A]:
"""
Selects in parallel all elements of this set which do not satisfy a predicate.
Args:
p: The predicate to not satisfy.
Returns:
The filtered set.
"""
preds = dask.compute(*(dask.delayed(p)(x) for x in self))
return type(self)(x for i, x in enumerate(self) if not preds[i])
def par_flat_map(self: Set[A], f: Callable[[A], Set[B]]) -> Set[B]:
"""
Builds a new set by applying in parallel a function to all elements of this set and using the elements of the resulting collections.
Args:
f: The function to apply to all elements.
Returns:
The new set.
"""
applications = dask.compute(*(dask.delayed(f)(x) for x in self))
return cast(Set[B], type(self)(x for y in applications for x in y))
# Pure operations
def pure_map(self: Set[A], f: Callable[[A], B]) -> Set[B]:
"""
Builds a new set by applying a function to all elements of this set using memoization to improve performance.
WARNING: f must be a PURE function i.e., calling f on the same input must always lead to the same result!
Type A must be hashable using `hash()` function.
Args:
f: The PURE function to apply to all elements.
Returns:
The new set.
"""
f_cache = functools.cache(f)
return cast(Set[B], type(self)(f_cache(x) for x in self))
def pure_flat_map(self: Set[A], f: Callable[[A], Set[B]]) -> Set[B]:
"""
Builds a new set by applying a function to all elements of this set and using the elements of the resulting collections using memoization to improve performance.
WARNING: f must be a PURE function i.e., calling f on the same input must always lead to the same result!
Type A must be hashable using `hash()` function.
Args:
f: The function to apply to all elements.
Returns:
The new set.
"""
f_cache = functools.cache(f)
return cast(Set[B], type(self)(y for x in self for y in f_cache(x)))
def pure_filter(self: Set[A], p: Callable[[A], bool]) -> Set[A]:
"""
Selects all elements of this set which satisfy a predicate using memoization to improve performance.
WARNING: p must be a PURE function i.e., calling p on the same input must always lead to the same result!
Type A must be hashable using `hash()` function.
Args:
p: The predicate to satisfy.
Returns:
The filtered set.
"""
p_cache = functools.cache(p)
return type(self)(x for x in self if p_cache(x))
def pure_filter_not(self: Set[A], p: Callable[[A], bool]) -> Set[A]:
"""
Selects all elements of this set which do not satisfy a predicate using memoization to improve performance.
WARNING: p must be a PURE function i.e., calling p on the same input must always lead to the same result!
Type A must be hashable using `hash()` function.
Args:
p: The predicate not to satisfy.
Returns:
The filtered set.
"""
p_cache = functools.cache(p)
return type(self)(x for x in self if not p_cache(x))
def lazy_map(self: Set[A], f: Callable[[A], B]) -> Iterator[B]:
"""
Builds a new set by applying a function to all elements of this set, lazily.
Args:
f: The function to apply to all elements.
Returns:
The new lazy set, as an iterator.
"""
for x in self:
yield f(x)
def lazy_filter(self: Set[A], p: Callable[[A], bool]) -> Iterator[A]:
"""
Selects all elements of this set which satisfy a predicate, lazily.
Args:
p: The predicate to satisfy.
Returns:
The filtered lazy set, as an iterator.
"""
for x in self:
if p(x):
yield x
def lazy_filter_not(self: Set[A], p: Callable[[A], bool]) -> Iterator[A]:
"""
Selects all elements of this set which do not satisfy a predicate, lazily.
Args:
p: The predicate to not satisfy.
Returns:
The filtered lazy set, as an iterator.
"""
for x in self:
if not p(x):
yield x
def lazy_flat_map(self: Set[A], f: Callable[[A], Set[B]]) -> Iterator[B]:
"""
Builds a new lazy set by applying a function to all elements of this set and using the elements of the resulting collections.
Args:
f: The function to apply to all elements.
Returns:
The new lazy set, as an iterator.
"""
return (y for x in self for y in f(x))
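# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of how a few of the helpers above compose on a plain
# built-in set; the names below are examples only.
if __name__ == "__main__":
    nums = {1, 2, 3, 4, 5}
    print(map(nums, lambda x: x * 2))                  # e.g. {2, 4, 6, 8, 10}
    print(filter(nums, lambda x: x % 2 == 0))          # e.g. {2, 4}
    print(dict(group_by(nums, lambda x: x % 2)))       # e.g. {1: {1, 3, 5}, 0: {2, 4}}
    print(fold_left(nums, 0, lambda acc, x: acc + x))  # 15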
| [
"functools.cache",
"dask.delayed",
"typing.TypeVar"
] | [((144, 156), 'typing.TypeVar', 'TypeVar', (['"""A"""'], {}), "('A')\n", (151, 156), False, 'from typing import Callable, Dict, Optional, TypeVar, Set, cast, Iterator\n'), ((161, 173), 'typing.TypeVar', 'TypeVar', (['"""B"""'], {}), "('B')\n", (168, 173), False, 'from typing import Callable, Dict, Optional, TypeVar, Set, cast, Iterator\n'), ((178, 190), 'typing.TypeVar', 'TypeVar', (['"""K"""'], {}), "('K')\n", (185, 190), False, 'from typing import Callable, Dict, Optional, TypeVar, Set, cast, Iterator\n'), ((195, 207), 'typing.TypeVar', 'TypeVar', (['"""U"""'], {}), "('U')\n", (202, 207), False, 'from typing import Callable, Dict, Optional, TypeVar, Set, cast, Iterator\n'), ((7537, 7555), 'functools.cache', 'functools.cache', (['f'], {}), '(f)\n', (7552, 7555), False, 'import functools\n'), ((8146, 8164), 'functools.cache', 'functools.cache', (['f'], {}), '(f)\n', (8161, 8164), False, 'import functools\n'), ((8693, 8711), 'functools.cache', 'functools.cache', (['p'], {}), '(p)\n', (8708, 8711), False, 'import functools\n'), ((9236, 9254), 'functools.cache', 'functools.cache', (['p'], {}), '(p)\n', (9251, 9254), False, 'import functools\n'), ((6094, 6109), 'dask.delayed', 'dask.delayed', (['p'], {}), '(p)\n', (6106, 6109), False, 'import dask\n'), ((6482, 6497), 'dask.delayed', 'dask.delayed', (['p'], {}), '(p)\n', (6494, 6497), False, 'import dask\n'), ((6939, 6954), 'dask.delayed', 'dask.delayed', (['f'], {}), '(f)\n', (6951, 6954), False, 'import dask\n'), ((5783, 5798), 'dask.delayed', 'dask.delayed', (['f'], {}), '(f)\n', (5795, 5798), False, 'import dask\n')] |
#!/usr/bin/python
from PIL import Image, ImageDraw, ImageFont
import math
import argparse
from datetime import date
import re
def get_lines(expected_line_number, text):
text = str(text)
words = text.split(' ')
max_char_number = math.ceil(len(text) / expected_line_number)
lines = []
line = ''
for word in words:
if len((line + ' ' + word).strip()) <= max_char_number:
line = (line + ' ' + word).strip()
else:
lines.append(line.strip())
line = word
if len(line) > 0:
lines.append(line)
if len(lines) > expected_line_number:
for i in range(expected_line_number, len(lines)):
lines[expected_line_number -
1] = (lines[expected_line_number - 1] + ' ' + lines.pop()).strip()
return lines
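# Example (illustrative, not part of the original script):
#   get_lines(2, 'Hello Wonderful World') -> ['Hello', 'Wonderful World']
# Text is wrapped greedily at ceil(len(text) / expected_line_number) characters
# per line, and any overflow lines are folded back into the last line.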
def get_font_size(lines, font_size):
fnt = ImageFont.truetype('arialbd.ttf', font_size)
fnt_sizes = []
for ln in lines:
fnt_sizes.append(fnt.getsize(ln))
size = [0, 0]
for sz in fnt_sizes:
size[0] = max(size[0], sz[0])
size[1] += sz[1]
return size, fnt_sizes
def draw_image(new_img, text, fgColor):
text = str(text)
draw = ImageDraw.Draw(new_img)
img_size = new_img.size
expected_line_number = 1
font_size = 64
lines = get_lines(expected_line_number, text)
fnt_sizes = []
fnt_size, fnt_sizes = get_font_size(lines, font_size)
while fnt_size[0] > img_size[0] or fnt_size[1] > img_size[1]:
if expected_line_number < 2:
expected_line_number += 1
lines = get_lines(expected_line_number, text)
else:
font_size -= 5
fnt_size, fnt_sizes = get_font_size(lines, font_size)
padding = 10
x = (img_size[0] - fnt_sizes[0][0]) / 2
y = (img_size[1] - fnt_size[1] - (len(fnt_sizes) - 1) * padding) / 2
fnt = ImageFont.truetype('arialbd.ttf', font_size)
offset_y = y
draw.text((x, y), lines[0], font=fnt, fill=fgColor)
offset_y += fnt_sizes[0][1] + padding
print(offset_y)
print(fnt_sizes)
for i in range(1, len(fnt_sizes)):
x = (img_size[0] - fnt_sizes[i][0]) / 2
draw.text((x, offset_y), lines[i], font=fnt, fill=fgColor)
offset_y += fnt_sizes[i][1] + padding
del draw
def new_image(width, height, text, bgColor, fgColor):
new_img = Image.new('RGB', (int(width), int(height)), bgColor)
draw_image(new_img, text, fgColor)
    new_text = re.sub(r'\W+', '-', text).lower()
new_img.save(r'../assets/%s-%s/banner.jpg' % (date.today().isoformat(), new_text.strip('-')))
del new_img
def new_image_with_file(fn):
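    # Note (added for clarity, not in the original script): each non-empty,
    # non-comment line of the file is expected to supply the five positional
    # arguments of new_image() as comma-separated fields:
    #   width,height,text,bgColor,fgColor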
with open(fn, encoding='utf-8') as f:
for l in f:
l = l.strip()
if l:
ls = l.split(',')
if '#' == l[0] or len(ls) < 2:
continue
new_image(*ls)
if '__main__' == __name__:
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--text', help='Text', required=True, type=str)
parser.add_argument('-f', '--foreground', help='Foreground Color',
default='white')
parser.add_argument('-b', '--background', help='Background Color',
default='green')
args = parser.parse_args()
new_image(1200, 1200, args.text, fgColor=args.foreground, bgColor=args.background)
| [
"argparse.ArgumentParser",
"PIL.ImageFont.truetype",
"PIL.ImageDraw.Draw",
"re.sub",
"datetime.date.today"
] | [((867, 911), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""arialbd.ttf"""', 'font_size'], {}), "('arialbd.ttf', font_size)\n", (885, 911), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((1201, 1224), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['new_img'], {}), '(new_img)\n', (1215, 1224), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((1876, 1920), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""arialbd.ttf"""', 'font_size'], {}), "('arialbd.ttf', font_size)\n", (1894, 1920), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((2934, 2959), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2957, 2959), False, 'import argparse\n'), ((2467, 2492), 're.sub', 're.sub', (['"""\\\\W+"""', '"""-"""', 'text'], {}), "('\\\\W+', '-', text)\n", (2473, 2492), False, 'import re\n'), ((2550, 2562), 'datetime.date.today', 'date.today', ([], {}), '()\n', (2560, 2562), False, 'from datetime import date\n')] |
##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: <NAME>
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
#
# Name:
# MetarMonitor.py
# GFS1-NHD:A6636.0000-SCRIPT;1.26
#
# Status:
# DELIVERED
#
# History:
# Revision 1.26 (DELIVERED)
# Created: 27-SEP-2009 22:55:47 OBERFIEL
# Last bit of heart surgery. TEMPO groups evaluated more
# frequently.
#
# Revision 1.25 (DELIVERED)
# Created: 20-AUG-2009 16:18:22 OBERFIEL
# Change for sake of making a change.
#
# Revision 1.24 (DELIVERED)
# Created: 17-APR-2009 12:07:51 OBERFIEL
# Now handles (ignores) AvnUnknwnPcp exceptions when checking
# TEMPO groups
#
# Revision 1.23 (DELIVERED)
# Created: 01-AUG-2008 15:44:47 OBERFIEL
# Synch'd up with changes in OB8.3.1
#
# Revision 1.22 (DELIVERED)
# Created: 28-JUL-2008 13:57:54 OBERFIEL
# Removed AFG specific feature. Replaced by the
# AirportOpsThresh rule.
#
# Revision 1.21 (DELIVERED)
# Created: 06-FEB-2008 08:52:29 GILMOREDM
#
# Revision 1.20 (DELIVERED)
# Created: 03-OCT-2007 13:57:56 OBERFIEL
# Added message when TPO is purple to include mention of
#
# Revision 1.19 (DELIVERED)
# Created: 16-MAY-2006 10:50:31 TROJAN
# spr 7146: added history button in TWEB Editor's statusbar,
# fixed spelling
#
# Revision 1.18 (DELIVERED)
# Created: 04-MAY-2006 14:16:16 TROJAN
# SPR7125: fixed weather check in TafQC, changes to
# checkTEMPO() in MetarMonitor
#
# Revision 1.17 (DELIVERED)
# Created: 04-MAY-2006 14:02:08 TROJAN
# SPR 7126: fixed weather check in TafQC, changes to
# checkTEMPO() in MetarMonitor
#
# Revision 1.16 (DELIVERED)
# Created: 23-APR-2006 11:54:00 TROJAN
# spr 7125 - changes to TEMPO and category monitoring
#
# Revision 1.15 (DELIVERED)
# Created: 23-APR-2006 11:45:01 TROJAN
# spr 7126 - fix to __checkTEMPO()
#
# Revision 1.14 (DELIVERED)
# Created: 23-APR-2006 10:52:20 TROJAN
# spr 7126 - changes to tempo and category alerts
#
# Revision 1.13 (DELIVERED)
# Created: 21-APR-2006 11:29:24 TROJAN
# spr 7124: added exception catching code ro compare()
#
# Revision 1.12 (APPROVED)
# Created: 20-APR-2006 15:47:32 TROJAN
# made compare() wrapper in base class handling exceptions
# renamed compare() to __compare()
#
# Revision 1.11 (DELIVERED)
# Created: 23-JAN-2006 08:23:15 TROJAN
# stdr 956
#
# Revision 1.10 (DELIVERED)
# Created: 06-JUL-2005 18:16:40 TROJAN
# spr 6548
#
# Revision 1.9 (DELIVERED)
# Created: 07-MAY-2005 11:35:41 OBERFIEL
# Added Item Header Block
#
# Revision 1.8 (DELIVERED)
# Created: 04-APR-2005 15:51:08 TROJAN
# spr 6775
#
# Revision 1.7 (DELIVERED)
# Created: 28-FEB-2005 21:37:47 TROJAN
# spr 6686
#
# Revision 1.6 (DELIVERED)
# Created: 14-FEB-2005 20:54:50 TROJAN
# spr 6649
#
# Revision 1.5 (APPROVED)
# Created: 30-SEP-2004 18:56:03 TROJAN
# stdr 874
#
# Revision 1.4 (APPROVED)
# Created: 19-AUG-2004 20:51:36 OBERFIEL
# Change code
#
# Revision 1.3 (APPROVED)
# Created: 01-JUL-2004 14:59:41 OBERFIEL
# Update
#
# Revision 1.2 (DELIVERED)
# Created: 08-JAN-2004 21:40:10 PCMS
# Updating for code cleanup
#
# Revision 1.1 (APPROVED)
# Created: 06-NOV-2003 16:45:55 OBERFIEL
# date and time created -2147483647/-2147483648/-2147481748
# -2147483648:-2147483648:-2147483648 by oberfiel
#
# Change Document History:
# 1:
# Change Document: GFS1-NHD_SPR_7429
# Action Date: 02-OCT-2009 15:11:26
# Relationship Type: In Response to
# Status: TEST
# Title: AvnFPS: Unable to properly set QC functions in Site Info Editor
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# 06JUL2012 15153 zhao Retrieve latest METAR record in database
# Jan 19, 2018 6957 tgurney Log missing metar message at info level
#
##
# This is a base file that is not intended to be overridden.
##
import logging, time
import Avn, AvnLib, MonitorP, MetarMonitorP
_Logger = logging.getLogger(Avn.CATEGORY)
import MetarData
###############################################################################
class Monitor(MetarMonitorP.Monitor):
Source = 'mtrs'
def __checkTEMPO(self, tempo, mtrs):
"""Checks whether TEMPO rules verify at least one METAR"""
d = {}
for rule in self.rules:
for mtr in mtrs:
try:
if rule.severity <= self.args.get('tempolevel', 2) or \
not rule.method(tempo, AvnLib.makeMetarData(mtr)):
break
except (Avn.AvnMissing,Avn.AvnUnknwnPcp):
break
else:
if rule.type != 'wx' or 'wx' in tempo:
d[rule.type] = 1
return d
def __compare(self, taf):
now = time.time()
# metars = Globals.DRC.getMetars(self.info['sites']['metar'], True,
# now-15000.0)
# For DR15153: use 'maxSize=0' to indicate that the latest record is to be retrieved
metars = MetarData.retrieve(self.info['sites']['metar'],0)
msg = None
result = {}
if not metars:
msg = 'Missing METAR'
_Logger.info('%s for %s', msg, self.info['sites']['metar'])
else:
rpt = metars[0]
result['header'] = rpt.header
result['text'] = rpt.text
if 'fatal' in rpt.dcd:
msg = 'Cannot decode METAR'
_Logger.error('%s for %s:\n%s', msg,
self.info['sites']['metar'], rpt.text)
if msg:
result['status'] = self.setMissing(msg)
return result
result['dcd'] = rpt.dcd
try:
delta = MonitorP.applyRules(self.rules, now, taf.hourly.get(now),
AvnLib.makeMetarData(rpt.dcd))
result['status'] = MonitorP.addMessages(self.args['items'][1:],
MonitorP.addRules(self.args['items'][1:],
self.rules, delta))
except IndexError:
result['status'] = self.setNIL()
return result
#
# Begin check on TEMPO conditions
for g in taf.dcd['group']:
if g['prev']['time']['from'] <= now < g['prev']['time']['to']:
break
result['status']['tempo'] = Avn.Bunch(severity=0, msg='OK')
#
# If a TEMPO group is still in effect...
if 'ocnl' in g and g['ocnl']['type'] == 'TEMPO' and now < g['ocnl']['time']['to']:
tf = g['ocnl']['time']['from']
halflife = (g['ocnl']['time']['to'] - g['ocnl']['time']['from'])/2.0
delta = now-tf
#
# If an hour has passed or more than half of the TEMPO group valid time...
if delta >= 3600. or delta >= halflife:
tempo = AvnLib.TafData.makeTempo(g)
if not tempo:
return result
tmpdcd = [m.dcd for m in metars if 'fatal' not in m.dcd]
dt = now
delta = 0
tdict = {}
#
# Generally 30 up to 90 minutes makes sense.
tempograceperiod = min(max(int(self.args.get('tempograceperiod','3600')),\
1800),5400)
while (dt > tf):
dt -= tempograceperiod
#
# select observations that fall within grace period window
dcds = [d2 for d1, d2 in Avn.window(tmpdcd) if d1['itime']['value'] > dt]
try:
dcds.insert(0,tmpdcd[0])
except IndexError:
break
#
d = self.__checkTEMPO(tempo, dcds)
if d:
tdict.update(d)
delta = now - max(tf,dcds[-1]['itime']['value'])
#
# Go back further in time for more non-TEMPO events
try:
while(dcds.pop()):
tmpdcd.pop(0)
except IndexError:
pass
else:
break
#
# If TEMPO events not found in the recent past...
if tdict:
events = [self.args['labels'].get(x, x) for x in tdict.keys()]
if delta <= 3630.0:
msg = '%s events did not occur for %02.0f mins' % \
(' '.join(events), delta/60.0)
else:
msg = '%s events did not occur for %dh %02.0fm' % \
(' '.join(events),delta//3600,(delta/60)%60)
if delta >= halflife: # more than half of valid time
s = 4 # Orange
else:
s = 3 # Yellow
result['status']['tempo'] = Avn.Bunch(severity=s, msg=msg)
return result
| [
"logging.getLogger",
"MonitorP.addRules",
"Avn.window",
"AvnLib.TafData.makeTempo",
"AvnLib.makeMetarData",
"Avn.Bunch",
"time.time",
"MetarData.retrieve"
] | [((5628, 5659), 'logging.getLogger', 'logging.getLogger', (['Avn.CATEGORY'], {}), '(Avn.CATEGORY)\n', (5645, 5659), False, 'import logging, time\n'), ((6462, 6473), 'time.time', 'time.time', ([], {}), '()\n', (6471, 6473), False, 'import logging, time\n'), ((6707, 6757), 'MetarData.retrieve', 'MetarData.retrieve', (["self.info['sites']['metar']", '(0)'], {}), "(self.info['sites']['metar'], 0)\n", (6725, 6757), False, 'import MetarData\n'), ((8107, 8138), 'Avn.Bunch', 'Avn.Bunch', ([], {'severity': '(0)', 'msg': '"""OK"""'}), "(severity=0, msg='OK')\n", (8116, 8138), False, 'import Avn, AvnLib, MonitorP, MetarMonitorP\n'), ((7497, 7526), 'AvnLib.makeMetarData', 'AvnLib.makeMetarData', (['rpt.dcd'], {}), '(rpt.dcd)\n', (7517, 7526), False, 'import Avn, AvnLib, MonitorP, MetarMonitorP\n'), ((7656, 7716), 'MonitorP.addRules', 'MonitorP.addRules', (["self.args['items'][1:]", 'self.rules', 'delta'], {}), "(self.args['items'][1:], self.rules, delta)\n", (7673, 7716), False, 'import Avn, AvnLib, MonitorP, MetarMonitorP\n'), ((8617, 8644), 'AvnLib.TafData.makeTempo', 'AvnLib.TafData.makeTempo', (['g'], {}), '(g)\n', (8641, 8644), False, 'import Avn, AvnLib, MonitorP, MetarMonitorP\n'), ((10849, 10879), 'Avn.Bunch', 'Avn.Bunch', ([], {'severity': 's', 'msg': 'msg'}), '(severity=s, msg=msg)\n', (10858, 10879), False, 'import Avn, AvnLib, MonitorP, MetarMonitorP\n'), ((9308, 9326), 'Avn.window', 'Avn.window', (['tmpdcd'], {}), '(tmpdcd)\n', (9318, 9326), False, 'import Avn, AvnLib, MonitorP, MetarMonitorP\n'), ((6148, 6173), 'AvnLib.makeMetarData', 'AvnLib.makeMetarData', (['mtr'], {}), '(mtr)\n', (6168, 6173), False, 'import Avn, AvnLib, MonitorP, MetarMonitorP\n')] |
import math
# Global Vars
TICKET_PRICE = 10
SERVICE_CHARGE = 2
tickets_remaining = 100
def get_tickets_remaining():
return tickets_remaining
# Calculate the price function
def calculate_ticket_price(tickets):
# adding service charge of $2.00
return math.ceil(tickets * TICKET_PRICE) + SERVICE_CHARGE
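# Example (illustrative): calculate_ticket_price(3) == math.ceil(3 * 10) + 2 == 32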
while tickets_remaining >= 1:
    print('There are {0} tickets remaining.'.format(get_tickets_remaining()))
    current_user = input('Please enter your name, ')
    print('Welcome, {}'.format(current_user))
num_tickets = input('How many tickets would you like to purchase? ')
try:
num_tickets = int(num_tickets)
if num_tickets > tickets_remaining:
raise ValueError('There are only {0} tickets remaining'.format(tickets_remaining))
except ValueError as err:
print('oh no, we ran into an issue. {0}. Please try again'.format(err))
else:
print('That would be ${0}'.format(calculate_ticket_price(num_tickets)))
should_proceed = input('Would you like to purchase? Y/N, ')
if should_proceed.lower() == 'y':
# TODO: Gather CC info and process payment
print('SOLD!')
tickets_remaining -= num_tickets
else:
print('Thank you anyways, {0}'.format(current_user))
# notify that we are sold out
print('Sorry we are all sold out! ')
| [
"math.ceil"
] | [((265, 298), 'math.ceil', 'math.ceil', (['(tickets * TICKET_PRICE)'], {}), '(tickets * TICKET_PRICE)\n', (274, 298), False, 'import math\n')] |
#
# Copyright 2021 Ocean Protocol Foundation
# SPDX-License-Identifier: Apache-2.0
#
import json
from ocean_lib.web3_internal.account import Account
from tests.resources.ddo_helpers import get_resource_path
def test_account_properties_from_file(alice_account):
key_file = get_resource_path("keys", "key_file_2.json")
account = Account(key_file=key_file, password="<PASSWORD>", address="0x0")
assert json.loads(account.key)["id"] == "<KEY>"
assert account.private_key is None, "The private key can be shown."
assert account.key == account._encrypted_key
assert account.key_file == str(key_file)
| [
"json.loads",
"tests.resources.ddo_helpers.get_resource_path",
"ocean_lib.web3_internal.account.Account"
] | [((280, 324), 'tests.resources.ddo_helpers.get_resource_path', 'get_resource_path', (['"""keys"""', '"""key_file_2.json"""'], {}), "('keys', 'key_file_2.json')\n", (297, 324), False, 'from tests.resources.ddo_helpers import get_resource_path\n'), ((339, 403), 'ocean_lib.web3_internal.account.Account', 'Account', ([], {'key_file': 'key_file', 'password': '"""<PASSWORD>"""', 'address': '"""0x0"""'}), "(key_file=key_file, password='<PASSWORD>', address='0x0')\n", (346, 403), False, 'from ocean_lib.web3_internal.account import Account\n'), ((415, 438), 'json.loads', 'json.loads', (['account.key'], {}), '(account.key)\n', (425, 438), False, 'import json\n')] |
#!/usr/bin/env python3
"""
File: isc_clause_key.py
Clause: keys
Title: Clause statement for key
Description: Provides key-related grammar in PyParsing engine
for ISC-configuration style
"""
from pyparsing import Word, alphanums, Group, Keyword, ZeroOrMore
from bind9_parser.isc_utils import semicolon, lbrack, rbrack, key_id, key_secret
# NOTE: If any declaration here is to be used OUTSIDE of the 'keys' clause,
# it should instead be defined in isc_utils.py
key_algorithm_name = Word(alphanums + '-')('algorithm')
key_algorithm_name.setName('<key-algorithm>')
# algorithm <string>;
key_algorithm_element = (
Keyword('algorithm').suppress()
- key_algorithm_name('algorithm')
+ semicolon
)
key_algorithm_element.setName('algorithm <key-algorithm>;')
# secret <key_secret>;
key_secret_element = (
Keyword('secret').suppress()
- key_secret('secret')
+ semicolon
)
key_secret_element.setName('secret <key_secret>;')
# key <key-name> { algorithm <string>; secret <key-secret>; };
# key key_id {
# algorithm algorithm_id;
# secret secret_string;
# };
clause_stmt_key_standalone = (
Keyword('key').suppress()
- Group(
key_id('key_id')
+ lbrack
- key_algorithm_element
- key_secret_element
+ rbrack
)
+ semicolon
)('key')
# {0-*} statement
clause_stmt_key_series = (
ZeroOrMore(
clause_stmt_key_standalone
)
)('key')
clause_stmt_key_series.setName('key <key-name> { algorithm <string>; secret <key-secret>; };')
| [
"bind9_parser.isc_utils.key_secret",
"pyparsing.Word",
"pyparsing.ZeroOrMore",
"pyparsing.Keyword",
"bind9_parser.isc_utils.key_id"
] | [((499, 520), 'pyparsing.Word', 'Word', (["(alphanums + '-')"], {}), "(alphanums + '-')\n", (503, 520), False, 'from pyparsing import Word, alphanums, Group, Keyword, ZeroOrMore\n'), ((1394, 1432), 'pyparsing.ZeroOrMore', 'ZeroOrMore', (['clause_stmt_key_standalone'], {}), '(clause_stmt_key_standalone)\n', (1404, 1432), False, 'from pyparsing import Word, alphanums, Group, Keyword, ZeroOrMore\n'), ((887, 907), 'bind9_parser.isc_utils.key_secret', 'key_secret', (['"""secret"""'], {}), "('secret')\n", (897, 907), False, 'from bind9_parser.isc_utils import semicolon, lbrack, rbrack, key_id, key_secret\n'), ((637, 657), 'pyparsing.Keyword', 'Keyword', (['"""algorithm"""'], {}), "('algorithm')\n", (644, 657), False, 'from pyparsing import Word, alphanums, Group, Keyword, ZeroOrMore\n'), ((848, 865), 'pyparsing.Keyword', 'Keyword', (['"""secret"""'], {}), "('secret')\n", (855, 865), False, 'from pyparsing import Word, alphanums, Group, Keyword, ZeroOrMore\n'), ((1154, 1168), 'pyparsing.Keyword', 'Keyword', (['"""key"""'], {}), "('key')\n", (1161, 1168), False, 'from pyparsing import Word, alphanums, Group, Keyword, ZeroOrMore\n'), ((1201, 1217), 'bind9_parser.isc_utils.key_id', 'key_id', (['"""key_id"""'], {}), "('key_id')\n", (1207, 1217), False, 'from bind9_parser.isc_utils import semicolon, lbrack, rbrack, key_id, key_secret\n')] |
import pathlib
import subprocess
import tempfile
from openforcefield.utils import temporary_cd
from openff.cli import __file__ as cli_path
# TODO: Run tests from tempdirs
CLI_ROOT = pathlib.Path(cli_path).joinpath("../../../").resolve()
class TestCLICalls:
def call(self, cmd, raise_err=True):
"""Helper function to execute direct CLI calls"""
if type(cmd) == str:
cmd = cmd.split()
with tempfile.TemporaryDirectory() as tmp_dir:
with temporary_cd(tmp_dir):
proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
out, err = proc.communicate()
if err and raise_err:
raise Exception(err)
return out.decode(), err.decode()
class TestCheckVersionsCalls(TestCLICalls):
def test_check_versions(self):
"""Test some basic behavior of check_versions.py"""
from openforcefield import __version__ as toolkit_version
from openff.cli import __version__ as cli_version
out, _ = self.call(f"python {CLI_ROOT}/openff/cli/check_versions.py")
# TODO: Use regex to connect package names with versions
assert toolkit_version in out
assert cli_version in out
class TestGenerateConformersCalls(TestCLICalls):
def test_unsupported_toolkit(self):
"""Ensure that requesting an unsupported toolkit is caught"""
# TODO: This should maybe fail before checking the toolkit,
# based on missing required arguments
_, err = self.call(
(
f"python {CLI_ROOT}/openff/cli/generate_conformers.py "
"--forcefield openff-1.0.0 --toolkit magic "
"--molecule molecule.sdf"
),
raise_err=False,
)
assert "openff.cli.utils.exceptions.UnsupportedToolkitError" in err
assert "magic" in err
| [
"tempfile.TemporaryDirectory",
"openforcefield.utils.temporary_cd",
"subprocess.Popen",
"pathlib.Path"
] | [((434, 463), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (461, 463), False, 'import tempfile\n'), ((185, 207), 'pathlib.Path', 'pathlib.Path', (['cli_path'], {}), '(cli_path)\n', (197, 207), False, 'import pathlib\n'), ((493, 514), 'openforcefield.utils.temporary_cd', 'temporary_cd', (['tmp_dir'], {}), '(tmp_dir)\n', (505, 514), False, 'from openforcefield.utils import temporary_cd\n'), ((539, 608), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n', (555, 608), False, 'import subprocess\n')] |
from canvasxpress.canvas import CanvasXpress
def generate_canvasxpress_code_from_json_file(
cx_json_path: str,
document_includes: bool = True,
document_render: bool = True,
document_jupyter_render=False
) -> str:
"""
Generates a string with a CanvasXPress in Python declaration using a
CanvasXpress reproducible research JSON stored in a file.
:param cx_json_path: `str`
A valid path to the reproducible JSON text from which a CanvasXPress
object is to be built and then converted into example code.
:param document_includes: `bool`
Default `True`. Indicate if include headers should be prefixed.
:param document_render: `bool`
Default `True`. Indicate if rendering should be included in the
example code.
:param document_jupyter_render: `bool`
Default `False`. Indicate if Jupyter rendering should be performed;
otherwise, popup rendering will suffixed.
:returns: `str`
A string with the code example.
"""
with open(cx_json_path, 'r') as cx_json_file:
cx_json = cx_json_file.read()
return generate_canvasxpress_code(
CanvasXpress.from_reproducible_json(cx_json),
document_includes,
document_render,
document_jupyter_render
)
def generate_canvasxpress_code_from_json(
cx_json: str,
document_includes: bool = True,
document_render: bool = True,
document_jupyter_render=False
) -> str:
"""
Generates a string with a CanvasXPress in Python declaration using a
CanvasXpress reproducible research JSON.
:param cx_json: `str`
The reproducible JSON text from which a CanvasXPress object is to be
built and then converted into example code.
:param document_includes: `bool`
Default `True`. Indicate if include headers should be prefixed.
:param document_render: `bool`
Default `True`. Indicate if rendering should be included in the
example code.
:param document_jupyter_render: `bool`
Default `False`. Indicate if Jupyter rendering should be performed;
otherwise, popup rendering will suffixed.
:returns: `str`
A string with the code example.
"""
return generate_canvasxpress_code(
CanvasXpress.from_reproducible_json(cx_json),
document_includes,
document_render,
document_jupyter_render
)
def generate_canvasxpress_code(
cx: CanvasXpress,
document_includes: bool = True,
document_render: bool = True,
document_jupyter_render=False,
) -> str:
"""
Generates a string with a CanvasXPress in Python declaration.
:param cx: `CanvasXpress`
The `CanvasXpress` object from which to generate the example code.
:param document_includes: `bool`
Default `True`. Indicate if include headers should be prefixed.
:param document_render: `bool`
Default `True`. Indicate if rendering should be included in the
example code.
:param document_jupyter_render: `bool`
Default `False`. Indicate if Jupyter rendering should be performed;
otherwise, popup rendering will suffixed.
:returns: `str`
A string with the code example.
"""
example_text = ""
if document_includes:
example_text += "from canvasxpress.canvas import CanvasXpress \n"
example_text += "from canvasxpress.js.collection import CXEvents \n"
if document_render:
if document_jupyter_render:
example_text += "from canvasxpress.render.jupyter" \
" import CXNoteBook \n"
else:
example_text += "from canvasxpress.render.popup" \
" import CXBrowserPopup \n"
example_text += "\n"
example_text += "cx = " + repr(cx)
if document_render:
if document_jupyter_render:
example_text += "\n"
example_text += "display = CXNoteBook(cx) \n"
else:
example_text += "\n"
example_text += "display = CXBrowserPopup(cx) \n"
example_text += "display.render() \n"
return example_text
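# Illustrative usage (a sketch, not part of the original module; assumes that
# "chart.json" holds CanvasXpress reproducible-research JSON):
#
#     code = generate_canvasxpress_code_from_json_file("chart.json")
#     print(code)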
| [
"canvasxpress.canvas.CanvasXpress.from_reproducible_json"
] | [((2338, 2382), 'canvasxpress.canvas.CanvasXpress.from_reproducible_json', 'CanvasXpress.from_reproducible_json', (['cx_json'], {}), '(cx_json)\n', (2373, 2382), False, 'from canvasxpress.canvas import CanvasXpress\n'), ((1188, 1232), 'canvasxpress.canvas.CanvasXpress.from_reproducible_json', 'CanvasXpress.from_reproducible_json', (['cx_json'], {}), '(cx_json)\n', (1223, 1232), False, 'from canvasxpress.canvas import CanvasXpress\n')] |
"""
Copyright (C) 2006, 2007 <NAME>
This file is part of QuantLib, a free-software/open-source library
for financial quantitative analysts and developers - http://quantlib.org/
QuantLib is free software: you can redistribute it and/or modify it
under the terms of the QuantLib license. You should have received a
copy of the license along with this program; if not, please email
<<EMAIL>>. The license is also available online at
<http://quantlib.org/license.shtml>.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the license for more details.
"""
"""Generate source code for loop functions."""
from gensrc.addins import addin
from gensrc.utilities import outputfile
from gensrc.utilities import log
from gensrc.categories import category
from gensrc.configuration import environment
class Loop(addin.Addin):
"""Generate source code for loop functions."""
#############################################
# class variables
#############################################
FUNC_BIND = '''\
typedef boost::_bi::bind_t<
%(returnType)s,
%(bindPointer)s,
%(bindList)s
%(functionName)sBind;'''
FUNC_SIG = '''\
typedef %(returnType)s
(%(functionType)s::* %(functionSignature)s)(%(signatureArguments)s)%(const)s;'''
BUF_LOOP = '''
// %(functionName)s
%(functionBind)s
%(functionSignature)s
'''
#############################################
# public interface
#############################################
def generate(self, categoryList, enumerationList):
"""Generate source code for Loops."""
log.Log.instance().logMessage(' begin generating Loops ...')
for cat in categoryList.categories('*', self.coreCategories_, self.addinCategories_):
if cat.containsLoopFunction():
self.generateLoops(cat)
log.Log.instance().logMessage(' done generating Loops.')
def generateLoop(self, func):
"""Generate loop typedefs for given function."""
returnType = self.loopDatatype_.apply(func.returnValue())
functionBind = Loop.FUNC_BIND % {
'bindList' : func.behavior().bindList(self.bindList_),
'bindPointer' : func.behavior().bindPointer(self.bindPointer_, returnType),
'functionName' : func.name(),
'returnType' : returnType }
if func.behavior().functionSignature_:
if func.const():
const = ' const'
else:
const = ''
functionSignature = Loop.FUNC_SIG % {
'const' : const,
'functionSignature' : func.behavior().functionSignature_,
'functionType' : func.type(),
'signatureArguments' : func.parameterList().generate(self.signatureArguments_),
'returnType' : returnType }
else:
functionSignature = ''
return Loop.BUF_LOOP % {
'functionBind' : functionBind,
'functionName' : func.name(),
'functionSignature' : functionSignature }
def generateLoops(self, cat):
"""Generate type definitions required for source code for loop functions."""
buf = ''
for func in cat.functions('*'):
if func.loopParameter():
buf += self.generateLoop(func)
self.bufferFile_.set({
'buffer' : buf,
'namespace' : environment.config().namespaceObjects() })
fileName = self.rootPath_ + 'loop_' + cat.name() + '.hpp'
outputfile.OutputFile(self, fileName, self.copyright_, self.bufferFile_)
| [
"gensrc.configuration.environment.config",
"gensrc.utilities.log.Log.instance",
"gensrc.utilities.outputfile.OutputFile"
] | [((3742, 3814), 'gensrc.utilities.outputfile.OutputFile', 'outputfile.OutputFile', (['self', 'fileName', 'self.copyright_', 'self.bufferFile_'], {}), '(self, fileName, self.copyright_, self.bufferFile_)\n', (3763, 3814), False, 'from gensrc.utilities import outputfile\n'), ((1807, 1825), 'gensrc.utilities.log.Log.instance', 'log.Log.instance', ([], {}), '()\n', (1823, 1825), False, 'from gensrc.utilities import log\n'), ((2053, 2071), 'gensrc.utilities.log.Log.instance', 'log.Log.instance', ([], {}), '()\n', (2069, 2071), False, 'from gensrc.utilities import log\n'), ((3625, 3645), 'gensrc.configuration.environment.config', 'environment.config', ([], {}), '()\n', (3643, 3645), False, 'from gensrc.configuration import environment\n')] |
import voluptuous as vol
import esphomeyaml.config_validation as cv
from esphomeyaml import pins
from esphomeyaml.const import CONF_CLK_PIN, CONF_ID, CONF_MISO_PIN, CONF_MOSI_PIN
from esphomeyaml.helpers import App, Pvariable, esphomelib_ns, gpio_input_pin_expression, \
gpio_output_pin_expression, add, setup_component, Component
SPIComponent = esphomelib_ns.class_('SPIComponent', Component)
SPIDevice = esphomelib_ns.class_('SPIDevice')
SPI_SCHEMA = vol.All(vol.Schema({
cv.GenerateID(): cv.declare_variable_id(SPIComponent),
vol.Required(CONF_CLK_PIN): pins.gpio_output_pin_schema,
vol.Optional(CONF_MISO_PIN): pins.gpio_input_pin_schema,
vol.Optional(CONF_MOSI_PIN): pins.gpio_output_pin_schema,
}), cv.has_at_least_one_key(CONF_MISO_PIN, CONF_MOSI_PIN))
CONFIG_SCHEMA = vol.All(cv.ensure_list, [SPI_SCHEMA])
def to_code(config):
for conf in config:
for clk in gpio_output_pin_expression(conf[CONF_CLK_PIN]):
yield
rhs = App.init_spi(clk)
spi = Pvariable(conf[CONF_ID], rhs)
if CONF_MISO_PIN in conf:
for miso in gpio_input_pin_expression(conf[CONF_MISO_PIN]):
yield
add(spi.set_miso(miso))
if CONF_MOSI_PIN in conf:
for mosi in gpio_input_pin_expression(conf[CONF_MOSI_PIN]):
yield
add(spi.set_mosi(mosi))
setup_component(spi, conf)
BUILD_FLAGS = '-DUSE_SPI'
| [
"esphomeyaml.helpers.App.init_spi",
"esphomeyaml.config_validation.GenerateID",
"voluptuous.Required",
"esphomeyaml.config_validation.has_at_least_one_key",
"esphomeyaml.helpers.gpio_output_pin_expression",
"esphomeyaml.helpers.setup_component",
"esphomeyaml.helpers.esphomelib_ns.class_",
"esphomeyaml.helpers.Pvariable",
"esphomeyaml.helpers.gpio_input_pin_expression",
"esphomeyaml.config_validation.declare_variable_id",
"voluptuous.Optional",
"voluptuous.All"
] | [((352, 399), 'esphomeyaml.helpers.esphomelib_ns.class_', 'esphomelib_ns.class_', (['"""SPIComponent"""', 'Component'], {}), "('SPIComponent', Component)\n", (372, 399), False, 'from esphomeyaml.helpers import App, Pvariable, esphomelib_ns, gpio_input_pin_expression, gpio_output_pin_expression, add, setup_component, Component\n'), ((412, 445), 'esphomeyaml.helpers.esphomelib_ns.class_', 'esphomelib_ns.class_', (['"""SPIDevice"""'], {}), "('SPIDevice')\n", (432, 445), False, 'from esphomeyaml.helpers import App, Pvariable, esphomelib_ns, gpio_input_pin_expression, gpio_output_pin_expression, add, setup_component, Component\n'), ((800, 837), 'voluptuous.All', 'vol.All', (['cv.ensure_list', '[SPI_SCHEMA]'], {}), '(cv.ensure_list, [SPI_SCHEMA])\n', (807, 837), True, 'import voluptuous as vol\n'), ((728, 781), 'esphomeyaml.config_validation.has_at_least_one_key', 'cv.has_at_least_one_key', (['CONF_MISO_PIN', 'CONF_MOSI_PIN'], {}), '(CONF_MISO_PIN, CONF_MOSI_PIN)\n', (751, 781), True, 'import esphomeyaml.config_validation as cv\n'), ((904, 950), 'esphomeyaml.helpers.gpio_output_pin_expression', 'gpio_output_pin_expression', (['conf[CONF_CLK_PIN]'], {}), '(conf[CONF_CLK_PIN])\n', (930, 950), False, 'from esphomeyaml.helpers import App, Pvariable, esphomelib_ns, gpio_input_pin_expression, gpio_output_pin_expression, add, setup_component, Component\n'), ((984, 1001), 'esphomeyaml.helpers.App.init_spi', 'App.init_spi', (['clk'], {}), '(clk)\n', (996, 1001), False, 'from esphomeyaml.helpers import App, Pvariable, esphomelib_ns, gpio_input_pin_expression, gpio_output_pin_expression, add, setup_component, Component\n'), ((1016, 1045), 'esphomeyaml.helpers.Pvariable', 'Pvariable', (['conf[CONF_ID]', 'rhs'], {}), '(conf[CONF_ID], rhs)\n', (1025, 1045), False, 'from esphomeyaml.helpers import App, Pvariable, esphomelib_ns, gpio_input_pin_expression, gpio_output_pin_expression, add, setup_component, Component\n'), ((1383, 1409), 'esphomeyaml.helpers.setup_component', 'setup_component', (['spi', 'conf'], {}), '(spi, conf)\n', (1398, 1409), False, 'from esphomeyaml.helpers import App, Pvariable, esphomelib_ns, gpio_input_pin_expression, gpio_output_pin_expression, add, setup_component, Component\n'), ((485, 500), 'esphomeyaml.config_validation.GenerateID', 'cv.GenerateID', ([], {}), '()\n', (498, 500), True, 'import esphomeyaml.config_validation as cv\n'), ((544, 570), 'voluptuous.Required', 'vol.Required', (['CONF_CLK_PIN'], {}), '(CONF_CLK_PIN)\n', (556, 570), True, 'import voluptuous as vol\n'), ((605, 632), 'voluptuous.Optional', 'vol.Optional', (['CONF_MISO_PIN'], {}), '(CONF_MISO_PIN)\n', (617, 632), True, 'import voluptuous as vol\n'), ((666, 693), 'voluptuous.Optional', 'vol.Optional', (['CONF_MOSI_PIN'], {}), '(CONF_MOSI_PIN)\n', (678, 693), True, 'import voluptuous as vol\n'), ((502, 538), 'esphomeyaml.config_validation.declare_variable_id', 'cv.declare_variable_id', (['SPIComponent'], {}), '(SPIComponent)\n', (524, 538), True, 'import esphomeyaml.config_validation as cv\n'), ((1104, 1150), 'esphomeyaml.helpers.gpio_input_pin_expression', 'gpio_input_pin_expression', (['conf[CONF_MISO_PIN]'], {}), '(conf[CONF_MISO_PIN])\n', (1129, 1150), False, 'from esphomeyaml.helpers import App, Pvariable, esphomelib_ns, gpio_input_pin_expression, gpio_output_pin_expression, add, setup_component, Component\n'), ((1268, 1314), 'esphomeyaml.helpers.gpio_input_pin_expression', 'gpio_input_pin_expression', (['conf[CONF_MOSI_PIN]'], {}), '(conf[CONF_MOSI_PIN])\n', (1293, 1314), False, 'from esphomeyaml.helpers 
import App, Pvariable, esphomelib_ns, gpio_input_pin_expression, gpio_output_pin_expression, add, setup_component, Component\n')] |
import cv2 as cv
img = cv.imread('../me1.jpg')
cv.imshow('me', img)
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
cv.imshow('gray', gray)
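# Simple global thresholding: pixels brighter than 120 are set to 200, the rest
# to 0; the THRESH_BINARY_INV call below inverts that mapping.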
threshold, thresholded = cv.threshold(gray, 120, 200, cv.THRESH_BINARY)
cv.imshow('thresholded 120', thresholded)
threshold, thresholded_inv = cv.threshold(gray, 120, 200, cv.THRESH_BINARY_INV)
cv.imshow('thresholded inv', thresholded_inv)
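# Adaptive thresholding: each pixel is compared against a Gaussian-weighted mean
# of its 15x15 neighbourhood minus the constant 5, rather than one global value.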
adaptive_thresh = cv.adaptiveThreshold(gray, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 15, 5)
cv.imshow('adaptive threshold', adaptive_thresh)
cv.waitKey(0) | [
"cv2.threshold",
"cv2.imshow",
"cv2.adaptiveThreshold",
"cv2.waitKey",
"cv2.cvtColor",
"cv2.imread"
] | [((24, 47), 'cv2.imread', 'cv.imread', (['"""../me1.jpg"""'], {}), "('../me1.jpg')\n", (33, 47), True, 'import cv2 as cv\n'), ((48, 68), 'cv2.imshow', 'cv.imshow', (['"""me"""', 'img'], {}), "('me', img)\n", (57, 68), True, 'import cv2 as cv\n'), ((77, 112), 'cv2.cvtColor', 'cv.cvtColor', (['img', 'cv.COLOR_BGR2GRAY'], {}), '(img, cv.COLOR_BGR2GRAY)\n', (88, 112), True, 'import cv2 as cv\n'), ((113, 136), 'cv2.imshow', 'cv.imshow', (['"""gray"""', 'gray'], {}), "('gray', gray)\n", (122, 136), True, 'import cv2 as cv\n'), ((163, 209), 'cv2.threshold', 'cv.threshold', (['gray', '(120)', '(200)', 'cv.THRESH_BINARY'], {}), '(gray, 120, 200, cv.THRESH_BINARY)\n', (175, 209), True, 'import cv2 as cv\n'), ((210, 251), 'cv2.imshow', 'cv.imshow', (['"""thresholded 120"""', 'thresholded'], {}), "('thresholded 120', thresholded)\n", (219, 251), True, 'import cv2 as cv\n'), ((282, 332), 'cv2.threshold', 'cv.threshold', (['gray', '(120)', '(200)', 'cv.THRESH_BINARY_INV'], {}), '(gray, 120, 200, cv.THRESH_BINARY_INV)\n', (294, 332), True, 'import cv2 as cv\n'), ((333, 378), 'cv2.imshow', 'cv.imshow', (['"""thresholded inv"""', 'thresholded_inv'], {}), "('thresholded inv', thresholded_inv)\n", (342, 378), True, 'import cv2 as cv\n'), ((398, 490), 'cv2.adaptiveThreshold', 'cv.adaptiveThreshold', (['gray', '(255)', 'cv.ADAPTIVE_THRESH_GAUSSIAN_C', 'cv.THRESH_BINARY', '(15)', '(5)'], {}), '(gray, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.\n THRESH_BINARY, 15, 5)\n', (418, 490), True, 'import cv2 as cv\n'), ((486, 534), 'cv2.imshow', 'cv.imshow', (['"""adaptive threshold"""', 'adaptive_thresh'], {}), "('adaptive threshold', adaptive_thresh)\n", (495, 534), True, 'import cv2 as cv\n'), ((536, 549), 'cv2.waitKey', 'cv.waitKey', (['(0)'], {}), '(0)\n', (546, 549), True, 'import cv2 as cv\n')] |
import pyredner
import redner
import torch
import math
# Estimate the pose of a teapot object.
# This tutorial demonstrates:
# 1. how to render G buffer, such as depth, normal, albedo
# 2. how to use G buffer to do "deferred rendering" in pytorch, which bypasses the main path tracing
# process in redner, resulting in fast approximate rendering
# You might want to read the wikipedia page first if you are not familiar with the concept
# of deferred rendering: https://en.wikipedia.org/wiki/Deferred_shading
#
# Like the second tutorial, we first render a target image, then perturb the
# rotation/translation parameters and optimize to match the target.
# Use GPU if available
pyredner.set_use_gpu(torch.cuda.is_available())
# Load from the teapot Wavefront object file just like tutorial 02
material_map, mesh_list, light_map = pyredner.load_obj('teapot.obj')
# Compute shading normal
for _, mesh in mesh_list:
mesh.normals = pyredner.compute_vertex_normal(mesh.vertices, mesh.indices)
# Setup camera
cam = pyredner.Camera(position = torch.tensor([0.0, 30.0, 200.0]),
look_at = torch.tensor([0.0, 30.0, 0.0]),
up = torch.tensor([0.0, 1.0, 0.0]),
fov = torch.tensor([45.0]), # in degree
clip_near = 1e-2, # needs to > 0
resolution = (256, 256),
fisheye = False)
# Get a list of materials from material_map
material_id_map = {}
materials = []
count = 0
for key, value in material_map.items():
material_id_map[key] = count
count += 1
materials.append(value)
# Get a list of shapes
shapes = []
for mtl_name, mesh in mesh_list:
shapes.append(pyredner.Shape(\
vertices = mesh.vertices,
indices = mesh.indices,
uvs = mesh.uvs,
normals = mesh.normals,
material_id = material_id_map[mtl_name]))
# Construct the scene
# Unlike previous tutorials, here we don't setup any light sources
scene = pyredner.Scene(cam, shapes, materials, area_lights = [], envmap = None)
# Serialize the scene.
# There are two differences compared to previous tutorials.
# 1. we set "max_bounces" to 0, so we do not perform path tracing at all.
# 2. there is an extra argument "channels", specifying the output as position, shading normal, and albedo
# by default the channels is a list with a single item redner.channels.radiance, which contains
# the path tracing output.
# See channels.h for a full list of channels we support.
# The channels argument can also be useful for e.g. RGBD rendering.
scene_args = pyredner.RenderFunction.serialize_scene(\
scene = scene,
num_samples = 16, # Still need some samples for anti-aliasing
max_bounces = 0,
channels = [redner.channels.position,
redner.channels.shading_normal,
redner.channels.diffuse_reflectance])
render = pyredner.RenderFunction.apply
g_buffer = render(0, *scene_args)
# Now, since we specified the outputs to be position, normal, and albedo,
# g_buffer is a 9-channel image
pos = g_buffer[:, :, :3]
normal = g_buffer[:, :, 3:6]
albedo = g_buffer[:, :, 6:9]
# Next, we render the g-buffer into a final image
# For this we define a deferred_render function:
def deferred_render(pos, normal, albedo):
# We assume a point light at the camera origin (0, 30, 200)
# The lighting consists of a geometry term cos/d^2, albedo, and the light intensity
light_pos = torch.tensor([0.0, 30.0, 200.0], device = pyredner.get_device())
light_pos = light_pos.view(1, 1, 3)
light_intensity = torch.tensor([10000.0, 10000.0, 10000.0], device = pyredner.get_device())
light_intensity = light_intensity.view(1, 1, 3)
light_dir = light_pos - pos
# the d^2 term:
light_dist_sq = torch.sum(light_dir * light_dir, 2, keepdim = True)
light_dist = torch.sqrt(light_dist_sq)
# Normalize light direction
light_dir = light_dir / light_dist
dot_l_n = torch.sum(light_dir * normal, 2, keepdim = True)
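    # Lambertian shading: light_intensity * cos(theta) * (albedo / pi) / d^2.
    # Note: dot_l_n is not clamped to zero here, so back-facing points receive
    # negative values in this simplified model.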
return light_intensity * dot_l_n * (albedo / math.pi) / light_dist_sq
img = deferred_render(pos, normal, albedo)
# Save the images
pyredner.imwrite(img.cpu(), 'results/fast_deferred_rendering/target.exr')
pyredner.imwrite(img.cpu(), 'results/fast_deferred_rendering/target.png')
# Load the targets back
target = pyredner.imread('results/fast_deferred_rendering/target.exr')
if pyredner.get_use_gpu():
target = target.cuda()
# Same as tutorial 2, perturb the scene by a translation and a rotation to the object
translation_params = torch.tensor([0.1, -0.1, 0.1],
device = pyredner.get_device(), requires_grad=True)
translation = translation_params * 100.0
euler_angles = torch.tensor([0.1, -0.1, 0.1], requires_grad=True)
shape0_vertices = shapes[0].vertices.clone()
shape1_vertices = shapes[1].vertices.clone()
rotation_matrix = pyredner.gen_rotate_matrix(euler_angles)
if pyredner.get_use_gpu():
rotation_matrix = rotation_matrix.cuda()
center = torch.mean(torch.cat([shape0_vertices, shape1_vertices]), 0)
shapes[0].vertices = \
(shape0_vertices - center) @ torch.t(rotation_matrix) + \
center + translation
shapes[1].vertices = \
(shape1_vertices - center) @ torch.t(rotation_matrix) + \
center + translation
# Since we changed the vertices, we need to regenerate the shading normals
shapes[0].normals = pyredner.compute_vertex_normal(shapes[0].vertices, shapes[0].indices)
shapes[1].normals = pyredner.compute_vertex_normal(shapes[1].vertices, shapes[1].indices)
# We need to serialize the scene again to get the new arguments.
scene_args = pyredner.RenderFunction.serialize_scene(\
scene = scene,
num_samples = 16, # Still need some samples for anti-aliasing
max_bounces = 0,
channels = [redner.channels.position,
redner.channels.shading_normal,
redner.channels.diffuse_reflectance])
# Render the initial guess.
g_buffer = render(1, *scene_args)
pos = g_buffer[:, :, :3]
normal = g_buffer[:, :, 3:6]
albedo = g_buffer[:, :, 6:9]
img = deferred_render(pos, normal, albedo)
# Save the images
pyredner.imwrite(img.cpu(), 'results/fast_deferred_rendering/init.png')
# Compute the difference and save the images.
diff = torch.abs(target - img)
pyredner.imwrite(diff.cpu(), 'results/fast_deferred_rendering/init_diff.png')
# Optimize for pose parameters.
optimizer = torch.optim.Adam([translation_params, euler_angles], lr=1e-2)
# Run 200 Adam iterations.
for t in range(200):
print('iteration:', t)
optimizer.zero_grad()
# Forward pass: apply the mesh operation and render the image.
translation = translation_params * 100.0
rotation_matrix = pyredner.gen_rotate_matrix(euler_angles)
if pyredner.get_use_gpu():
rotation_matrix = rotation_matrix.cuda()
center = torch.mean(torch.cat([shape0_vertices, shape1_vertices]), 0)
shapes[0].vertices = \
(shape0_vertices - center) @ torch.t(rotation_matrix) + \
center + translation
shapes[1].vertices = \
(shape1_vertices - center) @ torch.t(rotation_matrix) + \
center + translation
shapes[0].normals = pyredner.compute_vertex_normal(shapes[0].vertices, shapes[0].indices)
shapes[1].normals = pyredner.compute_vertex_normal(shapes[1].vertices, shapes[1].indices)
scene_args = pyredner.RenderFunction.serialize_scene(\
scene = scene,
num_samples = 4, # Use less in Adam iteration
max_bounces = 0,
channels = [redner.channels.position,
redner.channels.shading_normal,
redner.channels.diffuse_reflectance])
# Important to use a different seed every iteration, otherwise the result
# would be biased.
g_buffer = render(t+1, *scene_args)
pos = g_buffer[:, :, :3]
normal = g_buffer[:, :, 3:6]
albedo = g_buffer[:, :, 6:9]
img = deferred_render(pos, normal, albedo)
# Save the intermediate render.
pyredner.imwrite(img.cpu(), 'results/fast_deferred_rendering/iter_{}.png'.format(t))
# Compute the loss function. Here it is L2.
loss = (img - target).pow(2).sum()
print('loss:', loss.item())
# Backpropagate the gradients.
loss.backward()
# Print the gradients
print('translation_params.grad:', translation_params.grad)
print('euler_angles.grad:', euler_angles.grad)
# Take a gradient descent step.
optimizer.step()
# Print the current pose parameters.
print('translation:', translation)
print('euler_angles:', euler_angles)
# Render the final result.
scene_args = pyredner.RenderFunction.serialize_scene(\
scene = scene,
num_samples = 16,
max_bounces = 0,
channels = [redner.channels.position,
redner.channels.shading_normal,
redner.channels.diffuse_reflectance])
g_buffer = render(202, *scene_args)
pos = g_buffer[:, :, :3]
normal = g_buffer[:, :, 3:6]
albedo = g_buffer[:, :, 6:9]
img = deferred_render(pos, normal, albedo)
# Save the images and differences.
pyredner.imwrite(img.cpu(), 'results/fast_deferred_rendering/final.exr')
pyredner.imwrite(img.cpu(), 'results/fast_deferred_rendering/final.png')
pyredner.imwrite(torch.abs(target - img).cpu(), 'results/fast_deferred_rendering/final_diff.png')
# Convert the intermediate renderings to a video.
from subprocess import call
call(["ffmpeg", "-framerate", "24", "-i",
"results/fast_deferred_rendering/iter_%d.png", "-vb", "20M",
"results/fast_deferred_rendering/out.mp4"]) | [
"torch.optim.Adam",
"torch.abs",
"pyredner.get_use_gpu",
"pyredner.load_obj",
"torch.sqrt",
"pyredner.get_device",
"pyredner.Scene",
"torch.tensor",
"torch.cuda.is_available",
"torch.sum",
"subprocess.call",
"torch.cat",
"pyredner.compute_vertex_normal",
"pyredner.Shape",
"pyredner.RenderFunction.serialize_scene",
"torch.t",
"pyredner.gen_rotate_matrix",
"pyredner.imread"
] | [((837, 868), 'pyredner.load_obj', 'pyredner.load_obj', (['"""teapot.obj"""'], {}), "('teapot.obj')\n", (854, 868), False, 'import pyredner\n'), ((1988, 2055), 'pyredner.Scene', 'pyredner.Scene', (['cam', 'shapes', 'materials'], {'area_lights': '[]', 'envmap': 'None'}), '(cam, shapes, materials, area_lights=[], envmap=None)\n', (2002, 2055), False, 'import pyredner\n'), ((2596, 2794), 'pyredner.RenderFunction.serialize_scene', 'pyredner.RenderFunction.serialize_scene', ([], {'scene': 'scene', 'num_samples': '(16)', 'max_bounces': '(0)', 'channels': '[redner.channels.position, redner.channels.shading_normal, redner.channels.\n diffuse_reflectance]'}), '(scene=scene, num_samples=16,\n max_bounces=0, channels=[redner.channels.position, redner.channels.\n shading_normal, redner.channels.diffuse_reflectance])\n', (2635, 2794), False, 'import pyredner\n'), ((4331, 4392), 'pyredner.imread', 'pyredner.imread', (['"""results/fast_deferred_rendering/target.exr"""'], {}), "('results/fast_deferred_rendering/target.exr')\n", (4346, 4392), False, 'import pyredner\n'), ((4396, 4418), 'pyredner.get_use_gpu', 'pyredner.get_use_gpu', ([], {}), '()\n', (4416, 4418), False, 'import pyredner\n'), ((4698, 4748), 'torch.tensor', 'torch.tensor', (['[0.1, -0.1, 0.1]'], {'requires_grad': '(True)'}), '([0.1, -0.1, 0.1], requires_grad=True)\n', (4710, 4748), False, 'import torch\n'), ((4857, 4897), 'pyredner.gen_rotate_matrix', 'pyredner.gen_rotate_matrix', (['euler_angles'], {}), '(euler_angles)\n', (4883, 4897), False, 'import pyredner\n'), ((4901, 4923), 'pyredner.get_use_gpu', 'pyredner.get_use_gpu', ([], {}), '()\n', (4921, 4923), False, 'import pyredner\n'), ((5355, 5424), 'pyredner.compute_vertex_normal', 'pyredner.compute_vertex_normal', (['shapes[0].vertices', 'shapes[0].indices'], {}), '(shapes[0].vertices, shapes[0].indices)\n', (5385, 5424), False, 'import pyredner\n'), ((5445, 5514), 'pyredner.compute_vertex_normal', 'pyredner.compute_vertex_normal', (['shapes[1].vertices', 'shapes[1].indices'], {}), '(shapes[1].vertices, shapes[1].indices)\n', (5475, 5514), False, 'import pyredner\n'), ((5593, 5791), 'pyredner.RenderFunction.serialize_scene', 'pyredner.RenderFunction.serialize_scene', ([], {'scene': 'scene', 'num_samples': '(16)', 'max_bounces': '(0)', 'channels': '[redner.channels.position, redner.channels.shading_normal, redner.channels.\n diffuse_reflectance]'}), '(scene=scene, num_samples=16,\n max_bounces=0, channels=[redner.channels.position, redner.channels.\n shading_normal, redner.channels.diffuse_reflectance])\n', (5632, 5791), False, 'import pyredner\n'), ((6216, 6239), 'torch.abs', 'torch.abs', (['(target - img)'], {}), '(target - img)\n', (6225, 6239), False, 'import torch\n'), ((6363, 6424), 'torch.optim.Adam', 'torch.optim.Adam', (['[translation_params, euler_angles]'], {'lr': '(0.01)'}), '([translation_params, euler_angles], lr=0.01)\n', (6379, 6424), False, 'import torch\n'), ((8548, 8746), 'pyredner.RenderFunction.serialize_scene', 'pyredner.RenderFunction.serialize_scene', ([], {'scene': 'scene', 'num_samples': '(16)', 'max_bounces': '(0)', 'channels': '[redner.channels.position, redner.channels.shading_normal, redner.channels.\n diffuse_reflectance]'}), '(scene=scene, num_samples=16,\n max_bounces=0, channels=[redner.channels.position, redner.channels.\n shading_normal, redner.channels.diffuse_reflectance])\n', (8587, 8746), False, 'import pyredner\n'), ((9316, 9470), 'subprocess.call', 'call', (["['ffmpeg', '-framerate', '24', '-i',\n 
'results/fast_deferred_rendering/iter_%d.png', '-vb', '20M',\n 'results/fast_deferred_rendering/out.mp4']"], {}), "(['ffmpeg', '-framerate', '24', '-i',\n 'results/fast_deferred_rendering/iter_%d.png', '-vb', '20M',\n 'results/fast_deferred_rendering/out.mp4'])\n", (9320, 9470), False, 'from subprocess import call\n'), ((705, 730), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (728, 730), False, 'import torch\n'), ((939, 998), 'pyredner.compute_vertex_normal', 'pyredner.compute_vertex_normal', (['mesh.vertices', 'mesh.indices'], {}), '(mesh.vertices, mesh.indices)\n', (969, 998), False, 'import pyredner\n'), ((3785, 3834), 'torch.sum', 'torch.sum', (['(light_dir * light_dir)', '(2)'], {'keepdim': '(True)'}), '(light_dir * light_dir, 2, keepdim=True)\n', (3794, 3834), False, 'import torch\n'), ((3854, 3879), 'torch.sqrt', 'torch.sqrt', (['light_dist_sq'], {}), '(light_dist_sq)\n', (3864, 3879), False, 'import torch\n'), ((3965, 4011), 'torch.sum', 'torch.sum', (['(light_dir * normal)', '(2)'], {'keepdim': '(True)'}), '(light_dir * normal, 2, keepdim=True)\n', (3974, 4011), False, 'import torch\n'), ((4990, 5035), 'torch.cat', 'torch.cat', (['[shape0_vertices, shape1_vertices]'], {}), '([shape0_vertices, shape1_vertices])\n', (4999, 5035), False, 'import torch\n'), ((6660, 6700), 'pyredner.gen_rotate_matrix', 'pyredner.gen_rotate_matrix', (['euler_angles'], {}), '(euler_angles)\n', (6686, 6700), False, 'import pyredner\n'), ((6708, 6730), 'pyredner.get_use_gpu', 'pyredner.get_use_gpu', ([], {}), '()\n', (6728, 6730), False, 'import pyredner\n'), ((7123, 7192), 'pyredner.compute_vertex_normal', 'pyredner.compute_vertex_normal', (['shapes[0].vertices', 'shapes[0].indices'], {}), '(shapes[0].vertices, shapes[0].indices)\n', (7153, 7192), False, 'import pyredner\n'), ((7217, 7286), 'pyredner.compute_vertex_normal', 'pyredner.compute_vertex_normal', (['shapes[1].vertices', 'shapes[1].indices'], {}), '(shapes[1].vertices, shapes[1].indices)\n', (7247, 7286), False, 'import pyredner\n'), ((7304, 7501), 'pyredner.RenderFunction.serialize_scene', 'pyredner.RenderFunction.serialize_scene', ([], {'scene': 'scene', 'num_samples': '(4)', 'max_bounces': '(0)', 'channels': '[redner.channels.position, redner.channels.shading_normal, redner.channels.\n diffuse_reflectance]'}), '(scene=scene, num_samples=4,\n max_bounces=0, channels=[redner.channels.position, redner.channels.\n shading_normal, redner.channels.diffuse_reflectance])\n', (7343, 7501), False, 'import pyredner\n'), ((1048, 1080), 'torch.tensor', 'torch.tensor', (['[0.0, 30.0, 200.0]'], {}), '([0.0, 30.0, 200.0])\n', (1060, 1080), False, 'import torch\n'), ((1114, 1144), 'torch.tensor', 'torch.tensor', (['[0.0, 30.0, 0.0]'], {}), '([0.0, 30.0, 0.0])\n', (1126, 1144), False, 'import torch\n'), ((1173, 1202), 'torch.tensor', 'torch.tensor', (['[0.0, 1.0, 0.0]'], {}), '([0.0, 1.0, 0.0])\n', (1185, 1202), False, 'import torch\n'), ((1232, 1252), 'torch.tensor', 'torch.tensor', (['[45.0]'], {}), '([45.0])\n', (1244, 1252), False, 'import torch\n'), ((1701, 1840), 'pyredner.Shape', 'pyredner.Shape', ([], {'vertices': 'mesh.vertices', 'indices': 'mesh.indices', 'uvs': 'mesh.uvs', 'normals': 'mesh.normals', 'material_id': 'material_id_map[mtl_name]'}), '(vertices=mesh.vertices, indices=mesh.indices, uvs=mesh.uvs,\n normals=mesh.normals, material_id=material_id_map[mtl_name])\n', (1715, 1840), False, 'import pyredner\n'), ((4599, 4620), 'pyredner.get_device', 'pyredner.get_device', ([], {}), '()\n', (4618, 4620), False, 'import 
pyredner\n'), ((6805, 6850), 'torch.cat', 'torch.cat', (['[shape0_vertices, shape1_vertices]'], {}), '([shape0_vertices, shape1_vertices])\n', (6814, 6850), False, 'import torch\n'), ((3502, 3523), 'pyredner.get_device', 'pyredner.get_device', ([], {}), '()\n', (3521, 3523), False, 'import pyredner\n'), ((3638, 3659), 'pyredner.get_device', 'pyredner.get_device', ([], {}), '()\n', (3657, 3659), False, 'import pyredner\n'), ((5096, 5120), 'torch.t', 'torch.t', (['rotation_matrix'], {}), '(rotation_matrix)\n', (5103, 5120), False, 'import torch\n'), ((5206, 5230), 'torch.t', 'torch.t', (['rotation_matrix'], {}), '(rotation_matrix)\n', (5213, 5230), False, 'import torch\n'), ((9156, 9179), 'torch.abs', 'torch.abs', (['(target - img)'], {}), '(target - img)\n', (9165, 9179), False, 'import torch\n'), ((6919, 6943), 'torch.t', 'torch.t', (['rotation_matrix'], {}), '(rotation_matrix)\n', (6926, 6943), False, 'import torch\n'), ((7041, 7065), 'torch.t', 'torch.t', (['rotation_matrix'], {}), '(rotation_matrix)\n', (7048, 7065), False, 'import torch\n')] |
import os
import numpy as np
from torch.utils.data import Dataset
from PIL import Image
import albumentations as A
from albumentations.pytorch import ToTensor
from zipfile import ZipFile
from tqdm import tqdm
from deepnet.data.dataset.basedataset import BaseDataset
class ModestMuseum(BaseDataset):
@property
def mean(self):
"""Returns Channel-wise mean of the whole dataset"""
return dict({
'bg':(0.40086, 0.46599, 0.53281),
'bg_fg':(0.41221, 0.47368, 0.53431),
'bg_fg_mask':(0.05207),
'bg_fg_depth':(0.2981)
})
@property
def std(self):
"""Returns Channel-wise standard deviation of the whole dataset"""
return dict({
'bg':(0.25451, 0.24249, 0.23615),
'bg_fg':(0.25699, 0.24577, 0.24217),
'bg_fg_mask':(0.21686),
'bg_fg_depth':(0.11561)
})
@property
def input_size(self):
"""Returns Dimension of the input image"""
# channels, height, width = self._train_data[0][0].shape
# return tuple((channels, height, width))
return dict({
'bg': (3,224,224),
'bg_fg':(3,224,224),
'bg_fg_mask':(1,224,224),
'bg_fg_depth':(1,224,224)
})
def download(self, train=True):
"""Download the dataset
Arguments:
            train: True if train data is to be downloaded, False for test data
            (default: True)
            apply_transformations: True if transformations are to be applied, else False
(default: False)
Returns:
Dataset after downloading
"""
transform = self._train_transformations if train else self._test_transformations
args = {
'path' : self.path,
'train': train,
'train_test_split': self.train_test_split,
'seed': self.seed,
'transforms': transform}
return Download(**args)
def dataset_creation(self):
"""Creates dataset
Returns:
Train dataset and Test dataset
"""
self._train_transformations = {
'bg':self.transform(self.mean['bg'], self.std['bg']),
'bg_fg':self.transform(self.mean['bg_fg'], self.std['bg_fg']),
'bg_fg_mask':self.transform(self.mean['bg_fg_mask'], self.std['bg_fg_mask'], modest_input=False),
'bg_fg_depth':self.transform(self.mean['bg_fg_depth'], self.std['bg_fg_depth'], modest_input=False)
}
train_data = self.download(train = True)
self._test_transformations = {
'bg':self.transform(self.mean['bg'], self.std['bg'], train=False),
'bg_fg':self.transform(self.mean['bg_fg'], self.std['bg_fg'], train=False),
'bg_fg_mask':self.transform(self.mean['bg_fg_mask'], self.std['bg_fg_mask'], train=False, modest_input=False),
'bg_fg_depth':self.transform(self.mean['bg_fg_depth'], self.std['bg_fg_depth'], train=False, modest_input=False)
}
test_data = self.download(train = False)
return train_data, test_data
class Download(Dataset):
def __init__(self, path, train=False, train_test_split=0.7, seed=1, transforms=None):
'''Extract the data and target from the dataset folder
Arguments:
path (str): Path to store the dataset
            train (bool): True if train data is to be extracted, False if test data is to be extracted
(default: False)
train_test_split (float, optional) : Value to split train test data for training
(default: 0.7)
seed (integer, optional): Value for random initialization
(default: 1)
transforms: Transformations that are to be applied on the data
(default: None)
'''
self.train = train
self.transforms = transforms
data = []
file_map = open(os.path.join(path,'file_map.txt'))
file_info = file_map.readlines()
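        # Each line of file_map.txt is assumed to hold the tab-separated ids of one
        # sample's bg, bg_fg, bg_fg_mask and bg_fg_depth images; a (hypothetical)
        # line could look like: "bg001\tbgfg042\tbgfg042\tbgfg042"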
for f in file_info:
mapping = f[:-1].split('\t')
data.append({'bg' : os.path.join(path,'bg',mapping[0] + '.jpeg'),
'bg_fg' : os.path.join(path,'bg_fg',mapping[1] + '.jpeg'),
'bg_fg_mask' : os.path.join(path,'bg_fg_mask',mapping[2] + '.jpeg'),
'bg_fg_depth' : os.path.join(path,'bg_fg_depth_map',mapping[3] + '.jpeg'),})
total_images = len(data)
image_index = list(range(0,total_images))
np.random.seed(seed)
np.random.shuffle(image_index)
last_train_index = int(total_images*train_test_split)
if train:
image_index = image_index[:last_train_index]
else:
image_index = image_index[last_train_index:]
        # stores the file paths of the images that make up each sample
self.dataset = []
for index in image_index:
self.dataset.append(data[index])
def __len__(self):
'''Returns the length of the data'''
return len(self.dataset)
def __getitem__(self, idx):
'''Return the data'''
data = self.dataset[idx]
bg = self.transforms['bg'](Image.open(data['bg']))
bg_fg = self.transforms['bg_fg'](Image.open(data['bg_fg']))
bg_fg_mask = self.transforms['bg_fg_mask'](Image.open(data['bg_fg_mask']))
bg_fg_depth = self.transforms['bg_fg_depth'](Image.open(data['bg_fg_depth']))
data = {
'bg' : bg,
'bg_fg' : bg_fg,
'bg_fg_mask' : bg_fg_mask,
'bg_fg_depth' : bg_fg_depth
}
return data | [
"PIL.Image.open",
"os.path.join",
"numpy.random.seed",
"numpy.random.shuffle"
] | [((4582, 4602), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (4596, 4602), True, 'import numpy as np\n'), ((4611, 4641), 'numpy.random.shuffle', 'np.random.shuffle', (['image_index'], {}), '(image_index)\n', (4628, 4641), True, 'import numpy as np\n'), ((3988, 4022), 'os.path.join', 'os.path.join', (['path', '"""file_map.txt"""'], {}), "(path, 'file_map.txt')\n", (4000, 4022), False, 'import os\n'), ((5236, 5258), 'PIL.Image.open', 'Image.open', (["data['bg']"], {}), "(data['bg'])\n", (5246, 5258), False, 'from PIL import Image\n'), ((5301, 5326), 'PIL.Image.open', 'Image.open', (["data['bg_fg']"], {}), "(data['bg_fg'])\n", (5311, 5326), False, 'from PIL import Image\n'), ((5379, 5409), 'PIL.Image.open', 'Image.open', (["data['bg_fg_mask']"], {}), "(data['bg_fg_mask'])\n", (5389, 5409), False, 'from PIL import Image\n'), ((5464, 5495), 'PIL.Image.open', 'Image.open', (["data['bg_fg_depth']"], {}), "(data['bg_fg_depth'])\n", (5474, 5495), False, 'from PIL import Image\n'), ((4166, 4212), 'os.path.join', 'os.path.join', (['path', '"""bg"""', "(mapping[0] + '.jpeg')"], {}), "(path, 'bg', mapping[0] + '.jpeg')\n", (4178, 4212), False, 'import os\n'), ((4246, 4295), 'os.path.join', 'os.path.join', (['path', '"""bg_fg"""', "(mapping[1] + '.jpeg')"], {}), "(path, 'bg_fg', mapping[1] + '.jpeg')\n", (4258, 4295), False, 'import os\n'), ((4334, 4388), 'os.path.join', 'os.path.join', (['path', '"""bg_fg_mask"""', "(mapping[2] + '.jpeg')"], {}), "(path, 'bg_fg_mask', mapping[2] + '.jpeg')\n", (4346, 4388), False, 'import os\n'), ((4428, 4487), 'os.path.join', 'os.path.join', (['path', '"""bg_fg_depth_map"""', "(mapping[3] + '.jpeg')"], {}), "(path, 'bg_fg_depth_map', mapping[3] + '.jpeg')\n", (4440, 4487), False, 'import os\n')] |
from django.conf.urls import include, url
from django.conf.urls.i18n import i18n_patterns
urlpatterns = i18n_patterns(
url("^api/", include("mezzanine_api.urls")),
)
| [
"django.conf.urls.include"
] | [((138, 167), 'django.conf.urls.include', 'include', (['"""mezzanine_api.urls"""'], {}), "('mezzanine_api.urls')\n", (145, 167), False, 'from django.conf.urls import include, url\n')] |
import gust # library for loading graph data
import torch
import scipy as sp
import numpy as np
def load_dataset(name='cora'):
A, X, _, y = gust.load_dataset(name).standardize().unpack()
# A - adjacency matrix
# X - attribute matrix - not needed
# y - node labels
if (A != A.T).sum() > 0:
raise RuntimeError("The graph must be undirected!")
if (A.data != 1).sum() > 0:
raise RuntimeError("The graph must be unweighted!")
return A,y
def csr_matrix_to_torch_tensor(matrix):
coo = sp.sparse.coo_matrix(matrix)
values = coo.data
indices = np.vstack((coo.row, coo.col))
i = torch.LongTensor(indices)
v = torch.FloatTensor(values)
shape = coo.shape
return torch.sparse.FloatTensor(i, v, torch.Size(shape)).to_dense()
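# Illustrative usage sketch, kept as comments ('cora' is the default dataset name above):
# A, y = load_dataset('cora')
# A_dense = csr_matrix_to_torch_tensor(A)  # dense torch.FloatTensor of the adjacency matrix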
| [
"gust.load_dataset",
"torch.LongTensor",
"numpy.vstack",
"scipy.sparse.coo_matrix",
"torch.Size",
"torch.FloatTensor"
] | [((538, 566), 'scipy.sparse.coo_matrix', 'sp.sparse.coo_matrix', (['matrix'], {}), '(matrix)\n', (558, 566), True, 'import scipy as sp\n'), ((603, 632), 'numpy.vstack', 'np.vstack', (['(coo.row, coo.col)'], {}), '((coo.row, coo.col))\n', (612, 632), True, 'import numpy as np\n'), ((642, 667), 'torch.LongTensor', 'torch.LongTensor', (['indices'], {}), '(indices)\n', (658, 667), False, 'import torch\n'), ((676, 701), 'torch.FloatTensor', 'torch.FloatTensor', (['values'], {}), '(values)\n', (693, 701), False, 'import torch\n'), ((766, 783), 'torch.Size', 'torch.Size', (['shape'], {}), '(shape)\n', (776, 783), False, 'import torch\n'), ((147, 170), 'gust.load_dataset', 'gust.load_dataset', (['name'], {}), '(name)\n', (164, 170), False, 'import gust\n')] |
"""
We assume throughout that the image format is (samples, channels, height, width)
"""
import numpy as np
import numbers
from . import utilities
from ..utils import to_value
from ..reporting.export import as_rgb_image
from ..reporting.export import as_image_ui8
from ..reporting.export import export_image
def export_as_image(name, samples, sample_id, export_root, txt_file):
"""
Export a value as an image
:param name:
:param samples:
:param export_root:
:param txt_file:
:return:
"""
samples = to_value(samples)
    # an image MUST have a filter (channel) component, otherwise we could confuse it
    # with a 2D array that we want to export to a text file
if not isinstance(samples, np.ndarray) or len(samples.shape) <= 3:
return False
rgb = as_rgb_image(samples[sample_id])
if rgb is None:
return False
# use the batch min/max to determine the range of the pixels. If the batch is
# not too small, it should be fine.
# TODO Can we find a more deterministic range?
batch_min = np.min(samples)
batch_max = np.max(samples)
ui8 = as_image_ui8(rgb, min_value=batch_min, max_value=batch_max)
if ui8 is None:
return False
path = export_root + name + '.png'
export_image(ui8, path)
return True
def export_as_npy(name, samples, sample_id, export_root, txt_file):
samples = to_value(samples)
if isinstance(samples, np.ndarray):
if len(samples.shape) == 1 or len(samples.shape) == 0:
return False # if 1D, we probably want this exported as text
if len(samples.shape) == 2 and samples.shape[1] < 10000:
return False
path = export_root + name + '.npy'
np.save(path, samples[sample_id])
return True
return False
def export_as_string(name, samples, sample_id, export_root, txt_file, max_len=1024):
samples = to_value(samples)
if isinstance(samples, numbers.Number) or isinstance(samples, str):
txt_file.write('%s=%s\n' % (name, str(samples)))
return True
if isinstance(samples, np.ndarray) and len(samples.shape) <= 2 and len(samples.shape) >= 1:
txt_file.write('%s=%s\n' % (name, str(samples[sample_id])))
return True
if isinstance(samples, list) and isinstance(samples[0], str):
txt_file.write('%s=%s\n' % (name, str(samples[sample_id])))
return True
# default fallback: as a string!
value_str = str(samples)
txt_file.write('%s=%s\n' % (name, value_str[:max_len]))
return True
def export_functions():
"""
Default export functions
:return:
"""
return [
export_as_image,
export_as_npy,
export_as_string
]
def clean_mapping_name(name):
return name.replace('_truth', '')
def export_sample(
batch,
sample_id,
export_root,
txt_file,
exports=export_functions,
features_to_discard=None,
clean_mapping_name_fn=clean_mapping_name,
classification_mappings=None,
):
"""
Export a sample from the batch
:param batch: a batch
:param sample_id: the index of the sample to export
:param export_root: where to export the data (typically including the sample id)
:param txt_file: where to export text data. Must be an opened file for write
:param exports: a functor returning the functions to be used for the export
:param features_to_discard: a list of feature names to discard
:param clean_mapping_name_fn: function to translate the name of batch feature name to class output name
:param classification_mappings: a dictionary of mapping output to translate class ID to class name
"""
fns = exports()
for feature_name, feature in batch.items():
if features_to_discard is not None and feature_name in features_to_discard:
continue
for fn in fns:
exported = fn(feature_name, feature, sample_id, export_root, txt_file)
if exported:
break
# check if we have some classification mapping names: it would be much easier
# to read the actual class name than the class ID
if classification_mappings is not None:
for name, value in batch.items():
value = to_value(value)
if not isinstance(value, np.ndarray):
continue
mapping_name = clean_mapping_name_fn(name)
m = classification_mappings.get(mapping_name)
if m is not None:
class_name = utilities.get_class_name(m, value[sample_id])
txt_file.write('{}_str={}\n'.format(name, class_name))
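# A minimal usage sketch, kept as comments; the feature names and output paths are
# made up for illustration and assume numpy arrays as feature values.
#
# batch = {'images': np.random.rand(4, 3, 32, 32), 'labels': np.asarray([0, 1, 2, 1])}
# with open('sample_0.txt', 'w') as txt_file:
#     export_sample(batch, sample_id=0, export_root='sample_0_', txt_file=txt_file)
# # -> exports 'sample_0_images.png' and writes 'labels=...' into sample_0.txt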
| [
"numpy.max",
"numpy.save",
"numpy.min"
] | [((1050, 1065), 'numpy.min', 'np.min', (['samples'], {}), '(samples)\n', (1056, 1065), True, 'import numpy as np\n'), ((1082, 1097), 'numpy.max', 'np.max', (['samples'], {}), '(samples)\n', (1088, 1097), True, 'import numpy as np\n'), ((1714, 1747), 'numpy.save', 'np.save', (['path', 'samples[sample_id]'], {}), '(path, samples[sample_id])\n', (1721, 1747), True, 'import numpy as np\n')] |
from pathlib import PurePosixPath
import re
from inspect import signature
import collections
import copy
class PathGoesAboveRoot(Exception):
pass
class fspathtree:
"""A small class that wraps a tree data struction and allow accessing the nested elements using filesystem-style paths."""
DefaultNodeType = dict
PathType = PurePosixPath
def __init__(self,tree=None,root=None,abspath='/'):
self.tree = tree if tree is not None else self.DefaultNodeType()
self.root = root if root is not None else self.tree
self.abspath = self.PathType(abspath)
if self.tree == self.root and abspath != '/':
raise RuntimeError("fspathtree: tree initialized with a root, but abspath is not '/'.")
if self.tree != self.root and abspath == '/':
raise RuntimeError("fspathtree: tree initialized with an abspath '/', but the tree and root are not the same.")
self.get_all_leaf_node_paths = self._instance_get_all_leaf_node_paths
self.find = self._instance_find
@staticmethod
def is_leaf(key,node):
if type(node) in [str,bytes]:
return True
if isinstance(node,collections.abc.Mapping) or isinstance(node,collections.abc.Sequence):
return False
return True
# Public Instance API
def __getitem__(self,path,wrap_branch_nodes=True):
path = self._make_path(path)
if path.is_absolute():
node = fspathtree.getitem(self.root,path,normalize_path=True)
else:
try:
node = fspathtree.getitem(self.tree,path)
except PathGoesAboveRoot as e:
        # if the key references a node above the root,
# try again with the root tree.
if self.abspath == self.PathType("/"):
raise e
node = fspathtree.getitem(self.root,(self.abspath/path))
# if the item is an indexable node, we want to wrap it in an fspathtree before returning.
if fspathtree.is_leaf(path,node) or wrap_branch_nodes is False:
return node
else:
return fspathtree(node,root=self.root,abspath=(self.abspath/path).as_posix())
def __setitem__(self,key,value):
path = self._make_path(key)
if path.is_absolute():
fspathtree.setitem(self.root,path,value)
return
# path is relative
# first try to set the item from local tree
# if a PathGoesAboveRoot exception is thrown, then
# we can check to see if the path refers to an path in the
# root tree
try:
fspathtree.setitem(self.tree,path,value)
except PathGoesAboveRoot as e:
if self.abspath == self.PathType("/"):
raise e
fspathtree.setitem(self.root,(self.abspath/path),value)
def __contains__(self,key):
try:
self[key]
return True
except:
return False
def __len__(self):
return len(self.tree)
def update(self,*args,**kwargs):
self.tree.update(*args,**kwargs)
def path(self):
return self.normalize_path(self.abspath)
def get(self,path,default_value):
'''
    Returns the value of the node referenced by path, or a default value if the node does not exist.
'''
try:
return self[path]
except KeyError:
return default_value
# this is used to allow the same name for instance and static methods
def _instance_get_all_leaf_node_paths(self, transform = None, predicate=None):
return fspathtree.get_all_leaf_node_paths(self.tree,transform,predicate)
def _instance_find(self,pattern):
return fspathtree.find(self.tree,pattern)
# Public Static API
@staticmethod
def normalize_path(path,up="..",current="."):
parts = fspathtree._normalize_path_parts( path.parts, up, current)
if parts is None:
return None
return fspathtree.PathType(*parts)
@staticmethod
def getitem(tree,path,normalize_path=True):
'''
Given a tree and a path, returns the value of the node pointed to by the path. By default, the path will be normalized first.
This can be disabled by passing normalize_path=False.
path may be specified as a string, Path-like object, or list of path elements.
'''
original_path = copy.copy(path)
path = fspathtree._make_path(path,normalize_path=False)
# remove the '/' from the beginning of the path if it exists.
if path.is_absolute():
path = path.relative_to('/')
if str(path) == '' or str(path) == '.':
return tree
try:
return fspathtree._getitem_from_path_parts(tree,path.parts,normalize_path)
except KeyError as e:
msg = f"Could not find path element '{e.args[0]}' while parsing path '{original_path}'"
raise KeyError(msg)
except IndexError as e:
msg = f"Could not find path element '{e.args[0]}' while parsing path '{original_path}'"
raise KeyError(msg)
except Exception as e:
raise e
@staticmethod
def setitem(tree,path,value,normalize_path=True):
'''
Given a tree, a path, and a value, sets the value of the node pointed to by the path. If any level of the path does not
exist, it is created.
'''
original_path = copy.copy(path)
path = fspathtree._make_path(path,normalize_path=False)
# remove the '/' from the beginning of the path if it exists.
if path.is_absolute():
path = path.relative_to('/')
try:
fspathtree._setitem_from_path_parts(tree,path.parts,value,normalize_path)
except KeyError as e:
msg = f"Could not find path element '{e.args[0]}' while parsing path '{original_path}'"
raise KeyError(msg)
except IndexError as e:
msg = f"Could not find path element '{e.args[0]}' while parsing path '{original_path}'"
raise KeyError(msg)
except Exception as e:
raise e
@staticmethod
def get_all_leaf_node_paths(node,transform = None ,predicate = None):
if transform is False:
transform = None
return fspathtree._get_all_leaf_node_paths(node,transform,predicate)
@staticmethod
def find(tree,pattern,as_string=False):
return fspathtree.get_all_leaf_node_paths(tree,str if as_string else None,lambda p: p.match(pattern))
# Private Methods
@staticmethod
def _make_path(key,normalize_path=False):
'''
Given a string, bytes array, integer, or list of path elements; return a PathType object representing the path.
'''
if type(key) in (list,tuple):
path = fspathtree.PathType(*key)
else:
if type(key) in (str,bytes):
key = re.sub(r'^\/+','/',key) # replace multiple '/' at front with a single '/'. i.e. // -> /
if type(key) in (int,):
key = str(key)
path = fspathtree.PathType(key)
if normalize_path:
path = fspathtree.normalize_path(path)
if path is None:
raise PathGoesAboveRoot("fspathtree: Key path contains a parent reference (..) that goes above the root of the tree")
return path
@staticmethod
def _normalize_path_parts(parts,up="..",current="."):
if up not in parts and current not in parts:
return parts
norm_parts = list()
for p in parts:
if p == current:
continue
elif p == up:
if len(norm_parts) < 1:
return None
del norm_parts[-1]
else:
norm_parts.append(p)
return norm_parts
@staticmethod
def _getitem_from_path_parts(tree,parts,normalize_path=True):
if normalize_path:
parts = fspathtree._normalize_path_parts(parts)
if parts is None:
raise PathGoesAboveRoot("fspathtree: Key path contains a parent reference (..) that goes above the root of the tree")
if isinstance(tree,collections.abc.Mapping):
if parts[0] in tree:
node = tree[parts[0]]
else:
raise KeyError(parts[0])
elif isinstance(tree,collections.abc.Sequence):
if len(tree) > int(parts[0]):
node = tree[int(parts[0])]
else:
raise IndexError(parts[0])
else:
raise RuntimeError(f"Unrecognized node type '{type(tree)}' is not Mapping of Sequence.")
if len(parts) == 1:
return node
else:
return fspathtree._getitem_from_path_parts(node,parts[1:],False)
@staticmethod
def _setitem_from_path_parts(tree,parts,value,normalize_path=True):
if normalize_path:
parts = fspathtree._normalize_path_parts(parts)
if parts is None:
raise PathGoesAboveRoot("fspathtree: Key path contains a parent reference (..) that goes above the root of the tree")
if isinstance(tree,collections.abc.Mapping):
if len(parts) == 1:
tree[parts[0]] = value
else:
if parts[0] not in tree:
tree[parts[0]] = fspathtree.DefaultNodeType()
fspathtree._setitem_from_path_parts(tree[parts[0]],parts[1:],value,False)
elif isinstance(tree,collections.abc.Sequence):
# if the list does not have enough elements
# append None until it does
while len(tree) <= int(parts[0]):
tree.append(None)
if len(parts) == 1:
tree[int(parts[0])] = value
else:
if tree[int(parts[0])] is None:
tree[int(parts[0])] = fspathtree.DefaultNodeType()
fspathtree._setitem_from_path_parts(tree[int(parts[0])],parts[1:],value,False)
else:
raise RuntimeError(f"fspathree: unrecognized node type '{type(tree)}' is not Mapping of Sequence. Do not know how to set item.")
@staticmethod
def _get_all_leaf_node_paths(node, transform = None, predicate = None, current_path=PathType("/")):
'''
Returns a list containing the paths to all leaf nodes in the tree.
'''
if not fspathtree.is_leaf(current_path,node):
try:
for i in range(len(node)):
yield from fspathtree._get_all_leaf_node_paths( node[i], transform, predicate, current_path / str(i))
except:
for k in node:
yield from fspathtree._get_all_leaf_node_paths( node[k], transform, predicate, current_path / k)
else:
return_path = True
if predicate is not None:
num_args = len(signature(predicate).parameters)
if num_args == 1:
return_path = predicate(current_path)
elif num_args == 2:
return_path = predicate(current_path,node)
else:
raise RuntimeError(f"fspathtree: Predicate function not supported. Predicates may take 1 or 2 arguments. Provided function takes {num_args}.")
if return_path:
if transform is None:
yield current_path
elif type(transform) == type:
yield transform(current_path)
else:
num_args = len(signature(transform).parameters)
if num_args == 1:
yield transform(current_path)
elif num_args == 2:
yield transform(current_path,node)
else:
raise RuntimeError(f"fspathtree: Transform function not supported. Transforms may take 1 or 2 arguments. Provided function takes {num_args}.")
| [
"re.sub",
"copy.copy",
"inspect.signature"
] | [((4068, 4083), 'copy.copy', 'copy.copy', (['path'], {}), '(path)\n', (4077, 4083), False, 'import copy\n'), ((5017, 5032), 'copy.copy', 'copy.copy', (['path'], {}), '(path)\n', (5026, 5032), False, 'import copy\n'), ((6371, 6396), 're.sub', 're.sub', (['"""^\\\\/+"""', '"""/"""', 'key'], {}), "('^\\\\/+', '/', key)\n", (6377, 6396), False, 'import re\n'), ((9896, 9916), 'inspect.signature', 'signature', (['predicate'], {}), '(predicate)\n', (9905, 9916), False, 'from inspect import signature\n'), ((10451, 10471), 'inspect.signature', 'signature', (['transform'], {}), '(transform)\n', (10460, 10471), False, 'from inspect import signature\n')] |
from importlib import import_module
import traceback
from pkgutil import find_loader
import re
def get_setup_util(ta_name, splunk_uri, session_key, logger):
    lib_dir = re.sub(r"[^\w]+", "_", ta_name.lower())  # raw string avoids an invalid-escape warning
loader = find_loader(lib_dir + "_setup_util")
if not loader:
logger.debug('module="%s" doesn\'t exists, no global setting available',
lib_dir + "_setup_util")
return None
try:
setup_util_module = import_module(lib_dir + "_setup_util")
except ImportError:
logger.error('Did not import module: "%s", reason="%s"',
lib_dir + "_setup_util", traceback.format_exc())
return None
return setup_util_module.Setup_Util(splunk_uri, session_key,
logger)
| [
"traceback.format_exc",
"pkgutil.find_loader",
"importlib.import_module"
] | [((225, 261), 'pkgutil.find_loader', 'find_loader', (["(lib_dir + '_setup_util')"], {}), "(lib_dir + '_setup_util')\n", (236, 261), False, 'from pkgutil import find_loader\n'), ((465, 503), 'importlib.import_module', 'import_module', (["(lib_dir + '_setup_util')"], {}), "(lib_dir + '_setup_util')\n", (478, 503), False, 'from importlib import import_module\n'), ((638, 660), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (658, 660), False, 'import traceback\n')] |
'''
Convergence study of maximum magnification for the central value with resolution
@author: <NAME> ppymj11
'''
# import modules
import numpy as np
import matplotlib.pyplot as plt
import Project_completed.modules.lensing_function as lensing
import timeit
# %%
# start the timer
start = timeit.default_timer()
# set up lensing parameters
rc = 0
eps = 0
dom = 1 # abs() of domain of r values (normally -1, 1 --> 1)
# set the initial size and maximum. To conserve pixel size, each cell will be split
# at each step, up to 'splits' subdivisions
lasts = []
for init_N in np.arange(3, 15, 2):
if init_N < 10:
N_max = init_N * 200
else:
N_max = 1500
splits = N_max//init_N
    # set up array to store the number of pixels for each of these:
N_ends = []
# set up an array of used image sizes
sizes = []
odds = []
odds_n = []
evens = []
evens_n = []
# loop over them finding max magnification:
for msplit in range(1, splits):
# get size of image from original and number of splits in each cell
N = init_N*msplit
# reset the initial image
image_s = np.zeros((N, N, 3))
if msplit == 1:
half = int((N-1)/2)
image_s[half, half, 0] = 1
elif msplit % 2 == 1:
half = int((N-1)/2) # middle integer
            spread = int((msplit-1)/2) # range around the centre to include in the coloured pixel
image_s[half-spread : half+spread, half-spread : half+spread, 0] = 1/msplit**2
else:
            half = int(N/2) # not the middle index, but a representative one
spread = int(msplit/2) # spread around rep. middle
image_s[half-spread : half+spread, half-spread : half+spread, 0] = 1/msplit**2
# lens it
image_l = lensing.lens(image_s, rc, eps, dom)
# find number of pixels this central value projected to
# using 1 in R (from RGB) only, this is same as sum:
# note, includes the pixel itself, as for transparent source, it will
# also be visible
N_end = np.sum(image_l)
N_ends.append(N_end)
sizes.append(N)
if msplit % 2 == 1:
odds.append(N_end)
odds_n.append(N)
else:
evens.append(N_end)
evens_n.append(N)
lasts.append(evens[-1])
# %%
# set up figure, axis and visuals
fig = plt.figure()
ax1 = fig.add_subplot(111)
# plot
inits = np.arange(3, 15, 2)
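# each entry of `lasts` is the lensed-pixel count of the central source at the largest
# (even) split for one initial grid size; dividing by the initial size shows how the
# estimated maximum magnification scales with resolution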
ax1.plot(inits, lasts/inits)
# return time to run
stop = timeit.default_timer()
print('Time to run was: {:.4f}'.format(stop - start) + ' s') | [
"Project_completed.modules.lensing_function.lens",
"timeit.default_timer",
"numpy.sum",
"matplotlib.pyplot.figure",
"numpy.zeros",
"numpy.arange"
] | [((289, 311), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (309, 311), False, 'import timeit\n'), ((551, 570), 'numpy.arange', 'np.arange', (['(3)', '(15)', '(2)'], {}), '(3, 15, 2)\n', (560, 570), True, 'import numpy as np\n'), ((2438, 2450), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2448, 2450), True, 'import matplotlib.pyplot as plt\n'), ((2494, 2513), 'numpy.arange', 'np.arange', (['(3)', '(15)', '(2)'], {}), '(3, 15, 2)\n', (2503, 2513), True, 'import numpy as np\n'), ((2573, 2595), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (2593, 2595), False, 'import timeit\n'), ((1154, 1173), 'numpy.zeros', 'np.zeros', (['(N, N, 3)'], {}), '((N, N, 3))\n', (1162, 1173), True, 'import numpy as np\n'), ((1827, 1862), 'Project_completed.modules.lensing_function.lens', 'lensing.lens', (['image_s', 'rc', 'eps', 'dom'], {}), '(image_s, rc, eps, dom)\n', (1839, 1862), True, 'import Project_completed.modules.lensing_function as lensing\n'), ((2117, 2132), 'numpy.sum', 'np.sum', (['image_l'], {}), '(image_l)\n', (2123, 2132), True, 'import numpy as np\n')] |
"""
Copyright (c) 2019-2020 Uber Technologies, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__author__ = "<NAME>"
from plato.domain.ontology import Ontology
from plato.agent.component.dialogue_policy import dialogue_policy
from plato.dialogue.action import DialogueAct, DialogueActItem, Operator
from copy import deepcopy
import random
"""
HandcraftedPolicy is a rule-based system policy, developed as a baseline and as
a quick way to perform sanity checks and debug a Conversational Agent.
It will try to fill unfilled slots, then suggest an item, and answer any
requests from the user.
"""
class HandcraftedPolicy(dialogue_policy.DialoguePolicy):
def __init__(self, args):
"""
Load the ontology.
:param args: contain the domain ontology
"""
super(HandcraftedPolicy, self).__init__()
if 'ontology' in args:
ontology = args['ontology']
else:
raise ValueError('No ontology provided for HandcraftedPolicy!')
self.ontology = None
if isinstance(ontology, Ontology):
self.ontology = ontology
elif isinstance(ontology, str):
self.ontology = Ontology(ontology)
else:
raise ValueError('Unacceptable ontology type %s ' % ontology)
def initialize(self, args):
"""
Nothing to do here
:param args:
:return:
"""
pass
def next_action(self, dialogue_state):
"""
Generate a response given which conditions are met by the current
dialogue state.
:param dialogue_state:
:return:
"""
# Check for terminal state
if dialogue_state.is_terminal_state:
return [DialogueAct('bye', [DialogueActItem('', Operator.EQ, '')])]
# Check if the user has made any requests
elif dialogue_state.requested_slot:
if dialogue_state.item_in_focus and \
dialogue_state.system_made_offer:
requested_slot = dialogue_state.requested_slot
# Reset request as we attempt to address it
dialogue_state.requested_slot = ''
value = 'not available'
if requested_slot in dialogue_state.item_in_focus and \
dialogue_state.item_in_focus[requested_slot]:
value = dialogue_state.item_in_focus[requested_slot]
return \
[DialogueAct(
'inform',
[DialogueActItem(requested_slot, Operator.EQ, value)])]
# Else, if no item is in focus or no offer has been made,
# ignore the user's request
# Try to fill slots
requestable_slots = \
deepcopy(self.ontology.ontology['system_requestable'])
if not hasattr(dialogue_state, 'requestable_slot_entropies') or \
not dialogue_state.requestable_slot_entropies:
slot = random.choice(requestable_slots)
while dialogue_state.slots_filled[slot] and \
len(requestable_slots) > 1:
requestable_slots.remove(slot)
slot = random.choice(requestable_slots)
else:
slot = ''
slots = \
[k for k, v in
dialogue_state.requestable_slot_entropies.items()
if v == max(
dialogue_state.requestable_slot_entropies.values())
and v > 0 and k in requestable_slots]
if slots:
slot = random.choice(slots)
while dialogue_state.slots_filled[slot] \
and dialogue_state.requestable_slot_entropies[
slot] > 0 \
and len(requestable_slots) > 1:
requestable_slots.remove(slot)
slots = \
[k for k, v in
dialogue_state.requestable_slot_entropies.items()
if v == max(
dialogue_state.requestable_slot_entropies.values())
and k in requestable_slots]
if slots:
slot = random.choice(slots)
else:
break
if slot and not dialogue_state.slots_filled[slot]:
return [DialogueAct(
'request',
[DialogueActItem(slot, Operator.EQ, '')])]
elif dialogue_state.item_in_focus:
name = dialogue_state.item_in_focus['name'] \
if 'name' in dialogue_state.item_in_focus \
else 'unknown'
dacts = [DialogueAct(
'offer',
[DialogueActItem('name', Operator.EQ, name)])]
for slot in dialogue_state.slots_filled:
if slot != 'requested' and dialogue_state.slots_filled[slot]:
if slot in dialogue_state.item_in_focus:
if slot not in ['id', 'name']:
dacts.append(
DialogueAct(
'inform',
[DialogueActItem(
slot,
Operator.EQ,
dialogue_state.item_in_focus[slot])]))
else:
dacts.append(DialogueAct(
'inform',
[DialogueActItem(
slot,
Operator.EQ,
'no info')]))
return dacts
else:
# Fallback action - cannot help!
# Note: We can have this check (no item in focus) at the beginning,
# but this would assume that the system
# queried a database before coming in here.
return [DialogueAct('canthelp', [])]
def train(self, data):
"""
Nothing to do here.
:param data:
:return:
"""
pass
def restart(self, args):
"""
Nothing to do here.
:param args:
:return:
"""
pass
def save(self, path=None):
"""
Nothing to do here.
:param path:
:return:
"""
pass
def load(self, path):
"""
Nothing to do here.
:param path:
:return:
"""
pass
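# A minimal construction sketch, kept as comments; the ontology path is hypothetical and
# `dialogue_state` stands for any state object exposing the attributes used above
# (is_terminal_state, requested_slot, item_in_focus, system_made_offer, slots_filled, ...).
#
# policy = HandcraftedPolicy({'ontology': 'domain/ontology.json'})
# system_acts = policy.next_action(dialogue_state)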
| [
"plato.dialogue.action.DialogueActItem",
"random.choice",
"plato.dialogue.action.DialogueAct",
"copy.deepcopy",
"plato.domain.ontology.Ontology"
] | [((3273, 3327), 'copy.deepcopy', 'deepcopy', (["self.ontology.ontology['system_requestable']"], {}), "(self.ontology.ontology['system_requestable'])\n", (3281, 3327), False, 'from copy import deepcopy\n'), ((3485, 3517), 'random.choice', 'random.choice', (['requestable_slots'], {}), '(requestable_slots)\n', (3498, 3517), False, 'import random\n'), ((1669, 1687), 'plato.domain.ontology.Ontology', 'Ontology', (['ontology'], {}), '(ontology)\n', (1677, 1687), False, 'from plato.domain.ontology import Ontology\n'), ((3695, 3727), 'random.choice', 'random.choice', (['requestable_slots'], {}), '(requestable_slots)\n', (3708, 3727), False, 'import random\n'), ((4088, 4108), 'random.choice', 'random.choice', (['slots'], {}), '(slots)\n', (4101, 4108), False, 'import random\n'), ((6522, 6549), 'plato.dialogue.action.DialogueAct', 'DialogueAct', (['"""canthelp"""', '[]'], {}), "('canthelp', [])\n", (6533, 6549), False, 'from plato.dialogue.action import DialogueAct, DialogueActItem, Operator\n'), ((2247, 2283), 'plato.dialogue.action.DialogueActItem', 'DialogueActItem', (['""""""', 'Operator.EQ', '""""""'], {}), "('', Operator.EQ, '')\n", (2262, 2283), False, 'from plato.dialogue.action import DialogueAct, DialogueActItem, Operator\n'), ((4755, 4775), 'random.choice', 'random.choice', (['slots'], {}), '(slots)\n', (4768, 4775), False, 'import random\n'), ((4969, 5007), 'plato.dialogue.action.DialogueActItem', 'DialogueActItem', (['slot', 'Operator.EQ', '""""""'], {}), "(slot, Operator.EQ, '')\n", (4984, 5007), False, 'from plato.dialogue.action import DialogueAct, DialogueActItem, Operator\n'), ((5281, 5323), 'plato.dialogue.action.DialogueActItem', 'DialogueActItem', (['"""name"""', 'Operator.EQ', 'name'], {}), "('name', Operator.EQ, name)\n", (5296, 5323), False, 'from plato.dialogue.action import DialogueAct, DialogueActItem, Operator\n'), ((3036, 3087), 'plato.dialogue.action.DialogueActItem', 'DialogueActItem', (['requested_slot', 'Operator.EQ', 'value'], {}), '(requested_slot, Operator.EQ, value)\n', (3051, 3087), False, 'from plato.dialogue.action import DialogueAct, DialogueActItem, Operator\n'), ((6083, 6128), 'plato.dialogue.action.DialogueActItem', 'DialogueActItem', (['slot', 'Operator.EQ', '"""no info"""'], {}), "(slot, Operator.EQ, 'no info')\n", (6098, 6128), False, 'from plato.dialogue.action import DialogueAct, DialogueActItem, Operator\n'), ((5745, 5815), 'plato.dialogue.action.DialogueActItem', 'DialogueActItem', (['slot', 'Operator.EQ', 'dialogue_state.item_in_focus[slot]'], {}), '(slot, Operator.EQ, dialogue_state.item_in_focus[slot])\n', (5760, 5815), False, 'from plato.dialogue.action import DialogueAct, DialogueActItem, Operator\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#The MIT License (MIT)
#Copyright (c) 2015 <NAME>
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import argparse
import logging
import yaml
from fuse import FUSE
from .b2fuse_main import B2Fuse
def create_parser():
parser = argparse.ArgumentParser()
parser.add_argument("mountpoint", type=str, help="Mountpoint for the B2 bucket")
parser.add_argument('--enable_hashfiles', dest='enable_hashfiles', action='store_true', help="Enable normally hidden hashes as exposed by B2 API")
parser.set_defaults(enable_hashfiles=False)
parser.add_argument('--version',action='version', version="B2Fuse version 1.3")
parser.add_argument('--use_disk', dest='use_disk', action='store_true')
parser.set_defaults(use_disk=False)
parser.add_argument('--debug', dest='debug', action='store_true')
parser.set_defaults(debug=False)
parser.add_argument(
"--account_id",
type=str,
default=None,
help="Account ID for your B2 account (overrides config)"
)
parser.add_argument(
"--application_key",
type=str,
default=None,
help="Application key for your account (overrides config)"
)
parser.add_argument(
"--bucket_id",
type=str,
default=None,
help="Bucket ID for the bucket to mount (overrides config)"
)
parser.add_argument("--temp_folder", type=str, default=".tmp/", help="Temporary file folder")
parser.add_argument("--config_filename", type=str, default="config.yaml", help="Config file")
parser.add_argument('--allow_other', dest='allow_other', action='store_true')
parser.set_defaults(allow_other=False)
return parser
def load_config(config_filename):
with open(config_filename) as f:
        return yaml.safe_load(f)  # safe_load works on modern PyYAML without an explicit Loader
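# The config file is expected to provide the credentials read in main(); an illustrative
# config.yaml with placeholder values could look like:
#
#   accountId: "your_account_id"
#   applicationKey: "your_application_key"
#   bucketId: "your_bucket_id"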
def main():
parser = create_parser()
args = parser.parse_args()
if args.debug:
logging.basicConfig(level=logging.INFO, format="%(asctime)s:%(levelname)s:%(message)s")
else:
logging.basicConfig(level=logging.WARNING, format="%(asctime)s:%(levelname)s:%(message)s")
if args.config_filename:
config = load_config(args.config_filename)
else:
config = {}
if args.account_id:
config["accountId"] = args.account_id
if args.application_key:
config["applicationKey"] = args.application_key
if args.bucket_id:
config["bucketId"] = args.bucket_id
if args.enable_hashfiles:
config["enableHashfiles"] = args.enable_hashfiles
else:
config["enableHashfiles"] = False
if args.temp_folder:
config["tempFolder"] = args.temp_folder
if args.use_disk:
config["useDisk"] = args.use_disk
else:
config["useDisk"] = False
args.options = {} # additional options passed to FUSE
if args.allow_other:
args.options['allow_other'] = True
with B2Fuse(
config["accountId"], config["applicationKey"], config["bucketId"],
config["enableHashfiles"], config["tempFolder"], config["useDisk"]
) as filesystem:
FUSE(filesystem, args.mountpoint, nothreads=True, foreground=True, **args.options)
if __name__ == '__main__':
main() | [
"logging.basicConfig",
"fuse.FUSE",
"argparse.ArgumentParser"
] | [((1273, 1298), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1296, 1298), False, 'import argparse\n'), ((2953, 3045), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s:%(levelname)s:%(message)s"""'}), "(level=logging.INFO, format=\n '%(asctime)s:%(levelname)s:%(message)s')\n", (2972, 3045), False, 'import logging\n'), ((3059, 3154), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.WARNING', 'format': '"""%(asctime)s:%(levelname)s:%(message)s"""'}), "(level=logging.WARNING, format=\n '%(asctime)s:%(levelname)s:%(message)s')\n", (3078, 3154), False, 'import logging\n'), ((4135, 4222), 'fuse.FUSE', 'FUSE', (['filesystem', 'args.mountpoint'], {'nothreads': '(True)', 'foreground': '(True)'}), '(filesystem, args.mountpoint, nothreads=True, foreground=True, **args.\n options)\n', (4139, 4222), False, 'from fuse import FUSE\n')] |
from django.contrib import admin
from .models import Paradigm, Language, Programmer
# Register your models here.
admin.site.register(Paradigm)
admin.site.register(Language)
admin.site.register(Programmer) | [
"django.contrib.admin.site.register"
] | [((114, 143), 'django.contrib.admin.site.register', 'admin.site.register', (['Paradigm'], {}), '(Paradigm)\n', (133, 143), False, 'from django.contrib import admin\n'), ((144, 173), 'django.contrib.admin.site.register', 'admin.site.register', (['Language'], {}), '(Language)\n', (163, 173), False, 'from django.contrib import admin\n'), ((174, 205), 'django.contrib.admin.site.register', 'admin.site.register', (['Programmer'], {}), '(Programmer)\n', (193, 205), False, 'from django.contrib import admin\n')] |
import sys
sys.path.insert(0, "deep_predictor")
from deep_predictor import flask_app
from waitress import serve
# add a log file handler to the waitress logger
import logging
logger = logging.getLogger("waitress")
logger.setLevel(logging.WARN)
file_handler = logging.FileHandler("deep_predictor/logs/waitress.log")
logger.addHandler(file_handler)
# serving options
deep_predictor = flask_app("deep_predictor/cfg/deep_predictor_dummy.cfg")
# deep_predictor = flask_app("deep_predictor/cfg/deep_predictor_small.cfg")
app = deep_predictor.create_app()
host = "0.0.0.0"
port = 5000
threads = 5
# url_scheme = "https"
# start server
serve(app, host=host, port=port, threads=threads) | [
"logging.getLogger",
"sys.path.insert",
"deep_predictor.flask_app",
"waitress.serve",
"logging.FileHandler"
] | [((11, 47), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""deep_predictor"""'], {}), "(0, 'deep_predictor')\n", (26, 47), False, 'import sys\n'), ((174, 203), 'logging.getLogger', 'logging.getLogger', (['"""waitress"""'], {}), "('waitress')\n", (191, 203), False, 'import logging\n'), ((249, 304), 'logging.FileHandler', 'logging.FileHandler', (['"""deep_predictor/logs/waitress.log"""'], {}), "('deep_predictor/logs/waitress.log')\n", (268, 304), False, 'import logging\n'), ((374, 430), 'deep_predictor.flask_app', 'flask_app', (['"""deep_predictor/cfg/deep_predictor_dummy.cfg"""'], {}), "('deep_predictor/cfg/deep_predictor_dummy.cfg')\n", (383, 430), False, 'from deep_predictor import flask_app\n'), ((621, 670), 'waitress.serve', 'serve', (['app'], {'host': 'host', 'port': 'port', 'threads': 'threads'}), '(app, host=host, port=port, threads=threads)\n', (626, 670), False, 'from waitress import serve\n')] |
# -*- coding: utf-8 -*-
"""
@author: hkaneko
"""
import matplotlib.pyplot as plt # import matplotlib.pyplot; conventionally imported under the shortened name plt
import pandas as pd
variable_number = 0 # index of the feature to plot in the histogram; note that indexing starts at 0
number_of_bins = 20 # number of bins in the histogram
dataset = pd.read_csv('iris_without_species.csv', index_col=0)
# the histogram is drawn below
plt.rcParams['font.size'] = 18 # font size for text such as the axis labels
plt.hist(dataset.iloc[:, variable_number], bins=number_of_bins) # create the histogram
plt.xlabel(dataset.columns[variable_number]) # x-axis label; here, the name of the variable_number-th column
plt.ylabel('frequency') # y-axis label
plt.show() # draw the plot with the settings above
| [
"matplotlib.pyplot.hist",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show"
] | [((264, 316), 'pandas.read_csv', 'pd.read_csv', (['"""iris_without_species.csv"""'], {'index_col': '(0)'}), "('iris_without_species.csv', index_col=0)\n", (275, 316), True, 'import pandas as pd\n'), ((397, 460), 'matplotlib.pyplot.hist', 'plt.hist', (['dataset.iloc[:, variable_number]'], {'bins': 'number_of_bins'}), '(dataset.iloc[:, variable_number], bins=number_of_bins)\n', (405, 460), True, 'import matplotlib.pyplot as plt\n'), ((475, 519), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['dataset.columns[variable_number]'], {}), '(dataset.columns[variable_number])\n', (485, 519), True, 'import matplotlib.pyplot as plt\n'), ((559, 582), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""frequency"""'], {}), "('frequency')\n", (569, 582), True, 'import matplotlib.pyplot as plt\n'), ((593, 603), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (601, 603), True, 'import matplotlib.pyplot as plt\n')] |
# -*- coding: utf-8 -*-
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.db.models import Q
from django.db.models.query import QuerySet
from django.forms import ModelForm
from django.contrib.auth.models import User
from django.conf import settings
from django.utils.translation import ugettext as _
from notebook.social.models import Social_Note, Social_Tag, Social_Snippet, Social_Bookmark, Social_Scrap, Social_Frame, Social_Frame_Notes
from notebook.notes.constants import *
from notebook.tags.models import Tag, Tag_Frame
standalone = False
import logging
import notebook
#TODO: put logging into a common module so both models and views can import from it
def getlogger(name):
logger = logging.getLogger(name)
hdlr = logging.FileHandler(settings.LOG_FILE)
formatter = logging.Formatter('[%(asctime)s]%(levelname)-8s%(name)s,%(pathname)s,line%(lineno)d,process%(process)d,thread%(thread)d,"%(message)s"','%Y-%m-%d %a %H:%M:%S')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(settings.LOG_LEVEL)
return logger
log = getlogger('notes.models')
#owner_name = ""
#TODO:move to util.py
def getT(username):
return create_model("T_"+str(username), Tag, username)
def getL(username):
return create_model("L_"+str(username), LinkageNote, username)
def getW(username):
return create_model("W_"+str(username), WorkingSet, username)
def getNC(username):
return create_model("NC_"+str(username), Note_Comment, username)
#TODO: make snippet, bookmark, scrap inherit note, so the common code can all be merged into note. This
# way note becomes the core engine. Might use an abstract base class since there might be many subclasses. (can it still do
# query on all the subclass tables if using abstract base class? seems like this is impossible)
#TODO: move code from view to model to build up the core engine
#TODO: also extends User, so a lot of user related method can be gathered there.
#user.getNote(bookname) should get the notebook needed, and the different db issue is handled there
#maybe also user.getFriends(), user.getGroups() (might not be important)
#TODO: add a new model class to inherit User table, so maybe other things can be added. At least, user related methods can be
# grouped there. Or maybe not a model class, but just a normal class with user related methods.
#class MultiUserManager(models.Manager):
# use_for_related_fields = True
#
# def __init__(self, owner_name):
# super(MultiUserManager, self).__init__()
# self.owner_name = owner_name
# self._db = owner_name
#self._meta = super._meta #TODO
# def get_query_set(self):
# #return super(MultiUserManager, self).get_query_set().filter(user__username__exact=self.owner_name)
# qs = QuerySet(self.model)
# #if self._db is not None:
# #qs = qs.using(self._db)
# qs = qs.using(self.owner_name)
# return qs
# def select_related(self, *args, **kwargs):
# return self.get_query_set().select_related(*args, **kwargs).filter(user__username__exact=self.owner_name)
#===============================================================================
#
# class Framable(models.Model):
# children = models.ManyToManyField(Framable, through="Frame_Children")
#
#
# def get_children_order(self):
# id = self.id
# fcs = Frame_Children.objects.using(self.owner_name).filter(frame__id=self.id).order_by('id')
# fcs_list = [fc for fc in fcs]
# for fc in fcs:
# fc.owner_name = self.owner_name
# if None in [fc._order for fc in fcs]:
# return [fc.child.id for fc in fcs]
# else:
#
# fcs_list.sort(key=lambda r: r._order)
# return [fc.child.id for fc in fcs_list]
#
#
#
# def set_children_order(self, order):
# seq = 0
# for child_id in order:
# fc = Frame_Children.objects.using(self.owner_name).get(frame__id=self.id, child__id=child_id)
# fc.owner_name = self.owner_name
# fc._order = seq
# fc.save()
# seq = seq + 1
# self.save() #save the order to the social note
#===============================================================================
#===============================================================================
#
#
# class Frame_Children(models.Model):
# frame = models.ForeignKey(Framable) #TODO:
# child = models.ForeignKey(Framable)
#
# class Meta:
# order_with_respect_to = 'frame'
#
#===============================================================================
class WorkingSet(models.Model):
name = models.CharField(max_length=50)
desc = models.TextField(blank=True, max_length=200)
tags = models.ManyToManyField(Tag)
private = models.BooleanField(default=False)
deleted = models.BooleanField(default=False)
#TODO: at most, only one workingset per user can have this set to true
current = models.BooleanField(default=False) #TODO: is it enough to keep this info in the session? Or rename as active? Or just use delete for this purpose
class Meta:
unique_together = (("name"),)
ordering = ['name']
def __unicode__(self):
return self.name
def display_tags(self):
return ','.join([t.name for t in self.tags.all().order_by('name')])
def get_tags(self):
return [t.id for t in self.tags.all().order_by('name')]
def add_tags(self, tags_str):
new_tags_list = [name.lstrip().rstrip() for name in tags_str.split(',')]
count_tag_created = 0
for tname in new_tags_list:
t, created = Tag.objects.using(self.owner_name).get_or_create(name=tname)
self.tags.add(t)
self.save()#TODO: performance?
if created:
count_tag_created += 1
return count_tag_created
from django.utils.translation import ugettext_lazy
#TODO: so far, no title
#TODO: public or private field
#TODO: add importance (just 2 level or 3 level, optional)? Or maybe don't use importance level, but associate a
# list of events with the experience. From the number of events, you can tell the importance.
class Note(models.Model):
#For bookmarks or scraps from some sites, the title can be quite long. Force users to truncate it?
title = models.CharField(verbose_name=ugettext_lazy('Title'), blank=True,max_length=2000, help_text=_("The size of the title is limited to 2000 characaters.")) #maybe 20 is enough
#event = models.CharField(blank=True,max_length=300)
#enforce in views to limit the length for snippet or enforce in save(), or move this field down to snippet
desc = models.TextField(verbose_name=ugettext_lazy('Description'), max_length=2000, blank=True, help_text=_("The size of the desc is limited to 2000 characaters."))
tags = models.ManyToManyField(Tag, verbose_name=ugettext_lazy('Tags'), blank=True, help_text=_("Default tag is 'random thought'.")) #TODO: NOT NULL?? #TODO: set default as random thoughts?
private = models.BooleanField(verbose_name=ugettext_lazy('Private'), default=False)
#in the current django implementation, setting auto_now or auto_now_add to True will cause the field to have editable=False and blank=True set.
init_date = models.DateTimeField(verbose_name=ugettext_lazy('date created'), auto_now_add=True, editable=True)
last_modi_date = models.DateTimeField(verbose_name=ugettext_lazy('date last modified'), auto_now=True)
deleted = models.BooleanField(default=False)
#TODO: how to display the Chinese version of this one with ugettext_lazy??
vote = models.IntegerField(verbose_name=ugettext_lazy('self ranking'), default=0) #TODO: change to rank
#user = models.ForeignKey(User)
class Meta:
ordering = ['-init_date','vote','desc','title']
# unique_together = (("note_id","user"),)
# abstract = True
def __unicode__(self):
return self.desc
def set_translation(self, original_lang, lang, title, desc):
trans, created = Note_Translation.objects.using(self.owner_name).get_or_create(note=self)
trans.original_lang = original_lang
trans.lang = lang
trans.title = title
trans.desc = desc
trans.owner_name = self.owner_name
trans.save()
def get_desc_en(self):
if not self.get_lang():
return self.desc
elif self.get_lang() == 'E':
return self.desc
else:
trans = Note_Translation.objects.using(self.owner_name).get(note=self)
return trans.desc
#get the Chinese version
def get_desc_cn(self):
if not self.get_lang():
return self.desc
elif self.get_lang() == 'C':
return self.desc
else:
trans = Note_Translation.objects.using(self.owner_name).get(note=self)
return trans.desc
def get_title_en(self):
if not self.get_lang():
return self.title
elif self.get_lang() == 'E':
return self.title
else:
trans = Note_Translation.objects.using(self.owner_name).get(note=self)
return trans.title
#get the Chinese version
def get_title_cn(self):
if not self.get_lang():
return self.title
elif self.get_lang() == 'C':
return self.title
else:
trans = Note_Translation.objects.using(self.owner_name).get(note=self)
return trans.title
def get_lang(self):
if Note_Translation.objects.using(self.owner_name).filter(note=self).exists():
trans = Note_Translation.objects.using(self.owner_name).get(note=self)
return trans.original_lang
else:
return ''
#TODO:better to move this method and the next one outside of this class?
def get_note_type(self):
try:
self.snippet
return 'Snippet'
except ObjectDoesNotExist:
try:
self.bookmark
return 'Bookmark'
except ObjectDoesNotExist:
try:
self.scrap
return 'Scrap'
except ObjectDoesNotExist:
try:
self.frame
return 'Frame'
except ObjectDoesNotExist:
log.info('No note type found!')
return 'Note' #TODO:
def get_note_bookname(self):
return model_book_dict.get(self.get_note_type())
def has_attachment(self):
if hasattr(self, 'attachment') and self.attachment:
return True
return False
#TODO: rewrite parts in views that get public notes. It should just use a filter or something with the help of this method
#or simply add to template (this way is used and it seems to work well, but then in the note list display, the counting of notes
#won't be correct. It is better to filter in views code)
def is_private(self):
#check the private field of this Note. if so, private. If not,
#check the tags to see if any tag is private
if self.private == True:
return True
else:
for tag in self.tags.all():
if tag.private == True:
return True
return False
def display_tags(self):
#return ','.join([t.name for t in self.tags.all()])
return ','.join(self.get_tags())
def get_tags_ids(self):
return [t.id for t in self.tags.all()]
def get_tags(self):
return [t.name for t in self.tags.all()]
def display_linkages(self):
return ','.join([str(l.id) for l in self.linkagenote_set.all()])
def display_frames(self):
return ','.join([str(l.id) for l in self.in_frames.filter(deleted=False)])
def get_frame_ids_titles(self):
return [[l.id, l.title] for l in self.in_frames.filter(deleted=False)]
def is_in_frame(self):
if self.get_frame_ids_titles():
return True
else:
return False
def get_desc_short(self):
if len(self.desc)>97:
return self.desc[0:97]+'...'
else:
return self.desc
def get_desc_super_short(self):
if len(self.desc)>50:
return self.desc#[0:50]+'...'
else:
return self.desc
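#get_relevance scores how closely this note's tags match a '-'-separated tag path: 0 if the leaf tag is missing,
#otherwise points are added for matching ancestor tags and their siblings (looked up through Tag_Frame),
#and one point is subtracted for every tag on the note that is unrelated to the path.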
def get_relevance(self, tag_path):
tag_list = tag_path.split('-')
tag_name = tag_list[-1]
relevance = 0
#check if this note really has tag_name as its tag.
if not tag_name in self.get_tags():
#print 'tag not in the note tags, return 0'
return 0 #not related at all. May raise an error here TODO:
relevant_tags = [tag_name]
#merge code of direct parent with grand parents?TODO:
direct_parent = tag_list[-2]
relevant_tags.append(direct_parent)
if direct_parent in self.get_tags():
relevance += 10 * tag_list.index(direct_parent)
ptf = Tag_Frame.objects.using(self.owner_name).get(name=direct_parent)
ptf.owner_name = self.owner_name
#print 'ptf.get_siblings(tag_name)', ptf.get_siblings(tag_name)
for sib in ptf.get_siblings(tag_name):
relevant_tags.append(sib)
if sib in self.get_tags():
relevance += 5
#TODO: checking for cousins.
#check for uncles
grandparent_list = tag_list[:-2]
grandparent_list.reverse()
for i, grandparent in enumerate(grandparent_list):
relevant_tags.append(grandparent)
if grandparent in self.get_tags():
relevance += 10 * tag_list.index(grandparent)
child_tag_name = tag_list[-i-2]
gtf = Tag_Frame.objects.using(self.owner_name).get(name=grandparent)
#print 'child_tag_name:', child_tag_name, 'gtf', gtf
gtf.owner_name = self.owner_name
for sib in gtf.get_siblings(child_tag_name):
relevant_tags.append(sib)
if sib in self.get_tags():
relevance += len(tag_list) - i #check if always > 0
log.info('relevant_tags'+str(relevant_tags))
for t in self.get_tags():
if not t in relevant_tags:
relevance -= 1
return relevance
#Not used.
# def add_tags(self, tags_str):
# new_tags_list = [name.lstrip().rstrip() for name in tags_str.split(',')]
# count_tag_created = 0
# for tname in new_tags_list:
# t, created = Tag.objects.using(self.owner_name).get_or_create(name=tname)
# self.tags.add(t)
# self.save()#TODO: performance?
#
# if created:
# count_tag_created += 1
# w = WorkingSet.objects.using(self.owner_name).get(name='snippetbook')
# w.tags.add(t)
# w.save()
# return count_tag_created
#TODO: make bookname a class field? And it gets set when calling getNote(), so the instance method doesn't need to pass bookname again.
#TODO: this method so far is not used by bookmarks.views and scraps.views, so either merge them or get rid of bookname here.
#tags_to_add is a list of tag names
#TODO: get rid of bookname, and use hasattr to tell what the instance is. Just like how update_tags does
def add_tags(self, tags_to_add, bookname):
#TODO: not right. Should tell bookname based on the instance. After using hasattr, no need to consider the case of 'notebook'
if not tags_to_add:
return 0
else:
if bookname == 'notebook':
bookname = 'snippetbook'
num_of_tags_created = 0
W = getW(self.owner_name)
w = W.objects.get(name=bookname)
#TODO:no hard coding here. get from system tags constants
tags_to_add = [tag for tag in tags_to_add if not tag.startswith('takenfrom:')]
for tag_name in tags_to_add:
t, created = Tag.objects.using(self.owner_name).get_or_create(name=tag_name)
#in any case, just add the tag to the snippet working set. If it is already
# in it, just no effect.
try:
w.tags.add(t)
w.save()
except Exception:
log.error("Error in add_tags with w "+w.name+' and tag '+t.name)
self.tags.add(t)
#TODO: there seems to be no need to save this instance, as self.tags.add(t) already saved data to the m2m table
# but to update the social note as well, I think below is still needed
self.save()
if created:
num_of_tags_created += 1
return num_of_tags_created
#tags_to_add is a list of tag names
def remove_tags(self, tags_to_remove):
tags_to_remove = [tag for tag in tags_to_remove if not tag.startswith('takenfrom:')]
for tag_name in tags_to_remove:
t = Tag.objects.using(self.owner_name).get(name=tag_name)
self.tags.remove(t)
self.save()
#TODO: merge the ws part with add_tags or maybe move that into save() (seems better since save will be called anyway to update social note)
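#update_tags replaces this note's editable tags with the ones in tags_str: current tags (except the system
#'takenfrom:' ones) are removed, each new tag is created if needed and added to both the note and the matching
#working set, and the number of newly created tags is returned.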
def update_tags(self, tags_str):
if not tags_str:
return 0
tags_str = ','.join([tag for tag in tags_str.split(',') if not tag.startswith('takenfrom:')])
new_tags_list = [name.lstrip().rstrip() for name in tags_str.split(',')]
#TODO:for now, just remove all the old tags and then add all the new ones
#might want to improve the algorithm later
#self.tags.clear()
for tag in self.tags.all():
if not tag.name.startswith('takenfrom:'):
self.tags.remove(tag)
count_tag_created = 0
W = getW(self.owner_name)
if hasattr(self, 'snippet'):
w = W.objects.get(name='snippetbook')
#w = WorkingSet.objects.using(self.owner_name).get(name='snippetbook')
if hasattr(self, 'bookmark'):
w = W.objects.get(name='bookmarkbook')
#w = WorkingSet.objects.using(self.owner_name).get(name='bookmarkbook')
if hasattr(self, 'scrap'):
w = W.objects.get(name='scrapbook')
#w = WorkingSet.objects.using(self.owner_name).get(name='scrapbook')
for tname in new_tags_list:
t, created = Tag.objects.using(self.owner_name).get_or_create(name=tname)
self.tags.add(t)
self.save()#TODO: performance?
if created:
count_tag_created += 1
w.tags.add(t)
w.save()
return count_tag_created
def get_comments(self):
comments = Note_Comment.objects.using(self.owner_name).filter(note=self)
return comments
#return ''.join([comment.desc for comment in comments])
def display_comments(self):
comments = self.get_comments()
return [{'id':comment.id,'desc':comment.desc} for comment in comments]
def get_social_note(self):
try:
sn = Social_Note.objects.get(owner__username=self.owner_name, owner_note_id=self.id)
return sn
except ObjectDoesNotExist:
return ''
#is attachment img? TODO: better ways of telling if the attach is an img or not
def is_img(self):
file_type = None
if self.get_note_type() == 'Snippet':
if self.snippet.attachment.name:
splits = self.snippet.attachment.name.split('.')
file_type = splits[len(splits)-1]
elif self.get_note_type() == 'Frame':
if self.frame.attachment.name:
splits = self.frame.attachment.name.split('.')
file_type = splits[len(splits)-1]
if file_type in ['jpg','JPG','jpeg','JPEG','png','PNG', 'gif']:
return True
else:
return False
#TODO:so far, notes of private tag cannot be viewed by others in the person's notebook. But should it be viewed by others in a group?
# maybe the logic should be notes of private tags are not really private. It is just that the tag is private and that private tag will
# not show in the tags list. But if that note has other tags, you can still find that note.
#The problem with notes of private tags not showing up in social notes is that if a group member sets his own tag such as *** as a private
#tag, then his notes in *** won't show in the group
#TODO:refactor out to several sync methods
#TODO:when a note become public again, should the frame that it is in get a save too so that the social frame can be updated?
#TODO: debug: when making a frame private, it is not removed from the social space.
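#save() keeps the per-user note in sync with the shared social space: public notes are mirrored into the matching
#Social_Snippet/Social_Bookmark/Social_Scrap/Social_Frame record (tags, url/attachment, included frame notes and
#translation included), while notes that turn private or deleted are removed from the social space again.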
def save(self, *args, **kwargs):
#do_something()
super(Note, self).save(*args, **kwargs) # Call the "real" save() method.
owner = User.objects.get(username=self.owner_name)
#TODO:if note is set deleted=True, then it should be removed from frames it is in. How about the frame in the social notebook?
#below doesn't work. Not sure why. TODO:
#===============================================================================
# if self.deleted:#
# if self.in_frames.all():#TODO:need self.owner_name?#
# for f in self.in_frames.using(self.owner_name).all():
# print 'remove self from frame:', f.id
# f.notes.remove(self)
# f.save()
#===============================================================================
sharingtoGroup = self.tags.filter(name__startswith="sharinggroup:").exists()
#TODO: consider moving this into various subclasses
#this checks if the note is private or delete
if not (self.is_private() or self.deleted) or sharingtoGroup:
if hasattr(self, 'snippet'):
sn, created = Social_Snippet.objects.get_or_create(owner=owner.member, owner_note_id=self.id)
if hasattr(self, 'bookmark'):
sn, created = Social_Bookmark.objects.get_or_create(owner=owner.member, owner_note_id=self.id)
if hasattr(self, 'scrap'):
sn, created = Social_Scrap.objects.get_or_create(owner=owner.member, owner_note_id=self.id)
if hasattr(self, 'frame'):
sn, created = Social_Frame.objects.get_or_create(owner=owner.member, owner_note_id=self.id)
#if the note has a sharinggroup: prefixed tag, then it still needs to be in the social space
#if not created and (self.private or self.deleted) and not self.tags.filter(name__startswith="sharinggroup:").exists():
if (self.is_private() or self.deleted) and not sharingtoGroup:
#if the note is already in social note, and the note in the original db is changed to private or deleted
# then it needs to be deleted from the social note
#TODO: still, deleting the child won't delete the parent. Will this be an issue? So at least disable mixed.
try:
sn = Social_Note.objects.get(owner=owner.member, owner_note_id=self.id)
sn.delete()
except ObjectDoesNotExist:
pass
else:
#whether the note is first created or just an update, below applies to both situations
sts = []
for t in self.tags.all():
#private tags should not be pushed to the social space, unless it contains "sharinggroup:"
if not t.private or t.name.startswith("sharinggroup:"):
st, created = Social_Tag.objects.get_or_create(name=t.name)
sts.append(st)
if hasattr(self, 'bookmark') or hasattr(self, 'scrap'):
log.debug('having attribute bookmark or scrap')
if hasattr(self, 'bookmark'):
sn.url = self.bookmark.url
if hasattr(self, 'scrap'):
sn.url = self.scrap.url
if hasattr(self, 'snippet') or hasattr(self, 'frame'):
log.debug('having attribute snippet or frame')
if hasattr(self, 'snippet'):
sn.attachment = self.snippet.attachment
if hasattr(self, 'frame'):
sn.attachment = self.frame.attachment
#sns_included = []
#TODO:test below
if hasattr(self, 'frame'):
self.frame.owner_name = self.owner_name
self.vote = self.frame.get_vote()
#for n_included in self.frame.notes.using(self.owner_name).all():
order = self.frame.get_notes_order()
#clear the included notes for the social note first
sn.notes.clear()
for note_id in order:
#TODO:how about deleted?
n_included = Note.objects.using(self.owner_name).get(id=note_id)
if not n_included.is_private():
sn_included = Social_Note.objects.get(owner=owner.member, owner_note_id=n_included.id)
#sns_included.append(sn_included)
sfn, created = Social_Frame_Notes.objects.get_or_create(social_frame=sn, social_note=sn_included)
sfn._order = order.index(note_id)
sfn.save()
#sn.notes = sns_included
sn.desc = self.desc
sn.title = self.title
#sn.event = self.event
sn.last_modi_date = self.last_modi_date
sn.init_date = self.init_date
sn.vote = self.vote
sn.private = self.is_private()
#attachment
sn.tags.clear()
for st in sts:
sn.tags.add(st)
#save the translation
if self.get_lang():
trans = Note_Translation.objects.using(self.owner_name).get(note=self)
sn.set_translation(trans.original_lang, trans.lang, trans.title, trans.desc)
#TODO: for group members that want every note posted to the group, email them the newly posted note. Might want to disable this
#due to privacy reasons (for example, the user can make a note private to take it back after posting)
sn.save()
from time import gmtime, strftime
def get_storage_loc(instance, filename):
#timepath= strftime('/%Y/%m/%d/')
#return 'noteattachments/'+instance.owner_name+timepath+filename
return 'att/'+instance.owner_name+'/'+filename
from django.core.files.storage import FileSystemStorage
from notebook.env_settings import DB_ROOT
fs = FileSystemStorage(location=DB_ROOT)
#class Note_Backup(models.Model):
# title = models.CharField(blank=True,max_length=50, help_text="The size of the title is limited to 50 characaters.") #maybe 20 is enough
# #event = models.CharField(blank=True,max_length=300)
# desc = models.TextField(max_length=200, help_text="The size of the desc is limited to 200 characaters.")
# tags = models.ManyToManyField(Tag,blank=True, help_text="Default tag is 'random thought'.") #TODO: NOT NULL?? #TODO: set default as random thoughts?
# private = models.BooleanField(default=False)
# init_date = models.DateTimeField('date created', auto_now_add=True)
# last_modi_date = models.DateTimeField('date last modified', auto_now=True)
# deleted = models.BooleanField(default=False)
# vote = models.IntegerField(default=0) #TODO: change to rank
# attachment = models.FileField(upload_to=get_storage_loc,blank=True, storage=fs)
class Note_Comment(models.Model):
note = models.ForeignKey(Note)
#commenter = models.ForeignKey(User)
desc = models.TextField(max_length=2000)
init_date = models.DateTimeField('date created', auto_now_add=True)
def __unicode__(self):
return self.desc
class Meta:
unique_together = (("note", "desc"),)
ordering = ['-init_date','note','desc']
#For now, we don't make frame of Snippet/Bookmark/Scrap. There are only frames of Notes.
#TODO: clean up code that duplicate with those in Note
class Frame(Note):
#max_length=100 is the default value. Don't change it for now. If the user really needs a longer file name, then change it. TODO:
attachment = models.FileField(upload_to=get_storage_loc, max_length=100, blank=True, storage=fs, verbose_name=ugettext_lazy('Attachment'))
#TODO: notes reference to the id of Note instead of note_id. Unlike ForeignKey field, ManyToManyField
#doesn't allow specifying a to_field argument. Think of whether to reference to note_id.
notes = models.ManyToManyField(Note, related_name='in_frames', through="Frame_Notes")
class Meta:
# unique_together = (("linkage_id","user"),)
verbose_name = "frame"
def __unicode__(self):
return ','.join([str(note.id) for note in self.notes.all()])
#===============================================================================
# def get_note_order(self):
# return [n.id for n in self.notes.all()]
#
# def set_note_order(self, ordered_ids):
#
#===============================================================================
def get_vote(self):
v = 0
#TODO:using(self.owner_name)?
for n in self.notes.using(self.owner_name).all():
v = v + n.vote
return v
def get_sum_of_note_tags(self):
ts = set([])
#TODO:using(self.owner_name)?
for n in self.notes.using(self.owner_name).all():
for t in n.tags.using(self.owner_name).all():
ts.add(t.name)
return list(ts)
def get_sum_of_note_tag_ids(self):
ts = set([])
#TODO:using(self.owner_name)?
for n in self.notes.using(self.owner_name).all():
for t in n.tags.using(self.owner_name).all():
ts.add(t.id)
return list(ts)
def get_display_of_sum_of_note_tags(self):
ts = self.get_sum_of_note_tags()
return ','.join(ts)
#not useful anymore, since a frame's tags should just be the sum of its included notes' tags
def get_unique_extra_tags(self):
ts = self.get_sum_of_note_tags()
return list(set(self.get_tags()).difference(set(ts)))
def get_display_of_unique_extra_tags(self):
return ','.join(self.get_unique_extra_tags())
def get_notes_in_order(self, sort=None):
order = self.get_notes_order()
ns = []
for note_id in order:
n = Note.objects.using(self.owner_name).get(id=note_id)
#add below so it can keep pointing to the right db
n.owner_name = self.owner_name
if not n.deleted:
ns.append(n)
if sort and sort == 'vote':
ns.sort(key=lambda r: r.vote, reverse=True)
return ns
def get_public_notes_in_order(self, sort=None):
order = self.get_notes_order()
ns = []
for note_id in order:
n = Note.objects.using(self.owner_name).get(id=note_id)
n.owner_name = self.owner_name
if n.private == False:
if not n.deleted:
ns.append(n)
if sort and sort == 'vote':
ns.sort(key=lambda r: r.vote, reverse=True)
return ns
#===============================================================================
# def display_notes(self):
# return [[n.id, n.title, n.desc, n.vote, n.get_note_bookname, n.get_note_type] for n in self.get_notes_in_order()]
#
#
# def display_public_notes(self):
# return [[n.id, n.title, n.desc, n.vote, n.get_note_bookname, n.get_note_type] for n in self.get_public_notes_in_order()]
#===============================================================================
#TODO: need save?
def update_tags(self, tags_str):
new_tags_list = [name.lstrip().rstrip() for name in tags_str.split(',')] #assume distinct here. TODO:
#TODO:for now, just remove all the old tags and then add all the new ones
#might want to improve the algorithm later
self.tags.clear()
for tname in new_tags_list:
t = Tag.objects.using(self.owner_name).get(name=tname)
self.tags.add(t)
#return True
#TODO: need save?
def add_notes(self, noteids_str):
if noteids_str:
note_id_list = [note_id.lstrip().rstrip() for note_id in noteids_str.split(',')]
current_num_of_notes = len(self.get_notes_order())
self.db = self.owner_name
for note_id in note_id_list:
if note_id != str(self.id):
n = Note.objects.using(self.owner_name).get(id=note_id)
if n not in self.notes.all():
fn,created = Frame_Notes.objects.using(self.owner_name).get_or_create(frame=self, note=n)
if created:
fn._order=current_num_of_notes
current_num_of_notes += 1
#self.notes.add(n)
#TODO:note_id or id?
def remove_note(self, note_id):
n = Note.objects.using(self.owner_name).get(id=note_id)
#self.notes.remove(n)
fn = Frame_Notes.objects.using(self.owner_name).get(frame=self, note=n)
#TODO: need to move the seq number over?
fn.owner_name = self.owner_name
fn.delete()
#Need to update the social one
self.save()
#===============================================================================
# def get_owner_name(self):
# if self.owner_name:
# return self.owner_name
# else:
# if
#===============================================================================
#replace the original get_frame_notes_order coming with the django model for order_with_respect_to
def get_notes_order(self):
id = self.id
fns = Frame_Notes.objects.using(self.owner_name).filter(frame__id=self.id).order_by('id')
fns_list = [fn for fn in fns]
for fn in fns_list:
fn.owner_name = self.owner_name
if None in [fn._order for fn in fns_list]:
return [fn.note.id for fn in fns]
else:
fns_list.sort(key=lambda r: r._order)
return [fn.note.id for fn in fns_list]
def set_notes_order(self, order):
seq = 0
for note_id in order:
fn = Frame_Notes.objects.using(self.owner_name).get(frame__id=self.id, note__id=note_id)
fn.owner_name = self.owner_name
fn._order = seq
fn.save()
seq = seq + 1
self.save() #save the order to the social note
def has_attachment(self):
if hasattr(self, 'attachment') and self.attachment:
return True
notes = self.get_notes_in_order()
for note in notes:
if note.get_note_type() == 'Snippet' and note.snippet.has_attachment():
return True
if note.get_note_type() == 'Frame':
note.frame.owner_name = self.owner_name
if note.frame.has_attachment():
return True
return False
def get_related_frames(self):
related = []
offsprings = self.get_offsprings()
for child in self.notes.filter(deleted=False):
child.owner_name = self.owner_name
uncles = child.get_frame_ids_titles()
for uncle in uncles:
uncle.append('(note '+str(child.id)+') '+child.title+': '+child.desc)
if uncle[0] != self.id and uncle[0] not in self.get_offsprings() and uncle not in related:
#make it into a tuple so it can be hashed for sorting later. (list cannot be hashed)
related.append((uncle[0],uncle[1],uncle[2]))
#for now, don't go up further TODO:
if child.get_note_type() == 'Frame':
child.frame.owner_name = self.owner_name
related.extend(child.frame.get_related_frames())
#get a copy of related. Otherwise, removing item while iterating through the list will mess things up
for nr in related[:]:
if nr[0] in self.get_offsprings():
related.remove(nr)
if nr[0] == self.id:
related.remove(nr)
related = list(set(related))
related.sort(key=lambda r: r[1],reverse = False)
return related
def get_size_of_related_frames(self):
return len(self.get_related_frames())
def get_offsprings(self):
offsprings = [n.id for n in self.notes.all()] #it is managed, can it use .notes reference directly? TODO:
for child in self.notes.all():
child.owner_name = self.owner_name
if child.get_note_type() == 'Frame':
child.frame.owner_name = self.owner_name
offsprings.extend(child.frame.get_offsprings())
#return offsprings
return list(set(offsprings))
class Frame_Notes(models.Model):
frame = models.ForeignKey(Frame, related_name='note_and_frame') #TODO:
note = models.ForeignKey(Note)
class Meta:
order_with_respect_to = 'frame'
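#Frame_Notes is the ordered through table linking a Frame to its member Notes; the _order column added by
#order_with_respect_to is read and maintained explicitly by get_notes_order/set_notes_order/add_notes above.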
#TODO:for now LinkageNote is kept, but only for viewing old linkages. Frame is used to "frame" notes together.
#TODO:clean up web UI
#TODO: or merge this with Note field, and add field is_linkage, type_of_linkage. If you do so, linkage can link linkage, that
#might make it too complicated. If there is a level of grouping above linkage, make it another thing in the future?
class LinkageNote(models.Model):
LINKAGE_TYPE_CHOICES = (
('T', 'the same topic'),
('E', 'the same event'),
)
type_of_linkage = models.CharField(max_length=1, choices=LINKAGE_TYPE_CHOICES,blank=True)
title = models.CharField(blank=True, max_length=2000) #TODO: need title?
desc = models.TextField(blank=True, max_length=2000)
tags = models.ManyToManyField(Tag, blank=True)#TODO: get rid of
private = models.BooleanField(default=False)
init_date = models.DateTimeField('date created', auto_now_add=True)
last_modi_date = models.DateTimeField('date last modified', auto_now=True)
deleted = models.BooleanField(default=False)
vote = models.IntegerField(default=0, blank=True)#TODO: get rid of
attachment = models.FileField(upload_to=get_storage_loc, blank=True, storage=fs)
#TODO: notes reference to the id of Note instead of note_id. Unlike ForeignKey field, ManyToManyField
#doesn't allow specifying a to_field argument. Think of whether to reference to note_id.
notes = models.ManyToManyField(Note) #TODO: so far doesn't allow linkage of linkange note
#linkage_id = models.IntegerField()
# user = models.ForeignKey(User)
class Meta:
# unique_together = (("linkage_id","user"),)
verbose_name = "linkage"
def __unicode__(self):
return ','.join([str(note.id) for note in self.notes.all()])
def is_private(self):
#check the private field of this Note. if so, private. If not,
#check the tags to see if any tag is private
if self.private == True:
return True
else:
for tag in self.tags.all():
if tag.private == True:
return True
def get_vote(self):
v = 0
for n in self.notes.all():
v = v + n.vote
return v
def get_sum_of_note_tags(self):
ts = set([])
for n in self.notes.all():
for t in n.tags.all():
ts.add(t.name)
return list(ts)
def get_display_of_sum_of_note_tags(self):
ts = self.get_sum_of_note_tags()
return ','.join(ts)
def get_unique_extra_tags(self):
ts = self.get_sum_of_note_tags()
return list(set(self.get_tags()).difference(set(ts)))
def get_display_of_unique_extra_tags(self):
return ','.join(self.get_unique_extra_tags())
def get_tags(self):
return [t.name for t in self.tags.all()]
def display_tags(self):
return ','.join(self.get_tags())
def get_desc_short(self):
if len(self.desc)>97:
return self.desc[0:97]+'...'
else:
return self.desc
def get_desc_super_short(self):
if len(self.desc)>30:
return self.desc[0:30]+'...'
else:
return self.desc
def display_notes(self):
#return [(n.note_id, n.title, n.desc,n.display_tags()) for n in self.notes.all()]
return [[n.id, n.title, n.desc] for n in self.notes.all()]
def display_public_notes(self):
q = ~Q(tags__private=True)
return [(n.id, n.title, n.desc,n.display_tags()) for n in self.notes.filter(q) if n.private==False]
#TODO: need save?
def update_tags(self, tags_str):
new_tags_list = [name.lstrip().rstrip() for name in tags_str.split(',')] #assume distinct here. TODO:
#TODO:for now, just remove all the old tags and then add all the new ones
#might want to improve the algorithm later
self.tags.clear()
for tname in new_tags_list:
t = Tag.objects.using(self.owner_name).get(name=tname)
self.tags.add(t)
#return True
#TODO: need save?
def add_notes(self, noteids_str):
note_id_list = [note_id.lstrip().rstrip() for note_id in noteids_str.split(',')]
for note_id in note_id_list:
n = Note.objects.using(self.owner_name).get(id=note_id)
self.notes.add(n)
#TODO:note_id or id?
def remove_note(self, note_id):
# if self.__class__.owner_name:
# n = Note.objects.using(self.__class__.owner_name).get(id=note_id)
# else:
# n = Note.objects.get(id=note_id)
n = Note.objects.using(self.owner_name).get(id=note_id)
self.notes.remove(n)
#TODO: should folder be universal for snippets/bookmarks/scraps??
#search can be saved as dynamic folder
class Folder(models.Model):
name = models.CharField(blank=False,max_length=50) #False should be the default value, right? TODO:
value = models.CharField(blank=False,max_length=200)
desc = models.CharField(blank=True,max_length=500)
deleted = models.BooleanField(default=False)
init_date = models.DateTimeField('date created', auto_now_add=True)
private = models.BooleanField(default=False) #TODO:need it?
#folder_id = models.IntegerField()
#user = models.ForeignKey(User)
class Meta:
ordering = ['name', '-init_date']
# unique_together = (("folder_id","user"), ("name","user"),)
def __unicode__(self):
return self.name
#~ def getName(self, v):
#~ return
class Frame_Folder(Folder):
pass
#think of implementing this with NoSQL
#Fold similar notes together, and only show the representative one. TODO:
#For notes folded, if not the representative one, mark them as deleted. So they never show up
#in normal notes list. And for the representative one, just provide more notes as alternative explanations
#===============================================================================
# class Fold(models.Model):
# repr = models.ForeignKey(Note)
# note_ids = models.CharField(blank=False,max_length=800)
#
#===============================================================================
class Cache(models.Model):
note_ids = models.CharField(blank=False,max_length=800)
# cache_id = models.IntegerField()
# user = models.ForeignKey(User)
# class Meta:
# unique_together = (("cache_id","user"),)
def __unicode__(self):
return self.note_ids
class Frame_Cache(Cache):
cache_id = models.AutoField(primary_key=True)
#Store the alternative language translation for notes.
class Note_Translation(models.Model):
note = models.ForeignKey(Note)
LANG_TYPE_CHOICES = (
('C', 'Chinese'),
('E', 'English'),
)
lang = models.CharField(max_length=1, choices=LANG_TYPE_CHOICES, verbose_name=ugettext_lazy('Language'),) #mark the language in the original note
original_lang = models.CharField(max_length=1, choices=LANG_TYPE_CHOICES, verbose_name=ugettext_lazy('Original language'),)
title = models.CharField(verbose_name=ugettext_lazy('Title'), blank=True,max_length=2000, help_text=_("The size of the title is limited to 2000 characaters."))
desc = models.TextField(verbose_name=ugettext_lazy('Description'), max_length=2000, blank=True, help_text=_("The size of the desc is limited to 2000 characaters."))
#TODO:should this table be here?
class UserAuth(models.Model):
user = models.ForeignKey(User)
site = models.CharField(blank=False,max_length=20)
access_token_key = models.CharField(blank=False,max_length=80)
access_token_secret = models.CharField(blank=False,max_length=80)
#TODO: store user's profile name on the site?
#profile_name = models.CharField(blank=False,max_length=80)
class Meta:
unique_together = (("user","site"),)
def __unicode__(self):
return self.user.__unicode__()+'@'+self.site
def getAccessKey(username, site):
user = User.objects.get(username=username)
ua = UserAuth.objects.get(user=user, site=site)
return ua.access_token_key, ua.access_token_secret
def getBoundSites(username):
user = User.objects.get(username=username)
uas = UserAuth.objects.filter(user=user)
return [ua.site for ua in uas]
from django.contrib import admin
def _create_model(name, base=models.Model, fields=None, app_label='', module='', options=None, admin_opts=None):
"""
Create specified model
"""
class Meta:
# Using type('Meta', ...) gives a dictproxy error during model creation
pass
if app_label:
# app_label must be set using the Meta inner class
setattr(Meta, 'app_label', app_label)
# Update Meta with any options that were provided
if options is not None:
for key, value in options.iteritems():
setattr(Meta, key, value)
# Set up a dictionary to simulate declarations within a class
attrs = {'__module__': module, 'Meta': Meta}
# Add in any fields that were provided
if fields:
attrs.update(fields)
# Create the class, which automatically triggers ModelBase processing
model = type(name, (base,), attrs)
# Create an Admin class if admin options were provided
if admin_opts is not None:
class Admin(admin.ModelAdmin):
pass
for key, value in admin_opts:
setattr(Admin, key, value)
admin.site.register(model, Admin)
return model
#TODO: will this only create one model instead of duplicated ones
def create_model(name, base, owner_name, #db_table,
fields={}, options={}):
'''create a proxy model using a MultiUserManager manager.'''
#fields.update({'objects':MultiUserManager(owner_name)})
# if base==Note or base==LinkageNote:
# fields.update({'attachment':models.FileField(upload_to='noteattachments/'+owner_name+'/%Y/%m/%d', blank=True) })
options.update({'proxy':True})
fields.update({'owner_name':owner_name})
#options.update({'db_table':db_table})
return _create_model(name=name, base=base, fields=fields,
app_label='notebook.notes', module='notebook.notes.models', options=options)
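#create_model_form builds a ModelForm class for one of the per-user proxy models, pre-filling the 'tags' and
#'notes' form fields with querysets read from that user's database (model.owner_name).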
def create_model_form(name, model, base=ModelForm, fields={}, options={}):
from django import forms
if 'tags' in dir(model):
if ('tags' not in fields) and (not options.get('exclude') or 'tags' not in options.get('exclude')):
# fields.update({'tags':forms.ModelMultipleChoiceField(queryset=Tag.objects.filter(user__username__exact=model.objects.owner_name).order_by('name'))})
fields.update({'tags':forms.ModelMultipleChoiceField(queryset=Tag.objects.using(model.owner_name).all().order_by('name'))})
if 'notes' in dir(model):
if (not options.get('exclude')) or ('notes' not in options.get('exclude')):
# fields.update({'tags':forms.ModelMultipleChoiceField(queryset=Tag.objects.filter(user__username__exact=model.objects.owner_name).order_by('name'))})
fields.update({'notes':forms.ModelMultipleChoiceField(queryset=Note.objects.using(model.owner_name).all().order_by('id'))})
options.update({'model':model})
return _create_model(name=name, base=base, fields=fields,
app_label='notebook.notes', module='notebook.notes.models', options=options)
| [
"logging.getLogger",
"django.db.models.TextField",
"django.db.models.IntegerField",
"django.contrib.auth.models.User.objects.get",
"notebook.social.models.Social_Frame_Notes.objects.get_or_create",
"django.db.models.ForeignKey",
"django.db.models.FileField",
"notebook.social.models.Social_Bookmark.objects.get_or_create",
"logging.FileHandler",
"notebook.tags.models.Tag.objects.using",
"django.db.models.DateTimeField",
"notebook.social.models.Social_Note.objects.get",
"django.db.models.CharField",
"django.core.files.storage.FileSystemStorage",
"django.utils.translation.ugettext_lazy",
"notebook.social.models.Social_Frame.objects.get_or_create",
"notebook.tags.models.Tag_Frame.objects.using",
"django.db.models.BooleanField",
"notebook.social.models.Social_Scrap.objects.get_or_create",
"notebook.social.models.Social_Snippet.objects.get_or_create",
"django.utils.translation.ugettext",
"logging.Formatter",
"django.contrib.admin.site.register",
"django.db.models.ManyToManyField",
"django.db.models.AutoField",
"notebook.social.models.Social_Tag.objects.get_or_create",
"django.db.models.Q"
] | [((28573, 28608), 'django.core.files.storage.FileSystemStorage', 'FileSystemStorage', ([], {'location': 'DB_ROOT'}), '(location=DB_ROOT)\n', (28590, 28608), False, 'from django.core.files.storage import FileSystemStorage\n'), ((748, 771), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (765, 771), False, 'import logging\n'), ((783, 821), 'logging.FileHandler', 'logging.FileHandler', (['settings.LOG_FILE'], {}), '(settings.LOG_FILE)\n', (802, 821), False, 'import logging\n'), ((842, 1011), 'logging.Formatter', 'logging.Formatter', (['"""[%(asctime)s]%(levelname)-8s%(name)s,%(pathname)s,line%(lineno)d,process%(process)d,thread%(thread)d,"%(message)s\\""""', '"""%Y-%m-%d %a %H:%M:%S"""'], {}), '(\n \'[%(asctime)s]%(levelname)-8s%(name)s,%(pathname)s,line%(lineno)d,process%(process)d,thread%(thread)d,"%(message)s"\'\n , \'%Y-%m-%d %a %H:%M:%S\')\n', (859, 1011), False, 'import logging\n'), ((4833, 4864), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (4849, 4864), False, 'from django.db import models\n'), ((4877, 4921), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'max_length': '(200)'}), '(blank=True, max_length=200)\n', (4893, 4921), False, 'from django.db import models\n'), ((4933, 4960), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Tag'], {}), '(Tag)\n', (4955, 4960), False, 'from django.db import models\n'), ((4975, 5009), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (4994, 5009), False, 'from django.db import models\n'), ((5024, 5058), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (5043, 5058), False, 'from django.db import models\n'), ((5150, 5184), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (5169, 5184), False, 'from django.db import models\n'), ((7769, 7803), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (7788, 7803), False, 'from django.db import models\n'), ((29570, 29593), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Note'], {}), '(Note)\n', (29587, 29593), False, 'from django.db import models\n'), ((29650, 29683), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(2000)'}), '(max_length=2000)\n', (29666, 29683), False, 'from django.db import models\n'), ((29700, 29755), 'django.db.models.DateTimeField', 'models.DateTimeField', (['"""date created"""'], {'auto_now_add': '(True)'}), "('date created', auto_now_add=True)\n", (29720, 29755), False, 'from django.db import models\n'), ((30621, 30698), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Note'], {'related_name': '"""in_frames"""', 'through': '"""Frame_Notes"""'}), "(Note, related_name='in_frames', through='Frame_Notes')\n", (30643, 30698), False, 'from django.db import models\n'), ((39662, 39717), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Frame'], {'related_name': '"""note_and_frame"""'}), "(Frame, related_name='note_and_frame')\n", (39679, 39717), False, 'from django.db import models\n'), ((39736, 39759), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Note'], {}), '(Note)\n', (39753, 39759), False, 'from django.db import models\n'), ((40356, 40428), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1)', 'choices': 'LINKAGE_TYPE_CHOICES', 'blank': '(True)'}), 
'(max_length=1, choices=LINKAGE_TYPE_CHOICES, blank=True)\n', (40372, 40428), False, 'from django.db import models\n'), ((40440, 40485), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(2000)'}), '(blank=True, max_length=2000)\n', (40456, 40485), False, 'from django.db import models\n'), ((40517, 40562), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'max_length': '(2000)'}), '(blank=True, max_length=2000)\n', (40533, 40562), False, 'from django.db import models\n'), ((40574, 40613), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Tag'], {'blank': '(True)'}), '(Tag, blank=True)\n', (40596, 40613), False, 'from django.db import models\n'), ((40645, 40679), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (40664, 40679), False, 'from django.db import models\n'), ((40696, 40751), 'django.db.models.DateTimeField', 'models.DateTimeField', (['"""date created"""'], {'auto_now_add': '(True)'}), "('date created', auto_now_add=True)\n", (40716, 40751), False, 'from django.db import models\n'), ((40773, 40830), 'django.db.models.DateTimeField', 'models.DateTimeField', (['"""date last modified"""'], {'auto_now': '(True)'}), "('date last modified', auto_now=True)\n", (40793, 40830), False, 'from django.db import models\n'), ((40845, 40879), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (40864, 40879), False, 'from django.db import models\n'), ((40892, 40934), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'blank': '(True)'}), '(default=0, blank=True)\n', (40911, 40934), False, 'from django.db import models\n'), ((40969, 41036), 'django.db.models.FileField', 'models.FileField', ([], {'upload_to': 'get_storage_loc', 'blank': '(True)', 'storage': 'fs'}), '(upload_to=get_storage_loc, blank=True, storage=fs)\n', (40985, 41036), False, 'from django.db import models\n'), ((41257, 41285), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Note'], {}), '(Note)\n', (41279, 41285), False, 'from django.db import models\n'), ((44871, 44915), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(False)', 'max_length': '(50)'}), '(blank=False, max_length=50)\n', (44887, 44915), False, 'from django.db import models\n'), ((44976, 45021), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(False)', 'max_length': '(200)'}), '(blank=False, max_length=200)\n', (44992, 45021), False, 'from django.db import models\n'), ((45032, 45076), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(500)'}), '(blank=True, max_length=500)\n', (45048, 45076), False, 'from django.db import models\n'), ((45090, 45124), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (45109, 45124), False, 'from django.db import models\n'), ((45141, 45196), 'django.db.models.DateTimeField', 'models.DateTimeField', (['"""date created"""'], {'auto_now_add': '(True)'}), "('date created', auto_now_add=True)\n", (45161, 45196), False, 'from django.db import models\n'), ((45211, 45245), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (45230, 45245), False, 'from django.db import models\n'), ((46312, 46357), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(False)', 'max_length': '(800)'}), '(blank=False, 
max_length=800)\n', (46328, 46357), False, 'from django.db import models\n'), ((46607, 46641), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (46623, 46641), False, 'from django.db import models\n'), ((46748, 46771), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Note'], {}), '(Note)\n', (46765, 46771), False, 'from django.db import models\n'), ((47558, 47581), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {}), '(User)\n', (47575, 47581), False, 'from django.db import models\n'), ((47593, 47637), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(False)', 'max_length': '(20)'}), '(blank=False, max_length=20)\n', (47609, 47637), False, 'from django.db import models\n'), ((47660, 47704), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(False)', 'max_length': '(80)'}), '(blank=False, max_length=80)\n', (47676, 47704), False, 'from django.db import models\n'), ((47731, 47775), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(False)', 'max_length': '(80)'}), '(blank=False, max_length=80)\n', (47747, 47775), False, 'from django.db import models\n'), ((48095, 48130), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'username': 'username'}), '(username=username)\n', (48111, 48130), False, 'from django.contrib.auth.models import User\n'), ((48283, 48318), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'username': 'username'}), '(username=username)\n', (48299, 48318), False, 'from django.contrib.auth.models import User\n'), ((22299, 22341), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'username': 'self.owner_name'}), '(username=self.owner_name)\n', (22315, 22341), False, 'from django.contrib.auth.models import User\n'), ((49558, 49591), 'django.contrib.admin.site.register', 'admin.site.register', (['model', 'Admin'], {}), '(model, Admin)\n', (49577, 49591), False, 'from django.contrib import admin\n'), ((6625, 6647), 'django.utils.translation.ugettext_lazy', 'ugettext_lazy', (['"""Title"""'], {}), "('Title')\n", (6638, 6647), False, 'from django.utils.translation import ugettext_lazy\n'), ((6687, 6745), 'django.utils.translation.ugettext', '_', (['"""The size of the title is limited to 2000 characaters."""'], {}), "('The size of the title is limited to 2000 characaters.')\n", (6688, 6745), True, 'from django.utils.translation import ugettext as _\n'), ((6977, 7005), 'django.utils.translation.ugettext_lazy', 'ugettext_lazy', (['"""Description"""'], {}), "('Description')\n", (6990, 7005), False, 'from django.utils.translation import ugettext_lazy\n'), ((7047, 7104), 'django.utils.translation.ugettext', '_', (['"""The size of the desc is limited to 2000 characaters."""'], {}), "('The size of the desc is limited to 2000 characaters.')\n", (7048, 7104), True, 'from django.utils.translation import ugettext as _\n'), ((7158, 7179), 'django.utils.translation.ugettext_lazy', 'ugettext_lazy', (['"""Tags"""'], {}), "('Tags')\n", (7171, 7179), False, 'from django.utils.translation import ugettext_lazy\n'), ((7204, 7241), 'django.utils.translation.ugettext', '_', (['"""Default tag is \'random thought\'."""'], {}), '("Default tag is \'random thought\'.")\n', (7205, 7241), True, 'from django.utils.translation import ugettext as _\n'), ((7347, 7371), 'django.utils.translation.ugettext_lazy', 'ugettext_lazy', (['"""Private"""'], {}), "('Private')\n", (7360, 7371), False, 'from django.utils.translation 
import ugettext_lazy\n'), ((7583, 7612), 'django.utils.translation.ugettext_lazy', 'ugettext_lazy', (['"""date created"""'], {}), "('date created')\n", (7596, 7612), False, 'from django.utils.translation import ugettext_lazy\n'), ((7703, 7738), 'django.utils.translation.ugettext_lazy', 'ugettext_lazy', (['"""date last modified"""'], {}), "('date last modified')\n", (7716, 7738), False, 'from django.utils.translation import ugettext_lazy\n'), ((7920, 7949), 'django.utils.translation.ugettext_lazy', 'ugettext_lazy', (['"""self ranking"""'], {}), "('self ranking')\n", (7933, 7949), False, 'from django.utils.translation import ugettext_lazy\n'), ((20425, 20504), 'notebook.social.models.Social_Note.objects.get', 'Social_Note.objects.get', ([], {'owner__username': 'self.owner_name', 'owner_note_id': 'self.id'}), '(owner__username=self.owner_name, owner_note_id=self.id)\n', (20448, 20504), False, 'from notebook.social.models import Social_Note, Social_Tag, Social_Snippet, Social_Bookmark, Social_Scrap, Social_Frame, Social_Frame_Notes\n'), ((30372, 30399), 'django.utils.translation.ugettext_lazy', 'ugettext_lazy', (['"""Attachment"""'], {}), "('Attachment')\n", (30385, 30399), False, 'from django.utils.translation import ugettext_lazy\n'), ((43426, 43447), 'django.db.models.Q', 'Q', ([], {'tags__private': '(True)'}), '(tags__private=True)\n', (43427, 43447), False, 'from django.db.models import Q\n'), ((46938, 46963), 'django.utils.translation.ugettext_lazy', 'ugettext_lazy', (['"""Language"""'], {}), "('Language')\n", (46951, 46963), False, 'from django.utils.translation import ugettext_lazy\n'), ((47097, 47131), 'django.utils.translation.ugettext_lazy', 'ugettext_lazy', (['"""Original language"""'], {}), "('Original language')\n", (47110, 47131), False, 'from django.utils.translation import ugettext_lazy\n'), ((47176, 47198), 'django.utils.translation.ugettext_lazy', 'ugettext_lazy', (['"""Title"""'], {}), "('Title')\n", (47189, 47198), False, 'from django.utils.translation import ugettext_lazy\n'), ((47238, 47296), 'django.utils.translation.ugettext', '_', (['"""The size of the title is limited to 2000 characaters."""'], {}), "('The size of the title is limited to 2000 characaters.')\n", (47239, 47296), True, 'from django.utils.translation import ugettext as _\n'), ((47340, 47368), 'django.utils.translation.ugettext_lazy', 'ugettext_lazy', (['"""Description"""'], {}), "('Description')\n", (47353, 47368), False, 'from django.utils.translation import ugettext_lazy\n'), ((47410, 47467), 'django.utils.translation.ugettext', '_', (['"""The size of the desc is limited to 2000 characaters."""'], {}), "('The size of the desc is limited to 2000 characaters.')\n", (47411, 47467), True, 'from django.utils.translation import ugettext as _\n'), ((13753, 13793), 'notebook.tags.models.Tag_Frame.objects.using', 'Tag_Frame.objects.using', (['self.owner_name'], {}), '(self.owner_name)\n', (13776, 13793), False, 'from notebook.tags.models import Tag, Tag_Frame\n'), ((23360, 23439), 'notebook.social.models.Social_Snippet.objects.get_or_create', 'Social_Snippet.objects.get_or_create', ([], {'owner': 'owner.member', 'owner_note_id': 'self.id'}), '(owner=owner.member, owner_note_id=self.id)\n', (23396, 23439), False, 'from notebook.social.models import Social_Note, Social_Tag, Social_Snippet, Social_Bookmark, Social_Scrap, Social_Frame, Social_Frame_Notes\n'), ((23513, 23598), 'notebook.social.models.Social_Bookmark.objects.get_or_create', 'Social_Bookmark.objects.get_or_create', ([], {'owner': 'owner.member', 
'owner_note_id': 'self.id'}), '(owner=owner.member, owner_note_id=self.id\n )\n', (23550, 23598), False, 'from notebook.social.models import Social_Note, Social_Tag, Social_Snippet, Social_Bookmark, Social_Scrap, Social_Frame, Social_Frame_Notes\n'), ((23664, 23741), 'notebook.social.models.Social_Scrap.objects.get_or_create', 'Social_Scrap.objects.get_or_create', ([], {'owner': 'owner.member', 'owner_note_id': 'self.id'}), '(owner=owner.member, owner_note_id=self.id)\n', (23698, 23741), False, 'from notebook.social.models import Social_Note, Social_Tag, Social_Snippet, Social_Bookmark, Social_Scrap, Social_Frame, Social_Frame_Notes\n'), ((23813, 23890), 'notebook.social.models.Social_Frame.objects.get_or_create', 'Social_Frame.objects.get_or_create', ([], {'owner': 'owner.member', 'owner_note_id': 'self.id'}), '(owner=owner.member, owner_note_id=self.id)\n', (23847, 23890), False, 'from notebook.social.models import Social_Note, Social_Tag, Social_Snippet, Social_Bookmark, Social_Scrap, Social_Frame, Social_Frame_Notes\n'), ((24575, 24641), 'notebook.social.models.Social_Note.objects.get', 'Social_Note.objects.get', ([], {'owner': 'owner.member', 'owner_note_id': 'self.id'}), '(owner=owner.member, owner_note_id=self.id)\n', (24598, 24641), False, 'from notebook.social.models import Social_Note, Social_Tag, Social_Snippet, Social_Bookmark, Social_Scrap, Social_Frame, Social_Frame_Notes\n'), ((5864, 5898), 'notebook.tags.models.Tag.objects.using', 'Tag.objects.using', (['self.owner_name'], {}), '(self.owner_name)\n', (5881, 5898), False, 'from notebook.tags.models import Tag, Tag_Frame\n'), ((14510, 14550), 'notebook.tags.models.Tag_Frame.objects.using', 'Tag_Frame.objects.using', (['self.owner_name'], {}), '(self.owner_name)\n', (14533, 14550), False, 'from notebook.tags.models import Tag, Tag_Frame\n'), ((18049, 18083), 'notebook.tags.models.Tag.objects.using', 'Tag.objects.using', (['self.owner_name'], {}), '(self.owner_name)\n', (18066, 18083), False, 'from notebook.tags.models import Tag, Tag_Frame\n'), ((19623, 19657), 'notebook.tags.models.Tag.objects.using', 'Tag.objects.using', (['self.owner_name'], {}), '(self.owner_name)\n', (19640, 19657), False, 'from notebook.tags.models import Tag, Tag_Frame\n'), ((25164, 25209), 'notebook.social.models.Social_Tag.objects.get_or_create', 'Social_Tag.objects.get_or_create', ([], {'name': 't.name'}), '(name=t.name)\n', (25196, 25209), False, 'from notebook.social.models import Social_Note, Social_Tag, Social_Snippet, Social_Bookmark, Social_Scrap, Social_Frame, Social_Frame_Notes\n'), ((34413, 34447), 'notebook.tags.models.Tag.objects.using', 'Tag.objects.using', (['self.owner_name'], {}), '(self.owner_name)\n', (34430, 34447), False, 'from notebook.tags.models import Tag, Tag_Frame\n'), ((43965, 43999), 'notebook.tags.models.Tag.objects.using', 'Tag.objects.using', (['self.owner_name'], {}), '(self.owner_name)\n', (43982, 43999), False, 'from notebook.tags.models import Tag, Tag_Frame\n'), ((16919, 16953), 'notebook.tags.models.Tag.objects.using', 'Tag.objects.using', (['self.owner_name'], {}), '(self.owner_name)\n', (16936, 16953), False, 'from notebook.tags.models import Tag, Tag_Frame\n'), ((26736, 26808), 'notebook.social.models.Social_Note.objects.get', 'Social_Note.objects.get', ([], {'owner': 'owner.member', 'owner_note_id': 'n_included.id'}), '(owner=owner.member, owner_note_id=n_included.id)\n', (26759, 26808), False, 'from notebook.social.models import Social_Note, Social_Tag, Social_Snippet, Social_Bookmark, Social_Scrap, 
Social_Frame, Social_Frame_Notes\n'), ((26930, 27017), 'notebook.social.models.Social_Frame_Notes.objects.get_or_create', 'Social_Frame_Notes.objects.get_or_create', ([], {'social_frame': 'sn', 'social_note': 'sn_included'}), '(social_frame=sn, social_note=\n sn_included)\n', (26970, 27017), False, 'from notebook.social.models import Social_Note, Social_Tag, Social_Snippet, Social_Bookmark, Social_Scrap, Social_Frame, Social_Frame_Notes\n'), ((50852, 50887), 'notebook.tags.models.Tag.objects.using', 'Tag.objects.using', (['model.owner_name'], {}), '(model.owner_name)\n', (50869, 50887), False, 'from notebook.tags.models import Tag, Tag_Frame\n')] |
import numpy as np
import matplotlib.pyplot as pl
import subprocess
import pdb
import struct
def readSpec(f):
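    # Binary layout, as parsed below: a 4-byte int giving the number of chunks, then for each chunk
    # a 4-byte int n, n float64 frequency values, and 4*n float64 Stokes values reshaped to (n, 4).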
    f = open(f)
    res = f.read()
    n = struct.unpack('i',res[0:4])
    freq = []
    stokes = []
    left = 4
    for i in range(n[0]):
        right = left + 4
        n = struct.unpack('i',res[left:right])
        left = right
        right = left + 8*n[0]
        t1 = np.asarray(struct.unpack('d'*n[0],res[left:right]))
        freq.append(t1)
        left = right
        right = left + 4*8*n[0]
        t2 = np.asarray(struct.unpack('d'*4*n[0],res[left:right])).reshape((n[0],4))
        stokes.append(t2)
        left = right
    freq = np.concatenate(freq)
    stokes = np.concatenate(stokes)
    return freq, stokes
freq, stokes = readSpec('test.spec')
f, ax = pl.subplots()
ind = np.argsort(freq)
freq = freq[ind]
stokes = stokes[ind,:]
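# 2.99792458e18 is the speed of light in Angstrom/s, so c/freq presumably converts frequency to wavelength in Angstroms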
ax.plot(2.99792458e18/freq, stokes[:,0],'r')
pl.tight_layout() | [
"numpy.argsort",
"struct.unpack",
"matplotlib.pyplot.tight_layout",
"numpy.concatenate",
"matplotlib.pyplot.subplots"
] | [((721, 734), 'matplotlib.pyplot.subplots', 'pl.subplots', ([], {}), '()\n', (732, 734), True, 'import matplotlib.pyplot as pl\n'), ((742, 758), 'numpy.argsort', 'np.argsort', (['freq'], {}), '(freq)\n', (752, 758), True, 'import numpy as np\n'), ((852, 869), 'matplotlib.pyplot.tight_layout', 'pl.tight_layout', ([], {}), '()\n', (867, 869), True, 'import matplotlib.pyplot as pl\n'), ((147, 175), 'struct.unpack', 'struct.unpack', (['"""i"""', 'res[0:4]'], {}), "('i', res[0:4])\n", (160, 175), False, 'import struct\n'), ((598, 618), 'numpy.concatenate', 'np.concatenate', (['freq'], {}), '(freq)\n', (612, 618), True, 'import numpy as np\n'), ((629, 651), 'numpy.concatenate', 'np.concatenate', (['stokes'], {}), '(stokes)\n', (643, 651), True, 'import numpy as np\n'), ((269, 304), 'struct.unpack', 'struct.unpack', (['"""i"""', 'res[left:right]'], {}), "('i', res[left:right])\n", (282, 304), False, 'import struct\n'), ((364, 406), 'struct.unpack', 'struct.unpack', (["('d' * n[0])", 'res[left:right]'], {}), "('d' * n[0], res[left:right])\n", (377, 406), False, 'import struct\n'), ((488, 534), 'struct.unpack', 'struct.unpack', (["('d' * 4 * n[0])", 'res[left:right]'], {}), "('d' * 4 * n[0], res[left:right])\n", (501, 534), False, 'import struct\n')] |
import uuid
from django.conf import settings
from django.contrib.auth.models import Group
from django.contrib.gis.db import models
from django.utils.translation import gettext_lazy as _
from traffic_control.mixins.models import SourceControlModel
class GroupOperationalArea(models.Model):
"""
Model to link OperationalAreas to django.contrib.auth.Group model
"""
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
group = models.OneToOneField(
Group,
unique=True,
related_name="operational_area",
verbose_name=_("Group"),
on_delete=models.CASCADE,
)
areas = models.ManyToManyField(
"OperationalArea",
related_name="groups",
verbose_name=_("Operational areas"),
blank=True,
)
class Meta:
verbose_name = _("Group operational area")
verbose_name_plural = _("Group operational areas")
def __str__(self):
return f"GroupOperationalArea {self.group.name}"
class OperationalArea(SourceControlModel):
"""
Model containing operational area polygon used to check location based
permissions
"""
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name = models.CharField(_("Name"), max_length=256, blank=False)
name_short = models.CharField(_("Name short"), max_length=256, blank=True)
area_type = models.CharField(_("Area type"), max_length=256, blank=True)
contractor = models.CharField(_("Contractor"), max_length=256, blank=True)
start_date = models.DateField(_("Start date"), null=True, blank=True)
end_date = models.DateField(_("End date"), null=True, blank=True)
updated_date = models.DateField(_("Updated date"), null=True, blank=True)
task = models.CharField(_("Task"), max_length=256, blank=True)
status = models.CharField(_("Status"), max_length=256, blank=True)
location = models.MultiPolygonField(_("Location (3D)"), dim=3, srid=settings.SRID)
class Meta:
verbose_name = _("Operational area")
verbose_name_plural = _("Operational areas")
unique_together = ["source_name", "source_id"]
def __str__(self):
return f"OperationalArea {self.name}"
| [
"django.utils.translation.gettext_lazy",
"django.contrib.gis.db.models.UUIDField"
] | [((389, 459), 'django.contrib.gis.db.models.UUIDField', 'models.UUIDField', ([], {'primary_key': '(True)', 'default': 'uuid.uuid4', 'editable': '(False)'}), '(primary_key=True, default=uuid.uuid4, editable=False)\n', (405, 459), False, 'from django.contrib.gis.db import models\n'), ((1179, 1249), 'django.contrib.gis.db.models.UUIDField', 'models.UUIDField', ([], {'primary_key': '(True)', 'default': 'uuid.uuid4', 'editable': '(False)'}), '(primary_key=True, default=uuid.uuid4, editable=False)\n', (1195, 1249), False, 'from django.contrib.gis.db import models\n'), ((849, 876), 'django.utils.translation.gettext_lazy', '_', (['"""Group operational area"""'], {}), "('Group operational area')\n", (850, 876), True, 'from django.utils.translation import gettext_lazy as _\n'), ((907, 935), 'django.utils.translation.gettext_lazy', '_', (['"""Group operational areas"""'], {}), "('Group operational areas')\n", (908, 935), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1278, 1287), 'django.utils.translation.gettext_lazy', '_', (['"""Name"""'], {}), "('Name')\n", (1279, 1287), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1352, 1367), 'django.utils.translation.gettext_lazy', '_', (['"""Name short"""'], {}), "('Name short')\n", (1353, 1367), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1430, 1444), 'django.utils.translation.gettext_lazy', '_', (['"""Area type"""'], {}), "('Area type')\n", (1431, 1444), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1508, 1523), 'django.utils.translation.gettext_lazy', '_', (['"""Contractor"""'], {}), "('Contractor')\n", (1509, 1523), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1587, 1602), 'django.utils.translation.gettext_lazy', '_', (['"""Start date"""'], {}), "('Start date')\n", (1588, 1602), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1659, 1672), 'django.utils.translation.gettext_lazy', '_', (['"""End date"""'], {}), "('End date')\n", (1660, 1672), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1733, 1750), 'django.utils.translation.gettext_lazy', '_', (['"""Updated date"""'], {}), "('Updated date')\n", (1734, 1750), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1803, 1812), 'django.utils.translation.gettext_lazy', '_', (['"""Task"""'], {}), "('Task')\n", (1804, 1812), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1872, 1883), 'django.utils.translation.gettext_lazy', '_', (['"""Status"""'], {}), "('Status')\n", (1873, 1883), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1953, 1971), 'django.utils.translation.gettext_lazy', '_', (['"""Location (3D)"""'], {}), "('Location (3D)')\n", (1954, 1971), True, 'from django.utils.translation import gettext_lazy as _\n'), ((2040, 2061), 'django.utils.translation.gettext_lazy', '_', (['"""Operational area"""'], {}), "('Operational area')\n", (2041, 2061), True, 'from django.utils.translation import gettext_lazy as _\n'), ((2092, 2114), 'django.utils.translation.gettext_lazy', '_', (['"""Operational areas"""'], {}), "('Operational areas')\n", (2093, 2114), True, 'from django.utils.translation import gettext_lazy as _\n'), ((592, 602), 'django.utils.translation.gettext_lazy', '_', (['"""Group"""'], {}), "('Group')\n", (593, 602), True, 'from django.utils.translation import gettext_lazy as _\n'), ((759, 781), 'django.utils.translation.gettext_lazy', '_', (['"""Operational areas"""'], {}), 
"('Operational areas')\n", (760, 781), True, 'from django.utils.translation import gettext_lazy as _\n')] |
# Generated by Django 2.0.2 on 2018-03-04 14:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('notenrechner', '0004_auto_20180304_1450'),
]
operations = [
migrations.AddField(
model_name='klausur',
name='anzahl_aufgaben',
field=models.PositiveSmallIntegerField(default=3),
),
migrations.AlterUniqueTogether(
name='klausur',
unique_together={('fach', 'klasse', 'nummer')},
),
]
| [
"django.db.migrations.AlterUniqueTogether",
"django.db.models.PositiveSmallIntegerField"
] | [((413, 511), 'django.db.migrations.AlterUniqueTogether', 'migrations.AlterUniqueTogether', ([], {'name': '"""klausur"""', 'unique_together': "{('fach', 'klasse', 'nummer')}"}), "(name='klausur', unique_together={('fach',\n 'klasse', 'nummer')})\n", (443, 511), False, 'from django.db import migrations, models\n'), ((349, 392), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'default': '(3)'}), '(default=3)\n', (381, 392), False, 'from django.db import migrations, models\n')] |
import os
import sys
import bpy
import math
# To be able to find this project's modules, the path needs to be added to
# sys.path.
basepath = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
sys.path.append(basepath)
from util.config import read_config
from blender.model import load_new_model
from blender.scenes import (
set_resolution,
set_depth_pixel_depth,
link_new_scene,
clear_scenes,
delete_objects,
ground_visibility,
correct_object_names,
)
from blender.cameras import position_cameras, dump_k_matrix, setup_displacement_values
from blender.lighting import set_lighting
from grounds.meshes import cobblestone_ground, slate_ground, asphalt_ground
from grounds.meshes import always_defects
from util.output_suppressor import OutputSuppressor
VERBOSITY = 0
def main():
'''
Load the given config file, initialize the Blender scene and set up and
execute rendering.
Create the desired ground type, position the cameras and set up the
lighting. Configure Blender to render both camera angles and render all
images for one image set.
By default this function loads the `config.json` file that was written out
by the CLI, but one can also be specified as an argument when executing
this skript directly.
To directly run this script, execute (note the ``--`` before the config
file)::
$ blender --python main.py [ -- config file]
'''
global VERBOSITY
basepath = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(sys.argv[3])))
try:
config = read_config(basepath, file=sys.argv.pop())
except ValueError:
config = read_config(basepath)
VERBOSITY = config['verbose']
vv('start, config loaded:', config)
output_path = os.path.join(basepath, config['output'])
load_new_model(os.path.join(basepath, 'model', 'base_model.blend'))
v('model loaded')
cameras = [
bpy.data.objects['camera_left'],
bpy.data.objects['camera_right'],
]
nodes = bpy.data.scenes['main_scene'].node_tree.nodes
v('generating ground meshes...')
texture_output = os.path.join(config['output'], 'road_textures')
if config['ground_type'] == 'cobblestone':
cobblestone_ground(source=texture_output, size=(7,7), defects=config['defects'])
elif config['ground_type'] == 'asphalt':
asphalt_ground(source=texture_output, size=(7,7), defects=config['defects'])
elif config['ground_type'] == 'slate':
slate_ground(source=texture_output, size=(7,7), defects=config['defects'])
else:
raise ValueError('Unknown ground type {}'.format(config['ground_type']))
v('ground generated')
position_cameras(
cameras,
config['camera_distance'] / 100.0, # cm -> m
config['camera_pitch'],
config['camera_inward_yaw'],
height=config['camera_height'] / 100.0 # cm -> m
)
set_lighting()
set_resolution(config['resolution'])
os.makedirs(output_path, exist_ok=True)
dump_k_matrix(cameras[0], os.path.join(output_path, 'k_matrix.csv'))
v('K matrix written')
# create a new scene and link it to a camera and render layer
link_new_scene(
scene_name='right',
camera_name='camera_right',
node_name='right'
)
v('left scene set up')
main_scene = bpy.data.scenes['main_scene']
right_scene = bpy.data.scenes['right']
if config['defects'] or always_defects:
link_new_scene(
scene_name='defects',
camera_name='camera_left',
node_name='defects'
)
defects_scene = bpy.data.scenes['defects']
v('defects scene set up')
correct_object_names()
if config['defects'] or always_defects:
ground_visibility(main_scene, ground_visible=True, defects_visible=False)
ground_visibility(right_scene, ground_visible=True, defects_visible=False)
ground_visibility(defects_scene, ground_visible=False, defects_visible=True)
set_depth_pixel_depth(nodes, config['depth_range'])
# set filenames for left, depth, right & disparity pictures
nodes['File Output'].file_slots[0].path = os.path.join(output_path, '{}-{:0>5}-#-left.png'.format(config['ground_type'], config['image_index']))
nodes['File Output'].file_slots[1].path = os.path.join(output_path, '{}-{:0>5}-#-right.png'.format(config['ground_type'], config['image_index']))
nodes['File Output'].file_slots[2].path = os.path.join(output_path, '{}-{:0>5}-#-depth.png'.format(config['ground_type'], config['image_index']))
nodes['File Output'].file_slots[3].path = os.path.join(output_path, '{}-{:0>5}-#-displacement.png'.format(config['ground_type'], config['image_index']))
nodes['File Output'].file_slots[4].path = os.path.join(output_path, '{}-{:0>5}-#-defects.png'.format(config['ground_type'], config['image_index']))
factor = setup_displacement_values(nodes, cameras, 0.04)
# write out images
if VERBOSITY >= 2:
bpy.ops.render.render(write_still=True, scene='main_scene')
else:
v('rendering...', end='')
with OutputSuppressor(): # suppress render progress output
bpy.ops.render.render(write_still=True, scene='main_scene')
v(' done')
# if i < config['number']-1:
# delete_objects()
# clear_scenes()
# bpy.ops.wm.quit_blender()
def v(*msgs, end='\n'):
'''Print messages for verbosity level 1.'''
    if VERBOSITY and VERBOSITY >= 1:
print(*msgs, end=end, flush=True)
def vv(*msgs, end='\n'):
    '''Print messages for verbosity level 2.'''
if VERBOSITY and VERBOSITY >= 2:
print(*msgs, end=end, flush=True)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
| [
"blender.scenes.set_depth_pixel_depth",
"blender.lighting.set_lighting",
"blender.scenes.ground_visibility",
"sys.path.append",
"util.output_suppressor.OutputSuppressor",
"grounds.meshes.slate_ground",
"grounds.meshes.cobblestone_ground",
"blender.cameras.setup_displacement_values",
"grounds.meshes.asphalt_ground",
"blender.cameras.position_cameras",
"blender.scenes.link_new_scene",
"blender.scenes.set_resolution",
"os.path.dirname",
"util.config.read_config",
"sys.argv.pop",
"blender.scenes.correct_object_names",
"os.makedirs",
"os.path.join",
"os.getcwd",
"bpy.ops.render.render"
] | [((222, 247), 'sys.path.append', 'sys.path.append', (['basepath'], {}), '(basepath)\n', (237, 247), False, 'import sys\n'), ((1855, 1895), 'os.path.join', 'os.path.join', (['basepath', "config['output']"], {}), "(basepath, config['output'])\n", (1867, 1895), False, 'import os\n'), ((2238, 2285), 'os.path.join', 'os.path.join', (["config['output']", '"""road_textures"""'], {}), "(config['output'], 'road_textures')\n", (2250, 2285), False, 'import os\n'), ((2815, 2978), 'blender.cameras.position_cameras', 'position_cameras', (['cameras', "(config['camera_distance'] / 100.0)", "config['camera_pitch']", "config['camera_inward_yaw']"], {'height': "(config['camera_height'] / 100.0)"}), "(cameras, config['camera_distance'] / 100.0, config[\n 'camera_pitch'], config['camera_inward_yaw'], height=config[\n 'camera_height'] / 100.0)\n", (2831, 2978), False, 'from blender.cameras import position_cameras, dump_k_matrix, setup_displacement_values\n'), ((3053, 3067), 'blender.lighting.set_lighting', 'set_lighting', ([], {}), '()\n', (3065, 3067), False, 'from blender.lighting import set_lighting\n'), ((3079, 3115), 'blender.scenes.set_resolution', 'set_resolution', (["config['resolution']"], {}), "(config['resolution'])\n", (3093, 3115), False, 'from blender.scenes import set_resolution, set_depth_pixel_depth, link_new_scene, clear_scenes, delete_objects, ground_visibility, correct_object_names\n'), ((3127, 3166), 'os.makedirs', 'os.makedirs', (['output_path'], {'exist_ok': '(True)'}), '(output_path, exist_ok=True)\n', (3138, 3166), False, 'import os\n'), ((3346, 3432), 'blender.scenes.link_new_scene', 'link_new_scene', ([], {'scene_name': '"""right"""', 'camera_name': '"""camera_right"""', 'node_name': '"""right"""'}), "(scene_name='right', camera_name='camera_right', node_name=\n 'right')\n", (3360, 3432), False, 'from blender.scenes import set_resolution, set_depth_pixel_depth, link_new_scene, clear_scenes, delete_objects, ground_visibility, correct_object_names\n'), ((3890, 3912), 'blender.scenes.correct_object_names', 'correct_object_names', ([], {}), '()\n', (3910, 3912), False, 'from blender.scenes import set_resolution, set_depth_pixel_depth, link_new_scene, clear_scenes, delete_objects, ground_visibility, correct_object_names\n'), ((4228, 4279), 'blender.scenes.set_depth_pixel_depth', 'set_depth_pixel_depth', (['nodes', "config['depth_range']"], {}), "(nodes, config['depth_range'])\n", (4249, 4279), False, 'from blender.scenes import set_resolution, set_depth_pixel_depth, link_new_scene, clear_scenes, delete_objects, ground_visibility, correct_object_names\n'), ((5128, 5175), 'blender.cameras.setup_displacement_values', 'setup_displacement_values', (['nodes', 'cameras', '(0.04)'], {}), '(nodes, cameras, 0.04)\n', (5153, 5175), False, 'from blender.cameras import position_cameras, dump_k_matrix, setup_displacement_values\n'), ((180, 191), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (189, 191), False, 'import os\n'), ((193, 218), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (208, 218), False, 'import os\n'), ((1922, 1973), 'os.path.join', 'os.path.join', (['basepath', '"""model"""', '"""base_model.blend"""'], {}), "(basepath, 'model', 'base_model.blend')\n", (1934, 1973), False, 'import os\n'), ((2343, 2429), 'grounds.meshes.cobblestone_ground', 'cobblestone_ground', ([], {'source': 'texture_output', 'size': '(7, 7)', 'defects': "config['defects']"}), "(source=texture_output, size=(7, 7), defects=config[\n 'defects'])\n", (2361, 2429), False, 'from grounds.meshes 
import cobblestone_ground, slate_ground, asphalt_ground\n'), ((3198, 3239), 'os.path.join', 'os.path.join', (['output_path', '"""k_matrix.csv"""'], {}), "(output_path, 'k_matrix.csv')\n", (3210, 3239), False, 'import os\n'), ((3657, 3746), 'blender.scenes.link_new_scene', 'link_new_scene', ([], {'scene_name': '"""defects"""', 'camera_name': '"""camera_left"""', 'node_name': '"""defects"""'}), "(scene_name='defects', camera_name='camera_left', node_name=\n 'defects')\n", (3671, 3746), False, 'from blender.scenes import set_resolution, set_depth_pixel_depth, link_new_scene, clear_scenes, delete_objects, ground_visibility, correct_object_names\n'), ((3973, 4046), 'blender.scenes.ground_visibility', 'ground_visibility', (['main_scene'], {'ground_visible': '(True)', 'defects_visible': '(False)'}), '(main_scene, ground_visible=True, defects_visible=False)\n', (3990, 4046), False, 'from blender.scenes import set_resolution, set_depth_pixel_depth, link_new_scene, clear_scenes, delete_objects, ground_visibility, correct_object_names\n'), ((4056, 4130), 'blender.scenes.ground_visibility', 'ground_visibility', (['right_scene'], {'ground_visible': '(True)', 'defects_visible': '(False)'}), '(right_scene, ground_visible=True, defects_visible=False)\n', (4073, 4130), False, 'from blender.scenes import set_resolution, set_depth_pixel_depth, link_new_scene, clear_scenes, delete_objects, ground_visibility, correct_object_names\n'), ((4140, 4216), 'blender.scenes.ground_visibility', 'ground_visibility', (['defects_scene'], {'ground_visible': '(False)', 'defects_visible': '(True)'}), '(defects_scene, ground_visible=False, defects_visible=True)\n', (4157, 4216), False, 'from blender.scenes import set_resolution, set_depth_pixel_depth, link_new_scene, clear_scenes, delete_objects, ground_visibility, correct_object_names\n'), ((5239, 5298), 'bpy.ops.render.render', 'bpy.ops.render.render', ([], {'write_still': '(True)', 'scene': '"""main_scene"""'}), "(write_still=True, scene='main_scene')\n", (5260, 5298), False, 'import bpy\n'), ((1581, 1592), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1590, 1592), False, 'import os\n'), ((1594, 1622), 'os.path.dirname', 'os.path.dirname', (['sys.argv[3]'], {}), '(sys.argv[3])\n', (1609, 1622), False, 'import os\n'), ((1738, 1759), 'util.config.read_config', 'read_config', (['basepath'], {}), '(basepath)\n', (1749, 1759), False, 'from util.config import read_config\n'), ((2479, 2556), 'grounds.meshes.asphalt_ground', 'asphalt_ground', ([], {'source': 'texture_output', 'size': '(7, 7)', 'defects': "config['defects']"}), "(source=texture_output, size=(7, 7), defects=config['defects'])\n", (2493, 2556), False, 'from grounds.meshes import cobblestone_ground, slate_ground, asphalt_ground\n'), ((5359, 5377), 'util.output_suppressor.OutputSuppressor', 'OutputSuppressor', ([], {}), '()\n', (5375, 5377), False, 'from util.output_suppressor import OutputSuppressor\n'), ((5427, 5486), 'bpy.ops.render.render', 'bpy.ops.render.render', ([], {'write_still': '(True)', 'scene': '"""main_scene"""'}), "(write_still=True, scene='main_scene')\n", (5448, 5486), False, 'import bpy\n'), ((1680, 1694), 'sys.argv.pop', 'sys.argv.pop', ([], {}), '()\n', (1692, 1694), False, 'import sys\n'), ((2609, 2684), 'grounds.meshes.slate_ground', 'slate_ground', ([], {'source': 'texture_output', 'size': '(7, 7)', 'defects': "config['defects']"}), "(source=texture_output, size=(7, 7), defects=config['defects'])\n", (2621, 2684), False, 'from grounds.meshes import cobblestone_ground, slate_ground, 
asphalt_ground\n')] |
import os
import requests
import re
import time
import warnings
from dragnet import extract_content
from gensim.summarization import summarize, keywords
from slackclient import SlackClient
from urllib.parse import urlparse
warnings.filterwarnings('ignore', category=UserWarning)
COMMAND_REGEX = re.compile(r'^!tldr <(https?://[^\s]+)>')
RESPONSE_TEMPLATE = u'%s %s %i%% reduced:\n> %s\n*Keywords* %s'
ERROR_RESPONSE = r'¯\_(ツ)_/¯'
RTM_READ_DELAY = 1
WORD_COUNT = int(os.environ.get('WORD_COUNT') or 200)
KEYWORD_COUNT = int(os.environ.get('KEYWORD_COUNT') or 5)
SLACK_BOT_TOKEN = os.environ.get('SLACK_BOT_TOKEN')
if not SLACK_BOT_TOKEN:
print('Error: SLACK_BOT_TOKEN env var must be set')
exit(1)
slack_client = SlackClient(SLACK_BOT_TOKEN)
bot_id = None
def tldrafy(user, url):
domain = urlparse(url).netloc
content = extract_content(requests.get(url).content)
summary = summarize(content, word_count=WORD_COUNT)
keyword_list = keywords(content, words=KEYWORD_COUNT, lemmatize=True, split=True)
percent_reduction = round((1 - (float(len(summary)) / len(content))) * 100)
return '<@%s>' % user, \
'<%s|%s>' % (url, domain), \
percent_reduction, \
summary.replace('\n', ' '), \
', '.join(keyword_list)
def parse_events(slack_events):
for event in slack_events:
if event['type'] == 'message' and 'subtype' not in event:
match = COMMAND_REGEX.match(event['text'])
if match:
try:
response = RESPONSE_TEMPLATE % tldrafy(event['user'], match.group(1))
except Exception as err:
print('Error: %s' % err)
response = ERROR_RESPONSE
slack_client.api_call('chat.postMessage',
channel=event['channel'],
text=response)
if __name__ == "__main__":
if slack_client.rtm_connect(with_team_state=False):
print('@tldrbot connected')
bot_id = slack_client.api_call('auth.test')['user_id']
while True:
parse_events(slack_client.rtm_read())
time.sleep(RTM_READ_DELAY)
else:
print('Error: Could not connect')
exit(1)
| [
"urllib.parse.urlparse",
"re.compile",
"os.environ.get",
"requests.get",
"time.sleep",
"slackclient.SlackClient",
"gensim.summarization.summarize",
"gensim.summarization.keywords",
"warnings.filterwarnings"
] | [((224, 279), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'UserWarning'}), "('ignore', category=UserWarning)\n", (247, 279), False, 'import warnings\n'), ((297, 338), 're.compile', 're.compile', (['"""^!tldr <(https?://[^\\\\s]+)>"""'], {}), "('^!tldr <(https?://[^\\\\s]+)>')\n", (307, 338), False, 'import re\n'), ((580, 613), 'os.environ.get', 'os.environ.get', (['"""SLACK_BOT_TOKEN"""'], {}), "('SLACK_BOT_TOKEN')\n", (594, 613), False, 'import os\n'), ((722, 750), 'slackclient.SlackClient', 'SlackClient', (['SLACK_BOT_TOKEN'], {}), '(SLACK_BOT_TOKEN)\n', (733, 750), False, 'from slackclient import SlackClient\n'), ((896, 937), 'gensim.summarization.summarize', 'summarize', (['content'], {'word_count': 'WORD_COUNT'}), '(content, word_count=WORD_COUNT)\n', (905, 937), False, 'from gensim.summarization import summarize, keywords\n'), ((957, 1023), 'gensim.summarization.keywords', 'keywords', (['content'], {'words': 'KEYWORD_COUNT', 'lemmatize': '(True)', 'split': '(True)'}), '(content, words=KEYWORD_COUNT, lemmatize=True, split=True)\n', (965, 1023), False, 'from gensim.summarization import summarize, keywords\n'), ((467, 495), 'os.environ.get', 'os.environ.get', (['"""WORD_COUNT"""'], {}), "('WORD_COUNT')\n", (481, 495), False, 'import os\n'), ((524, 555), 'os.environ.get', 'os.environ.get', (['"""KEYWORD_COUNT"""'], {}), "('KEYWORD_COUNT')\n", (538, 555), False, 'import os\n'), ((804, 817), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (812, 817), False, 'from urllib.parse import urlparse\n'), ((855, 872), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (867, 872), False, 'import requests\n'), ((2173, 2199), 'time.sleep', 'time.sleep', (['RTM_READ_DELAY'], {}), '(RTM_READ_DELAY)\n', (2183, 2199), False, 'import time\n')] |
import logging
import curses
from core.config import theme
logger = logging.getLogger('colors')
colors = {}
def init():
color_definitions = theme['colors']
for i, color_name in enumerate(color_definitions.keys()):
foreground, background = color_definitions[color_name]
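        # color pair 0 is reserved by curses, so user-defined pairs start at 1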
curses.init_pair(i + 1, foreground, background)
colors[color_name] = curses.color_pair(i + 1)
| [
"logging.getLogger",
"curses.init_pair",
"curses.color_pair"
] | [((70, 97), 'logging.getLogger', 'logging.getLogger', (['"""colors"""'], {}), "('colors')\n", (87, 97), False, 'import logging\n'), ((298, 345), 'curses.init_pair', 'curses.init_pair', (['(i + 1)', 'foreground', 'background'], {}), '(i + 1, foreground, background)\n', (314, 345), False, 'import curses\n'), ((375, 399), 'curses.color_pair', 'curses.color_pair', (['(i + 1)'], {}), '(i + 1)\n', (392, 399), False, 'import curses\n')] |
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-节点管理(BlueKing-BK-NODEMAN) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at https://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from unittest.mock import patch
from django.core.cache import cache
from apps.node_man.periodic_tasks.sync_cmdb_biz_topo_task import (
get_and_cache_format_biz_topo,
)
from apps.utils.unittest.testcase import CustomBaseTestCase
from .mock_data import MOCK_BK_BIZ_ID
from .utils import MockClient
class TestSyncCMDBBizTopo(CustomBaseTestCase):
def get_topo_path(self, topo__id_path_map, now_topo):
topo__id_path_map[now_topo["biz_inst_id"]] = now_topo["path"]
for children_topo in now_topo["children"]:
self.get_topo_path(topo__id_path_map, children_topo)
@patch("apps.node_man.periodic_tasks.sync_cmdb_biz_topo_task.client_v2", MockClient)
@patch("apps.node_man.handlers.cmdb.client_v2", MockClient)
def test_get_and_cache_format_biz_topo(self):
get_and_cache_format_biz_topo(MOCK_BK_BIZ_ID)
topo_cache = cache.get(f"{MOCK_BK_BIZ_ID}_topo_cache")
topo_nodes = cache.get(f"{MOCK_BK_BIZ_ID}_topo_nodes")
        # Build a {biz_inst_id: path} mapping to verify that topo_cache and topo_nodes are generated consistently
topo_nodes__id_path_map = {item["biz_inst_id"]: item["path"] for item in topo_nodes}
topo_cache__id_path_map = {}
self.get_topo_path(topo_cache__id_path_map, topo_cache)
self.assertEqual(topo_cache__id_path_map, topo_nodes__id_path_map)
| [
"unittest.mock.patch",
"apps.node_man.periodic_tasks.sync_cmdb_biz_topo_task.get_and_cache_format_biz_topo",
"django.core.cache.cache.get"
] | [((1329, 1416), 'unittest.mock.patch', 'patch', (['"""apps.node_man.periodic_tasks.sync_cmdb_biz_topo_task.client_v2"""', 'MockClient'], {}), "('apps.node_man.periodic_tasks.sync_cmdb_biz_topo_task.client_v2',\n MockClient)\n", (1334, 1416), False, 'from unittest.mock import patch\n'), ((1418, 1476), 'unittest.mock.patch', 'patch', (['"""apps.node_man.handlers.cmdb.client_v2"""', 'MockClient'], {}), "('apps.node_man.handlers.cmdb.client_v2', MockClient)\n", (1423, 1476), False, 'from unittest.mock import patch\n'), ((1535, 1580), 'apps.node_man.periodic_tasks.sync_cmdb_biz_topo_task.get_and_cache_format_biz_topo', 'get_and_cache_format_biz_topo', (['MOCK_BK_BIZ_ID'], {}), '(MOCK_BK_BIZ_ID)\n', (1564, 1580), False, 'from apps.node_man.periodic_tasks.sync_cmdb_biz_topo_task import get_and_cache_format_biz_topo\n'), ((1602, 1643), 'django.core.cache.cache.get', 'cache.get', (['f"""{MOCK_BK_BIZ_ID}_topo_cache"""'], {}), "(f'{MOCK_BK_BIZ_ID}_topo_cache')\n", (1611, 1643), False, 'from django.core.cache import cache\n'), ((1665, 1706), 'django.core.cache.cache.get', 'cache.get', (['f"""{MOCK_BK_BIZ_ID}_topo_nodes"""'], {}), "(f'{MOCK_BK_BIZ_ID}_topo_nodes')\n", (1674, 1706), False, 'from django.core.cache import cache\n')] |
from pymongo import MongoClient
import json
import tushare as ts
import time
from apscheduler.schedulers.blocking import BlockingScheduler
__author__ = '<NAME>'
# Connect to the MongoDB database
client = MongoClient("mongodb://127.0.0.1/27017")
# 指定数据库名称
db = client.stock
scheduler = BlockingScheduler()
# Monday-Friday, 08:00-20:00, call the interfaces every ten minutes
@scheduler.scheduled_job('cron', day_of_week='mon-fri', hour='8-20',minute='0,10,20,30,40,50')
def store_news():
    # Fetch the latest news from the Sina guba (stock forum)
sina_news = ts.guba_sina(show_content=True)
    # Prevent inserting duplicates when the data has not been updated  2018/04/22 0:33
    # TODO: handle the case where the collection is empty
for i in db.news.sina.find().sort([("_id", -1)]).limit(1):
for j in sina_news[:6][-1:].get("title"):
if i.get("title") != j:
db.news.sina.insert(json.loads(sina_news[:6].to_json(orient='records')))
    # Fetch the 6 most recent real-time news items
immediate_news = ts.get_latest_news(top=6, show_content=True)
for i in db.news.immediate.find().sort([("_id", -1)]).limit(1):
for j in immediate_news[-1:].get("title"):
if i.get("title") != j:
db.news.immediate.insert(json.loads(immediate_news.to_json(orient='records')))
    # Fetch per-stock "information mine" (notice) data
mines_news = ts.get_notices()
if not mines_news is None:
db.news.mines.insert(json.loads(mines_news.to_json(orient='records')))
scheduler.start()
| [
"tushare.get_latest_news",
"tushare.get_notices",
"tushare.guba_sina",
"apscheduler.schedulers.blocking.BlockingScheduler",
"pymongo.MongoClient"
] | [((187, 227), 'pymongo.MongoClient', 'MongoClient', (['"""mongodb://127.0.0.1/27017"""'], {}), "('mongodb://127.0.0.1/27017')\n", (198, 227), False, 'from pymongo import MongoClient\n'), ((269, 288), 'apscheduler.schedulers.blocking.BlockingScheduler', 'BlockingScheduler', ([], {}), '()\n', (286, 288), False, 'from apscheduler.schedulers.blocking import BlockingScheduler\n'), ((462, 493), 'tushare.guba_sina', 'ts.guba_sina', ([], {'show_content': '(True)'}), '(show_content=True)\n', (474, 493), True, 'import tushare as ts\n'), ((832, 876), 'tushare.get_latest_news', 'ts.get_latest_news', ([], {'top': '(6)', 'show_content': '(True)'}), '(top=6, show_content=True)\n', (850, 876), True, 'import tushare as ts\n'), ((1163, 1179), 'tushare.get_notices', 'ts.get_notices', ([], {}), '()\n', (1177, 1179), True, 'import tushare as ts\n')] |
### configuration file for odf_process_all.py
#
# TODO: organize these by editable/fixed variables
from importlib import resources
import yaml
with resources.open_text("ctdcal", "user_settings.yaml") as f:
settings = yaml.safe_load(f)
# Unpack user settings (any sanitizing/checks needed? probably)
expocode = settings["expocode"]
section_id = settings["section_id"]
ctd_serial = settings["ctd_serial"]
ctd_outputs = settings["ctd_outputs"]
# CTD file (.ct1) variable outputs
# move elsewhere when xarray is implemented
# TODO: check against cchdo.params?
ctd_col_names, ctd_col_units = [], []
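# every parameter except CTDPRS also gets a "<param>_FLAG_W" column with a blank unit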
for (param, attrs) in ctd_outputs.items():
if param == "CTDPRS":
ctd_col_names += [param]
ctd_col_units += [attrs["units"]]
else:
ctd_col_names += [param, f"{param}_FLAG_W"]
ctd_col_units += [attrs["units"], ""]
# List of directories for I/O purposes
dirs = {
"ssscc": "data/ssscc/",
"raw": "data/raw/",
"converted": "data/converted/",
"time": "data/time/",
"pressure": "data/pressure/",
"bottle": "data/bottle/",
"salt": "data/salt/",
"reft": "data/reft/",
"oxygen": "data/oxygen/",
"logs": "data/logs/",
}
fig_dirs = {
"t1": "data/logs/fitting_figs/temp_primary/",
"t2": "data/logs/fitting_figs/temp_secondary/",
"c1": "data/logs/fitting_figs/cond_primary/",
"c2": "data/logs/fitting_figs/cond_secondary/",
"ox": "data/logs/fitting_figs/oxy_primary/",
"rinko": "data/logs/fitting_figs/oxy_rinko/",
}
# remnant of old system, will be pushed into xarray metadata/attrs
# Labels for CTD columns
column = {
"p": "CTDPRS",
"t1": "CTDTMP1",
"t2": "CTDTMP2",
"c1": "CTDCOND1",
"c2": "CTDCOND2",
"sal": "CTDSAL",
# "s1": "CTDSAL1", # TODO: calc salinity from primary and secondary sensors
# "s2": "CTDSAL2",
"rinko_oxy": "FREE1", # CHECK THIS!
"oxyvolts": "CTDOXYVOLTS",
"refT": "REFTMP",
"refC": "BTLCOND",
"refS": "SALNTY",
"refO": "OXYGEN",
"lat": "GPSLAT",
"lon": "GPSLON",
}
# List of columns to filter
filter_cols = []
for x in ["p", "t1", "t2", "c1", "c2", "sal", "rinko_oxy", "oxyvolts", "lat", "lon"]:
filter_cols.append(column[x])
| [
"importlib.resources.open_text",
"yaml.safe_load"
] | [((150, 201), 'importlib.resources.open_text', 'resources.open_text', (['"""ctdcal"""', '"""user_settings.yaml"""'], {}), "('ctdcal', 'user_settings.yaml')\n", (169, 201), False, 'from importlib import resources\n'), ((223, 240), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (237, 240), False, 'import yaml\n')] |
#! /bin/env python
# -*- coding: utf-8 -*-
"""
test_language_tag -
"""
import logging
from unittest import TestCase
from lgr.tools.idn_review.language_tag import generate_language_tag_report
from tests.unit.utils import load_lgr
logger = logging.getLogger('test_variant_sets')
class Test(TestCase):
def setUp(self) -> None:
super().setUp()
self.maxDiff = None
self.ref = load_lgr('idn_table_review', 'reference_lgr.xml')
def test_language_tag(self):
idn = load_lgr('idn_table_review/language_tag', 'language_tag.xml')
result = generate_language_tag_report(idn, self.ref)
self.assertCountEqual(result, [{
'idn_table_language_tag': 'arab',
'comparison': [{
'reference_lgr_language_tag': 'ar',
'result': 'REVIEW',
'remark': 'The language tag in IDN Table and Reference LGR are mismatched'
}, {
'reference_lgr_language_tag': 'ja-hira',
'result': 'REVIEW',
'remark': 'The language tag in IDN Table and Reference LGR are mismatched'
}, {
'reference_lgr_language_tag': 'und-Arab',
'result': 'MATCH',
'remark': 'Exact match'
}]
}, {
'idn_table_language_tag': 'ar',
'comparison': [{
'reference_lgr_language_tag': 'ar',
'result': 'MATCH',
'remark': 'Exact match'
}, {
'reference_lgr_language_tag': 'ja-hira',
'result': 'REVIEW',
'remark': 'The language tag in IDN Table and Reference LGR are mismatched'
}, {
'reference_lgr_language_tag': 'und-Arab',
'result': 'MATCH',
'remark': 'The language tag in IDN Table relevant to the script tag in Reference LGR'
}]
}, {
'idn_table_language_tag': 'ar-arab',
'comparison': [{
'reference_lgr_language_tag': 'ar',
'result': 'MATCH',
'remark': 'Consider minimizing the tag as per the RFC5646 and IANA subtag registry'
}, {
'reference_lgr_language_tag': 'ja-hira',
'result': 'REVIEW',
'remark': 'The language tag in IDN Table and Reference LGR are mismatched'
}, {
'reference_lgr_language_tag': 'und-Arab',
'result': 'MATCH',
'remark': 'The language tag in IDN Table relevant to the script tag in Reference LGR'
}]
}, {
'idn_table_language_tag': 'test-unexisting',
'comparison': [{
'reference_lgr_language_tag': 'ar',
'result': 'MANUAL CHECK',
'remark': 'Language tag may be included in the comment'
}, {
'reference_lgr_language_tag': 'ja-hira',
'result': 'MANUAL CHECK',
'remark': 'Language tag may be included in the comment'
}, {
'reference_lgr_language_tag': 'und-Arab',
'result': 'MANUAL CHECK',
'remark': 'Language tag may be included in the comment'
}]
}, {
'idn_table_language_tag': 'ja',
'comparison': [{
'reference_lgr_language_tag': 'ar',
'result': 'REVIEW',
'remark': 'The language tag in IDN Table and Reference LGR are mismatched'
}, {
'reference_lgr_language_tag': 'ja-hira',
'result': 'MATCH',
'remark': 'Language match'
}, {
'reference_lgr_language_tag': 'und-Arab',
'result': 'REVIEW',
'remark': 'The language tag in IDN Table and Reference LGR are mismatched'
}]
}, {
'idn_table_language_tag': 'ja-hira',
'comparison': [{
'reference_lgr_language_tag': 'ar',
'result': 'REVIEW',
'remark': 'The language tag in IDN Table and Reference LGR are mismatched'
}, {
'reference_lgr_language_tag': 'ja-hira',
'result': 'MATCH',
'remark': 'Exact match'
}, {
'reference_lgr_language_tag': 'und-Arab',
'result': 'REVIEW',
'remark': 'The language tag in IDN Table and Reference LGR are mismatched'
}]
}])
def test_language_tag_none(self):
idn = load_lgr('idn_table_review/language_tag', 'language_tag_none.xml')
result = generate_language_tag_report(idn, self.ref)
self.assertCountEqual(result, [{
'idn_table_language_tag': '-',
'comparison': [{
'reference_lgr_language_tag': 'ar',
'result': 'MANUAL CHECK',
'remark': 'Language tag may be included in the comment'
}, {
'reference_lgr_language_tag': 'ja-hira',
'result': 'MANUAL CHECK',
'remark': 'Language tag may be included in the comment'
}, {
'reference_lgr_language_tag': 'und-Arab',
'result': 'MANUAL CHECK',
'remark': 'Language tag may be included in the comment'
}]
}])
| [
"logging.getLogger",
"lgr.tools.idn_review.language_tag.generate_language_tag_report",
"tests.unit.utils.load_lgr"
] | [((240, 278), 'logging.getLogger', 'logging.getLogger', (['"""test_variant_sets"""'], {}), "('test_variant_sets')\n", (257, 278), False, 'import logging\n'), ((404, 453), 'tests.unit.utils.load_lgr', 'load_lgr', (['"""idn_table_review"""', '"""reference_lgr.xml"""'], {}), "('idn_table_review', 'reference_lgr.xml')\n", (412, 453), False, 'from tests.unit.utils import load_lgr\n'), ((502, 563), 'tests.unit.utils.load_lgr', 'load_lgr', (['"""idn_table_review/language_tag"""', '"""language_tag.xml"""'], {}), "('idn_table_review/language_tag', 'language_tag.xml')\n", (510, 563), False, 'from tests.unit.utils import load_lgr\n'), ((582, 625), 'lgr.tools.idn_review.language_tag.generate_language_tag_report', 'generate_language_tag_report', (['idn', 'self.ref'], {}), '(idn, self.ref)\n', (610, 625), False, 'from lgr.tools.idn_review.language_tag import generate_language_tag_report\n'), ((4627, 4693), 'tests.unit.utils.load_lgr', 'load_lgr', (['"""idn_table_review/language_tag"""', '"""language_tag_none.xml"""'], {}), "('idn_table_review/language_tag', 'language_tag_none.xml')\n", (4635, 4693), False, 'from tests.unit.utils import load_lgr\n'), ((4712, 4755), 'lgr.tools.idn_review.language_tag.generate_language_tag_report', 'generate_language_tag_report', (['idn', 'self.ref'], {}), '(idn, self.ref)\n', (4740, 4755), False, 'from lgr.tools.idn_review.language_tag import generate_language_tag_report\n')] |
import torch
def prepare_device(use_gpu: bool) -> torch.device:
gpu_count = torch.cuda.device_count()
print(f"{gpu_count} CUDA-capable GPUs found.")
if not use_gpu or gpu_count < 1:
print("Local CPU selected for calculations.")
return torch.device("cpu")
device_id = 0
device = torch.device(f"cuda:{device_id}")
name = torch.cuda.get_device_name(device_id)
capability = torch.cuda.get_device_capability(device_id)
print(f"Using {name} GPU with CUDA {capability[0]}.{capability[1]} capability.")
return device
| [
"torch.cuda.get_device_name",
"torch.cuda.get_device_capability",
"torch.cuda.device_count",
"torch.device"
] | [((82, 107), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (105, 107), False, 'import torch\n'), ((318, 351), 'torch.device', 'torch.device', (['f"""cuda:{device_id}"""'], {}), "(f'cuda:{device_id}')\n", (330, 351), False, 'import torch\n'), ((363, 400), 'torch.cuda.get_device_name', 'torch.cuda.get_device_name', (['device_id'], {}), '(device_id)\n', (389, 400), False, 'import torch\n'), ((418, 461), 'torch.cuda.get_device_capability', 'torch.cuda.get_device_capability', (['device_id'], {}), '(device_id)\n', (450, 461), False, 'import torch\n'), ((266, 285), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (278, 285), False, 'import torch\n')] |
# Copyright 2020 Open Reaction Database Project Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ord_interface.build_database."""
import os
from absl.testing import absltest
from absl.testing import flagsaver
import psycopg2
from ord_schema import message_helpers
from ord_schema.proto import dataset_pb2
from ord_schema.proto import reaction_pb2
import ord_interface
from ord_interface import build_database
class BuildDatabaseTest(absltest.TestCase):
def setUp(self):
super().setUp()
# NOTE(kearnes): ord-postgres is the hostname in docker-compose.
self.host = 'ord-postgres'
# Create a test database.
connection = self._connect(ord_interface.POSTGRES_DB)
connection.set_session(autocommit=True)
with connection.cursor() as cursor:
cursor.execute('CREATE DATABASE test;')
connection.close()
# Create a test dataset.
self.test_subdirectory = self.create_tempdir()
reaction = reaction_pb2.Reaction()
reaction.reaction_id = 'test'
reaction.identifiers.add(value='reaction', type='REACTION_SMILES')
input1 = reaction.inputs['input1']
input1.components.add().identifiers.add(value='input1', type='SMILES')
input2 = reaction.inputs['input2']
input2.components.add().identifiers.add(value='input2a', type='SMILES')
input2.components.add().identifiers.add(value='input2b', type='SMILES')
outcome = reaction.outcomes.add()
product = outcome.products.add()
product.measurements.add(type='YIELD', percentage={'value': 2.5})
product.identifiers.add(value='product', type='SMILES')
reaction.provenance.doi = '10.0000/test.foo'
self.dataset = dataset_pb2.Dataset(dataset_id='test_dataset',
reactions=[reaction])
message_helpers.write_message(
self.dataset, os.path.join(self.test_subdirectory, 'test.pb'))
def tearDown(self):
# Remove the test database.
connection = self._connect(ord_interface.POSTGRES_DB)
connection.set_session(autocommit=True)
with connection.cursor() as cursor:
cursor.execute('DROP DATABASE test;')
connection.close()
def _connect(self, dbname):
return psycopg2.connect(dbname=dbname,
user=ord_interface.POSTGRES_USER,
password=ord_interface.POSTGRES_PASSWORD,
host=self.host,
port=ord_interface.POSTGRES_PORT)
def test_main(self):
input_pattern = os.path.join(self.test_subdirectory, '*.pb')
with flagsaver.flagsaver(input=input_pattern,
dbname='test',
host=self.host):
build_database.main(())
# Sanity checks.
with self._connect('test') as connection:
with connection.cursor() as cursor:
cursor.execute('SELECT * from reactions LIMIT 1;')
row = cursor.fetchone()
self.assertLen(row, 5)
cursor.execute('SELECT * from inputs LIMIT 1;')
row = cursor.fetchone()
self.assertLen(row, 2)
cursor.execute('SELECT * from outputs LIMIT 1;')
row = cursor.fetchone()
self.assertLen(row, 3)
if __name__ == '__main__':
absltest.main()
| [
"psycopg2.connect",
"ord_interface.build_database.main",
"os.path.join",
"absl.testing.absltest.main",
"ord_schema.proto.reaction_pb2.Reaction",
"ord_schema.proto.dataset_pb2.Dataset",
"absl.testing.flagsaver.flagsaver"
] | [((3989, 4004), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (4002, 4004), False, 'from absl.testing import absltest\n'), ((1506, 1529), 'ord_schema.proto.reaction_pb2.Reaction', 'reaction_pb2.Reaction', ([], {}), '()\n', (1527, 1529), False, 'from ord_schema.proto import reaction_pb2\n'), ((2265, 2333), 'ord_schema.proto.dataset_pb2.Dataset', 'dataset_pb2.Dataset', ([], {'dataset_id': '"""test_dataset"""', 'reactions': '[reaction]'}), "(dataset_id='test_dataset', reactions=[reaction])\n", (2284, 2333), False, 'from ord_schema.proto import dataset_pb2\n'), ((2831, 2998), 'psycopg2.connect', 'psycopg2.connect', ([], {'dbname': 'dbname', 'user': 'ord_interface.POSTGRES_USER', 'password': 'ord_interface.POSTGRES_PASSWORD', 'host': 'self.host', 'port': 'ord_interface.POSTGRES_PORT'}), '(dbname=dbname, user=ord_interface.POSTGRES_USER, password=\n ord_interface.POSTGRES_PASSWORD, host=self.host, port=ord_interface.\n POSTGRES_PORT)\n', (2847, 2998), False, 'import psycopg2\n'), ((3167, 3211), 'os.path.join', 'os.path.join', (['self.test_subdirectory', '"""*.pb"""'], {}), "(self.test_subdirectory, '*.pb')\n", (3179, 3211), False, 'import os\n'), ((2442, 2489), 'os.path.join', 'os.path.join', (['self.test_subdirectory', '"""test.pb"""'], {}), "(self.test_subdirectory, 'test.pb')\n", (2454, 2489), False, 'import os\n'), ((3225, 3296), 'absl.testing.flagsaver.flagsaver', 'flagsaver.flagsaver', ([], {'input': 'input_pattern', 'dbname': '"""test"""', 'host': 'self.host'}), "(input=input_pattern, dbname='test', host=self.host)\n", (3244, 3296), False, 'from absl.testing import flagsaver\n'), ((3376, 3399), 'ord_interface.build_database.main', 'build_database.main', (['()'], {}), '(())\n', (3395, 3399), False, 'from ord_interface import build_database\n')] |
from texting import COSP, LF
from xbrief.padder.pad_entries import pad_entries
candidates = {
'cities': [
('1', 'paris'),
('1.1', 'san fransisco'),
('1.2', 'tokyo'),
('1.3', 'delhi'),
('1.4', 'vienna'),
]
}
def test():
for key, vec in candidates.items():
print(key)
padded = pad_entries(vec)
print(LF.join(['(' + key + COSP + value + ')' for key, value in padded]))
test()
| [
"xbrief.padder.pad_entries.pad_entries",
"texting.LF.join"
] | [((347, 363), 'xbrief.padder.pad_entries.pad_entries', 'pad_entries', (['vec'], {}), '(vec)\n', (358, 363), False, 'from xbrief.padder.pad_entries import pad_entries\n'), ((378, 446), 'texting.LF.join', 'LF.join', (["[('(' + key + COSP + value + ')') for key, value in padded]"], {}), "([('(' + key + COSP + value + ')') for key, value in padded])\n", (385, 446), False, 'from texting import COSP, LF\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""INE 5421 - Linguagem Formais e Compiladores - Trabalho 01
Universidade Federal de Santa Catarina
Departamento de Informática e Estatística (INE)
Students:
- <NAME>
- <NAME>
- <NAME>
"""
import sys
from src.automata import Automata
from src.grammar import Grammar
from PyQt5.QtWidgets import QApplication
from PyQt5.QtQml import QQmlApplicationEngine
from PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot, pyqtProperty, QVariant
class Operator(QObject):
automataLoaded = pyqtSignal()
grammarLoaded = pyqtSignal()
automataGeneratedFromAutomata = pyqtSignal()
automataGeneratedFromGrammar = pyqtSignal()
grammarGeneratedFromAutomata = pyqtSignal()
def __init__(self, context, parent=None):
super(Operator, self).__init__(parent)
self._ctx = context
self._automatas = []
self._grammars = []
self._grammarFromAutomata = None
self._automataFromAutomata = None
self._automataFromGrammar = None
self._automataFromRegex = None
@pyqtProperty(str, notify=automataLoaded)
def automatas(self):
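        # Render every loaded automaton as a tab-separated transition table:
        # '->' marks the initial state, '*' marks final states.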
result_string = ''
for automata in self._automatas:
for i, input in enumerate(list(automata.alphabet) + ['&']):
if i == 0:
result_string += '\t\t' + input
else:
result_string += '\t' + input
result_string += '\n'
for state in automata.states:
if state in automata.final_states:
result_string += '* '
if state == automata.q0:
result_string += '-> '
result_string += state + '\t'
for input in list(automata.alphabet) + ['&']:
if automata.transition(state, input):
result_string += '\t' + str(list(automata.transition(state, input)))
else:
result_string += '\t' + '[]'
result_string += '\n'
return result_string
@automatas.setter
def automatas(self, value):
self._automatas = value
self.automataLoaded.emit()
@pyqtProperty(str, notify=automataGeneratedFromAutomata)
def automataFromAutomata(self):
result_string = ''
a = self._automataFromAutomata
if not a:
return result_string
for i, input in enumerate(list(a.alphabet) + ['&']):
if i == 0:
result_string += '\t\t' + input
else:
result_string += '\t' + input
result_string += '\n'
for state in a.states:
if state in a.final_states:
result_string += '* '
if state == a.q0:
result_string += '-> '
result_string += state + '\t'
for input in list(a.alphabet) + ['&']:
transition = a.transition(state, input)
if transition:
if isinstance(transition, list):
result_string += '\t' + str(transition)
else:
result_string += '\t' + str([transition])
else:
result_string += '\t' + str(list([]))
result_string += '\n'
return result_string
@pyqtProperty(str, notify=automataGeneratedFromGrammar)
def automataFromGrammar(self):
result_string = ''
a = self._automataFromGrammar
if not a:
            return result_string
for i, input in enumerate(list(a.alphabet) + ['&']):
if i == 0:
result_string += '\t\t' + input
else:
result_string += '\t' + input
result_string += '\n'
for state in a.states:
if state in a.final_states:
result_string += '* '
if state == a.q0:
result_string += '-> '
result_string += state + '\t'
for input in list(a.alphabet) + ['&']:
if a.transition(state, input):
result_string += '\t' + str(list(a.transition(state, input)))
else:
result_string += '\t' + str(list([]))
result_string += '\n'
return result_string
@automataFromGrammar.setter
def automataFromGrammar(self, value):
self._automataFromGrammar = value
self.automataGeneratedFromGrammar.emit()
@automataFromAutomata.setter
def automataFromAutomata(self, value):
        self._automataFromAutomata = value
self.automataGeneratedFromAutomata.emit()
@pyqtProperty(str, notify=grammarLoaded)
def grammars(self):
result_string = ''
if len(self._grammars) < 1:
            return result_string
for production in self._grammars[0]._productions:
result_string += production[0] + ' -> ' + production[1]
if len(production) > 2:
result_string += production[2]
result_string += '\n'
return result_string
@grammars.setter
def grammars(self, value):
self._grammars = value
self.grammarLoaded.emit()
@pyqtProperty(str, notify=grammarGeneratedFromAutomata)
def grammarFromAutomata(self):
result_string = ''
if not self._grammarFromAutomata:
return result_string
for production in self._grammarFromAutomata._productions:
result_string += production[0] + ' -> ' + production[1]
if len(production) > 2:
result_string += production[2]
result_string += '\n'
return result_string
@grammarFromAutomata.setter
def grammarFromAutomata(self, value):
self._grammarFromAutomata = value
self.grammarGeneratedFromAutomata.emit()
@pyqtSlot(QVariant)
def load_automata(self, filename):
automata = Automata.read_from_json(filename[0].toString().replace('.json', '').replace('file://', ''))
if automata and len(self._automatas) < 2:
self._automatas.append(automata)
self.automatas = self._automatas
else:
# TODO: Show some dialog to the user
print('Reached the max number automatas')
@pyqtSlot(QVariant)
def clear_automatas(self):
self._automatas = []
self._automataFromAutomata = None
self._grammarFromAutomata = None
self.automatas = self._automatas
self.automataFromAutomata = self._automataFromAutomata
self.grammarFromAutomata = self._grammarFromAutomata
@pyqtSlot(QVariant)
def clear_grammars(self):
self._grammars = []
self._automataFromGrammar = None
self.grammars = self._grammars
self.automataFromGrammar = self._automataFromGrammar
@pyqtSlot(QVariant)
def nfa_to_dfa(self):
if len(self._automatas) != 1:
# TODO: Show some dialog to the user
print('Only one automata is allowed for this operation')
return
result = self._automatas[0].to_dfa()
self._automataFromAutomata = result
self.automataFromAutomata = self._automataFromAutomata
@pyqtSlot(QVariant)
def dfa_to_grammar(self):
if len(self._automatas) != 1:
# TODO: Show some dialog to the user
print('Only one automata is allowed for this operation')
return
result = self._automatas[0].to_grammar()
self._grammarFromAutomata = result
self.grammarFromAutomata = self._grammarFromAutomata
@pyqtSlot(QVariant)
def dfa_union(self):
if len(self._automatas) != 2:
# TODO: Show some dialog to the user
print('You need two automatas to perform this operation')
return
result = self._automatas[0].union(self._automatas[1])
self._automataFromAutomata = result
self.automataFromAutomata = self._automataFromAutomata
@pyqtSlot(QVariant)
def dfa_intersection(self):
if len(self._automatas) != 2:
# TODO: Show some dialog to the user
print('You need two automatas to perform this operation')
return
result = self._automatas[0].intersection(self._automatas[1])
self._automataFromAutomata = result
self.automataFromAutomata = self._automataFromAutomata
@pyqtSlot(QVariant)
def dfa_minimize(self):
if len(self._automatas) != 1:
# TODO: Show some dialog to the user
print('Only one automata is allowed for this operation')
return
result = self._automatas[0].minimize()
self._automataFromAutomata = result
self.automataFromAutomata = self._automataFromAutomata
@pyqtSlot(QVariant)
def load_grammar(self, filename):
grammar = Grammar.read_from_json(filename[0].toString().replace('.json', '').replace('file://', ''))
if grammar and len(self._grammars) < 1:
self._grammars.append(grammar)
self.grammars = self._grammars
else:
# TODO: Show some dialog to the user
print('Reached the max number of grammars')
@pyqtSlot(QVariant)
def grammar_to_dfa(self):
if len(self._grammars) != 1:
# TODO: Show some dialog to the user
print('You need exactly one grammar to perform this operation')
return
result = self._grammars[0].to_automaton()
self._automataFromGrammar = result
self.automataFromGrammar = self._automataFromGrammar
if __name__ == "__main__":
app = QApplication(sys.argv)
engine = QQmlApplicationEngine()
ctx = engine.rootContext()
operator = Operator(ctx)
ctx.setContextProperty('operator', operator)
engine.load('ui/main.qml')
win = engine.rootObjects()[0]
win.show()
sys.exit(app.exec_())
| [
"PyQt5.QtCore.pyqtProperty",
"PyQt5.QtCore.pyqtSignal",
"PyQt5.QtQml.QQmlApplicationEngine",
"PyQt5.QtCore.pyqtSlot",
"PyQt5.QtWidgets.QApplication"
] | [((527, 539), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', ([], {}), '()\n', (537, 539), False, 'from PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot, pyqtProperty, QVariant\n'), ((560, 572), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', ([], {}), '()\n', (570, 572), False, 'from PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot, pyqtProperty, QVariant\n'), ((609, 621), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', ([], {}), '()\n', (619, 621), False, 'from PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot, pyqtProperty, QVariant\n'), ((657, 669), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', ([], {}), '()\n', (667, 669), False, 'from PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot, pyqtProperty, QVariant\n'), ((705, 717), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', ([], {}), '()\n', (715, 717), False, 'from PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot, pyqtProperty, QVariant\n'), ((1066, 1106), 'PyQt5.QtCore.pyqtProperty', 'pyqtProperty', (['str'], {'notify': 'automataLoaded'}), '(str, notify=automataLoaded)\n', (1078, 1106), False, 'from PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot, pyqtProperty, QVariant\n'), ((2213, 2268), 'PyQt5.QtCore.pyqtProperty', 'pyqtProperty', (['str'], {'notify': 'automataGeneratedFromAutomata'}), '(str, notify=automataGeneratedFromAutomata)\n', (2225, 2268), False, 'from PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot, pyqtProperty, QVariant\n'), ((3368, 3422), 'PyQt5.QtCore.pyqtProperty', 'pyqtProperty', (['str'], {'notify': 'automataGeneratedFromGrammar'}), '(str, notify=automataGeneratedFromGrammar)\n', (3380, 3422), False, 'from PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot, pyqtProperty, QVariant\n'), ((4673, 4712), 'PyQt5.QtCore.pyqtProperty', 'pyqtProperty', (['str'], {'notify': 'grammarLoaded'}), '(str, notify=grammarLoaded)\n', (4685, 4712), False, 'from PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot, pyqtProperty, QVariant\n'), ((5218, 5272), 'PyQt5.QtCore.pyqtProperty', 'pyqtProperty', (['str'], {'notify': 'grammarGeneratedFromAutomata'}), '(str, notify=grammarGeneratedFromAutomata)\n', (5230, 5272), False, 'from PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot, pyqtProperty, QVariant\n'), ((5865, 5883), 'PyQt5.QtCore.pyqtSlot', 'pyqtSlot', (['QVariant'], {}), '(QVariant)\n', (5873, 5883), False, 'from PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot, pyqtProperty, QVariant\n'), ((6297, 6315), 'PyQt5.QtCore.pyqtSlot', 'pyqtSlot', (['QVariant'], {}), '(QVariant)\n', (6305, 6315), False, 'from PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot, pyqtProperty, QVariant\n'), ((6630, 6648), 'PyQt5.QtCore.pyqtSlot', 'pyqtSlot', (['QVariant'], {}), '(QVariant)\n', (6638, 6648), False, 'from PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot, pyqtProperty, QVariant\n'), ((6854, 6872), 'PyQt5.QtCore.pyqtSlot', 'pyqtSlot', (['QVariant'], {}), '(QVariant)\n', (6862, 6872), False, 'from PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot, pyqtProperty, QVariant\n'), ((7233, 7251), 'PyQt5.QtCore.pyqtSlot', 'pyqtSlot', (['QVariant'], {}), '(QVariant)\n', (7241, 7251), False, 'from PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot, pyqtProperty, QVariant\n'), ((7617, 7635), 'PyQt5.QtCore.pyqtSlot', 'pyqtSlot', (['QVariant'], {}), '(QVariant)\n', (7625, 7635), False, 'from PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot, pyqtProperty, QVariant\n'), ((8013, 8031), 'PyQt5.QtCore.pyqtSlot', 'pyqtSlot', (['QVariant'], {}), '(QVariant)\n', (8021, 8031), False, 'from PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot, pyqtProperty, QVariant\n'), ((8423, 8441), 
'PyQt5.QtCore.pyqtSlot', 'pyqtSlot', (['QVariant'], {}), '(QVariant)\n', (8431, 8441), False, 'from PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot, pyqtProperty, QVariant\n'), ((8806, 8824), 'PyQt5.QtCore.pyqtSlot', 'pyqtSlot', (['QVariant'], {}), '(QVariant)\n', (8814, 8824), False, 'from PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot, pyqtProperty, QVariant\n'), ((9231, 9249), 'PyQt5.QtCore.pyqtSlot', 'pyqtSlot', (['QVariant'], {}), '(QVariant)\n', (9239, 9249), False, 'from PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot, pyqtProperty, QVariant\n'), ((9654, 9676), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (9666, 9676), False, 'from PyQt5.QtWidgets import QApplication\n'), ((9690, 9713), 'PyQt5.QtQml.QQmlApplicationEngine', 'QQmlApplicationEngine', ([], {}), '()\n', (9711, 9713), False, 'from PyQt5.QtQml import QQmlApplicationEngine\n')] |
import obspy
import read_event_obspy_file as reof
from getwaveform import *
def get_ev_info(ev_info,iex):
# ===============================================================
# SilwalTape2016 example event (Anchorage) -- python run_getwaveform.py event_input_mtuq2022 1
if iex == 1:
ev_info.overwrite_ddir = 1
ev_info.use_catalog = 0 # do not use event catalog for source parameters
ev_info.otime = obspy.UTCDateTime("2009-04-07T20:12:55.351")
ev_info.min_dist = 0
ev_info.max_dist = 300
ev_info.tbefore_sec = 100
ev_info.tafter_sec = 300
# RAW and ENZ files can be used when checking if you are receiving all possible data (example station: SOLD)
ev_info.isave_raw = False
ev_info.isave_raw_processed = False
ev_info.isave_ENZ = False
# Networks of interest (All within distance limitations)
ev_info.network = 'AK,YV,AV,AT,XZ,PN'
ev_info.channel = 'BH?'
# Event information
ev_info.elat = 61.45420
ev_info.elon = -149.7428
ev_info.edep = 33033.60
ev_info.emag = 4.6
# scaling and resampling
ev_info.resample_TF = True
ev_info.resample_freq = 50
# Scaling depends on units (This assumes velocity in units cm/s)
ev_info.scale_factor = 100
# 2020 Southern California event
if iex == 2:
ev_info.overwrite_ddir = 1
ev_info.use_catalog = 0
ev_info.otime = obspy.UTCDateTime("2020-04-04T01:53:18.920")
ev_info.min_dist = 0
ev_info.max_dist = 300
ev_info.tbefore_sec = 100
ev_info.tafter_sec = 300
# RAW and ENZ files can be used when checking if you are receiving all possible data
ev_info.isave_raw = False
ev_info.isave_raw_processed = False
ev_info.isave_ENZ = False
# Only receive CI network stations
ev_info.network = 'CI'
ev_info.channel = 'BH?'
# CI.SWS causes script to crash (Station problems?)
ev_info.station = 'BAR,IKP,PLM,GLA,BC3,PDM,IRM,DAN,GMR,TUQ,HEC,GSC,RRX,BBR,SCI2,CIA,SDD,VTV,ADO,ARV,DGR,SVD,DJJ,FMP,-SWS' #Receive subset of stations
# Event specific information
ev_info.elat = 33.490
ev_info.elon = -116.506
ev_info.edep = 10500.0
ev_info.emag = 4.9
# scaling and resampling
ev_info.resample_TF = True
ev_info.resample_freq = 50
# See iex == 1 for more info
ev_info.scale_factor = 100
# 2017 North Korea Event
if iex == 3:
ev_info.overwrite_ddir = 1
ev_info.use_catalog = 0
ev_info.otime = obspy.UTCDateTime("2017-09-03 03:30:01.760")
ev_info.min_dist = 0
ev_info.max_dist = 1300
ev_info.tbefore_sec = 100
ev_info.tafter_sec = 600
#^^^^^^^^^^^^^^^^^^^^^^^^^^
# RAW and ENZ files can be used when checking if you are receiving all possible data
ev_info.isave_raw = False
ev_info.isave_raw_processed = False
ev_info.isave_ENZ = False
# Network and Channel requests CHANGE THIS vvvvvvvv
ev_info.network = 'IC,IU,G,JP'
ev_info.channel = 'BH?'
ev_info.station = 'MDJ,INCN,JTU,MAJO,INU,BJT,YSS'
ev_info.location = '00'
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Event specific information CHANGE THIS vvvvvvvvvvvv
ev_info.elat = 41.3324
ev_info.elon = 129.0297
ev_info.edep = 1000.0
ev_info.emag = 5.18
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# scaling and resampling
ev_info.resample_TF = True
ev_info.resample_freq = 5
# See iex == 1 for more info
ev_info.scale_factor = 100
# Iceland Event
if iex == 4:
ev_info.overwrite_ddir = 1
ev_info.use_catalog = 0
#CHANGE THIS vvvvvvvvv
#
ev_info.otime = obspy.UTCDateTime("2014-08-25T16:19:03.0")
ev_info.min_dist = 20
ev_info.max_dist = 300
ev_info.tbefore_sec = 60
ev_info.tafter_sec = 360
#^^^^^^^^^^^^^^^^^^^^^^^^^^
# RAW and ENZ files can be used when checking if you are receiving all possible data
ev_info.isave_raw = False
ev_info.isave_raw_processed = False
ev_info.isave_ENZ = False
# Network and Channel requests CHANGE THIS vvvvvvvv
ev_info.network = 'Z7'
ev_info.channel = 'HH?'
        ev_info.station = 'RODG,DYSA,LIND,LOKT,LAUF,KALF,HELI,FAG,SVIN,K250' # Receive subset of Z7 stations
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Event specific information CHANGE THIS vvvvvvvvvvvv
ev_info.elat = 64.612
ev_info.elon = -17.472
ev_info.edep = 5000.0
ev_info.emag = 4.6
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# scaling and resampling
ev_info.resample_TF = True
ev_info.resample_freq = 50
# See iex == 1 for more info
ev_info.scale_factor = 100
# Iceland Event SA
if iex == 5:
ev_info.overwrite_ddir = 1
ev_info.use_catalog = 0
#CHANGE THIS vvvvvvvvv
#
ev_info.otime = obspy.UTCDateTime("2014-08-28T08:13:39.0")
ev_info.min_dist = 50
ev_info.max_dist = 300
ev_info.tbefore_sec = 60
ev_info.tafter_sec = 360
#^^^^^^^^^^^^^^^^^^^^^^^^^^
# RAW and ENZ files can be used when checking if you are receiving all possible data
ev_info.isave_raw = False
ev_info.isave_raw_processed = False
ev_info.isave_ENZ = False
# Network and Channel requests CHANGE THIS vvvvvvvv
ev_info.network = 'Z7'
ev_info.channel = 'HH?'
ev_info.station = 'VADA,RIFR,DREK,VIFE,SVAD,BRU,BOTN,UTYR,MIDF,SKAF,HETO,LAUF,FAG,SVIN,K250'
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Event specific information CHANGE THIS vvvvvvvvvvvv
ev_info.elat = 64.654
ev_info.elon = -17.385
ev_info.edep = 7000.0
ev_info.emag = 5.3
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# scaling and resampling
ev_info.resample_TF = True
ev_info.resample_freq = 50
# See iex == 1 for more info
ev_info.scale_factor = 100
# Iceland Event SB
if iex == 6:
ev_info.overwrite_ddir = 1
ev_info.use_catalog = 0
#CHANGE THIS vvvvvvvvv
#
ev_info.otime = obspy.UTCDateTime("2014-08-26T11:56:45.0")
ev_info.min_dist = 20
ev_info.max_dist = 300
ev_info.tbefore_sec = 60
ev_info.tafter_sec = 360
#^^^^^^^^^^^^^^^^^^^^^^^^^^
# RAW and ENZ files can be used when checking if you are receiving all possible data
ev_info.isave_raw = False
ev_info.isave_raw_processed = False
ev_info.isave_ENZ = False
# Network and Channel requests CHANGE THIS vvvvvvvv
ev_info.network = 'Z7'
ev_info.channel = 'HH?'
ev_info.station = 'FJAS,OSKV,KLUR,DYFE,DJK,VIFE,LOKT,BJK,BRU,MIDF,KOLL,KODA,SVIN,SKAF,K250,KALF,LAUF'
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Event specific information CHANGE THIS vvvvvvvvvvvv
ev_info.elat = 64.8
ev_info.elon = -16.897
ev_info.edep = 7000.0
ev_info.emag = 4.2
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# scaling and resampling
ev_info.resample_TF = True
ev_info.resample_freq = 50
# See iex == 1 for more info
ev_info.scale_factor = 100
return(ev_info)
| [
"obspy.UTCDateTime"
] | [((434, 478), 'obspy.UTCDateTime', 'obspy.UTCDateTime', (['"""2009-04-07T20:12:55.351"""'], {}), "('2009-04-07T20:12:55.351')\n", (451, 478), False, 'import obspy\n'), ((1488, 1532), 'obspy.UTCDateTime', 'obspy.UTCDateTime', (['"""2020-04-04T01:53:18.920"""'], {}), "('2020-04-04T01:53:18.920')\n", (1505, 1532), False, 'import obspy\n'), ((2668, 2712), 'obspy.UTCDateTime', 'obspy.UTCDateTime', (['"""2017-09-03 03:30:01.760"""'], {}), "('2017-09-03 03:30:01.760')\n", (2685, 2712), False, 'import obspy\n'), ((3959, 4001), 'obspy.UTCDateTime', 'obspy.UTCDateTime', (['"""2014-08-25T16:19:03.0"""'], {}), "('2014-08-25T16:19:03.0')\n", (3976, 4001), False, 'import obspy\n'), ((5301, 5343), 'obspy.UTCDateTime', 'obspy.UTCDateTime', (['"""2014-08-28T08:13:39.0"""'], {}), "('2014-08-28T08:13:39.0')\n", (5318, 5343), False, 'import obspy\n'), ((6633, 6675), 'obspy.UTCDateTime', 'obspy.UTCDateTime', (['"""2014-08-26T11:56:45.0"""'], {}), "('2014-08-26T11:56:45.0')\n", (6650, 6675), False, 'import obspy\n')] |
from django.shortcuts import render
from django.http import HttpResponse
def index(request):
return render(request,'landing\home.html')
# Create your views here.
| [
"django.shortcuts.render"
] | [((102, 139), 'django.shortcuts.render', 'render', (['request', '"""landing\\\\home.html"""'], {}), "(request, 'landing\\\\home.html')\n", (108, 139), False, 'from django.shortcuts import render\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Description
"""
import re
import torch
from ptranking.data.data_utils import LABEL_TYPE
from ptranking.metric.adhoc.adhoc_metric import torch_dcg_at_k
#######
# For Delta Metrics
#######
def get_delta_ndcg(batch_ideal_rankings, batch_predict_rankings, label_type=LABEL_TYPE.MultiLabel, device='cpu'):
'''
    Delta-nDCG w.r.t. pairwise swapping within the currently predicted ranking
    :param batch_ideal_rankings: the standard labels sorted in a descending order
    :param batch_predict_rankings: the standard labels sorted based on the corresponding predictions
    :return: a batch of matrices holding the absolute nDCG change incurred by swapping each pair of ranks
'''
# ideal discount cumulative gains
batch_idcgs = torch_dcg_at_k(batch_rankings=batch_ideal_rankings, label_type=label_type, device=device)
if LABEL_TYPE.MultiLabel == label_type:
batch_gains = torch.pow(2.0, batch_predict_rankings) - 1.0
elif LABEL_TYPE.Permutation == label_type:
batch_gains = batch_predict_rankings
else:
raise NotImplementedError
batch_n_gains = batch_gains / batch_idcgs # normalised gains
batch_ng_diffs = torch.unsqueeze(batch_n_gains, dim=2) - torch.unsqueeze(batch_n_gains, dim=1)
batch_std_ranks = torch.arange(batch_predict_rankings.size(1), dtype=torch.float, device=device)
batch_dists = 1.0 / torch.log2(batch_std_ranks + 2.0) # discount co-efficients
batch_dists = torch.unsqueeze(batch_dists, dim=0)
batch_dists_diffs = torch.unsqueeze(batch_dists, dim=2) - torch.unsqueeze(batch_dists, dim=1)
batch_delta_ndcg = torch.abs(batch_ng_diffs) * torch.abs(batch_dists_diffs) # absolute changes w.r.t. pairwise swapping
return batch_delta_ndcg
def metric_results_to_string(list_scores=None, list_cutoffs=None, split_str=', ', metric='nDCG'):
"""
Convert metric results to a string representation
:param list_scores:
:param list_cutoffs:
:param split_str:
:return:
"""
list_str = []
for i in range(len(list_scores)):
list_str.append(metric + '@{}:{:.4f}'.format(list_cutoffs[i], list_scores[i]))
return split_str.join(list_str)
def tryint(s):
try:
return int(s)
except:
return s
def alphanum_key(s):
"""
Turn a string into a list of string and number chunks.
"z23a" -> ["z", 23, "a"]
"""
return [ tryint(c) for c in re.split('([0-9]+)', s) ]
def sort_nicely(l):
""" Sort the given list in the way that humans expect."""
l.sort(key=alphanum_key, reverse=True)
def test_sort():
tmp_list = ['net_params_epoch_2.pkl', 'net_params_epoch_34.pkl', 'net_params_epoch_8.pkl']
print(sort_nicely(tmp_list))
print(tmp_list)
def get_opt_model(list_model_names):
sort_nicely(list_model_names)
return list_model_names[0]
| [
"ptranking.metric.adhoc.adhoc_metric.torch_dcg_at_k",
"re.split",
"torch.abs",
"torch.unsqueeze",
"torch.pow",
"torch.log2"
] | [((704, 797), 'ptranking.metric.adhoc.adhoc_metric.torch_dcg_at_k', 'torch_dcg_at_k', ([], {'batch_rankings': 'batch_ideal_rankings', 'label_type': 'label_type', 'device': 'device'}), '(batch_rankings=batch_ideal_rankings, label_type=label_type,\n device=device)\n', (718, 797), False, 'from ptranking.metric.adhoc.adhoc_metric import torch_dcg_at_k\n'), ((1426, 1461), 'torch.unsqueeze', 'torch.unsqueeze', (['batch_dists'], {'dim': '(0)'}), '(batch_dists, dim=0)\n', (1441, 1461), False, 'import torch\n'), ((1143, 1180), 'torch.unsqueeze', 'torch.unsqueeze', (['batch_n_gains'], {'dim': '(2)'}), '(batch_n_gains, dim=2)\n', (1158, 1180), False, 'import torch\n'), ((1183, 1220), 'torch.unsqueeze', 'torch.unsqueeze', (['batch_n_gains'], {'dim': '(1)'}), '(batch_n_gains, dim=1)\n', (1198, 1220), False, 'import torch\n'), ((1347, 1380), 'torch.log2', 'torch.log2', (['(batch_std_ranks + 2.0)'], {}), '(batch_std_ranks + 2.0)\n', (1357, 1380), False, 'import torch\n'), ((1486, 1521), 'torch.unsqueeze', 'torch.unsqueeze', (['batch_dists'], {'dim': '(2)'}), '(batch_dists, dim=2)\n', (1501, 1521), False, 'import torch\n'), ((1524, 1559), 'torch.unsqueeze', 'torch.unsqueeze', (['batch_dists'], {'dim': '(1)'}), '(batch_dists, dim=1)\n', (1539, 1559), False, 'import torch\n'), ((1583, 1608), 'torch.abs', 'torch.abs', (['batch_ng_diffs'], {}), '(batch_ng_diffs)\n', (1592, 1608), False, 'import torch\n'), ((1611, 1639), 'torch.abs', 'torch.abs', (['batch_dists_diffs'], {}), '(batch_dists_diffs)\n', (1620, 1639), False, 'import torch\n'), ((861, 899), 'torch.pow', 'torch.pow', (['(2.0)', 'batch_predict_rankings'], {}), '(2.0, batch_predict_rankings)\n', (870, 899), False, 'import torch\n'), ((2381, 2404), 're.split', 're.split', (['"""([0-9]+)"""', 's'], {}), "('([0-9]+)', s)\n", (2389, 2404), False, 'import re\n')] |
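A minimal, self-contained sketch of the natural ("human") sort that get_opt_model above relies on; the checkpoint file names are the same illustrative ones used in test_sort.

# Natural-sort sketch: numeric chunks are compared as integers, so epoch 34
# outranks epoch 8, which a plain lexicographic sort gets wrong.
import re

def alphanum_key(s):
    # "net_params_epoch_34.pkl" -> ['net_params_epoch_', 34, '.pkl']
    return [int(c) if c.isdigit() else c for c in re.split('([0-9]+)', s)]

names = ['net_params_epoch_2.pkl', 'net_params_epoch_34.pkl', 'net_params_epoch_8.pkl']
print(sorted(names, reverse=True))                    # lexicographic: epochs 8, 34, 2
print(sorted(names, key=alphanum_key, reverse=True))  # natural: epochs 34, 8, 2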
# -*- coding: utf-8 -*-
from scipy.stats import gamma
from matplotlib import gridspec
import math
from math import exp
import matplotlib
import nanopores as nano
import nanopores.geometries.pughpore as pughpore
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
import numpy as np
import os
import sys
import nanopores.tools.fields as f
HOME = os.path.expanduser("~")
PAPERDIR = os.path.join(HOME, "papers", "paper-howorka")
FIGDIR = os.path.join(PAPERDIR, "figures", "")
DATADIR = os.path.join(HOME,"Dropbox", "nanopores", "fields")
f.set_dir(DATADIR)
number=False
drop, th = f.get("events_pugh_experiment", "drop", "t")
len=th.load().shape[0]
la = 1./23.
a = 1.074
color2='green'
orange='lightgreen'
color3='red'
gray='#888888'
orange='#ff6600'
#orange=gray
log=True
#log=False
if log:
plt.figure(figsize=(10,5),dpi=80)
gs = gridspec.GridSpec(2,3,width_ratios=[4,2,1],height_ratios=[1,2.5])
else:
plt.figure(figsize=(4,6),dpi=80)
gs = gridspec.GridSpec(2,1,height_ratios=[1,2.5])
gs.update(wspace=0.,hspace=0.)
thleft=[x for x in th if x<2.]
thright=[x for x in th if x>=2.]
dropleft=[drop[i] for i in range(len) if th[i]<2.]
dropright=[drop[i] for i in range(len) if th[i]>=2.]
minperc=0.
maxperc=40.
if log:
plt1=plt.subplot(gs[1,0])
plt1.scatter(thright,dropright,s=8,color=orange)
plt1.scatter(thleft,dropleft,s=8,color=gray)
xfmt=FormatStrFormatter('%g')
plt1.set_xlim([.2*min(th),max(th)*5.])
plt1.set_ylim([minperc,maxperc])
plt1.set_xscale('log')
plt1.xaxis.set_major_formatter(xfmt)
plt1.invert_yaxis()
plt1.set_ylabel(r'A/I$_0$ [%]',fontsize=15)
plt1.set_xlabel(r'$\tau_{off}$ [ms]',fontsize=15,x=.76)
if log:
plt2=plt.subplot(gs[1,1])
else:
plt2=plt.subplot(gs[1,0])
plt2.scatter(thright,dropright,s=8,color=orange)
plt2.scatter(thleft,dropleft,s=8,color=gray)
plt2.invert_yaxis()
plt2.set_ylim([maxperc,minperc])
plt2.set_xlim([-2e-2*max(th),max(th)*(1.+2e-2)])
if log:
plt2.axes.get_yaxis().set_visible(False)
plt2.axes.get_xaxis().major.locator.set_params(nbins=6)
plt2.set_ylabel(r'A/I$_0$ [%]',fontsize=15)
if not log:
plt2.axes.get_xaxis().major.locator.set_params(nbins=7)
plt2.set_xlabel(r'$\tau_{off}$ [ms]',fontsize=15)
alpha=.3
if log:
plt3=plt.subplot(gs[1,2])
n, bins, patches = plt3.hist(np.array(dropright),5,normed=1,orientation='horizontal',color=orange,alpha=alpha)
plt3.invert_yaxis()
plt3.set_xlim([0.,max(n)*1.2])
plt3.set_ylim([maxperc,minperc])
plt3.axes.get_xaxis().set_visible(False)
plt3.axes.get_yaxis().set_visible(False)
if log:
plt4=plt.subplot(gs[0,1])
else:
plt4=plt.subplot(gs[0,0])
plt4.plot(np.linspace(1,100,100),np.array([gamma.pdf(x,a)*la**a*exp(x*(1-la)) for x in np.linspace(1,100,100)]),color=orange,linewidth=1.5)
n, bins, patches = plt4.hist(np.array(thright),15,normed=1,color=orange,alpha=alpha)
plt4.set_xlim([-2e-2*max(th),max(th)*(1.+2e-2)])
plt4.axes.get_xaxis().set_visible(False)
plt4.axes.get_yaxis().set_visible(False)
plt.tight_layout()
if log:
pass
# plt.savefig('hist_data1.pdf')
else:
pass
# plt.savefig('hist_data2.pdf')
plt.show()
| [
"matplotlib.pyplot.show",
"nanopores.tools.fields.get",
"scipy.stats.gamma.pdf",
"os.path.join",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.gridspec.GridSpec",
"numpy.linspace",
"matplotlib.pyplot.tight_layout",
"math.exp",
"matplotlib.pyplot.subplot",
"matplotlib.ticker.FormatStrFormatter",
"os.path.expanduser",
"nanopores.tools.fields.set_dir"
] | [((374, 397), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (392, 397), False, 'import os\n'), ((409, 454), 'os.path.join', 'os.path.join', (['HOME', '"""papers"""', '"""paper-howorka"""'], {}), "(HOME, 'papers', 'paper-howorka')\n", (421, 454), False, 'import os\n'), ((464, 501), 'os.path.join', 'os.path.join', (['PAPERDIR', '"""figures"""', '""""""'], {}), "(PAPERDIR, 'figures', '')\n", (476, 501), False, 'import os\n'), ((512, 564), 'os.path.join', 'os.path.join', (['HOME', '"""Dropbox"""', '"""nanopores"""', '"""fields"""'], {}), "(HOME, 'Dropbox', 'nanopores', 'fields')\n", (524, 564), False, 'import os\n'), ((564, 582), 'nanopores.tools.fields.set_dir', 'f.set_dir', (['DATADIR'], {}), '(DATADIR)\n', (573, 582), True, 'import nanopores.tools.fields as f\n'), ((610, 654), 'nanopores.tools.fields.get', 'f.get', (['"""events_pugh_experiment"""', '"""drop"""', '"""t"""'], {}), "('events_pugh_experiment', 'drop', 't')\n", (615, 654), True, 'import nanopores.tools.fields as f\n'), ((3054, 3072), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3070, 3072), True, 'import matplotlib.pyplot as plt\n'), ((3175, 3185), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3183, 3185), True, 'import matplotlib.pyplot as plt\n'), ((829, 864), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)', 'dpi': '(80)'}), '(figsize=(10, 5), dpi=80)\n', (839, 864), True, 'import matplotlib.pyplot as plt\n'), ((872, 943), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(2)', '(3)'], {'width_ratios': '[4, 2, 1]', 'height_ratios': '[1, 2.5]'}), '(2, 3, width_ratios=[4, 2, 1], height_ratios=[1, 2.5])\n', (889, 943), False, 'from matplotlib import gridspec\n'), ((948, 982), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 6)', 'dpi': '(80)'}), '(figsize=(4, 6), dpi=80)\n', (958, 982), True, 'import matplotlib.pyplot as plt\n'), ((990, 1037), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(2)', '(1)'], {'height_ratios': '[1, 2.5]'}), '(2, 1, height_ratios=[1, 2.5])\n', (1007, 1037), False, 'from matplotlib import gridspec\n'), ((1278, 1299), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1, 0]'], {}), '(gs[1, 0])\n', (1289, 1299), True, 'import matplotlib.pyplot as plt\n'), ((1410, 1434), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%g"""'], {}), "('%g')\n", (1428, 1434), False, 'from matplotlib.ticker import FormatStrFormatter\n'), ((1733, 1754), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1, 1]'], {}), '(gs[1, 1])\n', (1744, 1754), True, 'import matplotlib.pyplot as plt\n'), ((1769, 1790), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1, 0]'], {}), '(gs[1, 0])\n', (1780, 1790), True, 'import matplotlib.pyplot as plt\n'), ((2297, 2318), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1, 2]'], {}), '(gs[1, 2])\n', (2308, 2318), True, 'import matplotlib.pyplot as plt\n'), ((2639, 2660), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0, 1]'], {}), '(gs[0, 1])\n', (2650, 2660), True, 'import matplotlib.pyplot as plt\n'), ((2675, 2696), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0, 0]'], {}), '(gs[0, 0])\n', (2686, 2696), True, 'import matplotlib.pyplot as plt\n'), ((2706, 2730), 'numpy.linspace', 'np.linspace', (['(1)', '(100)', '(100)'], {}), '(1, 100, 100)\n', (2717, 2730), True, 'import numpy as np\n'), ((2865, 2882), 'numpy.array', 'np.array', (['thright'], {}), '(thright)\n', (2873, 2882), True, 'import numpy as np\n'), ((2351, 2370), 'numpy.array', 
'np.array', (['dropright'], {}), '(dropright)\n', (2359, 2370), True, 'import numpy as np\n'), ((2760, 2777), 'math.exp', 'exp', (['(x * (1 - la))'], {}), '(x * (1 - la))\n', (2763, 2777), False, 'from math import exp\n'), ((2783, 2807), 'numpy.linspace', 'np.linspace', (['(1)', '(100)', '(100)'], {}), '(1, 100, 100)\n', (2794, 2807), True, 'import numpy as np\n'), ((2739, 2754), 'scipy.stats.gamma.pdf', 'gamma.pdf', (['x', 'a'], {}), '(x, a)\n', (2748, 2754), False, 'from scipy.stats import gamma\n')] |
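The hand-rolled density gamma.pdf(x, a) * la**a * exp(x*(1-la)) plotted above is algebraically a gamma density with shape a and rate la; a standalone check with the same a = 1.074 and la = 1/23 makes the equivalence explicit.

# Verify that the inline expression used in the plot equals the gamma pdf
# with shape a and scale 1/la (i.e. rate la).
import numpy as np
from scipy.stats import gamma

a, la = 1.074, 1.0 / 23.0
x = np.linspace(1, 100, 100)

inline = gamma.pdf(x, a) * la**a * np.exp(x * (1 - la))  # expression from the plot call
direct = gamma.pdf(x, a, scale=1.0 / la)                # standard parameterisation

print(np.allclose(inline, direct))   # True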
# Important! : Crawling news from websites
from newsapi import NewsApiClient
#basic libraries
import pandas as pd
import numpy as np
import json
#importing datetime libarary
import datetime
from datetime import date, timedelta
import os
#extracted from GTD database
hot_keywords = ['incidents','explosives','assailants', 'attack',
                'bomber', 'bomb', 'dynamite', 'extremists', 'perpetrator']
source_list = ['abc-news', "abc-news-au", "al-jazeera-english", "ansa",
"associated-press", "australian-financial-review", "axios", "bbc-news",
"bloomberg", "breitbart-news", "buzzfeed", "cbs-news", "cnn", "fox-news",
"google-news", "msnbc", "nbc-news", "newsweek", "reuters", "the-hill",
"the-wall-street-journal", "the-washington-times", "time", "usa-today",
"vice-news"]
# Init
newsapi = NewsApiClient(api_key='fed75f65663e4cb7a02f2ae336bacd5e')
#searching datas within 30 days
end_date = date.today().strftime("%Y-%m-%d")
start_date = date.today() - datetime.timedelta(days=30)
start_date = start_date.strftime("%Y-%m-%d")
desired_dir = "./json_files"
def write_json(new_data, filename="Report.JSON"):
full_path = os.path.join(desired_dir, filename)
with open(full_path, 'w') as f:
json_string=json.dumps(new_data)
f.write(json_string)
# /v2/everything
for keyword in hot_keywords:
for source in source_list:
all_articles = newsapi.get_everything(q=keyword,
sources= source,
from_param = start_date,
to=end_date,
#domains='bbc.co.uk,techcrunch.com',
#language='en',
#page=10,
sort_by='relevancy')
print(all_articles)
print(len(all_articles['articles']))
name = keyword + " " + source + '.json'
write_json(all_articles, name)
print("Crawling finished") | [
"json.dumps",
"os.path.join",
"datetime.date.today",
"newsapi.NewsApiClient",
"datetime.timedelta"
] | [((810, 867), 'newsapi.NewsApiClient', 'NewsApiClient', ([], {'api_key': '"""fed75f65663e4cb7a02f2ae336bacd5e"""'}), "(api_key='fed75f65663e4cb7a02f2ae336bacd5e')\n", (823, 867), False, 'from newsapi import NewsApiClient\n'), ((963, 975), 'datetime.date.today', 'date.today', ([], {}), '()\n', (973, 975), False, 'from datetime import date, timedelta\n'), ((978, 1005), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(30)'}), '(days=30)\n', (996, 1005), False, 'import datetime\n'), ((1153, 1188), 'os.path.join', 'os.path.join', (['desired_dir', 'filename'], {}), '(desired_dir, filename)\n', (1165, 1188), False, 'import os\n'), ((915, 927), 'datetime.date.today', 'date.today', ([], {}), '()\n', (925, 927), False, 'from datetime import date, timedelta\n'), ((1245, 1265), 'json.dumps', 'json.dumps', (['new_data'], {}), '(new_data)\n', (1255, 1265), False, 'import json\n')] |
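write_json above assumes ./json_files already exists and will raise FileNotFoundError otherwise; a small defensive variant (illustrative, not from the original script) creates the folder on first use and is otherwise identical.

# Defensive variant of write_json: same output, but the target directory is
# created if missing instead of letting open() fail.
import json
import os

desired_dir = "./json_files"

def write_json(new_data, filename="Report.JSON"):
    os.makedirs(desired_dir, exist_ok=True)       # no-op when the folder exists
    full_path = os.path.join(desired_dir, filename)
    with open(full_path, 'w') as f:
        f.write(json.dumps(new_data))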
########
# Copyright (c) 2018-2020 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Local imports
from __future__ import unicode_literals
# Third-party imports
from mock import patch
# Local imports
from .. import stackdriver_uptimecheck
from ...tests import TestGCP
@patch('cloudify_gcp.gcp.ServiceAccountCredentials.from_json_keyfile_dict')
@patch('cloudify_gcp.gcp.build')
class TestGCPStackDriverGCP(TestGCP):
def test_create(self, mock_build, *args):
test_dict = {'a': 1, 'bb': 2, 'cc': 'cc', }
stackdriver_uptimecheck.create(
project_id='proj-id', uptime_check_config=test_dict)
mock_build().projects().uptimeCheckConfigs(
).create.assert_called_once_with(
parent='projects/proj-id', body=test_dict)
def test_delete(self, mock_build, *args):
self.ctxmock.instance.runtime_properties['name'] = 'some-name'
stackdriver_uptimecheck.delete()
mock_build().projects().uptimeCheckConfigs(
).delete.assert_called_once_with(name='some-name')
def test_update(self, mock_build, *args):
self.ctxmock.instance.runtime_properties['name'] = 'some-name'
test_dict = {'a': 1, 'bb': 2, 'cc': 'cc', }
stackdriver_uptimecheck.update(
project_id='proj-id', uptime_check_config=test_dict)
mock_build().projects().uptimeCheckConfigs(
).update.assert_called_once_with(name='some-name', body=test_dict)
| [
"mock.patch"
] | [((818, 892), 'mock.patch', 'patch', (['"""cloudify_gcp.gcp.ServiceAccountCredentials.from_json_keyfile_dict"""'], {}), "('cloudify_gcp.gcp.ServiceAccountCredentials.from_json_keyfile_dict')\n", (823, 892), False, 'from mock import patch\n'), ((894, 925), 'mock.patch', 'patch', (['"""cloudify_gcp.gcp.build"""'], {}), "('cloudify_gcp.gcp.build')\n", (899, 925), False, 'from mock import patch\n')] |
# Generated by Django 3.1.1 on 2021-05-05 14:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0008_homepage_search_image'),
]
operations = [
migrations.AddField(
model_name='homepage',
name='keywords',
field=models.CharField(blank=True, max_length=512, null=True),
),
]
| [
"django.db.models.CharField"
] | [((338, 393), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(512)', 'null': '(True)'}), '(blank=True, max_length=512, null=True)\n', (354, 393), False, 'from django.db import migrations, models\n')] |
import numpy as np
from numpy import genfromtxt
import numpy.linalg as LA
import scipy.interpolate as interpolate
import json, time, collections
from numba import njit
EPSILON = 0.00000000001
@njit(fastmath=False, cache=True)
def get_rotation_matrix(theta):
c, s = np.cos(theta), np.sin(theta)
return np.array([[c, -s], [s, c]])
@njit(fastmath=False, cache=True)
def nearest_point_on_trajectory_py2(point, trajectory):
'''
Return the nearest point along the given piecewise linear trajectory.
Same as nearest_point_on_line_segment, but vectorized. This method is quite fast, time constraints should
not be an issue so long as trajectories are not insanely long.
Order of magnitude: trajectory length: 1000 --> 0.0002 second computation (5000fps)
point: size 2 numpy array
trajectory: Nx2 matrix of (x,y) trajectory waypoints
- these must be unique. If they are not unique, a divide by 0 error will destroy the world
'''
diffs = trajectory[1:,:] - trajectory[:-1,:]
l2s = diffs[:,0]**2 + diffs[:,1]**2
# this is equivalent to the elementwise dot product
# dots = np.sum((point - trajectory[:-1,:]) * diffs[:,:], axis=1)
dots = np.empty((trajectory.shape[0]-1, ))
for i in range(dots.shape[0]):
dots[i] = np.dot((point - trajectory[i, :]), diffs[i, :])
t = dots / l2s
t[t<0.0] = 0.0
t[t>1.0] = 1.0
# t = np.clip(dots / l2s, 0.0, 1.0)
projections = trajectory[:-1,:] + (t*diffs.T).T
# dists = np.linalg.norm(point - projections, axis=1)
dists = np.empty((projections.shape[0],))
for i in range(dists.shape[0]):
temp = point - projections[i]
dists[i] = np.sqrt(np.sum(temp*temp))
min_dist_segment = np.argmin(dists)
return projections[min_dist_segment], dists[min_dist_segment], t[min_dist_segment], min_dist_segment
@njit(fastmath=False, cache=True)
def first_point_on_trajectory_intersecting_circle(point, radius, trajectory, t=0.0, wrap=False):
    ''' Starts at the beginning of the trajectory and finds the first point one radius away from the given point along the trajectory.
    Assumes that the first segment passes within a single radius of the point.
http://codereview.stackexchange.com/questions/86421/line-segment-to-circle-collision-algorithm
'''
start_i = int(t)
start_t = t % 1.0
first_t = None
first_i = None
first_p = None
trajectory = np.ascontiguousarray(trajectory)
for i in range(start_i, trajectory.shape[0]-1):
start = trajectory[i,:]
end = trajectory[i+1,:]+1e-6
V = np.ascontiguousarray(end - start)
a = np.dot(V,V)
b = 2.0*np.dot(V, start - point)
c = np.dot(start, start) + np.dot(point,point) - 2.0*np.dot(start, point) - radius*radius
discriminant = b*b-4*a*c
if discriminant < 0:
continue
# print "NO INTERSECTION"
# else:
# if discriminant >= 0.0:
discriminant = np.sqrt(discriminant)
t1 = (-b - discriminant) / (2.0*a)
t2 = (-b + discriminant) / (2.0*a)
if i == start_i:
if t1 >= 0.0 and t1 <= 1.0 and t1 >= start_t:
first_t = t1
first_i = i
first_p = start + t1 * V
break
if t2 >= 0.0 and t2 <= 1.0 and t2 >= start_t:
first_t = t2
first_i = i
first_p = start + t2 * V
break
elif t1 >= 0.0 and t1 <= 1.0:
first_t = t1
first_i = i
first_p = start + t1 * V
break
elif t2 >= 0.0 and t2 <= 1.0:
first_t = t2
first_i = i
first_p = start + t2 * V
break
    # wrap around to the beginning of the trajectory if no intersection is found
if wrap and first_p is None:
for i in range(-1, start_i):
start = trajectory[i % trajectory.shape[0],:]
end = trajectory[(i+1) % trajectory.shape[0],:]+1e-6
V = end - start
a = np.dot(V,V)
b = 2.0*np.dot(V, start - point)
c = np.dot(start, start) + np.dot(point,point) - 2.0*np.dot(start, point) - radius*radius
discriminant = b*b-4*a*c
if discriminant < 0:
continue
discriminant = np.sqrt(discriminant)
t1 = (-b - discriminant) / (2.0*a)
t2 = (-b + discriminant) / (2.0*a)
if t1 >= 0.0 and t1 <= 1.0:
first_t = t1
first_i = i
first_p = start + t1 * V
break
elif t2 >= 0.0 and t2 <= 1.0:
first_t = t2
first_i = i
first_p = start + t2 * V
break
return first_p, first_i, first_t
# print min_dist_segment, dists[min_dist_segment], projections[min_dist_segment]
# @njit(fastmath=False, cache=True)
def get_actuation(pose_theta, lookahead_point, position, lookahead_distance, wheelbase):
waypoint_y = np.dot(np.array([np.cos(pose_theta), np.sin(-pose_theta)]), lookahead_point[0:2]-position)
# waypoint_y = np.dot(np.array([np.sin(-pose_theta), np.cos(-pose_theta)]), lookahead_point[0:2]-position)
speed = lookahead_point[2]
if np.abs(waypoint_y) < 1e-6:
return speed, 0.
radius = 1/(2.0*waypoint_y/lookahead_distance**2)
steering_angle = np.arctan(wheelbase/radius)
return speed, steering_angle | [
"numpy.abs",
"numpy.sqrt",
"numpy.sin",
"numba.njit",
"numpy.ascontiguousarray",
"numpy.array",
"numpy.dot",
"numpy.sum",
"numpy.empty",
"numpy.cos",
"numpy.argmin",
"numpy.arctan"
] | [((197, 229), 'numba.njit', 'njit', ([], {'fastmath': '(False)', 'cache': '(True)'}), '(fastmath=False, cache=True)\n', (201, 229), False, 'from numba import njit\n'), ((344, 376), 'numba.njit', 'njit', ([], {'fastmath': '(False)', 'cache': '(True)'}), '(fastmath=False, cache=True)\n', (348, 376), False, 'from numba import njit\n'), ((1866, 1898), 'numba.njit', 'njit', ([], {'fastmath': '(False)', 'cache': '(True)'}), '(fastmath=False, cache=True)\n', (1870, 1898), False, 'from numba import njit\n'), ((313, 340), 'numpy.array', 'np.array', (['[[c, -s], [s, c]]'], {}), '([[c, -s], [s, c]])\n', (321, 340), True, 'import numpy as np\n'), ((1209, 1245), 'numpy.empty', 'np.empty', (['(trajectory.shape[0] - 1,)'], {}), '((trajectory.shape[0] - 1,))\n', (1217, 1245), True, 'import numpy as np\n'), ((1565, 1598), 'numpy.empty', 'np.empty', (['(projections.shape[0],)'], {}), '((projections.shape[0],))\n', (1573, 1598), True, 'import numpy as np\n'), ((1742, 1758), 'numpy.argmin', 'np.argmin', (['dists'], {}), '(dists)\n', (1751, 1758), True, 'import numpy as np\n'), ((2427, 2459), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['trajectory'], {}), '(trajectory)\n', (2447, 2459), True, 'import numpy as np\n'), ((5431, 5460), 'numpy.arctan', 'np.arctan', (['(wheelbase / radius)'], {}), '(wheelbase / radius)\n', (5440, 5460), True, 'import numpy as np\n'), ((273, 286), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (279, 286), True, 'import numpy as np\n'), ((288, 301), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (294, 301), True, 'import numpy as np\n'), ((1298, 1343), 'numpy.dot', 'np.dot', (['(point - trajectory[i, :])', 'diffs[i, :]'], {}), '(point - trajectory[i, :], diffs[i, :])\n', (1304, 1343), True, 'import numpy as np\n'), ((2593, 2626), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['(end - start)'], {}), '(end - start)\n', (2613, 2626), True, 'import numpy as np\n'), ((2640, 2652), 'numpy.dot', 'np.dot', (['V', 'V'], {}), '(V, V)\n', (2646, 2652), True, 'import numpy as np\n'), ((2984, 3005), 'numpy.sqrt', 'np.sqrt', (['discriminant'], {}), '(discriminant)\n', (2991, 3005), True, 'import numpy as np\n'), ((5304, 5322), 'numpy.abs', 'np.abs', (['waypoint_y'], {}), '(waypoint_y)\n', (5310, 5322), True, 'import numpy as np\n'), ((1700, 1719), 'numpy.sum', 'np.sum', (['(temp * temp)'], {}), '(temp * temp)\n', (1706, 1719), True, 'import numpy as np\n'), ((2668, 2692), 'numpy.dot', 'np.dot', (['V', '(start - point)'], {}), '(V, start - point)\n', (2674, 2692), True, 'import numpy as np\n'), ((4077, 4089), 'numpy.dot', 'np.dot', (['V', 'V'], {}), '(V, V)\n', (4083, 4089), True, 'import numpy as np\n'), ((4359, 4380), 'numpy.sqrt', 'np.sqrt', (['discriminant'], {}), '(discriminant)\n', (4366, 4380), True, 'import numpy as np\n'), ((4109, 4133), 'numpy.dot', 'np.dot', (['V', '(start - point)'], {}), '(V, start - point)\n', (4115, 4133), True, 'import numpy as np\n'), ((5081, 5099), 'numpy.cos', 'np.cos', (['pose_theta'], {}), '(pose_theta)\n', (5087, 5099), True, 'import numpy as np\n'), ((5101, 5120), 'numpy.sin', 'np.sin', (['(-pose_theta)'], {}), '(-pose_theta)\n', (5107, 5120), True, 'import numpy as np\n'), ((2705, 2725), 'numpy.dot', 'np.dot', (['start', 'start'], {}), '(start, start)\n', (2711, 2725), True, 'import numpy as np\n'), ((2728, 2748), 'numpy.dot', 'np.dot', (['point', 'point'], {}), '(point, point)\n', (2734, 2748), True, 'import numpy as np\n'), ((2754, 2774), 'numpy.dot', 'np.dot', (['start', 'point'], {}), '(start, point)\n', (2760, 2774), True, 
'import numpy as np\n'), ((4150, 4170), 'numpy.dot', 'np.dot', (['start', 'start'], {}), '(start, start)\n', (4156, 4170), True, 'import numpy as np\n'), ((4173, 4193), 'numpy.dot', 'np.dot', (['point', 'point'], {}), '(point, point)\n', (4179, 4193), True, 'import numpy as np\n'), ((4199, 4219), 'numpy.dot', 'np.dot', (['start', 'point'], {}), '(start, point)\n', (4205, 4219), True, 'import numpy as np\n')] |
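The projection math inside nearest_point_on_trajectory_py2 above can be followed on a toy example; this sketch repeats it in plain numpy (no numba) on a two-segment path with a made-up query point.

# Plain-numpy walk-through of the segment projection used in
# nearest_point_on_trajectory_py2, on an L-shaped two-segment path.
import numpy as np

trajectory = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0]])
point = np.array([0.6, 0.3])

diffs = trajectory[1:] - trajectory[:-1]                    # segment direction vectors
l2s = (diffs ** 2).sum(axis=1)                              # squared segment lengths
t = ((point - trajectory[:-1]) * diffs).sum(axis=1) / l2s   # parameter of the foot point
t = np.clip(t, 0.0, 1.0)                                    # clamp onto the segment
projections = trajectory[:-1] + t[:, None] * diffs
dists = np.linalg.norm(point - projections, axis=1)
i = int(np.argmin(dists))

print(projections[i], dists[i], t[i], i)   # [0.6 0. ] 0.3 0.6 0 -> closest on the first segment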
from PySide2 import QtWidgets, QtGui, QtCore
from skinning import gui
from skinning.utils import undo
from skinning.tools.delinear_weights import commands
WINDOW_TITLE = "De-Linearize Weights"
WINDOW_ICON = gui.get_icon_file_path("ST_delinearWeights.png")
class DelinearWeightsWidget(QtWidgets.QWidget):
def __init__(self, parent):
super(DelinearWeightsWidget, self).__init__(parent)
scale_factor = self.logicalDpiX() / 96.0
self.setWindowFlags(QtCore.Qt.Window)
self.setWindowTitle(WINDOW_TITLE)
self.setWindowIcon(QtGui.QIcon(WINDOW_ICON))
self.resize(400 * scale_factor, 25 * scale_factor)
# create layout
layout = QtWidgets.QGridLayout(self)
layout.setContentsMargins(5, 5, 5, 5)
layout.setSpacing(5)
# create tweening
delinear_method_label = QtWidgets.QLabel(self)
delinear_method_label.setText("De-linearize method:")
self.delinear_method = gui.widgets.EasingWidget(self)
layout.addWidget(delinear_method_label, 0, 0)
layout.addWidget(self.delinear_method, 0, 1)
# create divider
divider = gui.widgets.DividerWidget(self)
layout.addWidget(divider, 1, 0, 1, 2)
# create button
apply_button = QtWidgets.QPushButton(self)
apply_button.setText("Apply")
apply_button.released.connect(self.apply)
layout.addWidget(apply_button, 2, 0, 1, 2)
@gui.display_error
def apply(self):
with gui.WaitCursor():
with undo.UndoChunk():
method = self.delinear_method.currentText()
commands.delinear_weights_on_selection(method)
def show():
parent = gui.get_main_window()
widget = DelinearWeightsWidget(parent)
widget.show()
| [
"PySide2.QtWidgets.QGridLayout",
"PySide2.QtWidgets.QPushButton",
"skinning.utils.undo.UndoChunk",
"skinning.gui.widgets.EasingWidget",
"PySide2.QtGui.QIcon",
"skinning.gui.get_main_window",
"skinning.tools.delinear_weights.commands.delinear_weights_on_selection",
"PySide2.QtWidgets.QLabel",
"skinning.gui.widgets.DividerWidget",
"skinning.gui.WaitCursor",
"skinning.gui.get_icon_file_path"
] | [((210, 258), 'skinning.gui.get_icon_file_path', 'gui.get_icon_file_path', (['"""ST_delinearWeights.png"""'], {}), "('ST_delinearWeights.png')\n", (232, 258), False, 'from skinning import gui\n'), ((1715, 1736), 'skinning.gui.get_main_window', 'gui.get_main_window', ([], {}), '()\n', (1734, 1736), False, 'from skinning import gui\n'), ((693, 720), 'PySide2.QtWidgets.QGridLayout', 'QtWidgets.QGridLayout', (['self'], {}), '(self)\n', (714, 720), False, 'from PySide2 import QtWidgets, QtGui, QtCore\n'), ((855, 877), 'PySide2.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self'], {}), '(self)\n', (871, 877), False, 'from PySide2 import QtWidgets, QtGui, QtCore\n'), ((971, 1001), 'skinning.gui.widgets.EasingWidget', 'gui.widgets.EasingWidget', (['self'], {}), '(self)\n', (995, 1001), False, 'from skinning import gui\n'), ((1153, 1184), 'skinning.gui.widgets.DividerWidget', 'gui.widgets.DividerWidget', (['self'], {}), '(self)\n', (1178, 1184), False, 'from skinning import gui\n'), ((1287, 1314), 'PySide2.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self'], {}), '(self)\n', (1308, 1314), False, 'from PySide2 import QtWidgets, QtGui, QtCore\n'), ((566, 590), 'PySide2.QtGui.QIcon', 'QtGui.QIcon', (['WINDOW_ICON'], {}), '(WINDOW_ICON)\n', (577, 590), False, 'from PySide2 import QtWidgets, QtGui, QtCore\n'), ((1512, 1528), 'skinning.gui.WaitCursor', 'gui.WaitCursor', ([], {}), '()\n', (1526, 1528), False, 'from skinning import gui\n'), ((1547, 1563), 'skinning.utils.undo.UndoChunk', 'undo.UndoChunk', ([], {}), '()\n', (1561, 1563), False, 'from skinning.utils import undo\n'), ((1641, 1687), 'skinning.tools.delinear_weights.commands.delinear_weights_on_selection', 'commands.delinear_weights_on_selection', (['method'], {}), '(method)\n', (1679, 1687), False, 'from skinning.tools.delinear_weights import commands\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 28 16:58:54 2021
@author: Lenovo
"""
import os
import pickle
path_use = os.getcwd()
path=path_use.replace(os.sep, '/')
path=path + "/" + "dictionary_info.pkl"
sha = pickle.load(open(path, "rb")) | [
"os.getcwd"
] | [((121, 132), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (130, 132), False, 'import os\n')] |
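The loader above builds its path by string replacement and leaves the pickle file handle open; an equivalent sketch with pathlib and a context manager does the same job and closes the file deterministically.

# pathlib/context-manager equivalent of the loader above.
import pickle
from pathlib import Path

path = Path.cwd() / "dictionary_info.pkl"
with open(path, "rb") as fh:
    sha = pickle.load(fh)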
## Author : <NAME>, February 2020
from torchvision.models.detection import fasterrcnn_resnet50_fpn
from torchvision.models import densenet161
import torch
from torchvision.io import read_video,read_video_timestamps
import torchvision.transforms as transforms
import objectdetection
import placesCNN_basic
import torch.nn as nn
from importlib import reload
from tqdm import tqdm
import os
import sys
videofile = sys.argv[1]
srtfile = (videofile[:-3] + 'srt')
if os.path.isfile(srtfile):
os.remove(srtfile)
normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
# prepare the image transformer for Places Network
centre_crop = transforms.Compose([
transforms.ToPILImage(),
transforms.Resize((256,256)),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize
])
COCO_INSTANCE_CATEGORY_NAMES = [
'__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A', 'stop sign',
'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella', 'N/A', 'N/A',
'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
'bottle', 'N/A', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table',
'N/A', 'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book',
'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'
] ### COCO Categories
categories = objectdetection.categories ### ImageNet Categories
places_categories= placesCNN_basic.classes ### Places Categories
### Define and register hook for extracting output feature map
#visualisation = []
#def hook_fn(m, i, o):
# visualisation.append(o)
#model.roi_heads.box_predictor.cls_score.register_forward_hook(hook_fn)
fps = 24
nb_frames = 1
nbsec = 3
n_obj = 3
beg_film = 1
end_film = 600
allpreds = []
onsets = []
model = fasterrcnn_resnet50_fpn(pretrained=True)
model.eval()
model_imagenet = densenet161(pretrained=True)
model_imagenet.eval()
model_places = placesCNN_basic.model.eval()
n=0
with torch.no_grad():
for curstart in tqdm(range(beg_film,end_film,nbsec)):
start = curstart
end = start + (nb_frames/fps)
vframes, aframes, info = read_video(filename=videofile,start_pts = start,end_pts=end,pts_unit='sec')
vframes = vframes.permute(0,3,1,2).float() / 255
_,H,C,V = vframes.shape
### make prediction for Places
im_norm = centre_crop(vframes[0]).reshape(1,3,224,224)
preds_places = model_places(im_norm)
### make prediction for Imagenet classification
im_norm = normalize(vframes[0]).reshape(1,H,C,V)
preds_class= model_imagenet(im_norm)
### make predictions for object detection
preds = model(vframes)
### Associate Detection labels to prediction and keep only the first n_obj
predlabels_det = [COCO_INSTANCE_CATEGORY_NAMES[i] for i in preds[0]['labels'].numpy()[:n_obj]]
### Associate Classification labels to prediction
allclasses = preds_class.data.numpy()[0]
# process output of Imagenet Classes and print results:
order = allclasses.argsort()
last = len(categories)-1
text = ''
for i in range(min(3, last+1)):
text += categories[order[last-i]]
text += ', '
text=text[:-2]
# process output of Places Classes and print results:
_, idx = preds_places[0].sort(0, True)
textplaces = ''
# output the prediction
for i in range(0, 5):
textplaces += places_categories[idx[i]]
textplaces += ', '
textplaces = textplaces[:-2]
### Generate final string
annotation_str = "PLACES: {places} \nCOCO : {coco} \nImageNet : {net}".format(places=textplaces,coco=str(predlabels_det),net=text)
#print(annotation_str)
### Append to srt file with timecode
objectdetection.gen_srt(annotation_str,start,srtfile=srtfile,num=n)
n=n+1 | [
"torchvision.transforms.CenterCrop",
"placesCNN_basic.model.eval",
"torchvision.transforms.ToPILImage",
"torchvision.models.densenet161",
"os.path.isfile",
"torchvision.io.read_video",
"torchvision.models.detection.fasterrcnn_resnet50_fpn",
"torchvision.transforms.Normalize",
"objectdetection.gen_srt",
"torchvision.transforms.Resize",
"torch.no_grad",
"torchvision.transforms.ToTensor",
"os.remove"
] | [((464, 487), 'os.path.isfile', 'os.path.isfile', (['srtfile'], {}), '(srtfile)\n', (478, 487), False, 'import os\n'), ((525, 591), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (545, 591), True, 'import torchvision.transforms as transforms\n'), ((2345, 2385), 'torchvision.models.detection.fasterrcnn_resnet50_fpn', 'fasterrcnn_resnet50_fpn', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (2368, 2385), False, 'from torchvision.models.detection import fasterrcnn_resnet50_fpn\n'), ((2417, 2445), 'torchvision.models.densenet161', 'densenet161', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (2428, 2445), False, 'from torchvision.models import densenet161\n'), ((2484, 2512), 'placesCNN_basic.model.eval', 'placesCNN_basic.model.eval', ([], {}), '()\n', (2510, 2512), False, 'import placesCNN_basic\n'), ((493, 511), 'os.remove', 'os.remove', (['srtfile'], {}), '(srtfile)\n', (502, 511), False, 'import os\n'), ((2525, 2540), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2538, 2540), False, 'import torch\n'), ((687, 710), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (708, 710), True, 'import torchvision.transforms as transforms\n'), ((720, 749), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256, 256)'], {}), '((256, 256))\n', (737, 749), True, 'import torchvision.transforms as transforms\n'), ((758, 784), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (779, 784), True, 'import torchvision.transforms as transforms\n'), ((794, 815), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (813, 815), True, 'import torchvision.transforms as transforms\n'), ((2702, 2778), 'torchvision.io.read_video', 'read_video', ([], {'filename': 'videofile', 'start_pts': 'start', 'end_pts': 'end', 'pts_unit': '"""sec"""'}), "(filename=videofile, start_pts=start, end_pts=end, pts_unit='sec')\n", (2712, 2778), False, 'from torchvision.io import read_video, read_video_timestamps\n'), ((4439, 4509), 'objectdetection.gen_srt', 'objectdetection.gen_srt', (['annotation_str', 'start'], {'srtfile': 'srtfile', 'num': 'n'}), '(annotation_str, start, srtfile=srtfile, num=n)\n', (4462, 4509), False, 'import objectdetection\n')] |
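The per-frame tensor handling in the loop above (uint8 T x H x W x C frames from read_video turned into normalised 1 x C x H x W batches) can be isolated on a dummy frame; read_video is replaced by a random tensor so the sketch runs without a video file, and the 480x640 frame size is arbitrary.

# Isolated sketch of the frame preprocessing done per iteration above,
# using a random uint8 "frame" instead of torchvision.io.read_video output.
import torch
import torchvision.transforms as transforms

normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])

vframes = torch.randint(0, 256, (1, 480, 640, 3), dtype=torch.uint8)  # (T, H, W, C) like read_video
vframes = vframes.permute(0, 3, 1, 2).float() / 255                    # -> (T, C, H, W) in [0, 1]

# equivalent to normalize(vframes[0]).reshape(1, H, C, V) in the loop above
# (the names H, C, V there actually hold C, H, W after the permute)
im_norm = normalize(vframes[0]).unsqueeze(0)
print(im_norm.shape)   # torch.Size([1, 3, 480, 640])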
import os
import logging
def exitwitherror(msg, code, program=''):
logger = logging.getLogger()
logger.error(msg)
with open('_FAILED.txt', 'a') as f:
f.write(program + ': ' + msg + '\n') # python will convert \n to os.linesep
print(program + ': ' + msg)
exit(code)
def setuplogger(log='log'):
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler(os.path.join(os.getcwd(), "log", log + '.log'), 'w')
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s: %(levelname)s - %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger | [
"logging.getLogger",
"logging.Formatter",
"os.getcwd"
] | [((87, 106), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (104, 106), False, 'import logging\n'), ((353, 372), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (370, 372), False, 'import logging\n'), ((541, 602), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s: %(levelname)s - %(message)s"""'], {}), "('%(asctime)s: %(levelname)s - %(message)s')\n", (558, 602), False, 'import logging\n'), ((452, 463), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (461, 463), False, 'import os\n')] |
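setuplogger above attaches a FileHandler under <cwd>/log/<name>.log but never creates that folder, so the call fails on a fresh checkout; a self-contained sketch of equivalent setup with the directory created first is shown below (logger name and message are illustrative).

# Self-contained equivalent of setuplogger("run") with the missing prerequisite
# made explicit: logging.FileHandler does not create parent directories.
import logging
import os

log_dir = os.path.join(os.getcwd(), "log")
os.makedirs(log_dir, exist_ok=True)

logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler(os.path.join(log_dir, "run.log"), 'w')
fh.setLevel(logging.DEBUG)
fh.setFormatter(logging.Formatter('%(asctime)s: %(levelname)s - %(message)s'))
logger.addHandler(fh)

logger.info("pipeline started")   # written to ./log/run.log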
from cms.plugin_pool import plugin_pool
from django.utils.translation import gettext_lazy as _
from ... import settings
from ...cms_plugins import CMSUIPlugin
from ...common.attributes import AttributesMixin
from ...common.background import BackgroundMixin
from ...common.responsive import ResponsiveMixin
from ...common.spacing import MarginMixin, PaddingMixin
from ...helpers import add_plugin
from .. import card
from . import forms, models
mixin_factory = settings.get_renderer(card)
@plugin_pool.register_plugin
class CardLayoutPlugin(mixin_factory("CardLayout"), AttributesMixin, CMSUIPlugin):
"""
Components > "Card" Plugin
https://getbootstrap.com/docs/5.0/components/card/
"""
name = _("Card layout")
module = _("Frontend")
model = models.CardLayout
form = forms.CardLayoutForm
change_form_template = "djangocms_frontend/admin/card_layout.html"
allow_children = True
child_classes = [
"CardPlugin",
]
fieldsets = [
(
None,
{
"fields": (
(
"card_type",
"create",
),
)
},
),
(
_("Responsive settings"),
{
"fields": ([f"row_cols_{size}" for size in settings.DEVICE_SIZES],),
},
),
]
def save_model(self, request, obj, form, change):
super().save_model(request, obj, form, change)
data = form.cleaned_data
for pos in range(data["create"] if data["create"] is not None else 0):
add_plugin(
obj.placeholder,
models.Card(
parent=obj,
placeholder=obj.placeholder,
position=obj.position + 1 + pos,
language=obj.language,
plugin_type=CardPlugin.__name__,
ui_item=models.Card.__class__.__name__,
).initialize_from_form(forms.CardForm),
)
@plugin_pool.register_plugin
class CardPlugin(
mixin_factory("Card"),
AttributesMixin,
ResponsiveMixin,
MarginMixin,
BackgroundMixin,
CMSUIPlugin,
):
"""
Components > "Card" Plugin
https://getbootstrap.com/docs/5.0/components/card/
"""
name = _("Card")
module = _("Frontend")
model = models.Card
form = forms.CardForm
change_form_template = "djangocms_frontend/admin/card.html"
allow_children = True
child_classes = [
"CardInnerPlugin",
"ListGroupPlugin",
"ImagePlugin",
"GridRowPlugin",
]
fieldsets = [
(
None,
{
"fields": (
"card_alignment",
(
"card_text_color",
"card_outline",
),
"card_full_height",
)
},
),
]
def save_model(self, request, obj, form, change):
super().save_model(request, obj, form, change)
if not change:
add_plugin(
obj.placeholder,
models.CardInner(
parent=obj,
position=obj.position + 1,
placeholder=obj.placeholder,
language=obj.language,
plugin_type=CardInnerPlugin.__name__,
ui_item=models.CardInner.__class__.__name__,
config=dict(inner_type="card-body"),
),
)
@plugin_pool.register_plugin
class CardInnerPlugin(
mixin_factory("CardInner"),
AttributesMixin,
ResponsiveMixin,
PaddingMixin,
BackgroundMixin,
CMSUIPlugin,
):
"""
Components > "Card - Inner" Plugin (Header, Footer, Body)
https://getbootstrap.com/docs/5.0/components/card/
"""
name = _("Card inner")
module = _("Frontend")
model = models.CardInner
form = forms.CardInnerForm
change_form_template = "djangocms_frontend/admin/card.html"
allow_children = True
parent_classes = [
"CardPlugin",
"CollapseTriggerPlugin",
"CollapseContainerPlugin",
"GridColumnPlugin",
]
fieldsets = [
(
None,
{
"fields": (
"inner_type",
"text_alignment",
)
},
),
]
| [
"django.utils.translation.gettext_lazy"
] | [((718, 734), 'django.utils.translation.gettext_lazy', '_', (['"""Card layout"""'], {}), "('Card layout')\n", (719, 734), True, 'from django.utils.translation import gettext_lazy as _\n'), ((748, 761), 'django.utils.translation.gettext_lazy', '_', (['"""Frontend"""'], {}), "('Frontend')\n", (749, 761), True, 'from django.utils.translation import gettext_lazy as _\n'), ((2357, 2366), 'django.utils.translation.gettext_lazy', '_', (['"""Card"""'], {}), "('Card')\n", (2358, 2366), True, 'from django.utils.translation import gettext_lazy as _\n'), ((2380, 2393), 'django.utils.translation.gettext_lazy', '_', (['"""Frontend"""'], {}), "('Frontend')\n", (2381, 2393), True, 'from django.utils.translation import gettext_lazy as _\n'), ((3949, 3964), 'django.utils.translation.gettext_lazy', '_', (['"""Card inner"""'], {}), "('Card inner')\n", (3950, 3964), True, 'from django.utils.translation import gettext_lazy as _\n'), ((3978, 3991), 'django.utils.translation.gettext_lazy', '_', (['"""Frontend"""'], {}), "('Frontend')\n", (3979, 3991), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1242, 1266), 'django.utils.translation.gettext_lazy', '_', (['"""Responsive settings"""'], {}), "('Responsive settings')\n", (1243, 1266), True, 'from django.utils.translation import gettext_lazy as _\n')] |
# -*- coding: utf-8 -*-
import os
# import click
# import logging
# from dotenv import find_dotenv, load_dotenv
import numpy as np
import pandas as pd
import sklearn.preprocessing
def create_dummies(df, column):
ndf = pd.get_dummies(df[column], prefix=column, dummy_na=True)
df = df.drop(column, axis=1)
df = df.join(ndf)
return df
# @click.command()
# @click.argument('input_filepath', type=click.Path(exists=True))
# @click.argument('output_filepath', type=click.Path())
def main(input_filepath=0, output_filepath=0):
""" Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
# logger = logging.getLogger(__name__)
# logger.info('making final data set from raw data')
train = load_df('train.csv')
test = load_df('test.csv')
train_test = pd.concat([train, test], axis=0)
train_test.reset_index(drop=True, inplace=True)
print('\noriginal datasets:')
print('train', train.shape)
print('test', test.shape)
print('train_test', train_test.shape)
train_test = create_dummies(train_test, 'MSSubClass')
for column in train_test.select_dtypes(exclude=[np.number]).columns:
train_test = create_dummies(train_test, column)
train_test['2ndFlrSF'].fillna(0, inplace=True)
imp = sklearn.preprocessing.Imputer()
train_test = pd.DataFrame(imp.fit_transform(train_test), columns=train_test.columns)
train_length = train.index.shape[0]
train = train_test.iloc[:train_length].copy()
test = train_test.iloc[train_length:].copy()
test.drop(columns=['SalePrice'], inplace=True)
print('\nprocessed datasets:')
print('train', train.shape)
print('test', test.shape)
print('train_test', train_test.shape)
save_df(train, 'train.hdf5')
save_df(test, 'test.hdf5')
def load_df(filename):
"""
    Loads a raw csv file from data/raw/<filename>
    :return: the loaded DataFrame
:rtype: pandas DataFrame
"""
final_path = os.path.join(get_data_path(), 'raw/' + filename)
return pd.read_csv(final_path)
def save_df(df, filename):
"""
Saves DataFrame in hdf5 with name 'train.hdf5'
:param df: DataFrame to be Saved
"""
final_path = os.path.join(get_data_path(), 'processed')
if not os.path.exists(final_path):
os.mkdir(final_path)
final_path = os.path.join(final_path, filename)
df.to_hdf(final_path, 'processed_data')
def get_data_path():
# Get current absolute path
absolute_path = os.path.abspath(__file__)
# Get parent path
project_root = os.path.dirname(
os.path.dirname(os.path.dirname(absolute_path)))
# Get final path (absolute path for '../../data/raw/'
final_path = os.path.join(project_root, 'data/')
return final_path
if __name__ == '__main__':
# log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
# logging.basicConfig(level=logging.INFO, format=log_fmt)
# not used in this stub but often useful for finding various files
# project_dir = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)
# find .env automagically by walking up directories until it's found, then
# load up the .env entries as environment variables
# load_dotenv(find_dotenv())
main()
| [
"os.path.exists",
"pandas.read_csv",
"os.path.join",
"os.path.abspath",
"os.path.dirname",
"os.mkdir",
"pandas.get_dummies",
"pandas.concat"
] | [((224, 280), 'pandas.get_dummies', 'pd.get_dummies', (['df[column]'], {'prefix': 'column', 'dummy_na': '(True)'}), '(df[column], prefix=column, dummy_na=True)\n', (238, 280), True, 'import pandas as pd\n'), ((870, 902), 'pandas.concat', 'pd.concat', (['[train, test]'], {'axis': '(0)'}), '([train, test], axis=0)\n', (879, 902), True, 'import pandas as pd\n'), ((2055, 2078), 'pandas.read_csv', 'pd.read_csv', (['final_path'], {}), '(final_path)\n', (2066, 2078), True, 'import pandas as pd\n'), ((2359, 2393), 'os.path.join', 'os.path.join', (['final_path', 'filename'], {}), '(final_path, filename)\n', (2371, 2393), False, 'import os\n'), ((2514, 2539), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (2529, 2539), False, 'import os\n'), ((2732, 2767), 'os.path.join', 'os.path.join', (['project_root', '"""data/"""'], {}), "(project_root, 'data/')\n", (2744, 2767), False, 'import os\n'), ((2284, 2310), 'os.path.exists', 'os.path.exists', (['final_path'], {}), '(final_path)\n', (2298, 2310), False, 'import os\n'), ((2320, 2340), 'os.mkdir', 'os.mkdir', (['final_path'], {}), '(final_path)\n', (2328, 2340), False, 'import os\n'), ((2623, 2653), 'os.path.dirname', 'os.path.dirname', (['absolute_path'], {}), '(absolute_path)\n', (2638, 2653), False, 'import os\n')] |
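create_dummies above replaces a column with one indicator column per value plus an explicit NaN indicator (dummy_na=True); a toy frame with made-up values shows the effect on the column list.

# Toy demonstration of create_dummies: the encoded column disappears and
# <col>_<value> indicator columns (plus <col>_nan) are appended.
import numpy as np
import pandas as pd

def create_dummies(df, column):
    ndf = pd.get_dummies(df[column], prefix=column, dummy_na=True)
    df = df.drop(column, axis=1)
    df = df.join(ndf)
    return df

df = pd.DataFrame({"MSZoning": ["RL", "RM", np.nan, "RL"],
              "LotArea": [8450, 9600, 11250, 9550]})
print(create_dummies(df, "MSZoning").columns.tolist())
# ['LotArea', 'MSZoning_RL', 'MSZoning_RM', 'MSZoning_nan']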
# This is a helper script written by <NAME> and used by <NAME> for plotting the vehicle geometry.
# Original script is from: https://github.com/nkapania/Wolverine/blob/master/utils/sim_lib.py
import numpy as np
def plotVehicle(posE, posN, psi, delta, a, b, d, rW):
# VG EDIT: use psi (yaw angle) to be compatible with this code.
#returns position of vehicle frame given coordinates of vehicle cg, steer angle, and dimensions a, b, d, and rW
FrontAxle_Center_x = posE + a*np.cos(psi);
FrontAxle_Center_y = posN + a*np.sin(psi);
RearAxle_Center_x = posE - b*np.cos(psi);
RearAxle_Center_y = posN - b*np.sin(psi);
FrontAxle_Right_x = FrontAxle_Center_x + (d/2)*np.sin(psi);
FrontAxle_Right_y = FrontAxle_Center_y - (d/2)*np.cos(psi);
FrontAxle_Left_x = FrontAxle_Center_x - (d/2)*np.sin(psi);
FrontAxle_Left_y = FrontAxle_Center_y + (d/2)*np.cos(psi);
RearAxle_Right_x = RearAxle_Center_x + (d/2)*np.sin(psi);
RearAxle_Right_y = RearAxle_Center_y - (d/2)*np.cos(psi);
RearAxle_Left_x = RearAxle_Center_x - (d/2)*np.sin(psi);
RearAxle_Left_y = RearAxle_Center_y + (d/2)*np.cos(psi);
RightFrontTire_Front_x = FrontAxle_Right_x + rW*np.cos(psi+delta);
RightFrontTire_Front_y = FrontAxle_Right_y + rW*np.sin(psi+delta);
RightFrontTire_Back_x = FrontAxle_Right_x - rW*np.cos(psi+delta);
RightFrontTire_Back_y = FrontAxle_Right_y - rW*np.sin(psi+delta);
RightRearTire_Front_x = RearAxle_Right_x + rW*np.cos(psi);
RightRearTire_Front_y = RearAxle_Right_y + rW*np.sin(psi);
RightRearTire_Back_x = RearAxle_Right_x - rW*np.cos(psi);
RightRearTire_Back_y = RearAxle_Right_y - rW*np.sin(psi);
LeftFrontTire_Front_x = FrontAxle_Left_x + rW*np.cos(psi+delta);
LeftFrontTire_Front_y = FrontAxle_Left_y + rW*np.sin(psi+delta);
LeftFrontTire_Back_x = FrontAxle_Left_x - rW*np.cos(psi+delta);
LeftFrontTire_Back_y = FrontAxle_Left_y - rW*np.sin(psi+delta);
LeftRearTire_Front_x = RearAxle_Left_x + rW*np.cos(psi);
LeftRearTire_Front_y = RearAxle_Left_y + rW*np.sin(psi);
LeftRearTire_Back_x = RearAxle_Left_x - rW*np.cos(psi);
LeftRearTire_Back_y = RearAxle_Left_y - rW*np.sin(psi);
FrontBody = np.array([[posE, FrontAxle_Center_x], [posN, FrontAxle_Center_y]]).squeeze()
RearBody = np.array([[posE, RearAxle_Center_x] , [posN, RearAxle_Center_y]]).squeeze()
FrontAxle = np.array([[FrontAxle_Left_x, FrontAxle_Right_x], [FrontAxle_Left_y, FrontAxle_Right_y]]).squeeze()
RearAxle = np.array([[RearAxle_Left_x, RearAxle_Right_x], [RearAxle_Left_y, RearAxle_Right_y]]).squeeze()
RightFrontTire = np.array([[RightFrontTire_Front_x, RightFrontTire_Back_x], [RightFrontTire_Front_y, RightFrontTire_Back_y]]).squeeze()
RightRearTire = np.array([[RightRearTire_Front_x, RightRearTire_Back_x], [RightRearTire_Front_y, RightRearTire_Back_y]]).squeeze()
LeftFrontTire = np.array([[LeftFrontTire_Front_x, LeftFrontTire_Back_x], [LeftFrontTire_Front_y, LeftFrontTire_Back_y]]).squeeze()
LeftRearTire = np.array([[LeftRearTire_Front_x, LeftRearTire_Back_x], [LeftRearTire_Front_y, LeftRearTire_Back_y]]).squeeze()
return FrontBody, RearBody, FrontAxle, RearAxle, RightFrontTire, RightRearTire, LeftFrontTire, LeftRearTire
| [
"numpy.sin",
"numpy.array",
"numpy.cos"
] | [((478, 489), 'numpy.cos', 'np.cos', (['psi'], {}), '(psi)\n', (484, 489), True, 'import numpy as np\n'), ((522, 533), 'numpy.sin', 'np.sin', (['psi'], {}), '(psi)\n', (528, 533), True, 'import numpy as np\n'), ((566, 577), 'numpy.cos', 'np.cos', (['psi'], {}), '(psi)\n', (572, 577), True, 'import numpy as np\n'), ((609, 620), 'numpy.sin', 'np.sin', (['psi'], {}), '(psi)\n', (615, 620), True, 'import numpy as np\n'), ((671, 682), 'numpy.sin', 'np.sin', (['psi'], {}), '(psi)\n', (677, 682), True, 'import numpy as np\n'), ((732, 743), 'numpy.cos', 'np.cos', (['psi'], {}), '(psi)\n', (738, 743), True, 'import numpy as np\n'), ((793, 804), 'numpy.sin', 'np.sin', (['psi'], {}), '(psi)\n', (799, 804), True, 'import numpy as np\n'), ((853, 864), 'numpy.cos', 'np.cos', (['psi'], {}), '(psi)\n', (859, 864), True, 'import numpy as np\n'), ((913, 924), 'numpy.sin', 'np.sin', (['psi'], {}), '(psi)\n', (919, 924), True, 'import numpy as np\n'), ((972, 983), 'numpy.cos', 'np.cos', (['psi'], {}), '(psi)\n', (978, 983), True, 'import numpy as np\n'), ((1031, 1042), 'numpy.sin', 'np.sin', (['psi'], {}), '(psi)\n', (1037, 1042), True, 'import numpy as np\n'), ((1089, 1100), 'numpy.cos', 'np.cos', (['psi'], {}), '(psi)\n', (1095, 1100), True, 'import numpy as np\n'), ((1152, 1171), 'numpy.cos', 'np.cos', (['(psi + delta)'], {}), '(psi + delta)\n', (1158, 1171), True, 'import numpy as np\n'), ((1220, 1239), 'numpy.sin', 'np.sin', (['(psi + delta)'], {}), '(psi + delta)\n', (1226, 1239), True, 'import numpy as np\n'), ((1288, 1307), 'numpy.cos', 'np.cos', (['(psi + delta)'], {}), '(psi + delta)\n', (1294, 1307), True, 'import numpy as np\n'), ((1355, 1374), 'numpy.sin', 'np.sin', (['(psi + delta)'], {}), '(psi + delta)\n', (1361, 1374), True, 'import numpy as np\n'), ((1422, 1433), 'numpy.cos', 'np.cos', (['psi'], {}), '(psi)\n', (1428, 1433), True, 'import numpy as np\n'), ((1482, 1493), 'numpy.sin', 'np.sin', (['psi'], {}), '(psi)\n', (1488, 1493), True, 'import numpy as np\n'), ((1542, 1553), 'numpy.cos', 'np.cos', (['psi'], {}), '(psi)\n', (1548, 1553), True, 'import numpy as np\n'), ((1601, 1612), 'numpy.sin', 'np.sin', (['psi'], {}), '(psi)\n', (1607, 1612), True, 'import numpy as np\n'), ((1662, 1681), 'numpy.cos', 'np.cos', (['(psi + delta)'], {}), '(psi + delta)\n', (1668, 1681), True, 'import numpy as np\n'), ((1728, 1747), 'numpy.sin', 'np.sin', (['(psi + delta)'], {}), '(psi + delta)\n', (1734, 1747), True, 'import numpy as np\n'), ((1794, 1813), 'numpy.cos', 'np.cos', (['(psi + delta)'], {}), '(psi + delta)\n', (1800, 1813), True, 'import numpy as np\n'), ((1859, 1878), 'numpy.sin', 'np.sin', (['(psi + delta)'], {}), '(psi + delta)\n', (1865, 1878), True, 'import numpy as np\n'), ((1924, 1935), 'numpy.cos', 'np.cos', (['psi'], {}), '(psi)\n', (1930, 1935), True, 'import numpy as np\n'), ((1982, 1993), 'numpy.sin', 'np.sin', (['psi'], {}), '(psi)\n', (1988, 1993), True, 'import numpy as np\n'), ((2040, 2051), 'numpy.cos', 'np.cos', (['psi'], {}), '(psi)\n', (2046, 2051), True, 'import numpy as np\n'), ((2097, 2108), 'numpy.sin', 'np.sin', (['psi'], {}), '(psi)\n', (2103, 2108), True, 'import numpy as np\n'), ((2126, 2192), 'numpy.array', 'np.array', (['[[posE, FrontAxle_Center_x], [posN, FrontAxle_Center_y]]'], {}), '([[posE, FrontAxle_Center_x], [posN, FrontAxle_Center_y]])\n', (2134, 2192), True, 'import numpy as np\n'), ((2217, 2281), 'numpy.array', 'np.array', (['[[posE, RearAxle_Center_x], [posN, RearAxle_Center_y]]'], {}), '([[posE, RearAxle_Center_x], [posN, RearAxle_Center_y]])\n', 
(2225, 2281), True, 'import numpy as np\n'), ((2307, 2399), 'numpy.array', 'np.array', (['[[FrontAxle_Left_x, FrontAxle_Right_x], [FrontAxle_Left_y, FrontAxle_Right_y]]'], {}), '([[FrontAxle_Left_x, FrontAxle_Right_x], [FrontAxle_Left_y,\n FrontAxle_Right_y]])\n', (2315, 2399), True, 'import numpy as np\n'), ((2420, 2508), 'numpy.array', 'np.array', (['[[RearAxle_Left_x, RearAxle_Right_x], [RearAxle_Left_y, RearAxle_Right_y]]'], {}), '([[RearAxle_Left_x, RearAxle_Right_x], [RearAxle_Left_y,\n RearAxle_Right_y]])\n', (2428, 2508), True, 'import numpy as np\n'), ((2533, 2646), 'numpy.array', 'np.array', (['[[RightFrontTire_Front_x, RightFrontTire_Back_x], [RightFrontTire_Front_y,\n RightFrontTire_Back_y]]'], {}), '([[RightFrontTire_Front_x, RightFrontTire_Back_x], [\n RightFrontTire_Front_y, RightFrontTire_Back_y]])\n', (2541, 2646), True, 'import numpy as np\n'), ((2671, 2780), 'numpy.array', 'np.array', (['[[RightRearTire_Front_x, RightRearTire_Back_x], [RightRearTire_Front_y,\n RightRearTire_Back_y]]'], {}), '([[RightRearTire_Front_x, RightRearTire_Back_x], [\n RightRearTire_Front_y, RightRearTire_Back_y]])\n', (2679, 2780), True, 'import numpy as np\n'), ((2807, 2916), 'numpy.array', 'np.array', (['[[LeftFrontTire_Front_x, LeftFrontTire_Back_x], [LeftFrontTire_Front_y,\n LeftFrontTire_Back_y]]'], {}), '([[LeftFrontTire_Front_x, LeftFrontTire_Back_x], [\n LeftFrontTire_Front_y, LeftFrontTire_Back_y]])\n', (2815, 2916), True, 'import numpy as np\n'), ((2943, 3048), 'numpy.array', 'np.array', (['[[LeftRearTire_Front_x, LeftRearTire_Back_x], [LeftRearTire_Front_y,\n LeftRearTire_Back_y]]'], {}), '([[LeftRearTire_Front_x, LeftRearTire_Back_x], [\n LeftRearTire_Front_y, LeftRearTire_Back_y]])\n', (2951, 3048), True, 'import numpy as np\n')] |
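# Usage sketch for plotVehicle above: it returns eight 2x2 arrays, each holding
# [x0, x1] on the first row and [y0, y1] on the second, so every segment can be
# drawn directly. The pose and dimension values below are arbitrary illustrations,
# not taken from the original script.
import matplotlib.pyplot as plt

segments = plotVehicle(posE=0.0, posN=0.0, psi=0.3, delta=0.1, a=1.4, b=1.4, d=1.6, rW=0.3)
for seg in segments:
    plt.plot(seg[0], seg[1], 'k')
plt.axis('equal')
plt.show()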
import types
import functools
from torch import nn
from timm.models.efficientnet_builder import *
from timm.models.efficientnet import EfficientNetFeatures, _cfg
EDITIONS = {
'a' : (0.86, 1.0, 1.2),
'b' : (0.84, 0.75, 1.1),
'c' : (0.825, 0.54, 0.85),
'd' : (0.68, 0.54, 0.695),
'e' : (0.475, 0.51, 0.6)
}
def feature_strides():
return [8, 16, 32]
def feature_channels(self):
return self.feature_info.channels()
def TinyNet(edition, **kwargs):
kwargs.pop('pretrained')
r = kwargs.pop('r', 1)
w = kwargs.pop('w', 1)
d = kwargs.pop('d', 1)
if edition is not None:
r, w, d = EDITIONS.get(edition.casefold(), (1., 1., 1.))
"""Creates a TinyNet model.
"""
arch_def = [
['ds_r1_k3_s1_e1_c16_se0.25'], ['ir_r2_k3_s2_e6_c24_se0.25'],
['ir_r2_k5_s2_e6_c40_se0.25'], ['ir_r3_k3_s2_e6_c80_se0.25'],
['ir_r3_k5_s1_e6_c112_se0.25'], ['ir_r4_k5_s2_e6_c192_se0.25'],
['ir_r1_k3_s1_e6_c320_se0.25'],
]
stem_size = 32
    # Bind only the width multiplier; the builder calls round_chs_fn(channels) itself.
    round_chs_fn = functools.partial(round_channels, multiplier=w)
norm_layer = functools.partial(nn.BatchNorm2d, **resolve_bn_args(kwargs))
model_kwargs = dict(
block_args=decode_arch_def(arch_def, d, depth_trunc="round"),
stem_size=stem_size,
fix_stem=True,
act_layer=nn.ReLU,
norm_layer=norm_layer,
out_indices=(2, 3, 4),
feature_location="bottleneck",
round_chs_fn=round_chs_fn,
**kwargs
)
m = EfficientNetFeatures(**model_kwargs)
hw = int(224 * r)
    m.default_cfg = _cfg(input_size=(3, hw, hw))
m.feature_strides = feature_strides
m.feature_channels = types.MethodType(feature_channels, m)
return m
| [
"timm.models.efficientnet._cfg",
"types.MethodType",
"functools.partial",
"timm.models.efficientnet.EfficientNetFeatures"
] | [((957, 1004), 'functools.partial', 'functools.partial', (['round_channels', 'stem_size', 'w'], {}), '(round_channels, stem_size, w)\n', (974, 1004), False, 'import functools\n'), ((1360, 1396), 'timm.models.efficientnet.EfficientNetFeatures', 'EfficientNetFeatures', ([], {}), '(**model_kwargs)\n', (1380, 1396), False, 'from timm.models.efficientnet import EfficientNetFeatures, _cfg\n'), ((1433, 1461), 'timm.models.efficientnet._cfg', '_cfg', ([], {'input_size': '(3, hw, hw)'}), '(input_size=(3, hw, hw))\n', (1437, 1461), False, 'from timm.models.efficientnet import EfficientNetFeatures, _cfg\n'), ((1522, 1559), 'types.MethodType', 'types.MethodType', (['feature_channels', 'm'], {}), '(feature_channels, m)\n', (1538, 1559), False, 'import types\n')] |
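# Minimal smoke test for the TinyNet factory above, assuming the timm APIs it
# relies on (EfficientNetFeatures, FeatureInfo.channels) behave as used there.
# pretrained=False must be passed explicitly because the factory pops that key.
import torch

backbone = TinyNet('a', pretrained=False)
x = torch.randn(1, 3, *backbone.default_cfg['input_size'][1:])
features = backbone(x)
print(backbone.feature_strides(), backbone.feature_channels())
print([f.shape for f in features])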
from django.contrib import admin
from fileslibrary.models import Topic, File
class FileInline(admin.TabularInline):
model = File
extra = 1
class TopicAdmin(admin.ModelAdmin):
inlines = [FileInline]
admin.site.register(Topic, TopicAdmin) | [
"django.contrib.admin.site.register"
] | [((207, 245), 'django.contrib.admin.site.register', 'admin.site.register', (['Topic', 'TopicAdmin'], {}), '(Topic, TopicAdmin)\n', (226, 245), False, 'from django.contrib import admin\n')] |
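# Hypothetical sketch of the models the admin registration above expects: a
# TabularInline requires File to carry a ForeignKey to Topic. The field names
# below are illustrative assumptions, not taken from the real fileslibrary app.
from django.db import models

class Topic(models.Model):
    name = models.CharField(max_length=200)  # assumed field

class File(models.Model):
    topic = models.ForeignKey(Topic, on_delete=models.CASCADE)
    upload = models.FileField(upload_to='files/')  # assumed field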
#!/usr/bin/env python2
"""
Example showing how to download a runbook from an automation account so you can call it from a parent python script
You could publish the below code as a new python runbook (hello_world) and then call it with this sample.
#!/usr/bin/env python2
def hello(name):
print name
"""
def download_file(resource_group, automation_account, runbook_name, runbook_type):
"""
Downloads a runbook from the automation account to the cloud container
"""
import os
import sys
import requests
import automationassets
# Return token based on Azure automation Runas connection
def get_automation_runas_token(runas_connection):
""" Returs a token that can be used to authenticate against Azure resources """
from OpenSSL import crypto
import adal
# Get the Azure Automation RunAs service principal certificate
cert = automationassets.get_automation_certificate("AzureRunAsCertificate")
sp_cert = crypto.load_pkcs12(cert)
pem_pkey = crypto.dump_privatekey(crypto.FILETYPE_PEM, sp_cert.get_privatekey())
# Get run as connection information for the Azure Automation service principal
application_id = runas_connection["ApplicationId"]
thumbprint = runas_connection["CertificateThumbprint"]
tenant_id = runas_connection["TenantId"]
# Authenticate with service principal certificate
resource = "https://management.core.windows.net/"
authority_url = ("https://login.microsoftonline.com/" + tenant_id)
context = adal.AuthenticationContext(authority_url)
azure_credential = context.acquire_token_with_client_certificate(
resource,
application_id,
pem_pkey,
thumbprint)
# Return the token
return azure_credential.get('accessToken')
# Authenticate to Azure using the Azure Automation RunAs service principal
automation_runas_connection = automationassets.get_automation_connection("AzureRunAsConnection")
access_token = get_automation_runas_token(automation_runas_connection)
# Set what resources to act against
subscription_id = str(automation_runas_connection["SubscriptionId"])
# Set up URI to create a new automation job
uri = ("https://management.azure.com/subscriptions/" + subscription_id
+ "/resourceGroups/" + resource_group
+ "/providers/Microsoft.Automation/automationAccounts/" + automation_account
+ "/runbooks/" + runbook_name + "/content?api-version=2015-10-31")
# Make request to create new automation job
headers = {"Authorization": 'Bearer ' + access_token}
result = requests.get(uri, headers=headers)
runbookfile = os.path.join(sys.path[0], runbook_name) + runbook_type
with open(runbookfile, "w") as text_file:
text_file.write(result.text)
# Specify the runbook to download
_AUTOMATION_RESOURCE_GROUP = "contoso"
_AUTOMATION_ACCOUNT = "contosodev"
_RUNBOOK_NAME = "hello_world"
_RUNBOOK_TYPE = ".py"
download_file(_AUTOMATION_RESOURCE_GROUP, _AUTOMATION_ACCOUNT, _RUNBOOK_NAME, _RUNBOOK_TYPE)
# Import child runbook and call some function
import child_runbook
child_runbook.hello("world")
| [
"child_runbook.hello",
"automationassets.get_automation_certificate",
"adal.AuthenticationContext",
"automationassets.get_automation_connection",
"os.path.join",
"requests.get",
"OpenSSL.crypto.load_pkcs12"
] | [((3218, 3246), 'child_runbook.hello', 'child_runbook.hello', (['"""world"""'], {}), "('world')\n", (3237, 3246), False, 'import child_runbook\n'), ((1986, 2052), 'automationassets.get_automation_connection', 'automationassets.get_automation_connection', (['"""AzureRunAsConnection"""'], {}), "('AzureRunAsConnection')\n", (2028, 2052), False, 'import automationassets\n'), ((2702, 2736), 'requests.get', 'requests.get', (['uri'], {'headers': 'headers'}), '(uri, headers=headers)\n', (2714, 2736), False, 'import requests\n'), ((911, 979), 'automationassets.get_automation_certificate', 'automationassets.get_automation_certificate', (['"""AzureRunAsCertificate"""'], {}), "('AzureRunAsCertificate')\n", (954, 979), False, 'import automationassets\n'), ((998, 1022), 'OpenSSL.crypto.load_pkcs12', 'crypto.load_pkcs12', (['cert'], {}), '(cert)\n', (1016, 1022), False, 'from OpenSSL import crypto\n'), ((1581, 1622), 'adal.AuthenticationContext', 'adal.AuthenticationContext', (['authority_url'], {}), '(authority_url)\n', (1607, 1622), False, 'import adal\n'), ((2756, 2795), 'os.path.join', 'os.path.join', (['sys.path[0]', 'runbook_name'], {}), '(sys.path[0], runbook_name)\n', (2768, 2795), False, 'import os\n')] |
import re
def snake_case(string: str) -> str:
return re.sub('(?!^)([A-Z]+)', r'_\1', string).lower()
| [
"re.sub"
] | [((59, 98), 're.sub', 're.sub', (['"""(?!^)([A-Z]+)"""', '"""_\\\\1"""', 'string'], {}), "('(?!^)([A-Z]+)', '_\\\\1', string)\n", (65, 98), False, 'import re\n')] |
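# Usage sketch for snake_case above; expected outputs are shown as comments.
# Consecutive capitals are treated as one run ('MyHTTPServer' -> 'my_httpserver'),
# which may or may not be the desired behaviour for acronyms.
print(snake_case('CamelCase'))         # camel_case
print(snake_case('snakeCaseAlready'))  # snake_case_already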
from lib.stats.axis_intervals.numba_backend import _axis_intervals as axis_intervals
import numpy as np
import math
import pytest
def interval_union(start, stop):
"""Compute non-unique union of many intervals"""
return [
i
for rng in zip(start, stop)
for i in range(*rng)
]
def assert_invariants(ais, cis, n):
"""Conditions that should always be true regardless of paramaterization"""
assert len(ais) == n
assert len(cis) <= len(ais)
# The number of interval members in each interval definition should be the same
assert ais['count'].sum() == cis['count'].sum()
# All ranges should be exclusive on the right and non-empty
assert (cis['max_index'] > cis['min_index']).all()
assert (cis['max_stop'] > cis['min_start']).all()
assert (ais['stop'] > ais['start']).all()
assert (ais['stop'] - ais['start'] == ais['count']).all()
# All indexes along axis must be within bounds
def assert_ibi(df, c, n):
assert df[c].between(0, n).all()
assert_ibi(ais, 'start', n - 1)
assert_ibi(cis, 'min_index', n - 1)
assert_ibi(cis, 'min_start', n - 1)
assert_ibi(ais, 'stop', n)
assert_ibi(cis, 'max_index', n)
assert_ibi(cis, 'max_stop', n)
# The range of axis elements (usually rows) in both partitionings
# should have no intersection and be exhaustive
assert ais['index'].tolist() == list(range(n))
assert interval_union(cis['min_index'], cis['max_index']) == list(range(n))
def ais_df(n, *args, **kwargs):
ais, cis = axis_intervals(*args, n=n, **kwargs)
ais, cis = ais.to_dataset('var').to_dataframe(), cis.to_dataset('var').to_dataframe()
assert_invariants(ais, cis, n)
return ais, cis
@pytest.mark.parametrize("n", [5, 10])
@pytest.mark.parametrize("target_chunk_size", [None, 5, 1])
def test_no_window(n, target_chunk_size):
# With no window, each interval should only include a single element (typically row)
ais, cis = ais_df(n=n, window=0, step=None, target_chunk_size=target_chunk_size)
assert (ais['count'] == 1).all()
assert (ais['stop'] - ais['start'] == 1).all()
assert len(cis) == n / (target_chunk_size or n)
@pytest.mark.parametrize("n", [5, 10])
@pytest.mark.parametrize("target_chunk_size", [None, 5, 1])
def test_unit_window(n, target_chunk_size):
# With window of 1, each interval should contain one element and its neighbor (to the right)
ais, cis = ais_df(n=n, window=1, step=None, target_chunk_size=target_chunk_size)
# The number of element in each interval should be two except for the last element
assert (ais['count'].iloc[:-1] == 2).all()
assert ais['count'].iloc[-1] == 1
assert (ais['stop'] - ais['start'] <= 2).all()
# Each chunk should include `target_chunk_size` - 1 elements
# because inclusion of single neighbors ends each chunk one element earlier
if target_chunk_size is None:
assert len(cis) == 1
elif target_chunk_size == 1:
assert len(cis) == n
else:
assert len(cis) == math.ceil(n / (target_chunk_size - 1))
@pytest.mark.parametrize("target_chunk_size", [None, 5, 1])
def test_window4_step2(target_chunk_size):
n = 10
ais, _ = ais_df(n=n, window=4, step=2)
# Manually curated example validating the axis intervals
# (correctness of chunk intervals implied largely by equal sums of these counts)
assert ais['count'].tolist() == [5, 4, 5, 4, 5, 4, 4, 3, 2, 1]
@pytest.mark.parametrize("step,window", [(0, 1), (-1, 0), (-1, -1), (1, 0), (1, -1), (3, 2)])
def test_raise_on_bad_step_or_window(step, window):
with pytest.raises(ValueError):
axis_intervals(n=10, step=step, window=window)
@pytest.mark.parametrize("target_chunk_size", [None, 3, 1])
def test_window_by_position(target_chunk_size):
n = 6
# Separate windows reachable from first, next three, and last two
positions = np.array([1, 5, 6, 7, 11, 12])
ais, _ = ais_df(n=n, window=3, step=None, positions=positions, target_chunk_size=target_chunk_size)
assert ais['count'].tolist() == [1, 3, 2, 1, 2, 1]
@pytest.mark.parametrize("target_chunk_size", [None, 3, 1])
def test_window_by_position_with_groups(target_chunk_size):
n = 6
# 1st is on its own, 2nd-4th are within window but broken by group, last two are together
# Note that position decreses at group break
positions = np.array([1, 5, 6, 4, 8, 9])
groups = np.array([1, 1, 1, 2, 2, 2])
ais, _ = ais_df(n=n, window=3, step=None, positions=positions, groups=groups, target_chunk_size=target_chunk_size)
assert ais['count'].tolist() == [1, 2, 1, 1, 2, 1]
def test_raise_on_non_monotonic_positions():
with pytest.raises(ValueError):
positions = np.array([1, 2, 3, 1, 2, 3])
axis_intervals(window=1, positions=positions)
with pytest.raises(ValueError):
positions = np.array([3, 2, 1, 3, 2, 1])
groups = np.array([1, 1, 1, 2, 2, 2])
axis_intervals(window=1, positions=positions, groups=groups) | [
"math.ceil",
"pytest.mark.parametrize",
"numpy.array",
"pytest.raises",
"lib.stats.axis_intervals.numba_backend._axis_intervals"
] | [((1734, 1771), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n"""', '[5, 10]'], {}), "('n', [5, 10])\n", (1757, 1771), False, 'import pytest\n'), ((1773, 1831), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""target_chunk_size"""', '[None, 5, 1]'], {}), "('target_chunk_size', [None, 5, 1])\n", (1796, 1831), False, 'import pytest\n'), ((2191, 2228), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n"""', '[5, 10]'], {}), "('n', [5, 10])\n", (2214, 2228), False, 'import pytest\n'), ((2230, 2288), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""target_chunk_size"""', '[None, 5, 1]'], {}), "('target_chunk_size', [None, 5, 1])\n", (2253, 2288), False, 'import pytest\n'), ((3088, 3146), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""target_chunk_size"""', '[None, 5, 1]'], {}), "('target_chunk_size', [None, 5, 1])\n", (3111, 3146), False, 'import pytest\n'), ((3462, 3558), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""step,window"""', '[(0, 1), (-1, 0), (-1, -1), (1, 0), (1, -1), (3, 2)]'], {}), "('step,window', [(0, 1), (-1, 0), (-1, -1), (1, 0),\n (1, -1), (3, 2)])\n", (3485, 3558), False, 'import pytest\n'), ((3701, 3759), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""target_chunk_size"""', '[None, 3, 1]'], {}), "('target_chunk_size', [None, 3, 1])\n", (3724, 3759), False, 'import pytest\n'), ((4097, 4155), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""target_chunk_size"""', '[None, 3, 1]'], {}), "('target_chunk_size', [None, 3, 1])\n", (4120, 4155), False, 'import pytest\n'), ((1548, 1584), 'lib.stats.axis_intervals.numba_backend._axis_intervals', 'axis_intervals', (['*args'], {'n': 'n'}), '(*args, n=n, **kwargs)\n', (1562, 1584), True, 'from lib.stats.axis_intervals.numba_backend import _axis_intervals as axis_intervals\n'), ((3904, 3934), 'numpy.array', 'np.array', (['[1, 5, 6, 7, 11, 12]'], {}), '([1, 5, 6, 7, 11, 12])\n', (3912, 3934), True, 'import numpy as np\n'), ((4385, 4413), 'numpy.array', 'np.array', (['[1, 5, 6, 4, 8, 9]'], {}), '([1, 5, 6, 4, 8, 9])\n', (4393, 4413), True, 'import numpy as np\n'), ((4427, 4455), 'numpy.array', 'np.array', (['[1, 1, 1, 2, 2, 2]'], {}), '([1, 1, 1, 2, 2, 2])\n', (4435, 4455), True, 'import numpy as np\n'), ((3616, 3641), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3629, 3641), False, 'import pytest\n'), ((3651, 3697), 'lib.stats.axis_intervals.numba_backend._axis_intervals', 'axis_intervals', ([], {'n': '(10)', 'step': 'step', 'window': 'window'}), '(n=10, step=step, window=window)\n', (3665, 3697), True, 'from lib.stats.axis_intervals.numba_backend import _axis_intervals as axis_intervals\n'), ((4691, 4716), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4704, 4716), False, 'import pytest\n'), ((4742, 4770), 'numpy.array', 'np.array', (['[1, 2, 3, 1, 2, 3]'], {}), '([1, 2, 3, 1, 2, 3])\n', (4750, 4770), True, 'import numpy as np\n'), ((4783, 4828), 'lib.stats.axis_intervals.numba_backend._axis_intervals', 'axis_intervals', ([], {'window': '(1)', 'positions': 'positions'}), '(window=1, positions=positions)\n', (4797, 4828), True, 'from lib.stats.axis_intervals.numba_backend import _axis_intervals as axis_intervals\n'), ((4844, 4869), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4857, 4869), False, 'import pytest\n'), ((4895, 4923), 'numpy.array', 'np.array', (['[3, 2, 1, 3, 2, 1]'], {}), '([3, 2, 1, 3, 2, 1])\n', (4903, 4923), True, 'import numpy as 
np\n'), ((4945, 4973), 'numpy.array', 'np.array', (['[1, 1, 1, 2, 2, 2]'], {}), '([1, 1, 1, 2, 2, 2])\n', (4953, 4973), True, 'import numpy as np\n'), ((4986, 5046), 'lib.stats.axis_intervals.numba_backend._axis_intervals', 'axis_intervals', ([], {'window': '(1)', 'positions': 'positions', 'groups': 'groups'}), '(window=1, positions=positions, groups=groups)\n', (5000, 5046), True, 'from lib.stats.axis_intervals.numba_backend import _axis_intervals as axis_intervals\n'), ((3046, 3084), 'math.ceil', 'math.ceil', (['(n / (target_chunk_size - 1))'], {}), '(n / (target_chunk_size - 1))\n', (3055, 3084), False, 'import math\n')] |
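# Worked example for interval_union above: each (start, stop) pair is expanded as
# a half-open range and overlapping members are kept, hence "non-unique" union.
print(interval_union([0, 2], [3, 4]))  # [0, 1, 2, 2, 3]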
# Copyright 2020 The Merlin Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from merlin.docker.docker import copy_pyfunc_dockerfile, copy_standard_dockerfile
def test_copy_pyfunc_dockerfile():
path = copy_pyfunc_dockerfile(".")
assert os.path.isfile(path) == True
assert os.path.basename(path) == "pyfunc.Dockerfile"
def test_copy_standard_dockerfile():
path = copy_standard_dockerfile(".")
assert os.path.isfile(path) == True
assert os.path.basename(path) == "standard.Dockerfile"
| [
"os.path.isfile",
"merlin.docker.docker.copy_pyfunc_dockerfile",
"merlin.docker.docker.copy_standard_dockerfile",
"os.path.basename"
] | [((724, 751), 'merlin.docker.docker.copy_pyfunc_dockerfile', 'copy_pyfunc_dockerfile', (['"""."""'], {}), "('.')\n", (746, 751), False, 'from merlin.docker.docker import copy_pyfunc_dockerfile, copy_standard_dockerfile\n'), ((899, 928), 'merlin.docker.docker.copy_standard_dockerfile', 'copy_standard_dockerfile', (['"""."""'], {}), "('.')\n", (923, 928), False, 'from merlin.docker.docker import copy_pyfunc_dockerfile, copy_standard_dockerfile\n'), ((763, 783), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (777, 783), False, 'import os\n'), ((803, 825), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (819, 825), False, 'import os\n'), ((940, 960), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (954, 960), False, 'import os\n'), ((980, 1002), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (996, 1002), False, 'import os\n')] |
#-*-coding:utf-8-*-
import torch
from torch.autograd import Variable
from torchvision import models
import sys
import os
import getpass
import numpy as np
import torchvision
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from utils.sgd import SGD
import utils.dataset as dataset
import argparse
from operator import itemgetter
from heapq import nsmallest
from time import time
def get_args():
parser = argparse.ArgumentParser(description='Pytorch Prunning Experiment')
parser.add_argument('--arch', metavar='ARCH', default='alexnet', help='model architecture')
parser.add_argument('--data_name', metavar='DATA_NAME', type=str, default='Flower102', help='dataset name')
parser.add_argument('--checkpoint', default=False, action='store_true', help='choose if prune from checkpoint or not')
parser.add_argument('--l1', default=5e-3, type=float, help='set the l1 Regularization weight')
parser.add_argument('--threshold', default=1e-1, type=float, help='set the threshold value')
parser.add_argument('--conv1x1Lr', default=1e-1, type=float, help='set the learning rate of 1x1 layers')
parser.add_argument('--convLr', default=0, type=float, help='set the learning rate of conv layers')
parser.add_argument('--fcLr', default=1e-3, type=float, help='set the learning rate of fc layers')
parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
    parser.add_argument('--pruneRatio', default=0.1, type=float, help='stop pruning once the remaining conv-channel ratio falls below this value')
args = parser.parse_args()
return args
args = get_args()
arch = args.arch
data_name = args.data_name
checkpoint = args.checkpoint
l1 = args.l1
threshold = args.threshold
conv1x1Lr = args.conv1x1Lr
convLr = args.convLr
fcLr = args.fcLr
momentum = args.momentum
pruneRatio = args.pruneRatio
import logging
#======================generate logging information===============
log_path = './log'
if not os.path.exists(log_path):
os.mkdir(log_path)
# you should assign log_name first such as mobilenet_resnet50_CIFAR10.log
log_name = 'alexnetPrune.log'
TrainInfoPath = os.path.join(log_path, log_name)
# formater
formatter = logging.Formatter('%(levelname)s %(message)s')
# cmd Handler
cmdHandler = logging.StreamHandler()
# File Handler including info
infoFileHandler = logging.FileHandler(TrainInfoPath, mode='w')
infoFileHandler.setFormatter(formatter)
# info Logger
infoLogger = logging.getLogger('info')
infoLogger.setLevel(logging.DEBUG)
infoLogger.addHandler(cmdHandler)
infoLogger.addHandler(infoFileHandler)
if getpass.getuser() == 'tsq':
train_batch_size = 8
else:
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
train_batch_size = 64
use_gpu = torch.cuda.is_available()
num_batches = 0
num_classes = 1000
if 'Flower102' in data_name:
train_path = "./Flower102/train"
test_path = "./Flower102/test"
num_classes = 102
elif 'Birds200' in data_name:
train_path = "./Birds200/train"
test_path = "./Birds200/test"
num_classes = 200
elif 'catdog' in data_name:
train_path = "./CatDog/train"
test_path = "./CatDog/test"
num_classes = 2
train_loader = dataset.train_loader(train_path, batch_size=train_batch_size, num_workers=4, pin_memory=True)
test_loader = dataset.test_loader(test_path, batch_size=1, num_workers=4, pin_memory=True)
infoLogger.info("dataset is: "+args.data_name)
def test(model, test_loader):
model.eval()
test_correct = 0
test_total = 0
for i, (batch, label) in enumerate(test_loader):
batch = batch.cuda()
output = model(Variable(batch))
        pred_label = output.data.max(1)[1] # index of the class with the highest predicted score
        test_correct += pred_label.cpu().eq(label).sum() # label is a torch.LongTensor
test_total += label.size(0)
infoLogger.info("Test Accuracy :"+str(round( float(test_correct) / test_total , 3 )))
model.train()
return round( float(test_correct) / test_total , 3 )
def train_batch(model, optimizer, batch, label):
optimizer.zero_grad() #
input = Variable(batch)
output = model(input)
criterion = torch.nn.CrossEntropyLoss()
criterion(output, Variable(label)).backward()
optimizer.step()
return criterion(output, Variable(label)).data
def train_epoch(model, train_loader, optimizer=None):
global num_batches
for batch, label in train_loader:
loss = train_batch(model, optimizer, batch.cuda(), label.cuda())
if num_batches%31 == 0:
infoLogger.info('%23s%-9s%-13s'%('the '+str(num_batches)+'th batch, ','loss is: ',str(round(loss[0],8))))
num_batches +=1
# Train for one epoch, then evaluate once
def train_test(model, train_loader, test_loader, optimizer=None, epoches=10):
infoLogger.info("Start training.")
if optimizer is None:
optimizer = optim.SGD(model.classifier.parameters(), lr = 0.001, momentum=0.9)
for i in range(epoches):
model.train()
infoLogger.info("Epoch: "+str(i))
train_epoch(model, train_loader, optimizer)
state = model.state_dict()
if i%5==0 or i==epoches-1:
acc = test(model, test_loader)
filename = './1x1models/prune/' + args.arch + '1x1pruned' + '_' + args.data_name + '_' + str(acc) + '.pth'
torch.save(state, filename)
s1x1ParaName = ['features12.0.weight', 'features22.0.weight', 'features32.0.weight', 'features42.0.weight', 'features52.0.weight']
if i==epoches-1:
for name in s1x1ParaName:
sumStr = name+' sum is: '+str(torch.abs(state[name]).sum())
meanStr = name+' mean is: '+str(torch.mean(state[name]))
stdStr = name+' std is: '+str(torch.std(state[name]))
infoLogger.info(sumStr)
infoLogger.info(meanStr)
infoLogger.info(stdStr)
infoLogger.info("Finished training.")
return model.state_dict()
class AddLayerAlexNet(nn.Module):
def __init__(self, num_classes=2, convNumList=[64, 192, 384, 256, 256]):
super(AddLayerAlexNet, self).__init__()
self.features11 = nn.Sequential(
nn.Conv2d(3, convNumList[0], 11, 4, 2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, dilation=1),
)
self.features12 = nn.Sequential(
nn.Conv2d(in_channels=convNumList[0],out_channels=convNumList[0],kernel_size=1,stride=1,groups=convNumList[0],bias=False),
)
self.features21 = nn.Sequential(
nn.Conv2d(convNumList[0], convNumList[1], 5, 1, 2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, dilation=1),
)
self.features22 = nn.Sequential(
nn.Conv2d(in_channels=convNumList[1],out_channels=convNumList[1],kernel_size=1,stride=1,groups=convNumList[1],bias=False),
)
self.features31 = nn.Sequential(
nn.Conv2d(convNumList[1], convNumList[2], 3, 1, 1),
nn.ReLU(inplace=True),
)
self.features32 = nn.Sequential(
nn.Conv2d(in_channels=convNumList[2],out_channels=convNumList[2],kernel_size=1,stride=1,groups=convNumList[2],bias=False),
)
self.features41 = nn.Sequential(
nn.Conv2d(convNumList[2], convNumList[3], 3, 1, 1),
nn.ReLU(inplace=True),
)
self.features42 = nn.Sequential(
nn.Conv2d(in_channels=convNumList[3],out_channels=convNumList[3],kernel_size=1,stride=1,groups=convNumList[3],bias=False),
)
self.features51 = nn.Sequential(
nn.Conv2d(convNumList[3], convNumList[4], 3, 1, 1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, dilation=1),
)
self.features52 = nn.Sequential(
nn.Conv2d(in_channels=convNumList[4],out_channels=convNumList[4],kernel_size=1,stride=1,groups=convNumList[4],bias=False),
)
for param in self.features11.parameters():
param.requires_grad = False
for param in self.features12.parameters():
param.requires_grad = True
for param in self.features21.parameters():
param.requires_grad = False
for param in self.features22.parameters():
param.requires_grad = True
for param in self.features31.parameters():
param.requires_grad = False
for param in self.features32.parameters():
param.requires_grad = True
for param in self.features41.parameters():
param.requires_grad = False
for param in self.features42.parameters():
param.requires_grad = True
for param in self.features51.parameters():
param.requires_grad = False
for param in self.features52.parameters():
param.requires_grad = True
self.num_classes = num_classes
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(convNumList[4] * 6 * 6, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Linear(4096, num_classes),
)
def forward(self, x):
x = self.features11(x)
x = self.features12(x)
x = self.features21(x)
x = self.features22(x)
x = self.features31(x)
x = self.features32(x)
x = self.features41(x)
x = self.features42(x)
x = self.features51(x)
x = self.features52(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
# add 1x1 layers
def add1x1layers(num_classes, convIndexList=[[]]):
if len(convIndexList[0])==0:
return AddLayerAlexNet(num_classes)
convNumList = [len(i) for i in convIndexList]
return AddLayerAlexNet(num_classes, convNumList)
# reload paras
# state: the previous OrderedDict of model
def reloadParam(num_classes, state, convIndexList=[[]]):
global use_gpu
if len(convIndexList)==1 and len(convIndexList[0])==0:
convIndexList = []
convIndexList.append(range(64))
convIndexList.append(range(192))
convIndexList.append(range(384))
convIndexList.append(range(256))
convIndexList.append(range(256))
model = add1x1layers(num_classes, convIndexList)
now_state = model.state_dict()
npIndexList = [np.array(i) for i in convIndexList]
# convNumList = [len(i) for i in convIndexList]
convKeys = ['features11.0.weight',
'features21.0.weight',
'features31.0.weight',
'features41.0.weight',
'features51.0.weight']
convBiasKeys = ['features11.0.bias',
'features21.0.bias',
'features31.0.bias',
'features41.0.bias',
'features51.0.bias']
fcKeys = ['classifier.1.weight',
'classifier.4.weight',
'classifier.6.weight']
fcBiasKeys = ['classifier.1.bias',
'classifier.4.bias',
'classifier.6.bias']
conv1x1Keys = ['features12.0.weight',
'features22.0.weight',
'features32.0.weight',
'features42.0.weight',
'features52.0.weight']
for i, key in enumerate(convKeys):
if i==0:
inIndex = np.array([0,1,2])
outIndex = npIndexList[i]
now_state[key].copy_(state[key][outIndex])
else:
inIndex = npIndexList[i-1]
outIndex = npIndexList[i]
now_state[key].copy_(state[key][outIndex][:,inIndex])
for i, key in enumerate(convBiasKeys):
outIndex = npIndexList[i]
now_state[key].copy_(state[key][outIndex])
    # only the first fc layer needs to change
fcIndex = []
for i in npIndexList[-1]:
temp = range(i*36, (i+1)*36)
fcIndex +=temp
fcIndex = np.array(fcIndex)
now_state[fcKeys[0]].copy_(state[fcKeys[0]][:,fcIndex])
now_state[fcKeys[1]].copy_(state[fcKeys[1]])
now_state[fcKeys[2]].copy_(state[fcKeys[2]])
for i, key in enumerate(fcBiasKeys):
now_state[key].copy_(state[key])
# set 1x1 layers weights equal 1
for i, key in enumerate(conv1x1Keys):
shape = now_state[key].shape
now_state[key].copy_(torch.ones(shape))
if use_gpu:
model = model.cuda()
infoLogger.info("Use GPU!")
else:
infoLogger.info("Use CPU!")
return model
# only train 1x1 layers and add L1 Regularization
def train1x1withL1(model, epochs=10):
global train_loader, test_loader
global threshold, l1, conv1x1Lr, momentum
optimizer = SGD([
{'params': model.features12.parameters()},
{'params': model.features22.parameters()},
{'params': model.features32.parameters()},
{'params': model.features42.parameters()},
{'params': model.features52.parameters()},
], weight_decay1=l1, lr=conv1x1Lr, momentum=momentum)
state = train_test(model, train_loader, test_loader, optimizer=optimizer, epoches=epochs)
s1x1ParaName = ['features12.0.weight', 'features22.0.weight', 'features32.0.weight', 'features42.0.weight', 'features52.0.weight']
convIndexList = []
for i, name in enumerate(s1x1ParaName):
para = state[name]
para = torch.squeeze(torch.squeeze(torch.squeeze(para,1),1),1)
temp = []
# para = para.cpu().numpy()
# para = (para-np.min(para))/(np.max(para)-np.min(para))
for index,value in enumerate(para):
if abs(value)<=threshold:
# print i, index # index to be deleted
pass
else:
temp.append(index)
convIndexList.append(temp)
return convIndexList
# fine-tune model
def fineTune(model, epoches=10):
global train_loader, test_loader
optimizer = SGD(model.classifier.parameters(), weight_decay2=5e-4, lr = 1e-3, momentum=0.9)
state = train_test(model, train_loader, test_loader, optimizer=optimizer, epoches=epoches)
return state
def singePrune(state, convIndexList=[[]], epochs=10):
global num_classes
model = reloadParam(num_classes, state, convIndexList)
convIndexList = train1x1withL1(model, epochs=epochs)
model = reloadParam(num_classes, state, convIndexList)
state = fineTune(model, epoches=2)
return convIndexList, state
def GetconvIndexList(numList=[64, 192, 384, 256, 256]):
convIndexList = []
for i in numList:
convIndexList.append(range(i))
return convIndexList
def prune():
global infoLogger
global use_gpu, num_batches, train_batch_size
global num_classes
totalConv = 1152.0
t0 = time()
if not checkpoint:
state = torch.load('./1x1models/finetune/alexnet1x1_Flower102_0.832.pth')
convIndexList = GetconvIndexList()
tempsum = 0
for i in range(len(convIndexList)):
tempsum = tempsum+len(convIndexList[i])
ratio = tempsum/totalConv
    while ratio != 0 and ratio >= pruneRatio:  # stop once the remaining-channel ratio drops below pruneRatio
convIndexList, state = singePrune(state, convIndexList, epochs=10)
tempsum = 0
numList = []
for i in range(len(convIndexList)):
infoLogger.info('the '+str(i)+'th'+' layer, channels num is: '+str(len(convIndexList[i])))
numList.append(len(convIndexList[i]))
tempsum = tempsum+len(convIndexList[i])
ratio = tempsum/totalConv
infoLogger.info('current conv channels ratio is: '+str(ratio))
convIndexList = GetconvIndexList(numList=numList)
    model = reloadParam(num_classes, state, convIndexList)  # rebuild the pruned model for a final fine-tune
    state = fineTune(model, epoches=10)
    infoLogger.info("The pruning took " + str(time() - t0) + " seconds")
if __name__ == '__main__':
prune()
# usage
# python alexnetPrune.py | [
"logging.getLogger",
"torch.nn.ReLU",
"logging.StreamHandler",
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"numpy.array",
"torch.cuda.is_available",
"torch.squeeze",
"getpass.getuser",
"os.path.exists",
"argparse.ArgumentParser",
"torch.mean",
"logging.FileHandler",
"os.mkdir",
"torch.autograd.Variable",
"torch.abs",
"torch.save",
"time.time",
"torch.std",
"torch.ones",
"logging.Formatter",
"torch.load",
"os.path.join",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.Linear",
"utils.dataset.test_loader",
"utils.dataset.train_loader"
] | [((2105, 2137), 'os.path.join', 'os.path.join', (['log_path', 'log_name'], {}), '(log_path, log_name)\n', (2117, 2137), False, 'import os\n'), ((2161, 2207), 'logging.Formatter', 'logging.Formatter', (['"""%(levelname)s %(message)s"""'], {}), "('%(levelname)s %(message)s')\n", (2178, 2207), False, 'import logging\n'), ((2236, 2259), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (2257, 2259), False, 'import logging\n'), ((2309, 2353), 'logging.FileHandler', 'logging.FileHandler', (['TrainInfoPath'], {'mode': '"""w"""'}), "(TrainInfoPath, mode='w')\n", (2328, 2353), False, 'import logging\n'), ((2421, 2446), 'logging.getLogger', 'logging.getLogger', (['"""info"""'], {}), "('info')\n", (2438, 2446), False, 'import logging\n'), ((2702, 2727), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2725, 2727), False, 'import torch\n'), ((3139, 3236), 'utils.dataset.train_loader', 'dataset.train_loader', (['train_path'], {'batch_size': 'train_batch_size', 'num_workers': '(4)', 'pin_memory': '(True)'}), '(train_path, batch_size=train_batch_size, num_workers=4,\n pin_memory=True)\n', (3159, 3236), True, 'import utils.dataset as dataset\n'), ((3247, 3323), 'utils.dataset.test_loader', 'dataset.test_loader', (['test_path'], {'batch_size': '(1)', 'num_workers': '(4)', 'pin_memory': '(True)'}), '(test_path, batch_size=1, num_workers=4, pin_memory=True)\n', (3266, 3323), True, 'import utils.dataset as dataset\n'), ((442, 508), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Pytorch Prunning Experiment"""'}), "(description='Pytorch Prunning Experiment')\n", (465, 508), False, 'import argparse\n'), ((1935, 1959), 'os.path.exists', 'os.path.exists', (['log_path'], {}), '(log_path)\n', (1949, 1959), False, 'import os\n'), ((1965, 1983), 'os.mkdir', 'os.mkdir', (['log_path'], {}), '(log_path)\n', (1973, 1983), False, 'import os\n'), ((2561, 2578), 'getpass.getuser', 'getpass.getuser', ([], {}), '()\n', (2576, 2578), False, 'import getpass\n'), ((4019, 4034), 'torch.autograd.Variable', 'Variable', (['batch'], {}), '(batch)\n', (4027, 4034), False, 'from torch.autograd import Variable\n'), ((4077, 4104), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (4102, 4104), False, 'import torch\n'), ((12022, 12039), 'numpy.array', 'np.array', (['fcIndex'], {}), '(fcIndex)\n', (12030, 12039), True, 'import numpy as np\n'), ((14867, 14873), 'time.time', 'time', ([], {}), '()\n', (14871, 14873), False, 'from time import time\n'), ((10472, 10483), 'numpy.array', 'np.array', (['i'], {}), '(i)\n', (10480, 10483), True, 'import numpy as np\n'), ((14913, 14978), 'torch.load', 'torch.load', (['"""./1x1models/finetune/alexnet1x1_Flower102_0.832.pth"""'], {}), "('./1x1models/finetune/alexnet1x1_Flower102_0.832.pth')\n", (14923, 14978), False, 'import torch\n'), ((3565, 3580), 'torch.autograd.Variable', 'Variable', (['batch'], {}), '(batch)\n', (3573, 3580), False, 'from torch.autograd import Variable\n'), ((4206, 4221), 'torch.autograd.Variable', 'Variable', (['label'], {}), '(label)\n', (4214, 4221), False, 'from torch.autograd import Variable\n'), ((5229, 5256), 'torch.save', 'torch.save', (['state', 'filename'], {}), '(state, filename)\n', (5239, 5256), False, 'import torch\n'), ((6104, 6142), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', 'convNumList[0]', '(11)', '(4)', '(2)'], {}), '(3, convNumList[0], 11, 4, 2)\n', (6113, 6142), True, 'import torch.nn as nn\n'), ((6160, 6181), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': 
'(True)'}), '(inplace=True)\n', (6167, 6181), True, 'import torch.nn as nn\n'), ((6199, 6248), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)', 'dilation': '(1)'}), '(kernel_size=3, stride=2, dilation=1)\n', (6211, 6248), True, 'import torch.nn as nn\n'), ((6321, 6451), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'convNumList[0]', 'out_channels': 'convNumList[0]', 'kernel_size': '(1)', 'stride': '(1)', 'groups': 'convNumList[0]', 'bias': '(False)'}), '(in_channels=convNumList[0], out_channels=convNumList[0],\n kernel_size=1, stride=1, groups=convNumList[0], bias=False)\n', (6330, 6451), True, 'import torch.nn as nn\n'), ((6515, 6565), 'torch.nn.Conv2d', 'nn.Conv2d', (['convNumList[0]', 'convNumList[1]', '(5)', '(1)', '(2)'], {}), '(convNumList[0], convNumList[1], 5, 1, 2)\n', (6524, 6565), True, 'import torch.nn as nn\n'), ((6583, 6604), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (6590, 6604), True, 'import torch.nn as nn\n'), ((6622, 6671), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)', 'dilation': '(1)'}), '(kernel_size=3, stride=2, dilation=1)\n', (6634, 6671), True, 'import torch.nn as nn\n'), ((6744, 6874), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'convNumList[1]', 'out_channels': 'convNumList[1]', 'kernel_size': '(1)', 'stride': '(1)', 'groups': 'convNumList[1]', 'bias': '(False)'}), '(in_channels=convNumList[1], out_channels=convNumList[1],\n kernel_size=1, stride=1, groups=convNumList[1], bias=False)\n', (6753, 6874), True, 'import torch.nn as nn\n'), ((6938, 6988), 'torch.nn.Conv2d', 'nn.Conv2d', (['convNumList[1]', 'convNumList[2]', '(3)', '(1)', '(1)'], {}), '(convNumList[1], convNumList[2], 3, 1, 1)\n', (6947, 6988), True, 'import torch.nn as nn\n'), ((7006, 7027), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (7013, 7027), True, 'import torch.nn as nn\n'), ((7100, 7230), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'convNumList[2]', 'out_channels': 'convNumList[2]', 'kernel_size': '(1)', 'stride': '(1)', 'groups': 'convNumList[2]', 'bias': '(False)'}), '(in_channels=convNumList[2], out_channels=convNumList[2],\n kernel_size=1, stride=1, groups=convNumList[2], bias=False)\n', (7109, 7230), True, 'import torch.nn as nn\n'), ((7294, 7344), 'torch.nn.Conv2d', 'nn.Conv2d', (['convNumList[2]', 'convNumList[3]', '(3)', '(1)', '(1)'], {}), '(convNumList[2], convNumList[3], 3, 1, 1)\n', (7303, 7344), True, 'import torch.nn as nn\n'), ((7362, 7383), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (7369, 7383), True, 'import torch.nn as nn\n'), ((7456, 7586), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'convNumList[3]', 'out_channels': 'convNumList[3]', 'kernel_size': '(1)', 'stride': '(1)', 'groups': 'convNumList[3]', 'bias': '(False)'}), '(in_channels=convNumList[3], out_channels=convNumList[3],\n kernel_size=1, stride=1, groups=convNumList[3], bias=False)\n', (7465, 7586), True, 'import torch.nn as nn\n'), ((7650, 7700), 'torch.nn.Conv2d', 'nn.Conv2d', (['convNumList[3]', 'convNumList[4]', '(3)', '(1)', '(1)'], {}), '(convNumList[3], convNumList[4], 3, 1, 1)\n', (7659, 7700), True, 'import torch.nn as nn\n'), ((7718, 7739), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (7725, 7739), True, 'import torch.nn as nn\n'), ((7757, 7806), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)', 'dilation': '(1)'}), '(kernel_size=3, 
stride=2, dilation=1)\n', (7769, 7806), True, 'import torch.nn as nn\n'), ((7879, 8009), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'convNumList[4]', 'out_channels': 'convNumList[4]', 'kernel_size': '(1)', 'stride': '(1)', 'groups': 'convNumList[4]', 'bias': '(False)'}), '(in_channels=convNumList[4], out_channels=convNumList[4],\n kernel_size=1, stride=1, groups=convNumList[4], bias=False)\n', (7888, 8009), True, 'import torch.nn as nn\n'), ((9019, 9031), 'torch.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (9029, 9031), True, 'import torch.nn as nn\n'), ((9045, 9084), 'torch.nn.Linear', 'nn.Linear', (['(convNumList[4] * 6 * 6)', '(4096)'], {}), '(convNumList[4] * 6 * 6, 4096)\n', (9054, 9084), True, 'import torch.nn as nn\n'), ((9098, 9119), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (9105, 9119), True, 'import torch.nn as nn\n'), ((9133, 9145), 'torch.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (9143, 9145), True, 'import torch.nn as nn\n'), ((9159, 9180), 'torch.nn.Linear', 'nn.Linear', (['(4096)', '(4096)'], {}), '(4096, 4096)\n', (9168, 9180), True, 'import torch.nn as nn\n'), ((9194, 9215), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (9201, 9215), True, 'import torch.nn as nn\n'), ((9229, 9257), 'torch.nn.Linear', 'nn.Linear', (['(4096)', 'num_classes'], {}), '(4096, num_classes)\n', (9238, 9257), True, 'import torch.nn as nn\n'), ((11460, 11479), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (11468, 11479), True, 'import numpy as np\n'), ((12426, 12443), 'torch.ones', 'torch.ones', (['shape'], {}), '(shape)\n', (12436, 12443), False, 'import torch\n'), ((4127, 4142), 'torch.autograd.Variable', 'Variable', (['label'], {}), '(label)\n', (4135, 4142), False, 'from torch.autograd import Variable\n'), ((13518, 13540), 'torch.squeeze', 'torch.squeeze', (['para', '(1)'], {}), '(para, 1)\n', (13531, 13540), False, 'import torch\n'), ((15875, 15881), 'time.time', 'time', ([], {}), '()\n', (15879, 15881), False, 'from time import time\n'), ((5595, 5618), 'torch.mean', 'torch.mean', (['state[name]'], {}), '(state[name])\n', (5605, 5618), False, 'import torch\n'), ((5668, 5690), 'torch.std', 'torch.std', (['state[name]'], {}), '(state[name])\n', (5677, 5690), False, 'import torch\n'), ((5516, 5538), 'torch.abs', 'torch.abs', (['state[name]'], {}), '(state[name])\n', (5525, 5538), False, 'import torch\n')] |
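# Minimal sketch of the channel-selection idea used above: each featuresX2 block
# is a grouped 1x1 convolution with one weight per channel, so it only scales its
# feature map, and channels whose learned |weight| falls below the threshold can
# be dropped. The toy sizes below are illustrative.
import torch
import torch.nn as nn

select = nn.Conv2d(4, 4, kernel_size=1, groups=4, bias=False)
with torch.no_grad():
    select.weight.copy_(torch.tensor([1.0, 0.0, 1.0, 0.0]).view(4, 1, 1, 1))
y = select(torch.randn(1, 4, 8, 8))
print(y[0, 1].abs().max().item(), y[0, 3].abs().max().item())  # both 0.0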
import json
import pika
from scrapy.utils.serialize import ScrapyJSONEncoder
class RabbitMQItemPublisherPipeline(object):
def __init__(self, host, port, user, password, virtual_host, exchange, routing_key, queue):
self.host = host
self.port = port
self.user = user
self.password = password
self.virtual_host = virtual_host
credentials = pika.PlainCredentials(self.user, self.password)
parameters = pika.ConnectionParameters(self.host,
self.port,
self.virtual_host,
credentials)
self.connection = pika.BlockingConnection(parameters=parameters)
self.channel = self.connection.channel()
self.exchange = exchange
self.routing_key = routing_key
self.queue = queue
self.channel.exchange_declare(exchange=exchange,
exchange_type="direct",
durable=True)
self.channel.queue_declare(queue=queue,
durable=True)
self.channel.queue_bind(exchange=exchange,
routing_key=routing_key,
queue=queue)
self.encoder = ScrapyJSONEncoder()
@classmethod
def from_crawler(cls, crawler):
return cls(
host=crawler.settings.get("RABBITMQ_HOST"),
port=crawler.settings.get("RABBITMQ_PORT"),
user=crawler.settings.get("RABBITMQ_USER"),
password=crawler.settings.get("RABBITMQ_PASSWORD"),
virtual_host=crawler.settings.get("RABBITMQ_VIRTUAL_HOST"),
exchange=crawler.settings.get("RABBITMQ_EXCHANGE"),
routing_key=crawler.settings.get("RABBITMQ_ROUTING_KEY"),
queue=crawler.settings.get("RABBITMQ_QUEUE"),
)
def close_spider(self, spider):
self.channel.close()
self.connection.close()
def process_item(self, item, spider):
data = self.encoder.encode(item)
self.channel.basic_publish(
exchange=self.exchange,
routing_key=self.routing_key,
body=data,
)
return item
| [
"pika.ConnectionParameters",
"pika.BlockingConnection",
"pika.PlainCredentials",
"scrapy.utils.serialize.ScrapyJSONEncoder"
] | [((392, 439), 'pika.PlainCredentials', 'pika.PlainCredentials', (['self.user', 'self.password'], {}), '(self.user, self.password)\n', (413, 439), False, 'import pika\n'), ((461, 540), 'pika.ConnectionParameters', 'pika.ConnectionParameters', (['self.host', 'self.port', 'self.virtual_host', 'credentials'], {}), '(self.host, self.port, self.virtual_host, credentials)\n', (486, 540), False, 'import pika\n'), ((708, 754), 'pika.BlockingConnection', 'pika.BlockingConnection', ([], {'parameters': 'parameters'}), '(parameters=parameters)\n', (731, 754), False, 'import pika\n'), ((1347, 1366), 'scrapy.utils.serialize.ScrapyJSONEncoder', 'ScrapyJSONEncoder', ([], {}), '()\n', (1364, 1366), False, 'from scrapy.utils.serialize import ScrapyJSONEncoder\n')] |
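# Sketch of the Scrapy settings the pipeline above reads in from_crawler. The
# pipeline module path and the broker credentials are placeholders/assumptions;
# only the setting names themselves come from the code.
ITEM_PIPELINES = {
    'myproject.pipelines.RabbitMQItemPublisherPipeline': 300,  # assumed module path
}
RABBITMQ_HOST = 'localhost'
RABBITMQ_PORT = 5672
RABBITMQ_USER = 'guest'
RABBITMQ_PASSWORD = 'guest'
RABBITMQ_VIRTUAL_HOST = '/'
RABBITMQ_EXCHANGE = 'scrapy'
RABBITMQ_ROUTING_KEY = 'items'
RABBITMQ_QUEUE = 'items'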
'''
Test functions for simphony.core
'''
import pytest
import copy
import numpy as np
import simphony.core as core
import simphony.errors as errors
import simphony.DeviceLibrary.ebeam as dev
import simphony.simulation as sim
from simphony.core import register_component_model, deregister_component_model
class TestBase:
def test_ComponentModel_duplicity(self):
""" Tests whether you can have two models with the same name.
Should raise an error since the ebeam library was already
imported, and this class shares the name of one of the existing
devices.
"""
fake_s_params = ([1500, 1550, 1600], [0,0,0])
with pytest.raises(errors.DuplicateModelError):
@register_component_model
class ebeam_bdc_te1550(core.ComponentModel):
ports = 4
s_parameters = fake_s_params
cachable = True
def test_ComponentModel_cachable(self):
fake_s_params = ([1500, 1550, 1600], [0,0,0])
@register_component_model
class RingResonator(core.ComponentModel):
ports = 4
s_parameters = fake_s_params
cachable = True
assert fake_s_params == RingResonator.get_s_parameters()
assert fake_s_params == RingResonator.get_s_parameters(fake_keyword=3, faker_keyword="long")
deregister_component_model('RingResonator')
with pytest.raises(errors.CachableParametersError):
@register_component_model
class RingResonator(core.ComponentModel):
ports = 4
cachable = True
def test_ComponentModel_uncachable(self):
fake_s_params = ([1500, 1550, 1600], [0,0,0])
with pytest.raises(errors.UncachableParametersError):
@register_component_model
class RingResonator(core.ComponentModel):
ports = 4
s_parameters = fake_s_params
cachable = False
with pytest.raises(errors.UncachableParametersError):
@register_component_model
class RingResonator(core.ComponentModel):
ports = 4
cachable = False
def test_ComponentInstance_cachableModel(self):
fake_s_params = ([1500, 1550, 1600], [0,0,0])
@register_component_model
class RingResonator(core.ComponentModel):
ports = 4
s_parameters = fake_s_params
cachable = True
ci1 = core.ComponentInstance(RingResonator, [0,1,2,3], {'extras':'should be ignored'})
assert RingResonator.get_s_parameters() == fake_s_params
assert RingResonator.get_s_parameters() == ci1.get_s_parameters()
assert ci1.get_s_parameters() == fake_s_params
deregister_component_model('RingResonator')
def test_ComponentInstance_uncachableModel(self):
@register_component_model
class Waveguide(core.ComponentModel):
ports = 2
cachable = False
@classmethod
def s_parameters(cls, freq, length, height):
# Some random equation just to allow us to see the effects of the function
return height*np.sin(freq)+length
extras = {
'freq': np.linspace(0,2*np.pi),
'length': 2,
'height':0.5
}
ci1 = core.ComponentInstance(Waveguide, [0,1], extras)
expected = Waveguide.s_parameters(extras['freq'], extras['length'], extras['height'])
assert np.array_equal(expected, ci1.get_s_parameters())
assert np.array_equal(expected, Waveguide.get_s_parameters(**extras))
deregister_component_model('Waveguide')
class TestNetlist:
@classmethod
def setup(cls):
bdc1 = core.ComponentInstance(dev.ebeam_bdc_te1550, [0, 1, 2, 3])
term1 = core.ComponentInstance(dev.ebeam_terminator_te1550, [2])
y1 = core.ComponentInstance(dev.ebeam_y_1550, [-1, 0, 1])
dc1 = core.ComponentInstance(dev.ebeam_dc_halfring_te1550, [3, -2])
cls.components = [bdc1, term1, y1, dc1]
def test_netlist_InstancesFromComponentModels(self):
@register_component_model
class RingResonator(core.ComponentModel):
ports = 4
s_parameters = ([1500, 1550, 1600], [0,0,0])
cachable = True
nl = core.Netlist()
c1 = core.ComponentInstance(RingResonator, [0,1,2,3], {'lay_x':3.1, 'lay_y':4})
c2 = core.ComponentInstance(RingResonator, [4,1,2,5])
nl.add_component(c1)
nl.add_component(c2)
assert len(nl.components) == 2
deregister_component_model('RingResonator')
def test_parsing_listoflists(self):
bdc1 = core.ComponentInstance(dev.ebeam_bdc_te1550)
term1 = core.ComponentInstance(dev.ebeam_terminator_te1550)
y1 = core.ComponentInstance(dev.ebeam_y_1550)
gc1 = core.ComponentInstance(dev.ebeam_gc_te1550)
c1 = [bdc1, bdc1, bdc1, bdc1]
p1 = [0, 1, 2, 3]
c2 = [y1, y1, term1, gc1]
p2 = [0, 1, 0, 0]
data = zip(c1, p1, c2, p2)
nl = core.Netlist()
nl.load(data, formatter='ll')
# TODO: Figure out what the actually correct connections are in the
# netlist and verify them.
def test_Netlist_parameterized_initialization(self):
self.nl = core.Netlist(components=self.components)
assert self.nl.net_count == 4
assert len(self.nl.components) == len(self.components)
# TODO: Figure out what the actually correct connections are in the
# netlist and verify them.
def test_Netlist_unparameterized_initialization(self):
self.nl = core.Netlist()
for i in range(len(self.components)):
self.nl.add_component(self.components[i])
assert len(self.nl.components) == len(self.components)
# TODO: Figure out what the actually correct connections are in the
# netlist and verify them.
# def test_Netlist_externals(self):
# self.nl = core.Netlist(components=self.components)
# expected = [comp for comp in self.components if any(x < 0 for x in comp.nets)]
# actual = self.nl.get_external_components()
# assert len(expected) == len(actual)
# for item in expected:
# assert item in actual
| [
"simphony.core.Netlist",
"simphony.core.ComponentInstance",
"numpy.linspace",
"pytest.raises",
"numpy.sin",
"simphony.core.deregister_component_model"
] | [((1382, 1425), 'simphony.core.deregister_component_model', 'deregister_component_model', (['"""RingResonator"""'], {}), "('RingResonator')\n", (1408, 1425), False, 'from simphony.core import register_component_model, deregister_component_model\n'), ((2509, 2597), 'simphony.core.ComponentInstance', 'core.ComponentInstance', (['RingResonator', '[0, 1, 2, 3]', "{'extras': 'should be ignored'}"], {}), "(RingResonator, [0, 1, 2, 3], {'extras':\n 'should be ignored'})\n", (2531, 2597), True, 'import simphony.core as core\n'), ((2792, 2835), 'simphony.core.deregister_component_model', 'deregister_component_model', (['"""RingResonator"""'], {}), "('RingResonator')\n", (2818, 2835), False, 'from simphony.core import register_component_model, deregister_component_model\n'), ((3384, 3433), 'simphony.core.ComponentInstance', 'core.ComponentInstance', (['Waveguide', '[0, 1]', 'extras'], {}), '(Waveguide, [0, 1], extras)\n', (3406, 3433), True, 'import simphony.core as core\n'), ((3677, 3716), 'simphony.core.deregister_component_model', 'deregister_component_model', (['"""Waveguide"""'], {}), "('Waveguide')\n", (3703, 3716), False, 'from simphony.core import register_component_model, deregister_component_model\n'), ((3789, 3847), 'simphony.core.ComponentInstance', 'core.ComponentInstance', (['dev.ebeam_bdc_te1550', '[0, 1, 2, 3]'], {}), '(dev.ebeam_bdc_te1550, [0, 1, 2, 3])\n', (3811, 3847), True, 'import simphony.core as core\n'), ((3864, 3920), 'simphony.core.ComponentInstance', 'core.ComponentInstance', (['dev.ebeam_terminator_te1550', '[2]'], {}), '(dev.ebeam_terminator_te1550, [2])\n', (3886, 3920), True, 'import simphony.core as core\n'), ((3934, 3986), 'simphony.core.ComponentInstance', 'core.ComponentInstance', (['dev.ebeam_y_1550', '[-1, 0, 1]'], {}), '(dev.ebeam_y_1550, [-1, 0, 1])\n', (3956, 3986), True, 'import simphony.core as core\n'), ((4001, 4062), 'simphony.core.ComponentInstance', 'core.ComponentInstance', (['dev.ebeam_dc_halfring_te1550', '[3, -2]'], {}), '(dev.ebeam_dc_halfring_te1550, [3, -2])\n', (4023, 4062), True, 'import simphony.core as core\n'), ((4386, 4400), 'simphony.core.Netlist', 'core.Netlist', ([], {}), '()\n', (4398, 4400), True, 'import simphony.core as core\n'), ((4414, 4493), 'simphony.core.ComponentInstance', 'core.ComponentInstance', (['RingResonator', '[0, 1, 2, 3]', "{'lay_x': 3.1, 'lay_y': 4}"], {}), "(RingResonator, [0, 1, 2, 3], {'lay_x': 3.1, 'lay_y': 4})\n", (4436, 4493), True, 'import simphony.core as core\n'), ((4502, 4553), 'simphony.core.ComponentInstance', 'core.ComponentInstance', (['RingResonator', '[4, 1, 2, 5]'], {}), '(RingResonator, [4, 1, 2, 5])\n', (4524, 4553), True, 'import simphony.core as core\n'), ((4656, 4699), 'simphony.core.deregister_component_model', 'deregister_component_model', (['"""RingResonator"""'], {}), "('RingResonator')\n", (4682, 4699), False, 'from simphony.core import register_component_model, deregister_component_model\n'), ((4756, 4800), 'simphony.core.ComponentInstance', 'core.ComponentInstance', (['dev.ebeam_bdc_te1550'], {}), '(dev.ebeam_bdc_te1550)\n', (4778, 4800), True, 'import simphony.core as core\n'), ((4817, 4868), 'simphony.core.ComponentInstance', 'core.ComponentInstance', (['dev.ebeam_terminator_te1550'], {}), '(dev.ebeam_terminator_te1550)\n', (4839, 4868), True, 'import simphony.core as core\n'), ((4882, 4922), 'simphony.core.ComponentInstance', 'core.ComponentInstance', (['dev.ebeam_y_1550'], {}), '(dev.ebeam_y_1550)\n', (4904, 4922), True, 'import simphony.core as core\n'), ((4937, 4980), 
'simphony.core.ComponentInstance', 'core.ComponentInstance', (['dev.ebeam_gc_te1550'], {}), '(dev.ebeam_gc_te1550)\n', (4959, 4980), True, 'import simphony.core as core\n'), ((5162, 5176), 'simphony.core.Netlist', 'core.Netlist', ([], {}), '()\n', (5174, 5176), True, 'import simphony.core as core\n'), ((5402, 5442), 'simphony.core.Netlist', 'core.Netlist', ([], {'components': 'self.components'}), '(components=self.components)\n', (5414, 5442), True, 'import simphony.core as core\n'), ((5733, 5747), 'simphony.core.Netlist', 'core.Netlist', ([], {}), '()\n', (5745, 5747), True, 'import simphony.core as core\n'), ((674, 715), 'pytest.raises', 'pytest.raises', (['errors.DuplicateModelError'], {}), '(errors.DuplicateModelError)\n', (687, 715), False, 'import pytest\n'), ((1440, 1485), 'pytest.raises', 'pytest.raises', (['errors.CachableParametersError'], {}), '(errors.CachableParametersError)\n', (1453, 1485), False, 'import pytest\n'), ((1752, 1799), 'pytest.raises', 'pytest.raises', (['errors.UncachableParametersError'], {}), '(errors.UncachableParametersError)\n', (1765, 1799), False, 'import pytest\n'), ((2011, 2058), 'pytest.raises', 'pytest.raises', (['errors.UncachableParametersError'], {}), '(errors.UncachableParametersError)\n', (2024, 2058), False, 'import pytest\n'), ((3286, 3311), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)'], {}), '(0, 2 * np.pi)\n', (3297, 3311), True, 'import numpy as np\n'), ((3226, 3238), 'numpy.sin', 'np.sin', (['freq'], {}), '(freq)\n', (3232, 3238), True, 'import numpy as np\n')] |
import json
from collections import OrderedDict
from pathlib import Path
from typing import List
import h5py
import numpy as np
import torch
import torch.utils.data as data
from easydict import EasyDict
from tqdm import tqdm
from utils_collection import general
from utils_collection.text_embedding import preprocess_bert_paragraph
import pickle as pck
import os
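# Preloading cap: passing a debug_size smaller than this limit truncates preloading
# (and the reported dataset length) so runs finish quickly while debugging.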
DBG_LOADER_LIMIT = 10000
class BertTextFeatureLoader:
def __init__(
self, dataset_path_dict: EasyDict, ids, preload=True, debug_size: int=DBG_LOADER_LIMIT):
self.h5_path = dataset_path_dict["language_feats"]
lens_file = dataset_path_dict["meta_text_len"]
l_file = lens_file.open("rt", encoding="utf8")
self.lens = json.load(l_file)
l_file.close()
self.cached_data = None
if preload:
h5file = h5py.File(self.h5_path, "r")
self.cached_data = {}
mod_keys = list(ids.keys())
i = 0
for id_ in tqdm(ids[mod_keys[0]], desc="preload text"):
np_array = h5file[id_]
shared_array = general.make_shared_array(np_array)
self.cached_data[id_] = shared_array
if debug_size < DBG_LOADER_LIMIT: # For quick debugging we limit the data loading to debug_size
if i > debug_size:
break
i += 1
h5file.close()
def __getitem__(self, id_):
lens = self.lens[id_]
if self.cached_data is None:
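# Features were not preloaded, so read this id on demand from the HDF5 file.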
h5file = h5py.File(self.h5_path, "r")
features = np.array(h5file[id_])
h5file.close()
return features, lens
return self.cached_data[id_], lens
class ActivityNetVideoFeatureLoader:
def __init__(self, dataset_path: Path, ids: List[str], preload: bool):
self.dataset_path = Path(dataset_path)
self.features_path = (dataset_path / "features" /
"ICEP_V3_global_pool_skip_8_direct_resize")
self.cached_data = None
if preload:
self.cached_data = {}
for id_ in tqdm(ids, desc="preload videos"):
np_array = self.load_from_file(id_)
shared_array = general.make_shared_array(np_array)
self.cached_data[id_] = shared_array
def __getitem__(self, id_):
if self.cached_data is None:
return self.load_from_file(id_)
else:
return self.cached_data[id_]
def load_from_file(self, id_):
return np.load(str(self.features_path / f"{id_}.npz"))[
"frame_scores"].squeeze(1).squeeze(2).squeeze(2)
class LSMDCVideoFeatureLoader:
def __init__(
self, dataset_path_dict: EasyDict, ids: List[str],
preload: bool, data_split, debug_size: int=DBG_LOADER_LIMIT):
self.h5_path = {}
for i_mod in dataset_path_dict["video_feats"]:
self.h5_path[i_mod] = dataset_path_dict["video_feats"][i_mod]
self.cached_data = None
self.features_source = "h5"
self.data_keys = ids
self.data_split = data_split
if preload:
self.cached_data = {}
# buffer data to memory
for i_mod in dataset_path_dict["video_feats"]:
self.cached_data[i_mod] = {}
for i_mod in dataset_path_dict["video_feats"]:
h5 = h5py.File(self.h5_path[i_mod], "r")
i = 0
for key in tqdm(self.data_keys[i_mod], desc="Preloading {} - modality: {} ==>".format(self.data_split, i_mod)):
data = h5[key]
self.cached_data[i_mod][key] = general.make_shared_array(data)
if debug_size < DBG_LOADER_LIMIT: # limit the dataloading for quick debugging
if i > debug_size:
break
i += 1
def get_features_by_key(self, item: str) -> np.ndarray:
"""
Given feature key, load the feature.
Args:
item: Key.
Returns:
Feature data array with shape (num_frames, feature_dim)
"""
if self.features_source == "h5":
# load from h5
h5 = h5py.File(self.h5_path, "r")
return np.array(h5[item])
if self.features_source == "npz_activitynet":
# load from single npz file
# this is specific to the activitynet inception features
return np.load(str(self.dataset_path / "features" / self.features_name / f"v_{item}.npz")
)["frame_scores"].squeeze(1).squeeze(2).squeeze(2)
raise NotImplementedError(f"Feature source type {self.features_source} not understood.")
def get_features_as_items(self, load_all: bool = False, i_mod: str = "action"):
"""
Iterator for key, value pairs of all features.
Args:
load_all: If true, ignores the provided data keys and loops everything in the path.
Yields:
Tuple of feature key and feature data array with shape (num_frames, feature_dim)
"""
if self.features_source == "h5":
# load from h5
h5 = h5py.File(self.h5_path[i_mod], "r")
if load_all:
for key, data in h5.items():
yield key, data
else:
for key in self.data_keys:
yield key, h5[key]
elif self.features_source == "npz_activitynet":
# load from npz for activitynet
if load_all:
files = os.listdir(self.dataset_path / "features" / self.features_name)
for file in files:
data_key = file[2:-4] # extract youtube id from v_###########.npz
yield data_key, self.get_features_by_key(data_key)
else:
for data_key in self.data_keys:
yield data_key, self.get_features_by_key(data_key)
else:
raise NotImplementedError(f"Feature source type {self.features_source} not understood.")
def __getitem__(self, id_):
if self.cached_data is None:
h5file = h5py.File(self.h5_path, "r")
features = np.array(h5file[id_])
h5file.close()
return features
else:
return self.cached_data[id_]
class LSMDCVideoPickleLoader:
def __init__(
self, dataset_path_dict: EasyDict, ids: List[str],
preload: bool, data_split, debug_size: int=DBG_LOADER_LIMIT):
self.h5_path = {}
for i_mod in dataset_path_dict["video_feats"]:
self.h5_path[i_mod] = dataset_path_dict["video_feats"][i_mod]
self.cached_data = None
self.features_source = "h5"
self.data_keys = ids
self.data_split = data_split
self.pickle_folder = dataset_path_dict["pickle_path"]
self.pck_folder = os.path.join(self.pickle_folder, self.data_split)
self.cached_data = None
i = 0
if preload:
self.cached_data = {}
mod_name = list(dataset_path_dict["video_feats"])[0]
for id_ in tqdm(ids[mod_name], desc="preload videos"):
self.cached_data[id_] = self.load_from_file(id_)
if debug_size < DBG_LOADER_LIMIT: # limit the dataloading for quick debugging
if i > debug_size:
break
i += 1
def __getitem__(self, id_):
if self.cached_data is None:
return self.load_from_file(id_)
else:
return self.cached_data[id_]
def load_from_file(self, id_):
pck_file = os.path.join(self.pck_folder, 'id_' + str(id_) + '_feat.pickle')
with open(pck_file, 'rb') as pickle_file:
data = pck.load(pickle_file)
return data
class LSMDCVideoPickleSaver:
def __init__(
self, dataset_path_dict: EasyDict, ids: List[str],
preload: bool, data_split, debug_size: int=DBG_LOADER_LIMIT):
self.h5_path = {}
for i_mod in dataset_path_dict["video_feats"]:
self.h5_path[i_mod] = dataset_path_dict["video_feats"][i_mod]
self.cached_data = None
self.features_source = "h5"
self.data_keys = ids
self.data_split = data_split
self.save_folder = dataset_path_dict["pickle_path"]
# self.h5_path['object'] = "/mnt/efs/fs1/workspace/experiments/data/lsmdc16/debug/modality_experts"
pck_obj_folder = os.path.join(self.save_folder, self.data_split)
os.makedirs(pck_obj_folder, exist_ok=True)
self.cached_data = {}
h5_all = {}
# buffer data to memory
for i_mod in dataset_path_dict["video_feats"]:
self.cached_data[i_mod] = {}
h5_all[i_mod] = []
for i_mod in dataset_path_dict["video_feats"]:
h5_all[i_mod] = h5py.File(self.h5_path[i_mod], "r")
i = 0
for key in tqdm(self.data_keys[i_mod], desc="Preloading {} - modality: {} ==>".format(self.data_split, i_mod)):
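# Gather this clip's features from every modality and write them to a single pickle file.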
data_pck= {}
for i_mod in dataset_path_dict["video_feats"]:
print(i_mod, key)
data = h5_all[i_mod][key]
data_pck[i_mod] = general.make_shared_array(data)
gp_obj_file = os.path.join(pck_obj_folder, 'id_' + str(key) + '_feat.pickle')
with open(gp_obj_file, 'wb') as f:
pck.dump(data_pck, f, pck.HIGHEST_PROTOCOL)
if debug_size < DBG_LOADER_LIMIT: # limit the dataloading for quick debugging
if i > debug_size:
break
i += 1
def __getitem__(self, id_):
return self.cached_data[id_]
class Youcook2VideoFeatureLoader:
def __init__(
self, dataset_path_dict: EasyDict, ids: List[str],
preload: bool):
self.h5_path = dataset_path_dict["video_feats"]
self.cached_data = None
if preload:
self.cached_data = {}
h5file = h5py.File(self.h5_path, "r")
for id_ in tqdm(ids, desc="preload videos"):
np_array = h5file[id_]
shared_array = general.make_shared_array(np_array)
self.cached_data[id_] = shared_array
def __getitem__(self, id_):
if self.cached_data is None:
h5file = h5py.File(self.h5_path, "r")
features = np.array(h5file[id_])
h5file.close()
return features
else:
return self.cached_data[id_]
class VideoDatasetFeatures(data.Dataset):
def __init__(
self, dataset_path_dict: EasyDict,
split: str, max_frames: int, is_train: bool,
preload_vid_feat: bool, preload_text_feat: bool,
frames_noise: float, debug_size: int=DBG_LOADER_LIMIT, pickle_path=None):
self.frames_noise = frames_noise
self.split = split
self.max_frames = max_frames
self.is_train = is_train
self.load_pickle = (pickle_path == "")
self.debug_size = debug_size
meta_file = dataset_path_dict["meta_data"]
self.vids_dict = {}
for i_meta, i_path in meta_file.items():
json_file = i_path.open("rt", encoding="utf8")
self.vids_dict[i_meta] = json.load(json_file,
object_pairs_hook=OrderedDict)
json_file.close()
self.ids = {}
self.modalities = []
# print(self.split)
for i_mod, i_dict in self.vids_dict.items():
self.modalities.append(i_mod)
self.ids[i_mod] = [key for key, val in i_dict.items(
) if val["split"] == self.split]
print("init modality {} of dataset {} split {} length {} ".format(i_mod, dataset_path_dict["dataset_name"], split, len(self.ids[i_mod])))
if dataset_path_dict["dataset_name"] == "lsmdc16":
self.preproc_par_fn = preprocess_bert_paragraph
self.text_data = BertTextFeatureLoader(
dataset_path_dict, self.ids, preload_text_feat, debug_size=self.debug_size)
# self.vid_data = LSMDCVideoFeatureLoader(
# dataset_path_dict, self.ids, preload_vid_feat, self.split, debug_size=self.debug_size)
if pickle_path == "":
print("==> Start loading hdf5 files ... (Might be slower and needs more memory)")
self.vid_data = LSMDCVideoFeatureLoader(
dataset_path_dict, self.ids, preload_vid_feat, self.split, debug_size=self.debug_size)
else:
print("==> Start loading pickle files ...")
self.vid_data = LSMDCVideoPickleLoader(
dataset_path_dict, self.ids, preload_vid_feat, self.split, debug_size=self.debug_size)
elif dataset_path_dict["dataset_name"] == "youcook2":
self.preproc_par_fn = preprocess_bert_paragraph
self.text_data = BertTextFeatureLoader(
dataset_path_dict, self.ids, preload_text_feat)
self.vid_data = Youcook2VideoFeatureLoader(
dataset_path_dict, self.ids, preload_vid_feat)
else:
raise NotImplementedError
def get_frames_from_video(
self, vid_id, indices=None, num_frames=None, modality_name: str = "action"):
vid_dict = self.vids_dict[modality_name][vid_id]
vid_len = vid_dict["num_frames"]
if num_frames is not None:
indices = general.compute_indices(
vid_len, num_frames, self.is_train)
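# Note: load_pickle is True when pickle_path == "", i.e. when the HDF5 loader was built;
# its cache is indexed [modality][vid_id], while the pickle loader is indexed [vid_id][modality].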
if self.load_pickle:
frames = self.vid_data[modality_name][vid_id][indices]
else:
frames = self.vid_data[vid_id][modality_name][indices]
#
# print("frames: ", frames)
return frames
def get_frames_from_segment(
self, vid_id, seg_num, num_frames, modality_name: str = "action"):
vid_dict = self.vids_dict[modality_name][vid_id]
seg = vid_dict["segments"][seg_num]
start_frame = seg["start_frame"]
seg_len = seg["num_frames"]
indices = general.compute_indices(seg_len, num_frames, self.is_train)
indices += start_frame
frames = self.get_frames_from_video(vid_id, indices, modality_name=modality_name)
return frames
def __len__(self):
if self.debug_size < DBG_LOADER_LIMIT:
return self.debug_size
else:
return len(self.ids[self.modalities[0]])
def __getitem__(self, index):
get_data = {}
for i_mod in self.modalities:
get_data[i_mod]={}
vid_id = self.ids[i_mod][index]
vid_dict = self.vids_dict[i_mod][vid_id]
clip_num = len(vid_dict["segments"])
sent_num = len(vid_dict["segments"])
# load video frames
vid_frames_len = vid_dict["num_frames"]
# print(i_mod, "vid_frames_len: ", vid_frames_len)
if vid_frames_len > self.max_frames:
vid_frames_len = self.max_frames
vid_frames = torch.tensor(self.get_frames_from_video(
vid_id, num_frames=vid_frames_len, modality_name=i_mod))
vid_frames_len = int(vid_frames.shape[0])
if self.frames_noise != 0:
vid_frames_noise = general.truncated_normal_fill(
vid_frames.shape, std=self.frames_noise)
vid_frames += vid_frames_noise
# load segment frames
clip_frames_list = []
clip_frames_len_list = []
for i, seg in enumerate(vid_dict["segments"]):
c_num_frames = seg["num_frames"]
# print("num_frames: ", c_num_frames)
if c_num_frames > self.max_frames:
c_num_frames = self.max_frames
c_frames = self.get_frames_from_segment(
vid_id, i, num_frames=c_num_frames, modality_name=i_mod)
c_frames = torch.tensor(c_frames)
if self.frames_noise != 0:
clip_frames_noise = general.truncated_normal_fill(
c_frames.shape, std=self.frames_noise)
c_frames += clip_frames_noise
clip_frames_list.append(c_frames)
clip_frames_len_list.append(c_frames.shape[0])
# print(clip_frames_len_list)
# print("-0-"*20)
# load text
seg_narrations = []
for seg in vid_dict["segments"]:
seg_narr = seg["narration"]
if seg_narr is None:
seg_narr = "undefined"
print("WARNING: Undefined text tokens "
"(no narration data, is this a test set?)")
seg_narrations.append(seg_narr)
list_of_list_of_words = self.preproc_par_fn(seg_narrations)
# load precomputed text features
par_cap_vectors, sent_cap_len_list = self.text_data[vid_id]
par_cap_len = int(par_cap_vectors.shape[0])
par_cap_vectors = torch.tensor(par_cap_vectors).float()
# split paragraph features into sentences
sent_cap_vectors_list = []
pointer = 0
for i, sent_cap_len in enumerate(sent_cap_len_list):
sent_cap_vectors = par_cap_vectors[
pointer:pointer + sent_cap_len, :]
sent_cap_vectors_list.append(sent_cap_vectors)
pointer += sent_cap_len
# print(vid_id, "====>")
get_data[i_mod]={
"vid_id": vid_id,
"data_words": list_of_list_of_words,
"vid_frames": vid_frames,
"vid_frames_len": vid_frames_len,
"par_cap_vectors": par_cap_vectors,
"par_cap_len": par_cap_len,
"clip_num": clip_num,
"sent_num": sent_num,
"clip_frames_list": clip_frames_list,
"clip_frames_len_list": clip_frames_len_list,
"sent_cap_len_list": sent_cap_len_list,
"sent_cap_vectors_list": sent_cap_vectors_list
}
return get_data
def collate_fn(self, data_batch):
def get_data(i_mod, key):
return [d[i_mod][key] for d in data_batch]
batch_size = len(data_batch)
batch_data = {}
for i_mod in self.modalities:
# collate video frames
batch_data[i_mod] = {}
list_vid_frames = get_data(i_mod, "vid_frames")
list_vid_frames_len = get_data(i_mod, "vid_frames_len")
vid_feature_dim = list_vid_frames[0].shape[-1]
vid_frames_len = torch.tensor(list_vid_frames_len).long()
vid_frames_max_seq_len = int(vid_frames_len.max().numpy())
vid_frames = torch.zeros(
batch_size, vid_frames_max_seq_len, vid_feature_dim).float()
vid_frames_mask = torch.zeros(batch_size, vid_frames_max_seq_len)
for batch, (seq_len, item) in enumerate(zip(
list_vid_frames_len, list_vid_frames)):
vid_frames[batch, :seq_len] = item
vid_frames_mask[batch, :seq_len] = 1
# collate paragraph features
list_par_cap_len = get_data(i_mod, "par_cap_len")
list_par_cap_vectors = get_data(i_mod, "par_cap_vectors")
par_feature_dim = list_par_cap_vectors[0].shape[-1]
par_cap_len = torch.tensor(list_par_cap_len).long()
par_cap_max_len = int(par_cap_len.max().numpy())
par_cap_vectors = torch.zeros(
batch_size, par_cap_max_len, par_feature_dim).float()
par_cap_mask = torch.zeros(batch_size, par_cap_max_len)
for batch, (seq_len, item) in enumerate(
zip(list_par_cap_len, list_par_cap_vectors)):
par_cap_vectors[batch, :seq_len, :] = item
par_cap_mask[batch, :seq_len] = 1
# collate clip frames
list_clip_num = get_data(i_mod, "clip_num")
clip_num = torch.tensor(list_clip_num).long()
total_clip_num = int(np.sum(list_clip_num))
list_clip_frames_len_list = get_data(i_mod, "clip_frames_len_list")
clip_frames_max_len = int(np.max(
[np.max(len_single) for len_single in list_clip_frames_len_list]))
clip_frames = torch.zeros((
total_clip_num, clip_frames_max_len, vid_feature_dim)).float()
clip_frames_mask = torch.zeros(
(total_clip_num, clip_frames_max_len))
list_clip_frames_list = get_data(i_mod, "clip_frames_list")
clip_frames_len = []
c_num = 0
for batch, clip_frames_list in enumerate(list_clip_frames_list):
for i, clip_frames_item in enumerate(clip_frames_list):
clip_frames_len_item = int(clip_frames_item.shape[0])
clip_frames[c_num, :clip_frames_len_item, :] =\
clip_frames_item
clip_frames_mask[c_num, :clip_frames_len_item] = 1
clip_frames_len.append(clip_frames_len_item)
c_num += 1
clip_frames_len = torch.tensor(clip_frames_len).long()
# collate sentence features
list_sent_num = get_data(i_mod, "sent_num")
sent_num = torch.tensor(list_sent_num).long()
total_sent_num = int(np.sum(list_sent_num))
list_sent_cap_len_list = get_data(i_mod, "sent_cap_len_list")
sent_cap_max_len = int(np.max(
[np.max(len_single) for len_single in list_sent_cap_len_list]))
sent_cap_len = []
sent_cap_mask = torch.zeros(
(total_sent_num, sent_cap_max_len)).long()
cap_feature_dim = list_par_cap_vectors[0].shape[-1]
sent_cap_vectors = torch.zeros(
(total_sent_num, sent_cap_max_len, cap_feature_dim))
c_num = 0
for batch, sent_cap_len_list in enumerate(
list_sent_cap_len_list):
pointer = 0
for sent_cap_len_item in sent_cap_len_list:
sent_cap_vectors[c_num, :sent_cap_len_item] =\
par_cap_vectors[
batch, pointer:pointer + sent_cap_len_item]
sent_cap_mask[c_num, :sent_cap_len_item] = 1
sent_cap_len.append(sent_cap_len_item)
c_num += 1
pointer += sent_cap_len_item
sent_cap_len = torch.tensor(sent_cap_len).long()
batch_data[i_mod] = {
"vid_frames": vid_frames,
"vid_frames_mask": vid_frames_mask,
"vid_frames_len": vid_frames_len,
"par_cap_vectors": par_cap_vectors,
"par_cap_mask": par_cap_mask,
"par_cap_len": par_cap_len,
"clip_num": clip_num,
"clip_frames": clip_frames,
"clip_frames_len": clip_frames_len,
"clip_frames_mask": clip_frames_mask,
"sent_num": sent_num,
"sent_cap_vectors": sent_cap_vectors,
"sent_cap_mask": sent_cap_mask,
"sent_cap_len": sent_cap_len,
"vid_id": get_data(i_mod, "vid_id"),
"data_words": get_data(i_mod, "data_words")
}
return batch_data
def create_datasets(
dataset_path_dict: EasyDict, cfg: EasyDict, preload_vid_feat: bool,
preload_text_feat: bool, eval=False, test=False,
debug_train_size: int=DBG_LOADER_LIMIT, debug_val_size: int=DBG_LOADER_LIMIT,
debug_test_size: int=DBG_LOADER_LIMIT, pickle_path=None):
if eval:
val_set = VideoDatasetFeatures(dataset_path_dict,
cfg.dataset.val_split, cfg.dataset.max_frames, False, preload_vid_feat,
preload_text_feat, 0, pickle_path=pickle_path)
return val_set
if test:
test_set = VideoDatasetFeatures(dataset_path_dict,cfg.dataset.test_split, cfg.dataset.max_frames,
False, preload_vid_feat,preload_text_feat, 0, debug_size=debug_test_size, pickle_path=pickle_path)
return test_set
# print("Train loader", "00"*20)
train_set = VideoDatasetFeatures(dataset_path_dict,
cfg.dataset.train_split, cfg.dataset.max_frames, True,
preload_vid_feat, preload_text_feat, cfg.dataset.frames_noise, debug_size=debug_train_size, pickle_path=pickle_path)
# print("Val loader", "00"*20)
val_set = VideoDatasetFeatures(dataset_path_dict,
cfg.dataset.val_split, cfg.dataset.max_frames, False, preload_vid_feat,
preload_text_feat, 0, debug_size=debug_val_size, pickle_path=pickle_path)
return train_set, val_set
def create_loaders(
train_set: VideoDatasetFeatures, val_set: VideoDatasetFeatures, test_set: VideoDatasetFeatures,
batch_size: int, num_workers: int, eval=False):
if eval:
if val_set is not None:
val_loader = data.DataLoader(
val_set, batch_size=batch_size, shuffle=False,
num_workers=num_workers, collate_fn=val_set.collate_fn,
pin_memory=True)
return val_loader
if test_set is not None:
test_loader = data.DataLoader(
test_set, batch_size=batch_size, shuffle=False,
num_workers=num_workers, collate_fn=test_set.collate_fn,
pin_memory=True)
return test_loader
# train_loader = data.DataLoader(
# train_set, batch_size=batch_size, shuffle=True,
# num_workers=num_workers, collate_fn=train_set.collate_fn,
# pin_memory=True)
train_loader = data.DataLoader(
train_set, batch_size=batch_size, shuffle=True, collate_fn=train_set.collate_fn,
pin_memory=True)
val_loader = data.DataLoader(
val_set, batch_size=batch_size, shuffle=False,
collate_fn=val_set.collate_fn,
pin_memory=True)
if test_set is not None:
test_loader = data.DataLoader(
test_set, batch_size=batch_size, shuffle=False,
collate_fn=val_set.collate_fn,
pin_memory=True)
else:
test_loader = None
return train_loader, val_loader, test_loader
| [
"utils_collection.general.compute_indices",
"os.listdir",
"pickle.dump",
"os.makedirs",
"pathlib.Path",
"tqdm.tqdm",
"os.path.join",
"pickle.load",
"utils_collection.general.make_shared_array",
"h5py.File",
"utils_collection.general.truncated_normal_fill",
"numpy.array",
"torch.tensor",
"numpy.sum",
"numpy.max",
"torch.utils.data.DataLoader",
"json.load",
"torch.zeros"
] | [((26152, 26270), 'torch.utils.data.DataLoader', 'data.DataLoader', (['train_set'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'collate_fn': 'train_set.collate_fn', 'pin_memory': '(True)'}), '(train_set, batch_size=batch_size, shuffle=True, collate_fn=\n train_set.collate_fn, pin_memory=True)\n', (26167, 26270), True, 'import torch.utils.data as data\n'), ((26300, 26415), 'torch.utils.data.DataLoader', 'data.DataLoader', (['val_set'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'collate_fn': 'val_set.collate_fn', 'pin_memory': '(True)'}), '(val_set, batch_size=batch_size, shuffle=False, collate_fn=\n val_set.collate_fn, pin_memory=True)\n', (26315, 26415), True, 'import torch.utils.data as data\n'), ((731, 748), 'json.load', 'json.load', (['l_file'], {}), '(l_file)\n', (740, 748), False, 'import json\n'), ((1870, 1888), 'pathlib.Path', 'Path', (['dataset_path'], {}), '(dataset_path)\n', (1874, 1888), False, 'from pathlib import Path\n'), ((6992, 7041), 'os.path.join', 'os.path.join', (['self.pickle_folder', 'self.data_split'], {}), '(self.pickle_folder, self.data_split)\n', (7004, 7041), False, 'import os\n'), ((8612, 8659), 'os.path.join', 'os.path.join', (['self.save_folder', 'self.data_split'], {}), '(self.save_folder, self.data_split)\n', (8624, 8659), False, 'import os\n'), ((8668, 8710), 'os.makedirs', 'os.makedirs', (['pck_obj_folder'], {'exist_ok': '(True)'}), '(pck_obj_folder, exist_ok=True)\n', (8679, 8710), False, 'import os\n'), ((14354, 14413), 'utils_collection.general.compute_indices', 'general.compute_indices', (['seg_len', 'num_frames', 'self.is_train'], {}), '(seg_len, num_frames, self.is_train)\n', (14377, 14413), False, 'from utils_collection import general\n'), ((26489, 26605), 'torch.utils.data.DataLoader', 'data.DataLoader', (['test_set'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'collate_fn': 'val_set.collate_fn', 'pin_memory': '(True)'}), '(test_set, batch_size=batch_size, shuffle=False, collate_fn=\n val_set.collate_fn, pin_memory=True)\n', (26504, 26605), True, 'import torch.utils.data as data\n'), ((846, 874), 'h5py.File', 'h5py.File', (['self.h5_path', '"""r"""'], {}), "(self.h5_path, 'r')\n", (855, 874), False, 'import h5py\n'), ((990, 1033), 'tqdm.tqdm', 'tqdm', (['ids[mod_keys[0]]'], {'desc': '"""preload text"""'}), "(ids[mod_keys[0]], desc='preload text')\n", (994, 1033), False, 'from tqdm import tqdm\n'), ((1550, 1578), 'h5py.File', 'h5py.File', (['self.h5_path', '"""r"""'], {}), "(self.h5_path, 'r')\n", (1559, 1578), False, 'import h5py\n'), ((1602, 1623), 'numpy.array', 'np.array', (['h5file[id_]'], {}), '(h5file[id_])\n', (1610, 1623), True, 'import numpy as np\n'), ((2130, 2162), 'tqdm.tqdm', 'tqdm', (['ids'], {'desc': '"""preload videos"""'}), "(ids, desc='preload videos')\n", (2134, 2162), False, 'from tqdm import tqdm\n'), ((4264, 4292), 'h5py.File', 'h5py.File', (['self.h5_path', '"""r"""'], {}), "(self.h5_path, 'r')\n", (4273, 4292), False, 'import h5py\n'), ((4312, 4330), 'numpy.array', 'np.array', (['h5[item]'], {}), '(h5[item])\n', (4320, 4330), True, 'import numpy as np\n'), ((5241, 5276), 'h5py.File', 'h5py.File', (['self.h5_path[i_mod]', '"""r"""'], {}), "(self.h5_path[i_mod], 'r')\n", (5250, 5276), False, 'import h5py\n'), ((6245, 6273), 'h5py.File', 'h5py.File', (['self.h5_path', '"""r"""'], {}), "(self.h5_path, 'r')\n", (6254, 6273), False, 'import h5py\n'), ((6297, 6318), 'numpy.array', 'np.array', (['h5file[id_]'], {}), '(h5file[id_])\n', (6305, 6318), True, 'import numpy as np\n'), ((7231, 7273), 
'tqdm.tqdm', 'tqdm', (['ids[mod_name]'], {'desc': '"""preload videos"""'}), "(ids[mod_name], desc='preload videos')\n", (7235, 7273), False, 'from tqdm import tqdm\n'), ((7889, 7910), 'pickle.load', 'pck.load', (['pickle_file'], {}), '(pickle_file)\n', (7897, 7910), True, 'import pickle as pck\n'), ((9013, 9048), 'h5py.File', 'h5py.File', (['self.h5_path[i_mod]', '"""r"""'], {}), "(self.h5_path[i_mod], 'r')\n", (9022, 9048), False, 'import h5py\n'), ((10196, 10224), 'h5py.File', 'h5py.File', (['self.h5_path', '"""r"""'], {}), "(self.h5_path, 'r')\n", (10205, 10224), False, 'import h5py\n'), ((10248, 10280), 'tqdm.tqdm', 'tqdm', (['ids'], {'desc': '"""preload videos"""'}), "(ids, desc='preload videos')\n", (10252, 10280), False, 'from tqdm import tqdm\n'), ((10532, 10560), 'h5py.File', 'h5py.File', (['self.h5_path', '"""r"""'], {}), "(self.h5_path, 'r')\n", (10541, 10560), False, 'import h5py\n'), ((10584, 10605), 'numpy.array', 'np.array', (['h5file[id_]'], {}), '(h5file[id_])\n', (10592, 10605), True, 'import numpy as np\n'), ((11478, 11529), 'json.load', 'json.load', (['json_file'], {'object_pairs_hook': 'OrderedDict'}), '(json_file, object_pairs_hook=OrderedDict)\n', (11487, 11529), False, 'import json\n'), ((13712, 13771), 'utils_collection.general.compute_indices', 'general.compute_indices', (['vid_len', 'num_frames', 'self.is_train'], {}), '(vid_len, num_frames, self.is_train)\n', (13735, 13771), False, 'from utils_collection import general\n'), ((19282, 19329), 'torch.zeros', 'torch.zeros', (['batch_size', 'vid_frames_max_seq_len'], {}), '(batch_size, vid_frames_max_seq_len)\n', (19293, 19329), False, 'import torch\n'), ((20054, 20094), 'torch.zeros', 'torch.zeros', (['batch_size', 'par_cap_max_len'], {}), '(batch_size, par_cap_max_len)\n', (20065, 20094), False, 'import torch\n'), ((20887, 20937), 'torch.zeros', 'torch.zeros', (['(total_clip_num, clip_frames_max_len)'], {}), '((total_clip_num, clip_frames_max_len))\n', (20898, 20937), False, 'import torch\n'), ((22281, 22345), 'torch.zeros', 'torch.zeros', (['(total_sent_num, sent_cap_max_len, cap_feature_dim)'], {}), '((total_sent_num, sent_cap_max_len, cap_feature_dim))\n', (22292, 22345), False, 'import torch\n'), ((25474, 25614), 'torch.utils.data.DataLoader', 'data.DataLoader', (['val_set'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': 'num_workers', 'collate_fn': 'val_set.collate_fn', 'pin_memory': '(True)'}), '(val_set, batch_size=batch_size, shuffle=False, num_workers=\n num_workers, collate_fn=val_set.collate_fn, pin_memory=True)\n', (25489, 25614), True, 'import torch.utils.data as data\n'), ((25736, 25878), 'torch.utils.data.DataLoader', 'data.DataLoader', (['test_set'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': 'num_workers', 'collate_fn': 'test_set.collate_fn', 'pin_memory': '(True)'}), '(test_set, batch_size=batch_size, shuffle=False, num_workers\n =num_workers, collate_fn=test_set.collate_fn, pin_memory=True)\n', (25751, 25878), True, 'import torch.utils.data as data\n'), ((1105, 1140), 'utils_collection.general.make_shared_array', 'general.make_shared_array', (['np_array'], {}), '(np_array)\n', (1130, 1140), False, 'from utils_collection import general\n'), ((2247, 2282), 'utils_collection.general.make_shared_array', 'general.make_shared_array', (['np_array'], {}), '(np_array)\n', (2272, 2282), False, 'from utils_collection import general\n'), ((3416, 3451), 'h5py.File', 'h5py.File', (['self.h5_path[i_mod]', '"""r"""'], {}), "(self.h5_path[i_mod], 'r')\n", (3425, 3451), 
False, 'import h5py\n'), ((9378, 9409), 'utils_collection.general.make_shared_array', 'general.make_shared_array', (['data'], {}), '(data)\n', (9403, 9409), False, 'from utils_collection import general\n'), ((9570, 9613), 'pickle.dump', 'pck.dump', (['data_pck', 'f', 'pck.HIGHEST_PROTOCOL'], {}), '(data_pck, f, pck.HIGHEST_PROTOCOL)\n', (9578, 9613), True, 'import pickle as pck\n'), ((10352, 10387), 'utils_collection.general.make_shared_array', 'general.make_shared_array', (['np_array'], {}), '(np_array)\n', (10377, 10387), False, 'from utils_collection import general\n'), ((15586, 15656), 'utils_collection.general.truncated_normal_fill', 'general.truncated_normal_fill', (['vid_frames.shape'], {'std': 'self.frames_noise'}), '(vid_frames.shape, std=self.frames_noise)\n', (15615, 15656), False, 'from utils_collection import general\n'), ((16257, 16279), 'torch.tensor', 'torch.tensor', (['c_frames'], {}), '(c_frames)\n', (16269, 16279), False, 'import torch\n'), ((20505, 20526), 'numpy.sum', 'np.sum', (['list_clip_num'], {}), '(list_clip_num)\n', (20511, 20526), True, 'import numpy as np\n'), ((21836, 21857), 'numpy.sum', 'np.sum', (['list_sent_num'], {}), '(list_sent_num)\n', (21842, 21857), True, 'import numpy as np\n'), ((3688, 3719), 'utils_collection.general.make_shared_array', 'general.make_shared_array', (['data'], {}), '(data)\n', (3713, 3719), False, 'from utils_collection import general\n'), ((5645, 5708), 'os.listdir', 'os.listdir', (["(self.dataset_path / 'features' / self.features_name)"], {}), "(self.dataset_path / 'features' / self.features_name)\n", (5655, 5708), False, 'import os\n'), ((16363, 16431), 'utils_collection.general.truncated_normal_fill', 'general.truncated_normal_fill', (['c_frames.shape'], {'std': 'self.frames_noise'}), '(c_frames.shape, std=self.frames_noise)\n', (16392, 16431), False, 'from utils_collection import general\n'), ((17378, 17407), 'torch.tensor', 'torch.tensor', (['par_cap_vectors'], {}), '(par_cap_vectors)\n', (17390, 17407), False, 'import torch\n'), ((19025, 19058), 'torch.tensor', 'torch.tensor', (['list_vid_frames_len'], {}), '(list_vid_frames_len)\n', (19037, 19058), False, 'import torch\n'), ((19162, 19226), 'torch.zeros', 'torch.zeros', (['batch_size', 'vid_frames_max_seq_len', 'vid_feature_dim'], {}), '(batch_size, vid_frames_max_seq_len, vid_feature_dim)\n', (19173, 19226), False, 'import torch\n'), ((19815, 19845), 'torch.tensor', 'torch.tensor', (['list_par_cap_len'], {}), '(list_par_cap_len)\n', (19827, 19845), False, 'import torch\n'), ((19944, 20001), 'torch.zeros', 'torch.zeros', (['batch_size', 'par_cap_max_len', 'par_feature_dim'], {}), '(batch_size, par_cap_max_len, par_feature_dim)\n', (19955, 20001), False, 'import torch\n'), ((20437, 20464), 'torch.tensor', 'torch.tensor', (['list_clip_num'], {}), '(list_clip_num)\n', (20449, 20464), False, 'import torch\n'), ((20763, 20830), 'torch.zeros', 'torch.zeros', (['(total_clip_num, clip_frames_max_len, vid_feature_dim)'], {}), '((total_clip_num, clip_frames_max_len, vid_feature_dim))\n', (20774, 20830), False, 'import torch\n'), ((21611, 21640), 'torch.tensor', 'torch.tensor', (['clip_frames_len'], {}), '(clip_frames_len)\n', (21623, 21640), False, 'import torch\n'), ((21768, 21795), 'torch.tensor', 'torch.tensor', (['list_sent_num'], {}), '(list_sent_num)\n', (21780, 21795), False, 'import torch\n'), ((22114, 22161), 'torch.zeros', 'torch.zeros', (['(total_sent_num, sent_cap_max_len)'], {}), '((total_sent_num, sent_cap_max_len))\n', (22125, 22161), False, 'import torch\n'), 
((22980, 23006), 'torch.tensor', 'torch.tensor', (['sent_cap_len'], {}), '(sent_cap_len)\n', (22992, 23006), False, 'import torch\n'), ((20671, 20689), 'numpy.max', 'np.max', (['len_single'], {}), '(len_single)\n', (20677, 20689), True, 'import numpy as np\n'), ((21993, 22011), 'numpy.max', 'np.max', (['len_single'], {}), '(len_single)\n', (21999, 22011), True, 'import numpy as np\n')] |
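A minimal usage sketch for the loader module above, assuming placeholder paths and an EasyDict config carrying the dataset.* fields the code reads; none of the concrete values below come from the original file.
# Hypothetical wiring of create_datasets / create_loaders; every path and value here is a placeholder.
from pathlib import Path
from easydict import EasyDict

cfg = EasyDict(dataset=EasyDict(
    train_split="train", val_split="val", test_split="test",
    max_frames=80, frames_noise=0.0))
dataset_path_dict = EasyDict(
    dataset_name="lsmdc16",
    meta_data={"action": Path("meta/action.json")},
    video_feats={"action": Path("features/action.h5")},
    language_feats=Path("features/text.h5"),
    meta_text_len=Path("meta/text_len.json"),
    pickle_path="")

train_set, val_set = create_datasets(
    dataset_path_dict, cfg, preload_vid_feat=False, preload_text_feat=False,
    pickle_path="")
train_loader, val_loader, _ = create_loaders(
    train_set, val_set, None, batch_size=16, num_workers=0)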
import struct
import spidev
import sys
import socket
import time
def toDevice(rgbs):
values = [0, 0, 0, 0]
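#The four leading zero bytes are the SPI start frame; each pixel then becomes two bytes below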
#Transform 8-bit value to 5-bit
for (r,g,b) in rgbs:
r = int(r) >> 3
g = int(g) >> 3
b = int(b) >> 3
values.append((0x80 | (b<<2) | ((r&0x18)>>3)))
values.append((((r&0x07)<<5) | g))
#End frame: push out 0xFF bytes so there is at least one extra clock bit per pixel
for i in range(int(len(rgbs)/8)+1):
values.append(0xFF)
spi.writebytes(values)
#Initial setup
IP = sys.argv[1]
PORT = int(sys.argv[2])
BUFFER_SIZE = 20
max_fps = 60
max_period = 1.0/max_fps
count = 0
start = 0
#Start SPI
spi = spidev.SpiDev()
spi.open(0,0)
print("SPI is setup.")
#Start listening
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((IP, PORT))
s.listen(1)
print("Network is setup. Listening on", IP+":"+str(PORT))
try:
while True:
#Connected!
conn, addr = s.accept()
sfile = conn.makefile("rwb")
print("Connection from:", addr)
start = time.time()
#Initial
conn.sendall((str(time.time()) + "\n").encode())
conn.sendall((str(time.time()) + "\n").encode())
t = time.time()
while True:
try:
data = str(sfile.readline().strip())
#Should be sent as soon as possible after receiving data, but limited to ~60 fps
time.sleep(max(0, t+max_period-time.time()))
t = time.time()
conn.sendall((str(t) + "\n").encode())
data = str(data.strip("'"))
pixels = data.split("#")[1:]
rgbs = [struct.unpack('BBB', bytes.fromhex(p)) for p in pixels]
toDevice(rgbs)
count +=1
except Exception as e:
end = time.time()
conn.close()
print(e)
print(count, "frames in", end-start, "seconds. FPS of", count/(end-start))
count = 0
break
except Exception as e:
print("hit exception")
print(e)
conn.close()
end = time.time()
spi.close()
print(count, "frames in", end-start, "seconds. FPS of", count/(end-start))
| [
"spidev.SpiDev",
"time.time",
"socket.socket"
] | [((632, 647), 'spidev.SpiDev', 'spidev.SpiDev', ([], {}), '()\n', (645, 647), False, 'import spidev\n'), ((707, 756), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (720, 756), False, 'import socket\n'), ((2096, 2107), 'time.time', 'time.time', ([], {}), '()\n', (2105, 2107), False, 'import time\n'), ((1014, 1025), 'time.time', 'time.time', ([], {}), '()\n', (1023, 1025), False, 'import time\n'), ((1170, 1181), 'time.time', 'time.time', ([], {}), '()\n', (1179, 1181), False, 'import time\n'), ((1439, 1450), 'time.time', 'time.time', ([], {}), '()\n', (1448, 1450), False, 'import time\n'), ((1804, 1815), 'time.time', 'time.time', ([], {}), '()\n', (1813, 1815), False, 'import time\n'), ((1070, 1081), 'time.time', 'time.time', ([], {}), '()\n', (1079, 1081), False, 'import time\n'), ((1127, 1138), 'time.time', 'time.time', ([], {}), '()\n', (1136, 1138), False, 'import time\n'), ((1405, 1416), 'time.time', 'time.time', ([], {}), '()\n', (1414, 1416), False, 'import time\n')] |
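A possible counterpart client, sketched under assumptions: host, port and pixel count are placeholders, and the framing simply mirrors the server loop above (one "#RRGGBB…" line per frame, answered by a timestamp line).
# Hypothetical client for the server above; HOST, PORT and PIXELS are placeholders.
import socket

HOST, PORT, PIXELS = "192.168.0.10", 5005, 32
sock = socket.create_connection((HOST, PORT))
sfile = sock.makefile("rwb")
sfile.readline()  # the server first sends two timestamp lines
sfile.readline()
try:
    while True:
        frame = "".join("#ff0000" for _ in range(PIXELS))  # one solid-red frame
        sfile.write((frame + "\n").encode())
        sfile.flush()
        sfile.readline()  # per-frame timestamp echoed back by the server
finally:
    sock.close()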
# displays main game board
import pygame
import time
from Control import config
from View.soundeffects import Sound
from View.button import Button
class Table:
def __init__(self, controller):
self.control = controller
self.loop_1 = True
self.loop_2 = True
self.result_msg = ""
self.balance = str(self.control.get_players_balance())
# Player hand loop: sets config.new_game and config.game_exit, or plays through a hand
def player_hand_loop(self):
config.game_exit = False
# Sound Effect of Dealing 4 cards
sound = Sound()
sound.get_sound_effect("Deal4")
config.gameDisplay.fill(config.board_color)
self.show_dealers_hand()
self.show_balance(str(self.control.get_players_balance()))
# self.show_players_hand()
while self.loop_1:
# checks to see if deck is empty
if config.end_shoe is True:
self.end_of_shoe()
# event loop / NOT logic loop
# creates a list of events per frame per second (mouse movement/clicks etc)
for event in pygame.event.get():
if event.type == pygame.QUIT:
config.game_exit = True
pygame.quit()
quit()
if event.type == pygame.MOUSEBUTTONDOWN:
# buttons for hit and stand
hit_button = Button(
"HIT",
100,
500,
100,
50,
config.rose_white,
config.dark_red,
self.hit,
)
hit_button.intro_button()
stand_button = Button(
"STAND", 300, 500, 100, 50, config.rose_white, config.dark_red
)
stand_button.bool_button()
if stand_button.return_boolean():
self.stand()
# buttons that return a boolean for new game and quit game
new_game_button = Button(
"NEW GAME",
800,
500,
150,
50,
config.rose_white,
config.dark_red,
)
new_game_button.bool_button()
if new_game_button.return_boolean():
config.new_game = True
self.loop_1 = False
quit_button = Button(
"QUIT GAME",
1000,
500,
150,
50,
config.rose_white,
config.dark_red,
)
quit_button.bool_button()
if quit_button.return_boolean():
config.game_exit = True
self.loop_1 = False
# buttons for hit,stand,new game, and quit game
hit_button = Button(
"HIT ME", 100, 500, 100, 50, config.light_gold, config.gold
)
hit_button.intro_button()
stand_button = Button(
"STAND", 300, 500, 100, 50, config.light_gold, config.gold
)
stand_button.intro_button()
new_game_button = Button(
"NEW GAME", 800, 500, 150, 50, config.light_gold, config.gold
)
new_game_button.intro_button()
quit_button = Button(
"QUIT GAME", 1000, 500, 150, 50, config.light_gold, config.gold
)
quit_button.intro_button()
self.show_players_hand()
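# A natural blackjack dealt at the start ends the hand immediately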
if self.control.starting_blackjack:
self.result_msg = "Blackjack! You Win!"
self.show_results(self.result_msg)
self.control.starting_blackjack = False
self.loop_1 = False
pygame.display.update()
config.clock.tick(15)
self.loop_1 = True
# End of Hand
def end_of_hand(self):
config.gameDisplay.fill(config.board_color)
self.show_balance(str(self.control.get_players_balance()))
self.show_dealers_hand()
self.show_players_hand()
self.show_results(self.result_msg)
while self.loop_2:
for event in pygame.event.get():
if event.type == pygame.QUIT:
config.game_exit = True
pygame.quit()
quit()
if event.type == pygame.MOUSEBUTTONDOWN:
next_hand_button = Button(
"NEXT HAND", 100, 500, 150, 50, config.light_gold, config.gold
)
next_hand_button.bool_button()
if next_hand_button.return_boolean():
self.loop_2 = False
new_game_button = Button(
"NEW GAME", 800, 500, 150, 50, config.light_gold, config.gold
)
new_game_button.bool_button()
if new_game_button.return_boolean():
config.new_game = True
self.loop_2 = False
quit_button = Button(
"QUIT GAME", 1000, 500, 150, 50, config.light_gold, config.gold
)
quit_button.bool_button()
if quit_button.return_boolean():
config.game_exit = True
self.loop_2 = False
# config.gameDisplay.fill(config.board_color)
hit_button = Button(
"", 100, 500, 100, 50, config.board_color, config.board_color
)
hit_button.intro_button()
stand_button = Button(
"", 300, 500, 100, 50, config.board_color, config.board_color
)
stand_button.intro_button()
next_hand_button = Button(
"NEXT HAND", 100, 500, 150, 50, config.light_gold, config.gold
)
next_hand_button.bool_button()
next_hand_button = Button(
"NEW GAME", 800, 500, 150, 50, config.light_gold, config.gold
)
next_hand_button.intro_button()
new_game_button = Button(
"QUIT GAME", 1000, 500, 150, 50, config.light_gold, config.gold
)
new_game_button.intro_button()
pygame.display.update()
config.clock.tick(30)
# Reset loop
self.loop_2 = True
@staticmethod
def text_objects(text, font):
text_surface = font.render(text, True, config.black)
return text_surface, text_surface.get_rect()
@staticmethod
def user_display(self, text):
large_text = pygame.font.Font("freesansbold.ttf", 80)
text_surf, text_rect = self.text_objects(text, large_text)
text_rect.center = ((config.disp_width / 2), (config.disp_height / 2.5))
config.gameDisplay.blit(text_surf, text_rect)
pygame.display.update()
# starts game loop over and resets
time.sleep(1)
def end_of_shoe(self):
text = "End of Shoe, New Deck after re-deal"
medium_text = pygame.font.Font("freesansbold.ttf", 50)
text_surf, text_rect = self.text_objects(text, medium_text)
text_rect.center = ((config.disp_width / 2), (config.disp_height / 3.5))
config.gameDisplay.blit(text_surf, text_rect)
pygame.display.update()
# starts game loop over and resets
def show_dealers_hand(self):
k = 1
dealers_hand = self.control.get_dealers_hand()
for i in range(len(dealers_hand)):
right = 500
down = 0
card = pygame.image.load(str(dealers_hand[i].get_filename()))
config.gameDisplay.blit(card, (right + k, down))
k += 100
def show_players_hand(self):
k = 1
players_hand = self.control.get_players_hand()
for i in range(len(players_hand)):
right = 500
down = 400
card = pygame.image.load(str(players_hand[i].get_filename()))
config.gameDisplay.blit(card, (right + k, down))
k += 100
def hit(self):
(card, score) = self.control.hit_player()
if score >= 21:
self.stand()
def stand(self):
self.result_msg = self.control.hit_dealer()
self.show_dealers_hand()
config.hand_loop = False
self.loop_1 = False
def show_results(self, result_msg):
self.user_display(self, result_msg)
def show_balance(self, balance):
mid_text = pygame.font.Font("freesansbold.ttf", 30)
text_surf, text_rect = self.text_objects("Balance: $" + balance, mid_text)
# text_rect.top = (0, 0)
config.gameDisplay.blit(text_surf, text_rect)
pygame.display.update()
# time.sleep(1)
| [
"View.button.Button",
"pygame.quit",
"pygame.event.get",
"time.sleep",
"Control.config.gameDisplay.fill",
"Control.config.clock.tick",
"Control.config.gameDisplay.blit",
"View.soundeffects.Sound",
"pygame.font.Font",
"pygame.display.update"
] | [((588, 595), 'View.soundeffects.Sound', 'Sound', ([], {}), '()\n', (593, 595), False, 'from View.soundeffects import Sound\n'), ((645, 688), 'Control.config.gameDisplay.fill', 'config.gameDisplay.fill', (['config.board_color'], {}), '(config.board_color)\n', (668, 688), False, 'from Control import config\n'), ((4301, 4344), 'Control.config.gameDisplay.fill', 'config.gameDisplay.fill', (['config.board_color'], {}), '(config.board_color)\n', (4324, 4344), False, 'from Control import config\n'), ((7078, 7118), 'pygame.font.Font', 'pygame.font.Font', (['"""freesansbold.ttf"""', '(80)'], {}), "('freesansbold.ttf', 80)\n", (7094, 7118), False, 'import pygame\n'), ((7275, 7320), 'Control.config.gameDisplay.blit', 'config.gameDisplay.blit', (['text_surf', 'text_rect'], {}), '(text_surf, text_rect)\n', (7298, 7320), False, 'from Control import config\n'), ((7329, 7352), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (7350, 7352), False, 'import pygame\n'), ((7404, 7417), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (7414, 7417), False, 'import time\n'), ((7521, 7561), 'pygame.font.Font', 'pygame.font.Font', (['"""freesansbold.ttf"""', '(50)'], {}), "('freesansbold.ttf', 50)\n", (7537, 7561), False, 'import pygame\n'), ((7719, 7764), 'Control.config.gameDisplay.blit', 'config.gameDisplay.blit', (['text_surf', 'text_rect'], {}), '(text_surf, text_rect)\n', (7742, 7764), False, 'from Control import config\n'), ((7773, 7796), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (7794, 7796), False, 'import pygame\n'), ((8965, 9005), 'pygame.font.Font', 'pygame.font.Font', (['"""freesansbold.ttf"""', '(30)'], {}), "('freesansbold.ttf', 30)\n", (8981, 9005), False, 'import pygame\n'), ((9130, 9175), 'Control.config.gameDisplay.blit', 'config.gameDisplay.blit', (['text_surf', 'text_rect'], {}), '(text_surf, text_rect)\n', (9153, 9175), False, 'from Control import config\n'), ((9184, 9207), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (9205, 9207), False, 'import pygame\n'), ((1128, 1146), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (1144, 1146), False, 'import pygame\n'), ((3225, 3292), 'View.button.Button', 'Button', (['"""HIT ME"""', '(100)', '(500)', '(100)', '(50)', 'config.light_gold', 'config.gold'], {}), "('HIT ME', 100, 500, 100, 50, config.light_gold, config.gold)\n", (3231, 3292), False, 'from View.button import Button\n'), ((3388, 3454), 'View.button.Button', 'Button', (['"""STAND"""', '(300)', '(500)', '(100)', '(50)', 'config.light_gold', 'config.gold'], {}), "('STAND', 300, 500, 100, 50, config.light_gold, config.gold)\n", (3394, 3454), False, 'from View.button import Button\n'), ((3555, 3624), 'View.button.Button', 'Button', (['"""NEW GAME"""', '(800)', '(500)', '(150)', '(50)', 'config.light_gold', 'config.gold'], {}), "('NEW GAME', 800, 500, 150, 50, config.light_gold, config.gold)\n", (3561, 3624), False, 'from View.button import Button\n'), ((3724, 3795), 'View.button.Button', 'Button', (['"""QUIT GAME"""', '(1000)', '(500)', '(150)', '(50)', 'config.light_gold', 'config.gold'], {}), "('QUIT GAME', 1000, 500, 150, 50, config.light_gold, config.gold)\n", (3730, 3795), False, 'from View.button import Button\n'), ((4162, 4185), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (4183, 4185), False, 'import pygame\n'), ((4198, 4219), 'Control.config.clock.tick', 'config.clock.tick', (['(15)'], {}), '(15)\n', (4215, 4219), False, 'from Control import config\n'), ((4573, 4591), 
'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (4589, 4591), False, 'import pygame\n'), ((5889, 5958), 'View.button.Button', 'Button', (['""""""', '(100)', '(500)', '(100)', '(50)', 'config.board_color', 'config.board_color'], {}), "('', 100, 500, 100, 50, config.board_color, config.board_color)\n", (5895, 5958), False, 'from View.button import Button\n'), ((6054, 6123), 'View.button.Button', 'Button', (['""""""', '(300)', '(500)', '(100)', '(50)', 'config.board_color', 'config.board_color'], {}), "('', 300, 500, 100, 50, config.board_color, config.board_color)\n", (6060, 6123), False, 'from View.button import Button\n'), ((6225, 6295), 'View.button.Button', 'Button', (['"""NEXT HAND"""', '(100)', '(500)', '(150)', '(50)', 'config.light_gold', 'config.gold'], {}), "('NEXT HAND', 100, 500, 150, 50, config.light_gold, config.gold)\n", (6231, 6295), False, 'from View.button import Button\n'), ((6400, 6469), 'View.button.Button', 'Button', (['"""NEW GAME"""', '(800)', '(500)', '(150)', '(50)', 'config.light_gold', 'config.gold'], {}), "('NEW GAME', 800, 500, 150, 50, config.light_gold, config.gold)\n", (6406, 6469), False, 'from View.button import Button\n'), ((6574, 6645), 'View.button.Button', 'Button', (['"""QUIT GAME"""', '(1000)', '(500)', '(150)', '(50)', 'config.light_gold', 'config.gold'], {}), "('QUIT GAME', 1000, 500, 150, 50, config.light_gold, config.gold)\n", (6580, 6645), False, 'from View.button import Button\n'), ((6731, 6754), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (6752, 6754), False, 'import pygame\n'), ((6767, 6788), 'Control.config.clock.tick', 'config.clock.tick', (['(30)'], {}), '(30)\n', (6784, 6788), False, 'from Control import config\n'), ((8117, 8165), 'Control.config.gameDisplay.blit', 'config.gameDisplay.blit', (['card', '(right + k, down)'], {}), '(card, (right + k, down))\n', (8140, 8165), False, 'from Control import config\n'), ((8466, 8514), 'Control.config.gameDisplay.blit', 'config.gameDisplay.blit', (['card', '(right + k, down)'], {}), '(card, (right + k, down))\n', (8489, 8514), False, 'from Control import config\n'), ((1258, 1271), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (1269, 1271), False, 'import pygame\n'), ((1437, 1515), 'View.button.Button', 'Button', (['"""HIT"""', '(100)', '(500)', '(100)', '(50)', 'config.rose_white', 'config.dark_red', 'self.hit'], {}), "('HIT', 100, 500, 100, 50, config.rose_white, config.dark_red, self.hit)\n", (1443, 1515), False, 'from View.button import Button\n'), ((1812, 1882), 'View.button.Button', 'Button', (['"""STAND"""', '(300)', '(500)', '(100)', '(50)', 'config.rose_white', 'config.dark_red'], {}), "('STAND', 300, 500, 100, 50, config.rose_white, config.dark_red)\n", (1818, 1882), False, 'from View.button import Button\n'), ((2184, 2257), 'View.button.Button', 'Button', (['"""NEW GAME"""', '(800)', '(500)', '(150)', '(50)', 'config.rose_white', 'config.dark_red'], {}), "('NEW GAME', 800, 500, 150, 50, config.rose_white, config.dark_red)\n", (2190, 2257), False, 'from View.button import Button\n'), ((2681, 2756), 'View.button.Button', 'Button', (['"""QUIT GAME"""', '(1000)', '(500)', '(150)', '(50)', 'config.rose_white', 'config.dark_red'], {}), "('QUIT GAME', 1000, 500, 150, 50, config.rose_white, config.dark_red)\n", (2687, 2756), False, 'from View.button import Button\n'), ((4703, 4716), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (4714, 4716), False, 'import pygame\n'), ((4840, 4910), 'View.button.Button', 'Button', (['"""NEXT HAND"""', '(100)', '(500)', 
'(150)', '(50)', 'config.light_gold', 'config.gold'], {}), "('NEXT HAND', 100, 500, 150, 50, config.light_gold, config.gold)\n", (4846, 4910), False, 'from View.button import Button\n'), ((5148, 5217), 'View.button.Button', 'Button', (['"""NEW GAME"""', '(800)', '(500)', '(150)', '(50)', 'config.light_gold', 'config.gold'], {}), "('NEW GAME', 800, 500, 150, 50, config.light_gold, config.gold)\n", (5154, 5217), False, 'from View.button import Button\n'), ((5496, 5567), 'View.button.Button', 'Button', (['"""QUIT GAME"""', '(1000)', '(500)', '(150)', '(50)', 'config.light_gold', 'config.gold'], {}), "('QUIT GAME', 1000, 500, 150, 50, config.light_gold, config.gold)\n", (5502, 5567), False, 'from View.button import Button\n')] |
#!/usr/bin/env python
from threading import Thread
from threading import Lock
import pygame
import random
import math
from config import fish_options
from fish import *
class Sea():
""" Screen game """
def __init__(self):
""" init method """
#init game
pygame.init()
self.fishs = []
#setup size
self.width = 700
self.height = 300
#setup screen
self.screen = pygame.display.set_mode((self.width, self.height), 0, 16)
self.bg_color = (51,153,255)
self.deads = []
self.paredes = [
pygame.Rect(0, 0, 10, self.height),
pygame.Rect(0, 0, self.width, 30),
pygame.Rect(self.width, 0, 10, self.height),
pygame.Rect(0, self.height, self.width, 10)
]
self.xLines = [pygame.Rect(i, 0, 1, self.height) for i in range(0, self.width, fish_options['fish_space'])]
self.yLines = [pygame.Rect(0, i, self.width, 2) for i in range(0, self.height, fish_options['fish_space'])]
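# Occupancy grid with one cell per fish_space-sized square; writeFile() below dumps it as ASCII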
self.water = [[None for i in range(self.height // fish_options['fish_space'] + 1)] for i in range(self.width // fish_options['fish_space'] + 1)]
self.reloj = pygame.time.Clock()
self.make_stage()
def make_stage(self):
""" Build stage """
self.populate_sea('m', Shark, 7, FishMove.get_move())
self.populate_sea('w', Shark, 4, FishMove.get_move())
self.populate_sea('w', Fish, 10, FishMove.get_move())
self.populate_sea('m', Fish, 15, FishMove.get_move())
start = False
done = False
while done == False:
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
for item_fish in self.fishs:
item_fish.dead()
self.reloj.tick(10)
self.screen.fill(self.bg_color)
# for pared in self.paredes:
# pygame.draw.rect(self.screen,[0,0,200],pared)
for line in self.xLines:
pygame.draw.rect(self.screen, [123, 56, 200], line)
for line in self.yLines:
pygame.draw.rect(self.screen, [180, 12, 200], line)
writeFile(self.water)
if start == False:
for item_fish in self.fishs:
item_fish.start()
start = True
for item_fish in self.fishs:
if item_fish.alive == True:
self.screen.blit(item_fish.draw.image, item_fish.draw.rect.move(item_fish.x, item_fish.y))
pygame.display.update()
exit()
def populate_sea(self,sexo,fish_type,quantity,move):
""" fill the sea with items """
for i in range(quantity):
fish = fish_type(sexo, fish_type.__name__.lower(), self,move)
self.fishs.append(fish)
def writeFile(items):
l = Lock()
l.acquire(True)
screen = open('screen','w')
screen.write("\n\n\n")
screen.write("\n==============\n")
for item in items:
line = ""
for x,i in enumerate(item):
if i == None:
line = line + "[]"
else:
if i.__class__ == Shark:
line = line + "[X]"
screen.write(line + "\n")
screen.write("\n==============\n")
screen.close()
l.release()
| [
"pygame.init",
"pygame.event.get",
"threading.Lock",
"pygame.display.set_mode",
"pygame.draw.rect",
"pygame.time.Clock",
"pygame.display.update",
"pygame.Rect"
] | [((2485, 2491), 'threading.Lock', 'Lock', ([], {}), '()\n', (2489, 2491), False, 'from threading import Lock\n'), ((267, 280), 'pygame.init', 'pygame.init', ([], {}), '()\n', (278, 280), False, 'import pygame\n'), ((387, 444), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(self.width, self.height)', '(0)', '(16)'], {}), '((self.width, self.height), 0, 16)\n', (410, 444), False, 'import pygame\n'), ((1077, 1096), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (1094, 1096), False, 'import pygame\n'), ((518, 552), 'pygame.Rect', 'pygame.Rect', (['(0)', '(0)', '(10)', 'self.height'], {}), '(0, 0, 10, self.height)\n', (529, 552), False, 'import pygame\n'), ((557, 590), 'pygame.Rect', 'pygame.Rect', (['(0)', '(0)', 'self.width', '(30)'], {}), '(0, 0, self.width, 30)\n', (568, 590), False, 'import pygame\n'), ((595, 638), 'pygame.Rect', 'pygame.Rect', (['self.width', '(0)', '(10)', 'self.height'], {}), '(self.width, 0, 10, self.height)\n', (606, 638), False, 'import pygame\n'), ((643, 686), 'pygame.Rect', 'pygame.Rect', (['(0)', 'self.height', 'self.width', '(10)'], {}), '(0, self.height, self.width, 10)\n', (654, 686), False, 'import pygame\n'), ((709, 742), 'pygame.Rect', 'pygame.Rect', (['i', '(0)', '(1)', 'self.height'], {}), '(i, 0, 1, self.height)\n', (720, 742), False, 'import pygame\n'), ((820, 852), 'pygame.Rect', 'pygame.Rect', (['(0)', 'i', 'self.width', '(2)'], {}), '(0, i, self.width, 2)\n', (831, 852), False, 'import pygame\n'), ((1464, 1482), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (1480, 1482), False, 'import pygame\n'), ((2209, 2232), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (2230, 2232), False, 'import pygame\n'), ((1771, 1822), 'pygame.draw.rect', 'pygame.draw.rect', (['self.screen', '[123, 56, 200]', 'line'], {}), '(self.screen, [123, 56, 200], line)\n', (1787, 1822), False, 'import pygame\n'), ((1860, 1911), 'pygame.draw.rect', 'pygame.draw.rect', (['self.screen', '[180, 12, 200]', 'line'], {}), '(self.screen, [180, 12, 200], line)\n', (1876, 1911), False, 'import pygame\n')] |
#
# Copyright (c) 2015 <NAME>
# Licensed under the MIT license, see LICENSE.txt for details.
#
#
# This script converts log files as output by harmonic.m to LaTeX files
# Needs to be run inside ./lib
#
import subprocess
import re
import glob
# log files to be processed
FILE_GLOB = '../log/*.log'
# path to magma executable (give full path if your envoironment is not set up properly)
MAGMA_EXEC = 'magma'
TEX_TABLE_HEADER = '''\\begin{{tabular}}{{| l | l |}}
\\multicolumn{{2}}{{l}}{{\\bf Heckeoperatoren der Form $\\diag({0})$}} \\\\
\\hline
$P$ & $\\chi^\\text{{char}}(T)$ \\\\
\\hline
'''
def convertLog(infile):
state = k = P = char = ''
data = []
with open(infile) as f:
for line in f:
if state == 'CHAR':
if line.startswith('Time'):
data.append( (int(k), P, char.strip()) )
state = k = P = char = ''
else:
char += line
elif state == 'HECKE':
if line.startswith('Characteristic polynomial'):
char = line.split(': ')[1]
state = 'CHAR'
else:
if line.startswith('[*] Computing Hecke'):
                    hecke = re.search(r'k = (\d+), P = (.*)\.', line)
k = hecke.group(1)
P = hecke.group(2)
state = 'HECKE'
with open(infile+'.tmp', 'w') as f:
f.write('Attach("buildingLib.m");\n')
f.write('Attach("latex.m");\n')
f.write('import "latex.m" : LatexFactorization;\n')
f.write('R<T> := PolynomialRing(Integers());\n')
f.write('poly := [\n')
for i,(k,P,char) in enumerate(data):
f.write(char)
if i < len(data)-1:
f.write(',\n')
f.write('];\n')
f.write("""print "|||";
for p in poly do
print LatexFactorization(p);
print "|||";
end for;
exit;""")
magma = subprocess.Popen([MAGMA_EXEC, infile+'.tmp'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = magma.communicate()
latex = out.split('|||')
for i,(k,P,char) in enumerate(data):
data[i] = (k,P,char,latex[i+1].strip())
ks = frozenset(d[0] for d in data)
with open(infile+'.tex', 'w') as f:
for k in ks:
if k == 1:
f.write(TEX_TABLE_HEADER.format('1,1,P'))
else:
f.write(TEX_TABLE_HEADER.format('1,P,P'))
for (_,P,_,charTex) in filter(lambda d: d[0] == k, data):
f.write('$' + P + '$ &\n')
tex = charTex.splitlines()
for i in range(len(tex)-1):
tex[i] += '\\\\&'
tex = '\n'.join(tex)
f.write('$\\!\\begin{aligned}\n\t&' + tex + '\\end{aligned}$ \\\\\n\\hline\n')
        f.write('\\end{tabular}\n\n\n')
for filename in glob.glob(FILE_GLOB):
print('Processing ' + filename)
convertLog(filename)
| [
"subprocess.Popen",
"glob.glob",
"re.search"
] | [((2994, 3014), 'glob.glob', 'glob.glob', (['FILE_GLOB'], {}), '(FILE_GLOB)\n', (3003, 3014), False, 'import glob\n'), ((2057, 2156), 'subprocess.Popen', 'subprocess.Popen', (["[MAGMA_EXEC, infile + '.tmp']"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), "([MAGMA_EXEC, infile + '.tmp'], stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n", (2073, 2156), False, 'import subprocess\n'), ((1243, 1285), 're.search', 're.search', (['"""k = (\\\\d+), P = (.*)\\\\."""', 'line'], {}), "('k = (\\\\d+), P = (.*)\\\\.', line)\n", (1252, 1285), False, 'import re\n')] |
import os
from multiprocessing import Pool
import numpy as np
from pygesture import filestruct
from pygesture import wav
class Processor(object):
"""
    Specifies how to process recording files, including conditioning and
feature extraction from relevant segments of the files. It is basically a
workaround so all of this information can be passed conveniently from the
config to the Recording class, which does all of the work.
Parameters
----------
conditioner : pipeline.Conditioner object
Conditions the data (usually filters and downsamples).
windower : pipeline.Windower object
Holds data for overlapping windows.
feature_extractor : features.FeatureExtractor object
Extracts features from waveform data.
rest_bounds : 2-tuple of ints
Specifies (start, end) sample indices for the rest class.
gesture_bounds : 2-tuple of ints
        Specifies (start, end) sample indices for the gesture class.
"""
def __init__(self, conditioner, windower, feature_extractor, rest_bounds,
gesture_bounds):
self.conditioner = conditioner
self.windower = windower
self.feature_extractor = feature_extractor
self.rest_bounds = rest_bounds
self.gesture_bounds = gesture_bounds
def batch_process(rootdir, pid, processor, sid_list='all', pool=1):
"""
Processes the given participants' sessions. If sid_list is not provided,
all sessions are processed.
Parameters
----------
rootdir : str
The path to the root of the data file structure.
pid : str
The participant ID for which to process the files (e.g. 'p0').
sid_list : list of strings, optional
List of session IDs to process (e.g. ['arm1', 'arm2']). The default is
'all', which means the function will search for all of the given
participant's sessions and process them.
pool : int, default 1
The number of processes to start for processing. Default is 1, which
means the function will not use the multiprocessing module.
"""
if sid_list == 'all':
sid_list = filestruct.get_session_list(rootdir, pid)
if pool > 1:
pool = Pool(processes=pool)
pool.map(_process_session, [
(rootdir, pid, sid, processor) for sid in sid_list])
pool.close()
else:
for sid in sid_list:
_process_session((
rootdir, pid, sid, processor))
def _process_session(args):
"""
Internally used for processing a single session. The input should be a
tuple matching the input args of the Session constructor.
"""
sess = Session(*args)
sess.process()
def read_feature_file_list(file_list, labels='all'):
"""
Reads all of the feature files specified and concatenates all of their data
into a (vector, label) tuple.
Parameters
----------
file_list : list (str)
List of paths to feature files.
labels : list (int)
List of labels to include in the data. Default is 'all', meaning all
labels in the files are included.
"""
data = np.concatenate([np.genfromtxt(f, delimiter=',') for f in file_list])
X = data[:, 1:]
y = data[:, 0]
if labels == 'all':
return (X, y)
else:
mask = np.zeros(y.shape, dtype=bool)
for label in labels:
mask |= (y == label)
return (X[mask], y[mask])
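# Example (hypothetical file names): load only the labelled gesture classes 1
# and 2 from two feature files previously written by Session.process --
#
#     X, y = read_feature_file_list(['p0_arm1.csv', 'p0_arm2.csv'], labels=[1, 2])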
def get_session_data(rootdir, pid, sid_list):
"""
    Convenience function to retrieve the data for the specified participant and
session ID list in a (vector, label) tuple.
"""
file_list = filestruct.get_feature_file_list(rootdir, pid, sid_list)
(X, y) = read_feature_file_list(file_list)
return (X, y)
class Session:
"""
Processor of a group of recordings that were taken consecutively.
Parameters
----------
rootdir : str
Root directory of the data (see pygesture.filestruct).
pid : str
ID of the participant who generated the data.
sid : str
ID of the session.
processor : pygesture.analysis.processing.Processor
Processor used to transform the raw data to conditioned data and
feature data.
Attributes
----------
sessdir : str
Path to the session directory.
rawdir : str
Path to the directory containing the raw recording files.
procdir : str
Path to the directory to place the conditioned recording files.
featfile : str
Path to the feature file to be generated.
"""
def __init__(self, rootdir, pid, sid, processor):
self.sid = sid
self.pid = pid
self.processor = processor
self.sessdir = filestruct.find_session_dir(rootdir, pid, sid)
self.rawdir = filestruct.get_recording_dir(self.sessdir)
self.procdir = filestruct.get_processed_dir(self.sessdir)
self.featfile = filestruct.new_feature_file(
self.sessdir,
self.pid,
self.sid,
filestruct.parse_date_string(self.sessdir))
if not os.path.exists(self.procdir):
os.mkdir(self.procdir)
def process(self, saveproc=True):
"""
Iterates over all recordings in the session, processes them (see
Recording's process method), writes the conditioned data to procdir,
and writes the features to a CSV file.
"""
if os.path.isfile(self.featfile):
os.remove(self.featfile)
with open(self.featfile, 'ab') as fid:
for f in filestruct.get_recording_file_list(self.rawdir):
try:
rec = Recording(f, self.processor)
except KeyError:
continue
proc_data, features = rec.process()
procfile = os.path.join(self.procdir, rec.filename)
fs_proc = self.processor.conditioner.f_down
wav.write(procfile, fs_proc, proc_data)
np.savetxt(fid, features, delimiter=',', fmt='%.5e')
class Recording:
"""
Representation of a single multi-channel raw recording.
Parameters
----------
wavfile : str
Full path to the raw recording (WAV file).
processor : pygesture.analysis.processing.Processor
Processor used to transform the raw data to conditioned data and
feature data.
Attributes
----------
fs_raw : int
Sampling rate (Hz) of the raw recording as read from the WAV file.
filename : str
Name of the WAV file (name only, no path).
raw_data : array, shape (n_samples, n_channels)
Raw data as read from the WAV file.
trial_number : int
Trial number of the recording (pertains to the session it was recorded
in).
label : int
Label of the recording as a whole, relevant to recordings in which a
gesture is held throughout.
conditioned_data : array, shape (n_samples_conditioned, n_channels)
Raw data that has been transformed by the conditioner (of the input
processor)
feature_data : array, shape (n_windows, n_features+1)
Feature data with the label of the recording in the first column. If
rest bounds are given in the processor, the first rows of the feature
data will be rest data, and the remaining portion will be from the
gesture bounds given in the processor.
"""
def __init__(self, wavfile, processor):
self.wavfile = wavfile
self.processor = processor
self._conditioner = self.processor.conditioner
self._feature_extractor = self.processor.feature_extractor
self._windower = self.processor.windower
self._read_file()
def _read_file(self):
self.fs_raw, self.raw_data = wav.read(self.wavfile)
path, self.filename = os.path.split(self.wavfile)
self.trial_number = filestruct.parse_trial_number(self.filename)
self.label = filestruct.parse_label(self.filename)
def process(self):
"""
Processes the raw recording data in two steps. The first step is to
condition the data (usually something like normalization, filtering,
etc.), specified by the conditioner belonging to this recording's
processor object. The second step is to calculate features from the
conditioned data. The conditioned data is windowed according to the
processor's windower (window length, overlap) and for each window, the
processor's feature extractor is applied. The conditioned data and the
feature data are returned.
Returns
-------
conditioned_data : array, shape (n_samples_conditioned, n_channels)
The conditioned data.
feature_data : array, shape (n_windows, n_features+1)
The feature data. Each row is an instance. The first column is
the gesture label. The rest of the columns are feature types.
"""
self._conditioner.clear()
cd = self._conditioner.process(self.raw_data)
rb = self.processor.rest_bounds
if rb is not None:
rest_data = cd[rb[0]:rb[1]]
rest_ind = list(
windowind(rest_data.shape[0], self._windower.length,
overlap=self._windower.overlap))
n_rest = len(rest_ind)
else:
rest_ind = []
n_rest = 0
gb = self.processor.gesture_bounds
gest_data = cd[gb[0]:gb[1]]
gest_ind = list(
windowind(gest_data.shape[0], self._windower.length,
overlap=self._windower.overlap))
n_gest = len(gest_ind)
n_rows = n_rest + n_gest
fd = np.zeros((n_rows, self._feature_extractor.n_features+1))
for i, ind in enumerate(rest_ind):
fd[i, 0] = 0
fd[i, 1:] = self._feature_extractor.process(
rest_data[ind[0]:ind[1]])
for i, ind in enumerate(gest_ind):
fd[n_rest+i, 0] = self.label
fd[n_rest+i, 1:] = self._feature_extractor.process(
gest_data[ind[0]:ind[1]])
self.conditioned_data = cd
self.feature_data = fd
return self.conditioned_data, self.feature_data
def window(x, length, overlap=0, axis=0):
"""
Generates a sequence of windows of the input data, each with a specified
length and optional overlap with the previous window. Only windows of the
specified length are retrieved (if windows don't fit evenly into the data).
"""
n = x.shape[axis]
for f, t in windowind(n, length, overlap=overlap):
if axis == 0:
yield x[f:t, :]
else:
yield x[:, f:t]
def windowind(n, length, overlap=0):
"""
Generates a sequence of pairs of indices corresponding to consecutive
windows of an array of length n. Returns a tuple (low_ind, high_ind) which
can be used to window an array like `win = data[low_ind, high_ind]`.
"""
ind = range(0, n, length-overlap)
for i in ind:
if i + length < n:
yield i, i+length
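# A quick sanity check of the index generator above; a tail window shorter than
# `length` is dropped, so for n=10, length=4, overlap=2:
#
#     >>> list(windowind(10, 4, overlap=2))
#     [(0, 4), (2, 6), (4, 8)]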
| [
"pygesture.filestruct.parse_label",
"pygesture.filestruct.find_session_dir",
"pygesture.filestruct.parse_date_string",
"numpy.genfromtxt",
"os.remove",
"os.path.exists",
"pygesture.filestruct.get_recording_dir",
"pygesture.filestruct.parse_trial_number",
"pygesture.wav.write",
"os.path.split",
"os.mkdir",
"pygesture.filestruct.get_feature_file_list",
"os.path.isfile",
"pygesture.filestruct.get_session_list",
"numpy.savetxt",
"pygesture.filestruct.get_recording_file_list",
"os.path.join",
"numpy.zeros",
"pygesture.wav.read",
"multiprocessing.Pool",
"pygesture.filestruct.get_processed_dir"
] | [((3672, 3728), 'pygesture.filestruct.get_feature_file_list', 'filestruct.get_feature_file_list', (['rootdir', 'pid', 'sid_list'], {}), '(rootdir, pid, sid_list)\n', (3704, 3728), False, 'from pygesture import filestruct\n'), ((2155, 2196), 'pygesture.filestruct.get_session_list', 'filestruct.get_session_list', (['rootdir', 'pid'], {}), '(rootdir, pid)\n', (2182, 2196), False, 'from pygesture import filestruct\n'), ((2230, 2250), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'pool'}), '(processes=pool)\n', (2234, 2250), False, 'from multiprocessing import Pool\n'), ((3337, 3366), 'numpy.zeros', 'np.zeros', (['y.shape'], {'dtype': 'bool'}), '(y.shape, dtype=bool)\n', (3345, 3366), True, 'import numpy as np\n'), ((4758, 4804), 'pygesture.filestruct.find_session_dir', 'filestruct.find_session_dir', (['rootdir', 'pid', 'sid'], {}), '(rootdir, pid, sid)\n', (4785, 4804), False, 'from pygesture import filestruct\n'), ((4827, 4869), 'pygesture.filestruct.get_recording_dir', 'filestruct.get_recording_dir', (['self.sessdir'], {}), '(self.sessdir)\n', (4855, 4869), False, 'from pygesture import filestruct\n'), ((4893, 4935), 'pygesture.filestruct.get_processed_dir', 'filestruct.get_processed_dir', (['self.sessdir'], {}), '(self.sessdir)\n', (4921, 4935), False, 'from pygesture import filestruct\n'), ((5468, 5497), 'os.path.isfile', 'os.path.isfile', (['self.featfile'], {}), '(self.featfile)\n', (5482, 5497), False, 'import os\n'), ((7854, 7876), 'pygesture.wav.read', 'wav.read', (['self.wavfile'], {}), '(self.wavfile)\n', (7862, 7876), False, 'from pygesture import wav\n'), ((7907, 7934), 'os.path.split', 'os.path.split', (['self.wavfile'], {}), '(self.wavfile)\n', (7920, 7934), False, 'import os\n'), ((7964, 8008), 'pygesture.filestruct.parse_trial_number', 'filestruct.parse_trial_number', (['self.filename'], {}), '(self.filename)\n', (7993, 8008), False, 'from pygesture import filestruct\n'), ((8030, 8067), 'pygesture.filestruct.parse_label', 'filestruct.parse_label', (['self.filename'], {}), '(self.filename)\n', (8052, 8067), False, 'from pygesture import filestruct\n'), ((9796, 9854), 'numpy.zeros', 'np.zeros', (['(n_rows, self._feature_extractor.n_features + 1)'], {}), '((n_rows, self._feature_extractor.n_features + 1))\n', (9804, 9854), True, 'import numpy as np\n'), ((3172, 3203), 'numpy.genfromtxt', 'np.genfromtxt', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (3185, 3203), True, 'import numpy as np\n'), ((5072, 5114), 'pygesture.filestruct.parse_date_string', 'filestruct.parse_date_string', (['self.sessdir'], {}), '(self.sessdir)\n', (5100, 5114), False, 'from pygesture import filestruct\n'), ((5132, 5160), 'os.path.exists', 'os.path.exists', (['self.procdir'], {}), '(self.procdir)\n', (5146, 5160), False, 'import os\n'), ((5174, 5196), 'os.mkdir', 'os.mkdir', (['self.procdir'], {}), '(self.procdir)\n', (5182, 5196), False, 'import os\n'), ((5511, 5535), 'os.remove', 'os.remove', (['self.featfile'], {}), '(self.featfile)\n', (5520, 5535), False, 'import os\n'), ((5605, 5652), 'pygesture.filestruct.get_recording_file_list', 'filestruct.get_recording_file_list', (['self.rawdir'], {}), '(self.rawdir)\n', (5639, 5652), False, 'from pygesture import filestruct\n'), ((5873, 5913), 'os.path.join', 'os.path.join', (['self.procdir', 'rec.filename'], {}), '(self.procdir, rec.filename)\n', (5885, 5913), False, 'import os\n'), ((5990, 6029), 'pygesture.wav.write', 'wav.write', (['procfile', 'fs_proc', 'proc_data'], {}), '(procfile, fs_proc, proc_data)\n', (5999, 6029), False, 
'from pygesture import wav\n'), ((6047, 6099), 'numpy.savetxt', 'np.savetxt', (['fid', 'features'], {'delimiter': '""","""', 'fmt': '"""%.5e"""'}), "(fid, features, delimiter=',', fmt='%.5e')\n", (6057, 6099), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# encoding: utf-8
"""
@version: 0.1
@author: lyrichu
@license: Apache Licence
@contact: <EMAIL>
@site: http://www.github.com/Lyrichu
@file: SGA.py
@time: 2018/06/05 21:03
@description:
simple Genetic Algorithm(SGA)
"""
import matplotlib
matplotlib.use("Agg")
import numpy as np
import matplotlib.pyplot as plt
from sopt.util.ga_config import *
class SGA:
def __init__(self,lower_bound,upper_bound,variables_num,func,
cross_rate = ga_config.cross_rate,
mutation_rate = ga_config.mutation_rate,
population_size = ga_config.population_size,
generations = ga_config.generations,
binary_code_length= ga_config.binary_code_length,
func_type = ga_config.func_type_min
):
'''
        :param lower_bound: the lower bound of variables, real number or list of real numbers
        :param upper_bound: the upper bound of variables, real number or list of real numbers
:param variables_num: the number of variables
:param func: the target function
:param cross_rate: GA cross rate
:param mutation_rate: GA mutation rate
:param population_size: the size of GA population
:param generations: the GA generations count
:param binary_code_length: the binary code length to represent a real number
:param func_type:'min' means to evaluate the minimum target function;'max'
means to evaluate the maximum target function
'''
self.lower_bound = lower_bound
self.upper_bound = upper_bound
self.variables_num = variables_num
if isinstance(self.lower_bound,(int,float)):
self.lower_bound = [self.lower_bound]*self.variables_num
if isinstance(self.upper_bound,(int,float)):
self.upper_bound = [self.upper_bound]*self.variables_num
self.func = func
self.cross_rate = cross_rate
self.mutation_rate = mutation_rate
self.population_size = population_size
self.generations = generations
self.binary_code_length = binary_code_length
self.func_type = func_type
self.global_best_target = None
self.global_best_point = None
self.generations_best_targets = []
self.global_best_index = 0
self.generations_best_points = []
self.global_best_raw_point = None
def init_population(self):
'''
init the population
:return: None
'''
self.population = np.random.randint(0,2,(self.population_size,self.variables_num*self.binary_code_length))
self.calculate(self.population)
def calculate(self,population,real_population = None,complex_constraints = None,complex_constraints_C = ga_config.complex_constraints_C):
'''
calculate the global_best_target,global_best_points,
generations_best_target,generations_best_points
:param population:
:param real_population: if the population is the real code type population
:param add_generations:if add to generations_best_target and generations_best_points
:return: None
'''
if real_population is None:
real_population = self._convert_binary_to_real(population)
targets_func = np.zeros(self.population_size)
for i in range(self.population_size):
if complex_constraints is None:
targets_func[i] = self.func(real_population[i])
else:
targets_func[i] = self.func(real_population[i])
tmp_plus = self._check_constraints(real_population[i],complex_constraints)
if tmp_plus > 0:
if self.func_type == ga_config.func_type_min:
targets_func[i] += complex_constraints_C*tmp_plus
else:
targets_func[i] -= complex_constraints_C*tmp_plus
if self.func_type == ga_config.func_type_min:
if self.global_best_target is None:
self.global_best_target = np.min(targets_func)
self.global_best_raw_point = population[targets_func.argmin()]
self.global_best_point = real_population[targets_func.argmin()]
else:
if self.global_best_target > np.min(targets_func):
self.global_best_target = np.min(targets_func)
self.global_best_raw_point = population[targets_func.argmin()]
self.global_best_point = real_population[targets_func.argmin()]
self.generations_best_targets.append(np.min(targets_func))
self.generations_best_points.append(real_population[targets_func.argmin()])
else:
if self.global_best_target is None:
self.global_best_target = np.max(targets_func)
self.global_best_raw_point = population[targets_func.argmax()]
self.global_best_point = real_population[targets_func.argmax()]
else:
if self.global_best_target < np.max(targets_func):
self.global_best_target = np.max(targets_func)
self.global_best_raw_point = population[targets_func.argmax()]
self.global_best_point = real_population[targets_func.argmax()]
self.generations_best_targets.append(np.max(targets_func))
self.generations_best_points.append(real_population[targets_func.argmax()])
def _check_constraints(self,data,constraints):
res = 0
for constraint in constraints:
if constraint(data) > 0:
res += constraint(data)
return res
def select(self,probs = None,complex_constraints = None,complex_constraints_C = ga_config.complex_constraints_C,M = ga_config.M):
if probs is None:
real_population = self._convert_binary_to_real(self.population)
targets_func = np.zeros(self.population_size)
for i in range(self.population_size):
if complex_constraints is None:
targets_func[i] = self.func(real_population[i])
else:
targets_func[i] = self.func(real_population[i])
tmp_plus = self._check_constraints(real_population[i],complex_constraints)
if self.func_type == ga_config.func_type_min:
targets_func[i] += tmp_plus*complex_constraints_C
else:
targets_func[i] -= tmp_plus*complex_constraints_C
if targets_func[i] < 0:
if self.func_type == ga_config.func_type_min:
raise ValueError("Please make sure that the target function value is > 0!")
else:
                        targets_func[i] = 1./(abs(targets_func[i])*M)
assert (np.all(targets_func > 0) == True),"Please make sure that the target function value is > 0!"
if self.func_type == ga_config.func_type_min:
targets_func = 1./targets_func
prob_func = targets_func/np.sum(targets_func)
else:
assert (len(probs) == self.population_size and abs(sum(probs)-1) < ga_config.eps), "rank_select_probs should be list or array of size %d,and sum to 1!" % self.population_size
prob_func = probs
new_population = np.zeros_like(self.population)
for i in range(self.population_size):
choice = np.random.choice(self.population_size,p = prob_func)
new_population[i] = self.population[choice]
self.population = new_population
def _convert_binary_to_real(self,population,cross_code = False):
'''
convert binary population to real population
:param population: binary population,shape:(self.population_size,self.binary_code_length*self.variables_num)
:return:real_population,shape:(self.population_size,self.variables_num)
'''
if cross_code:
population = self._convert_from_cross_code(population)
real_population = np.zeros((self.population_size,self.variables_num))
base_max = float(int("1"*self.binary_code_length,2))
for i in range(self.population_size):
for j in range(self.variables_num):
binary_arr = population[i][j*self.binary_code_length:(j+1)*self.binary_code_length]
real_value = int("".join(map(str,list(binary_arr))),2)
real_population[i][j] = real_value*(self.upper_bound[j]-self.lower_bound[j])/base_max + self.lower_bound[j]
return real_population
def _convert_from_cross_code(self,population):
new_population = np.zeros_like(population)
for i in range(self.population_size):
tmp = []
for j in range(self.variables_num):
tmp.append(list(population[i][j*self.binary_code_length:(j+1)*self.binary_code_length]))
#print(tmp)
tmp = list(zip(tmp))
res = []
for t in tmp:
res += list(t[0])
new_population[i] = np.array(res)
return new_population
def cross(self,cross_rate=None):
if cross_rate is None:
cross_rate = self.cross_rate
indices = list(range(self.population_size))
np.random.shuffle(indices)
first_indices = indices[:self.population_size//2]
second_indices = indices[self.population_size//2:]
for i in range(self.population_size//2):
if np.random.random() < cross_rate:
# generate position to cross,using single point cross method
cross_pos = np.random.choice(self.binary_code_length*self.variables_num)
self.population[first_indices[i],cross_pos:],self.population[second_indices[i],cross_pos:] = self.population[second_indices[i],cross_pos:],self.population[first_indices[i],cross_pos:]
def mutate(self,mutation_rate = None):
if mutation_rate is None:
mutation_rate = self.mutation_rate
        # using numpy vectorized method to accelerate the computing speed
r_matrix = np.random.rand(self.population_size,self.binary_code_length*self.variables_num)
bool_matrix = r_matrix < mutation_rate
self.population[bool_matrix] ^= 1
def run(self):
self.init_population()
for i in range(self.generations):
self.select()
self.cross()
self.mutate()
self.calculate(self.population)
if self.func_type == ga_config.func_type_min:
self.global_best_index = np.array(self.generations_best_targets).argmin()
else:
self.global_best_index = np.array(self.generations_best_targets).argmax()
def save_plot(self,save_name = "SGA.png"):
plt.plot(self.generations_best_targets,'r-')
plt.title("Best target function value for %d generations" % self.generations)
plt.xlabel("generations")
plt.ylabel("best target function value")
plt.savefig(save_name)
def show_result(self):
print("-"*20,"SGA config is:","-"*20)
# iterate all the class attributes
for k,v in self.__dict__.items():
if k not in ['population','generations_best_points','global_best_point','generations_best_targets','rank_select_probs',
'global_best_index','global_best_target','global_best_raw_point']:
print("%s:%s" %(k,v))
print("-"*20,"SGA caculation result is:","-"*20)
print("global best generation index/total generations:%s/%s" % (self.global_best_index,self.generations))
print("global best point:%s" % self.global_best_point)
print("global best target:%s" % self.global_best_target)
| [
"matplotlib.pyplot.savefig",
"numpy.random.rand",
"matplotlib.pyplot.ylabel",
"matplotlib.use",
"numpy.random.choice",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.random.random",
"numpy.max",
"numpy.array",
"numpy.random.randint",
"numpy.zeros",
"numpy.sum",
"numpy.min",
"matplotlib.pyplot.title",
"numpy.all",
"numpy.zeros_like",
"numpy.random.shuffle"
] | [((261, 282), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (275, 282), False, 'import matplotlib\n'), ((2554, 2652), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', '(self.population_size, self.variables_num * self.binary_code_length)'], {}), '(0, 2, (self.population_size, self.variables_num * self.\n binary_code_length))\n', (2571, 2652), True, 'import numpy as np\n'), ((3322, 3352), 'numpy.zeros', 'np.zeros', (['self.population_size'], {}), '(self.population_size)\n', (3330, 3352), True, 'import numpy as np\n'), ((7459, 7489), 'numpy.zeros_like', 'np.zeros_like', (['self.population'], {}), '(self.population)\n', (7472, 7489), True, 'import numpy as np\n'), ((8167, 8219), 'numpy.zeros', 'np.zeros', (['(self.population_size, self.variables_num)'], {}), '((self.population_size, self.variables_num))\n', (8175, 8219), True, 'import numpy as np\n'), ((8777, 8802), 'numpy.zeros_like', 'np.zeros_like', (['population'], {}), '(population)\n', (8790, 8802), True, 'import numpy as np\n'), ((9408, 9434), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (9425, 9434), True, 'import numpy as np\n'), ((10234, 10321), 'numpy.random.rand', 'np.random.rand', (['self.population_size', '(self.binary_code_length * self.variables_num)'], {}), '(self.population_size, self.binary_code_length * self.\n variables_num)\n', (10248, 10321), True, 'import numpy as np\n'), ((10913, 10958), 'matplotlib.pyplot.plot', 'plt.plot', (['self.generations_best_targets', '"""r-"""'], {}), "(self.generations_best_targets, 'r-')\n", (10921, 10958), True, 'import matplotlib.pyplot as plt\n'), ((10966, 11043), 'matplotlib.pyplot.title', 'plt.title', (["('Best target function value for %d generations' % self.generations)"], {}), "('Best target function value for %d generations' % self.generations)\n", (10975, 11043), True, 'import matplotlib.pyplot as plt\n'), ((11052, 11077), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""generations"""'], {}), "('generations')\n", (11062, 11077), True, 'import matplotlib.pyplot as plt\n'), ((11086, 11126), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""best target function value"""'], {}), "('best target function value')\n", (11096, 11126), True, 'import matplotlib.pyplot as plt\n'), ((11135, 11157), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_name'], {}), '(save_name)\n', (11146, 11157), True, 'import matplotlib.pyplot as plt\n'), ((5987, 6017), 'numpy.zeros', 'np.zeros', (['self.population_size'], {}), '(self.population_size)\n', (5995, 6017), True, 'import numpy as np\n'), ((7557, 7608), 'numpy.random.choice', 'np.random.choice', (['self.population_size'], {'p': 'prob_func'}), '(self.population_size, p=prob_func)\n', (7573, 7608), True, 'import numpy as np\n'), ((9193, 9206), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (9201, 9206), True, 'import numpy as np\n'), ((4098, 4118), 'numpy.min', 'np.min', (['targets_func'], {}), '(targets_func)\n', (4104, 4118), True, 'import numpy as np\n'), ((4646, 4666), 'numpy.min', 'np.min', (['targets_func'], {}), '(targets_func)\n', (4652, 4666), True, 'import numpy as np\n'), ((4860, 4880), 'numpy.max', 'np.max', (['targets_func'], {}), '(targets_func)\n', (4866, 4880), True, 'import numpy as np\n'), ((5408, 5428), 'numpy.max', 'np.max', (['targets_func'], {}), '(targets_func)\n', (5414, 5428), True, 'import numpy as np\n'), ((6948, 6972), 'numpy.all', 'np.all', (['(targets_func > 0)'], {}), '(targets_func > 0)\n', (6954, 6972), True, 'import numpy as np\n'), ((7182, 
7202), 'numpy.sum', 'np.sum', (['targets_func'], {}), '(targets_func)\n', (7188, 7202), True, 'import numpy as np\n'), ((9616, 9634), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (9632, 9634), True, 'import numpy as np\n'), ((9754, 9816), 'numpy.random.choice', 'np.random.choice', (['(self.binary_code_length * self.variables_num)'], {}), '(self.binary_code_length * self.variables_num)\n', (9770, 9816), True, 'import numpy as np\n'), ((4341, 4361), 'numpy.min', 'np.min', (['targets_func'], {}), '(targets_func)\n', (4347, 4361), True, 'import numpy as np\n'), ((4409, 4429), 'numpy.min', 'np.min', (['targets_func'], {}), '(targets_func)\n', (4415, 4429), True, 'import numpy as np\n'), ((5103, 5123), 'numpy.max', 'np.max', (['targets_func'], {}), '(targets_func)\n', (5109, 5123), True, 'import numpy as np\n'), ((5171, 5191), 'numpy.max', 'np.max', (['targets_func'], {}), '(targets_func)\n', (5177, 5191), True, 'import numpy as np\n'), ((10708, 10747), 'numpy.array', 'np.array', (['self.generations_best_targets'], {}), '(self.generations_best_targets)\n', (10716, 10747), True, 'import numpy as np\n'), ((10808, 10847), 'numpy.array', 'np.array', (['self.generations_best_targets'], {}), '(self.generations_best_targets)\n', (10816, 10847), True, 'import numpy as np\n')] |
import networkx as nx
from grakel.utils import graph_from_networkx
def find_node(gr, att, val):
"""Applicable for the first-layer WL feature (i.e. the nodes themselves)"""
return len([node for node in gr.nodes(data=True) if node[1][att] == val])
def find_2_structure(gr, att, encoding):
"""Applicable for the second-layer WL features (i.e. the nodes + their 1-neighbours).
    This implementation is faulty; do not use this function."""
if "~" in encoding:
encoding = encoding.split("~")
encoding = [(e, ) for e in encoding]
root_node = encoding[0][0]
leaf_node = [encoding[e][0] for e in range(1, len(encoding))]
counter = {x: leaf_node.count(x) for x in set(leaf_node)}
counts = []
for node in gr.nodes(data=True):
if node[1][att] == root_node:
count = {x: 0 for x in set(leaf_node)}
for neighbor in nx.neighbors(gr, node[0]):
if gr.nodes[neighbor][att] in leaf_node:
count[gr.nodes[neighbor][att]] += 1
counts.append(count)
for c in counts:
if c == counter: return True
return False
def find_wl_feature(test, feature, kernel, ):
"""Return the number of occurrence of --feature-- in --test--, based on a --kernel--."""
import numpy as np
if not isinstance(test, list): test = [test]
test = graph_from_networkx(test, 'op_name', )
feat_map = kernel.feature_map(flatten=False)
len_feat_map = [len(f) for f in feat_map.values()]
try:
idx = list(kernel.feature_map(flatten=True).values()).index(feature[0])
except KeyError:
raise KeyError("Feature " + str(feature) + ' is not found in the training set of the kernel!')
embedding = kernel.kern.transform(test, return_embedding_only=True)
for i, em in enumerate(embedding):
embedding[i] = em.flatten()[:len_feat_map[i]]
return np.hstack(embedding)[idx]
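# A usage sketch with hypothetical objects: `wl_kernel` stands for a fitted
# Weisfeiler-Lehman kernel wrapper exposing feature_map() and kern.transform()
# as assumed above, and `g` for a networkx graph whose nodes carry an 'op_name'
# attribute --
#
#     count = find_wl_feature(g, ('conv3x3',), wl_kernel)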
| [
"networkx.neighbors",
"grakel.utils.graph_from_networkx",
"numpy.hstack"
] | [((1352, 1388), 'grakel.utils.graph_from_networkx', 'graph_from_networkx', (['test', '"""op_name"""'], {}), "(test, 'op_name')\n", (1371, 1388), False, 'from grakel.utils import graph_from_networkx\n'), ((1885, 1905), 'numpy.hstack', 'np.hstack', (['embedding'], {}), '(embedding)\n', (1894, 1905), True, 'import numpy as np\n'), ((880, 905), 'networkx.neighbors', 'nx.neighbors', (['gr', 'node[0]'], {}), '(gr, node[0])\n', (892, 905), True, 'import networkx as nx\n')] |
import os,sys,glob
import pylab
import os.path
from .features import *
from .segment import *
from .audiodb import *
from .testsignal import *
from .psychoacoustics import *
from .tuning import *
from .sound import *
from .plca import *
from .distance import *
from .classifier import *
from .error import *
from .beat import *
sep = os.path.sep
bregman_data_root = os.path.split(__file__)[0]
examples_dir = os.path.join(bregman_data_root,"examples"+sep)
audio_dir = os.path.join(bregman_data_root,"audio"+sep)
sys.path.append(examples_dir)
def get_tutorials():
"""
    Return a sorted list of the tutorial scripts in the bregman/examples folder.
"""
tlist = glob.glob(os.path.join(examples_dir,"*.py"))
tlist.sort()
return tlist
| [
"sys.path.append",
"os.path.join",
"os.path.split"
] | [((411, 460), 'os.path.join', 'os.path.join', (['bregman_data_root', "('examples' + sep)"], {}), "(bregman_data_root, 'examples' + sep)\n", (423, 460), False, 'import os, sys, glob\n'), ((470, 516), 'os.path.join', 'os.path.join', (['bregman_data_root', "('audio' + sep)"], {}), "(bregman_data_root, 'audio' + sep)\n", (482, 516), False, 'import os, sys, glob\n'), ((514, 543), 'sys.path.append', 'sys.path.append', (['examples_dir'], {}), '(examples_dir)\n', (529, 543), False, 'import os, sys, glob\n'), ((368, 391), 'os.path.split', 'os.path.split', (['__file__'], {}), '(__file__)\n', (381, 391), False, 'import os, sys, glob\n'), ((677, 711), 'os.path.join', 'os.path.join', (['examples_dir', '"""*.py"""'], {}), "(examples_dir, '*.py')\n", (689, 711), False, 'import os, sys, glob\n')] |
import cv2
from scipy import misc
import copy
import gaze.misc_utils as MU
import gaze.input_utils as IU
import keras as K
import tensorflow as tf
import os
import sys
import json
import ipdb
from IPython import embed
from keras.models import Model, Sequential # keras/engine/training.py
import keras.layers as L
import re
import numpy as np
import time
import matplotlib.pyplot as plt
import torch.nn.functional as F
import os.path as path
import math
import torch
from astropy.convolution import convolve
from astropy.convolution.kernels import Gaussian2DKernel
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.7
# creating ground truth gaze heatmap from gaze coordinates
class DatasetWithHeatmap:
def __init__(self, GHmap=None, heatmap_shape=14):
self.frameid2pos = None
self.GHmap = GHmap # GHmap means gaze heap map
self.NUM_ACTION = 18
self.xSCALE, self.ySCALE = 8, 4 # was 6,3
self.SCR_W, self.SCR_H = 160*self.xSCALE, 210*self.ySCALE
self.train_size = 10
self.HEATMAP_SHAPE = heatmap_shape
self.sigmaH = 28.50 * self.HEATMAP_SHAPE / self.SCR_H
self.sigmaW = 44.58 * self.HEATMAP_SHAPE / self.SCR_W
def createGazeHeatmap(self, gaze_coords, heatmap_shape, viz=False, asc=False, asc_file=''):
print('gaze_coords length: ', len(gaze_coords))
if not asc:
self.frameid2pos = self.get_gaze_data(gaze_coords)
else:
self.frameid2pos, _, _, _, _ = self.read_gaze_data_asc_file(
asc_file)
self.train_size = len(self.frameid2pos.keys())
self.HEATMAP_SHAPE = heatmap_shape
self.sigmaH = 28.50 * self.HEATMAP_SHAPE / self.SCR_H
self.sigmaW = 44.58 * self.HEATMAP_SHAPE / self.SCR_W
self.GHmap = np.zeros(
[self.train_size, self.HEATMAP_SHAPE, self.HEATMAP_SHAPE, 1], dtype=np.float32)
t1 = time.time()
bad_count, tot_count = 0, 0
for (i, fid) in enumerate(self.frameid2pos.keys()):
tot_count += len(self.frameid2pos[fid])
if asc:
bad_count += self.convert_gaze_pos_to_heap_map(
self.frameid2pos[fid], out=self.GHmap[i])
else:
bad_count += self.convert_gaze_coords_to_heap_map(
self.frameid2pos[fid], out=self.GHmap[i])
# print("Bad gaze (x,y) sample: %d (%.2f%%, total gaze sample: %d)" % (bad_count, 100*float(bad_count)/tot_count, tot_count))
# print("'Bad' means the gaze position is outside the 160*210 screen")
self.GHmap = self.preprocess_gaze_heatmap(0).astype(np.float32)
# normalizing such that values range between 0 and 1
for i in range(len(self.GHmap)):
max_val = self.GHmap[i].max()
min_val = self.GHmap[i].min()
if max_val != min_val:
self.GHmap[i] = (self.GHmap[i] - min_val)/(max_val - min_val)
if viz:
cv2.imwrite('gazeGT/viz/'+str(i)+'.png', self.GHmap[i]*255)
return self.GHmap
def get_gaze_data(self, gaze_coords):
frameid2pos = {}
frame_id = 0
for gaze_list in gaze_coords:
isnan = [x for x in gaze_list if math.isnan(x)]
if len(isnan) > 0:
frameid2pos[frame_id] = []
frame_id += 1
continue
gaze_xy_list = []
for i in range(0, len(gaze_list), 2):
x, y = gaze_list[i]*self.xSCALE, gaze_list[i+1]*self.ySCALE
gaze_xy_list.append((x, y))
frameid2pos[frame_id] = gaze_xy_list
frame_id += 1
if len(frameid2pos) < 1000: # simple sanity check
print("Warning: did you provide the correct gaze data? Because the data for only %d frames is detected" % (
len(frameid2pos)))
few_cnt = 0
for v in frameid2pos.values():
if len(v) < 10:
few_cnt += 1
# print ("Warning: %d frames have less than 10 gaze samples. (%.1f%%, total frame: %d)" % \
return frameid2pos
def read_gaze_data_asc_file(self, fname):
""" This function reads a ASC file and returns
a dictionary mapping frame ID to a list of gaze positions,
a dictionary mapping frame ID to action """
with open(fname, 'r') as f:
lines = f.readlines()
frameid, xpos, ypos = "BEFORE-FIRST-FRAME", None, None
frameid2pos = {frameid: []}
frameid2action = {frameid: None}
frameid2duration = {frameid: None}
frameid2unclipped_reward = {frameid: None}
frameid2episode = {frameid: None}
start_timestamp = 0
scr_msg = re.compile(
r"MSG\s+(\d+)\s+SCR_RECORDER FRAMEID (\d+) UTID (\w+)")
freg = r"[-+]?[0-9]*\.?[0-9]+" # regex for floating point numbers
gaze_msg = re.compile(r"(\d+)\s+(%s)\s+(%s)" % (freg, freg))
act_msg = re.compile(r"MSG\s+(\d+)\s+key_pressed atari_action (\d+)")
reward_msg = re.compile(r"MSG\s+(\d+)\s+reward (\d+)")
episode_msg = re.compile(r"MSG\s+(\d+)\s+episode (\d+)")
for (i, line) in enumerate(lines):
match_sample = gaze_msg.match(line)
if match_sample:
timestamp, xpos, ypos = match_sample.group(
1), match_sample.group(2), match_sample.group(3)
xpos, ypos = float(xpos), float(ypos)
frameid2pos[frameid].append((xpos, ypos))
continue
match_scr_msg = scr_msg.match(line)
if match_scr_msg: # when a new id is encountered
old_frameid = frameid
timestamp, frameid, UTID = match_scr_msg.group(
1), match_scr_msg.group(2), match_scr_msg.group(3)
frameid2duration[old_frameid] = int(
timestamp) - start_timestamp
start_timestamp = int(timestamp)
frameid = self.make_unique_frame_id(UTID, frameid)
frameid2pos[frameid] = []
frameid2action[frameid] = None
continue
match_action = act_msg.match(line)
if match_action:
timestamp, action_label = match_action.group(
1), match_action.group(2)
if frameid2action[frameid] is None:
frameid2action[frameid] = int(action_label)
else:
print("Warning: there is more than 1 action for frame id %s. Not supposed to happen." % str(
frameid))
continue
match_reward = reward_msg.match(line)
if match_reward:
timestamp, reward = match_reward.group(
1), match_reward.group(2)
if frameid not in frameid2unclipped_reward:
frameid2unclipped_reward[frameid] = int(reward)
else:
print("Warning: there is more than 1 reward for frame id %s. Not supposed to happen." % str(
frameid))
continue
match_episode = episode_msg.match(line)
if match_episode:
timestamp, episode = match_episode.group(
1), match_episode.group(2)
assert frameid not in frameid2episode, "ERROR: there is more than 1 episode for frame id %s. Not supposed to happen." % str(
frameid)
frameid2episode[frameid] = int(episode)
continue
# throw out gazes after the last frame, because the game has ended but eye tracker keeps recording
frameid2pos[frameid] = []
if len(frameid2pos) < 1000: # simple sanity check
print("Warning: did you provide the correct ASC file? Because the data for only %d frames is detected" % (
len(frameid2pos)))
            input("Press any key to continue")
few_cnt = 0
for v in frameid2pos.values():
if len(v) < 10:
few_cnt += 1
print("Warning: %d frames have less than 10 gaze samples. (%.1f%%, total frame: %d)" %
(few_cnt, 100.0*few_cnt/len(frameid2pos), len(frameid2pos)))
return frameid2pos, frameid2action, frameid2duration, frameid2unclipped_reward, frameid2episode
# bg_prob_density seems to hurt accuracy. Better set it to 0
def preprocess_gaze_heatmap(self, bg_prob_density, debug_plot_result=False):
from scipy.stats import multivariate_normal
import tensorflow as tf
# don't move this to the top, as people who import this file might not have keras or tf
import keras as K
model = K.models.Sequential()
model.add(K.layers.Lambda(lambda x: x+bg_prob_density,
input_shape=(self.GHmap.shape[1], self.GHmap.shape[2], 1)))
if self.sigmaH > 1 and self.sigmaW > 1: # was 0,0; if too small, dont blur
lh, lw = int(4*self.sigmaH), int(4*self.sigmaW)
# so the kernel size is [lh*2+1,lw*2+1]
x, y = np.mgrid[-lh:lh+1:1, -lw:lw+1:1]
pos = np.dstack((x, y))
gkernel = multivariate_normal.pdf(
pos, mean=[0, 0], cov=[[self.sigmaH*self.sigmaH, 0], [0, self.sigmaW*self.sigmaW]])
assert gkernel.sum() > 0.95, "Simple sanity check: prob density should add up to nearly 1.0"
model.add(K.layers.Lambda(lambda x: tf.pad(
x, [(0, 0), (lh, lh), (lw, lw), (0, 0)], 'REFLECT')))
# print(gkernel.shape, sigmaH, sigmaW)
model.add(K.layers.Conv2D(1, kernel_size=gkernel.shape, strides=1, padding="valid", use_bias=False,
activation="linear", kernel_initializer=K.initializers.Constant(gkernel)))
else:
print("WARNING: Gaussian filter's sigma is 0, i.e. no blur.")
model.compile(optimizer='rmsprop', # not used
loss='categorical_crossentropy', # not used
metrics=None)
output = model.predict(self.GHmap, batch_size=500)
if debug_plot_result:
print(r"""debug_plot_result is True. Entering IPython console. You can run:
%matplotlib
import matplotlib.pyplot as plt
f, axarr = plt.subplots(1,2)
axarr[0].imshow(gkernel)
rnd=np.random.randint(output.shape[0]); print "rand idx:", rnd
axarr[1].imshow(output[rnd,...,0])""")
embed()
shape_before, shape_after = self.GHmap.shape, output.shape
assert shape_before == shape_after, """
Simple sanity check: shape changed after preprocessing.
Your preprocessing code might be wrong. Check the shape of output tensor of your tensorflow code above"""
return output
def make_unique_frame_id(self, UTID, frameid):
return (hash(UTID), int(frameid))
def convert_gaze_coords_to_heap_map(self, gaze_pos_list, out):
h, w = out.shape[0], out.shape[1]
bad_count = 0
if(not np.isnan(gaze_pos_list).all()):
for j in range(0, len(gaze_pos_list)):
x = gaze_pos_list[j][0]
y = gaze_pos_list[j][1]
try:
out[int(y/self.SCR_H*h), int(x/self.SCR_W*w)] += 1
except IndexError: # the computed X,Y position is not in the gaze heat map
bad_count += 1
return bad_count
def convert_gaze_pos_to_heap_map(self, gaze_pos_list, out):
h, w = out.shape[0], out.shape[1]
bad_count = 0
for (x, y) in gaze_pos_list:
try:
out[int(y/self.SCR_H*h), int(x/self.SCR_W*w)] += 1
except IndexError: # the computed X,Y position is not in the gaze heat map
bad_count += 1
return bad_count
# pretrained gaze heat maps
class PretrainedHeatmap():
def __init__(self, game_name, model_path, meanfile_path):
# pretrained gaze model (novice/experts)
self.model_path = model_path
self.dropout_prob = 0.5 # float(sys.argv[3])
self.heatmap_shape = 84
k = 4
# height * width * channel This cannot read from file and needs to be provided here
self.SHAPE = (84, 84, k)
self.meanfile = meanfile_path
self.mean = np.load(self.meanfile) # shape: #(1,)
MU.BMU.save_GPU_mem_keras()
MU.keras_model_serialization_bug_fix()
self.model_arch()
def model_arch(self):
###############################
# Architecture of the network #
###############################
inputs = L.Input(shape=self.SHAPE)
x = inputs # inputs is used by the line "Model(inputs, ... )" below
conv1 = L.Conv2D(32, (8, 8), strides=4, padding='valid')
x = conv1(x)
# print(conv1.output_shape)
x = L.Activation('relu')(x)
x = L.BatchNormalization()(x)
x = L.Dropout(self.dropout_prob)(x)
conv2 = L.Conv2D(64, (4, 4), strides=2, padding='valid')
x = conv2(x)
# print(conv2.output_shape)
x = L.Activation('relu')(x)
x = L.BatchNormalization()(x)
x = L.Dropout(self.dropout_prob)(x)
conv3 = L.Conv2D(64, (3, 3), strides=1, padding='valid')
x = conv3(x)
# print(conv3.output_shape)
x = L.Activation('relu')(x)
x = L.BatchNormalization()(x)
x = L.Dropout(self.dropout_prob)(x)
deconv1 = L.Conv2DTranspose(64, (3, 3), strides=1, padding='valid')
x = deconv1(x)
# print(deconv1.output_shape)
x = L.Activation('relu')(x)
x = L.BatchNormalization()(x)
x = L.Dropout(self.dropout_prob)(x)
deconv2 = L.Conv2DTranspose(32, (4, 4), strides=2, padding='valid')
x = deconv2(x)
# print(deconv2.output_shape)
x = L.Activation('relu')(x)
x = L.BatchNormalization()(x)
x = L.Dropout(self.dropout_prob)(x)
deconv3 = L.Conv2DTranspose(1, (8, 8), strides=4, padding='valid')
x = deconv3(x)
# print(deconv3.output_shape)
outputs = L.Activation(MU.my_softmax)(x)
self.model = Model(inputs=inputs, outputs=outputs)
opt = K.optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0)
self.model.compile(loss=MU.my_kld, optimizer=opt, metrics=[MU.NSS])
self.model.load_weights(self.model_path)
def normalize(self, obs):
# DONE: use softmax here instead!!
H, W = obs.shape[0], obs.shape[1]
obs = torch.tensor(obs)
norm_map = F.softmax(obs.view(1,-1), dim=1).view(H,W)
norm_map = norm_map.cpu().detach().numpy()
# max_val, min_val = np.max(obs), np.min(obs)
# if(max_val-min_val != 0):
# norm_map = (obs-min_val)/(max_val-min_val)
# else:
# norm_map = obs
return norm_map
def get_heatmap(self, stacked_img, shape, viz=False):
traj_length = len(stacked_img)
stacked_obs = np.zeros((traj_length, 84, 84, 4))
for i in range(traj_length):
# convert stacked frame list (for a trajectory) into a batch
img_np = stacked_img[i].squeeze() # (1,84,84,4) --> (84,84,4)
img_np = img_np.astype(np.float32) / 255.0 # not normalized
img_np -= self.mean
stacked_obs[i, :, :, :] = img_np.squeeze()
val_predict = self.model.predict(stacked_obs)
output = np.zeros((traj_length, shape, shape))
for i in range(traj_length):
heatmap = val_predict[i, :, :].squeeze()
#convolve heatmap with gaussian filter
heatmap = convolve(heatmap, Gaussian2DKernel(x_stddev=1))
# Normalize predicted heatmap such that largest element is 1.0
heatmap_norm = self.normalize(heatmap)
# subsample to desired shape for heatmap (TODO: meanpooling)
if shape!=84:
print('reshaping predicted heatmap: ', shape)
output[i, :, :] = self.normalize(misc.imresize(
heatmap_norm, [shape, shape], interp='bilinear'))
else:
output[i, :, :] = heatmap_norm
if viz:
cv2.imwrite('gazeNetwork/viz/'+str(i)+'_out.png',
output[i, :, :].squeeze()*255)
cv2.imwrite('gazeNetwork/viz/'+str(i) +
'_pred.png', heatmap_norm*255)
# return np.expand_dims(output, axis=3)
return output
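# A usage sketch (hypothetical inputs): `gaze_coords` is a per-frame list of
# flattened (x, y) gaze coordinates, as consumed by get_gaze_data() above --
#
#     gt_maps = DatasetWithHeatmap().createGazeHeatmap(gaze_coords, heatmap_shape=84)
#     # shape (n_frames, 84, 84, 1), each map min-max normalized to [0, 1]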
| [
"keras.layers.Conv2D",
"tensorflow.pad",
"re.compile",
"gaze.misc_utils.BMU.save_GPU_mem_keras",
"keras.layers.Activation",
"scipy.misc.imresize",
"keras.optimizers.Adadelta",
"IPython.embed",
"astropy.convolution.kernels.Gaussian2DKernel",
"keras.models.Model",
"tensorflow.ConfigProto",
"gaze.misc_utils.keras_model_serialization_bug_fix",
"keras.models.Sequential",
"numpy.isnan",
"keras.layers.Conv2DTranspose",
"keras.layers.BatchNormalization",
"time.time",
"keras.layers.Dropout",
"numpy.dstack",
"scipy.stats.multivariate_normal.pdf",
"keras.layers.Lambda",
"torch.tensor",
"numpy.zeros",
"keras.layers.Input",
"keras.initializers.Constant",
"numpy.load",
"math.isnan"
] | [((576, 592), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (590, 592), True, 'import tensorflow as tf\n'), ((1814, 1906), 'numpy.zeros', 'np.zeros', (['[self.train_size, self.HEATMAP_SHAPE, self.HEATMAP_SHAPE, 1]'], {'dtype': 'np.float32'}), '([self.train_size, self.HEATMAP_SHAPE, self.HEATMAP_SHAPE, 1],\n dtype=np.float32)\n', (1822, 1906), True, 'import numpy as np\n'), ((1930, 1941), 'time.time', 'time.time', ([], {}), '()\n', (1939, 1941), False, 'import time\n'), ((4770, 4840), 're.compile', 're.compile', (['"""MSG\\\\s+(\\\\d+)\\\\s+SCR_RECORDER FRAMEID (\\\\d+) UTID (\\\\w+)"""'], {}), "('MSG\\\\s+(\\\\d+)\\\\s+SCR_RECORDER FRAMEID (\\\\d+) UTID (\\\\w+)')\n", (4780, 4840), False, 'import re\n'), ((4944, 4995), 're.compile', 're.compile', (["('(\\\\d+)\\\\s+(%s)\\\\s+(%s)' % (freg, freg))"], {}), "('(\\\\d+)\\\\s+(%s)\\\\s+(%s)' % (freg, freg))\n", (4954, 4995), False, 'import re\n'), ((5012, 5074), 're.compile', 're.compile', (['"""MSG\\\\s+(\\\\d+)\\\\s+key_pressed atari_action (\\\\d+)"""'], {}), "('MSG\\\\s+(\\\\d+)\\\\s+key_pressed atari_action (\\\\d+)')\n", (5022, 5074), False, 'import re\n'), ((5093, 5137), 're.compile', 're.compile', (['"""MSG\\\\s+(\\\\d+)\\\\s+reward (\\\\d+)"""'], {}), "('MSG\\\\s+(\\\\d+)\\\\s+reward (\\\\d+)')\n", (5103, 5137), False, 'import re\n'), ((5157, 5202), 're.compile', 're.compile', (['"""MSG\\\\s+(\\\\d+)\\\\s+episode (\\\\d+)"""'], {}), "('MSG\\\\s+(\\\\d+)\\\\s+episode (\\\\d+)')\n", (5167, 5202), False, 'import re\n'), ((8810, 8831), 'keras.models.Sequential', 'K.models.Sequential', ([], {}), '()\n', (8829, 8831), True, 'import keras as K\n'), ((12555, 12577), 'numpy.load', 'np.load', (['self.meanfile'], {}), '(self.meanfile)\n', (12562, 12577), True, 'import numpy as np\n'), ((12603, 12630), 'gaze.misc_utils.BMU.save_GPU_mem_keras', 'MU.BMU.save_GPU_mem_keras', ([], {}), '()\n', (12628, 12630), True, 'import gaze.misc_utils as MU\n'), ((12639, 12677), 'gaze.misc_utils.keras_model_serialization_bug_fix', 'MU.keras_model_serialization_bug_fix', ([], {}), '()\n', (12675, 12677), True, 'import gaze.misc_utils as MU\n'), ((12869, 12894), 'keras.layers.Input', 'L.Input', ([], {'shape': 'self.SHAPE'}), '(shape=self.SHAPE)\n', (12876, 12894), True, 'import keras.layers as L\n'), ((12989, 13037), 'keras.layers.Conv2D', 'L.Conv2D', (['(32)', '(8, 8)'], {'strides': '(4)', 'padding': '"""valid"""'}), "(32, (8, 8), strides=4, padding='valid')\n", (12997, 13037), True, 'import keras.layers as L\n'), ((13230, 13278), 'keras.layers.Conv2D', 'L.Conv2D', (['(64)', '(4, 4)'], {'strides': '(2)', 'padding': '"""valid"""'}), "(64, (4, 4), strides=2, padding='valid')\n", (13238, 13278), True, 'import keras.layers as L\n'), ((13471, 13519), 'keras.layers.Conv2D', 'L.Conv2D', (['(64)', '(3, 3)'], {'strides': '(1)', 'padding': '"""valid"""'}), "(64, (3, 3), strides=1, padding='valid')\n", (13479, 13519), True, 'import keras.layers as L\n'), ((13714, 13771), 'keras.layers.Conv2DTranspose', 'L.Conv2DTranspose', (['(64)', '(3, 3)'], {'strides': '(1)', 'padding': '"""valid"""'}), "(64, (3, 3), strides=1, padding='valid')\n", (13731, 13771), True, 'import keras.layers as L\n'), ((13970, 14027), 'keras.layers.Conv2DTranspose', 'L.Conv2DTranspose', (['(32)', '(4, 4)'], {'strides': '(2)', 'padding': '"""valid"""'}), "(32, (4, 4), strides=2, padding='valid')\n", (13987, 14027), True, 'import keras.layers as L\n'), ((14226, 14282), 'keras.layers.Conv2DTranspose', 'L.Conv2DTranspose', (['(1)', '(8, 8)'], {'strides': '(4)', 'padding': '"""valid"""'}), 
"(1, (8, 8), strides=4, padding='valid')\n", (14243, 14282), True, 'import keras.layers as L\n'), ((14416, 14453), 'keras.models.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': 'outputs'}), '(inputs=inputs, outputs=outputs)\n', (14421, 14453), False, 'from keras.models import Model, Sequential\n'), ((14468, 14533), 'keras.optimizers.Adadelta', 'K.optimizers.Adadelta', ([], {'lr': '(1.0)', 'rho': '(0.95)', 'epsilon': '(1e-08)', 'decay': '(0.0)'}), '(lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0)\n', (14489, 14533), True, 'import keras as K\n'), ((14790, 14807), 'torch.tensor', 'torch.tensor', (['obs'], {}), '(obs)\n', (14802, 14807), False, 'import torch\n'), ((15257, 15291), 'numpy.zeros', 'np.zeros', (['(traj_length, 84, 84, 4)'], {}), '((traj_length, 84, 84, 4))\n', (15265, 15291), True, 'import numpy as np\n'), ((15726, 15763), 'numpy.zeros', 'np.zeros', (['(traj_length, shape, shape)'], {}), '((traj_length, shape, shape))\n', (15734, 15763), True, 'import numpy as np\n'), ((8850, 8960), 'keras.layers.Lambda', 'K.layers.Lambda', (['(lambda x: x + bg_prob_density)'], {'input_shape': '(self.GHmap.shape[1], self.GHmap.shape[2], 1)'}), '(lambda x: x + bg_prob_density, input_shape=(self.GHmap.\n shape[1], self.GHmap.shape[2], 1))\n', (8865, 8960), True, 'import keras as K\n'), ((9256, 9273), 'numpy.dstack', 'np.dstack', (['(x, y)'], {}), '((x, y))\n', (9265, 9273), True, 'import numpy as np\n'), ((9296, 9412), 'scipy.stats.multivariate_normal.pdf', 'multivariate_normal.pdf', (['pos'], {'mean': '[0, 0]', 'cov': '[[self.sigmaH * self.sigmaH, 0], [0, self.sigmaW * self.sigmaW]]'}), '(pos, mean=[0, 0], cov=[[self.sigmaH * self.sigmaH, \n 0], [0, self.sigmaW * self.sigmaW]])\n', (9319, 9412), False, 'from scipy.stats import multivariate_normal\n'), ((10687, 10694), 'IPython.embed', 'embed', ([], {}), '()\n', (10692, 10694), False, 'from IPython import embed\n'), ((13107, 13127), 'keras.layers.Activation', 'L.Activation', (['"""relu"""'], {}), "('relu')\n", (13119, 13127), True, 'import keras.layers as L\n'), ((13143, 13165), 'keras.layers.BatchNormalization', 'L.BatchNormalization', ([], {}), '()\n', (13163, 13165), True, 'import keras.layers as L\n'), ((13181, 13209), 'keras.layers.Dropout', 'L.Dropout', (['self.dropout_prob'], {}), '(self.dropout_prob)\n', (13190, 13209), True, 'import keras.layers as L\n'), ((13348, 13368), 'keras.layers.Activation', 'L.Activation', (['"""relu"""'], {}), "('relu')\n", (13360, 13368), True, 'import keras.layers as L\n'), ((13384, 13406), 'keras.layers.BatchNormalization', 'L.BatchNormalization', ([], {}), '()\n', (13404, 13406), True, 'import keras.layers as L\n'), ((13422, 13450), 'keras.layers.Dropout', 'L.Dropout', (['self.dropout_prob'], {}), '(self.dropout_prob)\n', (13431, 13450), True, 'import keras.layers as L\n'), ((13589, 13609), 'keras.layers.Activation', 'L.Activation', (['"""relu"""'], {}), "('relu')\n", (13601, 13609), True, 'import keras.layers as L\n'), ((13625, 13647), 'keras.layers.BatchNormalization', 'L.BatchNormalization', ([], {}), '()\n', (13645, 13647), True, 'import keras.layers as L\n'), ((13663, 13691), 'keras.layers.Dropout', 'L.Dropout', (['self.dropout_prob'], {}), '(self.dropout_prob)\n', (13672, 13691), True, 'import keras.layers as L\n'), ((13845, 13865), 'keras.layers.Activation', 'L.Activation', (['"""relu"""'], {}), "('relu')\n", (13857, 13865), True, 'import keras.layers as L\n'), ((13881, 13903), 'keras.layers.BatchNormalization', 'L.BatchNormalization', ([], {}), '()\n', (13901, 13903), True, 'import keras.layers as 
L\n'), ((13919, 13947), 'keras.layers.Dropout', 'L.Dropout', (['self.dropout_prob'], {}), '(self.dropout_prob)\n', (13928, 13947), True, 'import keras.layers as L\n'), ((14101, 14121), 'keras.layers.Activation', 'L.Activation', (['"""relu"""'], {}), "('relu')\n", (14113, 14121), True, 'import keras.layers as L\n'), ((14137, 14159), 'keras.layers.BatchNormalization', 'L.BatchNormalization', ([], {}), '()\n', (14157, 14159), True, 'import keras.layers as L\n'), ((14175, 14203), 'keras.layers.Dropout', 'L.Dropout', (['self.dropout_prob'], {}), '(self.dropout_prob)\n', (14184, 14203), True, 'import keras.layers as L\n'), ((14363, 14390), 'keras.layers.Activation', 'L.Activation', (['MU.my_softmax'], {}), '(MU.my_softmax)\n', (14375, 14390), True, 'import keras.layers as L\n'), ((15945, 15973), 'astropy.convolution.kernels.Gaussian2DKernel', 'Gaussian2DKernel', ([], {'x_stddev': '(1)'}), '(x_stddev=1)\n', (15961, 15973), False, 'from astropy.convolution.kernels import Gaussian2DKernel\n'), ((3267, 3280), 'math.isnan', 'math.isnan', (['x'], {}), '(x)\n', (3277, 3280), False, 'import math\n'), ((11253, 11276), 'numpy.isnan', 'np.isnan', (['gaze_pos_list'], {}), '(gaze_pos_list)\n', (11261, 11276), True, 'import numpy as np\n'), ((16313, 16375), 'scipy.misc.imresize', 'misc.imresize', (['heatmap_norm', '[shape, shape]'], {'interp': '"""bilinear"""'}), "(heatmap_norm, [shape, shape], interp='bilinear')\n", (16326, 16375), False, 'from scipy import misc\n'), ((9575, 9633), 'tensorflow.pad', 'tf.pad', (['x', '[(0, 0), (lh, lh), (lw, lw), (0, 0)]', '"""REFLECT"""'], {}), "(x, [(0, 0), (lh, lh), (lw, lw), (0, 0)], 'REFLECT')\n", (9581, 9633), True, 'import tensorflow as tf\n'), ((9894, 9926), 'keras.initializers.Constant', 'K.initializers.Constant', (['gkernel'], {}), '(gkernel)\n', (9917, 9926), True, 'import keras as K\n')] |
from spatialaudio import *
import unittest
import numpy as np
TEST_DIMENSIONS = (2, 10)
class TestSpatialAudio(unittest.TestCase):
def test_create_spatialaudio(self):
a = SpatialAudio()
def test_coordinates(self):
a = SpatialAudio()
a.coordinates = Coordinates()
def test_time_channel(self):
a = SpatialAudio()
ch = 1
a.coordinates = Coordinates()
a.time = np.random.rand(10,5,3)
time = a._get_time(slice(None),ch)
assert np.allclose(time, a.time[:,ch,:])
if __name__ == '__main__':
unittest.main()
| [
"unittest.main"
] | [((559, 574), 'unittest.main', 'unittest.main', ([], {}), '()\n', (572, 574), False, 'import unittest\n')] |
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import SelectPercentile, chi2
from sklearn.decomposition import TruncatedSVD
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
def prepareTrainData(X, Xwhole):
tfv = TfidfVectorizer(min_df=3, max_features=None,
strip_accents='unicode', analyzer='word', token_pattern=r'\w{1,}',
ngram_range=(1, 2), use_idf=True, smooth_idf=True, sublinear_tf=True, stop_words = 'english')
    if Xwhole is None:
X = tfv.fit_transform(X)
else:
tfv.fit(Xwhole)
X = tfv.transform(X)
svd = TruncatedSVD(n_components=200, algorithm='randomized', n_iter=5, random_state=None, tol=0.0)
scl = StandardScaler(copy=True, with_mean=True, with_std=True)
X = svd.fit_transform(X)
X = scl.fit_transform(X)
return (X, tfv, svd, scl)
def prepareTestData(Xtest, tfv, svd, scl):
Xtest = tfv.transform(Xtest)
Xtest = svd.transform(Xtest)
Xtest = scl.transform(Xtest)
return Xtest
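# A minimal sketch (not part of the original module) of how the two helpers above
# might be combined. X_train, y_train and X_test are assumed to be lists of raw
# text documents and labels supplied by the caller; the SVC hyperparameters are
# illustrative only.
def trainAndPredict(X_train, y_train, X_test):
    X, tfv, svd, scl = prepareTrainData(X_train, None)
    clf = SVC(C=10.0, gamma='auto')
    clf.fit(X, y_train)
    Xtest = prepareTestData(X_test, tfv, svd, scl)
    return clf.predict(Xtest)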
| [
"sklearn.preprocessing.StandardScaler",
"sklearn.feature_extraction.text.TfidfVectorizer",
"sklearn.decomposition.TruncatedSVD"
] | [((383, 595), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'min_df': '(3)', 'max_features': 'None', 'strip_accents': '"""unicode"""', 'analyzer': '"""word"""', 'token_pattern': '"""\\\\w{1,}"""', 'ngram_range': '(1, 2)', 'use_idf': '(True)', 'smooth_idf': '(True)', 'sublinear_tf': '(True)', 'stop_words': '"""english"""'}), "(min_df=3, max_features=None, strip_accents='unicode',\n analyzer='word', token_pattern='\\\\w{1,}', ngram_range=(1, 2), use_idf=\n True, smooth_idf=True, sublinear_tf=True, stop_words='english')\n", (398, 595), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((701, 797), 'sklearn.decomposition.TruncatedSVD', 'TruncatedSVD', ([], {'n_components': '(200)', 'algorithm': '"""randomized"""', 'n_iter': '(5)', 'random_state': 'None', 'tol': '(0.0)'}), "(n_components=200, algorithm='randomized', n_iter=5,\n random_state=None, tol=0.0)\n", (713, 797), False, 'from sklearn.decomposition import TruncatedSVD\n'), ((801, 857), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {'copy': '(True)', 'with_mean': '(True)', 'with_std': '(True)'}), '(copy=True, with_mean=True, with_std=True)\n', (815, 857), False, 'from sklearn.preprocessing import StandardScaler\n')] |
import argparse
from functools import reduce
import operator
operations = dict(add=sum,
sub=lambda items: reduce(operator.sub, items),
mul=lambda items: reduce(operator.mul, items),
div=lambda items: reduce(operator.truediv, items))
def calculator(operation, numbers):
"""TODO 1:
Create a calculator that takes an operation and list of numbers.
Perform the operation returning the result rounded to 2 decimals"""
func = operations.get(operation.lower())
if not func:
raise ValueError('Invalid operation')
numbers = [float(num) for num in numbers]
return round(func(numbers), 2)
def create_parser():
"""TODO 2:
Create an ArgumentParser object:
- have one operation argument,
- have one or more integers that can be operated on.
Returns a argparse.ArgumentParser object.
Note that type=float times out here so do the casting in the calculator
function above!"""
parser = argparse.ArgumentParser(description='A simple calculator')
parser.add_argument('-a', '--add', nargs='+', help="Sums numbers")
parser.add_argument('-s', '--sub', nargs='+', help="Subtracts numbers")
parser.add_argument('-m', '--mul', nargs='+', help="Multiplies numbers")
parser.add_argument('-d', '--div', nargs='+', help="Divides numbers")
return parser
def call_calculator(args=None, stdout=False):
"""Provided/done:
Calls calculator with provided args object.
If args are not provided get them via create_parser,
if stdout is True print the result"""
parser = create_parser()
if args is None:
args = parser.parse_args()
# taking the first operation in args namespace
# if combo, e.g. -a and -s, take the first one
for operation, numbers in vars(args).items():
if numbers is None:
continue
try:
res = calculator(operation, numbers)
except ZeroDivisionError:
res = 0
if stdout:
print(res)
return res
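# A small, hypothetical usage sketch: call_calculator can also be driven
# programmatically by passing an argparse.Namespace instead of parsing argv.
# Unused operations must be present and set to None, matching create_parser().
#
#   ns = argparse.Namespace(add=['1', '2', '3'], sub=None, mul=None, div=None)
#   call_calculator(args=ns, stdout=True)   # prints 6.0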
if __name__ == '__main__':
call_calculator(stdout=True)
| [
"functools.reduce",
"argparse.ArgumentParser"
] | [((1022, 1080), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""A simple calculator"""'}), "(description='A simple calculator')\n", (1045, 1080), False, 'import argparse\n'), ((125, 152), 'functools.reduce', 'reduce', (['operator.sub', 'items'], {}), '(operator.sub, items)\n', (131, 152), False, 'from functools import reduce\n'), ((190, 217), 'functools.reduce', 'reduce', (['operator.mul', 'items'], {}), '(operator.mul, items)\n', (196, 217), False, 'from functools import reduce\n'), ((255, 286), 'functools.reduce', 'reduce', (['operator.truediv', 'items'], {}), '(operator.truediv, items)\n', (261, 286), False, 'from functools import reduce\n')] |
# https://tjkendev.github.io/procon-library/python/geometry/closest_pair.html
# input sample
# n
# x0 y0
# x1 y1
# :
# xn−1 yn−1
# Find the closest pair of points by divide and conquer
from math import sqrt
INF = 10**9
# cp_rec - recursive helper
# input:  the point array and the index range to work on
# output: the minimum distance and the elements of the range sorted by y-coordinate
def cp_rec(ps, i, n):
if n <= 1:
return INF, [ps[i]]
    m = n // 2  # integer division so m can be used as an index (n/2 is a float in Python 3)
    x = ps[i+m][0]  # x-coordinate of the boundary that splits the range in half
    # recurse on each half of the range
d1, qs1 = cp_rec(ps, i, m)
d2, qs2 = cp_rec(ps, i+m, n-m)
d = min(d1, d2)
    # merge the two halves in ascending order of y-coordinate
qs = [None]*n
s = t = idx = 0
while s < m and t < n-m:
if qs1[s][1] < qs2[t][1]:
qs[idx] = qs1[s]; s += 1
else:
qs[idx] = qs2[t]; t += 1
idx += 1
while s < m:
qs[idx] = qs1[s]; s += 1
idx += 1
while t < n-m:
qs[idx] = qs2[t]; t += 1
idx += 1
    # compute distances only for points whose x-distance from the boundary x (= ps[i+m][0]) is less than d
    # b collects the points within x-distance d of the boundary
b = []
for i in range(n):
ax, ay = q = qs[i]
if abs(ax - x) >= d:
continue
        # check earlier points j (< i) whose y-distance from qs[i] is less than d
for j in range(len(b)-1, -1, -1):
dx = ax - b[j][0]
dy = ay - b[j][1]
if dy >= d: break
d = min(d, sqrt(dx**2 + dy**2))
b.append(q)
return d, qs
def closest_pair(ps):
n = len(ps)
return cp_rec(ps, 0, n)[0]
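# A small usage sketch for the divide-and-conquer implementation above (not part
# of the original snippet). The point list must be sorted by x-coordinate before
# calling closest_pair, because cp_rec splits the range by index.
if __name__ == '__main__':
    sample_points = sorted([(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)])
    print("Closest pair distance (divide and conquer):", closest_pair(sample_points))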
# https://www.geeksforgeeks.org/closest-pair-of-points-using-divide-and-conquer-algorithm/
import math
import copy
class Point():
def __init__(self, x, y):
self.x = x
self.y = y
# Euclidean distance between two Points. This helper (and bruteForce below) is
# called by the code that follows but was missing from the snippet as pasted;
# both are reconstructed from the referenced GeeksforGeeks article.
def dist(p1, p2):
    return math.sqrt((p1.x - p2.x) ** 2 + (p1.y - p2.y) ** 2)

# Brute-force search over all pairs, used for small subproblems (n <= 3)
def bruteForce(P, n):
    min_val = float('inf')
    for i in range(n):
        for j in range(i + 1, n):
            if dist(P[i], P[j]) < min_val:
                min_val = dist(P[i], P[j])
    return min_val

def stripClosest(strip, size, d):
min_val = d
for i in range(size):
j = i + 1
while j < size and (strip[j].y -
strip[i].y) < min_val:
            min_val = min(min_val, dist(strip[i], strip[j]))
j += 1
return min_val
def closestUtil(P, Q, n):
if n <= 3:
return bruteForce(P, n)
mid = n // 2
midPoint = P[mid]
dl = closestUtil(P[:mid], Q, mid)
dr = closestUtil(P[mid:], Q, n - mid)
d = min(dl, dr)
strip = []
for i in range(n):
if abs(Q[i].x - midPoint.x) < d:
strip.append(Q[i])
return min(d, stripClosest(strip, len(strip), d))
def closest(P, n):
P.sort(key = lambda point: point.x)
Q = copy.deepcopy(P)
Q.sort(key = lambda point: point.y)
return closestUtil(P, Q, n)
# Driver code
P = [Point(2, 3), Point(12, 30),
Point(40, 50), Point(5, 1),
Point(12, 10), Point(3, 4)]
n = len(P)
print("The smallest distance is", closest(P, n)) | [
"math.sqrt",
"copy.deepcopy"
] | [((2315, 2331), 'copy.deepcopy', 'copy.deepcopy', (['P'], {}), '(P)\n', (2328, 2331), False, 'import copy\n'), ((1218, 1241), 'math.sqrt', 'sqrt', (['(dx ** 2 + dy ** 2)'], {}), '(dx ** 2 + dy ** 2)\n', (1222, 1241), False, 'from math import sqrt\n')] |
import hazelcast
import logging
if __name__ == "__main__":
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
client = hazelcast.HazelcastClient()
pn_counter = client.get_pn_counter("pn-counter").blocking()
print("Counter is initialized with {}".format(pn_counter.get()))
for i in range(10):
print("Added {} to the counter. Current value is {}".format(i, pn_counter.add_and_get(i)))
print("Incremented the counter after getting the current value. "
"Previous value is {}".format(pn_counter.get_and_increment()))
print("Final value is {}".format(pn_counter.get()))
| [
"logging.basicConfig",
"logging.getLogger",
"hazelcast.HazelcastClient"
] | [((64, 85), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (83, 85), False, 'import logging\n'), ((147, 174), 'hazelcast.HazelcastClient', 'hazelcast.HazelcastClient', ([], {}), '()\n', (172, 174), False, 'import hazelcast\n'), ((90, 109), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (107, 109), False, 'import logging\n')] |
from flask_wtf import FlaskForm
from wtforms import StringField,TextAreaField,SelectField,SubmitField
from wtforms.validators import Required
class PitchesForm(FlaskForm):
category = SelectField('Pitch Category',
choices=[('Select','Select Category'), ('Interview-Pitch', 'Interview Pitch'), ('Product-Pitch', 'Product Pitch'),
('Promotion-Pitch', 'Promotion Pitch'), ('Business-Pitch', 'Business Pitch')], validators=[Required()])
body = TextAreaField('Pitch Now',validators = [Required()])
submit = SubmitField('Submit')
class CommentsForm(FlaskForm):
comment = TextAreaField('Comment on pitch',validators = [Required()])
submit = SubmitField('Submit')
class UpdateProfile(FlaskForm):
bio = TextAreaField('Tell us about you.',validators = [Required()])
submit = SubmitField('Submit')
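# A purely illustrative sketch (kept as comments) of how these forms might be
# consumed in a Flask view. The 'main' blueprint, 'Pitch' model, 'db' session and
# template name are assumptions and are not defined in this module.
#
#   @main.route('/pitch/new', methods=['GET', 'POST'])
#   def new_pitch():
#       form = PitchesForm()
#       if form.validate_on_submit():
#           pitch = Pitch(category=form.category.data, body=form.body.data)
#           db.session.add(pitch)
#           db.session.commit()
#       return render_template('new_pitch.html', pitch_form=form)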
| [
"wtforms.validators.Required",
"wtforms.SubmitField"
] | [((583, 604), 'wtforms.SubmitField', 'SubmitField', (['"""Submit"""'], {}), "('Submit')\n", (594, 604), False, 'from wtforms import StringField, TextAreaField, SelectField, SubmitField\n'), ((728, 749), 'wtforms.SubmitField', 'SubmitField', (['"""Submit"""'], {}), "('Submit')\n", (739, 749), False, 'from wtforms import StringField, TextAreaField, SelectField, SubmitField\n'), ((867, 888), 'wtforms.SubmitField', 'SubmitField', (['"""Submit"""'], {}), "('Submit')\n", (878, 888), False, 'from wtforms import StringField, TextAreaField, SelectField, SubmitField\n'), ((493, 503), 'wtforms.validators.Required', 'Required', ([], {}), '()\n', (501, 503), False, 'from wtforms.validators import Required\n'), ((557, 567), 'wtforms.validators.Required', 'Required', ([], {}), '()\n', (565, 567), False, 'from wtforms.validators import Required\n'), ((702, 712), 'wtforms.validators.Required', 'Required', ([], {}), '()\n', (710, 712), False, 'from wtforms.validators import Required\n'), ((841, 851), 'wtforms.validators.Required', 'Required', ([], {}), '()\n', (849, 851), False, 'from wtforms.validators import Required\n')] |
import copy
import numpy as np
import cv2
import math
class Interface:
def __init__(self, img):
self.img = img
self.cache = img
self.drawing = False # true if mouse is pressed
self.ix, self.iy = -1, -1
self.circles = []
self.points = []
# Create a function based on a CV2 Event (Left button click)
def draw_circle(self, event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
self.drawing = True
# we take note of where that mouse was located
self.ix, self.iy = x, y
elif event == cv2.EVENT_MOUSEMOVE:
self.drawing = True
elif event == cv2.EVENT_LBUTTONUP:
radius = int(math.sqrt(((self.ix - x) ** 2) + ((self.iy - y) ** 2)))
centre = (self.ix, self.iy)
self.cache = copy.deepcopy(self.img)
cv2.circle(self.img, centre, radius, (0, 0, 255), thickness=2)
self.drawing = False
self.circles.append([centre, radius])
# cv2.imshow('src', self.img)
        elif event == cv2.EVENT_RBUTTONDOWN:
            # right click undoes the most recently drawn circle
            if self.circles:
                self.img = copy.deepcopy(self.cache)
                del self.circles[-1]
def draw_points(self, event, x, y, flags, params):
# checking for left mouse clicks
if event == cv2.EVENT_LBUTTONDOWN:
cv2.circle(self.img, (x, y), 7, (0, 0, 255), thickness=-1)
self.points.append([x, y])
# cv2.imshow('src', self.img)
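# A minimal sketch (not part of the original class) of how the callbacks above
# are typically wired up with OpenCV's HighGUI; 'image.png' is a placeholder path.
if __name__ == '__main__':
    src = cv2.imread('image.png')          # placeholder image path
    gui = Interface(src)
    cv2.namedWindow('src')
    cv2.setMouseCallback('src', gui.draw_circle)
    while True:
        cv2.imshow('src', gui.img)
        if (cv2.waitKey(20) & 0xFF) == 27:  # press Esc to quit
            break
    cv2.destroyAllWindows()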
| [
"cv2.circle",
"math.sqrt",
"copy.deepcopy"
] | [((1395, 1453), 'cv2.circle', 'cv2.circle', (['self.img', '(x, y)', '(7)', '(0, 0, 255)'], {'thickness': '(-1)'}), '(self.img, (x, y), 7, (0, 0, 255), thickness=-1)\n', (1405, 1453), False, 'import cv2\n'), ((876, 899), 'copy.deepcopy', 'copy.deepcopy', (['self.img'], {}), '(self.img)\n', (889, 899), False, 'import copy\n'), ((913, 975), 'cv2.circle', 'cv2.circle', (['self.img', 'centre', 'radius', '(0, 0, 255)'], {'thickness': '(2)'}), '(self.img, centre, radius, (0, 0, 255), thickness=2)\n', (923, 975), False, 'import cv2\n'), ((753, 803), 'math.sqrt', 'math.sqrt', (['((self.ix - x) ** 2 + (self.iy - y) ** 2)'], {}), '((self.ix - x) ** 2 + (self.iy - y) ** 2)\n', (762, 803), False, 'import math\n'), ((1176, 1201), 'copy.deepcopy', 'copy.deepcopy', (['self.cache'], {}), '(self.cache)\n', (1189, 1201), False, 'import copy\n')] |
import tkinter.filedialog as filedialog
import os
from setuptools import glob
import warnings
from setup_functions import *
import tkinter.messagebox as mb
warnings.simplefilter(action='ignore', category=FutureWarning)
pd.options.mode.chained_assignment = 'raise'
def specific_schedule_name(df, schedule_name):
"""
This function returns a new df with specific animal schedule type, removes the timestamp from the schedule date, and
sorts the df by run date and ID in ascending order.
Running this function on the wrong test will cause an error message!
If you're running the function on the correct files, then maybe the headers for all the files are not identical.
:param df: The dataframe that represents the raw ABET file
:param schedule_name: The name of the test the mouse ran, found under the schedule name in raw data
:returns: df: A dataframe that only contains rows for animals that performed the specific schedule name, sorted in
ascending order by run date and ID
"""
try:
df = df.loc[df['Schedule name'] == schedule_name]
# get rid of the timestamp from the SCHEDULE DATE column
df['Schedule run date'] = pd.to_datetime(df['Schedule run date']).dt.date
# sort the csv file by date and animal id in ascending order
df = df.sort_values(['Schedule run date', 'Animal ID'], ascending=[1, 1])
# reset the indices of the combined csv file
df = df.reset_index(drop=True)
return df
except (IndexError, KeyError, ValueError):
mb.showerror("Setup Error", 'specific_schedule_name() error: either you have selected the wrong type of test'
' or the headers are not the same for all files!')
print('specific_schedule_name() error: either you have selected the wrong type of test',
'or the headers are not the same for all files!')
def create_merge_file(df, script_location):
"""
This function creates a csv file called 'merged_file.csv'. This file appends all the raw files together and puts
them on a single csv file. Useful for testing or just looking at all the data in one file.
If the merged_file.csv' is already open, it will not update and this function will return and stop.
If you run this on non-raw data files,this function will return and stop.
:param df: The dataframe that represents the raw ABET file
:param script_location: The location where the script is located and used to store this file there as well
"""
try:
df.to_csv('merged_file.csv', index=False)
print(
'A file called "merged_file.csv" has been created in the same directory as the script! The location is:',
script_location)
except PermissionError:
mb.showerror("Setup Error",
'create_merge_file() error: You may have the "merged_file.csv" already open! Please close it!')
print('create_merge_file() error: You may have the "merged_file.csv" already open! Please close it!')
except AttributeError:
mb.showerror("Setup Error", 'create_merge_file() error: These are not the correct raw data files!')
print('create_merge_file() error: These are not the correct raw data files!')
def create_dropped_duplicates_file(df, script_location):
"""
This function creates a csv file called 'dropped_duplicates.csv'. This file shows all the rows that were treated as
duplicates and removed from the working dataframe. Useful for testing and making sure the correct
duplicates/unwanted were removed from the working dataframe.
If the 'dropped_duplicates.csv' is already open, it will not update and this function will return and stop.
If you run this on non-raw data files,this function will return and stop.
:param df: The dataframe that represents the raw ABET file
:param script_location: The location where the script is located and used to store this file there as well
"""
try:
df.to_csv('dropped_duplicates.csv', index=False)
print('A file called "dropped_duplicates.csv" has been created in the same directory! The location is:',
script_location)
except PermissionError:
mb.showerror("Setup Error",
'create_dropped_duplicates_file() error: You may have the "merged_file.csv" already open!'
' Please close it!')
print('create_dropped_duplicates_file() error: You may have the "merged_file.csv" already open!',
'Please close it!')
except AttributeError:
mb.showerror("Setup Error", 'create_dropped_duplicates_file() error: These are not the correct raw data files!')
print('create_dropped_duplicates_file() error: These are not the correct raw data files!')
def remove_duplicates(df, script_location):
"""
This function actually drops the duplicate rows from the working dataframe.
:param df: The dataframe that represents the raw ABET file
:param script_location: The location where the script is located and used to store this file there as well
:return: df: A version of the raw ABET file dataframe with the duplicates removed
"""
# create a dataframe that holds the duplicates
df_duplicates = pd.DataFrame()
duplicates = df.duplicated(subset=['Schedule run date', 'Animal ID'], keep='last')
df_duplicates = df_duplicates.append(df.loc[duplicates])
df_duplicates.sort_values(['Schedule run date', 'Animal ID'], ascending=[1, 1], inplace=True)
create_dropped_duplicates_file(df_duplicates, script_location)
# actually drop the duplicates from the working df
df = df.drop_duplicates(subset=['Schedule run date', 'Animal ID'], keep='last')
df = df.sort_values(['Schedule run date', 'Animal ID'], ascending=[1, 1])
df = df.reset_index(drop=True)
return df
def habituation_one(df, script_location):
"""
This function is used to specifically get cleaned data for the Habituation 1 test. The resulting dataframe will have
the following headers: 'Date', 'ID', 'SessionLength', 'RewardIRBeamBrokenCount', 'ScreenIRBeamBrokenCount',
'CrossedRewardToScreen', 'CrossedScreenToReward', 'BottomWindowTouches', 'TopWindowTouches', 'TrayEnteredCount',
'Day'
Running this function on the wrong test will cause an error message!
If you're running the function on the correct files, then maybe the headers for all the files are not identical.
:param df: The dataframe that represents the raw ABET file
:param script_location: The location where the script is located.
:return:df_final: A dataframe with all the Habituation 1 data and proper headers.
"""
create_merge_file(df, script_location)
print('The program is running... Please wait....')
# sort by time and remove duplicates
df = df.sort_values(by=['End Summary - Condition (1)'])
df = remove_duplicates(df, script_location)
raw_data_headers = df.columns.values.tolist()
# basically want to replace '-' with NaN values in this specific range
all_numeric_values = [*range(13, len(raw_data_headers), 1)]
df = convert_to_int(all_numeric_values, raw_data_headers, df)
# get the column indices for specific parameters
date_header = index_range('Schedule run date', raw_data_headers)
animal_id_header = index_range('Animal ID', raw_data_headers)
session_length_header = index_range('End Summary - Condition (1)', raw_data_headers)
reward_ir_beam = index_range('End Summary - Reward IR Beam broken (1)', raw_data_headers)
screen_ir_beam = index_range('End Summary - Screen IR Beam broken (1)', raw_data_headers)
reward_to_screen = index_range('End Summary - Crossed reward to screen (1)', raw_data_headers)
screen_to_reward = index_range('End Summary - Crossed Screen to reward (1)', raw_data_headers)
bottom_window_touches = index_range('End Summary - Touches to bottom screen windows (1)', raw_data_headers)
top_window_touches = index_range('End Summary - Touches to top screen windows (1)', raw_data_headers)
tray_entered_count = index_range('End Summary - Tray Entered - Cnt (1)', raw_data_headers)
print('The program is still running... Please wait....')
col_names = ['Date', 'ID', 'SessionLength', 'RewardIRBeamBrokenCount', 'ScreenIRBeamBrokenCount',
'CrossedRewardToScreen',
'CrossedScreenToReward', 'BottomWindowTouches', 'TopWindowTouches', 'TrayEnteredCount', 'Day']
df_final = pd.DataFrame(columns=col_names)
# extract the necessary data from raw data
try:
df_final['Date'] = df.iloc[:, date_header[0]]
df_final['ID'] = df.iloc[:, animal_id_header[0]]
df_final['SessionLength'] = df.iloc[:, session_length_header[0]]
df_final['RewardIRBeamBrokenCount'] = df.iloc[:, reward_ir_beam[0]]
df_final['ScreenIRBeamBrokenCount'] = df.iloc[:, screen_ir_beam[0]]
df_final['CrossedRewardToScreen'] = df.iloc[:, reward_to_screen[0]]
df_final['CrossedScreenToReward'] = df.iloc[:, screen_to_reward[0]]
df_final['BottomWindowTouches'] = df.iloc[:, bottom_window_touches[0]]
df_final['TopWindowTouches'] = df.iloc[:, top_window_touches[0]]
df_final['TrayEnteredCount'] = df.iloc[:, tray_entered_count[0]]
df_final['Day'] = df_final.groupby('ID').cumcount() + 1
df_final = df_final.sort_values(by=['ID', 'Date'])
except (IndexError, KeyError, ValueError):
mb.showerror("Setup Error", 'habituation_one() error: either you selected the wrong type of test'
' or headers are not the same on all files!')
print('habituation_one() error: either you selected the wrong type of test '
'or headers are not the same on all files!')
return
print('The program is almost done running... Please wait....')
return df_final
def habituation_two(df, script_location):
"""
This function is used to specifically get cleaned data for the Habituation 2 test. The resulting dataframe will have
the following headers: 'Date', 'ID', 'SessionLength', 'NumberOfTrial', 'RewardIRBeamBrokenCount',
'ScreenIRBeamBrokenCount', 'BottomLeftWindowTouches', 'BottomRightWindowTouches', 'TopWindowTouches',
'TrayEnteredCount', 'MeanRewardCollectionLatency', 'Day'
Running this function on the wrong test will cause an error message!
If you're running the function on the correct files, then maybe the headers for all the files are not identical.
:param df: The dataframe that represents the raw ABET file
:param script_location: The location where the script is located.
:return: df_final: A dataframe with all the Habituation 2 data and proper headers.
"""
create_merge_file(df, script_location)
print('The program is running... Please wait....')
# sort by time and remove duplicates
df = df.sort_values(by=['End Summary - Condition (1)'])
df = remove_duplicates(df, script_location)
raw_data_headers = df.columns.values.tolist()
# basically want to replace '-' with NaN values in this specific range
all_numeric_values = [*range(13, len(raw_data_headers), 1)]
df = convert_to_int(all_numeric_values, raw_data_headers, df)
# get the column indices for specific parameters
date_header = index_range('Schedule run date', raw_data_headers)
animal_id_header = index_range('Animal ID', raw_data_headers)
session_length_header = index_range('End Summary - Condition (1)', raw_data_headers)
total_trials = index_range('End Summary - Trial Completed (1)', raw_data_headers)
reward_ir_beam = index_range('End Summary - Reward IR Breaks - Reward Beam Cnt (1)', raw_data_headers)
screen_ir_beam = index_range('End Summary - Screen IR Breaks - Screen IR Cnt (1)', raw_data_headers)
bottom_left_window_touches = index_range('End Summary - Bottom Left Touches - Bottom Left Cnt (1)',
raw_data_headers)
bottom_right_window_touches = index_range('End Summary - Bottom Right Touches - Bottom Right Cnt (1)',
raw_data_headers)
top_window_touches = index_range('End Summary - Top Touches - Top Cnt (1)', raw_data_headers)
tray_entered_count = index_range('End Summary - Tray Entered - Cnt (1)', raw_data_headers)
mean_reward_collection_latency = index_range('Reward Collection Latency (', raw_data_headers)
print('The program is still running... Please wait....')
col_names = ['Date', 'ID', 'SessionLength', 'NumberOfTrial', 'RewardIRBeamBrokenCount', 'ScreenIRBeamBrokenCount',
'BottomLeftWindowTouches',
'BottomRightWindowTouches', 'TopWindowTouches', 'TrayEnteredCount', 'MeanRewardCollectionLatency',
'Day']
df_final = pd.DataFrame(columns=col_names)
# extract the necessary data from raw data
try:
df_final['Date'] = df.iloc[:, date_header[0]]
df_final['ID'] = df.iloc[:, animal_id_header[0]]
df_final['SessionLength'] = df.iloc[:, session_length_header[0]]
df_final['NumberOfTrial'] = df.iloc[:, total_trials[0]]
df_final['RewardIRBeamBrokenCount'] = df.iloc[:, reward_ir_beam[0]]
df_final['ScreenIRBeamBrokenCount'] = df.iloc[:, screen_ir_beam[0]]
df_final['BottomLeftWindowTouches'] = df.iloc[:, bottom_left_window_touches[0]]
df_final['BottomRightWindowTouches'] = df.iloc[:, bottom_right_window_touches[0]]
df_final['TopWindowTouches'] = df.iloc[:, top_window_touches[0]]
df_final['TrayEnteredCount'] = df.iloc[:, tray_entered_count[0]]
df_final['MeanRewardCollectionLatency'] = df.iloc[:, mean_reward_collection_latency].mean(axis=1)
df_final['Day'] = df_final.groupby('ID').cumcount() + 1
df_final = df_final.sort_values(by=['ID', 'Date'])
except (IndexError, KeyError, ValueError):
mb.showerror("Setup Error", 'habituation_two() error: Either you selected the wrong type of test '
' or headers are not the same on all files!')
print(
'habituation_two() error: Either you selected the wrong type of test '
'or headers are not the same on all files!')
return None
print('The program is almost done running... Please wait....')
return df_final
def initial_touch(df, script_location):
"""
This function is used to specifically get cleaned data for the Initial Touch test. The resulting dataframe will have
the following headers: 'Date', 'ID', 'SessionLength', 'ImagesTouched', 'Corrects', 'BlankTouches','TotalITITouches',
'MeanCorrectTouchLatency', 'MeanBlankTouchLatency', 'MeanRewardCollectionLatency', 'Day'
Running this function on the wrong test will cause an error message!
If you're running the function on the correct files, then maybe the headers for all the files are not identical.
:param df: The dataframe that represents the raw ABET file
:param script_location: The location where the script is located.
:return: df_final: A dataframe with all the Initial Touch data and proper headers.
"""
create_merge_file(df, script_location)
print('The program is running... Please wait....')
# sort by time and remove duplicates
df = df.sort_values(by=['End Summary - Condition (1)'])
df = remove_duplicates(df, script_location)
raw_data_headers = df.columns.values.tolist()
# basically want to replace '-' with NaN values in this specific range
all_numeric_values = [*range(13, len(raw_data_headers), 1)]
df = convert_to_int(all_numeric_values, raw_data_headers, df)
# get the column indices for specific parameters
date_header = index_range('Schedule run date', raw_data_headers)
animal_id_header = index_range('Animal ID', raw_data_headers)
session_length_header = index_range('End Summary - Condition (1)', raw_data_headers)
images_touched = index_range('End Summary - No. images (1)', raw_data_headers)
correct_touches = index_range('End Summary - Corrects (1)', raw_data_headers)
blank_touches = index_range('End Summary - Blank Touches (1)', raw_data_headers)
total_iti_touches = index_range('End Summary - Left ITI Touches (1)', raw_data_headers) + index_range(
'End Summary - Right ITI Touches (1)', raw_data_headers)
mean_correct_touch_latency = index_range('Correct touch latency (', raw_data_headers)
mean_blank_touch_latency = index_range('Blank Touch Latency (', raw_data_headers)
mean_reward_collection_latency = index_range('Correct Reward Collection (', raw_data_headers)
print('The program is still running... Please wait....')
col_names = ['Date', 'ID', 'SessionLength', 'ImagesTouched', 'Corrects', 'BlankTouches',
'TotalITITouches', 'MeanCorrectTouchLatency', 'MeanBlankTouchLatency', 'MeanRewardCollectionLatency',
'Day']
df_final = pd.DataFrame(columns=col_names)
try:
df_final['Date'] = df.iloc[:, date_header[0]]
df_final['ID'] = df.iloc[:, animal_id_header[0]]
df_final['SessionLength'] = df.iloc[:, session_length_header[0]]
df_final['ImagesTouched'] = df.iloc[:, images_touched[0]]
df_final['Corrects'] = df.iloc[:, correct_touches[0]]
df_final['BlankTouches'] = df.iloc[:, blank_touches[0]]
df_final['TotalITITouches'] = df.iloc[:, total_iti_touches].sum(axis=1)
df_final['MeanCorrectTouchLatency'] = df.iloc[:, mean_correct_touch_latency].mean(axis=1)
df_final['MeanBlankTouchLatency'] = df.iloc[:, mean_blank_touch_latency].mean(axis=1)
df_final['MeanRewardCollectionLatency'] = df.iloc[:, mean_reward_collection_latency].mean(axis=1)
df_final['Day'] = df_final.groupby('ID').cumcount() + 1
df_final = df_final.sort_values(by=['ID', 'Date'])
except (IndexError, KeyError, ValueError):
mb.showerror("Setup Error", 'initial_touch() error: Either you selected the wrong type of test '
' or headers are not the same on all files!')
print(
'initial_touch() error: Either you selected the wrong type of test '
'or headers are not the same on all files!')
return None
print('The program is almost done running... Please wait....')
return df_final
def must_touch_initiate(df, script_location):
"""
This function is used to specifically get cleaned data for the Must Touch/Must Initiate test. The resulting
dataframe will have the following headers: 'Date', 'ID', 'SessionLength', 'Corrects', 'TotalBlankTouches',
'TotalITITouches', 'MeanCorrectTouchLatency', 'MeanCorrectRightTouchLatency', 'MeanCorrectLeftTouchLatency',
'MeanCorrectLeftRightTouchLatency', 'MeanBlankTouchLatency', 'MeanRewardCollectionLatency', 'Day'
Running this function on the wrong test will cause an error message!
If you're running the function on the correct files, then maybe the headers for all the files are not identical.
:param df: The dataframe that represents the raw ABET file
:param script_location: The location where the script is located.
:return: df_final: A dataframe with all the Initial Touch data and proper headers.
"""
create_merge_file(df, script_location)
print('The program is running... Please wait....')
# sort by corrects and time, remove duplicates
df = df.sort_values(by=['End Summary - Corrects (1)', 'End Summary - Condition (1)'])
df = remove_duplicates(df, script_location)
raw_data_headers = df.columns.values.tolist()
# basically want to replace '-' with NaN values in this specific range
all_numeric_values = [*range(13, len(raw_data_headers), 1)]
df = convert_to_int(all_numeric_values, raw_data_headers, df)
# get the column indices for specific parameters
date_header = index_range('Schedule run date', raw_data_headers)
animal_id_header = index_range('Animal ID', raw_data_headers)
session_length_header = index_range('End Summary - Condition (1)', raw_data_headers)
correct_header = index_range('End Summary - Corrects (1)', raw_data_headers)
blank_touches_header = index_range('End Summary - Blank Touches (1)', raw_data_headers)
iti_blank_header = index_range('End Summary - Left ITI touches (1)', raw_data_headers) + index_range(
'End Summary - Right ITI touches (1)', raw_data_headers)
mean_correct_touch_header = index_range('Correct touch latency (', raw_data_headers)
mean_correct_left_touch = index_range('Correct Left touch latency (', raw_data_headers)
mean_correct_right_touch = index_range('Correct Right touch latency (', raw_data_headers)
mean_blank_touch_header = index_range('Blank Touch Latency (', raw_data_headers)
mean_reward_header = index_range('Correct Reward Collection (', raw_data_headers)
print('The program is still running... Please wait....')
col_names = ['Date', 'ID', 'SessionLength', 'Corrects', 'TotalBlankTouches', 'TotalITITouches',
'MeanCorrectTouchLatency', 'MeanCorrectRightTouchLatency', 'MeanCorrectLeftTouchLatency',
'MeanCorrectLeftRightTouchLatency', 'MeanBlankTouchLatency', 'MeanRewardCollectionLatency', 'Day']
df_final = pd.DataFrame(columns=col_names)
# extract the necessary data from raw data
try:
df_final['Date'] = df.iloc[:, date_header[0]]
df_final['ID'] = df.iloc[:, animal_id_header[0]]
df_final['SessionLength'] = df.iloc[:, session_length_header[0]]
df_final['Corrects'] = df.iloc[:, correct_header[0]]
df_final['TotalBlankTouches'] = df.iloc[:, blank_touches_header[0]]
df_final['TotalITITouches'] = df.iloc[:, iti_blank_header].sum(axis=1)
df_final['MeanCorrectTouchLatency'] = df.iloc[:, mean_correct_touch_header].mean(axis=1)
df_final['MeanCorrectRightTouchLatency'] = df.iloc[:, mean_correct_right_touch].mean(axis=1)
df_final['MeanCorrectLeftTouchLatency'] = df.iloc[:, mean_correct_left_touch].mean(axis=1)
df_final['MeanCorrectLeftRightTouchLatency'] = df_final[
['MeanCorrectLeftTouchLatency', 'MeanCorrectRightTouchLatency']].mean(axis=1)
df_final['MeanBlankTouchLatency'] = df.iloc[:, mean_blank_touch_header].mean(axis=1)
df_final['MeanRewardCollectionLatency'] = df.iloc[:, mean_reward_header].mean(axis=1)
df_final['Day'] = df_final.groupby('ID').cumcount() + 1
df_final = df_final.sort_values(by=['ID', 'Date'])
except (IndexError, KeyError, ValueError):
mb.showerror("Setup Error", 'must_touch_initiate() error: Either you selected the wrong type of test '
' or headers are not the same on all files!')
print(
'must_touch_initiate() error: Either you selected the wrong type of test '
'or headers are not the same on all files!')
return None
print('The program is almost done running... Please wait....')
return df_final
def punish_incorrect(df, script_location):
"""
This function is used to specifically get cleaned data for the Punish Incorrect test. The resulting dataframe will
have the following headers: 'Date', 'ID', 'SessionLength', 'NumberOfTrial', 'PercentCorrect', 'TotalITITouches',
'MeanCorrectTouchLatency', 'MeanCorrectRightTouchLatency', 'MeanCorrectLeftTouchLatency',
'MeanCorrectLeftRightTouchLatency', 'MeanBlankTouchLatency', 'MeanRewardCollectionLatency', 'Day'
Running this function on the wrong test will cause an error message!
If you're running the function on the correct files, then maybe the headers for all the files are not identical.
:param df: The dataframe that represents the raw ABET file
:param script_location: The location where the script is located.
:return: df_final: A dataframe with all the Initial Touch data and proper headers.
"""
create_merge_file(df, script_location)
print('The program is running... Please wait....')
# sort by trials, time and remove duplicates
df = df.sort_values(by=['End Summary - Trials Completed (1)', 'End Summary - Condition (1)'])
df = remove_duplicates(df, script_location)
raw_data_headers = df.columns.values.tolist()
# basically want to replace '-' with NaN values in this specific range
all_numeric_values = [*range(13, len(raw_data_headers), 1)]
df = convert_to_int(all_numeric_values, raw_data_headers, df)
# get the column indices for specific parameters
date_header = index_range('Schedule run date', raw_data_headers)
animal_id_header = index_range('Animal ID', raw_data_headers)
session_length_header = index_range('End Summary - Condition (1)', raw_data_headers)
trial_completed_header = index_range('End Summary - Trials Completed (1)', raw_data_headers)
percent_correct_headers = index_range('End Summary - % Correct (1)', raw_data_headers)
iti_blank_header = index_range('End Summary - Left ITI Touches (1)', raw_data_headers) + index_range(
'End Summary - Right ITI Touches (1)', raw_data_headers)
mean_correct_touch_header = index_range('Correct touch latency (', raw_data_headers)
mean_correct_left_touch = index_range('Correct Left touch latency (', raw_data_headers)
mean_correct_right_touch = index_range('Correct Right touch latency (', raw_data_headers)
mean_blank_touch_header = index_range('Blank Touch Latency (', raw_data_headers)
mean_reward_header = index_range('Correct Reward Collection (', raw_data_headers)
print('The program is still running... Please wait....')
col_names = ['Date', 'ID', 'SessionLength', 'NumberOfTrial', 'PercentCorrect', 'TotalITITouches',
'MeanCorrectTouchLatency', 'MeanCorrectRightTouchLatency', 'MeanCorrectLeftTouchLatency',
'MeanCorrectLeftRightTouchLatency', 'MeanBlankTouchLatency', 'MeanRewardCollectionLatency', 'Day']
df_final = pd.DataFrame(columns=col_names)
# extract the necessary data from raw data
try:
df_final['Date'] = df.iloc[:, date_header[0]]
df_final['ID'] = df.iloc[:, animal_id_header[0]]
df_final['SessionLength'] = df.iloc[:, session_length_header[0]]
df_final['NumberOfTrial'] = df.iloc[:, trial_completed_header[0]]
df_final['PercentCorrect'] = df.iloc[:, percent_correct_headers[0]]
        df_final['TotalITITouches'] = df.iloc[:, iti_blank_header].sum(axis=1)
df_final['MeanCorrectTouchLatency'] = df.iloc[:, mean_correct_touch_header].mean(axis=1)
df_final['MeanCorrectRightTouchLatency'] = df.iloc[:, mean_correct_right_touch].mean(axis=1)
df_final['MeanCorrectLeftTouchLatency'] = df.iloc[:, mean_correct_left_touch].mean(axis=1)
df_final['MeanCorrectLeftRightTouchLatency'] = df_final[
['MeanCorrectLeftTouchLatency', 'MeanCorrectRightTouchLatency']].mean(axis=1)
df_final['MeanBlankTouchLatency'] = df.iloc[:, mean_blank_touch_header].mean(axis=1)
df_final['MeanRewardCollectionLatency'] = df.iloc[:, mean_reward_header].mean(axis=1)
df_final['Day'] = df_final.groupby('ID').cumcount() + 1
df_final = df_final.sort_values(by=['ID', 'Date'])
except (IndexError, KeyError, ValueError):
mb.showerror("Setup Error", 'punish_incorrect() error: Either you selected the wrong type of test '
' or headers are not the same on all files!')
print(
'punish_incorrect() error: Either you selected the wrong type of test '
'or headers are not the same on all files!')
return None
print('The program is almost done running... Please wait....')
return df_final
def ld(df, script_location):
"""
This function is used to specifically get cleaned data for the LD Train/LD Probe test. The resulting
dataframe will have the following headers: 'Date', 'ID', 'Type', 'SessionLength', 'NumberOfTrial', 'PercentCorrect',
'NumberOfReversal', 'TotalITITouches', 'TotalBlankTouches', 'MeanRewardCollectionLatency',
'MeanCorrectTouchLatency', 'MeanIncorrectTouchLatency', 'SessionLengthTo1stReversalDuration',
'SessionLengthTo2ndReversalDuration', 'NumberOfTrialTo1stReversal', 'NumberOfTrialTo2ndReversal',
'PercentCorrectTo1stReversal', 'PercentCorrectTo2ndReversal', 'Day'
Running this function on the wrong test will cause an error message!
If you're running the function on the correct files, then maybe the headers for all the files are not identical.
:param df: The dataframe that represents the raw ABET file
:param script_location: The location where the script is located.
:return: df_final: A dataframe with all the Initial Touch data and proper headers.
"""
create_merge_file(df, script_location)
print('The program is running... Please wait....')
# sort by trials, time and remove duplicates
df = df.sort_values(by=['End Summary - Trials Completed (1)', 'End Summary - Condition (1)'])
df = remove_duplicates(df, script_location)
raw_data_headers = df.columns.values.tolist()
# basically want to replace '-' with NaN values in this specific range
all_numeric_values = [*range(13, len(raw_data_headers), 1)]
df = convert_to_int(all_numeric_values, raw_data_headers, df)
# get the column indices for specific parameters
date_header = index_range('Schedule run date', raw_data_headers)
number_correct_header = index_range('Trial Analysis - No. Correct (', raw_data_headers)
animal_id_header = index_range('Animal ID', raw_data_headers)
correct_position_header = index_range('Trial Analysis - Correct Position (', raw_data_headers)
session_length_header = index_range('End Summary - Session Time (1)', raw_data_headers)
trials_completed_header = index_range('End Summary - Trials Completed (1)', raw_data_headers)
percent_correct_header = index_range('End Summary - Percentage Correct (1)', raw_data_headers)
reversal_number_header = index_range('End Summary - Times Criteria reached (1)', raw_data_headers)
iti_blank_header = index_range('End Summary - Left ITI touches (1)', raw_data_headers) + index_range(
'End Summary - Right ITI touches (1)', raw_data_headers)
blank_header = index_range('End Summary - Left Blank Touches - Generic Counter (1)', raw_data_headers) + \
index_range('End Summary - Right Blank Touches - Generic Counter (1)', raw_data_headers) + \
index_range('End Summary - Top row touches - Generic Counter (1)', raw_data_headers)
mean_reward_header = index_range('Trial Analysis - Reward Collection Latency (', raw_data_headers)
mean_correct_touch_header = index_range('Trial Analysis - Correct Image Response Latency (', raw_data_headers)
mean_incorrect_header = index_range('Trial Analysis - Incorrect Image Latency (', raw_data_headers)
first_reversal_time_header = index_range('No trials to criterion - Condition (1)', raw_data_headers)
second_reversal_time_header = index_range('No trials to criterion - Condition (2)', raw_data_headers)
first_reversal_trials_header = index_range('No trials to criterion - Generic Evaluation (1)', raw_data_headers)
second_reversal_trials_header = index_range('No trials to criterion - Generic Evaluation (2)', raw_data_headers)
print('The program is still running... Please wait....')
col_names = ['Date', 'ID', 'Type', 'SessionLength', 'NumberOfTrial', 'PercentCorrect', 'NumberOfReversal',
'TotalITITouches', 'TotalBlankTouches', 'MeanRewardCollectionLatency', 'MeanCorrectTouchLatency',
'MeanIncorrectTouchLatency', 'SessionLengthTo1stReversalDuration',
'SessionLengthTo2ndReversalDuration', 'NumberOfTrialTo1stReversal', 'NumberOfTrialTo2ndReversal',
'PercentCorrectTo1stReversal', 'PercentCorrectTo2ndReversal', 'Day']
df_final = pd.DataFrame(columns=col_names)
# extract the necessary data from raw data
try:
df_final['Date'] = df.iloc[:, date_header[0]]
df_final['ID'] = df.iloc[:, animal_id_header[0]]
df['Type'] = ''
correct_position_names = get_header_names(raw_data_headers, correct_position_header)
get_test_type(df, correct_position_names)
df_final['Type'] = df['Type']
df_final['SessionLength'] = df.iloc[:, session_length_header[0]]
df_final['NumberOfTrial'] = df.iloc[:, trials_completed_header[0]]
        df_final['PercentCorrect'] = df.iloc[:, percent_correct_header[0]]
df_final['NumberOfReversal'] = df.iloc[:, reversal_number_header[0]]
df_final['TotalITITouches'] = df.iloc[:, iti_blank_header].sum(axis=1)
df_final['TotalBlankTouches'] = df.iloc[:, blank_header].sum(axis=1)
df_final['MeanRewardCollectionLatency'] = df.iloc[:, mean_reward_header].mean(axis=1)
df_final['MeanCorrectTouchLatency'] = df.iloc[:, mean_correct_touch_header].mean(axis=1)
df_final['MeanIncorrectTouchLatency'] = df.iloc[:, mean_incorrect_header].mean(axis=1)
df_final['SessionLengthTo1stReversalDuration'] = df.iloc[:, first_reversal_time_header[0]]
df_final['SessionLengthTo2ndReversalDuration'] = df.iloc[:, second_reversal_time_header[0]]
df_final['NumberOfTrialTo1stReversal'] = df.iloc[:, first_reversal_trials_header[0]]
df_final['NumberOfTrialTo2ndReversal'] = df.iloc[:, second_reversal_trials_header[0]]
get_missing_reversal_trials(df_final)
get_fixed_session_time(df_final, df)
number_correct_column_names = get_header_names(raw_data_headers, number_correct_header)
df['PercentCorrectTo1stReversal'] = np.nan
get_percent_correctness_first(df, df_final, number_correct_column_names)
df_final['PercentCorrectTo1stReversal'] = df['PercentCorrectTo1stReversal']
df['PercentCorrectTo2ndReversal'] = np.nan
get_percent_correctness_second(df, df_final, number_correct_column_names)
df_final['PercentCorrectTo2ndReversal'] = df['PercentCorrectTo2ndReversal']
df_final['Day'] = df_final.groupby('ID').cumcount() + 1
df_final = df_final.sort_values(by=['ID', 'Date'])
except (IndexError, KeyError, ValueError):
mb.showerror("Setup Error",
'ld() error: Either you selected the wrong type of test or headers are not the same on all files!')
print('ld() error: Either you selected the wrong type of test or headers are not the same on all files!')
return None
print('The program is almost done running... Please wait....')
return df_final
def acquisition(df, script_location):
"""
This function is used to specifically get cleaned data for the Acquisition test. The resulting dataframe will have
the following headers: 'Date', 'ID', 'SessionLength', 'Corrects', 'BlankTouches', 'TotalITITouches',
'MeanCorrectTouchLatency', 'MeanBlankTouchLatency', 'MeanRewardTouchLatency', 'Day'
Running this function on the wrong test will cause an error message!
If you're running the function on the correct files, then maybe the headers for all the files are not identical.
:param df: The dataframe that represents the raw ABET file
:param script_location: The location where the script is located.
:return: df_final: A dataframe with all the Initial Touch data and proper headers.
"""
create_merge_file(df, script_location)
print('The program is running... Please wait....')
# sort by corrects and time, remove duplicates
df = df.sort_values(by=['End Summary - Corrects (1)', 'End Summary - Condition (1)'])
df = remove_duplicates(df, script_location)
# all the headers in the raw data file
raw_data_headers = df.columns.values.tolist()
# basically want to replace '-' with NaN values in this specific range
all_numeric_values = [*range(13, len(raw_data_headers), 1)]
df = convert_to_int(all_numeric_values, raw_data_headers, df)
# get the column indices for specific parameters
date_header = index_range('Schedule run date', raw_data_headers)
animal_id_header = index_range('Animal ID', raw_data_headers)
session_length_header = index_range('End Summary - Condition (1)', raw_data_headers)
correct_header = index_range('End Summary - Corrects (1)', raw_data_headers)
blank_touches_header = index_range('End Summary - Blank Touches (1)', raw_data_headers)
iti_blank_header = index_range('End Summary - Left ITI Touches (1)', raw_data_headers) + index_range(
'End Summary - Right ITI Touches (1)', raw_data_headers) + index_range(
'End Summary - Centre ITI Touches (1)', raw_data_headers)
correct_touch_latency_header = index_range('Correct touch latency (', raw_data_headers)
blank_touch_latency_header = index_range('Blank Touch Latency (', raw_data_headers)
correct_reward_collect_header = index_range('Correct Reward Collection (', raw_data_headers)
print('The program is still running... Please wait....')
col_names = ['Date', 'ID', 'SessionLength', 'Corrects', 'BlankTouches', 'TotalITITouches',
'MeanCorrectTouchLatency', 'MeanBlankTouchLatency', 'MeanRewardTouchLatency', 'Day']
df_final = pd.DataFrame(columns=col_names)
# extract the necessary data from raw data
try:
df_final['Date'] = df.iloc[:, date_header[0]]
df_final['ID'] = df.iloc[:, animal_id_header[0]]
df_final['SessionLength'] = df.iloc[:, session_length_header[0]]
df_final['Corrects'] = df.iloc[:, correct_header[0]]
df_final['BlankTouches'] = df.iloc[:, blank_touches_header[0]]
df_final['TotalITITouches'] = df.iloc[:, iti_blank_header].sum(axis=1)
df_final['MeanCorrectTouchLatency'] = df.iloc[:, correct_touch_latency_header].mean(axis=1)
df_final['MeanBlankTouchLatency'] = df.iloc[:, blank_touch_latency_header].mean(axis=1)
df_final['MeanRewardTouchLatency'] = df.iloc[:, correct_reward_collect_header].mean(axis=1)
df_final['Day'] = df_final.groupby('ID').cumcount() + 1
df_final = df_final.sort_values(by=['ID', 'Date'])
except (IndexError, KeyError, ValueError):
mb.showerror("Setup Error",
'acquisition() error: Either you selected the wrong type of test or headers are not the same on all files!')
print(
'acquisition() error: Either you selected the wrong type of test or headers are not the same on all files!')
return None
print('The program is almost done running... Please wait....')
return df_final
def extinction(df, script_location):
"""
This function is used to specifically get cleaned data for the Extinction test. The resulting dataframe will have
the following headers: 'Date', 'ID', 'SessionLength', 'Responses', 'Omissions', 'TotalITITouches',
'MeanResponseTouchLatency', 'MeanBlankTouchLatency', 'MeanTrayEntryLatency', 'Day'
Running this function on the wrong test will cause an error message!
If you're running the function on the correct files, then maybe the headers for all the files are not identical.
:param df: The dataframe that represents the raw ABET file
:param script_location: The location where the script is located.
:return: df_final: A dataframe with all the Initial Touch data and proper headers.
"""
create_merge_file(df, script_location)
print('The program is running... Please wait....')
# sort by responses and time, remove duplicates
df = df.sort_values(by=['End Summary - Responses (1)', 'End Summary - Condition (1)'])
df = remove_duplicates(df, script_location)
raw_data_headers = df.columns.values.tolist()
# basically want to replace '-' with NaN values in this specific range
all_numeric_values = [*range(13, len(raw_data_headers), 1)]
df = convert_to_int(all_numeric_values, raw_data_headers, df)
# get the column indices for specific parameters
date_header = index_range('Schedule run date', raw_data_headers)
animal_id_header = index_range('Animal ID', raw_data_headers)
session_length_header = index_range('End Summary - Condition (1)', raw_data_headers)
responses_header = index_range('End Summary - Responses (1)', raw_data_headers)
omissions_header = index_range('End Summary - Omissions (1)', raw_data_headers)
mean_response_touch_header = index_range('Response touch latency ', raw_data_headers)
mean_blank_touch_header = index_range('Blank Touch Latency (', raw_data_headers)
mean_tray_entry_latency = index_range('Tray Entry Latency (', raw_data_headers)
iti_blank_header = index_range('End Summary - Left ITI Touches (1)', raw_data_headers) + index_range(
'End Summary - Right ITI Touches (1)', raw_data_headers) + index_range(
'End Summary - Centre ITI Touches (1)', raw_data_headers)
print('The program is still running... Please wait....')
col_names = ['Date', 'ID', 'SessionLength', 'Responses', 'Omissions', 'TotalITITouches',
'MeanResponseTouchLatency', 'MeanBlankTouchLatency', 'MeanTrayEntryLatency', 'Day']
df_final = pd.DataFrame(columns=col_names)
# extract the necessary data from raw data
try:
df_final['Date'] = df.iloc[:, date_header[0]]
df_final['ID'] = df.iloc[:, animal_id_header[0]]
df_final['SessionLength'] = df.iloc[:, session_length_header[0]]
df_final['Responses'] = df.iloc[:, responses_header[0]]
df_final['Omissions'] = df.iloc[:, omissions_header[0]]
df_final['TotalITITouches'] = df.iloc[:, iti_blank_header].sum(axis=1)
df_final['MeanResponseTouchLatency'] = df.iloc[:, mean_response_touch_header].mean(axis=1)
df_final['MeanBlankTouchLatency'] = df.iloc[:, mean_blank_touch_header].mean(axis=1)
df_final['MeanTrayEntryLatency'] = df.iloc[:, mean_tray_entry_latency].mean(axis=1)
df_final['Day'] = df_final.groupby('ID').cumcount() + 1
df_final = df_final.sort_values(by=['ID', 'Date'])
except (IndexError, KeyError, ValueError):
mb.showerror("Setup Error",
'extinction() error: Either you selected the wrong type of test or headers are not the same on all files!')
print(
'extinction() error: Either you selected the wrong type of test or headers are not the same on all files!')
return None
print('The program is almost done running... Please wait....')
return df_final
def data_setup(test_type):
"""
This functions prompts the user for the location of the raw data. It will read the raw data files and create a
dataframe. Depending on the test type, the function will clean the data and return the appropriate cleaned dataframe
which will then be made into a csv to be saved.
Running this function on the wrong test will cause an error message!
If you're running the function on the correct files, then maybe the headers for all the files are not identical.
If there are no csv files in the directory, the function will print an error message and stop and return.
:param test_type: The type of test that the animal ran, listed under schedule type
:return: A cleaned dataframe with the proper parameters based on the test type.
"""
print('Please open the directory that has all the raw data csv files')
file_path = filedialog.askdirectory(title='Open the directory with csv files')
if len(file_path) == 0:
mb.showerror("Setup Error", 'data_setup() error: The cancel button was clicked! Please try again!')
print('The cancel button was clicked! Please try again!')
return
# passes the folder directory and compiles all the csv files into ONE csv file
pattern = os.path.join(file_path, '*.csv')
files = glob.glob(pattern)
script_location = os.path.dirname(os.path.abspath(__file__))
os.chdir(script_location)
try:
df = pd.read_csv(files[0], encoding='utf-8', delimiter=',', error_bad_lines=False)
except IndexError:
mb.showerror("Setup Error",
'data_setup() error: Either the directory is empty or does not contain any .csv files!')
print('data_setup() error: Either the directory is empty or does not contain any .csv files!')
return
# append all the other csv files onto the current dataframe
for file in files[1:len(files)]:
if not file.startswith('.'):
df_csv = pd.read_csv(file, index_col=False, encoding='utf-8', delimiter=',')
df = df.append(df_csv)
if test_type == 'Hab1':
try:
df_specific = specific_schedule_name(df, 'Mouse LD Habituation 1')
df_hab_one = habituation_one(df_specific, script_location)
return df_hab_one
except (IndexError, ValueError, KeyError, AttributeError):
mb.showerror("Setup Error",
'data_setup() error: There is an issue with Hab 1 in setup.py!'
' Make sure you selected the right raw data folder!')
print('data_setup() error: There is an issue with Hab 1 in setup.py!'
'Make sure you selected the right raw data folder!')
return None
if test_type == 'Hab2':
try:
df_specific = specific_schedule_name(df, 'Mouse LD Habituation 2')
df_hab_two = habituation_two(df_specific, script_location)
return df_hab_two
except (IndexError, ValueError, KeyError, AttributeError):
mb.showerror("Setup Error",
'data_setup() error: There is an issue with Hab 2 in setup.py!'
' Make sure you selected the right raw data folder!')
print('data_setup() error: There is an issue with Hab 2 in setup.py!'
'Make sure you selected the right raw data folder!')
return None
if test_type == 'IT':
try:
df_specific = specific_schedule_name(df, 'Mouse LD Initial Touch Training v2')
df_initial_touch = initial_touch(df_specific, script_location)
return df_initial_touch
except (IndexError, ValueError, KeyError, AttributeError):
mb.showerror("Setup Error",
'data_setup() error: There is an issue with IT in setup.py!'
' Make sure you selected the right raw data folder!')
print('data_setup() error: There is an issue with IT in setup.py!'
'Make sure you selected the right raw data folder!')
return None
if test_type == 'MT':
try:
df_specific = specific_schedule_name(df, 'Mouse LD Must Touch Training v2')
df_must_touch = must_touch_initiate(df_specific, script_location)
return df_must_touch
except (IndexError, ValueError, KeyError, AttributeError):
mb.showerror("Setup Error",
'data_setup() error: There is an issue with MT in setup.py!'
' Make sure you selected the right raw data folder!')
print('data_setup() error: There is an issue with MT in setup.py!'
'Make sure you selected the right raw data folder!')
return None
if test_type == 'MI':
try:
df_specific = specific_schedule_name(df, 'Mouse LD Must Initiate Training v2')
df_must_initiate = must_touch_initiate(df_specific, script_location)
return df_must_initiate
except (IndexError, ValueError, KeyError, AttributeError):
mb.showerror("Setup Error",
'data_setup() error: There is an issue with MI in setup.py!'
' Make sure you selected the right raw data folder!')
print('data_setup() error: There is an issue with MI in setup.py!'
'Make sure you selected the right raw data folder!')
return None
if test_type == 'PI':
try:
df_specific = specific_schedule_name(df, 'Mouse LD Punish Incorrect Training v2')
df_punish_incorrect = punish_incorrect(df_specific, script_location)
return df_punish_incorrect
except (IndexError, ValueError, KeyError, AttributeError):
mb.showerror("Setup Error",
'data_setup() error: There is an issue with PI in setup.py!'
' Make sure you selected the right raw data folder!')
print('data_setup() error: There is an issue with PI in setup.py!'
'Make sure you selected the right raw data folder!')
return None
if test_type == 'LD Train' or test_type == 'LD Probe':
try:
df_specific = specific_schedule_name(df, 'Mouse LD 1 choice reversal v3')
df_ld = ld(df_specific, script_location)
return df_ld
except (IndexError, ValueError, KeyError, AttributeError):
mb.showerror("Setup Error",
'data_setup() error: There is an issue with LD Train/LD Probe in setup.py!'
' Make sure you selected the right raw data folder!')
            print('data_setup() error: There is an issue with LD Train/LD Probe in setup.py!'
                  ' Make sure you selected the right raw data folder!')
return None
if test_type == 'Acq':
try:
df_specific = specific_schedule_name(df, 'Mouse Extinction pt 1 v2')
df_acq = acquisition(df_specific, script_location)
return df_acq
except (IndexError, ValueError, KeyError, AttributeError):
mb.showerror("Setup Error",
'data_setup() error: There is an issue with Acq in setup.py!'
' Make sure you selected the right raw data folder!')
            print('data_setup() error: There is an issue with Acq in setup.py!'
                  ' Make sure you selected the right raw data folder!')
return None
if test_type == 'Ext':
try:
df_specific = specific_schedule_name(df, 'Mouse Extinction pt 2 v2')
df_ext = extinction(df_specific, script_location)
return df_ext
except (IndexError, ValueError, KeyError, AttributeError):
mb.showerror("Setup Error",
'data_setup() error: There is an issue with Ext in setup.py!'
                         ' Make sure you selected the right raw data folder!')
            print('data_setup() error: There is an issue with Ext in setup.py!'
                  ' Make sure you selected the right raw data folder!')
return None
def save_file_message(df):
"""
    This function prompts the user to save the cleaned dataframe as a csv file. The save type is fixed to .csv and
    cannot be changed!
:param df: The cleaned dataframe ready to be converted into csv file.
:except FileNotFoundError: This will occur when you close the save window before saving!
"""
try:
print('A window has opened asking for you to save your newly created csv file. Please look for it!')
save_file_path = filedialog.asksaveasfilename(defaultextension='.csv', title='Save the file')
df.to_csv(save_file_path, index=False)
print('A .csv file has been created. Please look at it in the saved directory!')
print('\n')
except FileNotFoundError:
mb.showerror("Setup Error",
'save_file_message() error: You closed the window before saving! Please run the program again!')
print('save_file_message() error: You closed the window before saving! Please run the program again!')
print('\n')
return
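
# --- Illustrative usage sketch (not part of the original script) ---
# data_setup() is assumed here to take the test type as its argument; its actual
# signature is defined earlier in this file and is not visible in this excerpt.
# 'LD Train' is one of the test types handled above.
if __name__ == '__main__':
    cleaned_df = data_setup('LD Train')  # assumption: the test type is passed in as a parameter
    if cleaned_df is not None:
        save_file_message(cleaned_df)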
| [
"tkinter.filedialog.askdirectory",
"tkinter.messagebox.showerror",
"os.path.join",
"tkinter.filedialog.asksaveasfilename",
"os.chdir",
"setuptools.glob.glob",
"warnings.simplefilter",
"os.path.abspath"
] | [((164, 226), 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'FutureWarning'}), "(action='ignore', category=FutureWarning)\n", (185, 226), False, 'import warnings\n'), ((44383, 44449), 'tkinter.filedialog.askdirectory', 'filedialog.askdirectory', ([], {'title': '"""Open the directory with csv files"""'}), "(title='Open the directory with csv files')\n", (44406, 44449), True, 'import tkinter.filedialog as filedialog\n'), ((44774, 44806), 'os.path.join', 'os.path.join', (['file_path', '"""*.csv"""'], {}), "(file_path, '*.csv')\n", (44786, 44806), False, 'import os\n'), ((44820, 44838), 'setuptools.glob.glob', 'glob.glob', (['pattern'], {}), '(pattern)\n', (44829, 44838), False, 'from setuptools import glob\n'), ((44912, 44937), 'os.chdir', 'os.chdir', (['script_location'], {}), '(script_location)\n', (44920, 44937), False, 'import os\n'), ((44490, 44593), 'tkinter.messagebox.showerror', 'mb.showerror', (['"""Setup Error"""', '"""data_setup() error: The cancel button was clicked! Please try again!"""'], {}), "('Setup Error',\n 'data_setup() error: The cancel button was clicked! Please try again!')\n", (44502, 44593), True, 'import tkinter.messagebox as mb\n'), ((44880, 44905), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (44895, 44905), False, 'import os\n'), ((52369, 52445), 'tkinter.filedialog.asksaveasfilename', 'filedialog.asksaveasfilename', ([], {'defaultextension': '""".csv"""', 'title': '"""Save the file"""'}), "(defaultextension='.csv', title='Save the file')\n", (52397, 52445), True, 'import tkinter.filedialog as filedialog\n'), ((1592, 1758), 'tkinter.messagebox.showerror', 'mb.showerror', (['"""Setup Error"""', '"""specific_schedule_name() error: either you have selected the wrong type of test or the headers are not the same for all files!"""'], {}), "('Setup Error',\n 'specific_schedule_name() error: either you have selected the wrong type of test or the headers are not the same for all files!'\n )\n", (1604, 1758), True, 'import tkinter.messagebox as mb\n'), ((2866, 2998), 'tkinter.messagebox.showerror', 'mb.showerror', (['"""Setup Error"""', '"""create_merge_file() error: You may have the "merged_file.csv" already open! Please close it!"""'], {}), '(\'Setup Error\',\n \'create_merge_file() error: You may have the "merged_file.csv" already open! Please close it!\'\n )\n', (2878, 2998), True, 'import tkinter.messagebox as mb\n'), ((3160, 3263), 'tkinter.messagebox.showerror', 'mb.showerror', (['"""Setup Error"""', '"""create_merge_file() error: These are not the correct raw data files!"""'], {}), "('Setup Error',\n 'create_merge_file() error: These are not the correct raw data files!')\n", (3172, 3263), True, 'import tkinter.messagebox as mb\n'), ((4345, 4490), 'tkinter.messagebox.showerror', 'mb.showerror', (['"""Setup Error"""', '"""create_dropped_duplicates_file() error: You may have the "merged_file.csv" already open! Please close it!"""'], {}), '(\'Setup Error\',\n \'create_dropped_duplicates_file() error: You may have the "merged_file.csv" already open! 
Please close it!\'\n )\n', (4357, 4490), True, 'import tkinter.messagebox as mb\n'), ((4708, 4829), 'tkinter.messagebox.showerror', 'mb.showerror', (['"""Setup Error"""', '"""create_dropped_duplicates_file() error: These are not the correct raw data files!"""'], {}), "('Setup Error',\n 'create_dropped_duplicates_file() error: These are not the correct raw data files!'\n )\n", (4720, 4829), True, 'import tkinter.messagebox as mb\n'), ((9717, 9866), 'tkinter.messagebox.showerror', 'mb.showerror', (['"""Setup Error"""', '"""habituation_one() error: either you selected the wrong type of test or headers are not the same on all files!"""'], {}), "('Setup Error',\n 'habituation_one() error: either you selected the wrong type of test or headers are not the same on all files!'\n )\n", (9729, 9866), True, 'import tkinter.messagebox as mb\n'), ((14280, 14430), 'tkinter.messagebox.showerror', 'mb.showerror', (['"""Setup Error"""', '"""habituation_two() error: Either you selected the wrong type of test or headers are not the same on all files!"""'], {}), "('Setup Error',\n 'habituation_two() error: Either you selected the wrong type of test or headers are not the same on all files!'\n )\n", (14292, 14430), True, 'import tkinter.messagebox as mb\n'), ((18375, 18523), 'tkinter.messagebox.showerror', 'mb.showerror', (['"""Setup Error"""', '"""initial_touch() error: Either you selected the wrong type of test or headers are not the same on all files!"""'], {}), "('Setup Error',\n 'initial_touch() error: Either you selected the wrong type of test or headers are not the same on all files!'\n )\n", (18387, 18523), True, 'import tkinter.messagebox as mb\n'), ((23134, 23288), 'tkinter.messagebox.showerror', 'mb.showerror', (['"""Setup Error"""', '"""must_touch_initiate() error: Either you selected the wrong type of test or headers are not the same on all files!"""'], {}), "('Setup Error',\n 'must_touch_initiate() error: Either you selected the wrong type of test or headers are not the same on all files!'\n )\n", (23146, 23288), True, 'import tkinter.messagebox as mb\n'), ((27923, 28074), 'tkinter.messagebox.showerror', 'mb.showerror', (['"""Setup Error"""', '"""punish_incorrect() error: Either you selected the wrong type of test or headers are not the same on all files!"""'], {}), "('Setup Error',\n 'punish_incorrect() error: Either you selected the wrong type of test or headers are not the same on all files!'\n )\n", (27935, 28074), True, 'import tkinter.messagebox as mb\n'), ((35051, 35187), 'tkinter.messagebox.showerror', 'mb.showerror', (['"""Setup Error"""', '"""ld() error: Either you selected the wrong type of test or headers are not the same on all files!"""'], {}), "('Setup Error',\n 'ld() error: Either you selected the wrong type of test or headers are not the same on all files!'\n )\n", (35063, 35187), True, 'import tkinter.messagebox as mb\n'), ((39082, 39227), 'tkinter.messagebox.showerror', 'mb.showerror', (['"""Setup Error"""', '"""acquisition() error: Either you selected the wrong type of test or headers are not the same on all files!"""'], {}), "('Setup Error',\n 'acquisition() error: Either you selected the wrong type of test or headers are not the same on all files!'\n )\n", (39094, 39227), True, 'import tkinter.messagebox as mb\n'), ((43054, 43198), 'tkinter.messagebox.showerror', 'mb.showerror', (['"""Setup Error"""', '"""extinction() error: Either you selected the wrong type of test or headers are not the same on all files!"""'], {}), "('Setup Error',\n 'extinction() error: Either you 
selected the wrong type of test or headers are not the same on all files!'\n )\n", (43066, 43198), True, 'import tkinter.messagebox as mb\n'), ((45075, 45200), 'tkinter.messagebox.showerror', 'mb.showerror', (['"""Setup Error"""', '"""data_setup() error: Either the directory is empty or does not contain any .csv files!"""'], {}), "('Setup Error',\n 'data_setup() error: Either the directory is empty or does not contain any .csv files!'\n )\n", (45087, 45200), True, 'import tkinter.messagebox as mb\n'), ((52645, 52778), 'tkinter.messagebox.showerror', 'mb.showerror', (['"""Setup Error"""', '"""save_file_message() error: You closed the window before saving! Please run the program again!"""'], {}), "('Setup Error',\n 'save_file_message() error: You closed the window before saving! Please run the program again!'\n )\n", (52657, 52778), True, 'import tkinter.messagebox as mb\n'), ((45910, 46061), 'tkinter.messagebox.showerror', 'mb.showerror', (['"""Setup Error"""', '"""data_setup() error: There is an issue with Hab 1 in setup.py! Make sure you selected the right raw data folder!"""'], {}), "('Setup Error',\n 'data_setup() error: There is an issue with Hab 1 in setup.py! Make sure you selected the right raw data folder!'\n )\n", (45922, 46061), True, 'import tkinter.messagebox as mb\n'), ((46597, 46748), 'tkinter.messagebox.showerror', 'mb.showerror', (['"""Setup Error"""', '"""data_setup() error: There is an issue with Hab 2 in setup.py! Make sure you selected the right raw data folder!"""'], {}), "('Setup Error',\n 'data_setup() error: There is an issue with Hab 2 in setup.py! Make sure you selected the right raw data folder!'\n )\n", (46609, 46748), True, 'import tkinter.messagebox as mb\n'), ((47304, 47452), 'tkinter.messagebox.showerror', 'mb.showerror', (['"""Setup Error"""', '"""data_setup() error: There is an issue with IT in setup.py! Make sure you selected the right raw data folder!"""'], {}), "('Setup Error',\n 'data_setup() error: There is an issue with IT in setup.py! Make sure you selected the right raw data folder!'\n )\n", (47316, 47452), True, 'import tkinter.messagebox as mb\n'), ((48002, 48150), 'tkinter.messagebox.showerror', 'mb.showerror', (['"""Setup Error"""', '"""data_setup() error: There is an issue with MT in setup.py! Make sure you selected the right raw data folder!"""'], {}), "('Setup Error',\n 'data_setup() error: There is an issue with MT in setup.py! Make sure you selected the right raw data folder!'\n )\n", (48014, 48150), True, 'import tkinter.messagebox as mb\n'), ((48709, 48857), 'tkinter.messagebox.showerror', 'mb.showerror', (['"""Setup Error"""', '"""data_setup() error: There is an issue with MI in setup.py! Make sure you selected the right raw data folder!"""'], {}), "('Setup Error',\n 'data_setup() error: There is an issue with MI in setup.py! Make sure you selected the right raw data folder!'\n )\n", (48721, 48857), True, 'import tkinter.messagebox as mb\n'), ((49422, 49570), 'tkinter.messagebox.showerror', 'mb.showerror', (['"""Setup Error"""', '"""data_setup() error: There is an issue with PI in setup.py! Make sure you selected the right raw data folder!"""'], {}), "('Setup Error',\n 'data_setup() error: There is an issue with PI in setup.py! Make sure you selected the right raw data folder!'\n )\n", (49434, 49570), True, 'import tkinter.messagebox as mb\n'), ((50118, 50281), 'tkinter.messagebox.showerror', 'mb.showerror', (['"""Setup Error"""', '"""data_setup() error: There is an issue with LD Train/LD Probe in setup.py! 
Make sure you selected the right raw data folder!"""'], {}), "('Setup Error',\n 'data_setup() error: There is an issue with LD Train/LD Probe in setup.py! Make sure you selected the right raw data folder!'\n )\n", (50130, 50281), True, 'import tkinter.messagebox as mb\n'), ((50818, 50967), 'tkinter.messagebox.showerror', 'mb.showerror', (['"""Setup Error"""', '"""data_setup() error: There is an issue with Acq in setup.py! Make sure you selected the right raw data folder!"""'], {}), "('Setup Error',\n 'data_setup() error: There is an issue with Acq in setup.py! Make sure you selected the right raw data folder!'\n )\n", (50830, 50967), True, 'import tkinter.messagebox as mb\n'), ((51489, 51639), 'tkinter.messagebox.showerror', 'mb.showerror', (['"""Setup Error"""', '"""data_setup() error: There is an issue with Ext in setup.py! Make sure you selected the right raw data folder!)"""'], {}), "('Setup Error',\n 'data_setup() error: There is an issue with Ext in setup.py! Make sure you selected the right raw data folder!)'\n )\n", (51501, 51639), True, 'import tkinter.messagebox as mb\n')] |
import logging
import json
import os
import shutil
import socket
import stat
import time
from tempfile import mkdtemp
from behave import *
from channels import SocketChannel
from nose.tools import eq_, ok_
from utils import timeout
_LOGGER = logging.getLogger(__name__)
def _terminate(context, alias):
try:
context.runner.terminate(alias)
except KeyError:
_LOGGER.debug("%s was not started", alias)
@given(u'I have a socket path to use')
def step_impl(context):
dirname = mkdtemp()
context.add_cleanup(shutil.rmtree, dirname)
context.socket_path = os.path.join(dirname, "tr")
@step(u'I start pa2human with that path')
def step_impl(context):
context.runner.add("pa2human", command="./pa2human.py")
context.runner.start("pa2human", with_args=['--socket', context.socket_path])
context.add_cleanup(_terminate, context, "pa2human")
@given('the socket is created')
@when('I wait for socket to appear')
@then('the socket appears')
def step_impl(context):
for _ in range(100):
if os.path.exists(context.socket_path):
break
time.sleep(0.01)
else:
ok_(False, "{} not found".format(context.socket_path))
ok_(stat.S_ISSOCK(os.stat(context.socket_path).st_mode),
"{} is not a socket".format(context.socket_path))
def _connect(context):
if isinstance(context.socket_path, str):
af = socket.AF_UNIX
else:
af = socket.AF_INET
s = socket.socket(af, socket.SOCK_STREAM)
s.connect(context.socket_path)
return s
@then('the socket accepts connections')
@then('the socket accepts a connection')
@then('the socket accepts another connection')
def step_impl(context):
_connect(context)
@when('I stop pa2human')
def step_impl(context):
context.runner.terminate("pa2human")
@then('the socket doesn\'t exist')
def step_impl(context):
for _ in range(100):
if not os.path.exists(context.socket_path):
return
time.sleep(0.01)
ok_(False, "{} still exists".format(context.socket_path))
@given('the service is started')
def step_impl(context):
context.execute_steps("Given I have a socket path to use")
context.runner.add("pa2human", command="./pa2human.py",
type="socket", buffering="line")
context.runner.start("pa2human",
with_args=['--socket', context.socket_path],
socket=context.socket_path)
context.add_cleanup(_terminate, context, "pa2human")
@given(u'brain is connected')
def step_impl(context):
context.socket = context.runner.get_channel("pa2human")
@when(u'brain asks to translate "{intent}" to {recipient}')
def step_impl(context, intent, recipient):
context.socket.write(json.dumps({"intent": intent, "to": recipient}).encode()+b'\n')
@when(u'brain asks to translate "{text}" from {source}')
def step_impl(context, text, source):
context.socket.write(json.dumps({"text": text, "from": source}).encode()+b'\n')
@then(u'the result is {field} "{value}"')
def step_impl(context, field, value):
with timeout(1):
while True:
line = context.socket.read()
if line:
break
message = json.loads(line.decode())
expected = {field: value}
eq_(message, expected,
"Expected translation '{}', got '{}'".format(expected, message))
@when(u'I start pa2human with tcp socket')
def step_impl(context):
context.runner.add("pa2human", command="./pa2human.py")
context.runner.start("pa2human", with_args=['--socket', '0.0.0.0:0'])
context.add_cleanup(_terminate, context, "pa2human")
@then(u'pa2human prints that it is listening')
def step_impl(context):
chan = context.runner.get_channel("pa2human")
with timeout(1):
while True:
line = chan.read()
if line:
break
line = line.decode()
expected_head = "Pa2human listening on "
offt = len(expected_head)
eq_(line[:offt], expected_head)
host, port = line[offt:].strip().split(':')
context.socket = SocketChannel(socket.create_connection((host, int(port))))
context.socket_path = (host, int(port))
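
# The steps above use a `timeout` context manager imported from the project's utils
# module, which is not included in this file. The helper below is only an illustrative
# sketch of such a deadline guard (Unix-only, SIGALRM-based) under a different name,
# so it does not shadow the real import; the project's actual implementation may differ.
import contextlib
import signal

@contextlib.contextmanager
def _timeout_sketch(seconds):
    """Raise TimeoutError if the wrapped block runs longer than `seconds` seconds."""
    def _expired(signum, frame):
        raise TimeoutError("timed out after {} second(s)".format(seconds))
    previous = signal.signal(signal.SIGALRM, _expired)
    signal.alarm(seconds)
    try:
        yield
    finally:
        signal.alarm(0)
        signal.signal(signal.SIGALRM, previous)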
| [
"logging.getLogger",
"os.path.exists",
"utils.timeout",
"nose.tools.eq_",
"socket.socket",
"json.dumps",
"os.path.join",
"time.sleep",
"tempfile.mkdtemp",
"os.stat"
] | [((247, 274), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (264, 274), False, 'import logging\n'), ((509, 518), 'tempfile.mkdtemp', 'mkdtemp', ([], {}), '()\n', (516, 518), False, 'from tempfile import mkdtemp\n'), ((593, 620), 'os.path.join', 'os.path.join', (['dirname', '"""tr"""'], {}), "(dirname, 'tr')\n", (605, 620), False, 'import os\n'), ((1463, 1500), 'socket.socket', 'socket.socket', (['af', 'socket.SOCK_STREAM'], {}), '(af, socket.SOCK_STREAM)\n', (1476, 1500), False, 'import socket\n'), ((3984, 4015), 'nose.tools.eq_', 'eq_', (['line[:offt]', 'expected_head'], {}), '(line[:offt], expected_head)\n', (3987, 4015), False, 'from nose.tools import eq_, ok_\n'), ((1047, 1082), 'os.path.exists', 'os.path.exists', (['context.socket_path'], {}), '(context.socket_path)\n', (1061, 1082), False, 'import os\n'), ((1110, 1126), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (1120, 1126), False, 'import time\n'), ((1982, 1998), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (1992, 1998), False, 'import time\n'), ((3096, 3106), 'utils.timeout', 'timeout', (['(1)'], {}), '(1)\n', (3103, 3106), False, 'from utils import timeout\n'), ((3774, 3784), 'utils.timeout', 'timeout', (['(1)'], {}), '(1)\n', (3781, 3784), False, 'from utils import timeout\n'), ((1918, 1953), 'os.path.exists', 'os.path.exists', (['context.socket_path'], {}), '(context.socket_path)\n', (1932, 1953), False, 'import os\n'), ((1222, 1250), 'os.stat', 'os.stat', (['context.socket_path'], {}), '(context.socket_path)\n', (1229, 1250), False, 'import os\n'), ((2761, 2808), 'json.dumps', 'json.dumps', (["{'intent': intent, 'to': recipient}"], {}), "({'intent': intent, 'to': recipient})\n", (2771, 2808), False, 'import json\n'), ((2946, 2988), 'json.dumps', 'json.dumps', (["{'text': text, 'from': source}"], {}), "({'text': text, 'from': source})\n", (2956, 2988), False, 'import json\n')] |
# -*- coding:utf-8 -*-
"""
Binance API wrapper over Pandas lib.
"""
import inspect
import os
import sys
import time as tm
import warnings
from collections.abc import Iterable  # collections.Iterable was removed in Python 3.10
from functools import partial
import ccxt
import numpy as np
import pandas as pd
import requests as req
from ccxt.base import errors as apierr
from decorator import decorator
from panance.utils import cnum, is_empty
BASE_DIR = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
sys.path.append(BASE_DIR)
pd.options.display.precision = 8
warnings.filterwarnings(action='ignore', category=FutureWarning)
__version__ = '0.1.6'
__author__ = '<NAME>'
__license__ = 'UNLICENSE'
__package__ = 'panance'
__description__ = 'Python 3 Binance API wrapper built over Pandas Library'
__site__ = 'https://github.com/havocesp/panance'
__email__ = '<EMAIL>'
__requirements__ = ['ccxt', 'pandas', 'numpy', 'requests', 'decorator']
__all__ = ['Panance', '__package__', '__version__', '__author__', '__site__',
'__description__', '__email__', '__requirements__', '__license__']
_LIMITS = [5, 10, 20, 50, 100, 500, 1000]
@decorator
def checker(fn, *args, **kwargs):
"""
Param validator decorator.
:param fn: reference to caller class instance
:param args: method call args
:param kwargs: method class kwargs
:return:
"""
args = [v for v in args]
self = args.pop(0) # type: ccxt.binance
try:
sig = inspect.signature(fn)
except Exception as err:
print(str(err))
return None
param_names = [p for p in sig.parameters.keys()]
detected_params = [f for f in ['currency', 'limit', 'coin', 'symbol', 'symbols'] if f in param_names]
if len(detected_params):
def get_value(_v):
value = kwargs.get(_v)
if _v in param_names and value is None:
arg_position = param_names.index(_v)
value = args[arg_position - 1]
return value
for dp in detected_params:
param_value = get_value(dp)
if param_value is None:
continue
if 'limit' in dp and not str(param_value) in [l for l in map(str, _LIMITS)]:
str_limits = ','.join([l for l in map(str, _LIMITS)])
raise ValueError('Invalid limit: {}\nAccepted values: {}'.format(str(param_value), str_limits))
elif dp in ['currency', 'coin', 'symbol', 'symbols']:
                if 'symbols' not in dp and isinstance(param_value, str):
                    param_value = [param_value]
symbol_list = [str(s).upper() for s in param_value]
if self.symbols is None or not len(self.symbols):
self.load_markets(True)
                if not all(s in self.currencies or s in self.symbols for s in symbol_list):
                    raise ValueError(
                        'There is an invalid currency or symbol in the function params: {}'.format(symbol_list))
return fn(self, *args, **kwargs)
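
# Illustration (added comment): any method decorated with @checker has its 'limit'
# argument validated against _LIMITS and its 'symbol'/'coin' arguments validated
# against the loaded markets. For example, get_depth('BTC/USDT', limit=7) raises
# ValueError because 7 is not one of 5, 10, 20, 50, 100, 500 or 1000.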
class Panance(ccxt.binance):
"""
Binance API wrapper over Pandas lib.
"""
usd = 'USDT'
def __init__(self, key=None, secret=None, config=None):
"""
Constructor.
:param str key: user account Binance api key
:param str secret: user account Binance secret key
:param dict config: ccxt.binance configuration dict
"""
if config is None or not isinstance(config, dict):
config = dict(verbose=False, enableRateLimit=True, timeout=15000)
if 'apiKey' not in config or 'secret' not in config:
            if 'BINANCE_KEY' in os.environ and 'BINANCE_SECRET' in os.environ:
config.update(apiKey=os.getenv('BINANCE_KEY'), secret=os.getenv('BINANCE_SECRET'))
elif not is_empty(key) and not is_empty(secret):
config.update(apiKey=key, secret=secret)
super(Panance, self).__init__(config=config)
self.load_time_difference()
self.markets = self.load_markets()
self.symbols = [k for k in self.markets if k[-5:] in str('/' + self.usd) or k[-4:] in '/BTC']
self.currencies = [s for s in {k.split('/')[0] for k in self.symbols}]
self.currencies.append(self.usd)
self.usd_symbols = [k for k in self.symbols if k[-5:] in str('/' + self.usd)]
self.usd_currencies = [k.split('/')[0] for k in self.usd_symbols]
@checker
def _get_amount(self, coin, amount):
"""
Get coin amount.
Amount should be a float / int or an string value like "max" or a percentage like "10%",
:param coin: the coin where amount will be returned.
:param amount: a float or int with price, "max" word or a percentage like "10%"
:type amount: str pr float or int
:return float: amount as countable item, this is as a float instance
"""
if amount and isinstance(amount, str):
amount = str(amount).lower()
balance = self.get_balances(coin=coin)
if amount in 'max':
percent = 1.0
elif len(amount) > 1 and amount[-1] in '%':
percent = float(amount[:-1])
percent /= 100.0
else:
raise ValueError('Invalid amount.')
if all((balance is not None, not balance.empty)):
amount = balance['total'] * percent
else:
raise ValueError('Not enough balance for {} currency.'.format(coin))
if amount and isinstance(amount, float):
amount = round(amount, 8)
else:
raise ValueError('Invalid amount.')
return amount
@checker
def _get_price(self, symbol, price):
"""
Get price for a symbol.
If price contains "ask" or "bid", it's value will be retrieve from order book ask or bid entries.
:param symbol: slash sep formatted pair (example: BTC/USDT)
:param price: a float or int with price, "ask" or "bid"
:type price: str pr float or int
:return:
"""
if price is not None:
if str(price).lower() in ['ask', 'bid']:
field = str(price).lower()
return self.get_depth(symbol, limit=5)[field][0]
elif isinstance(price, float):
return round(price, 8)
else:
raise ValueError('Invalid price')
else:
raise ValueError('Invalid price')
@checker
def _get_since(self, timeframe='15m', limit=100):
"""
Return number of seconds resulting by doing:
>>> self.parse_timeframe(timeframe) * limit
:param str timeframe: accepted values: 1m, 5m, 15m, 30m, 1h, 2h, 4h, 12h, 1d
:param int limit: limit of timeframes
:return int: number of seconds for limit and timeframe
"""
timeframe_mills = self.parse_timeframe(timeframe) * 1000.0
return int(ccxt.Exchange.milliseconds() - timeframe_mills * limit)
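    # Illustrative calculation (added comment): with timeframe='15m' and limit=100,
    # parse_timeframe('15m') is 900 seconds, so the returned `since` timestamp lies
    # 900 s * 100 = 90,000 s (25 hours) before the current exchange time.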
@checker
def get_tickers(self, symbols=None, market=None):
"""
Get all tickers (use market param to filter result by market).
:param list symbols: list of trade pairs
:param str market: accepted values: BTC, USDT
:return pd.DataFrame: ticker data filtered by market (if set)
"""
market = str(market).upper() if market and market in ['BTC', self.usd] else None
if market is None and symbols is not None:
symbols = [str(s).upper() for s in symbols if s in self.symbols]
elif market is not None and symbols is None:
symbols = [s for s in self.symbols if s.split('/')[1] in market]
else:
symbols = None
try:
if symbols:
raw = self.fetch_tickers(symbols)
else:
raw = self.fetch_tickers()
except (apierr.RequestTimeout, apierr.DDoSProtection, apierr.InvalidNonce) as err:
print(str(err))
return None
columns = [k for k in [k for k in raw.values()][0].keys()]
        transposed = zip(*(v.values() for v in raw.values()))  # transpose ticker fields across symbols
dict_data = dict(zip(columns, transposed))
del dict_data['info'], dict_data['average'], dict_data['timestamp'], dict_data['datetime']
df = pd.DataFrame(dict_data).dropna(axis=1)
df = df.round(8).set_index('symbol')
if (df.ask < 10.0).all():
df = df.round(dict(bidVolume=3, askVolume=3, baseVolume=0, percentage=2, quoteVolume=2))
return df.sort_values('quoteVolume', ascending=False)
@checker
def get_ticker(self, symbol):
"""
Get ticker for symbol.
Ticker fields:
ask 0.084969
askVolume 7.997
baseVolume 89046.924
bid 0.08493
bidVolume 2.301
change 0.000385
close 0.084969
datetime 2018-05-17T16:07:50.610Z
high 0.0854
last 0.084969
low 0.08371
open 0.084584
percentage 0.455
previousClose 0.084585
quoteVolume 7538.2366
timestamp 1526573270061
vwap 0.08465466
:param str symbol: slash sep formatted pair (example: BTC/USDT)
:return pd.Series: ticker data for symbol.
"""
try:
raw = self.fetch_ticker(symbol)
except (apierr.RequestTimeout,) as err:
print(str(err))
return None
del raw['info'], raw['symbol'], raw['average']
return pd.DataFrame({symbol: raw})[symbol]
@checker
def get_ohlc(self, symbol, timeframe='5m', limit=100):
"""
Get OHLC data for specific symbol and timeframe.
:param str symbol: a valid slash separated trade pair
:param str timeframe: accepted values: 1m, 5m, 15m, 30m, 1h, 2h, 4h, 12h, 1d
:param int limit: result rows limit
:return pd.DataFrame: OHLC data for specific symbol and timeframe.
"""
cols = ['date', 'open', 'high', 'low', 'close', 'volume']
since = self._get_since(timeframe=timeframe, limit=limit)
try:
data = self.fetch_ohlcv(symbol, timeframe=timeframe, since=since)
except (apierr.RequestTimeout, apierr.InvalidNonce) as err:
print(str(err))
tm.sleep(3)
return None
except (apierr.DDoSProtection,) as err:
print(str(err))
tm.sleep(15)
return None
seconds2datetime = partial(pd.to_datetime, unit='ms')
date = [seconds2datetime(v.pop(0)).round('1s') for v in data]
dt_index = pd.DatetimeIndex(date, name='date', tz='Europe/Madrid')
df = pd.DataFrame(data, columns=cols[1:], index=dt_index)
return df
@checker
def get_balances(self, coin=None, detailed=False):
"""
Get balance data.
:param str coin: if set only data for currency "coin" will be returned
:param detailed: if True detailed data will be added to result
:type detailed: bool
:return pd.DataFrame: balance data
"""
try:
raw = self.fetch_balance()
except (apierr.RequestTimeout, apierr.InvalidNonce, apierr.RequestTimeout) as err:
print(str(err))
return None
[raw.pop(f) for f in ['total', 'used', 'free', 'info'] if f in raw]
df = pd.DataFrame(raw).T.query('total > 0.0').T
result = pd.DataFrame()
if detailed:
symbols = ['BTC/USDT']
if all((coin is not None, str(coin).upper() in self.currencies, str(coin).upper() not in ['BTC', 'USDT'])):
symbols.append('{}/BTC'.format(coin))
else:
for c in df.keys():
if c not in ['BTC', 'USDT']:
symbols.append('{}/BTC'.format(c))
tickers = self.get_tickers(symbols=symbols)
if tickers is not None:
tickers = tickers.T
else:
print(' - [ERROR] Server return None for ticker data.')
sys.exit(1)
btc_usdt_last = tickers['BTC/USDT']['last']
for s in symbols:
c, b = s.split('/')
c_balance = df[c]
coin_total = c_balance['total']
if c in ['USDT', 'BTC']:
c_balance['total_{}'.format(c.lower())] = coin_total
if 'USDT' in c:
c_balance['total_btc'] = coin_total / btc_usdt_last
else:
c_balance['total_usdt'] = btc_usdt_last * c_balance['total_btc']
else:
ticker = tickers['{}/BTC'.format(c)]
c_balance['total_btc'] = coin_total * ticker['last']
c_balance['total_usdt'] = c_balance['total_btc'] * btc_usdt_last
result = result.append(c_balance)
else:
result = df
if all((coin is not None, str(coin).upper() in self.currencies, str(coin).upper() in result.T)):
result = result.T[str(coin).upper()]
return result.fillna(0.0)
@checker
def get_aggregated_trades(self, symbol, from_id=None, start=None, end=None, limit=500):
"""
Get aggregated trades for a symbol.
:param str symbol: trade pair
:param int from_id: get trades from specific id
:param int start: unix datetime starting date
:param int end: unix datetime ending date
:param int limit: row limits, max. 500 (default 500)
:return pd.DataFrame: aggregated trades as a Pandas DataFrame
"""
url = 'https://api.binance.com/api/v1/aggTrades?symbol={}'.format(symbol.replace('/', '').upper())
if from_id and isinstance(from_id, int):
url += '&fromId={:d}'.format(from_id)
else:
if start and isinstance(start, (int, float)):
start = int(start)
url += '&startTime={:d}'.format(start)
if end and isinstance(end, (int, float)):
end = int(end)
url += '&startTime={:d}'.format(end)
if limit != 500:
url += '&limit={:d}'.format(limit)
try:
response = req.get(url)
except (req.RequestException,) as err:
print(str(err))
return None
if response.ok:
raw = response.json()
cols = ['price', 'amount', 'first_id', 'last_id', 'timestamp']
df = pd.DataFrame([[r['p'], r['q'], r['f'], r['l'], r['T']] for r in raw], columns=cols).dropna(axis=1,
how='any')
df['timestamp'] = pd.to_datetime(df['timestamp'], unit='ms')
df['price'] = df['price'].apply(float)
df['amount'] = df['amount'].apply(float)
df['first_id'] = df['first_id'].apply(int)
df['last_id'] = df['last_id'].apply(int)
df.set_index('timestamp', inplace=True)
grouped = pd.DataFrame()
grouped['price'] = df.price.groupby(pd.Grouper(freq='1s')).mean().apply(round, args=(8,))
grouped['amount'] = df.amount.groupby(pd.Grouper(freq='1s')).mean().apply(round, args=(3,))
grouped['total'] = (grouped['price'] * grouped['amount']).apply(round, args=(8,))
return grouped.dropna(axis=1, how='all').bfill()
else:
response.raise_for_status()
@checker
def get_trades(self, symbol, limit=100, side=None, from_id=None):
"""
Get last symbol trades.
:param str symbol: a valid trade pair.
:param int limit: result rows limit.
:param str side: accepted values: "buy", "sell", None.
:param int from_id: id where to start data retrieval.
:return pd.DataFrame: last symbol trades.
"""
params = dict()
if from_id and isinstance(from_id, int):
params = dict(fromId=from_id)
if len(params):
raw = self.fetch_trades(symbol, limit=limit, params=params)
else:
raw = self.fetch_trades(symbol, limit=limit)
result = self._parse_trades(raw, side)
return result
@staticmethod
def _parse_trades(raw, side=None):
"""
Parse trades data.
:param list raw: raw data from a trades like query to server.
:param str side: accepted values: "buy", "sell", None.
:return pd.DataFrame: parsed trades data.
"""
side = str(side).lower() if side and str(side).lower() in ['buy', 'sell'] else None
data = [{k: v for k, v in r.items() if k not in ['info', 'type']} for r in raw]
trades = pd.DataFrame(data)
ts = trades.pop('timestamp') / 1000
trades.drop(['symbol', 'datetime'], axis=1, inplace=True)
trades['datetime'] = pd.to_datetime(ts.apply(int), unit='s')
fee = trades.pop('fee')
if fee.any():
fee_currency = pd.Series(fee.apply(lambda v: v['currency']), index=trades.index.values, name='fee_currency')
trades['fee_currency'] = fee_currency
trades['fee_percent'] = trades.T.apply(lambda v: 0.05 if 'BNB' in v['fee_currency'] else 0.1).T
trades['fee_base'] = trades['fee_percent'] / 100. * trades['cost']
trades['total'] = trades.T.apply(
lambda v: v['cost'] - v['fee_base'] if v['side'] in 'sell' else v['cost'] + v['fee_base']).T
else:
trades = trades.drop(['takerOrMaker', 'order'], axis=1)
if side and side.lower() in ['buy', 'sell']:
trades = trades.query('side == "{}"'.format(side.lower()))
return trades.set_index('id')
@checker
def get_user_trades(self, symbol, limit=100, side=None):
"""
Get last user trades for a symbol.
:param str symbol: a valid trade pair
:param int limit: result rows limit
:param str side: accepted values: "buy", "sell", None
:return pd.DataFrame: last user trades for a symbol
"""
try:
raw = self.fetch_my_trades(symbol, limit=limit)
except (apierr.RequestTimeout,) as err:
print(str(err))
return None
return self._parse_trades(raw=raw, side=side) if raw else pd.DataFrame()
@checker
def get_profit(self, coin):
"""
Returns current profit for a currency and its weighted average buy cost
:param str coin: a valid currency to use at profit and cost calc
:return: current profit and weighted average buy cost as a tuple
"""
if str(coin).upper() not in self.currencies:
print('[ERROR] {} is not a valid currency.'.format(str(coin).upper()))
sys.exit(1)
else:
coin = str(coin).upper()
btc_symbol = '{}/BTC'.format(coin)
balance = self.get_balances(coin=coin, detailed=True)
if all((balance is not None, not balance.empty, balance['total_btc'] > 0.0)):
real_cost = self.get_weighted_average_cost(symbol=btc_symbol)
coin_ticker = self.get_ticker(btc_symbol)['last']
return cnum((coin_ticker * balance.total) - (real_cost * balance['total']), 8), cnum(real_cost, 8)
else:
return 0.0, 0.0
@checker
def get_weighted_average_cost(self, symbol):
"""
Get weighted average buy cost for a symbol.
:param str symbol: a valid slash separated trade pair
:return float: weighted average cost (0.0 if currency not in balance)
"""
quote, base = str(symbol).upper().split('/')
balances = self.get_balances(coin=quote, detailed=True)
if all((balances is not None, not balances.empty)):
if balances['total_btc'] >= 0.001:
last_symbol_user_trades = self.get_user_trades(symbol, side='buy')
last_symbol_user_trades.sort_values(by='datetime', ascending=False, inplace=True)
if not is_empty(last_symbol_user_trades):
amounts = list()
balance = balances['total']
for amount in last_symbol_user_trades.query('side == "buy"').amount:
if balance - amount <= 0.0:
amounts.append(balance)
break
else:
balance -= amount
amounts.append(amount)
prices = last_symbol_user_trades.price.values[:len(amounts)]
return cnum(np.average(prices, weights=amounts), 8)
else:
print(' - [ERROR] Balance returned by server is not valid.')
else:
print(' - [ERROR] Not enough balance returned by server is not valid.')
else:
print(' - [ERROR] Balance returned by server is not valid.')
return -1.0
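    # Worked illustration (added comment): if the remaining balance is covered by two
    # buys of 2.0 units at 0.010 BTC and 1.0 unit at 0.013 BTC, then
    # np.average([0.010, 0.013], weights=[2.0, 1.0]) = (2*0.010 + 1*0.013) / 3 = 0.011,
    # i.e. a weighted average cost of 0.011 BTC per unit (rounded to 8 places on return).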
@checker
def get_depth(self, symbol, limit=5, split=False):
"""
Get order book data for a symbol.
        :param str symbol: a valid slash separated trade pair
        :param int limit: result rows limit
        :param bool split: if True, return asks and bids as two separate DataFrames
        :return pd.DataFrame: data frame with depth rows for a symbol.
"""
try:
raw = self.fetch_order_book(symbol, limit=limit)
except (apierr.RequestTimeout,) as err:
print(str(err))
return None
data = pd.DataFrame(raw)
if split:
return data['asks'], data['bids']
else:
rows = pd.DataFrame(data['asks'] + data['bids'])
return pd.DataFrame(sum(rows.values.tolist(), []), columns=['ask', 'ask_amount', 'bid', 'bid_amount'])
@checker
def get_asks(self, symbol, limit=10):
"""
Return asks data from order book for a symbol.
:param str symbol: a valid slash separated trade pair
:param int limit: result rows limit
:return pd.Series: asks data from order book for a symbol
"""
try:
raw = self.fetch_order_book(symbol, limit=int(limit))
except (apierr.RequestTimeout,) as err:
print(str(err))
return None
return pd.DataFrame(raw['asks'], columns=['ask', 'amount'])
@checker
def get_bids(self, symbol, limit=10):
"""
Return bids data from order book for a symbol.
:param str symbol: a valid slash separated trade pair
:param int limit: result rows limit
:return pd.Series: bids data from order book for a symbol
"""
try:
raw = self.fetch_order_book(symbol, limit=limit)
except (apierr.RequestTimeout, apierr.InvalidNonce) as err:
print(str(err))
return None
return pd.DataFrame(raw['bids'], columns=['bid', 'amount'])
@checker
def market_buy(self, symbol, amount='max'):
"""
Place a market buy order.
:param str symbol: a valid trade pair symbol
:param str, float amount: quote amount to buy or sell or 'max' get the max amount from balance
:return dict: order info
"""
try:
order_data = self.create_market_buy_order(symbol, amount=amount)
except (apierr.RequestTimeout, apierr.InvalidNonce, apierr.InsufficientFunds) as err:
print(str(err))
return None
return order_data
@checker
def market_sell(self, symbol, amount='max'):
"""
Place a market sell order.
:param str symbol: a valid trade pair symbol
:param str, float amount: quote amount to buy or sell or 'max' get the max amount from balance
:return dict: order info
"""
try:
            order_data = self.create_market_sell_order(symbol, amount=amount)
except (apierr.RequestTimeout, apierr.InsufficientFunds, apierr.InvalidNonce) as err:
print(str(err))
return None
return order_data
@checker
def limit_buy(self, symbol, amount='max', price='ask'):
"""
Place a limit buy order.
:param str symbol: a valid trade pair symbol
:param str, float amount: quote amount to buy or sell or 'max' get the max amount from balance
:param str, float price: valid price or None for a market order
:return dict: order info
"""
base, quote = symbol.split('/')
amount = self._get_amount(quote, amount)
price = self._get_price(symbol, price)
try:
order_data = self.create_limit_buy_order(symbol, amount, price)
except (apierr.RequestTimeout,) as err:
print(str(err))
return None
return order_data
@checker
def limit_sell(self, symbol, amount='max', price='bid'):
"""
Place a limit sell order.
:param str symbol: a valid trade pair symbol
:param str, float amount: quote amount to buy or sell or 'max' get the max amount from balance
:param str, float price: valid price or None for a market order
:return dict: order info
"""
base, quote = symbol.split('/')
amount = self._get_amount(base, amount)
price = self._get_price(symbol, price)
try:
order_data = self.create_limit_sell_order(symbol, amount, price)
except (apierr.RequestTimeout,) as err:
print(str(err))
return None
return order_data
@checker
def download_trade_history(self, symbol, limit=500, start=None, end=None, from_id=None):
"""
        FIXME: not fully implemented.
        :param str symbol: a valid slash separated trade pair
        :param int limit: result rows limit
        :param int start: unix datetime starting date
        :param int end: unix datetime ending date
        :param int from_id: get trades from a specific id onwards
"""
if from_id:
from_id = from_id
start = int(start) if start else int(tm.time() * 1000.0)
end = int(end) if end else int(tm.time() * 1000.0)
trades = self.get_aggregated_trades(symbol, from_id, start, end, limit) # type: pd.DataFrame
if not trades.empty:
filename = '{}_trades.csv'.format(symbol.lower())
df = pd.read_csv(filename)
df['date'] = pd.to_datetime(df['date'])
df = df.set_index('date')
d = pd.concat([df, trades]).drop_duplicates()
if not d.empty:
d.to_csv(filename, index_label='date', mode='w', header=True)
get_orderbook = get_depth
get_book = get_depth
get_obook = get_depth
if __name__ == '__main__':
api = Panance()
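    # Illustrative calls added for demonstration; they only use methods defined above,
    # and 'BTC/USDT' is an example symbol rather than a value from the original module.
    # Both calls are read-only market-data requests.
    print(api.get_ticker('BTC/USDT'))
    print(api.get_depth('BTC/USDT', limit=5))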
| [
"pandas.read_csv",
"pandas.Grouper",
"inspect.signature",
"time.sleep",
"sys.exit",
"sys.path.append",
"pandas.to_datetime",
"pandas.DataFrame",
"panance.utils.is_empty",
"numpy.average",
"pandas.DatetimeIndex",
"requests.get",
"os.path.dirname",
"panance.utils.cnum",
"time.time",
"warnings.filterwarnings",
"os.getenv",
"functools.partial",
"pandas.concat",
"ccxt.Exchange.milliseconds"
] | [((461, 486), 'sys.path.append', 'sys.path.append', (['BASE_DIR'], {}), '(BASE_DIR)\n', (476, 486), False, 'import sys\n'), ((521, 585), 'warnings.filterwarnings', 'warnings.filterwarnings', ([], {'action': '"""ignore"""', 'category': 'FutureWarning'}), "(action='ignore', category=FutureWarning)\n", (544, 585), False, 'import warnings\n'), ((433, 458), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (448, 458), False, 'import os\n'), ((1427, 1448), 'inspect.signature', 'inspect.signature', (['fn'], {}), '(fn)\n', (1444, 1448), False, 'import inspect\n'), ((11102, 11136), 'functools.partial', 'partial', (['pd.to_datetime'], {'unit': '"""ms"""'}), "(pd.to_datetime, unit='ms')\n", (11109, 11136), False, 'from functools import partial\n'), ((11226, 11281), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['date'], {'name': '"""date"""', 'tz': '"""Europe/Madrid"""'}), "(date, name='date', tz='Europe/Madrid')\n", (11242, 11281), True, 'import pandas as pd\n'), ((11296, 11348), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols[1:]', 'index': 'dt_index'}), '(data, columns=cols[1:], index=dt_index)\n', (11308, 11348), True, 'import pandas as pd\n'), ((12057, 12071), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (12069, 12071), True, 'import pandas as pd\n'), ((17454, 17472), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (17466, 17472), True, 'import pandas as pd\n'), ((22277, 22294), 'pandas.DataFrame', 'pd.DataFrame', (['raw'], {}), '(raw)\n', (22289, 22294), True, 'import pandas as pd\n'), ((23053, 23105), 'pandas.DataFrame', 'pd.DataFrame', (["raw['asks']"], {'columns': "['ask', 'amount']"}), "(raw['asks'], columns=['ask', 'amount'])\n", (23065, 23105), True, 'import pandas as pd\n'), ((23625, 23677), 'pandas.DataFrame', 'pd.DataFrame', (["raw['bids']"], {'columns': "['bid', 'amount']"}), "(raw['bids'], columns=['bid', 'amount'])\n", (23637, 23677), True, 'import pandas as pd\n'), ((10124, 10151), 'pandas.DataFrame', 'pd.DataFrame', (['{symbol: raw}'], {}), '({symbol: raw})\n', (10136, 10151), True, 'import pandas as pd\n'), ((14898, 14910), 'requests.get', 'req.get', (['url'], {}), '(url)\n', (14905, 14910), True, 'import requests as req\n'), ((15409, 15451), 'pandas.to_datetime', 'pd.to_datetime', (["df['timestamp']"], {'unit': '"""ms"""'}), "(df['timestamp'], unit='ms')\n", (15423, 15451), True, 'import pandas as pd\n'), ((15773, 15787), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (15785, 15787), True, 'import pandas as pd\n'), ((19066, 19080), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (19078, 19080), True, 'import pandas as pd\n'), ((19526, 19537), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (19534, 19537), False, 'import sys\n'), ((22393, 22434), 'pandas.DataFrame', 'pd.DataFrame', (["(data['asks'] + data['bids'])"], {}), "(data['asks'] + data['bids'])\n", (22405, 22434), True, 'import pandas as pd\n'), ((26982, 27003), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (26993, 27003), True, 'import pandas as pd\n'), ((27029, 27055), 'pandas.to_datetime', 'pd.to_datetime', (["df['date']"], {}), "(df['date'])\n", (27043, 27055), True, 'import pandas as pd\n'), ((6990, 7018), 'ccxt.Exchange.milliseconds', 'ccxt.Exchange.milliseconds', ([], {}), '()\n', (7016, 7018), False, 'import ccxt\n'), ((8368, 8391), 'pandas.DataFrame', 'pd.DataFrame', (['dict_data'], {}), '(dict_data)\n', (8380, 8391), True, 'import pandas as pd\n'), ((10913, 10924), 'time.sleep', 'tm.sleep', (['(3)'], {}), 
'(3)\n', (10921, 10924), True, 'import time as tm\n'), ((11037, 11049), 'time.sleep', 'tm.sleep', (['(15)'], {}), '(15)\n', (11045, 11049), True, 'import time as tm\n'), ((12701, 12712), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (12709, 12712), False, 'import sys\n'), ((19937, 20004), 'panance.utils.cnum', 'cnum', (["(coin_ticker * balance.total - real_cost * balance['total'])", '(8)'], {}), "(coin_ticker * balance.total - real_cost * balance['total'], 8)\n", (19941, 20004), False, 'from panance.utils import cnum, is_empty\n'), ((20010, 20028), 'panance.utils.cnum', 'cnum', (['real_cost', '(8)'], {}), '(real_cost, 8)\n', (20014, 20028), False, 'from panance.utils import cnum, is_empty\n'), ((15161, 15248), 'pandas.DataFrame', 'pd.DataFrame', (["[[r['p'], r['q'], r['f'], r['l'], r['T']] for r in raw]"], {'columns': 'cols'}), "([[r['p'], r['q'], r['f'], r['l'], r['T']] for r in raw],\n columns=cols)\n", (15173, 15248), True, 'import pandas as pd\n'), ((20785, 20818), 'panance.utils.is_empty', 'is_empty', (['last_symbol_user_trades'], {}), '(last_symbol_user_trades)\n', (20793, 20818), False, 'from panance.utils import cnum, is_empty\n'), ((26692, 26701), 'time.time', 'tm.time', ([], {}), '()\n', (26699, 26701), True, 'import time as tm\n'), ((26751, 26760), 'time.time', 'tm.time', ([], {}), '()\n', (26758, 26760), True, 'import time as tm\n'), ((27110, 27133), 'pandas.concat', 'pd.concat', (['[df, trades]'], {}), '([df, trades])\n', (27119, 27133), True, 'import pandas as pd\n'), ((3736, 3760), 'os.getenv', 'os.getenv', (['"""BINANCE_KEY"""'], {}), "('BINANCE_KEY')\n", (3745, 3760), False, 'import os\n'), ((3769, 3796), 'os.getenv', 'os.getenv', (['"""BINANCE_SECRET"""'], {}), "('BINANCE_SECRET')\n", (3778, 3796), False, 'import os\n'), ((3819, 3832), 'panance.utils.is_empty', 'is_empty', (['key'], {}), '(key)\n', (3827, 3832), False, 'from panance.utils import cnum, is_empty\n'), ((3841, 3857), 'panance.utils.is_empty', 'is_empty', (['secret'], {}), '(secret)\n', (3849, 3857), False, 'from panance.utils import cnum, is_empty\n'), ((11996, 12013), 'pandas.DataFrame', 'pd.DataFrame', (['raw'], {}), '(raw)\n', (12008, 12013), True, 'import pandas as pd\n'), ((21375, 21410), 'numpy.average', 'np.average', (['prices'], {'weights': 'amounts'}), '(prices, weights=amounts)\n', (21385, 21410), True, 'import numpy as np\n'), ((15836, 15857), 'pandas.Grouper', 'pd.Grouper', ([], {'freq': '"""1s"""'}), "(freq='1s')\n", (15846, 15857), True, 'import pandas as pd\n'), ((15940, 15961), 'pandas.Grouper', 'pd.Grouper', ([], {'freq': '"""1s"""'}), "(freq='1s')\n", (15950, 15961), True, 'import pandas as pd\n')] |
from bs4 import BeautifulSoup
import requests
import json
#url = raw_input("")
class RCGItem(object):
def __init__(self, *args, **kwargs):
self.attributes = ['title', 'description', 'price', 'url', 'status', 'is_new']
#loop through attributes and set them based on what's passed in
for attr in self.attributes:
setattr(self, attr, kwargs.get(attr))
def to_array(self):
results = []
for attr in self.attributes:
results.append(getattr(self, attr))
return results
def is_sold(self):
return self.status.lower() == 'sold'
def is_wanted(self):
return self.status.lower() == 'wanted'
def is_for_sale(self):
return self.status.lower() == 'for sale'
class ForumRunner(object):
def __init__(self, *args, **kwargs):
"""
        Store the forum path and search strings passed in via kwargs; the base URL is fixed to rcgroups.com.
"""
self.base_url = "http://www.rcgroups.com/"
self.search_strings = kwargs.get('search_strings')
self.url = kwargs.get('url')
def run(self):
"""
Runs the script, getting the information out of the forum
"""
#get the page, get the text from the page, and then make a beautiful soup object, which will help parse this whole beast
r = requests.get(self.base_url + self.url)
data = r.text
        soup = BeautifulSoup(data, 'html.parser')  # explicit parser avoids bs4's default-parser warning
#get the main table and then get every row within it
body = soup.find('table', {'class': 'tborder-rcgplus'})
rows = body.find_all('tr')
#this will contain all results for the page that we hit
items = []
        #loop through every row that was found in the search using Beautiful Soup
for row in rows:
#initiate some empty variables that will be saved back to the RCGItem
price = 0
title = ''
description = ''
url = ''
status = ''
#find LINK information, including the URL and text
for link in row.find_all('a', {"class": "fsw-title"}):
title = link.text.strip()
url = self.base_url + link.attrs['href'].strip()
description = link.attrs.get('data-tip').strip()
#find FOR SALE STATUS
for link in row.find_all('div', {"class": "fsw-category"}):
status = link.find('span').text.strip()
#find price
for link in row.find_all('div', {"class": "fsw-price-text"}):
price = link.text.strip()
result = RCGItem(
title = title,
url = url,
description = description,
price = price,
status = status,
is_new = False
)
#the first element is often blank
if result.title:
items.append(result)
self.results = items
return items
def search(self, search_strings):
"""
given an array of strings to search for, search them against the things found on the front page of RCClassifieds
"""
search_results = []
#loop through the strings, because that will give us the lowest number of loops
for sstring in search_strings:
#loop through items in self.results
for item in self.results:
if item.is_for_sale():
#check to see if the search string is in this particular item
if sstring.lower() in item.title.lower():
#if we find a result, stick it on to the total results
search_results.append(item)
#since we may have found duplicates, pull the duplicates out
search_results = list(set(search_results))
#send the search results back to the home page
for result in search_results:
is_new = self.save_search(result.url)
result.is_new = is_new
return search_results
def save_search(self, url):
"""
        This records which URLs have already been seen. It saves any new ones so that notifications aren't sent to you over and over again.
"""
all_urls = []
urls = None
is_new = False
#open the local json file containing the list of URLS that have already been located
with open('urls.json') as data_file:
urls = []
#try to load in json list
try:
urls = json.load(data_file)
#this essentially assumes that the json list is blank/empty and just overrides it all with a default
except ValueError:
urls = {'urls': []}
#if there are urls to grab, grab then
json_urls = urls['urls']
#check to see if the URL we are working with is already added.
#This will be the return value for the function
is_new = url not in json_urls
#do this outside of the context of the file being open
if is_new:
#plop the new url down on the end of the array
#take the new array, remove any duplicates anywhere
json_urls.append(url)
urls['urls'] = list(set(json_urls))
#write the whole thing back into the file
with open('urls.json', 'w') as outfile:
json.dump(urls, outfile)
#return whether or not we added a new URL to the list
return is_new
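
if __name__ == '__main__':
    # Illustrative usage sketch (added): the forum path and search term below are
    # placeholders, not values taken from the original project. Note that search()
    # additionally expects a local 'urls.json' file to exist next to the script.
    runner = ForumRunner(url='forums/forumdisplay.php?f=1', search_strings=['quadcopter'])
    for found in runner.run():
        if found.is_for_sale():
            print(found.title, found.price, found.url)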
| [
"bs4.BeautifulSoup",
"json.load",
"json.dump",
"requests.get"
] | [((1318, 1356), 'requests.get', 'requests.get', (['(self.base_url + self.url)'], {}), '(self.base_url + self.url)\n', (1330, 1356), False, 'import requests\n'), ((1394, 1413), 'bs4.BeautifulSoup', 'BeautifulSoup', (['data'], {}), '(data)\n', (1407, 1413), False, 'from bs4 import BeautifulSoup\n'), ((4755, 4775), 'json.load', 'json.load', (['data_file'], {}), '(data_file)\n', (4764, 4775), False, 'import json\n'), ((5648, 5672), 'json.dump', 'json.dump', (['urls', 'outfile'], {}), '(urls, outfile)\n', (5657, 5672), False, 'import json\n')] |
#
# This file is part of pysnmp-apps software.
#
# Copyright (c) 2005-2017, <NAME> <<EMAIL>>
# License: http://snmplabs.com/pysnmp/license.html
#
from pysnmp_apps.cli import base
from pysnmp import error
def getUsage():
return """\
SNMP message processing options:
-v VERSION SNMP version (1|2c|3)
"""
# Scanner
class MPScannerMixIn:
def t_version(self, s):
r' -v '
self.rv.append(base.ConfigToken('version'))
# Parser
class MPParserMixIn:
def p_mpSpec(self, args):
'''
Option ::= SnmpVersionId
SnmpVersionId ::= version string
SnmpVersionId ::= version whitespace string
'''
# Generator
class __MPGenerator(base.GeneratorTemplate):
_versionIdMap = {
'1': 0,
'2': 1,
'2c': 1,
'3': 3
}
def n_SnmpVersionId(self, cbCtx, node):
snmpEngine, ctx = cbCtx
if len(node) > 2:
versionId = node[2].attr
else:
versionId = node[1].attr
if versionId in self._versionIdMap:
ctx['versionId'] = self._versionIdMap[versionId]
else:
raise error.PySnmpError('Bad version value %s' % versionId)
def generator(cbCtx, ast):
snmpEngine, ctx = cbCtx
__MPGenerator().preorder((snmpEngine, ctx), ast)
# Commit defaults
if 'versionId' not in ctx:
ctx['versionId'] = 3
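
# Example (added comment): the CLI fragment " -v 2c " is tokenized by
# MPScannerMixIn.t_version, parsed via p_mpSpec, and mapped by the generator to
# versionId 1 (SNMPv2c); when -v is omitted, versionId defaults to 3 (SNMPv3).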
| [
"pysnmp_apps.cli.base.ConfigToken",
"pysnmp.error.PySnmpError"
] | [((424, 451), 'pysnmp_apps.cli.base.ConfigToken', 'base.ConfigToken', (['"""version"""'], {}), "('version')\n", (440, 451), False, 'from pysnmp_apps.cli import base\n'), ((1149, 1202), 'pysnmp.error.PySnmpError', 'error.PySnmpError', (["('Bad version value %s' % versionId)"], {}), "('Bad version value %s' % versionId)\n", (1166, 1202), False, 'from pysnmp import error\n')] |
import logging
import time
import typing
from http import HTTPStatus
from intezer_sdk import consts
from intezer_sdk import errors
from intezer_sdk.api import IntezerApi
from intezer_sdk.api import get_global_api
from intezer_sdk.consts import CodeItemType
logger = logging.getLogger(__name__)
class Analysis(object):
def __init__(self,
file_path: str = None,
file_hash: str = None,
file_stream: typing.BinaryIO = None,
disable_dynamic_unpacking: bool = None,
disable_static_unpacking: bool = None,
api: IntezerApi = None,
file_name: str = None,
code_item_type: str = None) -> None:
if [file_path, file_hash, file_stream].count(None) != 2:
raise ValueError('Choose between file hash, file stream or file path analysis')
if file_hash and code_item_type:
logger.warning('Analyze by hash ignores code item type')
if code_item_type and code_item_type not in [c.value for c in CodeItemType]:
raise ValueError('Invalid code item type, possible code item types are: file, memory module')
self.status = None
self.analysis_id = None
self._file_hash = file_hash
self._disable_dynamic_unpacking = disable_dynamic_unpacking
self._disable_static_unpacking = disable_static_unpacking
self._file_path = file_path
self._file_stream = file_stream
self._file_name = file_name
self._code_item_type = code_item_type
self._report = None
self._api = api or get_global_api()
def send(self, wait: bool = False) -> None:
if self.analysis_id:
raise errors.AnalysisHasAlreadyBeenSent()
if self._file_hash:
self.analysis_id = self._api.analyze_by_hash(self._file_hash,
self._disable_dynamic_unpacking,
self._disable_static_unpacking)
else:
self.analysis_id = self._api.analyze_by_file(self._file_path,
self._file_stream,
disable_dynamic_unpacking=self._disable_dynamic_unpacking,
disable_static_unpacking=self._disable_static_unpacking,
file_name=self._file_name,
code_item_type=self._code_item_type)
self.status = consts.AnalysisStatusCode.CREATED
if wait:
self.wait_for_completion()
def wait_for_completion(self):
if self._is_analysis_running():
status_code = self.check_status()
while status_code != consts.AnalysisStatusCode.FINISH:
time.sleep(consts.CHECK_STATUS_INTERVAL)
status_code = self.check_status()
def check_status(self):
if not self._is_analysis_running():
            raise errors.IntezerError('Analysis is not running')
response = self._api.get_analysis_response(self.analysis_id)
if response.status_code == HTTPStatus.OK:
self._report = response.json()['result']
self.status = consts.AnalysisStatusCode.FINISH
elif response.status_code == HTTPStatus.ACCEPTED:
self.status = consts.AnalysisStatusCode.IN_PROGRESS
else:
raise errors.IntezerError('Error in response status code:{}'.format(response.status_code))
return self.status
def result(self):
if self._is_analysis_running():
raise errors.AnalysisIsStillRunning()
if not self._report:
raise errors.ReportDoesNotExistError()
return self._report
def set_report(self, report: dict):
if not report:
raise ValueError('Report can not be None')
self.analysis_id = report['analysis_id']
self._report = report
self.status = consts.AnalysisStatusCode.FINISH
def _is_analysis_running(self):
return self.status in (consts.AnalysisStatusCode.CREATED, consts.AnalysisStatusCode.IN_PROGRESS)
def get_latest_analysis(file_hash: str, api: IntezerApi = None) -> typing.Optional[Analysis]:
api = api or get_global_api()
analysis_report = api.get_latest_analysis(file_hash)
if not analysis_report:
return None
analysis = Analysis(file_hash=file_hash, api=api)
analysis.set_report(analysis_report)
return analysis
| [
"logging.getLogger",
"intezer_sdk.api.get_global_api",
"intezer_sdk.errors.ReportDoesNotExistError",
"intezer_sdk.errors.IntezerError",
"time.sleep",
"intezer_sdk.errors.AnalysisHasAlreadyBeenSent",
"intezer_sdk.errors.AnalysisIsStillRunning"
] | [((268, 295), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (285, 295), False, 'import logging\n'), ((4410, 4426), 'intezer_sdk.api.get_global_api', 'get_global_api', ([], {}), '()\n', (4424, 4426), False, 'from intezer_sdk.api import get_global_api\n'), ((1630, 1646), 'intezer_sdk.api.get_global_api', 'get_global_api', ([], {}), '()\n', (1644, 1646), False, 'from intezer_sdk.api import get_global_api\n'), ((1743, 1778), 'intezer_sdk.errors.AnalysisHasAlreadyBeenSent', 'errors.AnalysisHasAlreadyBeenSent', ([], {}), '()\n', (1776, 1778), False, 'from intezer_sdk import errors\n'), ((3135, 3179), 'intezer_sdk.errors.IntezerError', 'errors.IntezerError', (['"""Analysis dont running"""'], {}), "('Analysis dont running')\n", (3154, 3179), False, 'from intezer_sdk import errors\n'), ((3760, 3791), 'intezer_sdk.errors.AnalysisIsStillRunning', 'errors.AnalysisIsStillRunning', ([], {}), '()\n', (3789, 3791), False, 'from intezer_sdk import errors\n'), ((3839, 3871), 'intezer_sdk.errors.ReportDoesNotExistError', 'errors.ReportDoesNotExistError', ([], {}), '()\n', (3869, 3871), False, 'from intezer_sdk import errors\n'), ((2953, 2993), 'time.sleep', 'time.sleep', (['consts.CHECK_STATUS_INTERVAL'], {}), '(consts.CHECK_STATUS_INTERVAL)\n', (2963, 2993), False, 'import time\n')] |
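A hedged usage sketch of the Analysis workflow defined in the record above. The import path, the sample file name, and the placeholder hash are assumptions, and a configured global API client (see get_global_api) is presumed to exist elsewhere.

# Hedged usage sketch (assumed import path and placeholder inputs).
from intezer_sdk.analysis import Analysis, get_latest_analysis

analysis = Analysis(file_path='sample.exe')  # exactly one of file path / hash / stream
analysis.send(wait=True)                    # polls until AnalysisStatusCode.FINISH
print(analysis.result())                    # the finished report dict

cached = get_latest_analysis(file_hash='<sha256>')  # reuse an existing report by hash
if cached is not None:
    print(cached.result())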
import boto3
import json
import logging
import os
import socket
import sys
try:
import agent
import common
import fargate
from custom_logger import JsonFormatter
from plans import FargateRisk2Plan
from plans import SSMRisk2Plan
from risk import Finding
from notify import PublishEvent
from notify import PublishRemediation
from pcap import Analyze
except ImportError:
from lambda_handler import agent
from lambda_handler import common
from lambda_handler import fargate
from lambda_handler.custom_logger import JsonFormatter
from lambda_handler.plans import FargateRisk2Plan
from lambda_handler.plans import SSMRisk2Plan
from lambda_handler.risk import Finding
from lambda_handler.notify import PublishEvent
from lambda_handler.notify import PublishRemediation
from lambda_handler.pcap import Analyze
def setup_logging():
logger = logging.getLogger()
for h in logger.handlers:
logger.removeHandler(h)
h = logging.StreamHandler(sys.stdout)
h.setFormatter(JsonFormatter(extra={"hostname": socket.gethostname()}))
logger.addHandler(h)
logger.setLevel(logging.DEBUG)
return logger
def protect(event, context):
logger = setup_logging()
success = False
logger.info("Running the protect phase.")
region_name = event["detail"]["region"]
boto_session = boto3.session.Session(region_name=region_name)
ecs_client = boto_session.client("ecs")
tags = event["detail"]["resource"]["instanceDetails"].get("tags")
if tags:
# Interrogate ECS Api for additional targeting information.
clusters = fargate.get_all_clusters(ecs_client)
task_definitions = fargate.get_task_definitions_for_tag(ecs_client, tags)
running_tasks = fargate.get_running_tasks_for_definitions(
ecs_client, clusters, task_definitions
)
# Interrogate SSM for the instances associated with this tag.
ssm_instance_ids = agent.get_instance_ids_for_tags(boto_session, tags)
# Enrich the guardDuty event with information about the running tasks for the tags
event["detail"]["resource"]["fargateTasks"] = running_tasks
event["detail"]["resource"]["ssmInstanceIds"] = ssm_instance_ids
        # Need to deserialize and reserialize to convert Python datetime objects.
event = json.loads(json.dumps(event, default=common.default_serializer))
# Increase the number of running instances in the fargate service associated
# Work on tasks that were only part of the incident at the time of reporting.
# Can't isolate the eni-xxxx involved but we can remove from DNS!
fargate_responder = FargateRisk2Plan(
event["detail"]["remediation"]["risk"], boto_session
)
success = fargate_responder.run(event)
event["detail"]["remediation"]["success"] = success
else:
raise ValueError("No tags were present in the event.")
return event
def detect(event, context):
logger = setup_logging()
logger.info("Running the detect phase.")
event["detail"]["remediation"] = {}
# Map the risk in this stage using our risk mapper.
risk_level = Finding(event).risk_level()
event["detail"]["remediation"]["risk"] = risk_level
return event
def low_respond(event, context):
logger = setup_logging()
logger.info("Running low response.")
event["detail"]["remediation"]["evidence"] = {}
event["detail"]["remediation"]["evidence"]["artifact_count"] = 0
return event
def medium_respond(event, context):
logger = setup_logging()
logger.info("Running medium response.")
event["detail"]["remediation"]["evidence"] = {}
event["detail"]["remediation"]["evidence"]["artifact_count"] = 0
return event
def high_respond(event, context):
logger = setup_logging()
logger.info("Running medium response.")
event["detail"]["remediation"]["evidence"] = {}
event["detail"]["remediation"]["evidence"]["artifact_count"] = 0
return event
def maximum_respond(event, context):
logger = setup_logging()
logger.info("Running maximum response.")
event["detail"]["remediation"]["evidence"] = {}
event["detail"]["remediation"]["evidence"]["artifact_count"] = 0
event["detail"]["remediation"]["success"] = True
# Use the guardDuty ID as a means of containing all evidence around the incident.
evidence_info = dict(
bucket=os.getenv("EVIDENCE_BUCKET", "public.demo.reinvent2019"),
case_folder=event["detail"]["id"],
)
# Take our risk levels and map them to discrete actions in code.
ssm_responder = SSMRisk2Plan(
risk_level=event["detail"]["remediation"]["risk"],
boto_session=boto3.session.Session(),
evidence_info=evidence_info,
credentials=common.get_session_token(),
)
# Execute our pre-defined plans as ssm_runcommand and wait.
evidence = ssm_responder.run(
instance_ids=event["detail"]["resource"]["ssmInstanceIds"]
)
# Enrich our state with the number of evidence items gathered.
event["detail"]["remediation"]["evidence"]["artifact_count"] = len(evidence)
event["detail"]["remediation"]["evidence"]["objects"] = evidence
return event
def recover(event, context):
logger = setup_logging()
logger.info("Running the recover phase. ")
# Stop all the containers we have been working on.
tasks = fargate.event_to_task_arn(event)
boto_session = boto3.session.Session()
# Stop the tasks now that we have extracted the evidence.
    # In low or medium risk scenarios we might leave these running for further investigation.
for task_dict in tasks:
fargate.stop_task(boto_session, task_dict)
return event
def process_evidence(event, context):
logger = setup_logging()
logger.info("Processing the evidence.")
# Check to see if we have evidence to process.
if event["detail"]["remediation"]["evidence"]["objects"] != []:
s3_bucket = os.getenv("EVIDENCE_BUCKET", "public.demo.reinvent2019")
logger.info(f"Processing evidence from: {s3_bucket}")
# Process all of our packet captures to VPC-Flowlike json and parquet.
for object_key in event["detail"]["remediation"]["evidence"]["objects"]:
try:
logger.info(f"Attempting to process: {object_key}")
full_path = f"s3://{s3_bucket}/{object_key}"
logger.info(f"Full path to file: {full_path}")
a = Analyze(full_path)
a.get_geoip_database()
logger.info(f"Geolite database retrieved.")
a.load_pcap()
extraction = a.get_extraction()
result = a.extraction_to_json(extraction)
a.json_to_parquet(result)
a.upload_all_processed()
logger.info("Uploading processed.")
except Exception as e:
logger.error(f"Could not reason about: {object_key} due to: {e}.")
return event
def notify(event, context):
logger = setup_logging()
logger.info("Sending a notification to slack.")
event = PublishEvent(event, context)
return event
def notify_complete(event, context):
logger = setup_logging()
logger.info("Sending a notification to slack.")
event = PublishRemediation(event, context)
return event
| [
"logging.getLogger",
"lambda_handler.fargate.get_task_definitions_for_tag",
"lambda_handler.fargate.stop_task",
"logging.StreamHandler",
"boto3.session.Session",
"os.getenv",
"lambda_handler.agent.get_instance_ids_for_tags",
"lambda_handler.fargate.event_to_task_arn",
"lambda_handler.risk.Finding",
"json.dumps",
"lambda_handler.fargate.get_running_tasks_for_definitions",
"lambda_handler.common.get_session_token",
"lambda_handler.pcap.Analyze",
"lambda_handler.notify.PublishRemediation",
"lambda_handler.fargate.get_all_clusters",
"lambda_handler.plans.FargateRisk2Plan",
"lambda_handler.notify.PublishEvent",
"socket.gethostname"
] | [((920, 939), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (937, 939), False, 'import logging\n'), ((1010, 1043), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (1031, 1043), False, 'import logging\n'), ((1387, 1433), 'boto3.session.Session', 'boto3.session.Session', ([], {'region_name': 'region_name'}), '(region_name=region_name)\n', (1408, 1433), False, 'import boto3\n'), ((5462, 5494), 'lambda_handler.fargate.event_to_task_arn', 'fargate.event_to_task_arn', (['event'], {}), '(event)\n', (5487, 5494), False, 'from lambda_handler import fargate\n'), ((5514, 5537), 'boto3.session.Session', 'boto3.session.Session', ([], {}), '()\n', (5535, 5537), False, 'import boto3\n'), ((7198, 7226), 'lambda_handler.notify.PublishEvent', 'PublishEvent', (['event', 'context'], {}), '(event, context)\n', (7210, 7226), False, 'from lambda_handler.notify import PublishEvent\n'), ((7376, 7410), 'lambda_handler.notify.PublishRemediation', 'PublishRemediation', (['event', 'context'], {}), '(event, context)\n', (7394, 7410), False, 'from lambda_handler.notify import PublishRemediation\n'), ((1650, 1686), 'lambda_handler.fargate.get_all_clusters', 'fargate.get_all_clusters', (['ecs_client'], {}), '(ecs_client)\n', (1674, 1686), False, 'from lambda_handler import fargate\n'), ((1714, 1768), 'lambda_handler.fargate.get_task_definitions_for_tag', 'fargate.get_task_definitions_for_tag', (['ecs_client', 'tags'], {}), '(ecs_client, tags)\n', (1750, 1768), False, 'from lambda_handler import fargate\n'), ((1793, 1878), 'lambda_handler.fargate.get_running_tasks_for_definitions', 'fargate.get_running_tasks_for_definitions', (['ecs_client', 'clusters', 'task_definitions'], {}), '(ecs_client, clusters,\n task_definitions)\n', (1834, 1878), False, 'from lambda_handler import fargate\n'), ((1995, 2046), 'lambda_handler.agent.get_instance_ids_for_tags', 'agent.get_instance_ids_for_tags', (['boto_session', 'tags'], {}), '(boto_session, tags)\n', (2026, 2046), False, 'from lambda_handler import agent\n'), ((2712, 2782), 'lambda_handler.plans.FargateRisk2Plan', 'FargateRisk2Plan', (["event['detail']['remediation']['risk']", 'boto_session'], {}), "(event['detail']['remediation']['risk'], boto_session)\n", (2728, 2782), False, 'from lambda_handler.plans import FargateRisk2Plan\n'), ((5729, 5771), 'lambda_handler.fargate.stop_task', 'fargate.stop_task', (['boto_session', 'task_dict'], {}), '(boto_session, task_dict)\n', (5746, 5771), False, 'from lambda_handler import fargate\n'), ((6042, 6098), 'os.getenv', 'os.getenv', (['"""EVIDENCE_BUCKET"""', '"""public.demo.reinvent2019"""'], {}), "('EVIDENCE_BUCKET', 'public.demo.reinvent2019')\n", (6051, 6098), False, 'import os\n'), ((2384, 2436), 'json.dumps', 'json.dumps', (['event'], {'default': 'common.default_serializer'}), '(event, default=common.default_serializer)\n', (2394, 2436), False, 'import json\n'), ((3219, 3233), 'lambda_handler.risk.Finding', 'Finding', (['event'], {}), '(event)\n', (3226, 3233), False, 'from lambda_handler.risk import Finding\n'), ((4474, 4530), 'os.getenv', 'os.getenv', (['"""EVIDENCE_BUCKET"""', '"""public.demo.reinvent2019"""'], {}), "('EVIDENCE_BUCKET', 'public.demo.reinvent2019')\n", (4483, 4530), False, 'import os\n'), ((4765, 4788), 'boto3.session.Session', 'boto3.session.Session', ([], {}), '()\n', (4786, 4788), False, 'import boto3\n'), ((4847, 4873), 'lambda_handler.common.get_session_token', 'common.get_session_token', ([], {}), '()\n', (4871, 4873), False, 'from lambda_handler import 
common\n'), ((6551, 6569), 'lambda_handler.pcap.Analyze', 'Analyze', (['full_path'], {}), '(full_path)\n', (6558, 6569), False, 'from lambda_handler.pcap import Analyze\n'), ((1096, 1116), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (1114, 1116), False, 'import socket\n')] |
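For orientation, a minimal sketch of the event shape the handlers above read from; all values are hypothetical and a real GuardDuty finding carries many more fields.

# Hypothetical minimal event accepted by detect()/protect() above.
minimal_event = {
    "detail": {
        "id": "sample-finding-id",
        "region": "us-west-2",
        "resource": {
            "instanceDetails": {
                "tags": [{"key": "Name", "value": "demo-service"}],
            },
        },
    },
}
# detect() adds event["detail"]["remediation"]["risk"]; protect() enriches the
# resource with fargateTasks and ssmInstanceIds; the *_respond() steps attach
# evidence counters under event["detail"]["remediation"]["evidence"].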
# encoding=utf-8
import torch
import torch.nn.functional as F
from torch import nn
from fairseq.models.nat.old.latent.predictor_module import _Predictor
from fairseq.modules import MultiheadAttention
class MarginPredict(_Predictor):
def __init__(self, args, pad, **unused):
super().__init__(args, pad)
self.predictor = LogisticModel(args, activation=nn.Sigmoid, dropout=0.2, **unused)
self.loss_class = nn.MarginRankingLoss(reduction="none")
def loss_func(self, outputs, targets, masks):
        # outputs: per-token scores; a higher score means the token does not need masking (the opposite of targets).
        # targets: 1 means the token needs masking (it differs from the reference), 0 means it matches the reference.
        # masks: False marks reference tokens, True marks tokens predicted in this round; only mask=True positions matter.
if masks is not None and not masks.any():
nll_loss = torch.tensor(0)
loss = nll_loss
return loss
        low_index = (targets & masks).nonzero(as_tuple=True)  # positions predicted this round that differ from the reference
low_feature = outputs[low_index]
        # After inversion, 1 means the token does not need masking, so its score should be higher.
        targets = (~targets) & (masks)  # positions predicted this round that match the reference
high_index = targets.nonzero(as_tuple=True)
high_feature = outputs[high_index]
        # Build pairs for the margin ranking loss: (x1, x2, y), where y=1 means x1 should score higher than x2.
pairs_x1 = []
pairs_x2 = []
        # Group the low-score features by batch row.
batch, _ = targets.shape
low_feature_list = [[] for _ in range(batch)]
for row, feature in zip(low_index[0].cpu().tolist(), low_feature):
low_feature_list[row].append(feature)
for row, feature in zip(high_index[0].cpu().tolist(), high_feature):
low = low_feature_list[row]
if len(low) == 0:
continue
for l in low:
pairs_x1.append(feature)
pairs_x2.append(l)
pairs_x1 = torch.stack(pairs_x1, dim=-1)
pairs_x2 = torch.stack(pairs_x2, dim=-1)
b, = pairs_x1.shape
y = pairs_x1.new_full((b, 1), fill_value=1)
losses = self.loss_class(pairs_x1, pairs_x2, y.squeeze(-1))
nll_loss = mean_ds(losses)
loss = nll_loss
return loss
class LinearPredict(_Predictor):
def __init__(self, args, pad=0):
        super().__init__(args, pad)
self.predictor = nn.Linear(args.encoder_embed_dim, 2)
def loss_func(self, outputs, targets):
logits = F.log_softmax(outputs, dim=-1)
return F.nll_loss(logits, targets.to(logits.device).long(), reduction='none')
def forward(self, decoder_out=None, decoder_input=None, normalize=False, encoder_out=None, **unused):
predict_input = decoder_out
predict = self.predictor(predict_input)
return F.log_softmax(predict, -1) if normalize else predict
class MixAttentionPredict(_Predictor):
def __init__(self, args, pad=0, **unused):
super().__init__(args, pad)
self.mix_attention = MultiheadAttention(
args.encoder_embed_dim,
args.decoder_attention_heads,
kdim=args.encoder_embed_dim,
vdim=args.encoder_embed_dim,
dropout=args.attention_dropout,
encoder_decoder_attention=True
)
self.mix_attention.reset_parameters()
self.predictor = nn.Linear(args.encoder_embed_dim * 2, 2)
def loss_func(self, outputs, targets):
logits = F.log_softmax(outputs, dim=-1)
return F.nll_loss(logits, targets.to(logits.device).long(), reduction='none')
def forward(self, decoder_out=None, decoder_input=None, normalize=False, encoder_out=None, **unused):
# mask
encoder_padding_mask = encoder_out.encoder_padding_mask
target_mask = decoder_input.eq(self.padding_idx)
key_padding_mask = torch.cat((encoder_padding_mask, target_mask), dim=1)
# key,value,query
decoder_out = decoder_out.transpose(0, 1)
encoder_hidden_state = encoder_out.encoder_out
input = torch.cat((encoder_hidden_state, decoder_out), dim=0)
attn_out, attn_weigth = self.mix_attention(query=decoder_out, key=input, value=input,
key_padding_mask=key_padding_mask,
incremental_state=None, static_kv=True)
predict_input = torch.cat((attn_out, decoder_out), dim=-1).transpose(0, 1)
predict = self.predictor(predict_input)
return F.log_softmax(predict, -1) if normalize else predict
| [
"fairseq.modules.MultiheadAttention",
"torch.stack",
"torch.tensor",
"torch.nn.functional.log_softmax",
"torch.nn.Linear",
"torch.nn.MarginRankingLoss",
"torch.cat"
] | [((437, 475), 'torch.nn.MarginRankingLoss', 'nn.MarginRankingLoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (457, 475), False, 'from torch import nn\n'), ((1803, 1832), 'torch.stack', 'torch.stack', (['pairs_x1'], {'dim': '(-1)'}), '(pairs_x1, dim=-1)\n', (1814, 1832), False, 'import torch\n'), ((1852, 1881), 'torch.stack', 'torch.stack', (['pairs_x2'], {'dim': '(-1)'}), '(pairs_x2, dim=-1)\n', (1863, 1881), False, 'import torch\n'), ((2262, 2298), 'torch.nn.Linear', 'nn.Linear', (['args.encoder_embed_dim', '(2)'], {}), '(args.encoder_embed_dim, 2)\n', (2271, 2298), False, 'from torch import nn\n'), ((2360, 2390), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['outputs'], {'dim': '(-1)'}), '(outputs, dim=-1)\n', (2373, 2390), True, 'import torch.nn.functional as F\n'), ((2890, 3093), 'fairseq.modules.MultiheadAttention', 'MultiheadAttention', (['args.encoder_embed_dim', 'args.decoder_attention_heads'], {'kdim': 'args.encoder_embed_dim', 'vdim': 'args.encoder_embed_dim', 'dropout': 'args.attention_dropout', 'encoder_decoder_attention': '(True)'}), '(args.encoder_embed_dim, args.decoder_attention_heads,\n kdim=args.encoder_embed_dim, vdim=args.encoder_embed_dim, dropout=args.\n attention_dropout, encoder_decoder_attention=True)\n', (2908, 3093), False, 'from fairseq.modules import MultiheadAttention\n'), ((3239, 3279), 'torch.nn.Linear', 'nn.Linear', (['(args.encoder_embed_dim * 2)', '(2)'], {}), '(args.encoder_embed_dim * 2, 2)\n', (3248, 3279), False, 'from torch import nn\n'), ((3341, 3371), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['outputs'], {'dim': '(-1)'}), '(outputs, dim=-1)\n', (3354, 3371), True, 'import torch.nn.functional as F\n'), ((3728, 3781), 'torch.cat', 'torch.cat', (['(encoder_padding_mask, target_mask)'], {'dim': '(1)'}), '((encoder_padding_mask, target_mask), dim=1)\n', (3737, 3781), False, 'import torch\n'), ((3930, 3983), 'torch.cat', 'torch.cat', (['(encoder_hidden_state, decoder_out)'], {'dim': '(0)'}), '((encoder_hidden_state, decoder_out), dim=0)\n', (3939, 3983), False, 'import torch\n'), ((769, 784), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (781, 784), False, 'import torch\n'), ((2683, 2709), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['predict', '(-1)'], {}), '(predict, -1)\n', (2696, 2709), True, 'import torch.nn.functional as F\n'), ((4403, 4429), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['predict', '(-1)'], {}), '(predict, -1)\n', (4416, 4429), True, 'import torch.nn.functional as F\n'), ((4281, 4323), 'torch.cat', 'torch.cat', (['(attn_out, decoder_out)'], {'dim': '(-1)'}), '((attn_out, decoder_out), dim=-1)\n', (4290, 4323), False, 'import torch\n')] |
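A small, self-contained illustration of the pairwise objective that MarginPredict.loss_func builds above; the score values are made up.

# Toy numbers only: x1 holds scores of tokens that match the reference,
# x2 holds scores of tokens that differ, and y=1 asks x1 to outrank x2.
import torch
from torch import nn

loss_fn = nn.MarginRankingLoss(reduction="none")
x1 = torch.tensor([0.9, 0.7])
x2 = torch.tensor([0.2, 0.8])
y = torch.ones_like(x1)
print(loss_fn(x1, x2, y))  # per-pair hinge: max(0, -(x1 - x2) + margin), margin defaults to 0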
from PyQt5 import QtGui
from PyQt5.QtWidgets import QApplication, QDialog, QComboBox, QVBoxLayout, QLabel
import sys
class Window(QDialog):
def __init__(self):
super().__init__()
self.title = "PyQt5 Combo Box"
self.top = 200
self.left = 500
self.width = 300
self.height = 100
self.InitWindow()
def InitWindow(self):
self.setWindowIcon(QtGui.QIcon("icon.png"))
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
vbox = QVBoxLayout()
self.combo = QComboBox()
self.combo.addItem("Python")
self.combo.addItem("Java")
self.combo.addItem("C++")
self.combo.addItem("C#")
self.combo.addItem("Ruby")
self.combo.currentTextChanged.connect(self.comboChanged)
self.label = QLabel()
self.label.setFont(QtGui.QFont("Sanserif", 15))
self.label.setStyleSheet('color:red')
vbox.addWidget(self.combo)
vbox.addWidget(self.label)
self.setLayout(vbox)
self.show()
def comboChanged(self):
text = self.combo.currentText()
self.label.setText("You Have Selected : " + text)
App = QApplication(sys.argv)
window = Window()
sys.exit(App.exec())
| [
"PyQt5.QtGui.QIcon",
"PyQt5.QtWidgets.QComboBox",
"PyQt5.QtGui.QFont",
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtWidgets.QApplication",
"PyQt5.QtWidgets.QVBoxLayout"
] | [((1242, 1264), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (1254, 1264), False, 'from PyQt5.QtWidgets import QApplication, QDialog, QComboBox, QVBoxLayout, QLabel\n'), ((564, 577), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (575, 577), False, 'from PyQt5.QtWidgets import QApplication, QDialog, QComboBox, QVBoxLayout, QLabel\n'), ((600, 611), 'PyQt5.QtWidgets.QComboBox', 'QComboBox', ([], {}), '()\n', (609, 611), False, 'from PyQt5.QtWidgets import QApplication, QDialog, QComboBox, QVBoxLayout, QLabel\n'), ((874, 882), 'PyQt5.QtWidgets.QLabel', 'QLabel', ([], {}), '()\n', (880, 882), False, 'from PyQt5.QtWidgets import QApplication, QDialog, QComboBox, QVBoxLayout, QLabel\n'), ((412, 435), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', (['"""icon.png"""'], {}), "('icon.png')\n", (423, 435), False, 'from PyQt5 import QtGui\n'), ((910, 937), 'PyQt5.QtGui.QFont', 'QtGui.QFont', (['"""Sanserif"""', '(15)'], {}), "('Sanserif', 15)\n", (921, 937), False, 'from PyQt5 import QtGui\n')] |
import sys
from zeroMQServer import ServerAPI
# import the "endpoints" function here
import blueprints.plotting as plottingRoutes
import blueprints.data as dataRoutes
if __name__ == '__main__':
port = 4242 # if nothing is supplied
try:
port = int(sys.argv[1])
    except (IndexError, ValueError):
        # Fall back to the default port when no valid port argument is supplied.
        pass
server = ServerAPI(port=port)
# register all the "endpoints"
server.registerServicesFrom(plottingRoutes)
server.registerServicesFrom(dataRoutes)
server.run()
| [
"zeroMQServer.ServerAPI"
] | [((322, 342), 'zeroMQServer.ServerAPI', 'ServerAPI', ([], {'port': 'port'}), '(port=port)\n', (331, 342), False, 'from zeroMQServer import ServerAPI\n')] |
from __future__ import absolute_import, division, print_function
from dxtbx.format.Format import Format
class FormatDIP2030b(Format):
@staticmethod
def understand(image_file):
# for MacScience DIP2030b only, file size is exactly 18001024 bytes
headerstart = 3000 * 3000 * 2
try:
F = FormatDIP2030b.open_file(image_file, 'rb')
F.seek(headerstart)
rawheader = F.read(1024)
      eof = F.read(1)  # end of file
F.close()
except IOError:
return False
return eof == "" and rawheader[0:3] == "DIP"
def __init__(self, image_file, **kwargs):
'''Initialise the image structure from the given file.'''
from dxtbx import IncorrectFormatError
if not self.understand(image_file):
raise IncorrectFormatError(self, image_file)
Format.__init__(self, image_file, **kwargs)
def detectorbase_start(self): pass
def _start(self):
from iotbx.detectors.macscience import DIPImage
self.detectorbase = DIPImage(self._image_file)
self.detectorbase.readHeader()
def _goniometer(self):
return self._goniometer_factory.single_axis()
def _detector(self):
'''Return a model for a simple detector'''
twotheta = self.detectorbase.parameters["TWOTHETA"]
# At present, ignore non-zero two theta for the dxtbx model
# XXX Return to this issue later.
return self._detector_factory.simple(
sensor = 'IMAGE_PLATE',
distance = self.detectorbase.parameters["DISTANCE"],
beam_centre = (self.detectorbase.parameters["BEAM_CENTER_X"],
self.detectorbase.parameters["BEAM_CENTER_Y"]),
fast_direction = '+x',
slow_direction = '-y',
pixel_size = (self.detectorbase.parameters["PIXEL_SIZE"],
self.detectorbase.parameters["PIXEL_SIZE"]),
image_size = (self.detectorbase.parameters["SIZE1"],
self.detectorbase.parameters["SIZE2"]),
trusted_range = (0, self.detectorbase.parameters["CCD_IMAGE_SATURATION"]),
mask = []) # a list of dead rectangles
def _beam(self):
'''Return a simple model for the beam.'''
return self._beam_factory.simple(self.detectorbase.parameters["WAVELENGTH"])
def _scan(self):
'''Return the scan information for this image.'''
return self._scan_factory.single(
filename = self._image_file,
format = "DIP",
exposure_times = self.detectorbase.parameters["TIME"],
osc_start = self.detectorbase.parameters["OSC_START"],
osc_width = self.detectorbase.parameters["OSC_RANGE"],
epoch = None)
if __name__ == '__main__':
import sys
for arg in sys.argv[1:]:
print(FormatDIP2030b.understand(arg))
| [
"dxtbx.format.Format.Format.__init__",
"dxtbx.IncorrectFormatError",
"iotbx.detectors.macscience.DIPImage"
] | [((795, 838), 'dxtbx.format.Format.Format.__init__', 'Format.__init__', (['self', 'image_file'], {}), '(self, image_file, **kwargs)\n', (810, 838), False, 'from dxtbx.format.Format import Format\n'), ((973, 999), 'iotbx.detectors.macscience.DIPImage', 'DIPImage', (['self._image_file'], {}), '(self._image_file)\n', (981, 999), False, 'from iotbx.detectors.macscience import DIPImage\n'), ((751, 789), 'dxtbx.IncorrectFormatError', 'IncorrectFormatError', (['self', 'image_file'], {}), '(self, image_file)\n', (771, 789), False, 'from dxtbx import IncorrectFormatError\n')] |
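A one-line sanity check of the size test in FormatDIP2030b.understand() above: a 3000 x 3000 image of 2-byte pixels followed by a 1024-byte trailer gives the expected file size.

print(3000 * 3000 * 2 + 1024)  # 18001024 bytes, matching the comment in understand()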
"""
This module contains classes handling the first stage of processing the pdf:
extracting rows of text.
"""
from typing import Callable, Any
from collections import namedtuple
import numpy as np
import numpy.typing as npt  # referenced by the string type annotations below
RowExtraction = namedtuple("RowExtraction", "rows, row_indices")
class TextRowExtractor:
"""
Extract indices representing the start and end points of rows ('regions')
matching some predicate.
"""
def __init__(
self,
pixel_predicate: """Callable[
[npt.NDArray], npt.NDArray
]""" = lambda arr: arr == 0
):
self._pixel_predicate = pixel_predicate
@property
def pixel_predicate(self) -> """Callable[
[npt.NDArray], npt.NDArray
]""":
return self._pixel_predicate
@pixel_predicate.setter
def pixel_predicate(
self,
pixel_predicate: "Callable[[npt.NDArray], npt.NDArray]"
):
self._pixel_predicate = pixel_predicate
def extract(self, image: "npt.NDArray") -> RowExtraction:
"""
Extract 'regions' from an image matching this objects predicate.
"""
row_contains_black = np.any(self._pixel_predicate(image), axis=1)
runs = find_runs(1, row_contains_black)
return RowExtraction(
# can't apply np.fromiter to 2d arrays
np.array([image[slice(*run)] for run in runs]),
runs
)
# credit:
# https://stackoverflow.com/questions/31544129/extract-separate-non-zero-blocks-from-array
def find_runs(value: Any, a: "npt.NDArray") -> "npt.NDArray":
"""inclusive-exclusive"""
# Create an array that is 1 where a is `value`, and pad each end with
# an extra 0.
isvalue = np.concatenate(([0], np.equal(a, value).view(np.int8), [0]))
absdiff = np.abs(np.diff(isvalue))
# Runs start and end where absdiff is 1.
ranges = np.where(absdiff == 1)[0].reshape(-1, 2)
return ranges
| [
"numpy.where",
"collections.namedtuple",
"numpy.diff",
"numpy.equal"
] | [((218, 266), 'collections.namedtuple', 'namedtuple', (['"""RowExtraction"""', '"""rows, row_indices"""'], {}), "('RowExtraction', 'rows, row_indices')\n", (228, 266), False, 'from collections import namedtuple\n'), ((1808, 1824), 'numpy.diff', 'np.diff', (['isvalue'], {}), '(isvalue)\n', (1815, 1824), True, 'import numpy as np\n'), ((1884, 1906), 'numpy.where', 'np.where', (['(absdiff == 1)'], {}), '(absdiff == 1)\n', (1892, 1906), True, 'import numpy as np\n'), ((1747, 1765), 'numpy.equal', 'np.equal', (['a', 'value'], {}), '(a, value)\n', (1755, 1765), True, 'import numpy as np\n')] |
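A small worked example of the find_runs helper above (run alongside the module so find_runs is in scope); runs come back as inclusive-exclusive [start, stop) pairs.

import numpy as np

a = np.array([0, 1, 1, 0, 1, 1, 1, 0])
print(find_runs(1, a))  # [[1 3]
                         #  [4 7]]  i.e. a[1:3] and a[4:7] are the runs of 1s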
import numpy as np
import numpy.testing as npt
import pandas as pd
from stumpy import maamped, config
import pytest
from dask.distributed import Client, LocalCluster
import naive
@pytest.fixture(scope="module")
def dask_cluster():
cluster = LocalCluster(n_workers=2, threads_per_worker=2)
yield cluster
cluster.close()
test_data = [
(np.array([[584, -11, 23, 79, 1001, 0, -19]], dtype=np.float64), 3),
(np.random.uniform(-1000, 1000, [5, 20]).astype(np.float64), 5),
]
substitution_locations = [slice(0, 0), 0, -1, slice(1, 3), [0, 3]]
def test_maamped_int_input(dask_cluster):
with pytest.raises(TypeError):
with Client(dask_cluster) as dask_client:
maamped(dask_client, np.arange(20).reshape(2, 10), 5)
@pytest.mark.filterwarnings("ignore:\\s+Port 8787 is already in use:UserWarning")
@pytest.mark.parametrize("T, m", test_data)
def test_maamped(T, m, dask_cluster):
with Client(dask_cluster) as dask_client:
excl_zone = int(np.ceil(m / 4))
ref_P, ref_I = naive.maamp(T, m, excl_zone)
comp_P, comp_I = maamped(dask_client, T, m)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
@pytest.mark.filterwarnings("ignore:\\s+Port 8787 is already in use:UserWarning")
@pytest.mark.parametrize("T, m", test_data)
def test_maamped_include(T, m, dask_cluster):
with Client(dask_cluster) as dask_client:
for width in range(T.shape[0]):
for i in range(T.shape[0] - width):
include = np.asarray(range(i, i + width + 1))
excl_zone = int(np.ceil(m / 4))
ref_P, ref_I = naive.maamp(T, m, excl_zone, include)
comp_P, comp_I = maamped(dask_client, T, m, include)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
@pytest.mark.filterwarnings("ignore:\\s+Port 8787 is already in use:UserWarning")
@pytest.mark.parametrize("T, m", test_data)
def test_maamped_discords(T, m, dask_cluster):
with Client(dask_cluster) as dask_client:
excl_zone = int(np.ceil(m / 4))
ref_P, ref_I = naive.maamp(T, m, excl_zone, discords=True)
comp_P, comp_I = maamped(dask_client, T, m, discords=True)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
@pytest.mark.filterwarnings("ignore:\\s+Port 8787 is already in use:UserWarning")
@pytest.mark.parametrize("T, m", test_data)
def test_maamped_include_discords(T, m, dask_cluster):
with Client(dask_cluster) as dask_client:
for width in range(T.shape[0]):
for i in range(T.shape[0] - width):
include = np.asarray(range(i, i + width + 1))
excl_zone = int(np.ceil(m / 4))
ref_P, ref_I = naive.maamp(T, m, excl_zone, include, discords=True)
comp_P, comp_I = maamped(dask_client, T, m, include, discords=True)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
@pytest.mark.filterwarnings("ignore:\\s+Port 8787 is already in use:UserWarning")
@pytest.mark.parametrize("T, m", test_data)
def test_maamped_df(T, m, dask_cluster):
with Client(dask_cluster) as dask_client:
excl_zone = int(np.ceil(m / 4))
ref_P, ref_I = naive.maamp(T, m, excl_zone)
df = pd.DataFrame(T.T)
comp_P, comp_I = maamped(dask_client, df, m)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
@pytest.mark.filterwarnings("ignore:\\s+Port 8787 is already in use:UserWarning")
def test_maamped_constant_subsequence_self_join(dask_cluster):
with Client(dask_cluster) as dask_client:
T_A = np.concatenate(
(np.zeros(20, dtype=np.float64), np.ones(5, dtype=np.float64))
)
T = np.array([T_A, T_A, np.random.rand(T_A.shape[0])])
m = 3
excl_zone = int(np.ceil(m / 4))
ref_P, ref_I = naive.maamp(T, m, excl_zone)
comp_P, comp_I = maamped(dask_client, T, m)
npt.assert_almost_equal(ref_P, comp_P) # ignore indices
@pytest.mark.filterwarnings("ignore:\\s+Port 8787 is already in use:UserWarning")
def test_maamped_identical_subsequence_self_join(dask_cluster):
with Client(dask_cluster) as dask_client:
identical = np.random.rand(8)
T_A = np.random.rand(20)
T_A[1 : 1 + identical.shape[0]] = identical
T_A[11 : 11 + identical.shape[0]] = identical
T = np.array([T_A, T_A, np.random.rand(T_A.shape[0])])
m = 3
excl_zone = int(np.ceil(m / 4))
ref_P, ref_I = naive.maamp(T, m, excl_zone)
comp_P, comp_I = maamped(dask_client, T, m)
npt.assert_almost_equal(
ref_P, comp_P, decimal=config.STUMPY_TEST_PRECISION
) # ignore indices
@pytest.mark.filterwarnings("ignore:\\s+Port 8787 is already in use:UserWarning")
@pytest.mark.parametrize("T, m", test_data)
@pytest.mark.parametrize("substitution_location", substitution_locations)
def test_maamped_one_subsequence_inf_self_join_first_dimension(
T, m, substitution_location, dask_cluster
):
with Client(dask_cluster) as dask_client:
excl_zone = int(np.ceil(m / 4))
T_sub = T.copy()
T_sub[0, substitution_location] = np.inf
ref_P, ref_I = naive.maamp(T_sub, m, excl_zone)
comp_P, comp_I = maamped(dask_client, T_sub, m)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
@pytest.mark.filterwarnings("ignore:\\s+Port 8787 is already in use:UserWarning")
@pytest.mark.parametrize("T, m", test_data)
@pytest.mark.parametrize("substitution_location", substitution_locations)
def test_maamped_one_subsequence_inf_self_join_all_dimensions(
T, m, substitution_location, dask_cluster
):
with Client(dask_cluster) as dask_client:
excl_zone = int(np.ceil(m / 4))
T_sub = T.copy()
T_sub[:, substitution_location] = np.inf
ref_P, ref_I = naive.maamp(T_sub, m, excl_zone)
comp_P, comp_I = maamped(dask_client, T_sub, m)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
@pytest.mark.filterwarnings("ignore:\\s+Port 8787 is already in use:UserWarning")
@pytest.mark.parametrize("T, m", test_data)
@pytest.mark.parametrize("substitution_location", substitution_locations)
def test_maamped_one_subsequence_nan_self_join_first_dimension(
T, m, substitution_location, dask_cluster
):
with Client(dask_cluster) as dask_client:
excl_zone = int(np.ceil(m / 4))
T_sub = T.copy()
T_sub[0, substitution_location] = np.nan
ref_P, ref_I = naive.maamp(T_sub, m, excl_zone)
comp_P, comp_I = maamped(dask_client, T_sub, m)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
@pytest.mark.filterwarnings("ignore:\\s+Port 8787 is already in use:UserWarning")
@pytest.mark.parametrize("T, m", test_data)
@pytest.mark.parametrize("substitution_location", substitution_locations)
def test_maamped_one_subsequence_nan_self_join_all_dimensions(
T, m, substitution_location, dask_cluster
):
with Client(dask_cluster) as dask_client:
excl_zone = int(np.ceil(m / 4))
T_sub = T.copy()
T_sub[:, substitution_location] = np.nan
ref_P, ref_I = naive.maamp(T_sub, m, excl_zone)
comp_P, comp_I = maamped(dask_client, T_sub, m)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
| [
"numpy.ceil",
"pytest.mark.filterwarnings",
"numpy.random.rand",
"pandas.DataFrame",
"numpy.ones",
"dask.distributed.LocalCluster",
"naive.maamp",
"stumpy.maamped",
"dask.distributed.Client",
"pytest.mark.parametrize",
"numpy.array",
"numpy.testing.assert_almost_equal",
"pytest.raises",
"numpy.zeros",
"numpy.random.uniform",
"pytest.fixture",
"numpy.arange"
] | [((182, 212), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (196, 212), False, 'import pytest\n'), ((759, 844), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:\\\\s+Port 8787 is already in use:UserWarning"""'], {}), "('ignore:\\\\s+Port 8787 is already in use:UserWarning'\n )\n", (785, 844), False, 'import pytest\n'), ((841, 883), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""T, m"""', 'test_data'], {}), "('T, m', test_data)\n", (864, 883), False, 'import pytest\n'), ((1211, 1296), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:\\\\s+Port 8787 is already in use:UserWarning"""'], {}), "('ignore:\\\\s+Port 8787 is already in use:UserWarning'\n )\n", (1237, 1296), False, 'import pytest\n'), ((1293, 1335), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""T, m"""', 'test_data'], {}), "('T, m', test_data)\n", (1316, 1335), False, 'import pytest\n'), ((1880, 1965), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:\\\\s+Port 8787 is already in use:UserWarning"""'], {}), "('ignore:\\\\s+Port 8787 is already in use:UserWarning'\n )\n", (1906, 1965), False, 'import pytest\n'), ((1962, 2004), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""T, m"""', 'test_data'], {}), "('T, m', test_data)\n", (1985, 2004), False, 'import pytest\n'), ((2371, 2456), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:\\\\s+Port 8787 is already in use:UserWarning"""'], {}), "('ignore:\\\\s+Port 8787 is already in use:UserWarning'\n )\n", (2397, 2456), False, 'import pytest\n'), ((2453, 2495), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""T, m"""', 'test_data'], {}), "('T, m', test_data)\n", (2476, 2495), False, 'import pytest\n'), ((3079, 3164), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:\\\\s+Port 8787 is already in use:UserWarning"""'], {}), "('ignore:\\\\s+Port 8787 is already in use:UserWarning'\n )\n", (3105, 3164), False, 'import pytest\n'), ((3161, 3203), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""T, m"""', 'test_data'], {}), "('T, m', test_data)\n", (3184, 3203), False, 'import pytest\n'), ((3566, 3651), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:\\\\s+Port 8787 is already in use:UserWarning"""'], {}), "('ignore:\\\\s+Port 8787 is already in use:UserWarning'\n )\n", (3592, 3651), False, 'import pytest\n'), ((4163, 4248), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:\\\\s+Port 8787 is already in use:UserWarning"""'], {}), "('ignore:\\\\s+Port 8787 is already in use:UserWarning'\n )\n", (4189, 4248), False, 'import pytest\n'), ((4883, 4968), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:\\\\s+Port 8787 is already in use:UserWarning"""'], {}), "('ignore:\\\\s+Port 8787 is already in use:UserWarning'\n )\n", (4909, 4968), False, 'import pytest\n'), ((4965, 5007), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""T, m"""', 'test_data'], {}), "('T, m', test_data)\n", (4988, 5007), False, 'import pytest\n'), ((5009, 5081), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""substitution_location"""', 'substitution_locations'], {}), "('substitution_location', substitution_locations)\n", (5032, 5081), False, 'import pytest\n'), ((5567, 5652), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:\\\\s+Port 8787 is already in use:UserWarning"""'], {}), 
"('ignore:\\\\s+Port 8787 is already in use:UserWarning'\n )\n", (5593, 5652), False, 'import pytest\n'), ((5649, 5691), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""T, m"""', 'test_data'], {}), "('T, m', test_data)\n", (5672, 5691), False, 'import pytest\n'), ((5693, 5765), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""substitution_location"""', 'substitution_locations'], {}), "('substitution_location', substitution_locations)\n", (5716, 5765), False, 'import pytest\n'), ((6250, 6335), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:\\\\s+Port 8787 is already in use:UserWarning"""'], {}), "('ignore:\\\\s+Port 8787 is already in use:UserWarning'\n )\n", (6276, 6335), False, 'import pytest\n'), ((6332, 6374), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""T, m"""', 'test_data'], {}), "('T, m', test_data)\n", (6355, 6374), False, 'import pytest\n'), ((6376, 6448), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""substitution_location"""', 'substitution_locations'], {}), "('substitution_location', substitution_locations)\n", (6399, 6448), False, 'import pytest\n'), ((6934, 7019), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:\\\\s+Port 8787 is already in use:UserWarning"""'], {}), "('ignore:\\\\s+Port 8787 is already in use:UserWarning'\n )\n", (6960, 7019), False, 'import pytest\n'), ((7016, 7058), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""T, m"""', 'test_data'], {}), "('T, m', test_data)\n", (7039, 7058), False, 'import pytest\n'), ((7060, 7132), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""substitution_location"""', 'substitution_locations'], {}), "('substitution_location', substitution_locations)\n", (7083, 7132), False, 'import pytest\n'), ((247, 294), 'dask.distributed.LocalCluster', 'LocalCluster', ([], {'n_workers': '(2)', 'threads_per_worker': '(2)'}), '(n_workers=2, threads_per_worker=2)\n', (259, 294), False, 'from dask.distributed import Client, LocalCluster\n'), ((354, 416), 'numpy.array', 'np.array', (['[[584, -11, 23, 79, 1001, 0, -19]]'], {'dtype': 'np.float64'}), '([[584, -11, 23, 79, 1001, 0, -19]], dtype=np.float64)\n', (362, 416), True, 'import numpy as np\n'), ((614, 638), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (627, 638), False, 'import pytest\n'), ((931, 951), 'dask.distributed.Client', 'Client', (['dask_cluster'], {}), '(dask_cluster)\n', (937, 951), False, 'from dask.distributed import Client, LocalCluster\n'), ((1032, 1060), 'naive.maamp', 'naive.maamp', (['T', 'm', 'excl_zone'], {}), '(T, m, excl_zone)\n', (1043, 1060), False, 'import naive\n'), ((1086, 1112), 'stumpy.maamped', 'maamped', (['dask_client', 'T', 'm'], {}), '(dask_client, T, m)\n', (1093, 1112), False, 'from stumpy import maamped, config\n'), ((1122, 1160), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['ref_P', 'comp_P'], {}), '(ref_P, comp_P)\n', (1145, 1160), True, 'import numpy.testing as npt\n'), ((1169, 1207), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['ref_I', 'comp_I'], {}), '(ref_I, comp_I)\n', (1192, 1207), True, 'import numpy.testing as npt\n'), ((1391, 1411), 'dask.distributed.Client', 'Client', (['dask_cluster'], {}), '(dask_cluster)\n', (1397, 1411), False, 'from dask.distributed import Client, LocalCluster\n'), ((2061, 2081), 'dask.distributed.Client', 'Client', (['dask_cluster'], {}), '(dask_cluster)\n', (2067, 2081), False, 'from dask.distributed import Client, 
LocalCluster\n'), ((2162, 2205), 'naive.maamp', 'naive.maamp', (['T', 'm', 'excl_zone'], {'discords': '(True)'}), '(T, m, excl_zone, discords=True)\n', (2173, 2205), False, 'import naive\n'), ((2231, 2272), 'stumpy.maamped', 'maamped', (['dask_client', 'T', 'm'], {'discords': '(True)'}), '(dask_client, T, m, discords=True)\n', (2238, 2272), False, 'from stumpy import maamped, config\n'), ((2282, 2320), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['ref_P', 'comp_P'], {}), '(ref_P, comp_P)\n', (2305, 2320), True, 'import numpy.testing as npt\n'), ((2329, 2367), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['ref_I', 'comp_I'], {}), '(ref_I, comp_I)\n', (2352, 2367), True, 'import numpy.testing as npt\n'), ((2560, 2580), 'dask.distributed.Client', 'Client', (['dask_cluster'], {}), '(dask_cluster)\n', (2566, 2580), False, 'from dask.distributed import Client, LocalCluster\n'), ((3254, 3274), 'dask.distributed.Client', 'Client', (['dask_cluster'], {}), '(dask_cluster)\n', (3260, 3274), False, 'from dask.distributed import Client, LocalCluster\n'), ((3355, 3383), 'naive.maamp', 'naive.maamp', (['T', 'm', 'excl_zone'], {}), '(T, m, excl_zone)\n', (3366, 3383), False, 'import naive\n'), ((3397, 3414), 'pandas.DataFrame', 'pd.DataFrame', (['T.T'], {}), '(T.T)\n', (3409, 3414), True, 'import pandas as pd\n'), ((3440, 3467), 'stumpy.maamped', 'maamped', (['dask_client', 'df', 'm'], {}), '(dask_client, df, m)\n', (3447, 3467), False, 'from stumpy import maamped, config\n'), ((3477, 3515), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['ref_P', 'comp_P'], {}), '(ref_P, comp_P)\n', (3500, 3515), True, 'import numpy.testing as npt\n'), ((3524, 3562), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['ref_I', 'comp_I'], {}), '(ref_I, comp_I)\n', (3547, 3562), True, 'import numpy.testing as npt\n'), ((3719, 3739), 'dask.distributed.Client', 'Client', (['dask_cluster'], {}), '(dask_cluster)\n', (3725, 3739), False, 'from dask.distributed import Client, LocalCluster\n'), ((4013, 4041), 'naive.maamp', 'naive.maamp', (['T', 'm', 'excl_zone'], {}), '(T, m, excl_zone)\n', (4024, 4041), False, 'import naive\n'), ((4067, 4093), 'stumpy.maamped', 'maamped', (['dask_client', 'T', 'm'], {}), '(dask_client, T, m)\n', (4074, 4093), False, 'from stumpy import maamped, config\n'), ((4103, 4141), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['ref_P', 'comp_P'], {}), '(ref_P, comp_P)\n', (4126, 4141), True, 'import numpy.testing as npt\n'), ((4317, 4337), 'dask.distributed.Client', 'Client', (['dask_cluster'], {}), '(dask_cluster)\n', (4323, 4337), False, 'from dask.distributed import Client, LocalCluster\n'), ((4374, 4391), 'numpy.random.rand', 'np.random.rand', (['(8)'], {}), '(8)\n', (4388, 4391), True, 'import numpy as np\n'), ((4406, 4424), 'numpy.random.rand', 'np.random.rand', (['(20)'], {}), '(20)\n', (4420, 4424), True, 'import numpy as np\n'), ((4673, 4701), 'naive.maamp', 'naive.maamp', (['T', 'm', 'excl_zone'], {}), '(T, m, excl_zone)\n', (4684, 4701), False, 'import naive\n'), ((4727, 4753), 'stumpy.maamped', 'maamped', (['dask_client', 'T', 'm'], {}), '(dask_client, T, m)\n', (4734, 4753), False, 'from stumpy import maamped, config\n'), ((4763, 4839), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['ref_P', 'comp_P'], {'decimal': 'config.STUMPY_TEST_PRECISION'}), '(ref_P, comp_P, decimal=config.STUMPY_TEST_PRECISION)\n', (4786, 4839), True, 'import numpy.testing as npt\n'), ((5204, 5224), 
'dask.distributed.Client', 'Client', (['dask_cluster'], {}), '(dask_cluster)\n', (5210, 5224), False, 'from dask.distributed import Client, LocalCluster\n'), ((5380, 5412), 'naive.maamp', 'naive.maamp', (['T_sub', 'm', 'excl_zone'], {}), '(T_sub, m, excl_zone)\n', (5391, 5412), False, 'import naive\n'), ((5438, 5468), 'stumpy.maamped', 'maamped', (['dask_client', 'T_sub', 'm'], {}), '(dask_client, T_sub, m)\n', (5445, 5468), False, 'from stumpy import maamped, config\n'), ((5478, 5516), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['ref_P', 'comp_P'], {}), '(ref_P, comp_P)\n', (5501, 5516), True, 'import numpy.testing as npt\n'), ((5525, 5563), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['ref_I', 'comp_I'], {}), '(ref_I, comp_I)\n', (5548, 5563), True, 'import numpy.testing as npt\n'), ((5887, 5907), 'dask.distributed.Client', 'Client', (['dask_cluster'], {}), '(dask_cluster)\n', (5893, 5907), False, 'from dask.distributed import Client, LocalCluster\n'), ((6063, 6095), 'naive.maamp', 'naive.maamp', (['T_sub', 'm', 'excl_zone'], {}), '(T_sub, m, excl_zone)\n', (6074, 6095), False, 'import naive\n'), ((6121, 6151), 'stumpy.maamped', 'maamped', (['dask_client', 'T_sub', 'm'], {}), '(dask_client, T_sub, m)\n', (6128, 6151), False, 'from stumpy import maamped, config\n'), ((6161, 6199), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['ref_P', 'comp_P'], {}), '(ref_P, comp_P)\n', (6184, 6199), True, 'import numpy.testing as npt\n'), ((6208, 6246), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['ref_I', 'comp_I'], {}), '(ref_I, comp_I)\n', (6231, 6246), True, 'import numpy.testing as npt\n'), ((6571, 6591), 'dask.distributed.Client', 'Client', (['dask_cluster'], {}), '(dask_cluster)\n', (6577, 6591), False, 'from dask.distributed import Client, LocalCluster\n'), ((6747, 6779), 'naive.maamp', 'naive.maamp', (['T_sub', 'm', 'excl_zone'], {}), '(T_sub, m, excl_zone)\n', (6758, 6779), False, 'import naive\n'), ((6805, 6835), 'stumpy.maamped', 'maamped', (['dask_client', 'T_sub', 'm'], {}), '(dask_client, T_sub, m)\n', (6812, 6835), False, 'from stumpy import maamped, config\n'), ((6845, 6883), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['ref_P', 'comp_P'], {}), '(ref_P, comp_P)\n', (6868, 6883), True, 'import numpy.testing as npt\n'), ((6892, 6930), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['ref_I', 'comp_I'], {}), '(ref_I, comp_I)\n', (6915, 6930), True, 'import numpy.testing as npt\n'), ((7254, 7274), 'dask.distributed.Client', 'Client', (['dask_cluster'], {}), '(dask_cluster)\n', (7260, 7274), False, 'from dask.distributed import Client, LocalCluster\n'), ((7430, 7462), 'naive.maamp', 'naive.maamp', (['T_sub', 'm', 'excl_zone'], {}), '(T_sub, m, excl_zone)\n', (7441, 7462), False, 'import naive\n'), ((7488, 7518), 'stumpy.maamped', 'maamped', (['dask_client', 'T_sub', 'm'], {}), '(dask_client, T_sub, m)\n', (7495, 7518), False, 'from stumpy import maamped, config\n'), ((7528, 7566), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['ref_P', 'comp_P'], {}), '(ref_P, comp_P)\n', (7551, 7566), True, 'import numpy.testing as npt\n'), ((7575, 7613), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['ref_I', 'comp_I'], {}), '(ref_I, comp_I)\n', (7598, 7613), True, 'import numpy.testing as npt\n'), ((653, 673), 'dask.distributed.Client', 'Client', (['dask_cluster'], {}), '(dask_cluster)\n', (659, 673), False, 'from dask.distributed import Client, 
LocalCluster\n'), ((992, 1006), 'numpy.ceil', 'np.ceil', (['(m / 4)'], {}), '(m / 4)\n', (999, 1006), True, 'import numpy as np\n'), ((2122, 2136), 'numpy.ceil', 'np.ceil', (['(m / 4)'], {}), '(m / 4)\n', (2129, 2136), True, 'import numpy as np\n'), ((3315, 3329), 'numpy.ceil', 'np.ceil', (['(m / 4)'], {}), '(m / 4)\n', (3322, 3329), True, 'import numpy as np\n'), ((3973, 3987), 'numpy.ceil', 'np.ceil', (['(m / 4)'], {}), '(m / 4)\n', (3980, 3987), True, 'import numpy as np\n'), ((4633, 4647), 'numpy.ceil', 'np.ceil', (['(m / 4)'], {}), '(m / 4)\n', (4640, 4647), True, 'import numpy as np\n'), ((5265, 5279), 'numpy.ceil', 'np.ceil', (['(m / 4)'], {}), '(m / 4)\n', (5272, 5279), True, 'import numpy as np\n'), ((5948, 5962), 'numpy.ceil', 'np.ceil', (['(m / 4)'], {}), '(m / 4)\n', (5955, 5962), True, 'import numpy as np\n'), ((6632, 6646), 'numpy.ceil', 'np.ceil', (['(m / 4)'], {}), '(m / 4)\n', (6639, 6646), True, 'import numpy as np\n'), ((7315, 7329), 'numpy.ceil', 'np.ceil', (['(m / 4)'], {}), '(m / 4)\n', (7322, 7329), True, 'import numpy as np\n'), ((427, 466), 'numpy.random.uniform', 'np.random.uniform', (['(-1000)', '(1000)', '[5, 20]'], {}), '(-1000, 1000, [5, 20])\n', (444, 466), True, 'import numpy as np\n'), ((1659, 1696), 'naive.maamp', 'naive.maamp', (['T', 'm', 'excl_zone', 'include'], {}), '(T, m, excl_zone, include)\n', (1670, 1696), False, 'import naive\n'), ((1730, 1765), 'stumpy.maamped', 'maamped', (['dask_client', 'T', 'm', 'include'], {}), '(dask_client, T, m, include)\n', (1737, 1765), False, 'from stumpy import maamped, config\n'), ((1783, 1821), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['ref_P', 'comp_P'], {}), '(ref_P, comp_P)\n', (1806, 1821), True, 'import numpy.testing as npt\n'), ((1838, 1876), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['ref_I', 'comp_I'], {}), '(ref_I, comp_I)\n', (1861, 1876), True, 'import numpy.testing as npt\n'), ((2828, 2880), 'naive.maamp', 'naive.maamp', (['T', 'm', 'excl_zone', 'include'], {'discords': '(True)'}), '(T, m, excl_zone, include, discords=True)\n', (2839, 2880), False, 'import naive\n'), ((2914, 2964), 'stumpy.maamped', 'maamped', (['dask_client', 'T', 'm', 'include'], {'discords': '(True)'}), '(dask_client, T, m, include, discords=True)\n', (2921, 2964), False, 'from stumpy import maamped, config\n'), ((2982, 3020), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['ref_P', 'comp_P'], {}), '(ref_P, comp_P)\n', (3005, 3020), True, 'import numpy.testing as npt\n'), ((3037, 3075), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['ref_I', 'comp_I'], {}), '(ref_I, comp_I)\n', (3060, 3075), True, 'import numpy.testing as npt\n'), ((3799, 3829), 'numpy.zeros', 'np.zeros', (['(20)'], {'dtype': 'np.float64'}), '(20, dtype=np.float64)\n', (3807, 3829), True, 'import numpy as np\n'), ((3831, 3859), 'numpy.ones', 'np.ones', (['(5)'], {'dtype': 'np.float64'}), '(5, dtype=np.float64)\n', (3838, 3859), True, 'import numpy as np\n'), ((3903, 3931), 'numpy.random.rand', 'np.random.rand', (['T_A.shape[0]'], {}), '(T_A.shape[0])\n', (3917, 3931), True, 'import numpy as np\n'), ((4563, 4591), 'numpy.random.rand', 'np.random.rand', (['T_A.shape[0]'], {}), '(T_A.shape[0])\n', (4577, 4591), True, 'import numpy as np\n'), ((1611, 1625), 'numpy.ceil', 'np.ceil', (['(m / 4)'], {}), '(m / 4)\n', (1618, 1625), True, 'import numpy as np\n'), ((2780, 2794), 'numpy.ceil', 'np.ceil', (['(m / 4)'], {}), '(m / 4)\n', (2787, 2794), True, 'import numpy as np\n'), ((723, 736), 
'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (732, 736), True, 'import numpy as np\n')] |
import unittest
from muvimaker import main_logger
from muvimaker.core.pictures import BasePicture
from muvimaker.core.project import ProjectHandler, standard_screen_size, standard_framerate, standard_hop_length
from muvimaker.core.video import Video
from muvimaker.example_data import example_song
logger = main_logger.getChild(__name__)
ph_name = 'test_ph_handler'
class TestPreComputedPicture(unittest.TestCase):
def test_a_picture(self):
logger.info('\n\n test PreComputedPicture\n\n')
ph = ProjectHandler.get_project_handler(ph_name)
video = ph.get_video(
standard_screen_size,
standard_hop_length,
standard_framerate
)
frames = [video.make_frame_per_frame(10)]
pre_computed_picture = BasePicture.create(
'pre_computed_picture',
None,
{'frames': frames},
None
)
analyser_video = Video(
[pre_computed_picture],
ph.main_sound_file,
standard_framerate,
ph.length,
standard_screen_size
)
analyser_video.make_frame_per_frame(0) | [
"muvimaker.core.project.ProjectHandler.get_project_handler",
"muvimaker.main_logger.getChild",
"muvimaker.core.video.Video",
"muvimaker.core.pictures.BasePicture.create"
] | [((310, 340), 'muvimaker.main_logger.getChild', 'main_logger.getChild', (['__name__'], {}), '(__name__)\n', (330, 340), False, 'from muvimaker import main_logger\n'), ((523, 566), 'muvimaker.core.project.ProjectHandler.get_project_handler', 'ProjectHandler.get_project_handler', (['ph_name'], {}), '(ph_name)\n', (557, 566), False, 'from muvimaker.core.project import ProjectHandler, standard_screen_size, standard_framerate, standard_hop_length\n'), ((787, 861), 'muvimaker.core.pictures.BasePicture.create', 'BasePicture.create', (['"""pre_computed_picture"""', 'None', "{'frames': frames}", 'None'], {}), "('pre_computed_picture', None, {'frames': frames}, None)\n", (805, 861), False, 'from muvimaker.core.pictures import BasePicture\n'), ((946, 1053), 'muvimaker.core.video.Video', 'Video', (['[pre_computed_picture]', 'ph.main_sound_file', 'standard_framerate', 'ph.length', 'standard_screen_size'], {}), '([pre_computed_picture], ph.main_sound_file, standard_framerate, ph.\n length, standard_screen_size)\n', (951, 1053), False, 'from muvimaker.core.video import Video\n')] |
""" Conics Intersection
Given the homogeneous matrices of two conics, it returns up to four intersection points.
This is the Python implementation of the code published by:
<NAME> (2021). Conics intersection (https://www.mathworks.com/matlabcentral/fileexchange/28318-conics-intersection), MATLAB Central File Exchange.
This script requires that `numpy` and `matplotlib` be installed within the Python
environment you are running this script in.
This file can also be imported as a module and contains the following
functions:
* intersectConics - returns the intersection points.
* plotConic - shows a matplotlib plot of two conics and their intersection points.
"""
import numpy as np
from numpy.linalg import matrix_rank, inv, det
from nudge.utils_intersectConics import completeIntersection, decomposeDegenerateConic, intersectConicLine
import matplotlib.pyplot as plt
def intersectConics(E1, E2):
""" Intersects two non degenerate conics
Args:
E1 (np.array): homogeneous matrix of conic section 1
E2 (np.array): homogeneous matrix of conic section 2
Returns:
points_x: a list of x's coordinates of intersection points
points_y: a list of y's coordinates of intersection points
"""
P = np.array([])
r1 = matrix_rank(E1)
r2 = matrix_rank(E2)
if(r1==3 and r2==3):
P = completeIntersection(E1,E2)
else:
if (r2 < 3): #E2 is degenerate
defE = E2
fullE = E1
else:
defE = E1 #E1 is degenerate
fullE = E2
m, l = decomposeDegenerateConic(defE)
P1 = intersectConicLine(fullE,m)
P2 = intersectConicLine(fullE,l)
P = np.array([P1, P2])
points_x = []
points_y = []
for i in range(2):
P1 = P[i]
if(P1.size!=0):
for j in range(P1.shape[0]):
points_x.append(P1[j,0]/P1[j,2])
points_y.append(P1[j,1]/P1[j,2])
return points_x, points_y
def plotConic(L, R, points_x, points_y, xBounds=[-50,50], yBounds=[-50,50]):
""" Plots two conic sections based on their homogeneous representation and
their intersection points
The homogeneous representation of a conic is:
Ax^2 + Bxy + Cy^2 + Dx + Ey + F = 0
And the matrix form:
[ A B/2 D/2
M = B/2 C E/2
D/2 E/2 F ]
Args:
L (np.array): homogeneous matrix of conic section 1
R (np.array): homogeneous matrix of conic section 2
points_x: a list of x's coordinates of intersection points
points_y: a list of y's coordinates of intersection points
xBounds, yBounds = a list of maximum x and y upper and lower bounds, by default [-50,50]
"""
x = np.linspace(xBounds[0],xBounds[1],5000)
y = np.linspace(yBounds[0],yBounds[1],5000)
x, y = np.meshgrid(x, y)
#assert B**2 - 4*A*C == 0
A = L[0,0]
B = 2*L[0,1]
C = L[1,1]
D = 2*L[0,2]
E = 2*L[1,2]
F = L[2,2]
plt.contour(x, y,(A*x**2 + B*x*y + C*y**2 + D*x + E*y + F), [0], colors='g')
A = R[0,0]
B = 2*R[0,1]
C = R[1,1]
D = 2*R[0,2]
E = 2*R[1,2]
F = R[2,2]
plt.contour(x, y,(A*x**2 + B*x*y + C*y**2 + D*x + E*y + F), [0], colors='r')
plt.scatter(points_x, points_y, marker='o', color='k')
plt.show()
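
# --- Added usage sketch (illustrative; not part of the original module) ---
# A minimal example of how intersectConics/plotConic might be called, assuming
# the nudge.utils_intersectConics helpers imported above are available. The two
# matrices encode x^2 + y^2 - 4 = 0 (a circle) and x^2/9 + y^2 - 1 = 0 (an
# ellipse) in the homogeneous form described in plotConic's docstring.
if __name__ == '__main__':
    circle = np.array([[1.0, 0.0, 0.0],
                       [0.0, 1.0, 0.0],
                       [0.0, 0.0, -4.0]])
    ellipse = np.array([[1.0 / 9.0, 0.0, 0.0],
                        [0.0, 1.0, 0.0],
                        [0.0, 0.0, -1.0]])
    xs, ys = intersectConics(circle, ellipse)
    print(list(zip(xs, ys)))  # four symmetric intersection points are expected
    plotConic(circle, ellipse, xs, ys, xBounds=[-4, 4], yBounds=[-3, 3])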
| [
"numpy.linalg.matrix_rank",
"nudge.utils_intersectConics.intersectConicLine",
"nudge.utils_intersectConics.decomposeDegenerateConic",
"nudge.utils_intersectConics.completeIntersection",
"numpy.array",
"numpy.linspace",
"matplotlib.pyplot.contour",
"matplotlib.pyplot.scatter",
"numpy.meshgrid",
"matplotlib.pyplot.show"
] | [((1264, 1276), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1272, 1276), True, 'import numpy as np\n'), ((1286, 1301), 'numpy.linalg.matrix_rank', 'matrix_rank', (['E1'], {}), '(E1)\n', (1297, 1301), False, 'from numpy.linalg import matrix_rank, inv, det\n'), ((1311, 1326), 'numpy.linalg.matrix_rank', 'matrix_rank', (['E2'], {}), '(E2)\n', (1322, 1326), False, 'from numpy.linalg import matrix_rank, inv, det\n'), ((2792, 2833), 'numpy.linspace', 'np.linspace', (['xBounds[0]', 'xBounds[1]', '(5000)'], {}), '(xBounds[0], xBounds[1], 5000)\n', (2803, 2833), True, 'import numpy as np\n'), ((2840, 2881), 'numpy.linspace', 'np.linspace', (['yBounds[0]', 'yBounds[1]', '(5000)'], {}), '(yBounds[0], yBounds[1], 5000)\n', (2851, 2881), True, 'import numpy as np\n'), ((2891, 2908), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (2902, 2908), True, 'import numpy as np\n'), ((3043, 3138), 'matplotlib.pyplot.contour', 'plt.contour', (['x', 'y', '(A * x ** 2 + B * x * y + C * y ** 2 + D * x + E * y + F)', '[0]'], {'colors': '"""g"""'}), "(x, y, A * x ** 2 + B * x * y + C * y ** 2 + D * x + E * y + F,\n [0], colors='g')\n", (3054, 3138), True, 'import matplotlib.pyplot as plt\n'), ((3221, 3316), 'matplotlib.pyplot.contour', 'plt.contour', (['x', 'y', '(A * x ** 2 + B * x * y + C * y ** 2 + D * x + E * y + F)', '[0]'], {'colors': '"""r"""'}), "(x, y, A * x ** 2 + B * x * y + C * y ** 2 + D * x + E * y + F,\n [0], colors='r')\n", (3232, 3316), True, 'import matplotlib.pyplot as plt\n'), ((3302, 3356), 'matplotlib.pyplot.scatter', 'plt.scatter', (['points_x', 'points_y'], {'marker': '"""o"""', 'color': '"""k"""'}), "(points_x, points_y, marker='o', color='k')\n", (3313, 3356), True, 'import matplotlib.pyplot as plt\n'), ((3361, 3371), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3369, 3371), True, 'import matplotlib.pyplot as plt\n'), ((1369, 1397), 'nudge.utils_intersectConics.completeIntersection', 'completeIntersection', (['E1', 'E2'], {}), '(E1, E2)\n', (1389, 1397), False, 'from nudge.utils_intersectConics import completeIntersection, decomposeDegenerateConic, intersectConicLine\n'), ((1591, 1621), 'nudge.utils_intersectConics.decomposeDegenerateConic', 'decomposeDegenerateConic', (['defE'], {}), '(defE)\n', (1615, 1621), False, 'from nudge.utils_intersectConics import completeIntersection, decomposeDegenerateConic, intersectConicLine\n'), ((1635, 1663), 'nudge.utils_intersectConics.intersectConicLine', 'intersectConicLine', (['fullE', 'm'], {}), '(fullE, m)\n', (1653, 1663), False, 'from nudge.utils_intersectConics import completeIntersection, decomposeDegenerateConic, intersectConicLine\n'), ((1676, 1704), 'nudge.utils_intersectConics.intersectConicLine', 'intersectConicLine', (['fullE', 'l'], {}), '(fullE, l)\n', (1694, 1704), False, 'from nudge.utils_intersectConics import completeIntersection, decomposeDegenerateConic, intersectConicLine\n'), ((1716, 1734), 'numpy.array', 'np.array', (['[P1, P2]'], {}), '([P1, P2])\n', (1724, 1734), True, 'import numpy as np\n')] |
import numpy
import pandas as pd
print("<<Pandas 데이터 구조에 익숙해지기>>")
inflation = pd.Series((2.2,3.4,2.8,1.6,2.3,2.7,3.4,3.2,2.8,3.8,-0.4,1.6,3.2,2.1,1.5,1.5))
print(inflation)
print(inflation.values)
print(inflation.index.values)
inflation.index = pd.Index(range(1999,2015))
inflation[2015] = numpy.nan
inflation.index.name = "Year"
inflation.name = "%"
print(inflation.head())
print(inflation.tail())
alco2009 = pd.read_csv("niaaa-report2009.csv", index_col = ["State"])
print(alco2009)
alco2009["Wine"].head()
alco2009["Total"] = 0
print(alco2009.head())
print("")
print("<<데이터 모양 바꾸기>>")
print(alco2009.columns.values)
alco2009.reset_index().set_index("Beer").head()
"Samoa" in alco2009.index
s_states = [state for state in alco2009.index if state[0] == 's']+["Samoa"]
print(s_states)
drinks = list(alco2009.columns)+["Water"]
print(drinks)
nan_alco = alco2009.reindex(s_states, columns = drinks)
print(nan_alco)
alco = pd.read_csv("niaaa-report.csv",index_col=["state","Year"])
print(alco)
nan_alco.dropna(how="all")
nan_alco.dropna(how="all", axis = 1)
print(nan_alco.isnull())
print(nan_alco.notnull())
sp = nan_alco["Sprits"]
clean = sp.notnull()
sp[~clean] = sp[clean].mean()
print(nan_alco)
print(nan_alco.fillna(0))
print(nan_alco.fillna(method = "ffill"))
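
# --- Added illustration (not part of the original script) ---
# The NIAAA CSV files used above are not included here, so this is a small,
# self-contained sketch of the same reindex / notnull / fillna pattern on
# made-up numbers; the column and index labels below are only placeholders.
demo = pd.DataFrame({"Beer": [1.2, 0.9], "Wine": [0.4, 0.3]},
                    index=["south carolina", "south dakota"])
demo_nan = demo.reindex(["south carolina", "south dakota", "Samoa"],
                        columns=["Beer", "Wine", "Water"])
print(demo_nan.notnull())                     # boolean mask of present values
demo_nan.loc[demo_nan["Beer"].isnull(), "Beer"] = demo_nan["Beer"].mean()
print(demo_nan.fillna(0))                     # replace the remaining NaNs with 0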
| [
"pandas.Series",
"pandas.read_csv"
] | [((79, 175), 'pandas.Series', 'pd.Series', (['(2.2, 3.4, 2.8, 1.6, 2.3, 2.7, 3.4, 3.2, 2.8, 3.8, -0.4, 1.6, 3.2, 2.1, 1.5,\n 1.5)'], {}), '((2.2, 3.4, 2.8, 1.6, 2.3, 2.7, 3.4, 3.2, 2.8, 3.8, -0.4, 1.6, 3.2,\n 2.1, 1.5, 1.5))\n', (88, 175), True, 'import pandas as pd\n'), ((411, 467), 'pandas.read_csv', 'pd.read_csv', (['"""niaaa-report2009.csv"""'], {'index_col': "['State']"}), "('niaaa-report2009.csv', index_col=['State'])\n", (422, 467), True, 'import pandas as pd\n'), ((923, 983), 'pandas.read_csv', 'pd.read_csv', (['"""niaaa-report.csv"""'], {'index_col': "['state', 'Year']"}), "('niaaa-report.csv', index_col=['state', 'Year'])\n", (934, 983), True, 'import pandas as pd\n')] |
from flask import Flask, render_template
import json
app = Flask(__name__)
@app.route("/",methods = ["GET"])
def main():
with open("one_day_schedule.json") as schedule:
events = json.load(schedule)
return render_template("index.html", events = events)
if __name__ =="__main__":
app.run(debug=True) | [
"flask.render_template",
"json.load",
"flask.Flask"
] | [((60, 75), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (65, 75), False, 'from flask import Flask, render_template\n'), ((223, 267), 'flask.render_template', 'render_template', (['"""index.html"""'], {'events': 'events'}), "('index.html', events=events)\n", (238, 267), False, 'from flask import Flask, render_template\n'), ((192, 211), 'json.load', 'json.load', (['schedule'], {}), '(schedule)\n', (201, 211), False, 'import json\n')] |
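
# --- Added note (illustrative; not part of the original app) ---
# The schema of one_day_schedule.json is not shown above; the field names used
# here are assumptions. A file like the one generated below would work with the
# view, and index.html would then loop over `events` (e.g. a Jinja2 for-loop).
import json
sample_events = [
    {"time": "09:00", "title": "Stand-up"},   # hypothetical fields
    {"time": "13:00", "title": "Lunch"},
]
with open("one_day_schedule.json", "w") as fh:
    json.dump(sample_events, fh, indent=2)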
import numpy as np
def _bootstrap_from_null(samp_inds, model_class, formula, data, null_formula, exog_test_params, count_col):
#model_class = eval(model_name)
np.random.seed(samp_inds[0])
model = model_class()
res = model.fit(formula=formula, data=data, null_formula=null_formula)
res.fit_null(null_formula)
coef_ind = [model.exog.design_info.column_names.index(ep) for ep in exog_test_params]
"""Simulate data under the null hypothesis [nsamps, data rows]"""
nsamps = len(samp_inds)
Wsamples = model.null_res.random_samples(size=nsamps)
param_samples = np.zeros((nsamps, len(exog_test_params)))
for i, si in enumerate(samp_inds):
"""Fit each null dataset with the actual covars"""
model.data.loc[:, count_col] = Wsamples[i, :]
model.fit(formula, model.data)
param_samples[i, :] = model.mu_coefs[coef_ind]
return param_samples | [
"numpy.random.seed"
] | [((169, 197), 'numpy.random.seed', 'np.random.seed', (['samp_inds[0]'], {}), '(samp_inds[0])\n', (183, 197), True, 'import numpy as np\n')] |
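
# --- Added sketch (illustrative only; the project-specific model_class API used
# above is not reproduced here) ---
# The same "parametric bootstrap from the null" idea with plain numpy: estimate a
# null model (a single shared Poisson rate), simulate datasets from it, re-estimate
# the statistic of interest on each simulated dataset, and collect the samples.
import numpy as np

def bootstrap_rate_diff_from_null(y_a, y_b, nsamps=1000, seed=0):
    rng = np.random.default_rng(seed)
    null_rate = np.concatenate([y_a, y_b]).mean()   # null: both groups share one rate
    diffs = np.empty(nsamps)
    for i in range(nsamps):
        sim_a = rng.poisson(null_rate, size=len(y_a))  # data simulated under the null
        sim_b = rng.poisson(null_rate, size=len(y_b))
        diffs[i] = sim_a.mean() - sim_b.mean()         # statistic re-estimated each time
    return diffs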
import pyinputplus as pyip
def auto_company_menu():
print('Welcome to the lnu auto company')
print('Menu')
print('1: Manage customer')
print('2: Manage cars')
print('3: Manage leasing')
print('4: The most popular car')
    print('5: The highest-rated customer')
print('6: exit')
return pyip.inputInt('Please enter one of the above options: ')
def car_menu():
print('1: Add a car')
print('2: Remove a car')
print('3: Change the car information')
print('4: back to the previous menu')
return pyip.inputInt('Please enter one of the above options: ')
def customer_menu():
print('1: Add a customer')
print('2: Remove a customer membership')
print('3: Change the customers information')
print('4: Get the information of the car based on the specific customer')
print('5: Back to the previous menu')
return pyip.inputInt('Please enter one of the above options: ')
def leasing_menu():
print('1: Lease a car')
print('2: Return the leased car')
print('3: Get the customers information based on the return date')
print('4: Back to the previous menu')
return pyip.inputInt('Please enter one of the above options: ')
def register_car():
print('to register a new car , fill out the following information ')
car_model = input('Car model: ')
manufacture = input('Car manufacture: ')
product_year = pyip.inputInt('Car production year: ')
c_category = car_category()
quantity = pyip.inputInt('Quantity: ')
car = (car_model, manufacture, product_year, c_category, int(quantity))
return car
def car_category():
print('Choose on of the below car categories that you are interested in:')
print('1. Sedan \n2. Coupe \n3. Sports \n4. Station wagon \n5. Hatchback \n6. Convertible \n7. SUV \n8. Minivan '
'\n9. Pickup truck \n10. Other')
categories = pyip.inputInt('Enter : ')
if categories == 1:
return 'Sedan'
elif categories == 2:
return 'Coupe'
elif categories == 3:
return 'Sports'
elif categories == 4:
return 'Station wagon'
elif categories == 5:
return 'Hatchback'
elif categories == 6:
return 'Convertible'
elif categories == 7:
return 'SUV'
elif categories == 8:
return 'Minivan'
elif categories == 9:
return 'Pickup truck'
else:
return 'Other'
def register_customer():
print('to register a new customer , fill out the following information ')
    first_name = input('First name: ')
surname = input('Surname: ')
gender_specification = gender_menu()
customer_address = input('Customer address: ')
social_security_number = pyip.inputNum('Social security number (YYMMDDXXXX): ')
customer = (first_name, surname, gender_specification, customer_address, social_security_number)
return customer
def gender_menu():
categories = pyip.inputInt('1: Male \n2: Female \n3: Other \nEnter: ')
if categories == 1:
return 'Male'
elif categories == 2:
return 'Female'
else:
return 'Other'
def remove_customer():
social_security_number = pyip.inputInt('Social security number (YYMMDDXXXX): ')
return social_security_number
def remove_car():
car_model = input('Car model: ')
product_year = pyip.inputInt('Car production year: ')
return car_model, product_year
def invalid_input():
print('Invalid input')
def return_leased_car():
car_return_date = input('Enter the return date (YYYY-MM-DD): ')
year, month, day = car_return_date.split('-')
return_date = (year, month, day)
return return_date
def display_invalid_alternatives(alternatives):
if alternatives == 'customer':
print('Customer has not been found')
elif alternatives == 'register':
print('Registration has not been found')
elif alternatives == 'leasing':
print('No information about the entered leasing has been found')
elif alternatives == 'return':
print('No information about the entered return date has been found')
elif alternatives == 'social_security_number':
print('Wrong social security number format (the correct format = YYMMDDXXXX)')
def exit_program():
print('Exit the Program, See You!')
def social_number():
print('social_security_number')
def entering_leasing_info():
print('Fill out the following information to do a new leasing')
def entering_returning_car_info():
print('Fill out the following information to return the leased car')
def terminating_system():
print("Program terminated manually")
def database_connection_error():
print('Database connection failed')
def print_high_rated_customer():
    print('The highest-rated customers (customers with the most leased cars) are:')
def customer_rate(count, customer):
print('Number %d is Name: %s with the total leased of: %s' % (count, customer[0], customer[1]))
def print_high_rated_car():
    print('The most popular cars (cars with the highest number of leases) are:')
def car_rate(car, count):
print('Number %d is The car model: %s with the total number of leased: %s\n\tManufacture: %s Category: %s product '
'year: %s' % (
count, car[0], car[4], car[1], car[2], car[3]))
def customers_name_on_return_date(customer):
print('Customer Name is: %s %s' % (customer[0], customer[1]))
def customers_on_return_date():
print('The following customers has reached to the return date of their leasing time')
def add_customer_to_database():
print('A new customer added to the database')
def add_quantity_to_database():
print('The car quantity data added to the database')
def add_leasing_info_to_database():
print('The leasing info added to the database')
def drop_customer_from_database():
print('The entered customer removed from the database')
def drop_car_from_database():
print('The entered car quantity removed from the database')
def drop_leasing_info_from_database():
print('The entered leasing information removed from the database')
def update_customer_in_database():
print('The entered customer information has been updated')
def update_car_in_database():
print('The entered car information has been updated')
def print_cars_per_customer():
print('The following cars have been leased by the following customer')
def print_car_models(car, count):
print('%d car model: %s \n\tManufacture: %s Product Year: %s' % (count, car[1], car[2], car[3]))
def car_out_of_quantity():
print('The info is not available due to depletion of inventory')
def print_exception(exception):
print('The exception is: ', exception)
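
# --- Added usage sketch (not part of the original module) ---
# A minimal driver loop showing how the menu helpers above might be wired
# together; the actual database work lives elsewhere in the project, so only
# the dispatch skeleton is shown here.
def run_menus():
    while True:
        choice = auto_company_menu()
        if choice == 1:
            customer_menu()
        elif choice == 2:
            car_menu()
        elif choice == 3:
            leasing_menu()
        elif choice in (4, 5):
            pass  # reporting paths (popular cars / top customers) handled elsewhere
        elif choice == 6:
            exit_program()
            break
        else:
            invalid_input()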
| [
"pyinputplus.inputInt",
"pyinputplus.inputNum"
] | [((322, 378), 'pyinputplus.inputInt', 'pyip.inputInt', (['"""Please enter one of the above options: """'], {}), "('Please enter one of the above options: ')\n", (335, 378), True, 'import pyinputplus as pyip\n'), ((548, 604), 'pyinputplus.inputInt', 'pyip.inputInt', (['"""Please enter one of the above options: """'], {}), "('Please enter one of the above options: ')\n", (561, 604), True, 'import pyinputplus as pyip\n'), ((884, 940), 'pyinputplus.inputInt', 'pyip.inputInt', (['"""Please enter one of the above options: """'], {}), "('Please enter one of the above options: ')\n", (897, 940), True, 'import pyinputplus as pyip\n'), ((1153, 1209), 'pyinputplus.inputInt', 'pyip.inputInt', (['"""Please enter one of the above options: """'], {}), "('Please enter one of the above options: ')\n", (1166, 1209), True, 'import pyinputplus as pyip\n'), ((1406, 1444), 'pyinputplus.inputInt', 'pyip.inputInt', (['"""Car production year: """'], {}), "('Car production year: ')\n", (1419, 1444), True, 'import pyinputplus as pyip\n'), ((1492, 1519), 'pyinputplus.inputInt', 'pyip.inputInt', (['"""Quantity: """'], {}), "('Quantity: ')\n", (1505, 1519), True, 'import pyinputplus as pyip\n'), ((1890, 1915), 'pyinputplus.inputInt', 'pyip.inputInt', (['"""Enter : """'], {}), "('Enter : ')\n", (1903, 1915), True, 'import pyinputplus as pyip\n'), ((2713, 2767), 'pyinputplus.inputNum', 'pyip.inputNum', (['"""Social security number (YYMMDDXXXX): """'], {}), "('Social security number (YYMMDDXXXX): ')\n", (2726, 2767), True, 'import pyinputplus as pyip\n'), ((2927, 2985), 'pyinputplus.inputInt', 'pyip.inputInt', (['"""1: Male \n2: Female \n3: Other \nEnter: """'], {}), '("""1: Male \n2: Female \n3: Other \nEnter: """)\n', (2940, 2985), True, 'import pyinputplus as pyip\n'), ((3168, 3222), 'pyinputplus.inputInt', 'pyip.inputInt', (['"""Social security number (YYMMDDXXXX): """'], {}), "('Social security number (YYMMDDXXXX): ')\n", (3181, 3222), True, 'import pyinputplus as pyip\n'), ((3333, 3371), 'pyinputplus.inputInt', 'pyip.inputInt', (['"""Car production year: """'], {}), "('Car production year: ')\n", (3346, 3371), True, 'import pyinputplus as pyip\n')] |
from rest_framework import serializers
from .models import Expense, OngoingExpense
class ExpenseSerializer(serializers.ModelSerializer):
owner = serializers.HiddenField(default=serializers.CurrentUserDefault())
class Meta:
model = Expense
fields = '__all__'
class OngoingExpenseSerializer(serializers.ModelSerializer):
owner = serializers.HiddenField(default=serializers.CurrentUserDefault())
class Meta:
model = OngoingExpense
fields = '__all__'
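
# --- Added usage sketch (illustrative; not part of the original file) ---
# These serializers are typically consumed from a ModelViewSet that restricts each
# user to their own records; this assumes Expense has an `owner` foreign key, which
# the HiddenField/CurrentUserDefault pair above implies.
from rest_framework import permissions, viewsets

class ExpenseViewSet(viewsets.ModelViewSet):
    serializer_class = ExpenseSerializer
    permission_classes = [permissions.IsAuthenticated]

    def get_queryset(self):
        # CurrentUserDefault fills `owner` on create; filtering keeps reads per-user too.
        return Expense.objects.filter(owner=self.request.user)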
| [
"rest_framework.serializers.CurrentUserDefault"
] | [((184, 216), 'rest_framework.serializers.CurrentUserDefault', 'serializers.CurrentUserDefault', ([], {}), '()\n', (214, 216), False, 'from rest_framework import serializers\n'), ((393, 425), 'rest_framework.serializers.CurrentUserDefault', 'serializers.CurrentUserDefault', ([], {}), '()\n', (423, 425), False, 'from rest_framework import serializers\n')] |