"""
Mobile IP.
"""
from scapy.fields import ByteEnumField, ByteField, IPField, LongField, \
ShortField, XByteField
from scapy.packet import Packet, bind_layers, bind_bottom_up
from scapy.layers.inet import IP, UDP
class MobileIP(Packet):
name = "Mobile IP (RFC3344)"
fields_desc = [ByteEnumField("type", 1, {1: "RRQ", 3: "RRP"})]
class MobileIPRRQ(Packet):
name = "Mobile IP Registration Request (RFC3344)"
fields_desc = [XByteField("flags", 0),
ShortField("lifetime", 180),
IPField("homeaddr", "0.0.0.0"),
IPField("haaddr", "0.0.0.0"),
IPField("coaddr", "0.0.0.0"),
LongField("id", 0), ]
class MobileIPRRP(Packet):
name = "Mobile IP Registration Reply (RFC3344)"
fields_desc = [ByteField("code", 0),
ShortField("lifetime", 180),
IPField("homeaddr", "0.0.0.0"),
IPField("haaddr", "0.0.0.0"),
LongField("id", 0), ]
class MobileIPTunnelData(Packet):
name = "Mobile IP Tunnel Data Message (RFC3519)"
fields_desc = [ByteField("nexthdr", 4),
ShortField("res", 0)]
bind_bottom_up(UDP, MobileIP, dport=434)
bind_bottom_up(UDP, MobileIP, sport=434)
bind_layers(UDP, MobileIP, sport=434, dport=434)
bind_layers(MobileIP, MobileIPRRQ, type=1)
bind_layers(MobileIP, MobileIPRRP, type=3)
bind_layers(MobileIP, MobileIPTunnelData, type=4)
bind_layers(MobileIPTunnelData, IP, nexthdr=4)
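# A minimal usage sketch (hypothetical addresses, not part of the original
# module): with the bindings above, a registration request can be built and
# dissected back like any other Scapy layer stack.
if __name__ == "__main__":
    rrq = IP(dst="10.0.0.1") / UDP(sport=434, dport=434) / \
        MobileIP() / MobileIPRRQ(homeaddr="192.168.0.2", haaddr="10.0.0.1")
    raw = bytes(rrq)
    # UDP port 434 dissects back into MobileIP / MobileIPRRQ
    assert IP(raw).haslayer(MobileIPRRQ)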
# [source: 4shadoww/hakkuframework -- lib/scapy/layers/mobileip.py, license: mit]
from Expression import *
class LinearExpression(Expression):
"""
Class representing a linear expression node in the AST of a MLP
"""
def __init__(self):
Expression.__init__(self)
class ValuedLinearExpression(LinearExpression):
"""
Class representing a valued linear expression node in the AST of a MLP
"""
def __init__(self, value):
"""
Set the single value of this linear expression
:param value : Identifier | Number
"""
LinearExpression.__init__(self)
self.value = value
def __str__(self):
"""
to string
"""
return "ValuedExpr:" + str(self.value)
def setupEnvironment(self, codeSetup):
"""
Generate the MiniZinc code for the declaration of identifiers and sets in this linear expression
"""
codeSetup.setupEnvironment(self)
def generateCode(self, codeGenerator):
"""
Generate the MiniZinc code for this valued linear expression
"""
return codeGenerator.generateCode(self)
class LinearExpressionBetweenParenthesis(LinearExpression):
"""
Class representing a linear expression between parenthesis node in the AST of a MLP
"""
def __init__(self, linearExpression):
"""
Set the linear expression
:param linearExpression : LinearExpression
"""
LinearExpression.__init__(self)
self.linearExpression = linearExpression
def __str__(self):
"""
to string
"""
return "LE: (" + str(self.linearExpression) + ")"
def setupEnvironment(self, codeSetup):
"""
Generate the MiniZinc code for the declaration of identifiers and sets in this linear expression
"""
codeSetup.setupEnvironment(self)
def generateCode(self, codeGenerator):
"""
Generate the MiniZinc code for this linear expression
"""
return codeGenerator.generateCode(self)
class LinearExpressionWithArithmeticOperation(LinearExpression):
"""
Class representing a linear expression with arithmetic operation node in the AST of a MLP
"""
PLUS = "+"
MINUS = "-"
TIMES = "*"
DIV = "/"
def __init__(self, op, expression1, expression2):
"""
Set the expressions participating in the arithmetic operation
:param op : (PLUS, MINUS, TIMES, DIV)
:param expression1 : LinearExpression | NumericExpression
:param expression2 : LinearExpression | NumericExpression
"""
LinearExpression.__init__(self)
self.op = op
self.expression1 = expression1
self.expression2 = expression2
def __str__(self):
"""
to string
"""
return "OpLE:" + str(self.expression1) + " " + self.op + " " + str(self.expression2)
def setupEnvironment(self, codeSetup):
"""
Generate the MiniZinc code for the declaration of identifiers and sets in this linear expression
"""
codeSetup.setupEnvironment(self)
def generateCode(self, codeGenerator):
"""
Generate the MiniZinc code for this linear expression with arithmetic operation
"""
return codeGenerator.generateCode(self)
class MinusLinearExpression(LinearExpression):
"""
Class representing a minus linear expression node in the AST of a MLP
"""
def __init__(self, linearExpression):
"""
Set the expressions being negated
:param linearExpression: LinearExpression
"""
LinearExpression.__init__(self)
self.linearExpression = linearExpression
def __str__(self):
"""
to string
"""
return "MinusLE:" + "-(" + str(self.linearExpression) + ")"
def setupEnvironment(self, codeSetup):
"""
Generate the MiniZinc code for the declaration of identifiers and sets in this linear expression
"""
codeSetup.setupEnvironment(self)
def generateCode(self, codeGenerator):
"""
Generate the MiniZinc code for this minus linear expression
"""
return codeGenerator.generateCode(self)
class IteratedLinearExpression(LinearExpression):
"""
Class representing an iterated linear expression node in the AST of a MLP
"""
def __init__(self, linearExpression, indexingExpression, numericExpression = None):
"""
Set the components of the iterated linear expression
:param linearExpression : LinearExpression
:param indexingExpression : IndexingExpression
:param numericExpression : NumericExpression
"""
LinearExpression.__init__(self)
self.linearExpression = linearExpression
self.indexingExpression = indexingExpression
self.numericExpression = numericExpression
def __str__(self):
"""
to string
"""
res = "sum(" + str(self.indexingExpression) + ")"
if self.numericExpression:
res += "^(" + str(self.numericExpression) + ")"
res += "(" + str(self.linearExpression) + ")"
return "ItLE:" + res
def setupEnvironment(self, codeSetup):
"""
Generate the MiniZinc code for the declaration of identifiers and sets in this linear expression
"""
codeSetup.setupEnvironment(self)
def generateCode(self, codeGenerator):
"""
Generate the MiniZinc code for this iterated linear expression
"""
return codeGenerator.generateCode(self)
class ConditionalLinearExpression(LinearExpression):
"""
Class representing a conditional linear expression node in the AST of a MLP
"""
def __init__(self, logicalExpression, linearExpression1 = None, linearExpression2 = None, elseIfExpression = None):
"""
Set the conditional linear expression
:param logicalExpression : LogicalExpression
:param linearExpression1 : LinearExpression
:param linearExpression2 : LinearExpression
:param elseIfExpression : ElseIfExpressionList
"""
LinearExpression.__init__(self)
self.logicalExpression = logicalExpression
self.linearExpression1 = linearExpression1
self.linearExpression2 = linearExpression2
self.elseIfExpression = elseIfExpression
def __str__(self):
"""
to string
"""
res = "ConditionalLinearExpression: " + " IF "+str(self.logicalExpression)
if self.linearExpression1:
res += " THEN " + str(self.linearExpression1)
if self.elseIfExpression:
res += str(self.elseIfExpression)
if self.linearExpression2 != None:
res += " ELSE " + str(self.linearExpression2)
res += " ENDIF "
return res
def addElseIfExpression(self, elseIfExpression):
self.elseIfExpression = elseIfExpression
def addElseExpression(self, elseExpression):
self.linearExpression2 = elseExpression
def getDependencies(self, codeGenerator):
dep = self.logicalExpression.getDependencies(codeGenerator) + self.linearExpression1.getDependencies(codeGenerator)
if self.elseIfExpression != None:
dep += self.elseIfExpression.getDependencies(codeGenerator)
if self.linearExpression2 != None:
dep += self.linearExpression2.getDependencies(codeGenerator)
return list(set(dep))
def setupEnvironment(self, codeSetup):
"""
Setup the MiniZinc code for the identifiers and sets used in this conditional linear expression
"""
codeSetup.setupEnvironment(self)
def prepare(self, codePrepare):
"""
Prepare the MiniZinc code for the identifiers and sets used in this conditional linear expression
"""
codePrepare.prepare(self)
def generateCode(self, codeGenerator):
"""
Generate the MiniZinc code for this conditional linear expression
"""
return codeGenerator.generateCode(self)
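# A minimal sketch of the visitor pattern these nodes rely on.  The
# _PrintGenerator class below is hypothetical (the real code generators live
# elsewhere in this project); it only illustrates that generateCode()
# dispatches back into the visitor with the node itself.
if __name__ == "__main__":
    class _PrintGenerator(object):
        def generateCode(self, node):
            if isinstance(node, ValuedLinearExpression):
                return str(node.value)
            if isinstance(node, LinearExpressionWithArithmeticOperation):
                return (node.expression1.generateCode(self) + " " + node.op +
                        " " + node.expression2.generateCode(self))
            return str(node)
    expr = LinearExpressionWithArithmeticOperation(
        LinearExpressionWithArithmeticOperation.PLUS,
        ValuedLinearExpression("x"),
        ValuedLinearExpression(5))
    print(expr.generateCode(_PrintGenerator()))  # prints: x + 5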
# [source: rafaellc28/Latex2MiniZinc -- latex2minizinc/LinearExpression.py, license: mit]
"""The Vilfo Router integration."""
from datetime import timedelta
import logging
from vilfo import Client as VilfoClient
from vilfo.exceptions import VilfoException
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_ACCESS_TOKEN, CONF_HOST, Platform
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.util import Throttle
from .const import ATTR_BOOT_TIME, ATTR_LOAD, DOMAIN, ROUTER_DEFAULT_HOST
PLATFORMS = [Platform.SENSOR]
DEFAULT_SCAN_INTERVAL = timedelta(seconds=30)
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Vilfo Router from a config entry."""
host = entry.data[CONF_HOST]
access_token = entry.data[CONF_ACCESS_TOKEN]
vilfo_router = VilfoRouterData(hass, host, access_token)
await vilfo_router.async_update()
if not vilfo_router.available:
raise ConfigEntryNotReady
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][entry.entry_id] = vilfo_router
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
class VilfoRouterData:
"""Define an object to hold sensor data."""
def __init__(self, hass, host, access_token):
"""Initialize."""
self._vilfo = VilfoClient(host, access_token)
self.hass = hass
self.host = host
self.available = False
self.firmware_version = None
self.mac_address = self._vilfo.mac
self.data = {}
self._unavailable_logged = False
@property
def unique_id(self):
"""Get the unique_id for the Vilfo Router."""
if self.mac_address:
return self.mac_address
if self.host == ROUTER_DEFAULT_HOST:
return self.host
return self.host
def _fetch_data(self):
board_information = self._vilfo.get_board_information()
load = self._vilfo.get_load()
return {
"board_information": board_information,
"load": load,
}
@Throttle(DEFAULT_SCAN_INTERVAL)
async def async_update(self):
"""Update data using calls to VilfoClient library."""
try:
data = await self.hass.async_add_executor_job(self._fetch_data)
self.firmware_version = data["board_information"]["version"]
self.data[ATTR_BOOT_TIME] = data["board_information"]["bootTime"]
self.data[ATTR_LOAD] = data["load"]
self.available = True
except VilfoException as error:
if not self._unavailable_logged:
_LOGGER.error(
"Could not fetch data from %s, error: %s", self.host, error
)
self._unavailable_logged = True
self.available = False
return
if self.available and self._unavailable_logged:
_LOGGER.info("Vilfo Router %s is available again", self.host)
self._unavailable_logged = False
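# Note on the @Throttle decorator above: a call to async_update() that arrives
# within DEFAULT_SCAN_INTERVAL of the previous call is skipped and returns
# None, so repeated sensor refreshes do not flood the router with API
# requests.  A hedged sketch of the pattern (the function name below is
# hypothetical, not part of this integration):
#
#     @Throttle(timedelta(seconds=30))
#     async def async_poll(self):
#         ...  # body runs at most once every 30 seconds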
# [source: GenericStudent/home-assistant -- homeassistant/components/vilfo/__init__.py, license: apache-2.0]
'''
A program with bugs. Try to fix them all!
-----------------------------------------------------------
(c) 2013 Allegra Via and Kristian Rother
Licensed under the conditions of the Python License
This code appears in section 12.2.2 of the book
"Managing Biological Data with Python".
-----------------------------------------------------------
'''
def evaluate_data(data, lower=100, upper=300):
"""Counts data points in three bins."""
import pdb
pdb.set_trace()
smaller = 0
between = 0
bigger = 0
for length in data:
if length < lower:
smaller = smaller + 1
elif lower < length < upper:
between = between + 1
elif length > upper:
bigger += 1
# bigger = 1
return smaller, between, bigger
def read_data(filename):
"""Reads neuron lengths from a text file."""
primary, secondary = [], []
for line in open(filename):
category, length = line.split("\t")
length = float(length)
if category == "Primary":
primary.append(length)
elif category == "Secondary":
# print(dir())
secondary.append(length)
return primary, secondary
def write_output(filename, count_pri, count_sec):
"""Writes counted values to a file."""
output = open(filename,"w")
output.write("category <100 100-300 >300\n")
output.write("Primary : %5i %5i %5i\n" % count_pri)
output.write("Secondary: %5i %5i %5i\n" % count_sec)
output.close()
primary, secondary = read_data('neuron_data.txt')
count_pri = evaluate_data(primary)
count_sec = evaluate_data(secondary)
write_output('results.txt' , count_pri,count_sec)
# [source: raymonwu/Managing_Your_Biological_Data_with_Python_3 -- 12-debugging/12.3.3.1_program_with_bugs.py, license: mit]
"""This module offers methods and user interaction widgets/windows for handling
the table-like step functions of elementary cellular automatons.
* A bit of functionality to generate a rule number from arbitrary step
functions by running them on a pre-generated target and finding out how it
behaved.
* ...
.. testsetup ::
from zasim import elementarytools
"""
# This file is part of zasim. zasim is licensed under the BSD 3-clause license.
# See LICENSE.txt for details.
from __future__ import absolute_import
from .cagen.utils import elementary_digits_and_values, rule_nr_to_rule_arr
neighbourhood_actions = {}
def neighbourhood_action(name):
def appender(fun):
neighbourhood_actions[name] = fun
return fun
return appender
def digits_and_values_to_rule_nr(digits_and_values, base=2):
if isinstance(digits_and_values[0], dict):
digits_and_values = [val["result_value"] for val in digits_and_values]
num = 0
for digit, value in enumerate(digits_and_values):
num += value * (base ** digit)
return num
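# A small sketch of the encoding above (illustrative only, not part of the
# original module): each entry contributes value * base ** position with the
# least significant digit first, so rule 110 falls out of its truth table.
assert digits_and_values_to_rule_nr([0, 1, 1, 1, 0, 1, 1, 0]) == 110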
_minimize_cache = {}
def minimize_rule_number(neighbourhood, number, base=2):
digits = base ** len(neighbourhood.offsets)
rule_arr = rule_nr_to_rule_arr(number, digits, base)
cache = {number: ([], rule_arr)}
tries = [([name], rule_arr) for name in neighbourhood_actions]
for route, data in tries:
new = neighbourhood_actions[route[-1]](neighbourhood, data)
rule_nr = digits_and_values_to_rule_nr(new)
if rule_nr in cache:
oldroute, olddata = cache[rule_nr]
if len(oldroute) > len(route):
cache[rule_nr] = (route, new)
tries.extend([(route + [name], new) for name in neighbourhood_actions])
else:
cache[rule_nr] = (route, new)
tries.extend([(route + [name], new) for name in neighbourhood_actions])
lowest_number = min(cache.keys())
return lowest_number, cache[lowest_number], cache
def minimize_rule_values(neighbourhood, digits_and_values):
number = digits_and_values_to_rule_nr(digits_and_values)
return minimize_rule_number(neighbourhood, number)
@neighbourhood_action("flip all bits")
def flip_all(neighbourhood, results, base=2):
if isinstance(results[0], dict):
nres = [val.copy() for val in results]
for val in nres:
val["result_value"] = base - 1 - val["result_value"]
return nres
else:
return [base - 1 - res for res in results]
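# Illustrative only (not part of the original module): for a plain list of
# result values the neighbourhood argument is unused, so the action can be
# tried directly.
assert flip_all(None, [0, 1, 1, 0]) == [1, 0, 0, 1]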
def permutation_to_index_map(neighbourhood, permutation, base=2):
"""Figure out from the given neighbourhood and the permutation what
position in the old array each entry in the new array is supposed to
come from to realize the permutations.
:attr neighbourhood: The neighbourhood object to use.
:attr permutations: A dictionary that says what cell to take the value from
for any given cell."""
resultless_dav = elementary_digits_and_values(neighbourhood, base)
index_map = range(len(resultless_dav))
for index, dav in enumerate(resultless_dav):
ndav = dict((k, dav[permutation[k]]) for k in neighbourhood.names)
other_index = resultless_dav.index(ndav)
index_map[index] = other_index
return index_map
def apply_index_map_values(digits_and_values, index_map):
new = [value.copy() for value in digits_and_values]
for i, _ in enumerate(digits_and_values):
new[index_map[i]]["result_value"] = digits_and_values[i]["result_value"]
return new
def apply_index_map(results, index_map):
if isinstance(results[0], dict):
return apply_index_map_values(results, index_map)
return [results[index_map[i]] for i in range(len(results))]
def flip_offset_to_permutation(neighbourhood, permute_func):
"""Apply the permute_func, which takes in the offset and returns a new
offset to the neighbourhood offsets and return a permutation dictionary
that maps each name of a cell to the name of the cell its data is supposed
to come from."""
offs_to_name = dict(zip(neighbourhood.offsets, neighbourhood.names))
permutation = dict(zip(neighbourhood.names, neighbourhood.names))
for offset, old_name in offs_to_name.iteritems():
new_offset = permute_func(offset)
new_name = offs_to_name[new_offset]
permutation[old_name] = new_name
return permutation
def mirror_by_axis(neighbourhood, axis=[0], base=2):
def mirror_axis_permutation(position, axis=tuple(axis)):
return tuple(-a if num in axis else a for num, a in enumerate(position))
permutation = flip_offset_to_permutation(neighbourhood, mirror_axis_permutation)
return permutation_to_index_map(neighbourhood, permutation, base)
@neighbourhood_action("flip vertically")
def flip_v(neighbourhood, results, cache={}, base=2):
if neighbourhood not in cache:
cache[neighbourhood] = mirror_by_axis(neighbourhood, [1], base)
return apply_index_map(results, cache[neighbourhood])
@neighbourhood_action("flip horizontally")
def flip_h(neighbourhood, results, cache={}, base=2):
if neighbourhood not in cache:
cache[neighbourhood] = mirror_by_axis(neighbourhood, [0], base)
return apply_index_map(results, cache[neighbourhood])
@neighbourhood_action("rotate clockwise")
def rotate_clockwise(neighbourhood, results, cache={}, base=2):
if neighbourhood not in cache:
def rotate((a, b)):
return b, -a
permutation = flip_offset_to_permutation(neighbourhood, rotate)
cache[neighbourhood] = permutation_to_index_map(neighbourhood, permutation, base)
return apply_index_map(results, cache[neighbourhood])
# [source: timo/zasim -- zasim/elementarytools.py, license: bsd-3-clause]
import os
import sys
import random
from os import path
from termcolor import colored
from people import Staff, Fellow
from rooms import Office, LivingSpace
from database.schema import People, DataBaseConnection, Rooms, Unallocated
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
white_line = colored('-' * 60, 'white')
class Dojo(object):
def __init__(self):
self.offices = []
self.livingrooms = []
self.staff = []
self.fellows = []
self.all_rooms = []
self.office_unallocated = []
self.living_unallocated = []
self.allocated = []
self.all_people = self.fellows + self.staff
def get_room(self, rooms):
"""A function to generate a list of random rooms with space.
:param rooms:
:return: room_name
"""
# a room is only available if it's capacity is not exceeded
available_rooms = [room for room in rooms if len(room.occupants)
< room.room_capacity]
# return False if all rooms are full
if available_rooms:
chosen_room = random.choice(available_rooms)
return chosen_room.room_name
return False
# choose a room from the list of available rooms.
def create_room(self, room_name, room_type):
"""Creates a room in the system, either office or living space.
:param room_name: A string representing a room's name.
:param room_type: A string representing a room's type
(Office or Living space)
"""
if room_type == 'office':
if room_name not in [room.room_name for room in self.offices]:
room = Office(room_name=room_name, room_type=room_type)
self.offices.append(room)
self.all_rooms.append(room)
print(white_line)
print(colored('An office called' + ' ' + room_name + ' ' +
'has been successfully created!', 'cyan'))
else:
print(white_line)
print(colored(
'An office with that name already exists!', 'red'))
if room_type == 'livingspace':
if room_name not in [room.room_name for room in self.livingrooms]:
room = LivingSpace(room_name=room_name, room_type=room_type)
# add object to list( has both room_name and room_type)
self.livingrooms.append(room)
self.all_rooms.append(room)
print(white_line)
print(colored('A living space called' + ' ' + room_name + ' ' +
'has been successfully created!', 'cyan'))
else:
print(white_line)
print(colored(
'A living room with that name already exists!', 'red'))
def add_person(self, first_name, last_name,
occupation, wants_accommodation=None):
"""Adds person to the system and allocates them office space and
room space if they are a fellow and requested for accommodation.
:param first_name: A string representing the person's first name.
:param last_name: A string representing the person's second name.
:param occupation: A string representing
the persons's type (Fellow/Staff)
:param wants_accommodation: An optional string representing a
fellow's accommodation
"""
self.first_name = first_name
self.last_name = last_name
self.occupation = occupation
self.wants_accommodation = wants_accommodation
self.person_name = self.first_name + self.last_name
if occupation == 'Fellow':
if self.person_name not in [person.first_name + person.last_name
for person in self.fellows]:
person = Fellow(
first_name, last_name, occupation, wants_accommodation)
self.fellows.append(person)
print(white_line)
print(colored(first_name + ' ' + last_name +
' has been added successfully!', 'cyan'))
# check if fellow wants accommodation, set to 'N' by default
accommodation = person.wants_accommodation
if accommodation is None or accommodation != 'Y':
# if a fellow wants no accommodation grant just office
work_room = self.get_room(self.offices)
# if there is no available office space
if work_room:
for room in self.offices:
if room.room_name == work_room:
room.occupants.append(person)
print(white_line)
print(colored('A ' + person.occupation + ' '
+ person.first_name +
' has been added to '
+ work_room, 'cyan'))
else:
# Add person unallocated if no office space.
self.office_unallocated.append(person)
print(white_line)
print(colored('Office space unavailable, '
'added to office waiting list', 'red'))
else:
# Add person unallocated if no office space.
work_room = self.get_room(self.offices)
living_room = self.get_room(self.livingrooms)
# if there is no available office space
if work_room:
for room in self.offices:
if room.room_name == work_room:
room.occupants.append(person)
print(white_line)
print(colored('A ' + person.occupation +
' ' + person.first_name +
' has been added to '
+ work_room, 'cyan'))
else:
# Add person unallocated if no office space.
self.office_unallocated.append(person)
print(white_line)
print(colored('Office space unavailable, '
'added to office waiting list', 'red'))
if living_room:
for room in self.livingrooms:
if room.room_name == living_room:
room.occupants.append(person)
print(white_line)
print(colored('A ' + person.occupation +
' ' + person.first_name +
' has been added to '
+ living_room, 'cyan'))
else:
# Add person unallocated if no office space.
self.living_unallocated.append(person)
print(white_line)
print(colored('Living space unavailable, '
'added to accommodation waiting list',
'red'))
else:
print(white_line)
print(colored('A fellow with that name already exists',
'red'))
if occupation == 'Staff':
if self.person_name not in [person.first_name + person.last_name
for person in self.staff]:
person = Staff(
first_name, last_name, occupation, wants_accommodation)
self.staff.append(person)
print(white_line)
print(colored(first_name + ' ' + last_name +
' has been added successfully!', 'cyan'))
accommodation = person.wants_accommodation
if accommodation is None or accommodation != 'Y':
work_room = self.get_room(self.offices)
# if there is no available office space
if work_room:
for room in self.offices:
if room.room_name == work_room:
room.occupants.append(person)
print(white_line)
print(colored('A ' + person.occupation + ' '
+ person.first_name +
' has been added to '
+ work_room, 'cyan'))
else:
# Add person unallocated if no office space.
self.office_unallocated.append(person)
print(white_line)
print(colored('Office space unavailable, '
'added to office waiting list', 'red'))
else:
print(colored('Staff cannot get accommodation!', 'red'))
# Add person unallocated if no office space.
work_room = self.get_room(self.offices)
# if there is no available office space
if work_room:
for room in self.offices:
if room.room_name == work_room:
room.occupants.append(person)
print(white_line)
print(colored('A ' + person.occupation +
' ' + person.first_name +
' has been added to '
+ work_room, 'cyan'))
return 'Staff cannot get accommodation'
else:
# Add person unallocated if no office space.
self.office_unallocated.append(person)
print(white_line)
print(colored('Office space unavailable, '
'added to office waiting list', 'red'))
else:
print(white_line)
print(colored('A member of staff with that name already exists',
'red'))
def print_room(self, room_name):
"""Gets a room name as an argument and returns a status of the room's
existence and occupants if room exists
:param room_name: A string representing the name of the room.
"""
# check if the requested room is available in created rooms.
data = ""
if room_name not in [room.room_name for room in self.all_rooms]:
print(white_line)
print(colored('The room you entered is not in the system!',
'red'))
return 'The room you entered is not in the system!'
for room in self.all_rooms:
if room.room_name == room_name:
# data += (
# "{0} - {1}\n".format(room.room_name, room.room_type))
# data += white_line + '\n'
print(room.room_name + '(' + room.room_type.title() + ')')
print(white_line)
print('Employee id' + ' ' + 'Employee Name')
print(white_line)
# check if room has occupants
if room.occupants:
for person in room.occupants:
print(str(person.id) + ' ' +
person.first_name + ' ' + person.last_name)
data += person.first_name + ' ' + person.last_name + '\n'
else:
print(colored('Room has currently no occupants!', 'red'))
data += 'Room has currently no occupants!.'
return data
def print_allocation(self, filename):
"""Gets all the people in the dojo facility who have been awarded room
and office allocations.
"""
if self.all_rooms:
# writing to file
write_to_file = ''
for room in self.all_rooms:
if room.occupants:
print(colored(room.room_name +
'(' + room.room_type.title() + ')', 'cyan'))
write_to_file += room.room_name + '\n'
print(white_line)
print('Employee id' + ' ' + 'Employee Name')
print(white_line)
for person in room.occupants:
person_name = person.first_name + ' ' + person.last_name
write_to_file += person_name + '\n'
print(str(person.id) + ' ' + person.first_name +
' ' + person.last_name)
# check if user has opted to print list
if filename:
file_name = filename + ".txt"
file_output = open(file_name, 'w')
file_output.write(write_to_file)
file_output.close()
return
else:
print(colored(room.room_name + '('
+ room.room_type.title() + ')', 'cyan'))
print(colored('This room has no occupants', 'red'))
else:
print(colored('There are no allocations in system', 'red'))
def print_unallocated(self, filename):
# collect all file info as a string
write_to_file = ''
if self.office_unallocated:
print(white_line)
print(colored('OFFICES WAITING LIST', 'cyan'))
print(white_line)
print('Employee id' + ' ' + 'Employee Name')
print(white_line)
for person in self.office_unallocated:
if person:
person_name = person.first_name + ' ' + person.last_name
write_to_file += person_name + '\n'
print(str(person.id) + ' ' +
person.first_name + ' ' +
person.last_name)
# check if user has opted to print list
if filename:
file_name = filename + ".txt"
file_output = open(file_name, 'w')
file_output.write(write_to_file)
file_output.close()
if self.living_unallocated:
print(white_line)
print(colored('LIVING ROOMS WAITING LIST', 'cyan'))
print(white_line)
print('Employee id' + ' ' + 'Employee Name')
print(white_line)
for person in self.living_unallocated:
person_name = person.first_name + ' ' + person.last_name
write_to_file += person_name + '\n'
print(str(person.id) + ' ' + person.first_name +
' ' + person.last_name)
# check if user has opted to print list
if filename:
file_name = filename + ".txt"
file_output = open(file_name, 'w')
file_output.write(write_to_file)
file_output.close()
else:
print(white_line)
print(colored('Currently no pending allocations!', 'cyan'))
def get_current_room(self, person_id, room_type):
for room in self.all_rooms:
if room.room_type == room_type:
for occupant in room.occupants:
if occupant.id == person_id:
return room
return 'Person does not have a room of type {}'.format(room_type)
def unallocate_person(self, person_id, intended_room_type):
"""Removes a person from the room they are currently assigned to.
:param intended_room_type:
:param person_id: A string representing the person's id.
:return: person: The person to be reallocated.
"""
for room in self.all_rooms:
if room.room_type == intended_room_type:
for occupant in room.occupants:
if occupant.id == person_id:
person = occupant
room.occupants.remove(occupant)
return person
def get_room_type(self, room_name):
"""Gets the room_type of the room to which reallocation is intended
:param room_name: The name of the room to reallocate to.
:return: room_type: The name type of the room to reallocate
"""
for room in self.all_rooms:
if room_name == room.room_name:
if room.room_type == 'office':
return room.room_type, self.office_unallocated
else:
return room.room_type, self.living_unallocated
@staticmethod
def check_current_room_object(current_room):
"""Catches the error in current room to prevent passing of a
string to function call in reallocate_person
:param current_room: A string representing the room
currently occupied by a person
:return: boolean: This depends on whether current room
is string or object.
"""
try:
if current_room.__dict__:
return True
except AttributeError:
return False
def reallocate_person(self, person_id, room_name):
"""Reallocates a person to a new room.
:param person_id: A string representing a person's id.
:param room_name: A string representing the name of the room to which
reallocation is intended.
"""
self.all_people = self.staff + self.fellows
if room_name in [room.room_name for room in self.all_rooms]:
for person in self.all_people:
if person_id == person.id:
intended_room_type, unallocated =\
self.get_room_type(room_name)
current_room = self.get_current_room(person_id,
intended_room_type)
if person not in unallocated:
for room in self.all_rooms:
if room_name == room.room_name:
if room.room_type ==\
intended_room_type \
and len(room.occupants) \
< room.room_capacity:
if self.check_current_room_object(current_room):
if room_name != current_room.room_name:
person = self.unallocate_person(
person_id, intended_room_type)
room.occupants.append(person)
print(white_line)
return colored(
'reallocation successful!,'
'new room: ' +
room_name, 'cyan')
else:
return colored(
'Person already occupies that'
' room!', 'red')
else:
return colored(
'Reallocation for similar '
'room_types only!', 'red')
return colored('That room is fully occupied', 'red')
else:
return colored('Only persons with rooms can be '
'reallocated!', 'red')
return colored(
'There is no person in the system with such an id!', 'red')
else:
return colored('The room specified does not exist!', 'red')
def load_people(self, file_name):
"""Loads people from a text file
:param file_name: A string representing the name of the file
from which the loading should take place
"""
try:
with open(file_name, 'r') as list_file:
people = list_file.readlines()
for person in people:
attributes = person.split()
if attributes:
first_name = attributes[0].title()
last_name = attributes[1].title()
occupation = attributes[2].title()
if len(attributes) == 4:
wants_accommodation = attributes[3]
self.add_person(first_name, last_name, occupation,
wants_accommodation)
else:
self.add_person(first_name, last_name, occupation)
except IOError:
print(colored('There exists no file with such a name!'))
def save_state(self, db_name=None):
"""Persists all the data stored in the app to a SQLite database.
:param db_name: The name of the database to create.
"""
if path.exists('default_amity_db.db'):
os.remove('default_amity_db.db')
if path.exists(str(db_name) + '.db'):
os.remove(str(db_name) + '.db')
if db_name is None:
connection = DataBaseConnection()
else:
connection = DataBaseConnection(db_name)
session = connection.Session()
self.all_people = self.staff + self.fellows
if self.all_people:
print(colored('saving people to database.....', 'yellow'))
for person in self.all_people:
employee = People(
person.id, person.first_name, person.last_name,
person.occupation, person.wants_accommodation)
session.add(employee)
session.commit()
else:
print(colored('There are currently no people at the dojo!',
'red'))
if self.all_rooms:
print(colored('saving rooms to database....', 'yellow'))
for room in self.all_rooms:
room_occupants = ",".join([str(person.id) for person
in room.occupants])
space = Rooms(room.room_name, room.room_type,
room.room_capacity, room_occupants)
session.add(space)
session.commit()
else:
print(colored('There currently no rooms in the dojo!', 'red'))
unallocated = self.office_unallocated + self.living_unallocated
if unallocated:
print(colored('saving unallocated to database....', 'yellow'))
for person in self.office_unallocated:
room_unallocated = 'Office'
employee = Unallocated(person.id, person.first_name,
person.last_name, person.occupation,
room_unallocated)
session.add(employee)
session.commit()
for person in self.living_unallocated:
room_unallocated = 'Living space'
employee = Unallocated(person.id, person.first_name,
person.last_name, person.occupation,
room_unallocated)
session.add(employee)
session.commit()
else:
print(colored('Currently there are no pending allocations!',
'cyan'))
print('Data persisted to {} database successfully!'.
format(connection.db_name))
def load_state(self, db_name):
""" Loads data from a database into the application.
:param db_name: The name of the database from which to load the data.
"""
# check if file is in root directory.
# path_to_db = os.path.join('../', db_name + '.db')
base_path = os.path.dirname(__file__)
file_path = os.path.abspath(os.path.join(base_path, '..', db_name + '.db'))
if not os.path.isfile(file_path):
print(colored("Database does not exist.", 'red'))
else:
connection = DataBaseConnection(db_name)
session = connection.Session()
saved_people = session.query(People).all()
saved_rooms = session.query(Rooms).all()
saved_unallocated = session.query(Unallocated).all()
if saved_people:
for person in saved_people:
data = {'id': person.person_id,
'first_name': person.first_name,
'last_name': person.last_name,
'occupation': person.occupation,
'wants_accommodation': person.wants_accommodation}
if person.occupation == 'Staff':
person = Staff(**data)
self.staff.append(person)
if person.occupation == 'Fellow':
person = Fellow(**data)
self.fellows.append(person)
else:
print(colored('No saved people in the database', 'red'))
if saved_rooms:
self.all_people = self.staff + self.fellows
for room in saved_rooms:
if room.room_type == 'office':
if room.room_name \
not in [room.room_name for room in self.offices]:
space = Office(room.room_name, room.room_type)
occupants = [person_id for person_id in
room.room_occupants.split(",")
if person_id]
self.all_rooms.append(space)
if occupants:
for occupant in occupants:
person = self.get_person_object(
occupant, self.all_people)
space.occupants.append(person)
if room.room_type == 'livingspace':
if room.room_name \
not in [room.room_name for room in self.offices]:
space = LivingSpace(room.room_name, room.room_type)
occupants = [person_id for person_id in
room.room_occupants.split(",") if person_id]
self.all_rooms.append(space)
if occupants:
for occupant in occupants:
person = self.get_person_object(occupant, self.all_people)
space.occupants.append(person)
print(colored('Rooms successfully loaded.', 'cyan'))
else:
print(colored('No saved rooms in the database.', 'red'))
if saved_unallocated:
for person in saved_unallocated:
if person.room_unallocated == 'Office' and\
person not in self.office_unallocated:
self.all_people = self.staff + self.fellows
person_object = self.get_person_object(
person.person_id, self.all_people)
self.office_unallocated.append(person_object)
if person.room_unallocated == 'Living space' and\
person not in self.living_unallocated:
person_object = self.get_person_object(
person.person_id, self.fellows)
self.living_unallocated.append(person_object)
print(colored('Unallocated people successfully loaded.', 'cyan'))
else:
print(colored(
'No saved unallocated people in the database.', 'red'))
@staticmethod
def get_person_object(person_id, person_location):
"""Gets the person object for the person to be loaded from database
:param person_id: the id of the person to be loaded
:param person_location: The list of people where the person exists.
:return: person: The person to be loaded.
"""
for person in person_location:
if person.id == person_id:
return person
def pre_load_state(self, db_name):
"""Cautions the user against loading state with current data in system"""
self.all_people = self.fellows + self.staff
self.all_rooms = self.livingrooms + self.offices
if not (self.all_rooms or self.all_people):
self.load_state(db_name)
return 'load successful'
else:
print(colored('This action will wipe all existing data', 'red'))
res = raw_input('Would you like to continue[y/n]?: ')
if res.lower() == 'y':
self.offices = []
self.livingrooms = []
self.staff = []
self.fellows = []
self.all_rooms = []
self.office_unallocated = []
self.living_unallocated = []
self.allocated = []
self.all_people = self.fellows + self.staff
self.load_state(db_name)
else:
print(colored('load state exited with no changes', 'cyan'))
return 'load state exited with no changes'
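# A minimal usage sketch (hypothetical rooms and people, not part of the
# original module): exercising the Dojo API defined above from a small
# driver script.
if __name__ == "__main__":
    dojo = Dojo()
    dojo.create_room("Blue", "office")
    dojo.create_room("Python", "livingspace")
    dojo.add_person("Alvin", "Mutisya", "Fellow", "Y")
    dojo.add_person("Grace", "Hopper", "Staff")
    dojo.print_room("Blue")
    dojo.print_allocation(None)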
# [source: Alweezy/alvin-mutisya-dojo-project -- models/dojo.py, license: mit]
def add_native_methods(clazz):
def sin__double__(a0):
raise NotImplementedError()
def cos__double__(a0):
raise NotImplementedError()
def tan__double__(a0):
raise NotImplementedError()
def asin__double__(a0):
raise NotImplementedError()
def acos__double__(a0):
raise NotImplementedError()
def atan__double__(a0):
raise NotImplementedError()
def exp__double__(a0):
raise NotImplementedError()
def log__double__(a0):
raise NotImplementedError()
def log10__double__(a0):
raise NotImplementedError()
def sqrt__double__(a0):
raise NotImplementedError()
def cbrt__double__(a0):
raise NotImplementedError()
def IEEEremainder__double__double__(a0, a1):
raise NotImplementedError()
def atan2__double__double__(a0, a1):
raise NotImplementedError()
def pow__double__double__(a0, a1):
raise NotImplementedError()
def sinh__double__(a0):
raise NotImplementedError()
def cosh__double__(a0):
raise NotImplementedError()
def tanh__double__(a0):
raise NotImplementedError()
def hypot__double__double__(a0, a1):
raise NotImplementedError()
def expm1__double__(a0):
raise NotImplementedError()
def log1p__double__(a0):
raise NotImplementedError()
clazz.sin__double__ = staticmethod(sin__double__)
clazz.cos__double__ = staticmethod(cos__double__)
clazz.tan__double__ = staticmethod(tan__double__)
clazz.asin__double__ = staticmethod(asin__double__)
clazz.acos__double__ = staticmethod(acos__double__)
clazz.atan__double__ = staticmethod(atan__double__)
clazz.exp__double__ = staticmethod(exp__double__)
clazz.log__double__ = staticmethod(log__double__)
clazz.log10__double__ = staticmethod(log10__double__)
clazz.sqrt__double__ = staticmethod(sqrt__double__)
clazz.cbrt__double__ = staticmethod(cbrt__double__)
clazz.IEEEremainder__double__double__ = staticmethod(IEEEremainder__double__double__)
clazz.atan2__double__double__ = staticmethod(atan2__double__double__)
clazz.pow__double__double__ = staticmethod(pow__double__double__)
clazz.sinh__double__ = staticmethod(sinh__double__)
clazz.cosh__double__ = staticmethod(cosh__double__)
clazz.tanh__double__ = staticmethod(tanh__double__)
clazz.hypot__double__double__ = staticmethod(hypot__double__double__)
clazz.expm1__double__ = staticmethod(expm1__double__)
clazz.log1p__double__ = staticmethod(log1p__double__)
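# A minimal sketch of how this hook can be exercised (the class below is a
# hypothetical stand-in for the generated java.lang.StrictMath class):
# add_native_methods() attaches the stubs, after which individual natives can
# be replaced with Python implementations.
if __name__ == "__main__":
    import math

    class StrictMath(object):
        pass

    add_native_methods(StrictMath)
    StrictMath.sqrt__double__ = staticmethod(math.sqrt)  # replace one stub
    print(StrictMath.sqrt__double__(2.0))                # 1.4142135623730951
    try:
        StrictMath.sin__double__(0.0)                    # still unimplemented
    except NotImplementedError:
        print("sin__double__ is still a stub")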
# [source: laffra/pava -- pava/implementation/natives/java/lang/StrictMath.py, license: mit]
"""
Convolution layer tests
"""
import itertools as itt
import numpy as np
from neon import NervanaObject
from neon.layers.layer import Convolution
from neon.initializers.initializer import Uniform
from tests.utils import allclose_with_out
def pytest_generate_tests(metafunc):
np.random.seed(1)
if metafunc.config.option.all:
bsz_rng = [32, 64]
else:
bsz_rng = [128]
if 'zeros_convargs' in metafunc.fixturenames:
fargs = []
if metafunc.config.option.all:
fs_rng = [2, 3, 5, 7]
nofm_rng = [16, 32]
else:
fs_rng = [2, 5]
nofm_rng = [16]
fargs = itt.product(fs_rng, nofm_rng, bsz_rng)
metafunc.parametrize('zeros_convargs', fargs)
if 'ones_convargs' in metafunc.fixturenames:
fargs = []
if metafunc.config.option.all:
bsz_rng = [64]
indim_rng = [16, 32]
nifm_rng = [1, 2, 3]
fs_rng = [2, 3]
stride_rng = [1, 2]
nofm_rng = [16, 32, 64]
pad_rng = [0, 1, 2]
fargs1 = itt.product(indim_rng, nifm_rng, fs_rng, nofm_rng,
bsz_rng, stride_rng, pad_rng)
fs_rng = [5]
stride_rng = [1, 5]
fargs2 = itt.product(indim_rng, nifm_rng, fs_rng, nofm_rng,
bsz_rng, stride_rng, pad_rng)
fargs = itt.chain(fargs1, fargs2)
else:
bsz_rng = [64]
indim_rng = [32]
nifm_rng = [3]
fs_rng = [2, 5]
nofm_rng = [16]
stride_rng = [1, 2]
pad_rng = [0, 1]
fargs = itt.product(indim_rng, nifm_rng, fs_rng, nofm_rng,
bsz_rng, stride_rng, pad_rng)
metafunc.parametrize('ones_convargs', fargs)
if 'rand_convargs' in metafunc.fixturenames:
fargs = []
eps = np.finfo(np.float32).eps
if metafunc.config.option.all:
indim_rng = [16, 32]
nifm_rng = [1, 3]
fs_rng = [2, 3]
nofm_rng = [16]
rng_max_rng = [eps, eps * 10, 1.0, 100]
wrng = [[0.0, 1.0], [-1.0, 0.0], [-1.0, 1.0]]
stride_rng = [1, 2, 3]
pad_rng = [0, 1, 2]
fargs1 = itt.product(indim_rng, nifm_rng, fs_rng, nofm_rng, bsz_rng,
stride_rng, rng_max_rng, wrng, pad_rng)
fs_rng = [5]
stride_rng = [1, 5]
fargs2 = itt.product(indim_rng, nifm_rng, fs_rng, nofm_rng, bsz_rng,
stride_rng, rng_max_rng, wrng, pad_rng)
fargs = itt.chain(fargs1, fargs2)
else:
indim_rng = [16]
nifm_rng = [1, 3]
fs_rng = [2, 5]
nofm_rng = [16]
rng_max_rng = [2.0]
stride_rng = [1, 2]
wrng = [[-1.0, 1.0]]
pad_rng = [0, 1]
fargs = itt.product(indim_rng, nifm_rng, fs_rng, nofm_rng, bsz_rng,
stride_rng, rng_max_rng, wrng, pad_rng)
metafunc.parametrize('rand_convargs', fargs)
def test_conv_zeros(backend_default, zeros_convargs):
fshape, nofm, batch_size = zeros_convargs
NervanaObject.be.bsz = batch_size
# basic sanity check with 0 weights random inputs
init_unif = Uniform(low=0.0, high=0.0)
inshape = (3, 32, 32)
insize = np.prod(inshape)
neon_layer = Convolution(fshape=(fshape, fshape, nofm),
strides=1, padding=0, init=init_unif)
inp = neon_layer.be.array(np.random.random((insize, batch_size)))
inp.lshape = inshape
neon_layer.configure(inshape)
neon_layer.prev_layer = True
neon_layer.allocate()
neon_layer.set_deltas([neon_layer.be.iobuf(inshape)])
out = neon_layer.fprop(inp).get()
assert np.min(out) == 0.0 and np.max(out) == 0.0
err = np.zeros(out.shape)
deltas = neon_layer.bprop(neon_layer.be.array(err)).get()
assert np.min(deltas) == 0.0 and np.max(deltas) == 0.0
dw = neon_layer.dW.get()
assert np.min(dw) == 0.0 and np.max(dw) == 0.0
return
def test_conv_ones(backend_default, ones_convargs):
dtypeu = np.float32
indim, nifm, fshape, nofm, batch_size, stride, pad = ones_convargs
NervanaObject.be.bsz = batch_size
# weights set to one
init_unif = Uniform(low=1.0, high=1.0)
inshape = (nifm, indim, indim)
insize = np.prod(inshape)
neon_layer = Convolution(fshape=(fshape, fshape, nofm),
strides=stride, padding=pad, init=init_unif)
inp = neon_layer.be.array(np.ones((insize, batch_size)))
inp.lshape = inshape
neon_layer.configure(inshape)
neon_layer.prev_layer = True
neon_layer.allocate()
neon_layer.set_deltas([neon_layer.be.iobuf(inshape)])
# run fprop
out = neon_layer.fprop(inp).get()
# generate the reference layer
ref_layer = ConvLayerRef(1,
batch_size,
identity,
inshape[0],
inshape[1:3],
(fshape, fshape),
nofm,
stride,
dtypeu,
padding=pad)
# init weights to ones
ref_layer.weights = np.ones(neon_layer.W.shape).T.astype(dtypeu)
ref_layer.fprop(inp.get().T)
out_exp = ref_layer.y.copy()
assert np.allclose(out_exp.T, out, atol=0.0, rtol=0.0)
# generate err array
err = np.ones(out.shape).astype(np.float32)
# run bprop
neon_layer.bprop(neon_layer.be.array(err))
dw = neon_layer.dW.get()
# run reference bprop
ref_layer.bprop(err.T.astype(dtypeu), 1.0)
# expected output for updates is uniform matrix with
# all elements == ofmsize*batch_size
updates_exp = ref_layer.updates.T
# check dw from neon layer
assert np.allclose(dw, updates_exp, atol=0.0, rtol=0.0)
# the deltas are more complicated since the matrices are not
# uniform, going to use the reference code directly here
# no tolerance here should be exact
dd = np.abs(ref_layer.berror_nopad.T - neon_layer.deltas.get())
assert np.max(dd) == 0.0
return
def test_conv_rand(backend_default, rand_convargs):
indim, nifm, fshape, nofm, batch_size, stride, rng_max, w_rng, pad = rand_convargs
NervanaObject.be.bsz = batch_size
inp_rng = [0.0, rng_max]
dtypeu = np.float32
init_unif = Uniform(low=w_rng[0], high=w_rng[1])
inshape = (nifm, indim, indim)
insize = np.prod(inshape)
# generate neon conv layer
neon_layer = Convolution(fshape=(fshape, fshape, nofm),
strides=stride, padding=pad, init=init_unif)
# generate the reference layer
ref_layer = ConvLayerRef(1,
batch_size,
identity,
inshape[0],
inshape[1:3],
(fshape, fshape),
nofm,
stride,
dtypeu,
padding=pad)
# setup input in range inp_rng
inpa = np.random.random((insize, batch_size))
inpa *= inp_rng[1] - inp_rng[0]
inpa += inp_rng[0]
inpa = inpa.astype(dtypeu)
inp = neon_layer.be.array(inpa)
inp.lshape = inshape
# run fprop on neon
neon_layer.configure(inshape)
neon_layer.prev_layer = True
neon_layer.allocate()
neon_layer.set_deltas([neon_layer.be.iobuf(inshape)])
neon_out = neon_layer.fprop(inp).get()
# pull neon weights into ref layer weights
ref_layer.weights = neon_layer.W.get().T
ref_layer.fprop(inpa.T)
ref_out = np.copy(ref_layer.y)
# estimate the numerical precision by
# permuting order of ops in ref layer
# fprop calculation
ref_layer.fprop(inpa.T, permute=True)
ref_out_perm = ref_layer.y
atol = 4*np.max(np.abs(ref_out - ref_out_perm))
# compare ref and neon layer fprop outputs
# using the empirically determined atol
assert allclose_with_out(ref_out.T, neon_out, atol=atol, rtol=1.e-4)
# generate random deltas array
erra = np.random.random(neon_out.shape)
erra *= (inp_rng[1] - inp_rng[0])
erra += inp_rng[0]
erra = erra.astype(dtypeu)
err = neon_layer.be.array(erra)
# run neon bprop
neon_deltas = neon_layer.bprop(err).get()
neon_dW = neon_layer.dW.get()
# run ref code bprop
ref_layer.bprop(erra.T, 1.0)
ref_deltas = np.copy(ref_layer.berror_nopad.T)
ref_dW = np.copy(ref_layer.updates)
# estimate precision using permutation
# of operation order on ref layer code
ref_layer.bprop(erra.T, 1.0, permute=True)
ref_deltas_perm = ref_layer.berror_nopad.T
ref_dW_perm = ref_layer.updates
atol = 4*np.max(np.abs(ref_deltas - ref_deltas_perm))
assert allclose_with_out(ref_deltas, neon_deltas, atol=atol, rtol=1.e-4)
atol = 4*np.max(np.abs(ref_dW - ref_dW_perm))
assert allclose_with_out(ref_dW.T, neon_dW, atol=atol, rtol=1.e-4)
return
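# The tests above pick their absolute tolerance empirically: the same product
# is evaluated twice with the reduction axis visited in a permuted order, and
# 4x the largest difference between the two results is used as atol.  A
# minimal stand-alone sketch of that idea (hypothetical shapes, independent
# of the layers under test):
def _permutation_atol_sketch():
    A = np.random.random((8, 16)).astype(np.float32)
    B = np.random.random((16, 4)).astype(np.float32)
    ref = np.dot(A, B)
    inds = np.random.permutation(A.shape[1])
    ref_perm = np.dot(A[:, inds], B[inds, :])  # same product, permuted order
    return 4 * np.max(np.abs(ref - ref_perm))  # empirical absolute tolerance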
"""
Conv check code adapted from ref-des
cnn8
"""
def identity(x):
return x
def identity_prime(x):
return np.ones(x.shape)
def get_prime(func):
if func == identity:
return identity_prime
class ConvLayerRef(object):
def __init__(self, pos, mbs, g, nifm, ifmshape_nopad, fshape,
nofm, strides, dtypeu, padding=0):
assert g == identity
self.ifmheight, self.ifmwidth = ifmshape_nopad
self.ifmshape_nopad = ifmshape_nopad
self.padding = padding
self.ifmshape = (self.ifmheight + 2*padding, self.ifmwidth + 2*padding)
self.fshape = fshape
self.stride = strides
self.fheight, self.fwidth = fshape
self.ofmheight = (self.ifmshape[0] - self.fheight) / strides + 1
self.ofmwidth = (self.ifmshape[1] - self.fwidth) / strides + 1
ofmshape = (self.ofmheight, self.ofmwidth)
self.ifmsize = self.ifmshape[0]*self.ifmshape[1]
self.ifmsize_nopad = self.ifmshape_nopad[0]*self.ifmshape_nopad[1]
self.ofmsize = self.ofmheight * self.ofmwidth
self.nout = self.ofmsize * nofm
self.nifm = nifm
self.nofm = nofm
self.fsize = nifm * self.fheight * self.fwidth
self.weights = np.zeros((nofm, self.fsize), dtype=dtypeu)
self.g = g
self.gprime = get_prime(g)
self.z = np.zeros((mbs, self.nout), dtype=dtypeu)
self.y = np.zeros((mbs, self.nout), dtype=dtypeu)
ofmstarts = np.array(range(0, (self.ofmsize * nofm), self.ofmsize))
self.ofmlocs = np.zeros((self.ofmsize, nofm), dtype='i32')
for dst in range(self.ofmsize):
self.ofmlocs[dst, :] = ofmstarts + dst
# Figure out the connections with the previous layer.
# This is a list of lists.
self.links = []
# sfsize = self.fheight * self.fwidth # not used
self.makelinks(nifm, self.ifmsize, self.ifmshape, ofmshape, fshape, strides)
self.updates = np.zeros(self.weights.shape, dtype=dtypeu)
self.updateshards = np.zeros((self.fheight * self.fwidth,
nofm, self.fsize), dtype=dtypeu)
self.updatebuf = np.zeros((nofm, self.fsize), dtype=dtypeu).copy()
self.pos = pos
if self.pos > 0:
self.bpropbuf = np.zeros((mbs, self.fsize), dtype=dtypeu)
self.berror = np.zeros((mbs, self.ifmsize*nifm), dtype=dtypeu)
self.berrorshards = np.zeros((self.fheight * self.fwidth, mbs,
self.ifmsize * nifm), dtype=dtypeu)
def makelinks(self, nifm, ifmsize, ifmshape, ofmshape, fshape, strides):
# Figure out local connections to the previous layer.
# This function works for any number of dimensions.
ndims = len(ifmshape)
dimsizes = np.empty(ndims, dtype='int32')
for dim in range(ndims):
dimsizes[dim] = np.prod(ifmshape[dim:])
links = []
for ofmdim in np.ndindex(ofmshape):
# This variable tracks the top left corner of
# the receptive field.
src = ofmdim[-1]
for dim in range(-1, -ndims, -1):
src += dimsizes[dim] * ofmdim[dim - 1]
src *= strides
indlist = list(range(src, src + fshape[-1]))
for dim in range(-1, -ndims, -1):
indarray = np.array(indlist)
for dimind in range(1, fshape[dim - 1]):
indlist.extend(list(indarray + dimind * dimsizes[dim]))
indarray = np.array(indlist)
for ifm in range(1, nifm):
indlist.extend(list(indarray + ifm * ifmsize))
links.append(indlist)
self.links = np.array(links, dtype='int32')
def fprop(self, inputs_nopad, permute=False):
# add padding
if self.padding == 0:
inputs = inputs_nopad.astype(np.float32).copy()
else:
shp = inputs_nopad.shape
shp = [shp[0], self.nifm]
shp.extend(self.ifmshape_nopad)
in_rs = inputs_nopad.reshape(shp)
pad = self.padding
inputs = np.zeros((shp[0], self.nifm, self.ifmshape[0], self.ifmshape[1]))
inputs[:, :, pad:-pad, pad:-pad] = in_rs
inputs = inputs.reshape((shp[0], -1)).astype(np.float32).copy()
self.inputs = inputs
for dst in range(self.ofmsize):
# Compute the weighted average of the receptive field
# and store the result within the destination feature map.
# Do this for all filters in one shot.
rflinks = self.links[dst]
A = inputs[:, rflinks]
B = self.weights.T
if permute:
inds = np.random.permutation(A.shape[1])
self.y[:, self.ofmlocs[dst]] = np.dot(A[:, inds], B[inds, :])
else:
self.y[:, self.ofmlocs[dst]] = np.dot(A, B)
def bprop_naive(self, error, permute=False):
for dst in range(self.ofmsize):
rflinks = self.links[dst]
A = error[:, self.ofmlocs[dst]]
B = self.weights
if permute:
inds = np.random.permutation(A.shape[1])
np.dot(A[:, inds], B[inds, :], self.bpropbuf)
else:
np.dot(A, B, self.bpropbuf)
self.berror[:, rflinks] += self.bpropbuf
def bprop(self, error, epsilon, permute=False):
inputs = self.inputs
if self.pos > 0:
# Propagate the errors backwards.
self.berror.fill(0.0)
self.bprop_naive(error, permute=permute)
bshp = [self.berror.shape[0], self.nifm, self.ifmshape[0], self.ifmshape[1]]
pad = self.padding
# clip the padding out for neon comparison
if pad > 0:
self.berror_nopad = self.berror.reshape(bshp)[:, :, pad:-pad, pad:-pad]
self.berror_nopad = self.berror_nopad.reshape((bshp[0], -1)).copy()
else:
self.berror_nopad = self.berror.copy()
self.updates.fill(0.0)
for dst in range(self.ofmsize):
# Accumulate weight updates, going over all
# corresponding cells in the output feature maps.
rflinks = self.links[dst]
deltaslice = error[:, self.ofmlocs[dst]]
A = deltaslice.T
B = inputs[:, rflinks]
if permute:
inds = np.random.permutation(A.shape[1])
np.dot(A[:, inds], B[inds, :], out=self.updatebuf)
else:
np.dot(A, B, out=self.updatebuf)
self.updates += self.updatebuf
# Update the weights.
np.multiply(self.updates, epsilon, out=self.updates)
# skip updating weights, just return the dW and deltas
# np.subtract(self.weights, self.updates, out=self.weights)
# [source: coufon/neon-distributed -- tests/test_conv_layer.py, license: apache-2.0]
import os
import sys
import getopt
import traceback
import pprint
import time
import subprocess
import shlex
import re
import operator
calls = dict() # dict of dicts: {'name', 'bcet', 'wcet', 'lastcall', 'running'}
list_calls = False
pretty = False
coverage = False
covdata = {}
full_stats = False
filter_func = None
symbol_map = None
current_cycle = 0
start_cycle = None
stack = list() # tuple (function, call time)
wlist = dict() # decimal address => { 'BBname', 'is_begin'}
wstat = dict() # BBname => {'count': how_often_visited , 'laststart': when_visited_last 'running': is_still_in_block, 'bcet', 'wcet','sum','et' }
delayed_addr = None
def register_broken(fun):
"""
Calling this marks the timing of the function unknown, due to some unexpected call/ret
"""
try:
calls[fun]['valid'] = False
except: # keyerror
pass
def get_stack():
"""return current function stack as string"""
global stack
try:
        # try to find main
i0 = (e for e, t in enumerate(stack) if t[0] == "main").next()
except StopIteration:
i0 = 0
return "=>".join([f[0] for f in stack[i0:]])
def register_call(addr, cycle):
"""
A function was called at given cycle. This is called *after* the call finished.
    addr is the address of the first insn after the call, i.e., the entry point of the callee.
"""
fun = get_symbol(addr)
if fun in calls:
calls[fun]['lastcall'] = cycle
calls[fun]['count'] = calls[fun]['count'] + 1
else:
# newly insert
calls[fun] = {'bcet': sys.maxint, 'wcet': 0, 'et': [], 'lastcall': cycle,
'count': 1, 'valid': True, 'fun': fun, 'total': 0}
global stack
stack.append((fun, cycle))
if list_calls:
print "{} called @{}. stack={}".format(fun, cycle, get_stack())
def register_ret(next_addr, cycle):
"""
A function returned at the given cycle. This is called *after* the return finished.
next_addr is the address of the first insn after the return, i.e., the returnee.
"""
global list_calls, full_stats, stack
fun, callcycle = stack.pop()
if fun not in calls:
print "WARN @" + str(cycle) + ": RET of " + fun + ", but have seen no call"
register_broken(fun)
return
et = cycle - callcycle
if list_calls:
print "{} returns at @{}, time={}. stack={}".format(fun, cycle, et, get_stack())
if et > calls[fun]['wcet']:
calls[fun]['wcet'] = et
if et < calls[fun]['bcet']:
calls[fun]['bcet'] = et
calls[fun]['total'] += et
if full_stats:
calls[fun]['et'].append(et)
calls[fun]['running'] = False
def register_visit(titl, is_begin, cycle):
"""
register a watchpoint that was visited
"""
global wstat
current = wstat[titl]
if is_begin:
# BB starts
if current["is_running"]:
# print "WARN: watchpoint " + titl + " @" + str(cycle) +\
# " starts before it ended. Last visit=" + str(current["last_begin"])
pass # that is a normal case, when no end addr is given
current["is_running"] = True
current["last_begin"] = cycle
current["count"] += 1
else:
# BB ends
if not current["is_running"]:
print "WARN: watchpoint " + titl + " @" + str(cycle) + \
" ends before it started. Last visit=" + str(current["last_begin"])
else:
duration = cycle - current["last_begin"]
current["sum"] += duration
current["is_running"] = False
if current["bcet"] > duration:
current["bcet"] = duration
if current["wcet"] < duration:
current["wcet"] = duration
# finally...update
wstat[titl] = current
pending_return = False
pending_call = False
next_show = 0
last_shown_cycle = 0
def consume_line(line, show_progress=True):
"""
parses a line of simulator's trace file, and keeps track of function calls and times
"""
global wlist, delayed_addr, next_show, last_shown_cycle, coverage, current_cycle, start_cycle
# parse line
# create dict of function names and min/max execution times on the fly
# <elfname> <PC>: <cycle>: <function>(+<offset>)? <asm>
    # offset is the number of instructions, where each instruction is assumed
    # to be 2 bytes (though some are 4 bytes...)
# XXX: function *is not necessarily* the current function! Sometimes another label
# is used to compute offset. Therefore we need a symbol map
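    # Example of a line in this format (hypothetical values):
    #   prog.elf 0x1a2: 12345: main+0x4 CALL 0x3f0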
parts = line.split(None) # split at any whitespace
if (len(parts) < 5):
return # unknown format OR last line
try:
hexaddr = parts[1].rstrip(":")
decaddr = int(hexaddr, 16)
current_cycle = int(parts[2].strip()[0:-1])
if start_cycle is None:
start_cycle = current_cycle
# fun = parts[3].strip() # that is unreliable. Label/Offset sometimes is based on other func
asm = parts[4].strip()
if len(parts) > 5:
op = parts[5].strip()
# print line
except:
print "Skipping trace line: {}".format(line)
return
now = time.time()
if now > next_show:
cps = current_cycle - last_shown_cycle
last_shown_cycle = current_cycle
print "Cycles: {:,} ({:,} per second), stack={}".format(current_cycle, cps, get_stack())
next_show = now + 1.0
if asm == "CPU-waitstate":
return
# register end of watchpoint
if delayed_addr:
register_visit(delayed_addr, False, current_cycle)
delayed_addr = None
# watchlist
if decaddr in wlist:
if wlist[decaddr]["is_begin"]:
register_visit(wlist[decaddr]["name"], True, current_cycle)
if wlist[decaddr]["is_end"]:
# here we do a trick: we want to include the time of the jump to the next BB...
# so we have to register the end in the NEXT non-wait-cycle
# we could do the following, if the jump shall NOT count:
# register_visit(wlist[decaddr]["name"], False, current_cycle)
delayed_addr = wlist[decaddr]["name"]
global pending_return, pending_call
# this is only reached by no-wait-states instructions
if pending_call:
register_call(decaddr, current_cycle) # time to do the call is attributed to the caller
pending_call = False
elif pending_return:
register_ret(decaddr, current_cycle) # time for the return is attributed to the callee
pending_return = False
pending_return = asm in ("RET", "RETI")
pending_call = asm in ("ICALL", "CALL", "RCALL")
if pending_call:
# we must ignore call to next instruction, since this is actually a trick to find
# the addr of the next instruction in the code, and the stack return address is
# immediately popped
try:
if int(op, 16) == decaddr + 2:
# print "Ignoring (r)call .+0 @{}".format(current_cycle)
pending_call = False
except ValueError:
pass # could be "RCALL Z"
def load_symbols(elf):
"""query nm for symbol addresses"""
assert os.path.exists(elf), "ELF file not found (needed for symbol map)"
# --
global symbol_map
symbol_map = {}
re_sym = re.compile(r"([0-9a-fA-F]+)[\s\t]+(.)[\s\t+](\w+)")
proc = subprocess.Popen(['avr-nm', '-C', elf], stdout=subprocess.PIPE, bufsize=-1)
for line in iter(proc.stdout.readline, ''):
match = re_sym.match(line.rstrip())
if match:
decaddr = int(match.group(1), 16)
typ = match.group(2)
name = match.group(3)
if typ.lower() in ('t', 'u', 'v', 'w'):
if decaddr in symbol_map:
print "WARN: Symbol at {:x} already has a name: {}. Updating to {}.".\
format(decaddr, symbol_map[decaddr], name)
# the latest is better.
symbol_map[decaddr] = name
print "Loaded {} symbols.".format(len(symbol_map))
def get_symbol(addr):
"""return name of symbol at address, or return address if not known"""
global symbol_map
return symbol_map.get(addr, hex(addr))
def total_cycles():
"""total number of seen cycles"""
global current_cycle, start_cycle
return current_cycle - start_cycle
def display_coverage(result):
global symbol_map, pretty
cov = []
for _, func in symbol_map.iteritems():
if func in result:
cycles = result[func]['total']
else:
cycles = 0
perc = (100.* cycles) / total_cycles()
cov.append((func, cycles, perc))
sorted_by_cycles = sorted(cov, key=lambda x: x[1], reverse=True)
print "Coverage by cycles:"
if pretty:
for entry in sorted_by_cycles:
print "{:<35} {:>10,} {:>04.2f}%".format(entry[0], entry[1], entry[2])
else:
print str(sorted_by_cycles)
def do_work(tracefile, sim_args, elf):
"""either run simulation now, or inspect trace post-mortem"""
global wstat, filter_func, pretty, coverage
# --
load_symbols(elf)
if sim_args:
print "Running Simulator live..."
if tracefile: print "Tracefile ignored"
ret = run_simul(sim_args, elf)
else:
print "Parsing trace post-mortem..."
ret = parse_trace(tracefile)
if 0 == ret:
if filter_func:
if filter_func in calls:
if pretty:
pprint.pprint(calls[filter_func])
else:
print calls[filter_func]
else:
print "ERROR: function \"{}\" not found in trace".format(filter_func)
print "ERROR: only these available: {}".format(calls.keys())
return 1
else:
if pretty:
pprint.pprint(calls)
else:
print str(calls)
if len(wlist) > 0: pprint.pprint(wstat)
if coverage: display_coverage(calls)
print "Total cycles: {}".format(total_cycles())
return ret
def run_simul(sim_args, elf):
"""run simulation and simultaneously parse the trace"""
def del_arg(flag):
"""remove given flag and arg, if present"""
for c in xrange(len(cmd_split)):
if cmd_split[c].startswith(flag):
if len(cmd_split[c]) == 2:
del cmd_split[c:c + 2] # flag and value are separate
else:
del cmd_split[c] # flag and value are together
print "Removed cmdline flag ({}) for simulavr".format(flag)
return
cmd = 'simulavr ' + sim_args
cmd_split = shlex.split(cmd)
# override flags that the user may have given
del_arg('-t')
del_arg('-f')
cmd_split.extend(['-t', 'stdout']) # set trace to stdout
cmd_split.extend(['-f', elf]) # set same ELF that we are using
print "Running Simulator: {}".format(' '.join(cmd_split))
process = subprocess.Popen(cmd_split, bufsize=-1, stdout=subprocess.PIPE)
while True:
output = process.stdout.readline()
if output == '' and process.poll() is not None: break
if output: consume_line(output)
# rc = process.poll() #return code
return 0
def parse_trace(tfile):
"""
parse trace post-mortem
"""
global wstat, filter_func
try:
with open(tfile, 'rb') as f:
# read line by line
for line in f:
consume_line(line)
except:
print "File " + tfile + " could not be processed", sys.exc_info()[0]
print(traceback.format_exc())
return 1
return 0
def get_watchpoints(wfile):
"""
Read a file describing watchpoints, and put them into dictionary 'wlist'
"""
global wlist
global wstat
if not wfile:
return
try:
with open(wfile, 'rb') as f:
for line in f:
if line.startswith("#"):
continue
parts = line.split(None) # split at any whitespace
hexaddr_begin = parts[0].strip()
decaddr_begin = int(hexaddr_begin, 16) # hex string representing addr of watchpoint
if len(parts) > 1:
titl = parts[1].strip() # readable name
else:
titl = ''
# add watchpoint for begin of BB
is_single_step = True
if len(parts) > 2:
# add another watchpoint for end of BB, if we have an end address
hexaddr_end = parts[2].strip()
decaddr_end = int(hexaddr_end, 16) # hex string representing addr of watchpoint
if decaddr_end != decaddr_begin:
is_single_step = False
wlist[decaddr_end] = {'name': titl, 'is_begin': False, 'is_end': True}
wlist[decaddr_begin] = {'name': titl, 'is_begin': True, 'is_end': is_single_step}
# prepare wstats; this holds the visiting statistics in the end
wstat[titl] = {"addr": hexaddr_begin, "count": 0, "last_begin": -1,
'bcet': sys.maxint, 'wcet': 0, 'sum': 0, 'is_running': False}
except:
print "File " + wfile + " cound not be fully processed", sys.exc_info()[0]
# return
readable_list = [" " + hex(k) + " = " + v["name"] for k, v in wlist.iteritems()]
print 'Watchpoints (' + str(len(readable_list)) + "):"
print "\n".join(readable_list)
return
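# Illustrative watchlist file for -w/--watchlist (hypothetical addresses and
# names). Lines starting with '#' are skipped; the third column (end address
# of the basic block) is optional:
#
#   0x01a2  loop_body  0x01c0
#   0x0200  init_once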
def print_usage():
print __file__ + ' [OPTION] -e <elf> -t <trace>'
print ''
print 'OPTIONS:'
print ' -o, --only-function=<name>'
print ' only show result for specific function'
print ' -c, --calls'
print ' show calls'
print ' -w, --watchlist=<file>'
print ' provide statistics for particular addresses'
print ' -f, --fullstats'
print ' keep execution time of every invocation, not just min/max'
print ' -s, --simulate=<args>'
print ' run simulavr with extra arguments and parse simultaneously'
print ' -p, --pretty'
print ' pretty-print the results with indentation'
print ' -g, --coverage'
    print '          show number of cycles spent in each function (includes children)'
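# Illustrative invocations (hypothetical file names and simulavr arguments):
#   python simulavr2times.py -e firmware.elf -t trace -p -g
#   python simulavr2times.py -e firmware.elf -w watch.txt -s "-d atmega128"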
def main(argv):
global list_calls, full_stats, filter_func, pretty, coverage
sim_args = None
tracefile = "trace"
watchfile = None
elf = None
try:
opts, args = getopt.getopt(argv,
"ht:cw:fo:s:e:pg",
["trace=", "calls", "watchlist=", "fullstats", "only-function=",
"simulate=", "elf=", "pretty", "coverage"])
except getopt.GetoptError:
print_usage()
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print_usage()
sys.exit()
elif opt in ("-o", "--only-function"):
filter_func = arg
elif opt in ("-f", "--fullstats"):
full_stats = True
elif opt in ("-e", "--elf"):
elf = arg
elif opt in ("-g", "--coverage"):
coverage = True
elif opt in ("-c", "--calls"):
list_calls = True
elif opt in ("-p", "--pretty"):
pretty = True
print "pretty-print on"
elif opt in ("-s", "--simulate=<simulavr-args>"):
sim_args = arg
if (arg.startswith('"') and arg.endswith('"')) or\
(arg.startswith("'") and arg.endswith("'")):
sim_args = arg[1:-1]
else:
sim_args = arg
elif opt in ("-t", "--trace"):
tracefile = arg
elif opt in ("-w", "--watchlist"):
watchfile = arg
# get list of instructions to be watched (when and how often do they execute)
get_watchpoints(watchfile)
t0 = time.time()
ret = do_work(tracefile, sim_args, elf)
t1 = time.time()
print "Total time: {:.1f}s".format(t1 - t0)
exit(ret)
if __name__ == "__main__":
main(sys.argv[1:])
| {
"content_hash": "014d486065b7d9e729a720433c9a8ef0",
"timestamp": "",
"source": "github",
"line_count": 476,
"max_line_length": 147,
"avg_line_length": 34.220588235294116,
"alnum_prop": 0.5555282706120694,
"repo_name": "TRDDC-TUM/wcet-benchmarks",
"id": "0b7474e2fd7246a0055986f0c3037c6c7ff7d3be",
"size": "16597",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/simulavr2times.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "9323361"
},
{
"name": "C",
"bytes": "1563980"
},
{
"name": "C++",
"bytes": "26102"
},
{
"name": "Makefile",
"bytes": "170775"
},
{
"name": "Python",
"bytes": "231587"
},
{
"name": "Shell",
"bytes": "115791"
}
],
"symlink_target": ""
} |
import mock
import neutron.plugins.ml2.drivers.openvswitch.agent.common.constants \
as ovs_const
from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent.\
openflow.ovs_ofctl import ovs_bridge_test_base
call = mock.call # short hand
class OVSPhysicalBridgeTest(ovs_bridge_test_base.OVSBridgeTestBase,
ovs_bridge_test_base.OVSDVRProcessTestMixin):
dvr_process_table_id = ovs_const.DVR_PROCESS_VLAN
dvr_process_next_table_id = ovs_const.LOCAL_VLAN_TRANSLATION
def setUp(self):
super(OVSPhysicalBridgeTest, self).setUp()
self.setup_bridge_mock('br-phys', self.br_phys_cls)
def test_setup_default_table(self):
self.br.setup_default_table()
expected = [
call.add_flow(priority=0, table=0, actions='normal'),
]
self.assertEqual(expected, self.mock.mock_calls)
def test_provision_local_vlan(self):
port = 999
lvid = 888
segmentation_id = 777
distributed = False
self.br.provision_local_vlan(port=port, lvid=lvid,
segmentation_id=segmentation_id,
distributed=distributed)
expected = [
call.add_flow(priority=4, table=0, dl_vlan=lvid, in_port=port,
actions='mod_vlan_vid:%s,normal' % segmentation_id),
]
self.assertEqual(expected, self.mock.mock_calls)
def test_provision_local_vlan_novlan(self):
port = 999
lvid = 888
segmentation_id = None
distributed = False
self.br.provision_local_vlan(port=port, lvid=lvid,
segmentation_id=segmentation_id,
distributed=distributed)
expected = [
call.add_flow(priority=4, table=0, dl_vlan=lvid, in_port=port,
actions='strip_vlan,normal')
]
self.assertEqual(expected, self.mock.mock_calls)
def test_reclaim_local_vlan(self):
port = 999
lvid = 888
self.br.reclaim_local_vlan(port=port, lvid=lvid)
expected = [
call.delete_flows(dl_vlan=lvid, in_port=port),
]
self.assertEqual(expected, self.mock.mock_calls)
def test_add_dvr_mac_vlan(self):
mac = '00:02:b3:13:fe:3d'
port = 8888
self.br.add_dvr_mac_vlan(mac=mac, port=port)
expected = [
call.add_flow(priority=2, table=3, dl_src=mac,
actions='output:%s' % port),
]
self.assertEqual(expected, self.mock.mock_calls)
def test_remove_dvr_mac_vlan(self):
mac = '00:02:b3:13:fe:3d'
self.br.remove_dvr_mac_vlan(mac=mac)
expected = [
call.delete_flows(dl_src=mac, table=3),
]
self.assertEqual(expected, self.mock.mock_calls)
| {
"content_hash": "0e34b44315a20ffa6c11066a81a90b0f",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 78,
"avg_line_length": 35.72839506172839,
"alnum_prop": 0.580511402902557,
"repo_name": "noironetworks/neutron",
"id": "317294f5515b5f387b6cb45d3a1332dc5b52171c",
"size": "3617",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/test_br_phys.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "11420614"
},
{
"name": "Shell",
"bytes": "38791"
}
],
"symlink_target": ""
} |
"""
__LocalDef1orMoreDefPart2_Complete_MDL.py_____________________________________________________
Automatically generated AToM3 Model File (Do not modify directly)
Author: gehan
Modified: Mon Mar 2 14:57:51 2015
______________________________________________________________________________________________
"""
from stickylink import *
from widthXfillXdecoration import *
from LHS import *
from MT_pre__LocalDef import *
from MT_pre__directLink_T import *
from MT_pre__Def import *
from graph_MT_pre__Def import *
from graph_MT_pre__directLink_T import *
from graph_MT_pre__LocalDef import *
from graph_LHS import *
from ATOM3Enum import *
from ATOM3String import *
from ATOM3BottomType import *
from ATOM3Constraint import *
from ATOM3Attribute import *
from ATOM3Float import *
from ATOM3List import *
from ATOM3Link import *
from ATOM3Connection import *
from ATOM3Boolean import *
from ATOM3Appearance import *
from ATOM3Text import *
from ATOM3Action import *
from ATOM3Integer import *
from ATOM3Port import *
from ATOM3MSEnum import *
def LocalDef1orMoreDefPart2_Complete_MDL(self, rootNode, MT_pre__UMLRT2Kiltera_MMRootNode=None, MoTifRuleRootNode=None):
# --- Generating attributes code for ASG MT_pre__UMLRT2Kiltera_MM ---
if( MT_pre__UMLRT2Kiltera_MMRootNode ):
# author
MT_pre__UMLRT2Kiltera_MMRootNode.author.setValue('Annonymous')
# description
MT_pre__UMLRT2Kiltera_MMRootNode.description.setValue('\n')
MT_pre__UMLRT2Kiltera_MMRootNode.description.setHeight(15)
# name
MT_pre__UMLRT2Kiltera_MMRootNode.name.setValue('')
MT_pre__UMLRT2Kiltera_MMRootNode.name.setNone()
# --- ASG attributes over ---
# --- Generating attributes code for ASG MoTifRule ---
if( MoTifRuleRootNode ):
# author
MoTifRuleRootNode.author.setValue('Annonymous')
# description
MoTifRuleRootNode.description.setValue('\n')
MoTifRuleRootNode.description.setHeight(15)
# name
MoTifRuleRootNode.name.setValue('LocalDef1orMoreDefPart2_Complete')
# --- ASG attributes over ---
self.obj63227=LHS(self)
self.obj63227.isGraphObjectVisual = True
if(hasattr(self.obj63227, '_setHierarchicalLink')):
self.obj63227._setHierarchicalLink(False)
# constraint
self.obj63227.constraint.setValue('#===============================================================================\n# This code is executed after the nodes in the LHS have been matched.\n# You can access a matched node labelled n by: PreNode(\'n\').\n# To access attribute x of node n, use: PreNode(\'n\')[\'x\'].\n# The given constraint must evaluate to a boolean expression:\n# returning True enables the rule to be applied,\n# returning False forbids the rule from being applied.\n#===============================================================================\n\nreturn True\n')
self.obj63227.constraint.setHeight(15)
self.obj63227.graphClass_= graph_LHS
if self.genGraphics:
new_obj = graph_LHS(80.0,60.0,self.obj63227)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("LHS", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj63227.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj63227)
self.globalAndLocalPostcondition(self.obj63227, rootNode)
self.obj63227.postAction( rootNode.CREATE )
self.obj63228=MT_pre__LocalDef(self)
self.obj63228.isGraphObjectVisual = True
if(hasattr(self.obj63228, '_setHierarchicalLink')):
self.obj63228._setHierarchicalLink(False)
# MT_label__
self.obj63228.MT_label__.setValue('1')
# MT_pivotOut__
self.obj63228.MT_pivotOut__.setValue('')
self.obj63228.MT_pivotOut__.setNone()
# MT_subtypeMatching__
self.obj63228.MT_subtypeMatching__.setValue(('True', 0))
self.obj63228.MT_subtypeMatching__.config = 0
# MT_pre__classtype
self.obj63228.MT_pre__classtype.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj63228.MT_pre__classtype.setHeight(15)
# MT_pre__cardinality
self.obj63228.MT_pre__cardinality.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj63228.MT_pre__cardinality.setHeight(15)
# MT_pre__name
self.obj63228.MT_pre__name.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj63228.MT_pre__name.setHeight(15)
# MT_pivotIn__
self.obj63228.MT_pivotIn__.setValue('element1')
self.obj63228.graphClass_= graph_MT_pre__LocalDef
if self.genGraphics:
new_obj = graph_MT_pre__LocalDef(140.0,100.0,self.obj63228)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("MT_pre__LocalDef", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj63228.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj63228)
self.globalAndLocalPostcondition(self.obj63228, rootNode)
self.obj63228.postAction( rootNode.CREATE )
self.obj73750=MT_pre__directLink_T(self)
self.obj73750.isGraphObjectVisual = True
if(hasattr(self.obj73750, '_setHierarchicalLink')):
self.obj73750._setHierarchicalLink(False)
# MT_label__
self.obj73750.MT_label__.setValue('3')
# MT_pivotOut__
self.obj73750.MT_pivotOut__.setValue('')
self.obj73750.MT_pivotOut__.setNone()
# MT_subtypeMatching__
self.obj73750.MT_subtypeMatching__.setValue(('True', 0))
self.obj73750.MT_subtypeMatching__.config = 0
# MT_pivotIn__
self.obj73750.MT_pivotIn__.setValue('')
self.obj73750.MT_pivotIn__.setNone()
# MT_pre__associationType
self.obj73750.MT_pre__associationType.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj73750.MT_pre__associationType.setHeight(15)
self.obj73750.graphClass_= graph_MT_pre__directLink_T
if self.genGraphics:
new_obj = graph_MT_pre__directLink_T(397.0,281.0,self.obj73750)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("MT_pre__directLink_T", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
else: new_obj = None
self.obj73750.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj73750)
self.globalAndLocalPostcondition(self.obj73750, rootNode)
self.obj73750.postAction( rootNode.CREATE )
self.obj73747=MT_pre__Def(self)
self.obj73747.isGraphObjectVisual = True
if(hasattr(self.obj73747, '_setHierarchicalLink')):
self.obj73747._setHierarchicalLink(False)
# MT_pivotOut__
self.obj73747.MT_pivotOut__.setValue('')
self.obj73747.MT_pivotOut__.setNone()
# MT_subtypeMatching__
self.obj73747.MT_subtypeMatching__.setValue(('True', 1))
self.obj73747.MT_subtypeMatching__.config = 0
# MT_pre__classtype
self.obj73747.MT_pre__classtype.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj73747.MT_pre__classtype.setHeight(15)
# MT_pivotIn__
self.obj73747.MT_pivotIn__.setValue('')
self.obj73747.MT_pivotIn__.setNone()
# MT_label__
self.obj73747.MT_label__.setValue('2')
# MT_pre__cardinality
self.obj73747.MT_pre__cardinality.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj73747.MT_pre__cardinality.setHeight(15)
# MT_pre__name
self.obj73747.MT_pre__name.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj73747.MT_pre__name.setHeight(15)
self.obj73747.graphClass_= graph_MT_pre__Def
if self.genGraphics:
new_obj = graph_MT_pre__Def(220.0,260.0,self.obj73747)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("MT_pre__Def", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj73747.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj73747)
self.globalAndLocalPostcondition(self.obj73747, rootNode)
self.obj73747.postAction( rootNode.CREATE )
# Connections for obj63227 (graphObject_: Obj8) of type LHS
self.drawConnections(
)
# Connections for obj63228 (graphObject_: Obj9) of type MT_pre__LocalDef
self.drawConnections(
(self.obj63228,self.obj73750,[357.0, 201.0, 397.0, 281.0],"true", 2) )
# Connections for obj73750 (graphObject_: Obj11) of type MT_pre__directLink_T
self.drawConnections(
(self.obj73750,self.obj73747,[397.0, 281.0, 437.0, 361.0],"true", 2) )
# Connections for obj73747 (graphObject_: Obj10) of type MT_pre__Def
self.drawConnections(
)
newfunction = LocalDef1orMoreDefPart2_Complete_MDL
loadedMMName = ['MT_pre__UMLRT2Kiltera_MM_META', 'MoTifRule_META']
atom3version = '0.3'
| {
"content_hash": "51b3977653f00f383179ae682f556ff8",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 632,
"avg_line_length": 52.51851851851852,
"alnum_prop": 0.6347751136185551,
"repo_name": "levilucio/SyVOLT",
"id": "c9e16150a709f6fc935af11914172d71997266c5",
"size": "12762",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "UMLRT2Kiltera_MM/Properties/Multiplicity/models/LocalDef1orMoreDefPart2_Complete_MDL.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
} |
"""
logbook.handlers
~~~~~~~~~~~~~~~~
The handler interface and builtin handlers.
:copyright: (c) 2010 by Armin Ronacher, Georg Brandl.
:license: BSD, see LICENSE for more details.
"""
import os
import re
import sys
import stat
import errno
import socket
try:
from hashlib import sha1
except ImportError:
from sha import new as sha1
import threading
import traceback
from datetime import datetime, timedelta
from threading import Lock
from collections import deque
from logbook.base import CRITICAL, ERROR, WARNING, NOTICE, INFO, DEBUG, \
NOTSET, level_name_property, _missing, lookup_level, \
Flags, ContextObject, ContextStackManager
from logbook.helpers import rename, b, _is_text_stream, is_unicode, PY2, \
zip, xrange, string_types, integer_types, reraise, u
DEFAULT_FORMAT_STRING = (
u('[{record.time:%Y-%m-%d %H:%M}] ') +
u('{record.level_name}: {record.channel}: {record.message}')
)
SYSLOG_FORMAT_STRING = u('{record.channel}: {record.message}')
NTLOG_FORMAT_STRING = u('''\
Message Level: {record.level_name}
Location: {record.filename}:{record.lineno}
Module: {record.module}
Function: {record.func_name}
Exact Time: {record.time:%Y-%m-%d %H:%M:%S}
Event provided Message:
{record.message}
''')
TEST_FORMAT_STRING = \
u('[{record.level_name}] {record.channel}: {record.message}')
MAIL_FORMAT_STRING = u('''\
Subject: {handler.subject}
Message type: {record.level_name}
Location: {record.filename}:{record.lineno}
Module: {record.module}
Function: {record.func_name}
Time: {record.time:%Y-%m-%d %H:%M:%S}
Message:
{record.message}
''')
MAIL_RELATED_FORMAT_STRING = u('''\
Message type: {record.level_name}
Location: {record.filename}:{record.lineno}
Module: {record.module}
Function: {record.func_name}
{record.message}
''')
SYSLOG_PORT = 514
REGTYPE = type(re.compile("I'm a regular expression!"))
def create_syshandler(application_name, level=NOTSET):
"""Creates the handler the operating system provides. On Unix systems
    this creates a :class:`SyslogHandler`, on Windows systems it will
create a :class:`NTEventLogHandler`.
"""
if os.name == 'nt':
return NTEventLogHandler(application_name, level=level)
return SyslogHandler(application_name, level=level)
class _HandlerType(type):
"""The metaclass of handlers injects a destructor if the class has an
overridden close method. This makes it possible that the default
handler class as well as all subclasses that don't need cleanup to be
collected with less overhead.
"""
def __new__(cls, name, bases, d):
# aha, that thing has a custom close method. We will need a magic
# __del__ for it to be called on cleanup.
if bases != (ContextObject,) and 'close' in d and '__del__' not in d \
and not any(hasattr(x, '__del__') for x in bases):
def _magic_del(self):
try:
self.close()
except Exception:
# del is also invoked when init fails, so we better just
# ignore any exception that might be raised here
pass
d['__del__'] = _magic_del
return type.__new__(cls, name, bases, d)
class Handler(ContextObject):
"""Handler instances dispatch logging events to specific destinations.
The base handler class. Acts as a placeholder which defines the Handler
interface. Handlers can optionally use Formatter instances to format
records as desired. By default, no formatter is specified; in this case,
the 'raw' message as determined by record.message is logged.
To bind a handler you can use the :meth:`push_application` and
:meth:`push_thread` methods. This will push the handler on a stack of
handlers. To undo this, use the :meth:`pop_application` and
:meth:`pop_thread` methods::
handler = MyHandler()
handler.push_application()
# all here goes to that handler
handler.pop_application()
By default messages sent to that handler will not go to a handler on
an outer level on the stack, if handled. This can be changed by
setting bubbling to `True`. This setup for example would not have
any effect::
handler = NullHandler(bubble=True)
handler.push_application()
Whereas this setup disables all logging for the application::
handler = NullHandler()
handler.push_application()
There are also context managers to setup the handler for the duration
of a `with`-block::
with handler.applicationbound():
...
with handler.threadbound():
...
Because `threadbound` is a common operation, it is aliased to a with
on the handler itself::
with handler:
...
"""
__metaclass__ = _HandlerType
stack_manager = ContextStackManager()
#: a flag for this handler that can be set to `True` for handlers that
#: are consuming log records but are not actually displaying it. This
#: flag is set for the :class:`NullHandler` for instance.
blackhole = False
def __init__(self, level=NOTSET, filter=None, bubble=False):
#: the level for the handler. Defaults to `NOTSET` which
#: consumes all entries.
self.level = lookup_level(level)
#: the formatter to be used on records. This is a function
#: that is passed a log record as first argument and the
#: handler as second and returns something formatted
#: (usually a unicode string)
self.formatter = None
#: the filter to be used with this handler
self.filter = filter
#: the bubble flag of this handler
self.bubble = bubble
level_name = level_name_property()
def format(self, record):
"""Formats a record with the given formatter. If no formatter
is set, the record message is returned. Generally speaking the
return value is most likely a unicode string, but nothing in
the handler interface requires a formatter to return a unicode
string.
The combination of a handler and formatter might have the
formatter return an XML element tree for example.
"""
if self.formatter is None:
return record.message
return self.formatter(record, self)
def should_handle(self, record):
"""Returns `True` if this handler wants to handle the record. The
default implementation checks the level.
"""
return record.level >= self.level
def handle(self, record):
"""Emits the record and falls back. It tries to :meth:`emit` the
record and if that fails, it will call into :meth:`handle_error` with
the record and traceback. This function itself will always emit
when called, even if the logger level is higher than the record's
level.
If this method returns `False` it signals to the calling function that
no recording took place in which case it will automatically bubble.
This should not be used to signal error situations. The default
implementation always returns `True`.
"""
try:
self.emit(record)
except Exception:
self.handle_error(record, sys.exc_info())
return True
def emit(self, record):
"""Emit the specified logging record. This should take the
        record and deliver it to wherever the handler sends formatted
log records.
"""
def emit_batch(self, records, reason):
"""Some handlers may internally queue up records and want to forward
them at once to another handler. For example the
:class:`~logbook.FingersCrossedHandler` internally buffers
records until a level threshold is reached in which case the buffer
is sent to this method and not :meth:`emit` for each record.
The default behaviour is to call :meth:`emit` for each record in
the buffer, but handlers can use this to optimize log handling. For
instance the mail handler will try to batch up items into one mail
and not to emit mails for each record in the buffer.
Note that unlike :meth:`emit` there is no wrapper method like
:meth:`handle` that does error handling. The reason is that this
is intended to be used by other handlers which are already protected
against internal breakage.
        `reason` is a string that specifies the reason why :meth:`emit_batch`
was called, and not :meth:`emit`. The following are valid values:
``'buffer'``
Records were buffered for performance reasons or because the
records were sent to another process and buffering was the only
possible way. For most handlers this should be equivalent to
calling :meth:`emit` for each record.
``'escalation'``
Escalation means that records were buffered in case the threshold
was exceeded. In this case, the last record in the iterable is the
record that triggered the call.
``'group'``
All the records in the iterable belong to the same logical
component and happened in the same process. For example there was
a long running computation and the handler is invoked with a bunch
of records that happened there. This is similar to the escalation
reason, just that the first one is the significant one, not the
last.
If a subclass overrides this and does not want to handle a specific
reason it must call into the superclass because more reasons might
appear in future releases.
Example implementation::
def emit_batch(self, records, reason):
if reason not in ('escalation', 'group'):
Handler.emit_batch(self, records, reason)
...
"""
for record in records:
self.emit(record)
def close(self):
"""Tidy up any resources used by the handler. This is automatically
called by the destructor of the class as well, but explicit calls are
encouraged. Make sure that multiple calls to close are possible.
"""
def handle_error(self, record, exc_info):
"""Handle errors which occur during an emit() call. The behaviour of
this function depends on the current `errors` setting.
Check :class:`Flags` for more information.
"""
try:
behaviour = Flags.get_flag('errors', 'print')
if behaviour == 'raise':
reraise(exc_info[0], exc_info[1], exc_info[2])
elif behaviour == 'print':
traceback.print_exception(*(exc_info + (None, sys.stderr)))
sys.stderr.write('Logged from file %s, line %s\n' % (
record.filename, record.lineno))
except IOError:
pass
class NullHandler(Handler):
"""A handler that does nothing, meant to be inserted in a handler chain
with ``bubble=False`` to stop further processing.
"""
blackhole = True
class WrapperHandler(Handler):
"""A class that can wrap another handler and redirect all calls to the
wrapped handler::
handler = WrapperHandler(other_handler)
Subclasses should override the :attr:`_direct_attrs` attribute as
necessary.
"""
#: a set of direct attributes that are not forwarded to the inner
#: handler. This has to be extended as necessary.
_direct_attrs = frozenset(['handler'])
def __init__(self, handler):
self.handler = handler
def __getattr__(self, name):
return getattr(self.handler, name)
def __setattr__(self, name, value):
if name in self._direct_attrs:
return Handler.__setattr__(self, name, value)
setattr(self.handler, name, value)
class StringFormatter(object):
"""Many handlers format the log entries to text format. This is done
    by a callable that is passed a log record and returns a unicode
string. The default formatter for this is implemented as a class so
that it becomes possible to hook into every aspect of the formatting
process.
"""
def __init__(self, format_string):
self.format_string = format_string
def _get_format_string(self):
return self._format_string
def _set_format_string(self, value):
self._format_string = value
self._formatter = value
format_string = property(_get_format_string, _set_format_string)
del _get_format_string, _set_format_string
def format_record(self, record, handler):
try:
return self._formatter.format(record=record, handler=handler)
except UnicodeEncodeError:
# self._formatter is a str, but some of the record items
# are unicode
fmt = self._formatter.decode('ascii', 'replace')
return fmt.format(record=record, handler=handler)
except UnicodeDecodeError:
# self._formatter is unicode, but some of the record items
# are non-ascii str
fmt = self._formatter.encode('ascii', 'replace')
return fmt.format(record=record, handler=handler)
def format_exception(self, record):
return record.formatted_exception
def __call__(self, record, handler):
line = self.format_record(record, handler)
exc = self.format_exception(record)
if exc:
line += u('\n') + exc
return line
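# Illustrative sketch (not part of the original module): a handler picks up a
# custom format string through this class, e.g.
#
#   handler = StreamHandler(sys.stdout)
#   handler.formatter = StringFormatter(u('{record.level_name}: {record.message}'))
#
# Subclasses may override format_record/format_exception to hook into the
# formatting process.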
class StringFormatterHandlerMixin(object):
"""A mixin for handlers that provides a default integration for the
:class:`~logbook.StringFormatter` class. This is used for all handlers
by default that log text to a destination.
"""
#: a class attribute for the default format string to use if the
#: constructor was invoked with `None`.
default_format_string = DEFAULT_FORMAT_STRING
#: the class to be used for string formatting
formatter_class = StringFormatter
def __init__(self, format_string):
if format_string is None:
format_string = self.default_format_string
#: the currently attached format string as new-style format
#: string.
self.format_string = format_string
def _get_format_string(self):
if isinstance(self.formatter, StringFormatter):
return self.formatter.format_string
def _set_format_string(self, value):
if value is None:
self.formatter = None
else:
self.formatter = self.formatter_class(value)
format_string = property(_get_format_string, _set_format_string)
del _get_format_string, _set_format_string
class HashingHandlerMixin(object):
"""Mixin class for handlers that are hashing records."""
def hash_record_raw(self, record):
"""Returns a hashlib object with the hash of the record."""
hash = sha1()
hash.update(('%d\x00' % record.level).encode('ascii'))
hash.update((record.channel or u('')).encode('utf-8') + b('\x00'))
hash.update(record.filename.encode('utf-8') + b('\x00'))
hash.update(b(str(record.lineno)))
return hash
def hash_record(self, record):
"""Returns a hash for a record to keep it apart from other records.
This is used for the `record_limit` feature. By default
The level, channel, filename and location are hashed.
Calls into :meth:`hash_record_raw`.
"""
return self.hash_record_raw(record).hexdigest()
_NUMBER_TYPES = integer_types + (float,)
class LimitingHandlerMixin(HashingHandlerMixin):
"""Mixin class for handlers that want to limit emitting records.
In the default setting it delivers all log records but it can be set up
to not send more than n mails for the same record each hour to not
overload an inbox and the network in case a message is triggered multiple
times a minute. The following example limits it to 60 mails an hour::
from datetime import timedelta
handler = MailHandler(record_limit=1,
record_delta=timedelta(minutes=1))
"""
def __init__(self, record_limit, record_delta):
self.record_limit = record_limit
self._limit_lock = Lock()
self._record_limits = {}
if record_delta is None:
record_delta = timedelta(seconds=60)
elif isinstance(record_delta, _NUMBER_TYPES):
record_delta = timedelta(seconds=record_delta)
self.record_delta = record_delta
def check_delivery(self, record):
"""Helper function to check if data should be delivered by this
handler. It returns a tuple in the form ``(suppression_count,
allow)``. The first one is the number of items that were not delivered
so far, the second is a boolean flag if a delivery should happen now.
"""
if self.record_limit is None:
return 0, True
hash = self.hash_record(record)
self._limit_lock.acquire()
try:
allow_delivery = None
suppression_count = old_count = 0
first_count = now = datetime.utcnow()
if hash in self._record_limits:
last_count, suppression_count = self._record_limits[hash]
if last_count + self.record_delta < now:
allow_delivery = True
else:
first_count = last_count
old_count = suppression_count
if not suppression_count and \
len(self._record_limits) >= self.max_record_cache:
cache_items = self._record_limits.items()
cache_items.sort()
                del cache_items[:int(len(self._record_limits)
                                     * self.record_cache_prune)]
self._record_limits = dict(cache_items)
self._record_limits[hash] = (first_count, old_count + 1)
if allow_delivery is None:
allow_delivery = old_count < self.record_limit
return suppression_count, allow_delivery
finally:
self._limit_lock.release()
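# Illustrative sketch (not part of the original module): a handler using this
# mixin would typically consult check_delivery() from its emit method, roughly:
#
#   suppressed, allow = self.check_delivery(record)
#   if allow:
#       self.deliver(record, suppressed)   # `deliver` is a hypothetical method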
class StreamHandler(Handler, StringFormatterHandlerMixin):
"""a handler class which writes logging records, appropriately formatted,
to a stream. note that this class does not close the stream, as sys.stdout
or sys.stderr may be used.
If a stream handler is used in a `with` statement directly it will
:meth:`close` on exit to support this pattern::
with StreamHandler(my_stream):
pass
.. admonition:: Notes on the encoding
On Python 3, the encoding parameter is only used if a stream was
passed that was opened in binary mode.
"""
def __init__(self, stream, level=NOTSET, format_string=None,
encoding=None, filter=None, bubble=False):
Handler.__init__(self, level, filter, bubble)
StringFormatterHandlerMixin.__init__(self, format_string)
self.encoding = encoding
self.lock = threading.Lock()
if stream is not _missing:
self.stream = stream
def __enter__(self):
return Handler.__enter__(self)
def __exit__(self, exc_type, exc_value, tb):
self.close()
return Handler.__exit__(self, exc_type, exc_value, tb)
def close(self):
"""The default stream handler implementation is not to close
the wrapped stream but to flush it.
"""
self.flush()
def flush(self):
"""Flushes the inner stream."""
if self.stream is not None and hasattr(self.stream, 'flush'):
self.stream.flush()
def format_and_encode(self, record):
"""Formats the record and encodes it to the stream encoding."""
stream = self.stream
rv = self.format(record) + '\n'
if (PY2 and is_unicode(rv)) or \
not (PY2 or is_unicode(rv) or _is_text_stream(stream)):
enc = self.encoding
if enc is None:
enc = getattr(stream, 'encoding', None) or 'utf-8'
rv = rv.encode(enc, 'replace')
return rv
def write(self, item):
"""Writes a bytestring to the stream."""
self.stream.write(item)
def emit(self, record):
self.lock.acquire()
try:
self.write(self.format_and_encode(record))
self.flush()
finally:
self.lock.release()
class FileHandler(StreamHandler):
"""A handler that does the task of opening and closing files for you.
By default the file is opened right away, but you can also `delay`
the open to the point where the first message is written.
This is useful when the handler is used with a
:class:`~logbook.FingersCrossedHandler` or something similar.
"""
def __init__(self, filename, mode='a', encoding=None, level=NOTSET,
format_string=None, delay=False, filter=None, bubble=False):
if encoding is None:
encoding = 'utf-8'
StreamHandler.__init__(self, None, level, format_string,
encoding, filter, bubble)
self._filename = filename
self._mode = mode
if delay:
self.stream = None
else:
self._open()
def _open(self, mode=None):
if mode is None:
mode = self._mode
self.stream = open(self._filename, mode)
def write(self, item):
if self.stream is None:
self._open()
if not PY2 and isinstance(item, bytes):
self.stream.buffer.write(item)
else:
self.stream.write(item)
def close(self):
if self.stream is not None:
self.flush()
self.stream.close()
self.stream = None
def format_and_encode(self, record):
# encodes based on the stream settings, so the stream has to be
# open at the time this function is called.
if self.stream is None:
self._open()
return StreamHandler.format_and_encode(self, record)
def emit(self, record):
if self.stream is None:
self._open()
StreamHandler.emit(self, record)
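# Illustrative sketch (not part of the original module): with `delay=True` the
# file 'app.log' (hypothetical path) is only opened once a record is written:
#
#   handler = FileHandler('app.log', delay=True, bubble=True)
#   with handler.applicationbound():
#       ...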
class MonitoringFileHandler(FileHandler):
"""A file handler that will check if the file was moved while it was
open. This might happen on POSIX systems if an application like
logrotate moves the logfile over.
Because of different IO concepts on Windows, this handler will not
work on a windows system.
"""
def __init__(self, filename, mode='a', encoding='utf-8', level=NOTSET,
format_string=None, delay=False, filter=None, bubble=False):
FileHandler.__init__(self, filename, mode, encoding, level,
format_string, delay, filter, bubble)
if os.name == 'nt':
raise RuntimeError('MonitoringFileHandler '
'does not support Windows')
self._query_fd()
def _query_fd(self):
if self.stream is None:
self._last_stat = None, None
else:
try:
st = os.stat(self._filename)
except OSError:
e = sys.exc_info()[1]
if e.errno != 2:
raise
self._last_stat = None, None
else:
self._last_stat = st[stat.ST_DEV], st[stat.ST_INO]
def emit(self, record):
last_stat = self._last_stat
self._query_fd()
if last_stat != self._last_stat:
self.close()
FileHandler.emit(self, record)
self._query_fd()
class StderrHandler(StreamHandler):
"""A handler that writes to what is currently at stderr. At the first
    glance this appears to just be a :class:`StreamHandler` with the stream
set to :data:`sys.stderr` but there is a difference: if the handler is
created globally and :data:`sys.stderr` changes later, this handler will
point to the current `stderr`, whereas a stream handler would still
point to the old one.
"""
def __init__(self, level=NOTSET, format_string=None, filter=None,
bubble=False):
StreamHandler.__init__(self, _missing, level, format_string,
None, filter, bubble)
@property
def stream(self):
return sys.stderr
class RotatingFileHandlerBase(FileHandler):
"""Baseclass for rotating file handlers.
.. versionchanged:: 0.3
This class was deprecated because the interface is not flexible
enough to implement proper file rotations. The former builtin
subclasses no longer use this baseclass.
"""
def __init__(self, *args, **kwargs):
from warnings import warn
warn(DeprecationWarning('This class is deprecated'))
FileHandler.__init__(self, *args, **kwargs)
def emit(self, record):
self.lock.acquire()
try:
msg = self.format_and_encode(record)
if self.should_rollover(record, msg):
self.perform_rollover()
self.write(msg)
self.flush()
finally:
self.lock.release()
def should_rollover(self, record, formatted_record):
"""Called with the log record and the return value of the
:meth:`format_and_encode` method. The method has then to
return `True` if a rollover should happen or `False`
otherwise.
.. versionchanged:: 0.3
Previously this method was called with the number of bytes
returned by :meth:`format_and_encode`
"""
return False
def perform_rollover(self):
"""Called if :meth:`should_rollover` returns `True` and has
to perform the actual rollover.
"""
class RotatingFileHandler(FileHandler):
"""This handler rotates based on file size. Once the maximum size
is reached it will reopen the file and start with an empty file
again. The old file is moved into a backup copy (named like the
file, but with a ``.backupnumber`` appended to the file. So if
you are logging to ``mail`` the first backup copy is called
``mail.1``.)
The default number of backups is 5. Unlike a similar logger from
the logging package, the backup count is mandatory because just
reopening the file is dangerous as it deletes the log without
asking on rollover.
"""
def __init__(self, filename, mode='a', encoding='utf-8', level=NOTSET,
format_string=None, delay=False, max_size=1024 * 1024,
backup_count=5, filter=None, bubble=False):
FileHandler.__init__(self, filename, mode, encoding, level,
format_string, delay, filter, bubble)
self.max_size = max_size
self.backup_count = backup_count
assert backup_count > 0, 'at least one backup file has to be ' \
'specified'
def should_rollover(self, record, bytes):
self.stream.seek(0, 2)
return self.stream.tell() + bytes >= self.max_size
def perform_rollover(self):
self.stream.close()
for x in xrange(self.backup_count - 1, 0, -1):
src = '%s.%d' % (self._filename, x)
dst = '%s.%d' % (self._filename, x + 1)
try:
rename(src, dst)
except OSError:
e = sys.exc_info()[1]
if e.errno != errno.ENOENT:
raise
rename(self._filename, self._filename + '.1')
self._open('w')
def emit(self, record):
self.lock.acquire()
try:
msg = self.format_and_encode(record)
if self.should_rollover(record, len(msg)):
self.perform_rollover()
self.write(msg)
self.flush()
finally:
self.lock.release()
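# Illustrative sketch (not part of the original module): keep at most five
# backups of 'app.log' (hypothetical path), rotating once it grows past 1 MiB:
#
#   handler = RotatingFileHandler('app.log', max_size=1024 * 1024,
#                                 backup_count=5)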
class TimedRotatingFileHandler(FileHandler):
"""This handler rotates based on dates. It will name the file
after the filename you specify and the `date_format` pattern.
So for example if you configure your handler like this::
handler = TimedRotatingFileHandler('/var/log/foo.log',
date_format='%Y-%m-%d')
The filenames for the logfiles will look like this::
/var/log/foo-2010-01-10.log
/var/log/foo-2010-01-11.log
...
By default it will keep all these files around, if you want to limit
them, you can specify a `backup_count`.
"""
def __init__(self, filename, mode='a', encoding='utf-8', level=NOTSET,
format_string=None, date_format='%Y-%m-%d',
backup_count=0, filter=None, bubble=False):
FileHandler.__init__(self, filename, mode, encoding, level,
format_string, True, filter, bubble)
self.date_format = date_format
self.backup_count = backup_count
self._fn_parts = os.path.splitext(os.path.abspath(filename))
self._filename = None
def _get_timed_filename(self, datetime):
return datetime.strftime('-' + self.date_format) \
.join(self._fn_parts)
def should_rollover(self, record):
fn = self._get_timed_filename(record.time)
rv = self._filename is not None and self._filename != fn
# remember the current filename. In case rv is True, the rollover
# performing function will already have the new filename
self._filename = fn
return rv
def files_to_delete(self):
"""Returns a list with the files that have to be deleted when
        a rollover occurs.
"""
directory = os.path.dirname(self._filename)
files = []
for filename in os.listdir(directory):
filename = os.path.join(directory, filename)
if filename.startswith(self._fn_parts[0] + '-') and \
filename.endswith(self._fn_parts[1]):
files.append((os.path.getmtime(filename), filename))
files.sort()
return files[:-self.backup_count + 1]
def perform_rollover(self):
self.stream.close()
if self.backup_count > 0:
for time, filename in self.files_to_delete():
os.remove(filename)
self._open('w')
def emit(self, record):
self.lock.acquire()
try:
if self.should_rollover(record):
self.perform_rollover()
self.write(self.format_and_encode(record))
self.flush()
finally:
self.lock.release()
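# Illustrative sketch (not part of the original module): rotate daily and keep
# only the last seven files of '/var/log/foo.log' (path from the docstring):
#
#   handler = TimedRotatingFileHandler('/var/log/foo.log',
#                                      date_format='%Y-%m-%d', backup_count=7)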
class TestHandler(Handler, StringFormatterHandlerMixin):
"""Like a stream handler but keeps the values in memory. This
logger provides some ways to test for the records in memory.
Example usage::
def my_test():
with logbook.TestHandler() as handler:
logger.warn('A warning')
assert logger.has_warning('A warning')
...
"""
default_format_string = TEST_FORMAT_STRING
def __init__(self, level=NOTSET, format_string=None, filter=None,
bubble=False):
Handler.__init__(self, level, filter, bubble)
StringFormatterHandlerMixin.__init__(self, format_string)
#: captures the :class:`LogRecord`\s as instances
self.records = []
self._formatted_records = []
self._formatted_record_cache = []
def close(self):
"""Close all records down when the handler is closed."""
for record in self.records:
record.close()
def emit(self, record):
# keep records open because we will want to examine them after the
# call to the emit function. If we don't do that, the traceback
# attribute and other things will already be removed.
record.keep_open = True
self.records.append(record)
@property
def formatted_records(self):
"""Captures the formatted log records as unicode strings."""
if len(self._formatted_record_cache) != len(self.records) or \
any(r1 != r2 for r1, r2 in
zip(self.records, self._formatted_record_cache)):
self._formatted_records = [self.format(r) for r in self.records]
self._formatted_record_cache = list(self.records)
return self._formatted_records
@property
def has_criticals(self):
"""`True` if any :data:`CRITICAL` records were found."""
return any(r.level == CRITICAL for r in self.records)
@property
def has_errors(self):
"""`True` if any :data:`ERROR` records were found."""
return any(r.level == ERROR for r in self.records)
@property
def has_warnings(self):
"""`True` if any :data:`WARNING` records were found."""
return any(r.level == WARNING for r in self.records)
@property
def has_notices(self):
"""`True` if any :data:`NOTICE` records were found."""
return any(r.level == NOTICE for r in self.records)
@property
def has_infos(self):
"""`True` if any :data:`INFO` records were found."""
return any(r.level == INFO for r in self.records)
@property
def has_debugs(self):
"""`True` if any :data:`DEBUG` records were found."""
return any(r.level == DEBUG for r in self.records)
def has_critical(self, *args, **kwargs):
"""`True` if a specific :data:`CRITICAL` log record exists.
See :ref:`probe-log-records` for more information.
"""
kwargs['level'] = CRITICAL
return self._test_for(*args, **kwargs)
def has_error(self, *args, **kwargs):
"""`True` if a specific :data:`ERROR` log record exists.
See :ref:`probe-log-records` for more information.
"""
kwargs['level'] = ERROR
return self._test_for(*args, **kwargs)
def has_warning(self, *args, **kwargs):
"""`True` if a specific :data:`WARNING` log record exists.
See :ref:`probe-log-records` for more information.
"""
kwargs['level'] = WARNING
return self._test_for(*args, **kwargs)
def has_notice(self, *args, **kwargs):
"""`True` if a specific :data:`NOTICE` log record exists.
See :ref:`probe-log-records` for more information.
"""
kwargs['level'] = NOTICE
return self._test_for(*args, **kwargs)
def has_info(self, *args, **kwargs):
"""`True` if a specific :data:`INFO` log record exists.
See :ref:`probe-log-records` for more information.
"""
kwargs['level'] = INFO
return self._test_for(*args, **kwargs)
def has_debug(self, *args, **kwargs):
"""`True` if a specific :data:`DEBUG` log record exists.
See :ref:`probe-log-records` for more information.
"""
kwargs['level'] = DEBUG
return self._test_for(*args, **kwargs)
def _test_for(self, message=None, channel=None, level=None):
def _match(needle, haystack):
"Matches both compiled regular expressions and strings"
if isinstance(needle, REGTYPE) and needle.search(haystack):
return True
if needle == haystack:
return True
return False
for record in self.records:
if level is not None and record.level != level:
continue
if channel is not None and record.channel != channel:
continue
if message is not None and not _match(message, record.message):
continue
return True
return False
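# The following is an illustrative, hedged usage sketch and is not part of the
# original module: it shows how the TestHandler probing API above is typically
# exercised.  The channel name 'MyApp' and the warning text are assumptions
# made purely for illustration.
def _test_handler_usage_sketch():
    import re
    from logbook import Logger
    logger = Logger('MyApp')
    with TestHandler() as handler:
        logger.warn('A warning was issued')
        # Plain strings and compiled regular expressions are both accepted.
        assert handler.has_warning('A warning was issued')
        assert handler.has_warning(re.compile(r'^A warning'), channel='MyApp')
        assert handler.has_warnings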
class MailHandler(Handler, StringFormatterHandlerMixin,
LimitingHandlerMixin):
"""A handler that sends error mails. The format string used by this
    handler is the contents of the mail plus the headers. This is handy
if you want to use a custom subject or ``X-`` header::
handler = MailHandler(format_string='''\
Subject: {record.level_name} on My Application
{record.message}
{record.extra[a_custom_injected_record]}
''')
This handler will always emit text-only mails for maximum portability and
best performance.
In the default setting it delivers all log records but it can be set up
to not send more than n mails for the same record each hour to not
overload an inbox and the network in case a message is triggered multiple
times a minute. The following example limits it to 60 mails an hour::
from datetime import timedelta
handler = MailHandler(record_limit=1,
record_delta=timedelta(minutes=1))
The default timedelta is 60 seconds (one minute).
The mail handler is sending mails in a blocking manner. If you are not
using some centralized system for logging these messages (with the help
of ZeroMQ or others) and the logging system slows you down you can
wrap the handler in a :class:`logbook.queues.ThreadedWrapperHandler`
that will then send the mails in a background thread.
.. versionchanged:: 0.3
The handler supports the batching system now.
"""
default_format_string = MAIL_FORMAT_STRING
default_related_format_string = MAIL_RELATED_FORMAT_STRING
default_subject = u('Server Error in Application')
#: the maximum number of record hashes in the cache for the limiting
#: feature. Afterwards, record_cache_prune percent of the oldest
#: entries are removed
max_record_cache = 512
#: the number of items to prune on a cache overflow in percent.
record_cache_prune = 0.333
def __init__(self, from_addr, recipients, subject=None,
server_addr=None, credentials=None, secure=None,
record_limit=None, record_delta=None, level=NOTSET,
format_string=None, related_format_string=None,
filter=None, bubble=False):
Handler.__init__(self, level, filter, bubble)
StringFormatterHandlerMixin.__init__(self, format_string)
LimitingHandlerMixin.__init__(self, record_limit, record_delta)
self.from_addr = from_addr
self.recipients = recipients
if subject is None:
subject = self.default_subject
self.subject = subject
self.server_addr = server_addr
self.credentials = credentials
self.secure = secure
if related_format_string is None:
related_format_string = self.default_related_format_string
self.related_format_string = related_format_string
def _get_related_format_string(self):
if isinstance(self.related_formatter, StringFormatter):
return self.related_formatter.format_string
def _set_related_format_string(self, value):
if value is None:
self.related_formatter = None
else:
self.related_formatter = self.formatter_class(value)
related_format_string = property(_get_related_format_string,
_set_related_format_string)
del _get_related_format_string, _set_related_format_string
def get_recipients(self, record):
"""Returns the recipients for a record. By default the
:attr:`recipients` attribute is returned for all records.
"""
return self.recipients
def message_from_record(self, record, suppressed):
"""Creates a new message for a record as email message object
(:class:`email.message.Message`). `suppressed` is the number
of mails not sent if the `record_limit` feature is active.
"""
from email.message import Message
from email.header import Header
msg = Message()
msg.set_charset('utf-8')
lineiter = iter(self.format(record).splitlines())
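        # Everything up to the first blank line of the formatted record is
        # parsed as mail headers (e.g. a Subject: line coming from the format
        # string); the remaining lines become the mail body further below.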
for line in lineiter:
if not line:
break
h, v = line.split(':', 1)
# We could probably just encode everything. For the moment encode
            # only what is really needed to avoid breaking a couple of tests.
try:
v.encode('ascii')
except UnicodeEncodeError:
msg[h.strip()] = Header(v.strip(), 'utf-8')
else:
msg[h.strip()] = v.strip()
msg.replace_header('Content-Transfer-Encoding', '8bit')
body = '\r\n'.join(lineiter)
if suppressed:
body += '\r\n\r\nThis message occurred additional %d ' \
'time(s) and was suppressed' % suppressed
# inconsistency in Python 2.5
# other versions correctly return msg.get_payload() as str
if sys.version_info < (2, 6) and isinstance(body, unicode):
body = body.encode('utf-8')
msg.set_payload(body, 'UTF-8')
return msg
def format_related_record(self, record):
"""Used for format the records that led up to another record or
records that are related into strings. Used by the batch formatter.
"""
return self.related_formatter(record, self)
def generate_mail(self, record, suppressed=0):
"""Generates the final email (:class:`email.message.Message`)
with headers and date. `suppressed` is the number of mails
        that were not sent if the `record_limit` feature is active.
"""
from email.utils import formatdate
msg = self.message_from_record(record, suppressed)
msg['From'] = self.from_addr
msg['Date'] = formatdate()
return msg
def collapse_mails(self, mail, related, reason):
"""When escaling or grouped mails are """
if not related:
return mail
if reason == 'group':
title = 'Other log records in the same group'
else:
title = 'Log records that led up to this one'
mail.set_payload('%s\r\n\r\n\r\n%s:\r\n\r\n%s' % (
mail.get_payload(),
title,
'\r\n\r\n'.join(body.rstrip() for body in related)
))
return mail
def get_connection(self):
"""Returns an SMTP connection. By default it reconnects for
each sent mail.
"""
from smtplib import SMTP, SMTP_PORT, SMTP_SSL_PORT
if self.server_addr is None:
host = 'localhost'
port = self.secure and SMTP_SSL_PORT or SMTP_PORT
else:
host, port = self.server_addr
con = SMTP()
con.connect(host, port)
if self.credentials is not None:
if self.secure is not None:
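                # `secure` is treated as an argument tuple for starttls(); an
                # empty tuple (as passed by GMailHandler) enables TLS with the
                # default settings.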
con.ehlo()
con.starttls(*self.secure)
con.ehlo()
con.login(*self.credentials)
return con
def close_connection(self, con):
"""Closes the connection that was returned by
:meth:`get_connection`.
"""
try:
if con is not None:
con.quit()
except Exception:
pass
def deliver(self, msg, recipients):
"""Delivers the given message to a list of recpients."""
con = self.get_connection()
try:
con.sendmail(self.from_addr, recipients, msg.as_string())
finally:
self.close_connection(con)
def emit(self, record):
suppressed = 0
if self.record_limit is not None:
suppressed, allow_delivery = self.check_delivery(record)
if not allow_delivery:
return
self.deliver(self.generate_mail(record, suppressed),
self.get_recipients(record))
def emit_batch(self, records, reason):
if reason not in ('escalation', 'group'):
            return Handler.emit_batch(self, records, reason)
records = list(records)
if not records:
return
trigger = records.pop(reason == 'escalation' and -1 or 0)
suppressed = 0
if self.record_limit is not None:
suppressed, allow_delivery = self.check_delivery(trigger)
if not allow_delivery:
return
trigger_mail = self.generate_mail(trigger, suppressed)
related = [self.format_related_record(record)
for record in records]
self.deliver(self.collapse_mails(trigger_mail, related, reason),
self.get_recipients(trigger))
class GMailHandler(MailHandler):
"""
A customized mail handler class for sending emails via GMail (or Google Apps mail)::
handler = GMailHandler("[email protected]", "mypassword", ["to_user@some_mail.com"], ...) # other arguments same as MailHandler
.. versionadded:: 0.6.0
"""
def __init__(self, account_id, password, recipients, **kw):
super(GMailHandler, self).__init__(
account_id, recipients, secure=(), server_addr=("smtp.gmail.com", 587),
credentials=(account_id, password), **kw)
class SyslogHandler(Handler, StringFormatterHandlerMixin):
"""A handler class which sends formatted logging records to a
syslog server. By default it will send to it via unix socket.
"""
default_format_string = SYSLOG_FORMAT_STRING
# priorities
LOG_EMERG = 0 # system is unusable
LOG_ALERT = 1 # action must be taken immediately
LOG_CRIT = 2 # critical conditions
LOG_ERR = 3 # error conditions
LOG_WARNING = 4 # warning conditions
LOG_NOTICE = 5 # normal but significant condition
LOG_INFO = 6 # informational
LOG_DEBUG = 7 # debug-level messages
# facility codes
LOG_KERN = 0 # kernel messages
LOG_USER = 1 # random user-level messages
LOG_MAIL = 2 # mail system
LOG_DAEMON = 3 # system daemons
LOG_AUTH = 4 # security/authorization messages
LOG_SYSLOG = 5 # messages generated internally by syslogd
LOG_LPR = 6 # line printer subsystem
LOG_NEWS = 7 # network news subsystem
LOG_UUCP = 8 # UUCP subsystem
LOG_CRON = 9 # clock daemon
LOG_AUTHPRIV = 10 # security/authorization messages (private)
LOG_FTP = 11 # FTP daemon
# other codes through 15 reserved for system use
LOG_LOCAL0 = 16 # reserved for local use
LOG_LOCAL1 = 17 # reserved for local use
LOG_LOCAL2 = 18 # reserved for local use
LOG_LOCAL3 = 19 # reserved for local use
LOG_LOCAL4 = 20 # reserved for local use
LOG_LOCAL5 = 21 # reserved for local use
LOG_LOCAL6 = 22 # reserved for local use
LOG_LOCAL7 = 23 # reserved for local use
facility_names = {
'auth': LOG_AUTH,
'authpriv': LOG_AUTHPRIV,
'cron': LOG_CRON,
'daemon': LOG_DAEMON,
'ftp': LOG_FTP,
'kern': LOG_KERN,
'lpr': LOG_LPR,
'mail': LOG_MAIL,
'news': LOG_NEWS,
'syslog': LOG_SYSLOG,
'user': LOG_USER,
'uucp': LOG_UUCP,
'local0': LOG_LOCAL0,
'local1': LOG_LOCAL1,
'local2': LOG_LOCAL2,
'local3': LOG_LOCAL3,
'local4': LOG_LOCAL4,
'local5': LOG_LOCAL5,
'local6': LOG_LOCAL6,
'local7': LOG_LOCAL7,
}
level_priority_map = {
DEBUG: LOG_DEBUG,
INFO: LOG_INFO,
NOTICE: LOG_NOTICE,
WARNING: LOG_WARNING,
ERROR: LOG_ERR,
CRITICAL: LOG_CRIT
}
def __init__(self, application_name=None, address=None,
facility='user', socktype=socket.SOCK_DGRAM,
level=NOTSET, format_string=None, filter=None,
bubble=False):
Handler.__init__(self, level, filter, bubble)
StringFormatterHandlerMixin.__init__(self, format_string)
self.application_name = application_name
if address is None:
if sys.platform == 'darwin':
address = '/var/run/syslog'
else:
address = '/dev/log'
self.address = address
self.facility = facility
self.socktype = socktype
if isinstance(address, string_types):
self._connect_unixsocket()
else:
self._connect_netsocket()
def _connect_unixsocket(self):
self.unixsocket = True
self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
try:
self.socket.connect(self.address)
except socket.error:
self.socket.close()
self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.socket.connect(self.address)
def _connect_netsocket(self):
self.unixsocket = False
self.socket = socket.socket(socket.AF_INET, self.socktype)
if self.socktype == socket.SOCK_STREAM:
self.socket.connect(self.address)
self.address = self.socket.getsockname()
def encode_priority(self, record):
facility = self.facility_names[self.facility]
priority = self.level_priority_map.get(record.level,
self.LOG_WARNING)
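        # Standard syslog PRI encoding: the facility occupies the high bits and
        # the severity the low three.  For example, facility 'user' (1) with
        # LOG_ERR (3) yields (1 << 3) | 3 == 11, emitted as '<11>'.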
return (facility << 3) | priority
def emit(self, record):
prefix = u('')
if self.application_name is not None:
prefix = self.application_name + u(':')
self.send_to_socket((u('<%d>%s%s\x00') % (
self.encode_priority(record),
prefix,
self.format(record)
)).encode('utf-8'))
def send_to_socket(self, data):
if self.unixsocket:
try:
self.socket.send(data)
except socket.error:
self._connect_unixsocket()
self.socket.send(data)
elif self.socktype == socket.SOCK_DGRAM:
# the flags are no longer optional on Python 3
self.socket.sendto(data, 0, self.address)
else:
self.socket.sendall(data)
def close(self):
self.socket.close()
class NTEventLogHandler(Handler, StringFormatterHandlerMixin):
"""A handler that sends to the NT event log system."""
dllname = None
default_format_string = NTLOG_FORMAT_STRING
def __init__(self, application_name, log_type='Application',
level=NOTSET, format_string=None, filter=None,
bubble=False):
Handler.__init__(self, level, filter, bubble)
StringFormatterHandlerMixin.__init__(self, format_string)
if os.name != 'nt':
            raise RuntimeError('The NTEventLogHandler requires a Windows '
'operating system.')
try:
import win32evtlogutil
import win32evtlog
except ImportError:
raise RuntimeError('The pywin32 library is required '
'for the NTEventLogHandler.')
self.application_name = application_name
self._welu = win32evtlogutil
dllname = self.dllname
if not dllname:
dllname = os.path.join(os.path.dirname(self._welu.__file__),
'../win32service.pyd')
self.log_type = log_type
self._welu.AddSourceToRegistry(self.application_name, dllname,
log_type)
self._default_type = win32evtlog.EVENTLOG_INFORMATION_TYPE
self._type_map = {
DEBUG: win32evtlog.EVENTLOG_INFORMATION_TYPE,
INFO: win32evtlog.EVENTLOG_INFORMATION_TYPE,
NOTICE: win32evtlog.EVENTLOG_INFORMATION_TYPE,
WARNING: win32evtlog.EVENTLOG_WARNING_TYPE,
ERROR: win32evtlog.EVENTLOG_ERROR_TYPE,
CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE
}
def unregister_logger(self):
"""Removes the application binding from the registry. If you call
this, the log viewer will no longer be able to provide any
information about the message.
"""
self._welu.RemoveSourceFromRegistry(self.application_name,
self.log_type)
def get_event_type(self, record):
return self._type_map.get(record.level, self._default_type)
def get_event_category(self, record):
return 0
def get_message_id(self, record):
return 1
def emit(self, record):
id = self.get_message_id(record)
cat = self.get_event_category(record)
type = self.get_event_type(record)
self._welu.ReportEvent(self.application_name, id, cat, type,
[self.format(record)])
class FingersCrossedHandler(Handler):
"""This handler wraps another handler and will log everything in
memory until a certain level (`action_level`, defaults to `ERROR`)
is exceeded. When that happens the fingers crossed handler will
activate forever and log all buffered records as well as records
    yet to come into another handler which was passed to the constructor.
Alternatively it's also possible to pass a factory function to the
constructor instead of a handler. That factory is then called with
    the triggering log entry and the fingers crossed handler to create
a handler which is then cached.
The idea of this handler is to enable debugging of live systems. For
example it might happen that code works perfectly fine 99% of the time,
but then some exception happens. But the error that caused the
    exception alone might not be the interesting bit; the interesting
    information is in the warnings that led up to the error.
    Here is a setup that enables this for a web application::
from logbook import FileHandler
from logbook import FingersCrossedHandler
def issue_logging():
def factory(record, handler):
return FileHandler('/var/log/app/issue-%s.log' % record.time)
return FingersCrossedHandler(factory)
def application(environ, start_response):
with issue_logging():
return the_actual_wsgi_application(environ, start_response)
    Whenever an error occurs, a new file in ``/var/log/app`` is created
    with all the logging calls that led up to the error, up to the point
where the `with` block is exited.
Please keep in mind that the :class:`~logbook.FingersCrossedHandler`
handler is a one-time handler. Once triggered, it will not reset. Because
of that you will have to re-create it whenever you bind it. In this case
the handler is created when it's bound to the thread.
Due to how the handler is implemented, the filter, bubble and level
flags of the wrapped handler are ignored.
.. versionchanged:: 0.3
       The default behaviour is to buffer up records and then, once a
       severity threshold is reached, emit the whole buffer to the other
       handler.
This now enables this logger to be properly used with the
:class:`~logbook.MailHandler`. You will now only get one mail for
       each buffered record. However once the threshold was reached you would
still get a mail for each record which is why the `reset` flag was added.
When set to `True`, the handler will instantly reset to the untriggered
state and start buffering again::
handler = FingersCrossedHandler(MailHandler(...),
buffer_size=10,
reset=True)
.. versionadded:: 0.3
The `reset` flag was added.
"""
#: the reason to be used for the batch emit. The default is
#: ``'escalation'``.
#:
#: .. versionadded:: 0.3
batch_emit_reason = 'escalation'
def __init__(self, handler, action_level=ERROR, buffer_size=0,
pull_information=True, reset=False, filter=None,
bubble=False):
Handler.__init__(self, NOTSET, filter, bubble)
self.lock = Lock()
self._level = action_level
if isinstance(handler, Handler):
self._handler = handler
self._handler_factory = None
else:
self._handler = None
self._handler_factory = handler
#: the buffered records of the handler. Once the action is triggered
        #: (:attr:`triggered`) this deque is cleared. This attribute can
#: be helpful for the handler factory function to select a proper
#: filename (for example time of first log record)
self.buffered_records = deque()
#: the maximum number of entries in the buffer. If this is exhausted
#: the oldest entries will be discarded to make place for new ones
self.buffer_size = buffer_size
self._buffer_full = False
self._pull_information = pull_information
self._action_triggered = False
self._reset = reset
def close(self):
if self._handler is not None:
self._handler.close()
def enqueue(self, record):
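        # Buffer the record (or forward it directly once the handler has been
        # triggered) and report whether this record reaches the action level,
        # i.e. whether the caller should perform the rollover.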
if self._pull_information:
record.pull_information()
if self._action_triggered:
self._handler.emit(record)
else:
self.buffered_records.append(record)
if self._buffer_full:
self.buffered_records.popleft()
elif self.buffer_size and \
len(self.buffered_records) >= self.buffer_size:
self._buffer_full = True
return record.level >= self._level
return False
def rollover(self, record):
if self._handler is None:
self._handler = self._handler_factory(record, self)
self._handler.emit_batch(iter(self.buffered_records), 'escalation')
self.buffered_records.clear()
self._action_triggered = not self._reset
@property
def triggered(self):
"""This attribute is `True` when the action was triggered. From
        this point onwards the fingers crossed handler transparently
forwards all log records to the inner handler. If the handler resets
itself this will always be `False`.
"""
return self._action_triggered
def emit(self, record):
self.lock.acquire()
try:
if self.enqueue(record):
self.rollover(record)
finally:
self.lock.release()
class GroupHandler(WrapperHandler):
"""A handler that buffers all messages until it is popped again and then
forwards all messages to another handler. This is useful if you for
example have an application that does computations and only a result
mail is required. A group handler makes sure that only one mail is sent
    and not multiple. Some other handlers might support this as well, though
currently none of the builtins do.
Example::
with GroupHandler(MailHandler(...)):
# everything here ends up in the mail
The :class:`GroupHandler` is implemented as a :class:`WrapperHandler`
thus forwarding all attributes of the wrapper handler.
    Notice that this handler really only emits the records when the handler
is popped from the stack.
.. versionadded:: 0.3
"""
_direct_attrs = frozenset(['handler', 'pull_information',
'buffered_records'])
def __init__(self, handler, pull_information=True):
WrapperHandler.__init__(self, handler)
self.pull_information = pull_information
self.buffered_records = []
def rollover(self):
self.handler.emit_batch(self.buffered_records, 'group')
self.buffered_records = []
def pop_application(self):
Handler.pop_application(self)
self.rollover()
def pop_thread(self):
Handler.pop_thread(self)
self.rollover()
def emit(self, record):
if self.pull_information:
record.pull_information()
self.buffered_records.append(record)
| {
"content_hash": "0e11853e0923ca6d63298919273a7590",
"timestamp": "",
"source": "github",
"line_count": 1631,
"max_line_length": 134,
"avg_line_length": 37.06866952789699,
"alnum_prop": 0.608114590052763,
"repo_name": "dplepage/logbook",
"id": "8725e98f4b24795b1714d769a44f5c9bbb6133c4",
"size": "60483",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "logbook/handlers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""
CacheItem interface:
'_id': string,
'url': string,
'response_url': string,
'body': string,
'head': string,
'response_code': int,
'cookies': None,#grab.response.cookies,
TODO: WTF with cookies???
"""
from hashlib import sha1
import zlib
import logging
import pymongo
from bson import Binary
import time
import six
from weblib.encoding import make_str
from grab.response import Response
from grab.cookie import CookieManager
logger = logging.getLogger('grab.spider.cache_backend.mongo')
class CacheBackend(object):
def __init__(self, database, use_compression=True, spider=None, **kwargs):
self.spider = spider
self.db = pymongo.MongoClient(**kwargs)[database]
self.use_compression = use_compression
def get_item(self, url, timeout=None):
"""
Returned item should have specific interface. See module docstring.
"""
_hash = self.build_hash(url)
if timeout is not None:
ts = int(time.time()) - timeout
query = {'_id': _hash, 'timestamp': {'$gt': ts}}
else:
query = {'_id': _hash}
return self.db.cache.find_one(query)
def build_hash(self, url):
utf_url = make_str(url)
return sha1(utf_url).hexdigest()
def remove_cache_item(self, url):
_hash = self.build_hash(url)
self.db.cache.remove({'_id': _hash})
def load_response(self, grab, cache_item):
grab.setup_document(cache_item['body'])
body = cache_item['body']
if self.use_compression:
body = zlib.decompress(body)
def custom_prepare_response_func(transport, grab):
response = Response()
response.head = cache_item['head'].decode('utf-8')
response.body = body
response.code = cache_item['response_code']
response.download_size = len(body)
response.upload_size = 0
response.download_speed = 0
response.url = cache_item['response_url']
response.parse(charset=grab.config['document_charset'])
response.cookies = CookieManager(transport.extract_cookiejar())
response.from_cache = True
return response
grab.process_request_result(custom_prepare_response_func)
def save_response(self, url, grab):
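        # The response body is optionally zlib-compressed and stored as a BSON
        # Binary; documents exceeding MongoDB's size limit are caught below
        # ('document too large') and skipped with an error message.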
body = grab.response.body
if self.use_compression:
body = zlib.compress(body)
_hash = self.build_hash(url)
item = {
'_id': _hash,
'timestamp': int(time.time()),
'url': url,
'response_url': grab.response.url,
'body': Binary(body),
'head': Binary(grab.response.head.encode('utf-8')),
'response_code': grab.response.code,
'cookies': None,
}
try:
self.db.cache.save(item, w=1)
except Exception as ex:
if 'document too large' in six.text_type(ex):
logging.error('Document too large. It was not saved into mongo'
' cache. Url: %s' % url)
else:
raise
def clear(self):
self.db.cache.remove()
def size(self):
return self.db.cache.count()
def has_item(self, url, timeout=None):
"""
Test if required item exists in the cache.
"""
_hash = self.build_hash(url)
if timeout is not None:
ts = int(time.time()) - timeout
query = {'_id': _hash, 'timestamp': {'$gt': ts}}
else:
query = {'_id': _hash}
doc = self.db.cache.find_one(query, {'id': 1})
return doc is not None
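# Illustrative usage sketch, not part of the original module; the database name
# and connection keyword are assumptions made purely for illustration:
#     cache = CacheBackend('grab_cache', use_compression=True, host='localhost')
#     if not cache.has_item('http://example.com/', timeout=3600):
#         ...  # fetch the page and then call cache.save_response(url, grab)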
| {
"content_hash": "0e8fac1fc584ffdbdb8705278fa0e8d3",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 79,
"avg_line_length": 30.322314049586776,
"alnum_prop": 0.5696375034069229,
"repo_name": "huiyi1990/grab",
"id": "7f79dda385839742707f61e46853c3d104e45a80",
"size": "3669",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "grab/spider/cache_backend/mongo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "5434"
},
{
"name": "Makefile",
"bytes": "910"
},
{
"name": "PostScript",
"bytes": "2788"
},
{
"name": "Python",
"bytes": "405529"
}
],
"symlink_target": ""
} |
"""
Application related tasks for Invoke.
"""
from invoke import Collection
from . import dependencies, env, db, run
from config import BaseConfig
namespace = Collection(
dependencies,
env,
db,
run,
)
namespace.configure({
'app': {
'static_root': BaseConfig.STATIC_ROOT,
}
})
| {
"content_hash": "b1002c64e762b32948641f3c91ac8015",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 46,
"avg_line_length": 14.227272727272727,
"alnum_prop": 0.6517571884984026,
"repo_name": "ssls/beetle-agent",
"id": "4ada6f6932b976f1086fde99b2633304ce520d24",
"size": "331",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tasks/app/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "14909"
},
{
"name": "Mako",
"bytes": "1637"
},
{
"name": "Python",
"bytes": "234058"
}
],
"symlink_target": ""
} |
"""
Broadcast a message, with or without a price.
Multiple messages per block are allowed. Bets are made on the 'timestamp'
field, and not the block index.
An address is a feed of broadcasts. Feeds may be locked with a broadcast whose
text field is identical to ‘lock’ (case insensitive). Bets on a feed reference
the address that is the source of the feed in an output which includes the
(latest) required fee.
Broadcasts without a price may not be used for betting. Broadcasts about events
with a small number of possible outcomes (e.g. sports games), should be
written, for example, such that a price of 1 XLT means one outcome, 2 XLT means
another, etc., which schema should be described in the 'text' field.
fee_fraction: .05 XLT means 5%. It may be greater than 1, however; but
because it is stored as a four‐byte integer, it may not be greater than about
42.
"""
import struct
import decimal
D = decimal.Decimal
from fractions import Fraction
import logging
from . import (util, exceptions, config, litecoin)
from . import (bet)
FORMAT = '>IdI'
LENGTH = 4 + 8 + 4
ID = 30
# NOTE: Pascal strings are used for storing texts for backwards‐compatibility.
def validate (db, source, timestamp, value, fee_fraction_int, text, block_index):
problems = []
if fee_fraction_int > 4294967295:
problems.append('fee fraction greater than 42.94967295')
if timestamp < 0: problems.append('negative timestamp')
if not source:
problems.append('null source address')
# Check previous broadcast in this feed.
cursor = db.cursor()
broadcasts = list(cursor.execute('''SELECT * FROM broadcasts WHERE (status = ? AND source = ?) ORDER BY tx_index ASC''', ('valid', source)))
cursor.close()
if broadcasts:
last_broadcast = broadcasts[-1]
if last_broadcast['locked']:
problems.append('locked feed')
elif timestamp <= last_broadcast['timestamp']:
problems.append('feed timestamps not monotonically increasing')
if not (block_index >= 317500 or config.TESTNET): # Protocol change.
if len(text) > 52:
problems.append('text too long')
return problems
def compose (db, source, timestamp, value, fee_fraction, text):
# Store the fee fraction as an integer.
fee_fraction_int = int(fee_fraction * 1e8)
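    # For example, a fee_fraction of 0.05 (5%) is stored as 5000000; the
    # four-byte cap of 4294967295 checked in validate() therefore corresponds
    # to a fraction of roughly 42.95.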
problems = validate(db, source, timestamp, value, fee_fraction_int, text, util.last_block(db)['block_index'])
if problems: raise exceptions.BroadcastError(problems)
data = struct.pack(config.TXTYPE_FORMAT, ID)
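    # Texts of up to 52 bytes are packed as a length-prefixed Pascal string
    # ('p'); longer texts are packed as a plain byte string ('s').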
if len(text) <= 52:
curr_format = FORMAT + '{}p'.format(len(text) + 1)
else:
curr_format = FORMAT + '{}s'.format(len(text))
data += struct.pack(curr_format, timestamp, value, fee_fraction_int,
text.encode('utf-8'))
return (source, [], data)
def parse (db, tx, message):
cursor = db.cursor()
# Unpack message.
try:
if len(message) - LENGTH <= 52:
curr_format = FORMAT + '{}p'.format(len(message) - LENGTH)
else:
curr_format = FORMAT + '{}s'.format(len(message) - LENGTH)
timestamp, value, fee_fraction_int, text = struct.unpack(curr_format, message)
try:
text = text.decode('utf-8')
except UnicodeDecodeError:
text = ''
status = 'valid'
except (struct.error) as e:
timestamp, value, fee_fraction_int, text = 0, None, 0, None
status = 'invalid: could not unpack'
if status == 'valid':
# For SQLite3
timestamp = min(timestamp, config.MAX_INT)
value = min(value, config.MAX_INT)
problems = validate(db, tx['source'], timestamp, value, fee_fraction_int, text, tx['block_index'])
if problems: status = 'invalid: ' + '; '.join(problems)
# Lock?
lock = False
if text and text.lower() == 'lock':
lock = True
timestamp, value, fee_fraction_int, text = 0, None, None, None
else:
lock = False
# Add parsed transaction to message-type–specific table.
bindings = {
'tx_index': tx['tx_index'],
'tx_hash': tx['tx_hash'],
'block_index': tx['block_index'],
'source': tx['source'],
'timestamp': timestamp,
'value': value,
'fee_fraction_int': fee_fraction_int,
'text': text,
'locked': lock,
'status': status,
}
sql='insert into broadcasts values(:tx_index, :tx_hash, :block_index, :source, :timestamp, :value, :fee_fraction_int, :text, :locked, :status)'
cursor.execute(sql, bindings)
# Negative values (default to ignore).
if value == None or value < 0:
# Cancel Open Bets?
if value == -2:
cursor.execute('''SELECT * FROM bets \
WHERE (status = ? AND feed_address = ?)''',
('open', tx['source']))
for i in list(cursor):
bet.cancel_bet(db, i, 'dropped', tx['block_index'])
# Cancel Pending Bet Matches?
if value == -3:
cursor.execute('''SELECT * FROM bet_matches \
WHERE (status = ? AND feed_address = ?)''',
('pending', tx['source']))
for bet_match in list(cursor):
bet.cancel_bet_match(db, bet_match, 'dropped', tx['block_index'])
cursor.close()
return
# Handle bet matches that use this feed.
cursor.execute('''SELECT * FROM bet_matches \
WHERE (status=? AND feed_address=?)
ORDER BY tx1_index ASC, tx0_index ASC''',
('pending', tx['source']))
for bet_match in cursor.fetchall():
broadcast_bet_match_cursor = db.cursor()
bet_match_id = bet_match['tx0_hash'] + bet_match['tx1_hash']
bet_match_status = None
# Calculate total funds held in escrow and total fee to be paid if
# the bet match is settled. Escrow less fee is amount to be paid back
# to betters.
total_escrow = bet_match['forward_quantity'] + bet_match['backward_quantity']
fee_fraction = fee_fraction_int / config.UNIT
fee = int(fee_fraction * total_escrow) # Truncate.
escrow_less_fee = total_escrow - fee
# Get known bet match type IDs.
cfd_type_id = util.BET_TYPE_ID['BullCFD'] + util.BET_TYPE_ID['BearCFD']
equal_type_id = util.BET_TYPE_ID['Equal'] + util.BET_TYPE_ID['NotEqual']
# Get the bet match type ID of this bet match.
bet_match_type_id = bet_match['tx0_bet_type'] + bet_match['tx1_bet_type']
# Contract for difference, with determinate settlement date.
if bet_match_type_id == cfd_type_id:
# Recognise tx0, tx1 as the bull, bear (in the right direction).
if bet_match['tx0_bet_type'] < bet_match['tx1_bet_type']:
bull_address = bet_match['tx0_address']
bear_address = bet_match['tx1_address']
bull_escrow = bet_match['forward_quantity']
bear_escrow = bet_match['backward_quantity']
else:
bull_address = bet_match['tx1_address']
bear_address = bet_match['tx0_address']
bull_escrow = bet_match['backward_quantity']
bear_escrow = bet_match['forward_quantity']
leverage = Fraction(bet_match['leverage'], 5040)
initial_value = bet_match['initial_value']
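            # The bear's escrow is reduced (or increased) by the price move
            # since the match, scaled by leverage; whatever remains of the
            # total escrow less the feed fee is credited to the bull.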
bear_credit = bear_escrow - (value - initial_value) * leverage * config.UNIT
bull_credit = escrow_less_fee - bear_credit
bear_credit = round(bear_credit)
bull_credit = round(bull_credit)
# Liquidate, as necessary.
if bull_credit >= escrow_less_fee or bull_credit <= 0:
if bull_credit >= escrow_less_fee:
bull_credit = escrow_less_fee
bear_credit = 0
bet_match_status = 'settled: liquidated for bull'
util.credit(db, tx['block_index'], bull_address, config.XLT, bull_credit, action='bet {}'.format(bet_match_status), event=tx['tx_hash'])
elif bull_credit <= 0:
bull_credit = 0
bear_credit = escrow_less_fee
bet_match_status = 'settled: liquidated for bear'
util.credit(db, tx['block_index'], bear_address, config.XLT, bear_credit, action='bet {}'.format(bet_match_status), event=tx['tx_hash'])
# Pay fee to feed.
util.credit(db, tx['block_index'], bet_match['feed_address'], config.XLT, fee, action='feed fee', event=tx['tx_hash'])
# For logging purposes.
bindings = {
'bet_match_id': bet_match_id,
'bet_match_type_id': bet_match_type_id,
'block_index': tx['block_index'],
'settled': False,
'bull_credit': bull_credit,
'bear_credit': bear_credit,
'winner': None,
'escrow_less_fee': None,
'fee': fee
}
sql='insert into bet_match_resolutions values(:bet_match_id, :bet_match_type_id, :block_index, :settled, :bull_credit, :bear_credit, :winner, :escrow_less_fee, :fee)'
cursor.execute(sql, bindings)
# Settle (if not liquidated).
elif timestamp >= bet_match['deadline']:
bet_match_status = 'settled'
util.credit(db, tx['block_index'], bull_address, config.XLT, bull_credit, action='bet {}'.format(bet_match_status), event=tx['tx_hash'])
util.credit(db, tx['block_index'], bear_address, config.XLT, bear_credit, action='bet {}'.format(bet_match_status), event=tx['tx_hash'])
# Pay fee to feed.
util.credit(db, tx['block_index'], bet_match['feed_address'], config.XLT, fee, action='feed fee', event=tx['tx_hash'])
# For logging purposes.
bindings = {
'bet_match_id': bet_match_id,
'bet_match_type_id': bet_match_type_id,
'block_index': tx['block_index'],
'settled': True,
'bull_credit': bull_credit,
'bear_credit': bear_credit,
'winner': None,
'escrow_less_fee': None,
'fee': fee
}
sql='insert into bet_match_resolutions values(:bet_match_id, :bet_match_type_id, :block_index, :settled, :bull_credit, :bear_credit, :winner, :escrow_less_fee, :fee)'
cursor.execute(sql, bindings)
# Equal[/NotEqual] bet.
elif bet_match_type_id == equal_type_id and timestamp >= bet_match['deadline']:
# Recognise tx0, tx1 as the bull, bear (in the right direction).
if bet_match['tx0_bet_type'] < bet_match['tx1_bet_type']:
equal_address = bet_match['tx0_address']
notequal_address = bet_match['tx1_address']
else:
equal_address = bet_match['tx1_address']
notequal_address = bet_match['tx0_address']
# Decide who won, and credit appropriately.
if value == bet_match['target_value']:
winner = 'Equal'
bet_match_status = 'settled: for equal'
util.credit(db, tx['block_index'], equal_address, config.XLT, escrow_less_fee, action='bet {}'.format(bet_match_status), event=tx['tx_hash'])
else:
winner = 'NotEqual'
bet_match_status = 'settled: for notequal'
util.credit(db, tx['block_index'], notequal_address, config.XLT, escrow_less_fee, action='bet {}'.format(bet_match_status), event=tx['tx_hash'])
# Pay fee to feed.
util.credit(db, tx['block_index'], bet_match['feed_address'], config.XLT, fee, action='feed fee', event=tx['tx_hash'])
# For logging purposes.
bindings = {
'bet_match_id': bet_match_id,
'bet_match_type_id': bet_match_type_id,
'block_index': tx['block_index'],
'settled': None,
'bull_credit': None,
'bear_credit': None,
'winner': winner,
'escrow_less_fee': escrow_less_fee,
'fee': fee
}
sql='insert into bet_match_resolutions values(:bet_match_id, :bet_match_type_id, :block_index, :settled, :bull_credit, :bear_credit, :winner, :escrow_less_fee, :fee)'
cursor.execute(sql, bindings)
# Update the bet match’s status.
if bet_match_status:
bindings = {
'status': bet_match_status,
'bet_match_id': bet_match['tx0_hash'] + bet_match['tx1_hash']
}
sql='update bet_matches set status = :status where id = :bet_match_id'
cursor.execute(sql, bindings)
util.message(db, tx['block_index'], 'update', 'bet_matches', bindings)
broadcast_bet_match_cursor.close()
cursor.close()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| {
"content_hash": "f8d92e1a771288a2ca102d1ff7dddbf8",
"timestamp": "",
"source": "github",
"line_count": 309,
"max_line_length": 182,
"avg_line_length": 43.213592233009706,
"alnum_prop": 0.5667640230659777,
"repo_name": "Litetokens/litetokensd",
"id": "ec57dbc5e3fa9cf019e133ded5b8f5a6855df0c8",
"size": "13386",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/broadcast.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "392223"
}
],
"symlink_target": ""
} |
from win32clipboard import *
from pywin32_testutil import str2bytes # py3k-friendly helper
import win32con
import types
if not __debug__:
print "WARNING: The test code in this module uses assert"
print "This instance of Python has asserts disabled, so many tests will be skipped"
cf_names = {}
# Build map of CF_* constants to names.
for name, val in win32con.__dict__.items():
if name[:3]=="CF_" and name != "CF_SCREENFONTS": # CF_SCREEN_FONTS==CF_TEXT!?!?
cf_names[val] = name
def TestEmptyClipboard():
OpenClipboard()
try:
EmptyClipboard()
assert EnumClipboardFormats(0)==0, "Clipboard formats were available after emptying it!"
finally:
CloseClipboard()
def TestText():
OpenClipboard()
try:
text = "Hello from Python"
text_bytes = str2bytes(text)
SetClipboardText(text)
got = GetClipboardData(win32con.CF_TEXT)
# CF_TEXT always gives us 'bytes' back .
assert got == text_bytes, "Didnt get the correct result back - '%r'." % (got,)
finally:
CloseClipboard()
OpenClipboard()
try:
# CF_UNICODE text always gives unicode objects back.
got = GetClipboardData(win32con.CF_UNICODETEXT)
assert got == text, "Didnt get the correct result back - '%r'." % (got,)
assert type(got)==types.UnicodeType, "Didnt get the correct result back - '%r'." % (got,)
# CF_OEMTEXT is a bytes-based format.
got = GetClipboardData(win32con.CF_OEMTEXT)
assert got == text_bytes, "Didnt get the correct result back - '%r'." % (got,)
# Unicode tests
EmptyClipboard()
text = u"Hello from Python unicode"
text_bytes = str2bytes(text)
# Now set the Unicode value
SetClipboardData(win32con.CF_UNICODETEXT, text)
# Get it in Unicode.
got = GetClipboardData(win32con.CF_UNICODETEXT)
assert got == text, "Didnt get the correct result back - '%r'." % (got,)
assert type(got)==types.UnicodeType, "Didnt get the correct result back - '%r'." % (got,)
# Close and open the clipboard to ensure auto-conversions take place.
finally:
CloseClipboard()
OpenClipboard()
try:
# Make sure I can still get the text as bytes
got = GetClipboardData(win32con.CF_TEXT)
assert got == text_bytes, "Didnt get the correct result back - '%r'." % (got,)
# Make sure we get back the correct types.
got = GetClipboardData(win32con.CF_UNICODETEXT)
assert type(got)==types.UnicodeType, "Didnt get the correct result back - '%r'." % (got,)
got = GetClipboardData(win32con.CF_OEMTEXT)
assert got == text_bytes, "Didnt get the correct result back - '%r'." % (got,)
print "Clipboard text tests worked correctly"
finally:
CloseClipboard()
def TestClipboardEnum():
OpenClipboard()
try:
# Enumerate over the clipboard types
enum = 0
while 1:
enum = EnumClipboardFormats(enum)
if enum==0:
break
assert IsClipboardFormatAvailable(enum), "Have format, but clipboard says it is not available!"
n = cf_names.get(enum,"")
if not n:
try:
n = GetClipboardFormatName(enum)
except error:
n = "unknown (%s)" % (enum,)
print "Have format", n
print "Clipboard enumerator tests worked correctly"
finally:
CloseClipboard()
class Foo:
def __init__(self, **kw):
self.__dict__.update(kw)
def __cmp__(self, other):
return cmp(self.__dict__, other.__dict__)
def __eq__(self, other):
return self.__dict__==other.__dict__
def TestCustomFormat():
OpenClipboard()
try:
# Just for the fun of it pickle Python objects through the clipboard
fmt = RegisterClipboardFormat("Python Pickle Format")
import cPickle
pickled_object = Foo(a=1, b=2, Hi=3)
SetClipboardData(fmt, cPickle.dumps( pickled_object ) )
# Now read it back.
data = GetClipboardData(fmt)
loaded_object = cPickle.loads(data)
assert cPickle.loads(data) == pickled_object, "Didnt get the correct data!"
print "Clipboard custom format tests worked correctly"
finally:
CloseClipboard()
if __name__=='__main__':
TestEmptyClipboard()
TestText()
TestCustomFormat()
TestClipboardEnum()
# And leave it empty at the end!
TestEmptyClipboard()
| {
"content_hash": "4b050eb526a2ae5a903124178bb870f6",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 107,
"avg_line_length": 35.916030534351144,
"alnum_prop": 0.593836344314559,
"repo_name": "ntuecon/server",
"id": "ee94996f18260209f491b4b8dff9a3ec433f8939",
"size": "4776",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "pyenv/Lib/site-packages/win32/Demos/win32clipboardDemo.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "2209"
},
{
"name": "Batchfile",
"bytes": "1509"
},
{
"name": "C",
"bytes": "504013"
},
{
"name": "C++",
"bytes": "96440"
},
{
"name": "CSS",
"bytes": "133288"
},
{
"name": "GAP",
"bytes": "18122"
},
{
"name": "HTML",
"bytes": "150026"
},
{
"name": "JavaScript",
"bytes": "243314"
},
{
"name": "Objective-C",
"bytes": "1292"
},
{
"name": "PowerShell",
"bytes": "8325"
},
{
"name": "Python",
"bytes": "27048260"
},
{
"name": "Shell",
"bytes": "47820"
},
{
"name": "Tcl",
"bytes": "1237796"
},
{
"name": "Visual Basic",
"bytes": "949"
},
{
"name": "XSLT",
"bytes": "2113"
}
],
"symlink_target": ""
} |
"""
Contains some visualization functions for CTA factors.
"""
import matplotlib
matplotlib.use('Qt4Agg')
import numpy as np
import pandas as pd
import seaborn as sns
#import matplotlib as mpl
#mpl.rcParams["font.sans-serif"] = ["Microsoft YaHei"]#
#mpl.rcParams['axes.unicode_minus'] = False
import matplotlib.pyplot as plt
from calcFunction import get_capital_np
#----------------------------------------------------------------------
def plotSigCaps(signals,markets,climit=4,wlimit=2,size=1,rate=0.0001,op=True):
"""
    Plot the capital curve for a single signal.
"""
plt.close()
pnls,poss = get_capital_np(markets,signals,size,rate,\
climit=climit, wlimit=wlimit,op=op)
caps = np.cumsum(pnls[pnls!=0])
return caps,poss
#----------------------------------------------------------------------
def plotSigHeats(signals,markets,start=0,step=2,size=1,iters=6):
"""
    Plot the signal backtest profit/loss heatmap to look for islands of
    parameter stability.
"""
sigMat = pd.DataFrame(index=range(iters),columns=range(iters))
for i in range(iters):
for j in range(iters):
climit = start + i*step
wlimit = start + j*step
caps,poss = plotSigCaps(signals,markets,climit=climit,wlimit=wlimit,size=size,op=False)
sigMat[i][j] = caps[-1]
sns.heatmap(sigMat.values.astype(np.float64),annot=True,fmt='.2f',annot_kws={"weight": "bold"})
xTicks = [i+0.5 for i in range(iters)]
yTicks = [iters-i-0.5 for i in range(iters)]
xyLabels = [str(start+i*step) for i in range(iters)]
_, labels = plt.yticks(yTicks,xyLabels)
plt.setp(labels, rotation=0)
_, labels = plt.xticks(xTicks,xyLabels)
plt.setp(labels, rotation=90)
plt.xlabel('Loss Stop @')
plt.ylabel('Profit Stop @')
return sigMat
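# Illustrative usage sketch, not part of the original module; `signals` and
# `markets` are assumed to be per-bar aligned numpy arrays:
#     caps, poss = plotSigCaps(signals, markets, climit=4, wlimit=2)
#     sigMat = plotSigHeats(signals, markets, start=0, step=2, iters=6)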
| {
"content_hash": "1e4f68b260b0d9db58bacd61369740e1",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 99,
"avg_line_length": 35.229166666666664,
"alnum_prop": 0.6014192785334121,
"repo_name": "moonnejs/uiKLine",
"id": "8bbed72f388097273d3a7d08f8820484688ac87e",
"size": "1793",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ctaFunction/visFunction.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "32"
},
{
"name": "Python",
"bytes": "62267"
}
],
"symlink_target": ""
} |
"""
.. module:: Downsample test
:platform: Unix
:synopsis: unittest test classes for plugins
.. moduleauthor:: Mark Basham <[email protected]>
"""
import unittest
from savu.test.plugin_test import PluginTest
class DownsampleTest(PluginTest):
def setUp(self):
self.plugin_name = "savu.plugins.downsample_filter"
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "c10f516f5ace9fac2b93cc379d5a1a3d",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 59,
"avg_line_length": 18.666666666666668,
"alnum_prop": 0.6836734693877551,
"repo_name": "swtp1v07/Savu",
"id": "ec31e66eee3bf4fb1d9c778d2a4ed9b15d6d6d4b",
"size": "982",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "savu/test/downsample_plugin_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "84400"
},
{
"name": "C++",
"bytes": "509"
},
{
"name": "Makefile",
"bytes": "2126"
},
{
"name": "Python",
"bytes": "349231"
},
{
"name": "Shell",
"bytes": "6321"
}
],
"symlink_target": ""
} |
import json
from mock import patch
from onadata.apps.api.tests.viewsets.test_abstract_viewset import\
TestAbstractViewSet
from onadata.apps.api.viewsets.user_profile_viewset import UserProfileViewSet
from onadata.apps.main.models import UserProfile
from django.contrib.auth.models import User
from onadata.libs.serializers.user_profile_serializer import (
_get_first_last_names
)
def _profile_data():
return {
'username': u'deno',
'first_name': u'Dennis',
'last_name': u'erama',
'email': u'[email protected]',
'city': u'Denoville',
'country': u'US',
'organization': u'Dono Inc.',
'website': u'deno.com',
'twitter': u'denoerama',
'require_auth': False,
'password': 'denodeno',
'is_org': False,
'name': u'Dennis erama'
}
class TestUserProfileViewSet(TestAbstractViewSet):
def setUp(self):
super(self.__class__, self).setUp()
self.view = UserProfileViewSet.as_view({
'get': 'list',
'post': 'create',
'patch': 'partial_update',
'put': 'update'
})
def test_profiles_list(self):
request = self.factory.get('/', **self.extra)
response = self.view(request)
self.assertNotEqual(response.get('Last-Modified'), None)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data, [self.user_profile_data()])
def test_profiles_get(self):
"""Test get user profile"""
view = UserProfileViewSet.as_view({
'get': 'retrieve'
})
request = self.factory.get('/', **self.extra)
response = view(request)
self.assertEqual(response.status_code, 400)
self.assertEqual(
response.data, {'detail': 'Expected URL keyword argument `user`.'})
# by username
response = view(request, user='bob')
self.assertNotEqual(response.get('Last-Modified'), None)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data, self.user_profile_data())
# by pk
response = view(request, user=self.user.pk)
self.assertNotEqual(response.get('Last-Modified'), None)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data, self.user_profile_data())
def test_profiles_get_anon(self):
view = UserProfileViewSet.as_view({
'get': 'retrieve'
})
request = self.factory.get('/')
response = view(request)
self.assertEqual(response.status_code, 400)
self.assertEqual(
response.data, {'detail': 'Expected URL keyword argument `user`.'})
request = self.factory.get('/')
response = view(request, user='bob')
data = self.user_profile_data()
del data['email']
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data, data)
self.assertNotIn('email', response.data)
def test_profiles_get_org_anon(self):
self._org_create()
self.client.logout()
view = UserProfileViewSet.as_view({
'get': 'retrieve'
})
request = self.factory.get('/')
response = view(request, user=self.company_data['org'])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data['first_name'],
self.company_data['name'])
self.assertIn('is_org', response.data)
self.assertEqual(response.data['is_org'], True)
def test_profile_create(self):
request = self.factory.get('/', **self.extra)
response = self.view(request)
self.assertEqual(response.status_code, 200)
data = _profile_data()
del data['name']
request = self.factory.post(
'/api/v1/profiles', data=json.dumps(data),
content_type="application/json", **self.extra)
response = self.view(request)
self.assertEqual(response.status_code, 201)
del data['password']
profile = UserProfile.objects.get(user__username=data['username'])
data['id'] = profile.user.pk
data['gravatar'] = profile.gravatar
data['url'] = 'http://testserver/api/v1/profiles/deno'
data['user'] = 'http://testserver/api/v1/users/deno'
data['metadata'] = {}
data['joined_on'] = profile.user.date_joined
data['name'] = "%s %s" % ('Dennis', 'erama')
self.assertEqual(response.data, data)
user = User.objects.get(username='deno')
self.assertTrue(user.is_active)
def test_profile_create_anon(self):
data = _profile_data()
request = self.factory.post(
'/api/v1/profiles', data=json.dumps(data),
content_type="application/json")
response = self.view(request)
self.assertEqual(response.status_code, 201)
del data['password']
del data['email']
profile = UserProfile.objects.get(user__username=data['username'])
data['id'] = profile.user.pk
data['gravatar'] = profile.gravatar
data['url'] = 'http://testserver/api/v1/profiles/deno'
data['user'] = 'http://testserver/api/v1/users/deno'
data['metadata'] = {}
data['joined_on'] = profile.user.date_joined
self.assertEqual(response.data, data)
self.assertNotIn('email', response.data)
def test_profile_create_missing_name_field(self):
request = self.factory.get('/', **self.extra)
response = self.view(request)
self.assertEqual(response.status_code, 200)
data = _profile_data()
del data['first_name']
del data['name']
request = self.factory.post(
'/api/v1/profiles', data=json.dumps(data),
content_type="application/json", **self.extra)
response = self.view(request)
response.render()
self.assertContains(response,
'Either name or first_name should be provided',
status_code=400)
def test_split_long_name_to_first_name_and_last_name(self):
name = "(CPLTGL) Centre Pour la Promotion de la Liberte D'Expression "\
"et de la Tolerance Dans La Region de"
first_name, last_name = _get_first_last_names(name)
self.assertEqual(first_name, "(CPLTGL) Centre Pour la Promot")
self.assertEqual(last_name, "ion de la Liberte D'Expression")
def test_partial_updates(self):
self.assertEqual(self.user.profile.country, u'US')
country = u'KE'
metadata = {u'computer': u'mac'}
json_metadata = json.dumps(metadata)
data = {'country': country, 'metadata': json_metadata}
request = self.factory.patch('/', data=data, **self.extra)
response = self.view(request, user=self.user.username)
profile = UserProfile.objects.get(user=self.user)
self.assertEqual(response.status_code, 200)
self.assertEqual(profile.country, country)
self.assertEqual(profile.metadata, metadata)
def test_partial_update_metadata_field(self):
metadata = {u"zebra": {u"key1": "value1", u"key2": "value2"}}
json_metadata = json.dumps(metadata)
data = {
'metadata': json_metadata,
}
request = self.factory.patch('/', data=data, **self.extra)
response = self.view(request, user=self.user.username)
profile = UserProfile.objects.get(user=self.user)
self.assertEqual(response.status_code, 200)
self.assertEqual(profile.metadata, metadata)
# create a new key/value object if it doesn't exist
data = {
'metadata': '{"zebra": {"key3": "value3"}}',
'overwrite': u'false'
}
request = self.factory.patch('/', data=data, **self.extra)
response = self.view(request, user=self.user.username)
profile = UserProfile.objects.get(user=self.user)
self.assertEqual(response.status_code, 200)
self.assertEqual(
profile.metadata, {u"zebra": {
u"key1": "value1", u"key2": "value2", u"key3": "value3"}})
# update an existing key/value object
data = {
'metadata': '{"zebra": {"key2": "second"}}', 'overwrite': u'false'}
request = self.factory.patch('/', data=data, **self.extra)
response = self.view(request, user=self.user.username)
profile = UserProfile.objects.get(user=self.user)
self.assertEqual(response.status_code, 200)
self.assertEqual(
profile.metadata, {u"zebra": {
u"key1": "value1", u"key2": "second", u"key3": "value3"}})
# add a new key/value object if the key doesn't exist
data = {
'metadata': '{"animal": "donkey"}', 'overwrite': u'false'}
request = self.factory.patch('/', data=data, **self.extra)
response = self.view(request, user=self.user.username)
profile = UserProfile.objects.get(user=self.user)
self.assertEqual(response.status_code, 200)
self.assertEqual(
profile.metadata, {
u"zebra": {
u"key1": "value1", u"key2": "second", u"key3": "value3"},
u'animal': u'donkey'})
# don't pass overwrite param
data = {'metadata': '{"b": "caah"}'}
request = self.factory.patch('/', data=data, **self.extra)
response = self.view(request, user=self.user.username)
profile = UserProfile.objects.get(user=self.user)
self.assertEqual(response.status_code, 200)
self.assertEqual(
profile.metadata, {u'b': u'caah'})
# pass 'overwrite' param whose value isn't false
data = {'metadata': '{"b": "caah"}', 'overwrite': u'falsey'}
request = self.factory.patch('/', data=data, **self.extra)
response = self.view(request, user=self.user.username)
profile = UserProfile.objects.get(user=self.user)
self.assertEqual(response.status_code, 200)
self.assertEqual(
profile.metadata, {u'b': u'caah'})
def test_put_update(self):
data = _profile_data()
# create profile
request = self.factory.post(
'/api/v1/profiles', data=json.dumps(data),
content_type="application/json", **self.extra)
response = self.view(request)
self.assertEqual(response.status_code, 201)
# edit username with existing different user's username
data['username'] = 'bob'
request = self.factory.put(
'/api/v1/profiles', data=json.dumps(data),
content_type="application/json", **self.extra)
response = self.view(request, user='deno')
self.assertEqual(response.status_code, 400)
# update
data['username'] = 'roger'
data['city'] = 'Nairobi'
request = self.factory.put(
'/api/v1/profiles', data=json.dumps(data),
content_type="application/json", **self.extra)
response = self.view(request, user='deno')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data['city'], data['city'])
def test_profile_create_mixed_case(self):
request = self.factory.get('/', **self.extra)
response = self.view(request)
self.assertEqual(response.status_code, 200)
data = _profile_data()
request = self.factory.post(
'/api/v1/profiles', data=json.dumps(data),
content_type="application/json", **self.extra)
response = self.view(request)
self.assertEqual(response.status_code, 201)
del data['password']
profile = UserProfile.objects.get(
user__username=data['username'].lower())
data['id'] = profile.user.pk
data['gravatar'] = unicode(profile.gravatar)
data['url'] = 'http://testserver/api/v1/profiles/deno'
data['user'] = 'http://testserver/api/v1/users/deno'
data['username'] = u'deno'
data['metadata'] = {}
data['joined_on'] = profile.user.date_joined
self.assertEqual(response.data, data)
data['username'] = u'deno'
data['joined_on'] = str(profile.user.date_joined)
request = self.factory.post(
'/api/v1/profiles', data=json.dumps(data),
content_type="application/json", **self.extra)
response = self.view(request)
self.assertEqual(response.status_code, 400)
self.assertIn("%s already exists" %
data['username'], response.data['username'])
def test_change_password(self):
view = UserProfileViewSet.as_view(
{'post': 'change_password'})
current_password = "bobbob"
new_password = "bobbob1"
post_data = {'current_password': current_password,
'new_password': new_password}
request = self.factory.post('/', data=post_data, **self.extra)
response = view(request, user='bob')
user = User.objects.get(username__iexact=self.user.username)
self.assertEqual(response.status_code, 200)
self.assertTrue(user.check_password(new_password))
def test_change_password_wrong_current_password(self):
view = UserProfileViewSet.as_view(
{'post': 'change_password'})
current_password = "wrong_pass"
new_password = "bobbob1"
post_data = {'current_password': current_password,
'new_password': new_password}
request = self.factory.post('/', data=post_data, **self.extra)
response = view(request, user='bob')
user = User.objects.get(username__iexact=self.user.username)
self.assertEqual(response.status_code, 400)
self.assertFalse(user.check_password(new_password))
def test_profile_create_with_name(self):
data = {
'username': u'deno',
'name': u'Dennis deno',
'email': u'[email protected]',
'city': u'Denoville',
'country': u'US',
'organization': u'Dono Inc.',
'website': u'deno.com',
'twitter': u'denoerama',
'require_auth': False,
'password': 'denodeno',
'is_org': False,
}
request = self.factory.post(
'/api/v1/profiles', data=json.dumps(data),
content_type="application/json", **self.extra)
response = self.view(request)
self.assertEqual(response.status_code, 201)
del data['password']
profile = UserProfile.objects.get(user__username=data['username'])
data['id'] = profile.user.pk
data['first_name'] = 'Dennis'
data['last_name'] = 'deno'
data['gravatar'] = profile.gravatar
data['url'] = 'http://testserver/api/v1/profiles/deno'
data['user'] = 'http://testserver/api/v1/users/deno'
data['metadata'] = {}
data['joined_on'] = profile.user.date_joined
self.assertEqual(response.data, data)
user = User.objects.get(username='deno')
self.assertTrue(user.is_active)
def test_twitter_username_validation(self):
data = {
'username': u'deno',
'name': u'Dennis deno',
'email': u'[email protected]',
'city': u'Denoville',
'country': u'US',
'organization': u'Dono Inc.',
'website': u'deno.com',
'twitter': u'denoerama',
'require_auth': False,
'password': 'denodeno',
'is_org': False,
}
request = self.factory.post(
'/api/v1/profiles', data=json.dumps(data),
content_type="application/json", **self.extra)
response = self.view(request)
self.assertEqual(response.status_code, 201)
data['twitter'] = 'denoerama'
data = {
'username': u'deno',
'name': u'Dennis deno',
'email': u'[email protected]',
'city': u'Denoville',
'country': u'US',
'organization': u'Dono Inc.',
'website': u'deno.com',
'twitter': u'denoeramaddfsdsl8729320392ujijdswkp--22kwklskdsjs',
'require_auth': False,
'password': 'denodeno',
'is_org': False,
}
request = self.factory.post(
'/api/v1/profiles', data=json.dumps(data),
content_type="application/json", **self.extra)
response = self.view(request)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.data['twitter'],
[u'Invalid twitter username'])
user = User.objects.get(username='deno')
self.assertTrue(user.is_active)
def test_put_patch_method_on_names(self):
data = _profile_data()
# create profile
request = self.factory.post(
'/api/v1/profiles', data=json.dumps(data),
content_type="application/json", **self.extra)
response = self.view(request)
self.assertEqual(response.status_code, 201)
# update
data['first_name'] = 'Tom'
del data['name']
request = self.factory.put(
'/api/v1/profiles', data=json.dumps(data),
content_type="application/json", **self.extra)
response = self.view(request, user='deno')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data['first_name'], data['first_name'])
first_name = u'Henry'
last_name = u'Thierry'
data = {'first_name': first_name, 'last_name': last_name}
request = self.factory.patch('/', data=data, **self.extra)
response = self.view(request, user=self.user.username)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data['first_name'], data['first_name'])
self.assertEqual(response.data['last_name'], data['last_name'])
@patch('django.core.mail.EmailMultiAlternatives.send')
def test_send_email_activation_api(self, mock_send_mail):
request = self.factory.get('/', **self.extra)
response = self.view(request)
self.assertEqual(response.status_code, 200)
data = _profile_data()
del data['name']
request = self.factory.post(
'/api/v1/profiles', data=json.dumps(data),
content_type="application/json", **self.extra)
response = self.view(request)
self.assertEqual(response.status_code, 201)
# Activation email not sent
self.assertFalse(mock_send_mail.called)
user = User.objects.get(username='deno')
self.assertTrue(user.is_active)
def test_partial_update_email(self):
self.assertEqual(self.user.profile.country, u'US')
data = {'email': '[email protected]'}
request = self.factory.patch('/', data=data, **self.extra)
response = self.view(request, user=self.user.username)
profile = UserProfile.objects.get(user=self.user)
self.assertEqual(response.status_code, 200)
self.assertEqual(profile.user.email, '[email protected]')
data = {'email': 'user@example'}
request = self.factory.patch('/', data=data, **self.extra)
response = self.view(request, user=self.user.username)
self.assertEqual(response.status_code, 400)
def test_partial_update_unique_email_api(self):
data = {'email': '[email protected]'}
request = self.factory.patch(
'/api/v1/profiles', data=json.dumps(data),
content_type="application/json", **self.extra)
response = self.view(request, user=self.user.username)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data['email'], data['email'])
# create User
request = self.factory.post(
'/api/v1/profiles', data=json.dumps(_profile_data()),
content_type="application/json", **self.extra)
response = self.view(request)
self.assertEqual(response.status_code, 201)
user = User.objects.get(username='deno')
# Update email
request = self.factory.patch(
'/api/v1/profiles', data=json.dumps(data),
content_type="application/json", **self.extra)
response = self.view(request, user=user.username)
self.assertEqual(response.status_code, 400)
| {
"content_hash": "43a66d7b2cf9bbad23f31e99f5797533",
"timestamp": "",
"source": "github",
"line_count": 511,
"max_line_length": 79,
"avg_line_length": 40.08023483365949,
"alnum_prop": 0.5895708217372199,
"repo_name": "qlands/onadata",
"id": "559d8d3e099de89ee92afac2c3211cd9b21bdbb5",
"size": "20481",
"binary": false,
"copies": "5",
"ref": "refs/heads/20151009",
"path": "onadata/apps/api/tests/viewsets/test_user_profile_viewset.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "70153"
},
{
"name": "HTML",
"bytes": "248525"
},
{
"name": "JavaScript",
"bytes": "904742"
},
{
"name": "Makefile",
"bytes": "2286"
},
{
"name": "Python",
"bytes": "2817195"
},
{
"name": "Shell",
"bytes": "14149"
}
],
"symlink_target": ""
} |
import json
import utils
import logger
import problemdb
import teamdb
from decorators import api_wrapper
from flask import Blueprint, request, session, jsonify, make_response
from decorators import admins_only
api = Blueprint("api", __name__)
@api.route("/api/register", methods=["POST"])
@api_wrapper
def register():
team = request.form["team"]
password = request.form["password"]
password2 = request.form["password2"]
if password != password2:
return {"message": "Passwords do not match"}
if len(password) < 4:
return {"message": "Passwords should be at least 4 characters long"}
if teamdb.team_exists(team):
return {"message": "Team already exists"}
else:
response = teamdb.add_team(team, password)
if "Success" in response:
logger.log("registrations", logger.INFO, "%s has registered" % team)
return {"message": response}
@api.route("/api/login", methods=["POST"])
@api_wrapper
def login():
team = request.form["team"]
password = request.form["password"]
stored_team_password = teamdb.get_team_password(team)
if utils.check_password(stored_team_password, password):
session["tid"] = team
session["logged_in"] = True
if teamdb.is_admin(team):
session["admin"] = True
logger.log("logins", logger.WARNING, "%s logged as admin" % team)
else:
logger.log("logins", logger.INFO, "%s logged in" % team)
return {"success": 1, "message": "Success!"}
else:
return {"success": 0, "message": "Invalid credentials"}
@api.route("/api/add_problem", methods=["POST"])
@admins_only
@api_wrapper
def add_problem():
name = request.form["problem_name"]
desc = request.form["problem_desc"]
hint = request.form["problem_hint"]
category = request.form["problem_category"]
value = request.form["problem_value"]
flag = request.form["problem_flag"]
response = problemdb.add_problem(name, desc, hint, category, value, flag)
return {"message": response}
@api.route("/api/submit_flag", methods=["POST"])
@api_wrapper
def submit_flag():
flag = request.form["flag"]
pid = request.form["pid"]
team = session["tid"]
response = problemdb.submit_flag(team, pid, flag)
return {"message": response}
@api.route("/api/remove_problem", methods=["POST"])
@admins_only
@api_wrapper
def remove_problem():
pid = request.form["pid"]
response = problemdb.remove_problem(pid)
return {"message": response}
@api.route("/api/update_problem", methods=["POST"])
@admins_only
@api_wrapper
def update_problem():
pid = request.form["pid"]
name = request.form["name"]
desc = request.form["description"]
hint = request.form["hint"]
category = request.form["category"]
points = request.form["points"]
flag = request.form["flag"]
response = problemdb.update_problem(pid, name, desc, hint, category, points, flag)
return {"message": response}
@api.route("/api/top/<count>", methods=["GET", "POST"])
def top(count):
data = {}
    teams = teamdb.get_scoreboard_data(int(count))  # limit scoreboard to the requested number of teams
scoreboard = []
for team in teams:
jason = {}
jason["name"] = team[0]
jason["score"] = team[2]
jason["last_solve"] = team[5]
jason["progression"] = team[6] + ",%s,%s" % (team[2], utils.get_time_since_epoch()) # Add current time score to make graph look nicer
scoreboard.append(jason)
data["scoreboard"] = scoreboard
return jsonify(data=data)
@api.route("/api/export_data", methods=["GET", "POST"])
@admins_only
def export_data():
data = {}
form = request.form
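    # each requested section ("problems", "scoreboard", "teams") is serialized independently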
if "problems" in form:
problem_list = []
problems = problemdb.get_problems()
for problem in problems:
jason = {}
jason["pid"] = problem[0]
jason["name"] = problem[1]
jason["description"] = problem[2]
jason["hint"] = problem[3]
jason["category"] = problem[4]
jason["points"] = problem[5]
jason["flag"] = problem[6]
problem_list.append(jason)
data["problems"] = problem_list
if "scoreboard" in form:
scoreboard = []
teams = teamdb.get_scoreboard_data()
for team in teams:
jason = {}
jason["name"] = team[0]
jason["score"] = team[2]
jason["solves"] = [problemdb.get_name_from_pid(pid) for pid in team[4].split(",")[1:]]
jason["last_solve"] = team[5]
jason["progression"] = team[6] + ",%s,%s" % (team[2], utils.get_time_since_epoch()) # Add current time score to make graph look nicer
scoreboard.append(jason)
data["scoreboard"] = scoreboard
if "teams" in form:
team_list = []
teams = teamdb.get_teams()
for team in teams:
jason = {}
jason["name"] = team[0]
jason["password"] = team[1]
jason["score"] = team[2]
jason["admin"] = team[3]
jason["solves"] = [problemdb.get_name_from_pid(pid) for pid in team[4].split(",")[1:]]
jason["last_solve"] = team[5]
jason["progression"] = team[6]
team_list.append(jason)
data["teams"] = team_list
data = json.dumps(data, indent=4)
if "download" in form:
response = make_response(data)
response.headers["Content-Disposition"] = "attachment; filename=data.json"
return response
return jsonify(data=json.loads(data))
| {
"content_hash": "bd35aabf8c0ab8141dab2623d3e53e73",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 145,
"avg_line_length": 34.21604938271605,
"alnum_prop": 0.5987732274941368,
"repo_name": "james9909/IntroCTF",
"id": "9dd6ddb879ccc123066e7ccd9bd9bfcdbd764eec",
"size": "5543",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1646"
},
{
"name": "HTML",
"bytes": "21714"
},
{
"name": "JavaScript",
"bytes": "7518"
},
{
"name": "Makefile",
"bytes": "118"
},
{
"name": "PHP",
"bytes": "6995"
},
{
"name": "Python",
"bytes": "40825"
},
{
"name": "Shell",
"bytes": "4260"
}
],
"symlink_target": ""
} |
import json
import bpy
import os
from bpy.types import Operator
from . import utils
from .assets.scripts.evertims import ( Evertims, evertUtils )
# to launch EVERTims raytracing client
import subprocess
# ---------------------------------------------------------------
# import components necessary to report EVERTims raytracing client
# logs in console
import sys
import threading
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty # python 3.x
ON_POSIX = 'posix' in sys.builtin_module_names
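# subprocess' close_fds option historically could not be combined with piped output on
# Windows, so it is only enabled on POSIX platforms below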
ASSET_FILE_NAME = "evertims-assets.blend"
class EVERTimsImportObject(Operator):
"""Import default EVERTims element (KX_GameObject) into scene"""
bl_label = "Import an object (KX_GameObject, Empty, etc.) from anther .blend file"
bl_idname = 'evert.import_template'
bl_options = {'REGISTER', 'UNDO'}
arg = bpy.props.StringProperty()
def execute(self, context):
loadType = self.arg
evertims = context.scene.evertims
# cancel if simulation is running
if evertims.enable_edit_mode:
self.report({'WARNING'}, 'Cannot import element while simulation is running')
return {'CANCELLED'}
# cleanup before we start
bpy.ops.object.select_all(action='DESELECT')
# set asset .blend file name
filename = ASSET_FILE_NAME
obj = None
if loadType == 'scene':
# load each object separately for access to resulting obj.name
obj = self.loadAsset(filename, ('Room'))
evertims.room_object = obj.name
obj = self.loadAsset(filename, ('Source'))
evertims.source_object = obj.name
obj = self.loadAsset(filename, ('Listener_Rotate', 'Listener'))
evertims.listener_object = obj.name
# load others
self.loadAsset(filename, ('Logic_EVERTims'))
if loadType == 'logic':
obj = self.loadAsset(filename, ('Logic_EVERTims'))
if loadType == 'room':
obj = self.loadAsset(filename, ('Room'))
evertims.room_object = obj.name
if loadType == 'source':
obj = self.loadAsset(filename, ('Source'))
evertims.source_object = obj.name
if loadType == 'listener':
obj = self.loadAsset(filename, ('Listener_Rotate', 'Listener'))
evertims.listener_object = obj.name
if not obj:
self.report({'ERROR'}, 'something went wrong')
return {'CANCELLED'}
else:
obj.select = True
bpy.context.scene.objects.active = obj
return {'FINISHED'}
def loadAsset(self, filename, objList):
scriptPath = os.path.realpath(__file__)
assetPath = os.path.join(os.path.dirname(scriptPath), 'assets', filename)
try:
with bpy.data.libraries.load(assetPath) as (data_from, data_to):
data_to.objects = [name for name in data_from.objects if name in objList]
except:
            return None  # asset file not found; execute() reports the error to the user
for obj in data_to.objects:
bpy.context.scene.objects.link(obj)
return obj
class EVERTimsImportText(Operator):
"""Import the list of available EVERTims acoustic materials as a text file"""
bl_label = "Import a text file from anther .blend file"
bl_idname = 'evert.import_script'
bl_options = {'REGISTER', 'UNDO'}
arg = bpy.props.StringProperty()
def execute(self, context):
loadType = self.arg
# set asset .blend file name
filename = ASSET_FILE_NAME
if loadType == 'materialList':
isLoaded = self.loadAsset(filename, ('evertims-materials.txt'))
if not isLoaded:
self.report({'ERROR'}, 'something went wrong')
return {'CANCELLED'}
else:
self.report({'INFO'}, 'EVERTims material file imported in Text Editor window.')
return {'FINISHED'}
def loadAsset(self, filename, objList):
scriptPath = os.path.realpath(__file__)
assetPath = os.path.join(os.path.dirname(scriptPath), 'assets', filename)
try:
with bpy.data.libraries.load(assetPath) as (data_from, data_to):
data_to.texts = [name for name in data_from.texts if name in objList]
except:
return False
return True
class EVERTimsInBge(Operator):
"""setup EVERTims for BGE session"""
bl_label = "setup EVERTims in BGE"
bl_idname = 'evert.evertims_in_bge'
bl_options = {'REGISTER'}
arg = bpy.props.StringProperty()
def execute(self, context):
loadType = self.arg
evertims = context.scene.evertims
logic_obj = bpy.context.scene.objects.get('Logic_EVERTims')
if loadType == 'ENABLE':
# check if evertims setup properly
if not evertims.room_object or not evertims.source_object or not evertims.listener_object or not logic_obj:
self.report({'ERROR'}, 'Create at least 1 room (with material), 1 listener, 1 source and import EVERTims Logic object')
return {'CANCELLED'}
# update logic object properties for in-BGE access
self.init_evertims_module_path(context)
# update flag
evertims.enable_bge = True
elif loadType == 'DISABLE':
# update flag
evertims.enable_bge = False
# update logic object properties for in-BGE access
self.update_evertims_props(context)
return {'FINISHED'}
def update_evertims_props(self, context):
evertims = context.scene.evertims
# remember current active object
old_obj = bpy.context.scene.objects.active
# get logic object
obj = bpy.context.scene.objects.get('Logic_EVERTims')
if obj:
bpy.context.scene.objects.active = obj
propList = [ 'enable_bge', 'debug_logs', 'debug_rays', 'ip_local', 'ip_raytracing', 'ip_auralization', 'port_read', 'port_write_raytracing', 'port_write_auralization', 'movement_threshold_loc', 'movement_threshold_rot', 'room_object', 'source_object', 'listener_object']
# sync. properties (for bge access) with GUI's
for propName in propList:
propValue = eval('evertims.' + propName)
obj.game.properties[propName].value = propValue
# get room object, update its RT60 values (not possible in bge, based on bpy routines)
room = bpy.context.scene.objects.get(evertims.room_object)
rt60Values = evertUtils.getRt60Values(utils.str2dict(evertims.mat_list), room)
obj.game.properties['rt60Values'].value = json.dumps(rt60Values)
# reset old active object
bpy.context.scene.objects.active = old_obj
def init_evertims_module_path(self, context):
# get add-on path
current_script_file = os.path.realpath(__file__)
current_script_directory = os.path.dirname(current_script_file)
addon_path = os.path.join(current_script_directory, 'assets', 'scripts')
# get logic object
obj = context.scene.objects.get('Logic_EVERTims')
if obj:
obj.game.properties['evertims_path'].value = addon_path
class EVERTimsInEditMode(Operator):
"""Start OSC sync. with EVERTims raytracing client.
Continuous upload of scene info (room, listener, etc.) for auralization and
download of raytracing results for visual feedback."""
bl_label = "Enable Blender to EVERTims raytracing client connection in edit mode (as opposed to BGE mode)"
bl_idname = 'evert.evertims_in_edit_mode'
bl_options = {'REGISTER'}
arg = bpy.props.StringProperty()
_evertims = Evertims()
_handle_timer = None
@staticmethod
def handle_add(self, context):
# EVERTimsInEditMode._handle_draw_callback = bpy.types.SpaceView3D.draw_handler_add(EVERTimsInEditMode._draw_callback, (self,context), 'WINDOW', 'PRE_VIEW')
context.window_manager.modal_handler_add(self)
EVERTimsInEditMode._handle_timer = context.window_manager.event_timer_add(0.075, context.window)
if context.scene.evertims.debug_logs: print('added evertims callback to draw_handler')
@staticmethod
def handle_remove(context):
if EVERTimsInEditMode._handle_timer is not None:
context.window_manager.event_timer_remove(EVERTimsInEditMode._handle_timer)
EVERTimsInEditMode._handle_timer = None
# context.window_manager.modal_handler_add(self)
# bpy.types.SpaceView3D.draw_handler_remove(EVERTimsInEditMode._handle_draw_callback, 'WINDOW')
# EVERTimsInEditMode._handle_draw_callback = None
if context.scene.evertims.debug_logs: print('removed evertims callback from draw_handler')
@classmethod
def poll(cls, context):
return context.area.type == 'VIEW_3D'
def invoke(self, context, event):
scene = context.scene
evertims = scene.evertims
loadType = self.arg
# get active object
# obj = bpy.context.scene.objects.active
if loadType == 'PLAY':
# load mat file if not loaded already
evertims.mat_list = utils.loadMatFile(context)
self._evertims.setMaterials(utils.str2dict(evertims.mat_list))
# check room materials definition
roomMatError = self.checkRoomMaterials(context)
if roomMatError:
self.report({'ERROR'}, roomMatError)
return {'CANCELLED'}
# init evertims
isInitOk = self.initEvertims(context)
if isInitOk:
# update enable flag
evertims.enable_edit_mode = True
# add callback
self.handle_add(self,context)
return {'RUNNING_MODAL'}
else:
self.report({'ERROR'}, 'Create at least 1 room (with material), 1 listener, 1 source and import EVERTims Logic object')
return {'CANCELLED'}
elif loadType == 'STOP':
# update enable flag
evertims.enable_edit_mode = False
# erase rays from screen
context.area.tag_redraw()
return {'CANCELLED'}
elif loadType == 'CRYSTALIZE':
self._evertims.crystalizeVisibleRays()
return {'FINISHED'}
def modal(self, context, event):
"""
        modal method, runs continuously; the cancel function is called when Blender quits or loads a new scene
"""
# kill modal
if not context.scene.evertims.enable_edit_mode:
self.cancel(context)
# return flag to notify callback manager that this callback is no longer running
return {'CANCELLED'}
# execute modal
elif event.type == 'TIMER':
# run evertims internal callbacks
self._evertims.bpy_modal()
# force bgl rays redraw (else only redraw rays on user input event)
if not context.area is None:
context.area.tag_redraw()
return {'PASS_THROUGH'}
def cancel(self, context):
"""
        called when Blender quits or loads a new scene; removes the local callback from the stack
"""
# remove local callback
self.handle_remove(context)
# remove nested callback
self._evertims.handle_remove()
# erase rays from screen
if not context.area is None:
context.area.tag_redraw()
def checkRoomMaterials(self, context):
"""
check if all room materials are defined in mat file, return error msg (string)
"""
evertims = context.scene.evertims
objects = bpy.context.scene.objects
room = objects.get(evertims.room_object)
if not room: return 'no room defined'
slots = room.material_slots
evertMat = utils.str2dict(evertims.mat_list)
for mat in slots:
if not mat.name in evertMat: return 'undefined room material: ' + mat.name
return ''
def initEvertims(self, context):
"""
init the Evertims() class instance, using GUI defined parameters (in EVERTims add-on pannel)
"""
evertims = context.scene.evertims
IP_RAYTRACING = evertims.ip_raytracing # EVERTims client IP address
IP_AURALIZATION = evertims.ip_auralization # Auralization Engine IP address
PORT_W_RAYTRACING = evertims.port_write_raytracing # port used by EVERTims client to read data sent by the BGE
PORT_W_AURALIZATION = evertims.port_write_auralization # port used by EVERTims client to read data sent by the BGE
IP_LOCAL = evertims.ip_local # local host (this computer) IP address, running the BGE
PORT_R = evertims.port_read # port used by the BGE to read data sent by the EVERTims client
DEBUG_LOG = evertims.debug_logs # enable / disable console log
DEBUG_RAYS = evertims.debug_rays # enable / disable visual feedback on traced rays
MOVE_UPDATE_THRESHOLD_VALUE_LOC = evertims.movement_threshold_loc # minimum value a listener / source must move to be updated on EVERTims client (m)
MOVE_UPDATE_THRESHOLD_VALUE_ROT = evertims.movement_threshold_rot # minimum value a listener / source must rotate to be updated on EVERTims client (deg)
# set debug mode
self._evertims.setDebugMode(DEBUG_LOG)
# self._evertims.setBufferSize(4096)
# define EVERTs elements: room, listener and source
# 1. reset local dicts if already filled
self._evertims.resetObjDict()
        # (in case something changed, don't want to keep old evertims element instances)
# 2. define new elements
objects = bpy.context.scene.objects
# add room
obj = objects.get(evertims.room_object)
if obj: self._evertims.addRoom(obj)
if evertims.debug_logs: print('adding room: ', obj.name)
# add source
obj = objects.get(evertims.source_object)
if obj: self._evertims.addSource(obj)
if evertims.debug_logs: print('adding source: ', obj.name)
# add listener
obj = objects.get(evertims.listener_object)
if obj: self._evertims.addListener(obj)
if evertims.debug_logs: print('adding listener: ', obj.name)
# get logic object
logic_obj = bpy.context.scene.objects.get('Logic_EVERTims')
# get room for later check
room_obj = self._evertims.getRoom()
# limit listener / source position updates in EVERTims Client
self._evertims.setMovementUpdateThreshold(MOVE_UPDATE_THRESHOLD_VALUE_LOC, MOVE_UPDATE_THRESHOLD_VALUE_ROT)
# init network connections
self._evertims.initConnection_writeRaytracing(IP_RAYTRACING, PORT_W_RAYTRACING)
self._evertims.initConnection_writeAuralization(IP_AURALIZATION, PORT_W_AURALIZATION)
self._evertims.initConnection_read(IP_LOCAL, PORT_R)
# activate raytracing
self._evertims.activateRayTracingFeedback(DEBUG_RAYS)
# check if evertims module is ready to start
# print (self._evertims.isReady(), logic_obj, room_obj.material_slots)
if self._evertims.isReady() and logic_obj:
# print('isready', [e for e in room_obj.material_slots])
if room_obj.material_slots:
# print('2')
# start EVERTims client
if evertims.debug_logs: print ('start simulation...')
# print('3')
self._evertims.startClientSimulation()
# print('4')
return True
print ('\n###### EVERTims SIMULATION ABORTED ###### \nYou should create at least 1 room (with an EVERTims material), 1 listener, 1 source, \nimport the EVERTims Logic object \nand define EVERTims client parameters.\n')
return False
class EVERTimsRaytracingClient(Operator):
"""Start the Evertims raytracing client as a subprocess"""
bl_label = "Start / Stop the EVERTims raytracing client from Blender GUI"
bl_idname = 'evert.evertims_raytracing_client'
bl_options = {'REGISTER'}
_raytracing_process = None
_raytracing_debug_log_thread = None
_raytracing_debug_log_queue = None
_handle_timer = None
_raytracing_debug_thread_stop_event = None
arg = bpy.props.StringProperty()
@staticmethod
def handle_add(self, context, debugEnabled):
"""
called when starting the EVERTims raytracing client,
starts necessary callbacks to output client logs in Blender console.
        called even if debug mode is disabled: the modal then runs uselessly, but it
        guarantees that the self.cancel method is called when Blender is closed, giving
        us a handle to kill the EVERTims ims subprocess.
"""
# start thread for non-blocking log
if debugEnabled:
EVERTimsRaytracingClient._raytracing_debug_log_queue = Queue()
EVERTimsRaytracingClient._raytracing_debug_thread_stop_event = threading.Event() # used to stop the thread
EVERTimsRaytracingClient._raytracing_debug_log_thread = threading.Thread(target=self.enqueue_output, args=(self._raytracing_process.stdout, EVERTimsRaytracingClient._raytracing_debug_log_queue, EVERTimsRaytracingClient._raytracing_debug_thread_stop_event))
EVERTimsRaytracingClient._raytracing_debug_log_thread.daemon = True # thread dies with the program
EVERTimsRaytracingClient._raytracing_debug_log_thread.start()
# start modal
context.window_manager.modal_handler_add(self)
EVERTimsRaytracingClient._handle_timer = context.window_manager.event_timer_add(0.075, context.window)
if context.scene.evertims.debug_logs: print('added evertims raytracing modal callback to draw_handler')
@staticmethod
def handle_remove(context):
"""
called when terminating the EVERTims raytracing client,
remove callbacks added in handle_add method.
"""
# skip if somehow this runs while handle time not defined in handle_add
if EVERTimsRaytracingClient._handle_timer is None: return
# kill modal
context.window_manager.event_timer_remove(EVERTimsRaytracingClient._handle_timer)
EVERTimsRaytracingClient._handle_timer = None
# context.window_manager.modal_handler_add(self)
# indicate it's ok to finish log in stdout thread
# EVERTimsRaytracingClient._raytracing_debug_log_thread.daemon = False
if EVERTimsRaytracingClient._raytracing_debug_thread_stop_event is not None:
EVERTimsRaytracingClient._raytracing_debug_thread_stop_event.set()
if context.scene.evertims.debug_logs_raytracing:
print('removed raytracing modal callback from modal stack')
@classmethod
def poll(cls, context):
return context.area.type == 'VIEW_3D'
def enqueue_output(self, out, queue, stop_event):
"""
Based on the Queue python module, this callback runs when debug mode enabled,
allowing non-blocking print of EVERTims raytracing client logs in Blender console.
"""
if not stop_event.is_set():
for line in iter(out.readline, b''):
queue.put(line)
out.close()
        else:
            # stop event was already set before anything could be read: Python threads
            # cannot be force-stopped, so simply close the pipe and let the daemon thread exit
            out.close()
def invoke(self, context, event):
"""
Method called when button attached to local bl_idname clicked
"""
evertims = context.scene.evertims
addon_prefs = context.user_preferences.addons[__package__].preferences
loadType = self.arg
# start Evertims raytracing client (subprocess)
if loadType == 'PLAY':
# get launch command out of GUI properties
# cmd = "/Users/.../evertims/bin/ims -s 3858 -a 'listener_1/127.0.0.1:3860' -v 'listener_1/localhost:3862' -d 1 -D 2 -m /Users/.../evertims/resources/materials.dat -p 'listener_1/'"
client_cmd = bpy.path.abspath(addon_prefs.raytracing_client_path_to_binary)
client_cmd += " -s " + str(evertims.port_write_raytracing) # reader port
client_cmd += " -a " + "listener_1" + "/" + evertims.ip_auralization + ":" + str(evertims.port_write_auralization)
client_cmd += " -v " + "listener_1" + "/" + evertims.ip_local + ":" + str(evertims.port_read)
client_cmd += " -d " + str(evertims.min_reflection_order)
client_cmd += " -D " + str(evertims.max_reflection_order)
client_cmd += " -p " + "listener_1/ "
client_cmd += " -m " + bpy.path.abspath(addon_prefs.raytracing_client_path_to_matFile)
# launch subprocess
EVERTimsRaytracingClient._raytracing_process = subprocess.Popen(client_cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1, close_fds=ON_POSIX)
evertims.enable_raytracing_client = True
# enable log in Blender console if debug mode enabled
if evertims.debug_logs_raytracing:
print('launch EVERTims raytracing client subprocess')
print('command: \n', client_cmd)
self.handle_add(self, context, evertims.debug_logs_raytracing)
return {'RUNNING_MODAL'}
# terminate Evertims raytracing client (subprocess)
elif loadType == 'STOP':
# terminate subprocess
if self._raytracing_process: # if process has been (e.g. manually) closed since
self._raytracing_process.terminate()
evertims.enable_raytracing_client = False
# terminate modal thread
if self._handle_timer:
if self._raytracing_debug_thread_stop_event is not None: # debug enabled
self.handle_remove(context)
else:
self.handle_remove(context)
if evertims.debug_logs_raytracing: print('terminate EVERTims raytracing client subprocess')
return {'CANCELLED'}
def modal(self, context, event):
"""
        modal method, runs continuously; the cancel function is called when Blender quits or loads a new scene
"""
        # only does something if debug mode is enabled (the log queue exists only then)
if event.type == 'TIMER' and EVERTimsRaytracingClient._raytracing_debug_log_queue is not None:
try:
# get line from non-blocking Queue, attached to debug-log thread
line = EVERTimsRaytracingClient._raytracing_debug_log_queue.get_nowait() # or q.get(timeout=.1)
except Empty: # no output to print yet
pass
else: # got line, ready to print
sys.stdout.write(line.decode('utf-8'))
return {'PASS_THROUGH'}
def cancel(self, context):
"""
        called when Blender quits or loads a new scene; removes the local callback from the stack
"""
# kill ims process if blender exit while modal still running
if self._raytracing_process: # if process has been (e.g. manually) closed since
self._raytracing_process.terminate()
# remove modal
self.handle_remove(context)
class EVERTimsAuralizationClient(Operator):
"""Start the EVERTims auralization client as a subprocess"""
bl_label = "Start / Stop the EVERTims auralization client from Blender GUI"
bl_idname = 'evert.evertims_auralization_client'
bl_options = {'REGISTER'}
_process = None
arg = bpy.props.StringProperty()
def invoke(self, context, event):
"""
Method called when button attached to local bl_idname clicked
"""
evertims = context.scene.evertims
addon_prefs = context.user_preferences.addons[__package__].preferences
loadType = self.arg
# start Evertims auralization client (subprocess)
if loadType == 'PLAY':
# get launch command out of GUI properties
client_cmd = bpy.path.abspath(addon_prefs.auralization_client_path_to_binary)
# launch subprocess
EVERTimsAuralizationClient._process = subprocess.Popen(client_cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1, close_fds=ON_POSIX)
evertims.enable_auralization_client = True
return {'FINISHED'}
# terminate Evertims raytracing client (subprocess)
elif loadType == 'STOP':
# terminate subprocess
if self._process: # if process has been (e.g. manually) closed since
self._process.terminate()
evertims.enable_auralization_client = False
return {'CANCELLED'}
class EVERTimsUtils(Operator):
"""Miscellaneous utilities"""
bl_label = "Mist. utils operators"
bl_idname = 'evert.misc_utils'
bl_options = {'REGISTER'}
arg = bpy.props.StringProperty()
def invoke(self, context, event):
"""
Method called when button attached to local bl_idname clicked
"""
evertims = context.scene.evertims
loadType = self.arg
        # flag that mat file needs update (will happen just before the next auralization session)
if loadType == 'FLAG_MAT_UPDATE':
evertims.mat_list_need_update = True
return {'FINISHED'}
# ############################################################
# Un/Registration
# ############################################################
classes = (
EVERTimsImportObject,
EVERTimsImportText,
EVERTimsInBge,
EVERTimsInEditMode,
EVERTimsRaytracingClient,
EVERTimsAuralizationClient,
EVERTimsUtils
)
def register():
for cls in classes:
bpy.utils.register_class(cls)
def unregister():
for cls in classes:
bpy.utils.unregister_class(cls)
| {
"content_hash": "69f1aa9574aeaba2d09dbcf0b2c30a45",
"timestamp": "",
"source": "github",
"line_count": 666,
"max_line_length": 282,
"avg_line_length": 39.249249249249246,
"alnum_prop": 0.6285003825554706,
"repo_name": "EVERTims/game_engine_evertims",
"id": "f786220a92332d5b685721e30cc6a892f7994bf6",
"size": "26140",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "operators.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "179065"
}
],
"symlink_target": ""
} |
import datetime
import unittest
import snippet
class SnippetTest(unittest.TestCase):
def testParseWithImage(self):
        img_snp = snippet.Snippet("2011-10-28T16:51:00.000-07:00")
        # assumes the Snippet class parses its timestamp into a `published` attribute
        self.assertEqual(datetime.datetime(2011, 10, 28, 16, 51), img_snp.published)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "53dc7150fec8d29983769fe19dfaf55b",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 73,
"avg_line_length": 26,
"alnum_prop": 0.7027972027972028,
"repo_name": "starpow971/Stanford-Humanities-Center-Updater",
"id": "b5aa8e92b7d4a563ac35ee6397e9fbfc89ad86c0",
"size": "512",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "snippet_test.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "80689"
},
{
"name": "Shell",
"bytes": "74"
}
],
"symlink_target": ""
} |
"""This module contains regression tests for flows-related API handlers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from absl import app
from grr_response_core.lib import registry
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_server import data_store
from grr_response_server import flow
from grr_response_server import flow_base
from grr_response_server.flows.general import discovery
from grr_response_server.flows.general import file_finder
from grr_response_server.flows.general import processes
from grr_response_server.flows.general import transfer
from grr_response_server.gui import api_regression_test_lib
from grr_response_server.gui.api_plugins import flow as flow_plugin
from grr_response_server.output_plugins import email_plugin
from grr_response_server.rdfvalues import flow_objects as rdf_flow_objects
from grr_response_server.rdfvalues import flow_runner as rdf_flow_runner
from grr_response_server.rdfvalues import output_plugin as rdf_output_plugin
from grr.test_lib import acl_test_lib
from grr.test_lib import action_mocks
from grr.test_lib import flow_test_lib
from grr.test_lib import hunt_test_lib
from grr.test_lib import test_lib
class ApiGetFlowHandlerRegressionTest(api_regression_test_lib.ApiRegressionTest
):
"""Regression test for ApiGetFlowHandler."""
api_method = "GetFlow"
handler = flow_plugin.ApiGetFlowHandler
def Run(self):
# Fix the time to avoid regressions.
with test_lib.FakeTime(42):
client_id = self.SetupClient(0)
flow_id = flow_test_lib.StartFlow(
discovery.Interrogate,
client_id=client_id,
creator=self.token.username)
replace = api_regression_test_lib.GetFlowTestReplaceDict(
client_id, flow_id, "F:ABCDEF12")
self.Check(
"GetFlow",
args=flow_plugin.ApiGetFlowArgs(client_id=client_id, flow_id=flow_id),
replace=replace)
flow_base.TerminateFlow(client_id, flow_id,
"Pending termination: Some reason")
replace = api_regression_test_lib.GetFlowTestReplaceDict(
client_id, flow_id, "F:ABCDEF13")
# Fetch the same flow which is now should be marked as pending
# termination.
self.Check(
"GetFlow",
args=flow_plugin.ApiGetFlowArgs(client_id=client_id, flow_id=flow_id),
replace=replace)
class ApiListFlowsHandlerRegressionTest(
api_regression_test_lib.ApiRegressionTest):
"""Test client flows list handler."""
api_method = "ListFlows"
handler = flow_plugin.ApiListFlowsHandler
def Run(self):
acl_test_lib.CreateUser(self.token.username)
with test_lib.FakeTime(42):
client_id = self.SetupClient(0)
with test_lib.FakeTime(43):
flow_id_1 = flow_test_lib.StartFlow(
discovery.Interrogate, client_id, creator=self.token.username)
with test_lib.FakeTime(44):
flow_id_2 = flow_test_lib.StartFlow(
processes.ListProcesses, client_id, creator=self.token.username)
replace = api_regression_test_lib.GetFlowTestReplaceDict(
client_id, flow_id_1, "F:ABCDEF10")
replace.update(
api_regression_test_lib.GetFlowTestReplaceDict(client_id, flow_id_2,
"F:ABCDEF11"))
self.Check(
"ListFlows",
args=flow_plugin.ApiListFlowsArgs(client_id=client_id),
replace=replace)
class ApiListFlowRequestsHandlerRegressionTest(
api_regression_test_lib.ApiRegressionTest):
"""Regression test for ApiListFlowRequestsHandler."""
api_method = "ListFlowRequests"
handler = flow_plugin.ApiListFlowRequestsHandler
def Run(self):
client_id = self.SetupClient(0)
with test_lib.FakeTime(42):
flow_id = flow_test_lib.StartFlow(
processes.ListProcesses, client_id, creator=self.token.username)
test_process = rdf_client.Process(name="test_process")
mock = flow_test_lib.MockClient(
client_id,
action_mocks.ListProcessesMock([test_process]),
token=self.token)
mock.Next()
replace = api_regression_test_lib.GetFlowTestReplaceDict(client_id, flow_id)
self.Check(
"ListFlowRequests",
args=flow_plugin.ApiListFlowRequestsArgs(
client_id=client_id, flow_id=flow_id),
replace=replace)
class ApiListFlowResultsHandlerRegressionTest(
api_regression_test_lib.ApiRegressionTest):
"""Regression test for ApiListFlowResultsHandler."""
api_method = "ListFlowResults"
handler = flow_plugin.ApiListFlowResultsHandler
def _RunFlow(self, client_id):
flow_args = transfer.GetFileArgs(
pathspec=rdf_paths.PathSpec(
path="/tmp/evil.txt", pathtype=rdf_paths.PathSpec.PathType.OS))
client_mock = hunt_test_lib.SampleHuntMock(failrate=2)
with test_lib.FakeTime(42):
return flow_test_lib.StartAndRunFlow(
transfer.GetFile,
client_id=client_id,
client_mock=client_mock,
flow_args=flow_args)
def Run(self):
acl_test_lib.CreateUser(self.token.username)
client_id = self.SetupClient(0)
flow_id = self._RunFlow(client_id)
self.Check(
"ListFlowResults",
args=flow_plugin.ApiListFlowResultsArgs(
client_id=client_id, flow_id=flow_id, filter="evil"),
replace={flow_id: "W:ABCDEF"})
self.Check(
"ListFlowResults",
args=flow_plugin.ApiListFlowResultsArgs(
client_id=client_id, flow_id=flow_id, filter="benign"),
replace={flow_id: "W:ABCDEF"})
class ApiListFlowLogsHandlerRegressionTest(
api_regression_test_lib.ApiRegressionTest):
"""Regression test for ApiListFlowResultsHandler."""
api_method = "ListFlowLogs"
handler = flow_plugin.ApiListFlowLogsHandler
def _AddLogToFlow(self, client_id, flow_id, log_string):
entry = rdf_flow_objects.FlowLogEntry(
client_id=client_id, flow_id=flow_id, message=log_string)
data_store.REL_DB.WriteFlowLogEntries([entry])
def Run(self):
client_id = self.SetupClient(0)
flow_id = flow_test_lib.StartFlow(
processes.ListProcesses, client_id, creator=self.token.username)
with test_lib.FakeTime(52):
self._AddLogToFlow(client_id, flow_id, "Sample message: foo.")
with test_lib.FakeTime(55):
self._AddLogToFlow(client_id, flow_id, "Sample message: bar.")
replace = {flow_id: "W:ABCDEF"}
self.Check(
"ListFlowLogs",
args=flow_plugin.ApiListFlowLogsArgs(
client_id=client_id, flow_id=flow_id),
replace=replace)
self.Check(
"ListFlowLogs",
args=flow_plugin.ApiListFlowLogsArgs(
client_id=client_id, flow_id=flow_id, count=1),
replace=replace)
self.Check(
"ListFlowLogs",
args=flow_plugin.ApiListFlowLogsArgs(
client_id=client_id, flow_id=flow_id, count=1, offset=1),
replace=replace)
class ApiGetFlowResultsExportCommandHandlerRegressionTest(
api_regression_test_lib.ApiRegressionTest):
"""Regression test for ApiGetFlowResultsExportCommandHandler."""
api_method = "GetFlowResultsExportCommand"
handler = flow_plugin.ApiGetFlowResultsExportCommandHandler
def Run(self):
client_id = self.SetupClient(0)
flow_urn = "F:ABCDEF"
self.Check(
"GetFlowResultsExportCommand",
args=flow_plugin.ApiGetFlowResultsExportCommandArgs(
client_id=client_id, flow_id=flow_urn))
class ApiListFlowOutputPluginsHandlerRegressionTest(
api_regression_test_lib.ApiRegressionTest):
"""Regression test for ApiListFlowOutputPluginsHandler."""
api_method = "ListFlowOutputPlugins"
handler = flow_plugin.ApiListFlowOutputPluginsHandler
# ApiOutputPlugin's state is an AttributedDict containing URNs that
# are always random. Given that currently their JSON representation
# is proto-serialized and then base64-encoded, there's no way
# we can replace these URNs with something stable.
uses_legacy_dynamic_protos = True
def Run(self):
client_id = self.SetupClient(0)
email_descriptor = rdf_output_plugin.OutputPluginDescriptor(
plugin_name=email_plugin.EmailOutputPlugin.__name__,
plugin_args=email_plugin.EmailOutputPluginArgs(
email_address="test@localhost", emails_limit=42))
with test_lib.FakeTime(42):
flow_id = flow.StartFlow(
flow_cls=processes.ListProcesses,
client_id=client_id,
output_plugins=[email_descriptor])
self.Check(
"ListFlowOutputPlugins",
args=flow_plugin.ApiListFlowOutputPluginsArgs(
client_id=client_id, flow_id=flow_id),
replace={flow_id: "W:ABCDEF"})
class ApiListFlowOutputPluginLogsHandlerRegressionTest(
api_regression_test_lib.ApiRegressionTest):
"""Regression test for ApiListFlowOutputPluginLogsHandler."""
api_method = "ListFlowOutputPluginLogs"
handler = flow_plugin.ApiListFlowOutputPluginLogsHandler
# ApiOutputPlugin's state is an AttributedDict containing URNs that
# are always random. Given that currently their JSON representation
# is proto-serialized and then base64-encoded, there's no way
# we can replace these URNs with something stable.
uses_legacy_dynamic_protos = True
def Run(self):
client_id = self.SetupClient(0)
email_descriptor = rdf_output_plugin.OutputPluginDescriptor(
plugin_name=email_plugin.EmailOutputPlugin.__name__,
plugin_args=email_plugin.EmailOutputPluginArgs(
email_address="test@localhost", emails_limit=42))
with test_lib.FakeTime(42):
flow_id = flow_test_lib.StartAndRunFlow(
flow_cls=flow_test_lib.DummyFlowWithSingleReply,
client_id=client_id,
output_plugins=[email_descriptor])
self.Check(
"ListFlowOutputPluginLogs",
args=flow_plugin.ApiListFlowOutputPluginLogsArgs(
client_id=client_id,
flow_id=flow_id,
plugin_id="EmailOutputPlugin_0"),
replace={flow_id: "W:ABCDEF"})
class ApiListFlowOutputPluginErrorsHandlerRegressionTest(
api_regression_test_lib.ApiRegressionTest):
"""Regression test for ApiListFlowOutputPluginErrorsHandler."""
api_method = "ListFlowOutputPluginErrors"
handler = flow_plugin.ApiListFlowOutputPluginErrorsHandler
# ApiOutputPlugin's state is an AttributedDict containing URNs that
# are always random. Given that currently their JSON representation
# is proto-serialized and then base64-encoded, there's no way
# we can replace these URNs with something stable.
uses_legacy_dynamic_protos = True
def Run(self):
client_id = self.SetupClient(0)
failing_descriptor = rdf_output_plugin.OutputPluginDescriptor(
plugin_name=hunt_test_lib.FailingDummyHuntOutputPlugin.__name__)
with test_lib.FakeTime(42):
flow_id = flow_test_lib.StartAndRunFlow(
flow_cls=flow_test_lib.DummyFlowWithSingleReply,
client_id=client_id,
output_plugins=[failing_descriptor])
self.Check(
"ListFlowOutputPluginErrors",
args=flow_plugin.ApiListFlowOutputPluginErrorsArgs(
client_id=client_id,
flow_id=flow_id,
plugin_id="FailingDummyHuntOutputPlugin_0"),
replace={flow_id: "W:ABCDEF"})
class ApiCreateFlowHandlerRegressionTest(
api_regression_test_lib.ApiRegressionTest):
"""Regression test for ApiCreateFlowHandler."""
api_method = "CreateFlow"
handler = flow_plugin.ApiCreateFlowHandler
def Run(self):
client_id = self.SetupClient(0)
def ReplaceFlowId():
flows = data_store.REL_DB.ReadAllFlowObjects(client_id=client_id)
self.assertNotEmpty(flows)
flow_id = flows[0].flow_id
return api_regression_test_lib.GetFlowTestReplaceDict(client_id, flow_id)
with test_lib.FakeTime(42):
self.Check(
"CreateFlow",
args=flow_plugin.ApiCreateFlowArgs(
client_id=client_id,
flow=flow_plugin.ApiFlow(
name=processes.ListProcesses.__name__,
args=processes.ListProcessesArgs(
filename_regex=".", fetch_binaries=True),
runner_args=rdf_flow_runner.FlowRunnerArgs(
output_plugins=[], notify_to_user=True))),
replace=ReplaceFlowId)
class ApiCancelFlowHandlerRegressionTest(
api_regression_test_lib.ApiRegressionTest):
"""Regression test for ApiCancelFlowHandler."""
api_method = "CancelFlow"
handler = flow_plugin.ApiCancelFlowHandler
def Run(self):
client_id = self.SetupClient(0)
flow_id = flow.StartFlow(
flow_cls=processes.ListProcesses, client_id=client_id)
self.Check(
"CancelFlow",
args=flow_plugin.ApiCancelFlowArgs(
client_id=client_id, flow_id=flow_id),
replace={flow_id: "W:ABCDEF"})
class ApiListFlowDescriptorsHandlerRegressionTest(
api_regression_test_lib.ApiRegressionTest, acl_test_lib.AclTestMixin):
"""Regression test for ApiListFlowDescriptorsHandler."""
api_method = "ListFlowDescriptors"
handler = flow_plugin.ApiListFlowDescriptorsHandler
def Run(self):
test_registry = {
processes.ListProcesses.__name__: processes.ListProcesses,
file_finder.FileFinder.__name__: file_finder.FileFinder,
}
with utils.Stubber(registry.FlowRegistry, "FLOW_REGISTRY", test_registry):
self.CreateAdminUser(u"test")
self.Check("ListFlowDescriptors")
def main(argv):
api_regression_test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
| {
"content_hash": "c30818c4498bc08329f2e0050f38f4a0",
"timestamp": "",
"source": "github",
"line_count": 400,
"max_line_length": 80,
"avg_line_length": 34.51,
"alnum_prop": 0.6952332657200812,
"repo_name": "dunkhong/grr",
"id": "dbe5ad2a358198d33e2abbe08d766562b167cdb3",
"size": "13826",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "grr/server/grr_response_server/gui/api_plugins/flow_regression_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "Batchfile",
"bytes": "882"
},
{
"name": "C",
"bytes": "11321"
},
{
"name": "C++",
"bytes": "54535"
},
{
"name": "CSS",
"bytes": "36745"
},
{
"name": "Dockerfile",
"bytes": "1822"
},
{
"name": "HCL",
"bytes": "8451"
},
{
"name": "HTML",
"bytes": "193751"
},
{
"name": "JavaScript",
"bytes": "12795"
},
{
"name": "Jupyter Notebook",
"bytes": "199190"
},
{
"name": "Makefile",
"bytes": "3139"
},
{
"name": "PowerShell",
"bytes": "1984"
},
{
"name": "Python",
"bytes": "7430923"
},
{
"name": "Roff",
"bytes": "444"
},
{
"name": "Shell",
"bytes": "49155"
},
{
"name": "Standard ML",
"bytes": "8172"
},
{
"name": "TSQL",
"bytes": "10560"
},
{
"name": "TypeScript",
"bytes": "56756"
}
],
"symlink_target": ""
} |
"""
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
This file is part of the Smart Developer Hub Project:
http://www.smartdeveloperhub.org
Center for Open Middleware
http://www.centeropenmiddleware.com/
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Copyright (C) 2015 Center for Open Middleware.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
"""
import pkg_resources
try:
pkg_resources.declare_namespace(__name__)
except ImportError:
import pkgutil
__path__ = pkgutil.extend_path(__path__, __name__)
| {
"content_hash": "681b744e1504bbb22b4e8355d5e78198",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 76,
"avg_line_length": 39.0625,
"alnum_prop": 0.5504,
"repo_name": "fserena/agora-stoa",
"id": "d71f6eb331207dfd73ad38852a6539904bc22a2d",
"size": "1250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "agora/stoa/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "121135"
}
],
"symlink_target": ""
} |
import requests
import webbrowser
import re
import os
import sys
from bs4 import BeautifulSoup
BASE_URL = "http://www.reddit.com/r/dailyprogrammer/new/"
def get_soup(url):
return BeautifulSoup(requests.get(url).text)
def get_page_challenges(soup):
return [challenge for challenge in soup.find_all('div', class_='thing')]
def get_completed_challenges():
regex = re.compile("^challenge_(\d{1,}).py$")
return [f[10:-3] for f in os.listdir(os.getcwd()) if regex.match(f)]
def build_challenges(page_challenges):
challenges = []
regex = re.compile("^.{21}#(\d{1,}) \[([a-zA-Z]{4,12})\] (.+)$")
for page_challenge in page_challenges:
title = page_challenge.find('a', class_='title')
result = regex.match(title.text)
if result is None:
continue
challenge = {
'fullname': page_challenge.get('data-fullname'),
'name': result.group(3),
'number': result.group(1),
'url': title.get('href'),
'difficulty': result.group(2).lower(),
'title': result.group(0)
}
challenges.append(challenge)
return challenges
def main():
if len(sys.argv) != 2:
print "Usage: new_challenge.py <difficulty>"
exit(-1)
difficulty = sys.argv[1]
if difficulty.lower() not in ['easy', 'intermediate', 'hard']:
print "Invalid type of difficulty. "\
"Available choices: easy, intermediate or hard."
exit(-1)
# process completed files and get new challenges from reddit
completed = get_completed_challenges()
page_challenges = get_page_challenges(get_soup(BASE_URL))
# chooses the first one that hasn't been completed
while True:
challenges = build_challenges(page_challenges)
if not challenges:
print "No challenges found!"
exit()
for c in challenges:
if c['number'] not in completed and c['difficulty'] == difficulty:
print c
webbrowser.open_new("".join([
"http://www.reddit.com",
c['url']]))
exit()
# no challenges available in the current page, go to next page
page_challenges = get_page_challenges(get_soup("".join(
[BASE_URL,
"?count=",
str(len(page_challenges)),
"&after=",
challenges[len(challenges)-1]['fullname']
])))
if __name__ == "__main__":
main() | {
"content_hash": "7a233d7840dfdb4d7821cdfc560fa288",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 78,
"avg_line_length": 26.79787234042553,
"alnum_prop": 0.5712584358872569,
"repo_name": "miguelgazela/reddit-dailyprogrammer",
"id": "6664ca5e55366a04dbc498a69eaf78433382047d",
"size": "2542",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "new_challenge.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8184"
}
],
"symlink_target": ""
} |
import json
import re
import defusedxml.ElementTree as et
from geowatchutil.base import GeoWatchError
from geowatchutil.broker.base import GeoWatchBroker
from geowatchutil.codec.geowatch_codec_slack import GeoWatchCodecSlack
from slackbotosm.enumerations import URL_PROJECT_VIEW, URL_PROJECT_EDIT, URL_PROJECT_TASKS, URL_CHANGESET_API
from slackbotosm.mapping.base import GeoWatchMappingProject, GeoWatchMappingChangeset
from slackbotosm.utils import load_patterns
class SlackBotOSMBroker(GeoWatchBroker):
"""
Broker for Slack Bot for OpenStreetMap
"""
    _user_id = None # Derived from consumer authtoken
    _user_name = None # Derived from consumer authtoken
patterns = None
def _make_request(self, url, params=None, data=None, cookie=None, contentType=None):
"""
        Prepares and opens a urllib2 request built from a url, optional query params,
        POST data, cookie and content type; returns the response object.
"""
import urllib
import urllib2
if params:
url = url + '?' + urllib.urlencode(params)
req = urllib2.Request(url, data=data)
if cookie:
req.add_header('Cookie', cookie)
if contentType:
req.add_header('Content-type', contentType)
else:
if data:
req.add_header('Content-type', 'text/xml')
return urllib2.urlopen(req)
def _pre(self):
pass
def _post(self, messages=None):
for m in messages:
msgtype = m[u'type']
if msgtype == u'hello': # slack always open up connection with hello message
pass
elif msgtype == u'message':
msgsubtype = m.get(u'subtype', None)
if msgsubtype == u'bot_message':
username = m[u'username']
text = m[u'text']
pass
elif msgsubtype == u'message_deleted':
pass
else:
user = m[u'user']
text = m[u'text']
channel = m[u'channel']
#print "testing Message", m
match_question = None
match_value = None
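                    # try each known question's patterns against the message text until one matches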
for question in self.patterns:
for pattern in self.patterns[question]:
match_value = re.search(pattern, text, re.M|re.I)
if match_value:
match_question = question
break
if match_value:
break
if match_value:
outgoing = None
print "Match Question: ", match_question
print "Match Value: ", match_value
if match_question == "project":
try:
ctx = self._request_project(match_value.group("project"), URL_PROJECT_TASKS)
t = self.templates.get('SLACK_MESSAGE_TEMPLATE_PROJECT', None)
if t:
outgoing = self.codec_slack.render(ctx, t=t)
except:
print "Error processing match for original text: ", text
elif match_question == "changeset":
try:
ctx = self._request_changeset(match_value.group("changeset"), URL_CHANGESET_API)
t = self.templates.get('SLACK_MESSAGE_TEMPLATE_CHANGESET', None)
if t:
outgoing = self.codec_slack.render(ctx, t=t)
except:
print "Error processing match for original text: ", text
raise
if outgoing:
print "Sending message ..."
print "+ Data = ", outgoing
self.duplex[0]._channel.send_message(outgoing, topic=channel)
def _request_project(self, project, baseurl):
url = baseurl.format(project=project)
request = self._make_request(url, contentType="application/json")
if request.getcode () != 200:
raise Exception("Could not fetch json for project "+project+".")
response = request.read()
data = json.loads(response)
counter = {
"0": 0,
"1": 0,
"2": 0,
"3": 0,
"-1": 0
}
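        # tally the number of tasks in each state reported by the tasking manager API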
for f in data[u'features']:
p = f[u'properties']
state = str(p.get(u'state', None))
counter[state] = counter[state] + 1
return GeoWatchMappingProject().forward(project=int(project), counter=counter)
def _request_changeset(self, changesetID, baseurl):
url = baseurl.format(changeset=changesetID)
request = self._make_request(url, contentType="text/xml")
if request.getcode () != 200:
raise Exception("Could not fetch xml for changeset "+changesetID+".")
response = request.read()
root = et.fromstring(response)
kwargs = {
'id': changesetID
}
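        # copy the changeset attributes and each <tag k="..." v="..."/> entry into kwargs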
for changeset in root.findall('changeset'):
kwargs['user'] = changeset.get('user')
kwargs['closed_at'] = changeset.get('closed_at')
for tag in changeset.findall('tag'):
kwargs[tag.get('k')] = tag.get('v', '')
return GeoWatchMappingChangeset().forward(**kwargs)
def _req_user(self, messages):
        pass
def __init__(self, name, description, templates=None, duplex=None, consumers=None, producers=None, stores_out=None, filter_metadata=None, sleep_period=5, count=1, timeout=5, deduplicate=False, verbose=False): # noqa
super(SlackBotOSMBroker, self).__init__(
name,
description,
duplex=duplex,
consumers=consumers,
producers=producers,
stores_out=stores_out,
count=count,
threads=1,
sleep_period=sleep_period,
timeout=timeout,
deduplicate=deduplicate,
filter_metadata=filter_metadata,
verbose=verbose)
self.templates = templates # loaded from templates.yml
self._user_id = self.duplex[0]._client._user_id
self._user_name = self.duplex[0]._client._user_name
self.codec_slack = GeoWatchCodecSlack()
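        # regex patterns keyed by question type (e.g. "project", "changeset")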
self.patterns = load_patterns()
| {
"content_hash": "07596c550d50323d693de6933e95e7a3",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 220,
"avg_line_length": 36.93333333333333,
"alnum_prop": 0.5186522262334536,
"repo_name": "pjdufour/slackbot-osm",
"id": "4f71e3a34fe2637063effee238ceb816b004ee70",
"size": "6648",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "slackbotosm/broker/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "11550"
},
{
"name": "Shell",
"bytes": "1106"
}
],
"symlink_target": ""
} |
import abc
import json
import os
from typing import Dict
import six
from ...datastructures import JSONDict
from ...git import git_show_file
from ...utils import get_metadata_file, has_logs, is_metric_in_metadata_file, read_metadata_rows
from ..constants import V1, V1_STRING, V2, V2_STRING
class ValidationResult(object):
def __init__(self):
self.failed = False
self.warning = False
self.fixed = False
self.messages = {'success': [], 'warning': [], 'failure': [], 'info': []}
def __str__(self):
return '\n'.join(['\n'.join(messages) for messages in self.messages.values()])
def __repr__(self):
return str(self)
@six.add_metaclass(abc.ABCMeta)
class BaseManifestValidator(object):
def __init__(
self,
is_extras=False,
is_marketplace=False,
check_in_extras=True,
check_in_marketplace=True,
ctx=None,
version=V1,
skip_if_errors=False,
):
self.result = ValidationResult()
self.is_extras = is_extras
self.is_marketplace = is_marketplace
self.check_in_extras = check_in_extras
self.check_in_markeplace = check_in_marketplace
self.ctx = ctx
self.version = version
self.skip_if_errors = skip_if_errors
def should_validate(self):
"""Determine if validator applicable given the current repo.
Logic will always validate integrations-core, but flags exist to
selectively include extras and marketplace
"""
if not self.is_extras and not self.is_marketplace:
return True
if self.is_extras and self.check_in_extras:
return True
if self.is_marketplace and self.check_in_markeplace:
return True
return False
def validate(self, check_name, manifest, should_fix):
# type: (str, Dict, bool) -> None
"""Validates the decoded manifest. Will perform inline changes if fix is true"""
raise NotImplementedError
def fail(self, error_message):
self.result.failed = True
self.result.messages['failure'].append(error_message)
def warning(self, warning_message):
self.result.warning = True
self.result.messages['warning'].append(warning_message)
def fix(self, problem, solution):
self.result.warning_msg = problem
self.result.success_msg = solution
self.result.fixed = True
self.result.failed = False
def __repr__(self):
return str(self.result)
class MaintainerValidator(BaseManifestValidator):
MAINTAINER_PATH = {V1: '/maintainer', V2: '/author/support_email'}
def validate(self, check_name, decoded, fix):
if not self.should_validate():
return
correct_maintainer = '[email protected]'
path = self.MAINTAINER_PATH[self.version]
maintainer = decoded.get_path(path)
if not maintainer.isascii():
self.fail(f' `maintainer` contains non-ascii character: {maintainer}')
return
if maintainer != correct_maintainer:
output = f' incorrect `maintainer`: {maintainer}'
if fix:
decoded.set_path(path, correct_maintainer)
self.fix(output, f' new `maintainer`: {correct_maintainer}')
else:
self.fail(output)
class MetricsMetadataValidator(BaseManifestValidator):
METADATA_PATH = {V1: "/assets/metrics_metadata", V2: "/assets/integration/metrics/metadata_path"}
def validate(self, check_name, decoded, fix):
# metrics_metadata
path = self.METADATA_PATH[self.version]
metadata_in_manifest = decoded.get_path(path)
metadata_file = get_metadata_file(check_name)
metadata_file_exists = os.path.isfile(metadata_file)
if not metadata_in_manifest and metadata_file_exists:
# There is a metadata.csv file but no entry in the manifest.json
self.fail(' metadata.csv exists but not defined in the manifest.json of {}'.format(check_name))
elif metadata_in_manifest and not metadata_file_exists:
# There is an entry in the manifest.json file but the referenced csv file does not exist.
self.fail(' metrics_metadata in manifest.json references a non-existing file: {}.'.format(metadata_file))
class MetricToCheckValidator(BaseManifestValidator):
CHECKS_EXCLUDE_LIST = {
'agent_metrics', # this (agent-internal) check doesn't guarantee a list of stable metrics for now
'moogsoft',
'snmp',
}
METRIC_TO_CHECK_EXCLUDE_LIST = {
'openstack.controller', # "Artificial" metric, shouldn't be listed in metadata file.
'riakcs.bucket_list_pool.workers', # RiakCS 2.1 metric, but metadata.csv lists RiakCS 2.0 metrics only.
}
METADATA_PATH = {V1: "/assets/metrics_metadata", V2: "/assets/integration/metrics/metadata_path"}
METRIC_PATH = {V1: "/metric_to_check", V2: "/assets/integration/metrics/check"}
PRICING_PATH = {V1: "/pricing", V2: "/pricing"}
def validate(self, check_name, decoded, _):
if not self.should_validate() or check_name in self.CHECKS_EXCLUDE_LIST:
return
metadata_path = self.METADATA_PATH[self.version]
metadata_in_manifest = decoded.get_path(metadata_path)
# metric_to_check
metric_path = self.METRIC_PATH[self.version]
metric_to_check = decoded.get_path(metric_path)
pricing_path = self.PRICING_PATH[self.version]
pricing = decoded.get_path(pricing_path) or []
if metric_to_check:
metrics_to_check = metric_to_check if isinstance(metric_to_check, list) else [metric_to_check]
for metric in metrics_to_check:
# if metric found in pricing, skip and continue evaluating other metrics_to_check
if any(p.get('metric') == metric for p in pricing):
continue
metric_integration_check_name = check_name
# snmp vendor specific integrations define metric_to_check
# with metrics from `snmp` integration
if check_name.startswith('snmp_') and not metadata_in_manifest:
metric_integration_check_name = 'snmp'
if (
not is_metric_in_metadata_file(metric, metric_integration_check_name)
and metric not in self.METRIC_TO_CHECK_EXCLUDE_LIST
):
self.fail(f' metric_to_check not in metadata.csv: {metric!r}')
elif metadata_in_manifest:
# if we have a metadata.csv file but no `metric_to_check` raise an error
metadata_file = get_metadata_file(check_name)
if os.path.isfile(metadata_file):
for _, row in read_metadata_rows(metadata_file):
# there are cases of metadata.csv files with just a header but no metrics
if row:
self.fail(' metric_to_check not included in manifest.json')
class ImmutableAttributesValidator(BaseManifestValidator):
"""
Ensure that immutable attributes haven't changed
Skip if the manifest is a new file (i.e. new integration) or if the manifest is being upgraded to V2
"""
MANIFEST_VERSION_PATH = "manifest_version"
IMMUTABLE_FIELD_PATHS = {
V1: ("integration_id", "display_name", "guid"),
V2: (
"app_id",
"app_uuid",
"assets/integration/id",
"assets/integration/source_type_name",
),
}
SHORT_NAME_PATHS = {
V1: (
"assets/dashboards",
"assets/monitors",
"assets/saved_views",
),
V2: (
"assets/dashboards",
"assets/monitors",
"assets/saved_views",
),
}
def validate(self, check_name, decoded, fix):
# Check if previous version of manifest exists
# If not, this is a new file so this validation is skipped
try:
previous = git_show_file(path=f"{check_name}/manifest.json", ref="origin/master")
previous_manifest = JSONDict(json.loads(previous))
except Exception:
self.result.messages['info'].append(
" skipping check for changed fields: integration not on default branch"
)
return
# Skip this validation if the manifest is being updated from 1.0.0 -> 2.0.0
current_manifest = decoded
if (
previous_manifest[self.MANIFEST_VERSION_PATH] == "1.0.0"
and current_manifest[self.MANIFEST_VERSION_PATH] == "2.0.0"
):
self.result.messages['info'].append(" skipping check for changed fields: manifest version was upgraded")
return
# Check for differences in immutable attributes
for key_path in self.IMMUTABLE_FIELD_PATHS[self.version]:
previous_value = previous_manifest.get_path(key_path)
current_value = current_manifest.get_path(key_path)
if previous_value != current_value:
output = f'Attribute `{current_value}` at `{key_path}` is not allowed to be modified. Please revert it \
to the original value `{previous_value}`.'
self.fail(output)
# Check for differences in `short_name` keys
for key_path in self.SHORT_NAME_PATHS[self.version]:
previous_short_name_dict = previous_manifest.get_path(key_path) or {}
current_short_name_dict = current_manifest.get_path(key_path) or {}
# Every `short_name` in the prior manifest must be in the current manifest
# The key cannot change and it cannot be removed
previous_short_names = previous_short_name_dict.keys()
current_short_names = set(current_short_name_dict.keys())
for short_name in previous_short_names:
if short_name not in current_short_names:
output = f'Short name `{short_name}` at `{key_path}` is not allowed to be modified. \
Please revert to original value.'
self.fail(output)
class LogsCategoryValidator(BaseManifestValidator):
"""If an integration defines logs it should have the log collection category"""
LOG_COLLECTION_CATEGORY = {V1: "log collection", V2: "Category::Log Collection"}
CATEGORY_PATH = {V1: "/categories", V2: "/tile/classifier_tags"}
IGNORE_LIST = {
'databricks', # Logs are provided by Spark
'docker_daemon',
'ecs_fargate', # Logs are provided by FireLens or awslogs
'cassandra_nodetool', # Logs are provided by cassandra
'jmeter',
'kafka_consumer', # Logs are provided by kafka
'kubernetes',
'pan_firewall',
'altostra',
'hasura_cloud',
'sqreen',
}
def validate(self, check_name, decoded, fix):
path = self.CATEGORY_PATH[self.version]
categories = decoded.get_path(path) or []
check_has_logs = has_logs(check_name)
log_collection_category = self.LOG_COLLECTION_CATEGORY[self.version]
check_has_logs_category = log_collection_category in categories
if check_has_logs == check_has_logs_category or check_name in self.IGNORE_LIST:
return
if check_has_logs:
output = ' required category: ' + log_collection_category
if fix:
                correct_categories = sorted(categories + [log_collection_category])
decoded.set_path(path, correct_categories)
self.fix(output, f' new `categories`: {correct_categories}')
else:
self.fail(output)
else:
output = (
' This integration does not have logs, please remove the category: '
+ log_collection_category
+ ' or define the logs properly'
)
self.fail(output)
class VersionValidator(BaseManifestValidator):
def validate(self, check_name, decoded, fix):
if decoded.get('manifest_version', V2_STRING) == V1_STRING:
self.fail('Manifest version must be >= 2.0.0')
| {
"content_hash": "51fc41cc3e5dc78eb2407c7400a6f847",
"timestamp": "",
"source": "github",
"line_count": 315,
"max_line_length": 120,
"avg_line_length": 38.939682539682536,
"alnum_prop": 0.6128322191423446,
"repo_name": "DataDog/integrations-core",
"id": "6a9aac2b2607b904ba8024f0c8402a43cfc2ae5c",
"size": "12382",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "datadog_checks_dev/datadog_checks/dev/tooling/manifest_validator/common/validator.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "578"
},
{
"name": "COBOL",
"bytes": "12312"
},
{
"name": "Dockerfile",
"bytes": "22998"
},
{
"name": "Erlang",
"bytes": "15518"
},
{
"name": "Go",
"bytes": "6988"
},
{
"name": "HCL",
"bytes": "4080"
},
{
"name": "HTML",
"bytes": "1318"
},
{
"name": "JavaScript",
"bytes": "1817"
},
{
"name": "Kotlin",
"bytes": "430"
},
{
"name": "Lua",
"bytes": "3489"
},
{
"name": "PHP",
"bytes": "20"
},
{
"name": "PowerShell",
"bytes": "2398"
},
{
"name": "Python",
"bytes": "13020828"
},
{
"name": "Roff",
"bytes": "359"
},
{
"name": "Ruby",
"bytes": "241"
},
{
"name": "Scala",
"bytes": "7000"
},
{
"name": "Shell",
"bytes": "83227"
},
{
"name": "Swift",
"bytes": "203"
},
{
"name": "TSQL",
"bytes": "29972"
},
{
"name": "TypeScript",
"bytes": "1019"
}
],
"symlink_target": ""
} |
from ._abstract import AbstractScraper
class EthanChlebowski(AbstractScraper):
@classmethod
def host(cls):
return "ethanchlebowski.com"
def author(self):
return self.schema.author()
def title(self):
return self.schema.title()
def category(self):
return self.schema.category()
def total_time(self):
return self.schema.total_time()
def yields(self):
return self.schema.yields()
def image(self):
return self.schema.image()
def ingredients(self):
return self.schema.ingredients()
def instructions(self):
return self.schema.instructions()
def ratings(self):
return None
def cuisine(self):
return None
def description(self):
return self.soup.head.find("meta", {"property": "og:description"})["content"]
| {
"content_hash": "a5e2d7b11aba05573f6405ff62f45f31",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 85,
"avg_line_length": 21.45,
"alnum_prop": 0.6305361305361306,
"repo_name": "hhursev/recipe-scraper",
"id": "d555c438dc5a654f4cbf2a821612dc76f9cb8c66",
"size": "858",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "recipe_scrapers/ethanchlebowski.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "88554"
}
],
"symlink_target": ""
} |
import unittest
from unittest import mock
from parameterized import parameterized
from airflow.api.common.experimental.trigger_dag import _trigger_dag
from airflow.exceptions import AirflowException
from airflow.models import DAG, DagRun
from airflow.utils import timezone
from tests.test_utils import db
class TestTriggerDag(unittest.TestCase):
def setUp(self) -> None:
db.clear_db_runs()
def tearDown(self) -> None:
db.clear_db_runs()
@mock.patch('airflow.models.DagBag')
def test_trigger_dag_dag_not_found(self, dag_bag_mock):
dag_bag_mock.dags = {}
with self.assertRaises(AirflowException):
_trigger_dag('dag_not_found', dag_bag_mock)
@mock.patch('airflow.api.common.experimental.trigger_dag.DagRun', spec=DagRun)
@mock.patch('airflow.models.DagBag')
def test_trigger_dag_dag_run_exist(self, dag_bag_mock, dag_run_mock):
dag_id = "dag_run_exist"
dag = DAG(dag_id)
dag_bag_mock.dags = [dag_id]
dag_bag_mock.get_dag.return_value = dag
dag_run_mock.find.return_value = DagRun()
with self.assertRaises(AirflowException):
_trigger_dag(dag_id, dag_bag_mock)
@mock.patch('airflow.models.DAG')
@mock.patch('airflow.api.common.experimental.trigger_dag.DagRun', spec=DagRun)
@mock.patch('airflow.models.DagBag')
def test_trigger_dag_include_subdags(self, dag_bag_mock, dag_run_mock, dag_mock):
dag_id = "trigger_dag"
dag_bag_mock.dags = [dag_id]
dag_bag_mock.get_dag.return_value = dag_mock
dag_run_mock.find.return_value = None
dag1 = mock.MagicMock(subdags=[])
dag2 = mock.MagicMock(subdags=[])
dag_mock.subdags = [dag1, dag2]
triggers = _trigger_dag(dag_id, dag_bag_mock)
self.assertEqual(3, len(triggers))
@mock.patch('airflow.models.DAG')
@mock.patch('airflow.api.common.experimental.trigger_dag.DagRun', spec=DagRun)
@mock.patch('airflow.models.DagBag')
def test_trigger_dag_include_nested_subdags(self, dag_bag_mock, dag_run_mock, dag_mock):
dag_id = "trigger_dag"
dag_bag_mock.dags = [dag_id]
dag_bag_mock.get_dag.return_value = dag_mock
dag_run_mock.find.return_value = None
dag1 = mock.MagicMock(subdags=[])
dag2 = mock.MagicMock(subdags=[dag1])
dag_mock.subdags = [dag1, dag2]
triggers = _trigger_dag(dag_id, dag_bag_mock)
self.assertEqual(3, len(triggers))
@mock.patch('airflow.models.DagBag')
def test_trigger_dag_with_too_early_start_date(self, dag_bag_mock):
dag_id = "trigger_dag_with_too_early_start_date"
dag = DAG(dag_id, default_args={'start_date': timezone.datetime(2016, 9, 5, 10, 10, 0)})
dag_bag_mock.dags = [dag_id]
dag_bag_mock.get_dag.return_value = dag
with self.assertRaises(ValueError):
_trigger_dag(dag_id, dag_bag_mock, execution_date=timezone.datetime(2015, 7, 5, 10, 10, 0))
@mock.patch('airflow.models.DagBag')
def test_trigger_dag_with_valid_start_date(self, dag_bag_mock):
dag_id = "trigger_dag_with_valid_start_date"
dag = DAG(dag_id, default_args={'start_date': timezone.datetime(2016, 9, 5, 10, 10, 0)})
dag_bag_mock.dags = [dag_id]
dag_bag_mock.get_dag.return_value = dag
dag_bag_mock.dags_hash = {}
triggers = _trigger_dag(dag_id, dag_bag_mock, execution_date=timezone.datetime(2018, 7, 5, 10, 10, 0))
assert len(triggers) == 1
@parameterized.expand(
[
(None, {}),
({"foo": "bar"}, {"foo": "bar"}),
('{"foo": "bar"}', {"foo": "bar"}),
]
)
@mock.patch('airflow.models.DagBag')
def test_trigger_dag_with_conf(self, conf, expected_conf, dag_bag_mock):
dag_id = "trigger_dag_with_conf"
dag = DAG(dag_id)
dag_bag_mock.dags = [dag_id]
dag_bag_mock.get_dag.return_value = dag
dag_bag_mock.dags_hash = {}
triggers = _trigger_dag(dag_id, dag_bag_mock, conf=conf)
self.assertEqual(triggers[0].conf, expected_conf)
| {
"content_hash": "dbd3c095acb93058f0e71d6387d3a49e",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 110,
"avg_line_length": 37.77981651376147,
"alnum_prop": 0.6301602719766877,
"repo_name": "mrkm4ntr/incubator-airflow",
"id": "9fb772d3576f9079419be933b8d73160a58f9b2c",
"size": "4906",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/api/common/experimental/test_trigger_dag.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "22581"
},
{
"name": "Dockerfile",
"bytes": "31475"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "221101"
},
{
"name": "JavaScript",
"bytes": "32643"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "14407542"
},
{
"name": "Shell",
"bytes": "541811"
}
],
"symlink_target": ""
} |
from django.db import models
from django.utils import timezone
import datetime
# Create your models here.
class Question(models.Model):
question_text = models.CharField(max_length = 200)
pub_date = models.DateTimeField('date published')
def __str__(self):
return self.question_text
def was_published_recently(self):
        return (timezone.now() - datetime.timedelta(days = 1)) <= self.pub_date <= timezone.now()
was_published_recently.admin_order_field = 'pub_date'
was_published_recently.boolean = True
was_published_recently.short_description = 'Published recently?'
question_text.short_description = 'Question'
pub_date.short_description = 'Published Date'
class Choice(models.Model):
question = models.ForeignKey(Question)
choice_text = models.CharField(max_length = 200)
votes = models.IntegerField(default = 0)
def __str__(self):
        return self.choice_text
| {
"content_hash": "ab798c3ee5f43a4d6e6a194f2760d493",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 97,
"avg_line_length": 32.275862068965516,
"alnum_prop": 0.7040598290598291,
"repo_name": "LoopSun/PythonWay",
"id": "10246668659f7ad3333713c3422d9b8968b79f7e",
"size": "936",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "GetYourStar/DjangoProj/packets/polls/models.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "222"
},
{
"name": "HTML",
"bytes": "3355"
},
{
"name": "Python",
"bytes": "59983"
}
],
"symlink_target": ""
} |
import os
import subprocess
import benchmark.util as Util
import benchmark.tools.template
import benchmark.result as result
class Tool(benchmark.tools.template.BaseTool):
"""
This class serves as tool adaptor for LLBMC
"""
def getExecutable(self):
return Util.findExecutable('lib/native/x86_64-linux/llbmc')
def getVersion(self, executable):
return subprocess.Popen([executable, '--version'],
stdout=subprocess.PIPE).communicate()[0].splitlines()[2][8:18]
def getName(self):
return 'LLBMC'
def getCmdline(self, executable, options, sourcefile):
# compile sourcefile with clang
self.prepSourcefile = self._prepareSourcefile(sourcefile)
return [executable] + options + [self.prepSourcefile]
def _prepareSourcefile(self, sourcefile):
clangExecutable = Util.findExecutable('clang')
newFilename = sourcefile + ".o"
subprocess.Popen([clangExecutable,
'-c',
'-emit-llvm',
'-std=gnu89',
'-m32',
sourcefile,
'-O0',
'-o',
newFilename,
'-w'],
stdout=subprocess.PIPE).wait()
return newFilename
def getStatus(self, returncode, returnsignal, output, isTimeout):
status = result.STR_UNKNOWN
for line in output.splitlines():
if 'Error detected.' in line:
status = result.STR_FALSE_LABEL
elif 'No error detected.' in line:
status = result.STR_TRUE
# delete tmp-files
try:
os.remove(self.prepSourcefile)
except OSError, e:
print "Could not remove file " + self.prepSourcefile + "! Maybe clang call failed"
pass
return status
def addColumnValues(self, output, columns):
"""
This method adds the values that the user requested to the column objects.
If a value is not found, it should be set to '-'.
If not supported, this method does not need to get overridden.
"""
pass
| {
"content_hash": "7852e7ff4deaf9a86bf973716fcdd55b",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 94,
"avg_line_length": 29.77922077922078,
"alnum_prop": 0.5503706934147405,
"repo_name": "TommesDee/cpachecker",
"id": "63ccf6c007eaf7c4eea84a4f0ee66f7845d2ba79",
"size": "2293",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/benchmark/tools/llbmc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "26296962"
},
{
"name": "C++",
"bytes": "1832"
},
{
"name": "CSS",
"bytes": "25"
},
{
"name": "Java",
"bytes": "8113113"
},
{
"name": "PHP",
"bytes": "36129"
},
{
"name": "Perl",
"bytes": "6690"
},
{
"name": "Python",
"bytes": "275076"
},
{
"name": "Shell",
"bytes": "16666"
}
],
"symlink_target": ""
} |
import subprocess
import os
from glim.core import Facade
from glim import Log
from glim import paths
OPTION_MAP = {
'nojs': '--no-js',
'lint': '--lint',
'verbose': '--verbose',
'units': '-sm=on',
'compress': '--compress'
}
DEFAULT_CONFIG = {
'source': os.path.join(paths.APP_PATH, 'assets/less/main.less'),
'destination': os.path.join(paths.APP_PATH, 'assets/css/main.css'),
'options': [
'lint',
'units',
'verbose'
]
}
class Less(object):
def __init__(self, config):
self.config = DEFAULT_CONFIG
for key, value in config.items():
self.config[key] = value
# Log.info("config")
# Log.info(self.config)
def compile(self):
source = self.config['source']
destination = self.config['destination']
options = self.config['options']
try:
options_string = ''
for option in options:
options_string += '%s ' % OPTION_MAP[option]
options_string = options_string.rstrip()
command = 'lessc'
arguments = '%s %s > %s' % (options_string, source, destination)
# Log.debug("command: %s" % command)
# Log.debug("arguments: %s" % arguments)
cmd = '%s %s' % (command, arguments)
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
out, err = p.communicate()
Log.info("Compiling LESS source..")
except Exception as e:
Log.error(e)
class LessFacade(Facade):
accessor = Less
| {
"content_hash": "3dcf9bb4a74e8278c054daf01cd00568",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 68,
"avg_line_length": 21.890625,
"alnum_prop": 0.635260528194147,
"repo_name": "aacanakin/glim-extensions",
"id": "9b5dcf78f4b4bb31bdbede42b67cc66f64aac0f6",
"size": "1401",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glim_extensions/less/less.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26739"
}
],
"symlink_target": ""
} |
"""
Pagination: select how many results to display.
"""
# Maybe refactor later the same way as variant_filter,
# but maybe not necessary as long as they are so simple.
def pagination_from_request(request):
lim = request.GET.get('limit')
off = request.GET.get('offset', '0')
assert off.isdigit(), "Argument to 'offset' must be an integer"
off = int(off)
if lim is not None:
assert lim.isdigit(), "Argument to 'limit' must be an integer"
lim = int(lim)
return Pagination(lim, off)
class Pagination:
def __init__(self, limit=None, offset=0):
"""
:param limit: (int) keep only that many.
:param offset: (int) skip that many.
"""
self.lim = limit
self.off = offset
def limit(self, variants):
"""Keep only the first *lim* variants.
        Corresponds to the 'LIMIT' SQL statement.
:param variants: QuerySet.
"""
return variants[:self.lim]
def offset(self, variants):
"""Skip the first *off* variants.
Corresponds to the 'OFFSET' SQL statement.
:param variants: QuerySet.
"""
return variants[self.off:]
def paginate(self, variants):
var = self.offset(variants)
if self.lim:
var = self.limit(var)
return var
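# A minimal usage sketch (not part of the original module).  paginate() accepts
# anything sliceable, so a plain list stands in for a Django QuerySet here; the
# function and variable names are hypothetical.
def _pagination_example():
    variants = list(range(100))
    pg = Pagination(limit=10, offset=20)
    # Equivalent to "LIMIT 10 OFFSET 20": returns items 20..29.
    return pg.paginate(variants)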
| {
"content_hash": "1b34a2c1849a7b1100bef7135ac80648",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 70,
"avg_line_length": 29,
"alnum_prop": 0.5982008995502249,
"repo_name": "444thLiao/VarappX-flask",
"id": "23a47459be9689c0586a52bec877165ddc996c48",
"size": "1334",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "varappx/filters/pagination.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "506164"
},
{
"name": "HTML",
"bytes": "267707"
},
{
"name": "JavaScript",
"bytes": "4184850"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "PHP",
"bytes": "10512"
},
{
"name": "Python",
"bytes": "280703"
},
{
"name": "Shell",
"bytes": "158"
}
],
"symlink_target": ""
} |
import csv
import os
import re
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
import pkgutil
import inspect
import bb
import supportedrecipesreport
class Columns(object):
"""Base class for all classes which extend the SUPPORTED_RECIPES_SOURCES report.
Typically used to add columns, hence the name. Usage of the class is:
- instantiated when starting to write a report
- extend_header() - add new columns
    - extend_row() - add data for new columns to each row as it is getting written
To add new classes, create a "lib/supportedrecipesreport" directory in your layer,
with an empty "__init__.py" file and one or more classes inheriting from this base
    class defined in one or more regular .py files (a minimal sketch follows this class).
"""
def __init__(self, d, all_rows):
"""Initialize instance.
Gets access to the global datastore and all rows that are to be written (unmodified
and read-only).
"""
pass
def extend_header(self, row_headers):
"""Add new columns.
Called with a list of field names, in the order in which the
        resulting .csv report will have them. extend_header() then may
extend the list of fields. See supportedrecipes.py for
a list of already present fields.
"""
pass
def extend_row(self, row):
"""Add data for new columns or modify existing ones.
Called with a hash mapping field names to the corresponding data.
"""
pass
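# A minimal sketch of an extension class (illustrative only, not part of the
# original layer).  A real extension would live in its own module under
# lib/supportedrecipesreport/ so that the plugin loader in check_build() below
# picks it up; the class name and the "reviewed" field are hypothetical.
class ExampleReviewedColumn(Columns):
    def __init__(self, d, all_rows):
        # d is the global datastore, all_rows the read-only list of report rows.
        super(ExampleReviewedColumn, self).__init__(d, all_rows)
    def extend_header(self, row_headers):
        # Append the new field name; list order determines the CSV column order.
        row_headers.append('reviewed')
    def extend_row(self, row):
        # Fill in the value of the new column for each row as it is written.
        row['reviewed'] = 'no'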
def parse_regex(regex, filename, linenumber):
try:
# must match entire string, hence the '$'
return (re.compile(regex + '$'), regex)
except Exception as ex:
raise RuntimeError("%s.%d: parsing '%s' as regular expression failed: %s" % (
filename,
linenumber,
regex,
str(ex)))
class SupportedRecipe:
def __init__(self, pattern, supportedby, filename, linenumber):
self.supportedby = supportedby
self.filename = filename
self.pattern = pattern
self.linenumber = linenumber
parts = pattern.split('@')
if len(parts) != 2:
raise RuntimeError("%s.%d: entry must have format <recipe name regex>@<collection name regex>, "
"splitting by @ found %d parts instead: %s" %
(filename, linenumber, len(parts), pattern))
self.pn_re = parse_regex(parts[0], filename, linenumber)
self.collection_re = parse_regex(parts[1], filename, linenumber)
def is_supportedby(self, pn, collection):
# Returns string identifying the team supporting the recipe or
# empty string if unsupported.
supported = bool((pn is None or self.pn_re[0].match(pn)) and
(collection is None or self.collection_re[0].match(collection)))
return self.supportedby if supported else ''
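# Illustrative note (not from the original files): a SUPPORTED_RECIPES entry such
# as "linux-yocto.*@core" becomes SupportedRecipe('linux-yocto.*@core', ...); its
# is_supportedby('linux-yocto-rt', 'core') then returns the supporting team, because
# both anchored regular expressions match, while the same recipe in a different
# collection would not match.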
class SupportedRecipes:
def __init__(self):
self.supported = []
def append(self, recipe):
self.supported.append(recipe)
def current_recipe_supportedby(self, d):
pn = d.getVar('PN', True)
filename = d.getVar('FILE', True)
collection = bb.utils.get_file_layer(filename, d)
return self.recipe_supportedby(pn, collection)
def recipe_supportedby(self, pn, collection):
        # Returns list of teams supporting the recipe (could be
# more than one or none).
result = set()
for recipe in self.supported:
supportedby = recipe.is_supportedby(pn, collection)
if supportedby:
result.add(supportedby)
return sorted(result)
def load_supported_recipes(d):
files = []
supported_files = d.getVar('SUPPORTED_RECIPES', True)
if not supported_files:
bb.fatal('SUPPORTED_RECIPES is not set')
supported_recipes = SupportedRecipes()
for filename in supported_files.split():
try:
base = os.path.basename(filename)
supportedby = d.getVarFlag('SUPPORTED_RECIPES', base, True)
if not supportedby:
supportedby = base.rstrip('.txt')
with open(filename) as f:
linenumber = 1
for line in f:
if line.startswith('#'):
continue
# TODO (?): sanity check the content to catch
# obsolete entries or typos.
pn = line.strip()
if pn:
supported_recipes.append(SupportedRecipe(line.strip(),
supportedby,
filename,
linenumber))
linenumber += 1
files.append(filename)
except OSError as ex:
bb.fatal('Could not read SUPPORTED_RECIPES = %s: %s' % (supported_files, str(ex)))
return (supported_recipes, files)
SOURCE_FIELDS = 'component,collection,version,homepage,source,summary,license'.split(',')
# Collects information about one recipe during parsing for SUPPORTED_RECIPES_SOURCES.
# The dumped information cannot be removed because it might be needed in future
# bitbake invocations, so the default location is inside the tmp directory.
def dump_sources(d):
pn = d.getVar('PN', True)
filename = d.getVar('FILE', True)
collection = bb.utils.get_file_layer(filename, d)
pv = d.getVar('PV', True)
summary = d.getVar('SUMMARY', True) or ''
homepage = d.getVar('HOMEPAGE', True) or ''
src = d.getVar('SRC_URI', True).split()
license = d.getVar('LICENSE', True)
sources = []
for url in src:
scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
if scheme != 'file':
parts = path.split(';')
if len(parts) > 1:
path = parts[0]
params = dict([x.split('=') if '=' in x else (x, '') for x in parts[1:]])
else:
params = {}
name = params.get('name', None)
sources.append((name, '%s://%s%s' % (scheme, netloc, path)))
dumpfile = d.getVar('SUPPORTED_RECIPES_SOURCES_DIR', True) + '/' + pn + filename
bb.utils.mkdirhier(os.path.dirname(dumpfile))
with open(dumpfile, 'w') as f:
# File intentionally kept small by not writing a header
# line. Guaranteed to contain SOURCE_FIELDS.
writer = csv.writer(f)
for idx, val in enumerate(sources):
name, url = val
if name and len(sources) != 1:
fullname = '%s/%s' % (pn, name)
elif idx > 0:
fullname = '%s/%d' % (pn, idx)
else:
fullname = pn
writer.writerow((fullname, collection, pv, homepage, url, summary, license))
class IsNative(object):
def __init__(self, d):
# Always add a trailing $ to ensure a full match.
native_recipes = d.getVar('SUPPORTED_RECIPES_NATIVE_RECIPES', True).split()
self.isnative_exception = re.compile('(' + '|'.join(native_recipes) + ')$')
self.isnative_baseclasses = d.getVar('SUPPORTED_RECIPES_NATIVE_BASECLASSES', True).split()
def __call__(self, pn, pndata):
for inherited in pndata['inherits']:
if os.path.basename(inherited) in self.isnative_baseclasses:
return True
# Some build recipes do not inherit cross.bbclass and must be skipped explicitly.
# The "real" recipes (in cases like glibc) still get checked. Other recipes are OE-core
# internal helpers.
if self.isnative_exception.match(pn):
return True
class TruncatedError(Exception):
pass
def dump_dependencies(depgraph, max_lines, unsupported):
# Walk the recipe dependency tree and add one line for each path that ends in
# an unsupported recipe.
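    # Worked example (hypothetical recipe names): with task dependencies that
    # flatten to image -> busybox -> zlib and only zlib unsupported, the result
    # contains the single path ['image', 'busybox', 'zlib'], which check_build()
    # later renders as "image -> busybox -> *zlib".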
lines = []
current_line = []
# Pre-compute complete dependencies (DEPEND and RDEPEND) for each recipe
# instead of doing it each time we reach a recipe. Also identifies those
# recipes that nothing depends on. They are the start points for the build.
roots = set(depgraph['pn'])
deps = {}
for task, taskdeps in depgraph['tdepends'].items():
pn = task.split('.')[0]
pndeps = deps.setdefault(pn, set())
for taskdep in taskdeps:
pndep = taskdep.split('.')[0]
if pndep != pn:
pndeps.add(pndep)
roots.discard(pndep)
for pn in deps:
deps[pn] = sorted(deps[pn])
# We can prune the search tree a lot by keeping track of those recipes which are already
# known to not depend on an unsupported recipe.
okay = set()
def visit_recipe(pn):
if pn in okay:
return False
if pn in current_line:
# Recursive dependency, bail out. Can happen
# because we flattened the task dependencies; those don't have
# cycles.
return False
current_line.append(pn)
printed = False
for dep in deps.get(pn, []):
if visit_recipe(dep):
printed = True
if not printed and \
pn in unsupported and \
not len(current_line) == 1:
            # Current path is non-trivial, ends in an unsupported recipe and was not already
# included in a longer, printed path. Add a copy to the output.
if len(lines) >= max_lines:
raise TruncatedError()
lines.append(current_line[:])
printed = True
if not printed and not pn in unsupported:
okay.add(pn)
del current_line[-1]
return printed
truncated = False
try:
for pn in sorted(roots):
visit_recipe(pn)
except TruncatedError:
truncated = True
return lines, truncated
def collection_hint(pn, supported_recipes):
# Determines whether the recipe would be supported in some other collection.
collections = set([supported_recipe.collection_re[1]
for supported_recipe
in supported_recipes.supported
if supported_recipe.is_supportedby(pn, None)])
return ' (would be supported in %s)' % ' '.join(collections) if collections else ''
def dump_unsupported(unsupported, supported_recipes):
# Turns the mapping from unsupported recipe to is collection
# into a sorted list of entries in the final report.
lines = []
for pn, collection in unsupported.items():
# Left and right side of the <recipe>@<collection> entries are
# regular expressions. In contrast to re.escape(), we only
# escape + (as in gtk+3). Escaping all non-alphanumerics
# makes many entries (like linux-yocto) unnecessarily less
# readable (linux\-yocto).
pn = pn.replace('+', r'\+')
collection = collection.replace('+', r'\+')
hint = collection_hint(pn, supported_recipes)
entry = '%s@%s%s' % (pn, collection, hint)
lines.append(entry)
return sorted(lines)
def check_build(d, event):
supported_recipes, files = load_supported_recipes(d)
supported_recipes_check = d.getVar('SUPPORTED_RECIPES_CHECK', True)
if not supported_recipes_check:
return
isnative = IsNative(d)
valid = ('note', 'warn', 'error', 'fatal')
if supported_recipes_check not in valid:
bb.fatal('SUPPORTED_RECIPES_CHECK must be set to one of %s, currently is: %s' %
('/'.join(valid), supported_recipes_check))
logger = bb.__dict__[supported_recipes_check]
# See bitbake/lib/bb/cooker.py buildDependTree() for the content of the depgraph hash.
# Basically it mirrors the information dumped by "bitbake -g".
depgraph = event._depgraph
# import pprint
# bb.note('depgraph: %s' % pprint.pformat(depgraph))
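    # Rough shape of the depgraph parts used below (inferred from this file, not
    # an authoritative description; recipe and task names are hypothetical):
    #   depgraph['pn']       = {'busybox': {'filename': '.../busybox_1.x.bb',
    #                                       'inherits': ['.../autotools.bbclass', ...]}, ...}
    #   depgraph['tdepends'] = {'busybox.do_compile': ['zlib.do_populate_sysroot', ...], ...}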
dirname = d.getVar('SUPPORTED_RECIPES_SOURCES_DIR', True)
report_sources = d.getVar('SUPPORTED_RECIPES_SOURCES', True)
unsupported = {}
sources = []
for pn, pndata in depgraph['pn'].items():
# We only care about recipes compiled for the target.
# Most native ones can be detected reliably because they inherit native.bbclass,
# but some special cases have to be hard-coded.
# Image recipes also do not matter.
if not isnative(pn, pndata):
filename = pndata['filename']
collection = bb.utils.get_file_layer(filename, d)
supportedby = supported_recipes.recipe_supportedby(pn, collection)
if not supportedby:
unsupported[pn] = collection
if report_sources:
dumpfile = os.path.join(dirname, pn + filename)
with open(dumpfile) as f:
reader = csv.reader(f)
for row in reader:
row_hash = {f: row[i] for i, f in enumerate(SOURCE_FIELDS)}
row_hash['supported'] = 'yes (%s)' % ' '.join(supportedby) \
if supportedby else 'no'
sources.append(row_hash)
if report_sources:
with open(report_sources, 'w') as f:
fields = SOURCE_FIELDS[:]
# Insert after 'collection'.
fields.insert(fields.index('collection') + 1, 'supported')
extensions = []
for importer, modname, ispkg in pkgutil.iter_modules(supportedrecipesreport.__path__):
module = __import__('supportedrecipesreport.' + modname, fromlist="dummy")
for name, clazz in inspect.getmembers(module, inspect.isclass):
if issubclass(clazz, Columns):
extensions.append(clazz(d, sources))
for e in extensions:
e.extend_header(fields)
writer = csv.DictWriter(f, fields)
writer.writeheader()
for row in sources:
for e in extensions:
e.extend_row(row)
# Sort by first column, then second column, etc., after extending all rows.
for row in sorted(sources, key=lambda r: [r.get(f, None) for f in fields]):
writer.writerow(row)
bb.note('Created SUPPORTED_RECIPES_SOURCES = %s file.' % report_sources)
if unsupported:
max_lines = int(d.getVar('SUPPORTED_RECIPES_CHECK_DEPENDENCY_LINES', True))
dependencies, truncated = dump_dependencies(depgraph, max_lines, unsupported)
output = []
output.append('The following unsupported recipes are required for the build:')
output.extend([' ' + line for line in dump_unsupported(unsupported, supported_recipes)])
output.append('''
Each unsupported recipe is identified by the recipe name and the collection
in which it occurs and has to be marked as supported (see below) using that
format. Typically each layer has exactly one collection.''')
if dependencies:
# Add the optional dependency dump.
output.append('''
Here are the dependency chains (including DEPENDS and RDEPENDS)
which include one or more of the unsupported recipes. -> means "depends on"
and * marks unsupported recipes:''')
for line in dependencies:
line_entries = [('*' if pn in unsupported else '') + pn for pn in line]
output.append(' ' + ' -> '.join(line_entries))
if truncated:
output.append('''...
Output truncated, to see more increase SUPPORTED_RECIPES_CHECK_DEPENDENCY_LINES (currently %d).''' %
max_lines)
output.append('''
To avoid this message, several options exist:
* Check the dependency chain(s) to see why a recipe gets pulled in and perhaps
change recipe configurations or image content to avoid pulling in undesired
components.
* If the recipe is supported in some other layer, disable the unsupported one
with BBMASK.
* Add the unsupported recipes to one of the following files:
%s
Regular expressions are supported on both sides of the @ separator.
* Create a new file which lists the unsupported recipes and extend SUPPORTED_RECIPES:
SUPPORTED_RECIPES_append = " <path>/recipes-supported-by-me.txt"
See meta-refkit/conf/layer.conf and refkit.conf for an example how the path can be
derived automatically. The expectation is that SUPPORTED_RECIPES gets set in
distro configuration files, depending on the support provided by the distro
creator.
* Disable the check with SUPPORTED_RECIPES_CHECK = "" in local.conf.
'bitbake -g <build target>' produces .dot files showing these dependencies.
''' % '\n '.join(files))
logger('\n'.join(output))
| {
"content_hash": "1d611e7dbd42e0d375c43890ff08059b",
"timestamp": "",
"source": "github",
"line_count": 401,
"max_line_length": 108,
"avg_line_length": 42.114713216957604,
"alnum_prop": 0.6040383704405495,
"repo_name": "jairglez/intel-iot-refkit",
"id": "e9907eae1772761d8c6ab416e23b7d733a20cbc7",
"size": "16970",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "meta-refkit-core/lib/supportedrecipes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "11451"
},
{
"name": "BitBake",
"bytes": "95103"
},
{
"name": "C",
"bytes": "133328"
},
{
"name": "C++",
"bytes": "1178"
},
{
"name": "CMake",
"bytes": "838"
},
{
"name": "Java",
"bytes": "504"
},
{
"name": "JavaScript",
"bytes": "25003"
},
{
"name": "M4",
"bytes": "7374"
},
{
"name": "Makefile",
"bytes": "1190"
},
{
"name": "Mask",
"bytes": "599"
},
{
"name": "PHP",
"bytes": "10437"
},
{
"name": "Pascal",
"bytes": "1416"
},
{
"name": "Python",
"bytes": "506975"
},
{
"name": "Shell",
"bytes": "65079"
},
{
"name": "SourcePawn",
"bytes": "2662"
}
],
"symlink_target": ""
} |
"""An implementation of the Zephyr Abstract Syntax Definition Language.
See http://asdl.sourceforge.net/ and
http://www.cs.princeton.edu/research/techreps/TR-554-97
Only supports top level module decl, not view. I'm guessing that view
is intended to support the browser and I'm not interested in the
browser.
Changes for Python: Add support for module versions
"""
import os
import sys
import traceback
import spark
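# Illustrative sketch (not part of the original module): a minimal ASDL module of
# the kind parse() below accepts.  "Example", "expr", "Num" and "Add" are made-up
# names; "int" is one of the builtin_types defined further down.
_EXAMPLE_ASDL = '''
module Example
{
    expr = Num(int value)
         | Add(expr left, expr right)
}
'''
# Running ASDLScanner().tokenize(_EXAMPLE_ASDL) and feeding the tokens to
# ASDLParser().parse() yields a Module named "Example" with a single Sum type.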
def output(*strings):
for s in strings:
sys.stdout.write(str(s) + "\n")
class Token(object):
# spark seems to dispatch in the parser based on a token's
# type attribute
def __init__(self, type, lineno):
self.type = type
self.lineno = lineno
def __str__(self):
return self.type
def __repr__(self):
return str(self)
class Id(Token):
def __init__(self, value, lineno):
self.type = 'Id'
self.value = value
self.lineno = lineno
def __str__(self):
return self.value
class String(Token):
def __init__(self, value, lineno):
self.type = 'String'
self.value = value
self.lineno = lineno
class ASDLSyntaxError(Exception):
def __init__(self, lineno, token=None, msg=None):
self.lineno = lineno
self.token = token
self.msg = msg
def __str__(self):
if self.msg is None:
return "Error at '%s', line %d" % (self.token, self.lineno)
else:
return "%s, line %d" % (self.msg, self.lineno)
class ASDLScanner(spark.GenericScanner, object):
def tokenize(self, input):
self.rv = []
self.lineno = 1
super(ASDLScanner, self).tokenize(input)
return self.rv
def t_id(self, s):
r"[\w\.]+"
# XXX doesn't distinguish upper vs. lower, which is
# significant for ASDL.
self.rv.append(Id(s, self.lineno))
def t_string(self, s):
r'"[^"]*"'
self.rv.append(String(s, self.lineno))
def t_xxx(self, s): # not sure what this production means
r"<="
self.rv.append(Token(s, self.lineno))
def t_punctuation(self, s):
r"[\{\}\*\=\|\(\)\,\?\:]"
self.rv.append(Token(s, self.lineno))
def t_comment(self, s):
r"\-\-[^\n]*"
pass
def t_newline(self, s):
r"\n"
self.lineno += 1
def t_whitespace(self, s):
r"[ \t]+"
pass
def t_default(self, s):
r" . +"
raise ValueError("unmatched input: %r" % s)
class ASDLParser(spark.GenericParser, object):
def __init__(self):
super(ASDLParser, self).__init__("module")
def typestring(self, tok):
return tok.type
def error(self, tok):
raise ASDLSyntaxError(tok.lineno, tok)
def p_module_0(self, info):
" module ::= Id Id { } "
module, name, _0, _1 = info
if module.value != "module":
raise ASDLSyntaxError(module.lineno,
msg="expected 'module', found %s" % module)
return Module(name, None)
def p_module(self, info):
" module ::= Id Id { definitions } "
module, name, _0, definitions, _1 = info
if module.value != "module":
raise ASDLSyntaxError(module.lineno,
msg="expected 'module', found %s" % module)
return Module(name, definitions)
def p_definition_0(self, definition):
" definitions ::= definition "
return definition[0]
def p_definition_1(self, definitions):
" definitions ::= definition definitions "
return definitions[0] + definitions[1]
def p_definition(self, info):
" definition ::= Id = type "
id, _, type = info
return [Type(id, type)]
def p_type_0(self, product):
" type ::= product "
return product[0]
def p_type_1(self, sum):
" type ::= sum "
return Sum(sum[0])
def p_type_2(self, info):
" type ::= sum Id ( fields ) "
sum, id, _0, attributes, _1 = info
if id.value != "attributes":
raise ASDLSyntaxError(id.lineno,
msg="expected attributes, found %s" % id)
return Sum(sum, attributes)
def p_product_0(self, info):
" product ::= ( fields ) "
_0, fields, _1 = info
return Product(fields)
def p_product_1(self, info):
" product ::= ( fields ) Id ( fields ) "
_0, fields, _1, id, _2, attributes, _3 = info
if id.value != "attributes":
raise ASDLSyntaxError(id.lineno,
msg="expected attributes, found %s" % id)
return Product(fields, attributes)
def p_sum_0(self, constructor):
" sum ::= constructor "
return [constructor[0]]
def p_sum_1(self, info):
" sum ::= constructor | sum "
constructor, _, sum = info
return [constructor] + sum
def p_sum_2(self, info):
" sum ::= constructor | sum "
constructor, _, sum = info
return [constructor] + sum
def p_constructor_0(self, id):
" constructor ::= Id "
return Constructor(id[0])
def p_constructor_1(self, info):
" constructor ::= Id ( fields ) "
id, _0, fields, _1 = info
return Constructor(id, fields)
def p_fields_0(self, field):
" fields ::= field "
return [field[0]]
def p_fields_1(self, info):
" fields ::= fields , field "
fields, _, field = info
return fields + [field]
def p_field_0(self, type_):
" field ::= Id "
return Field(type_[0])
def p_field_1(self, info):
" field ::= Id Id "
type, name = info
return Field(type, name)
def p_field_2(self, info):
" field ::= Id * Id "
type, _, name = info
return Field(type, name, seq=True)
def p_field_3(self, info):
" field ::= Id ? Id "
type, _, name = info
return Field(type, name, opt=True)
def p_field_4(self, type_):
" field ::= Id * "
return Field(type_[0], seq=True)
def p_field_5(self, type_):
" field ::= Id ? "
        return Field(type_[0], opt=True)
builtin_types = ("identifier", "string", "bytes", "int", "object", "singleton")
# below is a collection of classes to capture the AST of an AST :-)
# not sure if any of the methods are useful yet, but I'm adding them
# piecemeal as they seem helpful
class AST(object):
pass # a marker class
class Module(AST):
def __init__(self, name, dfns):
self.name = name
self.dfns = dfns
self.types = {} # maps type name to value (from dfns)
for type in dfns:
self.types[type.name.value] = type.value
def __repr__(self):
return "Module(%s, %s)" % (self.name, self.dfns)
class Type(AST):
def __init__(self, name, value):
self.name = name
self.value = value
def __repr__(self):
return "Type(%s, %s)" % (self.name, self.value)
class Constructor(AST):
def __init__(self, name, fields=None):
self.name = name
self.fields = fields or []
def __repr__(self):
return "Constructor(%s, %s)" % (self.name, self.fields)
class Field(AST):
def __init__(self, type, name=None, seq=False, opt=False):
self.type = type
self.name = name
self.seq = seq
self.opt = opt
def __repr__(self):
if self.seq:
extra = ", seq=True"
elif self.opt:
extra = ", opt=True"
else:
extra = ""
if self.name is None:
return "Field(%s%s)" % (self.type, extra)
else:
return "Field(%s, %s%s)" % (self.type, self.name, extra)
class Sum(AST):
def __init__(self, types, attributes=None):
self.types = types
self.attributes = attributes or []
def __repr__(self):
if self.attributes is None:
return "Sum(%s)" % self.types
else:
return "Sum(%s, %s)" % (self.types, self.attributes)
class Product(AST):
def __init__(self, fields, attributes=None):
self.fields = fields
self.attributes = attributes or []
def __repr__(self):
if self.attributes is None:
return "Product(%s)" % self.fields
else:
return "Product(%s, %s)" % (self.fields, self.attributes)
class VisitorBase(object):
def __init__(self, skip=False):
self.cache = {}
self.skip = skip
def visit(self, object, *args):
meth = self._dispatch(object)
if meth is None:
return
try:
meth(object, *args)
except Exception:
output("Error visiting" + repr(object))
output(str(sys.exc_info()[1]))
traceback.print_exc()
# XXX hack
if hasattr(self, 'file'):
self.file.flush()
os._exit(1)
def _dispatch(self, object):
assert isinstance(object, AST), repr(object)
klass = object.__class__
meth = self.cache.get(klass)
if meth is None:
methname = "visit" + klass.__name__
if self.skip:
meth = getattr(self, methname, None)
else:
meth = getattr(self, methname)
self.cache[klass] = meth
return meth
class Check(VisitorBase):
def __init__(self):
super(Check, self).__init__(skip=True)
self.cons = {}
self.errors = 0
self.types = {}
def visitModule(self, mod):
for dfn in mod.dfns:
self.visit(dfn)
def visitType(self, type):
self.visit(type.value, str(type.name))
def visitSum(self, sum, name):
for t in sum.types:
self.visit(t, name)
def visitConstructor(self, cons, name):
key = str(cons.name)
conflict = self.cons.get(key)
if conflict is None:
self.cons[key] = name
else:
output("Redefinition of constructor %s" % key)
output("Defined in %s and %s" % (conflict, name))
self.errors += 1
for f in cons.fields:
self.visit(f, key)
def visitField(self, field, name):
key = str(field.type)
l = self.types.setdefault(key, [])
l.append(name)
def visitProduct(self, prod, name):
for f in prod.fields:
self.visit(f, name)
def check(mod):
v = Check()
v.visit(mod)
for t in v.types:
if t not in mod.types and not t in builtin_types:
v.errors += 1
uses = ", ".join(v.types[t])
output("Undefined type %s, used in %s" % (t, uses))
return not v.errors
def parse(file):
scanner = ASDLScanner()
parser = ASDLParser()
f = open(file)
try:
buf = f.read()
finally:
f.close()
tokens = scanner.tokenize(buf)
try:
return parser.parse(tokens)
except ASDLSyntaxError:
err = sys.exc_info()[1]
output(str(err))
lines = buf.split("\n")
output(lines[err.lineno - 1]) # lines starts at 0, files at 1
if __name__ == "__main__":
import glob
import sys
if len(sys.argv) > 1:
files = sys.argv[1:]
else:
testdir = "tests"
files = glob.glob(testdir + "/*.asdl")
for file in files:
output(file)
mod = parse(file)
if not mod:
break
output("module", mod.name)
output(len(mod.dfns), "definitions")
if not check(mod):
output("Check failed")
else:
for dfn in mod.dfns:
output(dfn.name, dfn.value)
| {
"content_hash": "6e37e10bee26e62d84361cbee1cd5e47",
"timestamp": "",
"source": "github",
"line_count": 436,
"max_line_length": 79,
"avg_line_length": 26.96330275229358,
"alnum_prop": 0.5340251786321878,
"repo_name": "OptimusGitEtna/RestSymf",
"id": "fc1b16c668153f630234377d9b33e2059750ceee",
"size": "11756",
"binary": false,
"copies": "36",
"ref": "refs/heads/master",
"path": "Python-3.4.2/Parser/asdl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "594205"
},
{
"name": "C",
"bytes": "15348597"
},
{
"name": "C++",
"bytes": "65109"
},
{
"name": "CSS",
"bytes": "12039"
},
{
"name": "Common Lisp",
"bytes": "24481"
},
{
"name": "JavaScript",
"bytes": "10597"
},
{
"name": "Makefile",
"bytes": "9444"
},
{
"name": "Objective-C",
"bytes": "1390141"
},
{
"name": "PHP",
"bytes": "93070"
},
{
"name": "PowerShell",
"bytes": "1420"
},
{
"name": "Prolog",
"bytes": "557"
},
{
"name": "Python",
"bytes": "24018306"
},
{
"name": "Shell",
"bytes": "440753"
},
{
"name": "TeX",
"bytes": "323102"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
import psycopg2 as db
import sys
import os
from flask import Flask, request, Response, render_template, session, redirect, json
from twilio import twiml
from twilio.rest import TwilioRestClient
import imp
#loads account_sid and auth_token from private space
config = imp.load_source('config', '../sensitive_data/config.py')
TWILIO_ACCOUNT_SID = config.TWILIO_ACCOUNT_SID
TWILIO_AUTH_TOKEN = config.TWILIO_AUTH_TOKEN
app = Flask(__name__, template_folder='templates')
app.secret_key = config.SECRET_KEY
client = TwilioRestClient(TWILIO_ACCOUNT_SID, TWILIO_AUTH_TOKEN)
requests = {}
# connect to doctor database
try:
con = db.connect(database="testdbdoc", user="postgres", password="seetoh", host="localhost")
print 'Successfully connected to databases!'
cur = con.cursor()
except:
print 'Failed to connect to database.'
@app.route('/receivemessage', methods = ['GET', 'POST'])
def receivemessage():
body = request.values.get('Body', None)
fromnum = request.values.get('From', None)
r = twiml.Response()
body = body.lower()
try:
answer = body[0]
request_id = body[1:]
if answer == 'y':
# This does prevent us from tracking multiple requests to a doctor, and requires him to respond only to the newest one.
# Will add a method to deconflict clashes in requests from the same area
if requests[request_id] == False:
requests[request_id] = True
r.message("Thank you for replying. You are the first to accept and other doctors will not be able to accept this request \
already. A message will be sent to the requester to let him know you are on the way.")
# create a response to the requestor
elif requests[request_id] == True:
r.message("Thank you for replying. Another doctor is on the way to the scene already, but we appreciate your prompt \
response. Have a good day.")
else:
r.message("The request is no longer valid, thank you for replying.")
elif answer == 'n':
r.message("Sorry to hear that, thank you for your prompt response, have a good day.")
else:
r.message("Invalid response.")
except:
r.message("Invalid response.")
# has to be valid twiml
return str(r)
@app.route('/sendmessage', methods = ['POST', 'GET'])
def sendmessage():
# Test values
# Need to add either a request database or a function that carries the request values in. I prefer database.
zipcode = '94720' # requester zipcode
username = 'danielseetoh' # just to message only me
request_id = '1' # retrieve request id in table request
requests[request_id] = False
# will add the ability to track gps location of requester to match locally with doctors nearby
# for now just testing by matching zipcode
cur.execute("SELECT phonenumber FROM doctors WHERE zipcode = '%s' AND username = '%s'" % (zipcode, username))
doctor_phone_numbers = cur.fetchall()
for number in doctor_phone_numbers:
client.messages.create(
body = "Request id: %s. There is an emergency at '%s', will you be able to respond in \
less than 8 minutes?(y%s/n%s)" % (request_id, zipcode, request_id, request_id),
to = number[0],
from_ = "+14245438814",
)
print session['requestor']
return('Messages sent')
@app.route('/')
def index(name = None):
return render_template('index.html', name=name)
@app.route('/signup', methods = ['POST', 'GET'])
def signup():
if request.method == 'POST':
try:
# insert into database
_username = request.form['username']
_password = request.form['password']
_phonenumber = request.form['phonenumber']
_zipcode = request.form['zipcode']
            cur.execute("INSERT INTO doctors (username, password, phonenumber, zipcode, active) \
                VALUES ('%s', '%s', '%s', '%s', True ) " % (_username, _password, _phonenumber, _zipcode))
            return redirect('/')
except:
error = 'Unable to insert into database. {} {} {} {}'.format(_username,_password,_phonenumber,_zipcode)
return render_template('signup.html', error=error)
else:
        return render_template('signup.html', error=None)
# if request.method == 'POST':
# if request.form['username'] != 'admin' or request.form['password'] != 'admin':
# error = 'Invalid Credentials. Please try again.'
# else:
# return redirect(url_for('home'))
# _username = request.form['username']
# _password = request.form['password']
# _phonenumber = request.form['phonenumber']
# _zipcode = request.form['zipcode']
# if not (_username and _password and _phonenumber and _zipcode):
# return json.dumps({'html':'<span>Enter the required fields</span>'})
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0', port=8080)
| {
"content_hash": "a2d7efffd093beab510edc1277b23f2b",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 138,
"avg_line_length": 38.355555555555554,
"alnum_prop": 0.6235998455001931,
"repo_name": "danielseetoh/twilio185",
"id": "43ef0c0cd78ef4efbf7b32bf35b8172c36574a86",
"size": "5178",
"binary": false,
"copies": "1",
"ref": "refs/heads/gh-pages",
"path": ".~c9_invoke_aGP52y.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "928"
},
{
"name": "HTML",
"bytes": "23891"
},
{
"name": "JavaScript",
"bytes": "825"
},
{
"name": "Python",
"bytes": "31797"
}
],
"symlink_target": ""
} |
import sys
import unittest
import libsbml
class TestModel_newSetters(unittest.TestCase):
global M
M = None
def setUp(self):
self.M = libsbml.Model(2,4)
if (self.M == None):
pass
pass
def tearDown(self):
_dummyList = [ self.M ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addCompartment1(self):
m = libsbml.Model(2,2)
c = libsbml.Compartment(2,2)
i = m.addCompartment(c)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
c.setId( "c")
i = m.addCompartment(c)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumCompartments() == 1 )
_dummyList = [ c ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addCompartment2(self):
m = libsbml.Model(2,2)
c = libsbml.Compartment(2,1)
c.setId( "c")
i = m.addCompartment(c)
self.assert_( i == libsbml.LIBSBML_VERSION_MISMATCH )
self.assert_( m.getNumCompartments() == 0 )
_dummyList = [ c ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addCompartment3(self):
m = libsbml.Model(2,2)
c = libsbml.Compartment(1,2)
c.setId( "c")
i = m.addCompartment(c)
self.assert_( i == libsbml.LIBSBML_LEVEL_MISMATCH )
self.assert_( m.getNumCompartments() == 0 )
_dummyList = [ c ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addCompartment4(self):
m = libsbml.Model(2,2)
c = None
i = m.addCompartment(c)
self.assert_( i == libsbml.LIBSBML_OPERATION_FAILED )
self.assert_( m.getNumCompartments() == 0 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addCompartment5(self):
m = libsbml.Model(2,2)
c = libsbml.Compartment(2,2)
c.setId( "c")
c1 = libsbml.Compartment(2,2)
c1.setId( "c")
i = m.addCompartment(c)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumCompartments() == 1 )
i = m.addCompartment(c1)
self.assert_( i == libsbml.LIBSBML_DUPLICATE_OBJECT_ID )
self.assert_( m.getNumCompartments() == 1 )
_dummyList = [ c ]; _dummyList[:] = []; del _dummyList
_dummyList = [ c1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addCompartmentType1(self):
m = libsbml.Model(2,2)
ct = libsbml.CompartmentType(2,2)
i = m.addCompartmentType(ct)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
ct.setId( "ct")
i = m.addCompartmentType(ct)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumCompartmentTypes() == 1 )
_dummyList = [ ct ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addCompartmentType2(self):
m = libsbml.Model(2,2)
ct = libsbml.CompartmentType(2,3)
ct.setId( "ct")
i = m.addCompartmentType(ct)
self.assert_( i == libsbml.LIBSBML_VERSION_MISMATCH )
self.assert_( m.getNumCompartmentTypes() == 0 )
_dummyList = [ ct ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addCompartmentType3(self):
m = libsbml.Model(2,2)
ct = None
i = m.addCompartmentType(ct)
self.assert_( i == libsbml.LIBSBML_OPERATION_FAILED )
self.assert_( m.getNumCompartmentTypes() == 0 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addCompartmentType4(self):
m = libsbml.Model(2,2)
ct = libsbml.CompartmentType(2,2)
ct.setId( "ct")
ct1 = libsbml.CompartmentType(2,2)
ct1.setId( "ct")
i = m.addCompartmentType(ct)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumCompartmentTypes() == 1 )
i = m.addCompartmentType(ct1)
self.assert_( i == libsbml.LIBSBML_DUPLICATE_OBJECT_ID )
self.assert_( m.getNumCompartmentTypes() == 1 )
_dummyList = [ ct ]; _dummyList[:] = []; del _dummyList
_dummyList = [ ct1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addConstraint1(self):
m = libsbml.Model(2,2)
c = libsbml.Constraint(2,2)
i = m.addConstraint(c)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
c.setMath(libsbml.parseFormula("a+b"))
i = m.addConstraint(c)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumConstraints() == 1 )
_dummyList = [ c ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addConstraint2(self):
m = libsbml.Model(2,2)
c = libsbml.Constraint(2,3)
c.setMath(libsbml.parseFormula("a+b"))
i = m.addConstraint(c)
self.assert_( i == libsbml.LIBSBML_VERSION_MISMATCH )
self.assert_( m.getNumConstraints() == 0 )
_dummyList = [ c ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addConstraint3(self):
m = libsbml.Model(2,2)
c = None
i = m.addConstraint(c)
self.assert_( i == libsbml.LIBSBML_OPERATION_FAILED )
self.assert_( m.getNumConstraints() == 0 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addEvent1(self):
m = libsbml.Model(2,2)
e = libsbml.Event(2,2)
t = libsbml.Trigger(2,2)
i = m.addEvent(e)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
t.setMath(libsbml.parseFormula("true"))
e.setTrigger(t)
i = m.addEvent(e)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
e.createEventAssignment()
i = m.addEvent(e)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumEvents() == 1 )
_dummyList = [ e ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addEvent2(self):
m = libsbml.Model(2,2)
e = libsbml.Event(2,1)
t = libsbml.Trigger(2,1)
t.setMath(libsbml.parseFormula("true"))
e.setTrigger(t)
e.createEventAssignment()
i = m.addEvent(e)
self.assert_( i == libsbml.LIBSBML_VERSION_MISMATCH )
self.assert_( m.getNumEvents() == 0 )
_dummyList = [ e ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addEvent3(self):
m = libsbml.Model(2,2)
e = None
i = m.addEvent(e)
self.assert_( i == libsbml.LIBSBML_OPERATION_FAILED )
self.assert_( m.getNumEvents() == 0 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addEvent4(self):
m = libsbml.Model(2,2)
e = libsbml.Event(2,2)
t = libsbml.Trigger(2,2)
t.setMath(libsbml.parseFormula("true"))
e.setId( "e")
e.setTrigger(t)
e.createEventAssignment()
e1 = libsbml.Event(2,2)
e1.setId( "e")
e1.setTrigger(t)
e1.createEventAssignment()
i = m.addEvent(e)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumEvents() == 1 )
i = m.addEvent(e1)
self.assert_( i == libsbml.LIBSBML_DUPLICATE_OBJECT_ID )
self.assert_( m.getNumEvents() == 1 )
_dummyList = [ e ]; _dummyList[:] = []; del _dummyList
_dummyList = [ e1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addFunctionDefinition1(self):
m = libsbml.Model(2,2)
fd = libsbml.FunctionDefinition(2,2)
i = m.addFunctionDefinition(fd)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
fd.setId( "fd")
i = m.addFunctionDefinition(fd)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
fd.setMath(libsbml.parseFormula("fd"))
i = m.addFunctionDefinition(fd)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumFunctionDefinitions() == 1 )
_dummyList = [ fd ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addFunctionDefinition2(self):
m = libsbml.Model(2,2)
fd = libsbml.FunctionDefinition(2,1)
fd.setId( "fd")
fd.setMath(libsbml.parseFormula("fd"))
i = m.addFunctionDefinition(fd)
self.assert_( i == libsbml.LIBSBML_VERSION_MISMATCH )
self.assert_( m.getNumFunctionDefinitions() == 0 )
_dummyList = [ fd ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addFunctionDefinition3(self):
m = libsbml.Model(2,2)
fd = None
i = m.addFunctionDefinition(fd)
self.assert_( i == libsbml.LIBSBML_OPERATION_FAILED )
self.assert_( m.getNumFunctionDefinitions() == 0 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addFunctionDefinition4(self):
m = libsbml.Model(2,2)
fd = libsbml.FunctionDefinition(2,2)
fd.setId( "fd")
fd.setMath(libsbml.parseFormula("fd"))
fd1 = libsbml.FunctionDefinition(2,2)
fd1.setId( "fd")
fd1.setMath(libsbml.parseFormula("fd"))
i = m.addFunctionDefinition(fd)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumFunctionDefinitions() == 1 )
i = m.addFunctionDefinition(fd1)
self.assert_( i == libsbml.LIBSBML_DUPLICATE_OBJECT_ID )
self.assert_( m.getNumFunctionDefinitions() == 1 )
_dummyList = [ fd ]; _dummyList[:] = []; del _dummyList
_dummyList = [ fd1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addInitialAssignment1(self):
m = libsbml.Model(2,2)
ia = libsbml.InitialAssignment(2,2)
i = m.addInitialAssignment(ia)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
ia.setSymbol( "i")
i = m.addInitialAssignment(ia)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
ia.setMath(libsbml.parseFormula("gg"))
i = m.addInitialAssignment(ia)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumInitialAssignments() == 1 )
_dummyList = [ ia ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addInitialAssignment2(self):
m = libsbml.Model(2,2)
ia = libsbml.InitialAssignment(2,3)
ia.setSymbol( "i")
ia.setMath(libsbml.parseFormula("gg"))
i = m.addInitialAssignment(ia)
self.assert_( i == libsbml.LIBSBML_VERSION_MISMATCH )
self.assert_( m.getNumInitialAssignments() == 0 )
_dummyList = [ ia ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addInitialAssignment3(self):
m = libsbml.Model(2,2)
ia = None
i = m.addInitialAssignment(ia)
self.assert_( i == libsbml.LIBSBML_OPERATION_FAILED )
self.assert_( m.getNumInitialAssignments() == 0 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addInitialAssignment4(self):
m = libsbml.Model(2,2)
ia = libsbml.InitialAssignment(2,2)
ia.setSymbol( "ia")
ia.setMath(libsbml.parseFormula("a+b"))
ia1 = libsbml.InitialAssignment(2,2)
ia1.setSymbol( "ia")
ia1.setMath(libsbml.parseFormula("a+b"))
i = m.addInitialAssignment(ia)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumInitialAssignments() == 1 )
i = m.addInitialAssignment(ia1)
self.assert_( i == libsbml.LIBSBML_DUPLICATE_OBJECT_ID )
self.assert_( m.getNumInitialAssignments() == 1 )
_dummyList = [ ia ]; _dummyList[:] = []; del _dummyList
_dummyList = [ ia1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addParameter1(self):
m = libsbml.Model(2,2)
p = libsbml.Parameter(2,2)
i = m.addParameter(p)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
p.setId( "p")
i = m.addParameter(p)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumParameters() == 1 )
_dummyList = [ p ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addParameter2(self):
m = libsbml.Model(2,2)
p = libsbml.Parameter(2,1)
p.setId( "p")
i = m.addParameter(p)
self.assert_( i == libsbml.LIBSBML_VERSION_MISMATCH )
self.assert_( m.getNumParameters() == 0 )
_dummyList = [ p ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addParameter3(self):
m = libsbml.Model(2,2)
p = libsbml.Parameter(1,2)
p.setId( "p")
i = m.addParameter(p)
self.assert_( i == libsbml.LIBSBML_LEVEL_MISMATCH )
self.assert_( m.getNumParameters() == 0 )
_dummyList = [ p ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addParameter4(self):
m = libsbml.Model(2,2)
p = None
i = m.addParameter(p)
self.assert_( i == libsbml.LIBSBML_OPERATION_FAILED )
self.assert_( m.getNumParameters() == 0 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addParameter5(self):
m = libsbml.Model(2,2)
p = libsbml.Parameter(2,2)
p.setId( "p")
p1 = libsbml.Parameter(2,2)
p1.setId( "p")
i = m.addParameter(p)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumParameters() == 1 )
i = m.addParameter(p1)
self.assert_( i == libsbml.LIBSBML_DUPLICATE_OBJECT_ID )
self.assert_( m.getNumParameters() == 1 )
_dummyList = [ p ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addReaction1(self):
m = libsbml.Model(2,2)
r = libsbml.Reaction(2,2)
i = m.addReaction(r)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
r.setId( "r")
i = m.addReaction(r)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumReactions() == 1 )
_dummyList = [ r ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addReaction2(self):
m = libsbml.Model(2,2)
r = libsbml.Reaction(2,1)
r.setId( "r")
i = m.addReaction(r)
self.assert_( i == libsbml.LIBSBML_VERSION_MISMATCH )
self.assert_( m.getNumReactions() == 0 )
_dummyList = [ r ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addReaction3(self):
m = libsbml.Model(2,2)
r = libsbml.Reaction(1,2)
r.setId( "r")
i = m.addReaction(r)
self.assert_( i == libsbml.LIBSBML_LEVEL_MISMATCH )
self.assert_( m.getNumReactions() == 0 )
_dummyList = [ r ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addReaction4(self):
m = libsbml.Model(2,2)
r = None
i = m.addReaction(r)
self.assert_( i == libsbml.LIBSBML_OPERATION_FAILED )
self.assert_( m.getNumReactions() == 0 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addReaction5(self):
m = libsbml.Model(2,2)
r = libsbml.Reaction(2,2)
r.setId( "r")
r1 = libsbml.Reaction(2,2)
r1.setId( "r")
i = m.addReaction(r)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumReactions() == 1 )
i = m.addReaction(r1)
self.assert_( i == libsbml.LIBSBML_DUPLICATE_OBJECT_ID )
self.assert_( m.getNumReactions() == 1 )
_dummyList = [ r ]; _dummyList[:] = []; del _dummyList
_dummyList = [ r1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addRule1(self):
m = libsbml.Model(2,2)
r = libsbml.AssignmentRule(2,2)
i = m.addRule(r)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
r.setVariable( "f")
i = m.addRule(r)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
r.setMath(libsbml.parseFormula("a-n"))
i = m.addRule(r)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumRules() == 1 )
_dummyList = [ r ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addRule2(self):
m = libsbml.Model(2,2)
r = libsbml.AssignmentRule(2,1)
r.setVariable( "f")
r.setMath(libsbml.parseFormula("a-n"))
i = m.addRule(r)
self.assert_( i == libsbml.LIBSBML_VERSION_MISMATCH )
self.assert_( m.getNumRules() == 0 )
_dummyList = [ r ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addRule3(self):
m = libsbml.Model(2,2)
r = libsbml.AssignmentRule(1,2)
r.setVariable( "f")
r.setMath(libsbml.parseFormula("a-n"))
i = m.addRule(r)
self.assert_( i == libsbml.LIBSBML_LEVEL_MISMATCH )
self.assert_( m.getNumRules() == 0 )
_dummyList = [ r ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addRule4(self):
m = libsbml.Model(2,2)
r = None
i = m.addRule(r)
self.assert_( i == libsbml.LIBSBML_OPERATION_FAILED )
self.assert_( m.getNumRules() == 0 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addRule5(self):
m = libsbml.Model(2,2)
ar = libsbml.AssignmentRule(2,2)
ar.setVariable( "ar")
ar.setMath(libsbml.parseFormula("a-j"))
ar1 = libsbml.AssignmentRule(2,2)
ar1.setVariable( "ar")
ar1.setMath(libsbml.parseFormula("a-j"))
i = m.addRule(ar)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumRules() == 1 )
i = m.addRule(ar1)
self.assert_( i == libsbml.LIBSBML_DUPLICATE_OBJECT_ID )
self.assert_( m.getNumRules() == 1 )
_dummyList = [ ar ]; _dummyList[:] = []; del _dummyList
_dummyList = [ ar1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addSpecies1(self):
m = libsbml.Model(2,2)
s = libsbml.Species(2,2)
i = m.addSpecies(s)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
s.setId( "s")
i = m.addSpecies(s)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
s.setCompartment( "c")
i = m.addSpecies(s)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumSpecies() == 1 )
_dummyList = [ s ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addSpecies2(self):
m = libsbml.Model(2,2)
s = libsbml.Species(2,1)
s.setId( "s")
s.setCompartment( "c")
i = m.addSpecies(s)
self.assert_( i == libsbml.LIBSBML_VERSION_MISMATCH )
self.assert_( m.getNumSpecies() == 0 )
_dummyList = [ s ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addSpecies3(self):
m = libsbml.Model(2,2)
s = libsbml.Species(1,2)
s.setId( "s")
s.setCompartment( "c")
s.setInitialAmount(2)
i = m.addSpecies(s)
self.assert_( i == libsbml.LIBSBML_LEVEL_MISMATCH )
self.assert_( m.getNumSpecies() == 0 )
_dummyList = [ s ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addSpecies4(self):
m = libsbml.Model(2,2)
s = None
i = m.addSpecies(s)
self.assert_( i == libsbml.LIBSBML_OPERATION_FAILED )
self.assert_( m.getNumSpecies() == 0 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addSpecies5(self):
m = libsbml.Model(2,2)
s = libsbml.Species(2,2)
s.setId( "s")
s.setCompartment( "c")
s1 = libsbml.Species(2,2)
s1.setId( "s")
s1.setCompartment( "c")
i = m.addSpecies(s)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumSpecies() == 1 )
i = m.addSpecies(s1)
self.assert_( i == libsbml.LIBSBML_DUPLICATE_OBJECT_ID )
self.assert_( m.getNumSpecies() == 1 )
_dummyList = [ s ]; _dummyList[:] = []; del _dummyList
_dummyList = [ s1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addSpeciesType1(self):
m = libsbml.Model(2,2)
st = libsbml.SpeciesType(2,2)
i = m.addSpeciesType(st)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
st.setId( "st")
i = m.addSpeciesType(st)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumSpeciesTypes() == 1 )
_dummyList = [ st ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addSpeciesType2(self):
m = libsbml.Model(2,2)
st = libsbml.SpeciesType(2,3)
st.setId( "st")
i = m.addSpeciesType(st)
self.assert_( i == libsbml.LIBSBML_VERSION_MISMATCH )
self.assert_( m.getNumSpeciesTypes() == 0 )
_dummyList = [ st ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addSpeciesType3(self):
m = libsbml.Model(2,2)
st = None
i = m.addSpeciesType(st)
self.assert_( i == libsbml.LIBSBML_OPERATION_FAILED )
self.assert_( m.getNumSpeciesTypes() == 0 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addSpeciesType4(self):
m = libsbml.Model(2,2)
st = libsbml.SpeciesType(2,2)
st.setId( "st")
st1 = libsbml.SpeciesType(2,2)
st1.setId( "st")
i = m.addSpeciesType(st)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumSpeciesTypes() == 1 )
i = m.addSpeciesType(st1)
self.assert_( i == libsbml.LIBSBML_DUPLICATE_OBJECT_ID )
self.assert_( m.getNumSpeciesTypes() == 1 )
_dummyList = [ st ]; _dummyList[:] = []; del _dummyList
_dummyList = [ st1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addUnitDefinition1(self):
m = libsbml.Model(2,2)
ud = libsbml.UnitDefinition(2,2)
i = m.addUnitDefinition(ud)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
ud.createUnit()
i = m.addUnitDefinition(ud)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
ud.setId( "ud")
i = m.addUnitDefinition(ud)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumUnitDefinitions() == 1 )
_dummyList = [ ud ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addUnitDefinition2(self):
m = libsbml.Model(2,2)
ud = libsbml.UnitDefinition(2,1)
ud.createUnit()
ud.setId( "ud")
i = m.addUnitDefinition(ud)
self.assert_( i == libsbml.LIBSBML_VERSION_MISMATCH )
self.assert_( m.getNumUnitDefinitions() == 0 )
_dummyList = [ ud ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addUnitDefinition3(self):
m = libsbml.Model(2,2)
ud = libsbml.UnitDefinition(1,2)
ud.createUnit()
ud.setId( "ud")
i = m.addUnitDefinition(ud)
self.assert_( i == libsbml.LIBSBML_LEVEL_MISMATCH )
self.assert_( m.getNumUnitDefinitions() == 0 )
_dummyList = [ ud ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addUnitDefinition4(self):
m = libsbml.Model(2,2)
ud = None
i = m.addUnitDefinition(ud)
self.assert_( i == libsbml.LIBSBML_OPERATION_FAILED )
self.assert_( m.getNumUnitDefinitions() == 0 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_addUnitDefinition5(self):
m = libsbml.Model(2,2)
ud = libsbml.UnitDefinition(2,2)
ud.setId( "ud")
ud.createUnit()
ud1 = libsbml.UnitDefinition(2,2)
ud1.setId( "ud")
ud1.createUnit()
i = m.addUnitDefinition(ud)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( m.getNumUnitDefinitions() == 1 )
i = m.addUnitDefinition(ud1)
self.assert_( i == libsbml.LIBSBML_DUPLICATE_OBJECT_ID )
self.assert_( m.getNumUnitDefinitions() == 1 )
_dummyList = [ ud ]; _dummyList[:] = []; del _dummyList
_dummyList = [ ud1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_createCompartment(self):
m = libsbml.Model(2,2)
p = m.createCompartment()
self.assert_( m.getNumCompartments() == 1 )
self.assert_( (p).getLevel() == 2 )
self.assert_( (p).getVersion() == 2 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_createCompartmentType(self):
m = libsbml.Model(2,2)
p = m.createCompartmentType()
self.assert_( m.getNumCompartmentTypes() == 1 )
self.assert_( (p).getLevel() == 2 )
self.assert_( (p).getVersion() == 2 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_createConstraint(self):
m = libsbml.Model(2,2)
p = m.createConstraint()
self.assert_( m.getNumConstraints() == 1 )
self.assert_( (p).getLevel() == 2 )
self.assert_( (p).getVersion() == 2 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_createEvent(self):
m = libsbml.Model(2,2)
p = m.createEvent()
self.assert_( m.getNumEvents() == 1 )
self.assert_( (p).getLevel() == 2 )
self.assert_( (p).getVersion() == 2 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_createEventAssignment(self):
m = libsbml.Model(2,2)
p = m.createEvent()
ea = m.createEventAssignment()
self.assert_( p.getNumEventAssignments() == 1 )
self.assert_( (ea).getLevel() == 2 )
self.assert_( (ea).getVersion() == 2 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_createFunctionDefinition(self):
m = libsbml.Model(2,2)
p = m.createFunctionDefinition()
self.assert_( m.getNumFunctionDefinitions() == 1 )
self.assert_( (p).getLevel() == 2 )
self.assert_( (p).getVersion() == 2 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_createInitialAssignment(self):
m = libsbml.Model(2,2)
p = m.createInitialAssignment()
self.assert_( m.getNumInitialAssignments() == 1 )
self.assert_( (p).getLevel() == 2 )
self.assert_( (p).getVersion() == 2 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_createKineticLaw(self):
m = libsbml.Model(2,2)
p = m.createReaction()
kl = m.createKineticLaw()
self.assert_( p.isSetKineticLaw() == True )
self.assert_( (kl).getLevel() == 2 )
self.assert_( (kl).getVersion() == 2 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_createKineticLawParameters(self):
m = libsbml.Model(2,2)
r = m.createReaction()
kl = m.createKineticLaw()
p = m.createKineticLawParameter()
self.assert_( r.isSetKineticLaw() == True )
self.assert_( kl.getNumParameters() == 1 )
self.assert_( (p).getLevel() == 2 )
self.assert_( (p).getVersion() == 2 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_createModifier(self):
m = libsbml.Model(2,2)
p = m.createReaction()
sr = m.createModifier()
self.assert_( p.getNumModifiers() == 1 )
self.assert_( (sr).getLevel() == 2 )
self.assert_( (sr).getVersion() == 2 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_createParameter(self):
m = libsbml.Model(2,2)
p = m.createParameter()
self.assert_( m.getNumParameters() == 1 )
self.assert_( (p).getLevel() == 2 )
self.assert_( (p).getVersion() == 2 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_createProduct(self):
m = libsbml.Model(2,2)
p = m.createReaction()
sr = m.createProduct()
self.assert_( p.getNumProducts() == 1 )
self.assert_( (sr).getLevel() == 2 )
self.assert_( (sr).getVersion() == 2 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_createReactant(self):
m = libsbml.Model(2,2)
p = m.createReaction()
sr = m.createReactant()
self.assert_( p.getNumReactants() == 1 )
self.assert_( (sr).getLevel() == 2 )
self.assert_( (sr).getVersion() == 2 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_createReaction(self):
m = libsbml.Model(2,2)
p = m.createReaction()
self.assert_( m.getNumReactions() == 1 )
self.assert_( (p).getLevel() == 2 )
self.assert_( (p).getVersion() == 2 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_createRule(self):
m = libsbml.Model(2,2)
p = m.createAssignmentRule()
self.assert_( m.getNumRules() == 1 )
self.assert_( (p).getLevel() == 2 )
self.assert_( (p).getVersion() == 2 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_createSpecies(self):
m = libsbml.Model(2,2)
p = m.createSpecies()
self.assert_( m.getNumSpecies() == 1 )
self.assert_( (p).getLevel() == 2 )
self.assert_( (p).getVersion() == 2 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_createSpeciesType(self):
m = libsbml.Model(2,2)
p = m.createSpeciesType()
self.assert_( m.getNumSpeciesTypes() == 1 )
self.assert_( (p).getLevel() == 2 )
self.assert_( (p).getVersion() == 2 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_createUnit(self):
m = libsbml.Model(2,2)
p = m.createUnitDefinition()
u = m.createUnit()
self.assert_( p.getNumUnits() == 1 )
self.assert_( (u).getLevel() == 2 )
self.assert_( (u).getVersion() == 2 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_createUnitDefinition(self):
m = libsbml.Model(2,2)
p = m.createUnitDefinition()
self.assert_( m.getNumUnitDefinitions() == 1 )
self.assert_( (p).getLevel() == 2 )
self.assert_( (p).getVersion() == 2 )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_setId1(self):
id = "1e1";
i = self.M.setId(id)
self.assert_( i == libsbml.LIBSBML_INVALID_ATTRIBUTE_VALUE )
self.assertEqual( False, self.M.isSetId() )
pass
def test_Model_setId2(self):
id = "e1";
i = self.M.setId(id)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_(( id == self.M.getId() ))
self.assertEqual( True, self.M.isSetId() )
i = self.M.setId("")
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( False, self.M.isSetId() )
pass
def test_Model_setId3(self):
id = "e1";
i = self.M.setId(id)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_(( id == self.M.getId() ))
self.assertEqual( True, self.M.isSetId() )
i = self.M.unsetId()
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( False, self.M.isSetId() )
pass
def test_Model_setModelHistory1(self):
self.M.setMetaId("_001")
mh = libsbml.ModelHistory()
i = self.M.setModelHistory(mh)
self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
self.assertEqual( False, self.M.isSetModelHistory() )
i = self.M.unsetModelHistory()
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( False, self.M.isSetModelHistory() )
_dummyList = [ mh ]; _dummyList[:] = []; del _dummyList
pass
def test_Model_setModelHistory2(self):
self.M.setMetaId("_001")
i = self.M.setModelHistory(None)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( False, self.M.isSetModelHistory() )
i = self.M.unsetModelHistory()
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( False, self.M.isSetModelHistory() )
pass
def test_Model_setName1(self):
name = "3Set_k2";
i = self.M.setName(name)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( True, self.M.isSetName() )
pass
def test_Model_setName2(self):
name = "Set k2";
i = self.M.setName(name)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_(( name == self.M.getName() ))
self.assertEqual( True, self.M.isSetName() )
i = self.M.unsetName()
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( False, self.M.isSetName() )
pass
def test_Model_setName3(self):
i = self.M.setName("")
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertEqual( False, self.M.isSetName() )
pass
def test_Model_setName4(self):
m = libsbml.Model(1,2)
i = m.setName( "11dd")
self.assert_( i == libsbml.LIBSBML_INVALID_ATTRIBUTE_VALUE )
self.assertEqual( False, m.isSetName() )
pass
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestModel_newSetters))
return suite
if __name__ == "__main__":
if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
sys.exit(0)
else:
sys.exit(1)
| {
"content_hash": "09d8a164ba13c6091411c7ab6e08fe1b",
"timestamp": "",
"source": "github",
"line_count": 991,
"max_line_length": 72,
"avg_line_length": 34.36629667003027,
"alnum_prop": 0.6057785477288076,
"repo_name": "TheCoSMoCompany/biopredyn",
"id": "52bfb95b8653fb8680e5455898da00c99e95fdd0",
"size": "35461",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Prototype/src/libsbml-5.10.0/src/bindings/python/test/sbml/TestModel_newSetters.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3535918"
},
{
"name": "C++",
"bytes": "26120778"
},
{
"name": "CMake",
"bytes": "455400"
},
{
"name": "CSS",
"bytes": "49020"
},
{
"name": "Gnuplot",
"bytes": "206"
},
{
"name": "HTML",
"bytes": "193068"
},
{
"name": "Java",
"bytes": "66517"
},
{
"name": "JavaScript",
"bytes": "3847"
},
{
"name": "Makefile",
"bytes": "30905"
},
{
"name": "Perl",
"bytes": "3018"
},
{
"name": "Python",
"bytes": "7891301"
},
{
"name": "Shell",
"bytes": "247654"
},
{
"name": "TeX",
"bytes": "22566"
},
{
"name": "XSLT",
"bytes": "55564"
}
],
"symlink_target": ""
} |
from mlscripts.ml.som.classes import *
from mlscripts.ml.som.flatten import *
import scipy
import mlscripts.ml.som.functions as fn
import re
from collections import Counter
def word_plot(matrix):
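    # Print the matrix as a text grid: transpose it, emit the rows in reverse
    # order so the origin ends up at the bottom, one tab-separated row per line.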
matrix = scipy.transpose(matrix)
nrows = len(matrix)
for i in range(nrows):
i = nrows - 1 - i
row = matrix[i]
for m in row:
print str(m) + "\t",
print "\n"
def draw_basis_activation(map): # This mapping finds the closest neuron for each basis vector and prints the "name" of the basis vector on the neuron position
words = empty_list(map.size, 1)
basis_vectors = []
d = map.space.dimension
for i in range(d):
b = scipy.zeros(d, int)
b[i] = 1
basis_vectors.append(b)
for i, bv in enumerate(basis_vectors):
bmu = map.find_bmu0(bv)
x = map.positions[bmu]
x = fn.to_int(x)
words[x[0]][x[1]] = map.space.words[i]
word_plot(words)
return words
def draw_item_activation(mymap, named=True, overwrite=False, symbols=False):
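    # Map each item (or symbol) vector to its best-matching neuron and print the
    # item names on the grid; colliding names are comma-joined unless overwrite is set.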
words = empty_list(mymap.size, 1)
mymap.renormalize()
if named:
vectors = mymap.vectors
keys = mymap.keys
else:
vectors = []
keys = []
idea_names = mymap.space.idea_names
for item in mymap.space.table:
keys.append(idea_names[item])
vectors.append(mymap.space.table[item])
if symbols:
s = mymap.space.symbol_vectors
keys = []
vectors = []
for item in s:
keys.append(item)
vectors.append(s[item])
for i, vector in enumerate(vectors):
match = fn.find_best_match(vector, mymap.weights)
x = mymap.positions[match]
x = fn.to_int(x)
w = words[x[0]][x[1]]
if w == "" or overwrite:
if overwrite:
winner = fn.find_best_match(mymap.weights[match], mymap.vectors)
w = keys[winner]
else:
w = keys[i]
else:
w = w + "," + keys[i]
words[x[0]][x[1]] = w
word_plot(words)
return words
def draw_neuron_activation(mymap, named=True, symbols=False): # iterates through EACH neuron and finds closest vector
    # keep the two grids separate; the original bound both names to the same list
    words = empty_list(mymap.size, 1)
    distances = empty_list(mymap.size, 1)
if named:
vectors = mymap.vectors
keys = mymap.keys
else:
vectors = []
keys = []
idea_names = mymap.space.idea_names
for item in mymap.space.table:
keys.append(idea_names[item])
vectors.append(mymap.space.table[item])
if symbols:
s = mymap.space.symbol_vectors
keys = []
vectors = []
for item in s:
keys.append(mymap.space.idea_names[item])
vectors.append(s[item])
for neuron in flatten(mymap.neurons):
weight = neuron.weight
match = fn.find_best_match(weight, vectors)
distance = fn.distance(weight, vectors[match])
x = neuron.position
x = fn.to_int(x)
words[x[0]][x[1]] = keys[match]
# distances[x[0]][x[1]] = distance
word_plot(words)
return words
def draw_clusters(mymap, clusters):
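    # Label every neuron with the cluster id of the item vector closest to its weight.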
cluster_map = empty_list(mymap.size, 1)
vectors = mymap.vectors
keys = mymap.keys
for neuron in flatten(mymap.neurons):
weight = neuron.weight
match = fn.find_best_match(weight, vectors)
key = keys[match]
cluster = clusters[key]
x = neuron.position
x = fn.to_int(x)
# cluster_map[x[0]][x[1]] = key
cluster_map[x[0]][x[1]] = cluster
return cluster_map
def draw_clusters_per_item(mymap, clusters):
cluster_map = empty_list(mymap.size, 1)
vectors = mymap.vectors
keys = mymap.keys
for neuron in flatten(mymap.neurons):
weight = neuron.weight
match = fn.find_best_match(weight, vectors)
key = keys[match]
cluster = clusters[key]
x = neuron.position
x = fn.to_int(x)
cluster_map[x[0]][x[1]] = key
# cluster_map[x[0]][x[1]] = cluster
return cluster_map
def get_distances_to_nearest(mymap):
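    # For each neuron, record the distance to its closest item vector (a
    # quantisation-error style map) and report how many neurons map to each item.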
distances = empty_list(mymap.size, 1)
vectors = mymap.vectors
matches = []
for neuron in flatten(mymap.neurons):
weight = neuron.weight
match = fn.find_best_match(weight, vectors)
matches.append(match)
distance = fn.distance(weight, vectors[match])
x = neuron.position
x = fn.to_int(x)
distances[x[0]][x[1]] = distance
c = Counter(matches)
print c
print 'items mapped : ' + str(len(sorted(c)))
return distances
def get_umatrix(mymap, radius=1):
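    # U-matrix style view: for every neuron, accumulate the distances between its
    # weight and the weights of the neurons in the surrounding (wrapped) neighbourhood.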
umatrix = empty_list(mymap.size, 1)
xmax = mymap.size[1]
ymax = mymap.size[0]
rad = range(-radius, radius + 1)
# print rad
for neuron in flatten(mymap.neurons):
weight = neuron.weight
position = neuron.position
x = position[0]
y = position[1]
        # build the wrapped neighbourhood coordinates without shadowing the
        # builtin xrange, and keep the neuron's own coordinates in x and y
        neighbour_xs = []
        neighbour_ys = []
        for i in rad:
            neighbour_xs.append(int((x + i) % xmax))
            neighbour_ys.append(int((y + i) % ymax))
        average_dist = 0
        for nx in neighbour_xs:
            for ny in neighbour_ys:
                neighbour_weight = mymap.neurons[nx][ny].weight
                d = fn.distance(neighbour_weight, weight)
                average_dist += d
        # store the accumulated distance at the neuron's own cell,
        # not at the last neighbour visited
        umatrix[int(x)][int(y)] = average_dist
return umatrix
def create_idea_names(space):
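    # Build a readable name for each item by joining the words of its
    # non-zero vector components with '+'.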
idea_names = {}
for item in space.table:
name = ""
for i, element in enumerate(space.table[item]):
word = space.words[i]
word = re.sub("\"", "", str(word))
            if element > 0:
if len(name) == 0:
name = name + word
else:
name = name + "+" + word
idea_names[item] = name
return idea_names
def empty_list(shape, i):
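    # Recursively build a nested list of empty strings; index i selects which
    # dimension of shape is filled at the current nesting level.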
x = []
for n in range(shape[i]):
if i == 0:
x.append("")
else:
x.append(empty_list(shape, i - 1))
return x
| {
"content_hash": "dd6d1794e32f047d26e924240f7a415f",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 171,
"avg_line_length": 28.957943925233646,
"alnum_prop": 0.5460706793609811,
"repo_name": "IAS-ZHAW/machine_learning_scripts",
"id": "bd2e831585476cf37fde297aa5636db0b86a446c",
"size": "6375",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mlscripts/ml/som/visualize.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "78718"
}
],
"symlink_target": ""
} |
import copy
import itertools
import json
import logging
import math
import os
import socket
import traceback
from collections import defaultdict
from datetime import timedelta
import markdown
import nvd3
import pendulum
import sqlalchemy as sqla
from flask import (
redirect, request, Markup, Response, render_template,
make_response, flash, jsonify)
from flask._compat import PY2
from flask_appbuilder import BaseView, ModelView, expose, has_access
from flask_appbuilder.actions import action
from flask_appbuilder.models.sqla.filters import BaseFilter
from flask_babel import lazy_gettext
from past.builtins import unicode
from pygments import highlight, lexers
from pygments.formatters import HtmlFormatter
from sqlalchemy import or_, desc, and_, union_all
from wtforms import SelectField, validators
import airflow
from airflow import configuration as conf
from airflow import models, jobs
from airflow import settings
from airflow.api.common.experimental.mark_tasks import (set_dag_run_state_to_success,
set_dag_run_state_to_failed)
from airflow.models import XCom, DagRun
from airflow.ti_deps.dep_context import DepContext, QUEUE_DEPS, SCHEDULER_DEPS
from airflow.utils import timezone
from airflow.utils.dates import infer_time_unit, scale_time_units
from airflow.utils.db import provide_session
from airflow.utils.helpers import alchemy_to_dict
from airflow.utils.json import json_ser
from airflow.utils.state import State
from airflow.www_rbac import utils as wwwutils
from airflow.www_rbac.app import app, appbuilder
from airflow.www_rbac.decorators import action_logging, gzipped, has_dag_access
from airflow.www_rbac.forms import (DateTimeForm, DateTimeWithNumRunsForm,
DateTimeWithNumRunsWithDagRunsForm,
DagRunForm, ConnectionForm)
from airflow.www_rbac.widgets import AirflowModelListWidget
PAGE_SIZE = conf.getint('webserver', 'page_size')
if os.environ.get('SKIP_DAGS_PARSING') != 'True':
dagbag = models.DagBag(settings.DAGS_FOLDER)
else:
dagbag = models.DagBag
def get_date_time_num_runs_dag_runs_form_data(request, session, dag):
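    # Resolve the execution date, base date and number of runs from the request,
    # collect the recent DagRun choices for the run selector, and return them as
    # a dict used by the date/num-runs forms.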
dttm = request.args.get('execution_date')
if dttm:
dttm = pendulum.parse(dttm)
else:
dttm = dag.latest_execution_date or timezone.utcnow()
base_date = request.args.get('base_date')
if base_date:
base_date = timezone.parse(base_date)
else:
        # The DateTimeField widget truncates milliseconds and would lose
# the first dag run. Round to next second.
base_date = (dttm + timedelta(seconds=1)).replace(microsecond=0)
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
DR = models.DagRun
drs = (
session.query(DR)
.filter(
DR.dag_id == dag.dag_id,
DR.execution_date <= base_date)
.order_by(desc(DR.execution_date))
.limit(num_runs)
.all()
)
dr_choices = []
dr_state = None
for dr in drs:
dr_choices.append((dr.execution_date.isoformat(), dr.run_id))
if dttm == dr.execution_date:
dr_state = dr.state
# Happens if base_date was changed and the selected dag run is not in result
if not dr_state and drs:
dr = drs[0]
dttm = dr.execution_date
dr_state = dr.state
return {
'dttm': dttm,
'base_date': base_date,
'num_runs': num_runs,
'execution_date': dttm.isoformat(),
'dr_choices': dr_choices,
'dr_state': dr_state,
}
######################################################################################
# BaseViews
######################################################################################
class AirflowBaseView(BaseView):
route_base = ''
def render(self, template, **context):
return render_template(template,
base_template=self.appbuilder.base_template,
appbuilder=self.appbuilder,
**context)
class Airflow(AirflowBaseView):
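    # DAG-centric UI views: the dashboard, the stats/blocked JSON endpoints, the
    # code/log/task/xcom detail pages and the run/clear/trigger/delete actions.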
@expose('/home')
@has_access
@provide_session
def index(self, session=None):
DM = models.DagModel
hide_paused_dags_by_default = conf.getboolean('webserver',
'hide_paused_dags_by_default')
show_paused_arg = request.args.get('showPaused', 'None')
def get_int_arg(value, default=0):
try:
return int(value)
except ValueError:
return default
arg_current_page = request.args.get('page', '0')
arg_search_query = request.args.get('search', None)
dags_per_page = PAGE_SIZE
current_page = get_int_arg(arg_current_page, default=0)
if show_paused_arg.strip().lower() == 'false':
hide_paused = True
elif show_paused_arg.strip().lower() == 'true':
hide_paused = False
else:
hide_paused = hide_paused_dags_by_default
# read orm_dags from the db
sql_query = session.query(DM).filter(
~DM.is_subdag, DM.is_active
)
# optionally filter out "paused" dags
if hide_paused:
sql_query = sql_query.filter(~DM.is_paused)
# Get all the dag id the user could access
filter_dag_ids = appbuilder.sm.get_accessible_dag_ids()
import_errors = session.query(models.ImportError).all()
for ie in import_errors:
flash(
"Broken DAG: [{ie.filename}] {ie.stacktrace}".format(ie=ie),
"error")
# get a list of all non-subdag dags visible to everyone
# optionally filter out "paused" dags
if hide_paused:
unfiltered_webserver_dags = [dag for dag in dagbag.dags.values() if
not dag.parent_dag and not dag.is_paused]
else:
unfiltered_webserver_dags = [dag for dag in dagbag.dags.values() if
not dag.parent_dag]
if 'all_dags' in filter_dag_ids:
orm_dags = {dag.dag_id: dag for dag
in sql_query
.all()}
else:
orm_dags = {dag.dag_id: dag for dag in
sql_query.filter(DM.dag_id.in_(filter_dag_ids)).all()}
unfiltered_webserver_dags = [dag for dag in
unfiltered_webserver_dags
if dag.dag_id in filter_dag_ids]
webserver_dags = {
dag.dag_id: dag
for dag in unfiltered_webserver_dags
}
if arg_search_query:
lower_search_query = arg_search_query.lower()
# filter by dag_id
webserver_dags_filtered = {
dag_id: dag
for dag_id, dag in webserver_dags.items()
if (lower_search_query in dag_id.lower() or
lower_search_query in dag.owner.lower())
}
all_dag_ids = (set([dag.dag_id for dag in orm_dags.values()
if lower_search_query in dag.dag_id.lower() or
lower_search_query in dag.owners.lower()]) |
set(webserver_dags_filtered.keys()))
sorted_dag_ids = sorted(all_dag_ids)
else:
webserver_dags_filtered = webserver_dags
sorted_dag_ids = sorted(set(orm_dags.keys()) | set(webserver_dags.keys()))
start = current_page * dags_per_page
end = start + dags_per_page
num_of_all_dags = len(sorted_dag_ids)
page_dag_ids = sorted_dag_ids[start:end]
num_of_pages = int(math.ceil(num_of_all_dags / float(dags_per_page)))
auto_complete_data = set()
for dag in webserver_dags_filtered.values():
auto_complete_data.add(dag.dag_id)
auto_complete_data.add(dag.owner)
for dag in orm_dags.values():
auto_complete_data.add(dag.dag_id)
auto_complete_data.add(dag.owners)
return self.render(
'airflow/dags.html',
webserver_dags=webserver_dags_filtered,
orm_dags=orm_dags,
hide_paused=hide_paused,
current_page=current_page,
search_query=arg_search_query if arg_search_query else '',
page_size=dags_per_page,
num_of_pages=num_of_pages,
num_dag_from=start + 1,
num_dag_to=min(end, num_of_all_dags),
num_of_all_dags=num_of_all_dags,
paging=wwwutils.generate_pages(current_page, num_of_pages,
search=arg_search_query,
showPaused=not hide_paused),
dag_ids_in_page=page_dag_ids,
auto_complete_data=auto_complete_data)
@expose('/dag_stats')
@has_access
@provide_session
def dag_stats(self, session=None):
ds = models.DagStat
ds.update()
qry = (
session.query(ds.dag_id, ds.state, ds.count)
)
filter_dag_ids = appbuilder.sm.get_accessible_dag_ids()
payload = {}
if filter_dag_ids:
if 'all_dags' not in filter_dag_ids:
qry = qry.filter(ds.dag_id.in_(filter_dag_ids))
data = {}
for dag_id, state, count in qry:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
for dag in dagbag.dags.values():
if 'all_dags' in filter_dag_ids or dag.dag_id in filter_dag_ids:
payload[dag.safe_dag_id] = []
for state in State.dag_states:
count = data.get(dag.dag_id, {}).get(state, 0)
payload[dag.safe_dag_id].append({
'state': state,
'count': count,
'dag_id': dag.dag_id,
'color': State.color(state)
})
return wwwutils.json_response(payload)
@expose('/task_stats')
@has_access
@provide_session
def task_stats(self, session=None):
TI = models.TaskInstance
DagRun = models.DagRun
Dag = models.DagModel
filter_dag_ids = appbuilder.sm.get_accessible_dag_ids()
payload = {}
if not filter_dag_ids:
return
LastDagRun = (
session.query(
DagRun.dag_id,
sqla.func.max(DagRun.execution_date).label('execution_date'))
.join(Dag, Dag.dag_id == DagRun.dag_id)
.filter(DagRun.state != State.RUNNING)
.filter(Dag.is_active == True) # noqa
.group_by(DagRun.dag_id)
.subquery('last_dag_run')
)
RunningDagRun = (
session.query(DagRun.dag_id, DagRun.execution_date)
.join(Dag, Dag.dag_id == DagRun.dag_id)
.filter(DagRun.state == State.RUNNING)
.filter(Dag.is_active == True) # noqa
.subquery('running_dag_run')
)
# Select all task_instances from active dag_runs.
# If no dag_run is active, return task instances from most recent dag_run.
LastTI = (
session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))
.join(LastDagRun,
and_(LastDagRun.c.dag_id == TI.dag_id,
LastDagRun.c.execution_date == TI.execution_date))
)
RunningTI = (
session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))
.join(RunningDagRun,
and_(RunningDagRun.c.dag_id == TI.dag_id,
RunningDagRun.c.execution_date == TI.execution_date))
)
UnionTI = union_all(LastTI, RunningTI).alias('union_ti')
qry = (
session.query(UnionTI.c.dag_id, UnionTI.c.state, sqla.func.count())
.group_by(UnionTI.c.dag_id, UnionTI.c.state)
)
data = {}
for dag_id, state, count in qry:
if 'all_dags' in filter_dag_ids or dag_id in filter_dag_ids:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
session.commit()
for dag in dagbag.dags.values():
if 'all_dags' in filter_dag_ids or dag.dag_id in filter_dag_ids:
payload[dag.safe_dag_id] = []
for state in State.task_states:
count = data.get(dag.dag_id, {}).get(state, 0)
payload[dag.safe_dag_id].append({
'state': state,
'count': count,
'dag_id': dag.dag_id,
'color': State.color(state)
})
return wwwutils.json_response(payload)
@expose('/code')
@has_dag_access(can_dag_read=True)
@has_access
def code(self):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
title = dag_id
try:
with wwwutils.open_maybe_zipped(dag.fileloc, 'r') as f:
code = f.read()
html_code = highlight(
code, lexers.PythonLexer(), HtmlFormatter(linenos=True))
except IOError as e:
html_code = str(e)
return self.render(
'airflow/dag_code.html', html_code=html_code, dag=dag, title=title,
root=request.args.get('root'),
demo_mode=conf.getboolean('webserver', 'demo_mode'))
@expose('/dag_details')
@has_dag_access(can_dag_read=True)
@has_access
@provide_session
def dag_details(self, session=None):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
title = "DAG details"
TI = models.TaskInstance
states = (
session.query(TI.state, sqla.func.count(TI.dag_id))
.filter(TI.dag_id == dag_id)
.group_by(TI.state)
.all()
)
return self.render(
'airflow/dag_details.html',
dag=dag, title=title, states=states, State=State)
@app.errorhandler(404)
def circles(self):
return render_template(
'airflow/circles.html', hostname=socket.getfqdn()), 404
@app.errorhandler(500)
def show_traceback(self):
from airflow.utils import asciiart as ascii_
return render_template(
'airflow/traceback.html',
hostname=socket.getfqdn(),
nukular=ascii_.nukular,
info=traceback.format_exc()), 500
@expose('/pickle_info')
@has_access
def pickle_info(self):
d = {}
filter_dag_ids = appbuilder.sm.get_accessible_dag_ids()
if not filter_dag_ids:
return wwwutils.json_response({})
dag_id = request.args.get('dag_id')
dags = [dagbag.dags.get(dag_id)] if dag_id else dagbag.dags.values()
for dag in dags:
if 'all_dags' in filter_dag_ids or dag.dag_id in filter_dag_ids:
if not dag.is_subdag:
d[dag.dag_id] = dag.pickle_info()
return wwwutils.json_response(d)
@expose('/rendered')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
def rendered(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = pendulum.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
task = copy.copy(dag.get_task(task_id))
ti = models.TaskInstance(task=task, execution_date=dttm)
try:
ti.render_templates()
except Exception as e:
flash("Error rendering template: " + str(e), "error")
title = "Rendered Template"
html_dict = {}
for template_field in task.__class__.template_fields:
content = getattr(task, template_field)
if template_field in wwwutils.get_attr_renderer():
html_dict[template_field] = \
wwwutils.get_attr_renderer()[template_field](content)
else:
html_dict[template_field] = (
"<pre><code>" + str(content) + "</pre></code>")
return self.render(
'airflow/ti_code.html',
html_dict=html_dict,
dag=dag,
task_id=task_id,
execution_date=execution_date,
form=form,
title=title, )
@expose('/get_logs_with_metadata')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
@provide_session
def get_logs_with_metadata(self, session=None):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = pendulum.parse(execution_date)
try_number = int(request.args.get('try_number'))
metadata = request.args.get('metadata')
metadata = json.loads(metadata)
# metadata may be null
if not metadata:
metadata = {}
# Convert string datetime into actual datetime
try:
execution_date = timezone.parse(execution_date)
except ValueError:
error_message = (
'Given execution date, {}, could not be identified '
'as a date. Example date format: 2015-11-16T14:34:15+00:00'.format(
execution_date))
response = jsonify({'error': error_message})
response.status_code = 400
return response
logger = logging.getLogger('airflow.task')
task_log_reader = conf.get('core', 'task_log_reader')
handler = next((handler for handler in logger.handlers
if handler.name == task_log_reader), None)
ti = session.query(models.TaskInstance).filter(
models.TaskInstance.dag_id == dag_id,
models.TaskInstance.task_id == task_id,
models.TaskInstance.execution_date == dttm).first()
try:
if ti is None:
logs = ["*** Task instance did not exist in the DB\n"]
metadata['end_of_log'] = True
else:
dag = dagbag.get_dag(dag_id)
ti.task = dag.get_task(ti.task_id)
logs, metadatas = handler.read(ti, try_number, metadata=metadata)
metadata = metadatas[0]
for i, log in enumerate(logs):
if PY2 and not isinstance(log, unicode):
logs[i] = log.decode('utf-8')
message = logs[0]
return jsonify(message=message, metadata=metadata)
except AttributeError as e:
error_message = ["Task log handler {} does not support read logs.\n{}\n"
.format(task_log_reader, str(e))]
metadata['end_of_log'] = True
return jsonify(message=error_message, error=True, metadata=metadata)
@expose('/log')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
@provide_session
def log(self, session=None):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = pendulum.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
ti = session.query(models.TaskInstance).filter(
models.TaskInstance.dag_id == dag_id,
models.TaskInstance.task_id == task_id,
models.TaskInstance.execution_date == dttm).first()
logs = [''] * (ti.next_try_number - 1 if ti is not None else 0)
return self.render(
'airflow/ti_log.html',
logs=logs, dag=dag, title="Log by attempts",
dag_id=dag.dag_id, task_id=task_id,
execution_date=execution_date, form=form)
@expose('/task')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
def task(self):
TI = models.TaskInstance
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = pendulum.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
flash(
"Task [{}.{}] doesn't seem to exist"
" at the moment".format(dag_id, task_id),
"error")
return redirect('/')
task = copy.copy(dag.get_task(task_id))
task.resolve_template_files()
ti = TI(task=task, execution_date=dttm)
ti.refresh_from_db()
ti_attrs = []
for attr_name in dir(ti):
if not attr_name.startswith('_'):
attr = getattr(ti, attr_name)
if type(attr) != type(self.task): # noqa
ti_attrs.append((attr_name, str(attr)))
task_attrs = []
for attr_name in dir(task):
if not attr_name.startswith('_'):
attr = getattr(task, attr_name)
if type(attr) != type(self.task) and \
attr_name not in wwwutils.get_attr_renderer(): # noqa
task_attrs.append((attr_name, str(attr)))
# Color coding the special attributes that are code
special_attrs_rendered = {}
for attr_name in wwwutils.get_attr_renderer():
if hasattr(task, attr_name):
source = getattr(task, attr_name)
special_attrs_rendered[attr_name] = \
wwwutils.get_attr_renderer()[attr_name](source)
no_failed_deps_result = [(
"Unknown",
"All dependencies are met but the task instance is not running. In most "
"cases this just means that the task will probably be scheduled soon "
"unless:<br/>\n- The scheduler is down or under heavy load<br/>\n{}\n"
"<br/>\nIf this task instance does not start soon please contact your "
"Airflow administrator for assistance.".format(
"- This task instance already ran and had it's state changed manually "
"(e.g. cleared in the UI)<br/>" if ti.state == State.NONE else ""))]
# Use the scheduler's context to figure out which dependencies are not met
dep_context = DepContext(SCHEDULER_DEPS)
failed_dep_reasons = [(dep.dep_name, dep.reason) for dep in
ti.get_failed_dep_statuses(
dep_context=dep_context)]
title = "Task Instance Details"
return self.render(
'airflow/task.html',
task_attrs=task_attrs,
ti_attrs=ti_attrs,
failed_dep_reasons=failed_dep_reasons or no_failed_deps_result,
task_id=task_id,
execution_date=execution_date,
special_attrs_rendered=special_attrs_rendered,
form=form,
dag=dag, title=title)
@expose('/xcom')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
@provide_session
def xcom(self, session=None):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = pendulum.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
flash(
"Task [{}.{}] doesn't seem to exist"
" at the moment".format(dag_id, task_id),
"error")
return redirect('/')
xcomlist = session.query(XCom).filter(
XCom.dag_id == dag_id, XCom.task_id == task_id,
XCom.execution_date == dttm).all()
attributes = []
for xcom in xcomlist:
if not xcom.key.startswith('_'):
attributes.append((xcom.key, xcom.value))
title = "XCom"
return self.render(
'airflow/xcom.html',
attributes=attributes,
task_id=task_id,
execution_date=execution_date,
form=form,
dag=dag, title=title)
@expose('/run')
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
def run(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
execution_date = request.args.get('execution_date')
execution_date = pendulum.parse(execution_date)
ignore_all_deps = request.args.get('ignore_all_deps') == "true"
ignore_task_deps = request.args.get('ignore_task_deps') == "true"
ignore_ti_state = request.args.get('ignore_ti_state') == "true"
from airflow.executors import GetDefaultExecutor
executor = GetDefaultExecutor()
valid_celery_config = False
valid_kubernetes_config = False
try:
from airflow.executors.celery_executor import CeleryExecutor
valid_celery_config = isinstance(executor, CeleryExecutor)
except ImportError:
pass
try:
from airflow.contrib.executors.kubernetes_executor import KubernetesExecutor
valid_kubernetes_config = isinstance(executor, KubernetesExecutor)
except ImportError:
pass
if not valid_celery_config and not valid_kubernetes_config:
flash("Only works with the Celery or Kubernetes executors, sorry", "error")
return redirect(origin)
ti = models.TaskInstance(task=task, execution_date=execution_date)
ti.refresh_from_db()
# Make sure the task instance can be queued
dep_context = DepContext(
deps=QUEUE_DEPS,
ignore_all_deps=ignore_all_deps,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state)
failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context))
if failed_deps:
failed_deps_str = ", ".join(
["{}: {}".format(dep.dep_name, dep.reason) for dep in failed_deps])
flash("Could not queue task instance for execution, dependencies not met: "
"{}".format(failed_deps_str),
"error")
return redirect(origin)
executor.start()
executor.queue_task_instance(
ti,
ignore_all_deps=ignore_all_deps,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state)
executor.heartbeat()
flash(
"Sent {} to the message queue, "
"it should start any moment now.".format(ti))
return redirect(origin)
@expose('/delete')
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
def delete(self):
from airflow.api.common.experimental import delete_dag
from airflow.exceptions import DagNotFound, DagFileExists
dag_id = request.args.get('dag_id')
origin = request.args.get('origin') or "/"
try:
delete_dag.delete_dag(dag_id)
except DagNotFound:
flash("DAG with id {} not found. Cannot delete".format(dag_id), 'error')
return redirect(request.referrer)
except DagFileExists:
flash("Dag id {} is still in DagBag. "
"Remove the DAG file first.".format(dag_id),
'error')
return redirect(request.referrer)
flash("Deleting DAG with id {}. May take a couple minutes to fully"
" disappear.".format(dag_id))
# Upon success return to origin.
return redirect(origin)
@expose('/trigger')
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
def trigger(self):
dag_id = request.args.get('dag_id')
origin = request.args.get('origin') or "/"
dag = dagbag.get_dag(dag_id)
if not dag:
flash("Cannot find dag {}".format(dag_id))
return redirect(origin)
execution_date = timezone.utcnow()
run_id = "manual__{0}".format(execution_date.isoformat())
dr = DagRun.find(dag_id=dag_id, run_id=run_id)
if dr:
flash("This run_id {} already exists".format(run_id))
return redirect(origin)
run_conf = {}
dag.create_dagrun(
run_id=run_id,
execution_date=execution_date,
state=State.RUNNING,
conf=run_conf,
external_trigger=True
)
flash(
"Triggered {}, "
"it should start any moment now.".format(dag_id))
return redirect(origin)
def _clear_dag_tis(self, dag, start_date, end_date, origin,
recursive=False, confirmed=False):
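        # Clear the task instances of `dag` between start_date and end_date; when
        # not confirmed yet, do a dry run and render a confirmation page listing
        # the task instances that would be cleared.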
if confirmed:
count = dag.clear(
start_date=start_date,
end_date=end_date,
include_subdags=recursive,
include_parentdag=recursive,
)
flash("{0} task instances have been cleared".format(count))
return redirect(origin)
tis = dag.clear(
start_date=start_date,
end_date=end_date,
include_subdags=recursive,
include_parentdag=recursive,
dry_run=True,
)
if not tis:
flash("No task instances to clear", 'error')
response = redirect(origin)
else:
details = "\n".join([str(t) for t in tis])
response = self.render(
'airflow/confirm.html',
message=("Here's the list of task instances you are about "
"to clear:"),
details=details)
return response
@expose('/clear')
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
def clear(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
execution_date = request.args.get('execution_date')
execution_date = pendulum.parse(execution_date)
confirmed = request.args.get('confirmed') == "true"
upstream = request.args.get('upstream') == "true"
downstream = request.args.get('downstream') == "true"
future = request.args.get('future') == "true"
past = request.args.get('past') == "true"
recursive = request.args.get('recursive') == "true"
dag = dag.sub_dag(
task_regex=r"^{0}$".format(task_id),
include_downstream=downstream,
include_upstream=upstream)
end_date = execution_date if not future else None
start_date = execution_date if not past else None
return self._clear_dag_tis(dag, start_date, end_date, origin,
recursive=recursive, confirmed=confirmed)
@expose('/dagrun_clear')
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
def dagrun_clear(self):
dag_id = request.args.get('dag_id')
origin = request.args.get('origin')
execution_date = request.args.get('execution_date')
confirmed = request.args.get('confirmed') == "true"
dag = dagbag.get_dag(dag_id)
execution_date = pendulum.parse(execution_date)
start_date = execution_date
end_date = execution_date
return self._clear_dag_tis(dag, start_date, end_date, origin,
recursive=True, confirmed=confirmed)
@expose('/blocked')
@has_access
@provide_session
def blocked(self, session=None):
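        """Return a JSON payload of running DAG run counts and max_active_runs for each accessible DAG with runs in progress."""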
DR = models.DagRun
filter_dag_ids = appbuilder.sm.get_accessible_dag_ids()
payload = []
if filter_dag_ids:
dags = (
session.query(DR.dag_id, sqla.func.count(DR.id))
.filter(DR.state == State.RUNNING)
.group_by(DR.dag_id)
)
if 'all_dags' not in filter_dag_ids:
dags = dags.filter(DR.dag_id.in_(filter_dag_ids))
dags = dags.all()
for dag_id, active_dag_runs in dags:
max_active_runs = 0
if dag_id in dagbag.dags:
max_active_runs = dagbag.dags[dag_id].max_active_runs
payload.append({
'dag_id': dag_id,
'active_dag_run': active_dag_runs,
'max_active_runs': max_active_runs,
})
return wwwutils.json_response(payload)
def _mark_dagrun_state_as_failed(self, dag_id, execution_date, confirmed, origin):
if not execution_date:
flash('Invalid execution date', 'error')
return redirect(origin)
execution_date = pendulum.parse(execution_date)
dag = dagbag.get_dag(dag_id)
if not dag:
flash('Cannot find DAG: {}'.format(dag_id), 'error')
return redirect(origin)
new_dag_state = set_dag_run_state_to_failed(dag, execution_date, commit=confirmed)
if confirmed:
flash('Marked failed on {} task instances'.format(len(new_dag_state)))
return redirect(origin)
else:
details = '\n'.join([str(t) for t in new_dag_state])
response = self.render('airflow/confirm.html',
message=("Here's the list of task instances you are "
"about to mark as failed"),
details=details)
return response
def _mark_dagrun_state_as_success(self, dag_id, execution_date, confirmed, origin):
if not execution_date:
flash('Invalid execution date', 'error')
return redirect(origin)
execution_date = pendulum.parse(execution_date)
dag = dagbag.get_dag(dag_id)
if not dag:
flash('Cannot find DAG: {}'.format(dag_id), 'error')
return redirect(origin)
new_dag_state = set_dag_run_state_to_success(dag, execution_date,
commit=confirmed)
if confirmed:
flash('Marked success on {} task instances'.format(len(new_dag_state)))
return redirect(origin)
else:
details = '\n'.join([str(t) for t in new_dag_state])
response = self.render('airflow/confirm.html',
message=("Here's the list of task instances you are "
"about to mark as success"),
details=details)
return response
@expose('/dagrun_failed')
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
def dagrun_failed(self):
dag_id = request.args.get('dag_id')
execution_date = request.args.get('execution_date')
confirmed = request.args.get('confirmed') == 'true'
origin = request.args.get('origin')
return self._mark_dagrun_state_as_failed(dag_id, execution_date,
confirmed, origin)
@expose('/dagrun_success')
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
def dagrun_success(self):
dag_id = request.args.get('dag_id')
execution_date = request.args.get('execution_date')
confirmed = request.args.get('confirmed') == 'true'
origin = request.args.get('origin')
return self._mark_dagrun_state_as_success(dag_id, execution_date,
confirmed, origin)
def _mark_task_instance_state(self, dag_id, task_id, origin, execution_date,
confirmed, upstream, downstream,
future, past, state):
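        """Set the given state on a task instance (optionally upstream/downstream, past/future), or show a confirmation page."""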
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
task.dag = dag
execution_date = pendulum.parse(execution_date)
if not dag:
flash("Cannot find DAG: {}".format(dag_id))
return redirect(origin)
if not task:
flash("Cannot find task {} in DAG {}".format(task_id, dag.dag_id))
return redirect(origin)
from airflow.api.common.experimental.mark_tasks import set_state
if confirmed:
altered = set_state(task=task, execution_date=execution_date,
upstream=upstream, downstream=downstream,
future=future, past=past, state=state,
commit=True)
flash("Marked {} on {} task instances".format(state, len(altered)))
return redirect(origin)
to_be_altered = set_state(task=task, execution_date=execution_date,
upstream=upstream, downstream=downstream,
future=future, past=past, state=state,
commit=False)
details = "\n".join([str(t) for t in to_be_altered])
response = self.render("airflow/confirm.html",
message=("Here's the list of task instances you are "
"about to mark as {}:".format(state)),
details=details)
return response
@expose('/failed')
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
def failed(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
execution_date = request.args.get('execution_date')
confirmed = request.args.get('confirmed') == "true"
upstream = request.args.get('upstream') == "true"
downstream = request.args.get('downstream') == "true"
future = request.args.get('future') == "true"
past = request.args.get('past') == "true"
return self._mark_task_instance_state(dag_id, task_id, origin, execution_date,
confirmed, upstream, downstream,
future, past, State.FAILED)
@expose('/success')
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
def success(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
execution_date = request.args.get('execution_date')
confirmed = request.args.get('confirmed') == "true"
upstream = request.args.get('upstream') == "true"
downstream = request.args.get('downstream') == "true"
future = request.args.get('future') == "true"
past = request.args.get('past') == "true"
return self._mark_task_instance_state(dag_id, task_id, origin, execution_date,
confirmed, upstream, downstream,
future, past, State.SUCCESS)
@expose('/tree')
@has_dag_access(can_dag_read=True)
@has_access
@gzipped
@action_logging
@provide_session
def tree(self, session=None):
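        """Render the tree view: a collapsible task tree with one cell per task instance for recent DAG runs."""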
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
dag = dagbag.get_dag(dag_id)
if dag_id not in dagbag.dags:
flash('DAG "{0}" seems to be missing.'.format(dag_id), "error")
return redirect('/')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_downstream=False,
include_upstream=True)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
if base_date:
base_date = timezone.parse(base_date)
else:
base_date = dag.latest_execution_date or timezone.utcnow()
DR = models.DagRun
dag_runs = (
session.query(DR)
.filter(
DR.dag_id == dag.dag_id,
DR.execution_date <= base_date)
.order_by(DR.execution_date.desc())
.limit(num_runs)
.all()
)
dag_runs = {
dr.execution_date: alchemy_to_dict(dr) for dr in dag_runs}
dates = sorted(list(dag_runs.keys()))
max_date = max(dates) if dates else None
min_date = min(dates) if dates else None
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
task_instances = {}
for ti in tis:
tid = alchemy_to_dict(ti)
dr = dag_runs.get(ti.execution_date)
tid['external_trigger'] = dr['external_trigger'] if dr else False
task_instances[(ti.task_id, ti.execution_date)] = tid
expanded = []
        # The default recursion traces every path so that the tree view has full
# expand/collapse functionality. After 5,000 nodes we stop and fall
# back on a quick DFS search for performance. See PR #320.
node_count = [0]
node_limit = 5000 / max(1, len(dag.roots))
def recurse_nodes(task, visited):
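            """Build the nested node dict for the D3 tree, collapsing repeated task subtrees by default."""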
visited.add(task)
node_count[0] += 1
children = [
recurse_nodes(t, visited) for t in task.upstream_list
if node_count[0] < node_limit or t not in visited]
# D3 tree uses children vs _children to define what is
# expanded or not. The following block makes it such that
# repeated nodes are collapsed by default.
children_key = 'children'
if task.task_id not in expanded:
expanded.append(task.task_id)
elif children:
children_key = "_children"
def set_duration(tid):
if (isinstance(tid, dict) and tid.get("state") == State.RUNNING and
tid["start_date"] is not None):
d = timezone.utcnow() - pendulum.parse(tid["start_date"])
tid["duration"] = d.total_seconds()
return tid
return {
'name': task.task_id,
'instances': [
set_duration(task_instances.get((task.task_id, d))) or {
'execution_date': d.isoformat(),
'task_id': task.task_id
}
for d in dates],
children_key: children,
'num_dep': len(task.upstream_list),
'operator': task.task_type,
'retries': task.retries,
'owner': task.owner,
'start_date': task.start_date,
'end_date': task.end_date,
'depends_on_past': task.depends_on_past,
'ui_color': task.ui_color,
}
data = {
'name': '[DAG]',
'children': [recurse_nodes(t, set()) for t in dag.roots],
'instances': [
dag_runs.get(d) or {'execution_date': d.isoformat()}
for d in dates],
}
# minimize whitespace as this can be huge for bigger dags
data = json.dumps(data, default=json_ser, separators=(',', ':'))
session.commit()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
return self.render(
'airflow/tree.html',
operators=sorted(
list(set([op.__class__ for op in dag.tasks])),
key=lambda x: x.__name__
),
root=root,
form=form,
dag=dag, data=data, blur=blur, num_runs=num_runs)
@expose('/graph')
@has_dag_access(can_dag_read=True)
@has_access
@gzipped
@action_logging
@provide_session
def graph(self, session=None):
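        """Render the graph view: DAG nodes and edges plus task instance states for the selected execution date."""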
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
dag = dagbag.get_dag(dag_id)
if dag_id not in dagbag.dags:
flash('DAG "{0}" seems to be missing.'.format(dag_id), "error")
return redirect('/')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
arrange = request.args.get('arrange', dag.orientation)
nodes = []
edges = []
for task in dag.tasks:
nodes.append({
'id': task.task_id,
'value': {
'label': task.task_id,
'labelStyle': "fill:{0};".format(task.ui_fgcolor),
'style': "fill:{0};".format(task.ui_color),
'rx': 5,
'ry': 5,
}
})
def get_upstream(task):
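            """Recursively collect the unique upstream edges leading into this task."""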
for t in task.upstream_list:
edge = {
'u': t.task_id,
'v': task.task_id,
}
if edge not in edges:
edges.append(edge)
get_upstream(t)
for t in dag.roots:
get_upstream(t)
dt_nr_dr_data = get_date_time_num_runs_dag_runs_form_data(request, session, dag)
dt_nr_dr_data['arrange'] = arrange
dttm = dt_nr_dr_data['dttm']
class GraphForm(DateTimeWithNumRunsWithDagRunsForm):
arrange = SelectField("Layout", choices=(
('LR', "Left->Right"),
('RL', "Right->Left"),
('TB', "Top->Bottom"),
('BT', "Bottom->Top"),
))
form = GraphForm(data=dt_nr_dr_data)
form.execution_date.choices = dt_nr_dr_data['dr_choices']
task_instances = {
ti.task_id: alchemy_to_dict(ti)
for ti in dag.get_task_instances(session, dttm, dttm)}
tasks = {
t.task_id: {
'dag_id': t.dag_id,
'task_type': t.task_type,
}
for t in dag.tasks}
if not tasks:
flash("No tasks found", "error")
session.commit()
doc_md = markdown.markdown(dag.doc_md) \
if hasattr(dag, 'doc_md') and dag.doc_md else ''
return self.render(
'airflow/graph.html',
dag=dag,
form=form,
width=request.args.get('width', "100%"),
height=request.args.get('height', "800"),
execution_date=dttm.isoformat(),
state_token=wwwutils.state_token(dt_nr_dr_data['dr_state']),
doc_md=doc_md,
arrange=arrange,
operators=sorted(
list(set([op.__class__ for op in dag.tasks])),
key=lambda x: x.__name__
),
blur=blur,
root=root or '',
task_instances=json.dumps(task_instances, indent=2),
tasks=json.dumps(tasks, indent=2),
nodes=json.dumps(nodes, indent=2),
edges=json.dumps(edges, indent=2), )
@expose('/duration')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
@provide_session
def duration(self, session=None):
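        """Render task duration charts: raw durations and a cumulative chart that includes failed-attempt time."""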
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
if dag is None:
flash('DAG "{0}" seems to be missing.'.format(dag_id), "error")
return redirect('/')
if base_date:
base_date = pendulum.parse(base_date)
else:
base_date = dag.latest_execution_date or timezone.utcnow()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else timezone.utc_epoch()
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
chart_height = wwwutils.get_chart_height(dag)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, height=chart_height, width="1200")
cum_chart = nvd3.lineChart(
name="cumLineChart", x_is_date=True, height=chart_height, width="1200")
y = defaultdict(list)
x = defaultdict(list)
cum_y = defaultdict(list)
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
TF = models.TaskFail
ti_fails = (
session.query(TF)
.filter(TF.dag_id == dag.dag_id, # noqa
TF.execution_date >= min_date,
TF.execution_date <= base_date,
TF.task_id.in_([t.task_id for t in dag.tasks]))
.all() # noqa
)
fails_totals = defaultdict(int)
for tf in ti_fails:
dict_key = (tf.dag_id, tf.task_id, tf.execution_date)
fails_totals[dict_key] += tf.duration
for ti in tis:
if ti.duration:
dttm = wwwutils.epoch(ti.execution_date)
x[ti.task_id].append(dttm)
y[ti.task_id].append(float(ti.duration))
fails_dict_key = (ti.dag_id, ti.task_id, ti.execution_date)
fails_total = fails_totals[fails_dict_key]
cum_y[ti.task_id].append(float(ti.duration + fails_total))
# determine the most relevant time unit for the set of task instance
# durations for the DAG
y_unit = infer_time_unit([d for t in y.values() for d in t])
cum_y_unit = infer_time_unit([d for t in cum_y.values() for d in t])
# update the y Axis on both charts to have the correct time units
chart.create_y_axis('yAxis', format='.02f', custom_format=False,
label='Duration ({})'.format(y_unit))
chart.axislist['yAxis']['axisLabelDistance'] = '40'
cum_chart.create_y_axis('yAxis', format='.02f', custom_format=False,
label='Duration ({})'.format(cum_y_unit))
cum_chart.axislist['yAxis']['axisLabelDistance'] = '40'
for task in dag.tasks:
if x[task.task_id]:
chart.add_serie(name=task.task_id, x=x[task.task_id],
y=scale_time_units(y[task.task_id], y_unit))
cum_chart.add_serie(name=task.task_id, x=x[task.task_id],
y=scale_time_units(cum_y[task.task_id],
cum_y_unit))
dates = sorted(list({ti.execution_date for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if dates else None
session.commit()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
chart.buildcontent()
cum_chart.buildcontent()
s_index = cum_chart.htmlcontent.rfind('});')
cum_chart.htmlcontent = (cum_chart.htmlcontent[:s_index] +
"$( document ).trigger('chartload')" +
cum_chart.htmlcontent[s_index:])
return self.render(
'airflow/duration_chart.html',
dag=dag,
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
chart=chart.htmlcontent,
cum_chart=cum_chart.htmlcontent
)
@expose('/tries')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
@provide_session
def tries(self, session=None):
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
if base_date:
base_date = pendulum.parse(base_date)
else:
base_date = dag.latest_execution_date or timezone.utcnow()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else timezone.utc_epoch()
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
chart_height = wwwutils.get_chart_height(dag)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, y_axis_format='d', height=chart_height,
width="1200")
for task in dag.tasks:
y = []
x = []
for ti in task.get_task_instances(session, start_date=min_date,
end_date=base_date):
dttm = wwwutils.epoch(ti.execution_date)
x.append(dttm)
y.append(ti.try_number)
if x:
chart.add_serie(name=task.task_id, x=x, y=y)
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
tries = sorted(list({ti.try_number for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if tries else None
session.commit()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
chart.buildcontent()
return self.render(
'airflow/chart.html',
dag=dag,
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
chart=chart.htmlcontent
)
@expose('/landing_times')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
@provide_session
def landing_times(self, session=None):
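        """Render landing time charts: how long after the following schedule each task instance finished."""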
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
if base_date:
base_date = pendulum.parse(base_date)
else:
base_date = dag.latest_execution_date or timezone.utcnow()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else timezone.utc_epoch()
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
chart_height = wwwutils.get_chart_height(dag)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, height=chart_height, width="1200")
y = {}
x = {}
for task in dag.tasks:
y[task.task_id] = []
x[task.task_id] = []
for ti in task.get_task_instances(session, start_date=min_date,
end_date=base_date):
ts = ti.execution_date
if dag.schedule_interval and dag.following_schedule(ts):
ts = dag.following_schedule(ts)
if ti.end_date:
dttm = wwwutils.epoch(ti.execution_date)
secs = (ti.end_date - ts).total_seconds()
x[ti.task_id].append(dttm)
y[ti.task_id].append(secs)
# determine the most relevant time unit for the set of landing times
# for the DAG
y_unit = infer_time_unit([d for t in y.values() for d in t])
# update the y Axis to have the correct time units
chart.create_y_axis('yAxis', format='.02f', custom_format=False,
label='Landing Time ({})'.format(y_unit))
chart.axislist['yAxis']['axisLabelDistance'] = '40'
for task in dag.tasks:
if x[task.task_id]:
chart.add_serie(name=task.task_id, x=x[task.task_id],
y=scale_time_units(y[task.task_id], y_unit))
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
dates = sorted(list({ti.execution_date for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if dates else None
session.commit()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
chart.buildcontent()
return self.render(
'airflow/chart.html',
dag=dag,
chart=chart.htmlcontent,
height=str(chart_height + 100) + "px",
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
)
@expose('/paused', methods=['POST'])
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
@provide_session
def paused(self, session=None):
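        """Toggle the DAG's paused flag; the 'is_paused' request argument holds the state before the toggle."""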
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
orm_dag = (
session.query(DagModel)
.filter(DagModel.dag_id == dag_id).first()
)
if request.args.get('is_paused') == 'false':
orm_dag.is_paused = True
else:
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
dagbag.get_dag(dag_id)
return "OK"
@expose('/refresh')
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
@provide_session
def refresh(self, session=None):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
if orm_dag:
orm_dag.last_expired = timezone.utcnow()
session.merge(orm_dag)
session.commit()
# sync dag permission
appbuilder.sm.sync_perm_for_dag(dag_id)
models.DagStat.update([dag_id], session=session, dirty_only=False)
dagbag.get_dag(dag_id)
flash("DAG [{}] is now fresh as a daisy".format(dag_id))
return redirect(request.referrer)
@expose('/refresh_all')
@has_access
@action_logging
def refresh_all(self):
dagbag.collect_dags(only_if_updated=False)
# sync permissions for all dags
appbuilder.sm.sync_perm_for_dag()
flash("All DAGs are now up to date")
return redirect('/')
@expose('/gantt')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
@provide_session
def gantt(self, session=None):
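        """Render the Gantt chart of a single DAG run: one bar per task attempt, with failed attempts shown separately."""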
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
demo_mode = conf.getboolean('webserver', 'demo_mode')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
dt_nr_dr_data = get_date_time_num_runs_dag_runs_form_data(request, session, dag)
dttm = dt_nr_dr_data['dttm']
form = DateTimeWithNumRunsWithDagRunsForm(data=dt_nr_dr_data)
form.execution_date.choices = dt_nr_dr_data['dr_choices']
tis = [
ti for ti in dag.get_task_instances(session, dttm, dttm)
if ti.start_date]
tis = sorted(tis, key=lambda ti: ti.start_date)
TF = models.TaskFail
ti_fails = list(itertools.chain(*[(
session
.query(TF)
.filter(TF.dag_id == ti.dag_id,
TF.task_id == ti.task_id,
TF.execution_date == ti.execution_date)
.all()
) for ti in tis]))
TR = models.TaskReschedule
ti_reschedules = list(itertools.chain(*[(
session
.query(TR)
.filter(TR.dag_id == ti.dag_id,
TR.task_id == ti.task_id,
TR.execution_date == ti.execution_date)
.all()
) for ti in tis]))
# determine bars to show in the gantt chart
        # all reschedules of one attempt are combined into one bar
gantt_bar_items = []
for task_id, items in itertools.groupby(
sorted(tis + ti_fails + ti_reschedules, key=lambda ti: ti.task_id),
key=lambda ti: ti.task_id):
start_date = None
for i in sorted(items, key=lambda ti: ti.start_date):
start_date = start_date or i.start_date
end_date = i.end_date or timezone.utcnow()
if type(i) == models.TaskInstance:
gantt_bar_items.append((task_id, start_date, end_date, i.state))
start_date = None
elif type(i) == TF and (len(gantt_bar_items) == 0 or
end_date != gantt_bar_items[-1][2]):
gantt_bar_items.append((task_id, start_date, end_date, State.FAILED))
start_date = None
tasks = []
for gantt_bar_item in gantt_bar_items:
task_id = gantt_bar_item[0]
start_date = gantt_bar_item[1]
end_date = gantt_bar_item[2]
state = gantt_bar_item[3]
tasks.append({
'startDate': wwwutils.epoch(start_date),
'endDate': wwwutils.epoch(end_date),
'isoStart': start_date.isoformat()[:-4],
'isoEnd': end_date.isoformat()[:-4],
'taskName': task_id,
'duration': "{}".format(end_date - start_date)[:-4],
'status': state,
'executionDate': dttm.isoformat(),
})
states = {task['status']: task['status'] for task in tasks}
data = {
'taskNames': [ti.task_id for ti in tis],
'tasks': tasks,
'taskStatus': states,
'height': len(tis) * 25 + 25,
}
session.commit()
return self.render(
'airflow/gantt.html',
dag=dag,
execution_date=dttm.isoformat(),
form=form,
data=json.dumps(data, indent=2),
base_date='',
demo_mode=demo_mode,
root=root,
)
@expose('/object/task_instances')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
@provide_session
def task_instances(self, session=None):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
dttm = request.args.get('execution_date')
if dttm:
dttm = pendulum.parse(dttm)
else:
return "Error: Invalid execution_date"
task_instances = {
ti.task_id: alchemy_to_dict(ti)
for ti in dag.get_task_instances(session, dttm, dttm)}
return json.dumps(task_instances)
class VersionView(AirflowBaseView):
@expose('/version')
@has_access
def version(self):
try:
airflow_version = airflow.__version__
except Exception as e:
airflow_version = None
logging.error(e)
# Get the Git repo and git hash
git_version = None
try:
with open(os.path.join(*[settings.AIRFLOW_HOME,
'airflow', 'git_version'])) as f:
git_version = f.readline()
except Exception as e:
logging.error(e)
# Render information
title = "Version Info"
return self.render('airflow/version.html',
title=title,
airflow_version=airflow_version,
git_version=git_version)
class ConfigurationView(AirflowBaseView):
@expose('/configuration')
@has_access
def conf(self):
raw = request.args.get('raw') == "true"
title = "Airflow Configuration"
subtitle = conf.AIRFLOW_CONFIG
# Don't show config when expose_config variable is False in airflow config
if conf.getboolean("webserver", "expose_config"):
with open(conf.AIRFLOW_CONFIG, 'r') as f:
config = f.read()
table = [(section, key, value, source)
for section, parameters in conf.as_dict(True, True).items()
for key, (value, source) in parameters.items()]
else:
config = (
"# Your Airflow administrator chose not to expose the "
"configuration, most likely for security reasons.")
table = None
if raw:
return Response(
response=config,
status=200,
mimetype="application/text")
else:
code_html = Markup(highlight(
config,
lexers.IniLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/config.html',
pre_subtitle=settings.HEADER + " v" + airflow.__version__,
code_html=code_html, title=title, subtitle=subtitle,
table=table)
######################################################################################
# ModelViews
######################################################################################
class DagFilter(BaseFilter):
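    """Restrict list queries to the DAGs the current user is allowed to access."""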
def apply(self, query, func): # noqa
if appbuilder.sm.has_all_dags_access():
return query
filter_dag_ids = appbuilder.sm.get_accessible_dag_ids()
return query.filter(self.model.dag_id.in_(filter_dag_ids))
class AirflowModelView(ModelView):
list_widget = AirflowModelListWidget
page_size = PAGE_SIZE
CustomSQLAInterface = wwwutils.CustomSQLAInterface
class SlaMissModelView(AirflowModelView):
route_base = '/slamiss'
datamodel = AirflowModelView.CustomSQLAInterface(models.SlaMiss)
base_permissions = ['can_list']
list_columns = ['dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp']
add_columns = ['dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp']
edit_columns = ['dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp']
search_columns = ['dag_id', 'task_id', 'email_sent', 'timestamp', 'execution_date']
base_order = ('execution_date', 'desc')
base_filters = [['dag_id', DagFilter, lambda: []]]
formatters_columns = {
'task_id': wwwutils.task_instance_link,
'execution_date': wwwutils.datetime_f('execution_date'),
'timestamp': wwwutils.datetime_f('timestamp'),
'dag_id': wwwutils.dag_link,
}
class XComModelView(AirflowModelView):
route_base = '/xcom'
datamodel = AirflowModelView.CustomSQLAInterface(models.XCom)
base_permissions = ['can_add', 'can_list', 'can_edit', 'can_delete']
search_columns = ['key', 'value', 'timestamp', 'execution_date', 'task_id', 'dag_id']
list_columns = ['key', 'value', 'timestamp', 'execution_date', 'task_id', 'dag_id']
add_columns = ['key', 'value', 'execution_date', 'task_id', 'dag_id']
edit_columns = ['key', 'value', 'execution_date', 'task_id', 'dag_id']
base_order = ('execution_date', 'desc')
base_filters = [['dag_id', DagFilter, lambda: []]]
@action('muldelete', 'Delete', "Are you sure you want to delete selected records?",
single=False)
def action_muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class ConnectionModelView(AirflowModelView):
route_base = '/connection'
datamodel = AirflowModelView.CustomSQLAInterface(models.Connection)
base_permissions = ['can_add', 'can_list', 'can_edit', 'can_delete']
extra_fields = ['extra__jdbc__drv_path', 'extra__jdbc__drv_clsname',
'extra__google_cloud_platform__project',
'extra__google_cloud_platform__key_path',
'extra__google_cloud_platform__keyfile_dict',
'extra__google_cloud_platform__scope']
list_columns = ['conn_id', 'conn_type', 'host', 'port', 'is_encrypted',
'is_extra_encrypted']
add_columns = edit_columns = ['conn_id', 'conn_type', 'host', 'schema',
'login', 'password', 'port', 'extra'] + extra_fields
add_form = edit_form = ConnectionForm
add_template = 'airflow/conn_create.html'
edit_template = 'airflow/conn_edit.html'
base_order = ('conn_id', 'asc')
@action('muldelete', 'Delete', 'Are you sure you want to delete selected records?',
single=False)
@has_dag_access(can_dag_edit=True)
def action_muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
def process_form(self, form, is_created):
formdata = form.data
if formdata['conn_type'] in ['jdbc', 'google_cloud_platform']:
extra = {
key: formdata[key]
for key in self.extra_fields if key in formdata}
form.extra.data = json.dumps(extra)
def prefill_form(self, form, pk):
try:
d = json.loads(form.data.get('extra', '{}'))
except Exception:
d = {}
if not hasattr(d, 'get'):
logging.warning('extra field for {} is not iterable'.format(
form.data.get('conn_id', '<unknown>')))
return
for field in self.extra_fields:
value = d.get(field, '')
if value:
field = getattr(form, field)
field.data = value
class PoolModelView(AirflowModelView):
route_base = '/pool'
datamodel = AirflowModelView.CustomSQLAInterface(models.Pool)
base_permissions = ['can_add', 'can_list', 'can_edit', 'can_delete']
list_columns = ['pool', 'slots', 'used_slots', 'queued_slots']
add_columns = ['pool', 'slots', 'description']
edit_columns = ['pool', 'slots', 'description']
base_order = ('pool', 'asc')
@action('muldelete', 'Delete', 'Are you sure you want to delete selected records?',
single=False)
def action_muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
def pool_link(attr):
pool_id = attr.get('pool')
if pool_id is not None:
url = '/taskinstance/list/?_flt_3_pool=' + str(pool_id)
return Markup("<a href='{url}'>{pool_id}</a>".format(**locals()))
else:
return Markup('<span class="label label-danger">Invalid</span>')
def fused_slots(attr):
pool_id = attr.get('pool')
used_slots = attr.get('used_slots')
if pool_id is not None and used_slots is not None:
url = '/taskinstance/list/?_flt_3_pool=' + str(pool_id) + \
'&_flt_3_state=running'
return Markup("<a href='{url}'>{used_slots}</a>".format(**locals()))
else:
return Markup('<span class="label label-danger">Invalid</span>')
def fqueued_slots(attr):
pool_id = attr.get('pool')
queued_slots = attr.get('queued_slots')
if pool_id is not None and queued_slots is not None:
url = '/taskinstance/list/?_flt_3_pool=' + str(pool_id) + \
'&_flt_3_state=queued'
return Markup("<a href='{url}'>{queued_slots}</a>".format(**locals()))
else:
return Markup('<span class="label label-danger">Invalid</span>')
formatters_columns = {
'pool': pool_link,
'used_slots': fused_slots,
'queued_slots': fqueued_slots
}
validators_columns = {
'pool': [validators.DataRequired()],
'slots': [validators.NumberRange(min=0)]
}
class VariableModelView(AirflowModelView):
route_base = '/variable'
list_template = 'airflow/variable_list.html'
datamodel = AirflowModelView.CustomSQLAInterface(models.Variable)
base_permissions = ['can_add', 'can_list', 'can_edit', 'can_delete', 'can_varimport']
list_columns = ['key', 'val', 'is_encrypted']
add_columns = ['key', 'val', 'is_encrypted']
edit_columns = ['key', 'val']
search_columns = ['key', 'val']
base_order = ('key', 'asc')
def hidden_field_formatter(attr):
key = attr.get('key')
val = attr.get('val')
if wwwutils.should_hide_value_for_key(key):
return Markup('*' * 8)
if val:
return val
else:
return Markup('<span class="label label-danger">Invalid</span>')
formatters_columns = {
'val': hidden_field_formatter,
}
validators_columns = {
'key': [validators.DataRequired()]
}
def prefill_form(self, form, id):
if wwwutils.should_hide_value_for_key(form.key.data):
form.val.data = '*' * 8
@action('muldelete', 'Delete', 'Are you sure you want to delete selected records?',
single=False)
def action_muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
@action('varexport', 'Export', '', single=False)
def action_varexport(self, items):
var_dict = {}
d = json.JSONDecoder()
for var in items:
try:
val = d.decode(var.val)
except Exception:
val = var.val
var_dict[var.key] = val
response = make_response(json.dumps(var_dict, sort_keys=True, indent=4))
response.headers["Content-Disposition"] = "attachment; filename=variables.json"
return response
@expose('/varimport', methods=["POST"])
@has_access
@action_logging
def varimport(self):
try:
out = request.files['file'].read()
if not PY2 and isinstance(out, bytes):
d = json.loads(out.decode('utf-8'))
else:
d = json.loads(out)
except Exception:
flash("Missing file or syntax error.", 'error')
else:
suc_count = fail_count = 0
for k, v in d.items():
try:
models.Variable.set(k, v, serialize_json=isinstance(v, dict))
except Exception as e:
logging.info('Variable import failed: {}'.format(repr(e)))
fail_count += 1
else:
suc_count += 1
flash("{} variable(s) successfully updated.".format(suc_count))
if fail_count:
flash("{} variable(s) failed to be updated.".format(fail_count), 'error')
self.update_redirect()
return redirect(self.get_redirect())
class JobModelView(AirflowModelView):
route_base = '/job'
datamodel = AirflowModelView.CustomSQLAInterface(jobs.BaseJob)
base_permissions = ['can_list']
list_columns = ['id', 'dag_id', 'state', 'job_type', 'start_date',
'end_date', 'latest_heartbeat',
'executor_class', 'hostname', 'unixname']
search_columns = ['id', 'dag_id', 'state', 'job_type', 'start_date',
'end_date', 'latest_heartbeat', 'executor_class',
'hostname', 'unixname']
base_order = ('start_date', 'desc')
base_filters = [['dag_id', DagFilter, lambda: []]]
formatters_columns = {
'start_date': wwwutils.datetime_f('start_date'),
'end_date': wwwutils.datetime_f('end_date'),
'hostname': wwwutils.nobr_f('hostname'),
'state': wwwutils.state_f,
'latest_heartbeat': wwwutils.datetime_f('latest_heartbeat'),
}
class DagRunModelView(AirflowModelView):
route_base = '/dagrun'
datamodel = AirflowModelView.CustomSQLAInterface(models.DagRun)
base_permissions = ['can_list']
list_columns = ['state', 'dag_id', 'execution_date', 'run_id', 'external_trigger']
search_columns = ['state', 'dag_id', 'execution_date', 'run_id', 'external_trigger']
base_order = ('execution_date', 'desc')
base_filters = [['dag_id', DagFilter, lambda: []]]
add_form = edit_form = DagRunForm
formatters_columns = {
'execution_date': wwwutils.datetime_f('execution_date'),
'state': wwwutils.state_f,
'start_date': wwwutils.datetime_f('start_date'),
'dag_id': wwwutils.dag_link,
'run_id': wwwutils.dag_run_link,
}
validators_columns = {
'dag_id': [validators.DataRequired()]
}
@action('muldelete', "Delete", "Are you sure you want to delete selected records?",
single=False)
@has_dag_access(can_dag_edit=True)
@provide_session
def action_muldelete(self, items, session=None):
self.datamodel.delete_all(items)
self.update_redirect()
dirty_ids = []
for item in items:
dirty_ids.append(item.dag_id)
models.DagStat.update(dirty_ids, dirty_only=False, session=session)
return redirect(self.get_redirect())
@action('set_running', "Set state to 'running'", '', single=False)
@provide_session
def action_set_running(self, drs, session=None):
try:
DR = models.DagRun
count = 0
dirty_ids = []
for dr in session.query(DR).filter(
DR.id.in_([dagrun.id for dagrun in drs])).all():
dirty_ids.append(dr.dag_id)
count += 1
dr.start_date = timezone.utcnow()
dr.state = State.RUNNING
models.DagStat.update(dirty_ids, session=session)
session.commit()
flash("{count} dag runs were set to running".format(**locals()))
except Exception as ex:
flash(str(ex), 'error')
flash('Failed to set state', 'error')
return redirect(self.route_base + '/list')
@action('set_failed', "Set state to 'failed'",
"All running task instances would also be marked as failed, are you sure?",
single=False)
@provide_session
def action_set_failed(self, drs, session=None):
try:
DR = models.DagRun
count = 0
dirty_ids = []
altered_tis = []
for dr in session.query(DR).filter(
DR.id.in_([dagrun.id for dagrun in drs])).all():
dirty_ids.append(dr.dag_id)
count += 1
altered_tis += \
set_dag_run_state_to_failed(dagbag.get_dag(dr.dag_id),
dr.execution_date,
commit=True,
session=session)
models.DagStat.update(dirty_ids, session=session)
altered_ti_count = len(altered_tis)
flash(
"{count} dag runs and {altered_ti_count} task instances "
"were set to failed".format(**locals()))
except Exception as ex:
flash('Failed to set state', 'error')
return redirect(self.route_base + '/list')
@action('set_success', "Set state to 'success'",
"All task instances would also be marked as success, are you sure?",
single=False)
@provide_session
def action_set_success(self, drs, session=None):
try:
DR = models.DagRun
count = 0
dirty_ids = []
altered_tis = []
for dr in session.query(DR).filter(
DR.id.in_([dagrun.id for dagrun in drs])).all():
dirty_ids.append(dr.dag_id)
count += 1
altered_tis += \
set_dag_run_state_to_success(dagbag.get_dag(dr.dag_id),
dr.execution_date,
commit=True,
session=session)
models.DagStat.update(dirty_ids, session=session)
altered_ti_count = len(altered_tis)
flash(
"{count} dag runs and {altered_ti_count} task instances "
"were set to success".format(**locals()))
except Exception as ex:
flash('Failed to set state', 'error')
return redirect(self.route_base + '/list')
class LogModelView(AirflowModelView):
route_base = '/log'
datamodel = AirflowModelView.CustomSQLAInterface(models.Log)
base_permissions = ['can_list']
list_columns = ['id', 'dttm', 'dag_id', 'task_id', 'event', 'execution_date',
'owner', 'extra']
search_columns = ['dag_id', 'task_id', 'execution_date', 'extra']
base_order = ('dttm', 'desc')
base_filters = [['dag_id', DagFilter, lambda: []]]
formatters_columns = {
'dttm': wwwutils.datetime_f('dttm'),
'execution_date': wwwutils.datetime_f('execution_date'),
'dag_id': wwwutils.dag_link,
}
class TaskInstanceModelView(AirflowModelView):
route_base = '/taskinstance'
datamodel = AirflowModelView.CustomSQLAInterface(models.TaskInstance)
base_permissions = ['can_list']
page_size = PAGE_SIZE
list_columns = ['state', 'dag_id', 'task_id', 'execution_date', 'operator',
'start_date', 'end_date', 'duration', 'job_id', 'hostname',
'unixname', 'priority_weight', 'queue', 'queued_dttm', 'try_number',
'pool', 'log_url']
search_columns = ['state', 'dag_id', 'task_id', 'execution_date', 'hostname',
'queue', 'pool', 'operator', 'start_date', 'end_date']
base_order = ('job_id', 'asc')
base_filters = [['dag_id', DagFilter, lambda: []]]
def log_url_formatter(attr):
log_url = attr.get('log_url')
return Markup(
'<a href="{log_url}">'
' <span class="glyphicon glyphicon-book" aria-hidden="true">'
'</span></a>').format(**locals())
def duration_f(attr):
end_date = attr.get('end_date')
duration = attr.get('duration')
if end_date and duration:
return timedelta(seconds=duration)
formatters_columns = {
'log_url': log_url_formatter,
'task_id': wwwutils.task_instance_link,
'hostname': wwwutils.nobr_f('hostname'),
'state': wwwutils.state_f,
'execution_date': wwwutils.datetime_f('execution_date'),
'start_date': wwwutils.datetime_f('start_date'),
'end_date': wwwutils.datetime_f('end_date'),
'queued_dttm': wwwutils.datetime_f('queued_dttm'),
'dag_id': wwwutils.dag_link,
'duration': duration_f,
}
@provide_session
@action('clear', lazy_gettext('Clear'),
lazy_gettext('Are you sure you want to clear the state of the selected task'
' instance(s) and set their dagruns to the running state?'),
single=False)
def action_clear(self, tis, session=None):
try:
dag_to_tis = {}
for ti in tis:
dag = dagbag.get_dag(ti.dag_id)
tis = dag_to_tis.setdefault(dag, [])
tis.append(ti)
for dag, tis in dag_to_tis.items():
models.clear_task_instances(tis, session, dag=dag)
session.commit()
flash("{0} task instances have been cleared".format(len(tis)))
self.update_redirect()
return redirect(self.get_redirect())
except Exception:
flash('Failed to clear task instances', 'error')
@provide_session
def set_task_instance_state(self, tis, target_state, session=None):
try:
count = len(tis)
for ti in tis:
ti.set_state(target_state, session)
session.commit()
flash(
"{count} task instances were set to '{target_state}'".format(**locals()))
except Exception as ex:
flash('Failed to set state', 'error')
@action('set_running', "Set state to 'running'", '', single=False)
@has_dag_access(can_dag_edit=True)
def action_set_running(self, tis):
self.set_task_instance_state(tis, State.RUNNING)
self.update_redirect()
return redirect(self.get_redirect())
@action('set_failed', "Set state to 'failed'", '', single=False)
@has_dag_access(can_dag_edit=True)
def action_set_failed(self, tis):
self.set_task_instance_state(tis, State.FAILED)
self.update_redirect()
return redirect(self.get_redirect())
@action('set_success', "Set state to 'success'", '', single=False)
@has_dag_access(can_dag_edit=True)
def action_set_success(self, tis):
self.set_task_instance_state(tis, State.SUCCESS)
self.update_redirect()
return redirect(self.get_redirect())
@action('set_retry', "Set state to 'up_for_retry'", '', single=False)
@has_dag_access(can_dag_edit=True)
def action_set_retry(self, tis):
self.set_task_instance_state(tis, State.UP_FOR_RETRY)
self.update_redirect()
return redirect(self.get_redirect())
def get_one(self, id):
"""
As a workaround for AIRFLOW-252, this method overrides Flask-Admin's
ModelView.get_one().
TODO: this method should be removed once the below bug is fixed on
Flask-Admin side. https://github.com/flask-admin/flask-admin/issues/1226
"""
task_id, dag_id, execution_date = iterdecode(id) # noqa
execution_date = pendulum.parse(execution_date)
return self.session.query(self.model).get((task_id, dag_id, execution_date))
class DagModelView(AirflowModelView):
route_base = '/dagmodel'
datamodel = AirflowModelView.CustomSQLAInterface(models.DagModel)
base_permissions = ['can_list', 'can_show']
list_columns = ['dag_id', 'is_paused', 'last_scheduler_run',
'last_expired', 'scheduler_lock', 'fileloc', 'owners']
formatters_columns = {
'dag_id': wwwutils.dag_link
}
base_filters = [['dag_id', DagFilter, lambda: []]]
def get_query(self):
"""
Default filters for model
"""
return (
super(DagModelView, self).get_query()
.filter(or_(models.DagModel.is_active,
models.DagModel.is_paused))
.filter(~models.DagModel.is_subdag)
)
def get_count_query(self):
"""
Default filters for model
"""
return (
super(DagModelView, self).get_count_query()
.filter(models.DagModel.is_active)
.filter(~models.DagModel.is_subdag)
)
| {
"content_hash": "ed9d6e11892303e7d3827618d746c8fd",
"timestamp": "",
"source": "github",
"line_count": 2411,
"max_line_length": 90,
"avg_line_length": 36.950642886768975,
"alnum_prop": 0.5419809626436781,
"repo_name": "malmiron/incubator-airflow",
"id": "49a9a734cc483ccb381ec2c3c9c35754262929a4",
"size": "89902",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airflow/www_rbac/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "56979"
},
{
"name": "HTML",
"bytes": "145974"
},
{
"name": "JavaScript",
"bytes": "1364212"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "1847247"
},
{
"name": "Shell",
"bytes": "19680"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.contrib import admin
from django.contrib.sitemaps.views import sitemap
from django.urls import include, path
from core import views as core_views
from api.resources import OrderableResource, PostResource, SearchResource
from core.sitemap import IndexSitemap, BlogSitemap, StaticSitemap
sitemaps = {"index": IndexSitemap, "blog": BlogSitemap, "static": StaticSitemap}
urlpatterns = [
path("", core_views.index, name="index"),
path("email/<email_id>/", core_views.email, name="email"),
path("about/", core_views.static, {"page": "about"}, name="about"),
path("resume/", core_views.static, {"page": "resume"}, name="resume"),
path("copyrights/", core_views.static, {"page": "copyrights"}, name="copyrights"),
# API urls
path("api/posts/", PostResource.as_view()),
path("api/orderable/", OrderableResource.as_view()),
path("api/search/", SearchResource.as_view()),
# Blog urls
path("blog/", include("blog.urls"), name="blog"),
# Gallery urls
path("gallery/", include("gallery.urls")),
# Profile urls
path("profile/", include("profiles.urls")),
# URL shortener
path("sr/", include("shortener.urls")),
# Admin urls
path("dashboard/", admin.site.urls),
# Django RQ
path("dashboard/django-rq/", include("django_rq.urls")),
# Sitemap
path(
"sitemap.xml",
sitemap,
{"sitemaps": sitemaps},
name="django.contrib.sitemaps.views.sitemap",
),
]
if settings.DEBUG:
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
# Serve static and media files from development server
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| {
"content_hash": "62b903b50c3bf178f5778567cd813176",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 86,
"avg_line_length": 35.26923076923077,
"alnum_prop": 0.678298800436205,
"repo_name": "manti-by/M2MICRO",
"id": "712ba01a9ca4656d0141cd1ddd49f41e6c08d480",
"size": "1834",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "app/core/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "13675"
},
{
"name": "Batchfile",
"bytes": "518"
},
{
"name": "CSS",
"bytes": "32089"
},
{
"name": "HTML",
"bytes": "53"
},
{
"name": "JavaScript",
"bytes": "30285"
},
{
"name": "PHP",
"bytes": "573567"
},
{
"name": "PLSQL",
"bytes": "910"
},
{
"name": "SQLPL",
"bytes": "17657"
},
{
"name": "Shell",
"bytes": "13408"
}
],
"symlink_target": ""
} |
from yapsy.IPlugin import IPlugin
from logbook.Importer import Plugin
from messages import TimeSeriesData,TimeSeriesMetaData,LogMetaData,UIData,TimeSeries
from sqlalchemy import *
import logging
from tools.profiling import timing
from PyQt5.QtWidgets import QLabel, QFormLayout, QLineEdit
class Swimming(IPlugin,Plugin):
def __init__(self,log_name=None,metadata=None):
self._actions=['import']
self._type=['swimming']
self.logging = logging.getLogger(__name__)
self._filename = log_name
self.file_table = None
if metadata:
self._metadata = LogMetaData(file_hash=metadata.file_hash,
date=metadata.creation_date,
name=metadata.event_name,
maintype=metadata.event_type,
subtype=metadata.event_subtype
)
self._formdata = []
self._formdata.append(TimeSeriesMetaData("Lap length",0,"m"))
self._formdata.append(TimeSeriesMetaData("Total Length",0,"m"))
self._formdata.append(TimeSeriesMetaData("Time per 100m","%.1f" %0,"s"))
self._formdata.append(TimeSeriesMetaData("average speed","%.1f" %0,"m/s"))
self._formdata.append(TimeSeriesMetaData("Total calories",0,"kcal"))
self._formdata.append(TimeSeriesMetaData("Event duration","%.1f" %0,"min"))
def open_logbook(self,filename):
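        """Open the SQLite logbook with SQLAlchemy and create the event_swimming table if it does not exist yet."""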
self._filename = filename
self._alchemy_logbook = create_engine('sqlite:///'+self._filename)
_metadata = MetaData(bind=self._alchemy_logbook)
self.file_table = Table('file', _metadata, autoload=True)
self.swim_table = Table("event_swimming",_metadata,
Column('event_swimming_id',Integer,primary_key=True),
Column('f_id',Integer,ForeignKey("file.file_id"), nullable=False),
Column('timestamp',DateTime),
Column('start_time',DateTime),
Column('swim_stroke',String(30)),
Column('total_calories',Integer),
Column('total_elapsed_time',Float),
Column('total_strokes',Integer),
Column('distance',Integer)
)
self.swim_table.create(checkfirst=True)
@timing
def import_fit(self,fitfile=None):
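        """Insert the FIT file's 'length' messages into event_swimming, then
        back-fill per-length distances from its 'record' messages."""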
stmt = self.file_table.select(self.file_table.c.file_hash==fitfile.digest)
row = stmt.execute().fetchone()
file_id = row.file_id
for record in fitfile.get_messages(["length"]):
event_timestamp = None
start_time = None
swim_stroke = None
total_calories = None
total_elapsed_time = None
total_strokes = None
distance=None
data = []
fields=0
for record_data in record:
if record_data.name == "timestamp":
event_timestamp = record_data.value
fields +=1
if record_data.name =="start_time":
start_time = record_data.value
fields +=1
if record_data.name == "swim_stroke":
swim_stroke = record_data.value
fields +=1
if record_data.name == "total_calories":
total_calories = record_data.value
fields +=1
if record_data.name == "total_strokes":
total_strokes = record_data.value
fields +=1
if record_data.name == "total_elapsed_time":
total_elapsed_time = record_data.value
fields +=1
if fields == 6:
data.append({'f_id':file_id,'timestamp':event_timestamp,
'start_time':start_time,'swim_stroke':swim_stroke,
'total_calories':total_calories,'total_elapsed_time':total_elapsed_time,
'total_strokes':total_strokes})
self._alchemy_logbook.execute(self.swim_table.insert(),data)
data=[]
for record in fitfile.get_messages(["record"]):
event_timestamp = None
distance = None
for record_data in record:
if record_data.name == "timestamp":
event_timestamp = record_data.value
if record_data.name == "distance":
distance = record_data.value
if event_timestamp and distance:
data.append({'evtimestamp':event_timestamp,'distance':distance})
stmt = self.swim_table.update().\
where(self.swim_table.c.timestamp==bindparam('evtimestamp')).\
values(distance=bindparam('distance'))
self._alchemy_logbook.execute(stmt,data)
@timing
def get_data(self,filehash):
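        """Build per-lap time series (strokes, average strokes, calories, pace) and summary metadata for one file hash."""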
s = self.swim_table.join(self.file_table).\
select().where(self.file_table.c.file_hash==filehash)
strokes_data = TimeSeriesData(name="strokes" ,labels=[],data=[],unit=None,xlabel="duration(min)")
avg_strokes = TimeSeriesData(name="avg strokes",labels=[],data=[],unit="Strokes/lap",xlabel="duration(min)")
calories_data = TimeSeriesData(name="calories",labels=[],data=[],unit=None,xlabel="duration(min)")
speed_data = TimeSeriesData(name="speed" ,labels=[],data=[],unit="min/100m",xlabel="duration(min)")
rows = 0
total_calories = 0
event_duration = 0
strokes_data.data.append(0)
strokes_data.labels.append(0)
avg_strokes.data.append(0)
avg_strokes.labels.append(0)
calories_data.data.append(0)
calories_data.labels.append(0)
speed_data.data.append(0)
speed_data.labels.append(0)
stro = 0
last_ts = 0
row = None
for row in self._alchemy_logbook.execute(s):
if row.total_strokes and row.distance and row.total_calories and row.total_elapsed_time:
rows = rows + 1
if last_ts == 0:
last_ts = row.timestamp
ts = ((row.timestamp-last_ts).seconds/60)
strokes_data.data.append(row.total_strokes)
strokes_data.labels.append(ts)
# strokes_data.labels.append(row.distance)
stro = stro + row.total_strokes
avg_strokes.data.append((stro/row.distance)*50)
avg_strokes.labels.append(ts)
# avg_strokes.labels.append(row.distance)
calories_data.data.append(row.total_calories)
calories_data.labels.append(ts)
# calories_data.labels.append(row.distance)
speed_data.data.append(((row.total_elapsed_time/50)*100)/60) #FIXME
speed_data.labels.append(ts)
# speed_data.labels.append(row.distance)
total_calories = total_calories + row.total_calories
event_duration = event_duration + row.total_elapsed_time
if row:
lap_distance = int(row.distance / rows)
total_length = row.distance
total_time = row.start_time
self._data = [strokes_data,calories_data,speed_data,avg_strokes]
time_per_hundred = (100/lap_distance)*(event_duration/lap_distance)
formdata = []
formdata.append(TimeSeriesMetaData("Lap length",lap_distance,"m"))
formdata.append(TimeSeriesMetaData("Total Length",total_length,"m"))
formdata.append(TimeSeriesMetaData("Time per 100m","%.1f" %time_per_hundred,"s"))
formdata.append(TimeSeriesMetaData("average speed","%.1f" %(total_length/event_duration),"m/s"))
formdata.append(TimeSeriesMetaData("Total calories",total_calories,"kcal"))
formdata.append(TimeSeriesMetaData("Event duration","%.1f" %(event_duration/60),"min"))
return TimeSeries(data=self._data,metadata=formdata)
@property
def ui(self):
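        """Build a Qt form layout with a label and line edit for each summary metadata field."""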
layout = QFormLayout()
labels=[]
fields=[]
if self._formdata:
for i in range(len(self._formdata)):
labels.append(QLabel(self._formdata[i].name+" ("+self._formdata[i].unit+")"))
fields.append(QLineEdit(str(self._formdata[i].value)))
layout.addRow(labels[-1],
fields[-1])
return UIData(ui=layout,labels=labels,fields=fields)
| {
"content_hash": "871da583e7ee588996c5c3ddd086829a",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 118,
"avg_line_length": 43.33796296296296,
"alnum_prop": 0.516718299326995,
"repo_name": "romses/FitView",
"id": "b91d249b7244981c0a637c3ad04b0ae685fc7620",
"size": "9361",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "logbook/Importer/swimming.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "499695"
}
],
"symlink_target": ""
} |
import unittest
from framework import TestCase
from earth.category import Category
class TestCategoryCase(TestCase):
def test_category(self):
# ADD
ctg_name = '社区'
ctg = Category.add(name=ctg_name)
assert ctg.id
assert ctg.name == ctg_name
assert ctg.pid == None
# UPDATE
ctg_movie = Category.add(name='电影社区', pid=ctg.id)
assert ctg_movie.pid == ctg.id
new_pid = 3
other_name = '其他社区'
ctg_movie.pid = 3
ctg_movie.name = other_name
ctg_movie.update()
ctg_get = Category.get_by_name(other_name)
assert ctg_get.id == ctg_movie.id
assert ctg_get.name == other_name
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "11b2e17b688bcbf3baa6e3f335100820",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 57,
"avg_line_length": 22.939393939393938,
"alnum_prop": 0.5772787318361955,
"repo_name": "tottily/terabithia",
"id": "f7a953bf9a4231184e7d491d942a955786ec9db0",
"size": "802",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "monster/test_category.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4364"
},
{
"name": "Python",
"bytes": "17015"
}
],
"symlink_target": ""
} |
"""
.. py:currentmodule:: __init__
:synopsis: Init for the package.
.. moduleauthor:: Hendrix Demers <[email protected]>
Init for the package.
"""
###############################################################################
# Copyright 2007 Hendrix Demers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# Standard library modules.
# Third party modules.
# Local modules.
# Project modules.
# Globals and constants variables. | {
"content_hash": "3a0c5234e47411646667dc4f8eee2ef2",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 79,
"avg_line_length": 30.264705882352942,
"alnum_prop": 0.6277939747327502,
"repo_name": "drix00/microanalysis_file_format",
"id": "39a1c38adcde014e7223139bc25599a60b273c7d",
"size": "1076",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "pySpectrumFileFormat/OxfordInstruments/INCA/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2367"
},
{
"name": "PLSQL",
"bytes": "129"
},
{
"name": "Python",
"bytes": "235002"
}
],
"symlink_target": ""
} |
# -*- coding: utf-8 -*-
import base64
import json
import os
import os.path
import shutil
import sys
import tarfile
import tempfile
import pytest
import six
try:
from ssl import OP_NO_SSLv3, OP_NO_SSLv2, OP_NO_TLSv1
except ImportError:
OP_NO_SSLv2 = 0x1000000
OP_NO_SSLv3 = 0x2000000
OP_NO_TLSv1 = 0x4000000
from docker.client import Client
from docker.constants import DEFAULT_DOCKER_API_VERSION
from docker.errors import DockerException, InvalidVersion
from docker.ssladapter import ssladapter
from docker.utils import (
parse_repository_tag, parse_host, convert_filters, kwargs_from_env,
create_host_config, Ulimit, LogConfig, parse_bytes, parse_env_file,
exclude_paths, convert_volume_binds, decode_json_header, tar,
split_command, create_ipam_config, create_ipam_pool, parse_devices,
)
from docker.utils.utils import create_endpoint_config
from docker.utils.ports import build_port_bindings, split_port
from .. import base
from ..helpers import make_tree
TEST_CERT_DIR = os.path.join(
os.path.dirname(__file__),
'testdata/certs',
)
class HostConfigTest(base.BaseTestCase):
def test_create_host_config_no_options(self):
config = create_host_config(version='1.19')
self.assertFalse('NetworkMode' in config)
def test_create_host_config_no_options_newer_api_version(self):
config = create_host_config(version='1.20')
self.assertEqual(config['NetworkMode'], 'default')
def test_create_host_config_invalid_cpu_cfs_types(self):
with pytest.raises(TypeError):
create_host_config(version='1.20', cpu_quota='0')
with pytest.raises(TypeError):
create_host_config(version='1.20', cpu_period='0')
with pytest.raises(TypeError):
create_host_config(version='1.20', cpu_quota=23.11)
with pytest.raises(TypeError):
create_host_config(version='1.20', cpu_period=1999.0)
def test_create_host_config_with_cpu_quota(self):
config = create_host_config(version='1.20', cpu_quota=1999)
self.assertEqual(config.get('CpuQuota'), 1999)
def test_create_host_config_with_cpu_period(self):
config = create_host_config(version='1.20', cpu_period=1999)
self.assertEqual(config.get('CpuPeriod'), 1999)
def test_create_host_config_with_shm_size(self):
config = create_host_config(version='1.22', shm_size=67108864)
self.assertEqual(config.get('ShmSize'), 67108864)
def test_create_host_config_with_shm_size_in_mb(self):
config = create_host_config(version='1.22', shm_size='64M')
self.assertEqual(config.get('ShmSize'), 67108864)
def test_create_host_config_with_oom_kill_disable(self):
config = create_host_config(version='1.20', oom_kill_disable=True)
self.assertEqual(config.get('OomKillDisable'), True)
self.assertRaises(
InvalidVersion, lambda: create_host_config(version='1.18.3',
oom_kill_disable=True))
def test_create_endpoint_config_with_aliases(self):
config = create_endpoint_config(version='1.22', aliases=['foo', 'bar'])
assert config == {'Aliases': ['foo', 'bar']}
with pytest.raises(InvalidVersion):
create_endpoint_config(version='1.21', aliases=['foo', 'bar'])
class UlimitTest(base.BaseTestCase):
def test_create_host_config_dict_ulimit(self):
ulimit_dct = {'name': 'nofile', 'soft': 8096}
config = create_host_config(
ulimits=[ulimit_dct], version=DEFAULT_DOCKER_API_VERSION
)
self.assertIn('Ulimits', config)
self.assertEqual(len(config['Ulimits']), 1)
ulimit_obj = config['Ulimits'][0]
self.assertTrue(isinstance(ulimit_obj, Ulimit))
self.assertEqual(ulimit_obj.name, ulimit_dct['name'])
self.assertEqual(ulimit_obj.soft, ulimit_dct['soft'])
self.assertEqual(ulimit_obj['Soft'], ulimit_obj.soft)
def test_create_host_config_dict_ulimit_capitals(self):
ulimit_dct = {'Name': 'nofile', 'Soft': 8096, 'Hard': 8096 * 4}
config = create_host_config(
ulimits=[ulimit_dct], version=DEFAULT_DOCKER_API_VERSION
)
self.assertIn('Ulimits', config)
self.assertEqual(len(config['Ulimits']), 1)
ulimit_obj = config['Ulimits'][0]
self.assertTrue(isinstance(ulimit_obj, Ulimit))
self.assertEqual(ulimit_obj.name, ulimit_dct['Name'])
self.assertEqual(ulimit_obj.soft, ulimit_dct['Soft'])
self.assertEqual(ulimit_obj.hard, ulimit_dct['Hard'])
self.assertEqual(ulimit_obj['Soft'], ulimit_obj.soft)
def test_create_host_config_obj_ulimit(self):
ulimit_dct = Ulimit(name='nofile', soft=8096)
config = create_host_config(
ulimits=[ulimit_dct], version=DEFAULT_DOCKER_API_VERSION
)
self.assertIn('Ulimits', config)
self.assertEqual(len(config['Ulimits']), 1)
ulimit_obj = config['Ulimits'][0]
self.assertTrue(isinstance(ulimit_obj, Ulimit))
self.assertEqual(ulimit_obj, ulimit_dct)
def test_ulimit_invalid_type(self):
self.assertRaises(ValueError, lambda: Ulimit(name=None))
self.assertRaises(ValueError, lambda: Ulimit(name='hello', soft='123'))
self.assertRaises(ValueError, lambda: Ulimit(name='hello', hard='456'))
class LogConfigTest(base.BaseTestCase):
def test_create_host_config_dict_logconfig(self):
dct = {'type': LogConfig.types.SYSLOG, 'config': {'key1': 'val1'}}
config = create_host_config(
version=DEFAULT_DOCKER_API_VERSION, log_config=dct
)
self.assertIn('LogConfig', config)
self.assertTrue(isinstance(config['LogConfig'], LogConfig))
self.assertEqual(dct['type'], config['LogConfig'].type)
def test_create_host_config_obj_logconfig(self):
obj = LogConfig(type=LogConfig.types.SYSLOG, config={'key1': 'val1'})
config = create_host_config(
version=DEFAULT_DOCKER_API_VERSION, log_config=obj
)
self.assertIn('LogConfig', config)
self.assertTrue(isinstance(config['LogConfig'], LogConfig))
self.assertEqual(obj, config['LogConfig'])
def test_logconfig_invalid_config_type(self):
with pytest.raises(ValueError):
LogConfig(type=LogConfig.types.JSON, config='helloworld')
class KwargsFromEnvTest(base.BaseTestCase):
def setUp(self):
self.os_environ = os.environ.copy()
def tearDown(self):
os.environ = self.os_environ
def test_kwargs_from_env_empty(self):
os.environ.update(DOCKER_HOST='',
DOCKER_CERT_PATH='')
os.environ.pop('DOCKER_TLS_VERIFY', None)
kwargs = kwargs_from_env()
self.assertEqual(None, kwargs.get('base_url'))
self.assertEqual(None, kwargs.get('tls'))
def test_kwargs_from_env_tls(self):
os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376',
DOCKER_CERT_PATH=TEST_CERT_DIR,
DOCKER_TLS_VERIFY='1')
kwargs = kwargs_from_env(assert_hostname=False)
self.assertEqual('https://192.168.59.103:2376', kwargs['base_url'])
self.assertTrue('ca.pem' in kwargs['tls'].ca_cert)
self.assertTrue('cert.pem' in kwargs['tls'].cert[0])
self.assertTrue('key.pem' in kwargs['tls'].cert[1])
self.assertEqual(False, kwargs['tls'].assert_hostname)
self.assertTrue(kwargs['tls'].verify)
try:
client = Client(**kwargs)
self.assertEqual(kwargs['base_url'], client.base_url)
self.assertEqual(kwargs['tls'].ca_cert, client.verify)
self.assertEqual(kwargs['tls'].cert, client.cert)
except TypeError as e:
self.fail(e)
def test_kwargs_from_env_tls_verify_false(self):
os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376',
DOCKER_CERT_PATH=TEST_CERT_DIR,
DOCKER_TLS_VERIFY='')
kwargs = kwargs_from_env(assert_hostname=True)
self.assertEqual('https://192.168.59.103:2376', kwargs['base_url'])
self.assertTrue('ca.pem' in kwargs['tls'].ca_cert)
self.assertTrue('cert.pem' in kwargs['tls'].cert[0])
self.assertTrue('key.pem' in kwargs['tls'].cert[1])
self.assertEqual(True, kwargs['tls'].assert_hostname)
self.assertEqual(False, kwargs['tls'].verify)
try:
client = Client(**kwargs)
self.assertEqual(kwargs['base_url'], client.base_url)
self.assertEqual(kwargs['tls'].cert, client.cert)
self.assertFalse(kwargs['tls'].verify)
except TypeError as e:
self.fail(e)
def test_kwargs_from_env_tls_verify_false_no_cert(self):
temp_dir = tempfile.mkdtemp()
cert_dir = os.path.join(temp_dir, '.docker')
shutil.copytree(TEST_CERT_DIR, cert_dir)
os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376',
HOME=temp_dir,
DOCKER_TLS_VERIFY='')
os.environ.pop('DOCKER_CERT_PATH', None)
kwargs = kwargs_from_env(assert_hostname=True)
self.assertEqual('https://192.168.59.103:2376', kwargs['base_url'])
self.assertTrue('ca.pem' in kwargs['tls'].ca_cert)
self.assertTrue('cert.pem' in kwargs['tls'].cert[0])
self.assertTrue('key.pem' in kwargs['tls'].cert[1])
self.assertEqual(True, kwargs['tls'].assert_hostname)
self.assertEqual(False, kwargs['tls'].verify)
try:
client = Client(**kwargs)
self.assertEqual(kwargs['base_url'], client.base_url)
self.assertEqual(kwargs['tls'].cert, client.cert)
self.assertFalse(kwargs['tls'].verify)
except TypeError as e:
self.fail(e)
def test_kwargs_from_env_no_cert_path(self):
try:
temp_dir = tempfile.mkdtemp()
cert_dir = os.path.join(temp_dir, '.docker')
shutil.copytree(TEST_CERT_DIR, cert_dir)
os.environ.update(HOME=temp_dir,
DOCKER_CERT_PATH='',
DOCKER_TLS_VERIFY='1')
kwargs = kwargs_from_env()
self.assertTrue(kwargs['tls'].verify)
self.assertIn(cert_dir, kwargs['tls'].ca_cert)
self.assertIn(cert_dir, kwargs['tls'].cert[0])
self.assertIn(cert_dir, kwargs['tls'].cert[1])
finally:
if temp_dir:
shutil.rmtree(temp_dir)
class ConverVolumeBindsTest(base.BaseTestCase):
def test_convert_volume_binds_empty(self):
self.assertEqual(convert_volume_binds({}), [])
self.assertEqual(convert_volume_binds([]), [])
def test_convert_volume_binds_list(self):
data = ['/a:/a:ro', '/b:/c:z']
self.assertEqual(convert_volume_binds(data), data)
def test_convert_volume_binds_complete(self):
data = {
'/mnt/vol1': {
'bind': '/data',
'mode': 'ro'
}
}
self.assertEqual(convert_volume_binds(data), ['/mnt/vol1:/data:ro'])
def test_convert_volume_binds_compact(self):
data = {
'/mnt/vol1': '/data'
}
self.assertEqual(convert_volume_binds(data), ['/mnt/vol1:/data:rw'])
def test_convert_volume_binds_no_mode(self):
data = {
'/mnt/vol1': {
'bind': '/data'
}
}
self.assertEqual(convert_volume_binds(data), ['/mnt/vol1:/data:rw'])
def test_convert_volume_binds_unicode_bytes_input(self):
if six.PY2:
expected = [unicode('/mnt/지연:/unicode/박:rw', 'utf-8')]
data = {
'/mnt/지연': {
'bind': '/unicode/박',
'mode': 'rw'
}
}
self.assertEqual(
convert_volume_binds(data), expected
)
else:
expected = ['/mnt/지연:/unicode/박:rw']
data = {
bytes('/mnt/지연', 'utf-8'): {
'bind': bytes('/unicode/박', 'utf-8'),
'mode': 'rw'
}
}
self.assertEqual(
convert_volume_binds(data), expected
)
def test_convert_volume_binds_unicode_unicode_input(self):
if six.PY2:
expected = [unicode('/mnt/지연:/unicode/박:rw', 'utf-8')]
data = {
unicode('/mnt/지연', 'utf-8'): {
'bind': unicode('/unicode/박', 'utf-8'),
'mode': 'rw'
}
}
self.assertEqual(
convert_volume_binds(data), expected
)
else:
expected = ['/mnt/지연:/unicode/박:rw']
data = {
'/mnt/지연': {
'bind': '/unicode/박',
'mode': 'rw'
}
}
self.assertEqual(
convert_volume_binds(data), expected
)
class ParseEnvFileTest(base.BaseTestCase):
def generate_tempfile(self, file_content=None):
"""
Generates a temporary file for tests with the content
of 'file_content' and returns the filename.
Don't forget to unlink the file with os.unlink() after.
"""
local_tempfile = tempfile.NamedTemporaryFile(delete=False)
local_tempfile.write(file_content.encode('UTF-8'))
local_tempfile.close()
return local_tempfile.name
def test_parse_env_file_proper(self):
env_file = self.generate_tempfile(
file_content='USER=jdoe\nPASS=secret')
get_parse_env_file = parse_env_file(env_file)
self.assertEqual(get_parse_env_file,
{'USER': 'jdoe', 'PASS': 'secret'})
os.unlink(env_file)
def test_parse_env_file_commented_line(self):
env_file = self.generate_tempfile(
file_content='USER=jdoe\n#PASS=secret')
        get_parse_env_file = parse_env_file(env_file)
self.assertEqual(get_parse_env_file, {'USER': 'jdoe'})
os.unlink(env_file)
def test_parse_env_file_invalid_line(self):
env_file = self.generate_tempfile(
file_content='USER jdoe')
self.assertRaises(
DockerException, parse_env_file, env_file)
os.unlink(env_file)
class ParseHostTest(base.BaseTestCase):
def test_parse_host(self):
invalid_hosts = [
'0.0.0.0',
'tcp://',
'udp://127.0.0.1',
'udp://127.0.0.1:2375',
]
valid_hosts = {
'0.0.0.1:5555': 'http://0.0.0.1:5555',
':6666': 'http://127.0.0.1:6666',
'tcp://:7777': 'http://127.0.0.1:7777',
'http://:7777': 'http://127.0.0.1:7777',
'https://kokia.jp:2375': 'https://kokia.jp:2375',
'unix:///var/run/docker.sock': 'http+unix:///var/run/docker.sock',
'unix://': 'http+unix://var/run/docker.sock',
'somehost.net:80/service/swarm': (
'http://somehost.net:80/service/swarm'
),
}
for host in invalid_hosts:
with pytest.raises(DockerException):
parse_host(host, None)
for host, expected in valid_hosts.items():
self.assertEqual(parse_host(host, None), expected, msg=host)
def test_parse_host_empty_value(self):
unix_socket = 'http+unix://var/run/docker.sock'
tcp_port = 'http://127.0.0.1:2375'
for val in [None, '']:
for platform in ['darwin', 'linux2', None]:
assert parse_host(val, platform) == unix_socket
assert parse_host(val, 'win32') == tcp_port
def test_parse_host_tls(self):
host_value = 'myhost.docker.net:3348'
expected_result = 'https://myhost.docker.net:3348'
self.assertEqual(parse_host(host_value, None, True), expected_result)
class ParseRepositoryTagTest(base.BaseTestCase):
sha = 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
def test_index_image_no_tag(self):
self.assertEqual(
parse_repository_tag("root"), ("root", None)
)
def test_index_image_tag(self):
self.assertEqual(
parse_repository_tag("root:tag"), ("root", "tag")
)
def test_index_user_image_no_tag(self):
self.assertEqual(
parse_repository_tag("user/repo"), ("user/repo", None)
)
def test_index_user_image_tag(self):
self.assertEqual(
parse_repository_tag("user/repo:tag"), ("user/repo", "tag")
)
def test_private_reg_image_no_tag(self):
self.assertEqual(
parse_repository_tag("url:5000/repo"), ("url:5000/repo", None)
)
def test_private_reg_image_tag(self):
self.assertEqual(
parse_repository_tag("url:5000/repo:tag"), ("url:5000/repo", "tag")
)
def test_index_image_sha(self):
self.assertEqual(
parse_repository_tag("root@sha256:{0}".format(self.sha)),
("root", "sha256:{0}".format(self.sha))
)
def test_private_reg_image_sha(self):
self.assertEqual(
parse_repository_tag("url:5000/repo@sha256:{0}".format(self.sha)),
("url:5000/repo", "sha256:{0}".format(self.sha))
)
class ParseDeviceTest(base.BaseTestCase):
def test_dict(self):
devices = parse_devices([{
'PathOnHost': '/dev/sda1',
'PathInContainer': '/dev/mnt1',
'CgroupPermissions': 'r'
}])
self.assertEqual(devices[0], {
'PathOnHost': '/dev/sda1',
'PathInContainer': '/dev/mnt1',
'CgroupPermissions': 'r'
})
def test_partial_string_definition(self):
devices = parse_devices(['/dev/sda1'])
self.assertEqual(devices[0], {
'PathOnHost': '/dev/sda1',
'PathInContainer': '/dev/sda1',
'CgroupPermissions': 'rwm'
})
def test_permissionless_string_definition(self):
devices = parse_devices(['/dev/sda1:/dev/mnt1'])
self.assertEqual(devices[0], {
'PathOnHost': '/dev/sda1',
'PathInContainer': '/dev/mnt1',
'CgroupPermissions': 'rwm'
})
def test_full_string_definition(self):
devices = parse_devices(['/dev/sda1:/dev/mnt1:r'])
self.assertEqual(devices[0], {
'PathOnHost': '/dev/sda1',
'PathInContainer': '/dev/mnt1',
'CgroupPermissions': 'r'
})
def test_hybrid_list(self):
devices = parse_devices([
'/dev/sda1:/dev/mnt1:rw',
{
'PathOnHost': '/dev/sda2',
'PathInContainer': '/dev/mnt2',
'CgroupPermissions': 'r'
}
])
self.assertEqual(devices[0], {
'PathOnHost': '/dev/sda1',
'PathInContainer': '/dev/mnt1',
'CgroupPermissions': 'rw'
})
self.assertEqual(devices[1], {
'PathOnHost': '/dev/sda2',
'PathInContainer': '/dev/mnt2',
'CgroupPermissions': 'r'
})
class ParseBytesTest(base.BaseTestCase):
def test_parse_bytes_valid(self):
self.assertEqual(parse_bytes("512MB"), 536870912)
self.assertEqual(parse_bytes("512M"), 536870912)
self.assertEqual(parse_bytes("512m"), 536870912)
def test_parse_bytes_invalid(self):
self.assertRaises(DockerException, parse_bytes, "512MK")
self.assertRaises(DockerException, parse_bytes, "512L")
self.assertRaises(DockerException, parse_bytes, "127.0.0.1K")
def test_parse_bytes_float(self):
self.assertRaises(DockerException, parse_bytes, "1.5k")
def test_parse_bytes_maxint(self):
self.assertEqual(
parse_bytes("{0}k".format(sys.maxsize)), sys.maxsize * 1024
)
class UtilsTest(base.BaseTestCase):
longMessage = True
def test_convert_filters(self):
tests = [
({'dangling': True}, '{"dangling": ["true"]}'),
({'dangling': "true"}, '{"dangling": ["true"]}'),
({'exited': 0}, '{"exited": [0]}'),
({'exited': [0, 1]}, '{"exited": [0, 1]}'),
]
for filters, expected in tests:
self.assertEqual(convert_filters(filters), expected)
def test_decode_json_header(self):
obj = {'a': 'b', 'c': 1}
data = None
if six.PY3:
data = base64.urlsafe_b64encode(bytes(json.dumps(obj), 'utf-8'))
else:
data = base64.urlsafe_b64encode(json.dumps(obj))
decoded_data = decode_json_header(data)
self.assertEqual(obj, decoded_data)
def test_create_ipam_config(self):
ipam_pool = create_ipam_pool(subnet='192.168.52.0/24',
gateway='192.168.52.254')
ipam_config = create_ipam_config(pool_configs=[ipam_pool])
self.assertEqual(ipam_config, {
'Driver': 'default',
'Config': [{
'Subnet': '192.168.52.0/24',
'Gateway': '192.168.52.254',
'AuxiliaryAddresses': None,
'IPRange': None,
}]
})
class SplitCommandTest(base.BaseTestCase):
def test_split_command_with_unicode(self):
if six.PY2:
self.assertEqual(
split_command(unicode('echo μμ', 'utf-8')),
['echo', 'μμ']
)
else:
self.assertEqual(split_command('echo μμ'), ['echo', 'μμ'])
@pytest.mark.skipif(six.PY3, reason="shlex doesn't support bytes in py3")
def test_split_command_with_bytes(self):
self.assertEqual(split_command('echo μμ'), ['echo', 'μμ'])
class PortsTest(base.BaseTestCase):
def test_split_port_with_host_ip(self):
internal_port, external_port = split_port("127.0.0.1:1000:2000")
self.assertEqual(internal_port, ["2000"])
self.assertEqual(external_port, [("127.0.0.1", "1000")])
def test_split_port_with_protocol(self):
internal_port, external_port = split_port("127.0.0.1:1000:2000/udp")
self.assertEqual(internal_port, ["2000/udp"])
self.assertEqual(external_port, [("127.0.0.1", "1000")])
def test_split_port_with_host_ip_no_port(self):
internal_port, external_port = split_port("127.0.0.1::2000")
self.assertEqual(internal_port, ["2000"])
self.assertEqual(external_port, [("127.0.0.1", None)])
def test_split_port_range_with_host_ip_no_port(self):
internal_port, external_port = split_port("127.0.0.1::2000-2001")
self.assertEqual(internal_port, ["2000", "2001"])
self.assertEqual(external_port,
[("127.0.0.1", None), ("127.0.0.1", None)])
def test_split_port_with_host_port(self):
internal_port, external_port = split_port("1000:2000")
self.assertEqual(internal_port, ["2000"])
self.assertEqual(external_port, ["1000"])
def test_split_port_range_with_host_port(self):
internal_port, external_port = split_port("1000-1001:2000-2001")
self.assertEqual(internal_port, ["2000", "2001"])
self.assertEqual(external_port, ["1000", "1001"])
def test_split_port_no_host_port(self):
internal_port, external_port = split_port("2000")
self.assertEqual(internal_port, ["2000"])
self.assertEqual(external_port, None)
def test_split_port_range_no_host_port(self):
internal_port, external_port = split_port("2000-2001")
self.assertEqual(internal_port, ["2000", "2001"])
self.assertEqual(external_port, None)
def test_split_port_range_with_protocol(self):
internal_port, external_port = split_port(
"127.0.0.1:1000-1001:2000-2001/udp")
self.assertEqual(internal_port, ["2000/udp", "2001/udp"])
self.assertEqual(external_port,
[("127.0.0.1", "1000"), ("127.0.0.1", "1001")])
def test_split_port_invalid(self):
self.assertRaises(ValueError,
lambda: split_port("0.0.0.0:1000:2000:tcp"))
def test_non_matching_length_port_ranges(self):
self.assertRaises(
ValueError,
lambda: split_port("0.0.0.0:1000-1010:2000-2002/tcp")
)
def test_port_and_range_invalid(self):
self.assertRaises(ValueError,
lambda: split_port("0.0.0.0:1000:2000-2002/tcp"))
def test_port_only_with_colon(self):
self.assertRaises(ValueError,
lambda: split_port(":80"))
def test_host_only_with_colon(self):
self.assertRaises(ValueError,
lambda: split_port("localhost:"))
def test_build_port_bindings_with_one_port(self):
port_bindings = build_port_bindings(["127.0.0.1:1000:1000"])
self.assertEqual(port_bindings["1000"], [("127.0.0.1", "1000")])
def test_build_port_bindings_with_matching_internal_ports(self):
port_bindings = build_port_bindings(
["127.0.0.1:1000:1000", "127.0.0.1:2000:1000"])
self.assertEqual(port_bindings["1000"],
[("127.0.0.1", "1000"), ("127.0.0.1", "2000")])
def test_build_port_bindings_with_nonmatching_internal_ports(self):
port_bindings = build_port_bindings(
["127.0.0.1:1000:1000", "127.0.0.1:2000:2000"])
self.assertEqual(port_bindings["1000"], [("127.0.0.1", "1000")])
self.assertEqual(port_bindings["2000"], [("127.0.0.1", "2000")])
def test_build_port_bindings_with_port_range(self):
port_bindings = build_port_bindings(["127.0.0.1:1000-1001:1000-1001"])
self.assertEqual(port_bindings["1000"], [("127.0.0.1", "1000")])
self.assertEqual(port_bindings["1001"], [("127.0.0.1", "1001")])
def test_build_port_bindings_with_matching_internal_port_ranges(self):
port_bindings = build_port_bindings(
["127.0.0.1:1000-1001:1000-1001", "127.0.0.1:2000-2001:1000-1001"])
self.assertEqual(port_bindings["1000"],
[("127.0.0.1", "1000"), ("127.0.0.1", "2000")])
self.assertEqual(port_bindings["1001"],
[("127.0.0.1", "1001"), ("127.0.0.1", "2001")])
def test_build_port_bindings_with_nonmatching_internal_port_ranges(self):
port_bindings = build_port_bindings(
["127.0.0.1:1000:1000", "127.0.0.1:2000:2000"])
self.assertEqual(port_bindings["1000"], [("127.0.0.1", "1000")])
self.assertEqual(port_bindings["2000"], [("127.0.0.1", "2000")])
class ExcludePathsTest(base.BaseTestCase):
dirs = [
'foo',
'foo/bar',
'bar',
]
files = [
'Dockerfile',
'Dockerfile.alt',
'.dockerignore',
'a.py',
'a.go',
'b.py',
'cde.py',
'foo/a.py',
'foo/b.py',
'foo/bar/a.py',
'bar/a.py',
'foo/Dockerfile3',
]
all_paths = set(dirs + files)
def setUp(self):
self.base = make_tree(self.dirs, self.files)
def tearDown(self):
shutil.rmtree(self.base)
def exclude(self, patterns, dockerfile=None):
return set(exclude_paths(self.base, patterns, dockerfile=dockerfile))
def test_no_excludes(self):
assert self.exclude(['']) == self.all_paths
def test_no_dupes(self):
paths = exclude_paths(self.base, ['!a.py'])
assert sorted(paths) == sorted(set(paths))
def test_wildcard_exclude(self):
assert self.exclude(['*']) == set(['Dockerfile', '.dockerignore'])
def test_exclude_dockerfile_dockerignore(self):
"""
Even if the .dockerignore file explicitly says to exclude
Dockerfile and/or .dockerignore, don't exclude them from
the actual tar file.
"""
assert self.exclude(['Dockerfile', '.dockerignore']) == self.all_paths
def test_exclude_custom_dockerfile(self):
"""
If we're using a custom Dockerfile, make sure that's not
excluded.
"""
assert self.exclude(['*'], dockerfile='Dockerfile.alt') == \
set(['Dockerfile.alt', '.dockerignore'])
assert self.exclude(['*'], dockerfile='foo/Dockerfile3') == \
set(['foo/Dockerfile3', '.dockerignore'])
def test_exclude_dockerfile_child(self):
includes = self.exclude(['foo/'], dockerfile='foo/Dockerfile3')
assert 'foo/Dockerfile3' in includes
assert 'foo/a.py' not in includes
def test_single_filename(self):
assert self.exclude(['a.py']) == self.all_paths - set(['a.py'])
# As odd as it sounds, a filename pattern with a trailing slash on the
# end *will* result in that file being excluded.
def test_single_filename_trailing_slash(self):
assert self.exclude(['a.py/']) == self.all_paths - set(['a.py'])
def test_wildcard_filename_start(self):
assert self.exclude(['*.py']) == self.all_paths - set([
'a.py', 'b.py', 'cde.py',
])
def test_wildcard_with_exception(self):
assert self.exclude(['*.py', '!b.py']) == self.all_paths - set([
'a.py', 'cde.py',
])
def test_wildcard_with_wildcard_exception(self):
assert self.exclude(['*.*', '!*.go']) == self.all_paths - set([
'a.py', 'b.py', 'cde.py', 'Dockerfile.alt',
])
def test_wildcard_filename_end(self):
assert self.exclude(['a.*']) == self.all_paths - set(['a.py', 'a.go'])
def test_question_mark(self):
assert self.exclude(['?.py']) == self.all_paths - set(['a.py', 'b.py'])
def test_single_subdir_single_filename(self):
assert self.exclude(['foo/a.py']) == self.all_paths - set(['foo/a.py'])
def test_single_subdir_wildcard_filename(self):
assert self.exclude(['foo/*.py']) == self.all_paths - set([
'foo/a.py', 'foo/b.py',
])
def test_wildcard_subdir_single_filename(self):
assert self.exclude(['*/a.py']) == self.all_paths - set([
'foo/a.py', 'bar/a.py',
])
def test_wildcard_subdir_wildcard_filename(self):
assert self.exclude(['*/*.py']) == self.all_paths - set([
'foo/a.py', 'foo/b.py', 'bar/a.py',
])
def test_directory(self):
assert self.exclude(['foo']) == self.all_paths - set([
'foo', 'foo/a.py', 'foo/b.py',
'foo/bar', 'foo/bar/a.py', 'foo/Dockerfile3'
])
def test_directory_with_trailing_slash(self):
        assert self.exclude(['foo/']) == self.all_paths - set([
'foo', 'foo/a.py', 'foo/b.py',
'foo/bar', 'foo/bar/a.py', 'foo/Dockerfile3'
])
def test_directory_with_single_exception(self):
assert self.exclude(['foo', '!foo/bar/a.py']) == self.all_paths - set([
'foo/a.py', 'foo/b.py', 'foo', 'foo/bar',
'foo/Dockerfile3'
])
def test_directory_with_subdir_exception(self):
assert self.exclude(['foo', '!foo/bar']) == self.all_paths - set([
'foo/a.py', 'foo/b.py', 'foo',
'foo/Dockerfile3'
])
def test_directory_with_wildcard_exception(self):
assert self.exclude(['foo', '!foo/*.py']) == self.all_paths - set([
'foo/bar', 'foo/bar/a.py', 'foo',
'foo/Dockerfile3'
])
def test_subdirectory(self):
assert self.exclude(['foo/bar']) == self.all_paths - set([
'foo/bar', 'foo/bar/a.py',
])
class TarTest(base.Cleanup, base.BaseTestCase):
def test_tar_with_excludes(self):
dirs = [
'foo',
'foo/bar',
'bar',
]
files = [
'Dockerfile',
'Dockerfile.alt',
'.dockerignore',
'a.py',
'a.go',
'b.py',
'cde.py',
'foo/a.py',
'foo/b.py',
'foo/bar/a.py',
'bar/a.py',
]
exclude = [
'*.py',
'!b.py',
'!a.go',
'foo',
'Dockerfile*',
'.dockerignore',
]
expected_names = set([
'Dockerfile',
'.dockerignore',
'a.go',
'b.py',
'bar',
'bar/a.py',
])
base = make_tree(dirs, files)
self.addCleanup(shutil.rmtree, base)
with tar(base, exclude=exclude) as archive:
tar_data = tarfile.open(fileobj=archive)
assert sorted(tar_data.getnames()) == sorted(expected_names)
def test_tar_with_empty_directory(self):
base = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base)
for d in ['foo', 'bar']:
os.makedirs(os.path.join(base, d))
with tar(base) as archive:
tar_data = tarfile.open(fileobj=archive)
self.assertEqual(sorted(tar_data.getnames()), ['bar', 'foo'])
def test_tar_with_file_symlinks(self):
base = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base)
with open(os.path.join(base, 'foo'), 'w') as f:
f.write("content")
os.makedirs(os.path.join(base, 'bar'))
os.symlink('../foo', os.path.join(base, 'bar/foo'))
with tar(base) as archive:
tar_data = tarfile.open(fileobj=archive)
self.assertEqual(
sorted(tar_data.getnames()), ['bar', 'bar/foo', 'foo']
)
def test_tar_with_directory_symlinks(self):
base = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base)
for d in ['foo', 'bar']:
os.makedirs(os.path.join(base, d))
os.symlink('../foo', os.path.join(base, 'bar/foo'))
with tar(base) as archive:
tar_data = tarfile.open(fileobj=archive)
self.assertEqual(
sorted(tar_data.getnames()), ['bar', 'bar/foo', 'foo']
)
class SSLAdapterTest(base.BaseTestCase):
def test_only_uses_tls(self):
ssl_context = ssladapter.urllib3.util.ssl_.create_urllib3_context()
assert ssl_context.options & OP_NO_SSLv3
assert ssl_context.options & OP_NO_SSLv2
assert not ssl_context.options & OP_NO_TLSv1
| {
"content_hash": "a8d04d9f4001c1a7e5685db3daeb5e7e",
"timestamp": "",
"source": "github",
"line_count": 956,
"max_line_length": 79,
"avg_line_length": 36.13284518828452,
"alnum_prop": 0.5668876472802015,
"repo_name": "mark-adams/docker-py",
"id": "87796d11752aa87ed4de8b7629080ccffce8a6c0",
"size": "34603",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/unit/utils_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2227"
},
{
"name": "Python",
"bytes": "353887"
}
],
"symlink_target": ""
} |
import numpy as np
import matplotlib.pyplot as plt
from directionFinder_backend.correlator import Correlator
import scipy.signal
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
if __name__ == '__main__':
c = Correlator()
c.fetch_time_domain_snapshot(force=True)
time_domain_padding = 10
fs = 800e6
upsample_factor = 100
a_idx = 0
b_idx = 1
a = np.concatenate(
(np.zeros(time_domain_padding),
c.time_domain_signals[a_idx],
np.zeros(time_domain_padding)))
a_time = np.linspace(-(time_domain_padding/fs),
(len(a)-time_domain_padding)/fs,
len(a),
endpoint=False)
b = c.time_domain_signals[b_idx]
b_time = np.linspace(0,
len(b)/fs,
len(b),
endpoint=False)
correlation = np.correlate(a, b, mode='valid')
correlation_time = np.linspace(a_time[0] - b_time[0],
a_time[-1] - b_time[-1],
len(correlation),
endpoint=True)
correlation_upped, correlation_time_upped = scipy.signal.resample(
correlation,
len(correlation)*upsample_factor,
t = correlation_time)
# normalise
correlation_upped /= max(correlation)
correlation /= max(correlation)
correlation_time *= 1e9
correlation_time_upped *= 1e9
fig = plt.figure()
ax1 = fig.gca()
ax1.plot(correlation_time_upped, correlation_upped, color='b', linewidth=2, label="Upsampled")
ax1.plot(correlation_time, correlation, color='r', linewidth=2, marker='.', markersize=15, label="Raw")
xy_before = (correlation_time[np.argmax(correlation)-1], correlation[np.argmax(correlation)-1])
ax1.annotate('higher', xy=xy_before,
xytext=(0.2, 0.4), textcoords='axes fraction',
arrowprops=dict(facecolor='black', shrink=0.09, width=2),
horizontalalignment='right', verticalalignment='top',)
xy_after = (correlation_time[np.argmax(correlation)+1], correlation[np.argmax(correlation)+1])
ax1.annotate('lower', xy=xy_after,
xytext=(0.6, 0.3), textcoords='axes fraction',
arrowprops=dict(facecolor='black', shrink=0.09, width=2),
horizontalalignment='right', verticalalignment='top',)
axins = zoomed_inset_axes(ax1, 9, loc=1)
axins.plot(correlation_time_upped, correlation_upped, color='b', marker='.', linewidth=2, label="Upsampled")
axins.plot(correlation_time, correlation, color='r', linewidth=2, marker='.', markersize=15, label="Raw")
axins.set_xlim(-3.2, -2.2)
axins.set_ylim(0.97, 1.05)
axins.xaxis.set_ticks(np.arange(-3.4, -1.9, 0.4))
#plt.xticks(visible=False)
plt.yticks(visible=False)
mark_inset(ax1, axins, loc1=2, loc2=3, fc='none', ec='0.5')
ax1.set_title("Comparison of raw time domain cross correlation with upsampled version")
ax1.set_xlabel("Time shift (ns)")
ax1.set_ylabel("Cross correlation value (normalised)")
ax1.legend(loc=2)
plt.show()
| {
"content_hash": "c80b50b5e3ea01424cfb835f640b2c02",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 112,
"avg_line_length": 43.17333333333333,
"alnum_prop": 0.6065472513897467,
"repo_name": "jgowans/correlation_plotter",
"id": "5a0011f2e3058d2f6f22a732704f80aa208ca483",
"size": "3261",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "time_domain_cross_upsampled.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "83593"
}
],
"symlink_target": ""
} |
from __future__ import division, print_function , unicode_literals, absolute_import
import os, sys, subprocess
crow_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
eigen_cflags = ""
try:
has_pkg_eigen = subprocess.call(["pkg-config","--exists","eigen3"]) == 0
except:
has_pkg_eigen = False
if has_pkg_eigen:
eigen_cflags = subprocess.check_output(["pkg-config","eigen3","--cflags"])
libmesh_eigen = os.path.abspath(os.path.join(crow_dir,os.pardir,"moose","libmesh","contrib","eigen","eigen"))
if os.path.exists(libmesh_eigen):
eigen_cflags = "-I"+libmesh_eigen
if os.path.exists(os.path.join(crow_dir,"contrib","include","Eigen")):
eigen_cflags = ""
print(eigen_cflags)
| {
"content_hash": "5fbbb3420fc6a45b29e42a08857a4893",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 109,
"avg_line_length": 27.23076923076923,
"alnum_prop": 0.6949152542372882,
"repo_name": "joshua-cogliati-inl/raven",
"id": "86779741a78857a3581a50e6664461c19c447f29",
"size": "726",
"binary": false,
"copies": "2",
"ref": "refs/heads/devel",
"path": "scripts/find_eigen.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1556080"
},
{
"name": "Batchfile",
"bytes": "1095"
},
{
"name": "C",
"bytes": "148504"
},
{
"name": "C++",
"bytes": "48279546"
},
{
"name": "CMake",
"bytes": "9998"
},
{
"name": "Jupyter Notebook",
"bytes": "84202"
},
{
"name": "MATLAB",
"bytes": "202335"
},
{
"name": "Makefile",
"bytes": "2399"
},
{
"name": "Perl",
"bytes": "1297"
},
{
"name": "Python",
"bytes": "6952659"
},
{
"name": "R",
"bytes": "67"
},
{
"name": "SWIG",
"bytes": "8574"
},
{
"name": "Shell",
"bytes": "124279"
},
{
"name": "TeX",
"bytes": "479725"
}
],
"symlink_target": ""
} |
from octopus.core import app
from service import dao
from octopus.lib import dataobj
class ContentLog(dataobj.DataObj, dao.ContentLogDAO):
'''
{
"id" : "<unique persistent account id>",
"created_date" : "<date account created>",
"last_updated" : "<date account last modified>",
"user" : "<user that requested the content>",
"notification": "<the notification the requested content is from>",
"filename": ">the requested filename if any",
"delivered_from" : "<one of store, proxy, notfound>",
}
'''
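    # A minimal usage sketch (not from the original module; it assumes the
    # DataObj base class accepts attribute assignment as below and that save()
    # is provided by the DAO mixin):
    #
    #   log = ContentLog()
    #   log.user = "<account id>"
    #   log.notification = "<notification id>"
    #   log.filename = "article.pdf"
    #   log.delivered_from = "proxy"
    #   log.save()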
@property
def user(self):
return self._get_single("user", coerce=self._utf8_unicode())
@user.setter
def user(self, user):
self._set_single("user", user, coerce=self._utf8_unicode())
@property
def notification(self):
return self._get_single("notification", coerce=self._utf8_unicode())
    @notification.setter
def notification(self, notification):
self._set_single("notification", notification, coerce=self._utf8_unicode())
@property
def filename(self):
return self._get_single("filename", coerce=self._utf8_unicode())
    @filename.setter
def filename(self, filename):
self._set_single("filename", filename, coerce=self._utf8_unicode())
@property
def delivered_from(self):
return self._get_single("delivered_from", coerce=self._utf8_unicode())
    @delivered_from.setter
def delivered_from(self, delivered_from):
self._set_single("delivered_from", delivered_from, coerce=self._utf8_unicode())
| {
"content_hash": "b778b42f5313b9f441e4b361aadb980e",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 87,
"avg_line_length": 32,
"alnum_prop": 0.6403061224489796,
"repo_name": "JiscPER/jper",
"id": "a659421695ed3209acbfbc67d872174256c7258e",
"size": "1569",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "service/models/contentlog.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8416"
},
{
"name": "HTML",
"bytes": "85910"
},
{
"name": "JavaScript",
"bytes": "31030"
},
{
"name": "Python",
"bytes": "489883"
},
{
"name": "Shell",
"bytes": "6559"
}
],
"symlink_target": ""
} |
from random import randrange, shuffle
get_alpha = lambda: chr(randrange(97, 123))
get_word = lambda: ''.join([get_alpha() for i in range(randrange(4, 10))])
get_article = lambda: ' '.join([' '.join([get_word()] * randrange(10))
for i in range(randrange(2000, 3000))])
def get_words():
words = get_article().split()
shuffle(words)
return words
| {
"content_hash": "89e77c2ec21206bd1eaa18892f9cccf0",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 74,
"avg_line_length": 21.58823529411765,
"alnum_prop": 0.6403269754768393,
"repo_name": "phyng/c-lang",
"id": "1ec14e3658eeb4de6097f9c84f0716c99702833c",
"size": "368",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chapter6/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "27753"
},
{
"name": "Python",
"bytes": "4978"
},
{
"name": "Shell",
"bytes": "59"
}
],
"symlink_target": ""
} |
import six
from six.moves import range
from webob import exc
from nova import context
from nova import exception
from nova.i18n import _
from nova import objects
from nova import utils
CHUNKS = 4
CHUNK_LENGTH = 255
MAX_SIZE = CHUNKS * CHUNK_LENGTH
def extract_password(instance):
result = ''
sys_meta = utils.instance_sys_meta(instance)
for key in sorted(sys_meta.keys()):
if key.startswith('password_'):
result += sys_meta[key]
return result or None
def convert_password(context, password):
"""Stores password as system_metadata items.
Password is stored with the keys 'password_0' -> 'password_3'.
"""
password = password or ''
if six.PY3 and isinstance(password, bytes):
password = password.decode('utf-8')
meta = {}
for i in range(CHUNKS):
meta['password_%d' % i] = password[:CHUNK_LENGTH]
password = password[CHUNK_LENGTH:]
return meta
def handle_password(req, meta_data):
ctxt = context.get_admin_context()
if req.method == 'GET':
return meta_data.password
elif req.method == 'POST':
# NOTE(vish): The conflict will only happen once the metadata cache
# updates, but it isn't a huge issue if it can be set for
# a short window.
if meta_data.password:
raise exc.HTTPConflict()
if (req.content_length > MAX_SIZE or len(req.body) > MAX_SIZE):
msg = _("Request is too large.")
raise exc.HTTPBadRequest(explanation=msg)
im = objects.InstanceMapping.get_by_instance_uuid(ctxt, meta_data.uuid)
with context.target_cell(ctxt, im.cell_mapping) as cctxt:
try:
instance = objects.Instance.get_by_uuid(cctxt, meta_data.uuid)
except exception.InstanceNotFound as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
instance.system_metadata.update(convert_password(ctxt, req.body))
instance.save()
else:
msg = _("GET and POST only are supported.")
raise exc.HTTPBadRequest(explanation=msg)
| {
"content_hash": "38d7e35ccf645d5670bd3b6e19e23acd",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 79,
"avg_line_length": 32.15151515151515,
"alnum_prop": 0.6357210179076344,
"repo_name": "gooddata/openstack-nova",
"id": "c906de78908c479288bf20b19477fca0c752ab61",
"size": "2750",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/api/metadata/password.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3858"
},
{
"name": "HTML",
"bytes": "1386"
},
{
"name": "PHP",
"bytes": "43584"
},
{
"name": "Python",
"bytes": "23012372"
},
{
"name": "Shell",
"bytes": "32567"
},
{
"name": "Smarty",
"bytes": "429290"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
# Module API
from .enum import check_enum
from .maximum import check_maximum
from .maxLength import check_maxLength
from .minimum import check_minimum
from .minLength import check_minLength
from .pattern import check_pattern
from .required import check_required
from .unique import check_unique
| {
"content_hash": "766275695f40b516c88bf07285726a4a",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 39,
"avg_line_length": 27.9375,
"alnum_prop": 0.7986577181208053,
"repo_name": "okfn/json-table-schema-py",
"id": "5854d663b216421c30284b30a840097b8fc105bc",
"size": "471",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "tableschema/constraints/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "453"
},
{
"name": "Python",
"bytes": "134974"
}
],
"symlink_target": ""
} |
import os, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
from pybullet_envs.bullet.kukaCamGymEnv import KukaCamGymEnv
import time
def main():
environment = KukaCamGymEnv(renders=True, isDiscrete=False)
motorsIds = []
#motorsIds.append(environment._p.addUserDebugParameter("posX",0.4,0.75,0.537))
#motorsIds.append(environment._p.addUserDebugParameter("posY",-.22,.3,0.0))
#motorsIds.append(environment._p.addUserDebugParameter("posZ",0.1,1,0.2))
#motorsIds.append(environment._p.addUserDebugParameter("yaw",-3.14,3.14,0))
#motorsIds.append(environment._p.addUserDebugParameter("fingerAngle",0,0.3,.3))
dv = 1
motorsIds.append(environment._p.addUserDebugParameter("posX", -dv, dv, 0))
motorsIds.append(environment._p.addUserDebugParameter("posY", -dv, dv, 0))
motorsIds.append(environment._p.addUserDebugParameter("posZ", -dv, dv, 0))
motorsIds.append(environment._p.addUserDebugParameter("yaw", -dv, dv, 0))
motorsIds.append(environment._p.addUserDebugParameter("fingerAngle", 0, 0.3, .3))
done = False
while (not done):
action = []
for motorId in motorsIds:
action.append(environment._p.readUserDebugParameter(motorId))
state, reward, done, info = environment.step(action)
obs = environment.getExtendedObservation()
if __name__ == "__main__":
main()
| {
"content_hash": "67b2e1b46023003c99a2fa2b3a32e057",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 86,
"avg_line_length": 36.375,
"alnum_prop": 0.7312714776632302,
"repo_name": "MTASZTAKI/ApertusVR",
"id": "6cf9909cb2953d0ab85fda760cf3ab617398ef8b",
"size": "1552",
"binary": false,
"copies": "3",
"ref": "refs/heads/0.9",
"path": "plugins/physics/bulletPhysics/3rdParty/bullet3/examples/pybullet/gym/pybullet_envs/examples/kukaCamGymEnvTest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "7599"
},
{
"name": "C++",
"bytes": "1207412"
},
{
"name": "CMake",
"bytes": "165066"
},
{
"name": "CSS",
"bytes": "1816"
},
{
"name": "GLSL",
"bytes": "223507"
},
{
"name": "HLSL",
"bytes": "141879"
},
{
"name": "HTML",
"bytes": "34827"
},
{
"name": "JavaScript",
"bytes": "140550"
},
{
"name": "Python",
"bytes": "1370"
}
],
"symlink_target": ""
} |
"""
39. Testing using the Test Client
The test client is a class that can act like a simple
browser for testing purposes.
It allows the user to compose GET and POST requests, and
obtain the response that the server gave to those requests.
The server Response objects are annotated with the details
of the contexts and templates that were rendered during the
process of serving the request.
``Client`` objects are stateful - they will retain cookie (and
thus session) details for the lifetime of the ``Client`` instance.
This is not intended as a replacement for Twill, Selenium, or
other browser automation frameworks - it is here to allow
testing against the contexts and templates produced by a view,
rather than the HTML rendered to the end-user.
"""
from __future__ import absolute_import, unicode_literals
from django.conf import settings
from django.core import mail
from django.test import Client, TestCase, RequestFactory
from django.test.utils import override_settings
from .views import get_view
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class ClientTest(TestCase):
fixtures = ['testdata.json']
def test_get_view(self):
"GET a view"
# The data is ignored, but let's check it doesn't crash the system
# anyway.
data = {'var': '\xf2'}
response = self.client.get('/test_client/get_view/', data)
# Check some response details
self.assertContains(response, 'This is a test')
self.assertEqual(response.context['var'], '\xf2')
self.assertEqual(response.templates[0].name, 'GET Template')
def test_get_post_view(self):
"GET a view that normally expects POSTs"
response = self.client.get('/test_client/post_view/', {})
# Check some response details
self.assertEqual(response.status_code, 200)
self.assertEqual(response.templates[0].name, 'Empty GET Template')
self.assertTemplateUsed(response, 'Empty GET Template')
self.assertTemplateNotUsed(response, 'Empty POST Template')
def test_empty_post(self):
"POST an empty dictionary to a view"
response = self.client.post('/test_client/post_view/', {})
# Check some response details
self.assertEqual(response.status_code, 200)
self.assertEqual(response.templates[0].name, 'Empty POST Template')
self.assertTemplateNotUsed(response, 'Empty GET Template')
self.assertTemplateUsed(response, 'Empty POST Template')
def test_post(self):
"POST some data to a view"
post_data = {
'value': 37
}
response = self.client.post('/test_client/post_view/', post_data)
# Check some response details
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['data'], '37')
self.assertEqual(response.templates[0].name, 'POST Template')
self.assertContains(response, 'Data received')
def test_response_headers(self):
"Check the value of HTTP headers returned in a response"
response = self.client.get("/test_client/header_view/")
self.assertEqual(response['X-DJANGO-TEST'], 'Slartibartfast')
def test_raw_post(self):
"POST raw data (with a content type) to a view"
test_doc = """<?xml version="1.0" encoding="utf-8"?><library><book><title>Blink</title><author>Malcolm Gladwell</author></book></library>"""
response = self.client.post("/test_client/raw_post_view/", test_doc,
content_type="text/xml")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.templates[0].name, "Book template")
self.assertEqual(response.content, b"Blink - Malcolm Gladwell")
def test_redirect(self):
"GET a URL that redirects elsewhere"
response = self.client.get('/test_client/redirect_view/')
# Check that the response was a 302 (redirect) and that
# assertRedirect() understands to put an implicit http://testserver/ in
# front of non-absolute URLs.
self.assertRedirects(response, '/test_client/get_view/')
host = 'django.testserver'
client_providing_host = Client(HTTP_HOST=host)
response = client_providing_host.get('/test_client/redirect_view/')
# Check that the response was a 302 (redirect) with absolute URI
self.assertRedirects(response, '/test_client/get_view/', host=host)
def test_redirect_with_query(self):
"GET a URL that redirects with given GET parameters"
response = self.client.get('/test_client/redirect_view/', {'var': 'value'})
# Check if parameters are intact
self.assertRedirects(response, 'http://testserver/test_client/get_view/?var=value')
def test_permanent_redirect(self):
"GET a URL that redirects permanently elsewhere"
response = self.client.get('/test_client/permanent_redirect_view/')
# Check that the response was a 301 (permanent redirect)
self.assertRedirects(response, 'http://testserver/test_client/get_view/', status_code=301)
client_providing_host = Client(HTTP_HOST='django.testserver')
response = client_providing_host.get('/test_client/permanent_redirect_view/')
# Check that the response was a 301 (permanent redirect) with absolute URI
self.assertRedirects(response, 'http://django.testserver/test_client/get_view/', status_code=301)
def test_temporary_redirect(self):
"GET a URL that does a non-permanent redirect"
response = self.client.get('/test_client/temporary_redirect_view/')
# Check that the response was a 302 (non-permanent redirect)
self.assertRedirects(response, 'http://testserver/test_client/get_view/', status_code=302)
def test_redirect_to_strange_location(self):
"GET a URL that redirects to a non-200 page"
response = self.client.get('/test_client/double_redirect_view/')
# Check that the response was a 302, and that
# the attempt to get the redirection location returned 301 when retrieved
self.assertRedirects(response, 'http://testserver/test_client/permanent_redirect_view/', target_status_code=301)
def test_follow_redirect(self):
"A URL that redirects can be followed to termination."
response = self.client.get('/test_client/double_redirect_view/', follow=True)
self.assertRedirects(response, 'http://testserver/test_client/get_view/', status_code=302, target_status_code=200)
self.assertEqual(len(response.redirect_chain), 2)
def test_redirect_http(self):
"GET a URL that redirects to an http URI"
        response = self.client.get('/test_client/http_redirect_view/', follow=True)
self.assertFalse(response.test_was_secure_request)
def test_redirect_https(self):
"GET a URL that redirects to an https URI"
        response = self.client.get('/test_client/https_redirect_view/', follow=True)
self.assertTrue(response.test_was_secure_request)
def test_notfound_response(self):
"GET a URL that responds as '404:Not Found'"
response = self.client.get('/test_client/bad_view/')
# Check that the response was a 404, and that the content contains MAGIC
self.assertContains(response, 'MAGIC', status_code=404)
def test_valid_form(self):
"POST valid data to a form"
post_data = {
'text': 'Hello World',
'email': '[email protected]',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Valid POST Template")
def test_valid_form_with_hints(self):
"GET a form, providing hints in the GET data"
hints = {
'text': 'Hello World',
'multi': ('b','c','e')
}
response = self.client.get('/test_client/form_view/', data=hints)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Form GET Template")
# Check that the multi-value data has been rolled out ok
self.assertContains(response, 'Select a valid choice.', 0)
def test_incomplete_data_form(self):
"POST incomplete data to a form"
post_data = {
'text': 'Hello World',
'value': 37
}
response = self.client.post('/test_client/form_view/', post_data)
self.assertContains(response, 'This field is required.', 3)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
self.assertFormError(response, 'form', 'email', 'This field is required.')
self.assertFormError(response, 'form', 'single', 'This field is required.')
self.assertFormError(response, 'form', 'multi', 'This field is required.')
def test_form_error(self):
"POST erroneous data to a form"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
self.assertFormError(response, 'form', 'email', 'Enter a valid e-mail address.')
def test_valid_form_with_template(self):
"POST valid data to a form using multiple templates"
post_data = {
'text': 'Hello World',
'email': '[email protected]',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view_with_template/', post_data)
self.assertContains(response, 'POST data OK')
self.assertTemplateUsed(response, "form_view.html")
self.assertTemplateUsed(response, 'base.html')
self.assertTemplateNotUsed(response, "Valid POST Template")
def test_incomplete_data_form_with_template(self):
"POST incomplete data to a form using multiple templates"
post_data = {
'text': 'Hello World',
'value': 37
}
response = self.client.post('/test_client/form_view_with_template/', post_data)
self.assertContains(response, 'POST data has errors')
self.assertTemplateUsed(response, 'form_view.html')
self.assertTemplateUsed(response, 'base.html')
self.assertTemplateNotUsed(response, "Invalid POST Template")
self.assertFormError(response, 'form', 'email', 'This field is required.')
self.assertFormError(response, 'form', 'single', 'This field is required.')
self.assertFormError(response, 'form', 'multi', 'This field is required.')
def test_form_error_with_template(self):
"POST erroneous data to a form using multiple templates"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view_with_template/', post_data)
self.assertContains(response, 'POST data has errors')
self.assertTemplateUsed(response, "form_view.html")
self.assertTemplateUsed(response, 'base.html')
self.assertTemplateNotUsed(response, "Invalid POST Template")
self.assertFormError(response, 'form', 'email', 'Enter a valid e-mail address.')
def test_unknown_page(self):
"GET an invalid URL"
response = self.client.get('/test_client/unknown_view/')
# Check that the response was a 404
self.assertEqual(response.status_code, 404)
def test_url_parameters(self):
"Make sure that URL ;-parameters are not stripped."
response = self.client.get('/test_client/unknown_view/;some-parameter')
# Check that the path in the response includes it (ignore that it's a 404)
self.assertEqual(response.request['PATH_INFO'], '/test_client/unknown_view/;some-parameter')
def test_view_with_login(self):
"Request a page that is protected with @login_required"
# Get the page without logging in. Should result in 302.
response = self.client.get('/test_client/login_protected_view/')
self.assertRedirects(response, 'http://testserver/accounts/login/?next=/test_client/login_protected_view/')
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Request a page that requires a login
response = self.client.get('/test_client/login_protected_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
def test_view_with_method_login(self):
"Request a page that is protected with a @login_required method"
# Get the page without logging in. Should result in 302.
response = self.client.get('/test_client/login_protected_method_view/')
self.assertRedirects(response, 'http://testserver/accounts/login/?next=/test_client/login_protected_method_view/')
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Request a page that requires a login
response = self.client.get('/test_client/login_protected_method_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
def test_view_with_login_and_custom_redirect(self):
"Request a page that is protected with @login_required(redirect_field_name='redirect_to')"
# Get the page without logging in. Should result in 302.
response = self.client.get('/test_client/login_protected_view_custom_redirect/')
self.assertRedirects(response, 'http://testserver/accounts/login/?redirect_to=/test_client/login_protected_view_custom_redirect/')
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Request a page that requires a login
response = self.client.get('/test_client/login_protected_view_custom_redirect/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
def test_view_with_bad_login(self):
"Request a page that is protected with @login, but use bad credentials"
login = self.client.login(username='otheruser', password='nopassword')
self.assertFalse(login)
def test_view_with_inactive_login(self):
"Request a page that is protected with @login, but use an inactive login"
login = self.client.login(username='inactive', password='password')
self.assertFalse(login)
def test_logout(self):
"Request a logout after logging in"
# Log in
self.client.login(username='testclient', password='password')
# Request a page that requires a login
response = self.client.get('/test_client/login_protected_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
# Log out
self.client.logout()
# Request a page that requires a login
response = self.client.get('/test_client/login_protected_view/')
self.assertRedirects(response, 'http://testserver/accounts/login/?next=/test_client/login_protected_view/')
def test_view_with_permissions(self):
"Request a page that is protected with @permission_required"
# Get the page without logging in. Should result in 302.
response = self.client.get('/test_client/permission_protected_view/')
self.assertRedirects(response, 'http://testserver/accounts/login/?next=/test_client/permission_protected_view/')
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Log in with wrong permissions. Should result in 302.
response = self.client.get('/test_client/permission_protected_view/')
self.assertRedirects(response, 'http://testserver/accounts/login/?next=/test_client/permission_protected_view/')
# TODO: Log in with right permissions and request the page again
def test_view_with_permissions_exception(self):
"Request a page that is protected with @permission_required but raises a exception"
# Get the page without logging in. Should result in 403.
response = self.client.get('/test_client/permission_protected_view_exception/')
self.assertEqual(response.status_code, 403)
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Log in with wrong permissions. Should result in 403.
response = self.client.get('/test_client/permission_protected_view_exception/')
self.assertEqual(response.status_code, 403)
def test_view_with_method_permissions(self):
"Request a page that is protected with a @permission_required method"
# Get the page without logging in. Should result in 302.
response = self.client.get('/test_client/permission_protected_method_view/')
self.assertRedirects(response, 'http://testserver/accounts/login/?next=/test_client/permission_protected_method_view/')
# Log in
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
# Log in with wrong permissions. Should result in 302.
response = self.client.get('/test_client/permission_protected_method_view/')
self.assertRedirects(response, 'http://testserver/accounts/login/?next=/test_client/permission_protected_method_view/')
# TODO: Log in with right permissions and request the page again
def test_session_modifying_view(self):
"Request a page that modifies the session"
# Session value isn't set initially
try:
self.client.session['tobacconist']
self.fail("Shouldn't have a session value")
except KeyError:
pass
from django.contrib.sessions.models import Session
response = self.client.post('/test_client/session_view/')
# Check that the session was modified
self.assertEqual(self.client.session['tobacconist'], 'hovercraft')
def test_view_with_exception(self):
"Request a page that is known to throw an error"
self.assertRaises(KeyError, self.client.get, "/test_client/broken_view/")
        # Try the same assertion, a different way
try:
self.client.get('/test_client/broken_view/')
self.fail('Should raise an error')
except KeyError:
pass
def test_mail_sending(self):
"Test that mail is redirected to a dummy outbox during test setup"
response = self.client.get('/test_client/mail_sending_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Test message')
self.assertEqual(mail.outbox[0].body, 'This is a test email')
self.assertEqual(mail.outbox[0].from_email, '[email protected]')
self.assertEqual(mail.outbox[0].to[0], '[email protected]')
self.assertEqual(mail.outbox[0].to[1], '[email protected]')
def test_mass_mail_sending(self):
"Test that mass mail is redirected to a dummy outbox during test setup"
response = self.client.get('/test_client/mass_mail_sending_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(mail.outbox), 2)
self.assertEqual(mail.outbox[0].subject, 'First Test message')
self.assertEqual(mail.outbox[0].body, 'This is the first test email')
self.assertEqual(mail.outbox[0].from_email, '[email protected]')
self.assertEqual(mail.outbox[0].to[0], '[email protected]')
self.assertEqual(mail.outbox[0].to[1], '[email protected]')
self.assertEqual(mail.outbox[1].subject, 'Second Test message')
self.assertEqual(mail.outbox[1].body, 'This is the second test email')
self.assertEqual(mail.outbox[1].from_email, '[email protected]')
self.assertEqual(mail.outbox[1].to[0], '[email protected]')
self.assertEqual(mail.outbox[1].to[1], '[email protected]')
class CSRFEnabledClientTests(TestCase):
def setUp(self):
# Enable the CSRF middleware for this test
self.old_MIDDLEWARE_CLASSES = settings.MIDDLEWARE_CLASSES
csrf_middleware_class = 'django.middleware.csrf.CsrfViewMiddleware'
if csrf_middleware_class not in settings.MIDDLEWARE_CLASSES:
settings.MIDDLEWARE_CLASSES += (csrf_middleware_class,)
def tearDown(self):
settings.MIDDLEWARE_CLASSES = self.old_MIDDLEWARE_CLASSES
def test_csrf_enabled_client(self):
"A client can be instantiated with CSRF checks enabled"
csrf_client = Client(enforce_csrf_checks=True)
# The normal client allows the post
response = self.client.post('/test_client/post_view/', {})
self.assertEqual(response.status_code, 200)
# The CSRF-enabled client rejects it
response = csrf_client.post('/test_client/post_view/', {})
self.assertEqual(response.status_code, 403)
class CustomTestClient(Client):
i_am_customized = "Yes"
class CustomTestClientTest(TestCase):
client_class = CustomTestClient
def test_custom_test_client(self):
"""A test case can specify a custom class for self.client."""
self.assertEqual(hasattr(self.client, "i_am_customized"), True)
class RequestFactoryTest(TestCase):
def test_request_factory(self):
factory = RequestFactory()
request = factory.get('/somewhere/')
response = get_view(request)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'This is a test')
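        # A minimal sketch (not part of the original suite) of driving a POST
        # through the same factory; the '/test_client/post_view/' URL is
        # assumed from the other tests in this module:
        # >>> request = factory.post('/test_client/post_view/', {'value': 37})
        # >>> request.method
        # 'POST'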
| {
"content_hash": "6216dfef691e7af10aa82778de6f9e7f",
"timestamp": "",
"source": "github",
"line_count": 507,
"max_line_length": 148,
"avg_line_length": 44.44181459566075,
"alnum_prop": 0.6615924019172732,
"repo_name": "vsajip/django",
"id": "1d9c999f21b0c0bc6279a5cca1a712c16f7f25bb",
"size": "22548",
"binary": false,
"copies": "1",
"ref": "refs/heads/django3",
"path": "tests/modeltests/test_client/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "89078"
},
{
"name": "Python",
"bytes": "8200429"
},
{
"name": "Shell",
"bytes": "4241"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import os
from django.core.wsgi import get_wsgi_application
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
| {
"content_hash": "35928ef686b573b72d4d4229717e873e",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 78,
"avg_line_length": 45.86666666666667,
"alnum_prop": 0.7834302325581395,
"repo_name": "Swappsco/koalixerp",
"id": "dd179b1aa72659606dd4d3a6f97c8b28731107cb",
"size": "688",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config/wsgi.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "58508"
},
{
"name": "HTML",
"bytes": "208636"
},
{
"name": "JavaScript",
"bytes": "200359"
},
{
"name": "Python",
"bytes": "289012"
},
{
"name": "Shell",
"bytes": "159"
}
],
"symlink_target": ""
} |
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from sklearn.externals import six
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
"""
return ''.join([c for c in unicodedata.normalize('NFKD', s)
if not unicodedata.combining(c)])
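# A minimal illustrative example (not part of the original module):
# >>> strip_accents_unicode('àéîõü')
# 'aeiou'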
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
else: # assume it's a collection
return stop
class VectorizerMixin(object):
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError("np.nan is an invalid document, expected byte or "
"unicode string.")
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(" ".join(original_tokens[i: i + n]))
return tokens
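    # A minimal illustrative sketch (not part of the original module), assuming
    # a vectorizer configured with ngram_range=(1, 2):
    # >>> self._word_ngrams(['the', 'quick', 'fox'], stop_words={'the'})
    # ['quick', 'fox', 'quick fox']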
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
ngrams = []
min_n, max_n = self.ngram_range
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
excluding any whitespace (operating only inside word boundaries)"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams.append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams.append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
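    # A minimal illustrative sketch (not part of the original module), assuming
    # ngram_range=(2, 2); each word is padded with spaces before slicing:
    # >>> self._char_wb_ngrams('hi')
    # [' h', 'hi', 'i ']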
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However the cost of a lambda call is a few
# hundreds of nanoseconds which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if callable(self.analyzer):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Parameters
----------
input: string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that
        can be of type string or bytes, which are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents: {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer: string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor: callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer: callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
ngram_range: tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words: string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
lowercase: boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern: string
Regular expression denoting what constitutes a "token", only used
if `analyzer == 'word'`. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, optional, (2 ** 20) by default
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
binary: boolean, False by default.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype: type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, optional
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
self._get_hasher().fit(X, y=y)
return self
def transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
non_negative=self.non_negative)
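# A minimal usage sketch (not part of the original module): the transformer is
# stateless, so documents can be hashed without any prior fit.
# >>> hv = HashingVectorizer(n_features=8)
# >>> X = hv.transform(['cat sat', 'cat sat sat'])
# >>> X.shape
# (2, 8)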
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return np.bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
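# A minimal illustrative example (not part of the original module): each entry
# counts the documents (rows) in which a feature (column) is non-zero.
# >>> _document_frequency(sp.csr_matrix([[1, 0], [2, 3]]))
# array([2, 1])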
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.coo_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that
        can be of type string or bytes, which are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
        if `analyzer == 'word'`. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, optional, 1.0 by default
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
        If float, the parameter represents a proportion of documents;
        if integer, absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, optional, 1 by default
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
        If float, the parameter represents a proportion of documents;
        if integer, absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : optional, None by default
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, False by default.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
stop_words_ : set
Terms that were ignored because
they occurred in either too many
(`max_df`) or in too few (`min_df`) documents.
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df of min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
if vocabulary is not None:
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(six.itervalues(vocabulary))
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in xrange(len(vocabulary)):
if i not in indices:
msg = ("Vocabulary of size %d doesn't contain index "
"%d." % (len(vocabulary), i))
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary = False
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
map_index[new_val] = old_val
vocabulary[term] = new_val
return X[:, map_index]
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
        Prune features that are non-zero in more documents than high or in
        fewer documents than low, modifying the vocabulary and restricting
        it to at most the limit most frequent features.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = _make_int_array()
indptr = _make_int_array()
indptr.append(0)
for doc in raw_documents:
for feature in analyze(doc):
try:
j_indices.append(vocabulary[feature])
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
# some Python/Scipy versions won't accept an array.array:
if j_indices:
j_indices = np.frombuffer(j_indices, dtype=np.intc)
else:
j_indices = np.array([], dtype=np.int32)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = np.ones(len(j_indices))
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sum_duplicates()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : array, [n_samples, n_features]
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents, self.fixed_vocabulary)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary:
X = self._sort_features(X, vocabulary)
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if not hasattr(self, 'vocabulary_') or len(self.vocabulary_) == 0:
raise ValueError("Vocabulary wasn't fitted or is empty!")
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
if not hasattr(self, 'vocabulary_') or len(self.vocabulary_) == 0:
raise ValueError("Vocabulary wasn't fitted or is empty!")
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
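# A minimal usage sketch (not part of the original module):
# >>> cv = CountVectorizer()
# >>> X = cv.fit_transform(['the cat sat', 'the cat ran'])
# >>> cv.get_feature_names()
# ['cat', 'ran', 'sat', 'the']
# >>> X.toarray()
# array([[1, 0, 1, 1],
#        [1, 1, 0, 1]])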
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf,
instead of tf * idf. The effect of this is that terms with zero idf, i.e.
that occur in all documents of a training set, will not be entirely
ignored. The formulas used to compute tf and idf depend on parameter
settings that correspond to the SMART notation used in IR, as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, optional
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, optional
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, optional
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.`
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log1p instead of log makes sure terms with zero idf don't get
# suppressed entirely
idf = np.log(float(n_samples) / df) + 1.0
self._idf_diag = sp.spdiags(idf,
diags=0, m=n_features, n=n_features)
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
if not hasattr(self, "_idf_diag"):
raise ValueError("idf vector not fitted")
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
# *= doesn't work
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
if hasattr(self, "_idf_diag"):
return np.ravel(self._idf_diag.sum(axis=0))
else:
return None
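# A small worked example (not part of the original module) of the smoothed idf
# computed in TfidfTransformer.fit: with 4 documents and smooth_idf=True,
# idf = log((4 + 1) / (df + 1)) + 1, so a term present in every document gets
# idf = log(5/5) + 1 = 1.0, while a term present in a single document gets
# idf = log(5/2) + 1, about 1.92; terms occurring everywhere are therefore
# down-weighted but never zeroed out entirely.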
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that
        can be of type string or bytes, which are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if `analyzer == 'word'`. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, optional, 1.0 by default
        When building the vocabulary ignore terms that have a document
        frequency strictly higher than the given threshold (corpus-specific
        stop words).
        If float, the parameter represents a proportion of documents;
        if integer, absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, optional, 1 by default
        When building the vocabulary ignore terms that have a document
        frequency strictly lower than the given threshold.
        This value is also called cut-off in the literature.
        If float, the parameter represents a proportion of documents;
        if integer, absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : optional, None by default
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean, False by default.
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set idf and normalization to False to get 0/1 outputs.)
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, optional
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, optional
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, optional
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
idf_ : array, shape = [n_features], or None
The learned idf vector (global term weights)
when ``use_idf`` is set to True, None otherwise.
See also
--------
CountVectorizer
Tokenize the documents and count the occurrences of token and return
them as a sparse matrix
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
@property
def idf_(self):
return self._tfidf.idf_
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self : TfidfVectorizer
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy=False)
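# A minimal usage sketch (not part of the original module):
# >>> vec = TfidfVectorizer()
# >>> X = vec.fit_transform(['the cat sat', 'the cat ran'])
# >>> X.shape
# (2, 4)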
| {
"content_hash": "6e55c7317190b2e2e0444240f304b4c3",
"timestamp": "",
"source": "github",
"line_count": 1254,
"max_line_length": 79,
"avg_line_length": 37.91866028708134,
"alnum_prop": 0.6154574132492113,
"repo_name": "RPGOne/Skynet",
"id": "d5590f06040672f581c38aec49ba5e6220c0b20a",
"size": "47905",
"binary": false,
"copies": "2",
"ref": "refs/heads/Miho",
"path": "scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/sklearn/feature_extraction/text.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "1C Enterprise",
"bytes": "36"
},
{
"name": "Ada",
"bytes": "89079"
},
{
"name": "Assembly",
"bytes": "11425802"
},
{
"name": "Batchfile",
"bytes": "123467"
},
{
"name": "C",
"bytes": "34703955"
},
{
"name": "C#",
"bytes": "55955"
},
{
"name": "C++",
"bytes": "84647314"
},
{
"name": "CMake",
"bytes": "220849"
},
{
"name": "CSS",
"bytes": "39257"
},
{
"name": "Cuda",
"bytes": "1344541"
},
{
"name": "DIGITAL Command Language",
"bytes": "349320"
},
{
"name": "DTrace",
"bytes": "37428"
},
{
"name": "Emacs Lisp",
"bytes": "19654"
},
{
"name": "Erlang",
"bytes": "39438"
},
{
"name": "Fortran",
"bytes": "16914"
},
{
"name": "HTML",
"bytes": "929759"
},
{
"name": "Java",
"bytes": "112658"
},
{
"name": "JavaScript",
"bytes": "32806873"
},
{
"name": "Jupyter Notebook",
"bytes": "1616334"
},
{
"name": "Lua",
"bytes": "22549"
},
{
"name": "M4",
"bytes": "64967"
},
{
"name": "Makefile",
"bytes": "1046428"
},
{
"name": "Matlab",
"bytes": "888"
},
{
"name": "Module Management System",
"bytes": "1545"
},
{
"name": "NSIS",
"bytes": "2860"
},
{
"name": "Objective-C",
"bytes": "131433"
},
{
"name": "PHP",
"bytes": "750783"
},
{
"name": "Pascal",
"bytes": "75208"
},
{
"name": "Perl",
"bytes": "626627"
},
{
"name": "Perl 6",
"bytes": "2495926"
},
{
"name": "PowerShell",
"bytes": "38374"
},
{
"name": "Prolog",
"bytes": "300018"
},
{
"name": "Python",
"bytes": "26363074"
},
{
"name": "R",
"bytes": "236175"
},
{
"name": "Rebol",
"bytes": "217"
},
{
"name": "Roff",
"bytes": "328366"
},
{
"name": "SAS",
"bytes": "1847"
},
{
"name": "Scala",
"bytes": "248902"
},
{
"name": "Scheme",
"bytes": "14853"
},
{
"name": "Shell",
"bytes": "360815"
},
{
"name": "TeX",
"bytes": "105346"
},
{
"name": "Vim script",
"bytes": "6101"
},
{
"name": "XS",
"bytes": "4319"
},
{
"name": "eC",
"bytes": "5158"
}
],
"symlink_target": ""
} |
from .base import *
from .fields import *
from .serializer_fields import *
from .time_fields import *
| {
"content_hash": "63ce6452b841a16e6b9d923772da3169",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 32,
"avg_line_length": 25.5,
"alnum_prop": 0.7450980392156863,
"repo_name": "onyg/aserializer",
"id": "0fbaa4ed4ee79a7fd35f9b7cad192a934236f9ee",
"size": "127",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aserializer/fields/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "217282"
}
],
"symlink_target": ""
} |
'''
This module defines :class:`Segment`, a container for data sharing a common
time basis.
:class:`Segment` derives from :class:`Container`,
from :module:`neo.core.container`.
'''
from datetime import datetime
import numpy as np
from copy import deepcopy
from neo.core.container import Container
from neo.core.spiketrainlist import SpikeTrainList
class Segment(Container):
'''
A container for data sharing a common time basis.
A :class:`Segment` is a heterogeneous container for discrete or continuous
data sharing a common clock (time basis) but not necessary the same
sampling rate, start or end time.
*Usage*::
>>> from neo.core import Segment, SpikeTrain, AnalogSignal
>>> from quantities import Hz, s
>>>
>>> seg = Segment(index=5)
>>>
>>> train0 = SpikeTrain(times=[.01, 3.3, 9.3], units='sec', t_stop=10)
>>> seg.spiketrains.append(train0)
>>>
>>> train1 = SpikeTrain(times=[100.01, 103.3, 109.3], units='sec',
... t_stop=110)
>>> seg.spiketrains.append(train1)
>>>
>>> sig0 = AnalogSignal(signal=[.01, 3.3, 9.3], units='uV',
... sampling_rate=1*Hz)
>>> seg.analogsignals.append(sig0)
>>>
>>> sig1 = AnalogSignal(signal=[100.01, 103.3, 109.3], units='nA',
... sampling_period=.1*s)
>>> seg.analogsignals.append(sig1)
*Required attributes/properties*:
None
*Recommended attributes/properties*:
:name: (str) A label for the dataset.
:description: (str) Text description.
:file_origin: (str) Filesystem path or URL of the original data file.
:file_datetime: (datetime) The creation date and time of the original
data file.
:rec_datetime: (datetime) The date and time of the original recording
:index: (int) You can use this to define a temporal ordering of
your Segment. For instance you could use this for trial numbers.
Note: Any other additional arguments are assumed to be user-specific
metadata and stored in :attr:`annotations`.
*Properties available on this object*:
:all_data: (list) A list of all child objects in the :class:`Segment`.
*Container of*:
:class:`Epoch`
:class:`Event`
:class:`AnalogSignal`
:class:`IrregularlySampledSignal`
:class:`SpikeTrain`
'''
_data_child_objects = ('AnalogSignal',
'Epoch', 'Event',
'IrregularlySampledSignal', 'SpikeTrain', 'ImageSequence')
_parent_objects = ('Block',)
_recommended_attrs = ((('file_datetime', datetime),
('rec_datetime', datetime),
('index', int)) +
Container._recommended_attrs)
_repr_pretty_containers = ('analogsignals',)
def __init__(self, name=None, description=None, file_origin=None,
file_datetime=None, rec_datetime=None, index=None,
**annotations):
'''
Initialize a new :class:`Segment` instance.
'''
super().__init__(name=name, description=description,
file_origin=file_origin, **annotations)
self.spiketrains = SpikeTrainList(segment=self)
self.file_datetime = file_datetime
self.rec_datetime = rec_datetime
self.index = index
# t_start attribute is handled as a property so type checking can be done
@property
def t_start(self):
'''
Time when first signal begins.
'''
t_starts = [sig.t_start for sig in self.analogsignals +
self.spiketrains + self.irregularlysampledsignals]
for e in self.epochs + self.events:
if hasattr(e, 't_start'): # in case of proxy objects
t_starts += [e.t_start]
elif len(e) > 0:
t_starts += [e.times[0]]
# t_start is not defined if no children are present
if len(t_starts) == 0:
return None
t_start = min(t_starts)
return t_start
# t_stop attribute is handled as a property so type checking can be done
@property
def t_stop(self):
'''
Time when last signal ends.
'''
t_stops = [sig.t_stop for sig in self.analogsignals +
self.spiketrains + self.irregularlysampledsignals]
for e in self.epochs + self.events:
if hasattr(e, 't_stop'): # in case of proxy objects
t_stops += [e.t_stop]
elif len(e) > 0:
t_stops += [e.times[-1]]
# t_stop is not defined if no children are present
if len(t_stops) == 0:
return None
t_stop = max(t_stops)
return t_stop
def time_slice(self, t_start=None, t_stop=None, reset_time=False, **kwargs):
"""
Creates a time slice of a Segment containing slices of all child
objects.
Parameters
----------
t_start: Quantity
Starting time of the sliced time window.
t_stop: Quantity
Stop time of the sliced time window.
reset_time: bool, optional, default: False
If True the time stamps of all sliced objects are set to fall
in the range from t_start to t_stop.
If False, original time stamps are retained.
**kwargs
Additional keyword arguments used for initialization of the sliced
Segment object.
Returns
-------
subseg: Segment
Temporal slice of the original Segment from t_start to t_stop.
"""
subseg = Segment(**kwargs)
for attr in ['file_datetime', 'rec_datetime', 'index',
'name', 'description', 'file_origin']:
setattr(subseg, attr, getattr(self, attr))
subseg.annotations = deepcopy(self.annotations)
if t_start is None:
t_start = self.t_start
if t_stop is None:
t_stop = self.t_stop
t_shift = - t_start
# cut analogsignals and analogsignalarrays
for ana_id in range(len(self.analogsignals)):
if hasattr(self.analogsignals[ana_id], '_rawio'):
ana_time_slice = self.analogsignals[ana_id].load(time_slice=(t_start, t_stop))
else:
ana_time_slice = self.analogsignals[ana_id].time_slice(t_start, t_stop)
if reset_time:
ana_time_slice = ana_time_slice.time_shift(t_shift)
subseg.analogsignals.append(ana_time_slice)
# cut irregularly sampled signals
for irr_id in range(len(self.irregularlysampledsignals)):
if hasattr(self.irregularlysampledsignals[irr_id], '_rawio'):
ana_time_slice = self.irregularlysampledsignals[irr_id].load(
time_slice=(t_start, t_stop))
else:
ana_time_slice = self.irregularlysampledsignals[irr_id].time_slice(t_start, t_stop)
if reset_time:
ana_time_slice = ana_time_slice.time_shift(t_shift)
subseg.irregularlysampledsignals.append(ana_time_slice)
# cut spiketrains
for st_id in range(len(self.spiketrains)):
if hasattr(self.spiketrains[st_id], '_rawio'):
st_time_slice = self.spiketrains[st_id].load(time_slice=(t_start, t_stop))
else:
st_time_slice = self.spiketrains[st_id].time_slice(t_start, t_stop)
if reset_time:
st_time_slice = st_time_slice.time_shift(t_shift)
subseg.spiketrains.append(st_time_slice)
# cut events
for ev_id in range(len(self.events)):
if hasattr(self.events[ev_id], '_rawio'):
ev_time_slice = self.events[ev_id].load(time_slice=(t_start, t_stop))
else:
ev_time_slice = self.events[ev_id].time_slice(t_start, t_stop)
if reset_time:
ev_time_slice = ev_time_slice.time_shift(t_shift)
# appending only non-empty events
if len(ev_time_slice):
subseg.events.append(ev_time_slice)
# cut epochs
for ep_id in range(len(self.epochs)):
if hasattr(self.epochs[ep_id], '_rawio'):
ep_time_slice = self.epochs[ep_id].load(time_slice=(t_start, t_stop))
else:
ep_time_slice = self.epochs[ep_id].time_slice(t_start, t_stop)
if reset_time:
ep_time_slice = ep_time_slice.time_shift(t_shift)
# appending only non-empty epochs
if len(ep_time_slice):
subseg.epochs.append(ep_time_slice)
subseg.create_relationship()
return subseg
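    # A minimal usage sketch (not part of the original module), assuming a
    # populated segment `seg` and the quantities package:
    # >>> import quantities as pq
    # >>> sub = seg.time_slice(1.0 * pq.s, 5.0 * pq.s, reset_time=True)
    # >>> sub.t_start  # with reset_time=True the slice is shifted to start at 0 s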
| {
"content_hash": "ac66ab4f3c55ddad75f67d3b6a5336ad",
"timestamp": "",
"source": "github",
"line_count": 239,
"max_line_length": 99,
"avg_line_length": 37.24267782426778,
"alnum_prop": 0.5697112683968093,
"repo_name": "apdavison/python-neo",
"id": "db0004496092370b94ffc305f85bb85f0bf89929",
"size": "8901",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "neo/core/segment.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "2476868"
}
],
"symlink_target": ""
} |
""" GENERATED FILE. ALL CHANGES WILL BE OVERWRITTEN! """
from django import forms
from django.contrib.formtools.wizard.views import SessionWizardView
from cuescience_shop.models import {% for step in wizard.steps %}{%if step.form.model%}{%if not loop.first %}, {%endif%}{{step.form.model}}{%endif%}{%endfor%}
{% for step in wizard.steps %}
class Step{{ step.number }}Form(forms.{%if step.form.model%}Model{%endif%}Form):
{% for field in step.form.extra_fields %}{{field.name}} = forms.{{field.field_type}}(required=False, label="{{field.verbose_name}}", {% for k,v in field.kwargs.items() %}{{k}}={{v}}{% endfor %})
{% endfor %}
{%if step.form.model%}class Meta:
model = {{step.form.model}}
{% if step.form.fields %}fields = ({% for field in step.form.fields %}{{field}},{% endfor %}) {%endif%}{%endif%}
def __init__(self, *args, **kwargs):
super(Step{{ step.number }}Form, self).__init__(*args, **kwargs)
{%if step.form.heading%}self.heading = "{{step.form.heading}}"{%endif%}
{%if step.form.grouped_fields%}self.grouped_fields = [
{%for group in step.form.grouped_fields%}({%for field in group%}self[{{field}}],{%endfor%}),
{%endfor%}
]{%endif%}
{% if step.condition %}
def condition_step_{{step.number}}(wizard):
cleaned_data = wizard.get_cleaned_data_for_step("{{step.condition.step}}") or {"{{step.condition.name}}": 'none'}
return cleaned_data["{{step.condition.name}}"] == {{step.condition.value}}
{%endif%}
{%endfor%}
class {{wizard.name}}WizardBase(SessionWizardView):
form_list = [{% for step in wizard.steps %}("{{step.number}}",Step{{step.number}}Form),{%endfor%}]
{%if wizard.conditions%}condition_dict = { {% for step in wizard.steps %}{% if step.condition %}"{{step.number}}": condition_step_{{step.number}},{%endif%}{% endfor %} }{% endif %} | {
"content_hash": "4f8187cc8e98ce3afe4065688ec5e380",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 198,
"avg_line_length": 62.266666666666666,
"alnum_prop": 0.6274089935760171,
"repo_name": "cuescience/cuescience-shop",
"id": "4e179d3bd5d24521aa671468a7243b106fccdd1e",
"size": "1868",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shop/specs/templates/wizard_template.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6720"
},
{
"name": "Python",
"bytes": "46728"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('semesterpage', '0027_options_last_user_modification'),
]
operations = [
migrations.AlterField(
model_name='semester',
name='published',
field=models.BooleanField(default=False, help_text='Semesteret dukker ikke opp i navigasjonsbaren før det er publisert, men det er fortsatt mulig å besøke semesteret manuelt (URL: wikilinks.no/studieprogram/hovedprofil/semesternummer) for å teste resultatet før du publiserer.', verbose_name='publisert'),
),
migrations.AlterField(
model_name='studyprogram',
name='has_archive',
field=models.BooleanField(default=False, help_text='Huk av hvis studieprogrammet har filer i arkivet på wikilinks.no/arkiv.', verbose_name='har arkiv'),
),
migrations.AlterField(
model_name='studyprogram',
name='published',
field=models.BooleanField(default=False, help_text='Studieprogrammet dukker ikke opp i studieprogramlisten i navigasjonsbaren før det er publisert, men det er fortsatt mulig å besøke studieprogrammet manuelt (URL: wikilinks.no/visningsnavn) for å teste resultatet før du publiserer.', verbose_name='publisert'),
),
]
| {
"content_hash": "2cec7fa10a4a96b1dcb7349c59b804c5",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 323,
"avg_line_length": 49.285714285714285,
"alnum_prop": 0.6891304347826087,
"repo_name": "afriestad/WikiLinks",
"id": "2da07cbe1bf7c6480ca98f557013ee5fda183801",
"size": "1464",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "semesterpage/migrations/0028_auto_20170906_2226.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8075"
},
{
"name": "HTML",
"bytes": "16335"
},
{
"name": "JavaScript",
"bytes": "5439"
},
{
"name": "Python",
"bytes": "197303"
}
],
"symlink_target": ""
} |
from storage import *
| {
"content_hash": "a230ab26b1fb333866db58bf64f1ee9e",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 21,
"avg_line_length": 22,
"alnum_prop": 0.7727272727272727,
"repo_name": "agniveshadhikari/edx-agea",
"id": "02d16cd1fc18fa425edb62be922fe0c6852fd449",
"size": "22",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "agea/storage/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1063"
},
{
"name": "HTML",
"bytes": "11478"
},
{
"name": "JavaScript",
"bytes": "19470"
},
{
"name": "Python",
"bytes": "85247"
}
],
"symlink_target": ""
} |
"""Add ``root_dag_id`` to ``DAG``
Revision ID: b3b105409875
Revises: d38e04c12aa2
Create Date: 2019-09-28 23:20:01.744775
"""
from __future__ import annotations
import sqlalchemy as sa
from alembic import op
from airflow.migrations.db_types import StringID
# revision identifiers, used by Alembic.
revision = 'b3b105409875'
down_revision = 'd38e04c12aa2'
branch_labels = None
depends_on = None
airflow_version = '1.10.7'
def upgrade():
"""Apply Add ``root_dag_id`` to ``DAG``"""
op.add_column('dag', sa.Column('root_dag_id', StringID(), nullable=True))
op.create_index('idx_root_dag_id', 'dag', ['root_dag_id'], unique=False)
def downgrade():
"""Unapply Add ``root_dag_id`` to ``DAG``"""
op.drop_index('idx_root_dag_id', table_name='dag')
op.drop_column('dag', 'root_dag_id')
| {
"content_hash": "e5a4512de8917915ffe5f50420a795cf",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 77,
"avg_line_length": 25.28125,
"alnum_prop": 0.6736711990111248,
"repo_name": "cfei18/incubator-airflow",
"id": "571338cf85f2cb316c6c049a50bf5c3a9edf2353",
"size": "1596",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "airflow/migrations/versions/0045_1_10_7_add_root_dag_id_to_dag.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "72003"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "173434"
},
{
"name": "JavaScript",
"bytes": "143068"
},
{
"name": "Jinja",
"bytes": "38808"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "22660683"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "312715"
},
{
"name": "TypeScript",
"bytes": "472379"
}
],
"symlink_target": ""
} |
"""Support for Home Assistant iOS app sensors."""
from homeassistant.components import ios
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.icon import icon_for_battery_level
DEPENDENCIES = ['ios']
SENSOR_TYPES = {
'level': ['Battery Level', '%'],
'state': ['Battery State', None]
}
DEFAULT_ICON_LEVEL = 'mdi:battery'
DEFAULT_ICON_STATE = 'mdi:power-plug'
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the iOS sensor."""
    # Left here in case someone accidentally adds platform: ios to the config
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up iOS from a config entry."""
dev = list()
for device_name, device in ios.devices(hass).items():
for sensor_type in ('level', 'state'):
dev.append(IOSSensor(sensor_type, device_name, device))
async_add_entities(dev, True)
class IOSSensor(Entity):
"""Representation of an iOS sensor."""
def __init__(self, sensor_type, device_name, device):
"""Initialize the sensor."""
self._device_name = device_name
self._name = "{} {}".format(device_name, SENSOR_TYPES[sensor_type][0])
self._device = device
self.type = sensor_type
self._state = None
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
@property
def device_info(self):
"""Return information about the device."""
return {
'identifiers': {
(ios.DOMAIN,
self._device[ios.ATTR_DEVICE][ios.ATTR_DEVICE_PERMANENT_ID]),
},
'name': self._device[ios.ATTR_DEVICE][ios.ATTR_DEVICE_NAME],
'manufacturer': 'Apple',
'model': self._device[ios.ATTR_DEVICE][ios.ATTR_DEVICE_TYPE],
'sw_version':
self._device[ios.ATTR_DEVICE][ios.ATTR_DEVICE_SYSTEM_VERSION],
}
@property
def name(self):
"""Return the name of the iOS sensor."""
device_name = self._device[ios.ATTR_DEVICE][ios.ATTR_DEVICE_NAME]
return "{} {}".format(device_name, SENSOR_TYPES[self.type][0])
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unique_id(self):
"""Return the unique ID of this sensor."""
device_id = self._device[ios.ATTR_DEVICE_ID]
return "{}_{}".format(self.type, device_id)
@property
def unit_of_measurement(self):
"""Return the unit of measurement this sensor expresses itself in."""
return self._unit_of_measurement
@property
def device_state_attributes(self):
"""Return the device state attributes."""
device = self._device[ios.ATTR_DEVICE]
device_battery = self._device[ios.ATTR_BATTERY]
return {
"Battery State": device_battery[ios.ATTR_BATTERY_STATE],
"Battery Level": device_battery[ios.ATTR_BATTERY_LEVEL],
"Device Type": device[ios.ATTR_DEVICE_TYPE],
"Device Name": device[ios.ATTR_DEVICE_NAME],
"Device Version": device[ios.ATTR_DEVICE_SYSTEM_VERSION],
}
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
device_battery = self._device[ios.ATTR_BATTERY]
battery_state = device_battery[ios.ATTR_BATTERY_STATE]
battery_level = device_battery[ios.ATTR_BATTERY_LEVEL]
charging = True
icon_state = DEFAULT_ICON_STATE
if battery_state in (ios.ATTR_BATTERY_STATE_FULL,
ios.ATTR_BATTERY_STATE_UNPLUGGED):
charging = False
icon_state = "{}-off".format(DEFAULT_ICON_STATE)
elif battery_state == ios.ATTR_BATTERY_STATE_UNKNOWN:
battery_level = None
charging = False
icon_state = "{}-unknown".format(DEFAULT_ICON_LEVEL)
if self.type == "state":
return icon_state
return icon_for_battery_level(battery_level=battery_level,
charging=charging)
def update(self):
"""Get the latest state of the sensor."""
self._device = ios.devices(self.hass).get(self._device_name)
self._state = self._device[ios.ATTR_BATTERY][self.type]
| {
"content_hash": "0d0b5a45f5a22818618dfa643da06e0a",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 78,
"avg_line_length": 36.15126050420168,
"alnum_prop": 0.6059972105997211,
"repo_name": "HydrelioxGitHub/home-assistant",
"id": "404b313368cebe42b64c97f9f66c9db6152ac9e8",
"size": "4302",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/ios/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "Python",
"bytes": "14330009"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17364"
}
],
"symlink_target": ""
} |
from collections import deque
if __name__ == "__main__":
queue = deque()
print (queue)
queue.append(2)
queue.append(4)
queue.append(6)
print (queue)
queue.popleft()
print (queue)
queue.pop()
print (queue) | {
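# Expected output of the demo above (the deque is used as a FIFO queue here):
#   deque([])           empty queue
#   deque([2, 4, 6])    after the three appends
#   deque([4, 6])       popleft() removed 2 from the front
#   deque([4])          pop() removed 6 from the back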
"content_hash": "026a0455cc2e53a14e9da0b2fb2ab52a",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 30,
"avg_line_length": 13.944444444444445,
"alnum_prop": 0.5657370517928287,
"repo_name": "cprakashagr/PythonClass",
"id": "a59cfc2890a23ed6898f2f13c8d363941190eb9b",
"size": "251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ds/Queue.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "726190"
},
{
"name": "Python",
"bytes": "37362"
}
],
"symlink_target": ""
} |
from distutils.core import setup
from pyql import __author__, __version__, __email__, __license__, __maintainer__
short_description = 'YQL Queries and Yahoo Weather in Python v.%s' % __version__
try:
long_description = open('README.md').read()
except IOError:
long_description = "YQL Queries and Yahoo Weather in Python v.%s" % __version__
setup(name='pyql-weather',
version=__version__,
description=short_description,
long_description=long_description,
license=__license__,
author=__author__,
author_email=__email__,
maintainer=__maintainer__,
maintainer_email=__email__,
url='http://www.github.com/alexdzul/pyql-weather/',
packages=['pyql', 'pyql.weather', 'pyql.geo', 'demos'],
data_files=[('', ['README.md', 'LICENSE'])],
keywords=['pyql', 'yahoo', 'weather', 'forecast', 'yql'],
platforms='any',
classifiers=["Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries :: Python Modules",
]
) | {
"content_hash": "79d567c1d0f1f5449b8dc644cf9058ea",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 83,
"avg_line_length": 42.26190476190476,
"alnum_prop": 0.540281690140845,
"repo_name": "alexdzul/pyql-weather",
"id": "0acc91a884190a920219e44376a146e5d6569e69",
"size": "1797",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "67096"
}
],
"symlink_target": ""
} |
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION, ANYXML_CLASS
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'Snmpv2Mib.System' : {
'meta_info' : _MetaInfoClass('Snmpv2Mib.System',
False,
[
_MetaInfoClassMember('sysContact', ATTRIBUTE, 'str' , None, None,
[(0, 255)], [],
''' The textual identification of the contact person for
this managed node, together with information on how
to contact this person. If no contact information is
known, the value is the zero-length string.
''',
'syscontact',
'SNMPv2-MIB', False),
_MetaInfoClassMember('sysDescr', ATTRIBUTE, 'str' , None, None,
[(0, 255)], [],
''' A textual description of the entity. This value should
include the full name and version identification of
the system's hardware type, software operating-system,
and networking software.
''',
'sysdescr',
'SNMPv2-MIB', False),
_MetaInfoClassMember('sysLocation', ATTRIBUTE, 'str' , None, None,
[(0, 255)], [],
''' The physical location of this node (e.g., 'telephone
closet, 3rd floor'). If the location is unknown, the
value is the zero-length string.
''',
'syslocation',
'SNMPv2-MIB', False),
_MetaInfoClassMember('sysName', ATTRIBUTE, 'str' , None, None,
[(0, 255)], [],
''' An administratively-assigned name for this managed
node. By convention, this is the node's fully-qualified
domain name. If the name is unknown, the value is
the zero-length string.
''',
'sysname',
'SNMPv2-MIB', False),
_MetaInfoClassMember('sysObjectID', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-1](\\.[1-3]?[0-9]))|(2\\.(0|([1-9]\\d*))))(\\.(0|([1-9]\\d*)))*'],
''' The vendor's authoritative identification of the
network management subsystem contained in the entity.
This value is allocated within the SMI enterprises
subtree (1.3.6.1.4.1) and provides an easy and
unambiguous means for determining `what kind of box' is
being managed. For example, if vendor `Flintstones,
Inc.' was assigned the subtree 1.3.6.1.4.1.424242,
it could assign the identifier 1.3.6.1.4.1.424242.1.1
to its `Fred Router'.
''',
'sysobjectid',
'SNMPv2-MIB', False),
_MetaInfoClassMember('sysORLastChange', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The value of sysUpTime at the time of the most recent
change in state or value of any instance of sysORID.
''',
'sysorlastchange',
'SNMPv2-MIB', False),
_MetaInfoClassMember('sysServices', ATTRIBUTE, 'int' , None, None,
[('0', '127')], [],
''' A value which indicates the set of services that this
entity may potentially offer. The value is a sum.
This sum initially takes the value zero. Then, for
each layer, L, in the range 1 through 7, that this node
performs transactions for, 2 raised to (L - 1) is added
to the sum. For example, a node which performs only
routing functions would have a value of 4 (2^(3-1)).
In contrast, a node which is a host offering application
services would have a value of 72 (2^(4-1) + 2^(7-1)).
Note that in the context of the Internet suite of
protocols, values should be calculated accordingly:
layer functionality
1 physical (e.g., repeaters)
2 datalink/subnetwork (e.g., bridges)
3 internet (e.g., supports the IP)
4 end-to-end (e.g., supports the TCP)
7 applications (e.g., supports the SMTP)
For systems including OSI protocols, layers 5 and 6
may also be counted.
''',
'sysservices',
'SNMPv2-MIB', False),
_MetaInfoClassMember('sysUpTime', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The time (in hundredths of a second) since the
network management portion of the system was last
re-initialized.
''',
'sysuptime',
'SNMPv2-MIB', False),
],
'SNMPv2-MIB',
'system',
_yang_ns._namespaces['SNMPv2-MIB'],
'ydk.models.cisco_ios_xe.SNMPv2_MIB'
),
},
'Snmpv2Mib.Snmp.SnmpenableauthentrapsEnum' : _MetaInfoEnum('SnmpenableauthentrapsEnum', 'ydk.models.cisco_ios_xe.SNMPv2_MIB',
{
'enabled':'enabled',
'disabled':'disabled',
}, 'SNMPv2-MIB', _yang_ns._namespaces['SNMPv2-MIB']),
'Snmpv2Mib.Snmp' : {
'meta_info' : _MetaInfoClass('Snmpv2Mib.Snmp',
False,
[
_MetaInfoClassMember('snmpEnableAuthenTraps', REFERENCE_ENUM_CLASS, 'SnmpenableauthentrapsEnum' , 'ydk.models.cisco_ios_xe.SNMPv2_MIB', 'Snmpv2Mib.Snmp.SnmpenableauthentrapsEnum',
[], [],
''' Indicates whether the SNMP entity is permitted to
generate authenticationFailure traps. The value of this
object overrides any configuration information; as such,
it provides a means whereby all authenticationFailure
traps may be disabled.
Note that it is strongly recommended that this object
be stored in non-volatile memory so that it remains
constant across re-initializations of the network
management system.
''',
'snmpenableauthentraps',
'SNMPv2-MIB', False),
_MetaInfoClassMember('snmpInASNParseErrs', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of ASN.1 or BER errors encountered by
the SNMP entity when decoding received SNMP messages.
''',
'snmpinasnparseerrs',
'SNMPv2-MIB', False),
_MetaInfoClassMember('snmpInBadCommunityNames', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of community-based SNMP messages (for
example, SNMPv1) delivered to the SNMP entity which
used an SNMP community name not known to said entity.
Also, implementations which authenticate community-based
SNMP messages using check(s) in addition to matching
the community name (for example, by also checking
whether the message originated from a transport address
allowed to use a specified community name) MAY include
in this value the number of messages which failed the
additional check(s). It is strongly RECOMMENDED that
the documentation for any security model which is used
to authenticate community-based SNMP messages specify
the precise conditions that contribute to this value.
''',
'snmpinbadcommunitynames',
'SNMPv2-MIB', False),
_MetaInfoClassMember('snmpInBadCommunityUses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of community-based SNMP messages (for
example, SNMPv1) delivered to the SNMP entity which
represented an SNMP operation that was not allowed for
the SNMP community named in the message. The precise
conditions under which this counter is incremented
(if at all) depend on how the SNMP entity implements
its access control mechanism and how its applications
interact with that access control mechanism. It is
strongly RECOMMENDED that the documentation for any
access control mechanism which is used to control access
to and visibility of MIB instrumentation specify the
precise conditions that contribute to this value.
''',
'snmpinbadcommunityuses',
'SNMPv2-MIB', False),
_MetaInfoClassMember('snmpInBadValues', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of SNMP PDUs which were
delivered to the SNMP protocol entity and for
which the value of the error-status field was
`badValue'.
''',
'snmpinbadvalues',
'SNMPv2-MIB', False),
_MetaInfoClassMember('snmpInBadVersions', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of SNMP messages which were delivered
to the SNMP entity and were for an unsupported SNMP
version.
''',
'snmpinbadversions',
'SNMPv2-MIB', False),
_MetaInfoClassMember('snmpInGenErrs', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of SNMP PDUs which were delivered
to the SNMP protocol entity and for which the value
of the error-status field was `genErr'.
''',
'snmpingenerrs',
'SNMPv2-MIB', False),
_MetaInfoClassMember('snmpInGetNexts', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of SNMP Get-Next PDUs which have been
accepted and processed by the SNMP protocol entity.
''',
'snmpingetnexts',
'SNMPv2-MIB', False),
_MetaInfoClassMember('snmpInGetRequests', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of SNMP Get-Request PDUs which
have been accepted and processed by the SNMP
protocol entity.
''',
'snmpingetrequests',
'SNMPv2-MIB', False),
_MetaInfoClassMember('snmpInGetResponses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of SNMP Get-Response PDUs which
have been accepted and processed by the SNMP protocol
entity.
''',
'snmpingetresponses',
'SNMPv2-MIB', False),
_MetaInfoClassMember('snmpInNoSuchNames', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of SNMP PDUs which were
delivered to the SNMP protocol entity and for
which the value of the error-status field was
`noSuchName'.
''',
'snmpinnosuchnames',
'SNMPv2-MIB', False),
_MetaInfoClassMember('snmpInPkts', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of messages delivered to the SNMP
entity from the transport service.
''',
'snmpinpkts',
'SNMPv2-MIB', False),
_MetaInfoClassMember('snmpInReadOnlys', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number valid SNMP PDUs which were delivered
to the SNMP protocol entity and for which the value
of the error-status field was `readOnly'. It should
be noted that it is a protocol error to generate an
SNMP PDU which contains the value `readOnly' in the
error-status field, as such this object is provided
as a means of detecting incorrect implementations of
the SNMP.
''',
'snmpinreadonlys',
'SNMPv2-MIB', False),
_MetaInfoClassMember('snmpInSetRequests', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of SNMP Set-Request PDUs which
have been accepted and processed by the SNMP protocol
entity.
''',
'snmpinsetrequests',
'SNMPv2-MIB', False),
_MetaInfoClassMember('snmpInTooBigs', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of SNMP PDUs which were
delivered to the SNMP protocol entity and for
which the value of the error-status field was
`tooBig'.
''',
'snmpintoobigs',
'SNMPv2-MIB', False),
_MetaInfoClassMember('snmpInTotalReqVars', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of MIB objects which have been
retrieved successfully by the SNMP protocol entity
as the result of receiving valid SNMP Get-Request
and Get-Next PDUs.
''',
'snmpintotalreqvars',
'SNMPv2-MIB', False),
_MetaInfoClassMember('snmpInTotalSetVars', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of MIB objects which have been
altered successfully by the SNMP protocol entity as
the result of receiving valid SNMP Set-Request PDUs.
''',
'snmpintotalsetvars',
'SNMPv2-MIB', False),
_MetaInfoClassMember('snmpInTraps', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of SNMP Trap PDUs which have been
accepted and processed by the SNMP protocol entity.
''',
'snmpintraps',
'SNMPv2-MIB', False),
_MetaInfoClassMember('snmpOutBadValues', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of SNMP PDUs which were generated
by the SNMP protocol entity and for which the value
of the error-status field was `badValue'.
''',
'snmpoutbadvalues',
'SNMPv2-MIB', False),
_MetaInfoClassMember('snmpOutGenErrs', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of SNMP PDUs which were generated
by the SNMP protocol entity and for which the value
of the error-status field was `genErr'.
''',
'snmpoutgenerrs',
'SNMPv2-MIB', False),
_MetaInfoClassMember('snmpOutGetNexts', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of SNMP Get-Next PDUs which have
been generated by the SNMP protocol entity.
''',
'snmpoutgetnexts',
'SNMPv2-MIB', False),
_MetaInfoClassMember('snmpOutGetRequests', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of SNMP Get-Request PDUs which
have been generated by the SNMP protocol entity.
''',
'snmpoutgetrequests',
'SNMPv2-MIB', False),
_MetaInfoClassMember('snmpOutGetResponses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of SNMP Get-Response PDUs which
have been generated by the SNMP protocol entity.
''',
'snmpoutgetresponses',
'SNMPv2-MIB', False),
_MetaInfoClassMember('snmpOutNoSuchNames', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of SNMP PDUs which were generated
by the SNMP protocol entity and for which the value
of the error-status was `noSuchName'.
''',
'snmpoutnosuchnames',
'SNMPv2-MIB', False),
_MetaInfoClassMember('snmpOutPkts', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of SNMP Messages which were
passed from the SNMP protocol entity to the
transport service.
''',
'snmpoutpkts',
'SNMPv2-MIB', False),
_MetaInfoClassMember('snmpOutSetRequests', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of SNMP Set-Request PDUs which
have been generated by the SNMP protocol entity.
''',
'snmpoutsetrequests',
'SNMPv2-MIB', False),
_MetaInfoClassMember('snmpOutTooBigs', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of SNMP PDUs which were generated
by the SNMP protocol entity and for which the value
of the error-status field was `tooBig.'
''',
'snmpouttoobigs',
'SNMPv2-MIB', False),
_MetaInfoClassMember('snmpOutTraps', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of SNMP Trap PDUs which have
been generated by the SNMP protocol entity.
''',
'snmpouttraps',
'SNMPv2-MIB', False),
_MetaInfoClassMember('snmpProxyDrops', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of Confirmed Class PDUs
(such as GetRequest-PDUs, GetNextRequest-PDUs,
GetBulkRequest-PDUs, SetRequest-PDUs, and
InformRequest-PDUs) delivered to the SNMP entity which
were silently dropped because the transmission of
the (possibly translated) message to a proxy target
failed in a manner (other than a time-out) such that
no Response Class PDU (such as a Response-PDU) could
be returned.
''',
'snmpproxydrops',
'SNMPv2-MIB', False),
_MetaInfoClassMember('snmpSilentDrops', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of Confirmed Class PDUs (such as
GetRequest-PDUs, GetNextRequest-PDUs,
GetBulkRequest-PDUs, SetRequest-PDUs, and
InformRequest-PDUs) delivered to the SNMP entity which
were silently dropped because the size of a reply
containing an alternate Response Class PDU (such as a
Response-PDU) with an empty variable-bindings field
was greater than either a local constraint or the
maximum message size associated with the originator of
the request.
''',
'snmpsilentdrops',
'SNMPv2-MIB', False),
],
'SNMPv2-MIB',
'snmp',
_yang_ns._namespaces['SNMPv2-MIB'],
'ydk.models.cisco_ios_xe.SNMPv2_MIB'
),
},
'Snmpv2Mib.Snmpset' : {
'meta_info' : _MetaInfoClass('Snmpv2Mib.Snmpset',
False,
[
_MetaInfoClassMember('snmpSetSerialNo', ATTRIBUTE, 'int' , None, None,
[('0', '2147483647')], [],
''' An advisory lock used to allow several cooperating
command generator applications to coordinate their
use of the SNMP set operation.
This object is used for coarse-grain coordination.
To achieve fine-grain coordination, one or more similar
objects might be defined within each MIB group, as
appropriate.
''',
'snmpsetserialno',
'SNMPv2-MIB', False),
],
'SNMPv2-MIB',
'snmpSet',
_yang_ns._namespaces['SNMPv2-MIB'],
'ydk.models.cisco_ios_xe.SNMPv2_MIB'
),
},
'Snmpv2Mib.Sysortable.Sysorentry' : {
'meta_info' : _MetaInfoClass('Snmpv2Mib.Sysortable.Sysorentry',
False,
[
_MetaInfoClassMember('sysORIndex', ATTRIBUTE, 'int' , None, None,
[('1', '2147483647')], [],
''' The auxiliary variable used for identifying instances
of the columnar objects in the sysORTable.
''',
'sysorindex',
'SNMPv2-MIB', True),
_MetaInfoClassMember('sysORDescr', ATTRIBUTE, 'str' , None, None,
[], [],
''' A textual description of the capabilities identified
by the corresponding instance of sysORID.
''',
'sysordescr',
'SNMPv2-MIB', False),
_MetaInfoClassMember('sysORID', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-1](\\.[1-3]?[0-9]))|(2\\.(0|([1-9]\\d*))))(\\.(0|([1-9]\\d*)))*'],
''' An authoritative identification of a capabilities
statement with respect to various MIB modules supported
by the local SNMP application acting as a command
responder.
''',
'sysorid',
'SNMPv2-MIB', False),
_MetaInfoClassMember('sysORUpTime', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The value of sysUpTime at the time this conceptual
row was last instantiated.
''',
'sysoruptime',
'SNMPv2-MIB', False),
],
'SNMPv2-MIB',
'sysOREntry',
_yang_ns._namespaces['SNMPv2-MIB'],
'ydk.models.cisco_ios_xe.SNMPv2_MIB'
),
},
'Snmpv2Mib.Sysortable' : {
'meta_info' : _MetaInfoClass('Snmpv2Mib.Sysortable',
False,
[
_MetaInfoClassMember('sysOREntry', REFERENCE_LIST, 'Sysorentry' , 'ydk.models.cisco_ios_xe.SNMPv2_MIB', 'Snmpv2Mib.Sysortable.Sysorentry',
[], [],
''' An entry (conceptual row) in the sysORTable.
''',
'sysorentry',
'SNMPv2-MIB', False),
],
'SNMPv2-MIB',
'sysORTable',
_yang_ns._namespaces['SNMPv2-MIB'],
'ydk.models.cisco_ios_xe.SNMPv2_MIB'
),
},
'Snmpv2Mib' : {
'meta_info' : _MetaInfoClass('Snmpv2Mib',
False,
[
_MetaInfoClassMember('snmp', REFERENCE_CLASS, 'Snmp' , 'ydk.models.cisco_ios_xe.SNMPv2_MIB', 'Snmpv2Mib.Snmp',
[], [],
''' ''',
'snmp',
'SNMPv2-MIB', False),
_MetaInfoClassMember('snmpSet', REFERENCE_CLASS, 'Snmpset' , 'ydk.models.cisco_ios_xe.SNMPv2_MIB', 'Snmpv2Mib.Snmpset',
[], [],
''' ''',
'snmpset',
'SNMPv2-MIB', False),
_MetaInfoClassMember('sysORTable', REFERENCE_CLASS, 'Sysortable' , 'ydk.models.cisco_ios_xe.SNMPv2_MIB', 'Snmpv2Mib.Sysortable',
[], [],
''' The (conceptual) table listing the capabilities of
the local SNMP application acting as a command
responder with respect to various MIB modules.
SNMP entities having dynamically-configurable support
of MIB modules will have a dynamically-varying number
of conceptual rows.
''',
'sysortable',
'SNMPv2-MIB', False),
_MetaInfoClassMember('system', REFERENCE_CLASS, 'System' , 'ydk.models.cisco_ios_xe.SNMPv2_MIB', 'Snmpv2Mib.System',
[], [],
''' ''',
'system',
'SNMPv2-MIB', False),
],
'SNMPv2-MIB',
'SNMPv2-MIB',
_yang_ns._namespaces['SNMPv2-MIB'],
'ydk.models.cisco_ios_xe.SNMPv2_MIB'
),
},
}
_meta_table['Snmpv2Mib.Sysortable.Sysorentry']['meta_info'].parent =_meta_table['Snmpv2Mib.Sysortable']['meta_info']
_meta_table['Snmpv2Mib.System']['meta_info'].parent =_meta_table['Snmpv2Mib']['meta_info']
_meta_table['Snmpv2Mib.Snmp']['meta_info'].parent =_meta_table['Snmpv2Mib']['meta_info']
_meta_table['Snmpv2Mib.Snmpset']['meta_info'].parent =_meta_table['Snmpv2Mib']['meta_info']
_meta_table['Snmpv2Mib.Sysortable']['meta_info'].parent =_meta_table['Snmpv2Mib']['meta_info']
| {
"content_hash": "b2a4c6e9bcc0ae6fb08161ef3dca80d6",
"timestamp": "",
"source": "github",
"line_count": 533,
"max_line_length": 197,
"avg_line_length": 51.35647279549718,
"alnum_prop": 0.5026851276805612,
"repo_name": "111pontes/ydk-py",
"id": "67031c064833cf2e367b44ce94fecabff8c388ef",
"size": "27376",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cisco-ios-xe/ydk/models/cisco_ios_xe/_meta/_SNMPv2_MIB.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "7226"
},
{
"name": "Python",
"bytes": "446117948"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('payments', '0005_auto_20170919_1621'),
]
operations = [
migrations.AlterModelOptions(
name='orderpayment',
options={'permissions': (('refund_orderpayment', 'Can refund order payments'),), 'verbose_name': 'order payment', 'verbose_name_plural': 'order payments'},
),
migrations.AlterModelOptions(
name='payment',
options={'ordering': ('-created', '-updated'), 'verbose_name': 'payment', 'verbose_name_plural': 'payments'},
),
migrations.AlterField(
model_name='orderpayment',
name='status',
field=models.CharField(choices=[(b'created', 'Created'), (b'started', 'Started'), (b'cancelled', 'Cancelled'), (b'pledged', 'Pledged'), (b'authorized', 'Authorized'), (b'settled', 'Settled'), (b'charged_back', 'Charged_back'), (b'refund_requested', 'Refund requested'), (b'refunded', 'Refunded'), (b'failed', 'Failed'), (b'unknown', 'Unknown')], default=b'created', max_length=50),
),
migrations.AlterField(
model_name='payment',
name='status',
field=models.CharField(choices=[(b'created', 'Created'), (b'started', 'Started'), (b'cancelled', 'Cancelled'), (b'pledged', 'Pledged'), (b'authorized', 'Authorized'), (b'settled', 'Settled'), (b'charged_back', 'Charged_back'), (b'refund_requested', 'Refund requested'), (b'refunded', 'Refunded'), (b'failed', 'Failed'), (b'unknown', 'Unknown')], default=b'started', max_length=50),
),
]
| {
"content_hash": "ca8fcb2f0664602e82de92caf8a2001d",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 393,
"avg_line_length": 54,
"alnum_prop": 0.6051373954599761,
"repo_name": "onepercentclub/bluebottle",
"id": "7be70c79239d4edca99502580f62a2c85c4bc813",
"size": "1747",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bluebottle/payments/migrations/0006_auto_20181115_1321.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "41694"
},
{
"name": "HTML",
"bytes": "246695"
},
{
"name": "Handlebars",
"bytes": "63"
},
{
"name": "JavaScript",
"bytes": "139123"
},
{
"name": "PHP",
"bytes": "35"
},
{
"name": "PLpgSQL",
"bytes": "1369882"
},
{
"name": "PostScript",
"bytes": "2927"
},
{
"name": "Python",
"bytes": "4983116"
},
{
"name": "Rich Text Format",
"bytes": "39109"
},
{
"name": "SCSS",
"bytes": "99555"
},
{
"name": "Shell",
"bytes": "3068"
},
{
"name": "Smarty",
"bytes": "3814"
}
],
"symlink_target": ""
} |
"""
Python API for B2FIND.
Retrieve dataset info matching given search criteria using the CKAN portal
"""
__author__ = 'Roberto Mucci ([email protected])'
import json
import requests
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(NullHandler())
def get_dataset_source(ckan_url='eudat-b1.dkrz.de', community='', pattern=[],
ckan_limit=1000):
"""
    Retrieve dataset sources matching given search criteria using the CKAN portal.
:param ckan_url: CKAN portal address, to which search requests are submitted
(default is eudat-b1.dkrz.de).
:param community: Community where you want to search in.
:param pattern: CKAN search pattern, i.e. (a list of) field:value terms.
:param ckan_limit: Limit of listed datasets (default is 1000).
    :return: list of dataset sources (each source should be a physical URL to
    the data object).
"""
if (not pattern) and (not community):
print "[ERROR] Need at least a community or a search pattern as " \
"argument!"
return
ckan_pattern = ''
sand = ''
pattern = ' AND '.join(pattern)
if community:
ckan_pattern += "groups:%s" % community
sand = " AND "
if pattern:
ckan_pattern += sand + pattern
LOGGER.debug("Search in %s for pattern %s\n....." % (ckan_url, ckan_pattern))
answer = _action(ckan_url, {"q": ckan_pattern, "rows": ckan_limit,
"start": 0})
if answer is None:
return answer
countURL = 0
results = []
for ds in answer['result']['results']:
results.append(ds['url'])
countURL += 1
LOGGER.info("Found %d Sources\n" % (countURL))
return results
def get_dataset_info(ckan_url='eudat-b1.dkrz.de', community='', pattern=[],
ckan_limit=1000):
"""
    Retrieve dataset info matching given search criteria using the CKAN portal.
:param ckan_url: CKAN portal address, to which search requests are submitted
(default is eudat-b1.dkrz.de).
:param community: Community where you want to search in.
:param pattern: CKAN search pattern, i.e. (a list of) field:value terms.
:param ckan_limit: Limit of listed datasets (default is 1000).
    :return: list of datasets (each dataset is a list of dictionaries composed
    of key/value pairs), considering only the datasets containing a PID
    value.
"""
if (not pattern) and (not community):
print "[ERROR] Need at least a community or a search pattern as " \
"argument!"
return
ckan_pattern = ''
sand = ''
pattern = ' AND '.join(pattern)
if community:
ckan_pattern += "groups:%s" % community
sand = " AND "
if pattern:
ckan_pattern += sand + pattern
LOGGER.debug("Search in %s for pattern %s\n....." % (ckan_url, ckan_pattern))
answer = _action(ckan_url, {"q": ckan_pattern, "rows": ckan_limit,
"start": 0})
if answer is None:
return answer
countPID = 0
results = []
for ds in answer['result']['results']:
for extra in ds['extras']:
if extra['key'] == 'PID':
# add dataset to list
results.append(ds['extras'])
countPID += 1
break
LOGGER.info("Found %d PIDs\n" % (countPID))
return results
def _action(host, data, action='package_search'):
# Make the HTTP request for data set generation.
action_url = "http://{host}/api/3/action/{action}".format(host=host,
action=action)
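    # With the defaults this resolves to (illustrative):
    #   http://eudat-b1.dkrz.de/api/3/action/package_search
    # and the search terms in 'data' are sent as query parameters below.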
try:
response = requests.get(action_url, params=data)
except requests.exceptions.RequestException as e:
print e.message
return
except requests.exceptions.HTTPError as e:
print e
return
if response.status_code != 200:
print "Error code {0}. The server {1} couldn't fulfill the action {2}.\n"\
.format(response.status_code, host, action)
return
out = json.loads(response.text)
return out
def main():
""" Main function to test the script """
#get_dataset_info(pattern=['tags:MPIOM'])
get_dataset_info(community='aleph')
get_dataset_source(community='aleph')
get_dataset_source(pattern=['tags:climate'])
if __name__ == '__main__':
main() | {
"content_hash": "dbe94ebc9bb71c5acae5e024dc4f08d7",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 82,
"avg_line_length": 31.20408163265306,
"alnum_prop": 0.5955962502725093,
"repo_name": "EUDAT-B2STAGE/EUDAT-Library",
"id": "af4ae2fd101204399a7ce238f06e7861b451b281",
"size": "4610",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "eudat/b2find.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "28800"
}
],
"symlink_target": ""
} |
"""
Known Issues:
-------------
- GlyphLineView does not properly handle fonts that are not saved to a file.
this is because there is not good way to find the font index for a given font
and the preview control requires a font index. so, the best thing i can
do at this point is get the font index by comparing file paths.
- GlyphView only works with the first master in a multiple master font.
Dealing with multiple masters is a pain, so I have no plans for fixing this.
"""
import os
import weakref
from FL import *
__all__ = ['ModalDialog', 'Button', 'TextBox', 'EditText', 'PopUpButton', 'List', 'CheckBox', 'GlyphLineView', 'GlyphView', 'HorizontalLine', 'VerticalLine']
osName = os.name
if osName == 'posix':
osName = 'mac'
class ModalDialog(object):
def __init__(self, posSize, title=None, okText="OK", cancelText="Cancel", okCallback=None, cancelCallback=None):
self._dialog = Dialog(self)
if len(posSize) == 2:
x, y = posSize
self._dialog.size = Point(x, y)
self._dialog.Center()
self._size = (x, y)
else:
x, y, w, h = posSize
self._dialog.rectangle = Rect(x, y, x+w, y+h)
self._size = (w, h)
if title is None:
title = ''
self._dialog.title = title
self._dialog.ok = okText
self._dialog.cancel = cancelText
#
self._okCallback=okCallback
self._cancelCallback=cancelCallback
def __setattr__(self, attr, value):
if isinstance(value, UIBaseObject):
assert not hasattr(self, attr), "attribute '%s' can not be replaced" % attr
#
value._contentID = 'contentID_for_' + attr
# hack for canvas controls:
# FL requires that custom controls begin with '_'
if isinstance(value, _CanvasWrapper):
value._contentID = '_' + value._contentID
#
value._setDialog(self)
value._setupContent()
#
x, y, w, h = value._posSize
# convert posSize to Dialog coordinates
winW, winH = self._size
if x < 0:
l = winW + x
else:
l = x
#
if w <= 0:
r = winW + w
else:
r = l + w
#
if y < 0:
t = winH + y
else:
t = y
#
if h <= 0:
b = winH + h
else:
b = t + h
#
# _CanvasWrapper needs to know the rect size
# when it is painting, so store it.
value._rectSize = (r-l, b-t)
#
pos = Rect(l, t, r, b)
self._dialog.AddControl(value._type, pos, value._contentID, value._style, value._title)
#
# it doesn't matter if the value does not have a callback
# assigned. the _callbackWrapper method safely handles
# those cases. the reason it is not handled here is that
# custom controls (used by _CanvasWrapper) use the method
# normally reserved for control hits to paint the control.
setattr(self, 'on_%s' % value._contentID, value._callbackWrapper)
super(ModalDialog, self).__setattr__(attr, value)
def open(self):
"""open the dialog"""
self._dialog.Run()
def close(self):
"""close the dialog"""
self._dialog.End()
def on_cancel(self, code):
if self._cancelCallback is not None:
self._cancelCallback(self)
def on_ok(self, code):
if self._okCallback is not None:
self._okCallback(self)
class UIBaseObject(object):
def __init__(self, posSize, title, callback=None, content=None):
self._posSize = posSize
self._title = title
self._callback = callback
self._content = content
def _setDialog(self, dialog):
self._dialog = weakref.ref(dialog)
def _callbackWrapper(self, code):
if self._callback is not None:
self._callback(self)
def _setupContent(self):
# set the attribute data in the parent class.
# this will be used for GetValue and PutValue operations.
setattr(self._dialog(), self._contentID, self._content)
def enable(self, value):
"""
enable the object by passing True
disable the object by passing False
"""
value = int(value)
dialog = self._dialog()
dialog._dialog.Enable(self._contentID, value)
def show(self, value):
"""
show the object by passing True
hide the object by passing False
"""
dialog = self._dialog()
dialog._dialog.Show(self._contentID, value)
def set(self, value):
"""
set the content of the object
"""
# temporarily suspend the callback
# bacause FontLab 5 calls the method
# assigned to a control when the
# control value is set programatically
callback = self._callback
self._callback = None
# set the ccontent
self._content = value
dialog = self._dialog()
setattr(dialog, self._contentID, value)
dialog._dialog.PutValue(self._contentID)
# reset the callback
self._callback = callback
def get(self):
"""
return the contents of the object
"""
dialog = self._dialog()
dialog._dialog.GetValue(self._contentID)
self._content = getattr(dialog, self._contentID)
return self._content
class Button(UIBaseObject):
_type = BUTTONCONTROL
_style = STYLE_BUTTON
def __init__(self, posSize, title, callback=None):
super(Button, self).__init__(posSize=posSize, title=title, callback=callback, content=title)
def set(self, value):
"""
Not implemented for Button
"""
raise NotImplementedError, "It is not possible to set the text in a button"
class PopUpButton(UIBaseObject):
_type = CHOICECONTROL
_style = STYLE_CHOICE
def __init__(self, posSize, items, callback=None):
super(PopUpButton, self).__init__(posSize=posSize, title='', callback=callback, content=items)
def _setupContent(self):
super(PopUpButton, self)._setupContent()
self._contentIndexID = self._contentID + '_index'
self.setSelection(0)
def setSelection(self, value):
"""
set the selected item
value should be an index
"""
# temporarily suspend the callback
callback = self._callback
self._callback = None
# set the value
if value is None:
value = -1
dialog = self._dialog()
setattr(dialog, self._contentIndexID, value)
dialog._dialog.PutValue(self._contentID)
# reset the callback
self._callback = callback
def getSelection(self):
"""
return the index of the selected item
"""
dialog = self._dialog()
dialog._dialog.GetValue(self._contentID)
getattr(dialog, self._contentID)
index = getattr(dialog, self._contentIndexID)
if index == -1:
index = None
return index
class List(UIBaseObject):
_type = LISTCONTROL
_style = STYLE_LIST
def __init__(self, posSize, items, callback=None):
super(List, self).__init__(posSize=posSize, title='', callback=callback, content=items)
def _setupContent(self):
super(List, self)._setupContent()
self._contentIndexID = self._contentID + '_index'
self.setSelection([0])
def __len__(self):
return len(self._content)
def __getitem__(self, index):
return self._content[index]
def __setitem__(self, index, value):
self._content[index] = value
self.set(self._content)
def __delitem__(self, index):
del self._content[index]
self.set(self._content)
def __getslice__(self, a, b):
return self._content[a:b]
def __delslice__(self, a, b):
del self._content[a:b]
self.set(self._content)
def __setslice__(self, a, b, items):
self._content[a:b] = items
self.set(self._content)
def append(self, item):
self._content.append(item)
self.set(self._content)
def remove(self, item):
index = self._content.index(item)
del self._content[index]
self.set(self._content)
def index(self, item):
return self._content.index(item)
def insert(self, index, item):
self._content.insert(index, item)
self.set(self._content)
def extend(self, items):
self._content.extend(items)
self.set(self._content)
def replace(self, index, item):
del self._content[index]
self._content.insert(index, item)
self.set(self._content)
#
def setSelection(self, value):
"""
set the selected item index(es)
value should be a list of indexes
in FontLab, it setting multiple
selection indexes is not possible.
"""
dialog = self._dialog()
if len(value) < 1:
value = -1
else:
value = value[0]
setattr(dialog, self._contentIndexID, value)
dialog._dialog.PutValue(self._contentID)
def getSelection(self):
"""
return a list of selected item indexes
"""
import sys
dialog = self._dialog()
dialog._dialog.GetValue(self._contentID)
getattr(dialog, self._contentID)
index = getattr(dialog, self._contentIndexID)
# Since FLS v5.2, the GetValue() method of the Dialog() class returns
# a 'wrong' index value from the specified LISTCONTROL.
# If the selected index is n, it will return n-1. For example, when
# the index is 1, it returns 0; when it's 2, it returns 1, and so on.
# If the selection is empty, FLS v5.2 returns -2, while the old v5.0
# returned None.
# See also:
# - https://github.com/robofab-developers/robofab/pull/14
# - http://forum.fontlab.com/index.php?topic=8807.0
# - http://forum.fontlab.com/index.php?topic=9003.0
if fl.buildnumber > 4600 and sys.platform == 'win32':
if index == -2:
                index = -1
else:
index += 1
if index == -1:
return []
return [index]
def set(self, value):
"""
set the contents of the list
"""
self._content = value
dialog = self._dialog()
setattr(dialog, self._contentID, value)
dialog._dialog.PutValue(self._contentID)
def get(self):
"""
return the contents of the list
"""
return self._content
class EditText(UIBaseObject):
_type = EDITCONTROL
_style = STYLE_EDIT
def __init__(self, posSize, text="", callback=None):
super(EditText, self).__init__(posSize=posSize, title='', callback=callback, content=text)
def set(self, value):
if osName == 'mac':
value = '\r'.join(value.splitlines())
super(EditText, self).set(value)
class TextBox(UIBaseObject):
_type = STATICCONTROL
_style = STYLE_LABEL
def __init__(self, posSize, text):
super(TextBox, self).__init__(posSize=posSize, title=text, callback=None, content=text)
def set(self, value):
if osName == 'mac':
value = '\r'.join(value.splitlines())
super(TextBox, self).set(value)
class CheckBox(UIBaseObject):
_type = CHECKBOXCONTROL
_style = STYLE_CHECKBOX
def __init__(self, posSize, title, callback=None, value=False):
value = int(value)
super(CheckBox, self).__init__(posSize=posSize, title=title, callback=callback, content=value)
def set(self, value):
"""
set the state of the object
value should be a boolean
"""
value = int(value)
super(CheckBox, self).set(value)
def get(self):
"""
returns a boolean representing the state of the object
"""
value = super(CheckBox, self).get()
return bool(value)
class _CanvasWrapper(UIBaseObject):
_type = STATICCONTROL
_style = STYLE_CUSTOM
def __init__(self, posSize):
super(_CanvasWrapper, self).__init__(posSize=posSize, title='', callback=None, content=None)
def _callbackWrapper(self, canvas):
# oddly, the custom control is painted
# by the method that would normally be
# called when the control is hit.
self._paint(canvas)
class _Line(_CanvasWrapper):
def __init__(self, posSize):
super(_Line, self).__init__(posSize=posSize)
def _paint(self, canvas):
canvas.brush_color = cRGB_GRAY
canvas.brush_style = cBRUSH_SOLID
canvas.draw_style = 1
#
w, h = self._rectSize
r = Rect(0, 0, w, h)
canvas.Rectangle(0, r)
class HorizontalLine(_Line):
def __init__(self, posSize):
x, y, w, h = posSize
super(HorizontalLine, self).__init__(posSize=(x, y, w, 1))
class VerticalLine(_Line):
def __init__(self, posSize):
x, y, w, h = posSize
super(VerticalLine, self).__init__(posSize=(x, y, 1, h))
def _unwrapRobofab(obj):
# this could be a raw FontLab object or a robofab object.
# the preference is for raw FontLab objects. this
# function safely unwraps robofab objects.
try:
from robofab.world import RFont, RGlyph
haveRobofab = True
except ImportError:
haveRobofab = False
if haveRobofab:
if isinstance(obj, RFont) or isinstance(obj, RGlyph):
return obj.naked()
return obj
def _fontIndex(font):
font = _unwrapRobofab(font)
#
fonts = [(fl[i], i) for i in xrange(len(fl))]
#
for otherFont, index in fonts:
if otherFont.file_name == font.file_name: # grrr.
return index
return -1
class GlyphLineView(UIBaseObject):
_type = PREVIEWCONTROL
_style = STYLE_LABEL
def __init__(self, posSize, text="", font=None, rightToLeft=False):
if font is None:
self._fontIndex = fl.ifont
else:
self._fontIndex = _fontIndex(font)
        self._rightToLeft = rightToLeft
text = self._makeText(text)
super(GlyphLineView, self).__init__(posSize=posSize, title="", callback=None, content=text)
def _makeText(self, text):
text = "f:%d|d:%s|r:%d" % (self._fontIndex, text, self._rightToLeft)
return text
def set(self, text):
"""
set the text displayed text string
"""
text = self._makeText(text)
super(GlyphLineView, self).set(text)
def get(self):
"""
return the displayed text string
"""
return self._content[6:-4]
def setFont(self, font):
"""
set the index for the font that should be displayed
"""
if font is None:
self._fontIndex = -1
else:
self._fontIndex = _fontIndex(font)
self.set(self.get())
def setRightToLeft(self, value):
"""
set the setting directon of the display
"""
self._rightToLeft = value
self.set(self.get())
class GlyphView(_CanvasWrapper):
def __init__(self, posSize, font, glyph, margin=30,
showFill=True, showOutline=False,
showDescender=True, showBaseline=True, showXHeight=True,
showAscender=True, showCapHeight=True, showUPMTop=False,
showLeftSidebearing=True, showRightSidebearing=True,
showOnCurvePoints=True):
#
super(GlyphView, self).__init__(posSize=posSize)
#
self._showFill = showFill
self._showOutline = showOutline
self._margin = margin
self._showDescender = showDescender
self._showBaseline = showBaseline
self._showXHeight = showXHeight
self._showAscender = showAscender
self._showCapHeight = showCapHeight
self._showUPMTop = showUPMTop
self._showLeftSidebearing = showLeftSidebearing
self._showRightSidebearing = showRightSidebearing
#
self._showOnCurvePoints = showOnCurvePoints
#
self.set(font, glyph)
def set(self, font, glyph):
"""
change the glyph displayed in the view
"""
if font is None or glyph is None:
self._font = None
self._glyph = None
else:
self._font = _unwrapRobofab(font)
self._glyph = _unwrapRobofab(glyph)
###
def getShowFill(self):
return self._showFill
def setShowFill(self, value):
self._showFill = value
def getShowOutline(self):
return self._showOutline
def setShowOutline(self, value):
self._showOutline = value
def getMargin(self):
return self._margin
def setMargin(self, value):
self._margin = value
def getShowDescender(self):
return self._showDescender
def setShowDescender(self, value):
self._showDescender = value
def getShowBaseline(self):
return self._showBaseline
def setShowBaseline(self, value):
self._showBaseline = value
def getShowXHeight(self):
return self._showXHeight
def setShowXHeight(self, value):
self._showXHeight = value
def getShowAscender(self):
return self._showAscender
def setShowAscender(self, value):
self._showAscender = value
def getShowCapHeight(self):
return self._showCapHeight
def setShowCapHeight(self, value):
self._showCapHeight = value
def getShowUPMTop(self):
return self._showUPMTop
def setShowUPMTop(self, value):
self._showUPMTop = value
def getShowLeftSidebearing(self):
return self._showLeftSidebearing
def setShowLeftSidebearing(self, value):
self._showLeftSidebearing = value
def getShowRightSidebearing(self):
return self._showRightSidebearing
def setShowRightSidebearing(self, value):
self._showRightSidebearing = value
def getShowOnCurvePoints(self):
return self._showOnCurvePoints
def setShowOnCurvePoints(self, value):
self._showOnCurvePoints = value
###
def update(self):
if hasattr(self, '_dialog'):
dialog = self._dialog()
dialog._dialog.Repaint(self._contentID)
def _paint(self, canvas):
if self._font is None or self._glyph is None:
return
font = self._font
glyph = self._glyph
#
upm = font.upm
descender = font.descender[0]
baseline = 0
xHeight = font.x_height[0]
ascender = font.ascender[0]
capHeight = font.cap_height[0]
#
glyphWidth = glyph.width
#
viewWidth, viewHeight = self._rectSize
liveWidth = viewWidth - (self._margin * 2)
liveHeight = viewHeight - (self._margin * 2)
#
scale = liveHeight / float(upm)
#
xOffset = (viewWidth - (glyphWidth * scale)) / 2
yOffset = ((upm + descender) * scale) + self._margin
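        # Worked example (hypothetical values): a 1000 UPM font with descender
        # -250 in a 560px-high view with a 30px margin gives liveHeight = 500,
        # scale = 0.5 and yOffset = (1000 - 250) * 0.5 + 30 = 405, so the
        # baseline (y = 0) lands 405px from the top once the y axis is flipped.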
#
left = -xOffset * (1.0 / scale)
right = glyphWidth + abs(left)
top = upm + descender + (self._margin * (1.0 / scale))
bottom = descender - (self._margin * (1.0 / scale))
#
canvas.delta = Point(xOffset, yOffset)
canvas.scale = Point(scale, -scale)
#
canvas.pen_color = cRGB_LTGRAY
#
if self._showDescender:
canvas.MoveTo(Point(left, descender))
canvas.LineTo(Point(right, descender))
if self._showBaseline:
canvas.MoveTo(Point(left, baseline))
canvas.LineTo(Point(right, baseline))
if self._showXHeight:
canvas.MoveTo(Point(left, xHeight))
canvas.LineTo(Point(right, xHeight))
if self._showAscender:
canvas.MoveTo(Point(left, ascender))
canvas.LineTo(Point(right, ascender))
if self._showCapHeight:
canvas.MoveTo(Point(left, capHeight))
canvas.LineTo(Point(right, capHeight))
if self._showUPMTop:
canvas.MoveTo(Point(left, upm+descender))
canvas.LineTo(Point(right, upm+descender))
#
if self._showLeftSidebearing:
canvas.MoveTo(Point(0, bottom))
canvas.LineTo(Point(0, top))
if self._showRightSidebearing:
canvas.MoveTo(Point(glyphWidth, bottom))
canvas.LineTo(Point(glyphWidth, top))
#
if self._showFill:
canvas.FillGlyph(glyph)
canvas.OutlineGlyph(glyph) # XXX hack to hide gray outline
if self._showOutline:
canvas.OutlineGlyph(glyph)
#
if self._showOnCurvePoints:
canvas.pen_color = cRGB_RED
canvas.brush_color = cRGB_RED
markerSize = 5 * (1.0 / scale)
halfMarkerSize = markerSize / 2
for node in glyph.nodes:
x, y = node.x, node.y
x -= halfMarkerSize
y -= halfMarkerSize
if node.alignment == nSMOOTH:
mode = 1
else:
mode = 0
canvas.Rectangle(mode, Rect(x, y, x+markerSize, y+markerSize))
| {
"content_hash": "27fd2205c60f849f137efe6328d8406d",
"timestamp": "",
"source": "github",
"line_count": 730,
"max_line_length": 157,
"avg_line_length": 29.980821917808218,
"alnum_prop": 0.5673032989125468,
"repo_name": "anthrotype/dialogKit",
"id": "69210672d792ef78e059e29a69ed1cfacb9829c8",
"size": "21886",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Lib/dialogKit/_dkFL.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4320"
},
{
"name": "Python",
"bytes": "63091"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import base64
import hashlib
import hmac
import json
import sys
import time
import datetime
import urllib
import urllib2
from socket import error as SocketError
import errno
class VDCApiCall(object):
"""
Class for making signed API calls to the Interoute VDC.
"""
def __init__(self, api_url, apiKey, secret):
"""
Initialise the signed API call object with the URL and the
required API key and Secret key.
"""
self.api_url = api_url
self.apiKey = apiKey
self.secret = secret
def request(self, args):
"""
Form the request based on the provided args and using the apikey
and secret. This ensures the request has the correct signature
calculated.
"""
args['apiKey'] = self.apiKey
request = zip(args.keys(), args.values())
request.sort(key=lambda x: x[0].lower())
request_data = "&".join(["=".join([r[0], urllib.quote_plus(str(r[1]),safe='*')])
for r in request])
hashStr = "&".join(
[
"=".join(
[r[0].lower(),
str.lower(urllib.quote_plus(str(r[1]),safe='*')).replace(
"+", "%20"
)]
) for r in request
]
)
sig = urllib.quote_plus(base64.b64encode(
hmac.new(
self.secret,
hashStr,
hashlib.sha1
).digest()
).strip())
request_data += "&signature=%s" % sig
# print the URL string for debug
###print(request_data)
###print(self.api_url + "?" + request_data)
try:
connection = urllib2.urlopen(self.api_url + "?" + request_data ) # GET request
##connection = urllib2.urlopen(self.api_url, request_data) # POST request
response = connection.read()
except SocketError as e:
if e.errno != errno.ECONNRESET:
# not a RESET error so report it and exit
print('Socket error: %s' % e.errno)
sys.exit()
# ignore a RESET error and carry on by returning an empty response
error_timestamp = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S+0000")
print("\n%s Exception for socket error 104 for request:\n%s" % (error_timestamp,self.api_url + "?" + request_data))
return '{"CONNECTIONERRORresponse":{}}'
except urllib2.HTTPError as error:
print('HTTP Error: %s' % error.code)
description = str(error.info())
description = description.split('\n')
description = [line
for line
in description
if line.startswith('X-Description: ')]
if len(description) > 0:
description = description[0].split(':', 1)[-1].lstrip()
else:
description = '(No extended error message.)'
print(description)
sys.exit()
return response
def wait_for_job(self, job_id, delay=2, display_progress=True):
"""
Wait for the given job ID to return a result.
Sleeps for 'delay' seconds between each check for the job
finishing- default 2.
Will output a '.' for every 'delay' if 'display_progress' is true.
"""
request = {
'jobid': job_id,
}
while(True):
result = self.queryAsyncJobResult(request)
if display_progress:
print('.', end='')
sys.stdout.flush()
if 'jobresult' in result:
print('')
return result['jobresult']
time.sleep(delay)
def __getattr__(self, name):
def handlerFunction(*args, **kwargs):
if kwargs:
return self._make_request(name, kwargs)
return self._make_request(name, args[0])
return handlerFunction
def _make_request(self, command, args):
args['response'] = 'json'
args['command'] = command
data = self.request(args)
# The response is of the format {commandresponse: actual-data}
##key = command.lower() + "response"
## Temporary change due to incompatible behaviour of the new network commands
##return json.loads(data)[key]
##print("DEBUG data: %s" % data)
return json.loads(data).values()[0]
| {
"content_hash": "f87021cffe0b7483a890198208c83ef3",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 127,
"avg_line_length": 35.32824427480916,
"alnum_prop": 0.5252808988764045,
"repo_name": "Interoute/API-fun-and-education",
"id": "ad76f2465e86839564a5dffe15e6233613143ab7",
"size": "5036",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vdc_api_call.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "172000"
},
{
"name": "Shell",
"bytes": "1387"
}
],
"symlink_target": ""
} |
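A minimal usage sketch for the signing client above. The endpoint, keys and the listVirtualMachines command are illustrative placeholders; any CloudStack-style command works the same way because __getattr__ turns attribute access into a signed request.
from vdc_api_call import VDCApiCall
api = VDCApiCall('https://example.invalid/vdc/api',    # placeholder endpoint
                 apiKey='YOUR_API_KEY', secret='YOUR_SECRET')
# Dispatched through __getattr__ -> _make_request('listVirtualMachines', {...})
result = api.listVirtualMachines({})
for vm in result.get('virtualmachine', []):            # CloudStack-style field name, illustrative
    print('%s %s' % (vm['name'], vm['state']))
# Asynchronous commands return a job id that wait_for_job() polls:
# job = api.deployVirtualMachine({...}); outcome = api.wait_for_job(job['jobid'])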
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
import pyxb.binding.generate
import pyxb.utils.domutils
from xml.dom import Node
import os.path
xsd='''<?xml version="1.0" encoding="UTF-8"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="a"/>
<xs:element name="b"/>
<xs:element name="c"/>
<xs:element name="d"/>
<xs:element name="e"/>
<xs:group name="Cabc">
<xs:choice>
<xs:element ref="a"/>
<xs:element ref="b"/>
<xs:element ref="c"/>
</xs:choice>
</xs:group>
<xs:group name="Cbcd">
<xs:choice>
<xs:element ref="b"/>
<xs:element ref="c"/>
<xs:element ref="d"/>
</xs:choice>
</xs:group>
<xs:group name="Cbe">
<xs:choice>
<xs:element ref="b"/>
<xs:element ref="e"/>
</xs:choice>
</xs:group>
<xs:group name="CabcPCbcdPCbe">
<xs:sequence>
<xs:group ref="Cabc"/>
<xs:group ref="Cbcd"/>
<xs:group ref="Cbe"/>
</xs:sequence>
</xs:group>
<xs:group name="CbcdPCbe">
<xs:sequence>
<xs:group ref="Cbcd"/>
<xs:group ref="Cbe"/>
</xs:sequence>
</xs:group>
<xs:complexType name="aBCde">
<xs:sequence>
<xs:group ref="CabcPCbcdPCbe"/>
</xs:sequence>
</xs:complexType>
<xs:complexType name="Bcde">
<xs:sequence>
<xs:group ref="CbcdPCbe"/>
</xs:sequence>
</xs:complexType>
<xs:complexType name="aBCDE">
<xs:sequence>
<xs:group ref="CabcPCbcdPCbe"/>
<xs:group ref="CbcdPCbe"/>
</xs:sequence>
</xs:complexType>
</xs:schema>'''
code = pyxb.binding.generate.GeneratePython(schema_text=xsd)
#print code
rv = compile(code, 'test', 'exec')
eval(rv)
from pyxb.exceptions_ import *
import unittest
class TestTrac0034 (unittest.TestCase):
def test_aBCde (self):
instance = aBCde()
self.assertEqual(None, instance.a)
self.assertEqual([], instance.b)
self.assertEqual([], instance.c)
self.assertEqual(None, instance.d)
self.assertEqual(None, instance.e)
def test_Bcde (self):
instance = Bcde()
self.assertEqual([], instance.b)
self.assertEqual(None, instance.c)
self.assertEqual(None, instance.d)
self.assertEqual(None, instance.e)
def test_aBCDE (self):
instance = aBCDE()
self.assertEqual(None, instance.a)
self.assertEqual([], instance.b)
self.assertEqual([], instance.c)
self.assertEqual([], instance.d)
self.assertEqual([], instance.e)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "274112b597e0b4b667a2d36d09543c6a",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 60,
"avg_line_length": 25.29126213592233,
"alnum_prop": 0.5930902111324377,
"repo_name": "pabigot/pyxb",
"id": "b572edcab86a10428f592bb1a10c4b0d850495ea",
"size": "2629",
"binary": false,
"copies": "2",
"ref": "refs/heads/next",
"path": "tests/trac/test-trac-0034b.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1927697"
},
{
"name": "Shell",
"bytes": "20792"
}
],
"symlink_target": ""
} |
""" Defines the MoveTool class.
"""
# Enthought library imports
from traits.api import Tuple
from enable.tools.drag_tool import DragTool
class MoveTool(DragTool):
""" A tool for moving a plot component.
"""
# The (x,y) offset of the start of the drag relative to the component.
_offset = Tuple((0,0))
def drag_start(self, event):
""" Called when the drag operation starts.
Implements DragTool.
"""
self._offset = (event.x - self.component.x, event.y - self.component.y)
event.handled = True
def dragging(self, event):
""" This method is called for every mouse_move event that the tool
receives while the user is dragging the mouse.
Implements DragTool. Moves the component.
"""
c = self.component
c.position = [event.x - self._offset[0], event.y - self._offset[1]]
if getattr(c, "x_mapper", None):
c.x_mapper.updated = True
if getattr(c, "y_mapper", None):
c.y_mapper.updated = True
if getattr(c, "vgrid", None):
c.vgrid.invalidate()
if getattr(c, "hgrid", None):
c.hgrid.invalidate()
event.handled = True
c.request_redraw()
| {
"content_hash": "353ccb1141421d7eacfcfbb8ec027fa2",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 79,
"avg_line_length": 30.26829268292683,
"alnum_prop": 0.5954875100725222,
"repo_name": "burnpanck/chaco",
"id": "5dfe0ed72670b1c4b1afa0592d01086c49166749",
"size": "1241",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "chaco/tools/move_tool.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "57089"
},
{
"name": "C++",
"bytes": "9881"
},
{
"name": "Gnuplot",
"bytes": "611"
},
{
"name": "Python",
"bytes": "1761203"
}
],
"symlink_target": ""
} |
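A short sketch of attaching the tool above to a component, assuming an existing Chaco plot instance named plot (the standard Enable tools-list pattern; the variable name is illustrative):
from chaco.tools.move_tool import MoveTool
# Dragging the plot now repositions it and refreshes its mappers and grids,
# as implemented in dragging() above.
plot.tools.append(MoveTool(component=plot))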
import subprocess
import datetime
from custom_logger import get_logger
import sys
__author__ = 'cenk'
logger = get_logger()
def daily_rollup(args):
keyspace = args[1]
now = datetime.datetime.now()
end_time = datetime.datetime(now.year, now.month, now.day, now.hour, 00)
end_time = int(end_time.strftime("%s"))
start_time = end_time - (60 * 60 * 24)
logger.debug("End Time: %s, Start Time: %s", end_time, start_time)
command = "nohup /data/spark/bin/spark-submit --class net.egemsoft.rrd.Main " \
"--master spark://ipam-ulus-db-2 target/cassandra-spark-rollup-1.0-driver.jar " \
" spMaster=spark://ipam-ulus-db-2:7077 casHost=ipam-ulus-db-2 " \
"casKeyspace=%s casTable=metric rollup=3600 start=%s end=%s destRollup=86400 ttl=94608000 &\n" % (
keyspace, start_time, end_time)
logger.debug("Command: %s", command)
try:
p = subprocess.call(command, shell=True, stdout=subprocess.PIPE, cwd="/home/sparkuser/cassandra-spark-rollup")
logger.debug(p)
except Exception, e:
logger.error(e.message)
daily_rollup(sys.argv)
## python daily_rollup.py ipam
| {
"content_hash": "516d736fe3cd10cf83e1bf55f4356186",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 118,
"avg_line_length": 34.588235294117645,
"alnum_prop": 0.6445578231292517,
"repo_name": "egemsoft/cassandra-spark-rollup",
"id": "0f442f94afed419104695863467ec400614d7576",
"size": "1198",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cronjob/app/daily_rollup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "38984"
},
{
"name": "Python",
"bytes": "8652"
}
],
"symlink_target": ""
} |
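The start/end arguments passed to spark-submit above are just the last 24 hours aligned to the top of the current hour; a small illustration of the same arithmetic with a fixed, made-up clock time:
import calendar
import datetime
# e.g. if the cron fires at 14:37, the rollup window is 14:00 yesterday -> 14:00 today
fired_at = datetime.datetime(2016, 3, 10, 14, 37)
end = datetime.datetime(fired_at.year, fired_at.month, fired_at.day, fired_at.hour, 0)
end_epoch = calendar.timegm(end.timetuple())  # epoch seconds, treating end as UTC
                                              # (the script's strftime("%s") uses local time)
start_epoch = end_epoch - 60 * 60 * 24        # exactly one day earlier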
from flask.templating import render_template, render_template_string
import logging
from waitlist import permissions
from typing import List, Optional, Dict, Union, Any
from flask_babel import lazy_gettext
from flask_login.utils import current_user
logger = logging.getLogger(__name__)
class OrderedItem(object):
def __init__(self, order=None):
self.order = 9999 if order is None else int(order)
@staticmethod
def sort_key(item) -> int:
return item.order
class MenuItem(OrderedItem):
def __init__(self, title, classes, url, iconclass=None, order=None,
url_for=False, perms=None, customtemplate=None,
use_gettext=True,
need_authenticated: bool=False):
super(MenuItem, self).__init__(order)
if use_gettext:
self.title = lazy_gettext(title)
else:
self.title = title
self.classes = classes
self.url = url
self.iconclass = iconclass
self.template = 'mainmenu/item.html'
self.url_for = url_for
self.perms = [] if perms is None else perms
self.customtemplate = customtemplate
self.need_authenticated = need_authenticated
def render(self):
for perm_name in self.perms:
if not permissions.perm_manager.get_permission(perm_name).can():
return ''
if self.need_authenticated and not current_user.is_authenticated:
return ''
customhtml = None
if self.customtemplate:
customhtml = render_template_string(self.customtemplate, item=self)
return render_template(self.template,
item=self, customhtml=customhtml)
def __repr__(self):
return f'<MenuItem order={self.order} text={self.title}>'
class Menu(OrderedItem):
def __init__(self, identity: str, classes: str='justify-content-start',
order: int=None, perms: List[str]=None,
need_authenticated: bool=False):
super(Menu, self).__init__(order)
self.items = []
self.identity = identity
self.perms = [] if perms is None else perms
self.classes = classes
self.__submenuregistry = dict()
self.__delayed_item_adds = dict()
self.__delayed_menu_adds = dict()
self.template = 'mainmenu/menu.html'
self.need_authenticated = need_authenticated
def add_item(self, item: MenuItem, target_id: str=None):
if target_id is None:
target_id = self.identity
logger.debug('Registering %r under %s', item, target_id)
target = self.__get_menu_by_identity(target_id)
# target menu is not know yet
if target is None:
self.__add_delayed(item, target_id, self.__delayed_item_adds)
return
if target is self:
self.items.append(item)
self.items.sort(key=OrderedItem.sort_key, reverse=False)
else:
target.add_item(item, target_id)
def add_submenu(self, menu, target_id: str=None):
if target_id is None:
target_id = self.identity
logger.debug('Registering %r under %s', menu, target_id)
target = self.__get_menu_by_identity(target_id)
# lets check if we have delayed adds for this menu
self.__handle_delayed_adds(menu)
# if the target is not know (yet?)
# save it for delayed adding
if target is None:
logger.debug('Target is None delaying %r', menu)
self.__add_delayed(menu, target_id, self.__delayed_menu_adds)
self.__submenuregistry[menu.identity] = menu
return
# if it is us add it
if target is self:
logger.debug('Adding as submenu to %r', self),
self.items.append(menu)
self.items.sort(key=OrderedItem.sort_key, reverse=False)
else:
logger.debug('Calling %r for add', target)
target.add_submenu(menu, target_id)
self.__submenuregistry[menu.identity] = menu
def __get_menu_by_identity(self, identity: str):
if self.identity == identity:
return self
if identity in self.__submenuregistry:
return self.__submenuregistry[identity]
logger.debug('Failed to get menu for identity=%s returning None',
identity)
return None
def __add_delayed(self, item: Union[MenuItem, Any],
target_id: str, queue: Dict[str, Any]):
if target_id in queue:
queue[target_id].append(item)
else:
queue[target_id] = [item]
return
def __handle_delayed_adds(self, menu):
# check for menus first
if menu.identity in self.__delayed_menu_adds:
for delayed_menu in self.__delayed_menu_adds[menu.identity]:
menu.add_submenu(delayed_menu)
# now check for item adds
        if menu.identity in self.__delayed_item_adds:
for delayed_item in self.__delayed_item_adds[menu.identity]:
menu.add_item(delayed_item)
def render(self):
for perm_name in self.perms:
if not permissions.perm_manager.get_permission(perm_name).can():
return ''
if self.need_authenticated and not current_user.is_authenticated:
return ''
return render_template(self.template,
menu=self)
def __repr__(self):
return f'<Menu identity={self.identity} order={self.order}>'
class Navbar(Menu):
def __init__(self, identity: str, htmlid: str, brand: str=None,
need_authenticated: bool=False):
        super(Navbar, self).__init__(identity, need_authenticated=need_authenticated)
self.htmlid = htmlid
self.brand = brand
self.template = 'mainmenu/navbar.html'
def __repr__(self):
return (f'<Navbar identity={self.identity} order={self.order} '
f'htmlid={self.htmlid}>')
class DropdownMenu(Menu):
def __init__(self, identity, title: str='', classes: str='',
iconclass: Optional[str]=None, order: Optional[int]=None,
perms: List[str]=None, customtemplate: Optional[str]=None,
nodetag: str='a', dropclasses: str='',
triggerclasses: str='nav-link', use_gettext=True,
need_authenticated: bool=False
):
super(DropdownMenu, self).__init__(identity, classes, order, perms,
need_authenticated)
self.iconclass = iconclass
if use_gettext:
self.title = lazy_gettext(title)
else:
self.title = title
self.classes = classes
self.customtemplate = customtemplate
self.nodetag = nodetag
self.dropclasses = dropclasses
self.triggerclasses = triggerclasses
self.template = 'mainmenu/dropdown.html'
def render(self):
for perm_name in self.perms:
if not permissions.perm_manager.get_permission(perm_name).can():
                return ''
customhtml = None
if self.customtemplate is not None:
customhtml = render_template_string(self.customtemplate, menu=self)
return render_template(self.template,
menu=self, customhtml=customhtml)
class DropdownDivider(MenuItem):
def __init__(self, order=None, perms=None,
need_authenticated: bool=False):
super(DropdownDivider, self).__init__(None, None, None, order=order,
perms=perms, need_authenticated=need_authenticated)
def render(self):
for perm_name in self.perms:
if not permissions.perm_manager.get_permission(perm_name).can():
return ''
if self.need_authenticated and not current_user.is_authenticated:
return ''
return '<div class="dropdown-divider"></div>'
class DropdownItem(MenuItem):
def __init__(self, title, classes, url, iconclass=None, order=None,
url_for=False, perms=None, customtemplate=None,
use_gettext=True,
need_authenticated: bool=False
):
super(DropdownItem, self).__init__(title, classes, url, iconclass,
order, url_for, perms,
customtemplate, use_gettext,
need_authenticated
)
self.template = 'mainmenu/dropdownitem.html'
| {
"content_hash": "5505e85d6741cad42244d377c2b18320",
"timestamp": "",
"source": "github",
"line_count": 244,
"max_line_length": 97,
"avg_line_length": 35.75819672131148,
"alnum_prop": 0.576160458452722,
"repo_name": "SpeedProg/eve-inc-waitlist",
"id": "ffbfdb44482802c14fb25a3a1f0484fdb177b8b9",
"size": "8725",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "waitlist/utility/mainmenu/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "96"
},
{
"name": "CSS",
"bytes": "597076"
},
{
"name": "HTML",
"bytes": "153350"
},
{
"name": "JavaScript",
"bytes": "275286"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "676219"
},
{
"name": "Shell",
"bytes": "75"
}
],
"symlink_target": ""
} |
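A rough construction sketch for the menu classes above; the identities, titles and endpoint names are made up, but the delayed-registration behaviour is the one implemented in Menu.add_item/add_submenu:
main_nav = Navbar('main', htmlid='main-nav', brand='Waitlist')
# Registering an item against a submenu that does not exist yet queues it in
# __delayed_item_adds; it is replayed once the DropdownMenu is attached.
main_nav.add_item(MenuItem('Fittings', 'nav-item', 'fittings.index', url_for=True),
                  target_id='tools')
main_nav.add_submenu(DropdownMenu('tools', title='Tools', order=10))
# Items without a target_id land directly on the navbar.
main_nav.add_item(MenuItem('Home', 'nav-item', 'index', url_for=True, order=0))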
"""Constant values for pvpc_hourly_pricing."""
DOMAIN = "pvpc_hourly_pricing"
PLATFORMS = ["sensor"]
ATTR_POWER = "power"
ATTR_POWER_P3 = "power_p3"
ATTR_TARIFF = "tariff"
DEFAULT_NAME = "PVPC"
| {
"content_hash": "1a2af67dc29b02ee166c3946b6865c51",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 46,
"avg_line_length": 27.714285714285715,
"alnum_prop": 0.6958762886597938,
"repo_name": "aronsky/home-assistant",
"id": "ad97124c33089e1cfcc32ba745a3fef67074c3f7",
"size": "194",
"binary": false,
"copies": "6",
"ref": "refs/heads/dev",
"path": "homeassistant/components/pvpc_hourly_pricing/const.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "38448521"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
"""Errors used in the urlfetch API
developers.
"""
class Error(Exception):
"""Base URL fetcher error type."""
class DownloadError(Error):
"""Raised when we could not fetch the URL for any reason.
Note that this exception is only raised when we cannot contact the
server. HTTP errors (e.g., 404) are returned in the status_code field
in the return value of fetch, and no exception is raised.
"""
class MalformedReplyError(DownloadError):
"""Raised when the target server returns an invalid HTTP response.
Responses are invalid if they contain no headers, malformed or
incomplete headers, or have content missing.
"""
class TooManyRedirectsError(DownloadError):
"""Raised when follow_redirects input parameter was set to true and the
redirect limit was hit."""
class InternalTransientError(Error):
"""Raised when an internal transient error occurs."""
class ConnectionClosedError(DownloadError):
"""Raised when the target server prematurely closes the connection."""
class InvalidURLError(Error):
"""Raised when the URL given is empty or invalid.
Only http: and https: URLs are allowed. The maximum URL length
allowed is 2048 characters. The login/pass portion is not
allowed. In deployed applications, only ports 80 and 443 for http
and https respectively are allowed.
"""
class PayloadTooLargeError(InvalidURLError):
"""Raised when the request payload exceeds the limit."""
class DNSLookupFailedError(DownloadError):
"""Raised when the DNS lookup for a URL failed."""
class DeadlineExceededError(DownloadError):
"""Raised when we could not fetch the URL because the deadline was exceeded.
This can occur with either the client-supplied 'deadline' or the system
default, if the client does not supply a 'deadline' parameter.
"""
class ResponseTooLargeError(Error):
"""Raised when the response was too large and was truncated."""
def __init__(self, response):
self.response = response
class InvalidMethodError(Error):
"""Raised when an invalid value for 'method' is provided"""
class SSLCertificateError(Error):
"""Raised when an invalid server certificate is presented."""
| {
"content_hash": "7d1d473ca455ca4e713eb5ec94b8981a",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 78,
"avg_line_length": 25.114942528735632,
"alnum_prop": 0.7423340961098398,
"repo_name": "zlsun/XX-Net",
"id": "df4d80a5f991508f9ceacc6438f790ded5d625c3",
"size": "2789",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "code/default/gae_proxy/server/lib/google/appengine/api/urlfetch_errors.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3884"
},
{
"name": "C",
"bytes": "53301"
},
{
"name": "CSS",
"bytes": "86883"
},
{
"name": "HTML",
"bytes": "188948"
},
{
"name": "JavaScript",
"bytes": "6274"
},
{
"name": "Python",
"bytes": "15347559"
},
{
"name": "Shell",
"bytes": "7812"
},
{
"name": "Visual Basic",
"bytes": "1700"
}
],
"symlink_target": ""
} |
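These classes are what callers of the fetch API catch; a hedged sketch of typical handling (urlfetch.fetch lives in google.appengine.api.urlfetch, and process/handle_* are placeholders):
from google.appengine.api import urlfetch
from google.appengine.api import urlfetch_errors
try:
    result = urlfetch.fetch('https://example.com/data', deadline=10)
    if result.status_code == 200:      # HTTP errors are not exceptions
        process(result.content)
except urlfetch_errors.DeadlineExceededError:
    handle_timeout()                   # remote server did not answer in time
except urlfetch_errors.DownloadError:
    handle_unreachable()               # DNS failure, connection reset, malformed reply, ...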
from django.contrib import admin
from accounts.models import TodoUser
admin.site.register(TodoUser)
| {
"content_hash": "1e412596e864fb97246af6209369ace8",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 36,
"avg_line_length": 20.4,
"alnum_prop": 0.8333333333333334,
"repo_name": "hirune525/todolist",
"id": "0db25aa1c5323d67f082140e4464bf1cc3705122",
"size": "125",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "accounts/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2859"
},
{
"name": "JavaScript",
"bytes": "1585"
},
{
"name": "Python",
"bytes": "19191"
}
],
"symlink_target": ""
} |
response.logo = A('nStock',
_class="navbar-brand", _href=URL('default', 'index'),
_id="web2py-logo")
response.title = request.application.replace('_', ' ').title()
response.subtitle = ''
# ----------------------------------------------------------------------------------------------------------------------
# read more at http://dev.w3.org/html5/markup/meta.name.html
# ----------------------------------------------------------------------------------------------------------------------
response.meta.author = myconf.get('app.author')
response.meta.description = myconf.get('app.description')
response.meta.keywords = myconf.get('app.keywords')
response.meta.generator = myconf.get('app.generator')
# ----------------------------------------------------------------------------------------------------------------------
# your http://google.com/analytics id
# ----------------------------------------------------------------------------------------------------------------------
response.google_analytics_id = None
# ----------------------------------------------------------------------------------------------------------------------
# this is the main application menu add/remove items as required
# ----------------------------------------------------------------------------------------------------------------------
response.menu = []
# prepare dashboard menu
def _():
query = (db.dashboard.id > 0)
query &= (db.dashboard.created_by == auth.user.id)
dash_list = db(query).select(db.dashboard.ALL)
if not dash_list:
# make a new dashboard
name = T('My Dashboard', lazy=False)
d_id = db.dashboard.insert(
name=name, item_list=[])
query = (db.dashboard.id > 0)
query &= (db.dashboard.created_by == auth.user.id)
dash_list = db(query).select(db.dashboard.ALL)
auth.add_permission(0, 'owner', db.dashboard, d_id)
submenu = []
submenu.append(("CAST", False, URL('default', 'index'), []))
for dash in dash_list:
submenu.append(
(dash.name, False, URL('dashboard', 'index', args=[dash.id]), [])
)
response.menu += [
(T("Dashboard's"), False, URL('default', 'index'), submenu)
]
if auth.user:
#_()
pass
DEVELOPMENT_MENU = False
# ----------------------------------------------------------------------------------------------------------------------
# provide shortcuts for development. remove in production
# ----------------------------------------------------------------------------------------------------------------------
def _():
# ------------------------------------------------------------------------------------------------------------------
# shortcuts
# ------------------------------------------------------------------------------------------------------------------
app = request.application
ctr = request.controller
# ------------------------------------------------------------------------------------------------------------------
# useful links to internal and external resources
# ------------------------------------------------------------------------------------------------------------------
response.menu += [
(T('My Sites'), False, URL('admin', 'default', 'site')),
(T('This App'), False, '#', [
(T('Design'), False, URL('admin', 'default', 'design/%s' % app)),
LI(_class="divider"),
(T('Controller'), False,
URL(
'admin', 'default', 'edit/%s/controllers/%s.py' % (app, ctr))),
(T('View'), False,
URL(
'admin', 'default', 'edit/%s/views/%s' % (app, response.view))),
(T('DB Model'), False,
URL(
'admin', 'default', 'edit/%s/models/db.py' % app)),
(T('Menu Model'), False,
URL(
'admin', 'default', 'edit/%s/models/menu.py' % app)),
(T('Config.ini'), False,
URL(
'admin', 'default', 'edit/%s/private/appconfig.ini' % app)),
(T('Layout'), False,
URL(
'admin', 'default', 'edit/%s/views/layout.html' % app)),
(T('Stylesheet'), False,
URL(
'admin', 'default', 'edit/%s/static/css/web2py-bootstrap3.css' % app)),
(T('Database'), False, URL(app, 'appadmin', 'index')),
(T('Errors'), False, URL(
'admin', 'default', 'errors/' + app)),
(T('About'), False, URL(
'admin', 'default', 'about/' + app)),
]),
('web2py.com', False, '#', [
(T('Download'), False,
'http://www.web2py.com/examples/default/download'),
(T('Support'), False,
'http://www.web2py.com/examples/default/support'),
(T('Demo'), False, 'http://web2py.com/demo_admin'),
(T('Quick Examples'), False,
'http://web2py.com/examples/default/examples'),
(T('FAQ'), False, 'http://web2py.com/AlterEgo'),
(T('Videos'), False,
'http://www.web2py.com/examples/default/videos/'),
(T('Free Applications'),
False, 'http://web2py.com/appliances'),
(T('Plugins'), False, 'http://web2py.com/plugins'),
(T('Recipes'), False, 'http://web2pyslices.com/'),
]),
(T('Documentation'), False, '#', [
(T('Online book'), False, 'http://www.web2py.com/book'),
LI(_class="divider"),
(T('Preface'), False,
'http://www.web2py.com/book/default/chapter/00'),
(T('Introduction'), False,
'http://www.web2py.com/book/default/chapter/01'),
(T('Python'), False,
'http://www.web2py.com/book/default/chapter/02'),
(T('Overview'), False,
'http://www.web2py.com/book/default/chapter/03'),
(T('The Core'), False,
'http://www.web2py.com/book/default/chapter/04'),
(T('The Views'), False,
'http://www.web2py.com/book/default/chapter/05'),
(T('Database'), False,
'http://www.web2py.com/book/default/chapter/06'),
(T('Forms and Validators'), False,
'http://www.web2py.com/book/default/chapter/07'),
(T('Email and SMS'), False,
'http://www.web2py.com/book/default/chapter/08'),
(T('Access Control'), False,
'http://www.web2py.com/book/default/chapter/09'),
(T('Services'), False,
'http://www.web2py.com/book/default/chapter/10'),
(T('Ajax Recipes'), False,
'http://www.web2py.com/book/default/chapter/11'),
(T('Components and Plugins'), False,
'http://www.web2py.com/book/default/chapter/12'),
(T('Deployment Recipes'), False,
'http://www.web2py.com/book/default/chapter/13'),
(T('Other Recipes'), False,
'http://www.web2py.com/book/default/chapter/14'),
(T('Helping web2py'), False,
'http://www.web2py.com/book/default/chapter/15'),
(T("Buy web2py's book"), False,
'http://stores.lulu.com/web2py'),
]),
(T('Community'), False, None, [
(T('Groups'), False,
'http://www.web2py.com/examples/default/usergroups'),
(T('Twitter'), False, 'http://twitter.com/web2py'),
(T('Live Chat'), False,
'http://webchat.freenode.net/?channels=web2py'),
]),
]
if DEVELOPMENT_MENU:
_()
if "auth" in locals():
auth.wikimenu()
| {
"content_hash": "2d9ca3389ba26d8ed5b4515fa08ca980",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 120,
"avg_line_length": 45.19298245614035,
"alnum_prop": 0.42274844720496896,
"repo_name": "ybenitezf/nstock",
"id": "b91aa70afdc086155aae1c324c80b2de73829a88",
"size": "8128",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models/menu.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "105585"
},
{
"name": "HTML",
"bytes": "332414"
},
{
"name": "JavaScript",
"bytes": "160918"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "Python",
"bytes": "2835776"
}
],
"symlink_target": ""
} |
"""Run and parse output from FastQC.
http://www.bioinformatics.babraham.ac.uk/projects/fastqc/
"""
import os
import shutil
import pandas as pd
try:
from fadapa import Fadapa
except ImportError:
Fadapa = None
from bcbio import bam, utils
from bcbio.distributed.transaction import tx_tmpdir
from bcbio.log import logger
from bcbio.provenance import do
from bcbio.pipeline import datadict as dd
from bcbio.pipeline import config_utils
def run(bam_file, data, fastqc_out):
"""Run fastqc, generating report in specified directory and parsing metrics.
Downsamples to 10 million reads to avoid excessive processing times with large
files, unless we're running a Standard/smallRNA-seq/QC pipeline.
Handles fastqc 0.11+, which use a single HTML file and older versions that use
a directory of files + images. The goal is to eventually move to only 0.11+
"""
sentry_file = os.path.join(fastqc_out, "fastqc_report.html")
if not os.path.exists(sentry_file):
work_dir = os.path.dirname(fastqc_out)
utils.safe_makedir(work_dir)
ds_file = (bam.downsample(bam_file, data, 1e7, work_dir=work_dir)
if data.get("analysis", "").lower() not in ["standard", "smallrna-seq"]
else None)
if ds_file is not None:
bam_file = ds_file
frmt = "bam" if bam_file.endswith("bam") else "fastq"
fastqc_name = utils.splitext_plus(os.path.basename(bam_file))[0]
fastqc_clean_name = dd.get_sample_name(data)
# FastQC scales memory with threads (250mb per thread) so we avoid
# very low memory usage
num_cores = max(data["config"]["algorithm"].get("num_cores", 1), 2)
with tx_tmpdir(data, work_dir) as tx_tmp_dir:
with utils.chdir(tx_tmp_dir):
cl = [config_utils.get_program("fastqc", data["config"]),
"-d", tx_tmp_dir,
"-t", str(num_cores), "--extract", "-o", tx_tmp_dir, "-f", frmt, bam_file]
cl = "%s %s %s" % (utils.java_freetype_fix(),
utils.local_path_export(), " ".join([str(x) for x in cl]))
do.run(cl, "FastQC: %s" % dd.get_sample_name(data))
tx_fastqc_out = os.path.join(tx_tmp_dir, "%s_fastqc" % fastqc_name)
tx_combo_file = os.path.join(tx_tmp_dir, "%s_fastqc.html" % fastqc_name)
if not os.path.exists(sentry_file) and os.path.exists(tx_combo_file):
utils.safe_makedir(fastqc_out)
# Use sample name for reports instead of bam file name
with open(os.path.join(tx_fastqc_out, "fastqc_data.txt"), 'r') as fastqc_bam_name, \
open(os.path.join(tx_fastqc_out, "_fastqc_data.txt"), 'w') as fastqc_sample_name:
for line in fastqc_bam_name:
fastqc_sample_name.write(line.replace(os.path.basename(bam_file), fastqc_clean_name))
shutil.move(os.path.join(tx_fastqc_out, "_fastqc_data.txt"), os.path.join(fastqc_out, 'fastqc_data.txt'))
shutil.move(tx_combo_file, sentry_file)
if os.path.exists("%s.zip" % tx_fastqc_out):
shutil.move("%s.zip" % tx_fastqc_out, os.path.join(fastqc_out, "%s.zip" % fastqc_clean_name))
elif not os.path.exists(sentry_file):
raise ValueError("FastQC failed to produce output HTML file: %s" % os.listdir(tx_tmp_dir))
logger.info("Produced HTML report %s" % sentry_file)
parser = FastQCParser(fastqc_out, dd.get_sample_name(data))
stats = parser.get_fastqc_summary()
parser.save_sections_into_file()
return stats
class FastQCParser:
def __init__(self, base_dir, sample=None):
self._dir = base_dir
self.sample = sample
def get_fastqc_summary(self):
ignore = set(["Total Sequences", "Filtered Sequences",
"Filename", "File type", "Encoding"])
stats = {}
for stat_line in self._fastqc_data_section("Basic Statistics")[1:]:
k, v = stat_line.split("\t")[:2]
if k not in ignore:
stats[k] = v
return stats
def _fastqc_data_section(self, section_name):
out = []
in_section = False
data_file = os.path.join(self._dir, "fastqc_data.txt")
if os.path.exists(data_file):
with open(data_file) as in_handle:
for line in in_handle:
if line.startswith(">>%s" % section_name):
in_section = True
elif in_section:
if line.startswith(">>END"):
break
out.append(line.rstrip("\r\n"))
return out
def save_sections_into_file(self):
data_file = os.path.join(self._dir, "fastqc_data.txt")
if os.path.exists(data_file) and Fadapa:
parser = Fadapa(data_file)
module = [m[1] for m in parser.summary()][2:9]
for m in module:
out_file = os.path.join(self._dir, m.replace(" ", "_") + ".tsv")
dt = self._get_module(parser, m)
dt.to_csv(out_file, sep="\t", index=False)
def _get_module(self, parser, module):
"""
Get module using fadapa package
"""
dt = []
lines = parser.clean_data(module)
header = lines[0]
for data in lines[1:]:
if data[0].startswith("#"): # some modules have two headers
header = data
continue
if data[0].find("-") > -1: # expand positions 1-3 to 1, 2, 3
f, s = map(int, data[0].split("-"))
for pos in range(f, s):
dt.append([str(pos)] + data[1:])
else:
dt.append(data)
dt = pd.DataFrame(dt)
dt.columns = [h.replace(" ", "_") for h in header]
dt['sample'] = self.sample
return dt
| {
"content_hash": "351b5ef3f3fd66c8163df466be1741f4",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 125,
"avg_line_length": 45,
"alnum_prop": 0.5563786008230452,
"repo_name": "lbeltrame/bcbio-nextgen",
"id": "f4a798c74ae06b4365aa92d7e8b7cffb020945e6",
"size": "6075",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "bcbio/qc/fastqc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "3620"
},
{
"name": "Lua",
"bytes": "7695"
},
{
"name": "Python",
"bytes": "2557176"
},
{
"name": "Ruby",
"bytes": "624"
},
{
"name": "Shell",
"bytes": "16730"
}
],
"symlink_target": ""
} |
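A small sketch of driving the parser above outside the bcbio pipeline, assuming a FastQC output directory that already contains fastqc_data.txt (paths and sample name are illustrative):
parser = FastQCParser("qc/Sample1_fastqc", sample="Sample1")
# "Basic Statistics" minus the filename/encoding fields,
# e.g. {'Sequence length': '75', '%GC': '47', ...}
summary = parser.get_fastqc_summary()
# Writes each remaining FastQC module to a .tsv next to fastqc_data.txt
# (only if the optional fadapa dependency imported successfully).
parser.save_sections_into_file()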
"""This code example gets all child ad units of the effective root ad unit.
To create ad units, run create_ad_units.py
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: InventoryService.getAdUnitsByStatement
"""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
# Initialize appropriate service.
inventory_service = client.GetService('InventoryService', version='v201405')
network_service = client.GetService('NetworkService', version='v201405')
root_id = network_service.getCurrentNetwork()['effectiveRootAdUnitId']
# Create a statement to select the children of the effective root ad unit.
values = [{
'key': 'id',
'value': {
'xsi_type': 'TextValue',
'value': root_id
}
}]
query = 'WHERE parentId = :id'
statement = dfp.FilterStatement(query, values)
# Get ad units by statement.
while True:
response = inventory_service.getAdUnitsByStatement(
statement.ToStatement())
if 'results' in response:
# Display results.
for ad_unit in response['results']:
print ('Ad unit with ID \'%s\' and name \'%s\' was found.'
% (ad_unit['id'], ad_unit['name']))
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
| {
"content_hash": "0f495be610766e4638c10a5baf407e76",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 78,
"avg_line_length": 31.25,
"alnum_prop": 0.6811428571428572,
"repo_name": "dietrichc/streamline-ppc-reports",
"id": "241907a191c2406029cfd623dfae0aa3dd54127d",
"size": "2368",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/dfp/v201405/inventory_service/get_top_level_ad_units.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "492"
},
{
"name": "JavaScript",
"bytes": "504"
},
{
"name": "Python",
"bytes": "2235969"
}
],
"symlink_target": ""
} |
import tests_query_eval
import test_query_files
| {
"content_hash": "171d63c170e0f1f686651523eed8d6d3",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 23,
"avg_line_length": 24,
"alnum_prop": 0.8333333333333334,
"repo_name": "BRAINSia/tract_querier",
"id": "3956084250fce40eb75ba411bda75e93fa4427c1",
"size": "48",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tract_querier/tests/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1345"
},
{
"name": "Python",
"bytes": "253581"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
from tensorflow.contrib import slim
def _crop(image, offset_height, offset_width, crop_height, crop_width):
original_shape = tf.shape(image)
rank_assertion = tf.Assert(
tf.equal(tf.rank(image), 3),
['Rank of image must be equal to 3.'])
cropped_shape = control_flow_ops.with_dependencies(
[rank_assertion],
tf.stack([crop_height, crop_width, original_shape[2]]))
size_assertion = tf.Assert(
tf.logical_and(
tf.greater_equal(original_shape[0], crop_height),
tf.greater_equal(original_shape[1], crop_width)),
['Crop size greater than the image size.'])
offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))
# Use tf.slice instead of crop_to_bounding box as it accepts tensors to
# define the crop size.
image = control_flow_ops.with_dependencies(
[size_assertion],
tf.slice(image, offsets, cropped_shape))
return tf.reshape(image, cropped_shape)
def _random_crop(image_list, label_list, crop_height, crop_width):
if not image_list:
raise ValueError('Empty image_list.')
# Compute the rank assertions.
rank_assertions = []
for i in range(len(image_list)):
image_rank = tf.rank(image_list[i])
rank_assert = tf.Assert(
tf.equal(image_rank, 3),
['Wrong rank for tensor %s [expected] [actual]',
image_list[i].name, 3, image_rank])
rank_assertions.append(rank_assert)
image_shape = control_flow_ops.with_dependencies(
[rank_assertions[0]],
tf.shape(image_list[0]))
image_height = image_shape[0]
image_width = image_shape[1]
crop_size_assert = tf.Assert(
tf.logical_and(
tf.greater_equal(image_height, crop_height),
tf.greater_equal(image_width, crop_width)),
['Crop size greater than the image size.', image_height, image_width, crop_height, crop_width])
asserts = [rank_assertions[0], crop_size_assert]
for i in range(1, len(image_list)):
image = image_list[i]
asserts.append(rank_assertions[i])
shape = control_flow_ops.with_dependencies([rank_assertions[i]],
tf.shape(image))
height = shape[0]
width = shape[1]
height_assert = tf.Assert(
tf.equal(height, image_height),
['Wrong height for tensor %s [expected][actual]',
image.name, height, image_height])
width_assert = tf.Assert(
tf.equal(width, image_width),
['Wrong width for tensor %s [expected][actual]',
image.name, width, image_width])
asserts.extend([height_assert, width_assert])
# Create a random bounding box.
#
# Use tf.random_uniform and not numpy.random.rand as doing the former would
# generate random numbers at graph eval time, unlike the latter which
# generates random numbers at graph definition time.
max_offset_height = control_flow_ops.with_dependencies(
asserts, tf.reshape(image_height - crop_height + 1, []))
max_offset_width = control_flow_ops.with_dependencies(
asserts, tf.reshape(image_width - crop_width + 1, []))
offset_height = tf.random_uniform(
[], maxval=max_offset_height, dtype=tf.int32)
offset_width = tf.random_uniform(
[], maxval=max_offset_width, dtype=tf.int32)
cropped_images = [_crop(image, offset_height, offset_width,
crop_height, crop_width) for image in image_list]
cropped_labels = [_crop(label, offset_height, offset_width,
crop_height, crop_width) for label in label_list]
return cropped_images, cropped_labels
def _central_crop(image_list, label_list, crop_height, crop_width):
output_images = []
output_labels = []
for image, label in zip(image_list, label_list):
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
offset_height = (image_height - crop_height) / 2
offset_width = (image_width - crop_width) / 2
output_images.append(_crop(image, offset_height, offset_width,
crop_height, crop_width))
output_labels.append(_crop(label, offset_height, offset_width,
crop_height, crop_width))
return output_images, output_labels
def _smallest_size_at_least(height, width, smallest_side):
smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)
height = tf.to_float(height)
width = tf.to_float(width)
smallest_side = tf.to_float(smallest_side)
scale = tf.cond(tf.greater(height, width),
lambda: smallest_side / width,
lambda: smallest_side / height)
new_height = tf.to_int32(height * scale)
new_width = tf.to_int32(width * scale)
return new_height, new_width
def _aspect_preserving_resize(image, label, smallest_side):
smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)
shape = tf.shape(image)
height = shape[0]
width = shape[1]
new_height, new_width = _smallest_size_at_least(height, width, smallest_side)
image = tf.expand_dims(image, 0)
resized_image = tf.image.resize_bilinear(image, [new_height, new_width],
align_corners=False)
resized_image = tf.squeeze(resized_image, axis=[0])
resized_image.set_shape([None, None, 3])
label = tf.expand_dims(label, 0)
resized_label = tf.image.resize_nearest_neighbor(label, [new_height, new_width],
align_corners=False)
resized_label = tf.squeeze(resized_label, axis=[0])
resized_label.set_shape([None, None, 1])
return resized_image, resized_label
def flip_gt_boxes(gt_boxes, ih, iw):
x1s, y1s, x2s, y2s, cls = \
gt_boxes[:, 0], gt_boxes[:, 1], gt_boxes[:, 2], gt_boxes[:, 3], gt_boxes[:, 4]
x1s = tf.to_float(iw) - x1s
x2s = tf.to_float(iw) - x2s
return tf.concat(values=(x2s[:, tf.newaxis],
y1s[:, tf.newaxis],
x1s[:, tf.newaxis],
y2s[:, tf.newaxis],
cls[:, tf.newaxis]), axis=1)
def flip_gt_masks(gt_masks):
return tf.reverse(gt_masks, axis=[2])
def flip_image(image):
return tf.reverse(image, axis=[1])
def resize_gt_boxes(gt_boxes, scale_ratio):
xys, cls = \
gt_boxes[:, 0:4], gt_boxes[:, 4]
xys = xys * scale_ratio
return tf.concat(values=(xys, cls[:, tf.newaxis]), axis=1)
| {
"content_hash": "7bfa1df06f3c77e5e7f939e3bc1d8b04",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 101,
"avg_line_length": 37.067796610169495,
"alnum_prop": 0.6317634506934918,
"repo_name": "CharlesShang/FastMaskRCNN",
"id": "373e573f9ae3ae1860e39743d41c59d8788ab40f",
"size": "6561",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "libs/preprocessings/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "10425"
},
{
"name": "C++",
"bytes": "13169"
},
{
"name": "Cuda",
"bytes": "5064"
},
{
"name": "Makefile",
"bytes": "266"
},
{
"name": "Python",
"bytes": "414151"
},
{
"name": "Shell",
"bytes": "75"
}
],
"symlink_target": ""
} |
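A rough sketch of chaining the helpers above in a TF1-style input pipeline; the tensor names and the 600/512 sizes are illustrative, not values taken from this repository:
# image: float32 [H, W, 3], label: int32 [H, W, 1] from the decoder
image, label = _aspect_preserving_resize(image, label, smallest_side=600)
images, labels = _random_crop([image], [label], crop_height=512, crop_width=512)
image, label = images[0], labels[0]
# Random horizontal flip; flip_gt_boxes/flip_gt_masks would be wrapped in the
# same cond so boxes and masks stay aligned with the flipped image.
do_flip = tf.less(tf.random_uniform([]), 0.5)
image = tf.cond(do_flip, lambda: flip_image(image), lambda: image)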
from flask import current_app, escape
import redis
# Connect to redis. This is run only when this file is loaded; as
# long as the pod is alive, the connection is reused.
redisConnection = redis.StrictRedis(host='redis.guestbook', port=6379, db=0)
def main():
messages = redisConnection.lrange('guestbook', 0, -1)
items = [("<li>%s</li>" % escape(m.decode('utf-8'))) for m in messages]
ul = "<ul>%s</ul>" % "\n".join(items)
return """
<html><body style="font-family:sans-serif;font-size:2rem;padding:40px">
<h1>Guestbook</h1>
<form action="/guestbook" method="POST">
<input type="text" name="text">
<button type="submit">Add</button>
</form>
<hr/>
%s
</body></html>
""" % ul
| {
"content_hash": "b05c18c8fe5356fd19694dd6ec40fb27",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 77,
"avg_line_length": 34.47826086956522,
"alnum_prop": 0.5825977301387137,
"repo_name": "life1347/fission",
"id": "a95016b4fe0d687cfce0f36d87f4dabd25c7aefc",
"size": "894",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "demos/guestbook/get.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "74312"
},
{
"name": "Dockerfile",
"bytes": "11423"
},
{
"name": "Go",
"bytes": "1454635"
},
{
"name": "HCL",
"bytes": "1275"
},
{
"name": "Java",
"bytes": "4527"
},
{
"name": "JavaScript",
"bytes": "7718"
},
{
"name": "Makefile",
"bytes": "1556"
},
{
"name": "PHP",
"bytes": "2920"
},
{
"name": "Perl",
"bytes": "852"
},
{
"name": "Python",
"bytes": "4722"
},
{
"name": "Roff",
"bytes": "949"
},
{
"name": "Ruby",
"bytes": "5042"
},
{
"name": "Shell",
"bytes": "193063"
},
{
"name": "Smarty",
"bytes": "3049"
}
],
"symlink_target": ""
} |
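The form above posts to /guestbook; a sketch of what the matching add function could look like against the same redis.guestbook service (the exact handler shipped with the demo may differ):
from flask import request, redirect
import redis
redisConnection = redis.StrictRedis(host='redis.guestbook', port=6379, db=0)
def main():
    # Append the submitted text to the same list get.py reads with lrange.
    redisConnection.rpush('guestbook', request.form['text'])
    return redirect('/guestbook', code=303)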
"""Podcache core extension."""
from grow import extensions
from grow.cache import podcache
from grow.collections import collection
from grow.extensions import hooks
class PodcacheDevFileChangeHook(hooks.DevFileChangeHook):
"""Handle the dev file change hook."""
# pylint: disable=arguments-differ
def trigger(self, previous_result, pod_path, *_args, **_kwargs):
"""Trigger the file change hook."""
# Remove any raw file in the cache.
self.pod.podcache.file_cache.remove(pod_path)
if pod_path == '/{}'.format(self.pod.FILE_PODSPEC):
self.pod.podcache.reset()
elif (pod_path.endswith(collection.Collection.BLUEPRINT_PATH)
and pod_path.startswith(collection.Collection.CONTENT_PATH)):
doc = self.pod.get_doc(pod_path)
self.pod.podcache.collection_cache.remove_collection(doc.collection)
elif pod_path == '/{}'.format(podcache.FILE_OBJECT_CACHE):
self.pod.podcache.update(obj_cache=self.pod._parse_object_cache_file())
if self.pod.podcache.is_dirty:
self.pod.logger.info('Object cache changed, updating with new data.')
self.pod.podcache.write()
if previous_result:
return previous_result
return None
# pylint: disable=abstract-method
class PodcacheExtension(extensions.BaseExtension):
"""Extension for handling core podcache functionality."""
@property
def available_hooks(self):
"""Returns the available hook classes."""
return [PodcacheDevFileChangeHook]
| {
"content_hash": "35c6a5add663a55309b1ce9772d8f6bb",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 85,
"avg_line_length": 36.883720930232556,
"alnum_prop": 0.667717528373266,
"repo_name": "grow/pygrow",
"id": "78c337c14fa9df65631d58250504dd0099347a17",
"size": "1586",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grow/extensions/core/podcache_extension.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "527"
},
{
"name": "HTML",
"bytes": "8714"
},
{
"name": "Python",
"bytes": "309004"
},
{
"name": "Shell",
"bytes": "4219"
}
],
"symlink_target": ""
} |
"""Wildcard parsing."""
from __future__ import annotations
import re
import functools
import bracex
import os
from . import util
from . import posix
from . _wcmatch import WcRegexp
from typing import AnyStr, Iterable, Pattern, Generic, Optional, Sequence, overload, cast
UNICODE_RANGE = '\u0000-\U0010ffff'
ASCII_RANGE = '\x00-\xff'
PATTERN_LIMIT = 1000
RE_WIN_DRIVE_START = re.compile(r'((?:\\\\|/){2}((?:\\[^\\/]|[^\\/])+)|([\\]?[a-z][\\]?:))((?:\\\\|/)|$)', re.I)
RE_WIN_DRIVE_LETTER = re.compile(r'([a-z]:)((?:\\|/)|$)', re.I)
RE_WIN_DRIVE_PART = re.compile(r'((?:\\[^\\/]|[^\\/])+)((?:\\\\|/)|$)', re.I)
RE_WIN_DRIVE_UNESCAPE = re.compile(r'\\(.)', re.I)
RE_WIN_DRIVE = (
re.compile(
r'''(?x)
(
(?:\\\\|/){2}[?.](?:\\\\|/)(?:
[a-z]:|
unc(?:(?:\\\\|/)[^\\/]+){2} |
(?:global(?:\\\\|/))+(?:[a-z]:|unc(?:(?:\\\\|/)[^\\/]+){2}|[^\\/]+)
) |
(?:\\\\|/){2}[^\\/]+(?:\\\\|/)[^\\/]+|
[a-z]:
)((?:\\\\|/){1}|$)
''',
re.I
),
re.compile(
br'''(?x)
(
(?:\\\\|/){2}[?.](?:\\\\|/)(?:
[a-z]:|
unc(?:(?:\\\\|/)[^\\/]+){2} |
(?:global(?:\\\\|/))+(?:[a-z]:|unc(?:(?:\\\\|/)[^\\/]+){2}|[^\\/]+)
) |
(?:\\\\|/){2}[^\\/]+(?:\\\\|/)[^\\/]+|
[a-z]:
)((?:\\\\|/){1}|$)
''',
re.I
)
)
RE_MAGIC_ESCAPE = (
re.compile(r'([-!~*?()\[\]|{}]|(?<!\\)(?:(?:[\\]{2})*)\\(?!\\))'),
re.compile(br'([-!~*?()\[\]|{}]|(?<!\\)(?:(?:[\\]{2})*)\\(?!\\))')
)
MAGIC_DEF = (
frozenset("*?[]\\"),
frozenset(b"*?[]\\")
)
MAGIC_SPLIT = (
frozenset("|"),
frozenset(b"|")
)
MAGIC_NEGATE = (
frozenset('!'),
frozenset(b'!')
)
MAGIC_MINUS_NEGATE = (
frozenset('-'),
frozenset(b'-')
)
MAGIC_TILDE = (
frozenset('~'),
frozenset(b'~')
)
MAGIC_EXTMATCH = (
frozenset('()'),
frozenset(b'()')
)
MAGIC_BRACE = (
frozenset("{}"),
frozenset(b"{}")
)
RE_MAGIC = (
re.compile(r'([-!~*?(\[|{\\])'),
re.compile(br'([-!~*?(\[|{\\])')
)
RE_WIN_DRIVE_MAGIC = (
re.compile(r'([{}|]|(?<!\\)(?:(?:[\\]{2})*)\\(?!\\))'),
re.compile(br'([{}|]|(?<!\\)(?:(?:[\\]{2})*)\\(?!\\))')
)
RE_NO_DIR = (
re.compile(r'^(?:.*?(?:/\.{1,2}/*|/)|\.{1,2}/*)$'),
re.compile(br'^(?:.*?(?:/\.{1,2}/*|/)|\.{1,2}/*)$')
)
RE_WIN_NO_DIR = (
re.compile(r'^(?:.*?(?:[\\/]\.{1,2}[\\/]*|[\\/])|\.{1,2}[\\/]*)$'),
re.compile(br'^(?:.*?(?:[\\/]\.{1,2}[\\/]*|[\\/])|\.{1,2}[\\/]*)$')
)
RE_TILDE = (
re.compile(r'~[^/]*(?=/|$)'),
re.compile(br'~[^/]*(?=/|$)')
)
RE_WIN_TILDE = (
re.compile(r'~(?:\\(?![\\/])|[^\\/])*(?=\\\\|/|$)'),
re.compile(br'~(?:\\(?![\\/])|[^\\/])*(?=\\\\|/|$)')
)
TILDE_SYM = (
'~',
b'~'
)
RE_ANCHOR = re.compile(r'^/+')
RE_WIN_ANCHOR = re.compile(r'^(?:\\\\|/)+')
RE_POSIX = re.compile(r':(alnum|alpha|ascii|blank|cntrl|digit|graph|lower|print|punct|space|upper|word|xdigit):\]')
SET_OPERATORS = frozenset(('&', '~', '|'))
NEGATIVE_SYM = frozenset((b'!', '!'))
MINUS_NEGATIVE_SYM = frozenset((b'-', '-'))
ROUND_BRACKET = frozenset((b'(', '('))
EXT_TYPES = frozenset(('*', '?', '+', '@', '!'))
# Common flags are found between `0x0001 - 0xffffff`
# Implementation specific (`glob` vs `fnmatch` vs `wcmatch`) are found between `0x01000000 - 0xff000000`
# Internal special flags are found at `0x100000000` and above
CASE = 0x0001
IGNORECASE = 0x0002
RAWCHARS = 0x0004
NEGATE = 0x0008
MINUSNEGATE = 0x0010
PATHNAME = 0x0020
DOTMATCH = 0x0040
EXTMATCH = 0x0080
GLOBSTAR = 0x0100
BRACE = 0x0200
REALPATH = 0x0400
FOLLOW = 0x0800
SPLIT = 0x1000
MATCHBASE = 0x2000
NODIR = 0x4000
NEGATEALL = 0x8000
FORCEWIN = 0x10000
FORCEUNIX = 0x20000
GLOBTILDE = 0x40000
NOUNIQUE = 0x80000
NODOTDIR = 0x100000
# Internal flag
_TRANSLATE = 0x100000000 # Lets us know we are performing a translation, and we just want the regex.
_ANCHOR = 0x200000000 # The pattern, if it starts with a slash, is anchored to the working directory; strip the slash.
_EXTMATCHBASE = 0x400000000 # Like `MATCHBASE`, but works for multiple directory levels.
_NOABSOLUTE = 0x800000000 # Do not allow absolute patterns
_RTL = 0x1000000000 # Match from right to left
_NO_GLOBSTAR_CAPTURE = 0x2000000000 # Disallow `GLOBSTAR` capturing groups.
FLAG_MASK = (
CASE |
IGNORECASE |
RAWCHARS |
NEGATE |
MINUSNEGATE |
PATHNAME |
DOTMATCH |
EXTMATCH |
GLOBSTAR |
BRACE |
REALPATH |
FOLLOW |
MATCHBASE |
NODIR |
NEGATEALL |
FORCEWIN |
FORCEUNIX |
GLOBTILDE |
SPLIT |
NOUNIQUE |
NODOTDIR |
_TRANSLATE |
_ANCHOR |
_EXTMATCHBASE |
_RTL |
_NOABSOLUTE |
_NO_GLOBSTAR_CAPTURE
)
CASE_FLAGS = IGNORECASE | CASE
# Pieces to construct search path
# Question Mark
_QMARK = r'.'
# Star
_STAR = r'.*?'
# For paths, allow trailing /
_PATH_TRAIL = r'{}*?'
# Disallow . and .. (usually applied right after path separator when needed)
_NO_DIR = r'(?!(?:\.{{1,2}})(?:$|[{sep}]))'
# Star for `PATHNAME`
_PATH_STAR = r'[^{sep}]*?'
# Star when at start of filename during `DOTMATCH`
# (allow dot, but don't allow directory match /./ or /../)
_PATH_STAR_DOTMATCH = _NO_DIR + _PATH_STAR
# Star for `PATHNAME` when `DOTMATCH` is disabled and start is at start of file.
# Disallow . and .. and don't allow match to start with a dot.
_PATH_STAR_NO_DOTMATCH = _NO_DIR + r'(?:(?!\.){})?'.format(_PATH_STAR)
# `GLOBSTAR` during `DOTMATCH`. Avoid directory match /./ or /../
_PATH_GSTAR_DOTMATCH = r'(?:(?!(?:[{sep}]|^)(?:\.{{1,2}})($|[{sep}])).)*?'
# `GLOBSTAR` with `DOTMATCH` disabled. Don't allow a dot to follow /
_PATH_GSTAR_NO_DOTMATCH = r'(?:(?!(?:[{sep}]|^)\.).)*?'
# Special right to left matching
_PATH_GSTAR_RTL_MATCH = r'.*?'
# Next char cannot be a dot
_NO_DOT = r'(?![.])'
# Following char from sequence cannot be a separator or a dot
_PATH_NO_SLASH_DOT = r'(?![{sep}.])'
# Following char from sequence cannot be a separator
_PATH_NO_SLASH = r'(?![{sep}])'
# One or more
_ONE_OR_MORE = r'+'
# End of pattern
_EOP = r'$'
_PATH_EOP = r'(?:$|[{sep}])'
# Divider between `globstar`. Can match start or end of pattern
# in addition to slashes.
_GLOBSTAR_DIV = r'(?:^|$|{})+'
# Lookahead to see there is one character.
_NEED_CHAR_PATH = r'(?=[^{sep}])'
_NEED_CHAR = r'(?=.)'
_NEED_SEP = r'(?={})'
# Group that matches one or none
_QMARK_GROUP = r'(?:{})?'
_QMARK_CAPTURE_GROUP = r'((?#)(?:{})?)'
# Group that matches Zero or more
_STAR_GROUP = r'(?:{})*'
_STAR_CAPTURE_GROUP = r'((?#)(?:{})*)'
# Group that matches one or more
_PLUS_GROUP = r'(?:{})+'
_PLUS_CAPTURE_GROUP = r'((?#)(?:{})+)'
# Group that matches exactly one
_GROUP = r'(?:{})'
_CAPTURE_GROUP = r'((?#){})'
# Inverse group that matches none
# This is the start. Since Python can't
# do variable look behinds, we have stuff
# everything at the end that it needs to lookahead
# for. So there is an opening and a closing.
_EXCLA_GROUP = r'(?:(?!(?:{})'
_EXCLA_CAPTURE_GROUP = r'((?#)(?!(?:{})'
# Closing for inverse group
_EXCLA_GROUP_CLOSE = r'){})'
# Restrict root
_NO_ROOT = r'(?!/)'
_NO_WIN_ROOT = r'(?!(?:[\\/]|[a-zA-Z]:))'
# Restrict directories
_NO_NIX_DIR = (
r'^(?:.*?(?:/\.{1,2}/*|/)|\.{1,2}/*)$',
rb'^(?:.*?(?:/\.{1,2}/*|/)|\.{1,2}/*)$'
)
_NO_WIN_DIR = (
r'^(?:.*?(?:[\\/]\.{1,2}[\\/]*|[\\/])|\.{1,2}[\\/]*)$',
rb'^(?:.*?(?:[\\/]\.{1,2}[\\/]*|[\\/])|\.{1,2}[\\/]*)$'
)
class InvPlaceholder(str):
"""Placeholder for inverse pattern !(...)."""
class PathNameException(Exception):
"""Path name exception."""
class DotException(Exception):
"""Dot exception."""
class PatternLimitException(Exception):
"""Pattern limit exception."""
@overload
def iter_patterns(patterns: str | Sequence[str]) -> Iterable[str]:
...
@overload
def iter_patterns(patterns: bytes | Sequence[bytes]) -> Iterable[bytes]:
...
def iter_patterns(patterns: AnyStr | Sequence[AnyStr]) -> Iterable[AnyStr]:
"""Return a simple string sequence."""
if isinstance(patterns, (str, bytes)):
yield patterns
else:
yield from patterns
def escape(pattern: AnyStr, unix: Optional[bool] = None, pathname: bool = True, raw: bool = False) -> AnyStr:
"""
Escape.
`unix`: use Unix style path logic.
`pathname`: Use path logic.
`raw`: Handle raw strings (deprecated)
"""
if isinstance(pattern, bytes):
drive_pat = cast(Pattern[AnyStr], RE_WIN_DRIVE[util.BYTES])
magic = cast(Pattern[AnyStr], RE_MAGIC_ESCAPE[util.BYTES])
drive_magic = cast(Pattern[AnyStr], RE_WIN_DRIVE_MAGIC[util.BYTES])
replace = br'\\\1'
slash = b'\\'
double_slash = b'\\\\'
drive = b''
else:
drive_pat = cast(Pattern[AnyStr], RE_WIN_DRIVE[util.UNICODE])
magic = cast(Pattern[AnyStr], RE_MAGIC_ESCAPE[util.UNICODE])
drive_magic = cast(Pattern[AnyStr], RE_WIN_DRIVE_MAGIC[util.UNICODE])
replace = r'\\\1'
slash = '\\'
double_slash = '\\\\'
drive = ''
if not raw:
pattern = pattern.replace(slash, double_slash)
# Handle windows drives special.
# Windows drives are handled special internally.
# So we shouldn't escape them as we'll just have to
# detect and undo it later.
length = 0
if pathname and ((unix is None and util.platform() == "windows") or unix is False):
m = drive_pat.match(pattern)
if m:
# Replace splitting magic chars
drive = m.group(0)
length = len(drive)
drive = drive_magic.sub(replace, m.group(0))
pattern = pattern[length:]
return drive + magic.sub(replace, pattern)
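# Illustration of the escaping above (a hedged sketch, not a doctest): with the
# default raw=False, literal backslashes are doubled first and every magic
# character then gains one backslash, so escape('file*[1].txt', unix=True)
# yields the string file\*\[1\].txt. Note that '-', '!' and '~' are escaped
# as well, since they can be magic under MINUSNEGATE, NEGATE and GLOBTILDE.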
def _get_win_drive(
pattern: str,
regex: bool = False,
case_sensitive: bool = False
) -> tuple[bool, Optional[str], bool, int]:
"""Get Windows drive."""
drive = None
slash = False
end = 0
root_specified = False
m = RE_WIN_DRIVE_START.match(pattern)
if m:
end = m.end(0)
if m.group(3) and RE_WIN_DRIVE_LETTER.match(m.group(0)):
if regex:
drive = escape_drive(RE_WIN_DRIVE_UNESCAPE.sub(r'\1', m.group(3)).replace('/', '\\'), case_sensitive)
else:
drive = RE_WIN_DRIVE_UNESCAPE.sub(r'\1', m.group(0)).replace('/', '\\')
slash = bool(m.group(4))
root_specified = True
elif m.group(2):
root_specified = True
part = [RE_WIN_DRIVE_UNESCAPE.sub(r'\1', m.group(2))]
is_special = part[-1].lower() in ('.', '?')
complete = 1
first = 1
count = 0
for count, m in enumerate(RE_WIN_DRIVE_PART.finditer(pattern, m.end(0)), 1):
end = m.end(0)
part.append(RE_WIN_DRIVE_UNESCAPE.sub(r'\1', m.group(1)))
slash = bool(m.group(2))
if is_special:
if count == first and part[-1].lower() == 'unc':
complete += 2
elif count == first and part[-1].lower() == 'global':
first += 1
complete += 1
if count == complete:
break
if count == complete:
if not regex:
drive = '\\\\{}{}'.format('\\'.join(part), '\\' if slash else '')
else:
drive = r'[\\/]{2}' + r'[\\/]'.join([escape_drive(p, case_sensitive) for p in part])
elif pattern.startswith(('\\\\', '/')):
root_specified = True
return root_specified, drive, slash, end
def _get_magic_symbols(pattern: AnyStr, unix: bool, flags: int) -> tuple[set[AnyStr], set[AnyStr]]:
"""Get magic symbols."""
if isinstance(pattern, bytes):
ptype = util.BYTES
slash = b'\\' # type: AnyStr
else:
ptype = util.UNICODE
slash = '\\'
magic = set() # type: set[AnyStr]
if unix:
magic_drive = set() # type: set[AnyStr]
else:
magic_drive = set([slash])
magic |= cast('set[AnyStr]', MAGIC_DEF[ptype])
if flags & BRACE:
magic |= cast('set[AnyStr]', MAGIC_BRACE[ptype])
magic_drive |= cast('set[AnyStr]', MAGIC_BRACE[ptype])
if flags & SPLIT:
magic |= cast('set[AnyStr]', MAGIC_SPLIT[ptype])
magic_drive |= cast('set[AnyStr]', MAGIC_SPLIT[ptype])
if flags & GLOBTILDE:
magic |= cast('set[AnyStr]', MAGIC_TILDE[ptype])
if flags & EXTMATCH:
magic |= cast('set[AnyStr]', MAGIC_EXTMATCH[ptype])
if flags & NEGATE:
if flags & MINUSNEGATE:
magic |= cast('set[AnyStr]', MAGIC_MINUS_NEGATE[ptype])
else:
magic |= cast('set[AnyStr]', MAGIC_NEGATE[ptype])
return magic, magic_drive
def is_magic(pattern: AnyStr, flags: int = 0) -> bool:
"""Check if pattern is magic."""
magical = False
unix = is_unix_style(flags)
if isinstance(pattern, bytes):
ptype = util.BYTES
else:
ptype = util.UNICODE
drive_pat = cast(Pattern[AnyStr], RE_WIN_DRIVE[ptype])
magic, magic_drive = _get_magic_symbols(pattern, unix, flags)
is_path = flags & PATHNAME
length = 0
if is_path and ((unix is None and util.platform() == "windows") or unix is False):
m = drive_pat.match(pattern)
if m:
drive = m.group(0)
length = len(drive)
for c in magic_drive:
if c in drive:
magical = True
break
if not magical:
pattern = pattern[length:]
for c in magic:
if c in pattern:
magical = True
break
return magical
def is_negative(pattern: AnyStr, flags: int) -> bool:
"""Check if negative pattern."""
if flags & MINUSNEGATE:
return bool(flags & NEGATE and pattern[0:1] in MINUS_NEGATIVE_SYM)
elif flags & EXTMATCH:
return bool(flags & NEGATE and pattern[0:1] in NEGATIVE_SYM and pattern[1:2] not in ROUND_BRACKET)
else:
return bool(flags & NEGATE and pattern[0:1] in NEGATIVE_SYM)
def tilde_pos(pattern: AnyStr, flags: int) -> int:
"""Is user folder."""
pos = -1
if flags & GLOBTILDE and flags & REALPATH:
if flags & NEGATE:
if pattern[0:1] in TILDE_SYM:
pos = 0
elif pattern[0:1] in NEGATIVE_SYM and pattern[1:2] in TILDE_SYM:
pos = 1
elif pattern[0:1] in TILDE_SYM:
pos = 0
return pos
def expand_braces(patterns: AnyStr, flags: int, limit: int) -> Iterable[AnyStr]:
"""Expand braces."""
if flags & BRACE:
for p in ([patterns] if isinstance(patterns, (str, bytes)) else patterns):
try:
# Turn off limit as we are handling it ourselves.
yield from bracex.iexpand(p, keep_escapes=True, limit=limit)
except bracex.ExpansionLimitException:
raise
except Exception: # pragma: no cover
# We will probably never hit this as `bracex`
# doesn't throw any specific exceptions and
# should normally always parse, but just in case.
yield p
else:
for p in ([patterns] if isinstance(patterns, (str, bytes)) else patterns):
yield p
def expand_tilde(pattern: AnyStr, is_unix: bool, flags: int) -> AnyStr:
"""Expand tilde."""
pos = tilde_pos(pattern, flags)
if pos > -1:
string_type = util.BYTES if isinstance(pattern, bytes) else util.UNICODE
tilde = cast(AnyStr, TILDE_SYM[string_type])
re_tilde = cast(Pattern[AnyStr], RE_WIN_TILDE[string_type] if not is_unix else RE_TILDE[string_type])
m = re_tilde.match(pattern, pos)
if m:
expanded = os.path.expanduser(m.group(0))
if not expanded.startswith(tilde) and os.path.exists(expanded):
pattern = (pattern[0:1] if pos else pattern[0:0]) + escape(expanded, is_unix) + pattern[m.end(0):]
return pattern
def expand(pattern: AnyStr, flags: int, limit: int) -> Iterable[AnyStr]:
"""Expand and normalize."""
for expanded in expand_braces(pattern, flags, limit):
for splitted in split(expanded, flags):
yield expand_tilde(splitted, is_unix_style(flags), flags)
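# Illustrative flow of `expand` (assuming both BRACE and SPLIT are enabled):
# '{a,b}|c' -> brace expansion -> 'a|c', 'b|c' -> split -> 'a', 'c', 'b', 'c',
# with tilde expansion then applied to each resulting piece.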
def is_case_sensitive(flags: int) -> bool:
"""Is case sensitive."""
if bool(flags & FORCEWIN):
case_sensitive = False
elif bool(flags & FORCEUNIX):
case_sensitive = True
else:
case_sensitive = util.is_case_sensitive()
return case_sensitive
def get_case(flags: int) -> bool:
"""Parse flags for case sensitivity settings."""
if not bool(flags & CASE_FLAGS):
case_sensitive = is_case_sensitive(flags)
elif flags & CASE:
case_sensitive = True
else:
case_sensitive = False
return case_sensitive
def escape_drive(drive: str, case: bool) -> str:
"""Escape drive."""
return '(?i:{})'.format(re.escape(drive)) if case else re.escape(drive)
def is_unix_style(flags: int) -> bool:
"""Check if we should use Unix style."""
return (
(
(util.platform() != "windows") or
(not bool(flags & REALPATH) and bool(flags & FORCEUNIX))
) and
not flags & FORCEWIN
)
def no_negate_flags(flags: int) -> int:
"""No negation."""
if flags & NEGATE:
flags ^= NEGATE
if flags & NEGATEALL:
flags ^= NEGATEALL
return flags
@overload
def translate(
patterns: str | Sequence[str],
flags: int,
limit: int = PATTERN_LIMIT,
exclude: Optional[str | Sequence[str]] = None
) -> tuple[list[str], list[str]]:
...
@overload
def translate(
patterns: bytes | Sequence[bytes],
flags: int,
limit: int = PATTERN_LIMIT,
exclude: Optional[bytes | Sequence[bytes]] = None
) -> tuple[list[bytes], list[bytes]]:
...
def translate(
patterns: AnyStr | Sequence[AnyStr],
flags: int,
limit: int = PATTERN_LIMIT,
exclude: Optional[AnyStr | Sequence[AnyStr]] = None
) -> tuple[list[AnyStr], list[AnyStr]]:
"""Translate patterns."""
positive = [] # type: list[AnyStr]
negative = [] # type: list[AnyStr]
if exclude is not None:
flags = no_negate_flags(flags)
negative = translate(exclude, flags=flags | DOTMATCH | _NO_GLOBSTAR_CAPTURE, limit=limit)[0]
limit -= len(negative)
flags = (flags | _TRANSLATE) & FLAG_MASK
is_unix = is_unix_style(flags)
seen = set()
try:
current_limit = limit
total = 0
for pattern in iter_patterns(patterns):
pattern = util.norm_pattern(pattern, not is_unix, bool(flags & RAWCHARS))
count = 0
for count, expanded in enumerate(expand(pattern, flags, current_limit), 1):
total += 1
if 0 < limit < total:
raise PatternLimitException("Pattern limit exceeded the limit of {:d}".format(limit))
if expanded not in seen:
seen.add(expanded)
if is_negative(expanded, flags):
negative.append(WcParse(expanded[1:], flags | _NO_GLOBSTAR_CAPTURE | DOTMATCH).parse())
else:
positive.append(WcParse(expanded, flags).parse())
if limit:
current_limit -= count
if current_limit < 1:
current_limit = 1
except bracex.ExpansionLimitException:
raise PatternLimitException("Pattern limit exceeded the limit of {:d}".format(limit))
if negative and not positive:
if flags & NEGATEALL:
default = b'**' if isinstance(negative[0], bytes) else '**'
positive.append(
WcParse(default, flags | (GLOBSTAR if flags & PATHNAME else 0)).parse()
)
if positive and flags & NODIR:
index = util.BYTES if isinstance(positive[0], bytes) else util.UNICODE
negative.append(cast(AnyStr, _NO_NIX_DIR[index] if is_unix else _NO_WIN_DIR[index]))
return positive, negative
def split(pattern: AnyStr, flags: int) -> Iterable[AnyStr]:
"""Split patterns."""
if flags & SPLIT:
yield from WcSplit(pattern, flags).split()
else:
yield pattern
@overload
def compile_pattern(
patterns: str | Sequence[str],
flags: int,
limit: int = PATTERN_LIMIT,
exclude: Optional[str | Sequence[str]] = None
) -> tuple[list[Pattern[str]], list[Pattern[str]]]:
...
@overload
def compile_pattern(
patterns: bytes | Sequence[bytes],
flags: int,
limit: int = PATTERN_LIMIT,
exclude: Optional[bytes | Sequence[bytes]] = None
) -> tuple[list[Pattern[bytes]], list[Pattern[bytes]]]:
...
def compile_pattern(
patterns: AnyStr | Sequence[AnyStr],
flags: int,
limit: int = PATTERN_LIMIT,
exclude: Optional[AnyStr | Sequence[AnyStr]] = None
) -> tuple[list[Pattern[AnyStr]], list[Pattern[AnyStr]]]:
"""Compile the patterns."""
positive = [] # type: list[Pattern[AnyStr]]
negative = [] # type: list[Pattern[AnyStr]]
if exclude is not None:
flags = no_negate_flags(flags)
negative = compile_pattern(exclude, flags=flags | DOTMATCH | _NO_GLOBSTAR_CAPTURE, limit=limit)[0]
limit -= len(negative)
is_unix = is_unix_style(flags)
seen = set()
try:
current_limit = limit
total = 0
for pattern in iter_patterns(patterns):
pattern = util.norm_pattern(pattern, not is_unix, bool(flags & RAWCHARS))
count = 0
for count, expanded in enumerate(expand(pattern, flags, current_limit), 1):
total += 1
if 0 < limit < total:
raise PatternLimitException("Pattern limit exceeded the limit of {:d}".format(limit))
if expanded not in seen:
seen.add(expanded)
if is_negative(expanded, flags):
negative.append(_compile(expanded[1:], flags | _NO_GLOBSTAR_CAPTURE | DOTMATCH))
else:
positive.append(_compile(expanded, flags))
if limit:
current_limit -= count
if current_limit < 1:
current_limit = 1
except bracex.ExpansionLimitException:
raise PatternLimitException("Pattern limit exceeded the limit of {:d}".format(limit))
if negative and not positive:
if flags & NEGATEALL:
default = b'**' if isinstance(negative[0].pattern, bytes) else '**'
positive.append(_compile(default, flags | (GLOBSTAR if flags & PATHNAME else 0)))
if positive and flags & NODIR:
ptype = util.BYTES if isinstance(positive[0].pattern, bytes) else util.UNICODE
negative.append(cast(Pattern[AnyStr], RE_NO_DIR[ptype] if is_unix else RE_WIN_NO_DIR[ptype]))
return positive, negative
@overload
def compile( # noqa: A001
patterns: str | Sequence[str],
flags: int,
limit: int = PATTERN_LIMIT,
exclude: Optional[str | Sequence[str]] = None
) -> WcRegexp[str]:
...
@overload
def compile( # noqa: A001
patterns: bytes | Sequence[bytes],
flags: int,
limit: int = PATTERN_LIMIT,
exclude: Optional[bytes | Sequence[bytes]] = None
) -> WcRegexp[bytes]:
...
def compile( # noqa: A001
patterns: AnyStr | Sequence[AnyStr],
flags: int,
limit: int = PATTERN_LIMIT,
exclude: Optional[AnyStr | Sequence[AnyStr]] = None
) -> WcRegexp[AnyStr]:
"""Compile patterns."""
positive, negative = compile_pattern(patterns, flags, limit, exclude)
return WcRegexp(
tuple(positive), tuple(negative),
bool(flags & REALPATH), bool(flags & PATHNAME), bool(flags & FOLLOW)
)
@functools.lru_cache(maxsize=256, typed=True)
def _compile(pattern: AnyStr, flags: int) -> Pattern[AnyStr]:
"""Compile the pattern to regex."""
return re.compile(WcParse(pattern, flags & FLAG_MASK).parse())
class WcSplit(Generic[AnyStr]):
"""Class that splits patterns on |."""
def __init__(self, pattern: AnyStr, flags: int) -> None:
"""Initialize."""
self.pattern = pattern # type: AnyStr
self.pathname = bool(flags & PATHNAME)
self.extend = bool(flags & EXTMATCH)
self.unix = is_unix_style(flags)
self.bslash_abort = not self.unix
def _sequence(self, i: util.StringIter) -> None:
"""Handle character group."""
c = next(i)
if c == '!':
c = next(i)
if c in ('^', '-', '['):
c = next(i)
while c != ']':
if c == '\\':
# Handle escapes
try:
self._references(i, True)
except PathNameException:
raise StopIteration
elif c == '/':
if self.pathname:
raise StopIteration
c = next(i)
def _references(self, i: util.StringIter, sequence: bool = False) -> None:
"""Handle references."""
c = next(i)
if c == '\\':
# \\
if sequence and self.bslash_abort:
raise PathNameException
elif c == '/':
# \/
if sequence and self.pathname:
raise PathNameException
else:
# \a, \b, \c, etc.
pass
def parse_extend(self, c: str, i: util.StringIter) -> bool:
"""Parse extended pattern lists."""
# Start list parsing
success = True
index = i.index
list_type = c
try:
c = next(i)
if c != '(':
raise StopIteration
while c != ')':
c = next(i)
if self.extend and c in EXT_TYPES and self.parse_extend(c, i):
continue
if c == '\\':
try:
self._references(i)
except StopIteration:
pass
elif c == '[':
index = i.index
try:
self._sequence(i)
except StopIteration:
i.rewind(i.index - index)
except StopIteration:
success = False
c = list_type
i.rewind(i.index - index)
return success
def _split(self, pattern: str) -> Iterable[str]:
"""Split the pattern."""
start = -1
i = util.StringIter(pattern)
for c in i:
if self.extend and c in EXT_TYPES and self.parse_extend(c, i):
continue
if c == '|':
split = i.index - 1
p = pattern[start + 1:split]
yield p
start = split
elif c == '\\':
index = i.index
try:
self._references(i)
except StopIteration:
i.rewind(i.index - index)
elif c == '[':
index = i.index
try:
self._sequence(i)
except StopIteration:
i.rewind(i.index - index)
if start < len(pattern):
yield pattern[start + 1:]
def split(self) -> Iterable[AnyStr]:
"""Split the pattern."""
if isinstance(self.pattern, bytes):
for p in self._split(self.pattern.decode('latin-1')):
yield p.encode('latin-1')
else:
yield from self._split(self.pattern)
class WcParse(Generic[AnyStr]):
"""Parse the wildcard pattern."""
def __init__(self, pattern: AnyStr, flags: int = 0) -> None:
"""Initialize."""
self.pattern = pattern # type: AnyStr
self.no_abs = bool(flags & _NOABSOLUTE)
self.braces = bool(flags & BRACE)
self.is_bytes = isinstance(pattern, bytes)
self.pathname = bool(flags & PATHNAME)
self.raw_chars = bool(flags & RAWCHARS)
self.globstar = self.pathname and bool(flags & GLOBSTAR)
self.realpath = bool(flags & REALPATH) and self.pathname
self.translate = bool(flags & _TRANSLATE)
self.negate = bool(flags & NEGATE)
self.globstar_capture = self.realpath and not self.translate and not bool(flags & _NO_GLOBSTAR_CAPTURE)
self.dot = bool(flags & DOTMATCH)
self.extend = bool(flags & EXTMATCH)
self.matchbase = bool(flags & MATCHBASE)
self.extmatchbase = bool(flags & _EXTMATCHBASE)
self.rtl = bool(flags & _RTL)
self.anchor = bool(flags & _ANCHOR)
self.nodotdir = bool(flags & NODOTDIR)
self.capture = self.translate
self.case_sensitive = get_case(flags)
self.in_list = False
self.inv_nest = False
self.flags = flags
self.inv_ext = 0
self.unix = is_unix_style(self.flags)
if not self.unix:
self.win_drive_detect = self.pathname
self.char_avoid = (ord('\\'), ord('/'), ord('.')) # type: tuple[int, ...]
self.bslash_abort = self.pathname
sep = {"sep": re.escape('\\/')}
else:
self.win_drive_detect = False
self.char_avoid = (ord('/'), ord('.'))
self.bslash_abort = False
sep = {"sep": re.escape('/')}
self.bare_sep = sep['sep']
self.sep = '[{}]'.format(self.bare_sep)
self.path_eop = _PATH_EOP.format(**sep)
self.no_dir = _NO_DIR.format(**sep)
self.seq_path = _PATH_NO_SLASH.format(**sep)
self.seq_path_dot = _PATH_NO_SLASH_DOT.format(**sep)
self.path_star = _PATH_STAR.format(**sep)
self.path_star_dot1 = _PATH_STAR_DOTMATCH.format(**sep)
self.path_star_dot2 = _PATH_STAR_NO_DOTMATCH.format(**sep)
self.path_gstar_dot1 = _PATH_GSTAR_DOTMATCH.format(**sep)
self.path_gstar_dot2 = _PATH_GSTAR_NO_DOTMATCH.format(**sep)
if self.pathname:
self.need_char = _NEED_CHAR_PATH.format(**sep)
else:
self.need_char = _NEED_CHAR
def set_after_start(self) -> None:
"""Set tracker for character after the start of a directory."""
self.after_start = True
self.dir_start = False
def set_start_dir(self) -> None:
"""Set directory start."""
self.dir_start = True
self.after_start = False
def reset_dir_track(self) -> None:
"""Reset directory tracker."""
self.dir_start = False
self.after_start = False
def update_dir_state(self) -> None:
"""
Update the directory state.
If we are at the directory start,
update to after start state (the character right after).
If at after start, reset state.
"""
if self.dir_start and not self.after_start:
self.set_after_start()
elif not self.dir_start and self.after_start:
self.reset_dir_track()
def _restrict_extended_slash(self) -> str:
"""Restrict extended slash."""
return self.seq_path if self.pathname else ''
def _restrict_sequence(self) -> str:
"""Restrict sequence."""
if self.pathname:
value = self.seq_path_dot if self.after_start and not self.dot else self.seq_path
if self.after_start:
value = self.no_dir + value
else:
value = _NO_DOT if self.after_start and not self.dot else ""
self.reset_dir_track()
return value
def _sequence_range_check(self, result: list[str], last: str) -> bool:
"""
If range backwards, remove it.
A bad range will cause the regular expression to fail,
so we need to remove it, but return that we removed it
so the caller can know the sequence wasn't empty.
Caller will have to craft a sequence that makes sense
if empty at the end with either an impossible sequence
for inclusive sequences or a sequence that matches
everything for an exclusive sequence.
"""
removed = False
first = result[-2]
v1 = ord(first[1:2] if len(first) > 1 else first)
v2 = ord(last[1:2] if len(last) > 1 else last)
if v2 < v1:
result.pop()
result.pop()
removed = True
else:
result.append(last)
return removed
def _handle_posix(self, i: util.StringIter, result: list[str], end_range: int) -> bool:
"""Handle posix classes."""
last_posix = False
m = i.match(RE_POSIX)
if m:
last_posix = True
# Cannot do range with posix class
# so escape last `-` if we think this
# is the end of a range.
if end_range and i.index - 1 >= end_range:
result[-1] = '\\' + result[-1]
result.append(posix.get_posix_property(m.group(1), self.is_bytes))
return last_posix
def _sequence(self, i: util.StringIter) -> str:
"""Handle character group."""
result = ['[']
end_range = 0
escape_hyphen = -1
removed = False
last_posix = False
c = next(i)
if c in ('!', '^'):
# Handle negate char
result.append('^')
c = next(i)
if c == '[':
last_posix = self._handle_posix(i, result, 0)
if not last_posix:
result.append(re.escape(c))
c = next(i)
elif c in ('-', ']'):
result.append(re.escape(c))
c = next(i)
while c != ']':
if c == '-':
if last_posix:
result.append('\\' + c)
last_posix = False
elif i.index - 1 > escape_hyphen:
# Found a range delimiter.
# Mark the next two characters as needing to be escaped if hyphens.
# The next character would be the end char range (s-e),
# and the one after that would be the potential start char range
# of a new range (s-es-e), so neither can be legitimate range delimiters.
result.append(c)
escape_hyphen = i.index + 1
end_range = i.index
elif end_range and i.index - 1 >= end_range:
if self._sequence_range_check(result, '\\' + c):
removed = True
end_range = 0
else:
result.append('\\' + c)
c = next(i)
continue
last_posix = False
if c == '[':
last_posix = self._handle_posix(i, result, end_range)
if last_posix:
c = next(i)
continue
if c == '\\':
# Handle escapes
try:
value = self._references(i, True)
except DotException:
value = re.escape(next(i))
except PathNameException:
raise StopIteration
elif c == '/':
if self.pathname:
raise StopIteration
value = c
elif c in SET_OPERATORS:
# Escape &, |, and ~ to avoid &&, ||, and ~~
value = '\\' + c
else:
# Anything else
value = c
if end_range and i.index - 1 >= end_range:
if self._sequence_range_check(result, value):
removed = True
end_range = 0
else:
result.append(value)
c = next(i)
result.append(']')
# Bad range removed.
if removed:
value = "".join(result)
if value == '[]':
# We specified some ranges, but they are all
# out of reach. Create an impossible sequence to match.
result = ['[^{}]'.format(ASCII_RANGE if self.is_bytes else UNICODE_RANGE)]
elif value == '[^]':
                # We specified some ranges, but they are all
# out of reach. Since this is exclusive
# that means we can match *anything*.
result = ['[{}]'.format(ASCII_RANGE if self.is_bytes else UNICODE_RANGE)]
else:
result = [value]
if self.pathname or self.after_start:
return self._restrict_sequence() + ''.join(result)
return ''.join(result)
def _references(self, i: util.StringIter, sequence: bool = False) -> str:
"""Handle references."""
value = ''
c = next(i)
if c == '\\':
# \\
if sequence and self.bslash_abort:
raise PathNameException
value = r'\\'
if self.bslash_abort:
if not self.in_list:
value = self.sep + _ONE_OR_MORE
self.set_start_dir()
else:
value = self._restrict_extended_slash() + self.sep
elif not self.unix:
value = self.sep if not sequence else self.bare_sep
elif c == '/':
# \/
if sequence and self.pathname:
raise PathNameException
if self.pathname:
if not self.in_list:
value = self.sep + _ONE_OR_MORE
self.set_start_dir()
else:
value = self._restrict_extended_slash() + self.sep
else:
value = self.sep if not sequence else self.bare_sep
elif c == '.':
            # Let dots be handled specially
i.rewind(1)
raise DotException
else:
# \a, \b, \c, etc.
value = re.escape(c)
return value
def _handle_dot(self, i: util.StringIter, current: list[str]) -> None:
"""Handle dot."""
is_current = True
is_previous = False
if self.after_start and self.pathname and self.nodotdir:
try:
index = i.index
while True:
c = next(i)
if c == '.' and is_current:
is_previous = True
is_current = False
elif c == '.' and is_previous:
is_previous = False
raise StopIteration
elif c in ('|', ')') and self.in_list:
raise StopIteration
elif c == '\\':
try:
self._references(i, True)
# Was not what we expected
is_current = False
is_previous = False
raise StopIteration
except DotException:
if is_current:
is_previous = True
is_current = False
c = next(i)
else:
is_previous = False
raise StopIteration
except PathNameException:
raise StopIteration
elif c == '/':
raise StopIteration
else:
is_current = False
is_previous = False
raise StopIteration
except StopIteration:
i.rewind(i.index - index)
if not is_current and not is_previous:
current.append(r'(?!\.[.]?{})\.'.format(self.path_eop))
else:
current.append(re.escape('.'))
def _handle_star(self, i: util.StringIter, current: list[str]) -> None:
"""Handle star."""
if self.pathname:
if self.after_start and not self.dot:
star = self.path_star_dot2
globstar = self.path_gstar_dot2
elif self.after_start:
star = self.path_star_dot1
globstar = self.path_gstar_dot1
else:
star = self.path_star
globstar = self.path_gstar_dot1
if self.globstar_capture:
globstar = '({})'.format(globstar)
else:
if self.after_start and not self.dot:
star = _NO_DOT + _STAR
else:
star = _STAR
globstar = ''
value = star
if self.after_start and self.globstar and not self.in_list:
skip = False
try:
c = next(i)
if c != '*':
i.rewind(1)
raise StopIteration
except StopIteration:
# Could not acquire a second star, so assume single star pattern
skip = True
if not skip:
try:
index = i.index
c = next(i)
if c == '\\':
try:
self._references(i, True)
# Was not what we expected
# Assume two single stars
except DotException:
pass
except PathNameException:
# Looks like escape was a valid slash
# Store pattern accordingly
value = globstar
self.matchbase = False
except StopIteration:
# Escapes nothing, ignore and assume double star
value = globstar
elif c == '/':
value = globstar
self.matchbase = False
if value != globstar:
i.rewind(i.index - index)
except StopIteration:
# Could not acquire directory slash due to no more characters
# Use double star
value = globstar
if self.after_start and value != globstar:
value = self.need_char + value
        # Consume duplicate stars
try:
c = next(i)
while c == '*':
c = next(i)
i.rewind(1)
except StopIteration:
pass
self.reset_dir_track()
if value == globstar:
sep = _GLOBSTAR_DIV.format(self.sep)
# Check if the last entry was a `globstar`
# If so, don't bother adding another.
if current[-1] != sep:
if current[-1] == '':
# At the beginning of the pattern
current[-1] = value
else:
# Replace the last path separator
current[-1] = _NEED_SEP.format(self.sep)
current.append(value)
self.consume_path_sep(i)
current.append(sep)
self.set_start_dir()
else:
current.append(value)
def clean_up_inverse(self, current: list[str], nested: bool = False) -> None:
"""
Clean up current.
Python doesn't have variable lookbehinds, so we have to do negative lookaheads.
!(...) when converted to regular expression is atomic, so once it matches, that's it.
        So we use the pattern `(?:(?!(?:stuff|to|exclude)<x>)[^/]*?)` where <x> is everything
        that comes after the negative group. `!(this|that)other` --> `(?:(?!(?:this|that)other)[^/]*?)`.
We have to update the list before | in nested cases: *(!(...)|stuff). Before we close a parent
`extmatch`: `*(!(...))`. And of course on path separators (when path mode is on): `!(...)/stuff`.
Lastly we make sure all is accounted for when finishing the pattern at the end. If there is nothing
to store, we store `$`: `(?:(?!(?:this|that)$))[^/]*?)`.
"""
if not self.inv_ext:
return
index = len(current) - 1
while index >= 0:
if isinstance(current[index], InvPlaceholder):
content = current[index + 1:]
if not nested:
content.append(_EOP if not self.pathname else self.path_eop)
current[index] = (
(''.join(content).replace('(?#)', '?:') if self.capture else ''.join(content)) +
(_EXCLA_GROUP_CLOSE.format(str(current[index])))
)
index -= 1
self.inv_ext = 0
def parse_extend(self, c: str, i: util.StringIter, current: list[str], reset_dot: bool = False) -> bool:
"""Parse extended pattern lists."""
# Save state
temp_dir_start = self.dir_start
temp_after_start = self.after_start
temp_in_list = self.in_list
temp_inv_ext = self.inv_ext
temp_inv_nest = self.inv_nest
self.in_list = True
self.inv_nest = c == '!'
if reset_dot:
self.match_dot_dir = False
# Start list parsing
success = True
index = i.index
list_type = c
extended = [] # type: list[str]
try:
c = next(i)
if c != '(':
raise StopIteration
while c != ')':
c = next(i)
if self.extend and c in EXT_TYPES and self.parse_extend(c, i, extended):
# Nothing more to do
pass
elif c == '*':
self._handle_star(i, extended)
elif c == '.':
self._handle_dot(i, extended)
if self.after_start:
self.match_dot_dir = self.dot and not self.nodotdir
self.reset_dir_track()
elif c == '?':
extended.append(self._restrict_sequence() + _QMARK)
elif c == '/':
if self.pathname:
extended.append(self._restrict_extended_slash())
extended.append(self.sep)
elif c == "|":
self.clean_up_inverse(extended, temp_inv_nest and self.inv_nest)
extended.append(c)
if temp_after_start:
self.set_start_dir()
elif c == '\\':
try:
extended.append(self._references(i))
except DotException:
continue
except StopIteration:
# We've reached the end.
# Do nothing because this is going to abort the `extmatch` anyways.
pass
elif c == '[':
subindex = i.index
try:
extended.append(self._sequence(i))
except StopIteration:
i.rewind(i.index - subindex)
extended.append(r'\[')
elif c != ')':
extended.append(re.escape(c))
self.update_dir_state()
if list_type == '?':
current.append((_QMARK_CAPTURE_GROUP if self.capture else _QMARK_GROUP).format(''.join(extended)))
elif list_type == '*':
current.append((_STAR_CAPTURE_GROUP if self.capture else _STAR_GROUP).format(''.join(extended)))
elif list_type == '+':
current.append((_PLUS_CAPTURE_GROUP if self.capture else _PLUS_GROUP).format(''.join(extended)))
elif list_type == '@':
current.append((_CAPTURE_GROUP if self.capture else _GROUP).format(''.join(extended)))
elif list_type == '!':
self.inv_ext += 1
# If pattern is at the end, anchor the match to the end.
current.append((_EXCLA_CAPTURE_GROUP if self.capture else _EXCLA_GROUP).format(''.join(extended)))
if self.pathname:
if not temp_after_start or self.match_dot_dir:
star = self.path_star
elif temp_after_start and not self.dot:
star = self.path_star_dot2
else:
star = self.path_star_dot1
else:
if not temp_after_start or self.dot:
star = _STAR
else:
star = _NO_DOT + _STAR
if temp_after_start:
star = self.need_char + star
# Place holder for closing, but store the proper star
# so we know which one to use
current.append(InvPlaceholder(star))
if temp_in_list:
self.clean_up_inverse(current, temp_inv_nest and self.inv_nest)
except StopIteration:
success = False
self.inv_ext = temp_inv_ext
i.rewind(i.index - index)
# Either restore if extend parsing failed, or reset if it worked
if not temp_in_list:
self.in_list = False
if not temp_inv_nest:
self.inv_nest = False
if success:
self.reset_dir_track()
else:
self.dir_start = temp_dir_start
self.after_start = temp_after_start
return success
def consume_path_sep(self, i: util.StringIter) -> None:
"""Consume any consecutive path separators as they count as one."""
try:
if self.bslash_abort:
count = -1
c = '\\'
while c in ('\\', '/'):
if c != '/' or count % 2:
count += 1
else:
count += 2
c = next(i)
i.rewind(1)
# Rewind one more if we have an odd number (escape): \\\*
if count > 0 and count % 2:
i.rewind(1)
else:
c = '/'
while c == '/':
c = next(i)
i.rewind(1)
except StopIteration:
pass
def root(self, pattern: str, current: list[str]) -> None:
"""Start parsing the pattern."""
self.set_after_start()
i = util.StringIter(pattern)
root_specified = False
if self.win_drive_detect:
root_specified, drive, slash, end = _get_win_drive(pattern, True, self.case_sensitive)
if drive is not None:
current.append(drive)
if slash:
current.append(self.sep + _ONE_OR_MORE)
i.advance(end)
self.consume_path_sep(i)
elif drive is None and root_specified:
root_specified = True
elif self.pathname and pattern.startswith('/'):
root_specified = True
if self.no_abs and root_specified:
raise ValueError('The pattern must be a relative path pattern')
if root_specified:
self.matchbase = False
self.extmatchbase = False
self.rtl = False
if not root_specified and self.realpath:
current.append(_NO_WIN_ROOT if self.win_drive_detect else _NO_ROOT)
current.append('')
for c in i:
index = i.index
if self.extend and c in EXT_TYPES and self.parse_extend(c, i, current, True):
# Nothing to do
pass
elif c == '.':
self._handle_dot(i, current)
elif c == '*':
self._handle_star(i, current)
elif c == '?':
current.append(self._restrict_sequence() + _QMARK)
elif c == '/':
if self.pathname:
self.set_start_dir()
self.clean_up_inverse(current)
current.append(self.sep + _ONE_OR_MORE)
self.consume_path_sep(i)
self.matchbase = False
else:
current.append(self.sep)
elif c == '\\':
index = i.index
try:
value = self._references(i)
if self.dir_start:
self.clean_up_inverse(current)
self.consume_path_sep(i)
self.matchbase = False
current.append(value)
except DotException:
continue
except StopIteration:
# Escapes nothing, ignore
i.rewind(i.index - index)
elif c == '[':
index = i.index
try:
current.append(self._sequence(i))
except StopIteration:
i.rewind(i.index - index)
current.append(re.escape(c))
else:
current.append(re.escape(c))
self.update_dir_state()
self.clean_up_inverse(current)
if self.pathname:
current.append(_PATH_TRAIL.format(self.sep))
def _parse(self, p: str) -> str:
"""Parse pattern."""
result = ['']
prepend = ['']
if self.anchor:
p, number = (RE_ANCHOR if not self.win_drive_detect else RE_WIN_ANCHOR).subn('', p)
if number:
self.matchbase = False
self.extmatchbase = False
self.rtl = False
if self.matchbase or self.extmatchbase:
globstar = self.globstar
self.globstar = True
self.root('**', prepend)
self.globstar = globstar
elif self.rtl:
# Add a `**` that can capture anything: dots, special directories, symlinks, etc.
# We are simulating right to left, so everything on the left should be accepted without
# question.
globstar = self.globstar
dot = self.dot
gstar = self.path_gstar_dot1
globstar_capture = self.globstar_capture
self.path_gstar_dot1 = _PATH_GSTAR_RTL_MATCH
self.dot = True
self.globstar = True
self.globstar_capture = False
self.root('**', prepend)
self.globstar = globstar
self.dot = dot
self.path_gstar_dot1 = gstar
self.globstar_capture = globstar_capture
# We have an escape, but it escapes nothing
if p == '\\':
p = ''
if p:
self.root(p, result)
if p and (self.matchbase or self.extmatchbase or self.rtl):
result = prepend + result
case_flag = 'i' if not self.case_sensitive else ''
pattern = R'^(?s{}:{})$'.format(case_flag, ''.join(result))
if self.capture:
# Strip out unnecessary regex comments
pattern = pattern.replace('(?#)', '')
return pattern
def parse(self) -> AnyStr:
"""Parse pattern list."""
if isinstance(self.pattern, bytes):
pattern = self._parse(self.pattern.decode('latin-1')).encode('latin-1')
else:
pattern = self._parse(self.pattern)
return pattern
| {
"content_hash": "b88ea4ebb4aa37100922df17fadd2dba",
"timestamp": "",
"source": "github",
"line_count": 1686,
"max_line_length": 119,
"avg_line_length": 33.1773428232503,
"alnum_prop": 0.5040670754598924,
"repo_name": "facelessuser/wcmatch",
"id": "05912d1533964f9ded3d3fabed249642731b88b2",
"size": "55937",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "wcmatch/_wcparse.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "340678"
}
],
"symlink_target": ""
} |
"""AsyncIO support for zmq
Requires asyncio and Python 3.
"""
# Copyright (c) PyZMQ Developers.
# Distributed under the terms of the Modified BSD License.
# Derived from Python 3.5.1 selectors._BaseSelectorImpl, used under PSF License
try:
    from collections.abc import Mapping
except ImportError:  # Python < 3.3
    from collections import Mapping
import zmq as _zmq
from zmq.eventloop import future as _future
# TODO: support trollius for Legacy Python? (probably not)
import asyncio
from asyncio import SelectorEventLoop, Future
try:
import selectors
except ImportError:
from asyncio import selectors # py33
_aio2zmq_map = {
selectors.EVENT_READ: _zmq.POLLIN,
selectors.EVENT_WRITE: _zmq.POLLOUT,
}
_AIO_EVENTS = 0
for aio_evt in _aio2zmq_map:
_AIO_EVENTS |= aio_evt
def _aio2zmq(aio_evt):
"""Turn AsyncIO event mask into ZMQ event mask"""
z_evt = 0
for aio_mask, z_mask in _aio2zmq_map.items():
if aio_mask & aio_evt:
z_evt |= z_mask
return z_evt
def _zmq2aio(z_evt):
"""Turn ZMQ event mask into AsyncIO event mask"""
aio_evt = 0
for aio_mask, z_mask in _aio2zmq_map.items():
if z_mask & z_evt:
aio_evt |= aio_mask
return aio_evt
class _AsyncIO(object):
_Future = Future
_WRITE = selectors.EVENT_WRITE
_READ = selectors.EVENT_READ
def _default_loop(self):
return asyncio.get_event_loop()
def _fileobj_to_fd(fileobj):
"""Return a file descriptor from a file object.
Parameters:
fileobj -- file object or file descriptor
Returns:
corresponding file descriptor
Raises:
ValueError if the object is invalid
"""
if isinstance(fileobj, int):
fd = fileobj
else:
try:
fd = int(fileobj.fileno())
except (AttributeError, TypeError, ValueError):
raise ValueError("Invalid file object: "
"{!r}".format(fileobj)) from None
if fd < 0:
raise ValueError("Invalid file descriptor: {}".format(fd))
return fd
class _SelectorMapping(Mapping):
"""Mapping of file objects to selector keys."""
def __init__(self, selector):
self._selector = selector
def __len__(self):
return len(self._selector._fd_to_key)
def __getitem__(self, fileobj):
try:
fd = self._selector._fileobj_lookup(fileobj)
return self._selector._fd_to_key[fd]
except KeyError:
raise KeyError("{!r} is not registered".format(fileobj)) from None
def __iter__(self):
return iter(self._selector._fd_to_key)
class ZMQSelector(selectors.BaseSelector):
"""zmq_poll-based selector for asyncio"""
def __init__(self):
super().__init__()
# this maps file descriptors to keys
self._fd_to_key = {}
# read-only mapping returned by get_map()
self._map = _SelectorMapping(self)
self._zmq_poller = _zmq.Poller()
def _fileobj_lookup(self, fileobj):
"""Return a zmq socket or a file descriptor from a file object.
This wraps _fileobj_to_fd() to do an exhaustive search in case
the object is invalid but we still have it in our map. This
is used by unregister() so we can unregister an object that
was previously registered even if it is closed. It is also
used by _SelectorMapping.
"""
if isinstance(fileobj, _zmq.Socket):
return fileobj
else:
try:
return _fileobj_to_fd(fileobj)
except ValueError:
# Do an exhaustive search.
for key in self._fd_to_key.values():
if key.fileobj is fileobj:
return key.fd
# Raise ValueError after all.
raise
def register(self, fileobj, events, data=None):
"""Register a file object.
Parameters:
fileobj -- zmq socket, file object or file descriptor
events -- events to monitor (bitwise mask of EVENT_READ|EVENT_WRITE)
data -- attached data
Returns:
SelectorKey instance
Raises:
ValueError if events is invalid
KeyError if fileobj is already registered
OSError if fileobj is closed or otherwise is unacceptable to
the underlying system call (if a system call is made)
Note:
OSError may or may not be raised
"""
if (not events) or (events & ~(selectors.EVENT_READ | selectors.EVENT_WRITE)):
raise ValueError("Invalid events: {!r}".format(events))
key = selectors.SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)
if key.fd in self._fd_to_key:
raise KeyError("{!r} (FD {}) is already registered"
.format(fileobj, key.fd))
self._fd_to_key[key.fd] = key
self._zmq_poller.register(key.fd, _aio2zmq(events))
return key
def unregister(self, fileobj):
"""Unregister a file object.
Parameters:
fileobj -- zmq socket, file object or file descriptor
Returns:
SelectorKey instance
Raises:
KeyError if fileobj is not registered
Note:
If fileobj is registered but has since been closed this does
*not* raise OSError (even if the wrapped syscall does)
"""
try:
key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
except KeyError:
raise KeyError("{!r} is not registered".format(fileobj)) from None
self._zmq_poller.unregister(key.fd)
return key
def modify(self, fileobj, events, data=None):
try:
key = self._fd_to_key[self._fileobj_lookup(fileobj)]
except KeyError:
raise KeyError("{!r} is not registered".format(fileobj)) from None
if events != key.events:
self.unregister(fileobj)
key = self.register(fileobj, events, data)
elif data != key.data:
# Use a shortcut to update the data.
key = key._replace(data=data)
self._fd_to_key[key.fd] = key
return key
def select(self, timeout=None):
"""Perform the actual selection, until some monitored file objects are
ready or a timeout expires.
Parameters:
timeout -- if timeout > 0, this specifies the maximum wait time, in
seconds
if timeout <= 0, the select() call won't block, and will
report the currently ready file objects
if timeout is None, select() will block until a monitored
file object becomes ready
Returns:
list of (key, events) for ready file objects
`events` is a bitwise mask of EVENT_READ|EVENT_WRITE
"""
if timeout is not None:
if timeout < 0:
timeout = 0
else:
timeout = 1e3 * timeout
fd_event_list = self._zmq_poller.poll(timeout)
ready = []
for fd, event in fd_event_list:
key = self._key_from_fd(fd)
if key:
events = _zmq2aio(event)
ready.append((key, events))
return ready
def close(self):
"""Close the selector.
This must be called to make sure that any underlying resource is freed.
"""
self._fd_to_key.clear()
self._map = None
self._zmq_poller = None
def get_map(self):
return self._map
def _key_from_fd(self, fd):
"""Return the key associated to a given file descriptor.
Parameters:
fd -- file descriptor
Returns:
corresponding key, or None if not found
"""
try:
return self._fd_to_key[fd]
except KeyError:
return None
class Poller(_AsyncIO, _future._AsyncPoller):
"""Poller returning asyncio.Future for poll results."""
pass
class Socket(_AsyncIO, _future._AsyncSocket):
"""Socket returning asyncio Futures for send/recv/poll methods."""
_poller_class = Poller
def _add_io_state(self, state):
"""Add io_state to poller."""
if not self._state & state:
self._state = self._state | state
if state & self._READ:
self.io_loop.add_reader(self, self._handle_recv)
if state & self._WRITE:
self.io_loop.add_writer(self, self._handle_send)
def _drop_io_state(self, state):
"""Stop poller from watching an io_state."""
if self._state & state:
self._state = self._state & (~state)
if state & self._READ:
self.io_loop.remove_reader(self)
if state & self._WRITE:
self.io_loop.remove_writer(self)
def _init_io_state(self):
"""initialize the ioloop event handler"""
pass
class Context(_zmq.Context):
"""Context for creating asyncio-compatible Sockets"""
_socket_class = Socket
class ZMQEventLoop(SelectorEventLoop):
"""AsyncIO eventloop using zmq_poll"""
def __init__(self, selector=None):
if selector is None:
selector = ZMQSelector()
return super(ZMQEventLoop, self).__init__(selector)
_loop = None
def install():
"""Install and return the global ZMQEventLoop
registers the loop with asyncio.set_event_loop
"""
global _loop
if _loop is None:
_loop = ZMQEventLoop()
asyncio.set_event_loop(_loop)
return _loop
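# Minimal usage sketch (illustrative only; the endpoint and socket type below
# are assumptions, not part of this module):
#
#     loop = install()
#     ctx = Context()
#     sock = ctx.socket(_zmq.PULL)
#     sock.bind('tcp://127.0.0.1:5555')
#     msg = loop.run_until_complete(sock.recv())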
__all__ = [
'Context',
'Socket',
'Poller',
'ZMQEventLoop',
'install',
]
| {
"content_hash": "016e52a09282c395e5bca2c9eeb939d5",
"timestamp": "",
"source": "github",
"line_count": 333,
"max_line_length": 89,
"avg_line_length": 28.92192192192192,
"alnum_prop": 0.5859204651645727,
"repo_name": "fzheng/codejam",
"id": "9e5eaf1c598258fbf9b81cfb240dd6d981d233b5",
"size": "9631",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/zmq/asyncio.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "26929"
},
{
"name": "CSS",
"bytes": "70961"
},
{
"name": "HTML",
"bytes": "80615"
},
{
"name": "Java",
"bytes": "376384"
},
{
"name": "JavaScript",
"bytes": "5201764"
},
{
"name": "Jupyter Notebook",
"bytes": "13408"
},
{
"name": "Makefile",
"bytes": "2379"
},
{
"name": "Python",
"bytes": "16542061"
},
{
"name": "Smarty",
"bytes": "22430"
},
{
"name": "TeX",
"bytes": "85477"
}
],
"symlink_target": ""
} |
from c7n_azure.provider import resources
from c7n_azure.resources.arm import ArmResourceManager
from c7n.filters.core import ValueFilter, type_schema
@resources.register('webapp')
class WebApp(ArmResourceManager):
"""Web Applications Resource
:example:
    This policy will find all web apps with 10 or fewer requests over the last 72 hours
.. code-block:: yaml
policies:
- name: webapp-dropping-messages
resource: azure.webapp
filters:
- type: metric
metric: Requests
op: le
aggregation: total
threshold: 10
timeframe: 72
actions:
- type: mark-for-op
op: delete
days: 7
:example:
This policy will find all web apps with 1000 or more server errors over the last 72 hours
.. code-block:: yaml
policies:
- name: webapp-high-error-count
resource: azure.webapp
filters:
- type: metric
                metric: Http5xx
op: ge
aggregation: total
threshold: 1000
timeframe: 72
:example:
This policy will find all web apps with minimum TLS encryption version not equal to 1.2
.. code-block:: yaml
policies:
- name: webapp-min-tls-enforcement
resource: azure.webapp
filters:
- type: configuration
key: minTlsVersion
value: '1.2'
op: ne
"""
class resource_type(ArmResourceManager.resource_type):
doc_groups = ['Compute', 'Web']
service = 'azure.mgmt.web'
client = 'WebSiteManagementClient'
enum_spec = ('web_apps', 'list', None)
default_report_fields = (
'name',
'location',
'resourceGroup',
'kind',
'properties.hostNames[0]'
)
resource_type = 'Microsoft.Web/sites'
@WebApp.filter_registry.register('configuration')
class ConfigurationFilter(ValueFilter):
schema = type_schema('configuration', rinherit=ValueFilter.schema)
schema_alias = True
def __call__(self, i):
if 'c7n:configuration' not in i:
client = self.manager.get_client().web_apps
instance = (
client.get_configuration(i['resourceGroup'], i['name'])
)
i['c7n:configuration'] = instance.serialize(keep_readonly=True)['properties']
return super(ConfigurationFilter, self).__call__(i['c7n:configuration'])
| {
"content_hash": "9940fa5afe16806421d7715bd491d804",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 93,
"avg_line_length": 28.148936170212767,
"alnum_prop": 0.5570672713529856,
"repo_name": "capitalone/cloud-custodian",
"id": "9ef8d630ad4dc17cb618a08999b965188b8c2b41",
"size": "2769",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/c7n_azure/c7n_azure/resources/web_app.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2190"
},
{
"name": "Go",
"bytes": "135995"
},
{
"name": "HTML",
"bytes": "31"
},
{
"name": "Makefile",
"bytes": "9378"
},
{
"name": "Python",
"bytes": "3693572"
},
{
"name": "Shell",
"bytes": "2294"
}
],
"symlink_target": ""
} |
import subprocess
from . import base
class Cmus(base.ThreadPoolText):
"""A simple Cmus widget.
    Show the artist and title of the currently playing song and allow basic mouse
control from the bar:
- toggle pause (or play if stopped) on left click;
- skip forward in playlist on scroll up;
- skip backward in playlist on scroll down.
Cmus (https://cmus.github.io) should be installed.
"""
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
('play_color', '00ff00', 'Text colour when playing.'),
('noplay_color', 'cecece', 'Text colour when not playing.'),
('max_chars', 0, 'Maximum number of characters to display in widget.'),
('update_interval', 0.5, 'Update Time in seconds.')
]
def __init__(self, **config):
base.ThreadPoolText.__init__(self, "", **config)
self.add_defaults(Cmus.defaults)
self.status = ""
self.local = None
def get_info(self):
"""Return a dictionary with info about the current cmus status."""
try:
output = self.call_process(['cmus-remote', '-C', 'status'])
except subprocess.CalledProcessError as err:
output = err.output.decode()
if output.startswith("status"):
output = output.splitlines()
info = {'status': "",
'file': "",
'artist': "",
'album': "",
'title': "",
'stream': ""}
for line in output:
for data in info:
if data in line:
index = line.index(data)
if index < 5:
info[data] = line[len(data) + index:].strip()
break
elif line.startswith("set"):
return info
return info
def now_playing(self):
"""Return a string with the now playing info (Artist - Song Title)."""
info = self.get_info()
now_playing = ""
if info:
status = info['status']
if self.status != status:
self.status = status
if self.status == "playing":
self.layout.colour = self.play_color
else:
self.layout.colour = self.noplay_color
self.local = info['file'].startswith("/")
title = info['title']
if self.local:
artist = info['artist']
now_playing = "{0} - {1}".format(artist, title)
else:
if info['stream']:
now_playing = info['stream']
else:
now_playing = title
if now_playing:
now_playing = "♫ {0}".format(now_playing)
return now_playing
def update(self, text):
"""Update the text box."""
old_width = self.layout.width
if not self.status:
return
if len(text) > self.max_chars > 0:
text = text[:self.max_chars] + "…"
self.text = text
if self.layout.width == old_width:
self.draw()
else:
self.bar.draw()
def poll(self):
"""Poll content for the text box."""
return self.now_playing()
def button_press(self, x, y, button):
"""What to do when press a mouse button over the cmus widget.
Will:
- toggle pause (or play if stopped) on left click;
- skip forward in playlist on scroll up;
- skip backward in playlist on scroll down.
"""
if button == 1:
if self.status in ('playing', 'paused'):
subprocess.Popen(['cmus-remote', '-u'])
elif self.status == 'stopped':
subprocess.Popen(['cmus-remote', '-p'])
elif button == 4:
subprocess.Popen(['cmus-remote', '-n'])
elif button == 5:
subprocess.Popen(['cmus-remote', '-r'])
| {
"content_hash": "579e4f3a06686afa38c34d18356f171b",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 79,
"avg_line_length": 34.45762711864407,
"alnum_prop": 0.4955730447614363,
"repo_name": "soulchainer/qtile",
"id": "c032527882795073e04d65a41077c7a1ca4ca8c3",
"size": "4775",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "libqtile/widget/cmus.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1135"
},
{
"name": "Python",
"bytes": "1152583"
},
{
"name": "Roff",
"bytes": "3605"
},
{
"name": "Shell",
"bytes": "5643"
}
],
"symlink_target": ""
} |
'''
Add the hubble version to the grains
'''
import logging
from hubblestack import __version__
log = logging.getLogger(__name__)
def hubble_version():
'''
Add the hubble version to the grains
'''
return {'hubble_version': __version__}
| {
"content_hash": "f0a94036e1b41a1b16e74dad1cae2333",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 42,
"avg_line_length": 17.066666666666666,
"alnum_prop": 0.6484375,
"repo_name": "HubbleStack/Hubble",
"id": "00a7f89a43a66b799402e4b50b5b5e26e7e39749",
"size": "280",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "hubblestack/extmods/grains/hubbleversion.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
full_name = input('Enter your name: ')
list_of_names = full_name.split()
# Keep only the name parts that start with an uppercase letter; filtering into
# a new list avoids the bug of deleting items while iterating over the list.
list_of_names = [name for name in list_of_names if not name[0].islower()]
print(''.join(name[0] + '.' for name in list_of_names))
"content_hash": "11245836f7609bc7be4be436d6561318",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 56,
"avg_line_length": 26.22222222222222,
"alnum_prop": 0.635593220338983,
"repo_name": "KristianMariyanov/PythonPlayground",
"id": "e72ec34e3a022adb7b09798e5d484568e2b533e9",
"size": "236",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "softuni-course/Lecture02/initials.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5811"
}
],
"symlink_target": ""
} |
"""
module for generating C, C++, Fortran77, Fortran90 and python routines that
evaluate sympy expressions. This module is work in progress. Only the
milestones with a '+' character in the list below have been completed.
--- How is sympy.utilities.codegen different from sympy.printing.ccode? ---
We considered the idea to extend the printing routines for sympy functions in
such a way that it prints complete compilable code, but this leads to a few
unsurmountable issues that can only be tackled with dedicated code generator:
- For C, one needs both a code and a header file, while the printing routines
generate just one string. This code generator can be extended to support .pyf
files for f2py.
- Sympy functions are not concerned with programming-technical issues, such as
input, output and input-output arguments. Other examples are contiguous or
non-contiguous arrays, including headers of other libraries such as gsl or others.
- It is highly interesting to evaluate several sympy functions in one C routine,
  possibly sharing common intermediate results with the help of the cse routine.
This is more than just printing.
- From the programming perspective, expressions with constants should be
evaluated in the code generator as much as possible. This is different for
printing.
--- Basic assumptions ---
* A generic Routine data structure describes the routine that must be translated
into C/Fortran/... code. This data structure covers all features present in
one or more of the supported languages.
* Descendants from the CodeGen class transform multiple Routine instances into
compilable code. Each derived class translates into a specific language.
* In many cases, one wants a simple workflow. The friendly functions in the last
part are a simple api on top of the Routine/CodeGen stuff. They are easier to
use, but are less powerful.
--- Milestones ---
+ First working version with scalar input arguments, generating C code, tests
+ Friendly functions that are easier to use than the rigorous Routine/CodeGen
workflow.
+ Integer and Real numbers as input and output
- Optional extra include lines for libraries/objects that can eval special
functions
- Test other C compilers and libraries: gcc, tcc, libtcc, gcc+gsl, ...
+ Output arguments
+ InputOutput arguments
+ Sort input/output arguments properly
+ Contiguous array arguments (numpy matrices)
- Contiguous array arguments (sympy matrices)
- Non-contiguous array arguments (sympy matrices)
- ccode must raise an error when it encounters something that can not be
translated into c. ccode(integrate(sin(x)/x, x)) does not make sense.
- Complex numbers as input and output
+ Also generate .pyf code for f2py (in autowrap module)
- A default complex datatype
- Include extra information in the header: date, user, hostname, sha1 hash, ...
+ Isolate constants and evaluate them beforehand in double precision
- Common Subexpression Elimination
- User defined comments in the generated code
- Fortran 77
+ Fortran 90
- C++
- Python
- ...
"""
import os
from StringIO import StringIO
from sympy import __version__ as sympy_version
from sympy.core import Symbol, S, Expr, Tuple, Equality, Function
from sympy.printing.codeprinter import AssignmentError
from sympy.printing.ccode import ccode, CCodePrinter
from sympy.printing.fcode import fcode, FCodePrinter
from sympy.tensor import Idx, Indexed, IndexedBase
from sympy.utilities import flatten
__all__ = [
# description of routines
"Routine", "DataType", "default_datatypes", "get_default_datatype",
"Argument", "InputArgument", "Result",
# routines -> code
"CodeGen", "CCodeGen", "FCodeGen",
# friendly functions
"codegen",
]
#
# Description of routines
#
class Routine(object):
"""Generic description of an evaluation routine for a set of sympy expressions.
A CodeGen class can translate instances of this class into C/Fortran/...
code. The routine specification covers all the features present in these
languages. The CodeGen part must raise an exception when certain features
are not present in the target language. For example, multiple return
values are possible in Python, but not in C or Fortran. Another example:
Fortran and Python support complex numbers, while C does not.
"""
def __init__(self, name, expr, argument_sequence=None):
"""Initialize a Routine instance.
``name``
A string with the name of this routine in the generated code
``expr``
The sympy expression that the Routine instance will represent. If
given a list or tuple of expressions, the routine will be
considered to have multiple return values.
``argument_sequence``
Optional list/tuple containing arguments for the routine in a
preferred order. If omitted, arguments will be ordered
            alphabetically, but with all input arguments first, and then output
or in-out arguments.
A decision about whether to use output arguments or return values,
is made depending on the mathematical expressions. For an expression
of type Equality, the left hand side is made into an OutputArgument
(or an InOutArgument if appropriate). Else, the calculated
expression is the return values of the routine.
        A tuple of expressions can be used to create a routine with both
return value(s) and output argument(s).
"""
arg_list = []
if isinstance(expr, (list, tuple)):
if not expr:
raise ValueError("No expression given")
expressions = Tuple(*expr)
else:
expressions = Tuple(expr)
# local variables
local_vars = set([i.label for i in expressions.atoms(Idx)])
# symbols that should be arguments
symbols = expressions.atoms(Symbol) - local_vars
# Decide whether to use output argument or return value
return_val = []
output_args = []
for expr in expressions:
if isinstance(expr, Equality):
out_arg = expr.lhs
expr = expr.rhs
if isinstance(out_arg, Indexed):
dims = tuple([ (S.Zero, dim-1) for dim in out_arg.shape])
symbol = out_arg.base.label
elif isinstance(out_arg, Symbol):
dims = []
symbol = out_arg
else:
raise CodeGenError("Only Indexed or Symbol can define output arguments")
if expr.has(symbol):
output_args.append(InOutArgument(symbol, out_arg, expr, dimensions=dims))
else:
output_args.append(OutputArgument(symbol, out_arg, expr, dimensions=dims))
# avoid duplicate arguments
symbols.remove(symbol)
else:
return_val.append(Result(expr))
# setup input argument list
array_symbols = {}
for array in expressions.atoms(Indexed):
array_symbols[array.base.label] = array
for symbol in sorted(symbols, key=str):
if symbol in array_symbols:
dims = []
array = array_symbols[symbol]
for dim in array.shape:
dims.append((S.Zero, dim - 1))
metadata = {'dimensions': dims}
else:
metadata = {}
arg_list.append(InputArgument(symbol, **metadata))
output_args.sort(key=lambda x:str(x.name))
arg_list.extend(output_args)
if argument_sequence is not None:
# if the user has supplied IndexedBase instances, we'll accept that
new_sequence = []
for arg in argument_sequence:
if isinstance(arg, IndexedBase):
new_sequence.append(arg.label)
else:
new_sequence.append(arg)
argument_sequence = new_sequence
missing = filter(lambda x: x.name not in argument_sequence, arg_list)
if missing:
raise CodeGenArgumentListError("Argument list didn't specify: %s" %
", ".join([str(m.name) for m in missing]), missing)
# create redundant arguments to produce the requested sequence
name_arg_dict = dict([(x.name, x) for x in arg_list])
new_args = []
for symbol in argument_sequence:
try:
new_args.append(name_arg_dict[symbol])
except KeyError:
new_args.append(InputArgument(symbol))
arg_list = new_args
self.name = name
self.arguments = arg_list
self.results = return_val
self.local_vars = local_vars
@property
def variables(self):
"""Returns a set containing all variables possibly used in this routine.
For routines with unnamed return values, the dummies that may or may
not be used will be included in the set.
"""
v = set(self.local_vars)
for arg in self.arguments:
v.add(arg.name)
for res in self.results:
v.add(res.result_var)
return v
@property
def result_variables(self):
"""Returns a list of OutputArgument, InOutArgument and Result.
        If return values are present, they are at the end of the list.
"""
args = [arg for arg in self.arguments if isinstance(arg, (OutputArgument, InOutArgument))]
args.extend(self.results)
return args
class DataType(object):
"""Holds strings for a certain datatype in different programming languages."""
def __init__(self, cname, fname, pyname):
self.cname = cname
self.fname = fname
self.pyname = pyname
default_datatypes = {
"int": DataType("int", "INTEGER*4", "int"),
"float": DataType("double", "REAL*8", "float")
}
def get_default_datatype(expr):
"""Derives a decent data type based on the assumptions on the expression."""
if expr.is_integer:
return default_datatypes["int"]
else:
return default_datatypes["float"]
class Variable(object):
"""Represents a typed variable."""
def __init__(self, name, datatype=None, dimensions=None, precision=None):
"""Initializes a Variable instance
name -- must be of class Symbol
datatype -- When not given, the data type will be guessed based
on the assumptions on the symbol argument.
dimension -- If present, the argument is interpreted as an array.
Dimensions must be a sequence containing tuples, i.e.
(lower, upper) bounds for each index of the array
precision -- FIXME
"""
if not isinstance(name, Symbol):
raise TypeError("The first argument must be a sympy symbol.")
if datatype is None:
datatype = get_default_datatype(name)
elif not isinstance(datatype, DataType):
raise TypeError("The (optional) `datatype' argument must be an instance of the DataType class.")
if dimensions and not isinstance(dimensions, (tuple, list)):
raise TypeError("The dimension argument must be a sequence of tuples")
self._name = name
self._datatype = {
'C': datatype.cname,
'FORTRAN': datatype.fname,
'PYTHON': datatype.pyname
}
self.dimensions = dimensions
self.precision = precision
@property
def name(self):
return self._name
def get_datatype(self, language):
"""Returns the datatype string for the requested langage.
>>> from sympy import Symbol
>>> from sympy.utilities.codegen import Variable
>>> x = Variable(Symbol('x'))
>>> x.get_datatype('c')
'double'
>>> x.get_datatype('fortran')
'REAL*8'
"""
try:
return self._datatype[language.upper()]
except KeyError:
raise CodeGenError("Has datatypes for languages: %s" %
", ".join(self._datatype))
class Argument(Variable):
"""An abstract Argument data structure: a name and a data type.
This structure is refined in the descendants below.
"""
def __init__(self, name, datatype=None, dimensions=None, precision=None):
""" See docstring of Variable.__init__
"""
Variable.__init__(self, name, datatype, dimensions, precision)
class InputArgument(Argument):
pass
class ResultBase(object):
"""Base class for all ``outgoing'' information from a routine
    Objects of this class store a sympy expression, and a sympy object
representing a result variable that will be used in the generated code
only if necessary.
"""
def __init__(self, expr, result_var):
self.expr = expr
self.result_var = result_var
class OutputArgument(Argument, ResultBase):
"""OutputArgument are always initialized in the routine
"""
def __init__(self, name, result_var, expr, datatype=None, dimensions=None, precision=None):
""" See docstring of Variable.__init__
"""
Argument.__init__(self, name, datatype, dimensions, precision)
ResultBase.__init__(self, expr, result_var)
class InOutArgument(Argument, ResultBase):
"""InOutArgument are never initialized in the routine
"""
def __init__(self, name, result_var, expr, datatype=None, dimensions=None, precision=None):
""" See docstring of Variable.__init__
"""
Argument.__init__(self, name, datatype, dimensions, precision)
ResultBase.__init__(self, expr, result_var)
class Result(ResultBase):
"""An expression for a scalar return value.
The name result is used to avoid conflicts with the reserved word
'return' in the python language. It is also shorter than ReturnValue.
"""
def __init__(self, expr, datatype=None, precision=None):
"""Initialize a (scalar) return value.
The second argument is optional. When not given, the data type will
be guessed based on the assumptions on the expression argument.
"""
if not isinstance(expr, Expr):
raise TypeError("The first argument must be a sympy expression.")
temp_var = Variable(Symbol('result_%s'%hash(expr)),
datatype=datatype, dimensions=None, precision=precision)
ResultBase.__init__(self, expr, temp_var.name)
self._temp_variable = temp_var
def get_datatype(self, language):
return self._temp_variable.get_datatype(language)
#
# Transformation of routine objects into code
#
class CodeGen(object):
"""Abstract class for the code generators."""
def __init__(self, project="project"):
"""Initialize a code generator.
Derived classes will offer more options that affect the generated
code.
"""
self.project = project
def write(self, routines, prefix, to_files=False, header=True, empty=True):
"""Writes all the source code files for the given routines.
        The generated source is returned as a list of (filename, contents)
tuples, or is written to files (see options). Each filename consists
of the given prefix, appended with an appropriate extension.
``routines``
A list of Routine instances to be written
``prefix``
The prefix for the output files
``to_files``
When True, the output is effectively written to files.
[DEFAULT=False] Otherwise, a list of (filename, contents)
tuples is returned.
``header``
When True, a header comment is included on top of each source
file. [DEFAULT=True]
``empty``
When True, empty lines are included to structure the source
files. [DEFAULT=True]
"""
if to_files:
for dump_fn in self.dump_fns:
filename = "%s.%s" % (prefix, dump_fn.extension)
f = file(filename, "w")
dump_fn(self, routines, f, prefix, header, empty)
f.close()
else:
result = []
for dump_fn in self.dump_fns:
filename = "%s.%s" % (prefix, dump_fn.extension)
contents = StringIO()
dump_fn(self, routines, contents, prefix, header, empty)
result.append((filename, contents.getvalue()))
return result
def dump_code(self, routines, f, prefix, header=True, empty=True):
"""Write the code file by calling language specific methods in correct order
The generated file contains all the definitions of the routines in
low-level code and refers to the header file if appropriate.
:Arguments:
routines
A list of Routine instances
f
A file-like object to write the file to
prefix
The filename prefix, used to refer to the proper header file. Only
the basename of the prefix is used.
:Optional arguments:
header
When True, a header comment is included on top of each source file.
[DEFAULT=True]
empty
When True, empty lines are included to structure the source files.
[DEFAULT=True]
"""
code_lines = self._preprosessor_statements(prefix)
for routine in routines:
if empty: code_lines.append("\n")
code_lines.extend(self._get_routine_opening(routine))
code_lines.extend(self._declare_arguments(routine))
code_lines.extend(self._declare_locals(routine))
if empty: code_lines.append("\n")
code_lines.extend(self._call_printer(routine))
if empty: code_lines.append("\n")
code_lines.extend(self._get_routine_ending(routine))
code_lines = self._indent_code(''.join(code_lines))
if header:
code_lines = ''.join(self._get_header() + [code_lines])
if code_lines:
print >> f, code_lines,
class CodeGenError(Exception):
pass
class CodeGenArgumentListError(Exception):
@property
def missing_args(self):
return self.args[1]
header_comment = """Code generated with sympy %(version)s
See http://www.sympy.org/ for more information.
This file is part of '%(project)s'
"""
class CCodeGen(CodeGen):
"""
Generator for C code
The .write() method inherited from CodeGen will output a code file and an
    interface file, <prefix>.c and <prefix>.h respectively.
"""
code_extension = "c"
interface_extension = "h"
def _get_header(self):
"""Writes a common header for the generated files."""
code_lines = []
code_lines.append("/" + "*"*78 + '\n')
tmp = header_comment % {"version": sympy_version, "project": self.project}
for line in tmp.splitlines():
code_lines.append(" *%s*\n" % line.center(76))
code_lines.append(" " + "*"*78 + "/\n")
return code_lines
def get_prototype(self, routine):
"""Returns a string for the function prototype for the given routine.
        If the routine has multiple result objects, a CodeGenError is
raised.
See: http://en.wikipedia.org/wiki/Function_prototype
"""
if len(routine.results) > 1:
raise CodeGenError("C only supports a single or no return value.")
elif len(routine.results) == 1:
ctype = routine.results[0].get_datatype('C')
else:
ctype = "void"
type_args = []
for arg in routine.arguments:
name = ccode(arg.name)
if arg.dimensions:
type_args.append((arg.get_datatype('C'), "*%s" % name))
elif isinstance(arg, ResultBase):
type_args.append((arg.get_datatype('C'), "&%s" % name))
else:
type_args.append((arg.get_datatype('C'), name))
arguments = ", ".join([ "%s %s" % t for t in type_args])
return "%s %s(%s)" % (ctype, routine.name, arguments)
def _preprosessor_statements(self, prefix):
code_lines = []
code_lines.append("#include \"%s.h\"\n" % os.path.basename(prefix))
code_lines.append("#include <math.h>\n")
return code_lines
def _get_routine_opening(self, routine):
prototype = self.get_prototype(routine)
return ["%s {\n" % prototype]
def _declare_arguments(self, routine):
# arguments are declared in prototype
return []
def _declare_locals(self, routine):
# loop variables are declared in loop statement
return []
def _call_printer(self, routine):
code_lines = []
for result in routine.result_variables:
if isinstance(result, Result):
assign_to = None
elif isinstance(result, (OutputArgument, InOutArgument)):
assign_to = result.result_var
try:
constants, not_c, c_expr = ccode(result.expr, assign_to=assign_to, human=False)
except AssignmentError:
assign_to = result.result_var
code_lines.append("%s %s;\n" % (result.get_datatype('c'), str(assign_to)))
constants, not_c, c_expr = ccode(result.expr, assign_to=assign_to, human=False)
for name, value in sorted(constants, key=str):
code_lines.append("double const %s = %s;\n" % (name, value))
if assign_to:
code_lines.append("%s\n" % c_expr)
else:
code_lines.append(" return %s;\n" % c_expr)
return code_lines
def _indent_code(self, codelines):
p = CCodePrinter()
return p.indent_code(codelines)
def _get_routine_ending(self, routine):
return ["}\n"]
def dump_c(self, routines, f, prefix, header=True, empty=True):
self.dump_code(routines, f, prefix, header, empty)
dump_c.extension = code_extension
dump_c.__doc__ = CodeGen.dump_code.__doc__
def dump_h(self, routines, f, prefix, header=True, empty=True):
"""Writes the C header file.
This file contains all the function declarations.
:Arguments:
routines
A list of Routine instances
f
A file-like object to write the file to
prefix
The filename prefix, used to construct the include guards.
:Optional arguments:
header
When True, a header comment is included on top of each source
file. [DEFAULT=True]
empty
When True, empty lines are included to structure the source
files. [DEFAULT=True]
"""
if header:
print >> f, ''.join(self._get_header())
guard_name = "%s__%s__H" % (self.project.replace(" ", "_").upper(), prefix.replace("/", "_").upper())
# include guards
if empty: print >> f
print >> f, "#ifndef %s" % guard_name
print >> f, "#define %s" % guard_name
if empty: print >> f
# declaration of the function prototypes
for routine in routines:
prototype = self.get_prototype(routine)
print >> f, "%s;" % prototype
# end if include guards
if empty: print >> f
print >> f, "#endif"
if empty: print >> f
dump_h.extension = interface_extension
# This list of dump functions is used by CodeGen.write to know which dump
# functions it has to call.
dump_fns = [dump_c, dump_h]
class FCodeGen(CodeGen):
"""
Generator for Fortran 95 code
The .write() method inherited from CodeGen will output a code file and an
    interface file, <prefix>.f90 and <prefix>.h respectively.
"""
code_extension = "f90"
interface_extension = "h"
def __init__(self, project='project'):
CodeGen.__init__(self, project)
def _get_symbol(self, s):
"""returns the symbol as fcode print it"""
return fcode(s).strip()
def _get_header(self):
"""Writes a common header for the generated files."""
code_lines = []
code_lines.append("!" + "*"*78 + '\n')
tmp = header_comment % {"version": sympy_version, "project": self.project}
for line in tmp.splitlines():
code_lines.append("!*%s*\n" % line.center(76))
code_lines.append("!" + "*"*78 + '\n')
return code_lines
def _preprosessor_statements(self, prefix):
return []
def _get_routine_opening(self, routine):
"""
Returns the opening statements of the fortran routine
"""
code_list = []
if len(routine.results) > 1:
raise CodeGenError("Fortran only supports a single or no return value.")
elif len(routine.results) == 1:
result = routine.results[0]
code_list.append(result.get_datatype('fortran'))
code_list.append("function")
else:
code_list.append("subroutine")
args = ", ".join("%s" % self._get_symbol(arg.name)
for arg in routine.arguments)
# name of the routine + arguments
code_list.append("%s(%s)\n" % (routine.name, args))
code_list = [ " ".join(code_list) ]
code_list.append('implicit none\n')
return code_list
def _declare_arguments(self, routine):
# argument type declarations
code_list = []
array_list = []
scalar_list = []
for arg in routine.arguments:
if isinstance(arg, InputArgument):
typeinfo = "%s, intent(in)" % arg.get_datatype('fortran')
elif isinstance(arg, InOutArgument):
typeinfo = "%s, intent(inout)" % arg.get_datatype('fortran')
elif isinstance(arg, OutputArgument):
typeinfo = "%s, intent(out)" % arg.get_datatype('fortran')
else:
raise CodeGenError("Unkown Argument type: %s"%type(arg))
fprint = self._get_symbol
if arg.dimensions:
# fortran arrays start at 1
dimstr = ", ".join(["%s:%s"%(
fprint(dim[0]+1), fprint(dim[1]+1))
for dim in arg.dimensions])
typeinfo += ", dimension(%s)" % dimstr
array_list.append("%s :: %s\n" % (typeinfo, fprint(arg.name)))
else:
scalar_list.append("%s :: %s\n" % (typeinfo, fprint(arg.name)))
# scalars first, because they can be used in array declarations
code_list.extend(scalar_list)
code_list.extend(array_list)
return code_list
def _declare_locals(self, routine):
code_list = []
for var in sorted(routine.local_vars, key=str):
typeinfo = get_default_datatype(var)
code_list.append("%s :: %s\n" % (
typeinfo.fname, self._get_symbol(var)))
return code_list
def _get_routine_ending(self, routine):
"""
Returns the closing statements of the fortran routine
"""
if len(routine.results) == 1:
return ["end function\n"]
else:
return ["end subroutine\n"]
def get_interface(self, routine):
"""Returns a string for the function interface for the given routine and
a single result object, which can be None.
If the routine has multiple result objects, a CodeGenError is
raised.
See: http://en.wikipedia.org/wiki/Function_prototype
"""
prototype = [ "interface\n" ]
prototype.extend(self._get_routine_opening(routine))
prototype.extend(self._declare_arguments(routine))
prototype.extend(self._get_routine_ending(routine))
prototype.append("end interface\n")
return "".join(prototype)
def _call_printer(self, routine):
declarations = []
code_lines = []
for result in routine.result_variables:
if isinstance(result, Result):
assign_to = routine.name
elif isinstance(result, (OutputArgument, InOutArgument)):
assign_to = result.result_var
constants, not_fortran, f_expr = fcode(result.expr,
assign_to=assign_to, source_format='free', human=False)
for obj, v in sorted(constants, key=str):
t = get_default_datatype(obj)
declarations.append("%s, parameter :: %s = %s\n" % (t.fname, obj, v))
for obj in sorted(not_fortran, key=str):
t = get_default_datatype(obj)
if isinstance(obj, Function):
name = obj.func
else:
name = obj
declarations.append("%s :: %s\n" % (t.fname, name))
code_lines.append("%s\n" % f_expr)
return declarations + code_lines
def _indent_code(self, codelines):
p = FCodePrinter({'source_format': 'free', 'human': False})
return p.indent_code(codelines)
def dump_f95(self, routines, f, prefix, header=True, empty=True):
# check that symbols are unique with ignorecase
for r in routines:
lowercase = set(map(lambda x: str(x).lower(), r.variables))
orig_case = set(map(lambda x: str(x), r.variables))
if len(lowercase) < len(orig_case):
raise CodeGenError("Fortran ignores case. Got symbols: %s"%
(", ".join([str(var) for var in r.variables])))
self.dump_code(routines, f, prefix, header, empty)
dump_f95.extension = code_extension
dump_f95.__doc__ = CodeGen.dump_code.__doc__
def dump_h(self, routines, f, prefix, header=True, empty=True):
"""Writes the interface to a header file.
This file contains all the function declarations.
:Arguments:
routines
A list of Routine instances
f
A file-like object to write the file to
prefix
The filename prefix
:Optional arguments:
header
When True, a header comment is included on top of each source
file. [DEFAULT=True]
empty
When True, empty lines are included to structure the source
files. [DEFAULT=True]
"""
if header:
print >> f, ''.join(self._get_header())
if empty: print >> f
# declaration of the function prototypes
for routine in routines:
prototype = self.get_interface(routine)
print >> f, prototype,
if empty: print >> f
dump_h.extension = interface_extension
# This list of dump functions is used by CodeGen.write to know which dump
# functions it has to call.
dump_fns = [dump_f95, dump_h]
def get_code_generator(language, project):
CodeGenClass = {"C": CCodeGen, "F95": FCodeGen}.get(language.upper())
if CodeGenClass is None:
raise ValueError("Language '%s' is not supported." % language)
return CodeGenClass(project)
#
# Friendly functions
#
def codegen(name_expr, language, prefix, project="project", to_files=False, header=True, empty=True,
argument_sequence=None):
"""Write source code for the given expressions in the given language.
:Mandatory Arguments:
``name_expr``
A single (name, expression) tuple or a list of (name, expression)
tuples. Each tuple corresponds to a routine. If the expression is an
equality (an instance of class Equality) the left hand side is
considered an output argument.
``language``
A string that indicates the source code language. This is case
        insensitive. For the moment, only 'C' and 'F95' are supported.
``prefix``
A prefix for the names of the files that contain the source code.
Proper (language dependent) suffixes will be appended.
:Optional Arguments:
``project``
A project name, used for making unique preprocessor instructions.
[DEFAULT="project"]
``to_files``
When True, the code will be written to one or more files with the given
prefix, otherwise strings with the names and contents of these files
are returned. [DEFAULT=False]
``header``
When True, a header is written on top of each source file.
[DEFAULT=True]
``empty``
When True, empty lines are used to structure the code. [DEFAULT=True]
``argument_sequence``
sequence of arguments for the routine in a preferred order. A
CodeGenError is raised if required arguments are missing. Redundant
arguments are used without warning.
If omitted, arguments will be ordered alphabetically, but with all
        input arguments first, and then output or in-out arguments.
>>> from sympy import symbols
>>> from sympy.utilities.codegen import codegen
>>> from sympy.abc import x, y, z
>>> [(c_name, c_code), (h_name, c_header)] = \\
... codegen(("f", x+y*z), "C", "test", header=False, empty=False)
>>> print c_name
test.c
>>> print c_code,
#include "test.h"
#include <math.h>
double f(double x, double y, double z) {
return x + y*z;
}
>>> print h_name
test.h
>>> print c_header,
#ifndef PROJECT__TEST__H
#define PROJECT__TEST__H
double f(double x, double y, double z);
#endif
"""
# Initialize the code generator.
code_gen = get_code_generator(language, project)
# Construct the routines based on the name_expression pairs.
# mainly the input arguments require some work
routines = []
if isinstance(name_expr[0], basestring):
# single tuple is given, turn it into a singleton list with a tuple.
name_expr = [name_expr]
for name, expr in name_expr:
routines.append(Routine(name, expr, argument_sequence))
# Write the code.
return code_gen.write(routines, prefix, to_files, header, empty)
| {
"content_hash": "27031568e1f0c04f530a81b8be92ba32",
"timestamp": "",
"source": "github",
"line_count": 955,
"max_line_length": 109,
"avg_line_length": 36.27225130890052,
"alnum_prop": 0.6032332563510393,
"repo_name": "pernici/sympy",
"id": "801e5aa10b03b0582235b4b528cb83bb5a1cf466",
"size": "34640",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sympy/utilities/codegen.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "6531741"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "TeX",
"bytes": "8"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
} |
"""
WSGI config for {{ project_name }} project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
from __future__ import unicode_literals
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "{{ project_name }}.settings.prod")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| {
"content_hash": "69d4f4b9a680e9f94e9917693a806e64",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 83,
"avg_line_length": 30.4,
"alnum_prop": 0.7543859649122807,
"repo_name": "rkorkosz/django-base-template",
"id": "d29b619238362cf4ef4e7b3ab34ba96589f39d52",
"size": "456",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "project_name/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "59449"
},
{
"name": "HTML",
"bytes": "9211"
},
{
"name": "Python",
"bytes": "8338"
}
],
"symlink_target": ""
} |
import argparse
import sys
import os
def write_header(fh, key, value):
fh.write("#" + key + "\t" + value + "\n")
# Argument handling
parser = argparse.ArgumentParser( description='Convert ONT model file into nanopolish format')
parser.add_argument('-i', '--input', type=str, required=True)
parser.add_argument('-o', '--output-dir', type=str, required=False)
args = parser.parse_args()
f = open(args.input)
# Parse metadata out of the type dir
(dirs, filename) = os.path.split(args.input)
(_, type_dir) = os.path.split(dirs)
metadata_fields = type_dir.split("_")
if(len(metadata_fields) != 4):
sys.stderr.write("Error, could not parse type dir\n")
sys.exit(1)
pore = metadata_fields[0]
speed = metadata_fields[2]
K = metadata_fields[3].replace("mer", "")
new_kit_name = pore + "_" + speed
alphabet = "nucleotide"
strand = ""
if filename.find("template") != -1:
strand = "template"
else:
assert(filename.find("complement") != -1)
if filename.find("pop1") != -1:
strand = "complement.pop1"
else:
assert(filename.find("pop2") != -1)
strand = "complement.pop2"
dir_str = ""
if args.output_dir is not None:
dir_str = args.output_dir + "/"
out_name = "%s%s.%s.%smer.%s.model" % (dir_str, new_kit_name, alphabet, K, strand)
out_file = open(out_name, "w")
write_header(out_file, "ont_model_name", type_dir)
write_header(out_file, "kit", new_kit_name)
write_header(out_file, "strand", strand)
write_header(out_file, "k", K)
write_header(out_file, "original_file", type_dir + "/" + filename)
# Copy everything to the output
for line in f:
    # ONT files shouldn't have header tags
assert(line[0] != "#")
out_file.write(line)
sys.stdout.write(out_name + "\n")
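# Illustrative sketch, not from the original script: for a hypothetical input such as
#   r9_template_250bps_6mer/template.model
# the type dir parses to pore='r9', speed='250bps', K='6', so the output file would
# be named 'r9_250bps.nucleotide.6mer.template.model' in the chosen output dir.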
| {
"content_hash": "8948bba70863bde4495a42e8ce3b02e4",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 94,
"avg_line_length": 28.262295081967213,
"alnum_prop": 0.6519721577726219,
"repo_name": "mateidavid/nanopolish",
"id": "d39366543bf3d023443c3185742476c7e0e988f5",
"size": "1860",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/import_ont_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "10245"
},
{
"name": "C++",
"bytes": "5071536"
},
{
"name": "Makefile",
"bytes": "3929"
},
{
"name": "Perl",
"bytes": "603"
},
{
"name": "Python",
"bytes": "14644"
},
{
"name": "Shell",
"bytes": "284"
}
],
"symlink_target": ""
} |
from ._shared import InvalidArgs
from .Log import Log
from .Aide import Aide
from .Alias import Alias
from .Budget import Budget
from .Camera import Camera
from .Courses import Courses
from .Dis import Dis
from .Disclaimer import Disclaimer
from .Emprunt import Emprunt
from .Historique import Historique
from .Info import Info
from .Jeu import Jeu
from .Lien import Lien
from .Lumiere import Lumiere
from .Moderation import Moderation
from .Tchou_Tchou import Tchou_Tchou
from .Update import Update
from .Version import Version
from .Ping import Ping
from .Retour import Retour
__all__ = [
"InvalidArgs",
"Log",
"Aide",
"Alias",
"Budget",
"Camera",
"Courses",
"Dis",
"Disclaimer",
"Emprunt",
"Historique",
"Info",
"Jeu",
"Lien",
"Lumiere",
"Moderation",
"Tchou_Tchou",
"Update",
"Version",
"Ping",
"Retour",
]
| {
"content_hash": "3c89fdd24cd70561bdd87d905ec96578",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 36,
"avg_line_length": 19.91111111111111,
"alnum_prop": 0.6774553571428571,
"repo_name": "hackEns/Jarvis",
"id": "e1bb1b8f04b3ce374408cbc63afc3b5710eb518c",
"size": "896",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Rules/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "1219"
},
{
"name": "Python",
"bytes": "87477"
},
{
"name": "Shell",
"bytes": "1889"
}
],
"symlink_target": ""
} |
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.tree import DecisionTree
from pyspark import SparkConf, SparkContext
from numpy import array
# Boilerplate Spark stuff:
conf = SparkConf().setMaster("local").setAppName("SparkDecisionTree")
sc = SparkContext(conf = conf)
# Some functions that convert our CSV input data into numerical
# features for each job candidate
def binary(YN):
if (YN == 'Y'):
return 1
else:
return 0
def mapEducation(degree):
if (degree == 'BS'):
return 1
elif (degree =='MS'):
return 2
elif (degree == 'PhD'):
return 3
else:
return 0
# Convert a list of raw fields from our CSV file to a
# LabeledPoint that MLLib can use. All data must be numerical...
def createLabeledPoints(fields):
yearsExperience = int(fields[0])
employed = binary(fields[1])
previousEmployers = int(fields[2])
educationLevel = mapEducation(fields[3])
topTier = binary(fields[4])
interned = binary(fields[5])
hired = binary(fields[6])
return LabeledPoint(hired, array([yearsExperience, employed,
previousEmployers, educationLevel, topTier, interned]))
#Load up our CSV file, and filter out the header line with the column names
rawData = sc.textFile("e:/sundog-consult/udemy/datascience/PastHires.csv")
header = rawData.first()
rawData = rawData.filter(lambda x:x != header)
# Split each line into a list based on the comma delimiters
csvData = rawData.map(lambda x: x.split(","))
# Convert these lists to LabeledPoints
trainingData = csvData.map(createLabeledPoints)
# Create a test candidate, with 10 years of experience, currently employed,
# 3 previous employers, a BS degree, but from a non-top-tier school where
# he or she did not do an internship. You could of course load up a whole
# huge RDD of test candidates from disk, too.
testCandidates = [ array([10, 1, 3, 1, 0, 0])]
testData = sc.parallelize(testCandidates)
# Train our DecisionTree classifier using our data set
model = DecisionTree.trainClassifier(trainingData, numClasses=2,
categoricalFeaturesInfo={1:2, 3:4, 4:2, 5:2},
impurity='gini', maxDepth=5, maxBins=32)
# Now get predictions for our unknown candidates. (Note, you could separate
# the source data into a training set and a test set while tuning
# parameters and measure accuracy as you go!)
predictions = model.predict(testData)
print ('Hire prediction:')
results = predictions.collect()
for result in results:
    print(result)
# We can also print out the decision tree itself:
print('Learned classification tree model:')
print(model.toDebugString())
| {
"content_hash": "2814ee933c4bf73131f7a94d4bba7f31",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 82,
"avg_line_length": 36.48684210526316,
"alnum_prop": 0.6887847097006852,
"repo_name": "yevheniyc/C",
"id": "d507587cf786312934f334025dbd3608839ff41c",
"size": "2773",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "1t_DataAnalysisMLPython/1j_ML/DS_ML_Py_SBO/DataScience/SparkDecisionTree.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "262325"
},
{
"name": "HTML",
"bytes": "4136"
},
{
"name": "Makefile",
"bytes": "7381"
}
],
"symlink_target": ""
} |
'''
..
Red9 Studio Pack: Maya Pipeline Solutions
Author: Mark Jackson
email: [email protected]
Red9 blog : http://red9-consultancy.blogspot.co.uk/
MarkJ blog: http://markj3d.blogspot.co.uk
This is the General library of utils used throughout the modules
These are abstract general functions
NOTHING IN THIS MODULE SHOULD REQUIRE RED9
'''
from __future__ import with_statement # required only for Maya2009/8
from functools import wraps
import maya.cmds as cmds
import maya.mel as mel
import os
import time
import inspect
import sys
import tempfile
import subprocess
import json
import itertools
#Only valid Red9 import
import Red9.startup.setup as r9Setup
import logging
logging.basicConfig()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
# Generic Utility Functions ---
#---------------------------------------------------------------------------------
def getCurrentFPS():
'''
returns the current frames per second as a number, rather than a useless string
'''
fpsDict = {"game":15.0, "film":24.0, "pal":25.0, "ntsc":30.0, "show":48.0, "palf":50.0, "ntscf":60.0}
return fpsDict[cmds.currentUnit(q=True, fullName=True, time=True)]
def forceToString(text):
'''
simple function to ensure that data can be passed correctly into
textFields for the UI (ensuring lists are converted)
'''
if issubclass(type(text), list):
return ','.join(text)
else:
return text
def formatPath(path):
'''
take a path and format it to forward slashes with catches for the exceptions
'''
return os.path.normpath(path).replace('\\','/').replace('\t','/t').replace('\n','/n').replace('\a', '/a')
def itersubclasses(cls, _seen=None):
"""
itersubclasses(cls)
http://code.activestate.com/recipes/576949-find-all-subclasses-of-a-given-class/
Iterator to yield full inheritance from a given class, including subclasses. This
is used in the MetaClass to build the RED9_META_REGISTERY inheritance dict
"""
if _seen is None:
_seen = set()
try:
subs = cls.__subclasses__()
except TypeError: # fails only when cls is type
subs = cls.__subclasses__(cls)
for sub in subs:
if sub not in _seen:
_seen.add(sub)
yield sub
for sub in itersubclasses(sub, _seen):
yield sub
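# Illustrative sketch, not from the original module: for a hypothetical hierarchy
# the generator walks the full inheritance tree depth-first, e.g.
#   class Base(object): pass
#   class A(Base): pass
#   class B(A): pass
#   list(itersubclasses(Base))    # -> [A, B]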
def inspectFunctionSource(value):
'''
    This is a neat little wrapper over the mel "whatIs" and Python's inspect
module that finds the given functions source filePath, either Mel or Python
and opens the original file in the default program.
Great for developers
Supports all Mel functions, and Python Class / functions
'''
path=None
#sourceType=None
#Inspect for MEL
log.debug('inspecting given command: %s' % value)
#if issubclass(sourceType(value),str):
try:
path=mel.eval('whatIs("%s")' % value)
if path and not path=="Command":
path=path.split("in: ")[-1]
#if path:
#sourceType='mel'
elif path=="Command":
cmds.warning('%s : is a Command not a script' % value)
return False
except StandardError, error:
log.info(error)
#Inspect for Python
if not path or not os.path.exists(path):
log.info('This is not a known Mel command, inspecting Python libs for : %s' % value)
try:
log.debug('value : %s' % value)
            log.debug('value isString : %s' % isinstance(value, str))
            log.debug('value callable: %s' % callable(value))
            log.debug('value is module : %s' % inspect.ismodule(value))
            log.debug('value is method : %s' % inspect.ismethod(value))
if isinstance(value, str):
#if not callable(value):
value=eval(value)
path=inspect.getsourcefile(value)
if path:
#sourceType='python'
log.info('path : %s' % path)
except StandardError, error:
log.exception(error)
#Open the file with the default editor
#FIXME: If Python and you're a dev then the .py file may be set to open in the default
#Python runtime/editor and won't open as expected. Need to look at this.
if path and os.path.exists(path):
log.debug('NormPath : %s' % os.path.normpath(path))
os.startfile(os.path.normpath(path))
return True
else:
log.warning('No valid path or functions found matches selection')
return False
def getScriptEditorSelection():
'''
this is a hack to bypass an issue with getting the data back from the
ScriptEditorHistory scroll. We need to copy the selected text to the
clipboard then pull it back afterwards.
'''
import Red9.packages.pyperclip as pyperclip
control=mel.eval("$v=$gLastFocusedCommandControl")
executer=mel.eval("$v=$gLastFocusedCommandExecuter")
reporter=mel.eval("$v=$gLastFocusedCommandReporter")
func=""
if control==executer:
func=cmds.cmdScrollFieldExecuter(control, q=True, selectedText=True)
elif control == reporter:
cmds.cmdScrollFieldReporter(reporter, e=True, copySelection=True)
#func=Clipboard.getText()
        #pyperclip.py : IN TESTING : Platform independent clipboard support
func=pyperclip.paste()
log.info('command caught: %s ' % func)
return func
# Context Managers and Decorators ---
#---------------------------------------------------------------------------------
def Timer(func):
'''
Simple timer decorator
'''
@wraps(func)
def wrapper(*args, **kws):
t1 = time.time()
res=func(*args, **kws)
t2 = time.time()
functionTrace=''
try:
#module if found
mod = inspect.getmodule(args[0])
functionTrace+='%s >>' % mod.__name__.split('.')[-1]
except:
log.debug('function module inspect failure')
try:
#class function is part of, if found
cls = args[0].__class__
functionTrace+='%s.' % args[0].__class__.__name__
except:
log.debug('function class inspect failure')
functionTrace += func.__name__
log.debug('TIMER : %s: took %0.3f ms' % (functionTrace, (t2 - t1) * 1000.0))
#log.info('%s: took %0.3f ms' % (func.func_name, (t2-t1)*1000.0))
return res
return wrapper
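# Illustrative sketch, not from the original module: the decorator is applied like
# any other; timings are emitted at DEBUG level so the logger must be set to
# logging.DEBUG to see them. The function below is hypothetical.
#   @Timer
#   def buildRig(nodes):
#       ...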
def runProfile(func):
'''
    run the profiler - only ever used when debugging/optimizing function call speeds.
visualize the data using 'runsnakerun' to view the profiles and debug
'''
import cProfile
from time import gmtime, strftime
@wraps(func)
def wrapper(*args, **kwargs):
currentTime = strftime("%d-%m-%H.%M.%S", gmtime())
dumpFileName = 'c:/%s(%s).profile' % (func.__name__, currentTime)
def command():
func(*args, **kwargs)
profile = cProfile.runctx("command()", globals(), locals(), dumpFileName)
return profile
return wrapper
class AnimationContext(object):
"""
Simple Context Manager for restoring Animation settings
"""
def __init__(self):
self.autoKeyState=None
self.timeStore=None
def __enter__(self):
self.autoKeyState=cmds.autoKeyframe(query=True, state=True)
self.timeStore=cmds.currentTime(q=True)
cmds.undoInfo(openChunk=True)
def __exit__(self, exc_type, exc_value, traceback):
# Close the undo chunk, warn if any exceptions were caught:
cmds.autoKeyframe(state=self.autoKeyState)
cmds.currentTime(self.timeStore)
log.info('autoKeyState restored: %s' % self.autoKeyState)
log.info('currentTime restored: %f' % self.timeStore)
cmds.undoInfo(closeChunk=True)
if exc_type:
log.exception('%s : %s'%(exc_type, exc_value))
# If this was false, it would re-raise the exception when complete
return True
class undoContext(object):
"""
Simple Context Manager for chunking the undoState
"""
def __init__(self, initialUndo=False, undoFuncCache=[], undoDepth=1):
'''
If initialUndo is True then the context manager will manage what to do on entry with
the undoStack. The idea is that if True the code will look at the last functions in the
        undoQueue and if any of those match those in the undoFuncCache, it'll undo them to the
depth given.
        WHY?????? This is specifically designed for things like floatSliders where you've
        set a function to act on the 'dc' flag (drag command); by passing that func through this,
        each drag will only go into the stack once, enabling you to drag as much as you want
        and return to the initial state, pre ALL drags, in one chunk.
:param initialUndo: on first process whether undo on entry to the context manager
:param undoFuncCache: only if initialUndo = True : functions to catch in the undo stack
:param undoDepth: only if initialUndo = True : depth of the undo stack to go to
.. note ::
When adding funcs to this you CAN'T call the 'dc' command on any slider with a lambda func,
it has to call a specific func to catch in the undoStack. See Red9_AnimationUtils.FilterCurves
code for a live example of this setup.
'''
self.initialUndo = initialUndo
self.undoFuncCache = undoFuncCache
self.undoDepth = undoDepth
def undoCall(self):
for _ in range(1, self.undoDepth + 1):
#log.depth('undoDepth : %s' % i)
if [func for func in self.undoFuncCache if func in cmds.undoInfo(q=True, undoName=True)]:
cmds.undo()
def __enter__(self):
if self.initialUndo:
self.undoCall()
cmds.undoInfo(openChunk=True)
def __exit__(self, exc_type, exc_value, traceback):
cmds.undoInfo(closeChunk=True)
if exc_type:
log.exception('%s : %s'%(exc_type, exc_value))
# If this was false, it would re-raise the exception when complete
return True
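# Illustrative sketch, not from the original module: typical use is to wrap the body
# of a slider's drag callback so repeated drags collapse back into a single undo
# chunk. The callback name, cached undo name and attribute below are hypothetical.
#   def onSliderDrag(value):
#       with undoContext(initialUndo=True, undoFuncCache=['setAttr'], undoDepth=1):
#           cmds.setAttr('pCube1.translateX', value)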
class ProgressBarContext(object):
'''
Context manager to make it easier to wrap progressBars
>>> #Example of using this in code
>>>
>>> step=5
>>> progressBar=r9General.ProgressBarContext(1000)
>>> progressBar.setStep(step)
>>> count=0
>>>
>>> #now do your code but increment and check the progress state
>>> with progressBar:
    >>>     for i in range(1, 1000):
>>> if progressBar.isCanceled():
>>> print 'process cancelled'
>>> return
>>> progressBar.setProgress(count)
>>> count+=step
'''
def __init__(self, maxValue=100, interruptable=True):
self.disable=False
if r9Setup.mayaIsBatch():
self.disable=True
return
if maxValue <= 0:
raise ValueError("Max has to be greater than 0")
self._maxValue = maxValue
self._interruptable = interruptable
self._gMainProgressBar = mel.eval('$gmtmp = $gMainProgressBar')
def isCanceled(self):
if not self.disable:
return cmds.progressBar(self._gMainProgressBar, query=True, isCancelled=True)
def setText(self, text):
if not self.disable:
cmds.progressBar(self._gMainProgressBar, edit=True, status=text)
def setMaxValue(self, value):
if not self.disable:
cmds.progressBar(self._gMainProgressBar, edit=True, maxValue=int(value))
def setStep(self, value):
if not self.disable:
cmds.progressBar(self._gMainProgressBar, edit=True, step=int(value))
def setProgress(self, value):
if not self.disable:
cmds.progressBar(self._gMainProgressBar, edit=True, progress=int(value))
def reset(self):
if not self.disable:
self.setMaxValue(self._maxValue)
self.setText("")
def __enter__(self):
if not self.disable:
cmds.progressBar(self._gMainProgressBar,
edit=True,
beginProgress=True,
isInterruptable=self._interruptable,
maxValue=self._maxValue)
def __exit__(self, exc_type, exc_value, traceback):
if not self.disable:
cmds.progressBar(self._gMainProgressBar, edit=True, endProgress=True)
if exc_type:
log.exception('%s : %s'%(exc_type, exc_value))
del(self)
        return False # False so that the exception gets re-raised
class HIKContext(object):
"""
Simple Context Manager for restoring HIK Animation settings and managing HIK callbacks
"""
def __init__(self, NodeList):
self.objs=cmds.ls(sl=True, l=True)
self.NodeList=NodeList
self.managedHIK = False
def __enter__(self):
try:
#We set the keying group mainly for the copyKey code, stops the entire rig being
#manipulated on copy of single effector data
self.keyingGroups=cmds.keyingGroup(q=True, fil=True)
if [node for node in self.NodeList if cmds.nodeType(node) == 'hikIKEffector'\
or cmds.nodeType(node) == 'hikFKJoint']:
self.managedHIK = True
if self.managedHIK:
cmds.keyingGroup(fil="NoKeyingGroups")
log.info('Processing HIK Mode >> using HIKContext Manager:')
cmds.select(self.NodeList)
mel.eval("hikManipStart 1 1")
except:
self.managedHIK = False
def __exit__(self, exc_type, exc_value, traceback):
if self.managedHIK:
cmds.keyingGroup(fil=self.keyingGroups)
cmds.select(self.NodeList)
mel.eval("hikManipStop")
log.info('Exit HIK Mode >> HIKContext Manager:')
if exc_type:
log.exception('%s : %s'%(exc_type, exc_value))
if self.objs:
cmds.select(self.objs)
return True
class SceneRestoreContext(object):
"""
Simple Context Manager for restoring Scene Global settings
Basically we store the state of all the modelPanels and timeLine
setups. Think of it like this, you export a scene, file -new, then re-import it
but you've now lost all the scenes UI and setups. This is capable of returning
the UI to the previous state. Maybe this could be a tool in it's own write?
Things stored:
* All UI viewport states, display and settings
* currentTime, timeRanges, timeUnits, sceneUnits, upAxis
* Main cameras and transforms for the 4 main modelPanels
* active sound and sound displays
>>> from Red9.core.Red9_General import SceneRestoreContext as sceneStore
>>> with sceneStore:
>>> #do something to modify the scene setup
>>> cmds.currentTime(100)
>>>
>>> #out of the context manager the scene will be restored as it was
>>> #before the code entered the context. (with sceneStore:)
"""
def __init__(self):
self.gPlayBackSlider=mel.eval("string $temp=$gPlayBackSlider")
self.dataStore={}
def __enter__(self):
self.storeSettings()
def __exit__(self, exc_type, exc_value, traceback):
self.restoreSettings()
if exc_type:
log.exception('%s : %s'%(exc_type, exc_value))
return True
def storeSettings(self):
'''
main work function, store all UI settings
'''
self.dataStore['autoKey'] = cmds.autoKeyframe(query=True, state=True)
# timeline management
self.dataStore['currentTime'] = cmds.currentTime(q=True)
self.dataStore['minTime'] = cmds.playbackOptions(q=True, min=True)
self.dataStore['maxTime'] = cmds.playbackOptions(q=True, max=True)
self.dataStore['startTime'] = cmds.playbackOptions(q=True, ast=True)
self.dataStore['endTime'] = cmds.playbackOptions(q=True, aet=True)
self.dataStore['playSpeed'] = cmds.playbackOptions(query=True, playbackSpeed=True)
# unit management
self.dataStore['timeUnit'] = cmds.currentUnit(q=True, fullName=True, time=True)
self.dataStore['sceneUnits'] = cmds.currentUnit(q=True, fullName=True, linear=True)
self.dataStore['upAxis'] = cmds.upAxis(q=True, axis=True)
#viewport colors
self.dataStore['displayGradient'] = cmds.displayPref(q=True, displayGradient=True)
#objects colors
self.dataStore['curvecolor'] = cmds.displayColor("curve", q=True, dormant=True)
#panel management
self.dataStore['panelStore'] = {}
for panel in ['modelPanel1', 'modelPanel2', 'modelPanel3', 'modelPanel4']:
if not cmds.modelPanel(panel, q=True, exists=True):
continue
self.dataStore['panelStore'][panel] = {}
self.dataStore['panelStore'][panel]['settings'] = cmds.modelEditor(panel, q=True, sts=True)
activeCam = cmds.modelPanel(panel, q=True, camera=True)
if not cmds.nodeType(activeCam) == 'camera':
activeCam = cmds.listRelatives(activeCam, f=True)[0]
self.dataStore['panelStore'][panel]['activeCam'] = activeCam
#camera management
#TODO : store the camera field of view etc also
self.dataStore['cameraTransforms']={}
for cam in ['persp', 'top', 'side', 'front']:
try:
self.dataStore['cameraTransforms'][cam] = [cmds.getAttr('%s.translate' % cam),
cmds.getAttr('%s.rotate' % cam),
cmds.getAttr('%s.scale' % cam)]
except:
log.debug("Camera doesn't exists : %s" % cam)
#sound management
self.dataStore['activeSound'] = cmds.timeControl(self.gPlayBackSlider, q=True, s=1)
self.dataStore['displaySound'] = cmds.timeControl(self.gPlayBackSlider, q=True, ds=1)
def restoreSettings(self):
'''
restore all UI settings
'''
cmds.autoKeyframe(state=self.dataStore['autoKey'])
#timeline management
cmds.currentTime(self.dataStore['currentTime'])
cmds.playbackOptions(min=self.dataStore['minTime'])
cmds.playbackOptions(max=self.dataStore['maxTime'])
cmds.playbackOptions(ast=self.dataStore['startTime'])
cmds.playbackOptions(aet=self.dataStore['endTime'])
cmds.playbackOptions(ps=self.dataStore['playSpeed'])
#unit management
cmds.currentUnit(time=self.dataStore['timeUnit'])
cmds.currentUnit(linear=self.dataStore['sceneUnits'])
cmds.upAxis(axis=self.dataStore['upAxis'])
log.info('Restored PlayBack / Timeline setup')
#viewport colors
cmds.displayPref(displayGradient=self.dataStore['displayGradient'])
cmds.displayRGBColor(resetToSaved=True)
#objects colors
cmds.displayColor("curve", self.dataStore['curvecolor'], dormant=True)
#panel management
for panel, data in self.dataStore['panelStore'].items():
try:
cmdString = data['settings'].replace('$editorName', panel)
mel.eval(cmdString)
log.info("Restored Panel Settings Data >> %s" % panel)
mel.eval('lookThroughModelPanel("%s","%s")' % (data['activeCam'], panel))
log.info("Restored Panel Active Camera Data >> %s >> cam : %s" % (panel, data['activeCam']))
except:
log.debug("Failed to fully Restore ActiveCamera Data >> %s >> cam : %s" % (panel, data['activeCam']))
# camera management
for cam, settings in self.dataStore['cameraTransforms'].items():
try:
cmds.setAttr('%s.translate' % cam, settings[0][0][0], settings[0][0][1], settings[0][0][2])
cmds.setAttr('%s.rotate' % cam, settings[1][0][0], settings[1][0][1], settings[1][0][2])
cmds.setAttr('%s.scale' % cam, settings[2][0][0], settings[2][0][1], settings[2][0][2])
log.info('Restored Default Camera Transform Data : % s' % cam)
except:
log.debug("Failed to fully Restore Default Camera Transform Data : % s" % cam)
#sound management
if self.dataStore['displaySound']:
cmds.timeControl(self.gPlayBackSlider, e=True, ds=1, sound=self.dataStore['activeSound'])
log.info('Restored Audio setup')
else:
cmds.timeControl(self.gPlayBackSlider, e=True, ds=0)
log.debug('Scene Restored fully')
return True
# General ---
#---------------------------------------------------------------------------------
def thumbNailScreen(filepath, width, height, mode='api'):
path='%s.bmp' % os.path.splitext(filepath)[0]
if mode=='api':
thumbnailApiFromView(path, width, height)
log.debug('API Thumb > path : %s' % path)
else:
thumbnailFromPlayBlast(path, width, height)
log.debug('Playblast Thumb > path : %s' % path)
def thumbnailFromPlayBlast(filepath, width, height):
'''
Generate a ThumbNail of the screen
Note: 'cf' flag is broken in 2012
:param filepath: path to Thumbnail
:param width: width of capture
:param height: height of capture
'''
filepath=os.path.splitext(filepath)[0]
filename=os.path.basename(filepath)
filedir=os.path.dirname(filepath)
#get modelPanel and camera
win = cmds.playblast(activeEditor=True).split('|')[-1]
cam = cmds.modelPanel(win, q=True, camera=True)
if not cmds.nodeType(cam) == 'camera':
cam = cmds.listRelatives(cam)[0]
storedformat = cmds.getAttr('defaultRenderGlobals.imageFormat')
storedResolutionGate = cmds.getAttr('%s.filmFit' % cam)
cmds.setAttr('defaultRenderGlobals.imageFormat', 20)
cmds.setAttr('%s.filmFit' % cam, 2) # set to Vertical so we don't get so much overscan
cmds.playblast(frame=cmds.currentTime(q=True), # startTime=cmds.currentTime(q=True),
# endTime=cmds.currentTime(q=True),
format="image",
filename=filepath,
width=width,
height=height,
percent=100,
quality=90,
forceOverwrite=True,
framePadding=0,
showOrnaments=False,
compression="BMP",
viewer=False)
cmds.setAttr('defaultRenderGlobals.imageFormat', storedformat)
cmds.setAttr('%s.filmFit' % cam, storedResolutionGate)
#Why do this rename? In Maya2012 the 'cf' flag fails which means you have to use
#the 'f' flag and that adds framePadding, crap I know! So we strip it and rename
#the file after it's made.
try:
newfile=[f for f in os.listdir(filedir)
if f.split('.bmp')[0].split('.')[0] == filename and not
'.pose' in f]
log.debug('Original Playblast file : %s' % newfile)
os.rename(os.path.join(filedir, newfile[0]), '%s.bmp' % filepath)
log.debug('Thumbnail Renamed : %s' % ('%s.bmp' % filepath))
return '%s.bmp' % filepath
except:
pass
def thumbnailApiFromView(filename, width, height, compression='bmp', modelPanel='modelPanel4'):
'''
grab the thumbnail direct from the buffer?
TODO: not yet figured out how you crop the data here?
'''
import maya.OpenMaya as OpenMaya
import maya.OpenMayaUI as OpenMayaUI
#Grab the last active 3d viewport
view = None
if modelPanel is None:
view = OpenMayaUI.M3dView.active3dView()
else:
try:
view = OpenMayaUI.M3dView()
OpenMayaUI.M3dView.getM3dViewFromModelEditor(modelPanel, view)
except:
#in case the given modelPanel doesn't exist!!
view = OpenMayaUI.M3dView.active3dView()
#read the color buffer from the view, and save the MImage to disk
image = OpenMaya.MImage()
view.readColorBuffer(image, True)
image.resize(width, height, True)
image.writeToFile(filename, compression)
    log.info('API Thumbnail call path : %s' % filename)
def getModifier():
'''
return the modifier key pressed
'''
mods = cmds.getModifiers()
if (mods & 1) > 0:
return 'Shift'
if (mods & 2) > 0:
return 'CapsLock'
if (mods & 4) > 0:
return 'Ctrl'
if (mods & 8) > 0:
return 'Alt'
else:
return False
# OS functions ---
#---------------------------------------------------------------------------------
class Clipboard:
'''
Get or Set data to the Windows clipboard...Used in the inspect code to grab the
ScriptEditor's selected history
CURRENTLY NOT BEING CALLED - switched to pyperclip.py module
'''
@staticmethod
def getText():
'''
Get clipboard text if available
'''
import ctypes
# declare win32 API
user32 = ctypes.windll.user32
kernel32 = ctypes.windll.kernel32
if not user32.OpenClipboard(0):
return ''
CF_TEXT = 1
hClipMem = user32.GetClipboardData(CF_TEXT)
kernel32.GlobalLock.restype = ctypes.c_char_p
value = kernel32.GlobalLock(hClipMem)
kernel32.GlobalUnlock(hClipMem)
user32.CloseClipboard()
if isinstance(value, str):
return value
elif hasattr(value, 'decode'):
return value.decode(sys.getfilesystemencoding())
else:
return ''
@staticmethod
def setText(value):
'''
Set clipbard text
'''
import ctypes
if not value:
raise IOError('No text passed to the clipboard')
if isinstance(value, unicode):
value=str(value)
if not isinstance(value, str):
raise TypeError('value should be of str type')
# declare win32 API
user32 = ctypes.windll.user32
kernel32 = ctypes.windll.kernel32
GlobalLock = kernel32.GlobalLock
memcpy = ctypes.cdll.msvcrt.memcpy
CF_TEXT = 1
GHND = 66
buf = ctypes.c_buffer(value.encode(sys.getfilesystemencoding()))
bufferSize = ctypes.sizeof(buf)
hGlobalMem = kernel32.GlobalAlloc(GHND, bufferSize)
GlobalLock.restype = ctypes.c_void_p
lpGlobalMem = GlobalLock(hGlobalMem)
memcpy(lpGlobalMem, ctypes.addressof(buf), bufferSize)
kernel32.GlobalUnlock(hGlobalMem)
if user32.OpenClipboard(0):
user32.EmptyClipboard()
user32.SetClipboardData(CF_TEXT, hGlobalMem)
user32.CloseClipboard()
log.info('Data set to clipboard : %s' % value)
return True
def os_OpenFileDirectory(path):
'''
open the given folder in the default OS browser
'''
import subprocess
path=os.path.abspath(path)
if sys.platform == 'win32':
subprocess.Popen('explorer /select, "%s"' % path)
elif sys.platform == 'darwin': # macOS
subprocess.Popen(['open', path])
else: # linux
try:
subprocess.Popen(['xdg-open', path])
except OSError:
raise OSError('unsupported xdg-open call??')
def os_OpenFile(filePath, *args):
'''
open the given file in the default program for this OS
'''
import subprocess
#log.debug('filePath : %s' % filePath)
#filePath=os.path.abspath(filePath)
#log.debug('abspath : %s' % filePath)
if sys.platform == 'win32':
os.startfile(filePath)
elif sys.platform == 'darwin': # macOS
subprocess.Popen(['open', filePath])
else: # linux
try:
subprocess.Popen(['xdg-open', filePath])
except OSError:
raise OSError('unsupported xdg-open call??')
def os_formatPath(path):
'''
take the given path and format it for Maya path
'''
return os.path.normpath(path).replace('\\','/').replace('\t','/t').replace('\n','/n').replace('\a', '/a')
def os_listFiles(folder, filters=[], byDate=False, fullPath=False):
'''
    simple os wrap to list a dir with filters for file type and sort by date
    :param folder: folder to dir list
    :param filters: list of file extensions to filter for
    :param byDate: sort the list by modified date, newest first!
    :param fullPath: when True, return full formatted paths rather than just file names
'''
files = os.listdir(folder)
filtered=[]
if filters:
for f in files:
for flt in filters:
if f.lower().endswith(flt):
filtered.append(f)
files=filtered
if byDate and files:
files.sort(key=lambda x: os.stat(os.path.join(folder, x)).st_mtime)
files.reverse()
if fullPath:
files=[os_formatPath(os.path.join(folder, f)) for f in files]
return files
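# Illustrative sketch, not from the original module: list Maya scene files in a
# hypothetical folder, newest first, with full normalised paths.
#   scenes = os_listFiles('d:/projects/shots', filters=['.ma', '.mb'],
#                         byDate=True, fullPath=True)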
def os_openCrashFile(openDir=False):
'''
    Open the default temp dir where Maya stores its crash files and logs
'''
tempdir=tempfile.gettempdir()
if openDir:
os_OpenFileDirectory(tempdir)
else:
mayafiles = os_listFiles(tempdir, filters=['.ma','.mb'], byDate=True, fullPath=True)
cmds.file(mayafiles[0], open=True, f=True)
def os_fileCompare(file1, file2, openDiff=False):
'''
    Pass in 2 files for diff comparison. If the files are identical, i.e. there are no
    differences, then the code returns 0
:param file1: first file to compare with second file
:param file2: second file to compare against the first
:param openDiff: if a difference was found then boot Diffmerge UI, highlighting the diff
.. note::
This is a stub function that requires Diffmerge.exe, you can download from
https://sourcegear.com/diffmerge/.
        Once downloaded drop it here Red9/packages/diffMerge.exe
'''
diffmerge=os.path.join(r9Setup.red9ModulePath(),'packages','diffMerge.exe')
outputDir=tempfile.gettempdir()
if os.path.exists(diffmerge):
process=subprocess.Popen([diffmerge, '-d', os.path.join(outputDir, 'diffmergeOutput.diff'), file1, file2],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True)
#output = process.communicate()
process.wait()
retcode = process.poll()
if not retcode:
log.info('Files are Identical')
return retcode
elif retcode==1:
log.info('Files are not Identical - use the openDiff flag to open up the differences in the editor')
if openDiff:
process=subprocess.Popen([diffmerge, file1, file2], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
return retcode
elif retcode==2:
raise IOError('Files failed to compare - issues prevented the compare processing both files')
return retcode
else:
log.warning('Diffmerge commandline was not found, compare aborted')
def writeJson(filepath=None, content=None):
'''
write json file to disk
    :param filepath: path on disk to write the file to
:param content: file content
:return: None
'''
if filepath:
path = os.path.dirname(filepath)
if not os.path.exists(path):
os.makedirs(path)
name = open(filepath, "w")
name.write(json.dumps(content, sort_keys=True, indent=4))
name.close()
def readJson(filepath=None):
'''
    read a json file from disk
    :param filepath: path on disk to read the file from
    :return: the decoded json content, or None if the file is missing or invalid
'''
if os.path.exists(filepath):
name = open(filepath, 'r')
try:
return json.load(name)
except ValueError:
pass
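# Illustrative sketch, not from the original module: a simple round trip through the
# two helpers above; the path and payload are hypothetical.
#   writeJson('c:/temp/pose.json', {'node': 'pCube1', 'translate': [0, 1, 0]})
#   data = readJson('c:/temp/pose.json')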
class abcIndex(object):
'''
Alphabetic iterator
'''
def __init__(self, lower=True):
if lower:
self.__abc = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
else:
self.__abc = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
self.__iter = 0
self.__iterator = None
self.__Iterate()
def __Iterate(self):
self.__iter += 1
self.__iterator = itertools.permutations(self.__abc, self.__iter)
def next(self):
'''
        Return an alphabetic index
'''
try:
temp = ''.join([x for x in self.__iterator.next()])
except StopIteration:
self.__Iterate()
temp = ''.join([x for x in self.__iterator.next()])
return '%s' % temp
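# Illustrative sketch, not from the original module: the iterator hands back 'a'..'z'
# first, then rolls over to two-letter permutations ('ab', 'ac', ...).
#   index = abcIndex()
#   index.next()    # 'a'
#   index.next()    # 'b'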
| {
"content_hash": "b1d567aaacb1786c2aa6a9a1ac725504",
"timestamp": "",
"source": "github",
"line_count": 924,
"max_line_length": 142,
"avg_line_length": 36.41558441558441,
"alnum_prop": 0.5874940561103186,
"repo_name": "Free3Dee/Red9_StudioPack",
"id": "392b04efc54f35f45ad21944fcdda72f1f0a0e17",
"size": "33648",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/Red9_General.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Mathematica",
"bytes": "954216"
},
{
"name": "Python",
"bytes": "1057739"
}
],
"symlink_target": ""
} |
from Event import PictureEvent, LightEvent, FeedEvent
import datetime
names = ['Feed','Light','Take picture']
events = []
enabled = True
idcounter = 0
today = datetime.datetime.today().date()
def createEvent(type):
if type == 0:
return FeedEvent()
elif type == 1:
return LightEvent()
elif type == 2:
return PictureEvent()
return None
def load(ini):
global idcounter, enabled, events
section = 'events'
if not ini.has_section(section):
raise Exception("Broken state.ini file")
idcounter = ini.getint(section,'idcounter')
enabled = ini.getboolean(section,'enabled')
count = ini.getint(section,'count')
events = []
for i in range(count):
event = createEvent(ini.getint('event' + str(i),'type'))
event.readFromIni(ini, 'event' + str(i))
events.append(event)
def save(ini):
section = 'events'
if not ini.has_section(section):
ini.add_section(section)
ini.set(section,'idcounter',str(idcounter))
ini.set(section,'enabled',str(enabled))
ini.set(section,'count',str(len(events)))
i = 0
for event in events:
event.writeToIni(ini, 'event' + str(i))
i += 1
def getSerializeable():
return [event.getSerializeable() for event in events]
def update(params):
global idcounter
id = int(params['event'])
type = int(params['type'])
event = createEvent(type)
event.setDayInt(int(params['day']))
event.hour = int(params['hour'])
event.minute = int(params['minute'])
event.executed = event.timePassed()
if type == 0:
event.setFoodInt(int(params['food']))
event.maxSaturation = float(params['maxsaturation'])
event.minAmount = float(params['minamount'])
event.maxAmount = float(params['maxamount'])
elif type == 1:
event.value = params['value'] == 'true'
if id == -1:
event.id = idcounter
idcounter += 1
events.append(event)
else:
event.id = id
for i in range(len(events)):
if events[i].id == id:
events[i] = event
return event
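# Illustrative sketch, not from the original module: update() expects the request
# parameters as strings; the values below are hypothetical. An 'event' of -1 creates
# a new event, any other id replaces the existing event with that id.
#   update({'event': '-1', 'type': '1', 'day': '0', 'hour': '7',
#           'minute': '30', 'value': 'true'})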
def getEvent(id):
for event in events:
if event.id == id:
return event
return None
def tick():
global today
if today != datetime.datetime.today().date():
today = datetime.datetime.today().date()
for event in events:
event.executed = event.timePassed()
if enabled:
for event in events:
            event.tick()
| {
"content_hash": "adcd20d29d3c94bfbb176b0323c5ce73",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 58,
"avg_line_length": 23.197916666666668,
"alnum_prop": 0.6834306241580602,
"repo_name": "marian42/fishtank",
"id": "235df5952a332c9bc68f2af5187755d8868655ae",
"size": "2227",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/EventList.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "9109"
},
{
"name": "CSS",
"bytes": "5777"
},
{
"name": "HTML",
"bytes": "36066"
},
{
"name": "JavaScript",
"bytes": "44892"
},
{
"name": "Python",
"bytes": "46705"
}
],
"symlink_target": ""
} |
from gitlint.tests.base import BaseTestCase
from gitlint.rules import Rule, RuleViolation
class RuleTests(BaseTestCase):
def test_rule_equality(self):
self.assertEqual(Rule(), Rule())
# Ensure rules are not equal if they differ on their attributes
for attr in ["id", "name", "target", "options"]:
rule = Rule()
setattr(rule, attr, "åbc")
self.assertNotEqual(Rule(), rule)
def test_rule_log(self):
rule = Rule()
rule.log.debug("Tēst message")
self.assert_log_contains("DEBUG: gitlint.rules Tēst message")
def test_rule_violation_equality(self):
violation1 = RuleViolation("ïd1", "My messåge", "My cöntent", 1)
self.object_equality_test(violation1, ["rule_id", "message", "content", "line_nr"])
| {
"content_hash": "970e600dfa02053a565d76890a8eddee",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 91,
"avg_line_length": 38.523809523809526,
"alnum_prop": 0.6341161928306551,
"repo_name": "jorisroovers/gitlint",
"id": "199cc7e55308be7703f8f4099f6f7bdd42693076",
"size": "815",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "gitlint-core/gitlint/tests/rules/test_rules.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1486"
},
{
"name": "Dockerfile",
"bytes": "2011"
},
{
"name": "Python",
"bytes": "435509"
},
{
"name": "Shell",
"bytes": "21557"
}
],
"symlink_target": ""
} |
"""
===============================
Test for qplotutils.chart.items
===============================
Autogenerated package stub.
"""
import unittest
import logging
import sys
import os
import numpy as np
from qtpy.QtCore import *
from qtpy.QtGui import *
from qtpy.QtOpenGL import *
from qtpy.QtWidgets import *
from qplotutils.chart.items import *
__author__ = "Philipp Baust"
__copyright__ = "Copyright 2019, Philipp Baust"
__credits__ = []
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "Philipp Baust"
__email__ = "[email protected]"
__status__ = "Development"
_log = logging.getLogger(__name__)
class BaseMixinTests(unittest.TestCase):
def setUp(self):
""" Autogenerated. """
pass
def test_instantiate(self):
""" Autogenerated. """
obj = BaseMixin() # TODO: may fail!
class ChartItemTests(unittest.TestCase):
def setUp(self):
""" Autogenerated. """
pass
def test_instantiate(self):
""" Autogenerated. """
obj = ChartItem() # TODO: may fail!
class ChartItemFlagsTests(unittest.TestCase):
def setUp(self):
""" Autogenerated. """
pass
def test_instantiate(self):
""" Autogenerated. """
obj = ChartItemFlags() # TODO: may fail!
class ChartItemGroupTests(unittest.TestCase):
def setUp(self):
""" Autogenerated. """
pass
def test_instantiate(self):
""" Autogenerated. """
obj = ChartItemGroup() # TODO: may fail!
class ChartWidgetItemTests(unittest.TestCase):
def setUp(self):
""" Autogenerated. """
pass
def test_instantiate(self):
""" Autogenerated. """
obj = ChartWidgetItem() # TODO: may fail!
class ColorSetTests(unittest.TestCase):
def setUp(self):
""" Autogenerated. """
pass
def test_instantiate(self):
""" Autogenerated. """
obj = ColorSet() # TODO: may fail!
class CoordCrossTests(unittest.TestCase):
def setUp(self):
""" Autogenerated. """
pass
def test_instantiate(self):
""" Autogenerated. """
obj = CoordCross() # TODO: may fail!
class HLineTests(unittest.TestCase):
def setUp(self):
""" Autogenerated. """
pass
def test_instantiate(self):
""" Autogenerated. """
obj = HLine() # TODO: may fail!
class LineChartItemTests(unittest.TestCase):
def setUp(self):
""" Autogenerated. """
pass
def test_instantiate(self):
""" Autogenerated. """
obj = LineChartItem() # TODO: may fail!
class RectMarkerTests(unittest.TestCase):
def setUp(self):
""" Autogenerated. """
pass
def test_instantiate(self):
""" Autogenerated. """
obj = RectMarker(QPointF(0,0)) # TODO: may fail!
class TextItemTests(unittest.TestCase):
app = None
@classmethod
def setUpClass(cls):
TextItemTests.app = QApplication([])
def setUp(self):
""" Autogenerated. """
pass
def test_instantiate(self):
""" Autogenerated. """
obj = TextItem(QPointF(0,0), "Text") # TODO: may fail!
class VLineTests(unittest.TestCase):
def setUp(self):
""" Autogenerated. """
pass
def test_instantiate(self):
""" Autogenerated. """
obj = VLine() # TODO: may fail!
class WireItemTests(unittest.TestCase):
def setUp(self):
""" Autogenerated. """
pass
def test_instantiate(self):
""" Autogenerated. """
obj = WireItem() # TODO: may fail! | {
"content_hash": "9179210f623da6c54fe5b5082864d63a",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 63,
"avg_line_length": 20.54696132596685,
"alnum_prop": 0.5598279107286905,
"repo_name": "unrza72/qplotutils",
"id": "f4755959cb4d0dc71a15a54d8e07bb2bae6487c8",
"size": "3766",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/qplotutils/chart/test_items.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "983"
},
{
"name": "Python",
"bytes": "294832"
},
{
"name": "Shell",
"bytes": "271"
}
],
"symlink_target": ""
} |
"""
Description: Round-trip example. This script takes a STIX instance document from XML to
a binding object, then to a api object and then to a dictionary. That dictionary is then
converted back into an api object, which is then used to generate an XML document.
"""
# stdlib
from pprint import pprint
# python-stix
from stix.core import STIXPackage
def main():
FILENAME = 'sample.xml'
# Parse input file
stix_package = STIXPackage.from_xml(FILENAME)
# Convert STIXPackage to a Python dictionary via the to_dict() method.
stix_dict = stix_package.to_dict()
# Print the dictionary!
pprint(stix_dict)
# Convert the first STIXPackage dictionary into another STIXPackage via
# the from_dict() method.
stix_package_two = STIXPackage.from_dict(stix_dict)
# Serialize the new STIXPackage object to XML
xml = stix_package_two.to_xml()
# Print the XML!
print(xml)
if __name__ == '__main__':
main()
| {
"content_hash": "73f0e179c73205a42d99933eeff94cbf",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 88,
"avg_line_length": 26.666666666666668,
"alnum_prop": 0.7010416666666667,
"repo_name": "STIXProject/python-stix",
"id": "80989a03b6131b04cd144634a001cba63c396250",
"size": "1087",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/xml2object.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1422974"
}
],
"symlink_target": ""
} |
import argparse
import os
import shutil
import BasketGlobals as config
# SET THE BASE LOCATION
SERVER = config.serverDir()
LOCAL = config.rootDir()
# Basic 'Safe' Server Directory make
def make_dir(indir, loc=SERVER):
baseDir = os.path.join(loc, indir)
try:
if not os.path.isdir(baseDir):
os.makedirs(baseDir)
    except (IOError, OSError) as e:
        # os.makedirs raises OSError (not IOError) on failure, so catch both
        print e
# Return subdirectories
def sub_dirs(indir):
return config.BASE_DIRS[indir]
# Iterate through sub dirs in the given top dir
def make_sub(top, loc=SERVER):
# If the sub directory isn't empty
if sub_dirs(top) is not []:
for sub in sub_dirs(top):
subdir = os.path.join(top, sub)
make_dir(subdir, loc=loc)
# Iterate through and make the top level directories
# Then create the sub directories
def make_top(top, loc=SERVER):
for key, dirname in top.iteritems():
make_dir(key, loc=loc)
make_sub(key, loc=loc)
# --- SHOT SPECIFIC DIRECTORY CREATION ---
# Make a Working/Publish Shot directory
def make_prod_dir(scene, shot, loc=SERVER):
for prod_dir in config.PROD_DIRS:
if prod_dir == 'publish':
pdir = os.path.join(prod_dir, scene, shot)
else:
pdir = os.path.join('working', prod_dir, scene, shot)
for stage in config.STAGE_DIRS:
sdir = os.path.join(pdir, stage)
make_dir(sdir, loc=loc)
# Make a Frame Shot directory
def make_frame_dir(scene, shot, loc=SERVER):
fdir = os.path.join('frames', scene, shot)
for sub in config.FRAME_DIRS:
sdir = os.path.join(fdir, sub)
make_dir(sdir, loc=loc)
# One stop Function to initialize the server directories
def build_base_server():
make_dir(SERVER)
make_top(config.BASE_DIRS)
# Creates Base local directories
def build_base_local():
make_dir(LOCAL)
make_top(config.BASE_DIRS, loc=LOCAL)
def ignore_files(dir, files):
return [f for f in files if os.path.isfile(os.path.join(dir, f))]
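# Mirror any scene/shot directories that exist on the server but not locally (directories only, files ignored)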
def rep_prod_dir():
sdirs = []
ldirs = []
for sdir in next(os.walk(os.path.join(SERVER, 'working', 'scenes')))[1]:
sdirs.append(sdir)
for ldir in next(os.walk(os.path.join(LOCAL, 'working', 'scenes')))[1]:
ldirs.append(ldir)
missingdirs = list(set(sdirs) - set(ldirs))
for mdir in missingdirs:
if not os.path.exists(os.path.join(LOCAL, 'working', 'scenes', mdir)):
shutil.copytree(
os.path.join(SERVER, 'working', 'scenes', mdir),
os.path.join(LOCAL, 'working', 'scenes', mdir),
ignore=ignore_files)
for sdir in next(os.walk(os.path.join(SERVER, 'working', 'scenes')))[1]:
for shot in next(os.walk(os.path.join(SERVER, 'working', 'scenes', sdir)))[1]:
if not os.path.exists(os.path.join(LOCAL, 'working', 'scenes', sdir, shot)):
shutil.copytree(
os.path.join(SERVER, 'working', 'scenes', sdir, shot),
os.path.join(LOCAL, 'working', 'scenes', sdir, shot),
ignore=ignore_files)
if __name__ == "__main__":
print next(os.walk(os.path.join(SERVER, 'working', 'scenes')))[1]
| {
"content_hash": "5557bcafd6b648d38d60c2ad226fa847",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 88,
"avg_line_length": 29.118181818181817,
"alnum_prop": 0.6134873556041212,
"repo_name": "Hartman-/Basket",
"id": "2a71edce467c0f06598bcd264d379efe95769462",
"size": "3226",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "basket/BasketBuilder.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2870"
},
{
"name": "Python",
"bytes": "140569"
},
{
"name": "Shell",
"bytes": "449"
}
],
"symlink_target": ""
} |
import re
from datetime import datetime
def handle_parse_exception(soup):
print '\nException parsing HTML.', \
'Probably contained something unexpected.', \
'Check unexpected_output.html'
with open('unexpected_output.html', 'wb') as output:
output.write(soup.prettify().encode('UTF-8'))
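# Build a {fips code: court name} dict from the courtFips/courtName input tags on the court list page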
def parse_court_names(soup):
try:
# Load list of courts and fips codes
fips = [tag['value'] for tag in soup.find_all('input',
{'name':'courtFips'})]
names = [tag['value'] for tag in soup.find_all('input',
{'name':'courtName'})]
court_names = {}
for f, c in zip(fips, names):
court_names[f] = c
return court_names
except:
handle_parse_exception(soup)
raise
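# Return a list of {details_url, status} dicts, one per case row in the search results table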
def parse_hearing_date_search(soup):
try:
no_results = re.compile(r'No results found for the search criteria')
if soup.find('td', text=no_results) is not None:
return []
cases = []
rows = soup.find('table', {'class':'tableborder'}).find_all('tr')
for row in rows:
cells = row.find_all('td')
if cells[0]['class'][0] == 'gridheader':
continue
details_url = cells[1].a['href']
status_cell_content = list(cells[6].stripped_strings)
status = ''
if len(status_cell_content) > 0:
status = status_cell_content[0]
cases.append({
'details_url': details_url,
'status': status
})
return cases
except:
handle_parse_exception(soup)
raise
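# True when the results page has a caseInfoScrollForward button, i.e. more result pages remain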
def next_button_found(soup):
try:
return soup.find('input', {'name': 'caseInfoScrollForward'}) is not None
except:
handle_parse_exception(soup)
raise
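# Pull case number, filed date, name, case type and offense class from a case details page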
def parse_case_details(soup):
try:
cells = list(soup.find('td', text=re.compile('Case Number')) \
.parent.find_all('td'))
case_number = cells[1].text.strip()
filed_date = datetime.strptime(cells[3].text.strip(), "%m/%d/%Y")
cells = list(soup.find('td', text=re.compile('Name')) \
.parent.find_all('td'))
name = cells[1].text.strip()
cells = list(soup.find('td', text=re.compile('Case Type')) \
.parent.find_all('td'))
case_type = cells[3].text.strip()
offense_class = cells[5].text.strip()
return {
'case_number': case_number,
'name': name,
'filed_date': filed_date,
'case_type': case_type,
'offense_class': offense_class
}
except:
handle_parse_exception(soup)
raise
| {
"content_hash": "01a6de27a363a06feb8ff13eabe6e05a",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 80,
"avg_line_length": 33.670731707317074,
"alnum_prop": 0.5313292285403839,
"repo_name": "openva/attorney-analysis",
"id": "0f9a227e03d385121814aae8564b2d6cb05c635e",
"size": "2761",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "courtreader/districtcourtparser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13887"
}
],
"symlink_target": ""
} |