max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars)
---|---|---|---|---|
src/Products/Five/sizeconfigure.py | rbanffy/Zope | 289 | 11099317 | ##############################################################################
#
# Copyright (c) 2004, 2005 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Use structured monkey-patching to enable ``ISized`` adapters for
Zope 2 objects.
"""
from Products.Five import fivemethod
from Products.Five import isFiveMethod
from zope.size.interfaces import ISized
from zope.testing.cleanup import addCleanUp # NOQA
# holds classes that were monkeyed with; for clean up
_monkied = []
@fivemethod
def get_size(self):
size = ISized(self, None)
if size is not None:
unit, amount = size.sizeForSorting()
if unit == 'byte':
return amount
method = getattr(self, '__five_original_get_size', None)
if method is not None:
return self.__five_original_get_size()
def classSizable(class_):
"""Monkey the class to be sizable through Five"""
# tuck away the original method if necessary
if hasattr(class_, "get_size") and not isFiveMethod(class_.get_size):
class_.__five_original_get_size = class_.get_size
class_.get_size = get_size
# remember class for clean up
_monkied.append(class_)
def sizable(_context, class_):
_context.action(
discriminator=('five:sizable', class_),
callable=classSizable,
args=(class_,)
)
def killMonkey(class_, name, fallback, attr=None):
"""Die monkey, die!"""
method = getattr(class_, name, None)
if isFiveMethod(method):
original = getattr(class_, fallback, None)
if original is not None:
delattr(class_, fallback)
if original is None or isFiveMethod(original):
try:
delattr(class_, name)
except AttributeError:
pass
else:
setattr(class_, name, original)
if attr is not None:
try:
delattr(class_, attr)
except (AttributeError, KeyError):
pass
def unsizable(class_):
"""Restore class's initial state with respect to being sizable"""
killMonkey(class_, 'get_size', '__five_original_get_size')
def cleanUp():
for class_ in _monkied:
unsizable(class_)
addCleanUp(cleanUp)
del addCleanUp
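# Illustrative usage sketch (added; ``MyContent`` and its ``ISized`` adapter
# are hypothetical, not part of this module):
#
#     class MyContent:
#         def get_size(self):
#             return 42
#
#     classSizable(MyContent)    # get_size now consults ISized adapters first
#     MyContent().get_size()     # returns 42 while no ISized adapter is registered
#     unsizable(MyContent)       # restores the original get_size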
|
webapp/tests/test_browser.py | romanek-adam/graphite-web | 4,281 | 11099332 |
# -*- coding: utf-8 -*-
import os
from django.contrib.auth.models import User
try:
from django.urls import reverse
except ImportError: # Django < 1.10
from django.core.urlresolvers import reverse
from .base import TestCase
from django.test.utils import override_settings
from graphite.util import json
from . import DATA_DIR
class BrowserTest(TestCase):
def test_browser(self):
url = reverse('browser')
response = self.client.get(url)
self.assertContains(response, 'Graphite Browser')
def test_header(self):
self.assertEqual(User.objects.count(), 0)
url = reverse('browser_header')
response = self.client.get(url)
self.assertContains(response, 'Graphite Browser Header')
# Graphite has created a default user
self.assertEqual(User.objects.get().username, 'default')
def test_url_prefix(self):
self.assertEqual(reverse('browser'), '/graphite/')
@override_settings(INDEX_FILE=os.path.join(DATA_DIR, 'index'))
def test_search(self):
url = reverse('browser_search')
response = self.client.post(url)
self.assertEqual(response.content, b'')
# simple query
response = self.client.post(url, {'query': 'collectd'})
self.assertEqual(response.content.split(b',')[0],
b'collectd.test.df-root.df_complex-free')
# No match
response = self.client.post(url, {'query': 'other'})
self.assertEqual(response.content, b'')
# Multiple terms (OR)
response = self.client.post(url, {'query': 'midterm shortterm'})
self.assertEqual(response.content.split(b','),
[b'collectd.test.load.load.midterm',
b'collectd.test.load.load.shortterm'])
def test_unicode_graph_name(self):
url = reverse('browser_my_graph')
user = User.objects.create_user('test', '<EMAIL>', '<PASSWORD>')
self.client.login(username='test', password='<PASSWORD>')
response = self.client.get(url, {'path': ''})
self.assertEqual(response.status_code, 200)
user.profile.mygraph_set.create(name=u'fòo', url='bar')
response = self.client.get(url, {'path': ''})
self.assertEqual(response.status_code, 200)
[leaf] = json.loads(response.content)
self.assertEqual(leaf['text'], u'fòo')
def test_unicode_usergraph(self):
url = reverse('browser_usergraph')
user = User.objects.create_user('tèst', '<EMAIL>', '<PASSWORD>')
self.client.login(username='tèst', password='<PASSWORD>')
self.client.get(reverse('browser_header')) # this creates a profile for the user
user.profile.mygraph_set.create(name=u'fòo', url='bar')
response = self.client.get(url, {'query': 'tèst.*',
'format': 'treejson',
'path': 'tèst',
'user': 'tèst',
'node': 'tèst'})
self.assertEqual(response.status_code, 200)
[leaf] = json.loads(response.content)
self.assertEqual(leaf, {
u'leaf': 1,
u'text': u'fòo',
u'allowChildren': 0,
u'graphUrl': u'bar',
u'id': u'tèst.845aa5781192007e1866648eea9f7355',
u'expandable': 0,
})
|
recipes/Python/141602_Barebones_VC_code_invoking_PythCOM_factory/recipe-141602.py | tdiprima/code | 2,023 | 11099356 |
# Python code
from win32com . server . register import UseCommandLine
from win32api import MessageBox
from win32com . client import Dispatch
from win32ui import MessageBox
class StemmerFactory :
_reg_clsid_ = "{602D10EB-426C-4D6F-A4DF-C05572EB780B}"
_reg_desc_ = "LangTech Stemmer"
_reg_progid_ = "LangTech.Stemmer"
_public_methods_ = [ 'new' ]
def new ( self, scriptFile ) :
self . scriptFile = scriptFile
stemmer = Dispatch ( "LangTech.Stemmer.Product" )
return stemmer
class Stemmer :
_reg_clsid_ = "{B306454A-CAE6-4A74-ACAD-0BB11EF256DD}"
_reg_desc_ = "LangTech Stemmer Product"
_reg_progid_ = "LangTech.Stemmer.Product"
_public_methods_ = [ 'stemWord' ]
def stemWord ( self, word ) :
# extremely simple stemming: if the word ends in 's' then drop the 's'
if word [ -1 ] == "s":
return word [ : -1 ]
else:
return word
if __name__ == '__main__' :
UseCommandLine ( StemmerFactory )
UseCommandLine ( Stemmer )
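# Usage note (added): running this script registers both COM servers, e.g.
#     python recipe-141602.py               (register)
#     python recipe-141602.py --unregister  (remove the registration)
# UseCommandLine handles the standard pywin32 registration flags.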
#----------------------------------------
#
# C++
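// Note (added): this fragment appears to assume an enclosing function body
// (for example a message handler or a do { ... } while (false) block); the
// bare `break;` statements below rely on that surrounding scope.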
#include <comdef.h>
#include <initguid.h>
DEFINE_GUID(CLSID_StemmerFactory,
0x602D10EB, 0x426C, 0x4D6F, 0xA4, 0xDF, 0xC0, 0x55, 0x72, 0xEB, 0x78, 0x0B);
DISPID rgDispId ;
OLECHAR * rgszNames [ ] = { OLESTR ( "new" ) };
DISPPARAMS DispParams;
VARIANT VarResult;
EXCEPINFO excepinfo;
UINT uArgErr;
VARIANTARG * pvarg = NULL;
_bstr_t stemmedWord;
HRESULT hr;
IDispatch * stemmerFactory;
IDispatch * stemmer;
if ( FAILED ( hr = CoInitialize ( NULL ) ) ) {
MessageBox ( 0, "CoInitialize failure", "Fault", MB_OK );
break;
}
if ( FAILED ( hr = CoCreateInstance (
CLSID_StemmerFactory,
NULL,
CLSCTX_INPROC_SERVER,
IID_IDispatch,
( LPVOID * ) & stemmerFactory ) ) ) {
MessageBox ( 0, "CoCreateInstance failure", "Fault", MB_OK );
break;
}
if ( FAILED ( hr = stemmerFactory -> GetIDsOfNames (
IID_NULL,
rgszNames,
1,
LOCALE_SYSTEM_DEFAULT,
& rgDispId
) ) ) {
MessageBox ( 0, "GetIDsOfNames failure", "Fault", MB_OK );
break;
}
DispParams.cArgs = 1;
DispParams.cNamedArgs = 0;
DispParams.rgdispidNamedArgs = 0;
pvarg = new VARIANTARG [ DispParams . cArgs ];
if ( pvarg == NULL ) {
MessageBox ( 0, "Insufficient 1st memory", "Fault", MB_OK );
break;
}
pvarg -> vt = VT_BSTR;
pvarg -> bstrVal = SysAllocString ( L"engRules.pl" );
DispParams.rgvarg = pvarg;
if ( FAILED ( hr = stemmerFactory -> Invoke (
rgDispId,
IID_NULL,
LOCALE_SYSTEM_DEFAULT,
DISPATCH_METHOD,
& DispParams,
& VarResult,
& excepinfo,
& uArgErr
) ) ) {
MessageBox ( 0, "1st Invoke failure", "Fault", MB_OK );
break;
}
delete [] pvarg;  // array delete to match the new VARIANTARG[...] allocation
stemmer = VarResult.pdispVal;
pvarg = new VARIANTARG [ DispParams . cArgs ];
if ( pvarg == NULL ) {
MessageBox ( 0, "Insufficient 2nd memory", "Fault", MB_OK );
break;
}
pvarg -> vt = VT_BSTR;
pvarg -> bstrVal = SysAllocString ( L"cats" );
DispParams.rgvarg = pvarg;
if ( FAILED ( hr = stemmer -> Invoke (
rgDispId,
IID_NULL,
LOCALE_SYSTEM_DEFAULT,
DISPATCH_METHOD,
& DispParams,
& VarResult,
& excepinfo,
& uArgErr
) ) ) {
MessageBox ( 0, "2nd Invoke failure", "Fault", MB_OK );
break;
}
delete [] pvarg;  // array delete to match the new VARIANTARG[...] allocation
stemmedWord = VarResult.bstrVal;
MessageBox (
0,
( const char * ) stemmedWord,
"Resulting Stemmed Word",
MB_OK
);
CoUninitialize ( );
|
tests/ouimeaux_device/api/unit/__init__.py | esev/pywemo | 102 | 11099365 | """Intentionally Empty."""
|
nototools/unittests/layout.py | RoelN/nototools | 156 | 11099386 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test general health of the fonts."""
import json
from nototools import render
def _run_harfbuzz(text, font, language, extra_parameters=None):
"""Run harfbuzz on some text and return the shaped list."""
try:
# if extra_parameters is a string, split it into a list
extra_parameters = extra_parameters.split(" ")
except AttributeError:
pass
hb_output = render.run_harfbuzz_on_text(text, font, language, extra_parameters)
return json.loads(hb_output)
_advance_cache = {}
def get_advances(text, font, extra_parameters=None):
"""Get a list of horizontal advances for text rendered in a font."""
try:
return _advance_cache[(text, font, extra_parameters)]
except KeyError:
pass
hb_output = _run_harfbuzz(text, font, None, extra_parameters)
advances = [glyph["ax"] for glyph in hb_output]
_advance_cache[(text, font, extra_parameters)] = advances
return advances
_shape_cache = {}
def get_glyphs(text, font, extra_parameters=None):
"""Get a list of shaped glyphs for text rendered in a font."""
try:
return _shape_cache[(text, font, extra_parameters)]
except KeyError:
pass
hb_output = _run_harfbuzz(text, font, None, extra_parameters)
shapes = [glyph["g"] for glyph in hb_output]
_shape_cache[(text, font, extra_parameters)] = shapes
return shapes
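# Illustrative usage (added; the font path and text are placeholders):
#
#     advances = get_advances('abc', 'NotoSans-Regular.ttf')
#     glyphs = get_glyphs('abc', 'NotoSans-Regular.ttf')
#
# Results are cached per (text, font, extra_parameters) tuple, so repeated
# calls with the same arguments do not shell out to HarfBuzz again.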
|
tests/test_ecdsa.py | adithyabsk/cryptos | 822 | 11099439 |
"""
Test our ability to sign and verify digital signatures
"""
import os
from io import BytesIO
from cryptos.bitcoin import BITCOIN
from cryptos.keys import gen_key_pair
from cryptos.ecdsa import Signature, sign, verify
from cryptos.transaction import Tx
def test_ecdsa():
# let's create two identities
sk1, pk1 = gen_key_pair()
sk2, pk2 = gen_key_pair() # pylint: disable=unused-variable
message = ('user pk1 would like to pay user pk2 1 BTC kkthx').encode('ascii')
# an evil user2 attempts to submit the transaction to the network with some totally random signature
sig = Signature(int.from_bytes(os.urandom(32), 'big'), int.from_bytes(os.urandom(32), 'big'))
# a few seconds later a hero miner inspects the candidate transaction
is_legit = verify(pk1, message, sig)
assert not is_legit
# unlike user2, hero miner is honest and discards the transaction, all is well
# evil user2 does not give up and tries to sign with his key pair
sig = sign(sk2, message)
is_legit = verify(pk1, message, sig)
assert not is_legit
# denied, again!
# lucky for user2, user1 feels sorry for them and the hardships they have been through recently
sig = sign(sk1, message)
is_legit = verify(pk1, message, sig)
assert is_legit
# hero miner validates the transaction and adds it to their block
# user2 happy, buys a Tesla, and promises to turn things around
# the end.
def test_sig_der():
# a transaction used as an example in programming bitcoin
raw = bytes.fromhex('0100000001813f79011acb80925dfe69b3def355fe914bd1d96a3f5f71bf8303c6a989c7d1000000006b483045022100ed81ff192e75a3fd2304004dcadb746fa5e24c5031ccfcf21320b0277457c98f02207a986d955c6e0cb35d446a89d3f56100f4d7f67801c31967743a9c8e10615bed01210349fc4e631e3624a545de3f89f5d8684c7b8138bd94bdd531d2e213bf016b278afeffffff02a135ef01000000001976a914bc3b654dca7e56b04dca18f2566cdaf02e8d9ada88ac99c39800000000001976a9141c4bc762dd5423e332166702cb75f40df79fea1288ac19430600')
tx = Tx.decode(BytesIO(raw))
der = tx.tx_ins[0].script_sig.cmds[0][:-1] # this is the DER signature of the first input on this tx. :-1 crops out the sighash-type byte
sig = Signature.decode(der) # making sure no asserts get tripped up inside this call
# from programming bitcoin chapter 4
der = bytes.fromhex('3045022037206a0610995c58074999cb9767b87af4c4978db68c06e8e6e81d282047a7c60221008ca63759c1157ebeaec0d03cecca119fc9a75bf8e6d0fa65c841c8e2738cdaec')
sig = Signature.decode(der)
assert sig.r == 0x37206a0610995c58074999cb9767b87af4c4978db68c06e8e6e81d282047a7c6
assert sig.s == 0x8ca63759c1157ebeaec0d03cecca119fc9a75bf8e6d0fa65c841c8e2738cdaec
# test that we can also recover back the same der encoding
der2 = sig.encode()
assert der == der2
|
waffle/tests/base.py | theunraveler/django-waffle | 302 | 11099450 |
from __future__ import unicode_literals
from django import test
from django.core import cache
class TestCase(test.TransactionTestCase):
def _pre_setup(self):
cache.cache.clear()
super(TestCase, self)._pre_setup()
class ReplicationRouter(object):
"""Router for simulating an environment with DB replication
This router directs all DB reads to a completely different database than
writes. This can be useful for simulating an environment where DB
replication is delayed to identify potential race conditions.
"""
def db_for_read(self, model, **hints):
return 'readonly'
def db_for_write(self, model, **hints):
return 'default'
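# Illustrative wiring sketch (added; not part of this module): to exercise the
# router, the test settings would point a 'readonly' alias at a second database
# and activate it, roughly:
#
#     DATABASES['readonly'] = dict(DATABASES['default'])
#     DATABASE_ROUTERS = ['waffle.tests.base.ReplicationRouter']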
|
pymtl3/passes/backends/yosys/test/TranslationImport_closed_loop_component_input_test.py | kevinyuan/pymtl3 | 152 | 11099513 |
#=========================================================================
# TranslationImport_closed_loop_component_input_test.py
#=========================================================================
# Author : <NAME>
# Date : June 6, 2019
"""Closed-loop test cases for translation-import with component and input."""
from random import seed
from pymtl3.passes.backends.verilog.test.TranslationImport_closed_loop_component_input_test import (
test_adder,
test_mux,
)
from pymtl3.passes.backends.verilog.util.test_utility import (
closed_loop_component_input_test,
)
from pymtl3.passes.rtlir.util.test_utility import do_test
seed( 0xdeadebeef )
def local_do_test( m ):
closed_loop_component_input_test( m, m._tvs, m._tv_in, "yosys" )
|
bindings/pydeck/examples/h3_cluster_layer.py | StijnAmeloot/deck.gl | 7,702 | 11099515 | """
H3ClusterLayer
==============
Data grouped by H3 geohash, as an example of one of the geohash schemes supported by pydeck.
This layer joins contiguous regions into the same color. Data format is as follows:
[
{
"mean": 73.333,
"count": 440,
"hexIds": [
"88283082b9fffff",
"88283082b1fffff",
"88283082b5fffff",
"88283082b7fffff",
"88283082bbfffff",
"882830876dfffff"
]
},
{
"mean": 384.444,
"count": 3460,
"hexIds": [
"8828308281fffff",
"8828308287fffff",
"88283082abfffff",
"88283082a3fffff",
"8828308289fffff",
"88283082c7fffff",
"88283082c3fffff",
"88283082c1fffff",
"88283082d5fffff"
]
},
...
If you'd simply like to plot a value at an H3 hex ID, see the H3HexagonLayer.
This example is adapted from the deck.gl documentation.
"""
import pydeck as pdk
import pandas as pd
H3_CLUSTER_LAYER_DATA = "https://raw.githubusercontent.com/visgl/deck.gl-data/master/website/sf.h3clusters.json" # noqa
df = pd.read_json(H3_CLUSTER_LAYER_DATA)
# Define a layer to display on a map
layer = pdk.Layer(
"H3ClusterLayer",
df,
pickable=True,
stroked=True,
filled=True,
extruded=False,
get_hexagons="hexIds",
get_fill_color="[255, (1 - mean / 500) * 255, 0]",
get_line_color=[255, 255, 255],
line_width_min_pixels=2,
)
# Set the viewport location
view_state = pdk.ViewState(latitude=37.7749295, longitude=-122.4194155, zoom=11, bearing=0, pitch=30)
# Render
r = pdk.Deck(layers=[layer], initial_view_state=view_state, tooltip={"text": "Density: {mean}"})
r.to_html("h3_cluster_layer.html")
|
zktraffic/base/network.py | fakeNetflix/twitter-repo-zktraffic | 159 | 11099518 | # ==================================================================================================
# Copyright 2014 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
""" network packets & header processing stuff """
import socket
import dpkt
from abc import ABCMeta, abstractmethod
import six
from threading import Thread
class Error(Exception): pass
class BadPacket(Error): pass
_loopback = dpkt.loopback.Loopback()
_ethernet = dpkt.ethernet.Ethernet()
def get_ip_packet(data, client_port=0, server_port=0, is_loopback=False):
""" if {client,server}_port is 0 any {client,server}_port is good """
header = _loopback if is_loopback else _ethernet
header.unpack(data)
tcp_p = getattr(header.data, "data", None)
if type(tcp_p) != dpkt.tcp.TCP:
raise BadPacket("Not a TCP packet")
if tcp_p.dport == server_port:
if client_port != 0 and tcp_p.sport != client_port:
raise BadPacket("Request from different client")
elif tcp_p.sport == server_port:
if client_port != 0 and tcp_p.dport != client_port:
raise BadPacket("Reply for different client")
else:
if server_port > 0:
raise BadPacket("Packet not for/from client/server")
return header.data
def get_ip(ip_packet, packed_addr):
af_type = socket.AF_INET if type(ip_packet) == dpkt.ip.IP else socket.AF_INET6
return socket.inet_ntop(af_type, packed_addr)
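# Illustrative sketch (added; the capture file and port are placeholders):
#
#     with open('capture.pcap', 'rb') as f:
#         for _ts, frame in dpkt.pcap.Reader(f):
#             try:
#                 ip = get_ip_packet(frame, server_port=2181)
#             except BadPacket:
#                 continue
#             print(get_ip(ip, ip.src), '->', get_ip(ip, ip.dst))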
@six.add_metaclass(ABCMeta)
class SnifferBase(Thread):
def __init__(self):
super(SnifferBase, self).__init__()
@abstractmethod
def handle_packet(self, packet): # pragma: no cover
pass
@abstractmethod
def handle_message(self, message): # pragma: no cover
pass
@abstractmethod
def message_from_packet(self, packet): # pragma: no cover
pass
|
networkx-d3-v2/lib/gdata/exif/__init__.py | suraj-testing2/Clock_Websites | 2,293 | 11099532 | # -*-*- encoding: utf-8 -*-*-
#
# This is gdata.photos.exif, implementing the exif namespace in gdata
#
# $Id: __init__.py 81 2007-10-03 14:41:42Z havard.gulldahl $
#
# Copyright 2007 <NAME>
# Portions copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module maps elements from the {EXIF} namespace[1] to GData objects.
These elements describe image data, using exif attributes[2].
Picasa Web Albums uses the exif namespace to represent Exif data encoded
in a photo [3].
Picasa Web Albums uses the following exif elements:
exif:distance
exif:exposure
exif:flash
exif:focallength
exif:fstop
exif:imageUniqueID
exif:iso
exif:make
exif:model
exif:tags
exif:time
[1]: http://schemas.google.com/photos/exif/2007.
[2]: http://en.wikipedia.org/wiki/Exif
[3]: http://code.google.com/apis/picasaweb/reference.html#exif_reference
"""
__author__ = u'<EMAIL>'# (<NAME>)' #BUG: pydoc chokes on non-ascii chars in __author__
__license__ = 'Apache License v2'
import atom
import gdata
EXIF_NAMESPACE = 'http://schemas.google.com/photos/exif/2007'
class ExifBaseElement(atom.AtomBase):
"""Base class for elements in the EXIF_NAMESPACE (%s). To add new elements, you only need to add the element tag name to self._tag
""" % EXIF_NAMESPACE
_tag = ''
_namespace = EXIF_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
def __init__(self, name=None, extension_elements=None,
extension_attributes=None, text=None):
self.name = name
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
class Distance(ExifBaseElement):
"(float) The distance to the subject, e.g. 0.0"
_tag = 'distance'
def DistanceFromString(xml_string):
return atom.CreateClassFromXMLString(Distance, xml_string)
class Exposure(ExifBaseElement):
"(float) The exposure time used, e.g. 0.025 or 8.0E4"
_tag = 'exposure'
def ExposureFromString(xml_string):
return atom.CreateClassFromXMLString(Exposure, xml_string)
class Flash(ExifBaseElement):
"""(string) Boolean value indicating whether the flash was used.
The .text attribute will either be `true' or `false'
As a convenience, this object's .bool method will return what you want,
so you can say:
flash_used = bool(Flash)
"""
_tag = 'flash'
def __bool__(self):
    if self.text.lower() in ('true', 'false'):
        return self.text.lower() == 'true'
    # avoid falling through and returning None, which would make bool() raise
    return False
def FlashFromString(xml_string):
return atom.CreateClassFromXMLString(Flash, xml_string)
class Focallength(ExifBaseElement):
"(float) The focal length used, e.g. 23.7"
_tag = 'focallength'
def FocallengthFromString(xml_string):
return atom.CreateClassFromXMLString(Focallength, xml_string)
class Fstop(ExifBaseElement):
"(float) The fstop value used, e.g. 5.0"
_tag = 'fstop'
def FstopFromString(xml_string):
return atom.CreateClassFromXMLString(Fstop, xml_string)
class ImageUniqueID(ExifBaseElement):
"(string) The unique image ID for the photo. Generated by Google Photo servers"
_tag = 'imageUniqueID'
def ImageUniqueIDFromString(xml_string):
return atom.CreateClassFromXMLString(ImageUniqueID, xml_string)
class Iso(ExifBaseElement):
"(int) The iso equivalent value used, e.g. 200"
_tag = 'iso'
def IsoFromString(xml_string):
return atom.CreateClassFromXMLString(Iso, xml_string)
class Make(ExifBaseElement):
"(string) The make of the camera used, e.g. Fictitious Camera Company"
_tag = 'make'
def MakeFromString(xml_string):
return atom.CreateClassFromXMLString(Make, xml_string)
class Model(ExifBaseElement):
"(string) The model of the camera used,e.g AMAZING-100D"
_tag = 'model'
def ModelFromString(xml_string):
return atom.CreateClassFromXMLString(Model, xml_string)
class Time(ExifBaseElement):
"""(int) The date/time the photo was taken, e.g. 1180294337000.
Represented as the number of milliseconds since January 1st, 1970.
The value of this element will always be identical to the value
of the <gphoto:timestamp>.
Look at this object's .isoformat() for a human friendly datetime string:
photo_epoch = Time.text # 1180294337000
photo_isostring = Time.isoformat() # '2007-05-27T19:32:17.000Z'
Alternatively:
photo_datetime = Time.datetime() # (requires python >= 2.3)
"""
_tag = 'time'
def isoformat(self):
"""(string) Return the timestamp as a ISO 8601 formatted string,
e.g. '2007-05-27T19:32:17.000Z'
"""
import time
epoch = float(self.text)/1000
return time.strftime('%Y-%m-%dT%H:%M:%S.000Z', time.gmtime(epoch))
def datetime(self):
"""(datetime.datetime) Return the timestamp as a datetime.datetime object
Requires python 2.3
"""
import datetime
epoch = float(self.text)/1000
return datetime.datetime.fromtimestamp(epoch)
def TimeFromString(xml_string):
return atom.CreateClassFromXMLString(Time, xml_string)
class Tags(ExifBaseElement):
"""The container for all exif elements.
The <exif:tags> element can appear as a child of a photo entry.
"""
_tag = 'tags'
_children = atom.AtomBase._children.copy()
_children['{%s}fstop' % EXIF_NAMESPACE] = ('fstop', Fstop)
_children['{%s}make' % EXIF_NAMESPACE] = ('make', Make)
_children['{%s}model' % EXIF_NAMESPACE] = ('model', Model)
_children['{%s}distance' % EXIF_NAMESPACE] = ('distance', Distance)
_children['{%s}exposure' % EXIF_NAMESPACE] = ('exposure', Exposure)
_children['{%s}flash' % EXIF_NAMESPACE] = ('flash', Flash)
_children['{%s}focallength' % EXIF_NAMESPACE] = ('focallength', Focallength)
_children['{%s}iso' % EXIF_NAMESPACE] = ('iso', Iso)
_children['{%s}time' % EXIF_NAMESPACE] = ('time', Time)
_children['{%s}imageUniqueID' % EXIF_NAMESPACE] = ('imageUniqueID', ImageUniqueID)
def __init__(self, extension_elements=None, extension_attributes=None, text=None):
ExifBaseElement.__init__(self, extension_elements=extension_elements,
extension_attributes=extension_attributes,
text=text)
self.fstop=None
self.make=None
self.model=None
self.distance=None
self.exposure=None
self.flash=None
self.focallength=None
self.iso=None
self.time=None
self.imageUniqueID=None
def TagsFromString(xml_string):
return atom.CreateClassFromXMLString(Tags, xml_string)
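# Minimal parsing sketch (added; the XML snippet is a made-up example):
#
#     tags = TagsFromString(
#         '<exif:tags xmlns:exif="http://schemas.google.com/photos/exif/2007">'
#         '<exif:fstop>5.0</exif:fstop></exif:tags>')
#     tags.fstop.text    # '5.0'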
|
Game28/modules/sprites/__init__.py | ttkaixin1998/pikachupythongames | 4,013 | 11099544 | '''Initialization.'''
from .hero import Hero
from .button import Button |
backend/apps/mails/admin.py | KuanWeiLee/froggy-service | 174 | 11099555 | from django.contrib import admin
from .models import SendGridMail, SendGridMailTemplate
admin.site.register(SendGridMailTemplate)
admin.site.register(SendGridMail)
|
tests/fixtures/io.py | lizhifeng1998/schnetpack | 450 | 11099564 | import pytest
import torch
import numpy as np
__all__ = [
# input
"schnet_batch",
"max_atoms_in_batch",
"neighbors",
"neighbor_mask",
"positions",
"cell",
"cell_offset",
"r_ij",
"f_ij",
"random_atomic_env",
"random_interatomic_distances",
"random_input_dim",
"random_output_dim",
"random_shape",
"random_float_input",
"random_int_input",
# output
"schnet_output_shape",
"interaction_output_shape",
"cfconv_output_shape",
"gaussian_smearing_shape",
]
# inputs
# from data
@pytest.fixture
def schnet_batch(example_loader):
return next(iter(example_loader))
# components of batch
@pytest.fixture
def max_atoms_in_batch(schnet_batch):
return schnet_batch["_positions"].shape[1]
@pytest.fixture
def neighbors(schnet_batch):
return schnet_batch["_neighbors"]
@pytest.fixture
def neighbor_mask(schnet_batch):
return schnet_batch["_neighbor_mask"]
@pytest.fixture
def positions(schnet_batch):
return schnet_batch["_positions"]
@pytest.fixture
def cell(schnet_batch):
return schnet_batch["_cell"]
@pytest.fixture
def cell_offset(schnet_batch):
return schnet_batch["_cell_offset"]
@pytest.fixture
def r_ij(atom_distances, positions, neighbors, cell, cell_offset, neighbor_mask):
return atom_distances(positions, neighbors, cell, cell_offset, neighbor_mask)
@pytest.fixture
def f_ij(gaussion_smearing_layer, r_ij):
return gaussion_smearing_layer(r_ij)
@pytest.fixture
def random_atomic_env(batch_size, max_atoms_in_batch, n_filters):
return torch.rand((batch_size, max_atoms_in_batch, n_filters))
@pytest.fixture
def random_interatomic_distances(batch_size, max_atoms_in_batch, cutoff):
return (
(1 - torch.rand((batch_size, max_atoms_in_batch, max_atoms_in_batch - 1)))
* 2
* cutoff
)
@pytest.fixture
def random_input_dim(random_shape):
return random_shape[-1]
@pytest.fixture
def random_output_dim():
return np.random.randint(1, 20, 1).item()
@pytest.fixture
def random_shape():
return list(np.random.randint(1, 8, 3))
@pytest.fixture
def random_float_input(random_shape):
return torch.rand(random_shape, dtype=torch.float32)
@pytest.fixture
def random_int_input(random_shape):
return torch.randint(0, 20, random_shape)
# outputs
# spk.representation
@pytest.fixture
def schnet_output_shape(batch_size, max_atoms_in_batch, n_atom_basis):
return [batch_size, max_atoms_in_batch, n_atom_basis]
@pytest.fixture
def interaction_output_shape(batch_size, max_atoms_in_batch, n_filters):
return [batch_size, max_atoms_in_batch, n_filters]
@pytest.fixture
def cfconv_output_shape(batch_size, max_atoms_in_batch, n_atom_basis):
return [batch_size, max_atoms_in_batch, n_atom_basis]
# spk.nn
@pytest.fixture
def gaussian_smearing_shape(batch_size, max_atoms_in_batch, n_gaussians):
return [batch_size, max_atoms_in_batch, max_atoms_in_batch - 1, n_gaussians]
|
code/dataset/megadepth_train.py | theNded/SGP | 137 | 11099624 |
import os, sys
file_path = os.path.abspath(__file__)
project_path = os.path.dirname(os.path.dirname(file_path))
sys.path.append(project_path)
import cv2
import numpy as np
import open3d as o3d
from dataset.base import DatasetBase
from geometry.image import compute_fundamental_from_poses, detect_keypoints, extract_feats, match_feats, estimate_essential, draw_matches
class DatasetMegaDepthTrain(DatasetBase):
def __init__(self, root, scenes):
super(DatasetMegaDepthTrain, self).__init__(root, scenes)
# override
def parse_scene(self, root, scene):
scene_path = os.path.join(root, scene)
fnames = os.listdir(os.path.join(scene_path, 'images'))
fnames_map = {fname: i for i, fname in enumerate(fnames)}
# Load pairs.txt
pair_fname = os.path.join(scene_path, 'pairs.txt')
with open(pair_fname, 'r') as f:
pair_content = f.readlines()
pairs = []
for line in pair_content:
lst = line.strip().split(' ')
src_fname = lst[0]
dst_fname = lst[1]
src_idx = fnames_map[src_fname]
dst_idx = fnames_map[dst_fname]
pairs.append((src_idx, dst_idx))
cam_fname = os.path.join(scene_path, 'img_cam.txt')
with open(cam_fname, 'r') as f:
cam_content = f.readlines()
cnt = 0
intrinsics = np.zeros((len(fnames), 3, 3))
extrinsics = np.zeros((len(fnames), 4, 4))
for line in cam_content:
line = line.strip()
if len(line) > 0 and line[0] != "#":
lst = line.split()
fname = lst[0]
idx = fnames_map[fname]
fx, fy = float(lst[3]), float(lst[4])
cx, cy = float(lst[5]), float(lst[6])
R = np.array(lst[7:16]).reshape((3, 3))
t = np.array(lst[16:19])
T = np.eye(4)
T[:3, :3] = R
T[:3, 3] = t
intrinsics[idx] = np.array([fx, 0, cx, 0, fy, cy, 0, 0,
1]).reshape((3, 3))
extrinsics[idx] = T
cnt += 1
assert cnt == len(fnames)
return {
'folder': scene,
'fnames': fnames,
'pairs': pairs,
'unary_info': [(K, T) for K, T in zip(intrinsics, extrinsics)],
'binary_info': [None for i in range(len(pairs))]
}
# override
def load_data(self, folder, fname):
fname = os.path.join(self.root, folder, 'images', fname)
return cv2.imread(fname)
# override
def collect_scenes(self, root, scenes):
scene_collection = []
for scene in scenes:
scene_path = os.path.join(root, scene)
subdirs = os.listdir(scene_path)
for subdir in subdirs:
if subdir.startswith('dense') and \
os.path.isdir(
os.path.join(scene_path, subdir)):
scene_dict = self.parse_scene(
root, os.path.join(scene, subdir, 'aligned'))
scene_collection.append(scene_dict)
return scene_collection
|
cloths_segmentation/metrics.py | fighting332/cloths_segmentation | 134 | 11099627 | import torch
EPSILON = 1e-15
def binary_mean_iou(logits: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
output = (logits > 0).int()
if output.shape != targets.shape:
targets = torch.squeeze(targets, 1)
intersection = (targets * output).sum()
union = targets.sum() + output.sum() - intersection
result = (intersection + EPSILON) / (union + EPSILON)
return result
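# Worked example (added for clarity): logits above zero are thresholded to 1, so
#
#     binary_mean_iou(torch.tensor([[2.0, -1.0], [0.5, -0.3]]),
#                     torch.tensor([[1, 0], [0, 0]]))
#
# gives intersection = 1 and union = 2, i.e. an IoU of roughly 0.5.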
|
aleph/migrate/versions/40d6ffcd8442_add_mappings_table.py | Rosencrantz/aleph | 1,213 | 11099642 | """Introduce table to store mappings in the DB.
Revision ID: 40d6ffcd8442
Revises: 8a8ef1f7e6fa
Create Date: 2019-10-02 04:37:55.784441
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "40d6ffcd8442"
down_revision = "8a8ef1f7e6fa"
def upgrade():
op.create_table(
"mapping",
sa.Column("created_at", sa.DateTime(), nullable=True),
sa.Column("updated_at", sa.DateTime(), nullable=True),
sa.Column("deleted_at", sa.DateTime(), nullable=True),
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("query", postgresql.JSONB(astext_type=sa.Text()), nullable=True),
sa.Column("role_id", sa.Integer(), nullable=True),
sa.Column("collection_id", sa.Integer(), nullable=True),
sa.Column("table_id", sa.String(length=128), nullable=True),
sa.ForeignKeyConstraint(["collection_id"], ["collection.id"],),
sa.ForeignKeyConstraint(["role_id"], ["role.id"],),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(
op.f("ix_mapping_collection_id"), "mapping", ["collection_id"], unique=False
)
op.create_index(op.f("ix_mapping_role_id"), "mapping", ["role_id"], unique=False)
op.create_index(op.f("ix_mapping_table_id"), "mapping", ["table_id"], unique=False)
def downgrade():
pass
|
tools/utilities/pythonlibs/audio/compute_ell_model.py | shawncal/ELL | 2,094 | 11099664 | ###################################################################################################
#
# Project: Embedded Learning Library (ELL)
# File: compute_ell_model.py
# Authors: <NAME>
#
# Requires: Python 3.x
#
###################################################################################################
import os
import sys
import numpy as np
try:
# try and import ELL from site_packages
import ell
except:
# try and use ELL_ROOT environment variable to find ELL.
ell_root = os.getenv("ELL_ROOT")
if not ell_root:
raise Exception("Please set your ELL_ROOT environment variable")
sys.path += [os.path.join(ell_root, "build", "interfaces", "python", "package")]
import ell
class ComputeModel:
""" This class wraps a .ell model, exposing the model compute function as a
transform method """
def __init__(self, model_path):
self.model_path = model_path
self.map = ell.model.Map(model_path)
self.input_shape = self.map.GetInputShape()
self.output_shape = self.map.GetOutputShape()
self.state_size = 0
if self.map.NumInputs() == 2 and self.map.NumOutputs() == 2:
# then perhaps we have a FastGRNN model with external state.
self.input_size = self.input_shape.Size()
self.output_size = self.output_shape.Size()
self.state_size = self.map.GetInputShape(1).Size()
self.output_buffer = ell.math.FloatVector(self.output_size)
self.hidden_state = ell.math.FloatVector(self.state_size)
self.new_state = ell.math.FloatVector(self.state_size)
def predict(self, x):
return self.transform(x)
def transform(self, x):
""" call the ell model with input array 'x' and return the output as numpy array """
# Send the input to the predict function and return the prediction result
if self.state_size:
i = ell.math.FloatVector(x)
self.map.ComputeMultiple([i, self.hidden_state], [self.output_buffer, self.new_state])
self.hidden_state.copy_from(self.new_state)
out_vec = self.output_buffer
else:
out_vec = self.map.Compute(x)
return np.array(out_vec)
def reset(self):
""" reset all model state """
self.map.Reset()
if self.state_size:
self.hidden_state = ell.math.FloatVector(self.state_size)
def get_metadata(self, name):
model = self.map.GetModel()
value = self.map.GetMetadataValue(name)
if value:
return value
value = model.GetMetadataValue(name)
if value:
return value
nodes = model.GetNodes()
while nodes.IsValid():
node = nodes.Get()
value = node.GetMetadataValue(name)
if value:
return value
nodes.Next()
return None
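# Illustrative usage (added; the model path is a placeholder):
#
#     model = ComputeModel('featurizer.ell')
#     features = np.zeros(model.input_shape.Size(), dtype=np.float32)
#     prediction = model.transform(features)
#     model.reset()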
|
music21/instrument.py | cuthbertLab/music21 | 1,449 | 11099698 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# Name: instrument.py
# Purpose: Class for basic instrument information
#
# Authors: <NAME>
# <NAME>
# <NAME>
# <NAME>
# <NAME>
# <NAME>
#
# Copyright: Copyright © 2009-2012, 17, 20 <NAME> and the music21 Project
# License: BSD, see license.txt
# ------------------------------------------------------------------------------
'''
This module represents instruments through objects that contain general information
such as Metadata for instrument names, classifications, transpositions and default
MIDI program numbers. It also contains information specific to each instrument
or instrument family, such as string pitches, etc. Information about instrumental
ensembles is also included here though it may later be separated out into its own
ensemble.py module.
'''
import copy
import unittest
import sys
from collections import OrderedDict
from typing import Optional
from music21 import base
from music21 import common
from music21 import interval
from music21 import note
from music21 import pitch
from music21 import stream
from music21.tree.trees import OffsetTree
from music21.exceptions21 import InstrumentException
from music21 import environment
_MOD = 'instrument'
environLocal = environment.Environment(_MOD)
StreamType = stream.StreamType
def unbundleInstruments(streamIn: StreamType, *, inPlace=False) -> Optional[StreamType]:
# noinspection PyShadowingNames
'''
takes a :class:`~music21.stream.Stream` that has :class:`~music21.note.NotRest` objects
and moves their `.storedInstrument` attributes to a new Stream (unless inPlace=True)
>>> up1 = note.Unpitched()
>>> up1.storedInstrument = instrument.BassDrum()
>>> up2 = note.Unpitched()
>>> up2.storedInstrument = instrument.Cowbell()
>>> s = stream.Stream()
>>> s.append(up1)
>>> s.append(up2)
>>> s2 = instrument.unbundleInstruments(s)
>>> s2.show('text')
{0.0} <music21.instrument.BassDrum 'Bass Drum'>
{0.0} <music21.note.Unpitched object at 0x...>
{1.0} <music21.instrument.Cowbell 'Cowbell'>
{1.0} <music21.note.Unpitched object at 0x...>
'''
if inPlace is True:
s = streamIn
else:
s = streamIn.coreCopyAsDerivation('unbundleInstruments')
for thisObj in s:
if isinstance(thisObj, note.NotRest):
# eventually also unbundle each note of chord, but need new voices
i = thisObj.storedInstrument
if i is not None:
off = thisObj.offset
s.insert(off, i)
if inPlace is False:
return s
def bundleInstruments(streamIn: stream.Stream, *, inPlace=False) -> Optional[stream.Stream]:
# noinspection PyShadowingNames
'''
>>> up1 = note.Unpitched()
>>> up1.storedInstrument = instrument.BassDrum()
>>> upUnknownInstrument = note.Unpitched()
>>> up2 = note.Unpitched()
>>> up2.storedInstrument = instrument.Cowbell()
>>> s = stream.Stream()
>>> s.append(up1)
>>> s.append(upUnknownInstrument)
>>> s.append(up2)
>>> s2 = instrument.unbundleInstruments(s)
>>> s3 = instrument.bundleInstruments(s2)
>>> for test in s3:
... print(test.storedInstrument)
Bass Drum
Bass Drum
Cowbell
'''
if inPlace is True:
s = streamIn
else:
s = streamIn.coreCopyAsDerivation('bundleInstruments')
lastInstrument = None
for thisObj in s:
if 'Instrument' in thisObj.classes:
lastInstrument = thisObj
s.remove(thisObj)
elif isinstance(thisObj, note.NotRest):
thisObj.storedInstrument = lastInstrument
if inPlace is False:
return s
class Instrument(base.Music21Object):
'''
Base class for all musical instruments. Designed
for subclassing, though usually a more specific
instrument class (such as StringInstrument) would
be better to subclass.
Some defined attributes for instruments include:
* partId
* partName
* partAbbreviation
* instrumentId
* instrumentName
* instrumentAbbreviation
* midiProgram (0-indexed)
* midiChannel (0-indexed)
* lowestNote (a note object or a string for _written_ pitch)
* highestNote (a note object or a string for _written_ pitch)
* transposition (an interval object)
* inGMPercMap (bool -- if it uses the GM percussion map)
* soundfontFn (filepath to a sound font, optional)
'''
classSortOrder = -25
def __init__(self, instrumentName=None):
super().__init__()
self.partId = None
self._partIdIsRandom = False
self.partName = None
self.partAbbreviation = None
self.printPartName = None # True = yes, False = no, None = let others decide
self.printPartAbbreviation = None
self.instrumentId: Optional[str] = None # apply to midi and instrument
self._instrumentIdIsRandom = False
self.instrumentName = instrumentName
self.instrumentAbbreviation = None
self.midiProgram = None # 0-indexed
self.midiChannel = None # 0-indexed
self.instrumentSound = None
self.lowestNote = None
self.highestNote = None
# define interval to go from written to sounding
self.transposition: Optional[interval.Interval] = None
self.inGMPercMap = False
self.soundfontFn = None # if defined...
def __str__(self):
msg = []
if self.partId is not None:
msg.append(f'{self.partId}: ')
if self.partName is not None:
msg.append(f'{self.partName}: ')
if self.instrumentName is not None:
msg.append(self.instrumentName)
return ''.join(msg)
def _reprInternal(self):
return repr(str(self))
def __deepcopy__(self, memo=None):
new = common.defaultDeepcopy(self, memo)
if self._partIdIsRandom:
new.partIdRandomize()
if self._instrumentIdIsRandom:
new.instrumentIdRandomize()
return new
def bestName(self):
'''
Find a viable name, looking first at instrument, then part, then
abbreviations.
'''
if self.partName is not None:
return self.partName
elif self.partAbbreviation is not None:
return self.partAbbreviation
elif self.instrumentName is not None:
return self.instrumentName
elif self.instrumentAbbreviation is not None:
return self.instrumentAbbreviation
else:
return None
def partIdRandomize(self):
'''
Force a unique id by using an MD5
'''
idNew = f'P{common.getMd5()}'
# environLocal.printDebug(['incrementing instrument from',
# self.partId, 'to', idNew])
self.partId = idNew
self._partIdIsRandom = True
def instrumentIdRandomize(self):
'''
Force a unique id by using an MD5
'''
idNew = f'I{common.getMd5()}'
# environLocal.printDebug(['incrementing instrument from',
# self.partId, 'to', idNew])
self.instrumentId = idNew
self._instrumentIdIsRandom = True
# the empty list as default is actually CORRECT!
# noinspection PyDefaultArgument
def autoAssignMidiChannel(self, usedChannels=[]): # pylint: disable=dangerous-default-value
'''
Assign an unused midi channel given a list of
used channels.
assigns the number to self.midiChannel and returns
it as an int.
Note that midi channel 10 (9 in music21) is special, and
thus is skipped.
Currently only 16 channels are used.
Note that the reused "usedChannels=[]" in the
signature is NOT a mistake, but necessary for
the case where there needs to be a global list.
>>> used = [0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11]
>>> i = instrument.Violin()
>>> i.autoAssignMidiChannel(used)
12
>>> i.midiChannel
12
Unpitched percussion will be set to 9, so long as it's not in the filter list:
>>> used = [0]
>>> i = instrument.Maracas()
>>> i.autoAssignMidiChannel(used)
9
>>> i.midiChannel
9
>>> used = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
>>> i = instrument.Woodblock()
>>> i.autoAssignMidiChannel(used)
11
>>> i.midiChannel
11
OMIT_FROM_DOCS
>>> used2 = range(16)
>>> i = instrument.Instrument()
>>> i.autoAssignMidiChannel(used2)
Traceback (most recent call last):
music21.exceptions21.InstrumentException: we are out of midi channels! help!
'''
# NOTE: this is used in musicxml output, not in midi output
maxMidi = 16
channelFilter = []
for e in usedChannels:
if e is not None:
channelFilter.append(e)
if not channelFilter:
self.midiChannel = 0
return self.midiChannel
elif len(channelFilter) >= maxMidi:
raise InstrumentException('we are out of midi channels! help!')
elif 'UnpitchedPercussion' in self.classes and 9 not in usedChannels:
self.midiChannel = 9
return self.midiChannel
else:
for ch in range(maxMidi):
if ch in channelFilter:
continue
elif ch % 16 == 9:
continue # skip 10 / percussion for now
else:
self.midiChannel = ch
return self.midiChannel
return 0
# raise InstrumentException('we are out of midi channels and this ' +
# 'was not already detected PROGRAM BUG!')
# ------------------------------------------------------------------------------
class KeyboardInstrument(Instrument):
def __init__(self):
super().__init__()
self.instrumentName = 'Keyboard'
self.instrumentAbbreviation = 'Kb'
self.instrumentSound = 'keyboard.piano'
class Piano(KeyboardInstrument):
'''
>>> p = instrument.Piano()
>>> p.instrumentName
'Piano'
>>> p.midiProgram
0
'''
def __init__(self):
super().__init__()
self.instrumentName = 'Piano'
self.instrumentAbbreviation = 'Pno'
self.midiProgram = 0
self.lowestNote = pitch.Pitch('A0')
self.highestNote = pitch.Pitch('C8')
class Harpsichord(KeyboardInstrument):
def __init__(self):
super().__init__()
self.instrumentName = 'Harpsichord'
self.instrumentAbbreviation = 'Hpschd'
self.midiProgram = 6
self.instrumentSound = 'keyboard.harpsichord'
self.lowestNote = pitch.Pitch('F1')
self.highestNote = pitch.Pitch('F6')
class Clavichord(KeyboardInstrument):
def __init__(self):
super().__init__()
self.instrumentName = 'Clavichord'
self.instrumentAbbreviation = 'Clv'
self.midiProgram = 7
self.instrumentSound = 'keyboard.clavichord'
# TODO: self.lowestNote = pitch.Pitch('')
# TODO: self.highestNote = pitch.Pitch('')
class Celesta(KeyboardInstrument):
def __init__(self):
super().__init__()
self.instrumentName = 'Celesta'
self.instrumentAbbreviation = 'Clst'
self.midiProgram = 8
self.instrumentSound = 'keyboard.celesta'
class Sampler(KeyboardInstrument):
def __init__(self):
super().__init__()
self.instrumentName = 'Sampler'
self.instrumentAbbreviation = 'Samp'
self.midiProgram = 55
class ElectricPiano(Piano):
'''
>>> p = instrument.ElectricPiano()
>>> p.instrumentName
'Electric Piano'
>>> p.midiProgram
2
'''
def __init__(self):
super().__init__()
self.instrumentName = 'Electric Piano'
self.instrumentAbbreviation = 'E.Pno'
self.midiProgram = 2
# ------------------------------------------------------------------------------
class Organ(Instrument):
def __init__(self):
super().__init__()
self.instrumentName = 'Organ'
self.midiProgram = 19
self.instrumentSound = 'keyboard.organ'
class PipeOrgan(Organ):
def __init__(self):
super().__init__()
self.instrumentName = 'Pipe Organ'
self.instrumentAbbreviation = 'P Org'
self.midiProgram = 19
self.instrumentSound = 'keyboard.organ.pipe'
self.lowestNote = pitch.Pitch('C2')
self.highestNote = pitch.Pitch('C6')
class ElectricOrgan(Organ):
def __init__(self):
super().__init__()
self.instrumentName = 'Electric Organ'
self.instrumentAbbreviation = 'Elec Org'
self.midiProgram = 16
self.lowestNote = pitch.Pitch('C2')
self.highestNote = pitch.Pitch('C6')
class ReedOrgan(Organ):
def __init__(self):
super().__init__()
self.instrumentName = 'Reed Organ'
# TODO self.instrumentAbbreviation = ''
self.midiProgram = 20
self.instrumentSound = 'keyboard.organ.reed'
self.lowestNote = pitch.Pitch('C2')
self.highestNote = pitch.Pitch('C6')
class Accordion(Organ):
def __init__(self):
super().__init__()
self.instrumentName = 'Accordion'
self.instrumentAbbreviation = 'Acc'
self.midiProgram = 21
self.instrumentSound = 'keyboard.accordion'
self.lowestNote = pitch.Pitch('F3')
self.highestNote = pitch.Pitch('A6')
class Harmonica(Instrument):
def __init__(self):
super().__init__()
self.instrumentName = 'Harmonica'
self.instrumentAbbreviation = 'Hmca'
self.midiProgram = 22
self.instrumentSound = 'wind.reed.harmonica'
self.lowestNote = pitch.Pitch('C3')
self.highestNote = pitch.Pitch('C6')
# -----------------------------------------------------
class StringInstrument(Instrument):
def __init__(self):
super().__init__()
self._stringPitches = None
self._cachedPitches = None
self.instrumentName = 'StringInstrument'
self.instrumentAbbreviation = 'Str'
self.midiProgram = 48
def _getStringPitches(self):
if hasattr(self, '_cachedPitches') and self._cachedPitches is not None:
return self._cachedPitches
elif not hasattr(self, '_stringPitches'):
raise InstrumentException('cannot get stringPitches for these instruments')
else:
self._cachedPitches = [pitch.Pitch(x) for x in self._stringPitches]
return self._cachedPitches
def _setStringPitches(self, newPitches):
if newPitches and (hasattr(newPitches[0], 'step') or newPitches[0] is None):
# newPitches is pitchObjects or something
self._stringPitches = newPitches
self._cachedPitches = newPitches
else:
self._cachedPitches = None
self._stringPitches = newPitches
stringPitches = property(_getStringPitches, _setStringPitches, doc='''
stringPitches is a property that stores a list of Pitches (or pitch names,
such as "C4") that represent the pitch of the open strings from lowest to
highest.[*]
>>> vln1 = instrument.Violin()
>>> [str(p) for p in vln1.stringPitches]
['G3', 'D4', 'A4', 'E5']
instrument.stringPitches are full pitch objects, not just names:
>>> [x.octave for x in vln1.stringPitches]
[3, 4, 4, 5]
Scordatura for Scelsi's violin concerto *Anahit*.
(N.B. that string to pitch conversion is happening automatically)
>>> vln1.stringPitches = ['G3', 'G4', 'B4', 'D4']
(`[*]In some tuning methods such as reentrant tuning on the ukulele,
lute, or five-string banjo the order might not strictly be from lowest to
highest. The same would hold true for certain violin scordatura pieces, such
as some of Biber's *Mystery Sonatas*`)
''')
class Violin(StringInstrument):
def __init__(self):
super().__init__()
self.instrumentName = 'Violin'
self.instrumentAbbreviation = 'Vln'
self.midiProgram = 40
self.instrumentSound = 'strings.violin'
self.lowestNote = pitch.Pitch('G3')
self._stringPitches = ['G3', 'D4', 'A4', 'E5']
class Viola(StringInstrument):
def __init__(self):
super().__init__()
self.instrumentName = 'Viola'
self.instrumentAbbreviation = 'Vla'
self.midiProgram = 41
self.instrumentSound = 'strings.viola'
self.lowestNote = pitch.Pitch('C3')
self._stringPitches = ['C3', 'G3', 'D4', 'A4']
class Violoncello(StringInstrument):
def __init__(self):
super().__init__()
self.instrumentName = 'Violoncello'
self.instrumentAbbreviation = 'Vc'
self.midiProgram = 42
self.instrumentSound = 'strings.cello'
self.lowestNote = pitch.Pitch('C2')
self._stringPitches = ['C2', 'G2', 'D3', 'A3']
class Contrabass(StringInstrument):
'''
For the Contrabass (or double bass), the stringPitches attribute refers to the sounding pitches
of each string; whereas the lowestNote attribute refers to the lowest written note.
'''
def __init__(self):
super().__init__()
self.instrumentName = 'Contrabass'
self.instrumentAbbreviation = 'Cb'
self.midiProgram = 43
self.instrumentSound = 'strings.contrabass'
self.lowestNote = pitch.Pitch('E2')
self._stringPitches = ['E1', 'A1', 'D2', 'G2']
self.transposition = interval.Interval('P-8')
class Harp(StringInstrument):
def __init__(self):
super().__init__()
self.instrumentName = 'Harp'
self.instrumentAbbreviation = 'Hp'
self.midiProgram = 46
self.instrumentSound = 'pluck.harp'
self.lowestNote = pitch.Pitch('C1')
self.highestNote = pitch.Pitch('G#7')
class Guitar(StringInstrument):
def __init__(self):
super().__init__()
self.instrumentName = 'Guitar'
self.instrumentAbbreviation = 'Gtr'
self.midiProgram = 24 # default -- Acoustic
self.instrumentSound = 'pluck.guitar'
self.lowestNote = pitch.Pitch('E2')
self._stringPitches = ['E2', 'A2', 'D3', 'G3', 'B3', 'E4']
class AcousticGuitar(Guitar):
def __init__(self):
super().__init__()
self.instrumentName = 'Acoustic Guitar'
self.instrumentAbbreviation = 'Ac Gtr'
self.midiProgram = 24
self.instrumentSound = 'pluck.guitar.acoustic'
class ElectricGuitar(Guitar):
def __init__(self):
super().__init__()
self.instrumentName = 'Electric Guitar'
self.instrumentAbbreviation = 'Elec Gtr'
self.midiProgram = 26
self.instrumentSound = 'pluck.guitar.electric'
class AcousticBass(Guitar):
def __init__(self):
super().__init__()
self.instrumentName = 'Acoustic Bass'
self.instrumentAbbreviation = 'Ac b'
self.midiProgram = 32
self.instrumentSound = 'pluck.bass.acoustic'
self.lowestNote = pitch.Pitch('E1')
self._stringPitches = ['E1', 'A1', 'D2', 'G2']
class ElectricBass(Guitar):
def __init__(self):
super().__init__()
self.instrumentName = 'Electric Bass'
self.instrumentAbbreviation = 'Elec b'
self.midiProgram = 33
self.instrumentSound = 'pluck.bass.electric'
self.lowestNote = pitch.Pitch('E1')
self._stringPitches = ['E1', 'A1', 'D2', 'G2']
class FretlessBass(Guitar):
def __init__(self):
super().__init__()
self.instrumentName = 'Fretless Bass'
# TODO: self.instrumentAbbreviation = ''
self.midiProgram = 35
self.instrumentSound = 'pluck.bass.fretless'
self.lowestNote = pitch.Pitch('E1')
self._stringPitches = ['E1', 'A1', 'D2', 'G2']
class Mandolin(StringInstrument):
def __init__(self):
super().__init__()
self.instrumentName = 'Mandolin'
self.instrumentAbbreviation = 'Mdln'
self.instrumentSound = 'pluck.mandolin'
self.lowestNote = pitch.Pitch('G3')
self._stringPitches = ['G3', 'D4', 'A4', 'E5']
class Ukulele(StringInstrument):
def __init__(self):
super().__init__()
self.instrumentName = 'Ukulele'
self.instrumentAbbreviation = 'Uke'
self.instrumentSound = 'pluck.ukulele'
self.lowestNote = pitch.Pitch('C4')
self._stringPitches = ['G4', 'C4', 'E4', 'A4']
class Banjo(StringInstrument):
def __init__(self):
super().__init__()
self.instrumentName = 'Banjo'
self.instrumentAbbreviation = 'Bjo'
self.instrumentSound = 'pluck.banjo'
self.midiProgram = 105
self.lowestNote = pitch.Pitch('C3')
self._stringPitches = ['C3', 'G3', 'D4', 'A4']
self.transposition = interval.Interval('P-8')
class Lute(StringInstrument):
def __init__(self):
super().__init__()
self.instrumentName = 'Lute'
self.instrumentAbbreviation = 'Lte'
self.instrumentSound = 'pluck.lute'
self.midiProgram = 24
class Sitar(StringInstrument):
def __init__(self):
super().__init__()
self.instrumentName = 'Sitar'
self.instrumentAbbreviation = 'Sit'
self.instrumentSound = 'pluck.sitar'
self.midiProgram = 104
class Shamisen(StringInstrument):
def __init__(self):
super().__init__()
self.instrumentName = 'Shamisen'
# TODO: self.instrumentAbbreviation = ''
self.instrumentSound = 'pluck.shamisen'
self.midiProgram = 106
class Koto(StringInstrument):
def __init__(self):
super().__init__()
self.instrumentName = 'Koto'
# TODO: self.instrumentAbbreviation = ''
self.instrumentSound = 'pluck.koto'
self.midiProgram = 107
# ------------------------------------------------------------------------------
class WoodwindInstrument(Instrument):
def __init__(self):
super().__init__()
self.instrumentName = 'Woodwind'
self.instrumentAbbreviation = 'Ww'
class Flute(WoodwindInstrument):
def __init__(self):
super().__init__()
self.instrumentName = 'Flute'
self.instrumentAbbreviation = 'Fl'
self.instrumentSound = 'wind.flutes.flute'
self.midiProgram = 73
self.lowestNote = pitch.Pitch('C4') # Occasionally (rarely) B3
class Piccolo(Flute):
def __init__(self):
super().__init__()
self.instrumentName = 'Piccolo'
self.instrumentAbbreviation = 'Picc'
self.instrumentSound = 'wind.flutes.piccolo'
self.midiProgram = 72
self.lowestNote = pitch.Pitch('D4') # Occasionally (rarely) C4
self.transposition = interval.Interval('P8')
class Recorder(Flute):
def __init__(self):
super().__init__()
self.instrumentName = 'Recorder'
self.instrumentAbbreviation = 'Rec'
self.instrumentSound = 'wind.flutes.recorder'
self.midiProgram = 74
self.lowestNote = pitch.Pitch('F4')
class PanFlute(Flute):
def __init__(self):
super().__init__()
self.instrumentName = 'Pan Flute'
self.instrumentAbbreviation = 'P Fl'
self.instrumentSound = 'wind.flutes.panpipes'
self.midiProgram = 75
class Shakuhachi(Flute):
def __init__(self):
super().__init__()
self.instrumentName = 'Shakuhachi'
self.instrumentAbbreviation = 'Shk Fl'
self.instrumentSound = 'wind.flutes.shakuhachi'
self.midiProgram = 77
class Whistle(Flute):
def __init__(self):
super().__init__()
self.instrumentName = 'Whistle'
self.instrumentAbbreviation = 'Whs'
self.instrumentSound = 'wind.flutes.whistle'
self.inGMPercMap = True
self.percMapPitch = 71
self.midiProgram = 78
class Ocarina(Flute):
def __init__(self):
super().__init__()
self.instrumentName = 'Ocarina'
self.instrumentAbbreviation = 'Oc'
self.instrumentSound = 'wind.flutes.ocarina'
self.midiProgram = 79
class Oboe(WoodwindInstrument):
def __init__(self):
super().__init__()
self.instrumentName = 'Oboe'
self.instrumentAbbreviation = 'Ob'
self.instrumentSound = 'wind.reed.oboe'
self.midiProgram = 68
self.lowestNote = pitch.Pitch('B-3')
class EnglishHorn(WoodwindInstrument):
def __init__(self):
super().__init__()
self.instrumentName = 'English Horn'
self.instrumentAbbreviation = 'Eng Hn'
self.instrumentSound = 'wind.reed.english-horn'
self.midiProgram = 69
self.lowestNote = pitch.Pitch('B3')
self.transposition = interval.Interval('P-5')
class Clarinet(WoodwindInstrument):
def __init__(self):
super().__init__()
self.instrumentName = 'Clarinet'
self.instrumentAbbreviation = 'Cl'
self.instrumentSound = 'wind.reed.clarinet'
self.midiProgram = 71
self.lowestNote = pitch.Pitch('E3')
self.transposition = interval.Interval('M-2')
class BassClarinet(Clarinet):
'''
>>> bcl = instrument.BassClarinet()
>>> bcl.instrumentName
'Bass clarinet'
>>> bcl.midiProgram
71
>>> 'WoodwindInstrument' in bcl.classes
True
'''
def __init__(self):
super().__init__()
self.instrumentName = 'Bass clarinet'
self.instrumentAbbreviation = 'Bs Cl'
self.instrumentSound = 'wind.reed.clarinet.bass'
self.lowestNote = pitch.Pitch('E-3')
self.transposition = interval.Interval('M-9')
class Bassoon(WoodwindInstrument):
def __init__(self):
super().__init__()
self.instrumentName = 'Bassoon'
self.instrumentAbbreviation = 'Bsn'
self.instrumentSound = 'wind.reed.bassoon'
self.midiProgram = 70
self.lowestNote = pitch.Pitch('B-1')
class Contrabassoon(Bassoon):
def __init__(self):
super().__init__()
self.instrumentName = 'Contrabassoon'
self.instrumentAbbreviation = 'C Bsn'
self.instrumentSound = 'wind.reed.bassoon'
self.midiProgram = 70
self.lowestNote = pitch.Pitch('B-1')
class Saxophone(WoodwindInstrument):
def __init__(self):
super().__init__()
self.instrumentName = 'Saxophone'
self.instrumentAbbreviation = 'Sax'
self.instrumentSound = 'wind.reed.saxophone'
self.midiProgram = 65
self.lowestNote = pitch.Pitch('B-3')
class SopranoSaxophone(Saxophone):
def __init__(self):
super().__init__()
self.instrumentName = 'Soprano Saxophone'
self.instrumentAbbreviation = 'S Sax'
self.instrumentSound = 'wind.reed.saxophone.soprano'
self.midiProgram = 64
self.transposition = interval.Interval('M-2')
class AltoSaxophone(Saxophone):
def __init__(self):
super().__init__()
self.instrumentName = 'Alto Saxophone'
self.instrumentAbbreviation = 'A Sax'
self.instrumentSound = 'wind.reed.saxophone.alto'
self.midiProgram = 65
self.transposition = interval.Interval('M-6')
class TenorSaxophone(Saxophone):
def __init__(self):
super().__init__()
self.instrumentName = 'Tenor Saxophone'
self.instrumentAbbreviation = 'T Sax'
self.instrumentSound = 'wind.reed.saxophone.tenor'
self.midiProgram = 66
self.transposition = interval.Interval('M-9')
class BaritoneSaxophone(Saxophone):
def __init__(self):
super().__init__()
self.instrumentName = 'Baritone Saxophone'
self.instrumentAbbreviation = 'Bar Sax'
self.instrumentSound = 'wind.reed.saxophone.baritone'
self.midiProgram = 67
self.transposition = interval.Interval('M-13')
class Bagpipes(WoodwindInstrument):
def __init__(self):
super().__init__()
self.instrumentName = 'Bagpipes'
self.instrumentAbbreviation = 'Bag'
self.instrumentSound = 'wind.pipes.bagpipes'
self.midiProgram = 109
class Shehnai(WoodwindInstrument):
def __init__(self):
super().__init__()
self.instrumentName = 'Shehnai'
self.instrumentAbbreviation = 'Shn'
        # another spelling is 'Shenai'
self.instrumentSound = 'wind.reed.shenai'
self.midiProgram = 111
# ------------------------------------------------------------------------------
class BrassInstrument(Instrument):
def __init__(self):
super().__init__()
self.instrumentName = 'Brass'
self.instrumentAbbreviation = 'Brs'
self.midiProgram = 61
class Horn(BrassInstrument):
'''
>>> hn = instrument.Horn()
>>> hn.instrumentName
'Horn'
>>> hn.midiProgram
60
>>> 'BrassInstrument' in hn.classes
True
'''
def __init__(self):
super().__init__()
self.instrumentName = 'Horn'
self.instrumentAbbreviation = 'Hn'
self.instrumentSound = 'brass.french-horn'
self.midiProgram = 60
self.lowestNote = pitch.Pitch('C2')
self.transposition = interval.Interval('P-5')
class Trumpet(BrassInstrument):
def __init__(self):
super().__init__()
self.instrumentName = 'Trumpet'
self.instrumentAbbreviation = 'Tpt'
self.instrumentSound = 'brass.trumpet'
self.midiProgram = 56
self.lowestNote = pitch.Pitch('F#3')
self.transposition = interval.Interval('M-2')
class Trombone(BrassInstrument):
def __init__(self):
super().__init__()
self.instrumentName = 'Trombone'
self.instrumentAbbreviation = 'Trb'
self.instrumentSound = 'brass.trombone'
self.midiProgram = 57
self.lowestNote = pitch.Pitch('E2')
class BassTrombone(Trombone):
def __init__(self):
super().__init__()
self.instrumentName = 'Bass Trombone'
self.instrumentAbbreviation = 'BTrb'
self.instrumentSound = 'brass.trombone.bass'
self.lowestNote = pitch.Pitch('B-1')
class Tuba(BrassInstrument):
def __init__(self):
super().__init__()
self.instrumentName = 'Tuba'
self.instrumentAbbreviation = 'Tba'
self.instrumentSound = 'brass.tuba'
self.midiProgram = 58
self.lowestNote = pitch.Pitch('D1')
# ------------------------------------------------------------------------------
class Percussion(Instrument):
def __init__(self):
super().__init__()
self.inGMPercMap = False
self.percMapPitch = None
self.instrumentName = 'Percussion'
self.instrumentAbbreviation = 'Perc'
class PitchedPercussion(Percussion):
pass
class UnpitchedPercussion(Percussion):
def __init__(self):
super().__init__()
self._modifier = None
self._modifierToPercMapPitch = {}
self.midiChannel = 9 # 0-indexed, i.e. MIDI channel 10
def _getModifier(self):
return self._modifier
def _setModifier(self, modifier):
modifier = modifier.lower().strip()
# BEN: to-do, pull out hyphens, spaces, etc.
if self.inGMPercMap is True and modifier.lower() in self._modifierToPercMapPitch:
self.percMapPitch = self._modifierToPercMapPitch[modifier.lower()]
# normalize modifiers...
if self.percMapPitch in self._percMapPitchToModifier:
modifier = self._percMapPitchToModifier[self.percMapPitch]
self._modifier = modifier
modifier = property(_getModifier, _setModifier, doc='''
Returns or sets the modifier for this instrument. A modifier could
be something like "low-floor" for a TomTom or "rimshot" for a SnareDrum.
If the modifier is in the object's ._modifierToPercMapPitch dictionary
then changing the modifier also changes the .percMapPitch for the object
>>> bd = instrument.BongoDrums()
>>> bd.modifier
'high'
>>> bd.percMapPitch
60
>>> bd.modifier = 'low'
>>> bd.percMapPitch
61
Variations on modifiers can also be used and they get normalized:
>>> wb1 = instrument.Woodblock()
>>> wb1.percMapPitch
76
>>> wb1.modifier = 'LO'
>>> wb1.percMapPitch
77
>>> wb1.modifier # n.b. -- not LO
'low'
''')
class Vibraphone(PitchedPercussion):
def __init__(self):
super().__init__()
self.instrumentName = 'Vibraphone'
self.instrumentAbbreviation = 'Vbp'
self.instrumentSound = 'pitched-percussion.vibraphone'
self.midiProgram = 11
class Marimba(PitchedPercussion):
def __init__(self):
super().__init__()
self.instrumentName = 'Marimba'
self.instrumentAbbreviation = 'Mar'
self.instrumentSound = 'pitched-percussion.marimba'
self.midiProgram = 12
class Xylophone(PitchedPercussion):
def __init__(self):
super().__init__()
self.instrumentName = 'Xylophone'
self.instrumentAbbreviation = 'Xyl.'
self.instrumentSound = 'pitched-percussion.xylophone'
self.midiProgram = 13
class Glockenspiel(PitchedPercussion):
def __init__(self):
super().__init__()
self.instrumentName = 'Glockenspiel'
self.instrumentAbbreviation = 'Gsp'
self.instrumentSound = 'pitched-percussion.glockenspiel'
self.midiProgram = 9
class ChurchBells(PitchedPercussion):
def __init__(self):
super().__init__()
self.instrumentName = 'Church Bells'
self.instrumentAbbreviation = 'Bells'
self.instrumentSound = 'metal.bells.church'
self.midiProgram = 14
class TubularBells(PitchedPercussion):
def __init__(self):
super().__init__()
self.instrumentName = 'Tubular Bells'
self.instrumentAbbreviation = 'Tbells'
self.instrumentSound = 'pitched-percussion.tubular-bells'
self.midiProgram = 14
class Gong(PitchedPercussion):
def __init__(self):
super().__init__()
self.instrumentName = 'Gong'
self.instrumentAbbreviation = 'Gng'
self.instrumentSound = 'metal.gong'
class Handbells(PitchedPercussion):
def __init__(self):
super().__init__()
self.instrumentName = 'Handbells'
# TODO: self.instrumentAbbreviation = ''
self.instrumentSound = 'pitched-percussion.handbells'
class Dulcimer(PitchedPercussion):
def __init__(self):
super().__init__()
self.instrumentName = 'Dulcimer'
# TODO: self.instrumentAbbreviation = ''
self.instrumentSound = 'pluck.dulcimer'
self.midiProgram = 15
class SteelDrum(PitchedPercussion):
def __init__(self):
super().__init__()
self.instrumentName = 'Steel Drum'
self.instrumentAbbreviation = 'St Dr'
self.instrumentSound = 'metal.steel-drums'
self.midiProgram = 114
class Timpani(PitchedPercussion):
def __init__(self):
super().__init__()
self.instrumentName = 'Timpani'
self.instrumentAbbreviation = 'Timp'
self.instrumentSound = 'drum.timpani'
self.midiProgram = 47
class Kalimba(PitchedPercussion):
def __init__(self):
super().__init__()
self.instrumentName = 'Kalimba'
self.instrumentAbbreviation = 'Kal'
self.instrumentSound = 'pitched-percussion.kalimba'
self.midiProgram = 108
class Woodblock(UnpitchedPercussion):
def __init__(self):
super().__init__()
self.instrumentName = 'Woodblock'
self.instrumentAbbreviation = 'Wd Bl'
self.instrumentSound = 'wood.wood-block'
self.inGMPercMap = True
self.midiProgram = 115
self._modifier = 'high'
self._modifierToPercMapPitch = {'high': 76, 'low': 77, 'hi': 76, 'lo': 77}
self._percMapPitchToModifier = {76: 'high', 77: 'low'}
self.percMapPitch = self._modifierToPercMapPitch[self._modifier]
class TempleBlock(UnpitchedPercussion):
def __init__(self):
super().__init__()
self.instrumentName = 'Temple Block'
self.instrumentAbbreviation = 'Temp Bl'
self.instrumentSound = 'wood.temple-block'
class Castanets(UnpitchedPercussion):
def __init__(self):
super().__init__()
self.instrumentName = 'Castanets'
self.instrumentAbbreviation = 'Cas'
self.instrumentSound = 'wood.castanets'
class Maracas(UnpitchedPercussion):
def __init__(self):
super().__init__()
self.instrumentName = 'Maracas'
self.inGMPercMap = True
self.percMapPitch = 70
# TODO: self.instrumentAbbreviation = ''
self.instrumentSound = 'rattle.maraca'
class Vibraslap(UnpitchedPercussion):
def __init__(self):
super().__init__()
self.instrumentName = 'Vibraslap'
self.instrumentAbbreviation = 'Vbslp'
self.instrumentSound = 'rattle.vibraslap'
self.inGMPercMap = True
self.percMapPitch = 58
# BEN: Standardize Cymbals as plural
class Cymbals(UnpitchedPercussion):
def __init__(self):
super().__init__()
self.instrumentName = 'Cymbals'
self.instrumentAbbreviation = 'Cym'
class FingerCymbals(Cymbals):
def __init__(self):
super().__init__()
self.instrumentName = 'Finger Cymbals'
self.instrumentAbbreviation = 'Fing Cym'
self.instrumentSound = 'metal.cymbal.finger'
class CrashCymbals(Cymbals):
def __init__(self):
super().__init__()
self.instrumentName = 'Crash Cymbals'
self.instrumentAbbreviation = 'Cym'
self.instrumentSound = 'metal.cymbal.crash'
self.inGMPercMap = True
self._modifier = '1'
self._modifierToPercMapPitch = {'1': 49,
'2': 57,
}
self._percMapPitchToModifier = {49: '1',
57: '2',
}
self.percMapPitch = self._modifierToPercMapPitch[self._modifier]
class SuspendedCymbal(Cymbals):
def __init__(self):
super().__init__()
self.instrumentName = 'Suspended Cymbal'
# TODO: self.instrumentAbbreviation = ''
self.instrumentSound = 'metal.cymbal.suspended'
class SizzleCymbal(Cymbals):
def __init__(self):
super().__init__()
self.instrumentName = 'Sizzle Cymbal'
# TODO: self.instrumentAbbreviation = ''
self.instrumentSound = 'metal.cymbal.sizzle'
class SplashCymbals(Cymbals):
def __init__(self):
super().__init__()
self.instrumentName = 'Splash Cymbals'
# TODO: self.instrumentAbbreviation = ''
self.instrumentSound = 'metal.cymbal.splash'
class RideCymbals(Cymbals):
def __init__(self):
super().__init__()
self.instrumentName = 'Ride Cymbals'
# TODO: self.instrumentAbbreviation = ''
self.instrumentSound = 'metal.cymbal.ride'
class HiHatCymbal(Cymbals):
def __init__(self):
super().__init__()
self.instrumentName = 'Hi-Hat Cymbal'
self.instrumentSound = 'metal.hi-hat'
self.inGMPercMap = True
self._modifier = 'pedal'
self._modifierToPercMapPitch = {'pedal': 44,
'open': 46,
'closed': 42,
}
self._percMapPitchToModifier = {44: 'pedal',
46: 'open',
42: 'closed',
}
self.percMapPitch = self._modifierToPercMapPitch[self._modifier]
# TODO: self.instrumentAbbreviation = ''
class Triangle(UnpitchedPercussion):
def __init__(self):
super().__init__()
self.instrumentName = 'Triangle'
self.instrumentAbbreviation = 'Tri'
self.instrumentSound = 'metal.triangle'
self.inGMPercMap = True
self._modifier = 'open'
self._modifierToPercMapPitch = {'open': 81,
'mute': 80,
}
self._percMapPitchToModifier = {80: 'mute',
81: 'open',
}
self.percMapPitch = self._modifierToPercMapPitch[self._modifier]
class Cowbell(UnpitchedPercussion):
def __init__(self):
super().__init__()
self.instrumentName = 'Cowbell'
self.instrumentAbbreviation = 'Cwb'
self.instrumentSound = 'metal.bells.cowbell'
self.inGMPercMap = True
self.percMapPitch = 56
class Agogo(UnpitchedPercussion):
def __init__(self):
super().__init__()
self.instrumentName = 'Agogo'
# TODO: self.instrumentAbbreviation = ''
self.instrumentSound = 'metal.bells.agogo'
self.inGMPercMap = True
self.percMapPitch = 67
self.midiProgram = 113
class TamTam(UnpitchedPercussion):
def __init__(self):
super().__init__()
self.instrumentName = 'Tam-Tam'
# TODO: self.instrumentAbbreviation = ''
self.instrumentSound = 'metal.tamtam'
class SleighBells(UnpitchedPercussion):
def __init__(self):
super().__init__()
self.instrumentName = 'Sleigh Bells'
# TODO: self.instrumentAbbreviation = ''
self.instrumentSound = 'metal.bells.sleigh-bells'
class SnareDrum(UnpitchedPercussion):
def __init__(self):
super().__init__()
self.instrumentName = 'Snare Drum'
self.instrumentAbbreviation = 'Sn Dr'
self.instrumentSound = 'drum.snare-drum'
self.inGMPercMap = True
self._modifier = 'acoustic'
self._modifierToPercMapPitch = {'acoustic': 38,
'side': 37,
'electric': 40,
}
self._percMapPitchToModifier = {38: 'acoustic',
37: 'side',
40: 'electric',
}
self.percMapPitch = self._modifierToPercMapPitch[self._modifier]
class TenorDrum(UnpitchedPercussion):
def __init__(self):
super().__init__()
self.instrumentName = 'Tenor Drum'
self.instrumentAbbreviation = 'Ten Dr'
self.instrumentSound = 'drum.tenor-drum'
class BongoDrums(UnpitchedPercussion):
def __init__(self):
super().__init__()
self.instrumentName = 'Bongo Drums'
self.instrumentAbbreviation = 'Bgo Dr'
self.instrumentSound = 'drum.bongo'
self.inGMPercMap = True
self._modifier = 'high'
self._modifierToPercMapPitch = {'high': 60, 'low': 61}
self._percMapPitchToModifier = {60: 'high', 61: 'low'}
self.percMapPitch = self._modifierToPercMapPitch[self._modifier]
class TomTom(UnpitchedPercussion):
def __init__(self):
super().__init__()
self.instrumentName = 'Tom-Tom'
# TODO: self.instrumentAbbreviation = ''
self.instrumentSound = 'drum.tom-tom'
self.inGMPercMap = True
self._modifier = 'low floor'
self._modifierToPercMapPitch = {'low floor': 41, 'high floor': 43, 'low': 45,
'low-mid': 47, 'high-mid': 48, 'high': 50}
self._percMapPitchToModifier = {41: 'low floor', 43: 'high floor', 45: 'low',
47: 'low-mid', 48: 'high-mid', 50: 'high'}
self.percMapPitch = self._modifierToPercMapPitch[self._modifier]
class Timbales(UnpitchedPercussion):
def __init__(self):
super().__init__()
self.instrumentName = 'Timbales'
self.instrumentAbbreviation = 'Tim'
self.instrumentSound = 'drum.timbale'
self.inGMPercMap = True
self._modifier = 'high'
self._modifierToPercMapPitch = {'high': 65, 'low': 66}
self._percMapPitchToModifier = {65: 'high', 66: 'low'}
self.percMapPitch = self._modifierToPercMapPitch[self._modifier]
class CongaDrum(UnpitchedPercussion):
def __init__(self):
super().__init__()
self.instrumentName = 'Conga Drum'
self.instrumentAbbreviation = 'Cga Dr'
self.instrumentSound = 'drum.conga'
self.inGMPercMap = True
self._modifier = 'low'
self._modifierToPercMapPitch = {'low': 64, 'mute high': 62, 'open high': 63}
self._percMapPitchToModifier = {64: 'low', 62: 'mute high', 63: 'open high'}
self.percMapPitch = self._modifierToPercMapPitch[self._modifier]
class BassDrum(UnpitchedPercussion):
def __init__(self):
super().__init__()
self.instrumentName = 'Bass Drum'
self.instrumentAbbreviation = 'B Dr'
self.instrumentSound = 'drum.bass-drum'
self.inGMPercMap = True
self._modifier = 'acoustic'
self._modifierToPercMapPitch = {'acoustic': 35, '1': 36}
self._percMapPitchToModifier = {35: 'acoustic', 36: '1'}
self.percMapPitch = self._modifierToPercMapPitch[self._modifier]
class Taiko(UnpitchedPercussion):
def __init__(self):
super().__init__()
self.instrumentName = 'Taiko'
# TODO: self.instrumentAbbreviation = ''
self.instrumentSound = 'drum.taiko'
self.midiProgram = 116
class Tambourine(UnpitchedPercussion):
def __init__(self):
super().__init__()
self.instrumentName = 'Tambourine'
self.instrumentAbbreviation = 'Tmbn'
self.instrumentSound = 'drum.tambourine'
self.inGMPercMap = True
self.percMapPitch = 54
class Whip(UnpitchedPercussion):
def __init__(self):
super().__init__()
self.instrumentName = 'Whip'
# TODO: self.instrumentAbbreviation = ''
self.instrumentSound = 'effect.whip'
class Ratchet(UnpitchedPercussion):
def __init__(self):
super().__init__()
self.instrumentName = 'Ratchet'
# TODO: self.instrumentAbbreviation = ''
self.instrumentSound = 'rattle.ratchet'
class Siren(UnpitchedPercussion):
def __init__(self):
super().__init__()
self.instrumentName = 'Siren'
# TODO: self.instrumentAbbreviation = ''
self.instrumentSound = 'effect.siren'
class SandpaperBlocks(UnpitchedPercussion):
def __init__(self):
super().__init__()
self.instrumentName = 'Sandpaper Blocks'
self.instrumentAbbreviation = 'Sand Bl'
self.instrumentSound = 'wood.sand-block'
class WindMachine(UnpitchedPercussion):
def __init__(self):
super().__init__()
self.instrumentName = 'Wind Machine'
# TODO: self.instrumentAbbreviation = ''
self.instrumentSound = 'effect.wind'
# -----------------------------------------------------
class Vocalist(Instrument):
'''
n.b. called Vocalist to not be confused with stream.Voice
'''
def __init__(self):
super().__init__()
self.instrumentName = 'Voice'
self.instrumentAbbreviation = 'V'
self.midiProgram = 53
class Soprano(Vocalist):
def __init__(self):
super().__init__()
self.instrumentName = 'Soprano'
self.instrumentAbbreviation = 'S'
self.instrumentSound = 'voice.soprano'
class MezzoSoprano(Soprano):
def __init__(self):
super().__init__()
self.instrumentName = 'Mezzo-Soprano'
self.instrumentAbbreviation = 'Mez'
self.instrumentSound = 'voice.mezzo-soprano'
class Alto(Vocalist):
def __init__(self):
super().__init__()
self.instrumentName = 'Alto'
self.instrumentAbbreviation = 'A'
self.instrumentSound = 'voice.alto'
class Tenor(Vocalist):
def __init__(self):
super().__init__()
self.instrumentName = 'Tenor'
self.instrumentAbbreviation = 'T'
self.instrumentSound = 'voice.tenor'
class Baritone(Vocalist):
def __init__(self):
super().__init__()
self.instrumentName = 'Baritone'
self.instrumentAbbreviation = 'Bar'
self.instrumentSound = 'voice.baritone'
class Bass(Vocalist):
def __init__(self):
super().__init__()
self.instrumentName = 'Bass'
self.instrumentAbbreviation = 'B'
self.instrumentSound = 'voice.bass'
class Choir(Vocalist):
def __init__(self):
super().__init__()
self.instrumentName = 'Choir'
self.instrumentAbbreviation = 'Ch'
self.instrumentSound = 'voice.choir'
self.midiProgram = 52
# -----------------------------------------------------
class Conductor(Instrument):
'''Presently used only for tracking the MIDI track containing tempo,
key signature, and related metadata.'''
def __init__(self):
super().__init__(instrumentName='Conductor')
# -----------------------------------------------------------------------------
# noinspection SpellCheckingInspection
ensembleNamesBySize = ['no performers', 'solo', 'duet', 'trio', 'quartet',
'quintet', 'sextet', 'septet', 'octet', 'nonet', 'dectet',
'undectet', 'duodectet', 'tredectet', 'quattuordectet',
'quindectet', 'sexdectet', 'septendectet', 'octodectet',
'novemdectet', 'vigetet', 'unvigetet', 'duovigetet',
'trevigetet', 'quattuorvigetet', 'quinvigetet', 'sexvigetet',
'septenvigetet', 'octovigetet', 'novemvigetet',
'trigetet', 'untrigetet', 'duotrigetet', 'tretrigetet',
                       'quattuortrigetet', 'quintrigetet', 'sextrigetet',
'septentrigetet', 'octotrigetet', 'novemtrigetet',
'quadragetet', 'unquadragetet', 'duoquadragetet',
'trequadragetet', 'quattuorquadragetet', 'quinquadragetet',
                       'sexquadragetet', 'septenquadragetet', 'octoquadragetet',
'novemquadragetet', 'quinquagetet', 'unquinquagetet',
                       'duoquinquagetet', 'trequinquagetet', 'quattuorquinquagetet',
'quinquinquagetet', 'sexquinquagetet', 'septenquinquagetet',
'octoquinquagetet', 'novemquinquagetet', 'sexagetet',
                       'unsexagetet', 'duosexagetet', 'tresexagetet',
                       'quattuorsexagetet', 'quinsexagetet', 'sexsexagetet',
'septensexagetet', 'octosexagetet', 'novemsexagetet',
'septuagetet', 'unseptuagetet', 'duoseptuagetet', 'treseptuagetet',
'quattuorseptuagetet', 'quinseptuagetet', 'sexseptuagetet',
'septenseptuagetet', 'octoseptuagetet', 'novemseptuagetet',
'octogetet', 'unoctogetet', 'duooctogetet',
'treoctogetet', 'quattuoroctogetet', 'quinoctogetet',
                       'sexoctogetet', 'septenoctogetet', 'octooctogetet',
'novemoctogetet', 'nonagetet', 'unnonagetet', 'duononagetet',
'trenonagetet', 'quattuornonagetet', 'quinnonagetet',
'sexnonagetet', 'septennonagetet', 'octononagetet',
'novemnonagetet', 'centet']
def ensembleNameBySize(number):
'''
return the name of a generic ensemble with "number" players:
>>> instrument.ensembleNameBySize(4)
'quartet'
>>> instrument.ensembleNameBySize(1)
'solo'
>>> instrument.ensembleNameBySize(83)
'treoctogetet'
'''
if number > 100:
return 'large ensemble'
elif number < 0:
raise InstrumentException('okay, you are on your own for this one buddy')
else:
return ensembleNamesBySize[int(number)]
def deduplicate(s: stream.Stream, inPlace: bool = False) -> stream.Stream:
'''
Check every offset in `s` for multiple instrument instances.
If the `.partName` can be standardized across instances,
i.e. if each instance has the same value or `None`,
and likewise for `.instrumentName`, standardize the attributes.
Further, and only if the above conditions are met,
if there are two instances of the same class, remove all but one;
if at least one generic `Instrument` instance is found at the same
offset as one or more specific instruments, remove the generic `Instrument` instances.
Two `Instrument` instances:
>>> i1 = instrument.Instrument(instrumentName='Semi-Hollow Body')
>>> i2 = instrument.Instrument()
>>> i2.partName = 'Electric Guitar'
>>> s1 = stream.Stream()
>>> s1.insert(4, i1)
>>> s1.insert(4, i2)
>>> list(s1.getInstruments())
[<music21.instrument.Instrument 'Semi-Hollow Body'>,
<music21.instrument.Instrument 'Electric Guitar: '>]
>>> post = instrument.deduplicate(s1)
>>> list(post.getInstruments())
[<music21.instrument.Instrument 'Electric Guitar: Semi-Hollow Body'>]
One `Instrument` instance and one subclass instance, with `inPlace` and parts:
>>> from music21.stream import Score, Part
>>> i3 = instrument.Instrument()
>>> i3.partName = 'Piccolo'
>>> i4 = instrument.Piccolo()
>>> s2 = stream.Score()
>>> p1 = stream.Part()
>>> p1.append([i3, i4])
>>> p2 = stream.Part()
>>> p2.append([instrument.Flute(), instrument.Flute()])
>>> s2.insert(0, p1)
>>> s2.insert(0, p2)
>>> list(p1.getInstruments())
[<music21.instrument.Instrument 'Piccolo: '>, <music21.instrument.Piccolo 'Piccolo'>]
>>> list(p2.getInstruments())
[<music21.instrument.Flute 'Flute'>, <music21.instrument.Flute 'Flute'>]
>>> s2 = instrument.deduplicate(s2, inPlace=True)
>>> list(p1.getInstruments())
[<music21.instrument.Piccolo 'Piccolo: Piccolo'>]
>>> list(p2.getInstruments())
[<music21.instrument.Flute 'Flute'>]
'''
if inPlace:
returnObj = s
else:
returnObj = s.coreCopyAsDerivation('instrument.deduplicate')
if not returnObj.hasPartLikeStreams():
substreams = [returnObj]
else:
substreams = returnObj.getElementsByClass('Stream')
for sub in substreams:
oTree = OffsetTree(sub.recurse().getElementsByClass('Instrument'))
for o in oTree:
if len(o) == 1:
continue
notNonePartNames = {i.partName for i in o if i.partName is not None}
notNoneInstNames = {i.instrumentName for i in o if i.instrumentName is not None}
# Proceed only if 0-1 part name AND 0-1 instrument name candidates
if len(notNonePartNames) > 1 or len(notNoneInstNames) > 1:
continue
partName = None
for pName in notNonePartNames:
partName = pName
instrumentName = None
for iName in notNoneInstNames:
instrumentName = iName
classes = {inst.__class__ for inst in o}
# Case: 2+ instances of the same class
if len(classes) == 1:
surviving = None
# Treat first as the surviving instance and standardize name
for inst in o:
inst.partName = partName
inst.instrumentName = instrumentName
surviving = inst
break
# Remove remaining instruments
for inst in o:
if inst is surviving:
continue
sub.remove(inst, recurse=True)
# Case: mixed classes: standardize names
# Remove instances of generic `Instrument` if found
else:
for inst in o:
if inst.__class__ == Instrument:
sub.remove(inst, recurse=True)
else:
inst.partName = partName
inst.instrumentName = instrumentName
return returnObj
# For lookup by MIDI Program
# TODOs should be resolved with another mapping from MIDI program
# to .instrumentSound
MIDI_PROGRAM_TO_INSTRUMENT = {
0: Piano,
1: Piano,
2: ElectricPiano,
3: Piano,
4: ElectricPiano,
5: ElectricPiano,
6: Harpsichord,
7: Clavichord,
8: Celesta,
9: Glockenspiel,
10: Glockenspiel, # TODO: MusicBox
11: Vibraphone,
12: Marimba,
13: Xylophone,
14: TubularBells,
15: Dulcimer,
16: ElectricOrgan, # TODO: instrumentSound
17: ElectricOrgan, # TODO: instrumentSound
18: ElectricOrgan, # TODO: instrumentSound
19: PipeOrgan,
20: ReedOrgan,
21: Accordion,
22: Harmonica,
23: Accordion, # TODO: instrumentSound
24: AcousticGuitar, # TODO: instrumentSound
25: AcousticGuitar, # TODO: instrumentSound
26: ElectricGuitar, # TODO: instrumentSound
27: ElectricGuitar, # TODO: instrumentSound
28: ElectricGuitar, # TODO: instrumentSound
29: ElectricGuitar, # TODO: instrumentSound
30: ElectricGuitar, # TODO: instrumentSound
31: ElectricGuitar, # TODO: instrumentSound
32: AcousticBass,
33: ElectricBass,
34: ElectricBass, # TODO: instrumentSound
35: FretlessBass,
36: ElectricBass, # TODO: instrumentSound
37: ElectricBass, # TODO: instrumentSound
38: ElectricBass, # TODO: instrumentSound
39: ElectricBass, # TODO: instrumentSound
40: Violin,
41: Viola,
42: Violoncello,
43: Contrabass,
44: StringInstrument, # TODO: instrumentSound
45: StringInstrument, # TODO: instrumentSound
46: Harp,
47: Timpani,
48: StringInstrument, # TODO: instrumentSound
49: StringInstrument, # TODO: instrumentSound
50: StringInstrument, # TODO: instrumentSound
51: StringInstrument, # TODO: instrumentSound
52: Choir, # TODO: instrumentSound
53: Vocalist, # TODO: instrumentSound
54: Vocalist, # TODO: instrumentSound
55: Sampler,
56: Trumpet,
57: Trombone,
58: Tuba,
59: Trumpet, # TODO: instrumentSound
60: Horn,
61: BrassInstrument, # TODO: instrumentSound
62: BrassInstrument, # TODO: instrumentSound
63: BrassInstrument, # TODO: instrumentSound
64: SopranoSaxophone,
65: AltoSaxophone,
66: TenorSaxophone,
67: BaritoneSaxophone,
68: Oboe,
69: EnglishHorn,
70: Bassoon,
71: Clarinet,
72: Piccolo,
73: Flute,
74: Recorder,
75: PanFlute,
76: PanFlute, # TODO 76: Bottle
77: Shakuhachi,
78: Whistle,
79: Ocarina,
80: Sampler, # TODO: all Sampler here and below: instrumentSound
81: Sampler,
82: Sampler,
83: Sampler,
84: Sampler,
85: Sampler,
86: Sampler,
87: Sampler,
88: Sampler,
89: Sampler,
90: Sampler,
91: Sampler,
92: Sampler,
93: Sampler,
94: Sampler,
95: Sampler,
96: Sampler,
97: Sampler,
98: Sampler,
99: Sampler,
100: Sampler,
101: Sampler,
102: Sampler,
103: Sampler,
104: Sitar,
105: Banjo,
106: Shamisen,
107: Koto,
108: Kalimba,
109: Bagpipes,
110: Violin, # TODO: instrumentSound
111: Shehnai,
112: Glockenspiel, # TODO 112: Tinkle Bell
113: Agogo,
114: SteelDrum,
115: Woodblock,
116: Taiko,
117: TomTom,
118: Sampler, # TODO: instrumentSound # debatable if this should be drum?
119: Sampler,
120: Sampler,
121: Sampler,
122: Sampler,
123: Sampler,
124: Sampler,
125: Sampler,
126: Sampler,
127: Sampler
}
def instrumentFromMidiProgram(number: int) -> Instrument:
'''
Return the instrument with "number" as its assigned MIDI program.
    Notice that several MIDI programs map to the same instrument class; for
    instance, programs 0, 1, and 3 all return Piano.
Lookups are performed against `instrument.MIDI_PROGRAM_TO_INSTRUMENT`.
>>> instrument.instrumentFromMidiProgram(4)
<music21.instrument.ElectricPiano 'Electric Piano'>
>>> instrument.instrumentFromMidiProgram(21)
<music21.instrument.Accordion 'Accordion'>
>>> instrument.instrumentFromMidiProgram(500)
Traceback (most recent call last):
music21.exceptions21.InstrumentException: No instrument found for MIDI program 500
>>> instrument.instrumentFromMidiProgram('43')
Traceback (most recent call last):
TypeError: Expected int, got <class 'str'>
'''
try:
class_ = MIDI_PROGRAM_TO_INSTRUMENT[number]
inst = class_()
inst.midiProgram = number
# TODO: if midiProgram in MIDI_PROGRAM_SOUND_MAP:
# inst.instrumentSound = MIDI_PROGRAM_SOUND_MAP[midiProgram]
except KeyError as e:
if not isinstance(number, int):
raise TypeError(f'Expected int, got {type(number)}') from e
raise InstrumentException(f'No instrument found for MIDI program {number}') from e
return inst
def partitionByInstrument(streamObj):
# noinspection PyShadowingNames
'''
Given a single Stream, or a Score or similar multi-part structure,
partition into a Part for each unique Instrument, joining events
possibly from different parts.
>>> p1 = converter.parse("tinynotation: 4/4 c4 d e f g a b c' c1")
>>> p2 = converter.parse("tinynotation: 4/4 C#4 D# E# F# G# A# B# c# C#1")
>>> p1.getElementsByClass('Measure')[0].insert(0.0, instrument.Piccolo())
>>> p1.getElementsByClass('Measure')[0].insert(2.0, instrument.AltoSaxophone())
>>> p1.getElementsByClass('Measure')[1].insert(3.0, instrument.Piccolo())
>>> p2.getElementsByClass('Measure')[0].insert(0.0, instrument.Trombone())
>>> p2.getElementsByClass('Measure')[0].insert(3.0, instrument.Piccolo()) # not likely...
>>> p2.getElementsByClass('Measure')[1].insert(1.0, instrument.Trombone())
>>> s = stream.Score()
>>> s.insert(0, p1)
>>> s.insert(0, p2)
>>> s.show('text')
{0.0} <music21.stream.Part ...>
{0.0} <music21.stream.Measure 1 offset=0.0>
{0.0} <music21.instrument.Piccolo 'Piccolo'>
{0.0} <music21.clef.TrebleClef>
{0.0} <music21.meter.TimeSignature 4/4>
{0.0} <music21.note.Note C>
{1.0} <music21.note.Note D>
{2.0} <music21.instrument.AltoSaxophone 'Alto Saxophone'>
{2.0} <music21.note.Note E>
{3.0} <music21.note.Note F>
{4.0} <music21.stream.Measure 2 offset=4.0>
{0.0} <music21.note.Note G>
{1.0} <music21.note.Note A>
{2.0} <music21.note.Note B>
{3.0} <music21.instrument.Piccolo 'Piccolo'>
{3.0} <music21.note.Note C>
{8.0} <music21.stream.Measure 3 offset=8.0>
{0.0} <music21.note.Note C>
{4.0} <music21.bar.Barline type=final>
{0.0} <music21.stream.Part ...>
{0.0} <music21.stream.Measure 1 offset=0.0>
{0.0} <music21.instrument.Trombone 'Trombone'>
{0.0} <music21.clef.BassClef>
{0.0} <music21.meter.TimeSignature 4/4>
{0.0} <music21.note.Note C#>
{1.0} <music21.note.Note D#>
{2.0} <music21.note.Note E#>
{3.0} <music21.instrument.Piccolo 'Piccolo'>
{3.0} <music21.note.Note F#>
{4.0} <music21.stream.Measure 2 offset=4.0>
{0.0} <music21.note.Note G#>
{1.0} <music21.instrument.Trombone 'Trombone'>
{1.0} <music21.note.Note A#>
{2.0} <music21.note.Note B#>
{3.0} <music21.note.Note C#>
{8.0} <music21.stream.Measure 3 offset=8.0>
{0.0} <music21.note.Note C#>
{4.0} <music21.bar.Barline type=final>
>>> s2 = instrument.partitionByInstrument(s)
>>> len(s2.parts)
3
# TODO: this step might not be necessary...
>>> for p in s2.parts:
... p.makeRests(fillGaps=True, inPlace=True)
# TODO: this step SHOULD not be necessary (.template())...
>>> for p in s2.parts:
... p.makeMeasures(inPlace=True)
... p.makeTies(inPlace=True)
>>> s2.show('text')
{0.0} <music21.stream.Part Piccolo>
{0.0} <music21.stream.Measure 1 offset=0.0>
{0.0} <music21.instrument.Piccolo 'Piccolo'>
{0.0} <music21.clef.TrebleClef>
{0.0} <music21.meter.TimeSignature 4/4>
{0.0} <music21.note.Note C>
{1.0} <music21.note.Note D>
{2.0} <music21.note.Rest quarter>
{3.0} <music21.note.Note F#>
{4.0} <music21.stream.Measure 2 offset=4.0>
{0.0} <music21.note.Note G#>
{1.0} <music21.note.Rest half>
{3.0} <music21.note.Note C>
{8.0} <music21.stream.Measure 3 offset=8.0>
{0.0} <music21.note.Note C>
{4.0} <music21.bar.Barline type=final>
{0.0} <music21.stream.Part Alto Saxophone>
{0.0} <music21.stream.Measure 1 offset=0.0>
{0.0} <music21.instrument.AltoSaxophone 'Alto Saxophone'>
{0.0} <music21.clef.TrebleClef>
{0.0} <music21.meter.TimeSignature 4/4>
{0.0} <music21.note.Rest half>
{2.0} <music21.note.Note E>
{3.0} <music21.note.Note F>
{4.0} <music21.stream.Measure 2 offset=4.0>
{0.0} <music21.note.Note G>
{1.0} <music21.note.Note A>
{2.0} <music21.note.Note B>
{3.0} <music21.bar.Barline type=final>
{0.0} <music21.stream.Part Trombone>
{0.0} <music21.stream.Measure 1 offset=0.0>
{0.0} <music21.instrument.Trombone 'Trombone'>
{0.0} <music21.clef.BassClef>
{0.0} <music21.meter.TimeSignature 4/4>
{0.0} <music21.note.Note C#>
{1.0} <music21.note.Note D#>
{2.0} <music21.note.Note E#>
{3.0} <music21.note.Rest quarter>
{4.0} <music21.stream.Measure 2 offset=4.0>
{0.0} <music21.note.Rest quarter>
{1.0} <music21.note.Note A#>
{2.0} <music21.note.Note B#>
{3.0} <music21.note.Note C#>
{8.0} <music21.stream.Measure 3 offset=8.0>
{0.0} <music21.note.Note C#>
{4.0} <music21.bar.Barline type=final>
TODO: parts should be in Score Order. Coincidence that this almost works.
TODO: use proper recursion to make a copy of the stream.
TODO: final barlines should be aligned.
'''
if not streamObj.hasPartLikeStreams():
# place in a score for uniform operations
s = stream.Score()
s.insert(0, streamObj.flatten())
else:
s = stream.Score()
# append flat parts
for sub in streamObj.getElementsByClass(stream.Stream):
s.insert(0, sub.flatten())
# first, let's extend the duration of each instrument to match stream
for sub in s.getElementsByClass(stream.Stream):
sub.extendDuration('Instrument', inPlace=True)
    # next, find all unique instruments
instrumentIterator = s.recurse().getElementsByClass(Instrument)
if not instrumentIterator:
# TODO(msc): v7 return s.
return None # no partition is available
names = OrderedDict() # store unique names
for instrumentObj in instrumentIterator:
# matching here by instrument name
if instrumentObj.instrumentName not in names:
names[instrumentObj.instrumentName] = {'Instrument': instrumentObj}
# just store one instance
# create a return object that has a part for each instrument
post = stream.Score()
for iName in names:
p = stream.Part()
p.id = iName
# add the instrument instance
p.insert(0, names[iName]['Instrument'])
# store a handle to this part
names[iName]['Part'] = p
post.insert(0, p)
# iterate over flat sources; get events within each defined instrument
# add to corresponding part
for el in s:
if not el.isStream:
post.insert(el.offset, el)
subStream = el
for i in subStream.getElementsByClass(Instrument):
start = i.offset
# duration will have been set with sub.extendDuration above
end = i.offset + i.duration.quarterLength
# get destination Part
p = names[i.instrumentName]['Part']
coll = subStream.getElementsByOffset(
start,
end,
# do not include elements that start at the end
includeEndBoundary=False,
mustFinishInSpan=False,
mustBeginInSpan=True
)
# add to part at original offset
# do not gather instrument
for e in coll.getElementsNotOfClass(Instrument):
try:
p.insert(subStream.elementOffset(e), e)
except stream.StreamException:
pass
# it is possible to enter an element twice because the getElementsByOffset
# might return something twice if it's at the same offset as the
# instrument switch...
for inst in post.recurse().getElementsByClass(Instrument):
inst.duration.quarterLength = 0
return post
def _combinations(instrumentString):
'''
    find all combinations of adjacent words in instrumentString. (Punctuation
    should already have been removed by the caller, as fromString does.)
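
    For example, groups of adjacent words are returned from shortest to longest:

    >>> instrument._combinations('alto saxophone')
    ['alto', 'saxophone', 'alto saxophone']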
'''
sampleList = instrumentString.split()
allComb = []
for size in range(1, len(sampleList) + 1):
for i in range(len(sampleList) - size + 1):
allComb.append(' '.join(sampleList[i:i + size]))
return allComb
def fromString(instrumentString):
'''
Given a string with instrument content (from an orchestral score
for example), attempts to return an appropriate
:class:`~music21.instrument.Instrument`.
>>> from music21 import instrument
>>> t1 = instrument.fromString('Clarinet 2 in A')
>>> t1
<music21.instrument.Clarinet 'Clarinet 2 in A'>
>>> t1.transposition
<music21.interval.Interval m-3>
>>> t2 = instrument.fromString('Clarinetto 3')
>>> t2
<music21.instrument.Clarinet 'Clarinetto 3'>
>>> t3 = instrument.fromString('flauto 2')
>>> t3
<music21.instrument.Flute 'flauto 2'>
Excess information is ignored, and the useful information can be extracted
correctly as long as it's sequential.
>>> t4 = instrument.fromString('I <3 music saxofono tenore go beavers')
>>> t4
<music21.instrument.TenorSaxophone 'I <3 music saxofono tenore go beavers'>
Some more demos:
>>> t5 = instrument.fromString('Bb Clarinet')
>>> t5
<music21.instrument.Clarinet 'Bb Clarinet'>
>>> t5.transposition
<music21.interval.Interval M-2>
>>> t6 = instrument.fromString('Clarinet in B-flat')
>>> t5.__class__ == t6.__class__
True
>>> t5.transposition == t6.transposition
True
>>> t7 = instrument.fromString('B-flat Clarinet.')
>>> t5.__class__ == t7.__class__ and t5.transposition == t7.transposition
True
>>> t8 = instrument.fromString('Eb Clarinet')
>>> t5.__class__ == t8.__class__
True
>>> t8.transposition
<music21.interval.Interval m3>
Note that because of the ubiquity of B-flat clarinets and trumpets, and the
rareness of B-natural forms of those instruments, this gives a B-flat, not
B-natural clarinet, using the German form:
>>> t9 = instrument.fromString('Klarinette in B.')
>>> t9
<music21.instrument.Clarinet 'Klarinette in B.'>
>>> t9.transposition
<music21.interval.Interval M-2>
Use "H" or "b-natural" to get an instrument in B-major. Or donate one to me
and I'll change this back!
Finally, standard abbreviations are acceptable:
>>> t10 = instrument.fromString('Cl in B-flat')
>>> t10
<music21.instrument.Clarinet 'Cl in B-flat'>
>>> t10.transposition
<music21.interval.Interval M-2>
This should work with or without a terminal period (for both 'Cl' and 'Cl.'):
>>> t11 = instrument.fromString('Cl. in B-flat')
>>> t11.__class__ == t10.__class__
True
Previously an exact instrument name was not always working:
>>> instrument.fromString('Flute')
<music21.instrument.Flute 'Flute'>
This common MIDI instrument was not previously working:
>>> instrument.fromString('Choir (Aahs)')
<music21.instrument.Choir 'Choir (Aahs)'>
'''
# pylint: disable=undefined-variable
from music21.languageExcerpts import instrumentLookup
instrumentStringOrig = instrumentString
instrumentString = instrumentString.replace('.', ' ') # sic, before removePunctuation
instrumentString = common.removePunctuation(instrumentString)
allCombinations = _combinations(instrumentString)
# First task: Find the best instrument.
bestInstClass = None
bestInstrument = None
bestName = None
for substring in allCombinations:
substring = substring.lower()
try:
if substring in instrumentLookup.bestNameToInstrumentClass:
englishName = substring
else:
englishName = instrumentLookup.allToBestName[substring]
className = instrumentLookup.bestNameToInstrumentClass[englishName]
# This would be unsafe...
thisInstClass = globals()[className]
thisInstClassParentClasses = [parentCls.__name__ for parentCls in thisInstClass.mro()]
# if not for this...
if ('Instrument' not in thisInstClassParentClasses
or 'Music21Object' not in thisInstClassParentClasses):
# little bit of security against calling another global...
raise KeyError
thisInstrument = thisInstClass()
thisBestName = thisInstrument.bestName().lower()
if (bestInstClass is None
or len(thisBestName.split()) >= len(bestName.split())
and not issubclass(bestInstClass, thisInstClass)):
# priority is also given to same length instruments which fall later
# on in the string (i.e. Bb Piccolo Trumpet)
bestInstClass = thisInstClass
bestInstrument = thisInstrument
bestInstrument.instrumentName = instrumentStringOrig
bestName = thisBestName
except KeyError:
pass
if bestInstClass is None:
raise InstrumentException(
f'Could not match string with instrument: {instrumentStringOrig}')
if bestName not in instrumentLookup.transposition:
return bestInstrument
# A transposition table is defined for the instrument.
# Second task: Determine appropriate transposition (if any)
for substring in allCombinations:
try:
bestPitch = instrumentLookup.pitchFullNameToName[substring.lower()]
bestInterval = instrumentLookup.transposition[bestName][bestPitch]
bestInstrument.transposition = interval.Interval(bestInterval)
break
except KeyError:
pass
return bestInstrument
# ------------------------------------------------------------------------------
class TestExternal(unittest.TestCase):
pass
class Test(unittest.TestCase):
def testCopyAndDeepcopy(self):
'''Test copying all objects defined in this module
'''
import types
for part in sys.modules[self.__module__].__dict__.keys():
match = False
for skip in ['_', '__', 'Test', 'Exception']:
if part.startswith(skip) or part.endswith(skip):
match = True
if match:
continue
name = getattr(sys.modules[self.__module__], part)
# noinspection PyTypeChecker
if callable(name) and not isinstance(name, types.FunctionType):
try: # see if obj can be made w/ args
obj = name()
except TypeError: # pragma: no cover
continue
i = copy.copy(obj)
j = copy.deepcopy(obj)
def testMusicXMLExport(self):
s1 = stream.Stream()
i1 = Violin()
i1.partName = 'test'
s1.append(i1)
s1.repeatAppend(note.Note(), 10)
# s.show()
s2 = stream.Stream()
i2 = Piano()
i2.partName = 'test2'
s2.append(i2)
s2.repeatAppend(note.Note('g4'), 10)
s3 = stream.Score()
s3.insert(0, s1)
s3.insert(0, s2)
# s3.show()
def testPartitionByInstrumentA(self):
from music21 import instrument
# basic case of instruments in Parts
s = stream.Score()
p1 = stream.Part()
p1.append(instrument.Piano())
p2 = stream.Part()
p2.append(instrument.Piccolo())
s.insert(0, p1)
s.insert(0, p2)
post = instrument.partitionByInstrument(s)
self.assertEqual(len(post), 2)
self.assertEqual(len(post.flatten().getElementsByClass('Instrument')), 2)
# post.show('t')
# one Stream with multiple instruments
s = stream.Stream()
s.insert(0, instrument.PanFlute())
s.insert(20, instrument.ReedOrgan())
post = instrument.partitionByInstrument(s)
self.assertEqual(len(post), 2)
self.assertEqual(len(post.flatten().getElementsByClass('Instrument')), 2)
# post.show('t')
def testPartitionByInstrumentB(self):
from music21 import instrument
# basic case of instruments in Parts
s = stream.Score()
p1 = stream.Part()
p1.append(instrument.Piano())
p1.repeatAppend(note.Note(), 6)
p2 = stream.Part()
p2.append(instrument.Piccolo())
p2.repeatAppend(note.Note(), 12)
s.insert(0, p1)
s.insert(0, p2)
post = instrument.partitionByInstrument(s)
self.assertEqual(len(post), 2)
self.assertEqual(len(post.flatten().getElementsByClass('Instrument')), 2)
self.assertEqual(len(post.parts[0].notes), 6)
self.assertEqual(len(post.parts[1].notes), 12)
def testPartitionByInstrumentC(self):
from music21 import instrument
# basic case of instruments in Parts
s = stream.Score()
p1 = stream.Part()
p1.append(instrument.Piano())
p1.repeatAppend(note.Note('a'), 6)
# will go in next available offset
p1.append(instrument.AcousticGuitar())
p1.repeatAppend(note.Note('b'), 3)
p2 = stream.Part()
p2.append(instrument.Piccolo())
p2.repeatAppend(note.Note('c'), 2)
p2.append(instrument.Flute())
p2.repeatAppend(note.Note('d'), 4)
s.insert(0, p1)
s.insert(0, p2)
post = instrument.partitionByInstrument(s)
self.assertEqual(len(post), 4) # 4 instruments
self.assertEqual(len(post.flatten().getElementsByClass('Instrument')), 4)
self.assertEqual(post.parts[0].getInstrument().instrumentName, 'Piano')
self.assertEqual(len(post.parts[0].notes), 6)
self.assertEqual(post.parts[1].getInstrument().instrumentName, 'Acoustic Guitar')
self.assertEqual(len(post.parts[1].notes), 3)
self.assertEqual(post.parts[2].getInstrument().instrumentName, 'Piccolo')
self.assertEqual(len(post.parts[2].notes), 2)
self.assertEqual(post.parts[3].getInstrument().instrumentName, 'Flute')
self.assertEqual(len(post.parts[3].notes), 4)
# environLocal.printDebug(['post processing'])
# post.show('t')
def testPartitionByInstrumentD(self):
from music21 import instrument
# basic case of instruments in Parts
s = stream.Score()
p1 = stream.Part()
p1.append(instrument.Piano())
p1.repeatAppend(note.Note('a'), 6)
# will go in next available offset
p1.append(instrument.AcousticGuitar())
p1.repeatAppend(note.Note('b'), 3)
p1.append(instrument.Piano())
p1.repeatAppend(note.Note('e'), 5)
p2 = stream.Part()
p2.append(instrument.Piccolo())
p2.repeatAppend(note.Note('c'), 2)
p2.append(instrument.Flute())
p2.repeatAppend(note.Note('d'), 4)
p2.append(instrument.Piano())
p2.repeatAppend(note.Note('f'), 1)
s.insert(0, p1)
s.insert(0, p2)
post = instrument.partitionByInstrument(s)
self.assertEqual(len(post), 4) # 4 instruments
self.assertEqual(len(post.flatten().getElementsByClass('Instrument')), 4)
# piano spans are joined together
self.assertEqual(post.parts[0].getInstrument().instrumentName, 'Piano')
self.assertEqual(len(post.parts[0].notes), 12)
self.assertEqual([n.offset for n in post.parts[0].notes],
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 9.0, 10.0, 11.0, 12.0, 13.0])
# environLocal.printDebug(['post processing'])
# post.show('t')
def testPartitionByInstrumentE(self):
from music21 import instrument
# basic case of instruments in Parts
# s = stream.Score()
p1 = stream.Part()
p1.append(instrument.Piano())
p1.repeatAppend(note.Note('a'), 6)
# will go in next available offset
p1.append(instrument.AcousticGuitar())
p1.repeatAppend(note.Note('b'), 3)
p1.append(instrument.Piano())
p1.repeatAppend(note.Note('e'), 5)
p1.append(instrument.Piccolo())
p1.repeatAppend(note.Note('c'), 2)
p1.append(instrument.Flute())
p1.repeatAppend(note.Note('d'), 4)
p1.append(instrument.Piano())
p1.repeatAppend(note.Note('f'), 1)
s = p1
post = instrument.partitionByInstrument(s)
self.assertEqual(len(post), 4) # 4 instruments
self.assertEqual(len(post.flatten().getElementsByClass('Instrument')), 4)
# piano spans are joined together
self.assertEqual(post.parts[0].getInstrument().instrumentName, 'Piano')
self.assertEqual(len(post.parts[0].notes), 12)
offsetList = []
ppn = post.parts[0].notes
for n in ppn:
offsetList.append(n.offset)
self.assertEqual(offsetList,
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 9.0, 10.0, 11.0, 12.0, 13.0, 20.0])
def testPartitionByInstrumentF(self):
from music21 import instrument
s1 = stream.Stream()
s1.append(instrument.AcousticGuitar())
s1.append(note.Note())
s1.append(instrument.Tuba())
s1.append(note.Note())
post = instrument.partitionByInstrument(s1)
        self.assertEqual(len(post), 2)  # 2 instruments
# def testPartitionByInstrumentDocTest(self):
# '''
# For debugging the doctest.
# '''
# from music21 import instrument, converter, stream
# p1 = converter.parse("tinynotation: 4/4 c4 d e f g a b c' c1")
# p2 = converter.parse("tinynotation: 4/4 C#4 D# E# F# G# A# B# c# C#1")
#
# p1.getElementsByClass('Measure')[0].insert(0.0, instrument.Piccolo())
# p1.getElementsByClass('Measure')[0].insert(2.0, instrument.AltoSaxophone())
# p1.getElementsByClass('Measure')[1].insert(3.0, instrument.Piccolo())
#
# p2.getElementsByClass('Measure')[0].insert(0.0, instrument.Trombone())
# p2.getElementsByClass('Measure')[0].insert(3.0, instrument.Piccolo()) # not likely...
# p2.getElementsByClass('Measure')[1].insert(1.0, instrument.Trombone())
#
# s = stream.Score()
# s.insert(0, p1)
# s.insert(0, p2)
# s2 = instrument.partitionByInstrument(s)
# for p in s2.parts:
# p.makeRests(fillGaps=True, inPlace=True)
# ------------------------------------------------------------------------------
# define presented order in documentation
_DOC_ORDER = [Instrument]
if __name__ == '__main__':
# sys.arg test options will be used in mainTest()
import music21
music21.mainTest(Test)
|
overholt/stores/forms.py | prdonahue/overholt | 1,152 | 11099704 | <gh_stars>1000+
# -*- coding: utf-8 -*-
"""
overholt.stores.forms
~~~~~~~~~~~~~~~~~~~~~
Store forms
"""
from flask_wtf import Form, TextField, Required, Optional
__all__ = ['NewStoreForm', 'UpdateStoreForm']
class NewStoreForm(Form):
name = TextField('Name', validators=[Required()])
address = TextField('Address', validators=[Required()])
city = TextField('City', validators=[Required()])
state = TextField('State', validators=[Required()])
zip_code = TextField('Zip Code', validators=[Required()])
class UpdateStoreForm(Form):
name = TextField('Name', validators=[Optional()])
address = TextField('Address', validators=[Optional()])
city = TextField('City', validators=[Optional()])
state = TextField('State', validators=[Optional()])
zip_code = TextField('Zip Code', validators=[Optional()])
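

# Illustrative usage sketch (the calling view and the create_store helper are
# hypothetical, not part of this module):
#
#   form = NewStoreForm()
#   if form.validate_on_submit():
#       create_store(name=form.name.data, address=form.address.data)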
|
DQM/TrackingMonitorSource/python/pixelTracksMonitoring_cff.py | malbouis/cmssw | 852 | 11099711 | import FWCore.ParameterSet.Config as cms
from DQM.TrackingMonitor.TrackerCollisionTrackingMonitor_cfi import *
pixelTracksMonitor = TrackerCollisionTrackMon.clone(
FolderName = 'Tracking/PixelTrackParameters/pixelTracks',
TrackProducer = 'pixelTracks',
allTrackProducer = 'pixelTracks',
beamSpot = 'offlineBeamSpot',
primaryVertex = 'pixelVertices',
pvNDOF = 1,
doAllPlots = False,
doLumiAnalysis = True,
doProfilesVsLS = True,
doDCAPlots = True,
doEffFromHitPatternVsPU = False,
doEffFromHitPatternVsBX = False,
doEffFromHitPatternVsLUMI = False,
doPlotsVsGoodPVtx = True,
doPlotsVsLUMI = True,
doPlotsVsBX = True
)
_trackSelector = cms.EDFilter('TrackSelector',
src = cms.InputTag('pixelTracks'),
cut = cms.string("")
)
quality = {
"L" : "loose",
"T" : "tight",
"HP" : "highPurity",
}
for key,value in quality.items():
label = "pixelTrks"+key
# print label
cutstring = "quality('" + value + "')"
# print cutstring
if label not in globals():
locals()[label] = _trackSelector.clone( cut = cutstring )
locals()[label].setLabel(label)
else :
print(label,"already configured")
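
# N.B. the loop above defines one module-level track selector per quality
# working point, i.e. pixelTrksL, pixelTrksT and pixelTrksHP for the keys above.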
for key,value in quality.items():
label = "pixelTrksMonitor"+key
locals()[label] = pixelTracksMonitor.clone(
TrackProducer = "pixelTrks"+key,
FolderName = "Tracking/PixelTrackParameters/"+value
)
locals()[label].setLabel(label)
ntuplet = {
'3' : "3Hits", # ==3
'4' : "4Hits" # >=4
}
for kN,vN in ntuplet.items():
for key,value in quality.items():
label = "pixelTrks" + vN + key
# print label
cutstring = "numberOfValidHits == " + kN + " & quality('" + value + "')"
# print cutstring
locals()[label] = _trackSelector.clone( cut = cutstring )
locals()[label].setLabel(label)
for kN,vN in ntuplet.items():
for key,value in quality.items():
label = "pixelTrks" + vN + "Monitor" + key
# print label
locals()[label] = pixelTracksMonitor.clone(
TrackProducer = "pixelTrks" + vN + key,
FolderName = "Tracking/PixelTrackParameters/" + vN + "/" + value
)
locals()[label].setLabel(label)
from CommonTools.ParticleFlow.goodOfflinePrimaryVertices_cfi import goodOfflinePrimaryVertices as _goodOfflinePrimaryVertices
goodPixelVertices = _goodOfflinePrimaryVertices.clone(
src = "pixelVertices"
)
from DQM.TrackingMonitor.primaryVertexResolution_cfi import primaryVertexResolution as _primaryVertexResolution
pixelVertexResolution = _primaryVertexResolution.clone(
vertexSrc = "goodPixelVertices",
rootFolder = "OfflinePixelPV/Resolution"
)
pixelTracksMonitoringTask = cms.Task(
goodPixelVertices,
)
for category in ["pixelTrks", "pixelTrks3Hits", "pixelTrks4Hits"]:
for key in quality:
label = category+key
# print label
pixelTracksMonitoringTask.add(locals()[label])
allPixelTracksMonitoring = cms.Sequence()
for category in ["pixelTrksMonitor", "pixelTrks3HitsMonitor", "pixelTrks4HitsMonitor" ]:
for key in quality:
label = category+key
# print label
allPixelTracksMonitoring += locals()[label]
pixelTracksMonitoring = cms.Sequence(
allPixelTracksMonitoring +
pixelVertexResolution,
pixelTracksMonitoringTask
)
|
machine/models/__init__.py | drdarina/slack-machine | 111 | 11099719 | <gh_stars>100-1000
from .channel import Channel # noqa
from .user import User # noqa
|
python/minicaffe/profiler.py | ktts16/mini-caffe | 413 | 11099724 | <filename>python/minicaffe/profiler.py
# coding = utf-8
# pylint: disable=invalid-name
"""Profiler in mini-caffe"""
from .base import LIB
from .base import c_str, check_call
class Profiler(object):
"""Profiler
"""
@staticmethod
def enable():
"""enable profiler
"""
check_call(LIB.CaffeProfilerEnable())
@staticmethod
def disable():
"""disable profiler
"""
check_call(LIB.CaffeProfilerDisable())
@staticmethod
def open_scope(name):
"""open a scope on profiler
Parameters
----------
name: string
scope name
"""
check_call(LIB.CaffeProfilerScopeStart(c_str(name)))
@staticmethod
def close_scope():
"""close a scope on profiler
"""
check_call(LIB.CaffeProfilerScopeEnd())
@staticmethod
def dump(fn):
"""dump profiler data to fn
Parameters
----------
fn: string
file path to save profiler data
"""
check_call(LIB.CaffeProfilerDump(c_str(fn)))
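
# Illustrative usage sketch (the scope name and output path below are arbitrary
# examples, not part of the library API):
#
#   from minicaffe.profiler import Profiler
#
#   Profiler.enable()
#   Profiler.open_scope('forward')
#   # ... run the work to be profiled, e.g. a net forward pass ...
#   Profiler.close_scope()
#   Profiler.disable()
#   Profiler.dump('./profile-data')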
|
backend/lk/views/reviews.py | Purus/LaunchKitDocker | 2,341 | 11099770 | <filename>backend/lk/views/reviews.py
#
# Copyright 2016 Cluster Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import urlparse
from django import forms
from django.core import validators
from backend.lk.logic import appstore
from backend.lk.logic import appstore_app_info
from backend.lk.logic import appstore_review_notify
from backend.lk.logic import appstore_review_subscriptions
from backend.lk.models import AppStoreApp
from backend.lk.models import AppStoreReview
from backend.lk.models import TwitterAppConnection
from backend.lk.views.base import api_response
from backend.lk.views.base import api_view
from backend.lk.views.base import api_user_view
from backend.lk.views.base import bad_request
from backend.lk.views.base import not_found
from backend.lk.views.base import ok_response
from backend.util import lkforms
class NewSubscriptionForm(forms.Form):
email = lkforms.LKEmailField(required=False)
my_email = lkforms.LKBooleanField(required=False)
twitter_app_connection_id = lkforms.LKEncryptedIdReferenceField(TwitterAppConnection, required=False)
app_id = lkforms.LKEncryptedIdReferenceField(AppStoreApp, required=False)
slack_channel_name = forms.CharField(required=False, max_length=64)
slack_url = forms.URLField(required=False)
def clean_slack_url(self):
slack_url = self.cleaned_data.get('slack_url')
if slack_url:
if urlparse.urlparse(slack_url).netloc != 'hooks.slack.com' or '/services/' not in slack_url:
raise forms.ValidationError('Slack URL should be a hooks.slack.com URL and start with /services/.')
return slack_url
def clean(self):
email = self.cleaned_data.get('email')
slack_url = self.cleaned_data.get('slack_url')
my_email = self.cleaned_data.get('my_email', False)
slack_channel_name = self.cleaned_data.get('slack_channel_name', False)
twitter_app_connection = self.cleaned_data.get('twitter_app_connection_id')
app = self.cleaned_data.get('app_id')
    # the form is considered legit if it contains just one of the parameters for creating a review subscription
legit = len(filter(lambda x: x, [slack_url, email, my_email, slack_channel_name, twitter_app_connection, app])) == 1
if not legit:
raise forms.ValidationError(
'Please provide `email` or `slack_url` or `my_email` or `slack_channel` or `twitter_app_connection_id` or `app_id`, but just one.')
return self.cleaned_data
class ReviewsFilter(forms.Form):
start_review_id = lkforms.LKEncryptedIdReferenceField(AppStoreReview, required=False)
app_id = lkforms.LKEncryptedIdReferenceField(AppStoreApp, required=False)
country = forms.ChoiceField(choices=appstore.APPSTORE_COUNTRIES, required=False)
rating = forms.IntegerField(required=False, min_value=1, max_value=5)
limit = forms.IntegerField(required=False, min_value=1, max_value=200)
@api_user_view('GET')
def reviews_view(request):
form = ReviewsFilter(request.GET)
if not form.is_valid():
return bad_request('Invalid filters provided.', errors=form.errors)
filters = form.cleaned_data
start_review = filters.get('start_review_id')
my_reviews = appstore_review_subscriptions.subscribed_reviews_for_user(request.user,
app=filters.get('app_id'),
country=filters.get('country'),
rating=filters.get('rating'),
start_review=start_review,
limit=filters.get('limit'))
# TODO(Taylor): Remove app id:app 1:1 relationship; move to app id + country
apps = set()
for review in my_reviews:
if review.app not in apps:
apps.add(review.app)
return api_response({
'reviews': [r.to_dict() for r in my_reviews],
'apps': dict((a.encrypted_id, a.to_dict()) for a in apps),
})
@api_view('GET')
def review_view(request, review_id=None):
review = AppStoreReview.find_by_encrypted_id(review_id)
if not review:
return not_found()
app = review.app
appstore_app_info.decorate_app(app, review.country)
return api_response({
'review': review.to_dict(),
'apps': {app.encrypted_id: app.to_dict()}
})
@api_user_view('GET', 'POST')
def subscriptions_view(request):
if request.method == 'GET':
return _subscriptions_view_GET(request)
else:
return _subscriptions_view_POST(request)
def _subscriptions_view_GET(request):
my_subscriptions = appstore_review_subscriptions.subscriptions_for_user(request.user)
return api_response({
'subscriptions': [s.to_dict() for s in my_subscriptions],
})
def _subscriptions_view_POST(request):
form = NewSubscriptionForm(request.POST)
if not form.is_valid():
return bad_request('Invalid subscription data.', errors=form.errors)
email = form.cleaned_data.get('email')
my_email = form.cleaned_data.get('my_email')
slack_url = form.cleaned_data.get('slack_url')
slack_channel_name = form.cleaned_data.get('slack_channel_name')
twitter_app_connection = form.cleaned_data.get('twitter_app_connection_id')
app = form.cleaned_data.get('app_id')
if app and not twitter_app_connection:
try:
twitter_app_connection = TwitterAppConnection.objects.get(user=request.user, app=app, enabled=True)
except TwitterAppConnection.DoesNotExist:
return bad_request('Invalid `app_id`, not already connected to twitter.')
sub = None
if email:
sub = appstore_review_subscriptions.create_email_subscription(request.user, email)
elif my_email:
sub = appstore_review_subscriptions.create_my_email_subscription(request.user)
elif slack_channel_name:
sub = appstore_review_subscriptions.create_slack_channel_subscription(request.user, slack_channel_name)
elif slack_url:
sub = appstore_review_subscriptions.create_slack_subscription(request.user, slack_url)
elif twitter_app_connection:
sub = appstore_review_subscriptions.create_twitter_subscription_from_connection(twitter_app_connection)
if not sub:
return bad_request('Subscription already exists.')
new_flags = []
if not request.user.flags.has_review_monitor:
new_flags.append('has_review_monitor')
if sub.twitter_connection_id and not request.user.flags.has_review_monitor_tweets:
new_flags.append('has_review_monitor_tweets')
if new_flags:
request.user.set_flags(new_flags)
return api_response({
'subscription': sub.to_dict(),
})
@api_user_view('GET', 'POST')
def subscription_view(request, subscription_id=None):
sub = appstore_review_subscriptions.get_user_subscription_by_encrypted_id(request.user, subscription_id)
if not sub:
return not_found()
if request.method == 'GET':
return api_response({
'subscription': sub.to_dict(),
})
if request.POST.get('filter_good'):
do_filter = request.POST['filter_good'] == '1'
appstore_review_subscriptions.mark_subscription_filtered_good(sub, do_filter)
return api_response({
'subscription': sub.to_dict(),
})
@api_user_view('POST')
def subscription_delete_view(request, subscription_id=None):
sub = appstore_review_subscriptions.get_user_subscription_by_encrypted_id(request.user, subscription_id)
if not sub:
return not_found()
appstore_review_subscriptions.disable_subscription(sub)
return ok_response()
@api_view('POST')
def subscription_unsubscribe_token_view(request):
sub = appstore_review_notify.subscription_from_unsubscribe_token(request.POST.get('token', ''))
if not sub:
return bad_request('Could not find subscription with that `token`.')
appstore_review_subscriptions.disable_subscription(sub)
return ok_response()
|
deer/base_classes/learning_algo.py | jhardy0/deer | 373 | 11099810 | <reponame>jhardy0/deer
"""
This module defines the base class for the learning algorithms.
"""
import numpy as np
class LearningAlgo(object):
""" All the Q-networks, actor-critic networks, etc. should inherit this interface.
Parameters
-----------
environment : object from class Environment
The environment linked to the Q-network
batch_size : int
Number of tuples taken into account for each iteration of gradient descent
"""
def __init__(self, environment, batch_size):
self._environment = environment
self._df = 0.9
self._lr = 0.005
self._input_dimensions = self._environment.inputDimensions()
self._n_actions = self._environment.nActions()
self._batch_size = batch_size
def train(self, states, actions, rewards, nextStates, terminals):
""" This method performs the training step (e.g. using Bellman iteration in a deep Q-network)
for one batch of tuples.
"""
raise NotImplementedError()
def chooseBestAction(self, state):
""" Get the best action for a pseudo-state
"""
raise NotImplementedError()
def qValues(self, state):
""" Get the q value for one pseudo-state
"""
raise NotImplementedError()
def setLearningRate(self, lr):
""" Setting the learning rate
        NB: The learning rate usually has to be set in the optimizer, hence this function should
        be overridden. Otherwise, the learning rate change is likely not to be taken into account.
Parameters
-----------
lr : float
            The learning rate that has to be set
"""
self._lr = lr
def setDiscountFactor(self, df):
""" Setting the discount factor
Parameters
-----------
df : float
            The discount factor that has to be set
"""
if df < 0. or df > 1.:
raise AgentError("The discount factor should be in [0,1]")
self._df = df
def learningRate(self):
""" Getting the learning rate
"""
return self._lr
def discountFactor(self):
""" Getting the discount factor
"""
return self._df
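# A minimal, hypothetical sketch (not part of the original library) showing how a
# concrete algorithm is expected to subclass this interface; a real implementation
# would hold a neural network and return a meaningful loss from train().
class _RandomQAlgo(LearningAlgo):
    """ Toy subclass returning zero Q-values; for illustration only. """
    def train(self, states, actions, rewards, nextStates, terminals):
        return 0.0  # stands in for the training loss of a real network
    def qValues(self, state):
        return np.zeros(self._n_actions)
    def chooseBestAction(self, state):
        return int(np.argmax(self.qValues(state)))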
if __name__ == "__main__":
pass
|
Chapter05/thumbnail_limit_sema.py | AlekseiMikhalev/Software-Architecture-with-Python | 103 | 11099817 | # Code Listing #4
"""
Thumbnail producer/consumer - Limiting number of images using a Semaphore.
"""
import threading
import time
import string
import random
import uuid
import urllib.request
from PIL import Image
from queue import Queue
class ThumbnailURL_Generator(threading.Thread):
""" Worker class that generates image URLs """
def __init__(self, queue, sleep_time=1,):
self.sleep_time = sleep_time
self.queue = queue
# A flag for stopping
self.flag = True
# sizes
self._sizes = (240,320,360,480,600,720)
# URL scheme
self.url_template = 'https://dummyimage.com/%s/%s/%s.jpg'
threading.Thread.__init__(self)
def __str__(self):
return 'Producer'
def get_size(self):
return '%dx%d' % (random.choice(self._sizes),
random.choice(self._sizes))
def get_color(self):
return ''.join(random.sample(string.hexdigits[:-6], 3))
def run(self):
""" Main thread function """
while self.flag:
# generate image URLs of random sizes and fg/bg colors
url = self.url_template % (self.get_size(),
self.get_color(),
self.get_color())
# Add to queue
print(self,'Put',url)
self.queue.put(url)
time.sleep(self.sleep_time)
def stop(self):
""" Stop the thread """
self.flag = False
class ThumbnailImageSemaSaver(object):
""" Class which keeps an exact counter of saved images
and restricts the total count using a semaphore """
def __init__(self, limit=10):
self.limit = limit
self.counter = threading.BoundedSemaphore(value=limit)
self.count = 0
# Start time
self.start = time.time()
# Image saving rate
self.rate = 0
def acquire(self):
# Acquire counter, if limit is exhausted, it
# returns False
return self.counter.acquire(blocking=False)
def release(self):
# Release counter, incrementing count
return self.counter.release()
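    # Intended semantics (illustration): with limit=2, acquire() succeeds twice and
    # then returns False until release() is called, e.g. after a failed save.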
def thumbnail_image(self, url, size=(64,64), format='.png'):
""" Save image thumbnails, given a URL """
im = Image.open(urllib.request.urlopen(url))
# filename is last two parts of URL minus extension + '.format'
pieces = url.split('/')
filename = ''.join((pieces[-2],'_',pieces[-1].split('.')[0],format))
try:
im.thumbnail(size, Image.ANTIALIAS)
im.save(filename)
print('Saved',filename)
self.count += 1
except Exception as e:
print('Error saving URL',url,e)
# Image can't be counted, increment semaphore
self.release()
return True
def save(self, url):
""" Save a URL as thumbnail """
if self.acquire():
self.thumbnail_image(url)
return True
else:
print('Semaphore limit reached, returning False')
return False
class ThumbnailURL_Consumer(threading.Thread):
""" Worker class that consumes URLs and generates thumbnails """
def __init__(self, queue, saver):
self.queue = queue
self.flag = True
self.saver = saver
self.count = 0
# Internal id
self._id = uuid.uuid4().hex
threading.Thread.__init__(self, name='Consumer-'+ self._id)
def __str__(self):
return 'Consumer-' + self._id
def run(self):
""" Main thread function """
while self.flag:
url = self.queue.get()
print(self,'Got',url)
self.count += 1
if not self.saver.save(url):
# Limit reached, break out
print(self, 'Set limit reached, quitting')
break
def stop(self):
""" Stop the thread """
self.flag = False
if __name__ == '__main__':
from queue import Queue
import glob,os
os.system('rm -f *.png')
q = Queue(maxsize=2000)
saver = ThumbnailImageSemaSaver(limit=100)
producers, consumers = [], []
for i in range(3):
t = ThumbnailURL_Generator(q)
producers.append(t)
t.start()
for i in range(5):
t = ThumbnailURL_Consumer(q, saver)
consumers.append(t)
t.start()
for t in consumers:
t.join()
print('Joined', t, flush=True)
    # To make sure producers don't block on a full queue
while not q.empty():
item=q.get()
for t in producers:
t.stop()
print('Stopped',t, flush=True)
print('Total number of PNG images',len(glob.glob('*.png')))
|
17-it-generator/tree/extra/test_drawtree.py | SeirousLee/example-code-2e | 990 | 11099823 | from drawtree import tree, render_lines
def test_1_level():
result = list(render_lines(tree(BrokenPipeError)))
expected = [
'BrokenPipeError',
]
assert expected == result
def test_2_levels_1_leaf():
result = list(render_lines(tree(IndentationError)))
expected = [
'IndentationError',
'└── TabError',
]
assert expected == result
def test_3_levels_1_leaf():
class X: pass
class Y(X): pass
class Z(Y): pass
result = list(render_lines(tree(X)))
expected = [
'X',
'└── Y',
' └── Z',
]
assert expected == result
def test_4_levels_1_leaf():
class Level0: pass
class Level1(Level0): pass
class Level2(Level1): pass
class Level3(Level2): pass
result = list(render_lines(tree(Level0)))
expected = [
'Level0',
'└── Level1',
' └── Level2',
' └── Level3',
]
assert expected == result
def test_2_levels_2_leaves():
class Branch: pass
class Leaf1(Branch): pass
class Leaf2(Branch): pass
result = list(render_lines(tree(Branch)))
expected = [
'Branch',
'├── Leaf1',
'└── Leaf2',
]
assert expected == result
def test_3_levels_2_leaves_dedent():
class A: pass
class B(A): pass
class C(B): pass
class D(A): pass
class E(D): pass
result = list(render_lines(tree(A)))
expected = [
'A',
'├── B',
'│ └── C',
'└── D',
' └── E',
]
assert expected == result
def test_4_levels_4_leaves_dedent():
class A: pass
class B1(A): pass
class C1(B1): pass
class D1(C1): pass
class D2(C1): pass
class C2(B1): pass
class B2(A): pass
expected = [
'A',
'├── B1',
'│ ├── C1',
'│ │ ├── D1',
'│ │ └── D2',
'│ └── C2',
'└── B2',
]
result = list(render_lines(tree(A)))
assert expected == result
|
cpmpy/post_office_problem2.py | tias/hakank | 279 | 11099854 | """
Post office problem in cpmpy.
Problem statement:
http://www-128.ibm.com/developerworks/linux/library/l-glpk2/
From Winston 'Operations Research: Applications and Algorithms':
'''
A post office requires a different number of full-time employees working
on different days of the week [summarized below]. Union rules state that
each full-time employee must work for 5 consecutive days and then receive
two days off. For example, an employee who works on Monday to Friday
must be off on Saturday and Sunday. The post office wants to meet its
daily requirements using only full-time employees. Minimize the number
of employees that must be hired.
To summarize the important information about the problem:
* Every full-time worker works for 5 consecutive days and takes 2 days off
* Day 1 (Monday): 17 workers needed
* Day 2 : 13 workers needed
* Day 3 : 15 workers needed
* Day 4 : 19 workers needed
* Day 5 : 14 workers needed
* Day 6 : 16 workers needed
* Day 7 (Sunday) : 11 workers needed
The post office needs to minimize the number of employees it needs
to hire to meet its demand.
'''
Model created by <NAME>, <EMAIL>
See also my cpmpy page: http://www.hakank.org/cpmpy/
"""
import sys
import numpy as np
from cpmpy import *
from cpmpy.solvers import *
from cpmpy_hakank import *
def post_office_problem2():
#
# data
#
# days 0..6, monday 0
n = 7
days = list(range(n))
need = [17, 13, 15, 19, 14, 16, 11]
# Total cost for the 5 day schedule.
# Base cost per day is 100.
# Working saturday is 100 extra
# Working sunday is 200 extra.
cost = [500, 600, 800, 800, 800, 800, 700]
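  # e.g. a Monday start (Mon-Fri) costs 5*100 = 500, a Tuesday start (Tue-Sat)
  # costs 5*100 + 100 = 600, and a Wednesday start (Wed-Sun) costs
  # 5*100 + 100 + 200 = 800.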
#
# variables
#
# Number of workers starting at day i
x = intvar(0,100,shape=n,name="x")
total_cost = intvar(0, 20000, name="total_cost")
num_workers = intvar(0, 100, name="num_workers")
model = Model(minimize=total_cost)
#
# constraints
#
model += [total_cost == sum(x*cost)]
model += [num_workers == sum(x)]
for i in days:
model += [sum([x[j] for j in days
if j != (i + 5) % n and j != (i + 6) % n])
>= need[i]]
ss = CPM_ortools(model)
num_solutions = 0
if ss.solve():
num_solutions += 1
print("num_workers:", num_workers.value())
print("total_cost:", total_cost.value())
print("x:", x.value())
post_office_problem2()
|
tests/daemon/unit/api/endpoints/test_common.py | Karnak123/jina | 15179 | 11099875 | <reponame>Karnak123/jina<filename>tests/daemon/unit/api/endpoints/test_common.py
import os
import time
import pytest
from daemon.dockerize import Dockerizer
from daemon.models.containers import ContainerItem
cur_dir = os.path.dirname(os.path.abspath(__file__))
deps = ['mwu_encoder.py', 'mwu_encoder.yml']
@pytest.fixture(scope='module', autouse=True)
def workspace():
from tests.conftest import _create_workspace_directly, _clean_up_workspace
image_id, network_id, workspace_id, workspace_store = _create_workspace_directly(
cur_dir
)
yield workspace_id
_clean_up_workspace(image_id, network_id, workspace_id, workspace_store)
@pytest.mark.parametrize('api', ['/peas', '/pods', '/flows'])
def test_args(api, fastapi_client):
response = fastapi_client.get(f'{api}/arguments')
assert response.status_code == 200
assert response.json()
@pytest.mark.parametrize('api', ['/peas', '/pods', '/flows', '/workspaces'])
def test_status(api, fastapi_client):
response = fastapi_client.get(f'{api}')
assert response.status_code == 200
assert response.json()
@pytest.mark.parametrize('api', ['/peas', '/pods', '/flows'])
def test_delete(api, fastapi_client):
response = fastapi_client.delete(f'{api}')
assert response.status_code == 200
def _validate_response(response, payload, id, workspace_id):
assert response.status_code == 200
get_response = response.json()
item = ContainerItem(**get_response)
assert item.workspace_id == workspace_id
assert item.metadata.container_name == id
if 'json' in payload:
assert item.arguments.object['arguments']['name'] == payload['json']['name']
@pytest.mark.parametrize(
'api, payload',
[
(
'/peas',
{
'json': {'name': 'my_pea'},
},
),
(
'/pods',
{
'json': {'name': 'my_pod'},
},
),
],
)
def test_add_same_del_all(api, payload, fastapi_client, workspace):
_existing_containers = Dockerizer.containers
for _ in range(3):
# this test the random default_factory
payload['params'] = {'workspace_id': workspace}
post_response = fastapi_client.post(api, **payload)
assert post_response.status_code == 201
obj_id = post_response.json()
assert obj_id in Dockerizer.containers
r = fastapi_client.get(f'{api}/{obj_id}')
_validate_response(r, payload, obj_id, workspace)
response = fastapi_client.get(api)
assert response.status_code == 200
num_add = response.json()['num_add']
response = fastapi_client.delete(api)
assert response.status_code == 200
response = fastapi_client.get(api)
assert response.status_code == 200
assert response.json()['num_del'] == num_add
time.sleep(1)
assert Dockerizer.containers == _existing_containers
@pytest.mark.parametrize(
'api, payload',
[
(
'/peas',
{
'json': {'name': 'my_pea'},
},
),
(
'/pods',
{
'json': {'name': 'my_pod'},
},
),
(
'/flows',
{'params': {'filename': 'good_flow.yml'}},
),
(
'/flows',
{'params': {'filename': 'good_flow_jtype.yml'}},
),
],
)
def test_add_success(api, payload, fastapi_client, workspace):
if 'params' not in payload:
payload['params'] = {'workspace_id': workspace}
else:
payload['params'].update({'workspace_id': workspace})
post_response = fastapi_client.post(api, **payload)
assert post_response.status_code == 201
obj_id = post_response.json()
assert obj_id in Dockerizer.containers
r = fastapi_client.get(f'{api}/{obj_id}')
_validate_response(r, payload, obj_id, workspace)
response = fastapi_client.get(api)
assert response.status_code == 200
response = fastapi_client.get(f'{api}/{obj_id}')
assert response.status_code == 200
assert 'time_created' in response.json()
response = fastapi_client.delete(f'{api}/{obj_id}')
assert response.status_code == 200
response = fastapi_client.get(api)
assert response.status_code == 200
@pytest.mark.parametrize(
'api, payload',
[
('/peas', {'json': {'name': 'my_pea', 'uses': 'BAD'}}),
('/pods', {'json': {'name': 'my_pod', 'uses': 'BAD'}}),
(
'/flows',
{'params': {'filename': 'bad_flow.yml'}},
),
],
)
def test_add_fail(api, payload, fastapi_client, workspace):
if 'params' not in payload:
payload['params'] = {'workspace_id': workspace}
else:
payload['params'].update({'workspace_id': workspace})
response = fastapi_client.get(api)
assert response.status_code == 200
old_add = response.json()['num_add']
response = fastapi_client.post(api, **payload)
assert response.status_code != 201
if response.status_code == 400:
for k in ('body', 'detail'):
assert k in response.json()
response = fastapi_client.get(api)
assert response.status_code == 200
assert response.json()['num_add'] == old_add
|
2019/quals/hardware-flagrom/solution/secondary/exploit.py | iicarus-bit/google-ctf | 2,757 | 11099903 | #!/usr/bin/python
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import socket
import hashlib
import sys
def recvuntil(sock, txt):
d = ""
while d.find(txt) == -1:
try:
dnow = sock.recv(1)
if len(dnow) == 0:
return ("DISCONNECTED", d)
except socket.timeout:
return ("TIMEOUT", d)
except socket.error as msg:
return ("ERROR", d)
d += dnow
return ("OK", d)
def recvall(sock, n):
d = ""
while len(d) != n:
try:
dnow = sock.recv(n - len(d))
if len(dnow) == 0:
return ("DISCONNECTED", d)
except socket.timeout:
return ("TIMEOUT", d)
except socket.error as msg:
return ("ERROR", d)
d += dnow
return ("OK", d)
# Proxy object for sockets.
class gsocket(object):
def __init__(self, *p):
self._sock = socket.socket(*p)
def __getattr__(self, name):
return getattr(self._sock, name)
def recvall(self, n):
err, ret = recvall(self._sock, n)
if err != "OK":
return False
return ret
def recvuntil(self, txt):
err, ret = recvuntil(self._sock, txt)
if err != "OK":
return False
return ret
# Proxy object for pipes
class psocket(object):
def __init__(self, pin, pout):
self._pin = pin
self._pout = pout
def __getattr__(self, name):
print "__getattr__:", name
return None
def recv(self, n):
return self._pin.read(n)
def send(self, n):
return self._pout.write(n)
def sendall(self, n):
return self.send(n)
def recvall(self, n):
err, ret = recvall(self, n)
if err != "OK":
return False
return ret
def recvuntil(self, txt):
err, ret = recvuntil(self, txt)
if err != "OK":
return False
return ret
def prepare_payload():
print "[ 8051 ] Compiling payload..."
subprocess.check_call(["make", "-B"])
with open("exploit.8051", "rb") as f:
return f.read()
def test_local(payload):
p = subprocess.Popen(["./flagrom"], cwd='..', stdin=subprocess.PIPE, stdout=subprocess.PIPE)
send_payload(psocket(p.stdout, p.stdin), payload)
stdoutdata, stderrdata = p.communicate()
print stdoutdata
print stderrdata
def get_pow(pow):
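  # Brute-force an integer i such that md5('flagrom-<i>') starts with the hex
  # prefix sent by the challenge server (its proof-of-work).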
sys.stdout.write("[ PoW ] md5('flagrom-???') starts with '%s'... " % pow)
sys.stdout.flush()
pow = pow.decode("hex")
for i in xrange(0xf000000):
x = "flagrom-%i" % i
if hashlib.md5(x).digest().startswith(pow):
print x
return x
print "NOT FOUND"
def send_payload(s, payload):
pow_line = s.recvuntil("?\n").strip()
print "[ Recv ]", pow_line
pow = pow_line.split(" ")[-1].replace("?", "")
response = get_pow(pow)
s.sendall("%s\n" % response)
length_line = s.recvuntil("payload?\n").strip()
print "[ Recv ]", length_line
print "[ Len ]", len(payload)
s.sendall("%i\n" % len(payload))
print "[ Send ] Sending payload...",
sys.stdout.flush()
s.sendall(payload)
print "Sent!"
def test_remote(payload, addr):
host, port = addr.split(':')
s = gsocket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, int(port)))
send_payload(s, payload)
while True:
ret = s.recv(1)
if not ret:
break
sys.stdout.write(ret)
s.close()
if len(sys.argv) == 1:
print "[ ] Testing using stdin/out redirection!"
print "[ ] (if you want to run this exploit remotly: ./exploit.py ip:port)"
test_local(prepare_payload())
else:
print "[ ] Testing remotly!"
test_remote(prepare_payload(), sys.argv[1])
|
tests/fuzzing/test_build_graphql_schema_from_sdl_fuzz.py | matt-koevort/tartiflette | 530 | 11099932 | # import sys
#
# import afl
# from tartiflette.sdl.builder import build_graphql_schema_from_sdl
# from tartiflette.sdl.schema import GraphQLSchema
# We use American Fuzzy Loop for this.
# TODO: Do the same with libgraphqlparser (needs good security :) )
# Commented for the moment, so pytest won't complain.
# afl.init()
#
# try:
# build_graphql_schema_from_sdl(sys.stdin.read(),
# schema=GraphQLSchema())
# except ValueError:
# pass
|
aiochan/test/test_buffer.py | agentOfChaos/aiochan | 128 | 11099933 | <filename>aiochan/test/test_buffer.py
from aiochan.buffers import *
def test_fixed_buffer():
buffer = FixedLengthBuffer(3)
assert buffer.can_add
assert not buffer.can_take
buffer.add(1)
buffer.add(2)
assert buffer.can_add
assert buffer.can_take
buffer.add(3)
assert not buffer.can_add
assert buffer.can_take
assert buffer.take() == 1
assert buffer.can_add
assert buffer.can_take
assert buffer.take() == 2
assert buffer.take() == 3
assert buffer.can_add
assert not buffer.can_take
assert buffer.__repr__()
def test_dropping_buffer():
buffer = DroppingBuffer(2)
assert buffer.can_add
assert not buffer.can_take
buffer.add(1)
buffer.add(2)
assert buffer.can_add
assert buffer.can_take
assert buffer.take() == 1
buffer.add(3)
buffer.add(4)
assert buffer.take() == 2
assert buffer.take() == 3
assert buffer.can_add
assert not buffer.can_take
assert buffer.__repr__()
def test_sliding_buffer():
buffer = SlidingBuffer(2)
assert buffer.can_add
assert not buffer.can_take
buffer.add(1)
buffer.add(2)
assert buffer.can_add
assert buffer.can_take
assert buffer.take() == 1
buffer.add(3)
buffer.add(4)
assert buffer.take() == 3
assert buffer.take() == 4
assert buffer.can_add
assert not buffer.can_take
assert buffer.__repr__()
def test_promise_buffer():
buffer = PromiseBuffer(None)
assert buffer.can_add
assert not buffer.can_take
buffer.add(1)
assert buffer.can_add
assert buffer.can_take
assert buffer.take() == 1
buffer.add(2)
assert buffer.can_add
assert buffer.can_take
assert buffer.take() == 1
assert buffer.__repr__()
def test_it_buffer():
buffer = IterBuffer(())
assert not buffer.can_add
assert not buffer.can_take
buffer = IterBuffer(range(2))
assert not buffer.can_add
assert buffer.can_take
assert buffer.take() == 0
assert not buffer.can_add
assert buffer.can_take
assert buffer.take() == 1
assert not buffer.can_add
assert not buffer.can_take
|
libreasr/lib/layers/mish.py | johnpaulbin/LibreASR | 680 | 11099961 | import torch
import torch.nn as nn
import torch.nn.functional as F
def _mish_fwd(x):
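    # Forward value of Mish: f(x) = x * tanh(softplus(x)).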
return x.mul(torch.tanh(F.softplus(x)))
def _mish_bwd(x, grad_output):
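    # Gradient of Mish: f'(x) = tanh(sp(x)) + x * sigmoid(x) * (1 - tanh(sp(x))**2),
    # scaled by the incoming gradient (sp = softplus).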
x_sigmoid = torch.sigmoid(x)
x_tanh_sp = F.softplus(x).tanh()
return grad_output.mul(x_tanh_sp + x * x_sigmoid * (1 - x_tanh_sp * x_tanh_sp))
class MishAutoFn(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return _mish_fwd(x)
@staticmethod
def backward(ctx, grad_output):
x = ctx.saved_variables[0]
return _mish_bwd(x, grad_output)
class _Mish(nn.Module):
def forward(self, x):
return MishAutoFn.apply(x)
try:
from fastai2.layers import Mish
Mish = Mish
print("Using Mish activation from fastai2.")
except:
Mish = _Mish
print("Using Mish activation from lib.layers.")
|
tests/test_slots_and_tokens.py | aalba6675/python-pkcs11 | 114 | 11099969 | """
PKCS#11 Slots and Tokens
"""
import unittest
import pkcs11
from . import LIB, TOKEN, Only, Not
class SlotsAndTokensTests(unittest.TestCase):
def test_double_initialise(self):
self.assertIsNotNone(pkcs11.lib(LIB))
self.assertIsNotNone(pkcs11.lib(LIB))
def test_double_initialise_different_libs(self):
self.assertIsNotNone(pkcs11.lib(LIB))
with self.assertRaises(pkcs11.AlreadyInitialized):
pkcs11.lib('somethingelse.so')
@Only.softhsm2
def test_get_slots(self):
lib = pkcs11.lib(LIB)
slots = lib.get_slots()
self.assertEqual(len(slots), 2)
slot1, slot2 = slots
self.assertIsInstance(slot1, pkcs11.Slot)
self.assertEqual(slot1.flags, pkcs11.SlotFlag.TOKEN_PRESENT)
def test_get_mechanisms(self):
lib = pkcs11.lib(LIB)
slot, *_ = lib.get_slots()
mechanisms = slot.get_mechanisms()
self.assertIn(pkcs11.Mechanism.RSA_PKCS, mechanisms)
def test_get_mechanism_info(self):
lib = pkcs11.lib(LIB)
slot, *_ = lib.get_slots()
info = slot.get_mechanism_info(pkcs11.Mechanism.RSA_PKCS_OAEP)
self.assertIsInstance(info, pkcs11.MechanismInfo)
@Not.nfast # EC not supported
@Not.opencryptoki
def test_get_mechanism_info_ec(self):
lib = pkcs11.lib(LIB)
slot, *_ = lib.get_slots()
info = slot.get_mechanism_info(pkcs11.Mechanism.EC_KEY_PAIR_GEN)
self.assertIsInstance(info, pkcs11.MechanismInfo)
self.assertIn(pkcs11.MechanismFlag.EC_NAMEDCURVE, info.flags)
@Only.softhsm2
def test_get_tokens(self):
lib = pkcs11.lib(LIB)
tokens = lib.get_tokens(token_flags=pkcs11.TokenFlag.RNG)
self.assertEqual(len(list(tokens)), 2)
tokens = lib.get_tokens(token_label=TOKEN)
self.assertEqual(len(list(tokens)), 1)
@Only.softhsm2
def test_get_token(self):
lib = pkcs11.lib(LIB)
slot, *_ = lib.get_slots()
token = slot.get_token()
self.assertIsInstance(token, pkcs11.Token)
self.assertEqual(token.label, TOKEN)
self.assertIn(pkcs11.TokenFlag.TOKEN_INITIALIZED, token.flags)
self.assertIn(pkcs11.TokenFlag.LOGIN_REQUIRED, token.flags)
|
cartoframes/viz/style.py | CartoDB/cartoframes | 236 | 11099980 | from . import defaults
from ..utils.utils import merge_dicts, text_match
class Style:
def __init__(self, data=None, value=None,
default_legend=None, default_widget=None,
default_popup_hover=None, default_popup_click=None):
self._style = self._init_style(data=data)
self._value = value
self._default_legend = default_legend
self._default_widget = default_widget
self._default_popup_hover = default_popup_hover
self._default_popup_click = default_popup_click
def _init_style(self, data):
if data is None:
return defaults.STYLE
elif isinstance(data, (str, dict)):
return data
else:
raise ValueError('`style` must be a dictionary')
@property
def value(self):
return self._value
@property
def default_legend(self):
return self._default_legend
@property
def default_widget(self):
return self._default_widget
@property
def default_popup_hover(self):
return self._default_popup_hover
@property
def default_popup_click(self):
return self._default_popup_click
def compute_viz(self, geom_type, variables={}):
style = self._style
default_style = defaults.STYLE[geom_type]
if isinstance(style, str):
# Only for testing purposes
return self._parse_style_str(style, default_style, variables)
elif isinstance(style, dict):
if geom_type in style:
style = style.get(geom_type)
return self._parse_style_dict(style, default_style, variables)
else:
raise ValueError('`style` must be a dictionary')
def _parse_style_dict(self, style, default_style, ext_vars):
variables = merge_dicts(style.get('vars', {}), ext_vars)
properties = merge_dicts(default_style, style)
serialized_variables = self._serialize_variables(variables)
serialized_properties = self._serialize_properties(properties)
return serialized_variables + serialized_properties
def _parse_style_str(self, style, default_style, ext_vars):
variables = ext_vars
default_properties = self._prune_defaults(default_style, style)
serialized_variables = self._serialize_variables(variables)
serialized_default_properties = self._serialize_properties(default_properties)
return serialized_variables + serialized_default_properties + style
def _serialize_variables(self, variables={}):
output = ''
for var in sorted(variables):
output += '@{name}: {value}\n'.format(
name=var,
value=variables.get(var)
)
return output
def _serialize_properties(self, properties={}):
output = ''
for prop in sorted(properties):
if prop == 'vars':
continue
output += '{name}: {value}\n'.format(
name=prop,
value=properties.get(prop)
)
return output
def _prune_defaults(self, default_style, style):
output = default_style.copy()
if 'color' in output and text_match(r'color\s*:', style):
del output['color']
if 'width' in output and text_match(r'width\s*:', style):
del output['width']
if 'strokeColor' in output and text_match(r'strokeColor\s*:', style):
del output['strokeColor']
if 'strokeWidth' in output and text_match(r'strokeWidth\s*:', style):
del output['strokeWidth']
return output
|
tests/conftest.py | pystitch/stitch | 468 | 11100017 | import pytest
def pytest_addoption(parser):
parser.addoption("--run-slow", action="store_true",
help="run slow tests")
def pytest_runtest_setup(item):
if 'slow' in item.keywords and not item.config.getoption("--run-slow"):
pytest.skip("need --run-slow option to run")
|
venv/lib/python2.7/site-packages/gevent/util.py | egromero/chat_app_flask | 2557 | 11100019 | <gh_stars>1000+
# Copyright (c) 2009 <NAME>. See LICENSE for details.
"""
Low-level utilities.
"""
from __future__ import absolute_import
import functools
__all__ = ['wrap_errors']
class wrap_errors(object):
"""
    Helper to make a function return an exception rather than raise it.
Because every exception that is unhandled by greenlet will be logged,
it is desirable to prevent non-error exceptions from leaving a greenlet.
    This can be done with a simple ``try/except`` construct::
def wrapped_func(*args, **kwargs):
try:
return func(*args, **kwargs)
except (TypeError, ValueError, AttributeError) as ex:
return ex
This class provides a shortcut to write that in one line::
wrapped_func = wrap_errors((TypeError, ValueError, AttributeError), func)
It also preserves ``__str__`` and ``__repr__`` of the original function.
"""
# QQQ could also support using wrap_errors as a decorator
def __init__(self, errors, func):
"""
Calling this makes a new function from *func*, such that it catches *errors* (an
:exc:`BaseException` subclass, or a tuple of :exc:`BaseException` subclasses) and
        returns it as a value.
"""
self.__errors = errors
self.__func = func
# Set __doc__, __wrapped__, etc, especially useful on Python 3.
functools.update_wrapper(self, func)
def __call__(self, *args, **kwargs):
func = self.__func
try:
return func(*args, **kwargs)
except self.__errors as ex:
return ex
def __str__(self):
return str(self.__func)
def __repr__(self):
return repr(self.__func)
def __getattr__(self, name):
return getattr(self.__func, name)
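# Example usage (a sketch, not part of gevent itself): wrapping ``int`` so that a
# greenlet returns the ValueError instead of raising it.
#
#     import gevent
#     safe_int = wrap_errors((ValueError,), int)
#     result = gevent.spawn(safe_int, "not-a-number").get()
#     isinstance(result, ValueError)  # True: the error is returned, not raised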
|
rapidsms/messages/tests.py | catalpainternational/rapidsms | 330 | 11100031 | <reponame>catalpainternational/rapidsms
from rapidsms.messages.base import MessageBase
from rapidsms.messages.incoming import IncomingMessage
from rapidsms.messages.outgoing import OutgoingMessage
from rapidsms.tests.harness import RapidTest
class MessagesTest(RapidTest):
disable_phases = True
def test_message_id(self):
"""All message objects should have IDs."""
connections = [self.create_connection()]
msg = MessageBase(text="test", connections=connections)
self.assertIsNotNone(msg.id)
msg = IncomingMessage(text="test", connections=connections)
self.assertIsNotNone(msg.id)
msg = OutgoingMessage(text="test", connections=connections)
self.assertIsNotNone(msg.id)
def test_saved_message_fields(self):
"""Extra data should be attached to IncomingMessage."""
connection = self.create_connection()
fields = {'extra-field': 'extra-value'}
message = IncomingMessage(connection, 'test incoming message',
fields=fields)
self.assertIn('extra-field', message.fields)
self.assertEqual(message.fields['extra-field'], fields['extra-field'])
def test_outgoing_message_link(self):
"""Extra data should be attached to response (OutgoingMessage)."""
connection = self.create_connection()
fields = {'extra-field': 'extra-value'}
message = IncomingMessage(connection, 'test incoming message',
fields=fields)
response = message.respond('response')
self.assertEqual(message, response['in_response_to'])
self.assertIn('extra-field', response['in_response_to'].fields)
def test_outgoing_message_send(self):
"""OutgoingMessage.send should use send() API correctly"""
message = self.create_outgoing_message()
message.send()
self.assertEqual(self.outbound[0].text, message.text)
def test_response_context(self):
"""
InboundMessage responses should contain proper context for
creating OutboundMessages by the router.
"""
inbound_message = self.create_incoming_message()
inbound_message.respond('test1')
inbound_message.respond('test2')
self.assertEqual(2, len(inbound_message.responses))
response1 = inbound_message.responses[0]
self.assertEqual("test1", response1['text'])
self.assertEqual(inbound_message.connections, response1['connections'])
# reply_to should reference original message
self.assertEqual(inbound_message, response1['in_response_to'])
|
src/python/twitter/common/app/inspection.py | zhouyijiaren/commons | 1,143 | 11100064 | # ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
from __future__ import print_function
import inspect
import os
import sys
class Inspection(object):
class InternalError(Exception): pass
# TODO(wickman)
# Remove all calls to inspect.stack(). This is just bad. Port everything over
# to iterating from currentframe => outer frames.
@staticmethod
def find_main_from_caller():
last_frame = inspect.currentframe()
while True:
inspect_frame = last_frame.f_back
if not inspect_frame:
break
if 'main' in inspect_frame.f_locals:
return inspect_frame.f_locals['main']
last_frame = inspect_frame
raise Inspection.InternalError("Unable to detect main from the stack!")
@staticmethod
def print_stack_locals(out=sys.stderr):
stack = inspect.stack()[1:]
for fr_n in range(len(stack)):
print('--- frame %s ---\n' % fr_n, file=out)
for key in stack[fr_n][0].f_locals:
print(' %s => %s' % (key, stack[fr_n][0].f_locals[key]), file=out)
@staticmethod
def find_main_module():
stack = inspect.stack()[1:]
for fr_n in range(len(stack)):
if 'main' in stack[fr_n][0].f_locals:
return stack[fr_n][0].f_locals['__name__']
return None
@staticmethod
def get_main_locals():
stack = inspect.stack()[1:]
for fr_n in range(len(stack)):
if '__name__' in stack[fr_n][0].f_locals and (
stack[fr_n][0].f_locals['__name__'] == '__main__'):
return stack[fr_n][0].f_locals
return {}
@staticmethod
def find_calling_module():
last_frame = inspect.currentframe()
while True:
inspect_frame = last_frame.f_back
if not inspect_frame:
break
if '__name__' in inspect_frame.f_locals:
return inspect_frame.f_locals['__name__']
last_frame = inspect_frame
raise Inspection.InternalError("Unable to interpret stack frame!")
@staticmethod
def find_application_name():
__entry_point__ = None
locals = Inspection.get_main_locals()
if '__file__' in locals and locals['__file__'] is not None:
__entry_point__ = locals['__file__']
elif '__loader__' in locals:
from zipimport import zipimporter
from pkgutil import ImpLoader
# TODO(wickman) The monkeypatched zipimporter should probably not be a function
# but instead a properly delegating proxy.
if hasattr(locals['__loader__'], 'archive'):
# assuming it ends in .zip or .egg, it may be of package format, so
# foo-version-py2.6-arch.egg, so split off anything after '-'.
__entry_point__ = os.path.basename(locals['__loader__'].archive)
__entry_point__ = __entry_point__.split('-')[0].split('.')[0]
elif isinstance(locals['__loader__'], ImpLoader):
__entry_point__ = locals['__loader__'].get_filename()
else:
__entry_point__ = '__interpreter__'
app_name = os.path.basename(__entry_point__)
return app_name.split('.')[0]
|
uq360/utils/generate_1D_regression_data.py | Sclare87/UQ360 | 148 | 11100112 | import matplotlib.pyplot as plt
import numpy as np
import numpy.random as npr
import torch as torch
def make_data_gap(seed, data_count=100):
import GPy
npr.seed(0)
x = np.hstack([np.linspace(-5, -2, int(data_count/2)), np.linspace(2, 5, int(data_count/2))])
x = x[:, np.newaxis]
k = GPy.kern.RBF(input_dim=1, variance=1., lengthscale=1.)
K = k.K(x)
L = np.linalg.cholesky(K + 1e-5 * np.eye(data_count))
# draw a noise free random function from a GP
eps = np.random.randn(data_count)
f = L @ eps
# use a homoskedastic Gaussian noise model N(f(x)_i, \sigma^2). \sigma^2 = 0.1
eps_noise = np.sqrt(0.1) * np.random.randn(data_count)
y = f + eps_noise
y = y[:, np.newaxis]
plt.plot(x, f, 'ko', ms=2)
plt.plot(x, y, 'ro')
plt.title("GP generated Data")
plt.pause(1)
return torch.FloatTensor(x), torch.FloatTensor(y), torch.FloatTensor(x), torch.FloatTensor(y)
def make_data_sine(seed, data_count=450):
# fix the random seed
np.random.seed(seed)
noise_var = 0.1
X = np.linspace(-4, 4, data_count)
y = 1*np.sin(X) + np.sqrt(noise_var)*npr.randn(data_count)
train_count = int (0.2 * data_count)
idx = npr.permutation(range(data_count))
X_train = X[idx[:train_count], np.newaxis ]
X_test = X[ idx[train_count:], np.newaxis ]
y_train = y[ idx[:train_count] ]
y_test = y[ idx[train_count:] ]
mu = np.mean(X_train, 0)
std = np.std(X_train, 0)
X_train = (X_train - mu) / std
X_test = (X_test - mu) / std
mu = np.mean(y_train, 0)
std = np.std(y_train, 0)
# mu = 0
# std = 1
y_train = (y_train - mu) / std
y_test = (y_test -mu) / std
train_stats = dict()
train_stats['mu'] = torch.FloatTensor([mu])
train_stats['sigma'] = torch.FloatTensor([std])
return torch.FloatTensor(X_train), torch.FloatTensor(y_train), torch.FloatTensor(X_test), torch.FloatTensor(y_test),\
train_stats |
30 Days of Code/Python/20 - Day 19 - Interfaces.py | srgeyK87/Hacker-Rank-30-days-challlenge | 275 | 11100150 | <reponame>srgeyK87/Hacker-Rank-30-days-challlenge
# ========================
# Information
# ========================
# Direct Link: https://www.hackerrank.com/challenges/30-interfaces/problem
# Difficulty: Easy
# Max Score: 30
# Language: Python
# ========================
# Solution
# ========================
class AdvancedArithmetic(object):
def divisorSum(n):
raise NotImplementedError
class Calculator(AdvancedArithmetic):
def divisorSum(self, n):
sum = 0
for i in range(1, n//2 + 1):
if n % i == 0:
sum += i
return sum + n
n = int(input())
my_calculator = Calculator()
s = my_calculator.divisorSum(n)
print("I implemented: " + type(my_calculator).__bases__[0].__name__)
print(s)
|
ochre/rmgarbage.py | KBNLresearch/ochre | 113 | 11100184 | <reponame>KBNLresearch/ochre<filename>ochre/rmgarbage.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Implementation of rmgarbage.
As described in the paper:
<NAME>., <NAME>., <NAME>. and <NAME>., 2001. Automatic
removal of "garbage strings" in OCR text: An implementation. In The 5th
World Multi-Conference on Systemics, Cybernetics and Informatics.
"""
import click
import codecs
import os
import pandas as pd
from string import punctuation
from nlppln.utils import create_dirs, out_file_name
def get_rmgarbage_errors(word):
errors = []
if rmgarbage_long(word):
errors.append('L')
if rmgarbage_alphanumeric(word):
errors.append('A')
if rmgarbage_row(word):
errors.append('R')
if rmgarbage_vowels(word):
errors.append('V')
if rmgarbage_punctuation(word):
errors.append('P')
if rmgarbage_case(word):
errors.append('C')
return errors
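# Illustrative behaviour on hypothetical OCR tokens (checked against the rules
# below): a clean word such as 'hello' yields [], while a noisy token such as
# 'aaaa$$$%%' yields ['A', 'R', 'P'] (mostly non-alphanumeric characters, a
# repeated character run, and more than one distinct punctuation mark between
# its first and last characters).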
def rmgarbage_long(string, threshold=40):
if len(string) > threshold:
return True
return False
def rmgarbage_alphanumeric(string):
alphanumeric_chars = sum(c.isalnum() for c in string)
if len(string) > 2 and (alphanumeric_chars+0.0)/len(string) < 0.5:
return True
return False
def rmgarbage_row(string, rep=4):
for c in string:
if c.isalnum():
if c * rep in string:
return True
return False
def rmgarbage_vowels(string):
string = string.lower()
if len(string) > 2 and string.isalpha():
vowels = sum(c in u'aáâàåãäeéèëêuúûùüiíîìïoóôòøõö' for c in string)
consonants = len(string) - vowels
low = min(vowels, consonants)
high = max(vowels, consonants)
if low/(high+0.0) <= 0.1:
return True
return False
def rmgarbage_punctuation(string):
string = string[1:len(string)-1]
punctuation_marks = set()
for c in string:
if c in punctuation:
punctuation_marks.add(c)
if len(punctuation_marks) > 1:
return True
return False
def rmgarbage_case(string):
if string[0].islower() and string[len(string)-1].islower():
for c in string:
if c.isupper():
return True
return False
@click.command()
@click.argument('in_file', type=click.File(encoding='utf-8'))
@click.option('--out_dir', '-o', default=os.getcwd(), type=click.Path())
def rmgarbage(in_file, out_dir):
create_dirs(out_dir)
text = in_file.read()
words = text.split()
doc_id = os.path.basename(in_file.name).split('.')[0]
result = []
removed = []
for word in words:
errors = get_rmgarbage_errors(word)
if len(errors) == 0:
result.append(word)
else:
removed.append({'word': word,
'errors': u''.join(errors),
'doc_id': doc_id})
out_file = out_file_name(out_dir, in_file.name)
with codecs.open(out_file, 'wb', encoding='utf-8') as f:
f.write(u' '.join(result))
metadata_out = pd.DataFrame(removed)
fname = '{}-rmgarbage-metadata.csv'.format(doc_id)
out_file = out_file_name(out_dir, fname)
metadata_out.to_csv(out_file, encoding='utf-8')
if __name__ == '__main__':
rmgarbage()
|
core/common.py | madflojo/automon | 414 | 11100187 | '''
Automatron: Module for common tasks
'''
import argparse
import os
import sys
import yaml
import signal
def get_opts(description):
''' Parse command line arguments '''
parser = argparse.ArgumentParser(description=description)
parser.add_argument("-c", "--config", dest="config", help="specify a configuration file")
args = parser.parse_args()
if args.config is None:
parser.print_help()
return args.config
def load_config(config):
''' Load config file into a dictionary '''
if os.path.isfile(config):
with open(config, "r") as fh:
config = yaml.safe_load(fh)
return config
return None
def get_config(description=None):
''' Get configuration file from cmdline and get yaml from file '''
config = get_opts(description)
if config is None:
sys.exit(1)
else:
config = load_config(config)
if config is None:
return False
else:
return config
def kill_threads(threads):
''' Used to kill of multi process threads '''
for thread in threads:
try:
os.kill(thread.pid, signal.SIGTERM)
except OSError:
pass
if __name__ == '__main__':
pass
|
src/radish/parser/core.py | bingyujin/radish | 182 | 11100191 | <reponame>bingyujin/radish
"""
radish
~~~~~~
The root from red to green. BDD tooling for Python.
:copyright: (c) 2019 by <NAME> <<EMAIL>>
:license: MIT, see LICENSE for more details.
"""
import json
import re
from pathlib import Path
from lark import Lark, UnexpectedInput
from radish.models import PreconditionTag
from radish.parser.errors import (
RadishLanguageNotFound,
RadishMisplacedBackground,
RadishMissingFeatureShortDescription,
RadishMissingRuleShortDescription,
RadishMissingScenarioShortDescription,
RadishMultipleBackgrounds,
RadishPreconditionScenarioDoesNotExist,
RadishPreconditionScenarioRecursion,
RadishScenarioLoopInvalidIterationsValue,
RadishScenarioLoopMissingIterations,
RadishScenarioOutlineExamplesInconsistentCellCount,
RadishScenarioOutlineExamplesMissingClosingVBar,
RadishScenarioOutlineExamplesMissingOpeningVBar,
RadishScenarioOutlineWithoutExamples,
RadishStepDataTableMissingClosingVBar,
RadishStepDocStringNotClosed,
RadishStepDoesNotStartWithKeyword,
)
from radish.parser.transformer import RadishGherkinTransformer, Transformer
class LanguageSpec:
"""Represents a gherkin language specification"""
def __init__(self, code, keywords):
self.code = code
self.keywords = keywords
self.first_level_step_keywords = {
keywords["Given"],
keywords["When"],
keywords["Then"],
}
def __str__(self):
return self.code
def __call__(self, terminal):
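        # Invoked by Lark through the ``edit_terminals`` hook (see _get_parser):
        # a terminal whose pattern is an English keyword is rewritten to this
        # language's keyword; any other terminal is left unchanged.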
try:
terminal.pattern.value = self.keywords[terminal.pattern.value]
except KeyError:
pass
class FeatureFileParser:
"""Radish Feature File Parser responsible to parse a single Feature File"""
def __init__(
self,
grammerfile: Path = None,
ast_transformer: Transformer = RadishGherkinTransformer,
resolve_preconditions: bool = True,
) -> None:
if grammerfile is None:
grammerfile = Path(__file__).parent / "grammer.g"
self.grammerfile = grammerfile
self.resolve_preconditions = resolve_preconditions
if ast_transformer is not None:
self._transformer = ast_transformer()
else:
self._transformer = None
self._current_feature_id = 1
self._parsers = {}
def _get_parser(self, language_spec):
"""Get a parser and lazy create it if necessary"""
try:
return self._parsers[language_spec.code]
except KeyError:
parser = Lark.open(
str(self.grammerfile),
parser="lalr",
transformer=self._transformer,
edit_terminals=language_spec,
)
self._parsers[language_spec.code] = parser
return parser
def parse(self, featurefile: Path):
"""Parse the given Feature File"""
ast = self.parse_file(featurefile, self._current_feature_id)
self._current_feature_id += 1
if self.resolve_preconditions:
self._resolve_preconditions(featurefile.parent, ast, {ast.path: ast})
return ast
def parse_file(self, featurefile: Path, feature_id: int = 0):
"""Parse the given Feature File using the parser"""
with open(str(featurefile), "r", encoding="utf-8") as featurefile_f:
contents = featurefile_f.read()
return self.parse_contents(featurefile, contents, feature_id)
def parse_contents(
self, featurefile_path: Path, featurefile_contents: str, feature_id: int = 0
):
# evaluate the language for the Feature File content
language_spec = self._detect_language(featurefile_contents)
# prepare the transformer for the Feature File
if self._transformer is not None:
self._transformer.prepare(
language_spec, featurefile_path, featurefile_contents, feature_id
)
# get a parser
parser = self._get_parser(language_spec)
try:
ast = parser.parse(featurefile_contents)
except UnexpectedInput as exc:
exc_class = exc.match_examples(
parser.parse,
{
RadishMissingFeatureShortDescription: RadishMissingFeatureShortDescription.examples, # noqa
RadishMissingRuleShortDescription: RadishMissingRuleShortDescription.examples,
RadishMissingScenarioShortDescription: RadishMissingScenarioShortDescription.examples, # noqa
RadishMisplacedBackground: RadishMisplacedBackground.examples,
RadishStepDoesNotStartWithKeyword: RadishStepDoesNotStartWithKeyword.examples,
RadishStepDocStringNotClosed: RadishStepDocStringNotClosed.examples,
RadishScenarioOutlineWithoutExamples: RadishScenarioOutlineWithoutExamples.examples, # noqa
RadishScenarioOutlineExamplesMissingOpeningVBar: RadishScenarioOutlineExamplesMissingOpeningVBar.examples, # noqa
RadishMultipleBackgrounds: RadishMultipleBackgrounds.examples,
RadishStepDataTableMissingClosingVBar: RadishStepDataTableMissingClosingVBar.examples, # noqa
RadishScenarioLoopMissingIterations: RadishScenarioLoopMissingIterations.examples, # noqa
RadishScenarioLoopInvalidIterationsValue: RadishScenarioLoopInvalidIterationsValue.examples, # noqa
RadishScenarioOutlineExamplesMissingClosingVBar: RadishScenarioOutlineExamplesMissingClosingVBar.examples, # noqa
RadishScenarioOutlineExamplesInconsistentCellCount: RadishScenarioOutlineExamplesInconsistentCellCount.examples, # noqa
},
)
if not exc_class:
raise
raise exc_class(exc.get_context(featurefile_contents), exc.line, exc.column)
return ast
def _detect_language(self, featurefile_contents: str):
"""Detect the specified language in the first line of the Feature File
If no language code is detected ``en`` is used.
If an unknown language code is detected an error is raised.
"""
def __get_language_spec(code):
language_spec_path = (
Path(__file__).parent / "languages" / "{}.json".format(code)
)
if not language_spec_path.exists():
raise RadishLanguageNotFound(code)
with open(
str(language_spec_path), "r", encoding="utf-8"
) as language_spec_file:
keywords = json.load(language_spec_file)
return LanguageSpec(code, keywords)
match = re.match(
r"^#\s*language:\s*(?P<code>[a-zA-Z-]{2,})", featurefile_contents.lstrip()
)
language_code = match.groupdict()["code"] if match else "en"
return __get_language_spec(language_code)
def _resolve_preconditions(self, features_rootdir, ast, visited_features):
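        # Walk every scenario's precondition tags, parse the referenced Feature
        # Files (caching them in ``visited_features``), guard against recursive
        # preconditions and attach the resolved scenarios to each scenario.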
for scenario in (s for rules in ast.rules for s in rules.scenarios):
preconditions = []
for precondition_tag in (
t for t in scenario.tags if isinstance(t, PreconditionTag)
):
precondition_path = features_rootdir / precondition_tag.feature_filename
if precondition_path not in visited_features:
precondition_ast = self.parse_file(precondition_path)
visited_features[precondition_ast.path] = precondition_ast
self._resolve_preconditions(
features_rootdir, precondition_ast, visited_features
)
else:
precondition_ast = visited_features[precondition_path]
precondition_scenarios = (
s for rules in precondition_ast.rules for s in rules.scenarios
)
for precondition_scenario in precondition_scenarios:
if (
precondition_scenario.short_description
== precondition_tag.scenario_short_description
):
break
else:
raise RadishPreconditionScenarioDoesNotExist(
precondition_path,
precondition_tag.scenario_short_description,
(
s.short_description
for rules in ast.rules
for s in rules.scenarios
),
)
# check if the precondition leads to a recursion
if (
precondition_scenario in preconditions
or precondition_scenario == scenario
):
raise RadishPreconditionScenarioRecursion(
scenario, precondition_scenario
)
preconditions.append(precondition_scenario)
# assign preconditions
scenario.set_preconditions(preconditions)
|
benchmarks/compare.py | novak2000/n2 | 528 | 11100205 | <gh_stars>100-1000
import os
import re
import argparse
from collections import defaultdict
def parse(args):
data = defaultdict(dict)
for line in open(args.fname):
library, algo, _, search_elapsed, accuracy, _ = line.strip().split('\t')
data[library.split(' ')[0]][algo] = float(search_elapsed), float(accuracy)
return data[args.base_lib], data[args.target_lib]
def compare(base, target):
def natural_sort(l):
def alphanum_key(key):
def convert(text):
return int(text) if text.isdigit() else text.lower()
return [convert(c) for c in re.split('([0-9]+)', key)]
return sorted(l, key=alphanum_key)
print('algo\tsearch_elapsed_seconds(negative values are better)\taccuracy(positive values are better)')
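    # Each cell below is the relative change of target vs. base, e.g. a target
    # search time of 0.8s against a base of 1.0s prints (0.8 - 1.0) * 100 / 1.0 = -20.0 %.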
for key in natural_sort(target.keys()):
if key in base:
print('%s\t%s' % (key, '\t'.join(str(round((z[0] - z[1]) * 100 / z[1], 2)) + ' %'
for z in zip(target[key], base[key]))))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--base_lib', help='base library to compare')
parser.add_argument('--target_lib', help='target library to compare')
parser.add_argument('fname', help='result file path')
args = parser.parse_args()
if not os.path.exists(args.fname):
raise ValueError("Wrong result file path")
compare(*parse(args))
|
examples/monitoring/init_grafana.py | DNCoelho/clipper | 1,403 | 11100260 | import signal
import requests
import docker
import json
import time
import sys
def signal_handler(signal, frame):
print("Stopping Grafana...")
docker_client = client.from_env()
try:
grafana = [
c for c in docker_client.containers.list()
if c.attrs['Config']['Image'] == "grafana/grafana"
][0]
grafana.stop()
except Exception as e:
pass
sys.exit(0)
if __name__ == '__main__':
signal.signal(signal.SIGINT, signal_handler)
print("(1/3) Initializing Grafana")
client = docker.from_env()
container = client.containers.run(
"grafana/grafana:latest", ports={'3000/tcp': 3000}, detach=True)
print("(2/3) Grafana Initialized")
time.sleep(3)
with open('Clipper_DataSource.json', 'r') as f:
datasource = json.load(f)
requests.post(
'http://admin:admin@localhost:3000/api/datasources', data=datasource)
print('(3/3) Clipper Data Source Added')
print(
'Please login to http://localhost:3000 using username and password "admin"'
)
print('''
After Login, Click "Home" -> "Import Dashboard" -> "Upload json File" -> "Clipper_Dashboard.json"
''')
while True:
time.sleep(1)
|
examples/embeddings_extraction_s3e_pooling.py | skiran252/FARM | 1,551 | 11100284 | import logging
import pickle
from pathlib import Path
from farm.data_handler.processor import InferenceProcessor
from farm.infer import Inferencer
from farm.modeling.adaptive_model import AdaptiveModel
from farm.modeling.language_model import LanguageModel
from farm.modeling.tokenization import Tokenizer
from farm.utils import set_all_seeds, initialize_device_settings
from farm.modeling.wordembedding_utils import fit_s3e_on_corpus
logger = logging.getLogger(__name__)
"""
Example for generating sentence embeddings via the S3E pooling approach as described by Wang et al in the paper
"Efficient Sentence Embedding via Semantic Subspace Analysis"
(https://arxiv.org/abs/2002.09620)
You can use classical models like fasttext, glove or word2vec and apply S3E on top.
This can be a powerful benchmark for plain transformer-based embeddings.
First, we fit the required stats on a custom corpus. This includes the derivation of token_weights depending on
token occurrences in the corpus, creation of the semantic clusters via k-means, and a couple of
pre-/post-processing steps to normalize the embeddings.
Second, we feed the resulting objects into our Inferencer to extract the actual sentence embeddings for our sentences.
"""
def fit(language_model, corpus_path, save_dir, do_lower_case, batch_size=4, use_gpu=False):
# Fit S3E on a corpus
set_all_seeds(seed=42)
device, n_gpu = initialize_device_settings(use_cuda=use_gpu, use_amp=False)
# Create a InferenceProcessor
tokenizer = Tokenizer.load(pretrained_model_name_or_path=language_model, do_lower_case=do_lower_case)
processor = InferenceProcessor(tokenizer=tokenizer, max_seq_len=128)
# Create an AdaptiveModel
language_model = LanguageModel.load(language_model)
model = AdaptiveModel(
language_model=language_model,
prediction_heads=[],
embeds_dropout_prob=0.1,
lm_output_types=["per_sequence"],
device=device)
model, processor, s3e_stats = fit_s3e_on_corpus(processor=processor,
model=model,
corpus=corpus_path,
n_clusters=10,
pca_n_components=300,
svd_postprocessing=True,
min_token_occurrences=1)
# save everything to allow inference without fitting everything again
model.save(save_dir)
processor.save(save_dir)
with open(save_dir / "s3e_stats.pkl", "wb") as f:
pickle.dump(s3e_stats, f)
# Load model, tokenizer and processor directly into Inferencer
inferencer = Inferencer(model=model, processor=processor, task_type="embeddings", gpu=use_gpu,
batch_size=batch_size, extraction_strategy="s3e", extraction_layer=-1,
s3e_stats=s3e_stats)
# Input
basic_texts = [
{"text": "a man is walking on the street."},
{"text": "a woman is walking on the street."},
]
# Get embeddings for input text (you can vary the strategy and layer)
result = inferencer.inference_from_dicts(dicts=basic_texts)
print(result)
inferencer.close_multiprocessing_pool()
def extract_embeddings(load_dir, use_gpu, batch_size):
with open(load_dir / "s3e_stats.pkl", "rb") as f:
s3e_stats = pickle.load(f)
# Init inferencer
inferencer = Inferencer.load(model_name_or_path=load_dir, task_type="embeddings", gpu=use_gpu,
batch_size=batch_size, extraction_strategy="s3e", extraction_layer=-1,
s3e_stats=s3e_stats)
# Input
basic_texts = [
{"text": "a man is walking on the street."},
{"text": "a woman is walking on the street."},
]
# Get embeddings for input text
result = inferencer.inference_from_dicts(dicts=basic_texts)
print(result)
inferencer.close_multiprocessing_pool()
if __name__ == "__main__":
lang_model = "glove-english-uncased-6B"
do_lower_case = True
# You can download this from:
# "https://s3.eu-central-1.amazonaws.com/deepset.ai-farm-downstream/lm_finetune_nips.tar.gz"
corpus_path = Path("../data/lm_finetune_nips/train.txt")
s3e_dir = Path("../saved_models/fitted_s3e/")
fit(language_model=lang_model,
do_lower_case=do_lower_case,
corpus_path=corpus_path,
save_dir=s3e_dir
)
extract_embeddings(load_dir=s3e_dir, use_gpu=False, batch_size=10) |
AutoDL_sample_code_submission/Auto_Tabular/feature/feat_engine.py | dianjixz/AutoDL | 1,044 | 11100286 | <gh_stars>1000+
from .feat_gen import *
from sklearn.utils import shuffle
from Auto_Tabular.utils.log_utils import log, timeit
class FeatEngine:
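    """Lightweight feature-engineering pipeline.
    Only the order-2 feature generators registered in ``order2s`` are applied;
    calls with any other ``order`` are no-ops.
    """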
def __init__(self):
self.order2s = []
def fit(self, data_space, order):
if order != 2:
return
order_name = 'order{}s'.format(order)
        pipeline = getattr(self, order_name)
self.feats = []
        for feat_cls in pipeline:
feat = feat_cls()
feat.fit(data_space)
self.feats.append(feat)
def transform(self, data_space, order):
for feat in self.feats:
feat.transform(data_space)
@timeit
def fit_transform(self, data_space, order, info=None):
if order != 2:
return
order_name = 'order{}s'.format(order)
        pipeline = getattr(self, order_name)
X, y = data_space.data, data_space.y
cats = data_space.cat_cols
        for feat_cls in pipeline:
feat = feat_cls()
feat.fit_transform(X, y, cat_cols=cats, num_cols=info['imp_nums'])
data_space.data = X
data_space.update = True
|
pythainlp/tokenize/sefr_cut.py | Gorlph/pythainlp | 569 | 11100287 | # -*- coding: utf-8 -*-
"""
Wrapper for SEFR CUT Thai word segmentation. SEFR CUT is a
Thai Word Segmentation Models using Stacked Ensemble.
:See Also:
* `GitHub repository <https://github.com/mrpeerat/SEFR_CUT>`_
"""
from typing import List
import sefr_cut
DEFAULT_ENGINE = 'ws1000'
sefr_cut.load_model(engine=DEFAULT_ENGINE)
def segment(text: str, engine: str = 'ws1000') -> List[str]:
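    """
    Segment Thai text into tokens with SEFR CUT.
    :param str text: input text
    :param str engine: SEFR CUT engine/model name (default: ``ws1000``);
        the underlying model is reloaded only when the engine changes
    :return: list of tokens (an empty list for empty or non-string input)
    :rtype: List[str]
    """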
global DEFAULT_ENGINE
if not text or not isinstance(text, str):
return []
if engine != DEFAULT_ENGINE:
DEFAULT_ENGINE = engine
sefr_cut.load_model(engine=DEFAULT_ENGINE)
return sefr_cut.tokenize(text)[0]
|
tests/test_concordance.py | baileythegreen/pyani | 144 | 11100324 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) The James Hutton Institute 2016-2019
# (c) University of Strathclyde 2019-2020
# Author: <NAME>
#
# Contact:
# <EMAIL>
#
# <NAME>,
# Strathclyde Institute for Pharmacy and Biomedical Sciences,
# 161 Cathedral Street,
# Glasgow,
# G4 0RE
# Scotland,
# UK
#
# The MIT License
#
# Copyright (c) 2016-2019 The James Hutton Institute
# Copyright (c) 2019-2020 University of Strathclyde
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Test for concordance of pyani package output with JSpecies.
These tests are intended to be run from the repository root using:
pytest -v
print() statements will be caught by pytest unless there is an error.
error. They can also be recovered with the -s option.
"""
import unittest
from pathlib import Path
import pandas as pd
import pytest
from pyani import run_multiprocessing as run_mp
from pyani import anib, anim, tetra, pyani_files
def parse_jspecies(infile):
"""Parse JSpecies output into Pandas dataframes.
The function expects a single file containing (legacy) ANIb,
ANIm, and TETRA output.
:param infile: path to JSpecies output file
This is an ugly function!
"""
dfs = dict()
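    # Expected layout (roughly): a method name ("ANIm", "ANIb" or "Tetra") on its
    # own line, a tab-separated header of genome names, then one row per genome
    # (row label plus tab-separated values, "---" for self-comparisons), and a
    # blank line terminating each table.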
methods = ("ANIm", "ANIb", "Tetra")
with open(infile, "r") as ifh:
header, in_table = False, False
for line in [_.strip() for _ in ifh.readlines() + ["\n"]]:
if line in methods and not in_table:
mth, header = line, True
elif header:
columns = line.split("\t")
data = pd.DataFrame(index=columns, columns=columns)
in_table, header = True, False
elif in_table:
if not line:
dfs[mth] = data.sort_index(axis=0).sort_index(axis=1)
in_table = False
else:
ldata = line.split("\t")
row = ldata[0]
for idx, val in enumerate(ldata[1:]):
if val != "---":
data[columns[idx]][row] = float(val)
elif mth.startswith("ANI"):
data[columns[idx]][row] = 100.0
else:
data[columns[idx]][row] = 1.0
else:
pass
return dfs
@pytest.fixture
def paths_concordance_fna(path_fixtures_base):
"""Paths to FASTA inputs for concordance analysis."""
return [
_
for _ in (path_fixtures_base / "concordance").iterdir()
if _.is_file() and _.suffix == ".fna"
]
@pytest.fixture
def path_concordance_jspecies(path_fixtures_base):
"""Path to JSpecies analysis output."""
return path_fixtures_base / "concordance/jspecies_output.tab"
@pytest.fixture
def threshold_anib_lo_hi():
"""Threshold for concordance comparison split between high and low identity.
When comparing ANIb results with ANIblastall results, we need to account for
the differing performances of BLASTN and BLASTN+ on more distantly-related
sequences. On closely-related sequences both methods give similar results;
for more distantly-related sequences, the results can be quite different. This
threshold is the percentage identity we consider to separate "close" from
"distant" related sequences.
"""
return 90
@pytest.fixture
def tolerance_anib_hi():
"""Tolerance for ANIb concordance comparisons.
    This tolerance is for "high identity" comparisons, i.e.
genomes having identity greater than threshold_anib_lo_hi in homologous regions.
These "within-species" level comparisons need to be more accurate
"""
return 0.1
@pytest.fixture
def tolerance_anib_lo():
"""Tolerance for ANIb concordance comparisons.
    This tolerance is for "low identity" comparisons, i.e.
genomes having identity less than threshold_anib_lo_hi in homologous regions.
    These "inter-species" level comparisons vary more as a result of the change of
algorithm from BLASTN to BLASTN+ (megablast).
"""
return 5
@pytest.fixture
def tolerance_anim():
"""Tolerance for ANIm concordance comparisons."""
return 0.1
@pytest.fixture
def tolerance_tetra():
"""Tolerance for TETRA concordance comparisons."""
return 0.1
@pytest.mark.skip_if_exe_missing("nucmer")
def test_anim_concordance(
paths_concordance_fna, path_concordance_jspecies, tolerance_anim, tmp_path
):
"""Check ANIm results are concordant with JSpecies."""
# Perform ANIm on the input directory contents
# We have to separate nucmer/delta-filter command generation
# because Travis-CI doesn't play nicely with changes we made
# for local SGE/OGE integration.
# This might be avoidable with a scheduler flag passed to
# jobgroup generation in the anim.py module. That's a TODO.
ncmds, fcmds = anim.generate_nucmer_commands(paths_concordance_fna, tmp_path)
(tmp_path / "nucmer_output").mkdir(exist_ok=True, parents=True)
run_mp.multiprocessing_run(ncmds)
# delta-filter commands need to be treated with care for
# Travis-CI. Our cluster won't take redirection or semicolon
# separation in individual commands, but the wrapper we wrote
# for this (delta_filter_wrapper.py) can't be called under
# Travis-CI. So we must deconstruct the commands below
dfcmds = [
" > ".join([" ".join(fcmd.split()[1:-1]), fcmd.split()[-1]]) for fcmd in fcmds
]
run_mp.multiprocessing_run(dfcmds)
orglengths = pyani_files.get_sequence_lengths(paths_concordance_fna)
results = anim.process_deltadir(tmp_path / "nucmer_output", orglengths)
result_pid = results.percentage_identity
result_pid.to_csv(tmp_path / "pyani_anim.tab", sep="\t")
# Compare JSpecies output to results
result_pid = (result_pid.sort_index(axis=0).sort_index(axis=1) * 100.0).values
tgt_pid = parse_jspecies(path_concordance_jspecies)["ANIm"].values
assert result_pid - tgt_pid == pytest.approx(0, abs=tolerance_anim)
@pytest.mark.skip_if_exe_missing("blastn")
def test_anib_concordance(
paths_concordance_fna,
path_concordance_jspecies,
tolerance_anib_hi,
tolerance_anib_lo,
threshold_anib_lo_hi,
fragment_length,
tmp_path,
):
"""Check ANIb results are concordant with JSpecies.
We expect ANIb results to be quite different, as the BLASTN
algorithm changed substantially between BLAST and BLAST+ (the
megaBLAST algorithm is now the default for BLASTN)
"""
# Get lengths of input genomes
orglengths = pyani_files.get_sequence_lengths(paths_concordance_fna)
# Build and run BLAST jobs
fragfiles, fraglengths = anib.fragment_fasta_files(
paths_concordance_fna, tmp_path, fragment_length
)
jobgraph = anib.make_job_graph(
paths_concordance_fna, fragfiles, anib.make_blastcmd_builder("ANIb", tmp_path)
)
assert 0 == run_mp.run_dependency_graph(jobgraph) # Jobs must run correctly
# Process BLAST output
result_pid = anib.process_blast(
tmp_path, orglengths, fraglengths, mode="ANIb"
).percentage_identity
# Compare JSpecies output to results. We do this in two blocks,
# masked according to whether the expected result is greater than
# a threshold separating "low" from "high" identity comparisons.
result_pid = result_pid.sort_index(axis=0).sort_index(axis=1) * 100.0
lo_result = result_pid.mask(result_pid >= threshold_anib_lo_hi).fillna(0).values
hi_result = result_pid.mask(result_pid < threshold_anib_lo_hi).fillna(0).values
tgt_pid = parse_jspecies(path_concordance_jspecies)["ANIb"]
lo_target = tgt_pid.mask(tgt_pid >= threshold_anib_lo_hi).fillna(0).values
hi_target = tgt_pid.mask(tgt_pid < threshold_anib_lo_hi).fillna(0).values
assert (lo_result - lo_target, hi_result - hi_target) == (
pytest.approx(0, abs=tolerance_anib_lo),
pytest.approx(0, abs=tolerance_anib_hi),
)
@pytest.mark.skip_if_exe_missing("blastall")
def test_aniblastall_concordance(
paths_concordance_fna,
path_concordance_jspecies,
tolerance_anib_hi,
fragment_length,
tmp_path,
):
"""Check ANIblastall results are concordant with JSpecies."""
# Get lengths of input genomes
orglengths = pyani_files.get_sequence_lengths(paths_concordance_fna)
# Perform ANIblastall on the input directory contents
fragfiles, fraglengths = anib.fragment_fasta_files(
paths_concordance_fna, tmp_path, fragment_length
)
jobgraph = anib.make_job_graph(
paths_concordance_fna,
fragfiles,
anib.make_blastcmd_builder("ANIblastall", tmp_path),
)
assert 0 == run_mp.run_dependency_graph(jobgraph) # Jobs must run correctly
# Process BLAST output
result_pid = anib.process_blast(
tmp_path, orglengths, fraglengths, mode="ANIblastall"
).percentage_identity
# Compare JSpecies output to results
result_pid = (result_pid.sort_index(axis=0).sort_index(axis=1) * 100.0).values
tgt_pid = parse_jspecies(path_concordance_jspecies)["ANIb"].values
assert result_pid - tgt_pid == pytest.approx(0, abs=tolerance_anib_hi)
def test_tetra_concordance(
paths_concordance_fna, path_concordance_jspecies, tolerance_tetra, tmp_path
):
"""Check TETRA results are concordant with JSpecies."""
# Perform TETRA analysis
zscores = dict()
for filename in paths_concordance_fna:
zscores[filename.stem] = tetra.calculate_tetra_zscore(filename)
results = tetra.calculate_correlations(zscores).values
# Compare JSpecies output
tgt_mat = parse_jspecies(path_concordance_jspecies)["Tetra"].values
assert results - tgt_mat == pytest.approx(0, abs=tolerance_tetra)
|
geoq/ontology/models.py | kaydoh/geoq | 471 | 11100329 | from django.db import models
from django.urls import reverse_lazy
from django.contrib.gis import admin
import json
class Term(models.Model):
"""
Ontological Term
"""
TERM_TYPES = [("Object", "Object"), ("Property", "Property"), ("Relationship","Relationship")]
word = models.CharField(max_length=100, help_text="Value of term")
identifier = models.CharField(max_length=200, help_text="IRI Identifier")
type = models.CharField(max_length=30, choices=TERM_TYPES, default=TERM_TYPES[0])
def __unicode__(self):
return self.word
def __str__(self):
return self.__unicode__()
@property
def serialize(self):
return {"name": self.word, "identifier": self.identifier, "type": self.type}
class Vocabulary(models.Model):
"""
Model for ontology vocabulary.
"""
name = models.CharField(max_length=200, help_text="Name of Vocabulary")
terms = models.ManyToManyField(Term, related_name="entries")
def __unicode__(self):
return self.name
def __str__(self):
return self.name
@property
def toJson(self):
return json.dumps([t.serialize for t in self.terms.all()])
class Ontology(models.Model):
"""
Representation of an Ontology
"""
name = models.CharField(max_length=200, help_text="Ontology Name")
url = models.CharField(max_length=200, help_text="Location of ontology")
def __unicode__(self):
return self.name
def __str__(self):
return self.name
|
deep-rl/lib/python2.7/site-packages/OpenGL/GLX/NV/video_capture.py | ShujaKhalid/deep-rl | 210 | 11100331 | '''OpenGL extension NV.video_capture
This module customises the behaviour of the
OpenGL.raw.GLX.NV.video_capture to provide a more
Python-friendly API
Overview (from the spec)
This extension provides a mechanism for streaming video data
directly into texture objects and buffer objects. Applications can
then display video streams in interactive 3D scenes and/or
manipulate the video data using the GL's image processing
capabilities.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/NV/video_capture.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLX import _types, _glgets
from OpenGL.raw.GLX.NV.video_capture import *
from OpenGL.raw.GLX.NV.video_capture import _EXTENSION_NAME
def glInitVideoCaptureNV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION |
px/px_file.py | walles/px | 149 | 11100367 | import socket
from . import px_exec_util
import sys
if sys.version_info.major >= 3:
# For mypy PEP-484 static typing validation
from typing import Set # NOQA
from typing import List # NOQA
from typing import Tuple # NOQA
from typing import Iterable # NOQA
from typing import Optional # NOQA
class PxFileBuilder:
def __init__(self):
self.fd = None # type: Optional[int]
self.pid = None # type: Optional[int]
self.name = None # type: Optional[str]
self.type = None # type: Optional[str]
self.inode = None # type: Optional[str]
self.device = None # type: Optional[str]
self.access = None # type: Optional[str]
# Example values: "cwd", "txt" and probably others as well
self.fdtype = None # type: Optional[str]
def build(self):
# type: () -> PxFile
assert self.pid is not None
assert self.type
pxFile = PxFile(self.pid, self.type)
pxFile.name = self.name
pxFile.fd = self.fd
pxFile.inode = self.inode
pxFile.device = self.device
pxFile.access = self.access
pxFile.fdtype = self.fdtype
return pxFile
def __repr__(self):
return "PxFileBuilder(pid={}, name={}, type={})".format(
self.pid, self.name, self.type
)
class PxFile(object):
def __init__(self, pid, filetype):
# type: (int, str) -> None
self.fd = None # type: Optional[int]
self.pid = pid
self.type = filetype
self.name = None # type: Optional[str]
self.inode = None # type: Optional[str]
self.device = None # type: Optional[str]
self.access = None # type: Optional[str]
# Example values: "cwd", "txt" and probably others as well
self.fdtype = None # type: Optional[str]
def __repr__(self):
# The point of implementing this method is to make the py.test output
# more readable.
return str(self.pid) + ":" + str(self)
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash((self.name, self.fd, self.fdtype, self.pid))
def __str__(self):
if self.type == "REG":
return self.name
name = self.name
listen_suffix = ""
if self.type in ["IPv4", "IPv6"]:
local, remote_endpoint = self.get_endpoints()
if not remote_endpoint:
listen_suffix = " (LISTEN)"
name = self._resolve_name()
# Decorate non-regular files with their type
if name:
return "[" + self.type + "] " + name + listen_suffix
return "[" + self.type + "] " + listen_suffix
def _resolve_name(self):
local, remote = self.get_endpoints()
if not local:
return self.name
local = resolve_endpoint(local)
if not remote:
return local
return local + "->" + resolve_endpoint(remote)
def device_number(self):
if self.device is None:
return None
number = int(self.device, 16)
if number == 0:
# 0x000lotsofmore000 is what we get on lsof 4.86 and Linux 4.2.0
# when lsof doesn't have root privileges
return None
return number
def fifo_id(self):
if self.inode is not None:
# On Linux, pipes are presented by lsof as FIFOs. They have unique-
# per-pipe inodes, so we use them as IDs.
return self.inode
if self.type == "FIFO" and self.name == "pipe":
# This is just a label that can be shared by several pipes on Linux,
# we can't use it to identify a pipe.
return None
if self.type == "PIPE" and self.name == "(none)":
# This is just a label that can be shared by several pipes on OS X,
# we can't use it to identify a pipe.
return None
# On OS X, pipes are presented as PIPEs and lack inodes, but they
# compensate by having unique names.
return self.name
def get_endpoints(self):
# type: () -> Tuple[Optional[str], Optional[str]]
"""
Returns a (local,remote) tuple. They represent the local and the remote
endpoints of a network connection.
This method will never return None, but both local and remote can be
None in case this isn't a network connection for example.
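        For example, "127.0.0.1:8080->10.0.0.5:443" yields
        ("127.0.0.1:8080", "10.0.0.5:443"), while a listening socket such as
        "*:80 (LISTEN)" yields (None, None): the wildcard local address cannot
        be matched and there is no remote end.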
"""
if not self.name:
return (None, None)
if self.type not in ["IPv4", "IPv6"]:
return (None, None)
local = None
remote = None
split_name = self.name.split("->")
local = split_name[0]
# Turn "localhost:ipp (LISTEN)" into "ipp" and nothing else
local = local.split(" ")[0]
if "*" in local:
# We can't match against this endpoint
local = None
if len(split_name) == 2:
remote = split_name[1]
return (local, remote)
def resolve_endpoint(endpoint):
# type: (str) -> str
"""
Resolves "127.0.0.1:portnumber" into "localhost:portnumber".
"""
# Find the rightmost :, necessary for IPv6 addresses
splitindex = endpoint.rfind(":")
if splitindex == -1:
return endpoint
address = endpoint[0:splitindex]
if address[0] == "[" and address[-1] == "]":
# This is how lsof presents IPv6 addresses
address = address[1:-1]
port = endpoint[splitindex + 1 :]
host = None
try:
host = socket.gethostbyaddr(address)[0]
except Exception:
# Lookup failed for whatever reason, give up
return endpoint
if host == "localhost.localdomain":
# "localdomain" is just a long word that doesn't add any information
host = "localhost"
return host + ":" + port
def call_lsof():
"""
Call lsof and return the result as one big string
"""
# See OUTPUT FOR OTHER PROGRAMS: http://linux.die.net/man/8/lsof
# Output lines can be in one of two formats:
# 1. "pPID@" (with @ meaning NUL)
# 2. "fFD@aACCESSMODE@tTYPE@nNAME@"
return px_exec_util.run(["lsof", "-n", "-F", "fnaptd0i"])
def lsof_to_files(lsof):
# type: (str) -> List[PxFile]
"""
Convert lsof output into a files array.
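    A (hypothetical) illustration of the NUL-separated shards described in
    call_lsof(): "p1234", "fcwd", "a ", "tDIR", "n/", "f5", "ar", "tREG", "n/tmp/x", ...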
"""
pid = None
file_builder = None # type: Optional[PxFileBuilder]
files = [] # type: List[PxFile]
for shard in lsof.split("\0"):
if shard[0] == "\n":
# Some shards start with newlines. Looks pretty when viewing the
# lsof output in moar, but makes the parsing code have to deal with
# it.
shard = shard[1:]
if not shard:
# The output ends with a single newline, which we just stripped away
break
infotype = shard[0]
value = shard[1:]
if infotype == "p":
pid = int(value)
elif infotype == "f":
if file_builder:
files.append(file_builder.build())
else:
file_builder = PxFileBuilder()
# Reset the file builder object. This operation is on a hot path
# and doing without an extra object allocation here actually helps.
file_builder.__init__() # type: ignore
if value.isdigit():
file_builder.fd = int(value)
else:
# Words like "cwd", "txt" and probably others as well
file_builder.fdtype = value
assert pid is not None
file_builder.pid = pid
file_builder.type = "??"
elif infotype == "a":
assert file_builder is not None
access = {" ": None, "r": "r", "w": "w", "u": "rw"}[value]
file_builder.access = access
elif infotype == "t":
assert file_builder is not None
file_builder.type = value
elif infotype == "d":
assert file_builder is not None
file_builder.device = value
elif infotype == "n":
assert file_builder is not None
file_builder.name = value
elif infotype == "i":
assert file_builder is not None
file_builder.inode = value
else:
raise Exception(
"Unhandled type <{}> for shard <{}>".format(infotype, shard)
)
if file_builder:
# Don't forget the last file
files.append(file_builder.build())
return files
def get_all():
# type: () -> Set[PxFile]
"""
    Get all open files, as reported by lsof.
"""
return set(lsof_to_files(call_lsof()))
|
leaf/api/wrapper.py | guiqiqi/leaf | 119 | 11100374 | """API 函数的包装器"""
import sys
import json
import types
import logging
import datetime
import ipaddress
import traceback
from typing import Callable, Type, Dict,\
NoReturn, Optional, Iterable, Any
import bson
import mongoengine
from flask import g as _g
from flask import abort as _abort
from flask import request as _request
from flask import Response as _Response
from werkzeug.exceptions import HTTPException
from . import settings
from . import validator
from .. import rbac
from ..core import error
from ..core import modules
logger = logging.getLogger("leaf.api")
class __TypeConverter:
    """Type conversion registry"""
    def __init__(self):
        """Initialize the conversion registry"""
        self.__default = str  # default converter
self.__converters: Dict[Type, Callable[[Type], object]] = dict()
    def set_default(self, default: Callable[[Type], object]) -> NoReturn:
        """Set the default converter"""
self.__default = default
    def register(self, typing: Type, _converter: Callable[[Type], object]):
        """Register a converter for the given type"""
self.__converters[typing] = _converter
def convert(self, obj: object, default: Optional[bool] = False) -> object:
"""
        Try the registered converters in registration order.
        When the default argument is true, fall back to the configured default converter.
"""
for _type, _converter in self.__converters.items():
if isinstance(obj, _type):
return _converter(obj)
if not default:
raise KeyError("can not find converter for type %s" %
str(type(obj)))
return self.__default(obj)
# create an instance of the type converter
converter = __TypeConverter()
converter.set_default(str)
converter.register(bson.ObjectId, str)
converter.register(datetime.datetime, lambda obj: obj.isoformat())
converter.register(datetime.time, lambda obj: obj.isoformat())
converter.register(Exception, str)
converter.register(mongoengine.QuerySet, list)
converter.register(mongoengine.Document, lambda obj: obj.to_json())
converter.register(mongoengine.EmbeddedDocument, lambda obj: obj.to_json())
class Encoder(json.JSONEncoder):
"""
    Override the json.JSONEncoder.default method
    and add the custom type conversions to it.
"""
# pylint: disable=method-hidden
# pylint: disable=arguments-differ
    def default(self, obj):
        """Apply the registered type conversions"""
try:
return converter.convert(obj)
except KeyError as _error:
pass
return json.JSONEncoder.default(self, obj)
types.MethodType(default, json.JSONEncoder)
def jsonify(*args, **kwargs):
    """Return a JSON response packet in the custom format"""
response = _Response(
json.dumps(dict(*args, **kwargs), cls=Encoder),
mimetype="application/json")
    # add cross-origin (CORS) request support
if settings.HTTPResponseHeader.AddCORSSupport:
methods = settings.HTTPResponseHeader.SupportMethods
domain = settings.HTTPResponseHeader.CORSDomain
response.headers.add("Access-Control-Allow-Origin", domain)
response.headers.add("Access-Control-Allow-Methods", methods)
return response
def iplimit(allowed: Iterable[str]) -> Callable:
"""
    An API access limiter (based on IP address).
    Returns 403 when the client address is not within the allowed IP ranges:
@api.iplimit("127.0.0.1") -> only localhost
@api.iplimit("192.168.1.0/24") -> 192.168.1.1-255
@api.iplimit("0.0.0.0/0") -> all ip address
"""
    def decorator(function: Callable) -> Callable:
        """Function wrapper"""
networks = [ipaddress.ip_network(addr) for addr in allowed]
        def wrapper(*args, **kwargs) -> object:
            """Argument wrapper"""
            # get the remote address
address = ipaddress.ip_address(_request.remote_addr)
            # check the address against the allowed networks
for network in networks:
if address in network:
return function(*args, **kwargs)
return _abort(403)
        # rename the function to prevent overwriting an existing endpoint function
wrapper.__name__ = function.__name__
return wrapper
return decorator
def require(pointname: str, checkuser: bool = False) -> Callable:
"""
    A permission-validation decorator:
        pointname: name of the required access point
        checkuser: whether to hand the user over to the view layer:
            0. if disabled, the normal permission flow applies (403 on insufficient permission)
            1. if enabled, then on insufficient permission it will:
                0. set g.operator: str - the user info queried from the database
                1. set g.checkuser: bool = True
            2. and leave the permission decision to the view function
    0. obtain the Bearer token via flask and verify that the user's JWT token is valid
    1. look up the required access level for the accesspoint in the database
    2. return the function's value if access is allowed, otherwise return 403
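    Hypothetical usage sketch (the access point name below is illustrative only):
        @require("leaf.views.user.get", checkuser=True)
        def get_user(): ...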
"""
    def decorator(function: Callable) -> Callable:
        """Function wrapper"""
        def wrapper(*args, **kwargs) -> Any:
            """Argument wrapper"""
            # check whether the development-mode JWT token is enabled
_devmode = modules.server.debug
_devtoken = modules.server.devjwt
_gettoken = _request.headers.get("Authorization", str())
if _devmode and _devtoken == _gettoken:
return function(*args, **kwargs)
            # verify that the token is valid
try:
payload: dict = validator.jwttoken(_gettoken)
except error.Error as _error:
logger.warning(_error)
return settings.Authorization.UnAuthorized(_error)
_g.checkuser: bool = False
_g.operator = payload.get(rbac.jwt.const.Payload.Audience)
            # check whether the user's permissions meet the requirement
try:
diff: int = validator.permission(pointname, payload)
except bson.errors.InvalidId as _error:
logger.warning(_error)
return settings.Authorization.UnAuthorized(_error)
except rbac.error.AccessPointNotFound as _error:
logger.warning(_error)
if not settings.Authorization.ExecuteAPMissing:
return settings.Authorization.NotPermitted(_error)
return function(*args, **kwargs)
else:
                # if the permission matches - return directly
                # if it does not match but the userid needs checking - return as well
if not diff or checkuser:
_g.checkuser = checkuser
return function(*args, **kwargs)
                # if no manual userid check is needed and the permission is wrong, return 403
return settings.Authorization.NotPermitted(str(diff))
        # rename the function to prevent overwriting an existing endpoint function
wrapper.__name__ = function.__name__
return wrapper
return decorator
def wrap(payload: str) -> Callable:
"""
    An API endpoint decorator:
        run the function and collect its return value,
        check whether an error occurred during execution,
        and return the error information if one did.
    payload - the key under which the result data is placed in the response packet
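    Hypothetical usage sketch (endpoint name and payload key are illustrative only):
        @wrap("users")
        def list_users(): ...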
"""
    def decorator(function: Callable) -> Callable:
        """Function wrapper"""
        def wrapper(*args, **kwargs) -> object:
            """Argument wrapper"""
response = dict()
            # try to execute the function
try:
result = function(*args, **kwargs)
except error.Error as _error:
                # an internal (application-defined) error occurred
response[settings.Response.Code] = _error.code
response[settings.Response.Message] = _error.message()
response[settings.Response.Description] = _error.description
logger.error(_error)
except HTTPException as _error:
                # an HTTP error was raised deliberately
return _error
# pylint: disable=broad-except
except Exception as _error:
                # an undefined/unexpected error occurred
code = settings.Response.Codes.Unknown
description = settings.Response.Descriptions.Unknown
response[settings.Response.Code] = code
response[settings.Response.Message] = str(_error)
response[settings.Response.Description] = description
                # save the stack trace
exenvior = sys.exc_info()
exstr = traceback.format_exception(*exenvior)
exstr = ''.join(exstr)
logger.error(exstr)
else:
                # no error occurred - if the result is already a Response, return it directly
                if isinstance(result, _Response):
                    return result
                # otherwise it is plain data to be wrapped
response[settings.Response.Code] = settings.Response.Codes.Success
response[settings.Response.Message] = settings.Response.Messages.Success
response[settings.Response.Description] = settings.Response.Descriptions.Success
response[payload] = result
return jsonify(response)
        # rename the function to prevent overwriting an existing endpoint function
wrapper.__name__ = function.__name__
return wrapper
return decorator
|
script/gen_solver_table.py | carlosal1015/sfepy | 510 | 11100380 | <filename>script/gen_solver_table.py<gh_stars>100-1000
#!/usr/bin/env python
"""
Generate available solvers table for ReST documentation.
"""
from __future__ import absolute_import
import os.path as op
import sys
from argparse import ArgumentParser
sys.path.append('.')
import sfepy
from sfepy.base.base import load_classes
from sfepy.solvers import NonlinearSolver, TimeSteppingSolver, LinearSolver, \
EigenvalueSolver, QuadraticEVPSolver, OptimizationSolver
from sfepy.solvers.auto_fallback import AutoFallbackSolver
solver_by_type_table = [
[[AutoFallbackSolver], "Virtual Solvers with Automatic Fallback"],
[[TimeSteppingSolver], "Time-Stepping Solvers"],
[[NonlinearSolver], "Nonlinear Solvers"],
[[LinearSolver], "Linear Solvers"],
[[EigenvalueSolver], "Eigenvalue Problem Solvers"],
[[QuadraticEVPSolver], "Quadratic Eigenvalue Problem Solvers"],
[[OptimizationSolver], "Optimization Solvers"]
]
for idx, (solver_classes, _label) in enumerate(solver_by_type_table):
    solver_by_type_table[idx][0] = load_classes(sfepy.solvers.solver_files,
                                                solver_classes,
                                                package_name='sfepy.solvers')
def trim(docstring):
"""Trim and split (doc)string."""
if not docstring:
return ''
# Convert tabs to spaces (following the normal Python rules)
# and split into a list of lines:
lines = docstring.expandtabs().splitlines()
# Determine minimum indentation (first line doesn't count):
indent = sys.maxsize
for line in lines[1:]:
stripped = line.lstrip()
if stripped:
indent = min(indent, len(line) - len(stripped))
# Remove indentation (first line is special):
trimmed = [lines[0].strip()]
if indent < sys.maxsize:
for line in lines[1:]:
trimmed.append(line[indent:].rstrip())
# Strip off trailing and leading blank lines:
while trimmed and not trimmed[-1]:
trimmed.pop()
while trimmed and not trimmed[0]:
trimmed.pop(0)
# Return a splitted string:
return trimmed
def typeset_solvers_table(fd, solver_table):
"""
Generate solvers table ReST output.
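    The emitted ReST looks roughly like this (solver name illustrative):
        .. <Linear Solvers>
        - :class:`ls.scipy_direct <sfepy.solvers.ls.ScipyDirect>`: first docstring line
        .. </Linear Solvers>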
"""
rest_tag_start = '.. <%s>\n'
rest_tag_end = '.. </%s>\n'
for solver_type in solver_table:
fd.write(rest_tag_start % solver_type[1])
for name, cls in sorted(solver_type[0].items()):
fd.write('- :class:`%s <%s.%s>`: ' %
(name, cls.__module__, cls.__name__))
fd.write('%s\n' % trim(cls.__doc__)[0])
fd.write(rest_tag_end % solver_type[1])
fd.write('\n')
def typeset(fd):
"""
Utility function called by Sphinx.
"""
fd = open(fd, 'w')
typeset_solvers_table(fd, solver_by_type_table)
fd.close()
def gen_solver_table(app):
typeset(op.join(app.builder.srcdir, 'solver_table.rst'))
def setup(app):
app.connect('builder-inited', gen_solver_table)
def main():
parser = ArgumentParser(description=__doc__)
parser.add_argument(
"-v",
"--version",
action="version",
version="%(prog)s " + sfepy.__version__
)
parser.add_argument(
"-o",
"--output",
metavar="output_filename",
action="store",
dest="output_filename",
default="solver_table.rst",
)
options = parser.parse_args()
typeset(options.output_filename)
if __name__ == '__main__':
main()
|
osf/migrations/0075_merge_20171207_1511.py | gaybro8777/osf.io | 628 | 11100388 | <reponame>gaybro8777/osf.io<filename>osf/migrations/0075_merge_20171207_1511.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-07 21:11
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0074_parse_citation_styles'),
('osf', '0074_auto_20171207_1331'),
]
operations = [
]
|
ck/repo/module/choice/module.py | santosh653/ck | 480 | 11100405 | #
# Collective Knowledge (deal with choices)
#
# See CK LICENSE.txt for licensing details
# See CK COPYRIGHT.txt for copyright details
#
# Developer: <NAME>, <EMAIL>, http://fursin.net
#
cfg={} # Will be updated by CK (meta description of this module)
work={} # Will be updated by CK (temporal data)
ck=None # Will be updated by CK (initialized CK kernel)
# Local settings
##############################################################################
# Initialize module
def init(i):
"""
Input: {}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
return {'return':0}
##############################################################################
# Make next multi-dimensional choice (with state)
def make(i):
"""
Input: {
choices_desc - dict with description of choices (flat format)
choices_order - list of list of flat choice vectors to tune [[],[],...] -
list of list is needed to be able to enable indedepent
selection of groups of choices. For example, iterate
over all possible data set sizes + random flags per data set
choices_selection - list of dicts with types of selection for each above group
choices_current - current vector of choices
(random_module) - if !=None, random module with seed
(pipeline) - if set, update it with current choices
(custom_explore) - enforce exploration params from command line
(all) - if 'yes', select all
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
choices_current - list of updated choices
choices - dictionary of flat choices and values
choices_order - list of flat choices (to know order if need such as for LLVM opt)
pipeline - upated pipeline with choices
also choices and choices_order are added to pipeline
finish - if True, iterations are over
}
"""
from random import Random
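    # Illustrative (hypothetical) shape of the main inputs:
    #   choices_order     = [["##env#DATASET"], ["##compiler#flags"]]
    #   choices_selection = [{"type": "loop"}, {"type": "random", "choice": ["-O2", "-O3"]}]
    #   choices_current   = []  (filled with empty strings on the first call)
    # Each inner list of choices_order is explored as one group, as described above.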
o=i.get('out','')
my_random=i.get('random_module',None)
if my_random==None: my_random=Random()
finish=False
cdesc=i['choices_desc']
corder=i['choices_order']
csel=i['choices_selection']
ccur=i['choices_current']
pipeline=i.get('pipeline',{})
cexp=i.get('custom_explore',{})
cd=len(corder)
al=i.get('all','')
# Init current choices
if len(ccur)==0:
for c in range(0, cd):
cx=corder[c]
cy=[]
for k in range(0,len(cx)):
cy.append('')
ccur.append(cy)
update=False
nupdate=False # if force update next
for cx in range(cd-1,-1,-1):
cc=corder[cx]
dc=ccur[cx]
t={}
if al=='yes':
tp='pre-selected'
elif cexp.get('type','')!='':
tp=cexp['type']
else:
t=csel[cx]
tp=t.get('type','')
ti=t.get('iterations','')
top=t.get('omit_probability','')
if cexp.get('omit_probability','')!='': top=cexp['omit_probability']
if top=='': top=0.0
else: top=float(top)
# Take descriptions first directly from choices_selection.
# Later will be taken from choice_desc, if exists
zchoice=t.get('choice',[])
zprefix=t.get('explore_prefix','')
zdefault=t.get('default','')
zcanomit=t.get('can_omit','')
zestart=t.get('start','')
if cexp.get('start','')!='': zestart=cexp['start']
zestop=t.get('stop','')
if cexp.get('stop','')!='': zestop=cexp['stop']
zestep=t.get('step','')
if cexp.get('step','')!='': zestep=cexp['step']
if tp=='': tp='random'
ci=t.get('cur_iter','')
if ci=='': ci=-1
if cx==(cd-1) or update or nupdate or ci==-1:
nupdate=False
ci+=1
if ti!='' and ci>=ti:
ci=0
update=True
else:
update=False
dvsame=''
xupdate=False
for c in range(len(cc)-1,-1,-1):
cn=cc[c]
qt=cdesc.get(cn,{})
if zcanomit!='': yco=zcanomit
else: yco=qt.get('can_omit','')
if len(zchoice)>0: yhc=zchoice
else:
yhc=qt.get('choice',[])
if len(yhc)==0:
yhc=qt.get('choices',[])
if zprefix!='': yep=zprefix
else: yep=qt.get('explore_prefix','')
if tp!='': ytp=t.get('subtype','')
else: ytp=qt.get('type','')
if zdefault!='': ydefault=zdefault
else: ydefault=qt.get('default','')
dcc=dc[c]
if yep!='' and dcc.startswith(yep):
dcc=dcc[len(yep):]
if ytp=='float': dcc=float(dcc)
else: dcc=int(dcc)
if zestart!='': yestart=zestart
else: yestart=qt.get('explore_start','')
if zestop!='': yestop=zestop
else: yestop=qt.get('explore_stop','')
if zestep!='': yestep=zestep
else: yestep=qt.get('explore_step','')
if yestart!='':
if ytp=='float':
r1=float(yestart)
r2=float(yestop)
rs=float(yestep)
else:
r1=int(yestart)
r2=int(yestop)
rs=int(yestep)
rx=(r2-r1+1)/rs
dv=ydefault
# If exploration, set first
# if tp=='parallel-loop' or tp=='loop':
if yestart!='':
dv=r1
elif len(yhc)>0:
dv=yhc[0]
if tp=='pre-selected':
dv=dcc
elif ci!=0 or (tp=='random' or tp=='random-with-next'):
lcqx=len(yhc)
if tp=='random' or tp=='random-with-next':
omit=False
if yco=='yes':
x=my_random.randrange(0, 1000)
if x<(1000.0*top):
omit=True
if omit:
dv=''
else:
if lcqx>0:
ln=my_random.randrange(0, lcqx)
dv=yhc[ln]
elif yestart!='':
if (type(rx)==float or type(rx)==int or type(rx)==ck.type_long) and rx>=1:
y=my_random.randrange(0,int(rx))
else:
# alternatively should print inconsistency
y=0
dv=r1+(y*rs)
if tp=='random-with-next':
nupdate=True
elif tp=='parallel-random': # Change all dimensions at the same time (if explorable)!
lcqx=len(yhc)
if dvsame=='':
if lcqx>0:
ln=my_random.randrange(0, lcqx)
dvsame=yhc[ln]
elif yestart!='':
if (type(rx)==float or type(rx)==int or type(rx)==ck.type_long) and rx>=1:
y=my_random.randrange(0,int(rx))
else:
# alternatively should print inconsistency
y=0
dvsame=r1+(y*rs)
dv=dvsame
elif tp=='parallel-loop' or tp=='loop' or tp=='loop-with-next' or tp=='parallel-loop-with-next':
dv=dcc
if (tp=='parallel-loop' or tp=='parallel-loop-with-next') or c==len(cc)-1 or xupdate:
if yestart!='':
dv=dcc+rs
if dv>r2:
dv=r1
if tp=='loop': xupdate=True
else:
ci=0
update=True
else:
xupdate=False
else:
# Here we process choices instead of ranges)
ln=t.get('tmp_choice_position',0)
if dv=='':
ln=0
else:
if tp=='loop-with-next' or tp=='parallel-loop-with-next':
ln+=1
else:
if dv in yhc:
ln=yhc.index(dv)
ln+=1
else:
ln=0
t['tmp_choice_position']=ln
if ln<lcqx:
dv=yhc[ln]
xupdate=False
else:
# Next is wrong, but check compatibility with previous cases!
# dv=ydefault
dv=yhc[0]
if tp=='loop': xupdate=True
else:
ci=0
update=True
if tp=='loop-with-next' or tp=='parallel-loop-with-next':
nupdate=True
# Machine learning based probabilistic adaptive sampling of multi-dimensional
            # design and optimization spaces via external plugin
# See our work on Collective Mind (2014/2015)
#
# NOTE: moved to external customized autotuner plugins (see autotune pipeline --custom_autotuner)
# elif tp=='machine-learning-based' or tp=='model-based' or tp=='adaptive' or tp=='plugin-based' or tp=='customized':
else:
return {'return':1, 'error':'unknown autotuning type ('+tp+')'}
if yep!='' and dv!='': dv=yep+str(dv)
dc[c]=dv
if xupdate:
update=True
t['cur_iter']=ci
corder1=[]
ccur1={}
if update: # means that all loops were updated
finish=True
else:
if o=='con':
ck.out('')
ck.out(' Vector of flattened and updated choices:')
ll=0
prt=[]
for q in range(0, len(corder)):
qq=corder[q]
vq=ccur[q]
for q1 in range(0, len(qq)):
qq1=qq[q1]
vq1=vq[q1]
corder1.append(qq1)
ccur1[qq1]=vq1
if o=='con':
if vq1!='':
if len(qq1)>ll: ll=len(qq1)
prt.append({'k':qq1, 'v':vq1})
rx=ck.set_by_flat_key({'dict':pipeline, 'key':qq1, 'value':vq1})
if rx['return']>0: return rx
pipeline=rx['dict']
# Flatten choices and values, and add to pipeline
# Useful if order of choices is important (say opt flags in LLVM)
# Will be decoded by a given pipeline, if needed
pipeline['choices_order']=corder1
# pipeline['choices']=ccur1
if o=='con' and len(prt)>0:
for q in prt:
k=q['k']
v=q['v']
j=ll-len(k)
x=' '*j
ck.out(' '+k+x+' : '+str(v))
return {'return':0, 'choices_current':ccur, 'choices_order':corder1, 'choices':ccur1, 'pipeline':pipeline, 'finish':finish}
##############################################################################
# select list
def select_list(i):
"""
Input: {
choices - simple text list of choices
(skip_enter) - if 'yes', do not select 0 when entering 0
(desc) - description for each choices entry (list of the same size as choices)
(swap_name) - if 'yes' show desc first and real name in brackets
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
choice - selected text
position - selected position in the choice list
}
"""
se=i.get('skip_enter','')
sn=(i.get('swap_name','')=='yes')
lst=i.get('choices',[])
dsc=i.get('desc',[])
zz={}
iz=0
for iz in range(0, len(lst)):
z=lst[iz]
zs=str(iz)
zz[zs]=z
if iz<len(dsc):
zd=dsc[iz]
if zd!='':
if sn:
z=zd+' ('+z+')'
else:
z+=' ('+zd+')'
ck.out(zs+') '+z)
iz+=1
ck.out('')
y='Select item'
if se!='yes': y+=' (or press Enter for 0)'
y+=': '
rx=ck.inp({'text':y})
x=rx['string'].strip()
if x=='' and se!='yes': x='0'
if x not in zz:
return {'return':1, 'error':'number is not recognized'}
dduoa=zz[x]
return {'return':0, 'choice':dduoa, 'position':x}
##############################################################################
# improved version of UOA selector over the version from CK kernel
##############################################################################
# Universal UOA selector
def select_uoa(i):
"""
Input: {
choices - list from search function
(skip_enter) - if 'yes', do not select 0 when user presses Enter
(skip_sort) - if 'yes', do not sort array
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
choice - data UOA
}
"""
se=i.get('skip_enter','')
lst=i.get('choices',[])
if i.get('skip_sort','')!='yes':
klst=sorted(lst, key=lambda v: v['data_uoa'])
else:
klst=lst
zz={}
iz=0
for z1 in klst:
z=z1['data_uid']
zu=z1['data_uoa']
zname=z1.get('info',{}).get('data_name','')
zs=str(iz)
zz[zs]=z
x=z
if zname!='' and zname!=zu: x=zname+', '+x
ck.out(zs+') '+zu+' ('+x+')')
iz+=1
ck.out('')
y='Select UOA'
if se!='yes': y+=' (or press Enter for 0)'
y+=': '
rx=ck.inp({'text':y})
x=rx['string'].strip()
if x=='' and se!='yes': x='0'
if x not in zz:
return {'return':1, 'error':'number is not recognized'}
dduoa=zz[x]
return {'return':0, 'choice':dduoa}
|
menpo/model/test/test_gmrf.py | apapaion/menpo | 311 | 11100406 | <reponame>apapaion/menpo<filename>menpo/model/test/test_gmrf.py<gh_stars>100-1000
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from menpo.shape import PointCloud, DirectedGraph, UndirectedGraph
from menpo.math import as_matrix
from .. import GMRFModel, GMRFVectorModel
def _compute_sum_cost_block_sparse(
samples, test_sample, graph, n_features_per_vertex, subtract_mean, mode
):
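    # Brute-force reference: for each graph edge, build the per-edge data matrix
    # (concatenation or subtraction of the two vertex blocks), then accumulate the
    # Mahalanobis term v^T inv(cov) v of the (optionally mean-centred) test vector.
    # Used to cross-check GMRFModel.mahalanobis_distance() in the tests below.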
# create ndarray with data
data = as_matrix(samples, length=None, return_template=False)
# initialize cost
cost = 0.0
# for loop over the graph's edges
for e in graph.edges:
v1 = e[0]
v2 = e[1]
v1_from = v1 * n_features_per_vertex
v1_to = (v1 + 1) * n_features_per_vertex
v2_from = v2 * n_features_per_vertex
v2_to = (v2 + 1) * n_features_per_vertex
# slice data and test vector
y = test_sample.as_vector()
if mode == "concatenation":
x = np.hstack((data[:, v1_from:v1_to], data[:, v2_from:v2_to]))
y = np.hstack((y[v1_from:v1_to], y[v2_from:v2_to]))
else:
x = data[:, v1_from:v1_to] - data[:, v2_from:v2_to]
y = y[v1_from:v1_to] - y[v2_from:v2_to]
# compute mean and covariance
cov = np.linalg.inv(np.cov(x.T))
mean = np.mean(x, axis=0)
# compute and sum cost
if subtract_mean:
v = y - mean
else:
v = y
cost += v.dot(cov).T.dot(v)
return cost
def _compute_sum_cost_block_diagonal(
samples, test_sample, graph, n_features_per_vertex, subtract_mean
):
# create ndarray with data
data = as_matrix(samples, length=None, return_template=False)
# initialize cost
cost = 0.0
# for loop over the graph's edges
for v1 in graph.vertices:
v1_from = v1 * n_features_per_vertex
v1_to = (v1 + 1) * n_features_per_vertex
# slice data and test vector
y = test_sample.as_vector()
x = data[:, v1_from:v1_to]
y = y[v1_from:v1_to]
# compute mean and covariance
cov = np.linalg.inv(np.cov(x.T))
mean = np.mean(x, axis=0)
# compute and sum cost
if subtract_mean:
v = y - mean
else:
v = y
cost += v.dot(cov).T.dot(v)
return cost
def test_mahalanobis_distance():
# arguments values
mode_values = ["concatenation", "subtraction"]
n_features_per_vertex_values = [2, 3]
sparse_values = [True, False]
subtract_mean_values = [True, False]
n_components_values = [None, 30]
# create graph
n_vertices = 6
edges = np.array([[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 0]])
graphs = [
DirectedGraph.init_from_edges(edges, n_vertices),
UndirectedGraph(np.zeros((n_vertices, n_vertices))),
]
for n_features_per_vertex in n_features_per_vertex_values:
# create samples
n_samples = 50
samples = []
for i in range(n_samples):
samples.append(
PointCloud(np.random.rand(n_vertices, n_features_per_vertex))
)
test_sample = PointCloud(np.random.rand(n_vertices, n_features_per_vertex))
for graph in graphs:
for mode in mode_values:
for sparse in sparse_values:
for n_components in n_components_values:
# train GMRF
gmrf = GMRFModel(
samples,
graph,
mode=mode,
sparse=sparse,
n_components=n_components,
dtype=np.float64,
)
for subtract_mean in subtract_mean_values:
# compute costs
if graph.n_edges == 0:
cost1 = _compute_sum_cost_block_diagonal(
samples,
test_sample,
graph,
n_features_per_vertex,
subtract_mean,
)
else:
cost1 = _compute_sum_cost_block_sparse(
samples,
test_sample,
graph,
n_features_per_vertex,
subtract_mean,
mode,
)
cost2 = gmrf.mahalanobis_distance(
test_sample, subtract_mean=subtract_mean
)
assert_almost_equal(cost1, cost2)
def test_increment():
# arguments values
mode_values = ["concatenation", "subtraction"]
n_features_per_vertex_values = [2, 3]
sparse_values = [True, False]
n_components_values = [None, 30]
# create graph
n_vertices = 6
edges = np.array([[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 0]])
graphs = [
DirectedGraph.init_from_edges(edges, n_vertices),
UndirectedGraph(np.zeros((n_vertices, n_vertices))),
]
for n_features_per_vertex in n_features_per_vertex_values:
# create samples
n_samples = 100
samples = []
for i in range(n_samples):
samples.append(np.random.rand(n_vertices * n_features_per_vertex))
for graph in graphs:
for mode in mode_values:
for sparse in sparse_values:
for n_components in n_components_values:
# Incremental GMRF
gmrf1 = GMRFVectorModel(
samples[:50],
graph,
mode=mode,
sparse=sparse,
n_components=n_components,
dtype=np.float64,
incremental=True,
)
gmrf1.increment(samples[50::])
# Non incremental GMRF
gmrf2 = GMRFVectorModel(
samples,
graph,
mode=mode,
sparse=sparse,
n_components=n_components,
dtype=np.float64,
)
# Compare
if sparse:
assert_array_almost_equal(
gmrf1.precision.todense(), gmrf2.precision.todense()
)
assert_array_almost_equal(gmrf1.mean_vector, gmrf2.mean_vector)
|
plugin/lighthouse/ui/coverage_combobox.py | x9090/lighthouse | 1,741 | 11100439 | <filename>plugin/lighthouse/ui/coverage_combobox.py
import logging
import weakref
from lighthouse.util import *
from lighthouse.util.qt import *
from lighthouse.util.disassembler import disassembler
logger = logging.getLogger("Lighthouse.UI.ComboBox")
#------------------------------------------------------------------------------
# Constants Definitions
#------------------------------------------------------------------------------
SEPARATOR = "seperator"
SEPARATOR_HEIGHT = 1 # pixels
ENTRY_USER = "USER"
ENTRY_SPECIAL = "SPECIAL"
COLUMN_COVERAGE_STRING = 0
COLUMN_DELETE = 1
#------------------------------------------------------------------------------
# Coverage ComboBox
#------------------------------------------------------------------------------
class CoverageComboBox(QtWidgets.QComboBox):
"""
The Coverage ComboBox UI for switching between loaded coverage.
I had to write an unnecessary amount of code to prototype the engaging
combobox experiences I was looking for.
But now that we have all the important combobox components subclassed
out (it was necessary, I promise), perhaps there are a few more
interesting and fun features we can add in the future.
"""
def __init__(self, director, parent=None):
super(CoverageComboBox, self).__init__(parent)
self.setObjectName(self.__class__.__name__)
self._director = director
# configure the widget for use
self._ui_init()
self.refresh_theme()
#--------------------------------------------------------------------------
# QComboBox Overloads
#--------------------------------------------------------------------------
def mouseReleaseEvent(self, e):
"""
Capture mouse release events on the QComboBox.
"""
# get the widget currently beneath the mouse event being handled
hovering = self.childAt(e.pos())
#
# if the hovered widget is the 'head' of the QComboBox, we assume
        # the user has clicked it and should show the dropdown 'popup'
#
# we must showPopup() ourselves because internal Qt logic for
# 'editable' comboboxes try to enter an editing mode for the field
# rather than expanding the dropdown.
#
# if you don't remember, our combobox is marked 'editable' to satisfy
# some internal Qt logic so that our 'Windows' draw style is used
#
if hovering == self.lineEdit():
self.showPopup()
e.accept()
return
# handle any other events as they normally should be
super(CoverageComboBox, self).mousePressEvent(e)
#--------------------------------------------------------------------------
# Initialization - UI
#--------------------------------------------------------------------------
def _ui_init(self):
"""
Initialize UI elements.
"""
# initialize a monospace font to use with our widget(s)
self._font = MonospaceFont()
self._font.setPointSizeF(normalize_to_dpi(10))
self._font_metrics = QtGui.QFontMetricsF(self._font)
self.setFont(self._font)
        # create the underlying model & table to power the combobox dropdown
self.setModel(CoverageComboBoxModel(self._director, self))
self.setView(CoverageComboBoxView(self.model(), self))
#
# in the interest of maintaining a more consistent cross-platform
# style for the coverage combobox and its dropdown, we use an
# 'editable' QComboBox with the 'Windows' Qt style.
#
# since we don't actually want the QCombobox to be editable, we
# do everything we can to make it readonly / non-interfaceable.
#
self.setEditable(True)
self.lineEdit().setFont(self._font)
self.lineEdit().setReadOnly(True) # text can't be edited
self.lineEdit().setEnabled(False) # text can't be selected
#
# the combobox will pick a size based on its contents when it is first
# made visible, but we also make it is arbitrarily resizable for the
# user to change and play with at their own leisure
#
self.setSizeAdjustPolicy(QtWidgets.QComboBox.AdjustToContentsOnFirstShow)
self.setSizePolicy(QtWidgets.QSizePolicy.Ignored, QtWidgets.QSizePolicy.Ignored)
self.setMaximumHeight(self._font_metrics.height()*1.75)
# draw the QComboBox with a 'Windows'-esque style
self.setStyle(QtWidgets.QStyleFactory.create("Windows"))
# connect relevant signals
self._ui_init_signals()
def _ui_init_signals(self):
"""
Connect UI signals.
"""
# combobox selection was changed
self.activated.connect(self._ui_selection_changed)
# the 'X' / delete icon was clicked on a dropdown entry
self.view().clicked.connect(self._ui_clicked_delete)
# register for cues from the director
self._director.coverage_switched(self._internal_refresh)
self._director.coverage_modified(self._internal_refresh)
#--------------------------------------------------------------------------
# Signal Handlers
#--------------------------------------------------------------------------
def _ui_clicked_delete(self, index):
"""
Handle a click on the 'X' delete icon (cell) on a dropdown entry.
"""
if not index.isValid():
return
#
# the dropdown popup is actually a 2D table. column 0 is the detailed
# coverage string, where column '1' is actually the delete 'X' icon.
#
# this is a sanity check to ensure that the clicked index is actually
# the deletion column. It should not be possible for column 0 (the
# detail string) to pass through here, as that will be captured by
# the default combobox signal handlers.
#
# the reason the deletion column clicks can pass through is because
# the model has technically marked their cells as 'un-selectable'
# through the flags() overload.
#
assert index.column() == COLUMN_DELETE, "Unexpected Column (%u)" % index.column()
#
# using the table cell index that was clicked, we want to lookup the
# coverage name that this 'X' icon/cell is associated with.
#
# we retrieve the associated coverage name from the 'UserRole' field
# of the model using the clicked index. The 'UserRole' is a Qt field
# we are free to store developer/misc data in
#
coverage_name = self.model().data(index, QtCore.Qt.UserRole)
assert coverage_name
# pass the deletion request onto the director to delete said coverage
self._director.delete_coverage(coverage_name)
# refresh the dropdown (it will remove the deleted entry from the UI)
self.showPopup()
#
# I don't want there to be any entries highlighted after a deletion
# event, (it looks weird) so clear the table/dropdown highlights now
#
self.view().selectionModel().setCurrentIndex(
QtCore.QModelIndex(),
QtCore.QItemSelectionModel.ClearAndSelect
)
#
# the deletion of an entry will shift all the entries beneath it up
# by one. in this case, it is important we refresh the selection index
# to reflect the director so that it stays correct.
#
self._refresh_selection()
def _ui_selection_changed(self, row):
"""
Handle selection change of coverage combobox.
"""
# convert the combobox row index into a QModelIndex
index = self.model().index(row, 0)
# using the true index, lookup the coverage name for this selection
coverage_name = self.model().data(index, QtCore.Qt.UserRole)
# pass the user selection onto the director to change loaded coverage
self._director.select_coverage(coverage_name)
#--------------------------------------------------------------------------
# Refresh
#--------------------------------------------------------------------------
def refresh(self):
"""
Public refresh of the coverage combobox.
"""
self._internal_refresh()
@disassembler.execute_ui
def refresh_theme(self):
"""
Refresh UI facing elements to reflect the current theme.
"""
palette = self._director.palette
self.view().refresh_theme()
# configure the combobox's top row / visible dropdown
self.lineEdit().setStyleSheet(
"QLineEdit { "
" border: none;"
" padding: 0 0 0 2ex;"
" margin: 0;"
" background-color: %s;" % palette.combobox_background.name() +
"}"
)
# style the combobox dropdown
self.setStyleSheet(
"QComboBox {"
" color: %s;" % palette.combobox_text.name() +
" border: 1px solid %s;" % palette.combobox_border.name() +
" padding: 0;"
"} "
"QComboBox:hover, QComboBox:focus {"
" border: 1px solid %s;" % palette.combobox_border_focus.name() +
"}"
)
@disassembler.execute_ui
def _internal_refresh(self):
"""
Internal refresh of the coverage combobox.
"""
        # refresh the combobox internals
self.model().refresh()
self.view().refresh()
#
        # now that the combobox is fully up to date, select the item index
# that matches the active coverage as per the director
#
self._refresh_selection()
def _refresh_selection(self):
"""
Refresh the coverage combobox selection.
"""
# NOTE: we block any index change signals to stop unnecessary churn
self.blockSignals(True)
new_index = self.findData(self._director.coverage_name)
self.setCurrentIndex(new_index)
self.lineEdit().home(False)
self.blockSignals(False)
#------------------------------------------------------------------------------
# Coverage ComboBox - TableView
#------------------------------------------------------------------------------
class CoverageComboBoxView(QtWidgets.QTableView):
"""
The (internal) table view used for the Coverage ComboBox dropdown.
"""
def __init__(self, model, parent=None):
super(CoverageComboBoxView, self).__init__(parent)
self.setObjectName(self.__class__.__name__)
self._combobox = weakref.proxy(parent)
self._timer = None
# install the given data model into the table view
self.setModel(model)
# initialize UI elements
self._ui_init()
self.refresh_theme()
#--------------------------------------------------------------------------
# QTableView Overloads
#--------------------------------------------------------------------------
def showEvent(self, e):
"""
Show the QComboBox dropdown/popup.
"""
#
# the next line of code will prevent the combobox 'head' from getting
# any mouse actions now that the popup/dropdown is visible.
#
# this is pretty aggressive, but it will allow the user to 'collapse'
# the combobox dropdown while it is in an expanded state by simply
# clicking the combobox head as one can do to expand it.
#
# the reason this dirty trick is able to simulate a 'collapsing click'
# is because the user clicks 'outside' the popup/dropdown which
# automatically closes it. if the click was on the combobox head, it
# is simply ignored because we set this attribute!
#
# when the popup is closing, we undo this action in hideEvent().
#
# we have to use this workaround because we are using an 'editable' Qt
# combobox which behaves differently to clicks than a normal combobox.
#
# NOTE: we have to do this here in the tableview because the combobox's
# showPopup() and hidePopup() do not always trigger symmetrically.
#
# for example, hidePopup() was not being triggered when focus was lost
        # via virtual desktop switch, and other external focus changes. this
# is really bad, because the combobox would get stuck *closed* as it
# was never re-enabled for mouse events
#
self._combobox.setAttribute(QtCore.Qt.WA_TransparentForMouseEvents)
def hideEvent(self, e):
"""
Hide the QComboBox dropdown/popup.
"""
#
# the combobox popup is now hidden / collapsed. the combobox head needs
# to be re-enlightened to direct mouse clicks (eg, to expand it). this
        # undoes the setAttribute action in showPopup() above.
#
# if the coverage combobox is *not* visible, the coverage window is
# probably being closed / deleted. but just in case, we should attempt
# to restore the combobox's ability to accept clicks before bailing.
#
# this fixes a bug / Qt warning first printed in IDA 7.4 where 'self'
        # (the combobox) would be deleted by the time the 100ms timer in the
# 'normal' case fires below
#
if not self._combobox.isVisible():
self._combobox.setAttribute(QtCore.Qt.WA_TransparentForMouseEvents, False)
return
#
        # in the more normal case, the combobox is simply being collapsed
# by the user clicking it, or clicking away from it.
#
# we use a short timer of 100ms to ensure the 'hiding' of the dropdown
        # and its associated click are processed first. afterwards, it is safe to
# begin accepting clicks again.
#
self._timer = QtCore.QTimer.singleShot(100, self.__hidePopup_setattr)
def __hidePopup_setattr(self):
self._combobox.setAttribute(QtCore.Qt.WA_TransparentForMouseEvents, False)
def leaveEvent(self, e):
"""
Overload the mouse leave event.
"""
#
# this code mitigates a bug (feature?) where the last hovered index
# of the table view was retaining its MouseOver flag internally. This
# was keeping my 'X' icons highlighted if the mouse cursor left the
# table while touching one of these cells last.
#
# we basically send a fake 'Hover Event' to the table viewport at an
        # invalid position so the table clears any remaining hover flags.
#
event = QtGui.QHoverEvent(QtCore.QEvent.HoverLeave, QtCore.QPoint(-1,-1), QtCore.QPoint(-1,-1))
QtWidgets.QApplication.sendEvent(self.viewport(), event)
#--------------------------------------------------------------------------
# Initialization - UI
#--------------------------------------------------------------------------
def _ui_init(self):
"""
Initialize UI elements.
"""
# initialize a monospace font to use with our widget(s)
self._font = MonospaceFont()
self._font.setPointSizeF(normalize_to_dpi(10))
self._font_metrics = QtGui.QFontMetricsF(self._font)
self.setFont(self._font)
# hide dropdown table headers, and default grid
self.horizontalHeader().setVisible(False)
self.verticalHeader().setVisible(False)
self.setShowGrid(False)
# let Qt automatically elide (...) long row text (coverage names)
self.resizeColumnToContents(0)
self.setTextElideMode(QtCore.Qt.ElideRight)
self.setWordWrap(False)
# more code-friendly, readable aliases
vh = self.verticalHeader()
hh = self.horizontalHeader()
#
# - set the coverage name column to be stretchy and as tall as the text
# - make the 'X' icon column fixed width
#
hh.setSectionResizeMode(0, QtWidgets.QHeaderView.Stretch)
hh.setSectionResizeMode(1, QtWidgets.QHeaderView.Fixed)
vh.setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
hh.setMinimumSectionSize(0)
vh.setMinimumSectionSize(0)
# get the column width hint from the model for the 'X' delete column
icon_column_width = self.model().headerData(
COLUMN_DELETE,
QtCore.Qt.Horizontal,
QtCore.Qt.SizeHintRole
)
# set the 'X' delete icon column width to a fixed size based on the hint
hh.resizeSection(COLUMN_DELETE, icon_column_width)
# install a delegate to do some custom painting against the combobox
self.setItemDelegate(ComboBoxDelegate(self))
#--------------------------------------------------------------------------
# Refresh
#--------------------------------------------------------------------------
def refresh(self):
"""
Refresh the coverage combobox list order.
"""
model = self.model() # alias for readability
        # merge the 'special' entries up until a separator is found
for row in xrange(model.rowCount()):
#
# if this row is not a user defined entry, we want to merge ('span')
# its cells so there is no 'X' delete button column shown for it.
#
# this should apply to special rows such as the 'Hot Shell',
# 'Aggregate', or the 'separator' indexes
#
if not model.data(model.index(row, 1), QtCore.Qt.DecorationRole):
self.setSpan(row, 0, 1, model.columnCount())
# this is a user entry, ensure there is no span present (clear it)
else:
self.setSpan(row, 0, 0, model.columnCount())
@disassembler.execute_ui
def refresh_theme(self):
"""
Refresh UI facing elements to reflect the current theme.
"""
palette = self.model()._director.palette
self.setStyleSheet(
"QTableView {"
" background-color: %s;" % palette.combobox_background.name() +
" color: %s;" % palette.combobox_text.name() +
" margin: 0; outline: none;"
" border: 1px solid %s; " % palette.shell_border.name() +
"} "
"QTableView::item { " +
" padding: 0.5ex; border: 0; "
"} "
"QTableView::item:focus { " +
" background-color: %s; " % palette.combobox_selection_background.name() +
" color: %s; " % palette.combobox_selection_text.name() +
"} "
)
#------------------------------------------------------------------------------
# Coverage ComboBox - TableModel
#------------------------------------------------------------------------------
class CoverageComboBoxModel(QtCore.QAbstractTableModel):
"""
The (internal) table model used for the Coverage ComboBox dropdown.
"""
def __init__(self, director, parent=None):
super(CoverageComboBoxModel, self).__init__(parent)
self.setObjectName(self.__class__.__name__)
self._director = director
# our internal model
self._entries = []
self._seperator_index = 0
# initialize a monospace font to use with our widget(s)
self._font = MonospaceFont()
self._font.setPointSizeF(normalize_to_dpi(10))
self._font_metrics = QtGui.QFontMetricsF(self._font)
# load the raw 'X' delete icon from disk
delete_icon = QtGui.QPixmap(plugin_resource("icons/delete_coverage.png"))
# compute the appropriate size for the deletion icon
icon_height = self._font_metrics.height()*0.75
icon_width = icon_height
# scale the icon as appropriate (very likely scaling it down)
self._delete_icon = delete_icon.scaled(
icon_width,
icon_height,
QtCore.Qt.KeepAspectRatio,
QtCore.Qt.SmoothTransformation
)
# register for cues from the director
self._director.coverage_created(self.refresh)
self._director.coverage_deleted(self.refresh)
#--------------------------------------------------------------------------
# QAbstractTableModel Overloads
#--------------------------------------------------------------------------
def rowCount(self, parent=QtCore.QModelIndex()):
"""
The number of dropdown rows.
"""
return len(self._entries)
def columnCount(self, parent=QtCore.QModelIndex()):
"""
        The number of dropdown columns.
| column[0] | column[1]
+---------------------------+--------------------
| detailed coverage string1 | 'X' (delete icon)
| detailed coverage string2 | 'X' (delete icon)
...
"""
return 2
def headerData(self, section, orientation, role=QtCore.Qt.DisplayRole):
"""
        Define the properties of the table rows & columns.
"""
# table row property request
if orientation == QtCore.Qt.Vertical:
# row height size hint request
if role == QtCore.Qt.SizeHintRole:
# the separator 'row' has a special, 'thinner' row size
if section == self._seperator_index:
return SEPARATOR_HEIGHT
# all other rows should be at least as tall as their text
else:
return self._font_metrics.height()
# table column property request
elif orientation == QtCore.Qt.Horizontal:
# column width size hint request
if role == QtCore.Qt.SizeHintRole:
#
# the column holding the 'X' delete icon should be small
# and fixed width, therefore we are explicit in specifying
# our own size hint for it.
#
# note that the icon size is used to hint the column width,
# but multiplied by two. this is because we want the 'X'
# icon to float and have some padding in its column.
#
if section == COLUMN_DELETE:
return self._delete_icon.size().width() * 2
# unhandled request, nothing to do
return None
def data(self, index, role=QtCore.Qt.DisplayRole):
"""
Define how Qt should access the underlying model data.
"""
# sanity check the given index
if not index.isValid() or \
not (index.row() < self.rowCount()) or \
not (index.column() < self.columnCount()):
return None
# font format request
if role == QtCore.Qt.FontRole:
return self._font
# text alignment request
elif role == QtCore.Qt.TextAlignmentRole:
return QtCore.Qt.AlignVCenter | QtCore.Qt.AlignLeft
# combobox header, padded with " " to account for dropdown arrow overlap
elif role == QtCore.Qt.EditRole:
if index.column() == COLUMN_COVERAGE_STRING and index.row() != self._seperator_index:
return self._director.get_coverage_string(self._entries[index.row()]) + " "
# data display request
elif role == QtCore.Qt.DisplayRole:
if index.column() == COLUMN_COVERAGE_STRING and index.row() != self._seperator_index:
return self._director.get_coverage_string(self._entries[index.row()])
# tooltip
elif role == QtCore.Qt.ToolTipRole:
if index.column() == COLUMN_COVERAGE_STRING and index.row() != self._seperator_index:
coverage = self._director.get_coverage(self._entries[index.row()])
return coverage.filepath if coverage.filepath else ""
elif index.column() == COLUMN_DELETE:
return "Delete loaded coverage"
# icon display request
elif role == QtCore.Qt.DecorationRole:
# the icon request is for the 'X' column
if index.column() == COLUMN_DELETE:
#
# if the coverage entry is below the separator, it is a user
# loaded coverage and should always be deletable
#
if index.row() > self._seperator_index:
return self._delete_icon
#
# as a special case, we allow the aggregate to have a clear
# icon, which will clear all user loaded coverages
#
elif self._entries[index.row()] == "Aggregate":
return self._delete_icon
# entry type request
elif role == QtCore.Qt.AccessibleDescriptionRole:
#
# if the entry is ABOVE the separator index, it's a 'special'
# entry, eg 'Hot Shell', 'New Composition', 'Aggregate'
#
if index.row() < self._seperator_index:
return ENTRY_SPECIAL
#
# the entry IS the separator index
#
elif index.row() == self._seperator_index:
return SEPARATOR
#
# if the entry is BELOW the separator index, it's a 'user'
# entry, eg loaded coverage files, compositions, etc
#
else:
return ENTRY_USER
# entry coverage_name request
elif role == QtCore.Qt.UserRole:
return self._entries[index.row()]
        # unhandled request, nothing to do
return None
def flags(self, index):
"""
Item flags for the given entry index.
"""
# the 'X' column is ENABLED, but not technically selectable
if index.column() == COLUMN_DELETE:
return QtCore.Qt.ItemIsEnabled
# the separator should not be interactive in *any* way
if index.row() == self._seperator_index:
return QtCore.Qt.NoItemFlags
        # unhandled request, pass through
return super(CoverageComboBoxModel, self).flags(index)
#--------------------------------------------------------------------------
# Refresh
#--------------------------------------------------------------------------
def refresh(self):
"""
Refresh the coverage combobox model data.
"""
# extract all the names from the director with a shorthand symbol
with_shorthand = []
for name in self._director.coverage_names:
if self._director.get_shorthand(name):
with_shorthand.append(name)
# re-populate the model entries
self._entries = []
self._entries += list(self._director.special_names)
self._entries += [SEPARATOR]
self._entries += with_shorthand
# save the index of the separator for easy reference
self._seperator_index = self._entries.index(SEPARATOR)
# notify any listeners that the model layout may have changed
self.layoutChanged.emit()
#------------------------------------------------------------------------------
# Coverage ComboBox - Painting Delegate
#------------------------------------------------------------------------------
class ComboBoxDelegate(QtWidgets.QStyledItemDelegate):
"""
Coverage ComboBox Painting Delegate
Painting delegates can be used to augment the painting of a given
widget or its items. In this case, we use it to customize the
dropdown table in the Coverage ComboBox a bit more to our liking.
"""
def __init__(self, parent):
super(ComboBoxDelegate, self).__init__(parent)
# painting property definitions
self._grid_color = parent.model()._director.palette.shell_border
def sizeHint(self, option, index):
"""
Augmented entry sizeHint.
"""
if index.data(QtCore.Qt.AccessibleDescriptionRole) == SEPARATOR:
return QtCore.QSize(1, SEPARATOR_HEIGHT)
return super(ComboBoxDelegate, self).sizeHint(option, index)
def paint(self, painter, option, index):
"""
Augmented entry painting.
"""
# custom paint the 'grid line' beneath each coverage entry
if index.data(QtCore.Qt.AccessibleDescriptionRole) == ENTRY_USER:
painter.save()
painter.setPen(self._grid_color)
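            # an invalid sibling (row() == -1) means there is no row below
            # this one, ie this is the last entry in the dropdown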
final_entry = (index.sibling(index.row()+1, 0).row() == -1)
# draw the grid line beneath the current row (a coverage entry)
tweak = QtCore.QPoint(0, 1) # 1px tweak provides better spacing
if not final_entry:
painter.drawLine(
option.rect.bottomLeft() + tweak,
option.rect.bottomRight() + tweak
)
#
# now we will re-draw the grid line *above* the current entry,
# fixing a minor graphical bug where grid lines could disappear
# after hovering over a row / entry
#
previous = index.sibling(index.row()-1, 0)
painter.drawLine(
option.rect.topLeft(),
option.rect.topRight()
)
painter.restore()
# custom paint the 'X' icon where applicable
if index.data(QtCore.Qt.DecorationRole):
# get the icon data from the model
pixmap = index.data(QtCore.Qt.DecorationRole)
# center the draw rect in the middle of the 'X' column cell
destination_rect = pixmap.rect()
destination_rect.moveCenter(option.rect.center())
# augment the icon pixmap to be grayed out (disabled) or colored
# based on the mouse hover status of this index
if not (option.state & QtWidgets.QStyle.State_MouseOver):
pixmap = QtWidgets.QApplication.style().generatedIconPixmap(
QtGui.QIcon.Disabled,
pixmap,
QtWidgets.QStyleOption()
)
# draw the icon to the column
painter.drawPixmap(destination_rect, pixmap)
return
# custom paint the separator entry between special & normal coverage
if index.data(QtCore.Qt.AccessibleDescriptionRole) == SEPARATOR:
painter.save()
painter.setPen(self._grid_color)
painter.drawRect(
option.rect
)
painter.restore()
# nothing else to paint for the separator entry
return
# pass through to the standard painting
super(ComboBoxDelegate, self).paint(painter, option, index)
|
scripts/bleu_over_length.py | anoidgit/zero | 111 | 11100445 | <filename>scripts/bleu_over_length.py
# coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import argparse
from collections import Counter
def closest_length(candidate, references):
clen = len(candidate)
closest_diff = 9999
closest_len = 9999
for reference in references:
rlen = len(reference)
diff = abs(rlen - clen)
if diff < closest_diff:
closest_diff = diff
closest_len = rlen
elif diff == closest_diff:
closest_len = rlen if rlen < closest_len else closest_len
return closest_len
def shortest_length(references):
return min([len(ref) for ref in references])
def modified_precision(candidate, references, n):
tngrams = len(candidate) + 1 - n
counts = Counter([tuple(candidate[i:i+n]) for i in range(tngrams)])
if len(counts) == 0:
return 0, 0
max_counts = {}
for reference in references:
rngrams = len(reference) + 1 - n
ngrams = [tuple(reference[i:i+n]) for i in range(rngrams)]
ref_counts = Counter(ngrams)
for ngram in counts:
mcount = 0 if ngram not in max_counts else max_counts[ngram]
rcount = 0 if ngram not in ref_counts else ref_counts[ngram]
max_counts[ngram] = max(mcount, rcount)
clipped_counts = {}
for ngram, count in counts.items():
clipped_counts[ngram] = min(count, max_counts[ngram])
return float(sum(clipped_counts.values())), float(sum(counts.values()))
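# Rough worked example (hypothetical tokens): with candidate "the the the" and
# a single reference "the cat", the unigram count of "the" (3) is clipped to
# the reference maximum (1), so modified_precision returns (1.0, 3.0).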
def brevity_penalty(trans, refs, mode="closest"):
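    # BP = exp(min(0, 1 - r / c)): c is the total candidate length and r the
    # accumulated (closest or shortest) reference length over the corpus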
bp_c = 0.0
bp_r = 0.0
for candidate, references in zip(trans, refs):
bp_c += len(candidate)
if mode == "shortest":
bp_r += shortest_length(references)
else:
bp_r += closest_length(candidate, references)
# Prevent zero divide
bp_c = bp_c or 1.0
return math.exp(min(0, 1.0 - bp_r / bp_c))
def bleu(trans, refs, bp="closest", smooth=False, n=4, weights=None):
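    # corpus-level BLEU: exp( sum_i w_i * log(p_i) ) scaled by the brevity
    # penalty, where p_i is the clipped i-gram precision over the whole corpus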
p_norm = [0 for _ in range(n)]
p_denorm = [0 for _ in range(n)]
for candidate, references in zip(trans, refs):
for i in range(n):
ccount, tcount = modified_precision(candidate, references, i + 1)
p_norm[i] += ccount
p_denorm[i] += tcount
bleu_n = [0 for _ in range(n)]
for i in range(n):
# add one smoothing
if smooth and i > 0:
p_norm[i] += 1
p_denorm[i] += 1
if p_norm[i] == 0 or p_denorm[i] == 0:
bleu_n[i] = -9999
else:
bleu_n[i] = math.log(float(p_norm[i]) / float(p_denorm[i]))
if weights:
if len(weights) != n:
raise ValueError("len(weights) != n: invalid weight number")
log_precision = sum([bleu_n[i] * weights[i] for i in range(n)])
else:
log_precision = sum(bleu_n) / float(n)
bp = brevity_penalty(trans, refs, bp)
score = bp * math.exp(log_precision)
return score
def read(f, lc=False):
with open(f, 'rU') as reader:
return [line.strip().split() if not lc else line.strip().lower().split()
for line in reader.readlines()]
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='BLEU score over source sentence length')
    parser.add_argument('-lc', help='Lowercase, i.e. case-insensitive setting', action='store_true')
parser.add_argument('-bp', help='Length penalty', default='closest', choices=['shortest', 'closest'])
parser.add_argument('-n', type=int, default=4, help="ngram-based BLEU")
parser.add_argument('-g', type=int, default=1, help="sentence groups for evaluation")
parser.add_argument('-source', type=str, required=True, help='The source file')
parser.add_argument('-candidate', type=str, required=True, help='The candidate translation generated by MT system')
parser.add_argument('-reference', type=str, nargs='+', required=True,
help='The references like reference or reference0, reference1, ...')
args = parser.parse_args()
cand = args.candidate
refs = args.reference
src = args.source
src_sentences = read(src, args.lc)
cand_sentences = read(cand, args.lc)
refs_sentences = [read(ref, args.lc) for ref in refs]
assert len(cand_sentences) == len(refs_sentences[0]), \
'ERROR: the length of candidate and reference must be the same.'
refs_sentences = list(zip(*refs_sentences))
sorted_candidate_sentences = sorted(zip(src_sentences, cand_sentences), key=lambda x: len(x[0]))
sorted_reference_sentences = sorted(zip(src_sentences, refs_sentences), key=lambda x: len(x[0]))
sorted_source_sentences = [v[0] for v in sorted_candidate_sentences]
sorted_candidate_sentences = [v[1] for v in sorted_candidate_sentences]
sorted_reference_sentences = [v[1] for v in sorted_reference_sentences]
groups = args.g
elements_per_group = len(sorted_source_sentences) // groups
scores = []
for gidx in range(groups):
group_candidate = sorted_candidate_sentences[gidx * elements_per_group: (gidx + 1) * elements_per_group]
group_reference = sorted_reference_sentences[gidx * elements_per_group: (gidx + 1) * elements_per_group]
group_source = sorted_source_sentences[gidx * elements_per_group: (gidx + 1) * elements_per_group]
group_average_source = float(sum([len(v) for v in group_source])) / float(len(group_source))
bleu_score = bleu(group_candidate, group_reference, bp=args.bp, n=args.n)
print("Group Idx {} Avg Source Lenngth {} BLEU Score {}".format(gidx, group_average_source, bleu_score))
scores.append((group_average_source, bleu_score))
print('AvgLength: [{}]'.format(','.join([str(s[0]) for s in scores])))
print('BLEU Score: [{}]'.format(','.join([str(s[1]) for s in scores])))
|
setup.py | davidgodzsak/opentype-svg | 166 | 11100485 | from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="opentypesvg",
use_scm_version={'write_to': 'lib/opentypesvg/__version__.py'},
setup_requires=["setuptools_scm"],
author="<NAME>",
author_email="<EMAIL>",
description="Tools for making OpenType-SVG fonts",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/adobe-type-tools/opentype-svg",
license="MIT",
platforms=["Any"],
package_dir={'': 'lib'},
packages=['opentypesvg'],
python_requires='>=3.6',
install_requires=['fontTools[woff]>=3.1.0'],
entry_points={
'console_scripts': [
"addsvg = opentypesvg.addsvg:main",
"dumpsvg = opentypesvg.dumpsvg:main",
"fonts2svg = opentypesvg.fonts2svg:main",
]
},
)
|
samza-test/src/main/python/templates.py | xiefan46/samza | 860 | 11100503 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from contextlib import nested
from jinja2 import Template
def render_config(template_location, rendered_location, properties):
"""
A method for rendering simple key/value configs into a template. Uses Jinja2
style templating.
param: template_location -- File path of the input Jinja2 template.
param: rendered_location -- File path where rendered output should be saved.
param: properties -- A dictionary of key/value pairs to be passed to the
template with the accessor name 'properties'.
"""
with nested(open(template_location, 'r'), open(rendered_location, 'w')) as (input, output):
template = Template(input.read())
rendered = template.render(properties=properties)
output.write(rendered)
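# Illustrative usage sketch (the file names and property keys below are
# hypothetical, not part of this module):
#
#   render_config('kafka.properties.j2', 'kafka.properties',
#                 {'zookeeper.connect': 'localhost:2181'})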
|
doc/src/modules/mpmath/plots/ber.py | shipci/sympy | 319 | 11100508 | # Kelvin functions ber_n(x) and bei_n(x) on the real line for n=0,2
f0 = lambda x: ber(0,x)
f1 = lambda x: bei(0,x)
f2 = lambda x: ber(2,x)
f3 = lambda x: bei(2,x)
plot([f0,f1,f2,f3],[0,10],[-10,10]) |
wsi/bin/train.py | mingrui/NCRF | 734 | 11100522 | import sys
import os
import argparse
import logging
import json
import time
import torch
from torch.utils.data import DataLoader
from torch.autograd import Variable
from torch.nn import BCEWithLogitsLoss, DataParallel
from torch.optim import SGD
from tensorboardX import SummaryWriter
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../../')
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
from wsi.data.image_producer import GridImageDataset # noqa
from wsi.model import MODELS # noqa
parser = argparse.ArgumentParser(description='Train model')
parser.add_argument('cfg_path', default=None, metavar='CFG_PATH', type=str,
help='Path to the config file in json format')
parser.add_argument('save_path', default=None, metavar='SAVE_PATH', type=str,
help='Path to the saved models')
parser.add_argument('--num_workers', default=2, type=int, help='number of'
' workers for each data loader, default 2.')
parser.add_argument('--device_ids', default='0', type=str, help='comma'
' separated indices of GPU to use, e.g. 0,1 for using GPU_0'
' and GPU_1, default 0.')
def train_epoch(summary, summary_writer, cfg, model, loss_fn, optimizer,
dataloader_tumor, dataloader_normal):
model.train()
steps = len(dataloader_tumor)
batch_size = dataloader_tumor.batch_size
grid_size = dataloader_tumor.dataset._grid_size
dataiter_tumor = iter(dataloader_tumor)
dataiter_normal = iter(dataloader_normal)
time_now = time.time()
for step in range(steps):
data_tumor, target_tumor = next(dataiter_tumor)
data_tumor = Variable(data_tumor.cuda(async=True))
target_tumor = Variable(target_tumor.cuda(async=True))
data_normal, target_normal = next(dataiter_normal)
data_normal = Variable(data_normal.cuda(async=True))
target_normal = Variable(target_normal.cuda(async=True))
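        # shuffle the tumor and normal patches together so every mini-batch
        # mixes both classes in a random order (idx_rand is a random
        # permutation over the combined batch)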
idx_rand = Variable(
torch.randperm(batch_size * 2).cuda(async=True))
data = torch.cat([data_tumor, data_normal])[idx_rand]
target = torch.cat([target_tumor, target_normal])[idx_rand]
output = model(data)
loss = loss_fn(output, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
probs = output.sigmoid()
predicts = (probs >= 0.5).type(torch.cuda.FloatTensor)
acc_data = (predicts == target).type(
torch.cuda.FloatTensor).sum().data[0] * 1.0 / (
batch_size * grid_size * 2)
loss_data = loss.data[0]
time_spent = time.time() - time_now
time_now = time.time()
logging.info(
'{}, Epoch : {}, Step : {}, Training Loss : {:.5f}, '
'Training Acc : {:.3f}, Run Time : {:.2f}'
.format(
time.strftime("%Y-%m-%d %H:%M:%S"), summary['epoch'] + 1,
summary['step'] + 1, loss_data, acc_data, time_spent))
summary['step'] += 1
if summary['step'] % cfg['log_every'] == 0:
summary_writer.add_scalar('train/loss', loss_data, summary['step'])
summary_writer.add_scalar('train/acc', acc_data, summary['step'])
summary['epoch'] += 1
return summary
def valid_epoch(summary, cfg, model, loss_fn,
dataloader_tumor, dataloader_normal):
model.eval()
steps = len(dataloader_tumor)
batch_size = dataloader_tumor.batch_size
grid_size = dataloader_tumor.dataset._grid_size
dataiter_tumor = iter(dataloader_tumor)
dataiter_normal = iter(dataloader_normal)
loss_sum = 0
acc_sum = 0
for step in range(steps):
data_tumor, target_tumor = next(dataiter_tumor)
data_tumor = Variable(data_tumor.cuda(async=True), volatile=True)
target_tumor = Variable(target_tumor.cuda(async=True))
data_normal, target_normal = next(dataiter_normal)
data_normal = Variable(data_normal.cuda(async=True), volatile=True)
target_normal = Variable(target_normal.cuda(async=True))
data = torch.cat([data_tumor, data_normal])
target = torch.cat([target_tumor, target_normal])
output = model(data)
loss = loss_fn(output, target)
probs = output.sigmoid()
predicts = (probs >= 0.5).type(torch.cuda.FloatTensor)
acc_data = (predicts == target).type(
torch.cuda.FloatTensor).sum().data[0] * 1.0 / (
batch_size * grid_size * 2)
loss_data = loss.data[0]
loss_sum += loss_data
acc_sum += acc_data
summary['loss'] = loss_sum / steps
summary['acc'] = acc_sum / steps
return summary
def run(args):
with open(args.cfg_path) as f:
cfg = json.load(f)
if not os.path.exists(args.save_path):
os.mkdir(args.save_path)
with open(os.path.join(args.save_path, 'cfg.json'), 'w') as f:
json.dump(cfg, f, indent=1)
os.environ["CUDA_VISIBLE_DEVICES"] = args.device_ids
num_GPU = len(args.device_ids.split(','))
batch_size_train = cfg['batch_size'] * num_GPU
batch_size_valid = cfg['batch_size'] * num_GPU * 2
num_workers = args.num_workers * num_GPU
if cfg['image_size'] % cfg['patch_size'] != 0:
        raise Exception('Image size % patch size != 0 : {} % {}'.
format(cfg['image_size'], cfg['patch_size']))
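    # each image is divided into a patch_per_side x patch_per_side grid;
    # grid_size is passed to the model as num_nodes (one node per patch,
    # used by the CRF layer when cfg['use_crf'] is enabled)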
patch_per_side = cfg['image_size'] // cfg['patch_size']
grid_size = patch_per_side * patch_per_side
model = MODELS[cfg['model']](num_nodes=grid_size, use_crf=cfg['use_crf'])
model = DataParallel(model, device_ids=None)
model = model.cuda()
loss_fn = BCEWithLogitsLoss().cuda()
optimizer = SGD(model.parameters(), lr=cfg['lr'], momentum=cfg['momentum'])
dataset_tumor_train = GridImageDataset(cfg['data_path_tumor_train'],
cfg['json_path_train'],
cfg['image_size'],
cfg['patch_size'],
crop_size=cfg['crop_size'])
dataset_normal_train = GridImageDataset(cfg['data_path_normal_train'],
cfg['json_path_train'],
cfg['image_size'],
cfg['patch_size'],
crop_size=cfg['crop_size'])
dataset_tumor_valid = GridImageDataset(cfg['data_path_tumor_valid'],
cfg['json_path_valid'],
cfg['image_size'],
cfg['patch_size'],
crop_size=cfg['crop_size'])
dataset_normal_valid = GridImageDataset(cfg['data_path_normal_valid'],
cfg['json_path_valid'],
cfg['image_size'],
cfg['patch_size'],
crop_size=cfg['crop_size'])
dataloader_tumor_train = DataLoader(dataset_tumor_train,
batch_size=batch_size_train,
num_workers=num_workers)
dataloader_normal_train = DataLoader(dataset_normal_train,
batch_size=batch_size_train,
num_workers=num_workers)
dataloader_tumor_valid = DataLoader(dataset_tumor_valid,
batch_size=batch_size_valid,
num_workers=num_workers)
dataloader_normal_valid = DataLoader(dataset_normal_valid,
batch_size=batch_size_valid,
num_workers=num_workers)
summary_train = {'epoch': 0, 'step': 0}
summary_valid = {'loss': float('inf'), 'acc': 0}
summary_writer = SummaryWriter(args.save_path)
loss_valid_best = float('inf')
for epoch in range(cfg['epoch']):
summary_train = train_epoch(summary_train, summary_writer, cfg, model,
loss_fn, optimizer,
dataloader_tumor_train,
dataloader_normal_train)
torch.save({'epoch': summary_train['epoch'],
'step': summary_train['step'],
'state_dict': model.module.state_dict()},
os.path.join(args.save_path, 'train.ckpt'))
time_now = time.time()
summary_valid = valid_epoch(summary_valid, cfg, model, loss_fn,
dataloader_tumor_valid,
dataloader_normal_valid)
time_spent = time.time() - time_now
logging.info(
'{}, Epoch : {}, Step : {}, Validation Loss : {:.5f}, '
'Validation Acc : {:.3f}, Run Time : {:.2f}'
.format(
time.strftime("%Y-%m-%d %H:%M:%S"), summary_train['epoch'],
summary_train['step'], summary_valid['loss'],
summary_valid['acc'], time_spent))
summary_writer.add_scalar(
'valid/loss', summary_valid['loss'], summary_train['step'])
summary_writer.add_scalar(
'valid/acc', summary_valid['acc'], summary_train['step'])
if summary_valid['loss'] < loss_valid_best:
loss_valid_best = summary_valid['loss']
torch.save({'epoch': summary_train['epoch'],
'step': summary_train['step'],
'state_dict': model.module.state_dict()},
os.path.join(args.save_path, 'best.ckpt'))
summary_writer.close()
def main():
logging.basicConfig(level=logging.INFO)
args = parser.parse_args()
run(args)
if __name__ == '__main__':
main()
|
lib/plugins/rssfeed.py | ikstream/Zeus-Scanner | 841 | 11100526 | import re
__product__ = "RSS Feed"
__description__ = (
"RSS (Rich Site Summary) is a type of web feed which allows "
"users to access updates to online content in a standardized, "
"computer-readable format"
)
def search(html, **kwargs):
html = str(html)
plugin_detection_schema = (
re.compile(r"type.[\'\"]?application/rss.xml[\'\"]?", re.I),
re.compile(r"title.[\'\"]?rss.feed[\'\"]?", re.I)
)
for plugin in plugin_detection_schema:
if plugin.search(html) is not None:
return True
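# Illustrative call (the HTML snippet below is hypothetical):
#
#   page = "<link type='application/rss+xml' title='RSS feed' href='/rss.xml'>"
#   search(page)  # returns True, the type/title patterns above match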
|
release.py | ashishb/adb-enhanced | 910 | 11100531 | <filename>release.py<gh_stars>100-1000
#!/usr/bin/env python3
import os
import subprocess
import sys
import docopt
_DIR_OF_THIS_SCRIPT = os.path.split(__file__)[0]
_VERSION_FILE_NAME = 'version.txt'
_VERSION_FILE_PATH = os.path.join(
_DIR_OF_THIS_SCRIPT, 'adbe', _VERSION_FILE_NAME)
_README_FILE_NAME = os.path.join('docs', 'README.rst')
_TEST_PYPI_URL = 'https://test.pypi.org/legacy/'
_PROJECT_NAME = 'adb-enhanced'
_SRC_FILE_NAMES = [
'abe.jar',
'apksigner.jar',
'adb_enhanced.py',
'adb_helper.py',
'asyncio_helper.py',
'main.py',
'output_helper.py',
'version.txt',
]
def _get_version():
with open(_VERSION_FILE_PATH, 'r') as file_handle:
version = file_handle.read().strip()
return version
def _set_version(version):
if not version or not version.strip():
raise Exception('version cannot be empty')
with open(_VERSION_FILE_PATH, 'w') as file_handle:
file_handle.write('%s\n' % version)
def _prompt_user_to_update_version(version_file):
current_version = _get_version()
print('Current version is %s' % current_version)
new_version = input("Enter new version: ")
_set_version(new_version or current_version)
with open(version_file, 'w') as file_handle:
        file_handle.write(new_version or current_version)
def _push_new_release_to_git(version_file):
with open(version_file) as file_handle:
version = file_handle.read()
cmds = [
'git add %s' % version_file,
'git commit -m "Setup release %s"' % version,
'git tag %s' % version,
'git push --tags',
]
for cmd in cmds:
_run_cmd_or_fail(cmd)
def _publish_package_to_pypi(testing_release=False):
if testing_release:
_run_cmd_or_fail(
'python3 -m twine upload --repository-url %s dist/*' % _TEST_PYPI_URL)
print('Few mins later, check https://test.pypi.org/project/%s/#history to confirm upload' %
_PROJECT_NAME)
else:
_run_cmd_or_fail('python3 -m twine upload dist/*')
print('Few mins later, check https://pypi.org/project/%s/#history to confirm upload' %
_PROJECT_NAME)
def _run_cmd_or_fail(cmd):
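    # the command string is executed through the shell (shell=True); any
    # non-zero exit status aborts the whole release with sys.exit(1)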
print('Executing \"%s\"...' % cmd)
with subprocess.Popen(cmd, shell=True, stdout=None, stderr=None) as process:
process.communicate()
if process.returncode == 0:
print('Successfully executed \"%s\"' % cmd)
else:
print('Failed to execute \"%s\"' % cmd)
sys.exit(1)
def _publish_release(testing_release=False):
version_file = os.path.join('adbe', 'version.txt')
_prompt_user_to_update_version(version_file)
_run_cmd_or_fail('make build')
_push_new_release_to_git(version_file)
_publish_package_to_pypi(testing_release)
# List of things which this release tool does as of today.
USAGE_STRING = """
Release script for %s
Usage:
release.py test release
release.py production release
""" % _PROJECT_NAME
def _using_python2():
return sys.version_info < (3, 0)
def main():
if _using_python2():
print('Python 2 is not supported, only Python 3 is supported')
sys.exit(1)
args = docopt.docopt(USAGE_STRING, version='1.0')
if args['test'] and args['release']:
_publish_release(testing_release=True)
elif args['production'] and args['release']:
_publish_release(testing_release=False)
else:
print('Unexpected command')
sys.exit(1)
if __name__ == '__main__':
main()
|
blockchain-workbench/rest-api-samples/python/swagger_client/api/connections_api.py | chaosmail/blockchain | 738 | 11100533 | # coding: utf-8
"""
Azure Blockchain Workbench REST API
The Azure Blockchain Workbench REST API is a Workbench extensibility point, which allows developers to create and manage blockchain applications, manage users and organizations within a consortium, integrate blockchain applications into services and platforms, perform transactions on a blockchain, and retrieve transactional and contract data from a blockchain. # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class ConnectionsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def block_get(self, connection_id, block_id, **kwargs): # noqa: E501
""" # noqa: E501
Gets the block matching a specific block ID. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.block_get(connection_id, block_id, async=True)
>>> result = thread.get()
:param async bool
:param int connection_id: The connectionId of the block (required)
:param int block_id: The id of the block (required)
:return: Block
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.block_get_with_http_info(connection_id, block_id, **kwargs) # noqa: E501
else:
(data) = self.block_get_with_http_info(connection_id, block_id, **kwargs) # noqa: E501
return data
def block_get_with_http_info(self, connection_id, block_id, **kwargs): # noqa: E501
""" # noqa: E501
Gets the block matching a specific block ID. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.block_get_with_http_info(connection_id, block_id, async=True)
>>> result = thread.get()
:param async bool
:param int connection_id: The connectionId of the block (required)
:param int block_id: The id of the block (required)
:return: Block
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['connection_id', 'block_id'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method block_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'connection_id' is set
if ('connection_id' not in params or
params['connection_id'] is None):
raise ValueError("Missing the required parameter `connection_id` when calling `block_get`") # noqa: E501
# verify the required parameter 'block_id' is set
if ('block_id' not in params or
params['block_id'] is None):
raise ValueError("Missing the required parameter `block_id` when calling `block_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'connection_id' in params:
path_params['connectionId'] = params['connection_id'] # noqa: E501
if 'block_id' in params:
path_params['blockId'] = params['block_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/v1/ledgers/connections/{connectionId}/blocks/{blockId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Block', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def blocks_get(self, connection_id, **kwargs): # noqa: E501
""" # noqa: E501
Lists the blocks for a connected blockchain network. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.blocks_get(connection_id, async=True)
>>> result = thread.get()
:param async bool
:param int connection_id: The id of the connection (required)
:param int top: The maximum number of items to return
:param int skip: The number of items to skip before returning
:return: BlockList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.blocks_get_with_http_info(connection_id, **kwargs) # noqa: E501
else:
(data) = self.blocks_get_with_http_info(connection_id, **kwargs) # noqa: E501
return data
def blocks_get_with_http_info(self, connection_id, **kwargs): # noqa: E501
""" # noqa: E501
Lists the blocks for a connected blockchain network. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.blocks_get_with_http_info(connection_id, async=True)
>>> result = thread.get()
:param async bool
:param int connection_id: The id of the connection (required)
:param int top: The maximum number of items to return
:param int skip: The number of items to skip before returning
:return: BlockList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['connection_id', 'top', 'skip'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method blocks_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'connection_id' is set
if ('connection_id' not in params or
params['connection_id'] is None):
raise ValueError("Missing the required parameter `connection_id` when calling `blocks_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'connection_id' in params:
path_params['connectionID'] = params['connection_id'] # noqa: E501
query_params = []
if 'top' in params:
query_params.append(('top', params['top'])) # noqa: E501
if 'skip' in params:
query_params.append(('skip', params['skip'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/v1/ledgers/connections/{connectionId}/blocks', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='BlockList', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def connection_get(self, connection_id, **kwargs): # noqa: E501
""" # noqa: E501
Gets the connected blockchain network matching a specific chain instance ID. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.connection_get(connection_id, async=True)
>>> result = thread.get()
:param async bool
:param int connection_id: The id of the connection (required)
:return: Connection
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.connection_get_with_http_info(connection_id, **kwargs) # noqa: E501
else:
(data) = self.connection_get_with_http_info(connection_id, **kwargs) # noqa: E501
return data
def connection_get_with_http_info(self, connection_id, **kwargs): # noqa: E501
""" # noqa: E501
Gets the connected blockchain network matching a specific chain instance ID. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.connection_get_with_http_info(connection_id, async=True)
>>> result = thread.get()
:param async bool
:param int connection_id: The id of the connection (required)
:return: Connection
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['connection_id'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method connection_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'connection_id' is set
if ('connection_id' not in params or
params['connection_id'] is None):
raise ValueError("Missing the required parameter `connection_id` when calling `connection_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'connection_id' in params:
path_params['connectionID'] = params['connection_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/v1/ledgers/connections/{connectionId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Connection', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def connections_get(self, **kwargs): # noqa: E501
""" # noqa: E501
Lists the connected blockchain networks. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.connections_get(async=True)
>>> result = thread.get()
:param async bool
:param int top: The maximum number of items to return
:param int skip: The number of items to skip before returning
:return: ConnectionList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.connections_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.connections_get_with_http_info(**kwargs) # noqa: E501
return data
def connections_get_with_http_info(self, **kwargs): # noqa: E501
""" # noqa: E501
Lists the connected blockchain networks. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.connections_get_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:param int top: The maximum number of items to return
:param int skip: The number of items to skip before returning
:return: ConnectionList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['top', 'skip'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method connections_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'top' in params:
query_params.append(('top', params['top'])) # noqa: E501
if 'skip' in params:
query_params.append(('skip', params['skip'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/v1/ledgers/connections', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ConnectionList', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def transaction_get(self, connection_id, transaction_id, **kwargs): # noqa: E501
""" # noqa: E501
Gets the transaction matching a specific transaction ID. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.transaction_get(connection_id, transaction_id, async=True)
>>> result = thread.get()
:param async bool
:param int connection_id: The connectionId of the transaction (required)
:param int transaction_id: The id of the transaction (required)
:return: Transaction
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.transaction_get_with_http_info(connection_id, transaction_id, **kwargs) # noqa: E501
else:
(data) = self.transaction_get_with_http_info(connection_id, transaction_id, **kwargs) # noqa: E501
return data
def transaction_get_with_http_info(self, connection_id, transaction_id, **kwargs): # noqa: E501
""" # noqa: E501
Gets the transaction matching a specific transaction ID. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.transaction_get_with_http_info(connection_id, transaction_id, async=True)
>>> result = thread.get()
:param async bool
:param int connection_id: The connectionId of the transaction (required)
:param int transaction_id: The id of the transaction (required)
:return: Transaction
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['connection_id', 'transaction_id'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method transaction_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'connection_id' is set
if ('connection_id' not in params or
params['connection_id'] is None):
raise ValueError("Missing the required parameter `connection_id` when calling `transaction_get`") # noqa: E501
# verify the required parameter 'transaction_id' is set
if ('transaction_id' not in params or
params['transaction_id'] is None):
raise ValueError("Missing the required parameter `transaction_id` when calling `transaction_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'connection_id' in params:
path_params['connectionId'] = params['connection_id'] # noqa: E501
if 'transaction_id' in params:
path_params['transactionId'] = params['transaction_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/v1/ledgers/connections/{connectionId}/transactions/{transactionId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Transaction', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def transactions_get(self, connection_id, **kwargs): # noqa: E501
""" # noqa: E501
Lists the transactions for a connected blockchain network. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.transactions_get(connection_id, async=True)
>>> result = thread.get()
:param async bool
:param int connection_id: The id of the connection (required)
:param int top: The maximum number of items to return
:param int skip: The number of items to skip before returning
:return: list[TransactionList]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.transactions_get_with_http_info(connection_id, **kwargs) # noqa: E501
else:
(data) = self.transactions_get_with_http_info(connection_id, **kwargs) # noqa: E501
return data
def transactions_get_with_http_info(self, connection_id, **kwargs): # noqa: E501
""" # noqa: E501
Lists the transactions for a connected blockchain network. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.transactions_get_with_http_info(connection_id, async=True)
>>> result = thread.get()
:param async bool
:param int connection_id: The id of the connection (required)
:param int top: The maximum number of items to return
:param int skip: The number of items to skip before returning
:return: list[TransactionList]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['connection_id', 'top', 'skip'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method transactions_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'connection_id' is set
if ('connection_id' not in params or
params['connection_id'] is None):
raise ValueError("Missing the required parameter `connection_id` when calling `transactions_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'connection_id' in params:
path_params['connectionId'] = params['connection_id'] # noqa: E501
query_params = []
if 'top' in params:
query_params.append(('top', params['top'])) # noqa: E501
if 'skip' in params:
query_params.append(('skip', params['skip'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/v1/ledgers/connections/{connectionId}/transactions', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[TransactionList]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
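# Illustrative usage sketch (assumes the ApiClient is already configured with
# the appropriate host and credentials, which is not shown here):
#
#   api = ConnectionsApi()
#   connections = api.connections_get(top=10)
#   blocks = api.blocks_get(connection_id=1, top=5)
#   tx = api.transaction_get(connection_id=1, transaction_id=42)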
|
scibert/__init__.py | bjlee/scibert | 1,143 | 11100537 | import scibert.dataset_readers.classification_dataset_reader
import scibert.models.text_classifier
|
securify/solidity/base/__init__.py | AlexandreH/securify2 | 258 | 11100563 | from securify.grammar import ProductionOps
class AstNodeBase(ProductionOps):
id: int
src: str
@property
def src_range(self):
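        # Solidity AST 'src' attributes have the form "offset:length:file_index";
        # the returned range is [offset, offset + length)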
a, b, _ = map(int, self.src.split(":"))
return a, a + b
@property
def src_line(self):
src_offset = self.src.split(":")[0]
src = self.root().source[:int(src_offset)]
return len([i for i in src if i == "\n"]) + 1
@property
def src_code(self):
a, b = self.src_range
return self.root().source[a:b]
@property
def src_contract(self):
#TODO: Investigate why we can't import at the beginning of file
from securify.solidity.v_0_5_x.solidity_grammar_core import ContractDefinition
if isinstance(self, ContractDefinition):
return self.name
contract = self.find_ancestor_of_type(ContractDefinition)
if contract:
return contract.name
return None
|
libs/configs_old/DOTA/r3det_gwd/cfgs_res101_dota_r3det_gwd_v1.py | Artcs1/RotationDetection | 850 | 11100581 | # -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import os
import tensorflow as tf
import math
from dataloader.pretrained_weights.pretrain_zoo import PretrainModelZoo
"""
r3det+gwd (only refine stage) + sqrt tau=2 + data aug. + ms + res101 + 3x
FLOPs: 1486599773; Trainable params: 56709560
single scale
This is your result for task 1:
mAP: 0.7507929219725538
ap of each class:
plane:0.8959363254654953,
baseball-diamond:0.8117970905483454,
bridge:0.528941868308621,
ground-track-field:0.7036828276226571,
small-vehicle:0.7773608512825836,
large-vehicle:0.824193538909995,
ship:0.869907508286484,
tennis-court:0.8931429748331345,
basketball-court:0.8306405809724954,
storage-tank:0.8596700800149586,
soccer-ball-field:0.6406646930436465,
roundabout:0.6513733145761638,
harbor:0.6805467355307225,
swimming-pool:0.7095327160387952,
helicopter:0.5845027241542098
The submitted information is :
Description: RetinaNet_DOTA_R3Det_GWD_3x_20201223_137.7w
Username: SJTU-Det
Institute: SJTU
Emailadress: <EMAIL>
TeamMembers: yangxue
multi-scale
This is your result for task 1:
mAP: 0.7565987110400665
ap of each class:
plane:0.8964434495830199,
baseball-diamond:0.8170201102441278,
bridge:0.5252222485821921,
ground-track-field:0.729602202372266,
small-vehicle:0.7601882164495246,
large-vehicle:0.8260373681497573,
ship:0.8716819767827122,
tennis-court:0.895671606749032,
basketball-court:0.8124548266228695,
storage-tank:0.8608847379716957,
soccer-ball-field:0.6224300564022891,
roundabout:0.657426649146108,
harbor:0.6805129469115909,
swimming-pool:0.7495597236660946,
helicopter:0.6438445459677189
The submitted information is :
Description: RetinaNet_DOTA_R3Det_GWD_3x_20201223_137.7w_ms
Username: SJTU-Det
Institute: SJTU
Emailadress: <EMAIL>
TeamMembers: yangxue
multi-scale + flip
This is your result for task 1:
mAP: 0.7579683916708654
ap of each class:
plane:0.8952961695089519,
baseball-diamond:0.8194477842585369,
bridge:0.5243575576743463,
ground-track-field:0.7159062303762178,
small-vehicle:0.7522987121676139,
large-vehicle:0.8282767251249042,
ship:0.8719194994284161,
tennis-court:0.8900495351735876,
basketball-court:0.8399873550181818,
storage-tank:0.8593060497981101,
soccer-ball-field:0.6213106308173056,
roundabout:0.6531238042666215,
harbor:0.7089754248166696,
swimming-pool:0.7442537008416809,
helicopter:0.6450166957918368
The submitted information is :
Description: RetinaNet_DOTA_R3Det_GWD_3x_20201223_137.7w_ms_f
Username: SJTU-Det
Institute: SJTU
Emailadress: <EMAIL>
TeamMembers: yangxue
multi-scale + mss
This is your result for task 1:
mAP: 0.7622308686293031
ap of each class: plane:0.8956285239247809, baseball-diamond:0.8123129086084642, bridge:0.5338483720916204, ground-track-field:0.7937988239995809, small-vehicle:0.7511751772047529, large-vehicle:0.8213795039925708, ship:0.8685701145257962, tennis-court:0.888685607876141, basketball-court:0.8121412656739693, storage-tank:0.8628395323661605, soccer-ball-field:0.6536075407437665, roundabout:0.650555639430081, harbor:0.728762235286426, swimming-pool:0.7304094256459305, helicopter:0.6297483580695049
The submitted information is :
Description: RetinaNet_DOTA_R3Det_GWD_3x_20201223_137.7w_ms_mss
Username: yangxue
Institute: DetectionTeamUCAS
Emailadress: <EMAIL>
TeamMembers: yangxue, yangjirui
------------------------------------------------------------------------
SWA6 + single scale
This is your result for task 1:
mAP: 0.7536616140168031
ap of each class:
plane:0.8952691912932887,
baseball-diamond:0.820224997940994,
bridge:0.5351902682975013,
ground-track-field:0.6990760862785812,
small-vehicle:0.7768718792058928,
large-vehicle:0.8311637947866269,
ship:0.8707906667992975,
tennis-court:0.9041214885985671,
basketball-court:0.844673715234245,
storage-tank:0.8622850403351549,
soccer-ball-field:0.652075771320803,
roundabout:0.6344222159586387,
harbor:0.6796953794936931,
swimming-pool:0.7077122802895897,
helicopter:0.5913514344191727
The submitted information is :
Description: RetinaNet_DOTA_R3Det_GWD_3x_20201223_137.7w_s_swa6
Username: SJTU-Det
Institute: SJTU
Emailadress: <EMAIL>
TeamMembers: yangxue
SWA9 + single scale
mAP: 0.7541074969944405
ap of each class:
plane:0.8945929006004023,
baseball-diamond:0.8239698637790639,
bridge:0.5375054158208356,
ground-track-field:0.707263661407391,
small-vehicle:0.7784868305276835,
large-vehicle:0.8313093170826968,
ship:0.8716851894984969,
tennis-court:0.894339902291634,
basketball-court:0.8124139096196356,
storage-tank:0.8621962310027806,
soccer-ball-field:0.6560753654400574,
roundabout:0.6425826383892252,
harbor:0.6781700792445191,
swimming-pool:0.7540293197758513,
helicopter:0.5669918304363327
The submitted information is :
Description: RetinaNet_DOTA_R3Det_GWD_3x_20201223_137.7w_s_swa9
Username: SJTU-Det
Institute: SJTU
Emailadress: <EMAIL>
TeamMembers: yangxue
SWA9 + multi-scale
This is your result for task 1:
mAP: 0.7611382693726101
ap of each class:
plane:0.8965990887138806,
baseball-diamond:0.8211375961066281,
bridge:0.5274322763152262,
ground-track-field:0.7164191574732031,
small-vehicle:0.7594817184692972,
large-vehicle:0.8309449959450008,
ship:0.8697248627752349,
tennis-court:0.8927766089227556,
basketball-court:0.8504259904102585,
storage-tank:0.8616896266258318,
soccer-ball-field:0.6551601152374349,
roundabout:0.6329228740725862,
harbor:0.7218286216219233,
swimming-pool:0.748803742710907,
helicopter:0.6317267651889841
The submitted information is :
Description: RetinaNet_DOTA_R3Det_GWD_3x_20201223_137.7w_ms_swa9
Username: SJTU-Det
Institute: SJTU
Emailadress: <EMAIL>
TeamMembers: yangxue
SWA9 + multi-scale + mss
This is your result for task 1:
mAP: 0.7667344567887646
ap of each class: plane:0.8932663066411421, baseball-diamond:0.8086480184788661, bridge:0.5328287201353856, ground-track-field:0.782914642745032, small-vehicle:0.7539546562133567, large-vehicle:0.8268751115449491, ship:0.8708668707425938, tennis-court:0.8934725656178293, basketball-court:0.826406161849139, storage-tank:0.8640553822619391, soccer-ball-field:0.6984796440184639, roundabout:0.6471336736334592, harbor:0.7418605732124637, swimming-pool:0.7617818475817874, helicopter:0.5984726771550614
The submitted information is :
Description: RetinaNet_DOTA_R3Det_GWD_3x_20201223_137.7w_ms_swa9_mss
Username: liuqingiqng
Institute: Central South University
Emailadress: <EMAIL>
TeamMembers: liuqingqing
"""
# ------------------------------------------------
VERSION = 'RetinaNet_DOTA_R3Det_GWD_3x_20201223'
NET_NAME = 'resnet101_v1d' # 'MobilenetV2'
# ---------------------------------------- System
ROOT_PATH = os.path.abspath('../../')
print(20*"++--")
print(ROOT_PATH)
GPU_GROUP = "0,1,2"
NUM_GPU = len(GPU_GROUP.strip().split(','))
SHOW_TRAIN_INFO_INTE = 20
SMRY_ITER = 200
SAVE_WEIGHTS_INTE = 27000 * 3
SUMMARY_PATH = os.path.join(ROOT_PATH, 'output/summary')
TEST_SAVE_PATH = os.path.join(ROOT_PATH, 'tools/test_result')
pretrain_zoo = PretrainModelZoo()
PRETRAINED_CKPT = pretrain_zoo.pretrain_weight_path(NET_NAME, ROOT_PATH)
TRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')
EVALUATE_R_DIR = os.path.join(ROOT_PATH, 'output/evaluate_result_pickle/')
# ------------------------------------------ Train and test
RESTORE_FROM_RPN = False
FIXED_BLOCKS = 1 # allow 0~3
FREEZE_BLOCKS = [True, False, False, False, False] # for gluoncv backbone
USE_07_METRIC = True
ADD_BOX_IN_TENSORBOARD = True
MUTILPY_BIAS_GRADIENT = 2.0  # if None, will not multiply
GRADIENT_CLIPPING_BY_NORM = 10.0 # if None, will not clip
CLS_WEIGHT = 1.0
REG_WEIGHT = 2.0
BATCH_SIZE = 1
EPSILON = 1e-5
MOMENTUM = 0.9
LR = 1e-3
DECAY_STEP = [SAVE_WEIGHTS_INTE*12, SAVE_WEIGHTS_INTE*16, SAVE_WEIGHTS_INTE*20]
MAX_ITERATION = SAVE_WEIGHTS_INTE*20
WARM_SETP = int(1.0 / 4.0 * SAVE_WEIGHTS_INTE)
# -------------------------------------------- Dataset
DATASET_NAME = 'DOTA' # 'pascal', 'coco'
PIXEL_MEAN = [123.68, 116.779, 103.939] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
PIXEL_MEAN_ = [0.485, 0.456, 0.406]
PIXEL_STD = [0.229, 0.224, 0.225] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
IMG_SHORT_SIDE_LEN = [800, 640, 700, 900, 1000, 1100]
IMG_MAX_LENGTH = 1100
CLASS_NUM = 15
IMG_ROTATE = True
RGB2GRAY = True
VERTICAL_FLIP = True
HORIZONTAL_FLIP = True
IMAGE_PYRAMID = True
# --------------------------------------------- Network
SUBNETS_WEIGHTS_INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.01, seed=None)
SUBNETS_BIAS_INITIALIZER = tf.constant_initializer(value=0.0)
PROBABILITY = 0.01
FINAL_CONV_BIAS_INITIALIZER = tf.constant_initializer(value=-math.log((1.0 - PROBABILITY) / PROBABILITY))
WEIGHT_DECAY = 1e-4
USE_GN = False
NUM_SUBNET_CONV = 4
NUM_REFINE_STAGE = 1
USE_RELU = False
FPN_CHANNEL = 256
FPN_MODE = 'fpn'
# --------------------------------------------- Anchor
LEVEL = ['P3', 'P4', 'P5', 'P6', 'P7']
BASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]
ANCHOR_STRIDE = [8, 16, 32, 64, 128]
ANCHOR_SCALES = [2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]
ANCHOR_RATIOS = [1, 1 / 2, 2., 1 / 3., 3., 5., 1 / 5.]
ANCHOR_ANGLES = [-90, -75, -60, -45, -30, -15]
ANCHOR_SCALE_FACTORS = None
USE_CENTER_OFFSET = True
METHOD = 'H'
ANGLE_RANGE = 90
# -------------------------------------------- Head
SHARE_NET = True
USE_P5 = True
IOU_POSITIVE_THRESHOLD = 0.5
IOU_NEGATIVE_THRESHOLD = 0.4
REFINE_IOU_POSITIVE_THRESHOLD = [0.6, 0.7]
REFINE_IOU_NEGATIVE_THRESHOLD = [0.5, 0.6]
NMS = True
NMS_IOU_THRESHOLD = 0.1
MAXIMUM_DETECTIONS = 100
FILTERED_SCORE = 0.05
VIS_SCORE = 0.4
# -------------------------------------------- GWD
GWD_TAU = 2.0
GWD_FUNC = tf.sqrt
|
examples/02.random_scene.py | blaine141/NVISII | 149 | 11100594 | <reponame>blaine141/NVISII
# 02.random_scene.py
#
# This shows how to generate a randomized scene using a couple built-in mesh
# types and some randomized materials.
import nvisii
from random import *
import colorsys
opt = lambda: None
opt.nb_objs = 10000
opt.spp = 16
opt.width = 1920
opt.height = 1080
opt.out = '02_random_scene.png'
# nvisii uses sets of components to represent a scene.
# We can increase the max component limit here if necessary.
# In this case, we'll need 16 meshes, a material for each object,
# and finally a transform for each object as well as one more for the camera.
nvisii.initialize(
headless = True,
verbose = True,
lazy_updates = True,
max_entities = opt.nb_objs + 1,
max_transforms = opt.nb_objs + 1,
max_materials = opt.nb_objs,
max_meshes = 16
# these are also available
# max_lights, max_textures, & max_cameras
)
# Turn on the denoiser
nvisii.enable_denoiser()
# Create a camera
camera = nvisii.entity.create(
name = "camera",
transform = nvisii.transform.create("camera"),
camera = nvisii.camera.create(
name = "camera",
aspect = float(opt.width)/float(opt.height)
)
)
camera.get_transform().look_at(at = (0,0,0), up = (1,0,0), eye = (0,0,5))
nvisii.set_camera_entity(camera)
# Lets create a random scene.
# First lets pre-load some mesh components.
nvisii.mesh.create_sphere('m_0')
nvisii.mesh.create_torus_knot('m_1')
nvisii.mesh.create_teapotahedron('m_2')
nvisii.mesh.create_box('m_3')
nvisii.mesh.create_capped_cone('m_4')
nvisii.mesh.create_capped_cylinder('m_5')
nvisii.mesh.create_capsule('m_6')
nvisii.mesh.create_cylinder('m_7')
nvisii.mesh.create_disk('m_8')
nvisii.mesh.create_dodecahedron('m_9')
nvisii.mesh.create_icosahedron('m_10')
nvisii.mesh.create_icosphere('m_11')
nvisii.mesh.create_rounded_box('m_12')
nvisii.mesh.create_spring('m_13')
nvisii.mesh.create_torus('m_14')
nvisii.mesh.create_tube('m_15')
def add_random_obj(name = "name"):
# this function adds a random object that uses one of the pre-loaded mesh
# components, assigning a random pose and random material to that object.
obj = nvisii.entity.create(
name = name,
transform = nvisii.transform.create(name),
material = nvisii.material.create(name)
)
mesh_id = randint(0,15)
# set the mesh. (Note that meshes can be shared, saving memory)
mesh = nvisii.mesh.get(f'm_{mesh_id}')
obj.set_mesh(mesh)
obj.get_transform().set_position((
uniform(-5,5),
uniform(-5,5),
uniform(-1,3)
))
obj.get_transform().set_rotation((
uniform(0,1), # X
uniform(0,1), # Y
uniform(0,1), # Z
uniform(0,1) # W
))
s = uniform(0.05,0.15)
obj.get_transform().set_scale((
s,s,s
))
rgb = colorsys.hsv_to_rgb(
uniform(0,1),
uniform(0.7,1),
uniform(0.7,1)
)
obj.get_material().set_base_color(rgb)
mat = obj.get_material()
# Some logic to generate "natural" random materials
material_type = randint(0,2)
# Glossy / Matte Plastic
if material_type == 0:
if randint(0,2): mat.set_roughness(uniform(.9, 1))
else : mat.set_roughness(uniform(.0,.1))
# Metallic
if material_type == 1:
mat.set_metallic(uniform(0.9,1))
if randint(0,2): mat.set_roughness(uniform(.9, 1))
else : mat.set_roughness(uniform(.0,.1))
# Glass
if material_type == 2:
mat.set_transmission(uniform(0.9,1))
# controls outside roughness
if randint(0,2): mat.set_roughness(uniform(.9, 1))
else : mat.set_roughness(uniform(.0,.1))
# controls inside roughness
if randint(0,2): mat.set_transmission_roughness(uniform(.9, 1))
else : mat.set_transmission_roughness(uniform(.0,.1))
mat.set_sheen(uniform(0,1)) # <- soft velvet like reflection near edges
mat.set_clearcoat(uniform(0,1)) # <- Extra, white, shiny layer. Good for car paint.
if randint(0,1): mat.set_anisotropic(uniform(0.9,1)) # elongates highlights
# (lots of other material parameters are listed in the docs)
# Now, use the above function to make a bunch of random objects
for i in range(opt.nb_objs):
add_random_obj(str(i))
print("\rcreating random object", i, end="")
print(" - done!")
nvisii.render_to_file(
width = opt.width,
height = opt.height,
samples_per_pixel = opt.spp,
file_path = opt.out
)
nvisii.deinitialize() |
pyGeno/tools/SingletonManager.py | ealong/pyGeno | 309 | 11100601 | #This thing is wonderful
objects = {}
def add(obj, objName='') :
if objName == '' :
key = obj.name
else :
key = objName
if key not in objects :
objects[key] = obj
return obj
def contains(k) :
return k in objects
def get(objName) :
try :
return objects[objName]
except :
return None
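# Minimal usage sketch (the `genome` object and the 'hg19' key are illustrative only):
#   add(genome, 'hg19')        # cache the object under the key 'hg19'
#   if contains('hg19'):
#       cached = get('hg19')   # returns the cached object, or None if missing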
|
lenstronomy/LensModel/Profiles/epl_numba.py | smericks/lenstronomy | 107 | 11100602 | <gh_stars>100-1000
__author__ = 'ewoudwempe'
import numpy as np
import lenstronomy.Util.param_util as param_util
from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase
from lenstronomy.Util.numba_util import jit, nan_to_num
__all__ = ['EPL_numba']
class EPL_numba(LensProfileBase):
""""
Elliptical Power Law mass profile - computation accelerated with numba
.. math::
\\kappa(x, y) = \\frac{3-\\gamma}{2} \\left(\\frac{\\theta_{E}}{\\sqrt{q x^2 + y^2/q}} \\right)^{\\gamma-1}
    where :math:`\\theta_{E}` is the (circularized) Einstein radius,
    :math:`\\gamma` is the negative power-law slope of the 3D mass distribution,
:math:`q` is the minor/major axis ratio,
and :math:`x` and :math:`y` are defined in a coordinate system aligned with the major and minor axis of the lens.
In terms of eccentricities, this profile is defined as
.. math::
        \\kappa(r) = \\frac{3-\\gamma}{2} \\left(\\frac{\\theta'_{E}}{r \\sqrt{1 - \\epsilon \\cos(2\\phi)}} \\right)^{\\gamma-1}
    where :math:`\\epsilon` is the ellipticity defined as
.. math::
\\epsilon = \\frac{1-q^2}{1+q^2}
And an Einstein radius :math:`\\theta'_{\\rm E}` related to the definition used is
.. math::
\\left(\\frac{\\theta'_{\\rm E}}{\\theta_{\\rm E}}\\right)^{2} = \\frac{2q}{1+q^2}.
The mathematical form of the calculation is presented by <NAME> (2015), https://arxiv.org/abs/1507.01819.
The current implementation is using hyperbolic functions. The paper presents an iterative calculation scheme,
converging in few iterations to high precision and accuracy.
A (slower) implementation of the same model using hyperbolic functions without the iterative calculation
is accessible as 'EPL' not requiring numba.
"""
param_names = ['theta_E', 'gamma', 'e1', 'e2', 'center_x', 'center_y']
lower_limit_default = {'theta_E': 0, 'gamma': 1.5, 'e1': -0.5, 'e2': -0.5, 'center_x': -100, 'center_y': -100}
upper_limit_default = {'theta_E': 100, 'gamma': 2.5, 'e1': 0.5, 'e2': 0.5, 'center_x': 100, 'center_y': 100}
def __init__(self):
        super(EPL_numba, self).__init__()
@staticmethod
@jit()
def function(x, y, theta_E, gamma, e1, e2, center_x=0., center_y=0.):
"""
:param x: x-coordinate (angle)
:param y: y-coordinate (angle)
:param theta_E: Einstein radius (angle), pay attention to specific definition!
:param gamma: logarithmic slope of the power-law profile. gamma=2 corresponds to isothermal
:param e1: eccentricity component
:param e2: eccentricity component
:param center_x: x-position of lens center
:param center_y: y-position of lens center
:return: lensing potential
"""
z, b, t, q, ang = param_transform(x, y, theta_E, gamma, e1, e2, center_x, center_y)
alph = alpha(z.real, z.imag, b, q, t)
return 1/(2-t)*(z.real*alph.real+z.imag*alph.imag)
@staticmethod
@jit()
def derivatives(x, y, theta_E, gamma, e1, e2, center_x=0., center_y=0.):
"""
:param x: x-coordinate (angle)
:param y: y-coordinate (angle)
:param theta_E: Einstein radius (angle), pay attention to specific definition!
:param gamma: logarithmic slope of the power-law profile. gamma=2 corresponds to isothermal
:param e1: eccentricity component
:param e2: eccentricity component
:param center_x: x-position of lens center
:param center_y: y-position of lens center
:return: deflection angles alpha_x, alpha_y
"""
z, b, t, q, ang = param_transform(x, y, theta_E, gamma, e1, e2, center_x, center_y)
alph = alpha(z.real, z.imag, b, q, t) * np.exp(1j*ang)
return alph.real, alph.imag
@staticmethod
@jit()
def hessian(x, y, theta_E, gamma, e1, e2, center_x=0., center_y=0.):
"""
:param x: x-coordinate (angle)
:param y: y-coordinate (angle)
:param theta_E: Einstein radius (angle), pay attention to specific definition!
:param gamma: logarithmic slope of the power-law profile. gamma=2 corresponds to isothermal
:param e1: eccentricity component
:param e2: eccentricity component
:param center_x: x-position of lens center
:param center_y: y-position of lens center
:return: Hessian components f_xx, f_yy, f_xy
"""
z, b, t, q, ang_ell = param_transform(x, y, theta_E, gamma, e1, e2, center_x, center_y)
ang = np.angle(z)
#r = np.abs(z)
zz_ell = z.real*q+1j*z.imag
R = np.abs(zz_ell)
phi = np.angle(zz_ell)
#u = np.minimum(nan_to_num((b/R)**t),1e100)
u = np.fmin((b/R)**t, 1e10) # I remove all factors of (b/R)**t to only have to remove nans once.
# The np.fmin is a regularisation near R=0, to avoid overflows
# in the magnification calculations
kappa = (2-t)/2
Roverr = np.sqrt(np.cos(ang)**2*q**2+np.sin(ang)**2)
Omega = omega(phi, t, q)
alph = (2*b)/(1+q)/b*Omega
gamma_shear = -np.exp(2j*(ang+ang_ell))*kappa + (1-t)*np.exp(1j*(ang+2*ang_ell)) * alph*Roverr
f_xx = (kappa + gamma_shear.real)*u
f_yy = (kappa - gamma_shear.real)*u
f_xy = gamma_shear.imag*u
# Fix the nans if x=y=0 is filled in
return f_xx, f_xy, f_xy, f_yy
@jit()
def param_transform(x, y, theta_E, gamma, e1, e2, center_x=0., center_y=0.):
"""Converts the parameters from lenstronomy definitions (as defined in PEMD) to the definitions of Tessore+ (2015)"""
t = gamma-1
phi_G, q = param_util.ellipticity2phi_q(e1, e2)
x_shift = x - center_x
y_shift = y - center_y
ang = phi_G
z = np.exp(-1j*phi_G) * (x_shift + y_shift*1j)
return z, theta_E*np.sqrt(q), t, q, ang
@jit()
def alpha(x, y, b, q, t, Omega=None):
"""
Calculates the complex deflection
:param x: x-coordinate (angle)
:param y: y-coordinate (angle)
:param b: Einstein radius (angle), pay attention to specific definition!
:param q: axis ratio
:param t: logarithmic power-law slope. Is t=gamma-1
:param Omega: If given, use this Omega (to avoid recalculations)
:return: complex deflection angle
"""
zz = x*q + 1j*y
R = np.abs(zz)
phi = np.angle(zz)
if Omega is None:
Omega = omega(phi, t, q)
# Omega = omega(phi, t, q)
alph = (2*b)/(1+q)*nan_to_num((b/R)**t*R/b)*Omega
return alph
@jit(fastmath=True) # Because of the reduction nature of this, relaxing commutativity actually matters a lot (4x speedup).
def omega(phi, t, q, niter_max=200, tol=1e-16):
f = (1-q)/(1+q)
omegas = np.zeros_like(phi, dtype=np.complex128)
niter = min(niter_max, int(np.log(tol)/np.log(f))+2) # The absolute value of each summand is always less than f, hence this limit for the number of iterations.
Omega = 1*np.exp(1j*phi)
fact = -f*np.exp(2j*phi)
for n in range(1, niter):
omegas += Omega
Omega *= (2*n-(2-t))/(2*n+(2-t)) * fact
omegas += Omega
return omegas
|
test/core/tests/lineage.py | oliverholworthy/metaflow | 5,821 | 11100632 | from metaflow_test import MetaflowTest, ExpectationFailed, steps
class LineageTest(MetaflowTest):
PRIORITY = 1
@steps(0, ['start'])
def step_start(self):
self.lineage = (self._current_step,)
@steps(1, ['join'])
def step_join(self):
# we can't easily account for the number of foreach splits,
# so we only care about unique lineages (hence set())
self.lineage = (tuple(sorted({x.lineage for x in inputs})),
self._current_step)
@steps(2, ['all'])
def step_all(self):
self.lineage += (self._current_step,)
def check_results(self, flow, checker):
from collections import defaultdict
join_sets = defaultdict(set)
lineages = {}
graph = flow._graph
# traverse all paths from the start step to the end,
# collect lineages on the way and finally compare them
# to the lineages produced by the actual run
def traverse(step, lineage):
if graph[step].type == 'join':
join_sets[step].add(tuple(lineage))
if len(join_sets[step]) < len(graph[step].in_funcs):
return
else:
lineage = (tuple(sorted(join_sets[step])),)
lineages[step] = lineage + (step,)
for n in graph[step].out_funcs:
traverse(n, lineage + (step,))
traverse('start', ())
for step in flow:
checker.assert_artifact(step.name, 'lineage', lineages[step.name])
|
12-productionize-deploy-flask/deploy-models/review_classifier.py | jugalh/data-x-plaksha | 117 | 11100655 | <filename>12-productionize-deploy-flask/deploy-models/review_classifier.py<gh_stars>100-1000
import cloudpickle as pickle
def classifier(test_data):
"""
:param test_data: data that needs prediction list or single parameter
:return: predicted class rotten or fresh
"""
# Load the pickle data
model = pickle.load(open('movie_model.pkl','rb'))
vectorizer = pickle.load(open('vectorizer.pkl','rb'))
# Check for the type
    if not isinstance(test_data, list):
        test_data = [test_data]
# Transform the test data
transformed = vectorizer.transform(test_data).toarray()
# Predict the class
predicted = model.predict(transformed).tolist()
return predicted
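# Hedged usage sketch (assumes movie_model.pkl and vectorizer.pkl sit next to this file;
# the exact label values returned depend on how the model was trained):
#   print(classifier(["A sharp, delightful comedy with a great cast."]))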
|
exercises/fr/test_03_15.py | Jette16/spacy-course | 2,085 | 11100671 | def test():
assert Doc.has_extension(
"author"
), "As-tu défini l'extension author du Doc ?"
ext = Doc.get_extension("author")
assert all(
v is None for v in ext
), "As-tu affecté la valeur par défaut à l'extension author ?"
assert Doc.has_extension("book"), "As-tu défini l'extension book du Doc ?"
ext = Doc.get_extension("book")
assert all(
v is None for v in ext
), "As-tu affecté la valeur par défaut à l'extension book ?"
assert (
"nlp.pipe(DATA, as_tuples=True)" in __solution__
), "As-tu utilisé nlp.pipe avec as_tuples=True?"
assert (
'doc._.book = context["book"]' in __solution__
), "As-tu actualisé l'extension doc._.book avec la valeur de contexte de 'book' ?"
assert (
'doc._.author = context["author"]' in __solution__
), "As-tu actualisé l'extension doc._.author avec la valeur de contexte de 'author' ?"
__msg__.good(
"Bien joué ! Cette même technique est utile pour de nombreuses taches. "
"Par exemple, tu pourrais passer des numéros de page ou de paragraphe "
"pour lier le Doc traité à sa position dans un plus grand document. Ou "
"tu pourrais passer d'autres données structurées comme des ID faisant "
"référence à une base de connaissances."
)
|
example_image_resize.py | viddik13/katna | 125 | 11100692 | <gh_stars>100-1000
import os.path
import cv2
from Katna.image import Image
def main():
# Extract specific number of key frames from video
img_module = Image()
# folder to save extracted images
output_folder_cropped_image = "resizedimages"
if not os.path.isdir(os.path.join(".", output_folder_cropped_image)):
os.mkdir(os.path.join(".", output_folder_cropped_image))
    # resize dimensions
resize_width = 500
resize_height = 600
# Image file path
image_file_path = os.path.join(".", "tests", "data", "bird_img_for_crop.jpg")
print(f"image_file_path = {image_file_path}")
resized_image = img_module.resize_image(
file_path=image_file_path,
target_width=resize_width,
target_height=resize_height,
down_sample_factor=8,
)
# cv2.imshow("resizedImage", resized_image)
# cv2.waitKey(0)
img_module.save_image_to_disk(
resized_image,
file_path=output_folder_cropped_image,
file_name="resized_image",
file_ext=".jpeg",
)
if __name__ == "__main__":
    main()
|
cactus/utils/ipc.py | danielchasehooper/Cactus | 1,048 | 11100718 | <gh_stars>1000+
import os
import logging
def signal(signal, data=None):
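    # No-op unless the DESKTOPAPP environment variable is set; otherwise the signal
    # name is attached to the payload and emitted through the logging module.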
if data is None:
data = {}
if not os.environ.get('DESKTOPAPP'):
return
data["signal"] = signal
logging.warning("", data)
|
neural_sp/trainers/optimizer.py | ishine/neural_sp | 577 | 11100731 | <gh_stars>100-1000
# Copyright 2019 Kyoto University (<NAME>)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Select optimizer."""
import logging
import torch
logger = logging.getLogger(__name__)
def set_optimizer(model, optimizer, lr, weight_decay=0.):
"""Set optimizer.
Args:
model (): model class
optimizer (str): name of optimizer
lr (float): learning rate
weight_decay (float): L2 penalty for weight decay
Returns:
opt (torch.optim): optimizer
"""
parameters = [p for p in model.parameters() if p.requires_grad]
logger.info("===== Freezed parameters =====")
for n in [n for n, p in model.named_parameters() if not p.requires_grad]:
logger.info("%s" % n)
if optimizer == 'sgd':
opt = torch.optim.SGD(parameters,
lr=lr,
weight_decay=weight_decay,
nesterov=False)
elif optimizer == 'momentum':
opt = torch.optim.SGD(parameters,
lr=lr,
momentum=0.9,
weight_decay=weight_decay,
nesterov=False)
elif optimizer == 'nesterov':
opt = torch.optim.SGD(parameters,
lr=lr,
# momentum=0.9,
momentum=0.99,
weight_decay=weight_decay,
nesterov=True)
elif optimizer == 'adadelta':
opt = torch.optim.Adadelta(parameters,
rho=0.9, # pytorch default
# rho=0.95, # chainer default
# eps=1e-8, # pytorch default
# eps=1e-6, # chainer default
eps=lr,
weight_decay=weight_decay)
elif optimizer == 'adam':
opt = torch.optim.Adam(parameters,
lr=lr,
weight_decay=weight_decay)
elif optimizer == 'noam':
opt = torch.optim.Adam(parameters,
lr=0,
betas=(0.9, 0.98),
eps=1e-09,
weight_decay=weight_decay)
elif optimizer == 'adagrad':
opt = torch.optim.Adagrad(parameters,
lr=lr,
weight_decay=weight_decay)
elif optimizer == 'rmsprop':
opt = torch.optim.RMSprop(parameters,
lr=lr,
weight_decay=weight_decay)
else:
raise NotImplementedError(optimizer)
return opt
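# Minimal usage sketch (assumes `model` is a torch.nn.Module defined elsewhere):
#   opt = set_optimizer(model, 'adam', lr=1e-3, weight_decay=1e-6)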
|
datasets/igbo_english_machine_translation/igbo_english_machine_translation.py | dkajtoch/datasets | 10,608 | 11100737 | <reponame>dkajtoch/datasets<filename>datasets/igbo_english_machine_translation/igbo_english_machine_translation.py
# coding=utf-8
# Copyright 2020 HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
import datasets
_DESCRIPTION = """\
Parallel Igbo-English Dataset
"""
_HOMEPAGE_URL = "https://github.com/IgnatiusEzeani/IGBONLP/tree/master/ig_en_mt"
_CITATION = """\
@misc{ezeani2020igboenglish,
title={Igbo-English Machine Translation: An Evaluation Benchmark},
author={<NAME> and <NAME> and <NAME> and <NAME> and <NAME>},
year={2020},
eprint={2004.00648},
archivePrefix={arXiv},
primaryClass={cs.CL},
url={https://arxiv.org/abs/2004.00648}
}
"""
_VERSION = "1.0.0"
_TRAIN_EN = "https://raw.githubusercontent.com/IgnatiusEzeani/IGBONLP/master/ig_en_mt/benchmark_dataset/train.en"
_VALID_EN = "https://raw.githubusercontent.com/IgnatiusEzeani/IGBONLP/master/ig_en_mt/benchmark_dataset/val.en"
_TEST_EN = "https://raw.githubusercontent.com/IgnatiusEzeani/IGBONLP/master/ig_en_mt/benchmark_dataset/test.en"
_TRAIN_IG = "https://raw.githubusercontent.com/IgnatiusEzeani/IGBONLP/master/ig_en_mt/benchmark_dataset/train.ig"
_VALID_IG = "https://raw.githubusercontent.com/IgnatiusEzeani/IGBONLP/master/ig_en_mt/benchmark_dataset/val.ig"
_TEST_IG = "https://raw.githubusercontent.com/IgnatiusEzeani/IGBONLP/master/ig_en_mt/benchmark_dataset/test.ig"
_LANGUAGE_PAIRS = [
("ig", "en"),
]
class IgboEnglishMachineTranslationConfig(datasets.BuilderConfig):
def __init__(self, *args, lang1=None, lang2=None, **kwargs):
super().__init__(
*args,
name=f"{lang1}-{lang2}",
**kwargs,
)
self.lang1 = lang1
self.lang2 = lang2
class IgboEnglishMachineTranslation(datasets.GeneratorBasedBuilder):
BUILDER_CONFIGS = [
IgboEnglishMachineTranslationConfig(
lang1=lang1,
lang2=lang2,
description=f"Translating {lang1} to {lang2} or vice versa",
version=datasets.Version(_VERSION),
)
for lang1, lang2 in _LANGUAGE_PAIRS
]
BUILDER_CONFIG_CLASS = IgboEnglishMachineTranslationConfig
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("string"),
"translation": datasets.Translation(languages=(self.config.lang1, self.config.lang2)),
},
),
supervised_keys=None,
homepage=_HOMEPAGE_URL,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
train_en = dl_manager.download_and_extract(_TRAIN_EN)
train_ig = dl_manager.download_and_extract(_TRAIN_IG)
valid_en = dl_manager.download_and_extract(_VALID_EN)
valid_ig = dl_manager.download_and_extract(_VALID_IG)
test_en = dl_manager.download_and_extract(_TEST_EN)
test_ig = dl_manager.download_and_extract(_TEST_IG)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"ig_datapath": train_ig, "en_datapath": train_en},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={"ig_datapath": valid_ig, "en_datapath": valid_en},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={"ig_datapath": test_ig, "en_datapath": test_en},
),
]
def _generate_examples(self, ig_datapath, en_datapath):
with open(ig_datapath, encoding="utf-8") as f1, open(en_datapath, encoding="utf-8") as f2:
for sentence_counter, (x, y) in enumerate(zip(f1, f2)):
x = x.strip()
y = y.strip()
result = (
sentence_counter,
{
"id": str(sentence_counter),
"translation": {"ig": x, "en": y},
},
)
yield result
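# Hedged loading sketch (assumes this script is resolved by its dataset folder name):
#   from datasets import load_dataset
#   ds = load_dataset("igbo_english_machine_translation", "ig-en")
#   ds["train"][0]["translation"]   # -> {'ig': '...', 'en': '...'}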
|
tests/links_tests/scaler_tests/test_standard_scaler.py | pfnet/chainerchem | 184 | 11100744 | <reponame>pfnet/chainerchem<gh_stars>100-1000
import os
import chainer
import numpy
import pytest
from chainer import serializers, Variable, cuda # NOQA
from chainer_chemistry.links.scaler.standard_scaler import StandardScaler
@pytest.fixture
def data():
x = numpy.array(
[[0.1, 10., 0.3],
[0.2, 20., 0.1],
[0.3, 30., 0.],
[0.4, 40., 0.]],
dtype=numpy.float32)
expect_x_scaled = numpy.array(
[[-1.3416407, -1.3416408, 1.6329931],
[-0.44721353, -0.4472136, 0.],
[0.44721368, 0.4472136, -0.8164965],
[1.3416407, 1.3416408, -0.8164965]],
dtype=numpy.float32)
return x, expect_x_scaled
@pytest.mark.parametrize('indices', [None, [0], [1, 2]])
def test_standard_scaler_transform(data, indices):
x, expect_x_scaled = data
scaler = StandardScaler()
scaler.fit(x, indices=indices)
x_scaled = scaler.transform(x)
if indices is None:
indices = numpy.arange(x.shape[1])
for index in range(x.shape[1]):
if index in indices:
assert numpy.allclose(x_scaled[:, index],
expect_x_scaled[:, index])
else:
assert numpy.allclose(x_scaled[:, index], x[:, index])
def test_standard_scaler_transform_variable(data):
x, expect_x_scaled = data
xvar = Variable(x)
scaler = StandardScaler()
scaler.fit(xvar)
x_scaled = scaler.transform(xvar)
assert isinstance(x_scaled, Variable)
assert numpy.allclose(x_scaled.array, expect_x_scaled)
@pytest.mark.gpu
def test_standard_scaler_transform_gpu(data):
x, expect_x_scaled = data
scaler = StandardScaler()
scaler.to_gpu()
x = cuda.to_gpu(x)
scaler.fit(x)
x_scaled = scaler.transform(x)
assert isinstance(x_scaled, cuda.cupy.ndarray)
assert numpy.allclose(cuda.to_cpu(x_scaled), expect_x_scaled)
@pytest.mark.parametrize('indices', [None, [0], [1, 2]])
def test_standard_scaler_inverse_transform(data, indices):
x, expect_x_scaled = data
scaler = StandardScaler()
scaler.fit(x, indices=indices)
x_inverse = scaler.inverse_transform(expect_x_scaled)
if indices is None:
indices = numpy.arange(x.shape[1])
for index in range(x.shape[1]):
if index in indices:
assert numpy.allclose(x_inverse[:, index], x[:, index])
else:
assert numpy.allclose(x_inverse[:, index],
expect_x_scaled[:, index])
@pytest.mark.parametrize('axis', [1, 2])
def test_standard_scaler_3darray(data, axis):
x, expect_x_scaled = data
s0, s1 = x.shape
if axis == 1:
# feature axis is 1, insert other axis to 2nd axis
x = numpy.broadcast_to(x[:, :, None], (s0, s1, 2))
expect_x_scaled = numpy.broadcast_to(
expect_x_scaled[:, :, None], (s0, s1, 2))
elif axis == 2:
# feature axis is 2, insert other axis to 1st axis
x = numpy.broadcast_to(x[:, None, :], (s0, 3, s1))
expect_x_scaled = numpy.broadcast_to(
expect_x_scaled[:, None, :], (s0, 3, s1))
assert x.ndim == 3
indices = None
scaler = StandardScaler()
scaler.fit(x, indices=indices, axis=axis)
x_scaled = scaler.transform(x, axis=axis)
assert x_scaled.shape == expect_x_scaled.shape
assert numpy.allclose(x_scaled, expect_x_scaled, atol=1e-7)
x_inverse = scaler.inverse_transform(expect_x_scaled, axis=axis)
for index in numpy.arange(x.shape[1]):
assert numpy.allclose(x_inverse[:, index], x[:, index], atol=1e-7)
def test_standard_scaler_fit_transform(data):
x, expect_x_scaled = data
scaler = StandardScaler()
x_scaled = scaler.fit_transform(x)
assert numpy.allclose(x_scaled, expect_x_scaled)
# TODO(nakago): fix Chainer serializer.
# Behavior changed from numpy versioin 1.16.3.
# allow_pickle=True must be passed to numpy.load function,
# in order to load `None`.
# For now, skip test for serialize `None`.
# @pytest.mark.parametrize('indices', [None, [0]])
@pytest.mark.parametrize('indices', [[0]])
def test_standard_scaler_serialize(tmpdir, data, indices):
x, expect_x_scaled = data
scaler = StandardScaler()
scaler.fit(x, indices=indices)
scaler_filepath = os.path.join(str(tmpdir), 'scaler.npz')
serializers.save_npz(scaler_filepath, scaler)
scaler2 = StandardScaler()
serializers.load_npz(scaler_filepath, scaler2)
# print('scaler2 attribs:', scaler2.mean, scaler2.std, scaler2.indices)
assert numpy.allclose(scaler.mean, scaler2.mean)
assert numpy.allclose(scaler.std, scaler2.std)
assert scaler.indices == scaler2.indices
def test_standard_scaler_assert_raises():
x = numpy.array([[0.1, 0.2, 0.3], [0.5, 0.3, 0.1]],
dtype=numpy.float32)
scaler = StandardScaler()
# call transform before fit raises error
with pytest.raises(AttributeError):
scaler.transform(x)
with pytest.raises(AttributeError):
scaler.inverse_transform(x)
def test_standard_scaler_transform_zero_std():
x = numpy.array([[1, 2], [1, 2], [1, 2]], dtype=numpy.float32)
expect_x_scaled = numpy.array([[0, 0], [0, 0], [0, 0]],
dtype=numpy.float32)
scaler = StandardScaler()
scaler.fit(x)
x_scaled = scaler.transform(x)
assert numpy.allclose(x_scaled, expect_x_scaled)
def test_standard_scaler_forward(data):
# test `forward` and `__call__` method.
indices = [0]
x, expect_x_scaled = data
scaler = StandardScaler()
scaler.fit(x, indices=indices)
x_scaled_transform = scaler.transform(x)
x_scaled_forward = scaler.forward(x)
assert numpy.allclose(x_scaled_transform, x_scaled_forward)
if int(chainer.__version__.split('.')[0]) >= 5:
# `__call__` invokes `forward` method from version 5.
# Skip test for chainer v4.
x_scaled_call = scaler(x)
assert numpy.allclose(x_scaled_transform, x_scaled_call)
if __name__ == '__main__':
pytest.main([__file__, '-v', '-s'])
|
web/cook_resp.py | wnbh/iCopy | 232 | 11100786 | from utils import load
import pymongo
cfg = load.cfg
# ### Mongodb
myclient = pymongo.MongoClient(
f"{cfg['database']['db_connect_method']}://{load.user}:{load.passwd}@{cfg['database']['db_addr']}",
port=cfg["database"]["db_port"],
connect=False,
)
mydb = myclient[cfg["database"]["db_name"]]
fav_col = mydb["fav_col"]
task_list = mydb["task_list"]
db_counters = mydb["counters"]
def get_drive_list():
drivelist = {}
all_drive = load.all_drive
drivelist['data'] = all_drive
drivelist['code'] = 20000
drivelist['message'] = ""
return drivelist
def cook_fav_info():
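    # Build the favourites payload: attach a human-readable size string and a usage
    # percentage (object count scaled against a fixed divisor of 4000) to each stored record.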
favlist = {}
fav_info_array = []
fav_info = fav_col.find({"fav_type":"fav"},{"_id": 0})
for each in fav_info:
        if 'fav_size' in each and 'fav_object' in each:
each['show_size'] = str(each['fav_size']) + " " +each['fav_size_tail']
each['percent'] = float(each['fav_object'] / 4000 )
each['show_percent'] = str(each['percent']) + "%"
else:
each['fav_size'] = "UNKNOW"
each['fav_object'] = "UNKNOW"
each['show_size'] = "UNKNOW"
each['show_percent'] = "UNKNOW"
fav_info_array.append(each)
favlist['data'] = fav_info_array
favlist['code'] = 20000
favlist['message'] = ""
return favlist
def cook_task_info():
tasklist = {}
task_info_array = []
task_info = task_list.find({"status":1,"error":0})
for each in task_info:
each['show_status'] = "Completed"
if "task_total_prog_size_tail" in each:
each['show_size'] = str(each['task_total_prog_size']) + " " + each['task_total_prog_size_tail']
else:
each['show_size'] = "UNKNOW"
task_info_array.append(each)
tasklist['code'] = 20000
tasklist['data'] = task_info_array
tasklist['message'] = ""
return tasklist
|
cronyo/deploy.py | 0xflotus/cronyo | 335 | 11100796 | <filename>cronyo/deploy.py
from botocore.client import ClientError
import os
from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED
try:
from cronyo import logger
from cronyo.config import config
from cronyo.aws_api import iam, aws_lambda, events, region, check_aws_credentials
except ImportError:
import logger
from config import config
from aws_api import iam, aws_lambda, events, region, check_aws_credentials
logger = logger.setup()
LIVE = 'live'
REVISIONS = 5
POLICY = """{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"lambda:InvokeFunction"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:PutLogEvents"
],
"Resource": "*"
}
]
}"""
ASSUMED_ROLE_POLICY = """{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Principal": {
"Service": "lambda.amazonaws.com"
}
}
]
}"""
WIRING = [
{
"lambda": {
"FunctionName": "cronyo-http_post",
"Handler": "cronyo.http_post",
"MemorySize": 128,
"Timeout": 30
}
},
{
"lambda": {
"FunctionName": "cronyo-http_get",
"Handler": "cronyo.http_get",
"MemorySize": 128,
"Timeout": 30
}
}
]
def prepare_zip():
from pkg_resources import resource_filename as resource
from yaml import dump
logger.info('creating/updating cronyo.zip')
with ZipFile('cronyo.zip', 'w', ZIP_DEFLATED) as zipf:
info = ZipInfo('config.yml')
info.external_attr = 0o664 << 16
zipf.writestr(info, dump(config))
zipf.write(resource('cronyo', 'config.py'), 'config.py')
zipf.write(resource('cronyo', 'cronyo.py'), 'cronyo.py')
zipf.write(resource('cronyo', 'logger.py'), 'logger.py')
for root, dirs, files in os.walk(resource('cronyo', 'vendor')):
for file in files:
real_file = os.path.join(root, file)
relative_file = os.path.relpath(real_file,
resource('cronyo', ''))
zipf.write(real_file, relative_file)
def role():
new_role = False
try:
logger.info('finding role')
iam('get_role', RoleName='cronyo')
except ClientError:
logger.info('role not found. creating')
iam('create_role', RoleName='cronyo',
AssumeRolePolicyDocument=ASSUMED_ROLE_POLICY)
new_role = True
role_arn = iam('get_role', RoleName='cronyo', query='Role.Arn')
logger.debug('role_arn={}'.format(role_arn))
logger.info('updating role policy')
iam('put_role_policy', RoleName='cronyo', PolicyName='cronyo',
PolicyDocument=POLICY)
if new_role:
from time import sleep
logger.info('waiting for role policy propagation')
sleep(5)
return role_arn
def _cleanup_old_versions(name):
logger.info('cleaning up old versions of {0}. Keeping {1}'.format(
name, REVISIONS))
versions = _versions(name)
for version in versions[0:(len(versions) - REVISIONS)]:
logger.debug('deleting {} version {}'.format(name, version))
aws_lambda('delete_function',
FunctionName=name,
Qualifier=version)
def _function_alias(name, version, alias=LIVE):
try:
logger.info('creating function alias {0} for {1}:{2}'.format(
alias, name, version))
arn = aws_lambda('create_alias',
FunctionName=name,
FunctionVersion=version,
Name=alias,
query='AliasArn')
except ClientError:
logger.info('alias {0} exists. updating {0} -> {1}:{2}'.format(
alias, name, version))
arn = aws_lambda('update_alias',
FunctionName=name,
FunctionVersion=version,
Name=alias,
query='AliasArn')
return arn
def _versions(name):
versions = aws_lambda('list_versions_by_function',
FunctionName=name,
query='Versions[].Version')
return versions[1:]
def _get_version(name, alias=LIVE):
return aws_lambda('get_alias',
FunctionName=name,
Name=alias,
query='FunctionVersion')
def rollback_lambda(name, alias=LIVE):
all_versions = _versions(name)
live_version = _get_version(name, alias)
try:
live_index = all_versions.index(live_version)
if live_index < 1:
raise RuntimeError('Cannot find previous version')
prev_version = all_versions[live_index - 1]
logger.info('rolling back to version {}'.format(prev_version))
_function_alias(name, prev_version)
except RuntimeError as error:
logger.error('Unable to rollback. {}'.format(repr(error)))
def rollback(alias=LIVE):
    for lambda_function in ('cronyo-track',):
rollback_lambda(lambda_function, alias)
def create_update_lambda(role_arn, wiring):
name, handler, memory, timeout = (wiring[k] for k in ('FunctionName',
'Handler',
'MemorySize',
'Timeout'))
try:
logger.info('finding lambda function')
function_arn = aws_lambda('get_function',
FunctionName=name,
query='Configuration.FunctionArn')
except ClientError:
function_arn = None
if not function_arn:
logger.info('creating new lambda function {}'.format(name))
with open('cronyo.zip', 'rb') as zf:
function_arn, version = aws_lambda('create_function',
FunctionName=name,
Runtime='python3.8',
Role=role_arn,
Handler=handler,
MemorySize=memory,
Timeout=timeout,
Publish=True,
Code={'ZipFile': zf.read()},
query='[FunctionArn, Version]')
else:
logger.info('updating lambda function {}'.format(name))
aws_lambda('update_function_configuration',
FunctionName=name,
Runtime='python3.8',
Role=role_arn,
Handler=handler,
MemorySize=memory,
Timeout=timeout)
with open('cronyo.zip', 'rb') as zf:
function_arn, version = aws_lambda('update_function_code',
FunctionName=name,
Publish=True,
ZipFile=zf.read(),
query='[FunctionArn, Version]')
function_arn = _function_alias(name, version)
_cleanup_old_versions(name)
logger.debug('function_arn={} ; version={}'.format(function_arn, version))
return function_arn
def preflight_checks():
logger.info('checking aws credentials and region')
if region() is None:
logger.error('Region is not set up. please run aws configure')
return False
try:
check_aws_credentials()
except AttributeError:
logger.error('AWS credentials not found. please run aws configure')
return False
return True
def run():
prepare_zip()
role_arn = role()
for component in WIRING + config.get("extra_wiring", []):
function_arn = create_update_lambda(role_arn, component['lambda'])
if __name__ == '__main__':
try:
preflight_checks()
run()
except Exception:
logger.error('preflight checks failed')
|
src/proxy_spider/spiders/xicidaili.py | HaoJiangGuo/fp-server | 173 | 11100804 | <reponame>HaoJiangGuo/fp-server
# -*- coding: utf-8 -*-
from scrapy import Request
from proxy_spider.spiders import _BaseSpider
from utils.collections import shuffled_range
class XicidailiSpider(_BaseSpider):
name = 'xicidaili'
allowed_domains = ['www.xicidaili.com']
def start_requests(self):
for _type in ('nn', 'nt'):
for _page in range(1, 100):
if self.complete_condition():
break
url = 'http://www.xicidaili.com/%s/%s' % (_type, _page)
yield Request(url, dont_filter=True)
def parse(self, response):
for tr in response.xpath('//table[@id="ip_list"]//tr[@class]'):
ex = tr.xpath('./td/text()').extract()
ip = ex[0]
port = ex[1]
scheme = ex[5].lower()
if ip and port and scheme in ('http', 'https'):
yield self.build_check_recipient(ip, port, scheme)
|
alembic/versions/4cd9c1a13771_add_name_origin_sources_uses.py | lmmentel/mendeleev | 105 | 11100846 | """add name origin sources uses
Revision ID: 4cd9c1a13771
Revises: 5a05464c07ae
Create Date: 2017-09-07 11:18:00.919996
"""
# revision identifiers, used by Alembic.
revision = "<KEY>"
down_revision = "5a05464c07ae"
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column("elements", sa.Column("name_origin", sa.String))
op.add_column("elements", sa.Column("sources", sa.String))
op.add_column("elements", sa.Column("uses", sa.String))
def downgrade():
with op.batch_alter_table("elements") as batch_op:
batch_op.drop_column("name_origin")
batch_op.drop_column("sources")
batch_op.drop_column("uses")
|
configs/qdtrack-frcnn_r50_fpn_12e_bdd100k_evalseg.py | SysCV/pcan | 271 | 11100860 | _base_ = './qdtrack-frcnn_r50_fpn_12e_bdd100k.py'
# dataset settings
dataset_type = 'BDDVideoDataset'
data_root = 'data/bdd/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadMultiImagesFromFile'),
dict(type='SeqLoadAnnotations', with_bbox=True, with_ins_id=True),
dict(type='SeqResize', img_scale=(1296, 720), keep_ratio=True),
dict(type='SeqRandomFlip', share_params=True, flip_ratio=0.5),
dict(type='SeqNormalize', **img_norm_cfg),
dict(type='SeqPad', size_divisor=32),
dict(type='SeqDefaultFormatBundle'),
dict(
type='SeqCollect',
keys=['img', 'gt_bboxes', 'gt_labels', 'gt_match_indices'],
ref_prefix='ref'),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1296, 720),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='VideoCollect', keys=['img'])
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=[
dict(
type=dataset_type,
ann_file=data_root + 'labels/seg_track_20/seg_track_train_cocoformat_new.json',
img_prefix=data_root + 'images/seg_track_20/train',
key_img_sampler=dict(interval=1),
ref_img_sampler=dict(num_ref_imgs=1, scope=3, method='uniform'),
pipeline=train_pipeline),
dict(
type=dataset_type,
load_as_video=False,
ann_file=data_root + 'labels/ins_seg/polygons/ins_seg_train_cocoformat.json',
img_prefix=data_root + 'images/10k/train',
pipeline=train_pipeline)
],
val=dict(
type=dataset_type,
ann_file=data_root + 'labels/seg_track_20/seg_track_val_cocoformat.json',
img_prefix=data_root + 'images/seg_track_20/val',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'labels/seg_track_20/seg_track_val_cocoformat.json',
img_prefix=data_root + 'images/seg_track_20/val',
pipeline=test_pipeline)) |
python/tests/table_tests/utils.py | azriel1rf/PokerHandEvaluator | 161 | 11100879 | <gh_stars>100-1000
import unittest
from itertools import combinations, combinations_with_replacement, permutations
from phevaluator.hash import hash_quinary
from phevaluator.tables import NO_FLUSH_5
class BaseTestNoFlushTable(unittest.TestCase):
TABLE: list[int] = NotImplemented
VISIT: list[int] = NotImplemented
NUM_CARDS: int = NotImplemented
@classmethod
def setUpClass(cls):
cls.CACHE = []
cls.USED = [0] * 13
cls.QUINARIES = []
cls.CACHE_ADDITIONAL = []
cls.USED_ADDITIONAL = [0] * 13
cls.QUINARIES_ADDITIONAL = []
# Straight Flushes are not in this table
cls.mark_four_of_a_kind()
cls.mark_full_house()
# Flushes are not in this table
cls.mark_straight()
cls.mark_three_of_a_kind()
cls.mark_two_pair()
cls.mark_one_pair()
cls.mark_high_card()
@staticmethod
def quinary_permutations(n):
return permutations(range(13)[::-1], n)
@staticmethod
def quinary_combinations(n):
return combinations(range(13)[::-1], n)
@staticmethod
def quinary_combinations_with_replacement(n):
return combinations_with_replacement(range(13)[::-1], n)
@classmethod
def gen_quinary(cls, ks, cur, additional):
if cur == len(ks):
cls.get_additional(additional)
cls.QUINARIES.append((cls.CACHE[:], cls.QUINARIES_ADDITIONAL[:]))
cls.QUINARIES_ADDITIONAL = []
else:
for i in range(12, -1, -1):
if cls.USED[i] > 0:
continue
cls.CACHE.append(i)
cls.USED[i] = ks[cur]
cls.gen_quinary(ks, cur + 1, additional)
cls.CACHE.pop(-1)
cls.USED[i] = 0
@classmethod
def get_additional(cls, n):
if n == 0:
cls.QUINARIES_ADDITIONAL.append(cls.CACHE_ADDITIONAL[:])
else:
for i in range(12, -1, -1):
if cls.USED[i] + cls.USED_ADDITIONAL[i] >= 4:
continue
cls.CACHE_ADDITIONAL.append(i)
cls.USED_ADDITIONAL[i] += 1
cls.get_additional(n - 1)
cls.CACHE_ADDITIONAL.pop(-1)
cls.USED_ADDITIONAL[i] -= 1
@classmethod
def mark_template(cls, ks):
cls.gen_quinary(ks, 0, cls.NUM_CARDS - 5)
for base, additionals in cls.QUINARIES:
hand = [0] * 13
for i, k in enumerate(ks):
hand[base[i]] = k
base_rank = NO_FLUSH_5[hash_quinary(hand, 5)]
for additional in additionals:
for i in additional:
hand[i] += 1
hash_ = hash_quinary(hand, cls.NUM_CARDS)
for i in additional:
hand[i] -= 1
if cls.VISIT[hash_] > 0:
continue
cls.TABLE[hash_] = base_rank
cls.VISIT[hash_] = 1
cls.QUINARIES = []
@classmethod
def mark_four_of_a_kind(cls):
cls.mark_template((4, 1))
@classmethod
def mark_full_house(cls):
cls.mark_template((3, 2))
@classmethod
def mark_three_of_a_kind(cls):
cls.mark_template((3, 1, 1))
@classmethod
def mark_two_pair(cls):
cls.mark_template((2, 2, 1))
@classmethod
def mark_one_pair(cls):
for paired_card in range(13)[::-1]:
for other_cards in cls.quinary_combinations(cls.NUM_CARDS - 2):
if paired_card in other_cards:
continue
hand = [0] * 13
hand[paired_card] = 2
for i in range(3):
hand[other_cards[i]] = 1
base_hash = hash_quinary(hand, 5)
base_rank = NO_FLUSH_5[base_hash]
for i in range(3, cls.NUM_CARDS - 2):
hand[other_cards[i]] = 1
hash_ = hash_quinary(hand, cls.NUM_CARDS)
if cls.VISIT[hash_] == 0:
cls.VISIT[hash_] = 1
cls.TABLE[hash_] = base_rank
@classmethod
def mark_high_card(cls):
for base in cls.quinary_combinations(cls.NUM_CARDS):
hand = [0] * 13
for i in range(5):
hand[base[i]] = 1
base_hash = hash_quinary(hand, 5)
base_rank = NO_FLUSH_5[base_hash]
for i in range(5, cls.NUM_CARDS):
hand[base[i]] = 1
hash_ = hash_quinary(hand, cls.NUM_CARDS)
if cls.VISIT[hash_] == 0:
cls.VISIT[hash_] = 1
cls.TABLE[hash_] = base_rank
@classmethod
def mark_straight(cls):
hands = []
for lowest in range(9)[::-1]: # From 10 to 2
hand = [0] * 13
for i in range(lowest, lowest + 5):
hand[i] = 1
hands.append(hand)
        # Five High Straight (ace-low)
base = [12, 3, 2, 1, 0]
hand = [0] * 13
for i in base:
hand[i] = 1
hands.append(hand)
for hand in hands:
base_rank = NO_FLUSH_5[hash_quinary(hand, 5)]
for additional in cls.quinary_combinations_with_replacement(
cls.NUM_CARDS - 5
):
for i in additional:
hand[i] += 1
hash_ = hash_quinary(hand, cls.NUM_CARDS)
if cls.VISIT[hash_] == 0:
cls.TABLE[hash_] = base_rank
cls.VISIT[hash_] = 1
for i in additional:
hand[i] -= 1
|
problem.py | wangxiaoyunanne/GraphDefense | 140 | 11100891 | <filename>problem.py
#!/usr/bin/env python
"""
problem.py
"""
from __future__ import division
from __future__ import print_function
import os
import sys
import h5py
import cPickle
import numpy as np
from scipy import sparse
from sklearn import metrics
from scipy.sparse import csr_matrix
import torch
from torch.autograd import Variable
from torch.nn import functional as F
# --
# Helper classes
class ProblemLosses:
@staticmethod
def multilabel_classification(preds, targets):
return F.multilabel_soft_margin_loss(preds, targets)
@staticmethod
def classification(preds, targets):
return F.cross_entropy(preds, targets)
@staticmethod
def regression_mae(preds, targets):
return F.l1_loss(preds, targets)
# @staticmethod
# def regression_mse(preds, targets):
# return F.mse_loss(preds - targets)
class ProblemMetrics:
@staticmethod
def multilabel_classification(y_true, y_pred):
y_pred = (y_pred > 0).astype(int)
return {
"micro" : float(metrics.f1_score(y_true, y_pred, average="micro")),
"macro" : float(metrics.f1_score(y_true, y_pred, average="macro")),
}
@staticmethod
def classification(y_true, y_pred):
y_pred = np.argmax(y_pred, axis=1)
return {
"micro" : float(metrics.f1_score(y_true, y_pred, average="micro")),
"macro" : float(metrics.f1_score(y_true, y_pred, average="macro")),
}
# return (y_pred == y_true.squeeze()).mean()
@staticmethod
def regression_mae(y_true, y_pred):
return float(np.abs(y_true - y_pred).mean())
# --
# Problem definition
def parse_csr_matrix(x):
v, r, c = x
return csr_matrix((v, (r, c)))
class NodeProblem(object):
def __init__(self, problem_path, cuda=True):
print('NodeProblem: loading started')
f = h5py.File(problem_path)
self.task = f['task'].value
self.n_classes = f['n_classes'].value if 'n_classes' in f else 1 # !!
self.feats = f['feats'].value if 'feats' in f else None
self.folds = f['folds'].value
self.targets = f['targets'].value
if 'sparse' in f and f['sparse'].value:
self.adj = parse_csr_matrix(f['adj'].value)
self.train_adj = parse_csr_matrix(f['train_adj'].value)
else:
self.adj = f['adj'].value
self.train_adj = f['train_adj'].value
f.close()
self.feats_dim = self.feats.shape[1] if self.feats is not None else None
self.n_nodes = self.adj.shape[0]
self.cuda = cuda
self.__to_torch()
self.nodes = {
"train" : np.where(self.folds == 'train')[0],
"val" : np.where(self.folds == 'val')[0],
"test" : np.where(self.folds == 'test')[0],
}
self.loss_fn = getattr(ProblemLosses, self.task)
self.metric_fn = getattr(ProblemMetrics, self.task)
print('NodeProblem: loading finished')
def __to_torch(self):
if not sparse.issparse(self.adj):
self.adj = Variable(torch.LongTensor(self.adj))
self.train_adj = Variable(torch.LongTensor(self.train_adj))
if self.cuda:
self.adj = self.adj.cuda()
self.train_adj = self.train_adj.cuda()
if self.feats is not None:
self.feats = Variable(torch.FloatTensor(self.feats))
if self.cuda:
self.feats = self.feats.cuda()
def __batch_to_torch(self, mids, targets):
""" convert batch to torch """
mids = Variable(torch.LongTensor(mids))
if self.task == 'multilabel_classification':
targets = Variable(torch.FloatTensor(targets))
elif self.task == 'classification':
targets = Variable(torch.LongTensor(targets))
elif 'regression' in self.task:
targets = Variable(torch.FloatTensor(targets))
else:
raise Exception('NodeDataLoader: unknown task: %s' % self.task)
if self.cuda:
mids, targets = mids.cuda(), targets.cuda()
return mids, targets
def iterate(self, mode, batch_size=512, shuffle=False):
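        # Yield (node ids, targets, progress fraction) batches for the requested fold,
        # optionally shuffling the node order first.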
nodes = self.nodes[mode]
idx = np.arange(nodes.shape[0])
if shuffle:
idx = np.random.permutation(idx)
n_chunks = idx.shape[0] // batch_size + 1
for chunk_id, chunk in enumerate(np.array_split(idx, n_chunks)):
mids = nodes[chunk]
targets = self.targets[mids]
mids, targets = self.__batch_to_torch(mids, targets)
yield mids, targets, chunk_id / n_chunks
|
nilmtk/legacy/__init__.py | chandru99/nilmtk | 646 | 11100893 | from . import disaggregate |
lisrd/datasets/hpatches.py | liuyuzhenn/LISRD | 225 | 11100897 | <filename>lisrd/datasets/hpatches.py
""" HPatches dataset. """
import os
import numpy as np
import cv2
import logging
from pathlib import Path
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset
from .base_dataset import BaseDataset
from .utils.data_reader import resize_and_crop
from ..utils.geometry_utils import select_k_best
from ..utils.pytorch_utils import keypoints_to_grid
class Hpatches(BaseDataset):
def __init__(self, config, device):
super().__init__(config, device)
def get_dataset(self, split):
assert split == 'test', 'Only test split supported'
return _Dataset(self._config)
class _Dataset(Dataset):
def __init__(self, config):
self._config = config
root_dir = Path(os.path.expanduser(config['data_path']))
folder_paths = [x for x in root_dir.iterdir() if x.is_dir()]
if len(folder_paths) == 0:
raise ValueError(
f'Could not find any image in folder: {root_dir}.')
logging.info(f'Found {len(folder_paths)} scenes in image folder.')
self._image0_paths = []
self._image1_paths = []
self._homographies = []
for path in folder_paths:
if (config['alteration'] != 'all'
and config['alteration'] != path.stem[0]):
continue
for i in range(2, 7):
self._image0_paths.append(str(Path(path, "1.ppm")))
self._image1_paths.append(str(Path(path, str(i) + '.ppm')))
self._homographies.append(
np.loadtxt(str(Path(path, "H_1_" + str(i)))))
def adapt_homography_to_preprocessing(self, H, img_shape0, img_shape1):
source_size0 = np.array(img_shape0, dtype=float)
source_size1 = np.array(img_shape1, dtype=float)
target_size = np.array(self._config['resize'], dtype=float)
# Get the scaling factor in resize
scale0 = np.amax(target_size / source_size0)
scaling0 = np.diag([1. / scale0, 1. / scale0, 1.]).astype(float)
scale1 = np.amax(target_size / source_size1)
scaling1 = np.diag([scale1, scale1, 1.]).astype(float)
# Get the translation params in crop
pad_y0 = (source_size0[0] * scale0 - target_size[0]) / 2.
pad_x0 = (source_size0[1] * scale0 - target_size[1]) / 2.
translation0 = np.array([[1., 0., pad_x0],
[0., 1., pad_y0],
[0., 0., 1.]], dtype=float)
pad_y1 = (source_size1[0] * scale1 - target_size[0]) / 2.
pad_x1 = (source_size1[1] * scale1 - target_size[1]) / 2.
translation1 = np.array([[1., 0., -pad_x1],
[0., 1., -pad_y1],
[0., 0., 1.]], dtype=float)
return translation1 @ scaling1 @ H @ scaling0 @ translation0
def __getitem__(self, item):
img0_path = self._image0_paths[item]
img0 = cv2.imread(img0_path)
img0 = cv2.cvtColor(img0, cv2.COLOR_BGR2RGB)
img1_path = self._image1_paths[item]
img1 = cv2.imread(img1_path)
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
H = self._homographies[item].astype(float)
if 'resize' in self._config:
H = self.adapt_homography_to_preprocessing(H, img0.shape[:2],
img1.shape[:2])
img0 = resize_and_crop(img0, self._config['resize'])
img1 = resize_and_crop(img1, self._config['resize'])
img_size = img0.shape[:2]
H_inv = np.linalg.inv(H)
img0 = img0.astype(float) / 255.
img1 = img1.astype(float) / 255.
# Extract the keypoints and descriptors of each method
features = {}
for m in self._config['models_name']:
features[m] = {}
feat0 = np.load(img0_path + '.' + m)
feat1 = np.load(img1_path + '.' + m)
# Extract a fixed number of shared keypoints between the images
kp0, mask0 = select_k_best(
feat0['keypoints'][:, [1, 0]], feat0['scores'],
self._config['num_kp'], H, img_size, margin=3)
features[m]['keypoints0'] = kp0
kp1, mask1 = select_k_best(
feat1['keypoints'][:, [1, 0]], feat1['scores'],
self._config['num_kp'], H_inv, img_size, margin=3)
features[m]['keypoints1'] = kp1
# Extract the local descriptors
features[m]['descriptors0'] = feat0['descriptors'][mask0]
features[m]['descriptors1'] = feat1['descriptors'][mask1]
# Extract meta descriptors if they exist
if 'meta_descriptors' in feat0:
meta_desc0_t = torch.tensor(feat0['meta_descriptors'])
grid0 = keypoints_to_grid(torch.tensor(kp0),
img_size).repeat(4, 1, 1, 1)
features[m]['meta_descriptors0'] = F.normalize(F.grid_sample(
meta_desc0_t,
grid0).squeeze(3).permute(2, 0, 1), dim=2).numpy()
meta_desc1_t = torch.tensor(feat1['meta_descriptors'])
grid1 = keypoints_to_grid(torch.tensor(kp1),
img_size).repeat(4, 1, 1, 1)
features[m]['meta_descriptors1'] = F.normalize(F.grid_sample(
meta_desc1_t,
grid1).squeeze(3).permute(2, 0, 1), dim=2).numpy()
return {'image0': img0, 'image1': img1, 'homography': H,
'img0_path': img0_path, 'img1_path': img1_path,
'features': features, 'img_size': img_size}
def __len__(self):
return len(self._homographies) |
asana/resources/webhooks.py | FiyaFly/python-asana | 266 | 11100933 |
from .gen.webhooks import _Webhooks
class Webhooks(_Webhooks):
"""Webhooks resource"""
def create(self, params={}, **options):
"""Establishing a webhook is a two-part process. First, a simple HTTP POST
similar to any other resource creation. Since you could have multiple
        webhooks, we recommend specifying a unique local id for each target.
Next comes the confirmation handshake. When a webhook is created, we will
send a test POST to the `target` with an `X-Hook-Secret` header as
described in the
[Resthooks Security documentation](http://resthooks.org/docs/security/).
The target must respond with a `200 OK` and a matching `X-Hook-Secret`
header to confirm that this webhook subscription is indeed expected.
        If you do not acknowledge the webhook's confirmation handshake, it will
        fail to set up, and you will receive an error in response to your attempt
        to create it. This means you need to be able to receive and complete the
        webhook handshake *while* the POST request is in flight.
Parameters
----------
resource : {Id} A resource ID to subscribe to. The resource can be a task or project.
target : {String} The URL to receive the HTTP POST.
[data] : {Object} Data for the request
"""
return self.client.post("/webhooks", params, **options)
def get_all(self, params={}, **options):
"""Returns the compact representation of all webhooks your app has
registered for the authenticated user in the given workspace.
Parameters
----------
workspace : {Id} The workspace to query for webhooks in.
[params] : {Object} Parameters for the request
- [resource] : {Id} Only return webhooks for the given resource.
"""
return self.client.get_collection("/webhooks", params, **options)
def get_by_id(self, webhook, params={}, **options):
"""Returns the full record for the given webhook.
Parameters
----------
webhook : {Id} The webhook to get.
[params] : {Object} Parameters for the request
"""
path = "/webhooks/%s" % (webhook)
return self.client.get(path, params, **options)
def delete_by_id(self, webhook, params={}, **options):
"""This method permanently removes a webhook. Note that it may be possible
to receive a request that was already in flight after deleting the
webhook, but no further requests will be issued.
Parameters
----------
webhook : {Id} The webhook to delete.
"""
path = "/webhooks/%s" % (webhook)
return self.client.delete(path, params, **options)
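# Illustrative usage (the client instance, resource id and target URL below are
# assumptions, not part of this module):
#
#   client.webhooks.create({
#       'resource': 1337,
#       'target': 'https://example.com/receive-webhook/7654',
#   })
#
# The target endpoint must echo the X-Hook-Secret header with a 200 response
# while the create() call is still in flight for the handshake to succeed.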
|
bentoml/saved_bundle/templates.py | niits/BentoML | 3,451 | 11100937 | # Copyright 2019 Atalaya Tech, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
BENTO_SERVICE_BUNDLE_SETUP_PY_TEMPLATE = """\
import setuptools
try:
# for pip >= 10
from pip._internal.req import parse_requirements
try:
# for pip >= 20.0
from pip._internal.network.session import PipSession
except ModuleNotFoundError:
# for pip >= 10, < 20.0
from pip._internal.download import PipSession
except ImportError:
# for pip <= 9.0.3
from pip.req import parse_requirements
from pip.download import PipSession
try:
raw = parse_requirements('requirements.txt', session=PipSession())
# pip >= 20.1 changed ParsedRequirement attribute from `req` to `requirement`
install_reqs = []
for i in raw:
try:
install_reqs.append(str(i.requirement))
except AttributeError:
install_reqs.append(str(i.req))
except Exception:
install_reqs = []
setuptools.setup(
name='{name}',
version='{pypi_package_version}',
description="BentoML generated model module",
long_description=\"\"\"{long_description}\"\"\",
long_description_content_type="text/markdown",
url="https://github.com/bentoml/BentoML",
packages=setuptools.find_packages(),
install_requires=install_reqs,
include_package_data=True,
package_data={{
'{name}': ['bentoml.yml', 'artifacts/*']
}},
entry_points={{
'console_scripts': [
'{name}={name}:cli',
],
}}
)
"""
MANIFEST_IN_TEMPLATE = """\
include {service_name}/bentoml.yml
graft {service_name}/artifacts
"""
MODEL_SERVER_DOCKERFILE_CPU = """\
FROM {docker_base_image}
# Configure PIP install arguments, e.g. --index-url, --trusted-host, --extra-index-url
ARG EXTRA_PIP_INSTALL_ARGS=
ENV EXTRA_PIP_INSTALL_ARGS $EXTRA_PIP_INSTALL_ARGS
ARG UID=1034
ARG GID=1034
RUN groupadd -g $GID -o bentoml && useradd -m -u $UID -g $GID -o -r bentoml
ARG BUNDLE_PATH=/home/bentoml/bundle
ENV BUNDLE_PATH=$BUNDLE_PATH
ENV BENTOML_HOME=/home/bentoml/
RUN mkdir $BUNDLE_PATH && chown bentoml:bentoml $BUNDLE_PATH -R
WORKDIR $BUNDLE_PATH
# copy over the init script; copy over entrypoint scripts
COPY --chown=bentoml:bentoml bentoml-init.sh docker-entrypoint.sh ./
RUN chmod +x ./bentoml-init.sh
# Copy docker-entrypoint.sh again because setup.sh might not exist: the setup.s[h] glob matches nothing when that file is absent, and docker-entrypoint.sh keeps the COPY command from failing.
COPY --chown=bentoml:bentoml docker-entrypoint.sh setup.s[h] ./
RUN ./bentoml-init.sh custom_setup
COPY --chown=bentoml:bentoml docker-entrypoint.sh python_versio[n] ./
RUN ./bentoml-init.sh ensure_python
COPY --chown=bentoml:bentoml environment.yml ./
RUN ./bentoml-init.sh restore_conda_env
COPY --chown=bentoml:bentoml requirements.txt ./
RUN ./bentoml-init.sh install_pip_packages
COPY --chown=bentoml:bentoml docker-entrypoint.sh bundled_pip_dependencie[s] ./bundled_pip_dependencies/
RUN rm ./bundled_pip_dependencies/docker-entrypoint.sh && ./bentoml-init.sh install_bundled_pip_packages
# copy over model files
COPY --chown=bentoml:bentoml . ./
# Default port for BentoML Service
EXPOSE 5000
USER bentoml
RUN chmod +x ./docker-entrypoint.sh
ENTRYPOINT [ "./docker-entrypoint.sh" ]
CMD ["bentoml", "serve-gunicorn", "./"]
""" # noqa: E501
INIT_PY_TEMPLATE = """\
import os
import sys
import logging
from bentoml import saved_bundle, configure_logging
from bentoml.cli.bento_service import create_bento_service_cli
# By default, ignore warnings when loading a BentoService installed as a PyPI distribution.
# The CLI resets the log level to the configured default (info); the --quiet and
# --verbose options let the user change the CLI output behavior.
configure_logging(logging_level=logging.ERROR)
__VERSION__ = "{pypi_package_version}"
__module_path = os.path.abspath(os.path.dirname(__file__))
{service_name} = saved_bundle.load_bento_service_class(__module_path)
cli=create_bento_service_cli(__module_path)
def load():
return saved_bundle.load_from_dir(__module_path)
__all__ = ['__version__', '{service_name}', 'load']
"""
|
yellowbrick/datasets/download.py | mrtrkmn/yellowbrick | 3662 | 11100946 |
# yellowbrick.datasets.download
# Downloads the example datasets for running the examples.
#
# Author: <NAME>
# Author: <NAME>
# Author: <NAME>
# Created: Wed May 18 11:54:45 2016 -0400
#
# Copyright (C) 2018 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: download.py [1f73d2b] <EMAIL> $
"""
Downloads the example datasets for running the examples.
"""
##########################################################################
## Imports
##########################################################################
import os
import zipfile
from urllib.request import urlopen
from .signature import sha256sum
from .path import get_data_home, cleanup_dataset
from yellowbrick.exceptions import DatasetsError
# Download chunk size
CHUNK = 524288
##########################################################################
## Download functions
##########################################################################
def download_data(url, signature, data_home=None, replace=False, extract=True):
"""
Downloads the zipped data set specified at the given URL, saving it to
the data directory specified by ``get_data_home``. This function verifies
the download with the given signature and extracts the archive.
Parameters
----------
url : str
The URL of the dataset on the Internet to GET
signature : str
The SHA 256 hash of the dataset archive being downloaded to verify
that the dataset has been correctly downloaded
data_home : str, optional
The path on disk where data is stored. If not passed in, it is looked
up from YELLOWBRICK_DATA or the default returned by ``get_data_home``.
replace : bool, default: False
If the data archive already exists, replace the dataset. If this is
False and the dataset exists, an exception is raised.
extract : bool, default: True
Extract the archive file after downloading it
"""
data_home = get_data_home(data_home)
# Get the name of the file from the URL
basename = os.path.basename(url)
name, _ = os.path.splitext(basename)
# Get the archive and data directory paths
archive = os.path.join(data_home, basename)
datadir = os.path.join(data_home, name)
    # If the archive exists, clean it up or raise an override exception
if os.path.exists(archive):
if not replace:
raise DatasetsError(
("dataset already exists at {}, set replace=False to overwrite").format(
archive
)
)
cleanup_dataset(name, data_home=data_home)
# Create the output directory if it does not exist
if not os.path.exists(datadir):
os.mkdir(datadir)
# Fetch the response in a streaming fashion and write it to disk.
response = urlopen(url)
with open(archive, "wb") as f:
while True:
chunk = response.read(CHUNK)
if not chunk:
break
f.write(chunk)
# Compare the signature of the archive to the expected one
if sha256sum(archive) != signature:
raise ValueError("Download signature does not match hardcoded signature!")
# If extract, extract the zipfile.
if extract:
zf = zipfile.ZipFile(archive)
zf.extractall(path=data_home)
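# Illustrative usage (the URL and signature below are placeholders, not a real
# Yellowbrick dataset):
#
#   download_data(
#       "https://example.com/datasets/sample.zip",
#       signature="<sha256 hex digest of sample.zip>",
#       replace=True,
#   )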
|
tests/test_claims.py | chhsiao1981/pyoidc | 290 | 11100950 | from oic.oic import claims_match
from oic.utils.claims import ClaimsMode
def test_claims_for_user():
user = "foobar"
user2mode = {user: "aggregate"}
claims_mode = ClaimsMode(user2mode)
assert claims_mode.aggregate(user)
def test_claims_for_missing_user():
claims_mode = ClaimsMode({})
assert not claims_mode.aggregate("nobody")
def test_non_aggregate_claims():
user = "foobar"
claims_mode = ClaimsMode({user: "distributed"})
assert not claims_mode.aggregate(user)
def test_claims_match():
claims_request = {
"sub": {"value": "248289761001"},
"auth_time": {"essential": True},
"acr": {"essential": True,
"values": ["urn:mace:incommon:iap:silver",
"urn:mace:incommon:iap:bronze"]}
}
assert claims_match("248289761001", claims_request['sub'])
assert claims_match("123456789012", claims_request['sub']) is False
assert claims_match("123456789", claims_request['auth_time'])
assert claims_match("urn:mace:incommon:iap:silver", claims_request['acr'])
assert claims_match("urn:mace:incommon:iap:gold",
claims_request['acr']) is False
|
gabbi/tests/test_data_to_string.py | scottwallacesh/gabbi | 145 | 11100978 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test handling of data field in tests.
"""
import unittest
from gabbi import case
from gabbi import handlers
class TestDataToString(unittest.TestCase):
def setUp(self):
self.case = case.HTTPTestCase('test_request')
self.case.content_handlers = []
for handler in handlers.RESPONSE_HANDLERS:
h = handler()
if hasattr(h, 'content_handler') and h.content_handler:
self.case.content_handlers.append(h)
def testHappyPath(self):
data = [{"hi": "low"}, {"yes": "no"}]
content_type = 'application/json'
body = self.case._test_data_to_string(data, content_type)
self.assertEqual('[{"hi": "low"}, {"yes": "no"}]', body)
def testNoContentType(self):
data = [{"hi": "low"}, {"yes": "no"}]
content_type = ''
with self.assertRaises(ValueError) as exc:
self.case._test_data_to_string(data, content_type)
self.assertEqual(
'no content-type available for processing data',
str(exc.exception))
def testNoHandler(self):
data = [{"hi": "low"}, {"yes": "no"}]
content_type = 'application/xml'
with self.assertRaises(ValueError) as exc:
self.case._test_data_to_string(data, content_type)
self.assertEqual(
'unable to process data to application/xml',
str(exc.exception))
|
example_data/run_sQTL.py | AdaWon/leafcutter | 170 | 11100982 |
import subprocess
import sys  # run() writes to sys.stderr, so import sys here rather than only inside the __main__ block
def run(cmd, max_minutes = 6000):
import time
sys.stderr.write("Running cmd: %s\n"%cmd)
p = subprocess.Popen(cmd ,
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True)
(file_stdin, file_stdout, file_stderr) = (p.stdin, p.stdout, p.stderr)
t = 0
r = ''
e = ''
while t < 60*max_minutes and p.poll() is None:
        time.sleep(1)  # poll once per second until the process exits or the timeout is reached
t += 1
r += file_stdout.read()
e += file_stderr.read()
r += file_stdout.read()
e += file_stderr.read()
file_stdin.close()
#lines = file_stdout.read()
lines_stderr = file_stderr.read()
    exit_code = p.poll()  # file.close() returns None, so take the exit status from the process itself
file_stdout.close()
file_stderr.close()
return (r, e, exit_code)
if __name__ == "__main__":
import sys, os
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-o", "--outprefix", dest="outprefix", default = 'leafcutter',
help="Output prefix (default leafcutter)")
parser.add_option("-t", "--tempdir", dest="tmpdir", default='./tmp/',
help="Where to output files (default ./)")
parser.add_option("-d", "--leafdir", dest="leafd", default='./',
help="Top-level LeafCutter directory")
parser.add_option("-l", "--maxintronlen", dest="maxintronlen", default = 100000,
help="Maximum intron length in bp (default 100,000bp)")
parser.add_option("-m", "--minclureads", dest="minclureads", default = 30,
help="Minimum reads in a cluster (default 30 reads)")
parser.add_option("-p", "--mincluratio", dest="mincluratio", default = 0.001,
help="Minimum fraction of reads in a cluster that support a junction (default 0.001)")
parser.add_option("-a", "--annot", dest="annotation", default = None,
help="[optional] Path of annotation GTF file e.g. ~/tools/leafcutter/clustering/gencode.v19.annotation.gtf.gz")
parser.add_option("-b", "--bams", dest="bam",
help="Text file listing bam files to quantify")
(options, args) = parser.parse_args()
try: open(options.leafd+"/clustering/leafcutter_cluster.py")
except:
sys.stderr.write("Please specify correct LeafCutter directory e.g. -d tools/leafcutter/.\n")
exit(0)
if options.bam == None:
sys.stderr.write("Error: no bam file provided...\n")
exit(0)
bams = open(options.bam).readlines()
# create tmp file directory
try: os.mkdir(options.tmpdir)
except: pass
    # (should check if samtools is installed)
sys.stderr.write("processing bam files...\n")
fout = file("%s/junction_files.txt"%options.tmpdir,'w')
for bam in bams:
bam = bam.strip()
bedfile = "%s/%s.bed"%(options.tmpdir,bam.split('/')[-1])
juncfile = "%s/%s.junc"%(options.tmpdir,bam.split('/')[-1])
fout.write(juncfile+'\n')
try: open(juncfile)
except: pass
else:
sys.stderr.write("%s exists..skipping\n"%juncfile)
continue
print run("samtools view %s | python %s/scripts/filter_cs.py | %s/scripts/sam2bed.pl --use-RNA-strand - %s"%(bam, options.leafd, options.leafd,bedfile))[1]
print run("%s/scripts/bed2junc.pl %s %s; rm %s"%(options.leafd,bedfile,juncfile, bedfile))[1]
fout.close()
print run("python %s/clustering/leafcutter_cluster.py -j %s/junction_files.txt -m %s -o %s -l %s -r %s -p %s"%(options.leafd,options.tmpdir,options.minclureads, options.outprefix,str(options.maxintronlen), options.tmpdir,str(options.mincluratio)))[1]
if options.annotation != None:
print run("python %s/clustering/get_cluster_gene.py %s %s/%s_perind.counts.gz"%(options.leafd,options.annotation, options.tmpdir,options.outprefix))[1]
pass
print run("python %s/scripts/prepare_phenotype_table.py %s/%s_perind.counts.gz"%(options.leafd,options.tmpdir,options.outprefix))
sys.stdout.write("\n*******fastQTL instructions (also see http://fastqtl.sourceforge.net/) *******\n")
sys.stdout.write("\n(1) Prepare phenotypes: Use `sh %s/%s_perind.counts.gz_prepare.sh' to create index for fastQTL (requires tabix and bgzip).\n"%(options.tmpdir,options.outprefix))
sys.stdout.write("(2) Prepare covariates: To take the top 5 PCs, use `head -6 %s/%s_perind.counts.gz.PCs > %s/%s_perind.counts.gz.PCs.PC5\n"%(options.tmpdir,options.outprefix,options.tmpdir,options.outprefix))
sys.stdout.write("(3) Prepare genotypes: bgzip + tabix your genotype (VCF) file > SNPs.MAF05.txt.gen.gz (not included)\n")
sys.stdout.write("(4) Run fastQTL: Use e.g. `FastQTL/bin/fastQTL.static --vcf SNPs.MAF05.txt.gen.gz --bed %s/%s_perind.counts.gz.qqnorm_chr1.gz -out %s/%s_output_chr1 --chunk 1 1 --window 1e5 --cov %s/%s_perind.counts.gz.PCs.PC5\n\n\n"%(options.tmpdir,options.outprefix,options.tmpdir,options.outprefix,options.tmpdir,options.outprefix))
|
examples/perl_rockstar.py | hoojaoh/rockstar | 4,603 | 11100997 | from rockstar import RockStar
perl_code = 'print "Hello World"'
rock_it_bro = RockStar(days=400, file_name='hello_world.pl', code=perl_code)
rock_it_bro.make_me_a_rockstar()
|
ISMLnextGen/geventCuncurrencyTest.py | Ravenclaw-OIer/ISML_auto_voter | 128 | 11101006 |
import gevent,asyncio
from gevent import monkey
# After a socket sends a request it would normally block waiting for the response; gevent changes that mechanism
# socket.setblocking(False) --> after sending a request, do not wait for the server's response
monkey.patch_all()  # find the built-in socket and swap it for gevent's own cooperative version
import requests
def fetch_async(method, url, req_kwargs,id):
print('started',id, method, url, req_kwargs)
response = requests.request(method=method, url=url, **req_kwargs)
print('finished', id, response.url, len(response.content))
# ##### send the requests #####
##gevent.joinall([
##    # spawn creates 3 tasks here (really 3 greenlets); each one runs fetch_async
## gevent.spawn(fetch_async, method='get', url='https://www.python.org/', req_kwargs={}),
## gevent.spawn(fetch_async, method='get', url='https://www.yahoo.com/', req_kwargs={}),
## gevent.spawn(fetch_async, method='get', url='https://github.com/', req_kwargs={}),
##])
for i in range(100):
gevent.spawn(fetch_async, method='get', url='http://localhost:55556', req_kwargs={},id=i),
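# Note: the spawned greenlets are never joined explicitly; the asyncio event loop
# below presumably just keeps the main thread alive, and because monkey.patch_all()
# makes its blocking wait cooperative, the spawned greenlets still get scheduled.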
loop=asyncio.get_event_loop()
loop.run_forever()
|
python/federatedml/util/schema_check.py | hubert-he/FATE | 3,787 | 11101018 | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_arch.computing import is_table
from federatedml.util import LOGGER
def check_schema(input_schema, output_schema):
LOGGER.debug(f"input schema: {input_schema} -> output schema: {output_schema}")
if output_schema is None:
raise EnvironmentError(
f"output_schema is None while input data has schema.")
input_header = input_schema.get("header", None)
output_header = output_schema.get("header", None)
if input_header is not None and output_header is None:
raise EnvironmentError(
f"output header is None while input data has header.")
def assert_schema_consistent(func):
def _func(*args, **kwargs):
input_schema = None
all_args = []
all_args.extend(args)
all_args.extend(kwargs.values())
for arg in all_args:
if is_table(arg):
input_schema = arg.schema
break
result = func(*args, **kwargs)
if input_schema is not None:
# single data set
if is_table(result) and result.count() > 0:
output_schema = result.schema
check_schema(input_schema, output_schema)
# multiple data sets
elif type(result).__name__ in ["list", "tuple"]:
for output_data in result:
if is_table(output_data) and output_data.count() > 0:
output_schema = output_data.schema
check_schema(input_schema, output_schema)
return result
return _func
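# Illustrative usage (my_transform and its table argument are hypothetical):
#
#   @assert_schema_consistent
#   def my_transform(data_instances):
#       ...  # must return table(s) whose schema stays consistent with the input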
|
hpccm/templates/CMakeBuild.py | robertmaynard/hpc-container-maker | 340 | 11101024 |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""CMakeBuild template"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from six.moves import shlex_quote
import copy
import posixpath
import hpccm.base_object
class CMakeBuild(hpccm.base_object):
"""Template for cmake workflows"""
def __init__(self, **kwargs):
"""Initialize CMakeBuild template"""
super(CMakeBuild, self).__init__(**kwargs)
self.__build_directory = None
self.cmake_opts = kwargs.get('opts', [])
self.parallel = kwargs.get('parallel', '$(nproc)')
self.prefix = kwargs.get('prefix', '/usr/local')
# Some components complain if some compiler variables are
# enabled, e.g., MVAPICH2 with F90, so provide a way for the
# caller to disable any of the compiler variables.
self.toolchain_control = kwargs.get('toolchain_control',
{'CC': True, 'CXX': True,
'F77': True, 'F90': True,
'FC': True})
def build_step(self, target='all', parallel=None):
"""Generate cmake build command line string"""
if not parallel:
parallel = self.parallel
return 'cmake --build {0} --target {1} -- -j{2}'.format(
self.__build_directory, target, parallel)
def configure_step(self, build_directory='build', directory=None,
environment=[], opts=None, toolchain=None):
"""Generate cmake command line string"""
change_directory = ''
if directory:
src_directory = directory
build_directory = posixpath.join(directory, build_directory)
change_directory = "mkdir -p {0} && cd {0} && ".format(
build_directory)
else:
# Assume the build directory is a subdirectory of the source
# directory and we are already in the build directory
src_directory = '..'
# Cache this for the build step
self.__build_directory = build_directory
e = copy.copy(environment)
if toolchain:
if toolchain.CC and self.toolchain_control.get('CC'):
e.append('CC={}'.format(toolchain.CC))
if toolchain.CFLAGS:
e.append('CFLAGS={}'.format(shlex_quote(toolchain.CFLAGS)))
if toolchain.CPPFLAGS:
e.append('CPPFLAGS={}'.format(shlex_quote(toolchain.CPPFLAGS)))
if toolchain.CXX and self.toolchain_control.get('CXX'):
e.append('CXX={}'.format(toolchain.CXX))
if toolchain.CXXFLAGS:
e.append('CXXFLAGS={}'.format(shlex_quote(
toolchain.CXXFLAGS)))
if toolchain.F77 and self.toolchain_control.get('F77'):
e.append('F77={}'.format(toolchain.F77))
if toolchain.F90 and self.toolchain_control.get('F90'):
e.append('F90={}'.format(toolchain.F90))
if toolchain.FC and self.toolchain_control.get('FC'):
e.append('FC={}'.format(toolchain.FC))
if toolchain.FCFLAGS:
e.append('FCFLAGS={}'.format(shlex_quote(toolchain.FCFLAGS)))
if toolchain.FFLAGS:
e.append('FFLAGS={}'.format(shlex_quote(toolchain.FFLAGS)))
if toolchain.FLIBS:
e.append('FLIBS={}'.format(shlex_quote(toolchain.FLIBS)))
if toolchain.LD_LIBRARY_PATH:
e.append('LD_LIBRARY_PATH={}'.format(shlex_quote(
toolchain.LD_LIBRARY_PATH)))
if toolchain.LDFLAGS:
e.append('LDFLAGS={}'.format(shlex_quote(toolchain.LDFLAGS)))
if toolchain.LIBS:
e.append('LIBS={}'.format(shlex_quote(toolchain.LIBS)))
configure_env = ' '.join(e)
if configure_env:
configure_env += ' '
configure_opts = ''
if not opts and self.cmake_opts:
opts = self.cmake_opts
if opts:
configure_opts = ' '.join(opts)
configure_opts += ' '
if self.prefix:
configure_opts = '-DCMAKE_INSTALL_PREFIX={0:s} {1}'.format(
self.prefix, configure_opts)
cmd = '{0}{1}cmake {2}{3}'.format(
change_directory, configure_env, configure_opts, src_directory)
# Add an annotation if the caller inherits from the annotate template
if callable(getattr(self, 'add_annotation', None)):
self.add_annotation('cmake', '{1}cmake {2}'.format(
change_directory, configure_env, configure_opts,
src_directory).strip())
return cmd.strip() # trim whitespace
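# Illustrative usage (paths and options are assumptions):
#
#   cm = CMakeBuild(prefix='/usr/local/myapp')
#   configure_cmd = cm.configure_step(directory='/var/tmp/myapp-src',
#                                     opts=['-DCMAKE_BUILD_TYPE=Release'])
#   build_cmd = cm.build_step(target='install')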
|