max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars)
---|---|---|---|---|
src/hieroglyph/tests/test_builder.py | wescpy/hieroglyph | 115 | 11142715 | import glob
from unittest import TestCase
from bs4 import BeautifulSoup
from sphinx_testing import (
TestApp,
)
from hieroglyph.tests import util
from hieroglyph.tests.util import with_app
import hieroglyph.builder
class SlideBuilderTests(TestCase):
@with_app()
def test_get_theme_options(self, app, *args):
builder = hieroglyph.builder.SlideBuilder(app)
builder.init()
resolved_theme_options = builder.get_theme_options()
self.assertIsInstance(
resolved_theme_options,
dict,
)
self.assertIn(
'custom_css',
resolved_theme_options,
)
self.assertIn(
'custom_js',
resolved_theme_options,
)
@with_app()
def test_get_theme_options_with_overrides(self, app, *args):
builder = hieroglyph.builder.SlideBuilder(app)
builder.init()
resolved_theme_options = builder.get_theme_options()
self.assertEqual(
resolved_theme_options['custom_css'],
'',
)
app = TestApp(
srcdir=util.test_root,
copy_srcdir_to_tmpdir=True,
confoverrides={
'slide_theme_options': {
'custom_css': 'testing.css',
},
},
)
builder = hieroglyph.builder.SlideBuilder(app)
builder.init()
resolved_theme_options = builder.get_theme_options()
self.assertEqual(
resolved_theme_options['custom_css'],
'testing.css',
)
@with_app(
buildername='slides',
)
def test_html_static_dir_contents_override_theme(self, sphinx_app, status, warning):
self.assertIsInstance(
sphinx_app.builder,
hieroglyph.builder.AbstractSlideBuilder,
)
sphinx_app.build()
built_styles = open(sphinx_app.builddir/'slides'/'_static'/'styles.css').read()
static_styles = open(sphinx_app.srcdir/'_static'/'styles.css').read()
self.assertEqual(
built_styles,
static_styles,
)
@with_app(
buildername='slides',
confoverrides={
'slide_title': 'SLIDES TITLE',
},
)
def test_docstitle_uses_slidetitle(self, app, *args):
builder = app.builder
builder.prepare_writing([])
self.assertEqual(
builder.globalcontext['docstitle'],
'SLIDES TITLE',
)
@with_app(buildername='slides')
def test_docstitle_fallback_to_html_title(self, app, status, warning):
builder = app.builder
builder.prepare_writing([])
self.assertEqual(
builder.globalcontext['docstitle'],
builder.config.html_title,
)
class SingleFileBuilderTests(TestCase):
@with_app(
buildername='singlefile-slides',
srcdir=util.test_root.parent/'singlefile',
)
def test_builds_single_file(self, app, *args):
app.build()
self.assertEqual(
len(glob.glob(app.builddir/'singlefile-slides'/'*.html')),
1,
)
@with_app(
buildername='singlefile-slides',
srcdir=util.test_root.parent/'singlefile',
)
def test_adjusts_section_levels_to_account_for_toctree(self, app, *args):
"""The TOCTREE pushes sections/slides down a level if not handled."""
app.build()
with open(app.builddir/'singlefile-slides'/'index.html') as html_file:
tree = BeautifulSoup(html_file.read(), "html.parser")
contents = tree.find_all('article')
self.assertEqual(len(contents), 4)
@with_app(
buildername='singlefile-slides',
srcdir=util.test_root.parent/'singlefile',
)
def test_slide_directive_closes_correctly_at_end_of_source_file(self, app, *args):
app.build()
with open(app.builddir/'singlefile-slides'/'index.html') as html_file:
tree = BeautifulSoup(html_file.read(), "html.parser")
# test to see that all the slides are siblings
slides = [
s for s in tree.section.children
if s.name == 'article'
]
self.assertEqual(len(slides), 4)
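# Illustrative sketch of the same override set directly in a Sphinx project's
# conf.py rather than through TestApp confoverrides. The option keys mirror the
# ones asserted in the tests above; 'custom.css' is a hypothetical file name.
#
#     slide_theme_options = {
#         'custom_css': 'custom.css',
#         'custom_js': '',
#     }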
|
datasets.py | GongXinyuu/AutoGAN | 364 | 11142737 | # -*- coding: utf-8 -*-
# @Date : 2019-07-25
# @Author : <NAME> (<EMAIL>)
# @Link : None
# @Version : 0.0
import torch
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torch.utils.data import Dataset
class ImageDataset(object):
def __init__(self, args, cur_img_size=None):
img_size = cur_img_size if cur_img_size else args.img_size
if args.dataset.lower() == "cifar10":
Dt = datasets.CIFAR10
transform = transforms.Compose(
[
transforms.Resize(img_size),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
)
args.n_classes = 10
elif args.dataset.lower() == "stl10":
Dt = datasets.STL10
transform = transforms.Compose(
[
transforms.Resize(img_size),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
)
else:
raise NotImplementedError("Unknown dataset: {}".format(args.dataset))
if args.dataset.lower() == "stl10":
self.train = torch.utils.data.DataLoader(
Dt(
root=args.data_path,
split="train+unlabeled",
transform=transform,
download=True,
),
batch_size=args.dis_batch_size,
shuffle=True,
num_workers=args.num_workers,
pin_memory=True,
)
self.valid = torch.utils.data.DataLoader(
Dt(root=args.data_path, split="test", transform=transform),
batch_size=args.dis_batch_size,
shuffle=False,
num_workers=args.num_workers,
pin_memory=True,
)
self.test = self.valid
else:
self.train = torch.utils.data.DataLoader(
Dt(root=args.data_path, train=True, transform=transform, download=True),
batch_size=args.dis_batch_size,
shuffle=True,
num_workers=args.num_workers,
pin_memory=True,
)
self.valid = torch.utils.data.DataLoader(
Dt(root=args.data_path, train=False, transform=transform),
batch_size=args.dis_batch_size,
shuffle=False,
num_workers=args.num_workers,
pin_memory=True,
)
self.test = self.valid
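# Illustrative usage sketch: ImageDataset only reads attributes from the args
# object, so a SimpleNamespace carrying the fields referenced above is enough.
# The data path, batch size and worker count below are placeholders, not values
# taken from the AutoGAN configs.
if __name__ == "__main__":
    from types import SimpleNamespace

    demo_args = SimpleNamespace(
        dataset="cifar10",
        img_size=32,
        data_path="./data",
        dis_batch_size=64,
        num_workers=2,
    )
    loaders = ImageDataset(demo_args)  # downloads CIFAR-10 on first use
    images, labels = next(iter(loaders.train))
    print(images.shape)  # expected: torch.Size([64, 3, 32, 32])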
|
lidar_to_depth.py | jytime/Deep-SfM-Revisited | 126 | 11142752 | <filename>lidar_to_depth.py
import numpy as np
import os
import glob
import cv2
import pykitti
# Export Lidar Data to Depth Maps
# Change this to the directory where you store KITTI data
basedir = '/PATH/TO/YOUR/KITTIVO/DATA'
for sequence in ['00','01','02','03','04','05','06','07','08','09','10']:
# Specify the dataset to load
print('Sequence ' + sequence)
# Load the data. Optionally, specify the frame range to load.
# dataset = pykitti.odometry(basedir, sequence)
dataset = pykitti.odometry(basedir, sequence)
# for cam2_image in dataset.cam2:
for i in range(len(dataset)):
color = np.array(dataset.get_cam2(i))
img_width = color.shape[1]
img_height = color.shape[0]
depth = np.zeros([img_height,img_width])
velo = dataset.get_velo(i)
velo[:,-1] = 1
temp = dataset.calib.P_rect_20.dot(dataset.calib.T_cam0_velo)
results = temp.dot(velo.T)
uv = results[:2,:]/results[-1,:]
z = results[-1,:]
valid = (uv[0,:] > 0) & (np.round(uv[0,:]) < img_width) & (uv[1,:] > 0) &(np.round(uv[1,:]) < img_height) &(z>0)&(z<1000)
valid_index = np.round(uv[:,valid]).astype('uint32')
depth[valid_index[1],valid_index[0]] = z[valid]
file_name = dataset.velo_files[i]
out_name = file_name.replace('sequences','RealDepth').replace('bin','png')
depth_to_write = depth.copy() * 256
depth_to_write[depth_to_write<0] = 0
depth_to_write[depth_to_write>65535] = 0
depth_to_write = depth_to_write.astype('uint16')
if not os.path.exists(os.path.dirname(out_name)):
os.makedirs(os.path.dirname(out_name))
cv2.imwrite(out_name,depth_to_write)
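# Illustrative helper wrapping the projection used in the loop above:
# P_rect_20 . T_cam0_velo maps homogeneous velodyne points into the rectified
# cam2 image plane, the first two rows divided by the third give pixel
# coordinates, and the third row itself is the depth in metres.
def project_velo_to_depth(velo, P_rect_20, T_cam0_velo, img_height, img_width):
    pts = velo.copy()
    pts[:, -1] = 1  # homogeneous coordinates
    proj = P_rect_20.dot(T_cam0_velo).dot(pts.T)  # 3 x N
    uv = proj[:2, :] / proj[-1, :]  # pixel coordinates
    z = proj[-1, :]  # depth along the camera axis
    valid = (uv[0, :] > 0) & (np.round(uv[0, :]) < img_width) & \
            (uv[1, :] > 0) & (np.round(uv[1, :]) < img_height) & \
            (z > 0) & (z < 1000)
    idx = np.round(uv[:, valid]).astype('uint32')
    depth = np.zeros([img_height, img_width])
    depth[idx[1], idx[0]] = z[valid]
    return depth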
|
src/cloudant/document.py | inuyasha82/python-cloudant | 187 | 11142758 | <reponame>inuyasha82/python-cloudant<gh_stars>100-1000
#!/usr/bin/env python
# Copyright © 2015, 2021 IBM Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
API module/class for interacting with a document in a database.
"""
import json
import requests
from requests.exceptions import HTTPError
from ._2to3 import url_quote, url_quote_plus
from ._common_util import response_to_json_dict, assert_document_type_id, assert_attachment_name
from .error import CloudantDocumentException
class Document(dict):
"""
Encapsulates a JSON document. A Document object is instantiated with a
reference to a database and used to manipulate document content
in a CouchDB or Cloudant database instance.
In addition to basic CRUD style operations, a Document object also provides
a convenient context manager. This context manager removes having to
explicitly :func:`~cloudant.document.Document.fetch` the document from the
remote database before commencing work on it as well as explicitly having
to :func:`~cloudant.document.Document.save` the document once work is
complete.
For example:
.. code-block:: python
# Upon entry into the document context, fetches the document from the
# remote database, if it exists. Upon exit from the context, saves the
# document to the remote database with changes made within the context.
with Document(database, 'julia006') as document:
# The document is fetched from the remote database
# Changes are made locally
document['name'] = 'Julia'
document['age'] = 6
# The document is saved to the remote database
:param database: A database instance used by the Document. Can be
either a ``CouchDatabase`` or ``CloudantDatabase`` instance.
:param str document_id: Optional document id used to identify the document.
:param str encoder: Optional JSON encoder object (extending json.JSONEncoder).
:param str decoder: Optional JSON decoder object (extending json.JSONDecoder).
"""
def __init__(self, database, document_id=None, **kwargs):
super(Document, self).__init__()
self._client = database.client
self._database = database
self._database_host = self._client.server_url
self._database_name = database.database_name
if document_id:
self['_id'] = document_id
self.encoder = kwargs.get('encoder') or self._client.encoder
self.decoder = kwargs.get('decoder') or json.JSONDecoder
@property
def r_session(self):
"""
Returns the database instance ``r_session`` used by the document.
:returns: Client ``r_session``
"""
return self._client.r_session
@property
def document_url(self):
"""
Constructs and returns the document URL.
:returns: Document URL
"""
if '_id' not in self or self['_id'] is None:
return None
# handle design document url
if self['_id'].startswith('_design/'):
return '/'.join((
self._database_host,
url_quote_plus(self._database_name),
'_design',
url_quote(self['_id'][8:], safe='')
))
# handle _local document url
if self['_id'].startswith('_local/'):
return '/'.join((
self._database_host,
url_quote_plus(self._database_name),
'_local',
url_quote(self['_id'][7:], safe='')
))
# handle document url
return '/'.join((
self._database_host,
url_quote_plus(self._database_name),
url_quote(self['_id'], safe='')
))
def exists(self):
"""
Retrieves whether the document exists in the remote database or not.
:returns: True if the document exists in the remote database,
otherwise False
"""
if '_id' not in self or self['_id'] is None:
return False
assert_document_type_id(self['_id'])
resp = self.r_session.head(self.document_url)
if resp.status_code not in [200, 404]:
resp.raise_for_status()
return resp.status_code == 200
def json(self):
"""
Retrieves the JSON string representation of the current locally cached
document object, encoded by the encoder specified in the associated
client object.
:returns: Encoded JSON string containing the document data
"""
return json.dumps(dict(self), cls=self.encoder)
def create(self):
"""
Creates the current document in the remote database and if successful,
updates the locally cached Document object with the ``_id``
and ``_rev`` returned as part of the successful response.
"""
# Ensure that an existing document will not be "updated"
doc = dict(self)
if doc.get('_rev') is not None:
doc.__delitem__('_rev')
headers = {'Content-Type': 'application/json'}
resp = self.r_session.post(
self._database.database_url,
headers=headers,
data=json.dumps(doc, cls=self.encoder)
)
resp.raise_for_status()
data = response_to_json_dict(resp)
super(Document, self).__setitem__('_id', data['id'])
super(Document, self).__setitem__('_rev', data['rev'])
def fetch(self):
"""
Retrieves the content of the current document from the remote database
and populates the locally cached Document object with that content.
A call to fetch will overwrite any dictionary content currently in
the locally cached Document object.
"""
if self.document_url is None:
raise CloudantDocumentException(101)
if '_id' in self:
assert_document_type_id(self['_id'])
resp = self.r_session.get(self.document_url)
resp.raise_for_status()
self.clear()
self.update(response_to_json_dict(resp, cls=self.decoder))
def save(self):
"""
Saves changes made to the locally cached Document object's data
structures to the remote database. If the document does not exist
remotely then it is created in the remote database. If the object
does exist remotely then the document is updated remotely. In either
case the locally cached Document object is also updated accordingly
based on the successful response of the operation.
"""
headers = {}
headers.setdefault('Content-Type', 'application/json')
if not self.exists():
self.create()
return
put_resp = self.r_session.put(
self.document_url,
data=self.json(),
headers=headers
)
put_resp.raise_for_status()
data = response_to_json_dict(put_resp)
super(Document, self).__setitem__('_rev', data['rev'])
return
# Update Actions
# These are handy functions to use with update_field below.
@staticmethod
def list_field_append(doc, field, value):
"""
Appends a value to a list field in a locally cached Document object.
If a field does not exist it will be created first.
:param Document doc: Locally cached Document object that can be a
Document, DesignDocument or dict.
:param str field: Name of the field list to append to.
:param value: Value to append to the field list.
"""
if doc.get(field) is None:
doc[field] = []
if not isinstance(doc[field], list):
raise CloudantDocumentException(102, field)
if value is not None:
doc[field].append(value)
@staticmethod
def list_field_remove(doc, field, value):
"""
Removes a value from a list field in a locally cached Document object.
:param Document doc: Locally cached Document object that can be a
Document, DesignDocument or dict.
:param str field: Name of the field list to remove from.
:param value: Value to remove from the field list.
"""
if not isinstance(doc[field], list):
raise CloudantDocumentException(102, field)
doc[field].remove(value)
@staticmethod
def field_set(doc, field, value):
"""
Sets or replaces a value for a field in a locally cached Document
object. To remove the field set the ``value`` to None.
:param Document doc: Locally cached Document object that can be a
Document, DesignDocument or dict.
:param str field: Name of the field to set.
:param value: Value to set the field to.
"""
if value is None:
doc.__delitem__(field)
else:
doc[field] = value
def _update_field(self, action, field, value, max_tries, tries=0):
"""
Private update_field method. Wrapped by Document.update_field.
Tracks a "tries" var to help limit recursion.
"""
# Refresh our view of the document.
self.fetch()
# Update the field.
action(self, field, value)
# Attempt to save, retrying conflicts up to max_tries.
try:
self.save()
except requests.HTTPError as ex:
if tries < max_tries and ex.response.status_code == 409:
self._update_field(
action, field, value, max_tries, tries=tries+1)
else:
raise
def update_field(self, action, field, value, max_tries=10):
"""
Updates a field in the remote document. If a conflict exists,
the document is re-fetched from the remote database and the update
is retried. This is performed up to ``max_tries`` number of times.
Use this method when you want to update a single field in a document,
and don't want to risk clobbering other people's changes to
the document in other fields, but also don't want the caller
to implement logic to deal with conflicts.
For example:
.. code-block:: python
# Append the string 'foo' to the 'words' list of Document doc.
doc.update_field(
action=doc.list_field_append,
field='words',
value='foo'
)
:param callable action: A routine that takes a Document object,
a field name, and a value. The routine should attempt to
update a field in the locally cached Document object with the
given value, using whatever logic is appropriate.
Valid actions are
:func:`~cloudant.document.Document.list_field_append`,
:func:`~cloudant.document.Document.list_field_remove`,
:func:`~cloudant.document.Document.field_set`
:param str field: Name of the field to update
:param value: Value to update the field with
:param int max_tries: In the case of a conflict, the number of retries
to attempt
"""
self._update_field(action, field, value, max_tries)
def delete(self):
"""
Removes the document from the remote database and clears the content of
the locally cached Document object with the exception of the ``_id``
field. In order to successfully remove a document from the remote
database, a ``_rev`` value must exist in the locally cached Document
object.
"""
if not self.get("_rev"):
raise CloudantDocumentException(103)
assert_document_type_id(self['_id'])
del_resp = self.r_session.delete(
self.document_url,
params={"rev": self["_rev"]},
)
del_resp.raise_for_status()
_id = self['_id']
self.clear()
self['_id'] = _id
def __enter__(self):
"""
Supports context like editing of document fields. Handles context
entry logic. Executes a Document.fetch() upon entry.
"""
# We don't want to raise an exception if the document is not found
# because upon __exit__ the save() call will create the document
# if necessary.
try:
self.fetch()
except HTTPError as error:
if error.response.status_code != 404:
raise
except CloudantDocumentException as error:
if error.status_code != 101:
raise
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
Support context like editing of document fields. Handles context exit
logic. Executes a `Document.save()` upon exit if no exception occurred.
"""
if exc_type is None:
self.save()
def get_attachment(
self,
attachment,
headers=None,
write_to=None,
attachment_type=None):
"""
Retrieves a document's attachment and optionally writes it to a file.
If the content_type of the attachment is 'application/json' then the
data returned will be in JSON format otherwise the response content will
be returned as text or binary.
:param str attachment: Attachment file name used to identify the
attachment.
:param dict headers: Optional, additional headers to be sent
with request.
:param file write_to: Optional file handler to write the attachment to.
The write_to file must be opened for writing prior to including it
as an argument for this method.
:param str attachment_type: Optional setting to define how to handle the
attachment when returning its contents from this method. Valid
            values are ``'text'``, ``'json'``, and ``'binary'``. If
omitted then the returned content will be based on the
response Content-Type.
:returns: The attachment content
"""
# need latest rev
self.fetch()
assert_attachment_name(attachment)
attachment_url = '/'.join((self.document_url, url_quote(attachment, safe='')))
if headers is None:
headers = {'If-Match': self['_rev']}
else:
headers['If-Match'] = self['_rev']
resp = self.r_session.get(attachment_url, headers=headers)
resp.raise_for_status()
if attachment_type is None:
if resp.headers['Content-Type'].startswith('text/'):
attachment_type = 'text'
elif resp.headers['Content-Type'] == 'application/json':
attachment_type = 'json'
else:
attachment_type = 'binary'
if write_to is not None:
if attachment_type in ('text', 'json'):
write_to.write(resp.text)
else:
write_to.write(resp.content)
if attachment_type == 'text':
return resp.text
if attachment_type == 'json':
return response_to_json_dict(resp)
return resp.content
def delete_attachment(self, attachment, headers=None):
"""
Removes an attachment from a remote document and refreshes the locally
cached document object.
:param str attachment: Attachment file name used to identify the
attachment.
:param dict headers: Optional, additional headers to be sent
with request.
:returns: Attachment deletion status in JSON format
"""
# need latest rev
self.fetch()
assert_attachment_name(attachment)
attachment_url = '/'.join((self.document_url, attachment))
if headers is None:
headers = {'If-Match': self['_rev']}
else:
headers['If-Match'] = self['_rev']
resp = self.r_session.delete(
attachment_url,
headers=headers
)
resp.raise_for_status()
super(Document, self).__setitem__('_rev', response_to_json_dict(resp)['rev'])
# Execute logic only if attachment metadata exists locally
if self.get('_attachments'):
# Remove the attachment metadata for the specified attachment
if self['_attachments'].get(attachment):
self['_attachments'].__delitem__(attachment)
# Remove empty attachment metadata from the local dictionary
if not self['_attachments']:
super(Document, self).__delitem__('_attachments')
return response_to_json_dict(resp)
def put_attachment(self, attachment, content_type, data, headers=None):
"""
Adds a new attachment, or updates an existing attachment, to
the remote document and refreshes the locally cached
Document object accordingly.
:param attachment: Attachment file name used to identify the
attachment.
:param content_type: The http ``Content-Type`` of the attachment used
as an additional header.
:param data: Attachment data defining the attachment content.
:param headers: Optional, additional headers to be sent
with request.
:returns: Attachment addition/update status in JSON format
"""
# need latest rev
self.fetch()
assert_attachment_name(attachment)
attachment_url = '/'.join((self.document_url, attachment))
if headers is None:
headers = {
'If-Match': self['_rev'],
'Content-Type': content_type
}
else:
headers['If-Match'] = self['_rev']
headers['Content-Type'] = content_type
resp = self.r_session.put(
attachment_url,
data=data,
headers=headers
)
resp.raise_for_status()
self.fetch()
return response_to_json_dict(resp)
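    # Illustrative usage sketch (account, credentials and document id are
    # placeholders, and the cloudant.client.Cloudant entry point is assumed):
    #
    #     from cloudant.client import Cloudant
    #
    #     client = Cloudant('user', 'password',
    #                       url='https://account.cloudant.com', connect=True)
    #     database = client['mydb']
    #
    #     doc = Document(database, 'julia006')
    #     doc['name'] = 'Julia'
    #     doc.save()  # creates the document remotely if it does not exist
    #
    #     doc.update_field(action=doc.list_field_append,
    #                      field='pets', value='cat')
    #     doc.put_attachment('photo.jpg', 'image/jpeg',
    #                        open('photo.jpg', 'rb').read())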
|
acp/message.py | 6sibilings/daniel-Allen | 133 | 11142783 | <filename>acp/message.py
import logging
import struct
import zlib
from .exception import ACPMessageError
from .keystream import *
def _generate_acp_header_key(password):
"""
Encrypt password for ACP message header key field
Note:
Truncates the password at 0x20 bytes, not sure if this is the right thing to use in all cases
Args:
password (str): system password of the router (syAP)
Returns:
String containing encrypted password of proper length for the header field
"""
pw_len = 0x20
pw_key = generate_acp_keystream(pw_len)
# pad with NULLs
pw_buf = password[:pw_len].ljust(pw_len, "\x00")
enc_pw_buf = ""
for i in range(pw_len):
enc_pw_buf += chr(ord(pw_key[i]) ^ ord(pw_buf[i]))
return enc_pw_buf
class ACPMessage(object):
"""ACP message composition and parsing"""
#XXX: struct is stupid about unpacking unsigned ints > 0x7fffffff, so treat everything as signed and
# "cast" where necessary. Should we switch to using ctypes?
_header_format = struct.Struct("!4s8i12x32s48x")
_header_magic = "acpp"
header_size = _header_format.size
def __init__(self, version, flags, unused, command, error_code, key, body=None, body_size=None):
self.version = version
self.flags = flags
self.unused = unused
self.command = command
self.error_code = error_code
# body is not specified, this is a stream header
if body == None:
# the body size is already specified, don't override it
self.body_size = body_size if body_size != None else -1
self.body_checksum = 1 # equivalent to zlib.adler32("")
else:
# the body size is already specified, don't override it
self.body_size = body_size if body_size != None else len(body)
self.body_checksum = zlib.adler32(body)
self.key = key
self.body = body
def __str__(self):
s = "ACPMessage: {0!r}\n".format(self)
s += "body_checksum: {0:#x}\n".format(self.body_checksum)
s += "body_size: {0:#x}\n".format(self.body_size)
s += "flags: {0:#x}\n".format(self.flags)
s += "unused: {0:#x}\n".format(self.unused)
s += "command: {0:#x}\n".format(self.command)
s += "error_code: {0:#x}\n".format(self.error_code)
s += "key: {0!r}".format(self.key)
return s
@classmethod
def parse_raw(cls, data):
# bail early if there is not enough data
if len(data) < cls.header_size:
raise ACPMessageError("need to pass at least {0} bytes".format(cls.header_size))
header_data = data[:cls.header_size]
# make sure there's data beyond the header before we try to access it
body_data = data[cls.header_size:] if len(data) > cls.header_size else None
(magic, version, header_checksum, body_checksum, body_size, flags, unused, command, error_code, key) = cls._header_format.unpack(header_data)
logging.debug("ACP message header fields, parsed not validated")
logging.debug("magic {0!r}".format(magic))
logging.debug("header_checksum {0:#x}".format(header_checksum))
logging.debug("body_checksum {0:#x}".format(body_checksum))
logging.debug("body_size {0:#x}".format(body_size))
logging.debug("flags {0:#x}".format(flags))
logging.debug("unused {0:#x}".format(unused))
logging.debug("command {0:#x}".format(command))
logging.debug("error_code {0:#x}".format(error_code))
logging.debug("key {0!r}".format(key))
if magic != cls._header_magic:
raise ACPMessageError("bad header magic")
if version not in [0x00000001, 0x00030001]:
raise ACPMessageError("invalid version")
#TODO: can we zero the header_checksum field without recreating the struct (how?)
tmphdr = cls._header_format.pack(magic, version, 0, body_checksum, body_size, flags, unused, command, error_code, key)
if header_checksum != zlib.adler32(tmphdr):
raise ACPMessageError("header checksum does not match")
if body_data and body_size == -1:
raise ACPMessageError("cannot handle stream header with data attached")
if body_data and body_size != len(body_data):
raise ACPMessageError("message body size does not match available data")
if body_data and body_checksum != zlib.adler32(body_data):
raise ACPMessageError("body checksum does not match")
#TODO: check flags
#TODO: check status
if command not in [1, 3, 4, 5, 6, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b]:
raise ACPMessageError("unknown command")
#TODO: check error code
return cls(version, flags, unused, command, error_code, key, body_data, body_size)
@classmethod
def compose_echo_command(cls, flags, password, payload):
return cls(0x00030001, flags, 0, 1, 0, _generate_acp_header_key(password), payload)._compose_raw_packet()
@classmethod
def compose_flash_primary_command(cls, flags, password, payload):
return cls(0x00030001, flags, 0, 3, 0, _generate_acp_header_key(password), payload)._compose_raw_packet()
@classmethod
def compose_flash_secondary_command(cls, flags, password, payload):
return cls(0x00030001, flags, 0, 5, 0, _generate_acp_header_key(password), payload)._compose_raw_packet()
@classmethod
def compose_flash_bootloader_command(cls, flags, password, payload):
return cls(0x00030001, flags, 0, 6, 0, _generate_acp_header_key(password), payload)._compose_raw_packet()
@classmethod
def compose_getprop_command(cls, flags, password, payload):
return cls(0x00030001, flags, 0, 0x14, 0, _generate_acp_header_key(password), payload)._compose_raw_packet()
@classmethod
def compose_setprop_command(cls, flags, password, payload):
return cls(0x00030001, flags, 0, 0x15, 0, _generate_acp_header_key(password), payload)._compose_raw_packet()
@classmethod
def compose_perform_command(cls, flags, password, payload):
return cls(0x00030001, flags, 0, 0x16, 0, _generate_acp_header_key(password), payload)._compose_raw_packet()
@classmethod
def compose_monitor_command(cls, flags, password, payload):
return cls(0x00030001, flags, 0, 0x18, 0, _generate_acp_header_key(password), payload)._compose_raw_packet()
@classmethod
def compose_rpc_command(cls, flags, password, payload):
return cls(0x00030001, flags, 0, 0x19, 0, _generate_acp_header_key(password), payload)._compose_raw_packet()
@classmethod
def compose_auth_command(cls, flags, payload):
return cls(0x00030001, flags, 0, 0x1a, 0, _generate_acp_header_key(""), payload)._compose_raw_packet()
@classmethod
def compose_feat_command(cls, flags):
return cls(0x00030001, flags, 0, 0x1b, 0, _generate_acp_header_key(""))._compose_raw_packet()
@classmethod
def compose_message_ex(cls, version, flags, unused, command, error_code, password, payload, payload_size):
return cls(version, flags, unused, command, error_code, _generate_acp_header_key(password), payload, payload_size)._compose_raw_packet()
def _compose_raw_packet(self):
"""Compose a request from the client to ACP daemon
Returns:
String containing message to send
"""
reply = self._compose_header()
if self.body:
reply += self.body
return reply
def _compose_header(self):
"""Compose the message header
Returns:
String containing header data
"""
tmphdr = self._header_format.pack(self._header_magic,
self.version,
0,
self.body_checksum,
self.body_size,
self.flags,
self.unused,
self.command,
self.error_code,
self.key)
header = self._header_format.pack(self._header_magic,
self.version,
zlib.adler32(tmphdr),
self.body_checksum,
self.body_size,
self.flags,
self.unused,
self.command,
self.error_code,
self.key)
return header
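    # Illustrative round trip under this module's Python 2 string semantics
    # (the flags value, password and payload are placeholders):
    #
    #     raw = ACPMessage.compose_echo_command(4, "router-password", "hello")
    #     msg = ACPMessage.parse_raw(raw)
    #     print(msg)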
|
src/oci/nosql/models/index_key.py | Manny27nyc/oci-python-sdk | 249 | 11142803 | <filename>src/oci/nosql/models/index_key.py
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class IndexKey(object):
"""
Specifies a single key in a secondary index.
"""
def __init__(self, **kwargs):
"""
Initializes a new IndexKey object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param column_name:
The value to assign to the column_name property of this IndexKey.
:type column_name: str
:param json_path:
The value to assign to the json_path property of this IndexKey.
:type json_path: str
:param json_field_type:
The value to assign to the json_field_type property of this IndexKey.
:type json_field_type: str
"""
self.swagger_types = {
'column_name': 'str',
'json_path': 'str',
'json_field_type': 'str'
}
self.attribute_map = {
'column_name': 'columnName',
'json_path': 'jsonPath',
'json_field_type': 'jsonFieldType'
}
self._column_name = None
self._json_path = None
self._json_field_type = None
@property
def column_name(self):
"""
**[Required]** Gets the column_name of this IndexKey.
The name of a column to be included as an index key.
:return: The column_name of this IndexKey.
:rtype: str
"""
return self._column_name
@column_name.setter
def column_name(self, column_name):
"""
Sets the column_name of this IndexKey.
The name of a column to be included as an index key.
:param column_name: The column_name of this IndexKey.
:type: str
"""
self._column_name = column_name
@property
def json_path(self):
"""
Gets the json_path of this IndexKey.
If the specified column is of type JSON, jsonPath contains
a dotted path indicating the field within the JSON object
that will be the index key.
:return: The json_path of this IndexKey.
:rtype: str
"""
return self._json_path
@json_path.setter
def json_path(self, json_path):
"""
Sets the json_path of this IndexKey.
If the specified column is of type JSON, jsonPath contains
a dotted path indicating the field within the JSON object
that will be the index key.
:param json_path: The json_path of this IndexKey.
:type: str
"""
self._json_path = json_path
@property
def json_field_type(self):
"""
Gets the json_field_type of this IndexKey.
If the specified column is of type JSON, jsonFieldType contains
the type of the field indicated by jsonPath.
:return: The json_field_type of this IndexKey.
:rtype: str
"""
return self._json_field_type
@json_field_type.setter
def json_field_type(self, json_field_type):
"""
Sets the json_field_type of this IndexKey.
If the specified column is of type JSON, jsonFieldType contains
the type of the field indicated by jsonPath.
:param json_field_type: The json_field_type of this IndexKey.
:type: str
"""
self._json_field_type = json_field_type
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
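    # Illustrative usage sketch: @init_model_state_from_kwargs routes the keyword
    # arguments below through the property setters above. The column name and
    # JSON path are placeholders.
    #
    #     key = IndexKey(column_name='data', json_path='address.city',
    #                    json_field_type='STRING')
    #     print(key.column_name, key.json_path, key.json_field_type)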
|
b-tool/dec/b-tool.py | shyamjangid07/Reverse-Engineering | 337 | 11142811 | <filename>b-tool/dec/b-tool.py<gh_stars>100-1000
# Deobfuscated BY HTR-TECH | <NAME>
# Github : https://github.com/htr-tech
# Instagram : https://www.instagram.com/tahmid.rayat
# Facebook : https://fb.com/tahmid.rayat.oficial
# Messenger : https://m.me/tahmid.rayat.oficial
import os,sys,time
##### LOGO #####
logo = """
\033[1;91mBBBBBBBBBBBBBBBB
\033[1;97mB::::::::::::::::B
\033[1;92mB::::::BBBBBB:::::B
\033[1;97mBB:::::B B:::::B
\033[1;95m B::::B B:::::B
\033[1;97m B::::B B:::::B
\033[1;94m B::::BBBBBB:::::B
\033[1;97m B:::::::::::::BB --------------
\033[1;91m B::::BBBBBB:::::B -::: TOOL :::-
\033[1;97m B::::B B:::::B --------------
\033[1;92m B::::B B:::::B
\033[1;97m B::::B B:::::B
\033[1;95mBB:::::BBBBBB::::::B
\033[1;97mB:::::::::::::::::B
\033[1;94mB::::::::::::::::B
\033[1;91mBBBBBBBBBBBBBBBB
\033[1;96m--------------------------------------------------
\033[1;91mAuther : Binyamin
\033[1;92mGitHub : https://github.com/binyamin-binni
\033[1;95mYouTube : Trick Proof
\033[1;94mBlogspot : https://tr<EMAIL>
\033[1;96m--------------------------------------------------
"""
B = '\033[1;94m'
R = '\033[1;91m'
G = '\033[1;92m'
W = '\033[1;97m'
S = '\033[1;96m'
P = '\033[1;95m'
def cb():
os.system('clear')
#### time sleep ####
def t():
time.sleep(1)
def t1():
time.sleep(0.01)
#### print std ####
def psb(z):
for e in z + "\n":
sys.stdout.write(e)
sys.stdout.flush()
t1()
def menu():
cb()
print(logo)
print
print(S+"[1]"+G+" INSTALL BXI")
print(S+"[2]"+P+" INSTALL BN")
print(S+"[3]"+B+" INSTALL B4U")
print(R+"[0]"+R+" EXIT")
print
mb()
def mb():
bm = raw_input(W + " >>> ")
if bm =="":
print (R + "Select a valid option !")
mb()
elif bm =="1":
cb()
print(logo)
os.system("rm -rf $HOME/bxi")
os.system("cd $HOME && git clone https://github.com/binyamin-binni/bxi")
print
psb("Congratulations BXI Tool Has Been Installed Successfully")
psb("Now you can open this tool as usual")
time.sleep(5)
menu()
elif bm =="2":
cb()
print(logo)
os.system("rm -rf $HOME/bn")
os.system("cd $HOME && git clone https://github.com/binyamin-binni/bn")
print
psb("Congratulations BN Tool Has Been Installed Successfully")
psb("Now you can open this tool as usual")
time.sleep(5)
menu()
elif bm =="3":
cb()
print(logo)
os.system("rm -rf $HOME/b4u")
os.system("cd $HOME && git clone https://github.com/binyamin-binni/b4u")
print
psb("Congratulations B4U Tool Has Been Installed Successfully")
psb("Now you can open this tool as usual")
time.sleep(5)
menu()
elif bm =="0":
os.system("exit")
if __name__ == "__main__":
menu()
|
tests/test_misc.py | Hellowlol/bw_plex | 371 | 11142819 | import math
import pytest
from conftest import misc
def test_to_sec():
assert misc.to_sec(1) == 1
assert misc.to_sec('00:01') == 1
assert misc.to_sec('10:59') == 659
def test_get_valid_filename():
assert misc.get_valid_filename('M*A*S*H') == 'MASH'
def test_ignoreratingkey(film, episode):
assert misc.ignore_ratingkey(episode, [1337])
assert misc.ignore_ratingkey(film, [7331])
assert not misc.ignore_ratingkey(episode, [113])
def test_sec_to_hh_mm_ss():
x = misc.sec_to_hh_mm_ss(60)
assert x == '00:01:00'
def test_findnxt(film, episode):
assert not misc.find_next(film)
    # This should fail as there are no more
    # episodes.
assert not misc.find_next(episode)
@pytest.mark.xfail
def test_find_offset_ffmpeg(intro_file):
x = misc.find_offset_ffmpeg(intro_file)
assert x == 214
    # This failed; it isn't really the intro file.
    #assert x == -1
    # Fails as find_offset_ffmpeg selects the intro, not the end of the intro.
def test_download_theme_and_find_theme_start_end(media, HT, intro_file):
files = misc.download_theme(media, HT, theme_source='youtube', url='https://www.youtube.com/watch?v=BIqBQWB7IUM')
assert len(files)
assert HT.has_theme(media)
new_files = misc.download_theme(media, HT, theme_source='tvtunes')
assert len(new_files)
start, end = misc.find_theme_start_end(intro_file, HT)
assert math.floor(start) in (115, 116, 117)
assert math.floor(end) in (208, 209)
def test_has_recap_subtitle(episode, monkeypatch, mocker):
def download_subtitle2(*args, **kwargs):
l = []
for i in ['Hello you old', 'dog']:
m = mocker.Mock()
m.text = i
l.append(m)
return [l]
monkeypatch.setattr(misc, 'download_subtitle', download_subtitle2)
assert misc.has_recap_subtitle(episode, ['dog'])
def test_search_tunes():
d = misc.search_tunes('dexter', 1337, url=None)
assert d
def test_choose(monkeypatch, mocker):
l = []
for r in range(10):
m = mocker.Mock()
m.title = r
l.append(m)
mocker.patch('click.prompt', side_effect=['0'])
x = misc.choose('select', l, 'title')
assert x[0].title == 0
assert not len(misc.choose('select', [], 'title'))
mocker.patch('click.prompt', side_effect=['-1'])
last = misc.choose('select', l, 'title')
assert last[0].title == 9
mocker.patch('click.prompt', side_effect=['1,7'])
some = misc.choose('select', l, 'title')
assert some[0].title == 1
assert some[1].title == 7
mocker.patch('click.prompt', side_effect=['1000', '-1:'])
some = misc.choose('select', l, 'title')
assert some[0].title == 9
def test_to_time():
assert misc.to_time(-1) == '00:00'
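# Not bw_plex's implementation, just a minimal sketch that satisfies the
# to_sec() assertions above, for readers who want the expected behaviour
# spelled out.
#
#     def to_sec(value):
#         if isinstance(value, int):
#             return value
#         minutes, seconds = value.split(':')[-2:]
#         return int(minutes) * 60 + int(seconds)
#
#     assert to_sec('10:59') == 659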
|
ops/_private/yaml.py | r-f-g/operator | 147 | 11142824 | <filename>ops/_private/yaml.py
# Copyright 2021 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Internal YAML helpers."""
import yaml
# Use C speedups if available
_safe_loader = getattr(yaml, 'CSafeLoader', yaml.SafeLoader)
_safe_dumper = getattr(yaml, 'CSafeDumper', yaml.SafeDumper)
def safe_load(stream):
"""Same as yaml.safe_load, but use fast C loader if available."""
return yaml.load(stream, Loader=_safe_loader)
def safe_dump(data, stream=None, **kwargs):
"""Same as yaml.safe_dump, but use fast C dumper if available."""
return yaml.dump(data, stream=stream, Dumper=_safe_dumper, **kwargs)
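# Illustrative round trip: these helpers behave like yaml.safe_load/safe_dump,
# just with the C-accelerated Loader/Dumper when PyYAML provides them.
if __name__ == '__main__':
    demo = safe_load('ports:\n  - 8080\n  - 8443\n')
    assert demo == {'ports': [8080, 8443]}
    print(safe_dump(demo), end='')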
|
tests/ut/datavisual/mock.py | fapbatista/mindinsight | 216 | 11142826 | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Mock for ut test case.
"""
class MockLogger:
"""Mock logger in DataLoader and collect log message for verification."""
log_msg = {'error': None, 'warning': None, 'info': None}
@classmethod
def error(cls, msg, *args):
"""Mock logger.error() and collect error message."""
cls.log_msg['error'] = msg.replace("%s", "{}").replace("%r", "'{}'").format(*args)
@classmethod
def warning(cls, msg, *args):
"""Mock logger.warning() and collect warning message."""
cls.log_msg['warning'] = msg.replace("%s", "{}").replace("%r", "'{}'").format(*args)
@classmethod
def info(cls, msg, *args):
"""Mock logger.info() and collect info message."""
cls.log_msg['info'] = msg.replace("%s", "{}").replace("%r", "'{}'").format(*args)
@classmethod
def debug(cls, msg, *args):
"""Mock logger.debug() and collect debug message."""
cls.log_msg['debug'] = msg.replace("%s", "{}").replace("%r", "'{}'").format(*args)
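# Illustrative check of the collection behaviour above: the %s and %r
# placeholders are substituted into the stored message ("/tmp/logs" is a
# placeholder path).
if __name__ == "__main__":
    MockLogger.error("Failed to load %s from %r.", "summary.01", "/tmp/logs")
    assert MockLogger.log_msg['error'] == "Failed to load summary.01 from '/tmp/logs'."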
|
input/gcamdata/exec/run-xml-tests.py | cmcormack/gcam-core | 157 | 11142833 | <gh_stars>100-1000
#! /usr/bin/env python3
"""
Compare the GCAM xml files in olddir with their counterparts in newdir.
Usage: run-xml-tests.py [-v] olddir newdir
Discrepancies will be reported to stdout; all other messages (progress indicators,
etc.) will be written to stderr
"""
import sys
import os
import glob
import argparse
import textwrap
import xml_verify
def parse_args():
"""
Parse command line arguments
:return: parsed arguments object
"""
parser = argparse.ArgumentParser(
description = "Compare the GCAM xml files in olddir with their counterparts in newdir.",
epilog = textwrap.dedent(
"""
The olddir will be searched recursively for xml files. Currently newdir is
*not* searched recursively (gcamdata currently generates all of its xml files in
the top-level directory). This might change in the future.
Discrepancies will be reported to stdout; all other messages (progress indicators,
etc.) will be written to stderr.
"""),
formatter_class = argparse.RawDescriptionHelpFormatter
)
parser.add_argument('olddir', help='Directory containing the old GCAM xml files')
parser.add_argument('newdir', help='Directory containing the new GCAM xml files')
parser.add_argument('-v', action='store_true',
help='Verbose mode: add extra informational messages')
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
oldfiles = glob.glob( os.path.join( os.path.expanduser(args.olddir), '**', '*.xml'), recursive=True )
newdir = os.path.expanduser(args.newdir)
misscount = 0
statcount = 0
for oldfile in oldfiles:
newfile = os.path.join( newdir, os.path.basename(oldfile) )
if not os.path.exists(newfile):
sys.stdout.write('ERROR : File does not exist: {}\n'.format(newfile))
misscount += 1
else:
stat = xml_verify.compare_files(oldfile, newfile, args.v)
if stat != 0:
sys.stdout.write('ERROR: Discrepancy between files: {} and {}\n'.format(oldfile, newfile))
statcount += 1
if misscount == 1:
fs = 'file'
else:
fs = 'files'
if statcount == 1:
ds = 'discrepancy'
else:
ds = 'discrepancies'
nold = len(oldfiles)
if nold == 1:
fno = 'file'
else:
fno = 'files'
sys.stdout.write('{} {} tested.\n'.format(nold, fno))
sys.stdout.write('{} missing output {}.\n'.format(misscount, fs))
sys.stdout.write('{} file {}.\n'.format(statcount, ds))
if nold == 0:
sys.exit(1) # failing to find any files to test is an error
elif misscount > 0:
sys.exit(2)
elif statcount > 0:
sys.exit(3)
else:
sys.exit(0) # success!
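    # Illustrative invocation (directory names are placeholders):
    #
    #     ./run-xml-tests.py -v exe/xml-reference exe/xml-new
    #
    # Exit status: 0 on success, 1 if no old xml files were found, 2 if any
    # expected output file is missing, 3 if any file-level discrepancy was found.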
|
src/_cffi_src/openssl/ocsp.py | ceridwen/cryptography | 333 | 11142847 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
INCLUDES = """
#include <openssl/ocsp.h>
"""
TYPES = """
typedef ... OCSP_REQUEST;
typedef ... OCSP_ONEREQ;
typedef ... OCSP_RESPONSE;
typedef ... OCSP_BASICRESP;
typedef ... OCSP_SINGLERESP;
typedef ... OCSP_CERTID;
typedef ... OCSP_RESPDATA;
static const long OCSP_NOCERTS;
static const long OCSP_RESPID_KEY;
"""
FUNCTIONS = """
int OCSP_response_status(OCSP_RESPONSE *);
OCSP_BASICRESP *OCSP_response_get1_basic(OCSP_RESPONSE *);
int OCSP_BASICRESP_get_ext_count(OCSP_BASICRESP *);
const ASN1_OCTET_STRING *OCSP_resp_get0_signature(const OCSP_BASICRESP *);
Cryptography_STACK_OF_X509 *OCSP_resp_get0_certs(const OCSP_BASICRESP *);
const ASN1_GENERALIZEDTIME *OCSP_resp_get0_produced_at(
const OCSP_BASICRESP *);
const OCSP_CERTID *OCSP_SINGLERESP_get0_id(const OCSP_SINGLERESP *);
int OCSP_resp_get0_id(const OCSP_BASICRESP *, const ASN1_OCTET_STRING **,
const X509_NAME **);
const X509_ALGOR *OCSP_resp_get0_tbs_sigalg(const OCSP_BASICRESP *);
const OCSP_RESPDATA *OCSP_resp_get0_respdata(const OCSP_BASICRESP *);
X509_EXTENSION *OCSP_BASICRESP_get_ext(OCSP_BASICRESP *, int);
int OCSP_resp_count(OCSP_BASICRESP *);
OCSP_SINGLERESP *OCSP_resp_get0(OCSP_BASICRESP *, int);
int OCSP_SINGLERESP_get_ext_count(OCSP_SINGLERESP *);
X509_EXTENSION *OCSP_SINGLERESP_get_ext(OCSP_SINGLERESP *, int);
int OCSP_single_get0_status(OCSP_SINGLERESP *, int *, ASN1_GENERALIZEDTIME **,
ASN1_GENERALIZEDTIME **, ASN1_GENERALIZEDTIME **);
int OCSP_REQUEST_get_ext_count(OCSP_REQUEST *);
X509_EXTENSION *OCSP_REQUEST_get_ext(OCSP_REQUEST *, int);
int OCSP_request_onereq_count(OCSP_REQUEST *);
OCSP_ONEREQ *OCSP_request_onereq_get0(OCSP_REQUEST *, int);
int OCSP_ONEREQ_get_ext_count(OCSP_ONEREQ *);
X509_EXTENSION *OCSP_ONEREQ_get_ext(OCSP_ONEREQ *, int);
OCSP_CERTID *OCSP_onereq_get0_id(OCSP_ONEREQ *);
OCSP_ONEREQ *OCSP_request_add0_id(OCSP_REQUEST *, OCSP_CERTID *);
OCSP_CERTID *OCSP_cert_to_id(const EVP_MD *, const X509 *, const X509 *);
void OCSP_CERTID_free(OCSP_CERTID *);
OCSP_BASICRESP *OCSP_BASICRESP_new(void);
void OCSP_BASICRESP_free(OCSP_BASICRESP *);
OCSP_SINGLERESP *OCSP_basic_add1_status(OCSP_BASICRESP *, OCSP_CERTID *, int,
int, ASN1_TIME *, ASN1_TIME *,
ASN1_TIME *);
int OCSP_basic_add1_nonce(OCSP_BASICRESP *, unsigned char *, int);
int OCSP_basic_add1_cert(OCSP_BASICRESP *, X509 *);
int OCSP_BASICRESP_add_ext(OCSP_BASICRESP *, X509_EXTENSION *, int);
int OCSP_basic_sign(OCSP_BASICRESP *, X509 *, EVP_PKEY *, const EVP_MD *,
Cryptography_STACK_OF_X509 *, unsigned long);
OCSP_RESPONSE *OCSP_response_create(int, OCSP_BASICRESP *);
void OCSP_RESPONSE_free(OCSP_RESPONSE *);
OCSP_REQUEST *OCSP_REQUEST_new(void);
void OCSP_REQUEST_free(OCSP_REQUEST *);
int OCSP_request_add1_nonce(OCSP_REQUEST *, unsigned char *, int);
int OCSP_REQUEST_add_ext(OCSP_REQUEST *, X509_EXTENSION *, int);
int OCSP_id_get0_info(ASN1_OCTET_STRING **, ASN1_OBJECT **,
ASN1_OCTET_STRING **, ASN1_INTEGER **, OCSP_CERTID *);
OCSP_REQUEST *d2i_OCSP_REQUEST_bio(BIO *, OCSP_REQUEST **);
OCSP_RESPONSE *d2i_OCSP_RESPONSE_bio(BIO *, OCSP_RESPONSE **);
int i2d_OCSP_REQUEST_bio(BIO *, OCSP_REQUEST *);
int i2d_OCSP_RESPONSE_bio(BIO *, OCSP_RESPONSE *);
int i2d_OCSP_RESPDATA(OCSP_RESPDATA *, unsigned char **);
"""
CUSTOMIZATIONS = """
#if ( \
CRYPTOGRAPHY_OPENSSL_110_OR_GREATER && \
CRYPTOGRAPHY_OPENSSL_LESS_THAN_110J \
)
/* These structs come from ocsp_lcl.h and are needed to de-opaque the struct
for the getters in OpenSSL 1.1.0 through 1.1.0i */
struct ocsp_responder_id_st {
int type;
union {
X509_NAME *byName;
ASN1_OCTET_STRING *byKey;
} value;
};
struct ocsp_response_data_st {
ASN1_INTEGER *version;
OCSP_RESPID responderId;
ASN1_GENERALIZEDTIME *producedAt;
STACK_OF(OCSP_SINGLERESP) *responses;
STACK_OF(X509_EXTENSION) *responseExtensions;
};
struct ocsp_basic_response_st {
OCSP_RESPDATA tbsResponseData;
X509_ALGOR signatureAlgorithm;
ASN1_BIT_STRING *signature;
STACK_OF(X509) *certs;
};
#endif
#if CRYPTOGRAPHY_OPENSSL_LESS_THAN_110
/* These functions are all taken from ocsp_cl.c in OpenSSL 1.1.0 */
const OCSP_CERTID *OCSP_SINGLERESP_get0_id(const OCSP_SINGLERESP *single)
{
return single->certId;
}
const Cryptography_STACK_OF_X509 *OCSP_resp_get0_certs(
const OCSP_BASICRESP *bs)
{
return bs->certs;
}
int OCSP_resp_get0_id(const OCSP_BASICRESP *bs,
const ASN1_OCTET_STRING **pid,
const X509_NAME **pname)
{
const OCSP_RESPID *rid = bs->tbsResponseData->responderId;
if (rid->type == V_OCSP_RESPID_NAME) {
*pname = rid->value.byName;
*pid = NULL;
} else if (rid->type == V_OCSP_RESPID_KEY) {
*pid = rid->value.byKey;
*pname = NULL;
} else {
return 0;
}
return 1;
}
const ASN1_GENERALIZEDTIME *OCSP_resp_get0_produced_at(
const OCSP_BASICRESP* bs)
{
return bs->tbsResponseData->producedAt;
}
const ASN1_OCTET_STRING *OCSP_resp_get0_signature(const OCSP_BASICRESP *bs)
{
return bs->signature;
}
#endif
#if CRYPTOGRAPHY_OPENSSL_LESS_THAN_110J
const X509_ALGOR *OCSP_resp_get0_tbs_sigalg(const OCSP_BASICRESP *bs)
{
#if CRYPTOGRAPHY_OPENSSL_LESS_THAN_110
return bs->signatureAlgorithm;
#else
return &bs->signatureAlgorithm;
#endif
}
const OCSP_RESPDATA *OCSP_resp_get0_respdata(const OCSP_BASICRESP *bs)
{
#if CRYPTOGRAPHY_OPENSSL_LESS_THAN_110
return bs->tbsResponseData;
#else
return &bs->tbsResponseData;
#endif
}
#endif
"""
|
observations/r/film.py | hajime9652/observations | 199 | 11142891 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def film(path):
"""Film
Film data from Maltin's Movie and Video Guide
A dataset with 100 observations on the following 9 variables.
`Title`
Movie title
`Year`
Year the movie was released
`Time`
Running time (in minutes)
`Cast`
Number of cast members listed in the guide
`Rating`
Maltin rating (range is 1 to 4, in steps of 0.5)
`Description`
Number of lines of text Maltin uses to describe the movie
`Origin`
Country: 0 = USA, 1 = Great Britain, 2 = France, 3 = Italy, 4 = Canada
`Time_code`
    `long`\ =90 minutes or longer `short`\ =under 90 minutes
  `Good`
    `1`\ =rating of 3 stars or better `0`\ =any lower rating
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `film.csv`.
Returns:
Tuple of np.ndarray `x_train` with 100 rows and 9 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'film.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/Stat2Data/Film.csv'
maybe_download_and_extract(path, url,
save_file_name='film.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
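# Illustrative usage (the path is a placeholder; the CSV is fetched on the
# first call):
#
#     x_train, metadata = film('~/data')
#     print(x_train.shape)          # (100, 9)
#     print(list(metadata['columns']))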
|
tests/test_estimators/test_basics.py | kaipoethkow/scikit-lego | 784 | 11142951 | <reponame>kaipoethkow/scikit-lego
import pytest
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklego.dummy import RandomRegressor
from sklego.linear_model import DeadZoneRegressor
from sklego.mixture import (
GMMClassifier,
BayesianGMMClassifier,
GMMOutlierDetector,
BayesianGMMOutlierDetector,
)
from tests.conftest import id_func
@pytest.mark.parametrize(
"estimator",
[
RandomRegressor(strategy="uniform"),
RandomRegressor(strategy="normal"),
DeadZoneRegressor(effect="linear", n_iter=100),
DeadZoneRegressor(effect="quadratic", n_iter=100),
],
ids=id_func,
)
def test_shape_regression(estimator, random_xy_dataset_regr):
X, y = random_xy_dataset_regr
assert estimator.fit(X, y).predict(X).shape[0] == y.shape[0]
pipe = Pipeline(steps=[("scaler", StandardScaler()), ("clf", estimator)])
assert pipe.fit(X, y).predict(X).shape[0] == y.shape[0]
@pytest.mark.parametrize(
"estimator",
[
GMMClassifier(),
BayesianGMMClassifier(),
GMMOutlierDetector(threshold=0.999, method="quantile"),
GMMOutlierDetector(threshold=2, method="stddev"),
BayesianGMMOutlierDetector(threshold=0.999, method="quantile"),
BayesianGMMOutlierDetector(threshold=2, method="stddev"),
],
ids=id_func,
)
def test_shape_classification(estimator, random_xy_dataset_clf):
X, y = random_xy_dataset_clf
assert estimator.fit(X, y).predict(X).shape[0] == y.shape[0]
pipe = Pipeline(steps=[("scaler", StandardScaler()), ("clf", estimator)])
assert pipe.fit(X, y).predict(X).shape[0] == y.shape[0]
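# Illustrative stand-alone fit mirroring the pattern the parametrized tests
# above exercise, but without the pytest fixtures (the data shapes are
# arbitrary placeholders).
if __name__ == "__main__":
    import numpy as np

    X = np.random.normal(size=(100, 4))
    y = np.random.normal(size=100)
    pipe = Pipeline(steps=[("scaler", StandardScaler()),
                           ("clf", RandomRegressor(strategy="normal"))])
    print(pipe.fit(X, y).predict(X).shape)  # first dimension equals len(y)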
|
test/test_rbql_sqlite.py | mechatroner/RBQL | 178 | 11142955 | <reponame>mechatroner/RBQL
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function
import unittest
import os
import sys
import json
import random
import tempfile
import time
import shutil
import sqlite3
import rbql
from rbql import rbql_engine
from rbql import rbql_sqlite
#This module must be both python2 and python3 compatible
script_dir = os.path.dirname(os.path.abspath(__file__))
python_version = float('{}.{}'.format(sys.version_info[0], sys.version_info[1]))
def calc_file_md5(fname):
# TODO put into a common test_common.py module
import hashlib
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def normalize_warnings(warnings):
# TODO can we get rid of this function? Why do we need to normalize warnings?
# TODO move into a common test lib module e.g. "tests_common.py"
result = []
for warning in warnings:
if warning.find('Number of fields in "input" table is not consistent') != -1:
result.append('inconsistent input records')
elif warning.find('Inconsistent double quote escaping') != -1:
result.append('inconsistent double quote escaping')
elif warning.find('None values in output were replaced by empty strings') != -1:
result.append('null values in output were replaced')
elif warning == 'UTF-8 Byte Order Mark (BOM) was found and skipped in input table':
result.append('BOM removed from input')
else:
result.append(warning)
return result
class TestSqliteJsonScenarios(unittest.TestCase):
def process_test_case(self, tmp_tests_dir, test_case):
test_name = test_case['test_name']
query = test_case.get('query_python', None)
if query is None:
if python_version >= 3:
query = test_case.get('query_python_3', None)
else:
query = test_case.get('query_python_2', None)
debug_mode = test_case.get('debug_mode', False)
minimal_python_version = float(test_case.get('minimal_python_version', 2.7))
if python_version < minimal_python_version:
print('Skipping {}: python version must be at least {}. Interpreter version is {}'.format(test_name, minimal_python_version, python_version))
return
if query is None:
self.assertTrue(test_case.get('query_js', None) is not None)
return # Skip this test
input_db_path = os.path.join(script_dir, test_case['input_db_path'])
input_table_name = test_case['input_table_name']
join_table = test_case.get('join_table', None)
user_init_code = test_case.get('python_init_code', '')
out_delim = ',' # TODO read from the test_case
out_policy = 'quoted_rfc' # TODO read from the test_case
output_encoding = 'utf-8' # TODO read from the test_case
warnings = []
expected_output_table_path = test_case.get('expected_output_table_path', None)
if expected_output_table_path is not None:
expected_output_table_path = os.path.join(script_dir, expected_output_table_path)
expected_md5 = calc_file_md5(expected_output_table_path)
output_file_name = os.path.basename(expected_output_table_path)
actual_output_table_path = os.path.join(tmp_tests_dir, output_file_name)
else:
actual_output_table_path = os.path.join(tmp_tests_dir, 'expected_empty_file')
expected_error = test_case.get('expected_error', None)
expected_warnings = test_case.get('expected_warnings', [])
error_type, error_msg = None, None
db_connection = None
try:
db_connection = sqlite3.connect(input_db_path)
rbql_sqlite.query_sqlite_to_csv(query, db_connection, input_table_name, actual_output_table_path, out_delim, out_policy, output_encoding, warnings)
except Exception as e:
if debug_mode:
raise
error_type, error_msg = rbql.exception_to_error_info(e)
finally:
db_connection.close()
self.assertTrue((expected_error is not None) == (error_type is not None), 'Inside json test: "{}". Expected error: {}, error_type, error_msg: {}'.format(test_name, expected_error, error_type, error_msg))
if expected_error is not None:
self.assertTrue(error_msg.find(expected_error) != -1, 'Inside json test: "{}", Expected error: "{}", Actual error: "{}"'.format(test_name, expected_error, error_msg))
else:
actual_md5 = calc_file_md5(actual_output_table_path)
self.assertTrue(expected_md5 == actual_md5, 'md5 missmatch. Expected table: {}, Actual table: {}'.format(expected_output_table_path, actual_output_table_path))
warnings = sorted(normalize_warnings(warnings))
expected_warnings = sorted(expected_warnings)
self.assertEqual(expected_warnings, warnings, 'Inside json test: "{}"'.format(test_name))
def test_json_scenarios(self):
tests_file = os.path.join(script_dir, 'sqlite_unit_tests.json')
tmp_dir = tempfile.gettempdir()
tmp_tests_dir = 'rbql_sqlite_unit_tests_dir_{}_{}'.format(time.time(), random.randint(1, 100000000)).replace('.', '_')
tmp_tests_dir = os.path.join(tmp_dir, tmp_tests_dir)
os.mkdir(tmp_tests_dir)
with open(tests_file) as f:
tests = json.loads(f.read())
for test in tests:
self.process_test_case(tmp_tests_dir, test)
shutil.rmtree(tmp_tests_dir)
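# Illustrative direct use of the API exercised above; the database path, table
# name and RBQL query are placeholders:
#
#     connection = sqlite3.connect('movies.sqlite3')
#     warnings = []
#     rbql_sqlite.query_sqlite_to_csv(
#         'SELECT * LIMIT 10', connection, 'movies', 'out.csv',
#         ',', 'quoted_rfc', 'utf-8', warnings)
#     connection.close()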
|
python/fate_flow/scheduling_apps/initiator_app.py | hubert-he/FATE | 3,787 | 11142986 | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import Flask, request
from fate_arch.common import log
from fate_flow.db.db_models import Task
from fate_flow.entity.types import RetCode
from fate_flow.operation.job_saver import JobSaver
from fate_flow.scheduler.dag_scheduler import DAGScheduler
from fate_flow.settings import stat_logger
from fate_flow.utils.api_utils import get_json_result
manager = Flask(__name__)
@manager.errorhandler(500)
def internal_server_error(e):
stat_logger.exception(e)
return get_json_result(retcode=RetCode.EXCEPTION_ERROR, retmsg=log.exception_to_trace_string(e))
# Endpoints through which parties apply to the initiator for control operations
@manager.route('/<job_id>/<role>/<party_id>/stop/<stop_status>', methods=['POST'])
def stop_job(job_id, role, party_id, stop_status):
retcode, retmsg = DAGScheduler.stop_job(job_id=job_id, role=role, party_id=party_id, stop_status=stop_status)
return get_json_result(retcode=retcode, retmsg=retmsg)
@manager.route('/<job_id>/<role>/<party_id>/rerun', methods=['POST'])
def rerun_job(job_id, role, party_id):
DAGScheduler.rerun_job(job_id=job_id, initiator_role=role, initiator_party_id=party_id,
component_name=request.json.get("component_name"))
return get_json_result(retcode=0, retmsg='success')
@manager.route('/<job_id>/<component_name>/<task_id>/<task_version>/<role>/<party_id>/report', methods=['POST'])
def report_task(job_id, component_name, task_id, task_version, role, party_id):
task_info = {}
task_info.update(request.json)
task_info.update({
"job_id": job_id,
"task_id": task_id,
"task_version": task_version,
"role": role,
"party_id": party_id,
})
JobSaver.update_task(task_info=task_info)
if task_info.get("party_status"):
JobSaver.update_status(Task, task_info)
return get_json_result(retcode=0, retmsg='success')
|
tests/parsers/olecf_plugins/automatic_destinations.py | roshanmaskey/plaso | 1,253 | 11142998 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the .automaticDestinations-ms OLECF parser plugin."""
import unittest
from plaso.lib import definitions
from plaso.parsers.olecf_plugins import automatic_destinations
from tests.parsers.olecf_plugins import test_lib
class TestAutomaticDestinationsOLECFPlugin(test_lib.OLECFPluginTestCase):
"""Tests for the .automaticDestinations-ms OLECF parser plugin."""
def testProcessVersion1(self):
"""Tests the Process function on version 1 .automaticDestinations-ms."""
plugin = automatic_destinations.AutomaticDestinationsOLECFPlugin()
storage_writer = self._ParseOLECFFileWithPlugin(
['1b4dd67f29cb1962.automaticDestinations-ms'], plugin)
# Number of events:
# olecf:dest_list:entry: 11
# windows:lnk:link 33
# windows:distributed_link_tracking:creation: 44
self.assertEqual(storage_writer.number_of_events, 88)
self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
events = list(storage_writer.GetEvents())
# Check a AutomaticDestinationsDestListEntryEvent.
expected_event_values = {
'birth_droid_file_identifier': '{63eea867-7b85-11e1-8950-005056a50b40}',
'birth_droid_volume_identifier': (
'{cf6619c2-66a8-44a6-8849-1582fcd3a338}'),
'data_type': 'olecf:dest_list:entry',
'date_time': '2012-04-01 13:52:38.9975382',
'droid_file_identifier': '{63eea867-7b85-11e1-8950-005056a50b40}',
'droid_volume_identifier': '{cf6619c2-66a8-44a6-8849-1582fcd3a338}',
'entry_number': 11,
'hostname': 'wks-win764bitb',
'offset': 32,
'path': 'C:\\Users\\nfury\\Pictures\\The SHIELD',
'pin_status': -1,
'timestamp_desc': definitions.TIME_DESCRIPTION_MODIFICATION}
self.CheckEventValues(storage_writer, events[7], expected_event_values)
# Check a WinLnkLinkEvent.
expected_event_values = {
'data_type': 'windows:lnk:link',
'date_time': '2010-11-10 07:51:16.7491250',
'drive_serial_number': 0x24ba718b,
'drive_type': 3,
'file_attribute_flags': 0x00002020,
'file_size': 3545,
'link_target': '<Users Libraries> <UNKNOWN: 0x00>',
'local_path': (
'C:\\Users\\nfury\\AppData\\Roaming\\Microsoft\\Windows\\'
'Libraries\\Documents.library-ms')}
self.CheckEventValues(storage_writer, events[1], expected_event_values)
# Check a WindowsDistributedLinkTrackingCreationEvent.
expected_event_values = {
'data_type': 'windows:distributed_link_tracking:creation',
'date_time': '2012-03-31 23:01:03.5277415',
'mac_address': '00:50:56:a5:0b:40',
'origin': 'DestList entry at offset: 0x00000020',
'uuid': '63eea867-7b85-11e1-8950-005056a50b40'}
self.CheckEventValues(storage_writer, events[5], expected_event_values)
def testProcessVersion3(self):
"""Tests the Process function on version 3 .automaticDestinations-ms."""
plugin = automatic_destinations.AutomaticDestinationsOLECFPlugin()
storage_writer = self._ParseOLECFFileWithPlugin(
['9d1f905ce5044aee.automaticDestinations-ms'], plugin)
# Number of events:
# olecf:dest_list:entry: 2
# windows:lnk:link 2
self.assertEqual(storage_writer.number_of_events, 4)
self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
events = list(storage_writer.GetEvents())
# Check a AutomaticDestinationsDestListEntryEvent.
expected_event_values = {
'birth_droid_file_identifier': '{00000000-0000-0000-0000-000000000000}',
'birth_droid_volume_identifier': (
'{00000000-0000-0000-0000-000000000000}'),
'data_type': 'olecf:dest_list:entry',
'date_time': '2016-01-17 13:08:08.2475045',
'droid_file_identifier': '{00000000-0000-0000-0000-000000000000}',
'droid_volume_identifier': '{00000000-0000-0000-0000-000000000000}',
'entry_number': 2,
'offset': 32,
'path': 'http://support.microsoft.com/kb/3124263',
'pin_status': -1,
'timestamp_desc': definitions.TIME_DESCRIPTION_MODIFICATION}
self.CheckEventValues(storage_writer, events[1], expected_event_values)
# Check a WinLnkLinkEvent.
expected_event_values = {
'data_type': 'windows:lnk:link',
'date_time': 'Not set',
'timestamp_desc': definitions.TIME_DESCRIPTION_NOT_A_TIME}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
if __name__ == '__main__':
unittest.main()
|
online_status/middleware.py | r26zhao/django-easy-comment | 318 | 11143003 | import datetime
from django.utils import timezone
from django.core.cache import cache
from django.utils.deprecation import MiddlewareMixin
from . import settings
from .models import OnlineStatus
class OnlineStatusMiddleware(MiddlewareMixin):
def process_request(self, request):
if request.user.is_authenticated() and (not request.path == '/notifications/api/unread_count/'):
cache_key = '%s_last_login' % request.user.username
now = timezone.now()
            # The user is logging in for the first time, the cache has expired,
            # or the cache was lost because the server restarted
if not cache.get(cache_key):
# print('#### cache not found #####')
obj, created = OnlineStatus.objects.get_or_create(user=request.user)
if not created:
# print("#### login before #####")
obj.last_login = now
obj.save()
cache.set(cache_key, now, settings.USER_LAST_LOGIN_EXPIRE)
else:
# print("##### cache found ######")
limit = now - datetime.timedelta(seconds=settings.USER_ONLINE_TIMEOUT)
                # If the time since the last request exceeds the TIMEOUT, update the last_login time
if cache.get(cache_key) < limit:
# print("#### renew login #####")
obj = OnlineStatus.objects.get(user=request.user)
obj.last_login = now
obj.save()
cache.set(cache_key, now, settings.USER_LAST_LOGIN_EXPIRE)
return None
|
sydent/config/http.py | clmnin/sydent | 220 | 11143004 | <reponame>clmnin/sydent<gh_stars>100-1000
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from configparser import ConfigParser
from typing import Optional
from sydent.config._base import BaseConfig
class HTTPConfig(BaseConfig):
def parse_config(self, cfg: "ConfigParser") -> bool:
"""
Parse the http section of the config
:param cfg: the configuration to be parsed
"""
# This option is deprecated
self.verify_response_template = cfg.get(
"http", "verify_response_template", fallback=None
)
self.client_bind_address = cfg.get("http", "clientapi.http.bind_address")
self.client_port = cfg.getint("http", "clientapi.http.port")
# internal port is allowed to be set to an empty string in the config
internal_api_port = cfg.get("http", "internalapi.http.port")
self.internal_bind_address = cfg.get(
"http", "internalapi.http.bind_address", fallback="::1"
)
self.internal_port: Optional[int] = None
if internal_api_port != "":
self.internal_port = int(internal_api_port)
self.cert_file = cfg.get("http", "replication.https.certfile")
self.ca_cert_file = cfg.get("http", "replication.https.cacert")
self.replication_bind_address = cfg.get(
"http", "replication.https.bind_address"
)
self.replication_port = cfg.getint("http", "replication.https.port")
self.obey_x_forwarded_for = cfg.getboolean("http", "obey_x_forwarded_for")
self.verify_federation_certs = cfg.getboolean("http", "federation.verifycerts")
self.server_http_url_base = cfg.get("http", "client_http_base")
self.base_replication_urls = {}
for section in cfg.sections():
if section.startswith("peer."):
# peer name is all the characters after 'peer.'
peer = section[5:]
if cfg.has_option(section, "base_replication_url"):
base_url = cfg.get(section, "base_replication_url")
self.base_replication_urls[peer] = base_url
return False
|
alipay/aop/api/domain/AntLinkeDevopsMobiledeviceReturnModel.py | antopen/alipay-sdk-python-all | 213 | 11143013 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AntLinkeDevopsMobiledeviceReturnModel(object):
def __init__(self):
self._device_id = None
self._remote_host = None
@property
def device_id(self):
return self._device_id
@device_id.setter
def device_id(self, value):
self._device_id = value
@property
def remote_host(self):
return self._remote_host
@remote_host.setter
def remote_host(self, value):
self._remote_host = value
def to_alipay_dict(self):
params = dict()
if self.device_id:
if hasattr(self.device_id, 'to_alipay_dict'):
params['device_id'] = self.device_id.to_alipay_dict()
else:
params['device_id'] = self.device_id
if self.remote_host:
if hasattr(self.remote_host, 'to_alipay_dict'):
params['remote_host'] = self.remote_host.to_alipay_dict()
else:
params['remote_host'] = self.remote_host
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AntLinkeDevopsMobiledeviceReturnModel()
if 'device_id' in d:
o.device_id = d['device_id']
if 'remote_host' in d:
o.remote_host = d['remote_host']
return o
|
tests/test_code/py/init/init.py | FreddyZeng/code2flow | 2,248 | 11143040 | <gh_stars>1000+
from the_import import ProvincialClass as pc, imported_func
class Abra():
def __init__(self):
self.cadabra()
def cadabra(self):
print("cadabra")
def b():
Abra()
b()
pc()
HiddenClass() # this is probably too defensive
imported_func()
|
mayan/apps/sources/wizards.py | eshbeata/open-paperless | 2,743 | 11143049 | <reponame>eshbeata/open-paperless
from __future__ import unicode_literals
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.utils.encoding import force_text
from django.utils.http import urlencode
from django.utils.translation import ugettext_lazy as _
from formtools.wizard.views import SessionWizardView
from common.mixins import ViewPermissionCheckMixin
from documents.forms import DocumentTypeSelectForm
from metadata.forms import DocumentMetadataFormSet
from tags.forms import TagMultipleSelectionForm
from .literals import STEP_DOCUMENT_TYPE, STEP_METADATA, STEP_TAGS
from .models import InteractiveSource
def has_metadata_types(wizard):
"""
Skip the 2nd step if document type has no associated metadata
"""
cleaned_data = wizard.get_cleaned_data_for_step(STEP_DOCUMENT_TYPE) or {}
document_type = cleaned_data.get('document_type')
if document_type:
return document_type.metadata.exists()
class DocumentCreateWizard(ViewPermissionCheckMixin, SessionWizardView):
condition_dict = {STEP_METADATA: has_metadata_types}
extra_context = {}
form_list = (
DocumentTypeSelectForm, DocumentMetadataFormSet,
TagMultipleSelectionForm
)
form_titles = {
DocumentTypeSelectForm: _('Step 1 of 3: Select document type'),
DocumentMetadataFormSet: _('Step 2 of 3: Enter document metadata'),
TagMultipleSelectionForm: _('Step 3 of 3: Select tags'),
}
template_name = 'appearance/generic_wizard.html'
def dispatch(self, request, *args, **kwargs):
if not InteractiveSource.objects.filter(enabled=True).exists():
messages.error(
request,
_(
'No interactive document sources have been defined or '
'none have been enabled, create one before proceeding.'
)
)
return HttpResponseRedirect(reverse('sources:setup_source_list'))
return super(
DocumentCreateWizard, self
).dispatch(request, *args, **kwargs)
def get_context_data(self, form, **kwargs):
context = super(
DocumentCreateWizard, self
).get_context_data(form=form, **kwargs)
context.update({
'step_title': self.form_titles[form.__class__],
'submit_label': _('Next step'),
'submit_icon': 'fa fa-arrow-right',
'title': _('Document upload wizard'),
})
return context
def get_form_initial(self, step):
if step == STEP_METADATA:
initial = []
for document_type_metadata_type in self.get_cleaned_data_for_step(STEP_DOCUMENT_TYPE)['document_type'].metadata.all():
initial.append(
{
'document_type': self.get_cleaned_data_for_step(STEP_DOCUMENT_TYPE)['document_type'],
'metadata_type': document_type_metadata_type.metadata_type,
}
)
return initial
return self.initial_dict.get(step, {})
def get_form_kwargs(self, step):
# Tags form needs the user instance to determine which tags to
# display
if step == STEP_DOCUMENT_TYPE:
return {'user': self.request.user}
if step == STEP_TAGS:
return {
'help_text': _('Tags to be attached.'),
'user': self.request.user
}
return {}
def done(self, *args, **kwargs):
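        # Collect the wizard selections (document type, metadata, tags) into
        # query string parameters and redirect to the interactive upload view.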
query_dict = {}
try:
query_dict['document_type_id'] = self.get_cleaned_data_for_step(STEP_DOCUMENT_TYPE)['document_type'].pk
except AttributeError:
pass
try:
for identifier, metadata in enumerate(self.get_cleaned_data_for_step(STEP_METADATA)):
if metadata.get('update'):
query_dict['metadata%s_id' % identifier] = metadata['id']
query_dict['metadata%s_value' % identifier] = metadata['value']
except TypeError:
pass
try:
query_dict['tags'] = ([force_text(tag.pk) for tag in self.get_cleaned_data_for_step(STEP_TAGS)['tags']])
except AttributeError:
pass
url = '?'.join(
[
reverse('sources:upload_interactive'),
urlencode(query_dict, doseq=True)
]
)
return HttpResponseRedirect(url)
|
asterioids-pygame-project/source_code_step_4/space_rocks/models.py | syberflea/materials | 3,682 | 11143084 | from pygame.math import Vector2
class GameObject:
def __init__(self, position, sprite, velocity):
self.position = Vector2(position)
self.sprite = sprite
self.radius = sprite.get_width() / 2
self.velocity = Vector2(velocity)
def draw(self, surface):
blit_position = self.position - Vector2(self.radius)
surface.blit(self.sprite, blit_position)
def move(self):
self.position = self.position + self.velocity
def collides_with(self, other_obj):
distance = self.position.distance_to(other_obj.position)
return distance < self.radius + other_obj.radius
|
example/Al2O3/Al2O3.py | ladyteam/phonopy | 127 | 11143097 | """Example by <NAME>."""
import numpy as np
import phonopy
phonon = phonopy.load(
unitcell_filename="POSCAR-unitcell", supercell_matrix=[2, 2, 1], log_level=1
)
print("Space group: %s" % phonon.symmetry.get_international_table())
# Example to obtain dynamical matrix
dmat = phonon.get_dynamical_matrix_at_q([0, 0, 0])
print(dmat)
# Example of band structure calculation
bands = []
q_start = np.array([1.0 / 3, 1.0 / 3, 0])
q_end = np.array([0, 0, 0])
band = []
for i in range(51):
band.append(q_start + (q_end - q_start) / 50 * i)
bands.append(band)
q_start = np.array([0, 0, 0])
q_end = np.array([1.0 / 3, 1.0 / 3, 1.0 / 2])
band = []
for i in range(51):
band.append(q_start + (q_end - q_start) / 50 * i)
bands.append(band)
print("\nPhonon dispersion:")
phonon.run_band_structure(bands, with_eigenvectors=True, labels=["X", r"$\Gamma$", "L"])
band_plot = phonon.plot_band_structure()
band_plot.show()
bs = phonon.get_band_structure_dict()
distances = bs["distances"]
frequencies = bs["frequencies"]
qpoints = bs["qpoints"]
for (qs_at_segments, dists_at_segments, freqs_at_segments) in zip(
qpoints, distances, frequencies
):
for q, d, f in zip(qs_at_segments, dists_at_segments, freqs_at_segments):
print("# %f %f %f" % tuple(q))
print(("%s " + "%f " * len(f)) % ((d,) + tuple(f)))
|
setup.py | maxschommer/pcbdl | 117 | 11143106 | #!/usr/bin/env python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import setuptools
with open("README.md", "r", encoding="utf8") as readme_file:
long_description = readme_file.read()
setuptools.setup(
name="pcbdl",
version="0.1.1",
author="Google LLC",
description="A programming way to design schematics.",
long_description=long_description,
long_description_content_type="text/markdown",
license="Apache-2.0",
url="https://github.com/google/pcbdl",
packages=setuptools.find_packages(),
keywords=["eda", "hdl", "electronics", "netlist", "hardware", "schematics"],
install_requires=["pygments"],
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)",
"Topic :: System :: Hardware",
],
)
|
pymagnitude/third_party_mock/parsimonious/nodes/__init__.py | tpeng/magnitude | 1,520 | 11143118 | class Node:
pass
class NodeVisitor:
def generic_visit(self, node, visited_children):
pass |
deon/__main__.py | NitronBeenGrinding/deonedits | 223 | 11143140 | <gh_stars>100-1000
from .cli import main
if __name__ == "__main__":
main(prog_name="python -m deon")
|
homeassistant/components/tautulli/sensor.py | andersop91/core | 22,481 | 11143173 | """A platform which allows you to get information from Tautulli."""
from __future__ import annotations
from typing import Any
from pytautulli import PyTautulli
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import (
CONF_API_KEY,
CONF_HOST,
CONF_MONITORED_CONDITIONS,
CONF_NAME,
CONF_PATH,
CONF_PORT,
CONF_SSL,
CONF_VERIFY_SSL,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType, StateType
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .coordinator import TautulliDataUpdateCoordinator
CONF_MONITORED_USERS = "monitored_users"
DEFAULT_NAME = "Tautulli"
DEFAULT_PORT = "8181"
DEFAULT_PATH = ""
DEFAULT_SSL = False
DEFAULT_VERIFY_SSL = True
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_MONITORED_CONDITIONS): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_MONITORED_USERS): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.string,
vol.Optional(CONF_PATH, default=DEFAULT_PATH): cv.string,
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
}
)
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Create the Tautulli sensor."""
name = config[CONF_NAME]
host = config[CONF_HOST]
port = config[CONF_PORT]
path = config[CONF_PATH]
api_key = config[CONF_API_KEY]
monitored_conditions = config.get(CONF_MONITORED_CONDITIONS, [])
users = config.get(CONF_MONITORED_USERS, [])
use_ssl = config[CONF_SSL]
verify_ssl = config[CONF_VERIFY_SSL]
session = async_get_clientsession(hass=hass, verify_ssl=verify_ssl)
api_client = PyTautulli(
api_token=api_key,
hostname=host,
session=session,
verify_ssl=verify_ssl,
port=port,
ssl=use_ssl,
base_api_path=path,
)
coordinator = TautulliDataUpdateCoordinator(hass=hass, api_client=api_client)
async_add_entities(
new_entities=[
TautulliSensor(
coordinator=coordinator,
name=name,
monitored_conditions=monitored_conditions,
usernames=users,
)
],
update_before_add=True,
)
class TautulliSensor(CoordinatorEntity, SensorEntity):
"""Representation of a Tautulli sensor."""
coordinator: TautulliDataUpdateCoordinator
def __init__(
self,
coordinator: TautulliDataUpdateCoordinator,
name: str,
monitored_conditions: list[str],
usernames: list[str],
) -> None:
"""Initialize the Tautulli sensor."""
super().__init__(coordinator)
self.monitored_conditions = monitored_conditions
self.usernames = usernames
self._name = name
@property
def name(self) -> str:
"""Return the name of the sensor."""
return self._name
@property
def native_value(self) -> StateType:
"""Return the state of the sensor."""
if not self.coordinator.activity:
return 0
return self.coordinator.activity.stream_count or 0
@property
def icon(self) -> str:
"""Return the icon of the sensor."""
return "mdi:plex"
@property
def native_unit_of_measurement(self) -> str:
"""Return the unit this state is expressed in."""
return "Watching"
@property
def extra_state_attributes(self) -> dict[str, Any] | None:
"""Return attributes for the sensor."""
if (
not self.coordinator.activity
or not self.coordinator.home_stats
or not self.coordinator.users
):
return None
_attributes = {
"stream_count": self.coordinator.activity.stream_count,
"stream_count_direct_play": self.coordinator.activity.stream_count_direct_play,
"stream_count_direct_stream": self.coordinator.activity.stream_count_direct_stream,
"stream_count_transcode": self.coordinator.activity.stream_count_transcode,
"total_bandwidth": self.coordinator.activity.total_bandwidth,
"lan_bandwidth": self.coordinator.activity.lan_bandwidth,
"wan_bandwidth": self.coordinator.activity.wan_bandwidth,
}
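        # Surface the top movie, TV show and user from the Tautulli home stats.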
for stat in self.coordinator.home_stats:
if stat.stat_id == "top_movies":
_attributes["Top Movie"] = stat.rows[0].title if stat.rows else None
elif stat.stat_id == "top_tv":
_attributes["Top TV Show"] = stat.rows[0].title if stat.rows else None
elif stat.stat_id == "top_users":
_attributes["Top User"] = stat.rows[0].user if stat.rows else None
for user in self.coordinator.users:
if (
self.usernames
and user.username not in self.usernames
or user.username == "Local"
):
continue
_attributes.setdefault(user.username, {})["Activity"] = None
for session in self.coordinator.activity.sessions:
if not _attributes.get(session.username):
continue
_attributes[session.username]["Activity"] = session.state
for key in self.monitored_conditions:
_attributes[session.username][key] = getattr(session, key)
return _attributes
|
tests/parsers/sqlite_plugins/chrome_cookies.py | roshanmaskey/plaso | 1,253 | 11143185 | <gh_stars>1000+
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Google Chrome cookie database plugin."""
import unittest
from plaso.lib import definitions
from plaso.parsers.sqlite_plugins import chrome_cookies
from tests.parsers.sqlite_plugins import test_lib
class Chrome17CookiesPluginTest(test_lib.SQLitePluginTestCase):
"""Tests for the Google Chrome 17-65 cookie database plugin."""
def testProcess(self):
"""Tests the Process function on a Chrome cookie database file."""
plugin = chrome_cookies.Chrome17CookiePlugin()
storage_writer = self._ParseDatabaseFileWithPlugin(['cookies.db'], plugin)
self.assertEqual(storage_writer.number_of_events, 1755)
self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
# Since we've got both events generated by cookie plugins and the Chrome
# cookie plugin we need to separate them.
events = []
extra_objects = []
for event in storage_writer.GetEvents():
event_data = self._GetEventDataOfEvent(storage_writer, event)
if event_data.data_type == 'chrome:cookie:entry':
events.append(event)
else:
extra_objects.append(event)
# The cookie database contains 560 entries:
# 560 creation timestamps.
# 560 last access timestamps.
# 560 expired timestamps.
# Then there are extra events created by plugins:
# 75 events created by Google Analytics cookies.
# In total: 1755 events.
self.assertEqual(len(events), 3 * 560)
self.assertEqual(len(extra_objects), 75)
# Check one www.linkedin.com cookie.
expected_event_values = {
'cookie_name': 'leo_auth_token',
'data_type': 'chrome:cookie:entry',
'date_time': '2011-08-25 21:50:27.292367',
'host': 'www.linkedin.com',
'httponly': False,
'persistent': True,
'timestamp_desc': definitions.TIME_DESCRIPTION_LAST_ACCESS,
'url': 'http://www.linkedin.com/'}
self.CheckEventValues(storage_writer, events[124], expected_event_values)
# Check one of the visits to rubiconproject.com.
expected_event_values = {
'cookie_name': 'put_2249',
'data_type': 'chrome:cookie:entry',
'date_time': '2012-04-01 13:54:34.949210',
'httponly': False,
'path': '/',
'persistent': True,
'secure': False,
'timestamp_desc': definitions.TIME_DESCRIPTION_LAST_ACCESS,
'url': 'http://rubiconproject.com/'}
self.CheckEventValues(storage_writer, events[379], expected_event_values)
# Examine an event for a visit to a political blog site.
expected_event_values = {
'data_type': 'chrome:cookie:entry',
'date_time': '2012-03-22 01:47:21.012022',
'host': 'politicalticker.blogs.cnn.com',
'path': '/2012/03/21/romney-tries-to-clean-up-etch-a-sketch-mess/'}
self.CheckEventValues(storage_writer, events[444], expected_event_values)
# Examine a cookie that has an autologin entry.
# This particular cookie value represents a timeout value that
# corresponds to the expiration date of the cookie.
expected_event_values = {
'cookie_name': 'autologin[timeout]',
'data': '1364824322',
'data_type': 'chrome:cookie:entry',
'date_time': '2012-04-01 13:52:56.189444',
'host': 'marvel.com',
'timestamp_desc': definitions.TIME_DESCRIPTION_CREATION}
self.CheckEventValues(storage_writer, events[1425], expected_event_values)
# Examine a cookie expiry event.
expected_event_values = {
'data_type': 'chrome:cookie:entry',
'date_time': '2013-08-14 14:19:42.000000',
'timestamp_desc': definitions.TIME_DESCRIPTION_EXPIRATION}
self.CheckEventValues(storage_writer, events[2], expected_event_values)
class Chrome66CookiesPluginTest(test_lib.SQLitePluginTestCase):
"""Tests for the Google Chrome 66 Cookies database plugin."""
def testProcess(self):
"""Tests the Process function on a Chrome cookie database file."""
plugin = chrome_cookies.Chrome66CookiePlugin()
storage_writer = self._ParseDatabaseFileWithPlugin(
['Cookies-68.0.3440.106'], plugin)
self.assertEqual(storage_writer.number_of_events, 16)
self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
# Since we've got both events generated by cookie plugins and the Chrome
# cookie plugin we need to separate them.
events = []
extra_objects = []
for event in storage_writer.GetEvents():
event_data = self._GetEventDataOfEvent(storage_writer, event)
if event_data.data_type == 'chrome:cookie:entry':
events.append(event)
else:
extra_objects.append(event)
# The cookie database contains 5 entries:
# 5 creation timestamps.
# 5 last access timestamps.
# 5 expired timestamps.
# Then there are extra events created by plugins:
# 1 event created by Google Analytics cookies.
# In total: 16 events.
self.assertEqual(len(events), 3 * 5)
self.assertEqual(len(extra_objects), 1)
# Test some cookies
# Check a GA cookie creation event with a path.
expected_event_values = {
'cookie_name': '__utma',
'data_type': 'chrome:cookie:entry',
'date_time': '2018-08-14 15:03:43.650324',
'host': 'google.com',
'httponly': False,
'persistent': True,
'timestamp_desc': definitions.TIME_DESCRIPTION_CREATION,
'url': 'http://google.com/gmail/about/'}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
# Check one of the visits to fbi.gov for last accessed time.
expected_event_values = {
'cookie_name': '__cfduid',
'data_type': 'chrome:cookie:entry',
'date_time': '2018-08-20 17:19:53.134291',
'httponly': True,
'path': '/',
'persistent': True,
'secure': False,
'timestamp_desc': definitions.TIME_DESCRIPTION_LAST_ACCESS,
'url': 'http://fbi.gov/'}
self.CheckEventValues(storage_writer, events[10], expected_event_values)
# Examine an event for a cookie with a very large expire time.
expected_event_values = {
'data_type': 'chrome:cookie:entry',
'date_time': '9999-08-17 12:26:28.000000',
'host': 'projects.fivethirtyeight.com'}
self.CheckEventValues(storage_writer, events[8], expected_event_values)
if __name__ == '__main__':
unittest.main()
|
tagulous/checks.py | nschlemm/django-tagulous | 307 | 11143188 | <filename>tagulous/checks.py
from django.core.checks import Warning, register
SERIALIZATION_MODULES_EXPECTED = {
"xml": "tagulous.serializers.xml_serializer",
"json": "tagulous.serializers.json",
"python": "tagulous.serializers.python",
"yaml": "tagulous.serializers.pyyaml",
}
WARNING_W001 = Warning(
"``settings.SERIALIZATION_MODULES`` has not been configured as expected",
hint=(
"See Tagulous installation instructions for"
" recommended SERIALIZATION_MODULES setting"
),
id="tagulous.W001",
)
def tagulous_check(app_configs, **kwargs):
from django.conf import settings
errors = []
serialization_modules = getattr(settings, "SERIALIZATION_MODULES", None)
if serialization_modules != SERIALIZATION_MODULES_EXPECTED:
errors.append(WARNING_W001)
return errors
def register_checks():
register(tagulous_check)
|
rpython/translator/backendopt/gilanalysis.py | nanjekyejoannah/pypy | 381 | 11143202 | <gh_stars>100-1000
from rpython.translator.backendopt import graphanalyze
# This is not an optimization. It checks for possible releases of the
# GIL in all graphs starting from rgc.no_release_gil.
class GilAnalyzer(graphanalyze.BoolGraphAnalyzer):
def analyze_direct_call(self, graph, seen=None):
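        # Functions hinted to close the stack or to break a transaction are
        # treated as potentially releasing the GIL.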
try:
func = graph.func
except AttributeError:
pass
else:
if getattr(func, '_gctransformer_hint_close_stack_', False):
return True
if getattr(func, '_transaction_break_', False):
return True
return graphanalyze.BoolGraphAnalyzer.analyze_direct_call(
self, graph, seen)
def analyze_external_call(self, op, seen=None):
return False
def analyze_simple_operation(self, op, graphinfo):
return False
def analyze(graphs, translator):
gilanalyzer = GilAnalyzer(translator)
for graph in graphs:
func = getattr(graph, 'func', None)
if func and getattr(func, '_no_release_gil_', False):
if gilanalyzer.analyze_direct_call(graph):
# 'no_release_gil' function can release the gil
import cStringIO
err = cStringIO.StringIO()
import sys
prev = sys.stdout
try:
sys.stdout = err
ca = GilAnalyzer(translator)
ca.verbose = True
ca.analyze_direct_call(graph) # print the "traceback" here
sys.stdout = prev
except:
sys.stdout = prev
# ^^^ for the dump of which operation in which graph actually
# causes it to return True
raise Exception("'no_release_gil' function can release the GIL:"
" %s\n%s" % (func, err.getvalue()))
|
vimfiles/bundle/vim-python/submodules/pylint/tests/test_import_graph.py | ciskoinch8/vimrc | 463 | 11143208 | <reponame>ciskoinch8/vimrc
# Copyright (c) 2006-2008, 2010, 2013 LOGILAB S.A. (Paris, FRANCE) <<EMAIL>>
# Copyright (c) 2012 <NAME> <<EMAIL>>
# Copyright (c) 2014-2018, 2020 <NAME> <<EMAIL>>
# Copyright (c) 2014 Google, Inc.
# Copyright (c) 2015 <NAME> <<EMAIL>>
# Copyright (c) 2016 <NAME> <<EMAIL>>
# Copyright (c) 2018 <NAME> <<EMAIL>>
# Copyright (c) 2019-2021 <NAME> <<EMAIL>>
# Copyright (c) 2019 <NAME> <<EMAIL>>
# Copyright (c) 2020 hippo91 <<EMAIL>>
# Copyright (c) 2020 <NAME> <<EMAIL>>
# Copyright (c) 2020 <NAME> <<EMAIL>>
# Copyright (c) 2021 <NAME> <<EMAIL>>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/LICENSE
# pylint: disable=redefined-outer-name
import os
import shutil
from os.path import exists
import pytest
from pylint import testutils
from pylint.checkers import imports, initialize
from pylint.lint import PyLinter
@pytest.fixture
def dest(request):
dest = request.param
yield dest
try:
os.remove(dest)
except FileNotFoundError:
# file may not have been created if tests inside fixture skipped
pass
POSSIBLE_DOT_FILENAMES = ["foo.dot", "foo.gv", "tests/regrtest_data/foo.dot"]
@pytest.mark.parametrize("dest", POSSIBLE_DOT_FILENAMES, indirect=True)
def test_dependencies_graph(dest):
"""DOC files are correctly generated, and the graphname is the basename"""
imports._dependencies_graph(dest, {"labas": ["hoho", "yep"], "hoho": ["yep"]})
with open(dest) as stream:
assert (
stream.read().strip()
== """
digraph "foo" {
rankdir=LR
charset="utf-8"
URL="." node[shape="box"]
"hoho" [];
"yep" [];
"labas" [];
"yep" -> "hoho" [];
"hoho" -> "labas" [];
"yep" -> "labas" [];
}
""".strip()
)
@pytest.mark.parametrize("filename", ["graph.png", "graph"])
@pytest.mark.skipif(
any(shutil.which(x) for x in ["dot", "gv"]), reason="dot or gv is installed"
)
def test_missing_graphviz(filename):
"""Raises if graphviz is not installed, and defaults to png if no extension given"""
with pytest.raises(RuntimeError, match=r"Cannot generate `graph\.png`.*"):
imports._dependencies_graph(filename, {"a": ["b", "c"], "b": ["c"]})
@pytest.fixture
def linter():
pylinter = PyLinter(reporter=testutils.GenericTestReporter())
initialize(pylinter)
return pylinter
@pytest.fixture
def remove_files():
yield
for fname in ("import.dot", "ext_import.dot", "int_import.dot"):
try:
os.remove(fname)
except FileNotFoundError:
pass
@pytest.mark.usefixtures("remove_files")
def test_checker_dep_graphs(linter):
linter.global_set_option("persistent", False)
linter.global_set_option("reports", True)
linter.global_set_option("enable", "imports")
linter.global_set_option("import-graph", "import.dot")
linter.global_set_option("ext-import-graph", "ext_import.dot")
linter.global_set_option("int-import-graph", "int_import.dot")
linter.global_set_option("int-import-graph", "int_import.dot")
# ignore this file causing spurious MemoryError w/ some python version (>=2.3?)
linter.global_set_option("ignore", ("func_unknown_encoding.py",))
linter.check("input")
linter.generate_reports()
assert exists("import.dot")
assert exists("ext_import.dot")
assert exists("int_import.dot")
|
tutorials/W1D3_ModelFitting/solutions/W1D3_Tutorial4_Solution_89324713.py | eduardojdiniz/CompNeuro | 2,294 | 11143229 |
mse_list = []
order_list = list(range(max_order + 1))
for order in order_list:
X_design = make_design_matrix(x, order)
# Get prediction for the polynomial regression model of this order
y_hat = X_design @ theta_hats[order]
# Compute the residuals
residuals = y - y_hat
# Compute the MSE
mse = np.mean(residuals ** 2)
mse_list.append(mse)
# Visualize MSE of fits
with plt.xkcd():
evaluate_fits(order_list, mse_list) |
nucleus/io/python/vcf_reader_wrap_test.py | google/nucleus | 721 | 11143278 | <reponame>google/nucleus
# Copyright 2018 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for vcf_reader CLIF python wrappers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from nucleus.io.python import vcf_reader
from nucleus.protos import reference_pb2
from nucleus.protos import variants_pb2
from nucleus.testing import test_utils
from nucleus.util import ranges
expected_sites_contigs = [
reference_pb2.ContigInfo(name='chr1', pos_in_fasta=0, n_bases=248956422),
reference_pb2.ContigInfo(name='chr2', pos_in_fasta=1, n_bases=242193529),
reference_pb2.ContigInfo(name='chr3', pos_in_fasta=2, n_bases=198295559),
reference_pb2.ContigInfo(name='chr4', pos_in_fasta=3, n_bases=190214555),
reference_pb2.ContigInfo(name='chr5', pos_in_fasta=4, n_bases=181538259),
reference_pb2.ContigInfo(name='chr6', pos_in_fasta=5, n_bases=170805979),
reference_pb2.ContigInfo(name='chr7', pos_in_fasta=6, n_bases=159345973),
reference_pb2.ContigInfo(name='chr8', pos_in_fasta=7, n_bases=145138636),
reference_pb2.ContigInfo(name='chr9', pos_in_fasta=8, n_bases=138394717),
reference_pb2.ContigInfo(name='chr10', pos_in_fasta=9, n_bases=133797422),
reference_pb2.ContigInfo(name='chr11', pos_in_fasta=10, n_bases=135086622),
reference_pb2.ContigInfo(name='chr12', pos_in_fasta=11, n_bases=133275309),
reference_pb2.ContigInfo(name='chr13', pos_in_fasta=12, n_bases=114364328),
reference_pb2.ContigInfo(name='chr14', pos_in_fasta=13, n_bases=107043718),
reference_pb2.ContigInfo(name='chr15', pos_in_fasta=14, n_bases=101991189),
reference_pb2.ContigInfo(name='chr16', pos_in_fasta=15, n_bases=90338345),
reference_pb2.ContigInfo(name='chr17', pos_in_fasta=16, n_bases=83257441),
reference_pb2.ContigInfo(name='chr18', pos_in_fasta=17, n_bases=80373285),
reference_pb2.ContigInfo(name='chr19', pos_in_fasta=18, n_bases=58617616),
reference_pb2.ContigInfo(name='chr20', pos_in_fasta=19, n_bases=64444167),
reference_pb2.ContigInfo(name='chr21', pos_in_fasta=20, n_bases=46709983),
reference_pb2.ContigInfo(name='chr22', pos_in_fasta=21, n_bases=50818468),
reference_pb2.ContigInfo(name='chrX', pos_in_fasta=22, n_bases=156040895),
reference_pb2.ContigInfo(name='chrY', pos_in_fasta=23, n_bases=57227415),
reference_pb2.ContigInfo(name='chrM', pos_in_fasta=24, n_bases=16569),
]
# pylint: disable=line-too-long
expected_samples_filters = [
variants_pb2.VcfFilterInfo(id='PASS', description='All filters passed'),
variants_pb2.VcfFilterInfo(id='LowQual', description='Low quality'),
variants_pb2.VcfFilterInfo(
id='VQSRTrancheINDEL95.00to96.00',
description='Truth sensitivity tranche level for INDEL model at VQS Lod: 0.9364 <= x < 1.0415'
),
variants_pb2.VcfFilterInfo(
id='VQSRTrancheINDEL96.00to97.00',
description='Truth sensitivity tranche level for INDEL model at VQS Lod: 0.8135 <= x < 0.9364'
),
variants_pb2.VcfFilterInfo(
id='VQSRTrancheINDEL97.00to99.00',
description='Truth sensitivity tranche level for INDEL model at VQS Lod: 0.323 <= x < 0.8135'
),
variants_pb2.VcfFilterInfo(
id='VQSRTrancheINDEL99.00to99.50',
description='Truth sensitivity tranche level for INDEL model at VQS Lod: -0.1071 <= x < 0.323'
),
variants_pb2.VcfFilterInfo(
id='VQSRTrancheINDEL99.50to99.90',
description='Truth sensitivity tranche level for INDEL model at VQS Lod: -1.845 <= x < -0.1071'
),
variants_pb2.VcfFilterInfo(
id='VQSRTrancheINDEL99.90to99.95',
description='Truth sensitivity tranche level for INDEL model at VQS Lod: -3.2441 <= x < -1.845'
),
variants_pb2.VcfFilterInfo(
id='VQSRTrancheINDEL99.95to100.00+',
description='Truth sensitivity tranche level for INDEL model at VQS Lod < -57172.0693'
),
variants_pb2.VcfFilterInfo(
id='VQSRTrancheINDEL99.95to100.00',
description='Truth sensitivity tranche level for INDEL model at VQS Lod: -57172.0693 <= x < -3.2441'
),
variants_pb2.VcfFilterInfo(
id='VQSRTrancheSNP99.50to99.60',
description='Truth sensitivity tranche level for SNP model at VQS Lod: -0.751 <= x < -0.6681'
),
variants_pb2.VcfFilterInfo(
id='VQSRTrancheSNP99.60to99.80',
description='Truth sensitivity tranche level for SNP model at VQS Lod: -1.0839 <= x < -0.751'
),
variants_pb2.VcfFilterInfo(
id='VQSRTrancheSNP99.80to99.90',
description='Truth sensitivity tranche level for SNP model at VQS Lod: -1.7082 <= x < -1.0839'
),
variants_pb2.VcfFilterInfo(
id='VQSRTrancheSNP99.90to99.95',
description='Truth sensitivity tranche level for SNP model at VQS Lod: -3.0342 <= x < -1.7082'
),
variants_pb2.VcfFilterInfo(
id='VQSRTrancheSNP99.95to100.00+',
description='Truth sensitivity tranche level for SNP model at VQS Lod < -40235.9641'
),
variants_pb2.VcfFilterInfo(
id='VQSRTrancheSNP99.95to100.00',
description='Truth sensitivity tranche level for SNP model at VQS Lod: -40235.9641 <= x < -3.0342'
)
]
# pylint: enable=line-too-long
class WrapVcfReaderTests(absltest.TestCase):
def setUp(self):
self.sites_vcf = test_utils.genomics_core_testdata('test_sites.vcf')
self.samples_vcf = test_utils.genomics_core_testdata('test_samples.vcf.gz')
self.options = variants_pb2.VcfReaderOptions()
self.sites_reader = vcf_reader.VcfReader.from_file(self.sites_vcf,
self.options)
self.samples_reader = vcf_reader.VcfReader.from_file(
self.samples_vcf, self.options)
def test_vcf_iterate(self):
iterable = self.sites_reader.iterate()
self.assertEqual(test_utils.iterable_len(iterable), 5)
def test_vcf_header(self):
header = self.sites_reader.header
expected1 = variants_pb2.VcfStructuredExtra(
key='ALT',
fields=[
variants_pb2.VcfExtra(key='ID', value='NON_REF'),
variants_pb2.VcfExtra(
key='Description',
value='Represents any possible alternative allele at th'
'is location')
])
expected2 = variants_pb2.VcfStructuredExtra(
key='META',
fields=[
variants_pb2.VcfExtra(key='ID', value='TESTMETA'),
variants_pb2.VcfExtra(key='Description', value='blah')
])
self.assertLen(header.structured_extras, 2)
self.assertEqual(header.structured_extras[1], expected2)
self.assertEqual(header.structured_extras[0], expected1)
def test_vcf_contigs(self):
self.assertEqual(expected_sites_contigs,
list(self.sites_reader.header.contigs))
def test_vcf_filters(self):
self.assertEqual(expected_samples_filters,
list(self.samples_reader.header.filters))
def test_vcf_samples(self):
self.assertEqual(list(self.sites_reader.header.sample_names), [])
self.assertEqual(
list(self.samples_reader.header.sample_names), ['NA12878_18_99'])
def test_vcf_query(self):
range1 = ranges.parse_literal('chr3:100,000-500,000')
iterable = self.samples_reader.query(range1)
self.assertEqual(test_utils.iterable_len(iterable), 4)
def test_vcf_from_string(self):
v = self.samples_reader.from_string(
'chr3\t370537\trs142286746\tC\tCA,CAA\t350.73\tPASS\t'
'AC=1,1;AF=0.500,0.500;AN=2;DB;DP=16;ExcessHet=3.0103;'
'FS=0.000;MLEAC=1,1;MLEAF=0.500,0.500;MQ=60.00;QD=26.98;'
'SOR=1.179;VQSLOD=2.88;culprit=FS\tGT:AD:DP:GQ:PL\t'
'1/2:0,6,7:13:99:388,188,149,140,0,116')
self.assertEqual(v.reference_name, 'chr3')
self.assertEqual(v.start, 370536)
self.assertEqual(list(v.names), ['rs142286746'])
self.assertEqual(v.reference_bases, 'C')
self.assertEqual(list(v.alternate_bases), ['CA', 'CAA'])
self.assertEqual(len(v.calls), 1)
def test_vcf_from_string_raises_on_bad_input(self):
with self.assertRaises(ValueError):
self.samples_reader.from_string('BAD NOT A VCF RECORD\n;;')
def test_from_file_raises_with_missing_source(self):
# TODO(b/196638558): OpError exception not propagated.
with self.assertRaisesRegexp(ValueError, 'Could not open missing.vcf'):
vcf_reader.VcfReader.from_file('missing.vcf', self.options)
def test_ops_on_closed_reader_raise(self):
with self.samples_reader:
pass
# At this point the reader is closed.
with self.assertRaisesRegexp(ValueError, 'Cannot Iterate a closed'):
self.samples_reader.iterate()
with self.assertRaisesRegexp(ValueError, 'Cannot Query a closed'):
self.samples_reader.query(
ranges.parse_literal('chr1:10,000,000-10,000,100'))
def test_query_on_unindexed_reader_raises(self):
window = ranges.parse_literal('chr1:10,000,000-10,000,100')
unindexed_file = test_utils.genomics_core_testdata('test_samples.vcf')
with vcf_reader.VcfReader.from_file(unindexed_file, self.options) as reader:
with self.assertRaisesRegexp(ValueError, 'Cannot query without an index'):
reader.query(window)
def test_query_raises_with_bad_range(self):
with self.assertRaisesRegexp(ValueError, 'Unknown reference_name'):
self.samples_reader.query(ranges.parse_literal('XXX:1-10'))
with self.assertRaisesRegexp(ValueError, 'Malformed region'):
self.samples_reader.query(ranges.parse_literal('chr1:0-5'))
with self.assertRaisesRegexp(ValueError, 'Malformed region'):
self.samples_reader.query(ranges.parse_literal('chr1:6-5'))
with self.assertRaisesRegexp(ValueError, 'Malformed region'):
self.samples_reader.query(ranges.parse_literal('chr1:10-5'))
def test_context_manager(self):
with vcf_reader.VcfReader.from_file(self.sites_vcf, self.options) as f:
self.assertEqual(expected_sites_contigs, list(f.header.contigs))
# Commented out because we in fact don't detect the malformed VCF yet. It is
# unclear if it's even possible to detect the issue with the API provided by
# htslib.
# def test_vcf_iterate_raises_on_malformed_record(self):
# malformed = test_utils.genomics_core_testdata('malformed.vcf')
# reader = vcf_reader.VcfReader.from_file(malformed, self.unindexed_options)
# iterable = iter(reader.iterate())
# self.assertIsNotNone(next(iterable))
# with self.assertRaises(ValueError):
# print(list(iterable))
if __name__ == '__main__':
absltest.main()
|
tests/random/random_random.py | Fryguy/py2rb | 124 | 11143281 | <reponame>Fryguy/py2rb
import random
x = random.random()
if 0 < x < 1:
print("OK")
else:
print("NG")
|
pcan/models/roi_heads/refine_heads/hr_em_match_head.py | SysCV/pcan | 271 | 11143326 | import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.runner import force_fp32, auto_fp16
from mmcv.cnn import ConvModule, build_upsample_layer
from mmdet.core import bbox_rescale
from mmdet.models.builder import HEADS, build_loss
from mmdet.models.roi_heads.mask_heads.fcn_mask_head import (
_do_paste_mask, BYTES_PER_FLOAT, GPU_MEM_LIMIT)
from pcan.core import cal_similarity
def gen_pos_emb(x, temperature=10000, scale=2 * math.pi, normalize=False):
"""
This is a more standard version of the position embedding, very similar to
the one used by the Attention is all you need paper, generalized to work on
images.
"""
R, C, H, W = x.size()
mask = x.new_ones((R, H, W))
y_embed = mask.cumsum(1, dtype=torch.float32)
x_embed = mask.cumsum(2, dtype=torch.float32)
if normalize:
eps = 1e-6
y_embed = y_embed / (y_embed[:, -1:, :] + eps) * scale
x_embed = x_embed / (x_embed[:, :, -1:] + eps) * scale
num_pos_feats = C // 2
assert num_pos_feats * 2 == C, (
'The input channel number must be an even number.')
dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=x.device)
dim_t = temperature ** (2 * (dim_t // 2) / num_pos_feats)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(),
pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(),
pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2).contiguous()
return pos
@HEADS.register_module
class HREMMatchHeadPlus(nn.Module):
"""HR means high-resolution. This version refine the mask projecting to the
1/4 or 1/8 size of the original input, instead of refining in the RoI level.
"""
def __init__(self,
num_feats=3,
num_convs=4,
in_channels=256,
conv_kernel_size=3,
conv_channels=128,
out_channels=8,
num_classes=80,
feat_stride=8,
out_stride=4,
pos_proto_num=4,
neg_proto_num=4,
stage_num=6,
with_mask_key=True,
with_both_feat=False,
with_pos_emb=False,
match_score_thr=0.5,
rect_scale_factor=1.5,
upsample_cfg=dict(type='deconv'),
conv_cfg=None,
norm_cfg=None,
loss_mask=dict(
type='DiceLoss',
loss_weight=1.0)):
super().__init__()
self.upsample_cfg = upsample_cfg.copy()
if self.upsample_cfg['type'] not in [
None, 'deconv', 'nearest', 'bilinear', 'carafe'
]:
raise ValueError(
f'Invalid upsample method {self.upsample_cfg["type"]}, '
'accepted methods are "deconv", "nearest", "bilinear", '
'"carafe"')
self.num_feats = num_feats
self.num_convs = num_convs
self.in_channels = in_channels
self.conv_kernel_size = conv_kernel_size
self.conv_channels = conv_channels
self.out_channels = out_channels
self.feat_stride = feat_stride
self.out_stride = out_stride
self.num_classes = num_classes
self.upsample_method = self.upsample_cfg.get('type')
self.scale_factor = feat_stride // out_stride
self.pos_proto_num = pos_proto_num
self.neg_proto_num = neg_proto_num
self.stage_num = stage_num
self.with_mask_key = with_mask_key
self.with_both_feat = with_both_feat
self.with_pos_emb = with_pos_emb
self.match_score_thr = match_score_thr
self.rect_scale_factor = rect_scale_factor
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.fp16_enabled = False
self.loss_mask = build_loss(loss_mask)
self.positioanl_embeddings = None
self.refines = nn.ModuleList()
for i in range(self.num_feats):
in_channels = self.in_channels
padding = (self.conv_kernel_size - 1) // 2
self.refines.append(
ConvModule(
self.in_channels, self.conv_channels, self.conv_kernel_size,
padding=padding, conv_cfg=conv_cfg, norm_cfg=norm_cfg))
padding = (self.conv_kernel_size - 1) // 2
self.conv1 = ConvModule(
self.conv_channels, self.out_channels, self.conv_kernel_size,
padding=padding, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=None)
self.conv2 = ConvModule(
self.conv_channels, self.out_channels, 1, conv_cfg=conv_cfg,
norm_cfg=norm_cfg, act_cfg=None)
self.conv3 = ConvModule(
3, self.out_channels, self.conv_kernel_size, padding=padding,
conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=None)
self.conv4 = ConvModule(
self.conv_channels, self.out_channels, 1, conv_cfg=conv_cfg,
norm_cfg=norm_cfg, act_cfg=None)
self.convs = nn.ModuleList()
for i in range(self.num_convs):
padding = (self.conv_kernel_size - 1) // 2
self.convs.append(
ConvModule(
self.out_channels, self.out_channels, self.conv_kernel_size,
padding=padding, conv_cfg=conv_cfg, norm_cfg=norm_cfg))
upsample_cfg_ = self.upsample_cfg.copy()
if self.upsample_method is None:
self.upsample = None
elif self.upsample_method == 'deconv':
upsample_cfg_.update(
in_channels=self.out_channels,
out_channels=self.out_channels,
kernel_size=self.scale_factor,
stride=self.scale_factor)
self.upsample = build_upsample_layer(upsample_cfg_)
elif self.upsample_method == 'carafe':
upsample_cfg_.update(
channels=self.out_channels, scale_factor=self.scale_factor)
self.upsample = build_upsample_layer(upsample_cfg_)
else:
# suppress warnings
align_corners = (None
if self.upsample_method == 'nearest' else False)
upsample_cfg_.update(
scale_factor=self.scale_factor,
mode=self.upsample_method,
align_corners=align_corners)
self.upsample = build_upsample_layer(upsample_cfg_)
self.conv_logits = nn.Conv2d(self.out_channels, 1, 1)
self.relu = nn.ReLU(inplace=True)
self.init_protos(pos_proto_num, 'pos_mu')
self.init_protos(neg_proto_num, 'neg_mu')
def pos_emb(self, x):
if not self.with_pos_emb:
return 0.
if self.positioanl_embeddings is None:
self.positioanl_embeddings = gen_pos_emb(x, normalize=True)
return self.positioanl_embeddings
def init_weights(self):
for m in [self.upsample, self.conv_logits]:
if m is None:
continue
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu')
nn.init.constant_(m.bias, 0)
def match(self, key_embeds, ref_embeds, key_pids, ref_pids):
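        # For each key instance, pick the reference instance that shares its
        # instance id with the highest cosine similarity; keep the match only
        # if the score exceeds match_score_thr.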
num_imgs = len(key_embeds)
valids, ref_inds = [], []
for i in range(num_imgs):
cos_dist = cal_similarity(
key_embeds[i], ref_embeds[i], method='cosine')
same_pids = key_pids[i][:, None] == ref_pids[i][None, :]
zeros = cos_dist.new_zeros(cos_dist.size())
scores = torch.where(same_pids, cos_dist, zeros)
conf, ref_ind = torch.max(scores, dim=1)
valid = conf > self.match_score_thr
ref_ind = ref_ind[valid]
valids.append(valid)
ref_inds.append(ref_ind)
return valids, ref_inds
def init_protos(self, proto_num, proto_name):
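        # Prototypes are randomly initialised, l2-normalised and stored as
        # buffers: they seed the EM iterations and are not learned by gradients.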
protos = torch.Tensor(1, self.conv_channels, proto_num)
protos.normal_(0, math.sqrt(2. / proto_num))
protos = self._l2norm(protos, dim=1)
self.register_buffer(proto_name, protos)
@auto_fp16()
def forward_feat(self, x):
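        # Refine the selected pyramid levels with per-level convs, upsample
        # them to the finest selected resolution and sum them.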
start_lvl = int(math.log2(self.feat_stride // 4))
end_lvl = min(start_lvl + self.num_feats, len(x))
feats = [
refine(lvl)
for refine, lvl in zip(self.refines, x[start_lvl:end_lvl])]
for i in range(1, len(feats)):
feats[i] = F.interpolate(feats[i], size=feats[0].size()[-2:],
mode='bilinear')
feat = sum(feats)
# for conv in self.convs:
# feat = conv(feat)
return feat
@force_fp32(apply_to=('inp', ))
def _l1norm(self, inp, dim):
return inp / (1e-6 + inp.sum(dim=dim, keepdim=True))
@force_fp32(apply_to=('inp', ))
def _l2norm(self, inp, dim):
return inp / (1e-6 + inp.norm(dim=dim, keepdim=True))
@force_fp32(apply_to=('feat', 'mask', 'mu'))
@torch.no_grad()
def _em_iter(self, feat, mask, mu):
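        # EM attention restricted to the masked region: the E-step soft-assigns
        # every position to the K prototypes (softmax over prototypes, weighted
        # by the mask), the M-step re-estimates the prototypes as l2-normalised
        # weighted sums of the (position-encoded) features.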
        # N = H * W spatial positions
_, C = feat.size()[:2]
R, _ = mask.size()[:2]
pos_feat = feat + self.pos_emb(x=feat)
x = pos_feat.view(1, C, -1) # 1 * C * N
y = feat.view(1, C, -1) # 1 * C * N
m = mask.view(R, 1, -1) # R * 1 * N
mu = mu.repeat(R, 1, 1) # R * C * K
for i in range(self.stage_num):
z = torch.einsum('ocn,rck->rnk', (x, mu)) # R * N * K
z = F.softmax(z, dim=2) # R * N * K
z = torch.einsum('rnk,ron->rnk', (z, m)) # R * N * K
z = self._l1norm(z, dim=1) # R * N * K
mu = torch.einsum('ocn,rnk->rck', (x, z)) # R * C * K
mu = self._l2norm(mu, dim=1) # R * C * K
nu = torch.einsum('ocn,rnk->rck', (y, z)) # R * C * K
nu = self._l2norm(nu, dim=1) # R * C * K
return mu, nu
@force_fp32(apply_to=('feat', 'mu'))
def _prop(self, feat, mu):
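        # Propagate the prototypes onto the given feature map: softmax over all
        # foreground + background prototypes, then sum the responses within each
        # half to obtain a 2-channel foreground/background attention map.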
R = mu.size(0)
_, C, H, W = feat.size()
pos_feat = feat + self.pos_emb(x=feat)
x = pos_feat.view(1, C, -1) # 1 * C * N
z = torch.einsum('rck,ocn->rkn', (mu, x)) # R * K * N
z = F.softmax(z, dim=1) # R * K * N
z = z.view(R, 2, -1, H, W).sum(dim=2) # R * 2 * H * W
return z
def em_match(self, feat_a, mask_a, rect_a, feat_b, mask_b, rect_b):
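        # Estimate foreground/background prototypes from frame A (or from both
        # frames) via EM and propagate them onto frame B's features.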
if not self.with_both_feat:
pos_mask, neg_mask = rect_a * mask_a, rect_a * (1 - mask_a)
pos_mu, pos_nu = self._em_iter(feat_a, pos_mask, self.pos_mu)
neg_mu, neg_nu = self._em_iter(feat_a, neg_mask, self.neg_mu)
else:
feat = torch.cat((feat_a, feat_b), dim=2)
mask = torch.cat((mask_a, mask_b), dim=2)
rect = torch.cat((rect_a, rect_b), dim=2)
pos_mask, neg_mask = rect * mask, rect * (1 - mask)
pos_mu, pos_nu = self._em_iter(feat, pos_mask, self.pos_mu)
neg_mu, neg_nu = self._em_iter(feat, neg_mask, self.neg_mu)
mu = torch.cat((pos_mu, neg_mu), dim=2)
z = self._prop(feat_b, mu)
R = mask_b.size(0)
pos_nu = pos_nu.permute(0, 2, 1).contiguous().view(R, -1, 1, 1)
return pos_nu, z
def compute_context(self, feat, mask, eps=1e-5):
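        # Masked global average pooling: mean feature over the foreground region.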
_, C = feat.size()[:2]
R, _ = mask.size()[:2]
fore_feat = (feat * mask).view(R, C, -1).sum(dim=2)
fore_sum = mask.view(R, 1, -1).sum(dim=2)
fore_feat = fore_feat / (fore_sum + eps)
fore_feat = fore_feat.view(R, C, 1, 1)
return fore_feat
def gather_context(self, feat, mask, gap, z, pos_mu):
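        # Fuse the frame features, the pooled foreground context, the mask plus
        # propagated attention maps, and the matched prototype responses
        # (grouped 1x1 conv over pos_mu) into the decoder input.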
mask = torch.cat((mask, z), dim=1)
res = self.conv1(feat) + self.conv2(gap) + self.conv3(mask)
res = res + F.conv2d(pos_mu, self.conv4.conv.weight,
self.conv4.conv.bias, groups=self.pos_proto_num)
res = F.relu(res)
return res
@auto_fp16()
def forward(self, x_a, mask_a, rect_a, x_b, mask_b, rect_b):
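        # Per image: match prototypes between frame A and frame B, gather the
        # instance context, then decode the fused features through the conv
        # stack and upsampling into refined mask logits.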
assert len(mask_a) == len(mask_b) == x_a[0].size(0) == x_b[0].size(0)
feat_a = self.forward_feat(x_a)
feat_b = self.forward_feat(x_b)
B, C, H, W = feat_a.size()
feat_a = torch.chunk(feat_a, B, dim=0)
feat_b = torch.chunk(feat_b, B, dim=0)
xs = []
for i in range(B):
if len(mask_a[i]) == 0:
continue
m_a = mask_a[i].clone()
m_b = mask_b[i].clone()
mask_a[i] = mask_a[i].sigmoid()
mask_b[i] = mask_b[i].sigmoid()
# pos_mu: [R, K * C, 1, 1]
# pos_z: [R, 1, H, W]
pos_mu, z = self.em_match(feat_a[i], mask_a[i], rect_a[i],
feat_b[i], mask_b[i], rect_b[i])
# pos_feat: [R, C, 1, 1]
gap = self.compute_context(feat_a[i], mask_a[i])
# x: [R, C, H, W]
mask = m_b if self.with_mask_key else m_a
x = self.gather_context(feat_b[i], mask, gap, z, pos_mu)
xs.append(x)
x = torch.cat(xs, dim=0)
for conv in self.convs:
x = conv(x)
if self.upsample is not None:
x = self.upsample(x)
if self.upsample_method == 'deconv':
x = self.relu(x)
mask_pred = self.conv_logits(x)
return mask_pred
def get_targets(self, sampling_results, valids, gt_masks):
pos_assigned_gt_inds = [
res.pos_assigned_gt_inds[valid]
for res, valid in zip(sampling_results, valids)]
mask_targets = map(self.get_target_single, pos_assigned_gt_inds,
gt_masks)
mask_targets = list(mask_targets)
if len(mask_targets) > 0:
mask_targets = torch.cat(mask_targets)
return mask_targets
def get_target_single(self, pos_assigned_gt_inds, gt_masks):
device = pos_assigned_gt_inds.device
num_pos = pos_assigned_gt_inds.size(0)
if num_pos > 0:
mask_targets = torch.from_numpy(gt_masks.to_ndarray()).float()
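            # Downsample the full-resolution GT masks to the prediction
            # resolution by sampling the centre pixel of every
            # out_stride x out_stride cell.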
start = self.out_stride // 2
stride = self.out_stride
mask_targets = mask_targets[:, start::stride, start::stride]
mask_targets = mask_targets.to(device)[pos_assigned_gt_inds]
else:
mask_targets = pos_assigned_gt_inds.new_zeros((
0, gt_masks.height // self.out_stride,
gt_masks.width // self.out_stride))
return mask_targets
def get_seg_masks(self, mask_pred, det_labels, rcnn_test_cfg, ori_shape,
scale_factor, rescale):
"""Get segmentation masks from mask_pred and bboxes.
Args:
mask_pred (Tensor or ndarray): shape (n, #class, h, w).
For single-scale testing, mask_pred is the direct output of
model, whose type is Tensor, while for multi-scale testing,
it will be converted to numpy array outside of this method.
det_labels (Tensor): shape (n, )
            rcnn_test_cfg (dict): rcnn testing config
            ori_shape: original image size
            scale_factor: scale factor of the image, used when rescale is False
            rescale (bool): whether to return masks at the original image size
Returns:
list[list]: encoded masks
"""
if not isinstance(mask_pred, torch.Tensor):
mask_pred = det_labels.new_tensor(mask_pred)
device = mask_pred.device
cls_segms = [[] for _ in range(self.num_classes)
] # BG is not included in num_classes
segms = []
labels = det_labels
if rescale:
img_h, img_w = ori_shape[:2]
else:
img_h = np.round(ori_shape[0] * scale_factor).astype(np.int32)
img_w = np.round(ori_shape[1] * scale_factor).astype(np.int32)
scale_factor = 1.0
if not isinstance(scale_factor, (float, torch.Tensor)):
scale_factor = det_labels.new_tensor(scale_factor)
N = len(mask_pred)
        # The actual implementation splits the input into chunks
        # and pastes them chunk by chunk.
num_chunks = int(
np.ceil(N * img_h * img_w * BYTES_PER_FLOAT / GPU_MEM_LIMIT))
assert (num_chunks <=
N), 'Default GPU_MEM_LIMIT is too small; try increasing it'
chunks = torch.chunk(torch.arange(N, device=device), num_chunks)
threshold = rcnn_test_cfg.mask_thr_binary
out_h, out_w = mask_pred.size()[-2:]
out_h = out_h * self.out_stride
out_w = out_w * self.out_stride
im_mask = torch.zeros(
N,
out_h,
out_w,
device=device,
dtype=torch.bool if threshold >= 0 else torch.uint8)
for inds in chunks:
masks_chunk = _do_paste_mask_hr(
mask_pred[inds],
out_h,
out_w,
offset=0.)
if threshold >= 0:
masks_chunk = (masks_chunk >= threshold).to(dtype=torch.bool)
else:
# for visualization and debugging
masks_chunk = (masks_chunk * 255).to(dtype=torch.uint8)
im_mask[inds] = masks_chunk
for i in range(N):
segm = im_mask[i, :img_h, :img_w].cpu().numpy()
cls_segms[labels[i]].append(segm)
segms.append(segm)
return cls_segms, segms
def get_hr_masks(self, feat, mask_pred, det_bboxes, det_labels,
scale_factor):
"""Get high-resolution masks from mask_pred and bboxes.
Args:
mask_pred (Tensor or ndarray): shape (n, #class, h, w).
For single-scale testing, mask_pred is the direct output of
model, whose type is Tensor, while for multi-scale testing,
it will be converted to numpy array outside of this method.
det_bboxes (Tensor): shape (n, 4/5)
det_labels (Tensor): shape (n, )
            scale_factor: scale factor used to map det_bboxes to the feature scale
Returns:
list[list]: encoded masks
"""
if not isinstance(mask_pred, torch.Tensor):
mask_pred = det_bboxes.new_tensor(mask_pred)
device = mask_pred.device
bboxes = det_bboxes[:, :4]
labels = det_labels
level = int(math.log2(self.feat_stride // 4))
mask_h, mask_w = feat[level].size()[-2:]
if not isinstance(scale_factor, (float, torch.Tensor)):
scale_factor = bboxes.new_tensor(scale_factor)
bboxes = bboxes / scale_factor / self.feat_stride
rects = bbox_rescale(bboxes, self.rect_scale_factor)
N = len(mask_pred)
        # The actual implementation splits the input into chunks
        # and pastes them chunk by chunk.
if device.type == 'cpu':
            # CPU is most efficient when masks are pasted one by one with
            # skip_empty=True, so that it performs the minimal number of
            # operations.
num_chunks = N
else:
# GPU benefits from parallelism for larger chunks,
# but may have memory issue
num_chunks = int(
np.ceil(N * mask_h * mask_w * BYTES_PER_FLOAT / GPU_MEM_LIMIT))
assert (num_chunks <=
N), 'Default GPU_MEM_LIMIT is too small; try increasing it'
im_mask = torch.zeros(
N,
1,
mask_h,
mask_w,
device=device,
dtype=torch.float32)
im_rect = torch.zeros(
N, 1, mask_h, mask_w, device=device, dtype=torch.float32)
mask_pred = mask_pred[range(N), labels][:, None]
rect_pred = mask_pred.new_ones(mask_pred.size())
if N == 0:
return im_mask, im_rect
chunks = torch.chunk(torch.arange(N, device=device), num_chunks)
for inds in chunks:
masks_chunk, spatial_inds = _do_paste_mask(
mask_pred[inds],
bboxes[inds],
mask_h,
mask_w,
skip_empty=device.type == 'cpu')
im_mask[(inds, 0) + spatial_inds] = masks_chunk
rects_chunk, spatial_inds = _do_paste_mask(
rect_pred[inds], rects[inds], mask_h, mask_w, skip_empty=False)
im_rect[(inds, 0) + spatial_inds] = rects_chunk
return im_mask, im_rect
def _do_paste_mask_hr(masks, img_h, img_w, offset):
"""Paste instance masks acoording to boxes.
This implementation is modified from
https://github.com/facebookresearch/detectron2/
Args:
masks (Tensor): N, 1, H, W
img_h (int): Height of the image to be pasted.
img_w (int): Width of the image to be pasted.
Returns:
tuple: (Tensor, tuple). The first item is mask tensor, the second one
is the slice object.
The whole image will be pasted. It will return a mask of shape
(N, img_h, img_w) and an empty tuple.
"""
# On GPU, paste all masks together (up to chunk size)
# by using the entire image to sample the masks
# Compared to pasting them one by one,
# this has more operations but is faster on COCO-scale dataset.
device = masks.device
x0_int, y0_int = 0, 0
x1_int, y1_int = img_w, img_h
N = masks.shape[0]
img_y = torch.arange(
y0_int, y1_int, device=device, dtype=torch.float32) + offset
img_x = torch.arange(
x0_int, x1_int, device=device, dtype=torch.float32) + offset
img_y = img_y / img_h * 2 - 1
img_x = img_x / img_w * 2 - 1
img_y = img_y.unsqueeze(dim=0).repeat(N, 1)
img_x = img_x.unsqueeze(dim=0).repeat(N, 1)
# img_x, img_y have shapes (N, w), (N, h)
if torch.isinf(img_x).any():
inds = torch.where(torch.isinf(img_x))
img_x[inds] = 0
if torch.isinf(img_y).any():
inds = torch.where(torch.isinf(img_y))
img_y[inds] = 0
gx = img_x[:, None, :].expand(N, img_y.size(1), img_x.size(1))
gy = img_y[:, :, None].expand(N, img_y.size(1), img_x.size(1))
grid = torch.stack([gx, gy], dim=3)
img_masks = F.grid_sample(
masks.to(dtype=torch.float32), grid, align_corners=False)
return img_masks[:, 0] |
PyEMD/tests/test_all.py | jurg96/PyEMD | 583 | 11143358 | <filename>PyEMD/tests/test_all.py
import unittest
import sys
if __name__ == '__main__':
test_suite = unittest.defaultTestLoader.discover('.', '*test*.py')
test_runner = unittest.TextTestRunner(resultclass=unittest.TextTestResult)
result = test_runner.run(test_suite)
sys.exit(not result.wasSuccessful())
|
modules/zip_cracker.py | Lola224/hakkuframework | 250 | 11143369 | <gh_stars>100-1000
# Copyright (C) 2015 – 2021 <NAME> (4shadoww)
from core.hakkuframework import *
from core import colors
import zipfile
import threading, queue
from core import getpath
from os.path import relpath
import sys
conf = {
"name": "zip_cracker", # Module's name (should be same as file name)
"version": "1.0", # Module version
"shortdesc": "zip file brute-force attack using dictionary", # Short description
"github": "4shadoww", # Author's github
"author": "4shadoww", # Author
"email": "4shadoww", # Email
"initdate": "2016-12-22", # Initial date
"lastmod": "2017-01-03",
"apisupport": True, # Api support
}
# List of the variables
variables = OrderedDict((
("file", ["none", "target zip file"]),
("dict", ["none", "dictionary of words"]),
("tc", [8, "thread count (int)"]),
("exto", ["none", "extract directory"])
))
# Simple changelog
changelog = "Version 1.0:\nrelease"
def init():
variables["exto"][0] = relpath(getpath.tmp(), getpath.main_module())
variables["dict"][0] = relpath(getpath.db() + "dazzlepod.txt", getpath.main_module())
class PwdHolder:
pwd = None
error = None
kill = False
def __init__(self):
self.pwd = None
self.error = None
self.kill = False
def reset(self):
PwdHolder.pwd = None
PwdHolder.error = None
PwdHolder.kill = False
class Worker(threading.Thread):
pwdh = None
words = None
def __init__(self, words, pwdh):
self.pwdh = pwdh
self.words = words
threading.Thread.__init__(self)
def run(self):
try:
zipf = zipfile.ZipFile(variables["file"][0])
except FileNotFoundError:
self.pwdh.error = "zip file not found"
return
for word in self.words:
if self.pwdh.pwd != None:
return
elif self.pwdh.error != None:
return
elif self.pwdh.kill == True:
return
try:
word = word.decode("utf-8").replace("\n", "")
if word[0] == "#":
continue
#animline("trying password: "+word)
zipf.extractall(variables["exto"][0], pwd=word.encode("utf-8"))
self.pwdh.pwd = word
return
except RuntimeError:
pass
except zipfile.BadZipFile:
pass
def run():
try:
wordlist = open(variables["dict"][0], "rb")
print_info("reading word list...")
words = wordlist.read().splitlines()
except FileNotFoundError:
print_error("word list not found")
return ModuleError("word list not found")
print_info("brute-force attack started...")
pwdh = PwdHolder()
pwdh.reset()
try:
u = int(variables["tc"][0])
    except (TypeError, ValueError):
print_error("invalid thread count")
return ModuleError("invalid thread count")
threads = []
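    # Split the word list round-robin between the workers: thread i gets
    # words[i::u] (words i, i+u, i+2u, ...) so the load is spread evenly.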
    for i in range(u):
t = Worker(words[i::u], pwdh)
threads.append(t)
t.start()
print_info("now cracking...")
try:
for thread in threads:
thread.join()
except KeyboardInterrupt:
pwdh.kill = True
print_info("brute-force attack terminated")
if pwdh.pwd != None:
print_success("password found: "+pwdh.pwd)
return pwdh.pwd
elif pwdh.error != None:
print_error(pwdh.error)
return ModuleError(pwdh.error)
|
solvers/cns/cnsAnimation.py | mfkiwl/libparanumal | 102 | 11143376 | #### cnsAnimation.py
#### usage: pvbatch pvbatch cnsAnimation.py 1 ../examples/cnsTri2D/foo foo
#### import the simple module from the paraview
from paraview.simple import *
import glob
#### disable automatic camera reset on 'Show'
paraview.simple._DisableFirstRenderCameraReset()
Ndatasets = int(sys.argv[1])
datasetIn = sys.argv[2]
imageFilesOut = sys.argv[3]
dataset = range(0,Ndatasets)
datasetDisplay = range(0,Ndatasets)
for n in range(0,Ndatasets):
Nfiles = len(glob.glob(datasetIn+'_%04d_*.vtu' % n))
files = range(0,Nfiles)
for m in range(0,Nfiles):
files[m] = glob.glob(datasetIn+'_%04d_%04d.vtu' % (n,m))[0]
# create a new 'XML Unstructured Grid Reader'
dataset[n] = XMLUnstructuredGridReader(FileName=files)
dataset[n].PointArrayStatus = ['Density', 'Velocity', 'Vorticity']
# set active source
#SetActiveSource(dataset[n])
# get animation scene
animationScene1 = GetAnimationScene()
# update animation scene based on data timesteps
animationScene1.UpdateAnimationUsingDataTimeSteps()
# create a new 'Group Datasets'
groupDatasets1 = GroupDatasets(Input=dataset)
# get active view
renderView1 = GetActiveViewOrCreate('RenderView')
# uncomment following to set a specific view size
# renderView1.ViewSize = [1599, 803]
# reset view to fit data
renderView1.ResetCamera()
#changing interaction mode based on data extents
# renderView1.InteractionMode = '2D'
# renderView1.CameraPosition = [-0.39905500411987305, 0.0, 10000.0]
# renderView1.CameraFocalPoint = [-0.39905500411987305, 0.0, 0.0]
# # show data in view
groupDatasets1Display = Show(groupDatasets1, renderView1)
# trace defaults for the display properties.
groupDatasets1Display.Representation = 'Surface'
groupDatasets1Display.ColorArrayName = [None, '']
groupDatasets1Display.OSPRayScaleArray = 'Density'
groupDatasets1Display.OSPRayScaleFunction = 'PiecewiseFunction'
groupDatasets1Display.SelectOrientationVectors = 'Density'
groupDatasets1Display.ScaleFactor = 1.0527610063552857
groupDatasets1Display.SelectScaleArray = 'Density'
groupDatasets1Display.GlyphType = 'Arrow'
groupDatasets1Display.GlyphTableIndexArray = 'Density'
groupDatasets1Display.DataAxesGrid = 'GridAxesRepresentation'
groupDatasets1Display.PolarAxes = 'PolarAxesRepresentation'
groupDatasets1Display.ScalarOpacityUnitDistance = 0.09258228982369943
groupDatasets1Display.GaussianRadius = 0.5263805031776428
groupDatasets1Display.SetScaleArray = ['POINTS', 'Density']
groupDatasets1Display.ScaleTransferFunction = 'PiecewiseFunction'
groupDatasets1Display.OpacityArray = ['POINTS', 'Density']
groupDatasets1Display.OpacityTransferFunction = 'PiecewiseFunction'
# # hide data in view
# #Hide(foo_0001_000, renderView1)
# # hide data in view
# #Hide(foo_0000_000, renderView1)
# # update the view to ensure updated data information
renderView1.Update()
# set scalar coloring
ColorBy(groupDatasets1Display, ('POINTS', 'Vorticity'))
# Hide the scalar bar for this color map if no visible data is colored by it.
#HideScalarBarIfNotNeeded(vtkBlockColorsLUT, renderView1)
# rescale color and/or opacity maps used to include current data range
groupDatasets1Display.RescaleTransferFunctionToDataRange(True, False)
# show color bar/color legend
#groupDatasets1Display.SetScalarBarVisibility(renderView1, True)
# get color transfer function/color map for 'Vorticity'
vorticityLUT = GetColorTransferFunction('Vorticity')
# Rescale transfer function
vorticityLUT.RescaleTransferFunction(-4.0, 4.0)
# get opacity transfer function/opacity map for 'Vorticity'
vorticityPWF = GetOpacityTransferFunction('Vorticity')
# Rescale transfer function
vorticityPWF.RescaleTransferFunction(-4.0, 4.0)
# Hide orientation axes
renderView1.OrientationAxesVisibility = 0
groupDatasets1Display.SetScalarBarVisibility(renderView1, False)
# current camera placement for renderView1
renderView1.InteractionMode = '2D'
renderView1.CameraPosition = [3., 0, 12.0]
renderView1.CameraFocalPoint = [3., 0, 0.0]
renderView1.CameraParallelScale = 6.210760565455133
# save animation
#SaveAnimation(imageFilesOut+'.avi', renderView1, ImageResolution=[1596, 800], FrameRate=15, FrameWindow=[0, len(files)])
SaveAnimation(imageFilesOut+'.png', renderView1, ImageResolution=[1855, 1163],
FrameWindow=[0, Nfiles])
#### uncomment the following to render all views
#RenderAllViews()
# alternatively, if you want to write images, you can use SaveScreenshot(...).
# save screenshot
#SaveScreenshot(imageFilesOut+'.png', renderView1, ImageResolution=[1599, 803]) |
packages/opal-common/opal_common/sources/base_policy_source.py | permitio/opal | 106 | 11143381 | import asyncio
import os
from functools import partial
from typing import Callable, Coroutine, List, Union
from git.objects.commit import Commit
from opal_common.logger import logger
OnNewPolicyCallback = Callable[[Commit, Commit], Coroutine]
OnPolicyFailureCallback = Callable[[Exception], Coroutine]
class BasePolicySource:
"""Base class to support git and api policy source.
Args:
remote_source_url(str): the base address to request the policy from
local_clone_path(str): path for the local git to manage policies
polling_interval(int): how many seconds need to wait between polling
"""
def __init__(
self,
remote_source_url: str,
local_clone_path: str,
polling_interval: int = 0,
):
        self._on_failure_callbacks: List[OnPolicyFailureCallback] = []
        self._on_new_policy_callbacks: List[OnNewPolicyCallback] = []
self._polling_interval = polling_interval
self._polling_task = None
self.remote_source_url = remote_source_url
self.local_clone_path = os.path.expanduser(local_clone_path)
def add_on_new_policy_callback(self, callback: OnNewPolicyCallback):
"""Register a callback that will be called when new policy are detected
on the monitored repo (after a pull)."""
self._on_new_policy_callbacks.append(callback)
def add_on_failure_callback(self, callback: OnPolicyFailureCallback):
"""Register a callback that will be called when failure occurred."""
self._on_failure_callbacks.append(callback)
async def get_initial_policy_state_from_remote(self):
"""init remote data to local repo."""
raise NotImplementedError()
async def check_for_changes(self):
"""trigger check for policy change."""
raise NotImplementedError()
async def run(self):
"""potentially starts the polling task."""
await self.get_initial_policy_state_from_remote()
if self._polling_interval > 0:
logger.info(
"Launching polling task, interval: {interval} seconds",
interval=self._polling_interval,
)
self._start_polling_task(self.check_for_changes)
else:
logger.info("Polling task is off")
async def stop(self):
return await self._stop_polling_task()
def _start_polling_task(self, polling_task):
if self._polling_task is None and self._polling_interval > 0:
self._polling_task = asyncio.create_task(self._do_polling(polling_task))
async def _do_polling(self, polling_task):
"""optional task to periodically check the remote for changes (git pull
and compare hash)."""
while True:
await polling_task()
await asyncio.sleep(self._polling_interval)
async def _stop_polling_task(self):
if self._polling_task is not None:
self._polling_task.cancel()
try:
await self._polling_task
except asyncio.CancelledError:
pass
async def _on_new_policy(self, old: Commit, new: Commit):
"""triggers callbacks registered with on_new_policy()."""
await self._run_callbacks(self._on_new_policy_callbacks, old, new)
async def _on_failed(self, exc: Exception):
"""will be triggered if a failure occurred.
triggers callbacks registered with on_git_failed().
"""
await self._run_callbacks(self._on_failure_callbacks, exc)
async def _run_callbacks(self, handlers, *args, **kwargs):
"""triggers a list of callbacks."""
await asyncio.gather(*(callback(*args, **kwargs) for callback in handlers))
async def _on_git_failed(self, exc: Exception):
"""will be triggered if a git failure occurred (i.e: repo does not
exist, can't clone, etc).
triggers callbacks registered with on_git_failed().
"""
await self._run_callbacks(self._on_failure_callbacks, exc)
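# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how a concrete policy
# source plugs into the machinery above. `DummyPolicySource`, its method
# bodies and the example URL are hypothetical; they only show the intended
# call flow: implement the two abstract coroutines, register callbacks, then
# await run().
# ---------------------------------------------------------------------------
class DummyPolicySource(BasePolicySource):
    async def get_initial_policy_state_from_remote(self):
        # e.g. clone or download the policies into self.local_clone_path
        logger.info("initial fetch into {path}", path=self.local_clone_path)
    async def check_for_changes(self):
        # e.g. pull, diff against the previous state and, if something
        # changed, notify subscribers via self._on_new_policy(old, new)
        logger.info("checking {url} for changes", url=self.remote_source_url)
async def _example_usage():
    source = DummyPolicySource(
        remote_source_url="https://example.com/policies.git",
        local_clone_path="~/policy-clone",
        polling_interval=30,
    )
    async def on_update(old, new):
        logger.info("policy changed: {old} -> {new}", old=old, new=new)
    source.add_on_new_policy_callback(on_update)
    await source.run()  # e.g. driven by asyncio.run(_example_usage())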
|
python/basic_api.py | APinkLemon/Learn-Carla | 148 | 11143389 | <filename>python/basic_api.py
"""
In this script, we are going to learn how to spawn a vehicle on the road and enable its autopilot.
At the same time, we will collect camera and lidar data from it.
"""
import carla
import os
import random
def main():
actor_list = []
sensor_list = []
try:
        # First of all, we need to create the client that will send the requests; we assume the server listens on port 2000
client = carla.Client('localhost', 2000)
client.set_timeout(2.0)
# Retrieve the world that is currently running
world = client.get_world()
        # world = client.load_world('Town02')  # you can also retrieve another world by specifying its name
blueprint_library = world.get_blueprint_library()
# Set weather for your world
weather = carla.WeatherParameters(cloudiness=10.0,
precipitation=10.0,
fog_density=10.0)
world.set_weather(weather)
# create the ego vehicle
ego_vehicle_bp = blueprint_library.find('vehicle.mercedes-benz.coupe')
# black color
ego_vehicle_bp.set_attribute('color', '0, 0, 0')
        # get a random valid spawn point in the world
transform = random.choice(world.get_map().get_spawn_points())
        # spawn the vehicle
ego_vehicle = world.spawn_actor(ego_vehicle_bp, transform)
# set the vehicle autopilot mode
ego_vehicle.set_autopilot(True)
# collect all actors to destroy when we quit the script
actor_list.append(ego_vehicle)
# add a camera
camera_bp = blueprint_library.find('sensor.camera.rgb')
        # camera position relative to the vehicle
camera_transform = carla.Transform(carla.Location(x=1.5, z=2.4))
camera = world.spawn_actor(camera_bp, camera_transform, attach_to=ego_vehicle)
output_path = '../outputs/output_basic_api'
if not os.path.exists(output_path):
os.makedirs(output_path)
# set the callback function
camera.listen(lambda image: image.save_to_disk(os.path.join(output_path, '%06d.png' % image.frame)))
sensor_list.append(camera)
# we also add a lidar on it
lidar_bp = blueprint_library.find('sensor.lidar.ray_cast')
lidar_bp.set_attribute('channels', str(32))
lidar_bp.set_attribute('points_per_second', str(90000))
lidar_bp.set_attribute('rotation_frequency', str(40))
lidar_bp.set_attribute('range', str(20))
# set the relative location
lidar_location = carla.Location(0, 0, 2)
lidar_rotation = carla.Rotation(0, 0, 0)
lidar_transform = carla.Transform(lidar_location, lidar_rotation)
# spawn the lidar
lidar = world.spawn_actor(lidar_bp, lidar_transform, attach_to=ego_vehicle)
lidar.listen(
lambda point_cloud: point_cloud.save_to_disk(os.path.join(output_path, '%06d.ply' % point_cloud.frame)))
sensor_list.append(lidar)
while True:
            # set the spectator to follow the ego vehicle
spectator = world.get_spectator()
transform = ego_vehicle.get_transform()
spectator.set_transform(carla.Transform(transform.location + carla.Location(z=20),
carla.Rotation(pitch=-90)))
finally:
print('destroying actors')
client.apply_batch([carla.command.DestroyActor(x) for x in actor_list])
for sensor in sensor_list:
sensor.destroy()
print('done.')
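# ---------------------------------------------------------------------------
# Optional sketch (not part of the original tutorial): when recording camera
# and lidar frames it is often useful to switch the simulator to synchronous
# mode so that sensor data lines up with world ticks. The helper below is a
# minimal sketch using the standard carla.WorldSettings API; after calling it,
# the client must advance the simulation itself with world.tick().
# ---------------------------------------------------------------------------
def enable_synchronous_mode(world, fixed_delta_seconds=0.05):
    settings = world.get_settings()
    settings.synchronous_mode = True
    settings.fixed_delta_seconds = fixed_delta_seconds
    world.apply_settings(settings)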
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print(' - Exited by user.')
|
pyscf/pbc/dft/test/test_krkspu.py | QuESt-Calculator/pyscf | 501 | 11143395 | #!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: <NAME> <<EMAIL>>
#
import unittest
import numpy as np
from pyscf.pbc import gto as pgto
from pyscf.pbc import dft as pdft
class KnownValues(unittest.TestCase):
def test_KRKSpU(self):
cell = pgto.Cell()
cell.unit = 'A'
cell.atom = 'C 0., 0., 0.; C 0.8917, 0.8917, 0.8917'
cell.a = '''0. 1.7834 1.7834
1.7834 0. 1.7834
1.7834 1.7834 0. '''
cell.basis = 'gth-dzvp'
cell.pseudo = 'gth-pade'
cell.verbose = 7
cell.build()
kmesh = [2, 1, 1]
kpts = cell.make_kpts(kmesh, wrap_around=True)
U_idx = ["1 C 2p"]
U_val = [5.0]
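        # "1 C 2p" selects local orbitals by "<atom index> <element> <shell>",
        # i.e. the 2p shell on atom 1 (0-based). C_ao_lo='minao' with
        # minao_ref='gth-szv' below builds those local orbitals from a minimal
        # reference basis. (Descriptive comment added for clarity; see the
        # PySCF documentation for the authoritative description.)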
mf = pdft.KRKSpU(cell, kpts, U_idx=U_idx, U_val=U_val, C_ao_lo='minao',
minao_ref='gth-szv')
mf.conv_tol = 1e-10
e1 = mf.kernel()
self.assertAlmostEqual(e1, -10.694460059491741, 8)
if __name__ == '__main__':
print("Full Tests for pbc.dft.krkspu")
unittest.main()
|
tensorflow_ranking/python/keras/metrics.py | sarvex/tensorflow-ranking | 2,482 | 11143402 | <filename>tensorflow_ranking/python/keras/metrics.py<gh_stars>1000+
# Copyright 2021 The TensorFlow Ranking Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Keras metrics in TF-Ranking.
NOTE: For metrics that compute a ranking, ties are broken randomly. This means
that metrics may be stochastic if items with equal scores are provided.
WARNING: Some metrics (e.g. Recall or MRR) are not well-defined when there are
no relevant items (e.g. if `y_true` has a row of only zeroes). For these cases,
the TF-Ranking metrics will evaluate to `0`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, List, Optional
import tensorflow.compat.v2 as tf
from tensorflow_ranking.python import metrics_impl
from tensorflow_ranking.python.keras import utils
class RankingMetricKey(object):
"""Ranking metric key strings."""
# Mean Reciprocal Rank. For binary relevance.
MRR = "mrr"
# Average Relevance Position.
ARP = "arp"
# Normalized Discounted Cumulative Gain.
NDCG = "ndcg"
# Discounted Cumulative Gain.
DCG = "dcg"
# Precision. For binary relevance.
PRECISION = "precision"
# Mean Average Precision. For binary relevance.
MAP = "map"
# Intent-aware Precision. For binary relevance of subtopics.
PRECISION_IA = "precision_ia"
# Ordered Pair Accuracy.
ORDERED_PAIR_ACCURACY = "ordered_pair_accuracy"
# Alpha Discounted Cumulative Gain.
ALPHA_DCG = "alpha_dcg"
def get(key: str,
name: Optional[str] = None,
dtype: Optional[tf.dtypes.DType] = None,
topn: Optional[int] = None,
**kwargs: Dict[str, Any]) -> tf.keras.metrics.Metric:
"""Factory method to get a list of ranking metrics.
Example Usage:
```python
metric = tfr.keras.metics.get(tfr.keras.metrics.RankingMetricKey.MRR)
```
to get Mean Reciprocal Rank.
```python
metric = tfr.keras.metics.get(tfr.keras.metrics.RankingMetricKey.MRR,
topn=2)
```
to get MRR@2.
Args:
key: An attribute of `RankingMetricKey`, defining which metric objects to
return.
name: Name of metrics.
dtype: Dtype of the metrics.
topn: Cutoff of how many items are considered in the metric.
**kwargs: Keyword arguments for the metric object.
Returns:
A tf.keras.metrics.Metric. See `_RankingMetric` signature for more details.
Raises:
ValueError: If key is unsupported.
"""
if not isinstance(key, str):
raise ValueError("Input `key` needs to be string.")
key_to_cls = {
RankingMetricKey.MRR: MRRMetric,
RankingMetricKey.ARP: ARPMetric,
RankingMetricKey.PRECISION: PrecisionMetric,
RankingMetricKey.MAP: MeanAveragePrecisionMetric,
RankingMetricKey.NDCG: NDCGMetric,
RankingMetricKey.DCG: DCGMetric,
RankingMetricKey.ORDERED_PAIR_ACCURACY: OPAMetric,
}
metric_kwargs = {"name": name, "dtype": dtype}
if topn:
metric_kwargs.update({"topn": topn})
if kwargs:
metric_kwargs.update(kwargs)
if key in key_to_cls:
metric_cls = key_to_cls[key]
metric_obj = metric_cls(**metric_kwargs)
else:
raise ValueError("Unsupported metric: {}".format(key))
return metric_obj
def default_keras_metrics(**kwargs) -> List[tf.keras.metrics.Metric]:
"""Returns a list of ranking metrics.
Args:
**kwargs: Additional kwargs to pass to each keras metric.
Returns:
A list of metrics of type `tf.keras.metrics.Metric`.
"""
list_kwargs = [
dict(key="ndcg", topn=topn, name="metric/ndcg_{}".format(topn), **kwargs)
for topn in [1, 3, 5, 10]
] + [
dict(key="arp", name="metric/arp", **kwargs),
dict(key="ordered_pair_accuracy", name="metric/ordered_pair_accuracy",
**kwargs),
dict(key="mrr", name="metric/mrr", **kwargs),
dict(key="precision", name="metric/precision", **kwargs),
dict(key="map", name="metric/map", **kwargs),
dict(key="dcg", name="metric/dcg", **kwargs),
dict(key="ndcg", name="metric/ndcg", **kwargs)
]
return [get(**kwargs) for kwargs in list_kwargs]
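# Example (sketch, not from the original module): the metrics returned by
# `default_keras_metrics()` can be passed straight to `model.compile`, e.g.
#   model.compile(
#       optimizer=tf.keras.optimizers.Adagrad(0.05),
#       loss=...,  # any ranking loss
#       metrics=default_keras_metrics(),
#   )
# where `model` is assumed to be a Keras ranking model producing one score per
# example in each list.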
class _RankingMetric(tf.keras.metrics.Mean):
"""Implements base ranking metric class.
Please see tf.keras.metrics.Mean for more information about such a class and
https://www.tensorflow.org/tutorials/distribute/custom_training on how to do
customized training.
"""
def __init__(self, name=None, dtype=None, ragged=False, **kwargs):
super(_RankingMetric, self).__init__(name=name, dtype=dtype, **kwargs)
# An instance of `metrics_impl._RankingMetric`.
# Overwrite this in subclasses.
self._metric = None
self._ragged = ragged
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates metric statistics.
`y_true` and `y_pred` should have the same shape.
Args:
y_true: The ground truth values.
y_pred: The predicted values.
sample_weight: Optional weighting of each example. Defaults to 1. Can be a
`Tensor` whose rank is either 0, or the same rank as `y_true`, and must
be broadcastable to `y_true`.
Returns:
Update op.
"""
y_true = tf.cast(y_true, self._dtype)
y_pred = tf.cast(y_pred, self._dtype)
# TODO: Add mask argument for metric.compute() call
per_list_metric_val, per_list_metric_weights = self._metric.compute(
y_true, y_pred, sample_weight)
return super(_RankingMetric, self).update_state(
per_list_metric_val, sample_weight=per_list_metric_weights)
def get_config(self):
config = super(_RankingMetric, self).get_config()
config.update({
"ragged": self._ragged,
})
return config
@tf.keras.utils.register_keras_serializable(package="tensorflow_ranking")
class MRRMetric(_RankingMetric):
r"""Mean reciprocal rank (MRR).
For each list of scores `s` in `y_pred` and list of labels `y` in `y_true`:
```
MRR(y, s) = max_i y_i / rank(s_i)
```
NOTE: This metric converts graded relevance to binary relevance by setting
`y_i = 1` if `y_i >= 1`.
Standalone usage:
>>> y_true = [[0., 1., 1.]]
>>> y_pred = [[3., 1., 2.]]
>>> mrr = tfr.keras.metrics.MRRMetric()
>>> mrr(y_true, y_pred).numpy()
0.5
>>> # Using ragged tensors
>>> y_true = tf.ragged.constant([[0., 1.], [1., 2., 0.]])
>>> y_pred = tf.ragged.constant([[2., 1.], [2., 5., 4.]])
>>> mrr = tfr.keras.metrics.MRRMetric(ragged=True)
>>> mrr(y_true, y_pred).numpy()
0.75
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd', metrics=[tfr.keras.metrics.MRRMetric()])
```
Definition:
$$
\text{MRR}(\{y\}, \{s\}) = \max_i \frac{\bar{y}_i}{\text{rank}(s_i)}
$$
where $\text{rank}(s_i)$ is the rank of item $i$ after sorting by scores
$s$ with ties broken randomly and $\bar{y_i}$ are truncated labels:
$$
\bar{y}_i = \begin{cases}
1 & \text{if }y_i \geq 1 \\
0 & \text{else}
\end{cases}
$$
"""
def __init__(self, name=None, topn=None, dtype=None, ragged=False, **kwargs):
super(MRRMetric, self).__init__(name=name, dtype=dtype, ragged=ragged,
**kwargs)
self._topn = topn
self._metric = metrics_impl.MRRMetric(name=name, topn=topn, ragged=ragged)
def get_config(self):
config = super(MRRMetric, self).get_config()
config.update({
"topn": self._topn,
})
return config
@tf.keras.utils.register_keras_serializable(package="tensorflow_ranking")
class ARPMetric(_RankingMetric):
r"""Average relevance position (ARP).
For each list of scores `s` in `y_pred` and list of labels `y` in `y_true`:
```
ARP(y, s) = sum_i (y_i * rank(s_i)) / sum_j y_j
```
Standalone usage:
>>> y_true = [[0., 1., 1.]]
>>> y_pred = [[3., 1., 2.]]
>>> arp = tfr.keras.metrics.ARPMetric()
>>> arp(y_true, y_pred).numpy()
2.5
>>> # Using ragged tensors
>>> y_true = tf.ragged.constant([[0., 1.], [1., 2., 0.]])
>>> y_pred = tf.ragged.constant([[2., 1.], [2., 5., 4.]])
>>> arp = tfr.keras.metrics.ARPMetric(ragged=True)
>>> arp(y_true, y_pred).numpy()
1.75
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd', metrics=[tfr.keras.metrics.ARPMetric()])
```
Definition:
$$
\text{ARP}(\{y\}, \{s\}) =
\frac{1}{\sum_i y_i} \sum_i y_i \cdot \text{rank}(s_i)
$$
where $\text{rank}(s_i)$ is the rank of item $i$ after sorting by scores
$s$ with ties broken randomly.
"""
def __init__(self, name=None, dtype=None, ragged=False, **kwargs):
super(ARPMetric, self).__init__(name=name, dtype=dtype, ragged=ragged,
**kwargs)
self._metric = metrics_impl.ARPMetric(name=name, ragged=ragged)
@tf.keras.utils.register_keras_serializable(package="tensorflow_ranking")
class PrecisionMetric(_RankingMetric):
r"""Precision@k (P@k).
For each list of scores `s` in `y_pred` and list of labels `y` in `y_true`:
```
P@K(y, s) = 1/k sum_i I[rank(s_i) < k] y_i
```
NOTE: This metric converts graded relevance to binary relevance by setting
`y_i = 1` if `y_i >= 1`.
Standalone usage:
>>> y_true = [[0., 1., 1.]]
>>> y_pred = [[3., 1., 2.]]
>>> precision_at_2 = tfr.keras.metrics.PrecisionMetric(topn=2)
>>> precision_at_2(y_true, y_pred).numpy()
0.5
>>> # Using ragged tensors
>>> y_true = tf.ragged.constant([[0., 1.], [1., 2., 0.]])
>>> y_pred = tf.ragged.constant([[2., 1.], [2., 5., 4.]])
>>> precision_at_2 = tfr.keras.metrics.PrecisionMetric(topn=2, ragged=True)
>>> precision_at_2(y_true, y_pred).numpy()
0.5
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd', metrics=[tfr.keras.metrics.PrecisionMetric()])
```
Definition:
$$
\text{P@k}(\{y\}, \{s\}) =
\frac{1}{k} \sum_i I[\text{rank}(s_i) \leq k] \bar{y}_i
$$
where:
* $\text{rank}(s_i)$ is the rank of item $i$ after sorting by scores $s$
with ties broken randomly
* $I[]$ is the indicator function:\
$I[\text{cond}] = \begin{cases}
1 & \text{if cond is true}\\
0 & \text{else}\end{cases}
$
* $\bar{y}_i$ are the truncated labels:\
$
\bar{y}_i = \begin{cases}
1 & \text{if }y_i \geq 1 \\
0 & \text{else}
\end{cases}
$
* $k = |y|$ if $k$ is not provided
"""
def __init__(self, name=None, topn=None, dtype=None, ragged=False, **kwargs):
super(PrecisionMetric, self).__init__(name=name, dtype=dtype, ragged=ragged,
**kwargs)
self._topn = topn
self._metric = metrics_impl.PrecisionMetric(name=name, topn=topn,
ragged=ragged)
def get_config(self):
config = super(PrecisionMetric, self).get_config()
config.update({
"topn": self._topn,
})
return config
# TODO Add recall metrics to TF1 in another cl.
@tf.keras.utils.register_keras_serializable(package="tensorflow_ranking")
class RecallMetric(_RankingMetric):
r"""Recall@k (R@k).
For each list of scores `s` in `y_pred` and list of labels `y` in `y_true`:
```
R@K(y, s) = sum_i I[rank(s_i) < k] y_i / sum_j y_j
```
NOTE: This metric converts graded relevance to binary relevance by setting
`y_i = 1` if `y_i >= 1`.
Standalone usage:
>>> y_true = [[0., 1., 1.]]
>>> y_pred = [[3., 1., 2.]]
>>> recall_at_2 = tfr.keras.metrics.RecallMetric(topn=2)
>>> recall_at_2(y_true, y_pred).numpy()
0.5
>>> # Using ragged tensors
>>> y_true = tf.ragged.constant([[0., 1.], [1., 2., 0.]])
>>> y_pred = tf.ragged.constant([[2., 1.], [2., 5., 4.]])
>>> recall_at_2 = tfr.keras.metrics.RecallMetric(topn=2, ragged=True)
>>> recall_at_2(y_true, y_pred).numpy()
0.75
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd', metrics=[tfr.keras.metrics.RecallMetric()])
```
Definition:
$$
\text{R@k}(\{y\}, \{s\}) =
\frac{\sum_i I[\text{rank}(s_i) \leq k] \bar{y}_i}{\sum_j \bar{y}_j}
$$
where:
* $\text{rank}(s_i)$ is the rank of item $i$ after sorting by scores $s$
with ties broken randomly
* $I[]$ is the indicator function:\
$I[\text{cond}] = \begin{cases}
1 & \text{if cond is true}\\
0 & \text{else}\end{cases}
$
* $\bar{y}_i$ are the truncated labels:\
$
\bar{y}_i = \begin{cases}
1 & \text{if }y_i \geq 1 \\
0 & \text{else}
\end{cases}
$
* $k = |y|$ if $k$ is not provided
"""
def __init__(self, name=None, topn=None, dtype=None, ragged=False, **kwargs):
super(RecallMetric, self).__init__(name=name, dtype=dtype, ragged=ragged,
**kwargs)
self._topn = topn
self._metric = metrics_impl.RecallMetric(name=name, topn=topn,
ragged=ragged)
def get_config(self):
config = super(RecallMetric, self).get_config()
config.update({
"topn": self._topn,
})
return config
@tf.keras.utils.register_keras_serializable(package="tensorflow_ranking")
class PrecisionIAMetric(_RankingMetric):
r"""Precision-IA@k (Pre-IA@k).
Intent-aware Precision@k ([Agrawal et al, 2009][agrawal2009];
[Clarke et al, 2009][clarke2009]) is a precision metric that operates on
subtopics and is typically used for diversification tasks..
For each list of scores `s` in `y_pred` and list of labels `y` in `y_true`:
```
Pre-IA@k(y, s) = sum_t sum_i I[rank(s_i) <= k] y_{i,t} / (# of subtopics * k)
```
NOTE: The labels `y_true` should be of shape
`[batch_size, list_size, subtopic_size]`, indicating relevance for each
subtopic in the last dimension.
NOTE: This metric converts graded relevance to binary relevance by setting
`y_{i,t} = 1` if `y_{i,t} >= 1`.
Standalone usage:
>>> y_true = [[[0., 1.], [1., 0.], [1., 1.]]]
>>> y_pred = [[3., 1., 2.]]
>>> pre_ia = tfr.keras.metrics.PrecisionIAMetric()
>>> pre_ia(y_true, y_pred).numpy()
0.6666667
>>> # Using ragged tensors
>>> y_true = tf.ragged.constant(
... [[[0., 0.], [1., 0.]], [[1., 1.], [0., 2.], [1., 0.]]])
>>> y_pred = tf.ragged.constant([[2., 1.], [2., 5., 4.]])
>>> pre_ia = tfr.keras.metrics.PrecisionIAMetric(ragged=True)
>>> pre_ia(y_true, y_pred).numpy()
0.5833334
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd',
metrics=[tfr.keras.metrics.PrecisionIAMetric()])
```
Definition:
$$
\text{Pre-IA@k}(y, s) = \frac{1}{\text{# of subtopics} \cdot k}
\sum_t \sum_i I[\text{rank}(s_i) \leq k] y_{i,t}
$$
where $\text{rank}(s_i)$ is the rank of item $i$ after sorting by scores
$s$ with ties broken randomly.
References:
- [Diversifying Search Results, Agrawal et al, 2009][agrawal2009]
- [Overview of the TREC 2009 Web Track, Clarke et al, 2009][clarke2009]
[agrawal2009]:
https://www.microsoft.com/en-us/research/publication/diversifying-search-results/
[clarke2009]: https://trec.nist.gov/pubs/trec18/papers/ENT09.OVERVIEW.pdf
"""
def __init__(self,
name=None,
topn=None,
dtype=None,
ragged=False,
**kwargs):
"""Constructor.
Args:
name: A string used as the name for this metric.
topn: A cutoff for how many examples to consider for this metric.
dtype: Data type of the metric output. See `tf.keras.metrics.Metric`.
ragged: A bool indicating whether the supplied tensors are ragged. If
True y_true, y_pred and sample_weight (if providing per-example weights)
need to be ragged tensors with compatible shapes.
      **kwargs: Other keyword arguments used in `tf.keras.metrics.Metric`.
"""
super(PrecisionIAMetric, self).__init__(name=name, dtype=dtype,
ragged=ragged, **kwargs)
self._topn = topn
self._metric = metrics_impl.PrecisionIAMetric(name=name, topn=topn,
ragged=ragged)
def get_config(self):
config = super(PrecisionIAMetric, self).get_config()
config.update({
"topn": self._topn,
})
return config
@tf.keras.utils.register_keras_serializable(package="tensorflow_ranking")
class MeanAveragePrecisionMetric(_RankingMetric):
r"""Mean average precision (MAP).
For each list of scores `s` in `y_pred` and list of labels `y` in `y_true`:
```
MAP(y, s) = sum_k (P@k(y, s) * rel(k)) / sum_i y_i
rel(k) = y_i if rank(s_i) = k
```
NOTE: This metric converts graded relevance to binary relevance by setting
`y_i = 1` if `y_i >= 1`.
Standalone usage:
>>> y_true = [[0., 1., 1.]]
>>> y_pred = [[3., 1., 2.]]
>>> map_metric = tfr.keras.metrics.MeanAveragePrecisionMetric(topn=2)
>>> map_metric(y_true, y_pred).numpy()
0.25
>>> # Using ragged tensors
>>> y_true = tf.ragged.constant([[0., 1.], [1., 2., 0.]])
>>> y_pred = tf.ragged.constant([[2., 1.], [2., 5., 4.]])
>>> map_metric = tfr.keras.metrics.MeanAveragePrecisionMetric(
... topn=2, ragged=True)
>>> map_metric(y_true, y_pred).numpy()
0.5
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd',
metrics=[tfr.keras.metrics.MeanAveragePrecisionMetric()])
```
Definition:
$$
\text{MAP}(\{y\}, \{s\}) =
\frac{\sum_k P@k(y, s) \cdot \text{rel}(k)}{\sum_j \bar{y}_j} \\
\text{rel}(k) = \max_i I[\text{rank}(s_i) = k] \bar{y}_i
$$
where:
* $P@k(y, s)$ is the Precision at rank $k$. See
`tfr.keras.metrics.PrecisionMetric`.
* $\text{rank}(s_i)$ is the rank of item $i$ after sorting by scores $s$
with ties broken randomly
* $I[]$ is the indicator function:\
$I[\text{cond}] = \begin{cases}
1 & \text{if cond is true}\\
0 & \text{else}\end{cases}
$
* $\bar{y}_i$ are the truncated labels:\
$
\bar{y}_i = \begin{cases}
1 & \text{if }y_i \geq 1 \\
0 & \text{else}
\end{cases}
$
"""
def __init__(self, name=None, topn=None, dtype=None, ragged=False, **kwargs):
super(MeanAveragePrecisionMetric, self).__init__(
name=name, dtype=dtype, ragged=ragged, **kwargs)
self._topn = topn
self._metric = metrics_impl.MeanAveragePrecisionMetric(name=name, topn=topn,
ragged=ragged)
def get_config(self):
base_config = super(MeanAveragePrecisionMetric, self).get_config()
config = {
"topn": self._topn,
}
config.update(base_config)
return config
@tf.keras.utils.register_keras_serializable(package="tensorflow_ranking")
class NDCGMetric(_RankingMetric):
r"""Normalized discounted cumulative gain (NDCG).
Normalized discounted cumulative gain ([Järvelin et al, 2002][jarvelin2002])
is the normalized version of `tfr.keras.metrics.DCGMetric`.
For each list of scores `s` in `y_pred` and list of labels `y` in `y_true`:
```
NDCG(y, s) = DCG(y, s) / DCG(y, y)
DCG(y, s) = sum_i gain(y_i) * rank_discount(rank(s_i))
```
NOTE: The `gain_fn` and `rank_discount_fn` should be keras serializable.
Please see `tfr.keras.utils.pow_minus_1` and `tfr.keras.utils.log2_inverse` as
examples when defining user customized functions.
Standalone usage:
>>> y_true = [[0., 1., 1.]]
>>> y_pred = [[3., 1., 2.]]
>>> ndcg = tfr.keras.metrics.NDCGMetric()
>>> ndcg(y_true, y_pred).numpy()
0.6934264
>>> # Using ragged tensors
>>> y_true = tf.ragged.constant([[0., 1.], [1., 2., 0.]])
>>> y_pred = tf.ragged.constant([[2., 1.], [2., 5., 4.]])
>>> ndcg = tfr.keras.metrics.NDCGMetric(ragged=True)
>>> ndcg(y_true, y_pred).numpy()
0.7974351
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd', metrics=[tfr.keras.metrics.NDCGMetric()])
```
Definition:
$$
\text{NDCG}(\{y\}, \{s\}) =
\frac{\text{DCG}(\{y\}, \{s\})}{\text{DCG}(\{y\}, \{y\})} \\
\text{DCG}(\{y\}, \{s\}) =
\sum_i \text{gain}(y_i) \cdot \text{rank_discount}(\text{rank}(s_i))
$$
where $\text{rank}(s_i)$ is the rank of item $i$ after sorting by scores
$s$ with ties broken randomly.
References:
- [Cumulated gain-based evaluation of IR techniques, Järvelin et al,
2002][jarvelin2002]
[jarvelin2002]: https://dl.acm.org/doi/10.1145/582415.582418
"""
def __init__(self,
name=None,
topn=None,
gain_fn=None,
rank_discount_fn=None,
dtype=None,
ragged=False,
**kwargs):
super(NDCGMetric, self).__init__(name=name, dtype=dtype, ragged=ragged,
**kwargs)
self._topn = topn
self._gain_fn = gain_fn or utils.pow_minus_1
self._rank_discount_fn = rank_discount_fn or utils.log2_inverse
self._metric = metrics_impl.NDCGMetric(
name=name,
topn=topn,
gain_fn=self._gain_fn,
rank_discount_fn=self._rank_discount_fn,
ragged=ragged)
def get_config(self):
base_config = super(NDCGMetric, self).get_config()
config = {
"topn": self._topn,
"gain_fn": self._gain_fn,
"rank_discount_fn": self._rank_discount_fn,
}
config.update(base_config)
return config
@tf.keras.utils.register_keras_serializable(package="tensorflow_ranking")
class DCGMetric(_RankingMetric):
r"""Discounted cumulative gain (DCG).
Discounted cumulative gain ([Järvelin et al, 2002][jarvelin2002]).
For each list of scores `s` in `y_pred` and list of labels `y` in `y_true`:
```
DCG(y, s) = sum_i gain(y_i) * rank_discount(rank(s_i))
```
NOTE: The `gain_fn` and `rank_discount_fn` should be keras serializable.
Please see `tfr.keras.utils.pow_minus_1` and `tfr.keras.utils.log2_inverse` as
examples when defining user customized functions.
Standalone usage:
>>> y_true = [[0., 1., 1.]]
>>> y_pred = [[3., 1., 2.]]
>>> dcg = tfr.keras.metrics.DCGMetric()
>>> dcg(y_true, y_pred).numpy()
1.1309297
>>> # Using ragged tensors
>>> y_true = tf.ragged.constant([[0., 1.], [1., 2., 0.]])
>>> y_pred = tf.ragged.constant([[2., 1.], [2., 5., 4.]])
>>> dcg = tfr.keras.metrics.DCGMetric(ragged=True)
>>> dcg(y_true, y_pred).numpy()
2.065465
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd', metrics=[tfr.keras.metrics.DCGMetric()])
```
Definition:
$$
\text{DCG}(\{y\}, \{s\}) =
\sum_i \text{gain}(y_i) \cdot \text{rank_discount}(\text{rank}(s_i))
$$
where $\text{rank}(s_i)$ is the rank of item $i$ after sorting by scores
$s$ with ties broken randomly.
References:
- [Cumulated gain-based evaluation of IR techniques, Järvelin et al,
2002][jarvelin2002]
[jarvelin2002]: https://dl.acm.org/doi/10.1145/582415.582418
"""
def __init__(self,
name=None,
topn=None,
gain_fn=None,
rank_discount_fn=None,
dtype=None,
ragged=False,
**kwargs):
super(DCGMetric, self).__init__(name=name, dtype=dtype, ragged=ragged,
**kwargs)
self._topn = topn
self._gain_fn = gain_fn or utils.pow_minus_1
self._rank_discount_fn = rank_discount_fn or utils.log2_inverse
self._metric = metrics_impl.DCGMetric(
name=name,
topn=topn,
gain_fn=self._gain_fn,
rank_discount_fn=self._rank_discount_fn,
ragged=ragged)
def get_config(self):
base_config = super(DCGMetric, self).get_config()
config = {
"topn": self._topn,
"gain_fn": self._gain_fn,
"rank_discount_fn": self._rank_discount_fn,
}
config.update(base_config)
return config
@tf.keras.utils.register_keras_serializable(package="tensorflow_ranking")
class AlphaDCGMetric(_RankingMetric):
r"""Alpha discounted cumulative gain (alphaDCG).
Alpha discounted cumulative gain ([Clarke et al, 2008][clarke2008];
[Clarke et al, 2009][clarke2009]) is a cumulative gain metric that operates
on subtopics and is typically used for diversification tasks.
For each list of scores `s` in `y_pred` and list of labels `y` in `y_true`:
```
alphaDCG(y, s) = sum_t sum_i gain(y_{i,t}) * rank_discount(rank(s_i))
gain(y_{i,t}) = (1 - alpha)^(sum_j I[rank(s_j) < rank(s_i)] * gain(y_{j,t}))
```
NOTE: The labels `y_true` should be of shape
`[batch_size, list_size, subtopic_size]`, indicating relevance for each
subtopic in the last dimension.
NOTE: The `rank_discount_fn` should be keras serializable. Please see
`tfr.keras.utils.log2_inverse` as an example when defining user customized
functions.
Standalone usage:
>>> y_true = [[[0., 1.], [1., 0.], [1., 1.]]]
>>> y_pred = [[3., 1., 2.]]
>>> alpha_dcg = tfr.keras.metrics.AlphaDCGMetric()
>>> alpha_dcg(y_true, y_pred).numpy()
2.1963947
>>> # Using ragged tensors
>>> y_true = tf.ragged.constant(
... [[[0., 0.], [1., 0.]], [[1., 1.], [0., 2.], [1., 0.]]])
>>> y_pred = tf.ragged.constant([[2., 1.], [2., 5., 4.]])
>>> alpha_dcg = tfr.keras.metrics.AlphaDCGMetric(ragged=True)
>>> alpha_dcg(y_true, y_pred).numpy()
1.8184297
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd', metrics=[tfr.keras.metrics.AlphaDCGMetric()])
```
Definition:
$$
\alpha\text{DCG}(y, s) =
\sum_t \sum_i \text{gain}(y_{i, t}, \alpha)
\text{ rank_discount}(\text{rank}(s_i))\\
\text{gain}(y_{i, t}, \alpha) =
y_{i, t} (1 - \alpha)^{\sum_j I[\text{rank}(s_j) < \text{rank}(s_i)] y_{j, t}}
$$
where $\text{rank}(s_i)$ is the rank of item $i$ after sorting by scores
$s$ with ties broken randomly and $I[]$ is the indicator function:
$$
I[\text{cond}] = \begin{cases}
1 & \text{if cond is true}\\
0 & \text{else}\end{cases}
$$
References:
- [Novelty and diversity in information retrieval evaluation, Clarke et al,
2008][clarke2008]
- [Overview of the TREC 2009 Web Track, Clarke et al, 2009][clarke2009]
[clarke2008]: https://dl.acm.org/doi/10.1145/1390334.1390446
[clarke2009]: https://trec.nist.gov/pubs/trec18/papers/ENT09.OVERVIEW.pdf
"""
def __init__(self,
name="alpha_dcg_metric",
topn=None,
alpha=0.5,
rank_discount_fn=None,
seed=None,
dtype=None,
ragged=False,
**kwargs):
"""Construct the ranking metric class for alpha-DCG.
Args:
name: A string used as the name for this metric.
topn: A cutoff for how many examples to consider for this metric.
alpha: A float between 0 and 1, parameter used in definition of alpha-DCG.
Introduced as an assessor error in judging whether a document is
covering a subtopic of the query.
rank_discount_fn: A function of rank discounts. Default is set to
`1 / log2(rank+1)`. The `rank_discount_fn` should be keras serializable.
Please see the `log2_inverse` above as an example when defining user
customized functions.
seed: The ops-level random seed used in shuffle ties in `sort_by_scores`.
dtype: Data type of the metric output. See `tf.keras.metrics.Metric`.
ragged: A bool indicating whether the supplied tensors are ragged. If
True y_true, y_pred and sample_weight (if providing per-example weights)
need to be ragged tensors with compatible shapes.
      **kwargs: Other keyword arguments used in `tf.keras.metrics.Metric`.
"""
super(AlphaDCGMetric, self).__init__(name=name, dtype=dtype, ragged=ragged,
**kwargs)
self._topn = topn
self._alpha = alpha
self._rank_discount_fn = rank_discount_fn or utils.log2_inverse
self._seed = seed
self._metric = metrics_impl.AlphaDCGMetric(
name=name,
topn=topn,
alpha=alpha,
rank_discount_fn=self._rank_discount_fn,
seed=seed,
ragged=ragged)
def get_config(self):
config = super(AlphaDCGMetric, self).get_config()
config.update({
"topn": self._topn,
"alpha": self._alpha,
"rank_discount_fn": self._rank_discount_fn,
"seed": self._seed,
})
return config
@tf.keras.utils.register_keras_serializable(package="tensorflow_ranking")
class OPAMetric(_RankingMetric):
r"""Ordered pair accuracy (OPA).
For each list of scores `s` in `y_pred` and list of labels `y` in `y_true`:
```
OPA(y, s) = sum_i sum_j I[s_i > s_j] I[y_i > y_j] / sum_i sum_j I[y_i > y_j]
```
NOTE: Pairs with equal labels (`y_i = y_j`) are always ignored. Pairs with
equal scores (`s_i = s_j`) are considered incorrectly ordered.
Standalone usage:
>>> y_true = [[0., 1., 2.]]
>>> y_pred = [[3., 1., 2.]]
>>> opa = tfr.keras.metrics.OPAMetric()
>>> opa(y_true, y_pred).numpy()
0.33333334
>>> # Using ragged tensors
>>> y_true = tf.ragged.constant([[0., 1.], [1., 2., 0.]])
>>> y_pred = tf.ragged.constant([[2., 1.], [2., 5., 4.]])
>>> opa = tfr.keras.metrics.OPAMetric(ragged=True)
>>> opa(y_true, y_pred).numpy()
0.5
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd', metrics=[tfr.keras.metrics.OPAMetric()])
```
Definition:
$$
\text{OPA}(\{y\}, \{s\}) =
\frac{\sum_i \sum_j I[s_i > s_j] I[y_i > y_j]}{\sum_i \sum_j I[y_i > y_j]}
$$
where $I[]$ is the indicator function:
$$
I[\text{cond}] = \begin{cases}
1 & \text{if cond is true}\\
0 & \text{else}\end{cases}
$$
"""
def __init__(self, name=None, dtype=None, ragged=False, **kwargs):
super(OPAMetric, self).__init__(name=name, dtype=dtype, ragged=ragged,
**kwargs)
self._metric = metrics_impl.OPAMetric(name=name, ragged=ragged)
|
test/integration/component/test_bigswitch_bcf.py | Codegass/cloudstack | 1,131 | 11143425 | <gh_stars>1000+
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" P1 tests for BigSwitchBcf network Plugin
"""
#Import Local Modules
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.cloudstackAPI import *
from marvin.lib.utils import (cleanup_resources,
random_gen, validateList)
from marvin.lib.base import (Account,
VirtualMachine,
ServiceOffering,
NetworkOffering,
VpcOffering,
Network,
PublicIPAddress,
FireWallRule,
NATRule,
Configurations)
from marvin.lib.common import (get_domain,
get_zone,
get_template)
from marvin.sshClient import SshClient
from marvin.codes import PASS
import subprocess
import time
class Services:
"""Test BigSwitchBcf plugin
"""
def __init__(self):
self.services = {
"account": {
"email": "<EMAIL>",
"firstname": "cloudstack",
"lastname": "bob",
"username": "bobbuilder",
"password": "password",
},
"service_offering": {
"name": "Tiny Instance",
"displaytext": "Tiny Instance",
"cpunumber": 1,
"cpuspeed": 100, # in MHz
"memory": 128, # In MBs
},
"virtual_machine": {
"displayname": "TestVM",
"username": "root",
"password": "password",
"ssh_port": 22,
"hypervisor": 'KVM',
"privateport": 22,
"publicport": 22,
"protocol": 'TCP',
},
"natrule": {
"privateport": 22,
"publicport": 22,
"startport": 22,
"endport": 22,
"protocol": "TCP",
"cidrlist": '0.0.0.0/0',
},
"net1_net2_aclrule": {
"protocol": "ICMP",
"cidrlist": '0.0.0.0/0',
},
"big_switch_bcf_device": {
"hostname": '10.212.1.104',
"username": 'admin',
"password": '<PASSWORD>',
"retrycount": '4',
"retryinterval": '60'
},
            # services supported by Big Switch BCF for isolated networks.
"network_offering": {
"name": 'BCF network offering (marvin)',
"displaytext": 'BCF (marvin)',
"guestiptype": 'Isolated',
"supportedservices": 'Dhcp,SourceNat,Connectivity,StaticNat,PortForwarding,Firewall',
"traffictype": 'GUEST',
"availability": 'Optional',
"serviceProviderList": {
"PortForwarding": 'VirtualRouter',
"Dhcp" : 'VirtualRouter',
"Connectivity" : 'BigSwitchBcf',
"StaticNat" : 'VirtualRouter',
"SourceNat" : 'VirtualRouter',
"Firewall" : 'VirtualRouter'
},
},
"vpc_network_offering": {
"name": 'BCF VPC network offering (marvin)',
"displaytext": 'BCF VPC (marvin)',
"guestiptype": 'Isolated',
"supportedservices": 'Dhcp,SourceNat,Connectivity,PortForwarding,StaticNat,NetworkACL',
"traffictype": 'GUEST',
"availability": 'Optional',
"useVpc": 'on',
"serviceProviderList": {
"Dhcp": 'VpcVirtualRouter',
"SourceNat": 'VpcVirtualRouter',
"PortForwarding": 'VpcVirtualRouter',
"StaticNat": 'VpcVirtualRouter',
"NetworkACL": 'VpcVirtualRouter',
"Connectivity" : 'BigSwitchBcf'
},
},
"vpc_offering": {
"name": 'BCF VPC',
"displaytext": 'BCF VPC',
"supportedservices": 'Dhcp,SourceNat,PortForwarding,StaticNat,NetworkACL,Connectivity',
},
"vpc": {
"name": "TestVPC",
"displaytext": "TestVPC",
"cidr": '10.0.0.1/16'
},
"network": {
"name": "BCF network",
"displaytext": "BCF network",
},
"ostype": 'CentOS 5.3 (64-bit)',
"sleep": 60,
"timeout": 10
}
class TestBigSwitchBcf(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls._cleanup = []
cls.testClient = super(TestBigSwitchBcf, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["virtual_machine"]["template"] = cls.template.id
# Get test parameters
cls.bigswitch_services = cls.services["big_switch_bcf_device"]
try:
# Get physical network
resp = listPhysicalNetworks.listPhysicalNetworksCmd()
resp.zoneid = cls.zone.id
physical_networks = cls.api_client.listPhysicalNetworks(resp)
if isinstance(physical_networks, list):
cls.debug("phy net %s" % physical_networks)
physical_network = physical_networks[1]
# Get bigswitch network service provider
resp = listNetworkServiceProviders.listNetworkServiceProvidersCmd()
resp.name = 'BigSwitchBcf'
resp.physicalnetworkid = physical_network.id
nw_service_providers = cls.api_client.listNetworkServiceProviders(resp)
if isinstance(nw_service_providers, list):
bigswitch_provider = nw_service_providers[0]
cls.debug("net serv prov %s" % nw_service_providers)
else:
# Create bigswitch provider if not already existent
resp_add_nsp = addNetworkServiceProvider.addNetworkServiceProviderCmd()
resp_add_nsp.name = 'BigSwitchBcf'
resp_add_nsp.physicalnetworkid = physical_network.id
bigswitch_provider = cls.api_client.addNetworkServiceProvider(resp_add_nsp)
# add BCF controller
resp_add_device = addBigSwitchBcfDevice.addBigSwitchBcfDeviceCmd()
resp_add_device.physicalnetworkid = physical_network.id
resp_add_device.username = cls.bigswitch_services["username"]
resp_add_device.password = cls.bigswitch_services["password"]
resp_add_device.hostname = cls.bigswitch_services["hostname"]
resp_add_device.retrycount = cls.bigswitch_services["retrycount"]
resp_add_device.retryinterval = cls.bigswitch_services["retryinterval"]
cls.bigswitch = cls.api_client.addBigSwitchBcfDevice(resp_add_device)
if bigswitch_provider.state != 'Enabled':
cmd = updateNetworkServiceProvider.updateNetworkServiceProviderCmd()
cmd.id = bigswitch_provider.id
cmd.state = 'Enabled'
cls.api_client.updateNetworkServiceProvider(cmd)
# Create non-VPC network offering
cls.network_offering = NetworkOffering.create(
cls.api_client,
cls.services["network_offering"],
conservemode=True
)
cls._cleanup.append(cls.network_offering)
cls.network_offering.update(cls.api_client, state='Enabled')
# Create VPC network offering
cls.vpc_network_offering = NetworkOffering.create(
cls.api_client,
cls.services["vpc_network_offering"],
conservemode=False
)
cls._cleanup.append(cls.vpc_network_offering)
cls.vpc_network_offering.update(cls.api_client, state='Enabled')
# Create VPC offering
cls.vpc_offering = VpcOffering.create(
cls.api_client,
cls.services["vpc_offering"]
)
cls._cleanup.append(cls.vpc_offering)
cls.vpc_offering.update(cls.api_client, state='Enabled')
# Create compute service offering
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls._cleanup.append(cls.service_offering)
except Exception as e:
cls.tearDownClass()
raise Exception ("Warning: Exception in setUpClass: %s" % e)
return
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.account = Account.create(
self.apiclient,
self.services["account"],
admin=True,
domainid=self.domain.id
)
# self._cleanup = [self.account]
return
def tearDown(self):
try:
self.debug("Cleaning up the resources")
cleanup_resources(self.apiclient, self._cleanup)
interval = Configurations.list(
self.apiclient,
name='network.gc.interval'
)
wait = Configurations.list(
self.apiclient,
name='network.gc.wait'
)
time.sleep(int(interval[0].value) + int(wait[0].value))
self.debug("Cleanup complete!")
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags = ["advancedns"])
def test_network_bcf(self):
"""Test basic workflow with BigSwitch BCF plugin
1. Create a non-VPC guest network
2. Create two VMs on the network
3. Add firewall rule to make virtual router pingable
4. Test ping to virtual router public IP
5. Add static NAT to vm_1, with firewall rule to allow ssh
6. Ssh to vm_1, ping vm_2 private address, ping google.com
"""
self.debug("STEP 1: Creating network with network offering: %s" %
self.network_offering.id)
self.network = Network.create(
self.apiclient,
self.services["network"],
accountid=self.account.name,
domainid=self.account.domainid,
networkofferingid=self.network_offering.id,
zoneid=self.zone.id
)
self.debug("Created network with ID: %s" % self.network.id)
self.debug("STEP 2: Deploying VMs in account: %s" % self.account.name)
vm_1 = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
networkids=[ str(self.network.id), ]
)
self.debug("Deployed VM in network: %s" % self.network.id)
list_vm_response = VirtualMachine.list(
self.apiclient,
id=vm_1.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% vm_1.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Running",
"VM state should be running after deployment"
)
vm_2 = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
networkids=[ str(self.network.id), ]
)
self.debug("Deployed VM in network: %s" % self.network.id)
list_vm_response = VirtualMachine.list(
self.apiclient,
id=vm_2.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% vm_2.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Running",
"VM state should be running after deployment"
)
self.debug("STEP 3: Add FW rule to allow source nat ping")
src_nat_list = PublicIPAddress.list(
self.apiclient,
associatednetworkid=self.network.id,
account=self.account.name,
domainid=self.account.domainid,
listall=True,
issourcenat=True,
)
self.assertEqual(
isinstance(src_nat_list, list),
True,
"List Public IP should return a valid source NAT"
)
self.assertNotEqual(
len(src_nat_list),
0,
"Length of response from listPublicIp should not be 0"
)
src_nat = src_nat_list[0]
#Create Firewall rules on source NAT
fw_rule_icmp = FireWallRule.create(
self.apiclient,
ipaddressid=src_nat.id,
protocol='ICMP',
cidrlist=["0.0.0.0/0",]
)
self.debug("Created firewall rule: %s" % fw_rule_icmp.id)
self.debug("STEP 4: Trying to ping source NAT %s" % src_nat.ipaddress)
# User should be able to ping router via source nat ip
try:
self.debug("Trying to ping source NAT %s" % src_nat.ipaddress)
result = subprocess.call(
['ping', '-c 1', src_nat.ipaddress])
self.debug("Ping result: %s" % result)
# if ping successful, then result should be 0
self.assertEqual(
result,
0,
"Check if ping is successful or not"
)
except Exception as e:
self.fail("Ping failed for source NAT %s (%s)"
% (src_nat.ipaddress, e))
self.debug("STEP 5: Add static NAT to vm_1 with FW rule to allow SSH")
floating_ip_1 = PublicIPAddress.create(
self.apiclient,
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=self.network.id,
)
self.debug("Associated %s with network %s" % (
floating_ip_1.ipaddress,
self.network.id
)
)
NATRule.create(
self.apiclient,
vm_1,
self.services["natrule"],
ipaddressid=floating_ip_1.ipaddress.id,
openfirewall=False,
networkid=self.network.id
)
# Should be able to SSH vm_1 via static nat, then ping vm_2 & Internet
try:
self.debug("STEP 6: SSH into vm_1: %s" % floating_ip_1)
ssh = vm_1.get_ssh_client(
ipaddress=floating_ip_1.ipaddress.ipaddress
)
self.debug("Ping vm_2 at %s" % vm_2.ipaddress)
# Ping vm_2
res_1 = ssh.execute("ping -c 1 %s" % vm_2.ipaddress)
self.debug("Ping to google.com from VM")
# Ping Internet
res_2 = ssh.execute("ping -c 1 www.google.com")
# res = 64 bytes from maa03s17-in-f20.1e100.net (192.168.3.11):
# icmp_req=1 ttl=57 time=25.9 ms
# --- www.l.google.com ping statistics ---
# 1 packets transmitted, 1 received, 0% packet loss, time 0ms
# rtt min/avg/max/mdev = 25.970/25.970/25.970/0.000 ms
except Exception as e:
self.fail("SSH Access failed: %s" % e)
self.debug("ping result1: %s" % res_1);
self.debug("ping result2: %s" % res_2);
result1 = str(res_1)
self.assertEqual(
result1.count("1 received"),
1,
"Ping vm_2 from vm_1 should be successful"
)
result2 = str(res_2)
self.assertEqual(
result2.count("1 received"),
1,
"Ping Internet from vm_1 should be successful"
)
# Deleting two test VMs
VirtualMachine.delete(vm_1, self.apiclient, expunge=True)
VirtualMachine.delete(vm_2, self.apiclient, expunge=True)
# Delete Network
Network.delete(self.network, self.apiclient)
return
@attr(tags = ["advancedns"])
def test_vpc_network_bcf(self):
"""Test VPC workflow with BigSwitch BCF plugin
1. Create a VPC with three networks
2. Create one VM on each of the three networks
3. Add firewall rule to make virtual router pingable
4. Test ping to virtual router public IP
5. Add static NAT to vm_1, with firewall rule to allow ssh
6. Add NAT rule to allow ping between net1 and net2
7. Ssh to vm_1, ping vm_2 private address, should succeed
8. continue ... ping vm_3 private address, should fail
9. continue ... ping Internet, should succeed
"""
self.debug("STEP 1: Creating VPC with VPC offering: %s" %
self.vpc_offering.id)
vpc = VPC.create(
self.apiclient,
self.services["vpc"],
accountid=self.account.name,
domainid=self.account.domainid,
vpcofferingid=self.vpc_offering.id,
zoneid=self.zone.id
)
self.debug("Created VPC with ID: %s" % self.vpc.id)
# Creating network using the vpc network offering created
self.debug("Creating networks with vpc network offering: %s" %
self.vpc_network_offering.id)
net1 = Network.create(
self.apiclient,
self.services["vpc_network"],
accountid=self.account.name,
domainid=self.account.domainid,
networkofferingid=self.vpc_network_offering.id,
zoneid=self.zone.id,
gateway="10.0.100.1",
vpcid=vpc.id
)
self.debug("Created network with ID: %s" % net1.id)
net2 = Network.create(
self.apiclient,
self.services["vpc_network"],
accountid=self.account.name,
domainid=self.account.domainid,
networkofferingid=self.vpc_network_offering.id,
zoneid=self.zone.id,
gateway="10.0.101.1",
vpcid=vpc.id
)
self.debug("Created network with ID: %s" % net2.id)
net3 = Network.create(
self.apiclient,
self.services["vpc_network"],
accountid=self.account.name,
domainid=self.account.domainid,
networkofferingid=self.vpc_network_offering.id,
zoneid=self.zone.id,
gateway="10.0.102.0",
vpcid=vpc.id
)
self.debug("Created network with ID: %s" % net3.id)
self.debug("STEP 2: Deploying VMs in networks")
vm_1 = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
networkids=[ str(net1.id), ]
)
self.debug("Deployed VM in network: %s" % net1.id)
list_vm_response = VirtualMachine.list(
self.apiclient,
id=vm_1.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% vm_1.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Running",
"VM state should be running after deployment"
)
vm_2 = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
networkids=[ str(net2.id), ]
)
self.debug("Deployed VM in network: %s" % net2.id)
list_vm_response = VirtualMachine.list(
self.apiclient,
id=vm_2.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% vm_2.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Running",
"VM state should be running after deployment"
)
vm_3 = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
networkids=[ str(net3.id), ]
)
self.debug("Deployed VM in network: %s" % net3.id)
list_vm_response = VirtualMachine.list(
self.apiclient,
id=vm_3.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% vm_3.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Running",
"VM state should be running after deployment"
)
self.debug("STEP 3: Add FW rule to allow source nat ping")
src_nat_list = PublicIPAddress.list(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid,
listall=True,
issourcenat=True,
vpcid=vpc.id
)
self.assertEqual(
isinstance(src_nat_list, list),
True,
"List Public IP should return a valid source NAT"
)
self.assertNotEqual(
len(src_nat_list),
0,
"Length of response from listPublicIp should not be 0"
)
src_nat = src_nat_list[0]
#Create Firewall rules on source NAT
fw_rule_icmp = FireWallRule.create(
self.apiclient,
ipaddressid=src_nat.id,
protocol='ICMP',
cidrlist=["0.0.0.0/0",]
)
self.debug("Created firewall rule: %s" % fw_rule_icmp.id)
self.debug("STEP 4: Trying to ping source NAT %s" % src_nat.ipaddress)
# User should be able to ping router via source nat ip
try:
self.debug("Trying to ping source NAT %s" % src_nat.ipaddress)
result = subprocess.call(
['ping', '-c 1', src_nat.ipaddress])
self.debug("Ping result: %s" % result)
# if ping successful, then result should be 0
self.assertEqual(
result,
0,
"Check if ping is successful or not"
)
except Exception as e:
self.fail("Ping failed for source NAT %s (%s)"
% (src_nat.ipaddress, e))
self.debug("STEP 5: Add static NAT to vm_1 with FW rule to allow SSH")
floating_ip_1 = PublicIPAddress.create(
self.apiclient,
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=net1.id,
vpcid=vpc.id
)
self.debug("Associated %s with network %s" % (
floating_ip_1.ipaddress,
net1.id
)
)
NATRule.create(
self.apiclient,
vm_1,
self.services["natrule"],
ipaddressid=floating_ip_1.ipaddress.id,
openfirewall=False,
networkid=net1.id,
vpcid=vpc.id
)
# Should be able to SSH vm_1 via static nat, then ping vm_2 & Internet
try:
self.debug("STEP 6: SSH into vm_1: %s" % floating_ip_1)
ssh = vm_1.get_ssh_client(
ipaddress=floating_ip_1.ipaddress.ipaddress
)
# self.debug("Ping vm_2 at %s" % vm_2.ipaddress)
            # Ping vm_2
# res_1 = ssh.execute("ping -c 1 %s" % vm_2.ipaddress)
# self.debug("Ping to google.com from VM")
            # Ping to outside world
# res_2 = ssh.execute("ping -c 1 www.google.com")
# res = 64 bytes from maa03s17-in-f20.1e100.net (192.168.3.11):
# icmp_req=1 ttl=57 time=25.9 ms
# --- www.l.google.com ping statistics ---
# 1 packets transmitted, 1 received, 0% packet loss, time 0ms
# rtt min/avg/max/mdev = 25.970/25.970/25.970/0.000 ms
except Exception as e:
self.fail("SSH Access failed: %s" % e)
# self.debug("ping result1: %s" % res_1);
# self.debug("ping result2: %s" % res_2);
# result1 = str(res_1)
# self.assertEqual(
# result1.count("1 received"),
# 1,
# "Ping vm_2 from vm_1 should be successful"
# )
# result2 = str(res_2)
# self.assertEqual(
# result2.count("1 received"),
# 1,
# "Ping Internet from vm_1 should be successful"
# )
        # Deleting the three test VMs
        VirtualMachine.delete(vm_1, self.apiclient, expunge=True)
        VirtualMachine.delete(vm_2, self.apiclient, expunge=True)
        VirtualMachine.delete(vm_3, self.apiclient, expunge=True)
        # Delete the tier networks
        Network.delete(net1, self.apiclient)
        Network.delete(net2, self.apiclient)
        Network.delete(net3, self.apiclient)
return
|
preprocessing/compute_pixel_distance.py | enviromachinebeast/head2head | 206 | 11143428 | import cv2
import os
import numpy as np
import argparse
import collections
import torch
import itertools
from tqdm import tqdm
import util.util as util
IMG_EXTENSIONS = ['.png']
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def get_image_paths_dict(dir):
# Returns dict: {name: [path1, path2, ...], ...}
image_files = {}
assert os.path.isdir(dir), '%s is not a valid directory' % dir
for root, _, fnames in sorted(os.walk(dir)):
basename = os.path.basename(root)
for fname in fnames:
if is_image_file(fname) and basename in ['real', 'fake']:
path = os.path.join(root, fname)
seq_name = os.path.basename(root).split('_')[0]
if seq_name not in image_files:
image_files[seq_name] = [path]
else:
image_files[seq_name].append(path)
# Sort paths for each sequence
for k, v in image_files.items():
image_files[k] = sorted(v)
# Return directory sorted for keys (identity names)
return collections.OrderedDict(sorted(image_files.items()))
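# Expected layout under --results_dir (inferred from the 'real'/'fake' filter
# above and the assertion in main(); file names below are just placeholders):
#
#   results_dir/
#       real/  0001.png, 0002.png, ...
#       fake/  0001.png, 0002.png, ...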
def paths_exist(image_pths):
return all([os.path.exists(image_path) for image_path in image_pths])
def print_args(parser, args):
message = ''
message += '----------------- Arguments ---------------\n'
for k, v in sorted(vars(args).items()):
comment = ''
default = parser.get_default(k)
if v != default:
comment = '\t[default: %s]' % str(default)
message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
message += '-------------------------------------------'
print(message)
def main():
print('Computation of average pixel distance (APD)\n')
parser = argparse.ArgumentParser()
parser.add_argument('--results_dir', type=str, default='results/head2head_obama/latest_epoch/videos_test/obama',
help='Path to the results directory.')
args = parser.parse_args()
# Print Arguments
print_args(parser, args)
# Create the directory of image paths.
images_dict = get_image_paths_dict(args.results_dir)
    # Make sure we have two folders, one with real and one with fake frames.
assert 'real' in images_dict and 'fake' in images_dict and \
len(images_dict.keys()) == 2, 'Results directory has wrong structure'
total_distance, total_pixels = 0, 0
for f_path, r_path in zip(images_dict['fake'], images_dict['real']):
f_img = cv2.imread(f_path)
r_img = cv2.imread(r_path)
total_distance, total_pixels, _ = util.get_pixel_distance(
r_img, f_img, total_distance, total_pixels)
print('Average pixel (L2) distance for sequence (APD-L2): %0.2f' % (total_distance/total_pixels))
if __name__=='__main__':
main()
|
tests/__init__.py | dtardoin/airflow | 150 | 11143430 |
from __future__ import absolute_import
from .core import *
|
zipline/pipeline/classifiers/classifier.py | lv-cha/zipline-chinese | 606 | 11143434 |
"""
classifier.py
"""
from numbers import Number
from numpy import where, isnan, nan, zeros
from zipline.lib.quantiles import quantiles
from zipline.pipeline.term import ComputableTerm
from zipline.utils.input_validation import expect_types
from zipline.utils.numpy_utils import int64_dtype
from ..filters import NullFilter, NumExprFilter
from ..mixins import (
CustomTermMixin,
LatestMixin,
PositiveWindowLengthMixin,
RestrictedDTypeMixin,
SingleInputMixin,
)
class Classifier(RestrictedDTypeMixin, ComputableTerm):
"""
A Pipeline expression computing a categorical output.
Classifiers are most commonly useful for describing grouping keys for
complex transformations on Factor outputs. For example, Factor.demean() and
Factor.zscore() can be passed a Classifier in their ``groupby`` argument,
indicating that means/standard deviations should be computed on assets for
which the classifier produced the same label.
"""
ALLOWED_DTYPES = (int64_dtype,) # Used by RestrictedDTypeMixin
def isnull(self):
"""
A Filter producing True for values where this term has missing data.
"""
return NullFilter(self)
def notnull(self):
"""
A Filter producing True for values where this term has complete data.
"""
return ~self.isnull()
# We explicitly don't support classifier to classifier comparisons, since
# the numbers likely don't mean the same thing. This may be relaxed in the
# future, but for now we're starting conservatively.
@expect_types(other=Number)
def eq(self, other):
"""
Construct a Filter returning True for asset/date pairs where the output
of ``self`` matches ``other.
"""
# We treat this as an error because missing_values have NaN semantics,
# which means this would return an array of all False, which is almost
# certainly not what the user wants.
if other == self.missing_value:
raise ValueError(
"Comparison against self.missing_value ({value}) in"
" {typename}.eq().\n"
"Missing values have NaN semantics, so the "
"requested comparison would always produce False.\n"
"Use the isnull() method to check for missing values.".format(
value=other,
typename=(type(self).__name__),
)
)
return NumExprFilter.create(
"x_0 == {other}".format(other=int(other)),
binds=(self,),
)
@expect_types(other=Number)
def __ne__(self, other):
"""
Construct a Filter returning True for asset/date pairs where the output
of ``self`` matches ``other.
"""
return NumExprFilter.create(
"((x_0 != {other}) & (x_0 != {missing}))".format(
other=int(other),
missing=self.missing_value,
),
binds=(self,),
)
class Everything(Classifier):
"""
A trivial classifier that classifies everything the same.
"""
dtype = int64_dtype
window_length = 0
inputs = ()
missing_value = -1
def _compute(self, arrays, dates, assets, mask):
return where(
mask,
zeros(shape=mask.shape, dtype=int64_dtype),
self.missing_value,
)
class Quantiles(SingleInputMixin, Classifier):
"""
A classifier computing quantiles over an input.
"""
params = ('bins',)
dtype = int64_dtype
window_length = 0
missing_value = -1
def _compute(self, arrays, dates, assets, mask):
data = arrays[0]
bins = self.params['bins']
to_bin = where(mask, data, nan)
result = quantiles(to_bin, bins)
# Write self.missing_value into nan locations, whether they were
# generated by our input mask or not.
result[isnan(result)] = self.missing_value
return result.astype(int64_dtype)
def short_repr(self):
return type(self).__name__ + '(%d)' % self.params['bins']
class CustomClassifier(PositiveWindowLengthMixin, CustomTermMixin, Classifier):
"""
Base class for user-defined Classifiers.
See Also
--------
zipline.pipeline.CustomFactor
zipline.pipeline.CustomFilter
"""
pass
class Latest(LatestMixin, CustomClassifier):
"""
A classifier producing the latest value of an input.
See Also
--------
zipline.pipeline.data.dataset.BoundColumn.latest
zipline.pipeline.factors.factor.Latest
zipline.pipeline.filters.filter.Latest
"""
|
third_party/google-endpoints/Crypto/SelfTest/Random/Fortuna/test_FortunaAccumulator.py | tingshao/catapult | 2,151 | 11143449 | # -*- coding: utf-8 -*-
#
# SelfTest/Random/Fortuna/test_FortunaAccumulator.py: Self-test for the FortunaAccumulator module
#
# Written in 2008 by <NAME> <<EMAIL>>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-tests for Crypto.Random.Fortuna.FortunaAccumulator"""
__revision__ = "$Id$"
import sys
if sys.version_info[0] == 2 and sys.version_info[1] == 1:
from Crypto.Util.py21compat import *
from Crypto.Util.py3compat import *
import unittest
from binascii import b2a_hex
class FortunaAccumulatorTests(unittest.TestCase):
def setUp(self):
global FortunaAccumulator
from Crypto.Random.Fortuna import FortunaAccumulator
def test_FortunaPool(self):
"""FortunaAccumulator.FortunaPool"""
pool = FortunaAccumulator.FortunaPool()
self.assertEqual(0, pool.length)
self.assertEqual("5df6e0e2761359d30a8275058e299fcc0381534545f55cf43e41983f5d4c9456", pool.hexdigest())
pool.append(b('abc'))
self.assertEqual(3, pool.length)
self.assertEqual("4f8b42c22dd3729b519ba6f68d2da7cc5b2d606d05daed5ad5128cc03e6c6358", pool.hexdigest())
pool.append(b("dbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"))
self.assertEqual(56, pool.length)
self.assertEqual(b('0cffe17f68954dac3a84fb1458bd5ec99209449749b2b308b7cb55812f9563af'), b2a_hex(pool.digest()))
pool.reset()
self.assertEqual(0, pool.length)
pool.append(b('a') * 10**6)
self.assertEqual(10**6, pool.length)
self.assertEqual(b('80d1189477563e1b5206b2749f1afe4807e5705e8bd77887a60187a712156688'), b2a_hex(pool.digest()))
def test_which_pools(self):
"""FortunaAccumulator.which_pools"""
# which_pools(0) should fail
self.assertRaises(AssertionError, FortunaAccumulator.which_pools, 0)
self.assertEqual(FortunaAccumulator.which_pools(1), [0])
self.assertEqual(FortunaAccumulator.which_pools(2), [0, 1])
self.assertEqual(FortunaAccumulator.which_pools(3), [0])
self.assertEqual(FortunaAccumulator.which_pools(4), [0, 1, 2])
self.assertEqual(FortunaAccumulator.which_pools(5), [0])
self.assertEqual(FortunaAccumulator.which_pools(6), [0, 1])
self.assertEqual(FortunaAccumulator.which_pools(7), [0])
self.assertEqual(FortunaAccumulator.which_pools(8), [0, 1, 2, 3])
for i in range(1, 32):
self.assertEqual(FortunaAccumulator.which_pools(2L**i-1), [0])
self.assertEqual(FortunaAccumulator.which_pools(2L**i), range(i+1))
self.assertEqual(FortunaAccumulator.which_pools(2L**i+1), [0])
self.assertEqual(FortunaAccumulator.which_pools(2L**31), range(32))
self.assertEqual(FortunaAccumulator.which_pools(2L**32), range(32))
self.assertEqual(FortunaAccumulator.which_pools(2L**33), range(32))
self.assertEqual(FortunaAccumulator.which_pools(2L**34), range(32))
self.assertEqual(FortunaAccumulator.which_pools(2L**35), range(32))
self.assertEqual(FortunaAccumulator.which_pools(2L**36), range(32))
self.assertEqual(FortunaAccumulator.which_pools(2L**64), range(32))
self.assertEqual(FortunaAccumulator.which_pools(2L**128), range(32))
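        # The assertions above follow Fortuna's reseed schedule: pool k is used
        # on reseed number n exactly when 2**k divides n, so which_pools(n)
        # should equal range(t + 1) where t is the count of trailing zero bits
        # of n, capped at 32 pools. A rough equivalent (sketch, assuming that
        # contract holds):
        #
        #   def expected_pools(n, num_pools=32):
        #       t = 0
        #       while n % 2 == 0 and t < num_pools - 1:
        #           n //= 2
        #           t += 1
        #       return list(range(t + 1))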
def test_accumulator(self):
"""FortunaAccumulator.FortunaAccumulator"""
fa = FortunaAccumulator.FortunaAccumulator()
# This should fail, because we haven't seeded the PRNG yet
self.assertRaises(AssertionError, fa.random_data, 1)
# Spread some test data across the pools (source number 42)
# This would be horribly insecure in a real system.
for p in range(32):
fa.add_random_event(42, p, b("X") * 32)
self.assertEqual(32+2, fa.pools[p].length)
# This should still fail, because we haven't seeded the PRNG with 64 bytes yet
self.assertRaises(AssertionError, fa.random_data, 1)
# Add more data
for p in range(32):
fa.add_random_event(42, p, b("X") * 32)
self.assertEqual((32+2)*2, fa.pools[p].length)
# The underlying RandomGenerator should get seeded with Pool 0
# s = SHAd256(chr(42) + chr(32) + "X"*32 + chr(42) + chr(32) + "X"*32)
# = SHA256(h'edd546f057b389155a31c32e3975e736c1dec030ddebb137014ecbfb32ed8c6f')
# = h'aef42a5dcbddab67e8efa118e1b47fde5d697f89beb971b99e6e8e5e89fbf064'
# The counter and the key before reseeding is:
# C_0 = 0
# K_0 = "\x00" * 32
# The counter after reseeding is 1, and the new key after reseeding is
# C_1 = 1
# K_1 = SHAd256(K_0 || s)
# = SHA256(h'0eae3e401389fab86640327ac919ecfcb067359d95469e18995ca889abc119a6')
# = h'aafe9d0409fbaaafeb0a1f2ef2014a20953349d3c1c6e6e3b962953bea6184dd'
# The first block of random data, therefore, is
# r_1 = AES-256(K_1, 1)
# = AES-256(K_1, h'01000000000000000000000000000000')
# = h'b7b86bd9a27d96d7bb4add1b6b10d157'
# The second block of random data is
# r_2 = AES-256(K_1, 2)
# = AES-256(K_1, h'02000000000000000000000000000000')
# = h'2350b1c61253db2f8da233be726dc15f'
# The third and fourth blocks of random data (which become the new key) are
# r_3 = AES-256(K_1, 3)
# = AES-256(K_1, h'03000000000000000000000000000000')
# = h'f23ad749f33066ff53d307914fbf5b21'
# r_4 = AES-256(K_1, 4)
# = AES-256(K_1, h'04000000000000000000000000000000')
# = h'da9667c7e86ba247655c9490e9d94a7c'
# K_2 = r_3 || r_4
# = h'f23ad749f33066ff53d307914fbf5b21da9667c7e86ba247655c9490e9d94a7c'
# The final counter value is 5.
self.assertEqual("aef42a5dcbddab67e8efa118e1b47fde5d697f89beb971b99e6e8e5e89fbf064",
fa.pools[0].hexdigest())
self.assertEqual(None, fa.generator.key)
self.assertEqual(0, fa.generator.counter.next_value())
result = fa.random_data(32)
self.assertEqual(b("b7b86bd9a27d96d7bb4add1b6b10d157" "2350b1c61253db2f8da233be726dc15f"), b2a_hex(result))
self.assertEqual(b("f23ad749f33066ff53d307914fbf5b21da9667c7e86ba247655c9490e9d94a7c"), b2a_hex(fa.generator.key))
self.assertEqual(5, fa.generator.counter.next_value())
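        # The expected key/blocks documented in the comments above can be
        # reproduced independently with a sketch like the following (assumes
        # PyCrypto's AES and the little-endian counter block shown above):
        #
        #   from Crypto.Cipher import AES
        #   from binascii import unhexlify, hexlify
        #   k1 = unhexlify(b"aafe9d0409fbaaafeb0a1f2ef2014a20"
        #                  b"953349d3c1c6e6e3b962953bea6184dd")
        #   r1 = AES.new(k1, AES.MODE_ECB).encrypt(unhexlify(b"01" + b"00" * 15))
        #   assert hexlify(r1) == b"b7b86bd9a27d96d7bb4add1b6b10d157"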
def test_accumulator_pool_length(self):
"""FortunaAccumulator.FortunaAccumulator minimum pool length"""
fa = FortunaAccumulator.FortunaAccumulator()
# This test case is hard-coded to assume that FortunaAccumulator.min_pool_size is 64.
self.assertEqual(fa.min_pool_size, 64)
# The PRNG should not allow us to get random data from it yet
self.assertRaises(AssertionError, fa.random_data, 1)
# Add 60 bytes, 4 at a time (2 header + 2 payload) to each of the 32 pools
for i in range(15):
for p in range(32):
# Add the bytes to the pool
fa.add_random_event(2, p, b("XX"))
# The PRNG should not allow us to get random data from it yet
self.assertRaises(AssertionError, fa.random_data, 1)
# Add 4 more bytes to pool 0
fa.add_random_event(2, 0, b("XX"))
# We should now be able to get data from the accumulator
fa.random_data(1)
def get_tests(config={}):
from Crypto.SelfTest.st_common import list_test_cases
return list_test_cases(FortunaAccumulatorTests)
if __name__ == '__main__':
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
|
test/test_jieba.py | honmaple/flask-search | 213 | 11143453 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ********************************************************************************
# Copyright © 2018 jianglin
# File Name: test_jieba.py
# Author: jianglin
# Email: <EMAIL>
# Created: 2018-05-18 13:22:46 (CST)
# Last Update: Tuesday 2018-12-18 11:32:46 (CST)
# By:
# Description:
# ********************************************************************************
from test import (SearchTestBase, mkdtemp, Flask, SQLAlchemy, Search, unittest,
ModelSaveMixin)
from jieba.analyse import ChineseAnalyzer
titles = [
"买水果然后来世博园。",
"The second one 你 中文测试中文 is even more interesting! 吃水果",
"吃苹果",
"吃橘子",
]
class TestSearch(SearchTestBase):
def setUp(self):
class TestConfig(object):
SQLALCHEMY_TRACK_MODIFICATIONS = True
SQLALCHEMY_DATABASE_URI = 'sqlite://'
DEBUG = True
TESTING = True
MSEARCH_INDEX_NAME = mkdtemp()
MSEARCH_BACKEND = 'whoosh'
self.app = Flask(__name__)
self.app.config.from_object(TestConfig())
self.db = SQLAlchemy(self.app)
self.search = Search(self.app, db=self.db, analyzer=ChineseAnalyzer())
db = self.db
class Post(db.Model, ModelSaveMixin):
__tablename__ = 'basic_posts'
__searchable__ = ['title', 'content']
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(49))
content = db.Column(db.Text)
def __repr__(self):
return '<Post:{}>'.format(self.title)
self.Post = Post
with self.app.test_request_context():
self.db.create_all()
for (i, title) in enumerate(titles, 1):
post = self.Post(title=title, content='content%d' % i)
post.save(self.db)
def test_basic_search(self):
with self.app.test_request_context():
results = self.Post.query.msearch('水果').all()
self.assertEqual(len(results), 2)
results = self.Post.query.msearch('苹果').all()
self.assertEqual(len(results), 1)
results = self.Post.query.msearch('世博园').all()
self.assertEqual(len(results), 1)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromNames([
'test_jieba.TestSearch',
])
unittest.TextTestRunner(verbosity=1).run(suite)
|
migrations/versions/c3dbfdaeb45b_.py | muellermartin/moa | 238 | 11143463 | """empty message
Revision ID: c3dbfdaeb45b
Revises: <PASSWORD>
Create Date: 2019-12-01 16:12:12.484573
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c3dbfdaeb45b'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('bridgemetadata',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created', sa.DateTime(), nullable=True),
sa.Column('last_tweet', sa.DateTime(), nullable=True),
sa.Column('last_toot', sa.DateTime(), nullable=True),
sa.Column('is_bot', sa.Boolean(), server_default='0', nullable=True),
sa.Column('worker_id', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.add_column('bridge', sa.Column('metadata_id', sa.Integer(), nullable=True))
op.create_foreign_key(None, 'bridge', 'bridgemetadata', ['metadata_id'], ['id'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'bridge', type_='foreignkey')
op.drop_column('bridge', 'metadata_id')
op.drop_table('bridgemetadata')
# ### end Alembic commands ###
|
discovery-provider/src/model_validator.py | atticwip/audius-protocol | 429 | 11143489 | import os.path
import json
import copy
from typing import Dict, Any
import logging # pylint: disable=C0302
from jsonschema import Draft7Validator, ValidationError, SchemaError
logger = logging.getLogger(__name__)
# https://app.quicktype.io/ -- JSON schema generator
class ModelValidator:
"""
{key: value} :
{
model (first letter uppercase): {
model_schema: schema,
field_schema: {
title: schema,
mood: schema,
...
},
fields: [title, mood, ...]
}
}
"""
models_to_schema_and_fields_dict: Dict[str, Any] = {}
BASE_PATH = "./src/schemas/"
FILE_NAME = ""
# Default field is set to None to validate the entire model
@classmethod
def validate(cls, to_validate, model, field=None):
cls.FILE_NAME = model.lower() + "_schema.json"
try:
schema = cls.get_schema_for_field(field, model)
validator = Draft7Validator(schema)
found_invalid_field = False
errors = []
for error in sorted(validator.iter_errors(to_validate), key=str):
found_invalid_field = True
errors.append(error.message)
if found_invalid_field:
raise ValidationError(
f"Instance {to_validate} is not proper. Errors: {errors}"
)
except ValidationError as ve:
raise ve
except SchemaError as se:
raise se
except BaseException as e:
# one of many errors specified in helper methods
raise e
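    # Illustrative usage (sketch; "Track"/"title" mirror the example mentioned
    # in create_field_schema below and assume a matching
    # ./src/schemas/track_schema.json exists):
    #
    #   ModelValidator.validate(track_metadata, "Track", field="title")  # one field
    #   ModelValidator.validate(track_metadata, "Track")  # validate every field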
# If field is None, return the entire model schema
@classmethod
def get_schema_for_field(cls, field, model):
try:
# If model is not present in dict, init its schema and its field subschemas
if model not in cls.models_to_schema_and_fields_dict:
schema = cls.init_model_schemas(model)
if not schema:
error_msg = f"ModelValidation failed. Are you sure the schema for {model} is present and proper?"
raise RuntimeError(error_msg)
            if field is None:
return cls.models_to_schema_and_fields_dict[model]["model_schema"]
# Else, return the specified field schema
return cls.models_to_schema_and_fields_dict[model]["field_schema"][field]
except BaseException as e:
raise e
@classmethod
def init_model_schemas(cls, model):
cls.FILE_NAME = model.lower() + "_schema.json"
try:
# Load in the model schema in /schemas
schema = cls.load_schema_from_path(model)
model_properties = {
"model_schema": schema, # schema for the entire model
"field_schema": {}, # schema for just a field in model
"fields": cls._get_required(schema, model), # list of fields in model
}
cls.models_to_schema_and_fields_dict[model] = model_properties
# Create all the subschemas for each individual field
for field in model_properties["fields"]:
# Create a deep copy of the entire schema to generate a schema that only
# validates one field of the entire model
cls.create_field_schema(schema, field, model)
return schema
except BaseException as e:
logger.exception(e)
return None
@classmethod
def load_schema_from_path(cls, model):
schema_path = cls.BASE_PATH + cls.FILE_NAME
if not os.path.isfile(schema_path):
raise FileNotFoundError(
f"Schema for {model} not found in path {schema_path}"
)
with open(schema_path) as f:
try:
schema = json.load(f)
return schema
except json.JSONDecodeError as je:
raise je
@classmethod
def create_field_schema(cls, schema, field, model):
try:
# Create a deep copy of the entire schema to generate a schema that only
# validates one field of the entire model
schema_copy = copy.deepcopy(schema)
# Replace the properties value with just the field to validate against
# This way, we are able to use one entire model schema and at runtime,
# generate a new schema for just a field
# ex. replace all properties of Track (blockhash, block, title, ...) with just 'title'
field_to_validate_against = {
field: cls._get_properties_field(schema_copy, model, field)
}
cls._set_property(schema_copy, model, field_to_validate_against)
cls._set_required(schema_copy, model, field)
# Add field schema to dict
cls.models_to_schema_and_fields_dict[model]["field_schema"][
field
] = schema_copy
except BaseException as e:
raise e
@classmethod
def get_properties_for_field(cls, model, field):
try:
field_schema = cls.models_to_schema_and_fields_dict[model]["field_schema"][
field
]
return field_schema["definitions"][model]["properties"][field]
except KeyError:
logger.warning(
f"Could not find type for {field} for model {model}. Defaulting to 'None'"
)
return None
@classmethod
def _get_properties_field(cls, schema, model, field):
try:
return schema["definitions"][model]["properties"][field]
except BaseException:
logger.warning(
f"Could not find field '{field}' for {model}. Schema will be empty for this field."
)
return {} # empty schema
@classmethod
def _set_property(cls, schema, model, new_property):
try:
schema["definitions"][model]["properties"] = new_property
except KeyError as ke:
raise KeyError(f"Could not find keys for {model} schema: {ke}") from ke
@classmethod
def _set_required(cls, schema, model, new_required):
try:
schema["definitions"][model]["required"] = [new_required]
except KeyError as ke:
raise KeyError(f"Could not find keys for {model} schema: {ke}") from ke
@classmethod
def _get_required(cls, schema, model):
try:
return schema["definitions"][model]["required"]
except KeyError as ke:
raise KeyError(f"Could not find keys for {model} schema: {ke}") from ke
|
Practice/Coding Blocks/Competitive Warriors Challenge 1.0/Deepak and Primes.py | cnm06/Competitive-Programming | 994 | 11143490 | def SieveOfEratosthenes(n):
    # Classic sieve: mark every multiple of each prime as composite, then
    # collect the surviving indices as the list of primes up to n (inclusive).
    prime = [True for i in xrange(n+1)]
    p = 2
    while (p * p <= n):
        if (prime[p] == True):
            for i in xrange(p * 2, n+1, p):
                prime[i] = False
        p += 1
    ans = []
    for p in xrange(2, n+1):
        if prime[p]:
            ans.append(p)
    return ans
prime = SieveOfEratosthenes(10000000)
n = int(raw_input())
print prime[n-1]
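# (Input n is 1-indexed, so prime[n-1] is the n-th prime. Sieving up to 10**7
# yields roughly 665,000 primes, which is assumed to be enough for the
# problem's constraints.)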
|
dragonfly/nn/unittest_cp_nn_gp.py | hase1128/dragonfly | 675 | 11143498 |
"""
Unittests for Cartesian product GP with NN domains. Testing these separately since
we do not need to have nn dependencies in the gp module.
-- <EMAIL>
"""
# pylint: disable=invalid-name
# pylint: disable=no-member
import os
import numpy as np
import unittest
# Local
from ..gp.unittest_cartesian_product_gp import get_test_dataset, \
gen_cpmfgp_test_data_from_config_file, \
CPGPTestCaseDefinitions, CPMFGPTestCaseDefinitions
from ..nn import nn_examples
from ..test_data.syn_cnn_2.syn_cnn_2 import syn_cnn_2
from ..test_data.syn_cnn_2.syn_cnn_2_mf import syn_cnn_2_mf
from ..utils.base_test_class import BaseTestClass, execute_tests
from ..utils.general_utils import map_to_bounds
def get_cnns():
""" Returns the initial pool for CNNs. """
vgg_nets = [nn_examples.get_vgg_net(1),
nn_examples.get_vgg_net(2),
nn_examples.get_vgg_net(3),
nn_examples.get_vgg_net(4)]
blocked_cnns = [nn_examples.get_blocked_cnn(3, 1, 1), # 3
nn_examples.get_blocked_cnn(3, 2, 1), # 6
nn_examples.get_blocked_cnn(3, 3, 1), # 9
nn_examples.get_blocked_cnn(3, 4, 1), # 12
nn_examples.get_blocked_cnn(3, 5, 1), # 15
nn_examples.get_blocked_cnn(4, 4, 1), # 16
]
other_cnns = nn_examples.generate_cnn_architectures()
ret = vgg_nets + blocked_cnns + other_cnns
np.random.shuffle(ret)
return ret
def get_initial_mlp_pool(class_or_reg):
""" Returns the initial pool of MLPs. """
blocked_mlps = [nn_examples.get_blocked_mlp(class_or_reg, 3, 2), # 6
nn_examples.get_blocked_mlp(class_or_reg, 4, 2), # 8
nn_examples.get_blocked_mlp(class_or_reg, 5, 2), # 10
nn_examples.get_blocked_mlp(class_or_reg, 3, 4), # 12
nn_examples.get_blocked_mlp(class_or_reg, 6, 2), # 12
nn_examples.get_blocked_mlp(class_or_reg, 8, 2), # 16
nn_examples.get_blocked_mlp(class_or_reg, 6, 3), # 18
nn_examples.get_blocked_mlp(class_or_reg, 10, 2), #20
nn_examples.get_blocked_mlp(class_or_reg, 4, 6), #24
nn_examples.get_blocked_mlp(class_or_reg, 8, 3), #24
]
other_mlps = nn_examples.generate_mlp_architectures()
ret = blocked_mlps + other_mlps
np.random.shuffle(ret)
return ret
def gen_cpnnmfgp_test_data(num_tr_data, num_te_data):
""" Generates data on all functions. """
file_dir = os.path.dirname(os.path.realpath(__file__))
test_data_dir = os.path.dirname(file_dir)
test_problems = [
(test_data_dir + '/test_data/syn_cnn_2/config_mf.json', syn_cnn_2_mf),
]
ret = [gen_cpmfgp_test_data_from_config_file(cfn, rf, num_tr_data, num_te_data)
for cfn, rf in test_problems]
return ret
def get_cp_nn_gp_test_data():
""" Create test data. """
# pylint: disable=too-many-locals
file_dir = os.path.dirname(os.path.realpath(__file__))
test_data_dir = os.path.dirname(file_dir)
ret = []
n_train = 200
n_test = 300
# Dataset 1
all_cnns = get_cnns()
num_train = max(len(all_cnns), n_train)
num_test = max(len(all_cnns), n_test)
domain_file_name = test_data_dir + '/' + 'test_data/syn_cnn_2/config.json'
func = syn_cnn_2
x1_bounds = np.array([[0, 1], [0, 1], [10, 14]])
x4_elems = [4, 10, 23, 45, 78, 87.1, 91.8, 99, 75.7, 28.1, 3.141593]
# Create training set
X_tr_0 = np.random.choice(all_cnns, num_train)
X_tr_1 = map_to_bounds(np.random.random((num_train, len(x1_bounds))), x1_bounds)
X_tr_2 = [[elem1, elem2] for (elem1, elem2) in zip(
np.random.choice(['foo', 'bar'], num_train),
np.random.choice(['foo', 'bar'], num_train))]
X_tr_3 = np.random.choice(x4_elems, num_train)
X_train = [[x0, x1, x2, [x3]] for (x0, x1, x2, x3) in \
zip(X_tr_0, X_tr_1, X_tr_2, X_tr_3)]
# Create test set
X_te_0 = np.random.choice(all_cnns, num_test)
X_te_1 = map_to_bounds(np.random.random((num_test, len(x1_bounds))), x1_bounds)
X_te_2 = [[elem1, elem2] for (elem1, elem2) in zip(
np.random.choice(['foo', 'bar'], num_test),
np.random.choice(['foo', 'bar'], num_test))]
X_te_3 = np.random.choice(x4_elems, num_test)
X_test = [[x0, x1, x2, [x3]] for (x0, x1, x2, x3) in \
zip(X_te_0, X_te_1, X_te_2, X_te_3)]
ret.append(get_test_dataset(domain_file_name, func, X_train, X_test))
return ret
class CPNNGPTestCase(CPGPTestCaseDefinitions, BaseTestClass):
""" Unit tests for Cartesian product GPs with NN domains. """
def setUp(self):
""" Set up. """
self.cpgp_datasets = get_cp_nn_gp_test_data()
self.num_datasets = len(self.cpgp_datasets)
@unittest.skip
class CPMFGPTestCase(CPMFGPTestCaseDefinitions, BaseTestClass):
""" Unit tests for Multi-fidelity Cartesian Product GPs. """
def setUp(self):
""" Set up. """
num_tr_data = 50
num_te_data = 50
self.cpmfgp_datasets = gen_cpnnmfgp_test_data(num_tr_data, num_te_data)
self.num_datasets = len(self.cpmfgp_datasets)
if __name__ == '__main__':
execute_tests()
|
src/beanmachine/ppl/inference/tests/sampler_test.py | horizon-blue/beanmachine-1 | 177 | 11143521 | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import beanmachine.ppl as bm
import torch
import torch.distributions as dist
class SampleModel:
@bm.random_variable
def foo(self):
return dist.Normal(0.0, 1.0)
@bm.random_variable
def bar(self):
return dist.Normal(self.foo(), 1.0)
def test_sampler():
model = SampleModel()
nuts = bm.GlobalNoUTurnSampler()
queries = [model.foo()]
observations = {model.bar(): torch.tensor(0.5)}
num_samples = 10
sampler = nuts.sampler(queries, observations, num_samples, num_adaptive_samples=0)
worlds = list(sampler)
assert len(worlds) == num_samples
for world in worlds:
assert model.foo() in world
with world:
assert isinstance(model.foo(), torch.Tensor)
def test_two_samplers():
model = SampleModel()
queries = [model.foo()]
observations = {model.bar(): torch.tensor(0.5)}
nuts_sampler = bm.GlobalNoUTurnSampler().sampler(queries, observations)
hmc_sampler = bm.GlobalHamiltonianMonteCarlo(1.0).sampler(queries, observations)
world = next(nuts_sampler)
# it's possible to use multiple sampler interchangably to update the worlds (or
# in general, pass a new world to sampler and continue inference with existing
# hyperparameters)
for _ in range(3):
world = hmc_sampler.send(world)
world = nuts_sampler.send(world)
assert model.foo() in world
assert model.bar() in world
|
contrib/stack/topsStack/sentinelApp.py | yuankailiu/isce2 | 1,133 | 11143531 | #!/usr/bin/env python3
########################
#Author: <NAME>, <NAME>
# For Geocoding SLCs
#######################
import os, sys, glob
import argparse
import configparser
import datetime
import numpy as np
import isce
import isceobj
from isceobj.Sensor.TOPS.Sentinel1 import Sentinel1
from Stack import config, run, sentinelSLC
helpstr= '''
Processor for Sentinel-1 data using ISCE software.
For a full list of different options, try sentinelApp.py -h
sentinelApp.py generates all configuration and run files required to be executed for Sentinel-1 TOPS data.
Following are required to start processing:
1) a folder that includes Sentinel-1 SLCs,
2) a DEM (Digital Elevation Model)
3) a folder that includes precise orbits (use dloadOrbits.py to download or to update your orbit folder)
4) a folder for Sentinel-1 Aux files (which is used for correcting the Elevation Antenna Pattern).
5) bounding box as South North West East.
Note that sentinelApp.py does not process any data. It only prepares a lot of input files for processing and a lot of run files. Then you need to execute all those generated run files in order. To know what is really going on, after running sentinelApp.py, look at each run file generated by sentinelApp.py. Each run file actually has several commands that are independent from each other and can be executed in parallel. The config files for each run file include the processing options to execute a specific command/function.
'''
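# Example invocation (illustrative only -- every path and the bounding box are
# placeholders; the option names are the ones defined in createParser() below):
#
#   sentinelApp.py -s ./SLC -o ./orbits -a ./aux_cal -d ./dem.wgs84 \
#       -b '19.1 19.6 -155.8 -154.9' -w ./work -n '1 2 3' -p vv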
class customArgparseAction(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
'''
The action to be performed.
'''
print(helpstr)
parser.exit()
def createParser():
parser = argparse.ArgumentParser( description='Preparing the directory structure and config files for the processing of Sentinel data')
parser.add_argument('-H','--hh', nargs=0, action=customArgparseAction,
help='Display detailed help information.')
parser.add_argument('-s', '--slc_directory', dest='slc_dirname', type=str, required=True,
help='Directory with all Sentinel SLCs')
parser.add_argument('-o', '--orbit_directory', dest='orbit_dirname', type=str, required=True,
help='Directory with all orbits')
parser.add_argument('-a', '--aux_directory', dest='aux_dirname', type=str, required=True,
            help='Directory with all aux files')
parser.add_argument('-w', '--working_directory', dest='work_dir', type=str, default='./',
help='Working directory ')
parser.add_argument('-d', '--dem', dest='dem', type=str, required=True,
            help='Path to the DEM file')
parser.add_argument('-n', '--swath_num', dest='swath_num', type=str, default='1 2 3',
help='A list of swaths to be processed')
parser.add_argument('-b', '--bbox', dest='bbox', type=str, default=None, help='Lat/Lon Bounding SNWE')
parser.add_argument('-t', '--text_cmd', dest='text_cmd', type=str, default='source ~/.bash_profile;'
, help='text command to be added to the beginning of each line of the run files. Example : source ~/.bash_profile;')
parser.add_argument('-p', '--polarization', dest='polarization', type=str, default='vv'
, help='SAR data polarization')
parser.add_argument('-u','--update', dest='update', type=int, default=0, help='re-run (0) or update (1)')
parser.add_argument('-z', '--azimuth_looks', dest='azimuthLooks', type=str, default='3'
, help='Number of looks in azimuth for interferogram multi-looking')
parser.add_argument('-r', '--range_looks', dest='rangeLooks', type=str, default='9'
, help='Number of looks in range for interferogram multi-looking')
return parser
def cmdLineParse(iargs = None):
parser = createParser()
inps = parser.parse_args(args=iargs)
inps.slc_dirname = os.path.abspath(inps.slc_dirname)
inps.orbit_dirname = os.path.abspath(inps.orbit_dirname)
inps.aux_dirname = os.path.abspath(inps.aux_dirname)
inps.work_dir = os.path.abspath(inps.work_dir)
inps.dem = os.path.abspath(inps.dem)
return inps
####################################
def get_dates(inps):
# Given the SLC directory This function extracts the acquisition dates
# and prepares a dictionary of sentinel slc files such that keys are
# acquisition dates and values are object instances of sentinelSLC class
# which is defined in Stack.py
if inps.bbox is not None:
bbox = [float(val) for val in inps.bbox.split()]
if os.path.isfile(inps.slc_dirname):
print('reading SAFE files from: ' + inps.slc_dirname)
SAFE_files = []
for line in open(inps.slc_dirname):
SAFE_files.append(str.replace(line,'\n','').strip())
else:
SAFE_files = glob.glob(os.path.join(inps.slc_dirname,'S1*_IW_SLC*zip')) # changed to zip file by <NAME>
if len(SAFE_files) == 0:
raise Exception('No SAFE file found')
else:
print ("Number of SAFE files found: "+str(len(SAFE_files)))
################################
# write down the list of SAFE files in a txt file:
f = open('SAFE_files.txt','w')
for safe in SAFE_files:
f.write(safe + '\n')
f.close()
################################
# group the files based on dates
safe_dict={}
for safe in SAFE_files:
safeObj=sentinelSLC(safe)
safeObj.get_dates()
safeObj.get_orbit(inps.orbit_dirname, inps.work_dir)
if safeObj.date not in safe_dict.keys():
safe_dict[safeObj.date]=safeObj
else:
safe_dict[safeObj.date].safe_file = safe_dict[safeObj.date].safe_file + ' ' + safe
################################
dateList = [key for key in safe_dict.keys()]
dateList.sort()
print ("*****************************************")
print ("Number of dates : " +str(len(dateList)))
print ("List of dates : ")
print (dateList)
################################
#get the files covering the bounding box
S=[]
N=[]
W=[]
E=[]
safe_dict_bbox={}
print ('date south north west east')
for date in dateList:
#safe_dict[date].get_lat_lon()
safe_dict[date].get_lat_lon_v2()
#safe_dict[date].get_lat_lon_v3(inps)
S.append(safe_dict[date].SNWE[0])
N.append(safe_dict[date].SNWE[1])
W.append(safe_dict[date].SNWE[2])
E.append(safe_dict[date].SNWE[3])
print (date, safe_dict[date].SNWE[0],safe_dict[date].SNWE[1],safe_dict[date].SNWE[2],safe_dict[date].SNWE[3])
if inps.bbox is not None:
if safe_dict[date].SNWE[0] <= bbox[0] and safe_dict[date].SNWE[1] >= bbox[1] and safe_dict[date].SNWE[2] <= bbox[2] and safe_dict[date].SNWE[3] >=bbox[3]:
safe_dict_bbox[date] = safe_dict[date]
print ("*****************************************")
################################
print ('All dates')
print (dateList)
if inps.bbox is not None:
safe_dict = safe_dict_bbox
dateList = [key for key in safe_dict.keys()]
dateList.sort()
print ('dates covering the bbox')
print (dateList)
return dateList, safe_dict
def checkCurrentStatus(inps):
acquisitionDates, safe_dict = get_dates(inps)
slcDir = os.path.join(inps.work_dir, 'slc')
if os.path.exists(slcDir):
slcFiles = glob.glob(os.path.join(slcDir, '*'))
existed_dates = [os.path.basename(slc) for slc in slcFiles]
existed_dates.sort()
if inps.update and len(existed_dates)>0:
print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
print('')
print('Old processed acquisitions are found: ')
print(existed_dates)
print('')
print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
acquisitionDates = list(set(acquisitionDates).difference(set(existed_dates)))
acquisitionDates.sort()
if len(acquisitionDates)>0:
print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
print('')
print('New acquisitions are found and will be processed: ')
print(acquisitionDates)
print('')
print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
else:
print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
print('')
print('No new acquisition: ')
print('')
print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
safe_dict_new={}
for d in acquisitionDates:
safe_dict_new[d] = safe_dict[d]
safe_dict = safe_dict_new
else:
print('No existing processed slc are identified. All the slcs will be processed.')
return acquisitionDates, safe_dict
def slcSimple(inps, acquisitionDates, safe_dict, mergeSLC=False):
#############################
i=0
i+=1
runObj = run()
runObj.configure(inps, 'run_' + str(i))
runObj.unpackSLC(acquisitionDates, safe_dict)
runObj.finalize()
if mergeSLC:
i+=1
runObj = run()
runObj.configure(inps, 'run_' + str(i))
runObj.mergeSLC(acquisitionDates, virtual = 'False')
runObj.finalize()
return i
def main(iargs=None):
inps = cmdLineParse(iargs)
if os.path.exists(os.path.join(inps.work_dir, 'run_files')):
print('')
print('**************************')
print('run_files folder exists.')
print(os.path.join(inps.work_dir, 'run_files'), ' already exists.')
print('Please remove or rename this folder and try again.')
print('')
print('**************************')
sys.exit(1)
acquisitionDates, safe_dict = checkCurrentStatus(inps)
slcSimple(inps, acquisitionDates, safe_dict, mergeSLC=True)
if __name__ == "__main__":
# Main engine
main()
|
sdk/python/tests/unit/test_feature_view.py | kevjumba/feast | 810 | 11143533 | # Copyright 2022 The Feast Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from feast.feature_view import FeatureView
from feast.field import Field
from feast.infra.offline_stores.file_source import FileSource
from feast.types import Float32
def test_hash():
file_source = FileSource(name="my-file-source", path="test.parquet")
feature_view_1 = FeatureView(
name="my-feature-view",
entities=[],
schema=[
Field(name="feature1", dtype=Float32),
Field(name="feature2", dtype=Float32),
],
source=file_source,
)
feature_view_2 = FeatureView(
name="my-feature-view",
entities=[],
schema=[
Field(name="feature1", dtype=Float32),
Field(name="feature2", dtype=Float32),
],
source=file_source,
)
feature_view_3 = FeatureView(
name="my-feature-view",
entities=[],
schema=[Field(name="feature1", dtype=Float32)],
source=file_source,
)
feature_view_4 = FeatureView(
name="my-feature-view",
entities=[],
schema=[Field(name="feature1", dtype=Float32)],
source=file_source,
description="test",
)
s1 = {feature_view_1, feature_view_2}
assert len(s1) == 1
s2 = {feature_view_1, feature_view_3}
assert len(s2) == 2
s3 = {feature_view_3, feature_view_4}
assert len(s3) == 2
s4 = {feature_view_1, feature_view_2, feature_view_3, feature_view_4}
assert len(s4) == 3
# TODO(felixwang9817): Add tests for proto conversion.
# TODO(felixwang9817): Add tests for field mapping logic.
|
pandas/tests/series/methods/test_pct_change.py | CJL89/pandas | 1,738 | 11143540 | import numpy as np
import pytest
from pandas import Series, date_range
import pandas._testing as tm
class TestSeriesPctChange:
def test_pct_change(self, datetime_series):
rs = datetime_series.pct_change(fill_method=None)
tm.assert_series_equal(rs, datetime_series / datetime_series.shift(1) - 1)
rs = datetime_series.pct_change(2)
filled = datetime_series.fillna(method="pad")
tm.assert_series_equal(rs, filled / filled.shift(2) - 1)
rs = datetime_series.pct_change(fill_method="bfill", limit=1)
filled = datetime_series.fillna(method="bfill", limit=1)
tm.assert_series_equal(rs, filled / filled.shift(1) - 1)
rs = datetime_series.pct_change(freq="5D")
filled = datetime_series.fillna(method="pad")
tm.assert_series_equal(
rs, (filled / filled.shift(freq="5D") - 1).reindex_like(filled)
)
def test_pct_change_with_duplicate_axis(self):
# GH#28664
common_idx = date_range("2019-11-14", periods=5, freq="D")
result = Series(range(5), common_idx).pct_change(freq="B")
# the reason that the expected should be like this is documented at PR 28681
expected = Series([np.NaN, np.inf, np.NaN, np.NaN, 3.0], common_idx)
tm.assert_series_equal(result, expected)
def test_pct_change_shift_over_nas(self):
s = Series([1.0, 1.5, np.nan, 2.5, 3.0])
chg = s.pct_change()
expected = Series([np.nan, 0.5, 0.0, 2.5 / 1.5 - 1, 0.2])
tm.assert_series_equal(chg, expected)
@pytest.mark.parametrize(
"freq, periods, fill_method, limit",
[
("5B", 5, None, None),
("3B", 3, None, None),
("3B", 3, "bfill", None),
("7B", 7, "pad", 1),
("7B", 7, "bfill", 3),
("14B", 14, None, None),
],
)
def test_pct_change_periods_freq(
self, freq, periods, fill_method, limit, datetime_series
):
# GH#7292
rs_freq = datetime_series.pct_change(
freq=freq, fill_method=fill_method, limit=limit
)
rs_periods = datetime_series.pct_change(
periods, fill_method=fill_method, limit=limit
)
tm.assert_series_equal(rs_freq, rs_periods)
empty_ts = Series(index=datetime_series.index, dtype=object)
rs_freq = empty_ts.pct_change(freq=freq, fill_method=fill_method, limit=limit)
rs_periods = empty_ts.pct_change(periods, fill_method=fill_method, limit=limit)
tm.assert_series_equal(rs_freq, rs_periods)
@pytest.mark.parametrize("fill_method", ["pad", "ffill", None])
def test_pct_change_with_duplicated_indices(fill_method):
# GH30463
s = Series([np.nan, 1, 2, 3, 9, 18], index=["a", "b"] * 3)
result = s.pct_change(fill_method=fill_method)
expected = Series([np.nan, np.nan, 1.0, 0.5, 2.0, 1.0], index=["a", "b"] * 3)
tm.assert_series_equal(result, expected)
|
tests/ui/test_nodegraphhooks.py | Hengle/Houdini-Toolbox | 136 | 11143550 |
"""Test the python3.7libs/nodegraphhooks.py module."""
# ==============================================================================
# IMPORTS
# ==============================================================================
# Standard Library
import importlib.util
spec = importlib.util.spec_from_file_location("nodegraphhooks", "houdini/python3.7libs/nodegraphhooks.py")
nodegraphhooks = importlib.util.module_from_spec(spec)
spec.loader.exec_module(nodegraphhooks)
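# (The module is loaded straight from its file path above, presumably because
# houdini/python3.7libs is not an importable package from the test environment.)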
# Houdini
from canvaseventtypes import KeyboardEvent # pylint: disable=wrong-import-position
# ==============================================================================
# TESTS
# ==============================================================================
def test_KEY_HIT_TYPES():
"""Test for expected key hit types in nodegraphhooks.KEY_HIT_TYPES."""
assert nodegraphhooks.KEY_HIT_TYPES == ("keyhit", "menukeyhit", "parentkeyhit")
class Test_createEventHandler:
"""Test nodegraphhooks.createEventHandler."""
def test_houdini_paste(self, mocker):
"""Test when doing a standard Houdini paste event."""
mock_is = mocker.patch("ht.ui.nodegraph.is_houdini_paste_event", return_value=True)
mock_handle = mocker.patch("ht.ui.nodegraph.handle_houdini_paste_event")
mock_event = mocker.MagicMock(spec=KeyboardEvent)
mock_event.eventtype = "keyhit"
mock_pending = mocker.MagicMock(spec=list)
result = nodegraphhooks.createEventHandler(mock_event, mock_pending)
assert result == mock_handle.return_value
mock_is.assert_called_with(mock_event)
mock_handle.assert_called_with(mock_event)
def test_copy_items(self, mocker):
"""Test when doing a h.tool:copy_items event."""
mock_is = mocker.patch("ht.ui.nodegraph.is_houdini_paste_event", return_value=False)
mock_set = mocker.patch("nodegraphdisplay.setKeyPrompt", return_value=True)
mock_copy = mocker.patch("ht.ui.paste.copy_items_from_graph")
mock_event = mocker.MagicMock(spec=KeyboardEvent)
mock_event.eventtype = "keyhit"
mock_pending = mocker.MagicMock(spec=list)
result = nodegraphhooks.createEventHandler(mock_event, mock_pending)
assert result == mock_copy.return_value
mock_is.assert_called_with(mock_event)
mock_set.assert_called_with(mock_event.editor, mock_event.key, "h.tool:copy_items", mock_event.eventtype)
mock_copy.assert_called_with(mock_event.editor)
def test_paste_items(self, mocker):
"""Test when doing a h.tool:paste_items event."""
mock_is = mocker.patch("ht.ui.nodegraph.is_houdini_paste_event", return_value=False)
mock_set = mocker.patch("nodegraphdisplay.setKeyPrompt", side_effect=(False, True))
mock_copy = mocker.patch("ht.ui.paste.copy_items_from_graph")
mock_paste = mocker.patch("ht.ui.paste.paste_items_to_graph")
mock_event = mocker.MagicMock(spec=KeyboardEvent)
mock_event.eventtype = "keyhit"
mock_pending = mocker.MagicMock(spec=list)
result = nodegraphhooks.createEventHandler(mock_event, mock_pending)
assert result == mock_paste.return_value
mock_is.assert_called_with(mock_event)
mock_set.assert_has_calls(
[
mocker.call(mock_event.editor, mock_event.key, "h.tool:copy_items", mock_event.eventtype),
mocker.call(mock_event.editor, mock_event.key, "h.tool:paste_items", mock_event.eventtype),
]
)
mock_copy.assert_not_called()
mock_paste.assert_called_with(mock_event.eventtype, mock_event.editor, mock_event)
def test_other_key_hit(self, mocker):
"""Test when the event is something we don't care about."""
mock_is = mocker.patch("ht.ui.nodegraph.is_houdini_paste_event", return_value=False)
mock_set = mocker.patch("nodegraphdisplay.setKeyPrompt", return_value=False)
mock_event = mocker.MagicMock(spec=KeyboardEvent)
mock_event.eventtype = "keyhit"
mock_pending = mocker.MagicMock(spec=list)
result = nodegraphhooks.createEventHandler(mock_event, mock_pending)
assert result == (None, False)
mock_is.assert_called_with(mock_event)
mock_set.assert_has_calls(
[
mocker.call(mock_event.editor, mock_event.key, "h.tool:copy_items", mock_event.eventtype),
mocker.call(mock_event.editor, mock_event.key, "h.tool:paste_items", mock_event.eventtype),
]
)
def test_non_keyhit(self, mocker):
"""Test when the eventtype is not a valid type."""
mock_is = mocker.patch("ht.ui.nodegraph.is_houdini_paste_event", return_value=False)
mock_event = mocker.MagicMock(spec=KeyboardEvent)
mock_pending = mocker.MagicMock(spec=list)
result = nodegraphhooks.createEventHandler(mock_event, mock_pending)
assert result == (None, False)
mock_is.assert_not_called()
def test_non_keyboard(self, mocker):
"""Test when the event is not a KeyboardEvent."""
mock_is = mocker.patch("ht.ui.nodegraph.is_houdini_paste_event", return_value=False)
mock_event = mocker.MagicMock()
mock_pending = mocker.MagicMock(spec=list)
result = nodegraphhooks.createEventHandler(mock_event, mock_pending)
assert result == (None, False)
mock_is.assert_not_called()
|
core/polyaxon/polyflow/run/patch.py | admariner/polyaxon | 3,200 | 11143584 | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
from marshmallow import ValidationError
from polyaxon.polyflow.run.cleaner import V1CleanerJob
from polyaxon.polyflow.run.dag import V1Dag
from polyaxon.polyflow.run.dask import V1Dask
from polyaxon.polyflow.run.job import V1Job
from polyaxon.polyflow.run.kinds import V1RunKind
from polyaxon.polyflow.run.kubeflow.mpi_job import V1MPIJob
from polyaxon.polyflow.run.kubeflow.pytorch_job import V1PytorchJob
from polyaxon.polyflow.run.kubeflow.replica import V1KFReplica
from polyaxon.polyflow.run.kubeflow.tf_job import V1TFJob
from polyaxon.polyflow.run.notifier import V1NotifierJob
from polyaxon.polyflow.run.service import V1Service
from polyaxon.polyflow.run.spark.replica import V1SparkReplica
from polyaxon.polyflow.run.spark.spark import V1Spark
from polyaxon.polyflow.run.tuner import V1TunerJob
def validate_run_patch(run_patch: Dict, kind: V1RunKind.allowable_values):
if kind == V1RunKind.JOB:
patch = V1Job.from_dict(run_patch)
elif kind == V1RunKind.SERVICE:
patch = V1Service.from_dict(run_patch)
elif kind == V1RunKind.DAG:
patch = V1Dag.from_dict(run_patch)
elif kind == V1RunKind.MPIJOB:
try:
patch = V1MPIJob.from_dict(run_patch)
except ValidationError:
patch = V1KFReplica.from_dict(run_patch)
elif kind == V1RunKind.PYTORCHJOB:
try:
patch = V1PytorchJob.from_dict(run_patch)
except ValidationError:
patch = V1KFReplica.from_dict(run_patch)
elif kind == V1RunKind.TFJOB:
try:
patch = V1TFJob.from_dict(run_patch)
except ValidationError:
patch = V1KFReplica.from_dict(run_patch)
elif kind == V1RunKind.SPARK:
try:
patch = V1Spark.from_dict(run_patch)
except ValidationError:
patch = V1SparkReplica.from_dict(run_patch)
elif kind == V1RunKind.DASK:
patch = V1Dask.from_dict(run_patch)
elif kind == V1RunKind.NOTIFIER:
patch = V1NotifierJob.from_dict(run_patch)
elif kind == V1RunKind.TUNER:
patch = V1TunerJob.from_dict(run_patch)
elif kind == V1RunKind.CLEANER:
patch = V1CleanerJob.from_dict(run_patch)
else:
raise ValidationError("runPatch cannot be validate without a supported kind.")
return patch
|
tests/test_search.py | nateprewitt/jmespath.py | 1,549 | 11143603 | import sys
import decimal
from tests import unittest, OrderedDict
import jmespath
import jmespath.functions
class TestSearchOptions(unittest.TestCase):
def test_can_provide_dict_cls(self):
result = jmespath.search(
'{a: a, b: b, c: c}.*',
{'c': 'c', 'b': 'b', 'a': 'a', 'd': 'd'},
options=jmespath.Options(dict_cls=OrderedDict))
self.assertEqual(result, ['a', 'b', 'c'])
def test_can_provide_custom_functions(self):
class CustomFunctions(jmespath.functions.Functions):
@jmespath.functions.signature(
{'types': ['number']},
{'types': ['number']})
def _func_custom_add(self, x, y):
return x + y
@jmespath.functions.signature(
{'types': ['number']},
{'types': ['number']})
def _func_my_subtract(self, x, y):
return x - y
options = jmespath.Options(custom_functions=CustomFunctions())
self.assertEqual(
jmespath.search('custom_add(`1`, `2`)', {}, options=options),
3
)
self.assertEqual(
jmespath.search('my_subtract(`10`, `3`)', {}, options=options),
7
)
# Should still be able to use the original functions without
# any interference from the CustomFunctions class.
self.assertEqual(
jmespath.search('length(`[1, 2]`)', {}), 2
)
class TestPythonSpecificCases(unittest.TestCase):
def test_can_compare_strings(self):
# This is python specific behavior that's not in the official spec
# yet, but this was regression from 0.9.0 so it's been added back.
self.assertTrue(jmespath.search('a < b', {'a': '2016', 'b': '2017'}))
@unittest.skipIf(not hasattr(sys, 'maxint'), 'Test requires long() type')
def test_can_handle_long_ints(self):
result = sys.maxint + 1
self.assertEqual(jmespath.search('[?a >= `1`].a', [{'a': result}]),
[result])
def test_can_handle_decimals_as_numeric_type(self):
result = decimal.Decimal('3')
self.assertEqual(jmespath.search('[?a >= `1`].a', [{'a': result}]),
[result])
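# Editor's note, a minimal reference for the plain search API used throughout these tests:
#
#   jmespath.search('foo.bar', {'foo': {'bar': 'baz'}})   # -> 'baz'
#
# Custom functions only come into play when an Options object is passed, as exercised above.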
|
benchmarks/operator_benchmark/pt/qtensor_method_test.py | jsun94/nimble | 206 | 11143606 | import operator_benchmark as op_bench
import torch
# Configs for pointwise and reduction unary ops
qmethods_configs_short = op_bench.config_list(
attr_names=['M', 'N'],
attrs=[
[32, 32],
],
cross_product_configs={
'dtype': [torch.quint8],
'contig': [False, True],
},
tags=['short']
)
qmethods_configs_long = op_bench.cross_product_configs(
M=[256, 1024],
N=[256, 1024],
dtype=[torch.qint8, torch.qint32],
contig=[False, True],
tags=['long']
)
qmethods_tensor_input_list = op_bench.op_list(
attr_names=['op_name', 'op_func'],
attrs=[
['q_copy', 'copy_'],
],
)
class _QMethodBenchmarkBase(op_bench.TorchBenchmarkBase):
def init(self, M, N, dtype, contig, op_func):
f_input = torch.rand(M, N)
scale = 1.0
zero_point = 0
self.q_input = torch.quantize_per_tensor(f_input, scale=scale,
zero_point=zero_point,
dtype=dtype)
if not contig:
permute_dims = list(range(self.q_input.ndim))[::-1]
self.q_input = self.q_input.permute(permute_dims)
self.op_func = op_func
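# Editor's note: quantize_per_tensor maps each float x to round(x / scale) + zero_point,
# clamped to the dtype's representable range, so with scale=1.0 and zero_point=0 the
# benchmark inputs are simply rounded (and clamped) copies of the random floats above.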
class QMethodTensorInputBenchmark(_QMethodBenchmarkBase):
def forward(self):
getattr(self.q_input, self.op_func)(self.q_input)
class QMethodNoInputBenchmark(_QMethodBenchmarkBase):
def forward(self):
getattr(self.q_input, self.op_func)()
op_bench.generate_pt_tests_from_op_list(
qmethods_tensor_input_list,
qmethods_configs_short + qmethods_configs_long,
QMethodTensorInputBenchmark
)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
|
convokit/prompt_types/promptTypes.py | Ap1075/Cornell-Conversational-Analysis-Toolkit | 371 | 11143635 | import numpy as np
import pandas as pd
import os
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import normalize
from sklearn.decomposition import TruncatedSVD
from sklearn.cluster import KMeans
import joblib
from convokit.transformer import Transformer
class PromptTypes(Transformer):
"""
Model that infers a vector representation of utterances in terms of the responses that similar utterances tend to
prompt, as well as types of rhetorical intentions encapsulated by utterances in a corpus, in terms of their
anticipated responses (operationalized as k-means clusters of vectors).
Under the surface, the model takes as input pairs of prompts and responses during the fit step. In this stage the
following subcomponents are involved:
1. a prompt embedding model that will learn the vector representations;
2. a prompt type model that learns a clustering of these representations.
The model can transform individual (unpaired) utterances in the transform step. While the focus is on representing
properties of prompts, as a side-effect the model can also compute representations that encapsulate properties of
responses and assign responses to prompt types (as "typical responses" to the prompts in that type).
Internally, the model contains the following elements:
* prompt_embedding_model: stores models that compute the vector representations. includes tf-idf models that convert the prompt and response input to term document matrices, an SVD model that produces a low-dimensional representation of responses and prompts, and vector representations of prompt and response terms
* type_models: stores kmeans models along with type assignments of prompt and response terms
* train_results: stores the vector representations of the corpus used to train the model in the fit step
* train_types: stores the type assignments of the corpus used in the fit step
The transformer will output several attributes of an utterance (names prefixed with <output_field>__). If the utterance is a prompt (in the default case, if it has a response), then the following will be outputted.
* prompt_repr: a vector representation of the utterance (stored as a corpus-wide matrix, or in the metadata of an individual utterance if `transform_utterance` is called)
* prompt_dists.<number of types>: a vector storing the distance between the utterance vector and the centroid of each k-means cluster (stored as a corpus-wide matrix, or in the metadata of an individual utterance if `transform_utterance` is called)
* prompt_type.<number of types>: the index of the type the utterance is assigned to
* prompt_type_dist.<number of types>: the distance from the vector representation to the centroid of the assigned type
If the utterance is a response to a previous utterance, then the utterance will also be annotated an analogous set of attributes denoting its response representation and type.
For downstream tasks, a reasonable first step is to only look at the prompt-side representations.
For an end-to-end implementation that runs several default values of the parameters, see the `PromptTypeWrapper` module.
:param prompt_field: the name of the attribute of prompts to use as input to fit.
:param reference_field: the name of the attribute of responses to use as input to fit. a reasonable choice is to set to
the same value as prompt_field.
:param output_field: the name of the attribute to write to in the transform step. the transformer outputs several
fields, as listed above.
:param n_types: the number of types to infer. defaults to 8.
:param prompt_transform_field: the name of the attribute of prompts to use as input to transform; defaults to the
same attribute as in fit.
:param reference_transform_field: the name of the attribute of responses to use as input to transform; defaults to the
same attribute as in fit.
:param prompt__tfidf_min_df: the minimum frequency of prompt terms to use. can be specified as a fraction or as an
absolute count, defaults to 100.
:param prompt__tfidf_max_df: the maximum frequency of prompt terms to use. can be specified as a fraction or as an
absolute count, defaults to 0.1. Setting higher is more permissive, but may result in many stopword-like terms
adding noise to the model.
:param reference__tfidf_min_df: the minimum frequency of response terms to use. can be specified as a fraction or as an
absolute count, defaults to 100.
:param reference__tfidf_max_df: the maximum frequency of response terms to use. can be specified as a fraction or as an
absolute count, defaults to 0.1.
:param snip_first_dim: whether or not to remove the first SVD dimension (which may add noise to the model; typically
this reflects frequency rather than any semantic interpretation). defaults to `True`.
:param svd__n_components: the number of SVD dimensions to use, defaults to 25. higher values result in richer
vector representations, perhaps at the cost of the model learning overly-specific types.
:param max_dist: the maximum distance between a vector representation of an utterance and the cluster centroid; a
cluster whose distance to all centroids is above this cutoff will get assigned to a null type, denoted by -1.
Defaults to 0.9.
:param random_state: the random seed to use.
:param verbosity: frequency of status messages.
"""
def __init__(self, prompt_field, reference_field, output_field, n_types=8,
prompt_transform_field=None, reference_transform_field=None,
prompt__tfidf_min_df=100, prompt__tfidf_max_df=.1,
reference__tfidf_min_df=100, reference__tfidf_max_df=.1,
snip_first_dim=True,
svd__n_components=25, max_dist=.9,
random_state=None, verbosity=0):
self.prompt_embedding_model = {}
self.type_models = {}
self.train_results = {}
self.train_types = {}
self.prompt_field = prompt_field
self.reference_field = reference_field
self.prompt_transform_field = prompt_transform_field if prompt_transform_field is not None else self.prompt_field
self.reference_transform_field = reference_transform_field if reference_transform_field is not None else self.reference_field
self.output_field = output_field
self.prompt__tfidf_min_df = prompt__tfidf_min_df
self.prompt__tfidf_max_df = prompt__tfidf_max_df
self.reference__tfidf_min_df = reference__tfidf_min_df
self.reference__tfidf_max_df = reference__tfidf_max_df
self.snip_first_dim = snip_first_dim
self.svd__n_components = svd__n_components
self.default_n_types = n_types
self.random_state = random_state
self.max_dist = max_dist
self.verbosity = verbosity
def fit(self, corpus, y=None, prompt_selector=lambda utt: True, reference_selector=lambda utt: True):
"""
Fits a PromptTypes model for a corpus -- that is, learns latent representations of prompt and response terms, as well as prompt types.
:param corpus: Corpus
:param prompt_selector: a boolean function of signature `filter(utterance)` that determines which
utterances will be considered as prompts in the fit step. defaults to using all utterances which have a response.
:param reference_selector: a boolean function of signature `filter(utterance)` that determines which utterances
will be considered as responses in the fit step. defaults to using all utterances which are responses to a
prompt.
:return: None
"""
self.prompt_selector = prompt_selector
self.reference_selector = reference_selector
_, prompt_input, _, reference_input = self._get_pair_input(corpus, self.prompt_field, self.reference_field,
self.prompt_selector, self.reference_selector)
self.prompt_embedding_model = fit_prompt_embedding_model(prompt_input, reference_input,
self.snip_first_dim, self.prompt__tfidf_min_df, self.prompt__tfidf_max_df,
self.reference__tfidf_min_df, self.reference__tfidf_max_df,
self.svd__n_components, self.random_state, self.verbosity)
self.train_results['prompt_ids'], self.train_results['prompt_vects'],\
self.train_results['reference_ids'], self.train_results['reference_vects'] = self._get_embeddings(corpus, prompt_selector, reference_selector)
self.refit_types(self.default_n_types, self.random_state)
def transform(self, corpus, use_fit_selectors=True, prompt_selector=lambda utt: True, reference_selector=lambda utt: True):
"""
Computes vector representations and prompt type assignments for utterances in a corpus.
:param corpus: Corpus
:param use_fit_selectors: defaults to True, will use the same filters as the fit step to determine which utterances will be considered as prompts and responses in the transform step.
:param prompt_selector: filter that determines which utterances will be considered as prompts in the
transform step. defaults to prompt_selector, the same as is used in fit.
:param reference_selector: filter that determines which utterances will be considered as responses in the
transform step. defaults to reference_selector, the same as is used in fit.
:return: the corpus, with per-utterance representations and type assignments.
"""
if use_fit_selectors:
prompt_selector = self.prompt_selector
reference_selector = self.reference_selector
prompt_ids, prompt_vects, reference_ids, reference_vects = self._get_embeddings(corpus, prompt_selector, reference_selector)
corpus.set_vector_matrix(self.output_field + '__prompt_repr', matrix=prompt_vects, ids=prompt_ids)
corpus.set_vector_matrix(self.output_field + '__reference_repr', matrix=reference_vects, ids=reference_ids)
prompt_df, reference_df = self._get_type_assignments(prompt_ids, prompt_vects, reference_ids, reference_vects)
prompt_dists, prompt_assigns = prompt_df[prompt_df.columns[:-1]].values, prompt_df['type_id'].values
prompt_min_dists = prompt_dists.min(axis=1)
reference_dists, reference_assigns = reference_df[reference_df.columns[:-1]].values, reference_df['type_id'].values
reference_min_dists = reference_dists.min(axis=1)
corpus.set_vector_matrix(self.output_field + '__prompt_dists.%s' % self.default_n_types, ids=prompt_df.index, matrix=prompt_dists,
columns=['type_%d_dist' % x for x in range(prompt_dists.shape[1])])
corpus.set_vector_matrix(self.output_field + '__reference_dists.%s' % self.default_n_types,
ids=reference_df.index, matrix=reference_dists,
columns=['type_%d_dist' % x for x in range(prompt_dists.shape[1])])
for id, assign, dist in zip(prompt_df.index, prompt_assigns, prompt_min_dists):
corpus.get_utterance(id).add_meta(self.output_field + '__prompt_type.%s' % self.default_n_types, assign)
corpus.get_utterance(id).add_meta(self.output_field + '__prompt_type_dist.%s' % self.default_n_types, float(dist))
for id, assign, dist in zip(reference_df.index, reference_assigns, reference_min_dists):
corpus.get_utterance(id).add_meta(self.output_field + '__reference_type.%s' % self.default_n_types, assign)
corpus.get_utterance(id).add_meta(self.output_field + '__reference_type_dist.%s' % self.default_n_types, float(dist))
return corpus
def transform_utterance(self, utterance):
"""
Computes vector representations and prompt type assignments for a single utterance.
:param utterance: the utterance.
:return: the utterance, annotated with representations and type assignments.
"""
# if self.prompt_transform_filter(utterance):
utterance = self._transform_utterance_side(utterance, 'prompt')
# if self.reference_transform_filter(utterance):
utterance = self._transform_utterance_side(utterance, 'reference')
return utterance
def _transform_utterance_side(self, utterance, side):
if side == 'prompt':
input_field = self.prompt_transform_field
elif side == 'reference':
input_field = self.reference_transform_field
utt_id = utterance.id
utt_input = utterance.retrieve_meta(input_field)
if isinstance(utt_input, list):
utt_input = '\n'.join(utt_input)
utt_ids, utt_vects = transform_embeddings(self.prompt_embedding_model, [utt_id], [utt_input], side=side)
assign_df = assign_prompt_types(self.type_models[self.default_n_types], utt_ids, utt_vects, self.max_dist)
vals = assign_df.values[0]
dists = vals[:-1]
min_dist = min(dists)
assign = vals[-1]
utterance.add_meta(self.output_field + '__%s_type.%s' % (side, self.default_n_types), assign)
utterance.add_meta(self.output_field + '__%s_type_dist.%s' % (side, self.default_n_types), float(min_dist))
utterance.add_meta(self.output_field + '__%s_dists.%s' % (side, self.default_n_types), [float(x) for x in dists])
utterance.add_meta(self.output_field + '__%s_repr' % side, [float(x) for x in utt_vects[0]])
return utterance
def refit_types(self, n_types, random_state=None, name=None):
"""
Using the latent representations of prompt terms learned during the initial `fit` call, infers `n_types` prompt types. Permits retraining the clustering model that determines the number of types, on top of the initial model. Calling this *and* updating the `default_n_types` field of the model will result in future `transform` calls assigning utterances to one of `n_types` prompt types.
:param n_types: number of types to learn
:param random_state: random seed
:param name: the name of the new type model. defaults to n_types.
:return: None
"""
if name is None:
key = n_types
else:
key = name
if random_state is None:
random_state = self.random_state
self.type_models[key] = fit_prompt_type_model(self.prompt_embedding_model, n_types, random_state, self.max_dist, self.verbosity)
prompt_df, reference_df = self._get_type_assignments(type_key=key)
self.train_types[key] = {'prompt_df': prompt_df, 'reference_df': reference_df}
def _get_embeddings(self, corpus, prompt_selector, reference_selector):
prompt_ids, prompt_inputs = self._get_input(corpus, self.prompt_transform_field,
prompt_selector)
reference_ids, reference_inputs = self._get_input(corpus, self.reference_transform_field, reference_selector)
prompt_ids, prompt_vects = transform_embeddings(self.prompt_embedding_model,
prompt_ids, prompt_inputs,
side='prompt')
reference_ids, reference_vects = transform_embeddings(self.prompt_embedding_model,
reference_ids, reference_inputs,
side='reference')
return prompt_ids, prompt_vects, reference_ids, reference_vects
def _get_type_assignments(self, prompt_ids=None, prompt_vects=None,
reference_ids=None, reference_vects=None, type_key=None):
if prompt_ids is None:
prompt_ids, prompt_vects, reference_ids, reference_vects = [self.train_results[k] for k in
['prompt_ids', 'prompt_vects', 'reference_ids', 'reference_vects']]
if type_key is None:
type_key = self.default_n_types
prompt_df = assign_prompt_types(self.type_models[type_key], prompt_ids, prompt_vects, self.max_dist)
reference_df = assign_prompt_types(self.type_models[type_key], reference_ids, reference_vects, self.max_dist)
return prompt_df, reference_df
def display_type(self, type_id, corpus=None, type_key=None, k=10):
"""
For a particular prompt type, displays the representative prompt and response terms. Can also display representative prompt and response utterances.
:param type_id: ID of the prompt type to display.
:param corpus: pass in the training corpus to also display representative utterances.
:param type_key: the name of the prompt type clustering model to use. defaults to `n_types` that the model was initialized with, but if `refit_types` is called with different number of types, can be modified to display this updated model as well.
:param k: the number of sample terms (or utterances) to display.
:return: None
"""
if type_key is None:
type_key = self.default_n_types
prompt_df = self.type_models[type_key]['prompt_df']
reference_df = self.type_models[type_key]['reference_df']
top_prompt = prompt_df[prompt_df.type_id == type_id].sort_values(type_id).head(k)
top_ref = reference_df[reference_df.type_id == type_id].sort_values(type_id).head(k)
print('top prompt:')
print(top_prompt)
print('top response:')
print(top_ref)
if corpus is not None:
prompt_df = self.train_types[type_key]['prompt_df']
reference_df = self.train_types[type_key]['reference_df']
top_prompt = prompt_df[prompt_df.type_id == type_id].sort_values(type_id).head(k).index
top_ref = reference_df[reference_df.type_id == type_id].sort_values(type_id).head(k).index
print('top prompts:')
for utt in top_prompt:
print(utt, corpus.get_utterance(utt).text)
print(corpus.get_utterance(utt).retrieve_meta(self.prompt_transform_field))
print()
print('top responses:')
for utt in top_ref:
print(utt, corpus.get_utterance(utt).text)
print(corpus.get_utterance(utt).retrieve_meta(self.reference_transform_field))
print()
def summarize(self, corpus, type_ids=None, type_key=None, k=10):
'''
Displays representative prompt and response terms and utterances for each type learned. A wrapper for `display_type`.
:param corpus: corpus to display utterances for (must have `transform()` called on it)
:param type_ids: ID of the prompt type to display. if None, will display all types.
:param type_key: the name of the prompt type clustering model to use. defaults to `n_types` that the model was initialized with, but if `refit_types` is called with different number of types, can be modified to display this updated model as well.
:param k: the number of sample terms (or utterances) to display.
:return: None
'''
if type_key is None:
type_key = self.default_n_types
n_types = self.type_models[type_key]['km_model'].n_clusters
if type_ids is None:
type_ids = list(range(n_types))
if not isinstance(type_ids, list):
type_ids = [type_ids]
for type_id in type_ids:
print('TYPE', type_id)
self.display_type(type_id, corpus, type_key, k)
print('====')
def dump_model(self, model_dir, type_keys='default', dump_train_corpus=True):
"""
Dumps the model to disk.
:param model_dir: directory to write model to
:param type_keys: if 'default', will only write the type clustering model corresponding to the `n_types` the model was initialized with. if 'all', will write all clustering models that have been trained via calls to `refit_types`. can also take a list of clustering models.
:param dump_train_corpus: whether to also write the representations and type assignments of the training corpus. defaults to True.
:return: None
"""
if self.verbosity > 0:
print('dumping embedding model')
if not os.path.exists(model_dir):
try:
os.mkdir(model_dir)
except:
pass
for k in ['prompt_tfidf_model', 'reference_tfidf_model', 'svd_model']:
joblib.dump(self.prompt_embedding_model[k],
os.path.join(model_dir, k + '.joblib'))
for k in ['U_prompt', 'U_reference']:
np.save(os.path.join(model_dir, k), self.prompt_embedding_model[k])
if dump_train_corpus:
if self.verbosity > 0:
print('dumping training embeddings')
for k in ['prompt_ids', 'prompt_vects', 'reference_ids', 'reference_vects']:
np.save(os.path.join(model_dir, 'train_' + k), self.train_results[k])
if type_keys == 'default':
to_dump = [self.default_n_types]
elif type_keys == 'all':
to_dump = self.type_models.keys()
else:
to_dump = type_keys
for key in to_dump:
if self.verbosity > 0:
print('dumping type model', key)
type_model = self.type_models[key]
joblib.dump(type_model['km_model'], os.path.join(model_dir, 'km_model.%s.joblib' % key))
for k in ['prompt_df', 'reference_df']:
type_model[k].to_csv(os.path.join(model_dir, '%s.%s.tsv' % (k, key)), sep='\t')
if dump_train_corpus:
train_types = self.train_types[key]
for k in ['prompt_df', 'reference_df']:
train_types[k].to_csv(os.path.join(model_dir, 'train_%s.%s.tsv' % (k, key)), sep='\t')
def get_model(self, type_keys='default'):
"""
Returns the model as a dictionary containing:
* embedding_model: stores information pertaining to the vector representations.
* prompt_tfidf_model: sklearn tf-idf model that converts prompt input to term-document matrix
* reference_tfidf_model: tf-idf model that converts response input to term-document matrix
* svd_model: sklearn TruncatedSVD model that produces a low-dimensional representation of responses and prompts
* U_prompt: vector representations of prompt terms
* U_reference: vector representations of response terms
* type_models: a dictionary mapping each type clustering model to:
* km_model: a sklearn KMeans model of the learned types
* prompt_df: distances to cluster centroids, and type assignments, of prompt terms
* reference_df: distances to cluster centroids, and type assignments, of reference terms
:param type_keys: if 'default', will return the type clustering model corresponding to the `n_types` the model was initialized with. if 'all', returns all clustering models that have been trained via calls to `refit_types`. can also take a list of clustering models.
:return: the prompt types model
"""
if type_keys == 'default':
to_get = [self.default_n_types]
elif type_keys == 'all':
to_get = self.type_models.keys()
else:
to_get = type_keys
to_return = {'embedding_model': self.prompt_embedding_model,
'type_models': {k: self.type_models[k] for k in to_get}}
return to_return
def load_model(self, model_dir, type_keys='default', load_train_corpus=True):
"""
Loads the model from disk.
:param model_dir: directory to read model to
:param type_keys: if 'default', will only read the type clustering model corresponding to the `n_types` the model was initialized with. if 'all', will read all clustering models that are available in directory. can also take a list of clustering models.
:param load_train_corpus: whether to also read the representations and type assignments of the training corpus. defaults to True.
:return: None
"""
if self.verbosity > 0:
print('loading embedding model')
for k in ['prompt_tfidf_model', 'reference_tfidf_model', 'svd_model']:
self.prompt_embedding_model[k] = joblib.load(os.path.join(model_dir, k + '.joblib'))
for k in ['U_prompt', 'U_reference']:
self.prompt_embedding_model[k] = np.load(os.path.join(model_dir, k + '.npy'))
if load_train_corpus:
if self.verbosity > 0:
print('loading training embeddings')
for k in ['prompt_ids', 'prompt_vects', 'reference_ids', 'reference_vects']:
self.train_results[k] = np.load(os.path.join(model_dir, 'train_' + k + '.npy'))
if type_keys == 'default':
to_load = [self.default_n_types]
elif type_keys == 'all':
to_load = [x.replace('km_model.','').replace('.joblib','')
for x in os.listdir(model_dir) if x.startswith('km_model')]
else:
to_load = type_keys
for key in to_load:
try:
key = int(key)
except: pass
if self.verbosity > 0:
print('loading type model', key)
self.type_models[key] = {} # this should be an int-ish
self.type_models[key]['km_model'] = joblib.load(
os.path.join(model_dir, 'km_model.%s.joblib' % key))
for k in ['prompt_df', 'reference_df']:
self.type_models[key][k] =\
pd.read_csv(os.path.join(model_dir, '%s.%s.tsv' % (k, key)), sep='\t', index_col=0)
self.type_models[key][k].columns = [int(x) for x in self.type_models[key][k].columns[:-1]]\
+ ['type_id']
if load_train_corpus:
self.train_types[key] = {}
for k in ['prompt_df', 'reference_df']:
self.train_types[key][k] = pd.read_csv(
os.path.join(model_dir, 'train_%s.%s.tsv' % (k, key)), sep='\t', index_col=0
)
self.train_types[key][k].columns = \
[int(x) for x in self.train_types[key][k].columns[:-1]] + ['type_id']
def _get_input(self, corpus, field, filter_fn, check_nonempty=True):
ids = []
inputs = []
for utterance in corpus.iter_utterances():
input = utterance.retrieve_meta(field)
if isinstance(input, list):
input = '\n'.join(input)
if filter_fn(utterance)\
and ((not check_nonempty) or (len(input) > 0)):
ids.append(utterance.id)
inputs.append(input)
return ids, inputs
def _get_pair_input(self, corpus, prompt_field, reference_field,
prompt_selector, reference_selector,
check_nonempty=True):
prompt_ids = []
prompt_utts = []
reference_ids = []
reference_utts = []
for reference_utt in corpus.iter_utterances():
if reference_utt.reply_to is None:
continue
prompt_utt_id = reference_utt.reply_to
try:
prompt_utt = corpus.get_utterance(prompt_utt_id)
except:
continue
if prompt_selector(prompt_utt) \
and reference_selector(reference_utt):
prompt_input = prompt_utt.retrieve_meta(prompt_field)
reference_input = reference_utt.retrieve_meta(reference_field)
if (prompt_input is None) or (reference_input is None):
continue
if isinstance(prompt_input, list):
prompt_input = '\n'.join(prompt_input)
if isinstance(reference_input, list):
reference_input = '\n'.join(reference_input)
if (not check_nonempty) or ((len(prompt_input) > 0) and (len(reference_input) > 0)):
prompt_ids.append(prompt_utt.id)
prompt_utts.append(prompt_input)
reference_ids.append(reference_utt.id)
reference_utts.append(reference_input)
return prompt_ids, prompt_utts, reference_ids, reference_utts
def fit_prompt_embedding_model(prompt_input, reference_input, snip_first_dim=True,
prompt__tfidf_min_df=100, prompt__tfidf_max_df=.1,
reference__tfidf_min_df=100, reference__tfidf_max_df=.1,
svd__n_components=25, random_state=None, verbosity=0):
"""
Standalone function that fits an embedding model given paired prompt and response inputs. See docstring of the `PromptTypes` class for details.
:param prompt_input: list of prompts (represented as space-separated strings of terms)
:param reference_input: list of responses (represented as space-separated strings of terms). note that each entry of reference_input should be a response to the corresponding entry in prompt_input.
:return: prompt embedding model
"""
if verbosity > 0:
print('fitting %d input pairs' % len(prompt_input))
print('fitting reference tfidf model')
reference_tfidf_model = TfidfVectorizer(
min_df=reference__tfidf_min_df,
max_df=reference__tfidf_max_df,
binary=True,
token_pattern=r'(?u)(\S+)'
)
reference_vect = reference_tfidf_model.fit_transform(reference_input)
if verbosity > 0:
print('fitting prompt tfidf model')
prompt_tfidf_model = TfidfVectorizer(
min_df=prompt__tfidf_min_df,
max_df=prompt__tfidf_max_df,
binary=True,
token_pattern=r'(?u)(\S+)'
)
prompt_vect = prompt_tfidf_model.fit_transform(prompt_input)
if verbosity > 0:
print('fitting svd model')
svd_model = TruncatedSVD(n_components=svd__n_components, random_state=random_state, algorithm='arpack')
U_reference = svd_model.fit_transform(normalize(reference_vect.T))
s = svd_model.singular_values_
U_reference /= s
U_prompt = (svd_model.components_ * normalize(prompt_vect, axis=0) / s[:, np.newaxis]).T
if snip_first_dim:
U_prompt = U_prompt[:, 1:]
U_reference = U_reference[:, 1:]
U_prompt_norm = normalize(U_prompt)
U_reference_norm = normalize(U_reference)
return {'prompt_tfidf_model': prompt_tfidf_model, 'reference_tfidf_model': reference_tfidf_model,
'svd_model': svd_model, 'U_prompt': U_prompt_norm, 'U_reference': U_reference_norm}
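# Editor's note on the expected inputs, a schematic sketch only: each entry of
# reference_input is the response paired with the corresponding entry of prompt_input,
# and both are space-separated term strings, e.g.
#
#   model = fit_prompt_embedding_model(prompt_terms, reference_terms)
#
# where prompt_terms and reference_terms are corpus-scale lists (the default min_df/max_df
# cutoffs assume far more than a handful of documents).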
def transform_embeddings(model, ids, input, side='prompt', filter_empty=True):
"""
Standalone function that returns vector representations of input text given a trained PromptTypes prompt_embedding_model. See docstring of `PromptTypes` class for details.
:param model: prompt embedding model
:param ids: ids of input text
:param input: a list where each entry has corresponding id in the ids argument, and is a string of terms corresponding to an utterance.
:param side: whether to return prompt or response embeddings ("prompt" and "reference" respectively); defaults to "prompt"
:param filter_empty: if `True`, will not return embeddings for prompts with no terms.
:return: input IDs `ids`, and corresponding vector representations of input `vect`
"""
tfidf_vects = normalize(model['%s_tfidf_model' % side].transform(input), norm='l1')
mask = np.array(tfidf_vects.sum(axis=1)).flatten() > 0
vects = normalize(tfidf_vects * model['U_%s' % side])
if filter_empty:
ids = np.array(ids)[mask]
vects = vects[mask]
return ids, vects
def fit_prompt_type_model(model, n_types, random_state=None, max_dist=0.9, verbosity=0):
"""
Standalone function that fits a prompt type model given paired prompt and response inputs. See docstring of the `PromptTypes` class for details.
:param model: prompt embedding model (from `fit_prompt_embedding_model()`)
:param n_types: number of prompt types to infer
:return: prompt type model
"""
if verbosity > 0:
print('fitting %d prompt types' % n_types)
km = KMeans(n_clusters=n_types, random_state=random_state)
km.fit(model['U_prompt'])
prompt_dists = km.transform(model['U_prompt'])
prompt_clusters = km.predict(model['U_prompt'])
prompt_clusters[prompt_dists.min(axis=1) >= max_dist] = -1
reference_dists = km.transform(model['U_reference'])
reference_clusters = km.predict(model['U_reference'])
reference_clusters[reference_dists.min(axis=1) >= max_dist] = -1
prompt_df = pd.DataFrame(index=model['prompt_tfidf_model'].get_feature_names(),
data=np.hstack([prompt_dists, prompt_clusters[:,np.newaxis]]),
columns=list(range(n_types)) + ['type_id'])
reference_df = pd.DataFrame(index=model['reference_tfidf_model'].get_feature_names(),
data=np.hstack([reference_dists, reference_clusters[:,np.newaxis]]),
columns=list(range(n_types)) + ['type_id'])
return {'km_model': km,
'prompt_df': prompt_df, 'reference_df': reference_df}
def assign_prompt_types(model, ids, vects, max_dist=0.9):
"""
Standalone function that returns type assignments of input vectors given a trained PromptTypes type model. See docstring of `PromptTypes` class for details.
:param model: prompt type model
:param ids: ids of input vectors
:param vects: input vectors
:return: a dataframe storing cluster centroid distances and the assigned type.
"""
dists = model['km_model'].transform(vects)
clusters = model['km_model'].predict(vects)
dist_mask = dists.min(axis=1) >= max_dist
clusters[dist_mask] = -1
df = pd.DataFrame(index=ids, data=np.hstack([dists,clusters[:,np.newaxis]]),
columns=list(range(dists.shape[1])) + ['type_id'])
return df
|
demos/rpc/jsonrpc_sync_client.py | mstojcevich/cyclone | 254 | 11143638 | #!/usr/bin/env python
# coding: utf-8
#
# Copyright 2010 <NAME>
# based on the original Tornado by Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import urllib
def request(url, func, *args):
req = json.dumps({"method": func, "params": args, "id": 1})
result = urllib.urlopen(url, req).read()
try:
response = json.loads(result)
except:
return "error: %s" % result
else:
return response.get("result", response.get("error"))
url = "http://localhost:8888/jsonrpc"
print "echo:", request(url, "echo", "foo bar")
print "sort:", request(url, "sort", ["foo", "bar"])
print "count:", request(url, "count", ["foo", "bar"])
print "geoip_lookup:", request(url, "geoip_lookup", "google.com")
|
enn/networks/categorical_ensembles.py | MaxGhenis/enn | 130 | 11143643 | # python3
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Implementing some ensembles with categorical outputs.
Next step is to integrate more with the rest of the ENN code.
"""
from typing import Sequence
from enn import base
from enn import utils
from enn.networks import ensembles
from enn.networks import indexers
from enn.networks import priors
import haiku as hk
import jax
import jax.numpy as jnp
class CatOutputWithPrior(base.OutputWithPrior):
"""Categorical outputs with a real-valued prior."""
@property
def preds(self) -> base.Array:
train = jnp.sum(jax.nn.softmax(self.train) * self.extra['atoms'], axis=-1)
return train + jax.lax.stop_gradient(self.prior)
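# Editor's note: `preds` collapses the categorical logits to a scalar by taking the
# softmax-weighted mean of the atom support, e.g. with atoms [-1., 0., 1.] and equal logits
# the categorical part contributes 0., to which the (stop-gradient) prior is then added.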
class CategoricalRegressionMLP(hk.Module):
"""Categorical MLP designed for regression ala MuZero value."""
def __init__(self, output_sizes: Sequence[int], atoms: base.Array):
"""Categorical MLP designed for regression ala MuZero value."""
super().__init__(name='categorical_regression_mlp')
self.dim_out = output_sizes[-1]
self.atoms = jnp.array(atoms)
self.output_sizes = list(output_sizes[:-1]) + [self.dim_out * len(atoms)]
def __call__(self, inputs: base.Array) -> base.Array:
"""Apply MLP and wrap outputs appropriately."""
out = hk.Flatten()(inputs)
out = hk.nets.MLP(self.output_sizes)(out)
return CatOutputWithPrior(
train=jnp.reshape(out, [-1, self.dim_out, len(self.atoms)]),
extra={'atoms': self.atoms},
)
class CatMLPEnsemble(base.EpistemicNetwork):
"""An ensemble of categorical MLP for regression."""
def __init__(self,
output_sizes: Sequence[int],
atoms: base.Array,
num_ensemble: int):
"""An ensemble of categorical MLP for regression."""
net_ctor = lambda: CategoricalRegressionMLP(output_sizes, atoms)
enn = utils.epistemic_network_from_module(
enn_ctor=lambda: ensembles.Ensemble( # pylint: disable=g-long-lambda
[net_ctor() for _ in range(num_ensemble)]),
indexer=indexers.EnsembleIndexer(num_ensemble),
)
super().__init__(enn.apply, enn.init, enn.indexer)
class CatMLPEnsembleGpPrior(base.EpistemicNetwork):
"""An ensemble of categorical MLP with a real-valued GP prior."""
def __init__(self,
output_sizes: Sequence[int],
atoms: base.Array,
input_dim: int,
num_ensemble: int,
num_feat: int,
gamma: priors.GpGamma = 1.,
prior_scale: float = 1,
seed: int = 0):
"""An ensemble of categorical MLP with a real-valued GP prior."""
gp_priors = ensembles.make_random_gp_ensemble_prior_fns(
input_dim, 1, num_feat, gamma, num_ensemble, seed)
enn = priors.EnnWithAdditivePrior(
enn=CatMLPEnsemble(output_sizes, atoms, num_ensemble),
prior_fn=ensembles.wrap_sequence_as_prior(gp_priors),
prior_scale=prior_scale,
)
super().__init__(enn.apply, enn.init, enn.indexer)
class CatMLPEnsembleMlpPrior(base.EpistemicNetwork):
"""An ensemble of categorical MLP with real-valued MLP prior."""
def __init__(self,
output_sizes: Sequence[int],
atoms: base.Array,
dummy_input: base.Array,
num_ensemble: int,
prior_scale: float = 1,
seed: int = 0):
"""An ensemble of categorical MLP with real-valued MLP prior."""
mlp_priors = ensembles.make_mlp_ensemble_prior_fns(
output_sizes, dummy_input, num_ensemble, seed)
enn = priors.EnnWithAdditivePrior(
enn=CatMLPEnsemble(output_sizes, atoms, num_ensemble),
prior_fn=ensembles.wrap_sequence_as_prior(mlp_priors),
prior_scale=prior_scale,
)
super().__init__(enn.apply, enn.init, enn.indexer)
|
pkgs/os-specific/linux/kernel/hardened/update.py | siddhantk232/nixpkgs | 9,717 | 11143646 | #! /usr/bin/env nix-shell
#! nix-shell -i python -p "python38.withPackages (ps: [ps.PyGithub])" git gnupg
# This is automatically called by ../update.sh.
from __future__ import annotations
import json
import os
import re
import subprocess
import sys
from dataclasses import dataclass
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import (
Dict,
Iterator,
List,
Optional,
Sequence,
Tuple,
TypedDict,
Union,
)
from github import Github
from github.GitRelease import GitRelease
VersionComponent = Union[int, str]
Version = List[VersionComponent]
PatchData = TypedDict("PatchData", {"name": str, "url": str, "sha256": str, "extra": str})
Patch = TypedDict("Patch", {
"patch": PatchData,
"version": str,
"sha256": str,
})
@dataclass
class ReleaseInfo:
version: Version
release: GitRelease
HERE = Path(__file__).resolve().parent
NIXPKGS_KERNEL_PATH = HERE.parent
NIXPKGS_PATH = HERE.parents[4]
HARDENED_GITHUB_REPO = "anthraxx/linux-hardened"
HARDENED_TRUSTED_KEY = HERE / "anthraxx.asc"
HARDENED_PATCHES_PATH = HERE / "patches.json"
MIN_KERNEL_VERSION: Version = [4, 14]
def run(*args: Union[str, Path]) -> subprocess.CompletedProcess[bytes]:
try:
return subprocess.run(
args,
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding="utf-8",
)
except subprocess.CalledProcessError as err:
print(
f"error: `{err.cmd}` failed unexpectedly\n"
f"status code: {err.returncode}\n"
f"stdout:\n{err.stdout.strip()}\n"
f"stderr:\n{err.stderr.strip()}",
file=sys.stderr,
)
sys.exit(1)
def nix_prefetch_url(url: str) -> Tuple[str, Path]:
output = run("nix-prefetch-url", "--print-path", url).stdout
sha256, path = output.strip().split("\n")
return sha256, Path(path)
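# Editor's note: with --print-path, nix-prefetch-url prints the sha256 hash on one line and
# the resulting /nix/store path on the next, which is exactly what the split("\n") above
# unpacks.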
def verify_openpgp_signature(
*, name: str, trusted_key: Path, sig_path: Path, data_path: Path,
) -> bool:
with TemporaryDirectory(suffix=".nixpkgs-gnupg-home") as gnupg_home_str:
gnupg_home = Path(gnupg_home_str)
run("gpg", "--homedir", gnupg_home, "--import", trusted_key)
keyring = gnupg_home / "pubring.kbx"
try:
subprocess.run(
("gpgv", "--keyring", keyring, sig_path, data_path),
check=True,
stderr=subprocess.PIPE,
encoding="utf-8",
)
return True
except subprocess.CalledProcessError as err:
print(
f"error: signature for {name} failed to verify!",
file=sys.stderr,
)
print(err.stderr, file=sys.stderr, end="")
return False
def fetch_patch(*, name: str, release_info: ReleaseInfo) -> Optional[Patch]:
release = release_info.release
extra = f'-{release_info.version[-1]}'
def find_asset(filename: str) -> str:
try:
it: Iterator[str] = (
asset.browser_download_url
for asset in release.get_assets()
if asset.name == filename
)
return next(it)
except StopIteration:
raise KeyError(filename)
patch_filename = f"{name}.patch"
try:
patch_url = find_asset(patch_filename)
sig_url = find_asset(patch_filename + ".sig")
except KeyError:
print(f"error: {patch_filename}{{,.sig}} not present", file=sys.stderr)
return None
sha256, patch_path = nix_prefetch_url(patch_url)
_, sig_path = nix_prefetch_url(sig_url)
sig_ok = verify_openpgp_signature(
name=name,
trusted_key=HARDENED_TRUSTED_KEY,
sig_path=sig_path,
data_path=patch_path,
)
if not sig_ok:
return None
kernel_ver = release_info.release.tag_name.replace("-hardened1", "")
major = kernel_ver.split('.')[0]
sha256_kernel, _ = nix_prefetch_url(f"mirror://kernel/linux/kernel/v{major}.x/linux-{kernel_ver}.tar.xz")
return Patch(
patch=PatchData(name=patch_filename, url=patch_url, sha256=sha256, extra=extra),
version=kernel_ver,
sha256=sha256_kernel
)
def parse_version(version_str: str) -> Version:
version: Version = []
for component in re.split(r'\.|\-', version_str):
try:
version.append(int(component))
except ValueError:
version.append(component)
return version
def version_string(version: Version) -> str:
return ".".join(str(component) for component in version)
def major_kernel_version_key(kernel_version: Version) -> str:
return version_string(kernel_version[:-1])
def commit_patches(*, kernel_key: str, message: str) -> None:
new_patches_path = HARDENED_PATCHES_PATH.with_suffix(".new")
with open(new_patches_path, "w") as new_patches_file:
json.dump(patches, new_patches_file, indent=4, sort_keys=True)
new_patches_file.write("\n")
os.rename(new_patches_path, HARDENED_PATCHES_PATH)
message = f"linux/hardened/patches/{kernel_key}: {message}"
print(message)
if os.environ.get("COMMIT"):
run(
"git",
"-C",
NIXPKGS_PATH,
"commit",
f"--message={message}",
HARDENED_PATCHES_PATH,
)
# Load the existing patches.
patches: Dict[str, Patch]
with open(HARDENED_PATCHES_PATH) as patches_file:
patches = json.load(patches_file)
# Get the set of currently packaged kernel versions.
kernel_versions = {}
for filename in os.listdir(NIXPKGS_KERNEL_PATH):
filename_match = re.fullmatch(r"linux-(\d+)\.(\d+)\.nix", filename)
if filename_match:
nix_version_expr = f"""
with import {NIXPKGS_PATH} {{}};
(callPackage {NIXPKGS_KERNEL_PATH / filename} {{}}).version
"""
kernel_version_json = run(
"nix-instantiate", "--eval", "--json", "--expr", nix_version_expr,
).stdout
kernel_version = parse_version(json.loads(kernel_version_json))
if kernel_version < MIN_KERNEL_VERSION:
continue
kernel_key = major_kernel_version_key(kernel_version)
kernel_versions[kernel_key] = kernel_version
# Remove patches for unpackaged kernel versions.
for kernel_key in sorted(patches.keys() - kernel_versions.keys()):
commit_patches(kernel_key=kernel_key, message="remove")
g = Github(os.environ.get("GITHUB_TOKEN"))
repo = g.get_repo(HARDENED_GITHUB_REPO)
failures = False
# Match each kernel version with the best patch version.
releases = {}
for release in repo.get_releases():
version = parse_version(release.tag_name)
# needs to look like e.g. 5.6.3-hardened1
if len(version) < 4:
continue
if not (isinstance(version[-2], int)):
continue
kernel_version = version[:-1]
kernel_key = major_kernel_version_key(kernel_version)
try:
packaged_kernel_version = kernel_versions[kernel_key]
except KeyError:
continue
release_info = ReleaseInfo(version=version, release=release)
if kernel_version == packaged_kernel_version:
releases[kernel_key] = release_info
else:
# Fall back to the latest patch for this major kernel version,
# skipping patches for kernels newer than the packaged one.
if kernel_version > packaged_kernel_version:
continue
elif (
kernel_key not in releases or releases[kernel_key].version < version
):
releases[kernel_key] = release_info
# Update hardened-patches.json for each release.
for kernel_key in sorted(releases.keys()):
release_info = releases[kernel_key]
release = release_info.release
version = release_info.version
version_str = release.tag_name
name = f"linux-hardened-{version_str}"
old_version: Optional[Version] = None
old_version_str: Optional[str] = None
update: bool
try:
old_filename = patches[kernel_key]["patch"]["name"]
old_version_str = old_filename.replace("linux-hardened-", "").replace(
".patch", ""
)
old_version = parse_version(old_version_str)
update = old_version < version
except KeyError:
update = True
if update:
patch = fetch_patch(name=name, release_info=release_info)
if patch is None:
failures = True
else:
patches[kernel_key] = patch
if old_version:
message = f"{old_version_str} -> {version_str}"
else:
message = f"init at {version_str}"
commit_patches(kernel_key=kernel_key, message=message)
missing_kernel_versions = kernel_versions.keys() - patches.keys()
if missing_kernel_versions:
print(
f"warning: no patches for kernel versions "
+ ", ".join(missing_kernel_versions),
file=sys.stderr,
)
if failures:
sys.exit(1)
|
sofi/ui/tablehead.py | screamingskulls/sofi | 402 | 11143669 | from .element import Element
from .tablerow import TableRow
from .tablecell import TableCell
class TableHead(Element):
"""Implements the <thead> tag"""
def __init__(self, cl=None, ident=None, style=None, attrs=None):
super().__init__(cl=cl, ident=ident, style=style, attrs=attrs)
def addrow(self, *args, **kwargs):
tr = TableRow(**kwargs)
for item in args:
if isinstance(item, Element):
tr.addelement(item)
else:
tr.addelement(TableCell(item, head=True))
self.addelement(tr)
return tr
def __repr__(self):
return "<TableHead>"
def __str__(self):
output = [ "<thead" ]
if self.ident:
output.append(" id=\"")
output.append(self.ident)
output.append("\"")
if self.cl:
output.append(" class=\"")
output.append(self.cl)
output.append("\"")
if self.style:
output.append(" style=\"")
output.append(self.style)
output.append("\"")
if self.attrs:
for k in self.attrs.keys():
output.append(' ' + k + '="' + self.attrs[k] + '"')
output.append(">")
for child in self._children:
output.append(str(child))
output.append("</thead>")
return "".join(output)
|
consolemenu/console_menu.py | ahauser31/console-menu | 230 | 11143692 | from __future__ import print_function
import platform
import threading
import os
from consolemenu.menu_formatter import MenuFormatBuilder
from consolemenu.screen import Screen
class ConsoleMenu(object):
"""
A class that displays a menu and allows the user to select an option.
Args:
title (str): The title of the menu, or a method reference that returns a string.
subtitle (str): The subtitle of the menu, or a method reference that returns a string.
screen (:obj:`consolemenu.screen.Screen`): The screen object associated with this menu.
formatter (:obj:`MenuFormatBuilder`): The MenuFormatBuilder instance used to format this menu.
prologue_text (str): Text or method reference to include in the "prologue" section of the menu.
epilogue_text (str): Text or method reference to include in the "epilogue" section of the menu.
show_exit_option (bool): Specifies whether this menu should show an exit item by default. Defaults to True.
Can be overridden when the menu is started.
exit_option_text (str): Text for the Exit menu item. Defaults to 'Exit'.
Attributes:
cls.currently_active_menu (:obj:`ConsoleMenu`): Class variable that holds the currently active menu or None
if no menu is currently active (e.g. when switching between menus)
items (:obj:`list` of :obj:`MenuItem`): The list of MenuItems that the menu will display
parent (:obj:`ConsoleMenu`): The parent of this menu
previous_active_menu (:obj:`ConsoleMenu`): the previously active menu to be restored into the class's
currently active menu
current_option (int): The currently highlighted menu option
selected_option (int): The option that the user has most recently selected
"""
currently_active_menu = None
def __init__(self, title=None, subtitle=None, screen=None, formatter=None,
prologue_text=None, epilogue_text=None,
show_exit_option=True, exit_option_text='Exit'):
if screen is None:
screen = Screen()
self.screen = screen
if formatter is None:
formatter = MenuFormatBuilder()
self.formatter = formatter
self.title = title
self.subtitle = subtitle
self.prologue_text = prologue_text
self.epilogue_text = epilogue_text
self.highlight = None
self.normal = None
self.show_exit_option = show_exit_option
self.items = list()
self.parent = None
self.exit_item = ExitItem(menu=self, text=exit_option_text)
self.current_option = 0
self.selected_option = -1
self.returned_value = None
self.should_exit = False
self.previous_active_menu = None
self._main_thread = None
self._running = threading.Event()
def __repr__(self):
return "%s: %s. %d items" % (self.get_title(), self.get_subtitle(), len(self.items))
@property
def current_item(self):
"""
:obj:`consolemenu.items.MenuItem`: The item corresponding to the menu option that is currently highlighted,
or None.
"""
if self.items:
return self.items[self.current_option]
else:
return None
@property
def selected_item(self):
"""
:obj:`consolemenu.items.MenuItem`: The item in :attr:`items` that the user most recently selected, or None.
"""
if self.items and self.selected_option != -1:
return self.items[self.current_option]
else:
return None
def append_item(self, item):
"""
Add an item to the end of the menu before the exit item.
Args:
item (MenuItem): The item to be added.
"""
did_remove = self.remove_exit()
item.menu = self
self.items.append(item)
if did_remove:
self.add_exit()
def remove_item(self, item):
"""
Remove the specified item from the menu.
Args:
item (MenuItem): the item to be removed.
Returns:
bool: True if the item was removed; False otherwise.
"""
for idx, _item in enumerate(self.items):
if item == _item:
del self.items[idx]
return True
return False
def add_exit(self):
"""
Add the exit item if necessary. Used to make sure there aren't multiple exit items.
Returns:
bool: True if item needed to be added, False otherwise.
"""
if not self.items or self.items[-1] is not self.exit_item:
self.items.append(self.exit_item)
return True
return False
def remove_exit(self):
"""
Remove the exit item if necessary. Used to make sure we only remove the exit item, not something else.
Returns:
bool: True if item needed to be removed, False otherwise.
"""
if self.items:
if self.items[-1] is self.exit_item:
del self.items[-1]
return True
return False
def is_selected_item_exit(self):
"""
Checks to determine if the currently selected item is the Exit Menu item.
Returns:
bool: True if the currently selected item is the Exit Menu item; False otherwise.
"""
return self.selected_item and self.selected_item is self.exit_item
def _wrap_start(self):
self._main_loop()
ConsoleMenu.currently_active_menu = None
self.clear_screen()
ConsoleMenu.currently_active_menu = self.previous_active_menu
def start(self, show_exit_option=None):
"""
Start the menu in a new thread and allow the user to interact with it.
The thread is a daemon, so :meth:`join()<consolemenu.ConsoleMenu.join>` should be called if there's a
possibility that the main thread will exit before the menu is done
Args:
show_exit_option (bool): Specify whether the exit item should be shown, defaults to the value
set in the constructor
"""
self.previous_active_menu = ConsoleMenu.currently_active_menu
ConsoleMenu.currently_active_menu = None
self.should_exit = False
if show_exit_option is None:
show_exit_option = self.show_exit_option
if show_exit_option:
self.add_exit()
else:
self.remove_exit()
try:
self._main_thread = threading.Thread(target=self._wrap_start, daemon=True)
except TypeError:
self._main_thread = threading.Thread(target=self._wrap_start)
self._main_thread.daemon = True
self._main_thread.start()
def show(self, show_exit_option=None):
"""
Calls start and then immediately joins.
Args:
show_exit_option (bool): Specify whether the exit item should be shown, defaults to the value set
in the constructor
"""
self.start(show_exit_option)
self.join()
def _main_loop(self):
self._set_up_colors()
ConsoleMenu.currently_active_menu = self
self._running.set()
while self._running.wait() is not False and not self.should_exit:
self.screen.clear()
self.draw()
self.process_user_input()
def draw(self):
"""
Refresh the screen and redraw the menu. Should be called whenever something changes that needs to be redrawn.
"""
self.screen.printf(self.formatter.format(title=self.get_title(),
subtitle=self.get_subtitle(),
items=self.items,
prologue_text=self.get_prologue_text(),
epilogue_text=self.get_epilogue_text()))
def is_running(self):
"""
Check if the menu has been started and is not paused.
Returns:
bool: True if the menu is started and hasn't been paused; False otherwise.
"""
return self._running.is_set()
def wait_for_start(self, timeout=None):
"""
Block until the menu is started.
Args:
timeout: How long to wait before timing out.
Returns:
bool: False if timeout is given and operation times out, True otherwise. None before Python 2.7.
"""
return self._running.wait(timeout)
def is_alive(self):
"""
        Check whether the thread is still alive.
Returns:
bool: True if the thread is still alive; False otherwise.
"""
return self._main_thread.is_alive()
def pause(self):
"""
Temporarily pause the menu until resume is called.
"""
self._running.clear()
def resume(self):
"""
Sets the currently active menu to this one and resumes it.
"""
ConsoleMenu.currently_active_menu = self
self._running.set()
def join(self, timeout=None):
"""
Should be called at some point after :meth:`start()<consolemenu.ConsoleMenu.start>` to block until
the menu exits.
Args:
timeout (Number): How long to wait before timing out.
"""
self._main_thread.join(timeout=timeout)
def get_input(self):
"""
Can be overridden to change the input method.
Called in :meth:`process_user_input()<consolemenu.ConsoleMenu.process_user_input>`
:return: the ordinal value of a single character
:rtype: int
"""
return self.screen.input()
def process_user_input(self):
"""
Gets the next single character and decides what to do with it
"""
user_input = self.get_input()
try:
num = int(user_input)
except Exception:
return
if 0 < num < len(self.items) + 1:
self.current_option = num - 1
self.select()
return user_input
def go_to(self, option):
"""
Go to the option entered by the user as a number
:param option: the option to go to
:type option: int
"""
self.current_option = option
self.draw()
def go_down(self):
"""
Go down one, wrap to beginning if necessary
"""
if self.current_option < len(self.items) - 1:
self.current_option += 1
else:
self.current_option = 0
self.draw()
def go_up(self):
"""
Go up one, wrap to end if necessary
"""
if self.current_option > 0:
self.current_option += -1
else:
self.current_option = len(self.items) - 1
self.draw()
def select(self):
"""
Select the current item and run it
"""
self.selected_option = self.current_option
self.selected_item.set_up()
self.selected_item.action()
self.selected_item.clean_up()
self.returned_value = self.selected_item.get_return()
self.should_exit = self.selected_item.should_exit
def exit(self):
"""
Signal the menu to exit, then block until it's done cleaning up
"""
self.should_exit = True
self.join()
def _set_up_colors(self):
# TODO add color support
# curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_WHITE)
# self.highlight = curses.color_pair(1)
# self.normal = curses.A_NORMAL
pass
def clear_screen(self):
"""
Clear the screen belonging to this menu
"""
self.screen.clear()
# Getters to get text in case method reference
def get_title(self):
return self.title() if callable(self.title) else self.title
def get_subtitle(self):
return self.subtitle() if callable(self.subtitle) else self.subtitle
def get_prologue_text(self):
return self.prologue_text() if callable(self.prologue_text) else self.prologue_text
def get_epilogue_text(self):
return self.epilogue_text() if callable(self.epilogue_text) else self.epilogue_text
class MenuItem(object):
"""
A generic menu item
"""
def __init__(self, text, menu=None, should_exit=False):
"""
:ivar str text: The text shown for this menu item
:ivar ConsoleMenu menu: The menu to which this item belongs
:ivar bool should_exit: Whether the menu should exit once this item's action is done
"""
self.text = text
self.menu = menu
self.should_exit = should_exit
def __str__(self):
return "%s %s" % (self.menu.get_title(), self.get_text())
def show(self, index):
"""
How this item should be displayed in the menu. Can be overridden, but should keep the same signature.
Default is:
1 - Item 1
2 - Another Item
:param int index: The index of the item in the items list of the menu
:return: The representation of the item to be shown in a menu
:rtype: str
"""
return "%2d - %s" % (index + 1, self.get_text())
def set_up(self):
"""
Override to add any setup actions necessary for the item
"""
pass
def action(self):
"""
Override to carry out the main action for this item.
"""
pass
def clean_up(self):
"""
Override to add any cleanup actions necessary for the item
"""
pass
def get_return(self):
"""
Override to change what the item returns.
Otherwise just returns the same value the last selected item did.
"""
return self.menu.returned_value
def __eq__(self, o):
return self.text == o.text and self.menu == o.menu and self.should_exit == o.should_exit
# Getters to get text in case method reference
def get_text(self):
return self.text() if callable(self.text) else self.text
class ExitItem(MenuItem):
"""
Used to exit the current menu. Handled by :class:`consolemenu.ConsoleMenu`
"""
def __init__(self, text="Exit", menu=None):
super(ExitItem, self).__init__(text=text, menu=menu, should_exit=True)
def show(self, index):
"""
ExitItem overrides this method to display appropriate Exit or Return text.
"""
# If we have a parent menu, and no overriding exit text was specified,
# change Exit text to "Return to {Parent Menu Title}"
if self.menu and self.menu.parent and self.get_text() == 'Exit':
self.text = "Return to %s" % self.menu.parent.get_title()
return super(ExitItem, self).show(index)
def clear_terminal():
"""
Call the platform specific function to clear the terminal: cls on windows, reset otherwise
"""
if platform.system().lower() == "windows":
os.system('cls')
else:
os.system('reset')
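# Illustrative usage sketch (not part of the upstream file): wire the classes defined
# above into a tiny interactive menu. It assumes the consolemenu package is importable
# so the Screen/MenuFormatBuilder imports at the top of this module resolve.
if __name__ == "__main__":
    demo_menu = ConsoleMenu("Demo menu", "Pick an option")
    demo_menu.append_item(MenuItem("A plain item that simply returns"))
    demo_menu.show()  # start() in a daemon thread, then join() until the user exits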
|
guppy/heapy/RM.py | odidev/guppy3 | 251 | 11143742 | # Start a remote monitoring enabling thread,
# unless I am that thread myself.
from guppy.heapy.Remote import on
on()
|
src/bronkerbosch.py | baoe/FLAS | 146 | 11143747 | <gh_stars>100-1000
#!/usr/bin/env python
#by bao: to let runFLAS.py use this script directly
# Finds all maximal cliques in a graph using the Bron-Kerbosch algorithm. The input graph here is
# in the adjacency list format, a dict with vertexes as keys and lists of their neighbors as values.
# https://en.wikipedia.org/wiki/Bron-Kerbosch_algorithm
from collections import defaultdict
import string
from sys import argv
#######################################
'''
graphh = {
1:[2,5],
2:[1,3,5],
3:[2,4],
4:[3,5,6],
5:[1,2,4],
6:[4]
}
'''
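# For reference (illustrative): on the sample graph above, the maximal cliques would be
# {1, 2, 5}, {2, 3}, {3, 4}, {4, 5} and {4, 6}, which is what find_cliques() below is
# expected to report if fed that dict instead of the file-based graph.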
#######################################
fin = open(argv[1],'r')
graph = defaultdict(list)
linenum = 0
#by bao: numline counts number of alignments
numline = 0
for line in fin:
numline+=1
fin.seek(0)
for line in fin:
if int(line.split("\t")[1]) not in graph[int(line.split("\t")[0])]:
graph[int(line.split("\t")[0])].append(int(line.split("\t")[1]))
if int(line.split("\t")[0]) not in graph[int(line.split("\t")[1])]:
graph[int(line.split("\t")[1])].append(int(line.split("\t")[0]))
#if string.atoi(line.split("\t")[1]) not in graph[string.atoi(line.split("\t")[0])]:
# graph[string.atoi(line.split("\t")[0])].append(string.atoi(line.split("\t")[1]))
# if string.atoi(line.split("\t")[0]) not in graph[string.atoi(line.split("\t")[1])]:
# graph[string.atoi(line.split("\t")[1])].append(string.atoi(line.split("\t")[0]))
linenum+=1
    # by bao: linenum caps how many alignments are sampled here; later versions will process multiple samples covering all alignments in parallel
if linenum > numline/int(argv[4])*5: break
#if linenum%1000 == 0 :
# print(linenum)
fin.close()
#print('graph\n')
########################################
cliques = []
countrecursion1=0
countrecursion2=0
####################################
def find_cliques(graph):
p = set(graph.keys())
r = set()
x = set()
global countrecursion1
global countrecursion2
ordering = []
ordering = degeneracy_ordering(graph)
for v in ordering:
#print('%d \tin \t %d' %(v,len(graph)))
countrecursion2=0
countrecursion1+=1
# if countrecursion1<len(ordering)*0.5:
# continue
neighs = graph[v]
find_cliques_pivot(graph, r.union([v]), p.intersection(neighs), x.intersection(neighs), cliques)
p.remove(v)
x.add(v)
return sorted(cliques, key = lambda x: len(x), reverse = True)
def find_cliques_pivot(graph, r, p, x, cliques):
global countrecursion1
global countrecursion2
#if countrecursion2 % 10000 == 0:
#print('len(p) = %d \tlen(x) = %d \t countrecursion1 = %d \tin \t %d ,\t countrecursion2 = %d' %(len(p),len(x),countrecursion1,len(graph),countrecursion2))
if countrecursion2 > 100:
return
countrecursion2+=1
if len(p) == 0 and len(x) == 0:
cliques.append(r)
else:
#u = iter(p.union(x)).next()
        u = next(iter(p.union(x)))
for v in p.difference(graph[u]):
neighs = graph[v]
find_cliques_pivot(graph, r.union([v]), p.intersection(neighs), x.intersection(neighs), cliques)
p.remove(v)
x.add(v)
def degeneracy_ordering(graph):
ordering = []
ordering_set = set()
degrees = defaultdict(lambda : 0)
degen = defaultdict(list)
max_deg = -1
for v in graph:
deg = len(graph[v])
degen[deg].append(v)
degrees[v] = deg
if deg > max_deg:
max_deg = deg
while True:
i = 0
while i <= max_deg:
if len(degen[i]) != 0:
break
i += 1
else:
break
v = degen[i].pop()
ordering.append(v)
ordering_set.add(v)
for w in graph[v]:
if w not in ordering_set:
deg = degrees[w]
degen[deg].remove(w)
if deg > 0:
degrees[w] -= 1
degen[deg - 1].append(w)
# print('degeneracy_ordering(graph)\n')
ordering.reverse()
return ordering
find_cliques(graph)
#print('find_cliques(graph)\n')
fout1 = open(argv[2],'w')
fout2 = open(argv[3],'w')
cliquenum = 0
edgenum = 0
read0 = 0
read1 = 0
countreadname = 0
for c in cliques:
if len(c) > 10 :
if len(c)<100:
for j in c:
fout1.writelines(str(j))
fout1.writelines(" ")
cliquenum += 1
fout1.writelines("\n")
if cliquenum > 1000 :
break
elif len(c) == 2:
for j in c:
fout2.writelines(str(j))
fout2.writelines(" ")
edgenum += 1
# if edgenum % 1000 == 0:
# print('edgenum = ', edgenum )
fout2.writelines("\n")
fout1.close()
fout2.close()
|
RecoJets/JetProducers/python/hltPUIdAlgo_cff.py | ckamtsikis/cmssw | 852 | 11143770 | <reponame>ckamtsikis/cmssw
import FWCore.ParameterSet.Config as cms
####################################################################################################################
full_74x_wp = cms.PSet(
#4 Eta Categories 0-2.5 2.5-2.75 2.75-3.0 3.0-5.0
#Tight Id
Pt010_Tight = cms.vdouble(-0.83,-0.81,-0.74,-0.81),
Pt1020_Tight = cms.vdouble(-0.83,-0.81,-0.74,-0.81),
Pt2030_Tight = cms.vdouble( 0.73, 0.05,-0.26,-0.42),
Pt3050_Tight = cms.vdouble( 0.73, 0.05,-0.26,-0.42),
#Medium Id
Pt010_Medium = cms.vdouble(-0.83,-0.92,-0.90,-0.92),
Pt1020_Medium = cms.vdouble(-0.83,-0.92,-0.90,-0.92),
Pt2030_Medium = cms.vdouble( 0.10,-0.36,-0.54,-0.54),
Pt3050_Medium = cms.vdouble( 0.10,-0.36,-0.54,-0.54),
#Loose Id
Pt010_Loose = cms.vdouble(-0.95,-0.96,-0.94,-0.95),
Pt1020_Loose = cms.vdouble(-0.95,-0.96,-0.94,-0.95),
Pt2030_Loose = cms.vdouble(-0.63,-0.60,-0.55,-0.45),
Pt3050_Loose = cms.vdouble(-0.63,-0.60,-0.55,-0.45),
)
full_74x = cms.PSet(
impactParTkThreshold = cms.double(1.) ,
cutBased = cms.bool(False),
tmvaWeights = cms.FileInPath("RecoJets/JetProducers/data/MVAJetPuID.weights_newNames.xml.gz"),
tmvaMethod = cms.string("BDTG"),
version = cms.int32(-1),
tmvaVariables = cms.vstring(
"rho" ,
"nParticles" ,
"nCharged" ,
"majW" ,
"minW",
"frac01",
"frac02",
"frac03",
"frac04",
"ptD" ,
"beta" ,
"betaStar" ,
"dR2Mean" ,
"pull" ,
"jetR" ,
"jetRchg"
),
tmvaSpectators = cms.vstring(
"jetEta",
"jetPt",
),
JetIdParams = full_74x_wp,
label = cms.string("CATEv0")
)
|
src/primify/__init__.py | borea17/primify | 455 | 11143772 | # -*- coding: utf-8 -*-
from pkg_resources import get_distribution, DistributionNotFound
import logging
from rich.console import Console
console = Console()
FORMAT = "%(message)s"
logging.basicConfig(level="DEBUG", format=FORMAT, datefmt="[%X]", filename="logs.txt")
try:
# Change here if project is renamed and does not equal the package name
dist_name = __name__
__version__ = get_distribution(dist_name).version
except DistributionNotFound:
__version__ = "unknown"
finally:
del get_distribution, DistributionNotFound
|
test/ut/tools/annotation/test_annotation.py | dutxubo/nni | 9,680 | 11143774 | <gh_stars>1000+
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from nni.tools import annotation
import ast
import json
from pathlib import Path
import shutil
import tempfile
import pytest
cwd = Path(__file__).parent
shutil.rmtree(cwd / '_generated', ignore_errors=True)
shutil.copytree(cwd / 'testcase/annotated', cwd / '_generated/annotated')
def test_search_space_generator():
search_space = annotation.generate_search_space(cwd / '_generated/annotated')
expected = json.load((cwd / 'testcase/searchspace.json').open())
assert search_space == expected
def test_code_generator():
src_dir = cwd / 'testcase/usercode'
dst_dir = cwd / '_generated/usercode'
code_dir = annotation.expand_annotations(src_dir, dst_dir, nas_mode='classic_mode')
assert Path(code_dir) == dst_dir
expect_dir = cwd / 'testcase/annotated'
_assert_source_equal(dst_dir, expect_dir, 'dir/simple.py')
_assert_source_equal(dst_dir, expect_dir, 'mnist.py')
_assert_source_equal(dst_dir, expect_dir, 'nas.py')
assert (src_dir / 'nonpy.txt').read_text() == (dst_dir / 'nonpy.txt').read_text()
def test_annotation_detecting():
src_dir = cwd / 'testcase/usercode/non_annotation'
code_dir = annotation.expand_annotations(src_dir, tempfile.mkdtemp())
assert Path(code_dir) == src_dir
def _assert_source_equal(dir1, dir2, file_name):
ast1 = ast.parse((dir1 / file_name).read_text())
ast2 = ast.parse((dir2 / file_name).read_text())
_assert_ast_equal(ast1, ast2)
def _assert_ast_equal(ast1, ast2):
assert type(ast1) is type(ast2)
if isinstance(ast1, ast.AST):
assert sorted(ast1._fields) == sorted(ast2._fields)
for field_name in ast1._fields:
field1 = getattr(ast1, field_name)
field2 = getattr(ast2, field_name)
_assert_ast_equal(field1, field2)
elif isinstance(ast1, list):
assert len(ast1) == len(ast2)
for item1, item2 in zip(ast1, ast2):
_assert_ast_equal(item1, item2)
else:
assert ast1 == ast2
if __name__ == '__main__':
pytest.main()
|
deprecated/software/Navigator/DiscreteGridUtils.py | mfkiwl/GAAS | 2,111 | 11143841 | #encoding=utf-8
class DiscreteGridUtils:
def __init__(self,grid_size = 0.3):
self.grid_size = grid_size
def continuous_to_discrete(self,pos): # pos:x,y,z
#print (((pos[0] - (self.grid_size * 0.5)) / self.grid_size),
# ((pos[1] - (self.grid_size * 0.5)) / self.grid_size),
# ((pos[2] - (self.grid_size * 0.5)) / self.grid_size))
return ( int((pos[0] + (self.grid_size*0.5))/self.grid_size)-1,
int((pos[1] + (self.grid_size * 0.5)) / self.grid_size)-1,
int((pos[2] + (self.grid_size * 0.5)) / self.grid_size)-1)
def discrete_to_continuous_target(self,grid_pos): # x,y,z the center of input grid
return ( (grid_pos[0]+0.5)*self.grid_size,
(grid_pos[1]+0.5)*self.grid_size,
(grid_pos[2]+0.5)*self.grid_size)
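# Worked example of the formulas above (illustrative): with grid_size = 0.5,
# continuous_to_discrete maps any x in [0.25, 0.75) to cell 0 via int((x + 0.25) / 0.5) - 1,
# while discrete_to_continuous_target(0) returns (0 + 0.5) * 0.5 = 0.25, i.e. the lower
# edge of that cell rather than its geometric centre; the demo below prints the actual values.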
if __name__ == '__main__':
dg = DiscreteGridUtils(grid_size=0.5)
    print('res1:', dg.continuous_to_discrete((0.4, 0.4, 0.4)))  # (0, 0, 0)
    print('res2:', dg.continuous_to_discrete((-0.4, -0.4, -0.4)))  # (-1, -1, -1)
    print(dg.discrete_to_continuous_target((1, 1, 1)))  # (0.75, 0.75, 0.75)
    print(dg.discrete_to_continuous_target((0, 0, 0)))  # (0.25, 0.25, 0.25) |
presidio-analyzer/app.py | vtols/presidio | 1,408 | 11143887 | <gh_stars>1000+
"""REST API server for analyzer."""
import json
import logging
import os
from logging.config import fileConfig
from pathlib import Path
from typing import Tuple
from flask import Flask, request, jsonify, Response
from werkzeug.exceptions import HTTPException
from presidio_analyzer.analyzer_engine import AnalyzerEngine
from presidio_analyzer.analyzer_request import AnalyzerRequest
DEFAULT_PORT = "3000"
LOGGING_CONF_FILE = "logging.ini"
WELCOME_MESSAGE = r"""
_______ _______ _______ _______ _________ ______ _________ _______
( ____ )( ____ )( ____ \( ____ \\__ __/( __ \ \__ __/( ___ )
| ( )|| ( )|| ( \/| ( \/ ) ( | ( \ ) ) ( | ( ) |
| (____)|| (____)|| (__ | (_____ | | | | ) | | | | | | |
| _____)| __)| __) (_____ ) | | | | | | | | | | | |
| ( | (\ ( | ( ) | | | | | ) | | | | | | |
| ) | ) \ \__| (____/\/\____) |___) (___| (__/ )___) (___| (___) |
|/ |/ \__/(_______/\_______)\_______/(______/ \_______/(_______)
"""
class Server:
"""HTTP Server for calling Presidio Analyzer."""
def __init__(self):
fileConfig(Path(Path(__file__).parent, LOGGING_CONF_FILE))
self.logger = logging.getLogger("presidio-analyzer")
self.logger.setLevel(os.environ.get("LOG_LEVEL", self.logger.level))
self.app = Flask(__name__)
self.logger.info("Starting analyzer engine")
self.engine = AnalyzerEngine()
self.logger.info(WELCOME_MESSAGE)
@self.app.route("/health")
def health() -> str:
"""Return basic health probe result."""
return "Presidio Analyzer service is up"
@self.app.route("/analyze", methods=["POST"])
def analyze() -> Tuple[str, int]:
"""Execute the analyzer function."""
# Parse the request params
try:
req_data = AnalyzerRequest(request.get_json())
if not req_data.text:
raise Exception("No text provided")
if not req_data.language:
raise Exception("No language provided")
recognizer_result_list = self.engine.analyze(
text=req_data.text,
language=req_data.language,
correlation_id=req_data.correlation_id,
score_threshold=req_data.score_threshold,
entities=req_data.entities,
return_decision_process=req_data.return_decision_process,
ad_hoc_recognizers=req_data.ad_hoc_recognizers,
)
return Response(
json.dumps(
recognizer_result_list,
default=lambda o: o.to_dict(),
sort_keys=True,
),
content_type="application/json",
)
except TypeError as te:
error_msg = (
f"Failed to parse /analyze request "
f"for AnalyzerEngine.analyze(). {te.args[0]}"
)
self.logger.error(error_msg)
return jsonify(error=error_msg), 400
except Exception as e:
self.logger.error(
f"A fatal error occurred during execution of "
f"AnalyzerEngine.analyze(). {e}"
)
return jsonify(error=e.args[0]), 500
@self.app.route("/recognizers", methods=["GET"])
def recognizers() -> Tuple[str, int]:
"""Return a list of supported recognizers."""
language = request.args.get("language")
try:
recognizers_list = self.engine.get_recognizers(language)
names = [o.name for o in recognizers_list]
return jsonify(names), 200
except Exception as e:
self.logger.error(
f"A fatal error occurred during execution of "
f"AnalyzerEngine.get_recognizers(). {e}"
)
return jsonify(error=e.args[0]), 500
@self.app.route("/supportedentities", methods=["GET"])
def supported_entities() -> Tuple[str, int]:
"""Return a list of supported entities."""
language = request.args.get("language")
try:
entities_list = self.engine.get_supported_entities(language)
return jsonify(entities_list), 200
except Exception as e:
self.logger.error(
f"A fatal error occurred during execution of "
f"AnalyzerEngine.supported_entities(). {e}"
)
return jsonify(error=e.args[0]), 500
@self.app.errorhandler(HTTPException)
def http_exception(e):
return jsonify(error=e.description), e.code
if __name__ == "__main__":
port = int(os.environ.get("PORT", DEFAULT_PORT))
server = Server()
server.app.run(host="0.0.0.0", port=port)
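# Example request against the /analyze endpoint above (illustrative; the JSON field
# names are assumed to match what AnalyzerRequest reads from the request body):
#
#   curl -X POST http://localhost:3000/analyze \
#        -H "Content-Type: application/json" \
#        -d '{"text": "My phone number is 212-555-5555", "language": "en"}'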
|
alipay/aop/api/domain/AlipayFundTransAacollectBatchCreateModel.py | snowxmas/alipay-sdk-python-all | 213 | 11143901 | <reponame>snowxmas/alipay-sdk-python-all
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayFundTransAacollectBatchCreateModel(object):
def __init__(self):
self._batch_memo = None
self._channel = None
self._ext_param = None
self._limit_items_total = None
self._pay_amount_single = None
self._pay_amount_total = None
self._payee_user_id = None
self._payer_user_ids = None
self._real_items_total = None
self._show_items_total = None
self._source = None
@property
def batch_memo(self):
return self._batch_memo
@batch_memo.setter
def batch_memo(self, value):
self._batch_memo = value
@property
def channel(self):
return self._channel
@channel.setter
def channel(self, value):
self._channel = value
@property
def ext_param(self):
return self._ext_param
@ext_param.setter
def ext_param(self, value):
self._ext_param = value
@property
def limit_items_total(self):
return self._limit_items_total
@limit_items_total.setter
def limit_items_total(self, value):
self._limit_items_total = value
@property
def pay_amount_single(self):
return self._pay_amount_single
@pay_amount_single.setter
def pay_amount_single(self, value):
self._pay_amount_single = value
@property
def pay_amount_total(self):
return self._pay_amount_total
@pay_amount_total.setter
def pay_amount_total(self, value):
self._pay_amount_total = value
@property
def payee_user_id(self):
return self._payee_user_id
@payee_user_id.setter
def payee_user_id(self, value):
self._payee_user_id = value
@property
def payer_user_ids(self):
return self._payer_user_ids
@payer_user_ids.setter
def payer_user_ids(self, value):
self._payer_user_ids = value
@property
def real_items_total(self):
return self._real_items_total
@real_items_total.setter
def real_items_total(self, value):
self._real_items_total = value
@property
def show_items_total(self):
return self._show_items_total
@show_items_total.setter
def show_items_total(self, value):
self._show_items_total = value
@property
def source(self):
return self._source
@source.setter
def source(self, value):
self._source = value
def to_alipay_dict(self):
params = dict()
if self.batch_memo:
if hasattr(self.batch_memo, 'to_alipay_dict'):
params['batch_memo'] = self.batch_memo.to_alipay_dict()
else:
params['batch_memo'] = self.batch_memo
if self.channel:
if hasattr(self.channel, 'to_alipay_dict'):
params['channel'] = self.channel.to_alipay_dict()
else:
params['channel'] = self.channel
if self.ext_param:
if hasattr(self.ext_param, 'to_alipay_dict'):
params['ext_param'] = self.ext_param.to_alipay_dict()
else:
params['ext_param'] = self.ext_param
if self.limit_items_total:
if hasattr(self.limit_items_total, 'to_alipay_dict'):
params['limit_items_total'] = self.limit_items_total.to_alipay_dict()
else:
params['limit_items_total'] = self.limit_items_total
if self.pay_amount_single:
if hasattr(self.pay_amount_single, 'to_alipay_dict'):
params['pay_amount_single'] = self.pay_amount_single.to_alipay_dict()
else:
params['pay_amount_single'] = self.pay_amount_single
if self.pay_amount_total:
if hasattr(self.pay_amount_total, 'to_alipay_dict'):
params['pay_amount_total'] = self.pay_amount_total.to_alipay_dict()
else:
params['pay_amount_total'] = self.pay_amount_total
if self.payee_user_id:
if hasattr(self.payee_user_id, 'to_alipay_dict'):
params['payee_user_id'] = self.payee_user_id.to_alipay_dict()
else:
params['payee_user_id'] = self.payee_user_id
if self.payer_user_ids:
if hasattr(self.payer_user_ids, 'to_alipay_dict'):
params['payer_user_ids'] = self.payer_user_ids.to_alipay_dict()
else:
params['payer_user_ids'] = self.payer_user_ids
if self.real_items_total:
if hasattr(self.real_items_total, 'to_alipay_dict'):
params['real_items_total'] = self.real_items_total.to_alipay_dict()
else:
params['real_items_total'] = self.real_items_total
if self.show_items_total:
if hasattr(self.show_items_total, 'to_alipay_dict'):
params['show_items_total'] = self.show_items_total.to_alipay_dict()
else:
params['show_items_total'] = self.show_items_total
if self.source:
if hasattr(self.source, 'to_alipay_dict'):
params['source'] = self.source.to_alipay_dict()
else:
params['source'] = self.source
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayFundTransAacollectBatchCreateModel()
if 'batch_memo' in d:
o.batch_memo = d['batch_memo']
if 'channel' in d:
o.channel = d['channel']
if 'ext_param' in d:
o.ext_param = d['ext_param']
if 'limit_items_total' in d:
o.limit_items_total = d['limit_items_total']
if 'pay_amount_single' in d:
o.pay_amount_single = d['pay_amount_single']
if 'pay_amount_total' in d:
o.pay_amount_total = d['pay_amount_total']
if 'payee_user_id' in d:
o.payee_user_id = d['payee_user_id']
if 'payer_user_ids' in d:
o.payer_user_ids = d['payer_user_ids']
if 'real_items_total' in d:
o.real_items_total = d['real_items_total']
if 'show_items_total' in d:
o.show_items_total = d['show_items_total']
if 'source' in d:
o.source = d['source']
return o
|
websauna/tests/static/test_static_asset.py | stevepiercy/websauna | 286 | 11143904 | # Standard Library
import os
import shutil
# Pyramid
from pyramid.config import Configurator
import pytest
# Websauna
from websauna.system.devop.cmdline import init_websauna
from websauna.system.http.static import DefaultStaticAssetPolicy
from websauna.tests.cookiecutter.scaffold import execute_command
from websauna.tests.webserver import customized_web_server
HERE = os.path.dirname(__file__)
STATIC_CONF_FILE = os.path.join(HERE, "static-asset-test.ini")
@pytest.fixture(scope="module")
def cache_app(request):
"""Construct a WSGI app with static asset caching enabled."""
request = init_websauna(STATIC_CONF_FILE)
return request.app
@pytest.fixture
def caching_web_server(request, cache_app):
server = customized_web_server(request, cache_app)
return server()
def test_collect_static_asset():
"""Collect static files and stash them with MD5 sums."""
c = Configurator()
sap = DefaultStaticAssetPolicy(c)
sap.add_static_view("websauna-static", "websauna.system:static")
collected = sap.collect_static()
    # Check one resource from the collection to see that we succeeded
assert collected["websauna-static"]["pyramid-32x32.png"] == 'perma-asset/pyramid-32x32.c453183eee6627ff09e49f0384cededd.png'
def test_collect_recurse():
"""Check another more complicated static file folder collect"""
c = Configurator()
sap = DefaultStaticAssetPolicy(c)
sap.add_static_view("deform-static", "deform:static")
collected = sap.collect_static()
assert len(collected) > 0
    # Check one resource from the collection to see that we succeeded
assert collected["deform-static"]["pickadate/translations/ja_JP.js"].startswith('perma-asset/pickadate/translations/ja_JP.')
def test_map_static_asset(browser, caching_web_server):
"""Use collected information to return static URLs"""
cache = os.path.join("websauna", "system", "static", "perma-asset")
if os.path.exists(cache):
shutil.rmtree(cache)
    # Run the static asset collector
execute_command(["ws-collect-static", STATIC_CONF_FILE], folder=os.getcwd(), timeout=30.0)
b = browser
b.visit(caching_web_server)
el = b.find_by_css("link[rel='stylesheet']")[0]._element
bootstrap_css = el.get_attribute("href")
assert "perma-asset" in bootstrap_css
b.visit(bootstrap_css)
# Identify correct CSS load by having some known text inside CSS payload
assert "Bootstrap" in b.html
|
ncclient/logging_.py | doesitblend/ncclient | 498 | 11143917 | <gh_stars>100-1000
import logging
class SessionLoggerAdapter(logging.LoggerAdapter):
"""Logger adapter that automatically adds session information to logs."""
def process(self, msg, kwargs):
if 'session' not in self.extra or self.extra['session'] is None:
return msg, kwargs
session = self.extra['session']
prefix = ""
# All Session instances have an id. SSHSessions have a host as well.
if hasattr(session, 'host'):
prefix += "host %s " % session.host
if session.id is not None:
prefix += "session-id %s" % session.id
else:
prefix += "session 0x%x" % id(session)
# Pass the session information through to the LogRecord itself
if 'extra' not in kwargs:
kwargs['extra'] = self.extra
else:
kwargs['extra'].update(self.extra)
return "[%s] %s" % (prefix, msg), kwargs
|
scalene/scalene_arguments.py | barseghyanartur/scalene | 1,708 | 11143945 | <filename>scalene/scalene_arguments.py
import argparse
class ScaleneArguments(argparse.Namespace):
def __init__(self) -> None:
super(ScaleneArguments, self).__init__()
self.cpu_only = False
self.cpu_percent_threshold = 1
# mean seconds between interrupts for CPU sampling.
self.cpu_sampling_rate = 0.01
self.html = False
self.json = False
self.column_width = (
132 # Note that Scalene works best with at least 132 columns.
)
self.malloc_threshold = 100
self.outfile = None
self.pid = 0
# if we profile all code or just target code and code in its child directories
self.profile_all = False
# how long between outputting stats during execution
self.profile_interval = float("inf")
# what function pathnames must contain to be output during profiling
self.profile_only = ""
# what function pathnames should never be output during profiling
self.profile_exclude = ""
# The root of the directory that has the files that should be profiled
self.program_path = ""
# reduced profile?
self.reduced_profile = False
# do we use virtual time or wallclock time (capturing system time and blocking)?
self.use_virtual_time = False
self.memory_leak_detector = False # experimental
|
flexget/plugins/cli/filters.py | astrotee/Flexget | 1,322 | 11143976 | <gh_stars>1000+
import inspect
from flexget import options
from flexget.event import event
from flexget.terminal import TerminalTable, console, table_parser
from flexget.utils.template import get_filters
def do_cli(manager, options):
header = ['Name', 'Description']
table = TerminalTable(*header, table_type=options.table_type, show_lines=True)
for filter_name, filter in get_filters().items():
        if options.name and options.name not in filter_name:
continue
filter_doc = inspect.getdoc(filter) or ''
table.add_row(filter_name, filter_doc)
console(table)
@event('options.register')
def register_parser_arguments():
# Register subcommand
parser = options.register_command(
'jinja-filters',
do_cli,
help='View registered jinja2 filters and their description',
parents=[table_parser],
)
parser.add_argument('--name', help='Filter results by filter name')
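# Example invocation (illustrative), based on the command and argument registered above:
#
#   flexget jinja-filters --name date
#
# which lists only the registered Jinja2 filters whose name contains "date".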
|
Algo and DSA/LeetCode-Solutions-master/Python/minimum-number-of-arrows-to-burst-balloons.py | Sourav692/FAANG-Interview-Preparation | 3,269 | 11143986 | <filename>Algo and DSA/LeetCode-Solutions-master/Python/minimum-number-of-arrows-to-burst-balloons.py
# Time: O(nlogn)
# Space: O(1)
class Solution(object):
def findMinArrowShots(self, points):
"""
:type points: List[List[int]]
:rtype: int
"""
if not points:
return 0
points.sort()
result = 0
i = 0
while i < len(points):
j = i + 1
right_bound = points[i][1]
while j < len(points) and points[j][0] <= right_bound:
right_bound = min(right_bound, points[j][1])
j += 1
result += 1
i = j
return result
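# Worked example (illustrative): for points = [[10, 16], [2, 8], [1, 6], [7, 12]] the sort
# gives [1, 6], [2, 8], [7, 12], [10, 16]; one arrow bursts the first two (right_bound
# shrinks to 6), a second bursts the last two, so the method returns 2.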
|
examples/nas/oneshot/pfld/train.py | ggzhang0071/nni | 9,680 | 11144015 | <reponame>ggzhang0071/nni<filename>examples/nas/oneshot/pfld/train.py
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from __future__ import absolute_import, division, print_function
import argparse
import logging
import os
import torch
import torchvision
import numpy as np
from datasets import PFLDDatasets
from lib.builder import search_space
from lib.ops import PRIMITIVES
from lib.trainer import PFLDTrainer
from lib.utils import PFLDLoss
from nni.algorithms.nas.pytorch.fbnet import LookUpTable, NASConfig
from torch.utils.data import DataLoader
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def main(args):
""" The main function for supernet pre-training and subnet fine-tuning. """
logging.basicConfig(
format="[%(asctime)s] [p%(process)s] [%(pathname)s\
:%(lineno)d] [%(levelname)s] %(message)s",
level=logging.INFO,
handlers=[
logging.FileHandler(args.log_file, mode="w"),
logging.StreamHandler(),
],
)
# print the information of arguments
for arg in vars(args):
s = arg + ": " + str(getattr(args, arg))
logging.info(s)
# for 106 landmarks
num_points = 106
# list of device ids, and the number of workers for data loading
device_ids = [int(id) for id in args.dev_id.split(",")]
dev_num = len(device_ids)
num_workers = 4 * dev_num
# random seed
manual_seed = 1
np.random.seed(manual_seed)
torch.manual_seed(manual_seed)
torch.cuda.manual_seed_all(manual_seed)
# import supernet for block-wise DNAS pre-training
from lib.supernet import PFLDInference, AuxiliaryNet
# the configuration for training control
nas_config = NASConfig(
model_dir=args.snapshot,
nas_lr=args.theta_lr,
mode=args.mode,
alpha=args.alpha,
beta=args.beta,
search_space=search_space,
start_epoch=args.start_epoch,
)
# look-up table with information of search space, flops per block, etc.
lookup_table = LookUpTable(config=nas_config, primitives=PRIMITIVES)
# create supernet
pfld_backbone = PFLDInference(lookup_table, num_points)
# the auxiliary-net of PFLD to predict the pose angle
auxiliarynet = AuxiliaryNet()
# main task loss
criterion = PFLDLoss()
# optimizer for weight train
if args.opt == "adam":
optimizer = torch.optim.AdamW(
[
{"params": pfld_backbone.parameters()},
{"params": auxiliarynet.parameters()},
],
lr=args.base_lr,
weight_decay=args.weight_decay,
)
elif args.opt == "rms":
optimizer = torch.optim.RMSprop(
[
{"params": pfld_backbone.parameters()},
{"params": auxiliarynet.parameters()},
],
lr=args.base_lr,
momentum=0.0,
weight_decay=args.weight_decay,
)
# data argmentation and dataloader
transform = torchvision.transforms.Compose(
[torchvision.transforms.ToTensor()]
)
# the landmark dataset with 106 points is default used
train_dataset = PFLDDatasets(
os.path.join(args.data_root, "train_data/list.txt"),
transform,
data_root=args.data_root,
img_size=args.img_size,
)
dataloader = DataLoader(
train_dataset,
batch_size=args.train_batchsize,
shuffle=True,
num_workers=num_workers,
pin_memory=True,
drop_last=False,
)
val_dataset = PFLDDatasets(
os.path.join(args.data_root, "test_data/list.txt"),
transform,
data_root=args.data_root,
img_size=args.img_size,
)
val_dataloader = DataLoader(
val_dataset,
batch_size=args.val_batchsize,
shuffle=False,
num_workers=num_workers,
pin_memory=True,
)
# create the trainer, then search/finetune
trainer = PFLDTrainer(
pfld_backbone,
auxiliarynet,
optimizer,
criterion,
device,
device_ids,
nas_config,
lookup_table,
dataloader,
val_dataloader,
n_epochs=args.end_epoch,
logger=logging,
)
trainer.train()
def parse_args():
""" Parse the user arguments. """
parser = argparse.ArgumentParser(description="FBNet for PFLD")
parser.add_argument("--dev_id", dest="dev_id", default="0", type=str)
parser.add_argument("--opt", default="rms", type=str)
parser.add_argument("--base_lr", default=0.0001, type=int)
parser.add_argument("--weight-decay", "--wd", default=1e-6, type=float)
parser.add_argument("--img_size", default=112, type=int)
parser.add_argument("--theta-lr", "--tlr", default=0.01, type=float)
parser.add_argument(
"--mode", default="mul", type=str, choices=["mul", "add"]
)
parser.add_argument("--alpha", default=0.25, type=float)
parser.add_argument("--beta", default=0.6, type=float)
parser.add_argument("--start_epoch", default=50, type=int)
parser.add_argument("--end_epoch", default=300, type=int)
parser.add_argument(
"--snapshot", default="models", type=str, metavar="PATH"
)
parser.add_argument("--log_file", default="train.log", type=str)
parser.add_argument(
"--data_root", default="/dataset", type=str, metavar="PATH"
)
parser.add_argument("--train_batchsize", default=256, type=int)
parser.add_argument("--val_batchsize", default=128, type=int)
args = parser.parse_args()
args.snapshot = os.path.join(args.snapshot, 'supernet')
args.log_file = os.path.join(args.snapshot, "{}.log".format('supernet'))
os.makedirs(args.snapshot, exist_ok=True)
return args
if __name__ == "__main__":
args = parse_args()
main(args)
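# Example launch (illustrative; the paths and device ids are placeholders):
#
#   python train.py --dev_id 0,1 --data_root /dataset --train_batchsize 256
#
# Checkpoints and the log file go under models/supernet, as arranged in parse_args() above.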
|
Misc/SumOfAllDigits/solution1.py | gbrls/CompetitiveCode | 165 | 11144032 | a, b = map(int, input().split())
# Find sum of digits of a number
def sumDigits(n):
tot = 0
while(n > 0):
tot += n % 10
n //= 10
return tot
# Recursively find sum of digits till the number is single digit
def sumOfDigits(n):
k = sumDigits(n)
if k > 9:
return sumOfDigits(k)
else:
return k
num = sumDigits(a) * b
print(sumOfDigits(num))
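# Note (illustrative): sumOfDigits computes the digital root, so for num > 0 the same
# answer is available in O(1) as 1 + (num - 1) % 9; the recursion above is kept for clarity.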
|
insights/parsers/installed_product_ids.py | lhuett/insights-core | 121 | 11144097 | """
Installed product IDs
=====================
InstalledProductIDs - command ``find /etc/pki/product-default/ /etc/pki/product/ -name '*pem' -exec rct cat-cert --no-content '{}' \;``
---------------------------------------------------------------------------------------------------------------------------------------
This module provides a parser for information about certificates for
Red Hat product subscriptions.
"""
from insights.core.filters import add_filter
from insights.specs import Specs
from .. import parser, CommandParser
add_filter(Specs.subscription_manager_installed_product_ids, 'ID:')
@parser(Specs.subscription_manager_installed_product_ids)
class InstalledProductIDs(CommandParser):
"""
Parses the output of the comand::
find /etc/pki/product-default/ /etc/pki/product/ -name '*pem' -exec rct cat-cert --no-content '{}' \;
Sample output from the unfiltered command looks like::
+-------------------------------------------+
Product Certificate
+-------------------------------------------+
Certificate:
Path: /etc/pki/product-default/69.pem
Version: 1.0
Serial: 12750047592154749739
Start Date: 2017-06-28 18:05:10+00:00
End Date: 2037-06-23 18:05:10+00:00
Subject:
CN: Red Hat Product ID [4f9995e0-8dc4-4b4f-acfe-4ef1264b94f3]
Issuer:
C: US
CN: Red Hat Entitlement Product Authority
O: Red Hat, Inc.
OU: Red Hat Network
ST: North Carolina
emailAddress: <EMAIL>
Product:
ID: 69
Name: Red Hat Enterprise Linux Server
Version: 7.4
Arch: x86_64
Tags: rhel-7,rhel-7-server
Brand Type:
Brand Name:
+-------------------------------------------+
Product Certificate
+-------------------------------------------+
Certificate:
Path: /etc/pki/product/69.pem
Version: 1.0
Serial: 12750047592154751271
Start Date: 2018-04-13 11:23:50+00:00
End Date: 2038-04-08 11:23:50+00:00
Subject:
CN: Red Hat Product ID [f3c92a95-26be-4bdf-800f-02c044503896]
Issuer:
C: US
CN: Red Hat Entitlement Product Authority
O: Red Hat, Inc.
OU: Red Hat Network
ST: North Carolina
emailAddress: <EMAIL>
Product:
ID: 69
Name: Red Hat Enterprise Linux Server
Version: 7.6
Arch: x86_64
Tags: rhel-7,rhel-7-server
Brand Type:
Brand Name:
Filters have been added to the parser so that only the ``ID`` element will
be collected.
Attributes:
ids (set): set of strings of the unique IDs collected by the command
Examples:
>>> type(products)
<class 'insights.parsers.installed_product_ids.InstalledProductIDs'>
>>> list(products.ids)
['69']
"""
def parse_content(self, content):
""" Parse command output """
self.ids = set()
for line in content:
if line.strip().startswith('ID:'):
_, id = line.strip().split(':', 1)
self.ids.add(id.strip())
|
share/lib/python/neuron/tests/test_rxd.py | niltonlk/nrn | 203 | 11144109 | from neuron import h
import neuron
import unittest
import sys
from multiprocessing import Process
try:
import multiprocessing as mp
mp.set_start_method("spawn")
except:
pass
scalar_bistable_data = [
4.666144368739553e-24,
2.888704007378294e-23,
1.986504953195841e-22,
1.341708879755938e-21,
8.872570814175589e-21,
5.740880124662921e-20,
3.632196361482038e-19,
2.245604121004388e-18,
1.355711505202327e-17,
7.986451339137754e-17,
4.587323676899676e-16,
2.567045965818926e-15,
1.398326895049450e-14,
7.407949505967670e-14,
3.813250931917600e-13,
1.905347304599457e-12,
9.231798364410461e-12,
4.332732659668245e-11,
1.967471956902693e-10,
8.633990304666386e-10,
3.657079221471421e-09,
1.493207056713948e-08,
5.869401066025243e-08,
2.218029569259474e-07,
8.047212799629250e-07,
2.799213829568570e-06,
9.323183477925731e-06,
2.969562407156739e-05,
9.035408030381566e-05,
2.623954841897339e-04,
7.269141545185255e-04,
1.920726909911178e-03,
4.841879243431064e-03,
1.164965343224173e-02,
2.674863273052559e-02,
5.846777500048252e-02,
1.206799453008834e-01,
2.308459675650935e-01,
3.962758789592548e-01,
5.900229199039158e-01,
7.586218889022415e-01,
8.722981880510015e-01,
9.370823930114011e-01,
9.705058492437171e-01,
9.867204567968444e-01,
9.942426940783661e-01,
9.975977799681778e-01,
9.990359504416327e-01,
9.996249801006252e-01,
9.998477834074035e-01,
9.999041758657206e-01,
9.998477834074035e-01,
9.996249801006252e-01,
9.990359504416326e-01,
9.975977799681777e-01,
9.942426940783655e-01,
9.867204567968437e-01,
9.705058492437160e-01,
9.370823930113995e-01,
8.722981880509845e-01,
7.586218889021992e-01,
5.900229199038706e-01,
3.962758789592359e-01,
2.308459675650852e-01,
1.206799453008814e-01,
5.846777500048142e-02,
2.674863273052497e-02,
1.164965343224158e-02,
4.841879243431020e-03,
1.920726909911166e-03,
7.269141545185224e-04,
2.623954841897313e-04,
9.035408030381501e-05,
2.969562407156726e-05,
9.323183477925702e-06,
2.799213829568569e-06,
8.047212799629269e-07,
2.218029569259469e-07,
5.869401066025247e-08,
1.493207056713951e-08,
3.657079221471437e-09,
8.633990304666446e-10,
1.967471956902691e-10,
4.332732659668252e-11,
9.231798364410503e-12,
1.905347304599471e-12,
3.813250931917631e-13,
7.407949505967741e-14,
1.398326895049453e-14,
2.567045965818940e-15,
4.587323676899705e-16,
7.986451339137816e-17,
1.355711505202341e-17,
2.245604121004394e-18,
3.632196361482056e-19,
5.740880124662959e-20,
8.872570814175665e-21,
1.341708879755951e-21,
1.986504953195844e-22,
2.888704007378305e-23,
4.666144368739581e-24,
]
trivial_ecs_data = {
False: [
1.000000000000000e00,
9.999975013886804e-01,
9.999774378669442e-01,
9.998977298459814e-01,
9.996832492392076e-01,
9.992330951223182e-01,
9.984342775161091e-01,
9.971750000657639e-01,
9.953548976762590e-01,
9.928916564339932e-01,
9.897243754423555e-01,
9.858143683101370e-01,
9.811441475924241e-01,
9.757152507503439e-01,
9.695454356120868e-01,
9.626656387413414e-01,
9.551169704910168e-01,
9.469479226921377e-01,
9.382118895981384e-01,
9.289650476637475e-01,
9.192646016344460e-01,
9.091673797060952e-01,
8.987287459064514e-01,
8.880017905518656e-01,
8.770367573796769e-01,
8.658806669966089e-01,
8.545770993099358e-01,
8.431661016850746e-01,
8.316841940567001e-01,
8.201644466893430e-01,
8.086366104805101e-01,
7.971272834839320e-01,
7.856601006415908e-01,
7.742559365417961e-01,
7.629331133899011e-01,
7.517076083292696e-01,
7.405932558321451e-01,
7.296019421444131e-01,
7.187437897643434e-01,
7.080273307086645e-01,
6.974596679100502e-01,
6.870466245332152e-01,
6.767928813219394e-01,
6.667021023212317e-01,
6.567770494779278e-01,
6.470196867258985e-01,
6.374312742221647e-01,
6.280124534282580e-01,
6.187633237355973e-01,
6.096835113211366e-01,
6.007722308951912e-01,
5.920283409711493e-01,
5.834503932497362e-01,
5.750366766708355e-01,
5.667852566452943e-01,
5.586940099388136e-01,
5.507606556408047e-01,
5.429827826135633e-01,
5.353578737816238e-01,
5.278833275879240e-01,
5.205564769125375e-01,
5.133746057212193e-01,
5.063349636848303e-01,
4.994347789867492e-01,
4.926712695135610e-01,
4.860416526044784e-01,
4.795431535169798e-01,
4.731730127498988e-01,
4.669284923505265e-01,
4.608068813190689e-01,
4.548055002118984e-01,
4.489217050343421e-01,
4.431528905041363e-01,
4.374964927580476e-01,
4.319499915664357e-01,
4.265109121135835e-01,
4.211768263954196e-01,
4.159453542806875e-01,
4.108141642766345e-01,
4.057809740358395e-01,
4.008435506368045e-01,
3.959997106673694e-01,
3.912473201368166e-01,
3.865842942396779e-01,
3.820085969917059e-01,
3.775182407561858e-01,
3.731112856767305e-01,
3.687858390308746e-01,
3.645400545171554e-01,
3.603721314869148e-01,
3.562803141307546e-01,
3.522628906284160e-01,
3.483181922698216e-01,
3.444445925540838e-01,
3.406405062724689e-01,
3.369043885805584e-01,
3.332347340641985e-01,
3.296300758032397e-01,
3.260889844365475e-01,
3.226100672312980e-01,
3.191919671591613e-01,
],
1e-2: [
1.000000000000000e00,
1.000000000000000e00,
1.000000000000000e00,
9.999999999993757e-01,
9.999999999948940e-01,
9.999999999684935e-01,
9.999999997476527e-01,
9.999999928933891e-01,
9.999999611564773e-01,
9.999998797767268e-01,
9.999996998881439e-01,
9.999993374567406e-01,
9.999984853833063e-01,
9.999969740580184e-01,
9.999944882384333e-01,
9.999906315949002e-01,
9.999849509581277e-01,
9.999769206381147e-01,
9.999582122018814e-01,
9.999297152881566e-01,
9.998885520830283e-01,
9.998315279906250e-01,
9.996818635176246e-01,
9.994522087548142e-01,
9.991215537513162e-01,
9.986688188930973e-01,
9.980737640971513e-01,
9.973169807955987e-01,
9.963810719470240e-01,
9.952501731850908e-01,
9.939108695612021e-01,
9.923517494756579e-01,
9.905637628430480e-01,
9.885402079276301e-01,
9.862765182499404e-01,
9.837701825345700e-01,
9.810207339243457e-01,
9.780291325292961e-01,
9.747989764630028e-01,
9.713343413784561e-01,
9.676399370118218e-01,
9.610664511514549e-01,
9.539287981952346e-01,
9.462557577837218e-01,
9.380908351569157e-01,
9.295048630242134e-01,
9.272938750650753e-01,
9.250579717830375e-01,
9.227970935555948e-01,
9.164665386395120e-01,
9.099911643711576e-01,
9.033621457398684e-01,
8.966076655702790e-01,
8.897506341031615e-01,
8.827836396691656e-01,
8.757430253325005e-01,
8.686235564317917e-01,
8.614266253287830e-01,
8.541863694365750e-01,
8.468993821627302e-01,
8.395705411285332e-01,
8.322240727554534e-01,
8.248512178596673e-01,
8.174717412805539e-01,
8.101012803444277e-01,
8.027295128188973e-01,
7.953825709154709e-01,
7.880520562892473e-01,
7.807347758450052e-01,
7.734449586695845e-01,
7.661975818078829e-01,
7.589799271053987e-01,
7.518171027867613e-01,
7.446977692754075e-01,
7.376160661891280e-01,
7.305954685595947e-01,
7.236266087629475e-01,
7.167068363568081e-01,
7.140993659912696e-01,
7.115003569118910e-01,
7.053989759515575e-01,
6.937157033233743e-01,
6.892286429728892e-01,
6.847738597587193e-01,
6.803460439703601e-01,
6.736943892201644e-01,
6.671180984390447e-01,
6.622364244386257e-01,
6.573969190766189e-01,
6.525933207264888e-01,
6.478347504056896e-01,
6.431166994633450e-01,
6.384369407276960e-01,
6.290516157893640e-01,
6.198222188748919e-01,
6.175428045973979e-01,
6.152735585854211e-01,
6.111170811451881e-01,
6.031212236479893e-01,
5.952593539110380e-01,
5.921305074751056e-01,
5.890215438866859e-01,
5.843049294344698e-01,
5.796335966290896e-01,
5.750160381540556e-01,
5.704420016665088e-01,
5.659178450605010e-01,
5.614455518912613e-01,
5.591788148460601e-01,
5.569248279174298e-01,
5.506074431743505e-01,
5.478382336868175e-01,
5.450878675391179e-01,
5.387888979310328e-01,
5.362404523147913e-01,
5.337094641666161e-01,
5.282374991972423e-01,
5.259248454152894e-01,
5.236262814879684e-01,
5.182614254027890e-01,
5.158555312376837e-01,
5.134660721930772e-01,
5.077709278344216e-01,
5.053179820104490e-01,
5.028817741515068e-01,
4.974407642335685e-01,
4.952012718592664e-01,
4.929770110443390e-01,
4.880637159524582e-01,
4.859684632733423e-01,
4.838861488391641e-01,
4.790294758699501e-01,
4.768706516921771e-01,
4.747266903353097e-01,
4.696849124915995e-01,
4.675266380099869e-01,
4.653827515562493e-01,
4.605962240576634e-01,
4.586085068420688e-01,
4.566341643693850e-01,
4.522282618718538e-01,
4.503369915407668e-01,
4.484573583607938e-01,
4.440766399922996e-01,
4.421436521416536e-01,
4.402239033770282e-01,
4.357529446279363e-01,
4.338466468392896e-01,
4.319526749076715e-01,
4.277194009540304e-01,
4.259480291942094e-01,
4.241883948256027e-01,
4.202300998848154e-01,
4.185239108263153e-01,
4.168280792217872e-01,
4.128821814385824e-01,
4.111518412231812e-01,
4.094332017859506e-01,
4.054591322790473e-01,
4.037685048369686e-01,
4.020884387112946e-01,
3.983255882607288e-01,
3.967407957791139e-01,
3.951663235196693e-01,
3.916025661686941e-01,
3.900627879901100e-01,
3.885321706996029e-01,
3.849784483567140e-01,
3.834281028668022e-01,
3.818880643760414e-01,
3.783453864220350e-01,
3.768397186025170e-01,
3.753431216105738e-01,
3.719827243333056e-01,
3.705596473485314e-01,
3.691456730437073e-01,
3.659301712184497e-01,
3.645393288417587e-01,
3.631565593836533e-01,
3.599538911939479e-01,
3.585625234903733e-01,
3.571802315061479e-01,
3.540119540790766e-01,
3.526654577087503e-01,
3.513267792978637e-01,
3.483128923342382e-01,
3.470307463292109e-01,
3.457566587669847e-01,
3.428492728471916e-01,
3.415914251565042e-01,
3.403406775305906e-01,
3.374508890138111e-01,
3.361995660265230e-01,
3.349562467114465e-01,
3.321133636377239e-01,
3.309044582572311e-01,
3.297023177240813e-01,
3.269887097799309e-01,
3.258300483601927e-01,
3.246785486404988e-01,
3.220444676004350e-01,
3.209052709642934e-01,
3.197723133954909e-01,
],
1e-5: [
1.000000000000000e00,
1.000000000000000e00,
1.000000000000000e00,
1.000000000000000e00,
1.000000000000000e00,
1.000000000000000e00,
1.000000000000000e00,
1.000000000000000e00,
9.999999999999994e-01,
9.999999999999730e-01,
9.999999999998286e-01,
9.999999999994353e-01,
9.999999999985202e-01,
9.999999999965848e-01,
9.999999999914210e-01,
9.999999999815654e-01,
9.999999999642780e-01,
9.999999999357950e-01,
9.999999998244792e-01,
9.999999995977931e-01,
9.999999987558622e-01,
9.999999969054565e-01,
9.999999933717028e-01,
9.999999871894636e-01,
9.999999770945371e-01,
9.999999615055236e-01,
9.999999384936957e-01,
9.999999057511308e-01,
9.999998222204165e-01,
9.999996889877317e-01,
9.999994874837332e-01,
9.999991954947148e-01,
9.999987869759613e-01,
9.999978778927705e-01,
9.999965094234943e-01,
9.999945377974357e-01,
9.999917976154713e-01,
9.999881018395195e-01,
9.999832421044147e-01,
9.999769893073577e-01,
9.999690944252573e-01,
9.999592894153977e-01,
9.999401848217710e-01,
9.999149868024271e-01,
9.998825523031706e-01,
9.998416677700163e-01,
9.997910595925650e-01,
9.997294043565842e-01,
9.996553389790078e-01,
9.995674703998642e-01,
9.994643850266534e-01,
9.993446578768627e-01,
9.992068613208905e-01,
9.990495732666037e-01,
9.988713847467867e-01,
9.986709069016458e-01,
9.984467774204933e-01,
9.981976664056399e-01,
9.979222816859891e-01,
9.976193735296895e-01,
9.972877388222079e-01,
9.969262246907040e-01,
9.965337316535895e-01,
9.961092162612745e-01,
9.956516933110915e-01,
9.951602375882754e-01,
9.946339852520572e-01,
9.940721347818040e-01,
9.934739476574072e-01,
9.928387486091628e-01,
9.921659257085935e-01,
9.914549300099351e-01,
9.907052751686757e-01,
9.899165365560477e-01,
9.890883505198331e-01,
9.882204130401707e-01,
9.873124787412751e-01,
9.863643591446407e-01,
9.853759215358775e-01,
9.843470868593674e-01,
9.832778285388312e-01,
9.821681700449177e-01,
9.810181837626457e-01,
9.798279882517430e-01,
9.785977472491776e-01,
9.773276666309043e-01,
9.760179936330398e-01,
9.746690135131258e-01,
9.732810490694735e-01,
9.718544574219034e-01,
9.703896283469345e-01,
9.688869823629818e-01,
9.662560899379753e-01,
9.635203094788860e-01,
9.606821993784824e-01,
9.577444920821762e-01,
9.547100690698106e-01,
9.515819411387303e-01,
9.483632227783803e-01,
9.450571205528219e-01,
9.416669021033438e-01,
9.381959001044843e-01,
9.346474739566473e-01,
9.310250020056358e-01,
9.273318651957428e-01,
9.235714575597201e-01,
9.197471440818522e-01,
9.158622774784529e-01,
9.119201741214706e-01,
9.079241164053400e-01,
9.038773101206564e-01,
8.997829152782374e-01,
8.956440506961761e-01,
8.914637634840353e-01,
8.872450198327338e-01,
8.829907105217275e-01,
8.787036576363941e-01,
8.707492329602121e-01,
8.687467373185769e-01,
8.667391783027991e-01,
8.647268019171629e-01,
8.627098501557753e-01,
8.585978518951599e-01,
8.544698540634895e-01,
8.503277143709528e-01,
8.461732532471191e-01,
8.420082065243172e-01,
8.378342644577640e-01,
8.336530592572513e-01,
8.294661714342418e-01,
8.252750957681314e-01,
8.210812733406646e-01,
8.168861006274361e-01,
8.126909233407180e-01,
8.084970284424999e-01,
8.018705279973948e-01,
7.952548736797830e-01,
7.886543612921902e-01,
7.820730077033664e-01,
7.755145256970315e-01,
7.689823477006539e-01,
7.624796584457091e-01,
7.560093783176929e-01,
7.495742104755058e-01,
7.431766425064636e-01,
7.368189634231701e-01,
7.305032400696735e-01,
7.242313466189446e-01,
7.180049652774664e-01,
7.118256479784464e-01,
7.056948260292686e-01,
7.032583361895295e-01,
7.008298605278855e-01,
6.984094704872370e-01,
6.959972341046202e-01,
6.935932152404094e-01,
6.889317430774708e-01,
6.817668350489530e-01,
6.746798127752269e-01,
6.676718057412527e-01,
6.607437322115557e-01,
6.538963343303841e-01,
6.471301405820917e-01,
6.404455883106537e-01,
6.387872416184449e-01,
6.371340113850150e-01,
6.354859019542481e-01,
6.323179670207283e-01,
6.291691359105558e-01,
6.243492415503886e-01,
6.195747895117473e-01,
6.148457081070352e-01,
6.101619207395151e-01,
6.055232995385412e-01,
6.009296920940354e-01,
5.963809255739797e-01,
5.884265027623434e-01,
5.806112287097513e-01,
5.729337321255051e-01,
5.653924575507177e-01,
5.579857024351769e-01,
5.561548261765027e-01,
5.543322219963571e-01,
5.525178614099090e-01,
5.497619521958399e-01,
5.470251014183799e-01,
5.421958421339816e-01,
5.374261214332550e-01,
5.327153071018452e-01,
5.280627574108234e-01,
5.234678232248013e-01,
5.189298444469039e-01,
5.144481596152644e-01,
5.100220936678099e-01,
5.056509822776560e-01,
5.013341346033774e-01,
4.970708987429328e-01,
4.928605836118120e-01,
4.887024961805134e-01,
4.845959781161491e-01,
4.805403497251276e-01,
4.765349346341766e-01,
4.725790728234394e-01,
4.686720914229373e-01,
4.648133480441644e-01,
4.610021778781840e-01,
4.572379175659361e-01,
4.535199244031417e-01,
4.498475710369739e-01,
4.462202101246375e-01,
4.426372375753840e-01,
4.390980179451507e-01,
4.356019205688260e-01,
4.321483548727800e-01,
4.287367136802313e-01,
4.253663991812640e-01,
4.220368327619753e-01,
4.187474280885136e-01,
4.154976300176047e-01,
4.122868660369579e-01,
4.091145681113114e-01,
4.043631518312791e-01,
3.996975359393022e-01,
3.951158478794149e-01,
3.906162701651016e-01,
3.861970390711226e-01,
3.841086119708872e-01,
3.820380229719526e-01,
3.799850857201517e-01,
3.779496187391600e-01,
3.745426919402392e-01,
3.711844377457629e-01,
3.678740151738357e-01,
3.646106065243293e-01,
3.613933918170190e-01,
3.582215709845467e-01,
3.550943187555968e-01,
3.520108594211252e-01,
3.489704836755959e-01,
3.459724290685680e-01,
3.430159438599149e-01,
3.401003272980346e-01,
3.372248857211048e-01,
3.343889511711801e-01,
3.315918392385992e-01,
3.288328612250609e-01,
3.261113573130471e-01,
3.234267379284118e-01,
3.207783627593125e-01,
],
}
def scalar_bistable():
from neuron import rxd
h.load_file("stdrun.hoc")
s = h.Section(name="s")
s.nseg = 101
cyt = rxd.Region(h.allsec())
c = rxd.Species(
cyt, name="c", initial=lambda node: 1 if 0.4 < node.x < 0.6 else 0, d=1
)
r = rxd.Rate(c, -c * (1 - c) * (0.3 - c))
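    # The cubic rate above has stable states at c=0 and c=1 and an unstable
    # threshold at c=0.3, so the initial bump (0.4 < x < 0.6) spreads as a
    # travelling front; the final profile is checked against scalar_bistable_data.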
h.finitialize()
h.run()
# check the results
result = h.Vector(c.nodes.concentration)
cmpV = h.Vector(scalar_bistable_data)
cmpV.sub(result)
cmpV.abs()
if cmpV.sum() < 1e-6:
sys.exit(0)
sys.exit(-1)
def trivial_ecs(scale):
from neuron import h, crxd as rxd
import numpy
import warnings
warnings.simplefilter("ignore", UserWarning)
h.load_file("stdrun.hoc")
tstop = 10
if scale: # variable step case
h.CVode().active(True)
h.CVode().event(tstop)
else: # fixed step case
h.dt = 0.1
sec = h.Section() # NEURON requires at least 1 section
# enable extracellular RxD
rxd.options.enable.extracellular = True
# simulation parameters
dx = 1.0 # voxel size
L = 9.0 # length of initial cube
Lecs = 21.0 # lengths of ECS
# define the extracellular region
extracellular = rxd.Extracellular(
-Lecs / 2.0,
-Lecs / 2.0,
-Lecs / 2.0,
Lecs / 2.0,
Lecs / 2.0,
Lecs / 2.0,
dx=dx,
volume_fraction=0.2,
tortuosity=1.6,
)
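    # volume_fraction is the fraction of tissue occupied by the ECS; tortuosity slows
    # diffusion, giving an effective coefficient of roughly d / tortuosity**2.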
# define the extracellular species
k_rxd = rxd.Species(
extracellular,
name="k",
d=2.62,
charge=1,
atolscale=scale,
initial=lambda nd: 1.0
if abs(nd.x3d) <= L / 2.0 and abs(nd.y3d) <= L / 2.0 and abs(nd.z3d) <= L / 2.0
else 0.0,
)
# record the concentration at (0,0,0)
ecs_vec = h.Vector()
ecs_vec.record(k_rxd[extracellular].node_by_location(0, 0, 0)._ref_value)
h.finitialize()
h.continuerun(tstop) # run the simulation
# compare with previous solution
ecs_vec.sub(h.Vector(trivial_ecs_data[scale]))
ecs_vec.abs()
if ecs_vec.sum() > 1e-9:
return -1
return 0
class RxDTestCase(unittest.TestCase):
"""Tests of rxd"""
def test_rxd(self):
p = Process(target=scalar_bistable)
p.start()
p.join()
assert p.exitcode == 0
return 0
def test_ecs_diffusion_fixed_step(self):
p = Process(target=trivial_ecs, args=(False,))
p.start()
p.join()
assert p.exitcode == 0
return 0
def test_ecs_diffusion_variable_step_coarse(self):
p = Process(target=trivial_ecs, args=(1e-2,))
p.start()
p.join()
assert p.exitcode == 0
return 0
def test_ecs_diffusion_variable_step_fine(self):
p = Process(target=trivial_ecs, args=(1e-5,))
p.start()
p.join()
assert p.exitcode == 0
return 0
def suite():
suite = unittest.makeSuite(RxDTestCase, "test")
return suite
def test():
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
if __name__ == "__main__":
# unittest.main()
test()
|
tests/unit/test_gp_repurposer.py | apaleyes/xfer | 244 | 11144122 |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# ==============================================================================
from unittest.mock import patch
import numpy as np
import os
import GPy
from xfer import load, GpRepurposer
from .test_meta_model_repurposer import MetaModelRepurposerTestCase
from ..repurposer_test_utils import RepurposerTestUtils
class GpRepurposerTestCase(MetaModelRepurposerTestCase):
def setUp(self):
super().setUp()
self.repurposer_class = GpRepurposer
self.expected_accuracy = 0.45714285714285713
self.expected_accuracy_from_features = 0.7
self.train_feature_indices = np.arange(0, self.train_features.shape[1])
self.feature_mean = self.train_features.mean(axis=0)
self.num_data_points_to_predict = 10
self.num_data_points_to_train = 10
def test_train_model_from_features(self):
self._test_train_model_from_features(sparse_gp=True, multiple_kernels=True)
self._test_train_model_from_features(sparse_gp=True, multiple_kernels=False)
self._test_train_model_from_features(sparse_gp=False, multiple_kernels=True)
self._test_train_model_from_features(sparse_gp=False, multiple_kernels=False)
def _test_train_model_from_features(self, sparse_gp, multiple_kernels):
gp_repurposer = GpRepurposer(self.source_model, self.source_model_layers)
num_inducing = self.num_data_points_to_train
gp_repurposer.NUM_INDUCING_SPARSE_GP = num_inducing
if not sparse_gp: # Select a small data set to apply normal GP classification
self.train_features = self.train_features[:num_inducing]
self.train_labels = self.train_labels[:num_inducing]
self.feature_mean = self.train_features.mean(axis=0)
if multiple_kernels:
trained_model = gp_repurposer._train_model_from_features(self.train_features, self.train_labels,
{'l1': self.train_feature_indices[:4],
'l2': self.train_feature_indices[4:]})
else:
trained_model = gp_repurposer._train_model_from_features(self.train_features, self.train_labels,
{'l1': self.train_feature_indices})
assert np.array_equal(gp_repurposer.feature_mean, self.feature_mean)
self._validate_trained_gp_model(trained_model, sparse_gp, num_inducing, multiple_kernels)
def _validate_trained_gp_model(self, trained_model, sparse_gp, num_inducing, multiple_kernels):
# Validate type of model
if sparse_gp:
assert all(isinstance(model, GPy.models.SparseGPClassification) for model in trained_model)
else:
assert all(isinstance(model, GPy.models.GPClassification) for model in trained_model)
for index, model in enumerate(trained_model):
if multiple_kernels:
assert isinstance(model.kern, GPy.kern.Add)
assert isinstance(model.kern.l1, GPy.kern.RBF)
assert isinstance(model.kern.l2, GPy.kern.RBF)
else:
assert isinstance(model.kern, GPy.kern.RBF)
assert np.array_equal(model.kern.active_dims, self.train_feature_indices)
expected_labels = np.loadtxt('{}GPmodel.{}.Y.out'.format(self._test_data_dir, index)).reshape(103, 1)
expected_features = self.train_features - self.feature_mean
if not sparse_gp: # A smaller data set was selected to apply normal GP classification
expected_labels = expected_labels[:num_inducing]
expected_features = expected_features[:num_inducing]
assert np.array_equal(model.Y, expected_labels)
assert np.array_equal(model.X, expected_features)
def test_predict_label_from_features(self):
gp_repurposer = GpRepurposer(self.source_model, self.source_model_layers, apply_l2_norm=True)
gp_repurposer.target_model = gp_repurposer._train_model_from_features(
self.train_features[:self.num_data_points_to_train],
self.train_labels[:self.num_data_points_to_train],
{'l1': self.train_feature_indices})
predicted_labels = gp_repurposer._predict_label_from_features(self.test_features
[:self.num_data_points_to_predict])
self._validate_prediction_results(predicted_labels, test_predict_probability=False,
expected_accuracy=self.expected_accuracy_from_features,
num_predictions=self.num_data_points_to_predict)
def test_predict_probability_from_features(self):
gp_repurposer = GpRepurposer(self.source_model, self.source_model_layers, apply_l2_norm=True)
gp_repurposer.target_model = gp_repurposer._train_model_from_features(
self.train_features[:self.num_data_points_to_train],
self.train_labels[:self.num_data_points_to_train],
{'l1': self.train_feature_indices})
predictions = gp_repurposer._predict_probability_from_features(self.test_features
[:self.num_data_points_to_predict])
self._validate_prediction_results(predictions, test_predict_probability=True,
expected_accuracy=self.expected_accuracy_from_features,
num_predictions=self.num_data_points_to_predict)
@patch(RepurposerTestUtils.META_MODEL_REPURPOSER_MODEL_HANDLER_CLASS)
def test_repurpose(self, mock_model_handler):
# Patch model_handler and then create gp_repurposer
mock_model_handler.return_value = RepurposerTestUtils.get_mock_model_handler_object()
mock_model_handler.return_value.get_layer_output.return_value = {'l1': self.train_features}, self.train_labels
gp_repurposer = GpRepurposer(self.source_model, self.source_model_layers)
gp_repurposer.NUM_INDUCING_SPARSE_GP = 5 # To speed-up unit test running time
self._run_common_repurposer_tests(gp_repurposer)
def _validate_trained_model(self, target_model):
self._validate_trained_gp_model(target_model, sparse_gp=True, num_inducing=100, multiple_kernels=False)
@patch.object(GpRepurposer, RepurposerTestUtils.VALIDATE_REPURPOSE_METHOD_NAME)
@patch(RepurposerTestUtils.META_MODEL_REPURPOSER_MODEL_HANDLER_CLASS)
def test_repurpose_calls_validate(self, mock_model_handler, mock_validate_method):
self._test_repurpose_calls_validate(mock_model_handler, mock_validate_method)
@patch.object(GpRepurposer, RepurposerTestUtils.VALIDATE_PREDICT_METHOD_NAME)
@patch(RepurposerTestUtils.META_MODEL_REPURPOSER_MODEL_HANDLER_CLASS)
def test_predict_label(self, mock_model_handler, validate_method):
self._test_predict(mock_model_handler, validate_method, test_predict_probability=False,
expected_accuracy=self.expected_accuracy)
@patch.object(GpRepurposer, RepurposerTestUtils.VALIDATE_PREDICT_METHOD_NAME)
@patch(RepurposerTestUtils.META_MODEL_REPURPOSER_MODEL_HANDLER_CLASS)
def test_predict_probability(self, mock_model_handler, validate_method):
self._test_predict(mock_model_handler, validate_method, test_predict_probability=True,
expected_accuracy=self.expected_accuracy)
def test_serialisation(self):
self._test_gp_serialisation(sparse_gp=True, multiple_kernels=False)
self._test_gp_serialisation(sparse_gp=True, multiple_kernels=True)
self._test_gp_serialisation(sparse_gp=False, multiple_kernels=True)
self._test_gp_serialisation(sparse_gp=False, multiple_kernels=False)
def _test_gp_serialisation(self, sparse_gp, multiple_kernels):
gp_repurposer = GpRepurposer(self.source_model, self.source_model_layers, apply_l2_norm=True)
num_inducing = 2
gp_repurposer.NUM_INDUCING_SPARSE_GP = num_inducing
if not sparse_gp: # Select a small data set to apply normal GP classification
self.train_features = self.train_features[:num_inducing]
self.train_labels = self.train_labels[:num_inducing]
self.feature_mean = self.train_features.mean(axis=0)
if multiple_kernels:
gp_repurposer.target_model = gp_repurposer._train_model_from_features(self.train_features,
self.train_labels,
{'l1': self.train_feature_indices[:4],
'l2': self.train_feature_indices[
4:]})
else:
gp_repurposer.target_model = gp_repurposer._train_model_from_features(self.train_features,
self.train_labels,
{'l1': self.train_feature_indices})
# Save and load repurposer to test serialization
loaded_repurposer = self._save_and_load_repurposer(gp_repurposer)
# Validate repurposer properties
self._compare_gp_repurposers(gp_repurposer, loaded_repurposer)
# Get prediction results using both repurposers
predictions_before = gp_repurposer._predict_probability_from_features(self.test_features
[:self.num_data_points_to_predict])
predictions_after = loaded_repurposer._predict_probability_from_features(self.test_features
[:self.num_data_points_to_predict])
# Compare probabilities predicted per test instance
self.assertTrue(predictions_before.shape == predictions_after.shape,
"Prediction shape is incorrect. Expected: {} Actual: {}"
.format(predictions_before.shape, predictions_after.shape))
for sample_id, prediction in enumerate(predictions_before):
self.assertTrue(np.allclose(prediction, predictions_after[sample_id]),
"Incorrect prediction for sample id: {}. Expected: {} Actual: {}"
.format(sample_id, predictions_before[sample_id], predictions_after[sample_id]))
# Validate if accuracy is above expected threshold
predicted_labels = np.argmax(predictions_after, axis=1)
accuracy = np.mean(predicted_labels == self.test_labels[:self.num_data_points_to_predict])
expected_accuracy = 0.3
self.assertTrue(accuracy >= expected_accuracy, "Accuracy {} less than {}".format(accuracy, expected_accuracy))
def _save_and_load_repurposer(self, gp_repurposer):
file_path = 'test_serialisation'
RepurposerTestUtils._remove_files_with_prefix(file_path)
assert not os.path.isfile(file_path + '.json')
gp_repurposer.save_repurposer(model_name=file_path, save_source_model=False)
assert os.path.isfile(file_path + '.json')
loaded_repurposer = load(file_path, source_model=gp_repurposer.source_model)
RepurposerTestUtils._remove_files_with_prefix(file_path)
return loaded_repurposer
def _compare_gp_repurposers(self, repurposer1, repurposer2):
self.assertTrue(type(repurposer1) == type(repurposer2),
"Incorrect repurposer type. Expected: {} Actual: {}".format(type(repurposer1),
type(repurposer2)))
self.assertTrue(isinstance(repurposer2.target_model, type(repurposer1.target_model)),
"Incorrect target_model type. Expected: {} Actual: {}".format(type(repurposer1.target_model),
type(repurposer2.target_model)))
self.assertTrue(len(repurposer1.target_model) == len(repurposer2.target_model),
"Incorrect number of target_models. Expected:{} Actual:{}"
.format(len(repurposer1.target_model), len(repurposer2.target_model)))
for model_id, target_model in enumerate(repurposer1.target_model):
self.assertTrue(isinstance(repurposer2.target_model[model_id], type(target_model)),
"Incorrect GP model type. Expected:{} Actual:{}"
.format(type(repurposer1.target_model[model_id]), type(repurposer2.target_model[model_id])))
RepurposerTestUtils._assert_common_attributes_equal(repurposer1, repurposer2)
def test_binary_classification(self):
train_features = np.array([[0.0286274, 0.41107054, 0.30557073], [0.18646135, 0.71026038, 0.87030804],
[0.46904668, 0.96190886, 0.85772885], [0.40327128, 0.5739354, 0.21895921],
[0.53548, 0.9645708, 0.56493308], [0.80917639, 0.78891976, 0.96257564],
[0.10951679, 0.75733494, 0.10935291]])
train_labels = np.array([0, 0, 0, 1, 0, 0, 1])
gp_repurposer = GpRepurposer(self.source_model, self.source_model_layers)
gp_repurposer.target_model = gp_repurposer._train_model_from_features(train_features, train_labels,
{'l1': np.arange(0, 3)})
self.assertTrue(len(gp_repurposer.target_model) == 1,
"Number of GP models expected: 1. Got: {}".format(len(gp_repurposer.target_model)))
# Validate predicted probabilities
test_features = np.array([[0.63747595, 0.86516482, 0.21255967],
[0.33403457, 0.43162212, 0.77119909],
[0.1678248, 0.41870605, 0.37232554]])
test_labels = np.array([1, 0, 0])
expected_probabilities = np.array([[0.48597323, 0.51402677],
[0.67488224, 0.32511776],
[0.55386502, 0.44613498]])
predicted_probabilities = gp_repurposer._predict_probability_from_features(test_features)
self.assertTrue(np.allclose(predicted_probabilities, expected_probabilities))
# Validate predicted labels
predicted_labels = gp_repurposer._predict_label_from_features(test_features)
self.assertTrue(np.array_equal(predicted_labels, test_labels))
|
getsub/__version__.py | pzgz/GetSubtitles | 793 | 11144138 | __version__ = "1.7.9"
|
lintreview/tools/pytype.py | jsoref/lint-review | 271 | 11144142 | import hashlib
import os
import re
import logging
import lintreview.docker as docker
from lintreview.tools import Tool, extract_version
from lintreview.review import IssueComment
buildlog = logging.getLogger('buildlog')
class Pytype(Tool):
name = 'pytype'
def version(self):
output = docker.run('pytype', ['pytype', '--version'], self.base_path)
return extract_version(output)
def check_dependencies(self):
"""See if the pytype image exists
"""
return docker.image_exists('pytype')
def match_file(self, filename):
base = os.path.basename(filename)
name, ext = os.path.splitext(base)
return ext in ('.py', '.pyi')
def has_fixer(self):
"""pytype has a fixer that can be enabled through configuration.
"""
return bool(self.options.get('fixer', False))
def process_files(self, files):
"""
Run code checks with pytype.
Only a single process is made for all files
to save resources.
"""
command = self._apply_options(['pytype'])
command += files
output = docker.run(
'pytype',
command,
source_dir=self.base_path)
if not output:
return
self.parse_output(output)
def _apply_options(self, command):
if 'config' in self.options:
command.extend(['--config', docker.apply_base(self.options['config'])])
command.extend(['-o', '/tmp/pytype'])
return command
def parse_output(self, output):
"""
        Pytype has its own output format that is not machine readable, so we use
        a regex and substring checks to munge it into something usable. The output looks like:
```
Computing dependencies
Analyzing 1 sources with 0 local dependencies
ninja: Entering directory `/src/.pytype'
[1/1] check has_errors
FAILED: /src/.pytype/pyi/has_errors.pyi
pytype-single --imports_info /src/.pytype/imports/has_errors.imports ...
File "../pytype/has_errors.py", line 5, in get_username: message text [attribute-error]
In Optional[Match[str]]
File "../pytype/has_errors.py", line 8, in <module>: message text: '1' [bad-slots]
```
We use regex to slice out the file, line and message information.
"""
message = ''
lineno = 0
filename = ''
message_pattern = re.compile(
r'File "(?P<file>[^"]+)",\s+line\s+(?P<line>\d+),[^:]+\:\s+(?P<message>.*)'
)
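        # For example, for the sample line
        #   File "../pytype/has_errors.py", line 5, in get_username: message text [attribute-error]
        # the pattern yields file='../pytype/has_errors.py', line='5' and
        # message='message text [attribute-error]'.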
lines = output.split('\n')
if len(lines) and lines[0].startswith('CRITICAL'):
message = (
u"Pytype failed with the following error:\n"
"```\n"
"{}\n"
"```\n"
)
self.problems.add(IssueComment(message.format("\n".join(lines))))
for line in output.split("\n"):
# Some errors have continuations on subsequent lines
if len(message) and not line.startswith('File'):
message = message + ' ' + line.strip()
continue
if line.startswith('File '):
                # Starting a new message; flush the previous one to the problem list.
if filename and message:
self.problems.add(filename, lineno, message)
filename = ''
lineno = 0
message = ''
matches = message_pattern.match(line)
lineno = int(matches.group('line'))
filename = docker.strip_base(matches.group('file'))
message = matches.group('message')
if filename and message:
self.problems.add(filename, lineno, message)
def _container_name(self, files):
m = hashlib.md5()
m.update('-'.join(files).encode('utf8'))
return 'pytype-' + m.hexdigest()
def process_fixer(self, files):
"""
Autofixing typing errors requires generating type
stubs and then applying them individually.
"""
command = self._apply_options(['pytype'])
command += files
container_name = self._container_name(files)
# run in a container that sticks around so we can
# run merge-pyi on the output files.
docker.run(
'pytype',
command,
source_dir=self.base_path,
name=container_name)
        buildlog.info('Creating custom image for pytype')
docker.commit(container_name)
docker.rm_container(container_name)
update_command = ['merge-pyi-wrapper']
update_command += files
# Apply merge-pyi
        out = ''  # defined up front so the except handler can still log if docker.run fails
        try:
out = docker.run(
container_name,
update_command,
source_dir=self.base_path
)
except Exception as e:
buildlog.warning('Pytype merging failed. error=%s output=%s', e, out)
finally:
buildlog.info('Removing custom pytype image')
docker.rm_image(container_name)
|
PyFin/Env/Settings.py | rpatil524/Finance-Python | 325 | 11144212 |
# -*- coding: utf-8 -*-
u"""
Created on 2015-8-14
@author: cheng.li
"""
from PyFin.DateUtilities import Date
from PyFin.Utilities import require
class SettingsFactory(object):
def __init__(self):
self._evaluationDate = None
self._includeToday = None
@property
def evaluationDate(self):
if not self._evaluationDate:
return Date.todaysDate()
return self._evaluationDate
@evaluationDate.setter
def evaluationDate(self, value):
require(isinstance(value, Date), ValueError, "{0} is not a valid PyFin date object".format(value))
self._evaluationDate = value
@property
def includeTodaysCashFlows(self):
return self._includeToday
@includeTodaysCashFlows.setter
def includeTodaysCashFlows(self, value):
        self._includeToday = value
def resetEvaluationDate(self):
self._evaluationDate = None
def anchorEvaluationDate(self):
if self._evaluationDate is None:
self._evaluationDate = Date.todaysDate()
Settings = SettingsFactory()
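# Example usage (a sketch; Date(2015, 8, 14) assumes PyFin's year/month/day constructor):
#     from PyFin.Env.Settings import Settings
#     from PyFin.DateUtilities import Date
#     Settings.evaluationDate = Date(2015, 8, 14)
#     print(Settings.evaluationDate)
#     Settings.resetEvaluationDate()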
|
chrome/installer/tools/shortcut_properties.py | google-ar/chromium | 2,151 | 11144224 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Dumps a Windows shortcut's property bag to stdout.
This is required to confirm correctness of properties that aren't readily
available in Windows UI.
"""
import optparse
from pywintypes import IID
import sys
from win32com.propsys import propsys
from win32com.propsys import pscon
def PrintShortcutProperties(shortcut_path, dump_all):
properties = propsys.SHGetPropertyStoreFromParsingName(shortcut_path)
print 'Known properties (--dump-all for more):'
app_id = properties.GetValue(pscon.PKEY_AppUserModel_ID).GetValue()
print '\tAppUserModelId => "%s"' % app_id
# Hard code PKEY_AppUserModel_IsDualMode as pscon doesn't support it.
PKEY_AppUserModel_IsDualMode = (IID('{9F4C2855-9F79-4B39-A8D0-E1D42DE1D5F3}'),
11)
dual_mode = properties.GetValue(PKEY_AppUserModel_IsDualMode).GetValue()
print '\tDual Mode => "%s"' % dual_mode
# Dump all other properties with their raw ID if requested, add them above
# over time as we explicitly care about more properties, see propkey.h or
# pscon.py for a reference of existing PKEYs' meaning.
if dump_all:
print '\nOther properties:'
for i in range(0, properties.GetCount()):
property_key = properties.GetAt(i)
property_value = properties.GetValue(property_key).GetValue()
print '\t%s => "%s"' % (property_key, property_value)
def main():
usage = 'usage: %prog [options] "C:\\Path\\To\\My Shortcut.lnk"'
parser = optparse.OptionParser(usage,
description="Dumps a shortcut's properties.")
parser.add_option('-a', '--dump-all', action='store_true', dest='dump_all',
default=False)
options, args = parser.parse_args()
if len(args) != 1:
parser.error('incorrect number of arguments')
PrintShortcutProperties(args[0], options.dump_all)
if __name__ == '__main__':
sys.exit(main())
|
app/controllers/api/records.py | grepleria/SnitchDNS | 152 | 11144243 | from . import bp
from app.lib.base.decorators import api_auth
from app.lib.api.records import ApiRecords
from flask_login import current_user
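# In every route below, <zone> may be either a numeric zone id or a domain name;
# each handler resolves it to zone_id/domain before delegating to ApiRecords.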
@bp.route('/zones/<string:zone>/records', methods=['GET'])
@api_auth
def zone_records(zone):
user_id = None if current_user.admin else current_user.id
domain = None
zone_id = None
if zone.isdigit():
zone_id = int(zone)
else:
domain = zone
return ApiRecords().all(user_id, zone_id=zone_id, domain=domain)
@bp.route('/zones/<string:zone>/records', methods=['POST'])
@api_auth
def zone_records_create(zone):
domain = None
zone_id = None
if zone.isdigit():
zone_id = int(zone)
else:
domain = zone
return ApiRecords().create(current_user.id, zone_id=zone_id, domain=domain)
@bp.route('/zones/<string:zone>/records/<int:id>', methods=['GET'])
@api_auth
def zone_record_by_id(zone, id):
user_id = None if current_user.admin else current_user.id
domain = None
zone_id = None
if zone.isdigit():
zone_id = int(zone)
else:
domain = zone
return ApiRecords().one(user_id, id, zone_id=zone_id, domain=domain)
@bp.route('/zones/<string:zone>/records/<int:id>', methods=['POST'])
@api_auth
def zone_records_update(zone, id):
user_id = None if current_user.admin else current_user.id
domain = None
zone_id = None
if zone.isdigit():
zone_id = int(zone)
else:
domain = zone
return ApiRecords().update(user_id, id, zone_id=zone_id, domain=domain)
@bp.route('/zones/<string:zone>/records/<int:id>', methods=['DELETE'])
@api_auth
def zone_record_delete(zone, id):
user_id = None if current_user.admin else current_user.id
domain = None
zone_id = None
if zone.isdigit():
zone_id = int(zone)
else:
domain = zone
return ApiRecords().delete(user_id, id, zone_id=zone_id, domain=domain)
@bp.route('/records/classes', methods=['GET'])
@api_auth
def record_classes():
return ApiRecords().classes()
@bp.route('/records/types', methods=['GET'])
@api_auth
def record_types():
return ApiRecords().types()
|
challenge_2/python/bryantpq/search_item.py | rchicoli/2017-challenges | 271 | 11144265 |
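# Counts how often each element occurs, then reports the first value that appears
# exactly once (relies on dict insertion order, i.e. Python 3.7+).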
def search_item(a):
d = {}
for i in a:
if i in d:
d[i] += 1
else:
d[i] = 1
if 1 not in d.values():
print("No unique values found!")
return
for k, v in d.items():
if v == 1:
print("Unique value is " + str(k))
return
if __name__ == "__main__":
a = [2,3,4,2,3,5,4,6,4,6,9,10,9,8,7,8,10,7]
search_item(a)
b = [2,'a','l',3,'l',4,'k',2,3,4,'a',6,'c',4,'m',6,'m','k',9,10,9,8,7,8,10,7]
search_item(b)
|
azure-devops/azext_devops/devops_sdk/v5_1/project_analysis/models.py | keithlemon/azure-devops-cli-extension | 326 | 11144272 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class CodeChangeTrendItem(Model):
"""
:param time:
:type time: datetime
:param value:
:type value: int
"""
_attribute_map = {
'time': {'key': 'time', 'type': 'iso-8601'},
'value': {'key': 'value', 'type': 'int'}
}
def __init__(self, time=None, value=None):
super(CodeChangeTrendItem, self).__init__()
self.time = time
self.value = value
class LanguageMetricsSecuredObject(Model):
"""
:param namespace_id:
:type namespace_id: str
:param project_id:
:type project_id: str
:param required_permissions:
:type required_permissions: int
"""
_attribute_map = {
'namespace_id': {'key': 'namespaceId', 'type': 'str'},
'project_id': {'key': 'projectId', 'type': 'str'},
'required_permissions': {'key': 'requiredPermissions', 'type': 'int'}
}
def __init__(self, namespace_id=None, project_id=None, required_permissions=None):
super(LanguageMetricsSecuredObject, self).__init__()
self.namespace_id = namespace_id
self.project_id = project_id
self.required_permissions = required_permissions
class LanguageStatistics(LanguageMetricsSecuredObject):
"""
:param namespace_id:
:type namespace_id: str
:param project_id:
:type project_id: str
:param required_permissions:
:type required_permissions: int
:param bytes:
:type bytes: long
:param files:
:type files: int
:param files_percentage:
:type files_percentage: float
:param language_percentage:
:type language_percentage: float
:param name:
:type name: str
"""
_attribute_map = {
'namespace_id': {'key': 'namespaceId', 'type': 'str'},
'project_id': {'key': 'projectId', 'type': 'str'},
'required_permissions': {'key': 'requiredPermissions', 'type': 'int'},
'bytes': {'key': 'bytes', 'type': 'long'},
'files': {'key': 'files', 'type': 'int'},
'files_percentage': {'key': 'filesPercentage', 'type': 'float'},
'language_percentage': {'key': 'languagePercentage', 'type': 'float'},
'name': {'key': 'name', 'type': 'str'}
}
def __init__(self, namespace_id=None, project_id=None, required_permissions=None, bytes=None, files=None, files_percentage=None, language_percentage=None, name=None):
super(LanguageStatistics, self).__init__(namespace_id=namespace_id, project_id=project_id, required_permissions=required_permissions)
self.bytes = bytes
self.files = files
self.files_percentage = files_percentage
self.language_percentage = language_percentage
self.name = name
class ProjectActivityMetrics(Model):
"""
:param authors_count:
:type authors_count: int
:param code_changes_count:
:type code_changes_count: int
:param code_changes_trend:
:type code_changes_trend: list of :class:`CodeChangeTrendItem <azure.devops.v5_1.project_analysis.models.CodeChangeTrendItem>`
:param project_id:
:type project_id: str
:param pull_requests_completed_count:
:type pull_requests_completed_count: int
:param pull_requests_created_count:
:type pull_requests_created_count: int
"""
_attribute_map = {
'authors_count': {'key': 'authorsCount', 'type': 'int'},
'code_changes_count': {'key': 'codeChangesCount', 'type': 'int'},
'code_changes_trend': {'key': 'codeChangesTrend', 'type': '[CodeChangeTrendItem]'},
'project_id': {'key': 'projectId', 'type': 'str'},
'pull_requests_completed_count': {'key': 'pullRequestsCompletedCount', 'type': 'int'},
'pull_requests_created_count': {'key': 'pullRequestsCreatedCount', 'type': 'int'}
}
def __init__(self, authors_count=None, code_changes_count=None, code_changes_trend=None, project_id=None, pull_requests_completed_count=None, pull_requests_created_count=None):
super(ProjectActivityMetrics, self).__init__()
self.authors_count = authors_count
self.code_changes_count = code_changes_count
self.code_changes_trend = code_changes_trend
self.project_id = project_id
self.pull_requests_completed_count = pull_requests_completed_count
self.pull_requests_created_count = pull_requests_created_count
class ProjectLanguageAnalytics(LanguageMetricsSecuredObject):
"""
:param namespace_id:
:type namespace_id: str
:param project_id:
:type project_id: str
:param required_permissions:
:type required_permissions: int
:param id:
:type id: str
:param language_breakdown:
:type language_breakdown: list of :class:`LanguageStatistics <azure.devops.v5_1.project_analysis.models.LanguageStatistics>`
:param repository_language_analytics:
:type repository_language_analytics: list of :class:`RepositoryLanguageAnalytics <azure.devops.v5_1.project_analysis.models.RepositoryLanguageAnalytics>`
:param result_phase:
:type result_phase: object
:param url:
:type url: str
"""
_attribute_map = {
'namespace_id': {'key': 'namespaceId', 'type': 'str'},
'project_id': {'key': 'projectId', 'type': 'str'},
'required_permissions': {'key': 'requiredPermissions', 'type': 'int'},
'id': {'key': 'id', 'type': 'str'},
'language_breakdown': {'key': 'languageBreakdown', 'type': '[LanguageStatistics]'},
'repository_language_analytics': {'key': 'repositoryLanguageAnalytics', 'type': '[RepositoryLanguageAnalytics]'},
'result_phase': {'key': 'resultPhase', 'type': 'object'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, namespace_id=None, project_id=None, required_permissions=None, id=None, language_breakdown=None, repository_language_analytics=None, result_phase=None, url=None):
super(ProjectLanguageAnalytics, self).__init__(namespace_id=namespace_id, project_id=project_id, required_permissions=required_permissions)
self.id = id
self.language_breakdown = language_breakdown
self.repository_language_analytics = repository_language_analytics
self.result_phase = result_phase
self.url = url
class RepositoryActivityMetrics(Model):
"""
:param code_changes_count:
:type code_changes_count: int
:param code_changes_trend:
:type code_changes_trend: list of :class:`CodeChangeTrendItem <azure.devops.v5_1.project_analysis.models.CodeChangeTrendItem>`
:param repository_id:
:type repository_id: str
"""
_attribute_map = {
'code_changes_count': {'key': 'codeChangesCount', 'type': 'int'},
'code_changes_trend': {'key': 'codeChangesTrend', 'type': '[CodeChangeTrendItem]'},
'repository_id': {'key': 'repositoryId', 'type': 'str'}
}
def __init__(self, code_changes_count=None, code_changes_trend=None, repository_id=None):
super(RepositoryActivityMetrics, self).__init__()
self.code_changes_count = code_changes_count
self.code_changes_trend = code_changes_trend
self.repository_id = repository_id
class RepositoryLanguageAnalytics(LanguageMetricsSecuredObject):
"""
:param namespace_id:
:type namespace_id: str
:param project_id:
:type project_id: str
:param required_permissions:
:type required_permissions: int
:param id:
:type id: str
:param language_breakdown:
:type language_breakdown: list of :class:`LanguageStatistics <azure.devops.v5_1.project_analysis.models.LanguageStatistics>`
:param name:
:type name: str
:param result_phase:
:type result_phase: object
:param updated_time:
:type updated_time: datetime
"""
_attribute_map = {
'namespace_id': {'key': 'namespaceId', 'type': 'str'},
'project_id': {'key': 'projectId', 'type': 'str'},
'required_permissions': {'key': 'requiredPermissions', 'type': 'int'},
'id': {'key': 'id', 'type': 'str'},
'language_breakdown': {'key': 'languageBreakdown', 'type': '[LanguageStatistics]'},
'name': {'key': 'name', 'type': 'str'},
'result_phase': {'key': 'resultPhase', 'type': 'object'},
'updated_time': {'key': 'updatedTime', 'type': 'iso-8601'}
}
def __init__(self, namespace_id=None, project_id=None, required_permissions=None, id=None, language_breakdown=None, name=None, result_phase=None, updated_time=None):
super(RepositoryLanguageAnalytics, self).__init__(namespace_id=namespace_id, project_id=project_id, required_permissions=required_permissions)
self.id = id
self.language_breakdown = language_breakdown
self.name = name
self.result_phase = result_phase
self.updated_time = updated_time
__all__ = [
'CodeChangeTrendItem',
'LanguageMetricsSecuredObject',
'LanguageStatistics',
'ProjectActivityMetrics',
'ProjectLanguageAnalytics',
'RepositoryActivityMetrics',
'RepositoryLanguageAnalytics',
]
|
bazel/deps.bzl | mway/prototool | 4,922 | 11144274 |
load("@bazel_gazelle//:deps.bzl", "go_repository")
def prototool_deps(**kwargs):
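    # Each go_repository pins one Go module: `importpath` names the module while
    # `sum` and `version` mirror the go.sum/go.mod entries so fetches are reproducible.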
go_repository(
name = "co_honnef_go_tools",
importpath = "honnef.co/go/tools",
sum = "h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=",
version = "v0.0.1-2019.2.3",
)
go_repository(
name = "com_github_burntsushi_toml",
importpath = "github.com/BurntSushi/toml",
sum = "h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=",
version = "v0.3.1",
)
go_repository(
name = "com_github_client9_misspell",
importpath = "github.com/client9/misspell",
sum = "h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=",
version = "v0.3.4",
)
go_repository(
name = "com_github_cpuguy83_go_md2man",
importpath = "github.com/cpuguy83/go-md2man",
sum = "h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=",
version = "v1.0.10",
)
go_repository(
name = "com_github_davecgh_go_spew",
importpath = "github.com/davecgh/go-spew",
sum = "h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=",
version = "v1.1.1",
)
go_repository(
name = "com_github_emicklei_proto",
importpath = "github.com/emicklei/proto",
sum = "h1:l0QiNT6Qs7Yj0Mb4X6dnWBQer4ebei2BFcgQLbGqUDc=",
version = "v1.9.0",
)
go_repository(
name = "com_github_fullstorydev_grpcurl",
importpath = "github.com/fullstorydev/grpcurl",
sum = "h1:rKQyAaegPtCj4mpItnCHd+PIEHspIZl14VWhHYIHhls=",
version = "v1.4.0",
)
go_repository(
name = "com_github_gobuffalo_flect",
importpath = "github.com/gobuffalo/flect",
sum = "h1:GPoRjEN0QObosV4XwuoWvSd5uSiL0N3e91/xqyY4crQ=",
version = "v0.2.1",
)
go_repository(
name = "com_github_gofrs_flock",
importpath = "github.com/gofrs/flock",
sum = "h1:DP+LD/t0njgoPBvT5MJLeliUIVQR03hiKR6vezdwHlc=",
version = "v0.7.1",
)
go_repository(
name = "com_github_golang_glog",
importpath = "github.com/golang/glog",
sum = "h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=",
version = "v0.0.0-20160126235308-23def4e6c14b",
)
go_repository(
name = "com_github_golang_mock",
importpath = "github.com/golang/mock",
sum = "h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8=",
version = "v1.1.1",
)
go_repository(
name = "com_github_golang_protobuf",
importpath = "github.com/golang/protobuf",
sum = "h1:87PNWwrRvUSnqS4dlcBU/ftvOIBep4sYuBLlh6rX2wk=",
version = "v1.3.4",
)
go_repository(
name = "com_github_inconshreveable_mousetrap",
importpath = "github.com/inconshreveable/mousetrap",
sum = "h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=",
version = "v1.0.0",
)
go_repository(
name = "com_github_jhump_protoreflect",
importpath = "github.com/jhump/protoreflect",
sum = "h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE=",
version = "v1.6.0",
)
go_repository(
name = "com_github_mitchellh_go_wordwrap",
importpath = "github.com/mitchellh/go-wordwrap",
sum = "h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4=",
version = "v1.0.0",
)
go_repository(
name = "com_github_pkg_errors",
importpath = "github.com/pkg/errors",
sum = "h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=",
version = "v0.8.1",
)
go_repository(
name = "com_github_pmezard_go_difflib",
importpath = "github.com/pmezard/go-difflib",
sum = "h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=",
version = "v1.0.0",
)
go_repository(
name = "com_github_russross_blackfriday",
importpath = "github.com/russross/blackfriday",
sum = "h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=",
version = "v1.5.2",
)
go_repository(
name = "com_github_spf13_cobra",
importpath = "github.com/spf13/cobra",
sum = "h1:breEStsVwemnKh2/s6gMvSdMEkwW0sK8vGStnlVBMCs=",
version = "v0.0.6",
)
go_repository(
name = "com_github_spf13_pflag",
importpath = "github.com/spf13/pflag",
sum = "h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=",
version = "v1.0.5",
)
go_repository(
name = "com_github_stretchr_objx",
importpath = "github.com/stretchr/objx",
sum = "h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=",
version = "v0.1.1",
)
go_repository(
name = "com_github_stretchr_testify",
importpath = "github.com/stretchr/testify",
sum = "h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=",
version = "v1.5.1",
)
go_repository(
name = "com_google_cloud_go",
importpath = "cloud.google.com/go",
sum = "h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ=",
version = "v0.26.0",
)
go_repository(
name = "in_gopkg_check_v1",
importpath = "gopkg.in/check.v1",
sum = "h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=",
version = "v1.0.0-20180628173108-788fd7840127",
)
go_repository(
name = "in_gopkg_yaml_v2",
importpath = "gopkg.in/yaml.v2",
sum = "h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=",
version = "v2.2.8",
)
go_repository(
name = "org_golang_google_appengine",
importpath = "google.golang.org/appengine",
sum = "h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=",
version = "v1.4.0",
)
go_repository(
name = "org_golang_google_genproto",
importpath = "google.golang.org/genproto",
sum = "h1:IXPzGf8J51hBQirC+OIHbIlTuVYOMarft+Wvi+qDzmg=",
version = "v0.0.0-20200311144346-b662892dd51b",
)
go_repository(
name = "org_golang_google_grpc",
importpath = "google.golang.org/grpc",
sum = "h1:bO/TA4OxCOummhSf10siHuG7vJOiwh7SpRpFZDkOgl4=",
version = "v1.28.0",
)
go_repository(
name = "org_golang_x_lint",
importpath = "golang.org/x/lint",
sum = "h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k=",
version = "v0.0.0-20200302205851-738671d3881b",
)
go_repository(
name = "org_golang_x_net",
importpath = "golang.org/x/net",
sum = "h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0=",
version = "v0.0.0-20200301022130-244492dfa37a",
)
go_repository(
name = "org_golang_x_oauth2",
importpath = "golang.org/x/oauth2",
sum = "h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs=",
version = "v0.0.0-20180821212333-d2e6202438be",
)
go_repository(
name = "org_golang_x_sync",
importpath = "golang.org/x/sync",
sum = "h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=",
version = "v0.0.0-20190911185100-cd5d95a43a6e",
)
go_repository(
name = "org_golang_x_sys",
importpath = "golang.org/x/sys",
sum = "h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So=",
version = "v0.0.0-20200302150141-5c8b2ff67527",
)
go_repository(
name = "org_golang_x_text",
importpath = "golang.org/x/text",
sum = "h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=",
version = "v0.3.2",
)
go_repository(
name = "org_golang_x_tools",
importpath = "golang.org/x/tools",
sum = "h1:CuaXjesf8HXc9cSxHpEzHyFF+1FCOpgdxhQ/5vRcYZw=",
version = "v0.0.0-20200311222014-c807066ff753",
)
go_repository(
name = "org_uber_go_atomic",
importpath = "go.uber.org/atomic",
sum = "h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=",
version = "v1.6.0",
)
go_repository(
name = "org_uber_go_multierr",
importpath = "go.uber.org/multierr",
sum = "h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=",
version = "v1.5.0",
)
go_repository(
name = "org_uber_go_zap",
importpath = "go.uber.org/zap",
sum = "h1:/pduUoebOeeJzTDFuoMgC6nRkiasr1sBCIEorly7m4o=",
version = "v1.14.0",
)
go_repository(
name = "org_golang_x_crypto",
importpath = "golang.org/x/crypto",
sum = "h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=",
version = "v0.0.0-20191011191535-87dc89f01550",
)
go_repository(
name = "com_github_armon_consul_api",
importpath = "github.com/armon/consul-api",
sum = "h1:G1bPvciwNyF7IUmKXNt9Ak3m6u9DE1rF+RmtIkBpVdA=",
version = "v0.0.0-20180202201655-eb2c6b5be1b6",
)
go_repository(
name = "com_github_coreos_etcd",
importpath = "github.com/coreos/etcd",
sum = "h1:jFneRYjIvLMLhDLCzuTuU4rSJUjRplcJQ7pD7MnhC04=",
version = "v3.3.10+incompatible",
)
go_repository(
name = "com_github_coreos_go_etcd",
importpath = "github.com/coreos/go-etcd",
sum = "h1:bXhRBIXoTm9BYHS3gE0TtQuyNZyeEMux2sDi4oo5YOo=",
version = "v2.0.0+incompatible",
)
go_repository(
name = "com_github_coreos_go_semver",
importpath = "github.com/coreos/go-semver",
sum = "h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY=",
version = "v0.2.0",
)
go_repository(
name = "com_github_fsnotify_fsnotify",
importpath = "github.com/fsnotify/fsnotify",
sum = "h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=",
version = "v1.4.7",
)
go_repository(
name = "com_github_google_go_cmp",
importpath = "github.com/google/go-cmp",
sum = "h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=",
version = "v0.2.0",
)
go_repository(
name = "com_github_hashicorp_hcl",
importpath = "github.com/hashicorp/hcl",
sum = "h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=",
version = "v1.0.0",
)
go_repository(
name = "com_github_magiconair_properties",
importpath = "github.com/magiconair/properties",
sum = "h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY=",
version = "v1.8.0",
)
go_repository(
name = "com_github_mitchellh_go_homedir",
importpath = "github.com/mitchellh/go-homedir",
sum = "h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=",
version = "v1.1.0",
)
go_repository(
name = "com_github_mitchellh_mapstructure",
importpath = "github.com/mitchellh/mapstructure",
sum = "h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=",
version = "v1.1.2",
)
go_repository(
name = "com_github_pelletier_go_toml",
importpath = "github.com/pelletier/go-toml",
sum = "h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=",
version = "v1.2.0",
)
go_repository(
name = "com_github_spf13_afero",
importpath = "github.com/spf13/afero",
sum = "h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=",
version = "v1.1.2",
)
go_repository(
name = "com_github_spf13_cast",
importpath = "github.com/spf13/cast",
sum = "h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=",
version = "v1.3.0",
)
go_repository(
name = "com_github_spf13_jwalterweatherman",
importpath = "github.com/spf13/jwalterweatherman",
sum = "h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=",
version = "v1.0.0",
)
go_repository(
name = "com_github_spf13_viper",
importpath = "github.com/spf13/viper",
sum = "h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU=",
version = "v1.4.0",
)
go_repository(
name = "com_github_ugorji_go_codec",
importpath = "github.com/ugorji/go/codec",
sum = "h1:3SVOIvH7Ae1KRYyQWRjXWJEA9sS/c/pjvH++55Gr648=",
version = "v0.0.0-20181204163529-d75b2dcb6bc8",
)
go_repository(
name = "com_github_xordataexchange_crypt",
importpath = "github.com/xordataexchange/crypt",
sum = "h1:ESFSdwYZvkeru3RtdrYueztKhOBCSAAzS4Gf+k0tEow=",
version = "v0.0.3-0.20170626215501-b2862e3d0a77",
)
go_repository(
name = "com_github_shurcool_sanitized_anchor_name",
importpath = "github.com/shurcooL/sanitized_anchor_name",
sum = "h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=",
version = "v1.0.0",
)
go_repository(
name = "org_golang_x_exp",
importpath = "golang.org/x/exp",
sum = "h1:c2HOrn5iMezYjSlGPncknSEr/8x5LELb/ilJbXi9DEA=",
version = "v0.0.0-20190121172915-509febef88a4",
)
go_repository(
name = "com_github_alecthomas_template",
importpath = "github.com/alecthomas/template",
sum = "h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU=",
version = "v0.0.0-20160405071501-a0175ee3bccc",
)
go_repository(
name = "com_github_alecthomas_units",
importpath = "github.com/alecthomas/units",
sum = "h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY=",
version = "v0.0.0-20151022065526-2efee857e7cf",
)
go_repository(
name = "com_github_beorn7_perks",
importpath = "github.com/beorn7/perks",
sum = "h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=",
version = "v1.0.0",
)
go_repository(
name = "com_github_census_instrumentation_opencensus_proto",
importpath = "github.com/census-instrumentation/opencensus-proto",
sum = "h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=",
version = "v0.2.1",
)
go_repository(
name = "com_github_cespare_xxhash",
importpath = "github.com/cespare/xxhash",
sum = "h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=",
version = "v1.1.0",
)
go_repository(
name = "com_github_coreos_bbolt",
importpath = "github.com/coreos/bbolt",
sum = "h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s=",
version = "v1.3.2",
)
go_repository(
name = "com_github_coreos_go_systemd",
importpath = "github.com/coreos/go-systemd",
sum = "h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=",
version = "v0.0.0-20190321100706-95778dfbb74e",
)
go_repository(
name = "com_github_coreos_pkg",
importpath = "github.com/coreos/pkg",
sum = "h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg=",
version = "v0.0.0-20180928190104-399ea9e2e55f",
)
go_repository(
name = "com_github_cpuguy83_go_md2man_v2",
importpath = "github.com/cpuguy83/go-md2man/v2",
sum = "h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=",
version = "v2.0.0",
)
go_repository(
name = "com_github_dgrijalva_jwt_go",
importpath = "github.com/dgrijalva/jwt-go",
sum = "h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=",
version = "v3.2.0+incompatible",
)
go_repository(
name = "com_github_dgryski_go_sip13",
importpath = "github.com/dgryski/go-sip13",
sum = "h1:RMLoZVzv4GliuWafOuPuQDKSm1SJph7uCRnnS61JAn4=",
version = "v0.0.0-20181026042036-e10d5fee7954",
)
go_repository(
name = "com_github_envoyproxy_go_control_plane",
importpath = "github.com/envoyproxy/go-control-plane",
sum = "h1:rEvIZUSZ3fx39WIi3JkQqQBitGwpELBIYWeBVh6wn+E=",
version = "v0.9.4",
)
go_repository(
name = "com_github_envoyproxy_protoc_gen_validate",
importpath = "github.com/envoyproxy/protoc-gen-validate",
sum = "h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=",
version = "v0.1.0",
)
go_repository(
name = "com_github_ghodss_yaml",
importpath = "github.com/ghodss/yaml",
sum = "h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=",
version = "v1.0.0",
)
go_repository(
name = "com_github_go_kit_kit",
importpath = "github.com/go-kit/kit",
sum = "h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0=",
version = "v0.8.0",
)
go_repository(
name = "com_github_go_logfmt_logfmt",
importpath = "github.com/go-logfmt/logfmt",
sum = "h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA=",
version = "v0.4.0",
)
go_repository(
name = "com_github_go_stack_stack",
importpath = "github.com/go-stack/stack",
sum = "h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=",
version = "v1.8.0",
)
go_repository(
name = "com_github_gogo_protobuf",
importpath = "github.com/gogo/protobuf",
sum = "h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=",
version = "v1.2.1",
)
go_repository(
name = "com_github_golang_groupcache",
importpath = "github.com/golang/groupcache",
sum = "h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk=",
version = "v0.0.0-20190129154638-5b532d6fd5ef",
)
go_repository(
name = "com_github_google_btree",
importpath = "github.com/google/btree",
sum = "h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=",
version = "v1.0.0",
)
go_repository(
name = "com_github_google_renameio",
importpath = "github.com/google/renameio",
sum = "h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA=",
version = "v0.1.0",
)
go_repository(
name = "com_github_gorilla_websocket",
importpath = "github.com/gorilla/websocket",
sum = "h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=",
version = "v1.4.0",
)
go_repository(
name = "com_github_grpc_ecosystem_go_grpc_middleware",
importpath = "github.com/grpc-ecosystem/go-grpc-middleware",
sum = "h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c=",
version = "v1.0.0",
)
go_repository(
name = "com_github_grpc_ecosystem_go_grpc_prometheus",
importpath = "github.com/grpc-ecosystem/go-grpc-prometheus",
sum = "h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=",
version = "v1.2.0",
)
go_repository(
name = "com_github_grpc_ecosystem_grpc_gateway",
importpath = "github.com/grpc-ecosystem/grpc-gateway",
sum = "h1:bM6ZAFZmc/wPFaRDi0d5L7hGEZEx/2u+Tmr2evNHDiI=",
version = "v1.9.0",
)
go_repository(
name = "com_github_jonboulle_clockwork",
importpath = "github.com/jonboulle/clockwork",
sum = "h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=",
version = "v0.1.0",
)
go_repository(
name = "com_github_julienschmidt_httprouter",
importpath = "github.com/julienschmidt/httprouter",
sum = "h1:TDTW5Yz1mjftljbcKqRcrYhd4XeOoI98t+9HbQbYf7g=",
version = "v1.2.0",
)
go_repository(
name = "com_github_kisielk_errcheck",
importpath = "github.com/kisielk/errcheck",
sum = "h1:ZqfnKyx9KGpRcW04j5nnPDgRgoXUeLh2YFBeFzphcA0=",
version = "v1.1.0",
)
go_repository(
name = "com_github_kisielk_gotool",
importpath = "github.com/kisielk/gotool",
sum = "h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=",
version = "v1.0.0",
)
go_repository(
name = "com_github_konsorten_go_windows_terminal_sequences",
importpath = "github.com/konsorten/go-windows-terminal-sequences",
sum = "h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=",
version = "v1.0.1",
)
go_repository(
name = "com_github_kr_logfmt",
importpath = "github.com/kr/logfmt",
sum = "h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=",
version = "v0.0.0-20140226030751-b84e30acd515",
)
go_repository(
name = "com_github_kr_pretty",
importpath = "github.com/kr/pretty",
sum = "h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=",
version = "v0.1.0",
)
go_repository(
name = "com_github_kr_pty",
importpath = "github.com/kr/pty",
sum = "h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw=",
version = "v1.1.1",
)
go_repository(
name = "com_github_kr_text",
importpath = "github.com/kr/text",
sum = "h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=",
version = "v0.1.0",
)
go_repository(
name = "com_github_matttproud_golang_protobuf_extensions",
importpath = "github.com/matttproud/golang_protobuf_extensions",
sum = "h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=",
version = "v1.0.1",
)
go_repository(
name = "com_github_mwitkow_go_conntrack",
importpath = "github.com/mwitkow/go-conntrack",
sum = "h1:F9x/1yl3T2AeKLr2AMdilSD8+f9bvMnNN8VS5iDtovc=",
version = "v0.0.0-20161129095857-cc309e4a2223",
)
go_repository(
name = "com_github_oklog_ulid",
importpath = "github.com/oklog/ulid",
sum = "h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=",
version = "v1.3.1",
)
go_repository(
name = "com_github_oneofone_xxhash",
importpath = "github.com/OneOfOne/xxhash",
sum = "h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=",
version = "v1.2.2",
)
go_repository(
name = "com_github_prometheus_client_golang",
importpath = "github.com/prometheus/client_golang",
sum = "h1:9iH4JKXLzFbOAdtqv/a+j8aewx2Y8lAjAydhbaScPF8=",
version = "v0.9.3",
)
go_repository(
name = "com_github_prometheus_client_model",
importpath = "github.com/prometheus/client_model",
sum = "h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=",
version = "v0.0.0-20190812154241-14fe0d1b01d4",
)
go_repository(
name = "com_github_prometheus_common",
importpath = "github.com/prometheus/common",
sum = "h1:7etb9YClo3a6HjLzfl6rIQaU+FDfi0VSX39io3aQ+DM=",
version = "v0.4.0",
)
go_repository(
name = "com_github_prometheus_procfs",
importpath = "github.com/prometheus/procfs",
sum = "h1:sofwID9zm4tzrgykg80hfFph1mryUeLRsUfoocVVmRY=",
version = "v0.0.0-20190507164030-5867b95ac084",
)
go_repository(
name = "com_github_prometheus_tsdb",
importpath = "github.com/prometheus/tsdb",
sum = "h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=",
version = "v0.7.1",
)
go_repository(
name = "com_github_rogpeppe_fastuuid",
importpath = "github.com/rogpeppe/fastuuid",
sum = "h1:gu+uRPtBe88sKxUCEXRoeCvVG90TJmwhiqRpvdhQFng=",
version = "v0.0.0-20150106093220-6724a57986af",
)
go_repository(
name = "com_github_rogpeppe_go_internal",
importpath = "github.com/rogpeppe/go-internal",
sum = "h1:RR9dF3JtopPvtkroDZuVD7qquD0bnHlKSqaQhgwt8yk=",
version = "v1.3.0",
)
go_repository(
name = "com_github_russross_blackfriday_v2",
importpath = "github.com/russross/blackfriday/v2",
sum = "h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=",
version = "v2.0.1",
)
go_repository(
name = "com_github_sirupsen_logrus",
importpath = "github.com/sirupsen/logrus",
sum = "h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo=",
version = "v1.2.0",
)
go_repository(
name = "com_github_soheilhy_cmux",
importpath = "github.com/soheilhy/cmux",
sum = "h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E=",
version = "v0.1.4",
)
go_repository(
name = "com_github_spaolacci_murmur3",
importpath = "github.com/spaolacci/murmur3",
sum = "h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=",
version = "v0.0.0-20180118202830-f09979ecbc72",
)
go_repository(
name = "com_github_tmc_grpc_websocket_proxy",
importpath = "github.com/tmc/grpc-websocket-proxy",
sum = "h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ=",
version = "v0.0.0-20190109142713-0ad062ec5ee5",
)
go_repository(
name = "com_github_ugorji_go",
importpath = "github.com/ugorji/go",
sum = "h1:j4s+tAvLfL3bZyefP2SEWmhBzmuIlH/eqNuPdFPgngw=",
version = "v1.1.4",
)
go_repository(
name = "com_github_xiang90_probing",
importpath = "github.com/xiang90/probing",
sum = "h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=",
version = "v0.0.0-20190116061207-43a291ad63a2",
)
go_repository(
name = "in_gopkg_alecthomas_kingpin_v2",
importpath = "gopkg.in/alecthomas/kingpin.v2",
sum = "h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=",
version = "v2.2.6",
)
go_repository(
name = "in_gopkg_errgo_v2",
importpath = "gopkg.in/errgo.v2",
sum = "h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8=",
version = "v2.1.0",
)
go_repository(
name = "in_gopkg_resty_v1",
importpath = "gopkg.in/resty.v1",
sum = "h1:CuXP0Pjfw9rOuY6EP+UvtNvt5DSqHpIxILZKT/quCZI=",
version = "v1.12.0",
)
go_repository(
name = "io_etcd_go_bbolt",
importpath = "go.etcd.io/bbolt",
sum = "h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk=",
version = "v1.3.2",
)
go_repository(
name = "org_golang_x_mod",
importpath = "golang.org/x/mod",
sum = "h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=",
version = "v0.2.0",
)
go_repository(
name = "org_golang_x_time",
importpath = "golang.org/x/time",
sum = "h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=",
version = "v0.0.0-20190308202827-9d24e82272b4",
)
go_repository(
name = "org_golang_x_xerrors",
importpath = "golang.org/x/xerrors",
sum = "h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=",
version = "v0.0.0-20191204190536-9bdfabe68543",
)
go_repository(
name = "org_uber_go_tools",
importpath = "go.uber.org/tools",
sum = "h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=",
version = "v0.0.0-20190618225709-2cfd321de3ee",
)
go_repository(
name = "com_github_cncf_udpa_go",
importpath = "github.com/cncf/udpa/go",
sum = "h1:WBZRG4aNOuI15bLRrCgN8fCq8E5Xuty6jGbmSNEvSsU=",
version = "v0.0.0-20191209042840-269d4d468f6f",
)
|
tests/api/v1/teams/test_scoring.py | nox237/CTFd | 3,592 | 11144284 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from CTFd.models import Users
from CTFd.utils import set_config
from tests.helpers import create_ctfd, destroy_ctfd, gen_award, gen_team, login_as_user
def test_api_team_place_hidden_if_scores_hidden():
"""/api/v1/teams/me should not reveal team place if scores aren't visible"""
app = create_ctfd(user_mode="teams")
with app.app_context():
gen_team(app.db)
app.db.session.commit()
gen_award(app.db, user_id=2, team_id=1)
u = Users.query.filter_by(id=2).first()
with login_as_user(app, name=u.name) as client:
r = client.get("/api/v1/teams/me", json="")
resp = r.get_json()
assert resp["data"]["place"] == "1st"
set_config("score_visibility", "hidden")
with login_as_user(app, name=u.name) as client:
r = client.get("/api/v1/teams/me", json="")
resp = r.get_json()
assert resp["data"]["place"] is None
set_config("score_visibility", "admins")
with login_as_user(app, name=u.name) as client:
r = client.get("/api/v1/teams/me", json="")
resp = r.get_json()
assert resp["data"]["place"] is None
with login_as_user(app, name="admin") as client:
r = client.get("/api/v1/teams/1", json="")
resp = r.get_json()
print(resp)
assert resp["data"]["place"] == "1st"
destroy_ctfd(app)
|
simulation/decai/simulation/contract/classification/ncc_module.py | boost-entropy-python/0xDeCA10B | 445 | 11144290 | from decai.simulation.contract.classification.ncc import NearestCentroidClassifier
from decai.simulation.contract.classification.scikit_classifier import SciKitClassifierModule
class NearestCentroidClassifierModule(SciKitClassifierModule):
def __init__(self):
super().__init__(
_model_initializer=NearestCentroidClassifier)
|
hummingbot/connector/exchange/coinflex/coinflex_exchange.py | cardosofede/hummingbot | 542 | 11144310 |
import asyncio
import logging
import time
from decimal import Decimal
from typing import (
Any,
AsyncIterable,
Dict,
List,
Optional,
)
from async_timeout import timeout
import hummingbot.connector.exchange.coinflex.coinflex_constants as CONSTANTS
import hummingbot.connector.exchange.coinflex.coinflex_web_utils as web_utils
from hummingbot.connector.client_order_tracker import ClientOrderTracker
from hummingbot.connector.exchange_base import ExchangeBase
from hummingbot.connector.exchange.coinflex import coinflex_utils
from hummingbot.connector.exchange.coinflex.coinflex_api_order_book_data_source import CoinflexAPIOrderBookDataSource
from hummingbot.connector.exchange.coinflex.coinflex_auth import CoinflexAuth
from hummingbot.connector.exchange.coinflex.coinflex_order_book_tracker import CoinflexOrderBookTracker
from hummingbot.connector.exchange.coinflex.coinflex_user_stream_tracker import CoinflexUserStreamTracker
from hummingbot.connector.trading_rule import TradingRule
from hummingbot.core.api_throttler.async_throttler import AsyncThrottler
from hummingbot.core.data_type.cancellation_result import CancellationResult
from hummingbot.core.data_type.common import OrderType, TradeType
from hummingbot.core.data_type.in_flight_order import (
InFlightOrder,
OrderState,
OrderUpdate,
TradeUpdate,
)
from hummingbot.core.data_type.limit_order import LimitOrder
from hummingbot.core.data_type.order_book import OrderBook
from hummingbot.core.data_type.trade_fee import DeductedFromReturnsTradeFee, TokenAmount, TradeFeeBase
from hummingbot.core.network_iterator import NetworkStatus
from hummingbot.core.utils.async_utils import safe_ensure_future, safe_gather
from hummingbot.core.web_assistant.connections.data_types import RESTMethod
from hummingbot.logger import HummingbotLogger
s_logger = None
s_decimal_0 = Decimal(0)
s_decimal_NaN = Decimal("nan")
class CoinflexExchange(ExchangeBase):
SHORT_POLL_INTERVAL = 5.0
UPDATE_ORDER_STATUS_MIN_INTERVAL = 10.0
LONG_POLL_INTERVAL = 120.0
MAX_ORDER_UPDATE_RETRIEVAL_RETRIES_WITH_FAILURES = 3
def __init__(self,
coinflex_api_key: str,
coinflex_api_secret: str,
trading_pairs: Optional[List[str]] = None,
trading_required: bool = True,
domain: str = CONSTANTS.DEFAULT_DOMAIN
):
self._domain = domain
super().__init__()
self._trading_required = trading_required
self._auth = CoinflexAuth(
api_key=coinflex_api_key,
secret_key=coinflex_api_secret)
self._throttler = AsyncThrottler(CONSTANTS.RATE_LIMITS)
self._api_factory = web_utils.build_api_factory(auth=self._auth)
self._order_book_tracker = CoinflexOrderBookTracker(
trading_pairs=trading_pairs,
domain=domain,
api_factory=self._api_factory,
throttler=self._throttler)
self._user_stream_tracker = CoinflexUserStreamTracker(
auth=self._auth,
domain=domain,
throttler=self._throttler,
api_factory=self._api_factory)
self._ev_loop = asyncio.get_event_loop()
self._poll_notifier = asyncio.Event()
self._last_timestamp = 0
self._order_not_found_records = {} # Dict[client_order_id:str, count:int]
self._trading_rules = {} # Dict[trading_pair:str, TradingRule]
        self._trade_fees = {}  # Dict[trading_pair:str, (maker_fee_percent:Decimal, taker_fee_percent:Decimal)]
self._last_update_trade_fees_timestamp = 0
self._status_polling_task = None
self._user_stream_event_listener_task = None
self._trading_rules_polling_task = None
self._last_poll_timestamp = 0
self._last_trades_poll_coinflex_timestamp = 0
self._order_tracker: ClientOrderTracker = ClientOrderTracker(connector=self)
@classmethod
def logger(cls) -> HummingbotLogger:
global s_logger
if s_logger is None:
s_logger = logging.getLogger(__name__)
return s_logger
@property
def name(self) -> str:
if self._domain != CONSTANTS.DEFAULT_DOMAIN:
return f"coinflex_{self._domain}"
return "coinflex"
@property
def order_books(self) -> Dict[str, OrderBook]:
return self._order_book_tracker.order_books
@property
def trading_rules(self) -> Dict[str, TradingRule]:
return self._trading_rules
@property
def in_flight_orders(self) -> Dict[str, InFlightOrder]:
return self._order_tracker.active_orders
@property
def limit_orders(self) -> List[LimitOrder]:
return [
in_flight_order.to_limit_order()
for in_flight_order in self.in_flight_orders.values()
]
@property
def tracking_states(self) -> Dict[str, any]:
"""
Returns a dictionary associating current active orders client id to their JSON representation
"""
return {
key: value.to_json()
for key, value in self.in_flight_orders.items()
}
def _sleep_time(self, delay: int = 0):
"""
Function created to enable patching during unit tests execution.
"""
return delay
@property
def order_book_tracker(self) -> CoinflexOrderBookTracker:
return self._order_book_tracker
@property
def user_stream_tracker(self) -> CoinflexUserStreamTracker:
return self._user_stream_tracker
@property
def status_dict(self) -> Dict[str, bool]:
"""
Returns a dictionary with the values of all the conditions that determine if the connector is ready to operate.
        The key of each entry is the condition name, and the value is True if the condition is met, False otherwise.
"""
return {
"symbols_mapping_initialized": CoinflexAPIOrderBookDataSource.trading_pair_symbol_map_ready(
domain=self._domain),
"order_books_initialized": self._order_book_tracker.ready,
"account_balance": len(self._account_balances) > 0 if self._trading_required else True,
"trading_rule_initialized": len(self._trading_rules) > 0,
"user_stream_initialized": self._user_stream_tracker.data_source.last_recv_time > 0,
}
@property
def ready(self) -> bool:
"""
Returns True if the connector is ready to operate (all connections established with the exchange). If it is
not ready it returns False.
"""
return all(self.status_dict.values())
@staticmethod
def coinflex_order_type(order_type: OrderType) -> str:
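        # e.g. OrderType.LIMIT -> "LIMIT", OrderType.LIMIT_MAKER -> "LIMIT", OrderType.MARKET -> "MARKET"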
return order_type.name.upper().split("_")[0]
@staticmethod
def to_hb_order_type(coinflex_type: str) -> OrderType:
return OrderType[coinflex_type]
def supported_order_types(self):
return [OrderType.MARKET, OrderType.LIMIT, OrderType.LIMIT_MAKER]
async def start_network(self):
"""
Start all required tasks to update the status of the connector. Those tasks include:
- The order book tracker
- The polling loop to update the trading rules
- The polling loop to update order status and balance status using REST API (backup for main update process)
- The background task to process the events received through the user stream tracker (websocket connection)
"""
self._order_book_tracker.start()
self._trading_rules_polling_task = safe_ensure_future(self._trading_rules_polling_loop())
if self._trading_required:
self._status_polling_task = safe_ensure_future(self._status_polling_loop())
self._user_stream_tracker_task = safe_ensure_future(self._user_stream_tracker.start())
self._user_stream_event_listener_task = safe_ensure_future(self._user_stream_event_listener())
async def stop_network(self):
"""
        This function is executed when the connector is stopped. It performs a general cleanup and stops all background
tasks that require the connection with the exchange to work.
"""
# Reset timestamps and _poll_notifier for status_polling_loop
self._last_poll_timestamp = 0
self._last_timestamp = 0
self._poll_notifier = asyncio.Event()
self._order_book_tracker.stop()
if self._status_polling_task is not None:
self._status_polling_task.cancel()
if self._user_stream_tracker_task is not None:
self._user_stream_tracker_task.cancel()
if self._user_stream_event_listener_task is not None:
self._user_stream_event_listener_task.cancel()
if self._trading_rules_polling_task is not None:
self._trading_rules_polling_task.cancel()
self._status_polling_task = self._user_stream_tracker_task = self._user_stream_event_listener_task = None
async def check_network(self) -> NetworkStatus:
"""
Checks connectivity with the exchange using the API
"""
try:
response = await self._api_request(
method=RESTMethod.GET,
path_url=CONSTANTS.PING_PATH_URL,
)
if str(response["success"]).lower() == "true":
return NetworkStatus.CONNECTED
except asyncio.CancelledError:
raise
except Exception:
return NetworkStatus.NOT_CONNECTED
return NetworkStatus.NOT_CONNECTED
def restore_tracking_states(self, saved_states: Dict[str, any]):
"""
        Restore in-flight orders from saved tracking states, so that the connector can pick up where it left off
        when it disconnects.
:param saved_states: The saved tracking_states.
"""
self._order_tracker.restore_tracking_states(tracking_states=saved_states)
def tick(self, timestamp: float):
"""
Includes the logic that has to be processed every time a new tick happens in the bot. Particularly it enables
the execution of the status update polling loop using an event.
"""
now = time.time()
poll_interval = (self.SHORT_POLL_INTERVAL
if now - self.user_stream_tracker.last_recv_time > 60.0
else self.LONG_POLL_INTERVAL)
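        # The user stream is the primary update source: while it is healthy (a message was received within
        # the last 60 s) the REST poll only runs every LONG_POLL_INTERVAL (120 s); if the stream has been
        # silent for over a minute, fall back to SHORT_POLL_INTERVAL (5 s).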
last_tick = int(self._last_timestamp / poll_interval)
current_tick = int(timestamp / poll_interval)
if current_tick > last_tick:
if not self._poll_notifier.is_set():
self._poll_notifier.set()
self._last_timestamp = timestamp
def get_order_book(self, trading_pair: str) -> OrderBook:
"""
Returns the current order book for a particular market
:param trading_pair: the pair of tokens for which the order book should be retrieved
"""
if trading_pair not in self._order_book_tracker.order_books:
raise ValueError(f"No order book exists for '{trading_pair}'.")
return self._order_book_tracker.order_books[trading_pair]
def start_tracking_order(self,
order_id: str,
exchange_order_id: Optional[str],
trading_pair: str,
trade_type: TradeType,
price: Decimal,
amount: Decimal,
order_type: OrderType):
"""
Starts tracking an order by adding it to the order tracker.
:param order_id: the order identifier
:param exchange_order_id: the identifier for the order in the exchange
:param trading_pair: the token pair for the operation
:param trade_type: the type of order (buy or sell)
:param price: the price for the order
:param amount: the amount for the order
        :param order_type: the type of execution for the order (MARKET, LIMIT, LIMIT_MAKER)
"""
self._order_tracker.start_tracking_order(
InFlightOrder(
client_order_id=order_id,
exchange_order_id=exchange_order_id,
trading_pair=trading_pair,
order_type=order_type,
trade_type=trade_type,
amount=amount,
price=price,
creation_timestamp=self.current_timestamp
)
)
def stop_tracking_order(self, order_id: str):
"""
Stops tracking an order
:param order_id: The id of the order that will not be tracked any more
"""
self._order_tracker.stop_tracking_order(client_order_id=order_id)
def get_order_price_quantum(self, trading_pair: str, price: Decimal) -> Decimal:
"""
Used by quantize_order_price() in _create_order()
Returns a price step, a minimum price increment for a given trading pair.
:param trading_pair: the trading pair to check for market conditions
:param price: the starting point price
"""
trading_rule = self._trading_rules[trading_pair]
return trading_rule.min_price_increment
def get_order_size_quantum(self, trading_pair: str, order_size: Decimal) -> Decimal:
"""
        Used by quantize_order_amount() in _create_order()
        Returns an order amount step, a minimum amount increment for a given trading pair.
        :param trading_pair: the trading pair to check for market conditions
        :param order_size: the starting point order size
"""
trading_rule = self._trading_rules[trading_pair]
return trading_rule.min_base_amount_increment
def quantize_order_amount(self, trading_pair: str, amount: Decimal, price: Decimal = s_decimal_0) -> Decimal:
"""
Applies the trading rules to calculate the correct order amount for the market
:param trading_pair: the token pair for which the order will be created
:param amount: the intended amount for the order
:param price: the intended price for the order
:return: the quantized order amount after applying the trading rules
"""
trading_rule = self._trading_rules[trading_pair]
quantized_amount: Decimal = super().quantize_order_amount(trading_pair, amount)
# Check against min_order_size and min_notional_size. If not passing either check, return 0.
if quantized_amount < trading_rule.min_order_size:
return s_decimal_0
if price == s_decimal_0:
current_price: Decimal = self.get_price(trading_pair, False)
notional_size = current_price * quantized_amount
else:
notional_size = price * quantized_amount
# Add 1% as a safety factor in case the prices changed while making the order.
if notional_size < trading_rule.min_notional_size * Decimal("1.01"):
return s_decimal_0
return quantized_amount
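    # Illustrative walk-through of quantize_order_amount (hypothetical trading rule values, not CoinFLEX's
    # actual limits): with min_order_size = 0.001, min_base_amount_increment = 0.001 and min_notional_size = 10,
    # an intended amount of 0.0123 at price 1000 quantizes to 0.012, and its notional of 12 clears the
    # 10 * 1.01 = 10.1 threshold, so 0.012 is returned; an amount of 0.0005 falls below min_order_size
    # and returns Decimal(0).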
def get_fee(self,
base_currency: str,
quote_currency: str,
order_type: OrderType,
order_side: TradeType,
amount: Decimal,
price: Decimal = s_decimal_NaN,
is_maker: Optional[bool] = None) -> TradeFeeBase:
"""
Calculates the estimated fee an order would pay based on the connector configuration
:param base_currency: the order base currency
:param quote_currency: the order quote currency
:param order_type: the type of order (MARKET, LIMIT, LIMIT_MAKER)
:param order_side: if the order is for buying or selling
:param amount: the order amount
:param price: the order price
:return: the estimated fee for the order
"""
"""
To get trading fee, this function is simplified by using fee override configuration. Most parameters to this
function are ignore except order_type. Use OrderType.LIMIT_MAKER to specify you want trading fee for
maker order.
"""
is_maker = order_type is OrderType.LIMIT_MAKER
return DeductedFromReturnsTradeFee(percent=self.estimate_fee_pct(is_maker))
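    # Illustrative call (hypothetical values; `exchange` is an already-constructed CoinflexExchange):
    #
    #     fee = exchange.get_fee(base_currency="BTC", quote_currency="USD",
    #                            order_type=OrderType.LIMIT_MAKER, order_side=TradeType.BUY,
    #                            amount=Decimal("1"), price=Decimal("40000"))
    #     fee.percent  # the configured maker fee percentage
    #
    # Only order_type affects the result here, as explained in the docstring above.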
def buy(self, trading_pair: str, amount: Decimal, order_type: OrderType = OrderType.LIMIT,
price: Decimal = s_decimal_NaN, **kwargs) -> str:
"""
Creates a promise to create a buy order using the parameters.
:param trading_pair: the token pair to operate with
:param amount: the order amount
:param order_type: the type of order to create (MARKET, LIMIT, LIMIT_MAKER)
:param price: the order price
:return: the id assigned by the connector to the order (the client id)
"""
client_order_id = coinflex_utils.get_new_client_order_id(is_buy=True, trading_pair=trading_pair)
safe_ensure_future(self._create_order(TradeType.BUY, client_order_id, trading_pair, amount, order_type, price))
return client_order_id
def sell(self, trading_pair: str, amount: Decimal, order_type: OrderType = OrderType.MARKET,
price: Decimal = s_decimal_NaN, **kwargs) -> str:
"""
Creates a promise to create a sell order using the parameters.
:param trading_pair: the token pair to operate with
:param amount: the order amount
:param order_type: the type of order to create (MARKET, LIMIT, LIMIT_MAKER)
:param price: the order price
:return: the id assigned by the connector to the order (the client id)
"""
client_order_id = coinflex_utils.get_new_client_order_id(is_buy=False, trading_pair=trading_pair)
safe_ensure_future(self._create_order(TradeType.SELL, client_order_id, trading_pair, amount, order_type, price))
return client_order_id
def cancel(self, trading_pair: str, order_id: str):
"""
Creates a promise to cancel an order in the exchange
:param trading_pair: the trading pair the order to cancel operates with
:param order_id: the client id of the order to cancel
:return: the client id of the order to cancel
"""
safe_ensure_future(self._execute_cancel(trading_pair, order_id))
return order_id
async def cancel_all(self, timeout_seconds: float) -> List[CancellationResult]:
"""
Cancels all currently active orders. The cancellations are performed in parallel tasks.
:param timeout_seconds: the maximum time (in seconds) the cancel logic should run
:return: a list of CancellationResult instances, one for each of the orders to be cancelled
"""
incomplete_orders = [o for o in self.in_flight_orders.values() if not o.is_done]
tasks = [self._execute_cancel(o.trading_pair, o.client_order_id) for o in incomplete_orders]
order_id_set = set([o.client_order_id for o in incomplete_orders])
successful_cancellations = []
try:
async with timeout(timeout_seconds):
cancellation_results = await safe_gather(*tasks, return_exceptions=True)
for cr in cancellation_results:
if isinstance(cr, Exception):
continue
if isinstance(cr, dict) and "clientOrderId" in cr:
client_order_id = cr.get("clientOrderId")
order_id_set.remove(client_order_id)
successful_cancellations.append(CancellationResult(client_order_id, True))
except Exception:
self.logger().network(
"Unexpected error canceling orders.",
exc_info=True,
app_warning_msg="Failed to cancel order with CoinFLEX. Check API key and network connection."
)
failed_cancellations = [CancellationResult(oid, False) for oid in order_id_set]
return successful_cancellations + failed_cancellations
async def _create_order(self,
trade_type: TradeType,
order_id: str,
trading_pair: str,
amount: Decimal,
order_type: OrderType,
price: Optional[Decimal] = Decimal("NaN")):
"""
        Creates an order in the exchange using the parameters to configure it
        :param trade_type: the side of the order (BUY or SELL)
:param order_id: the id that should be assigned to the order (the client id)
:param trading_pair: the token pair to operate with
:param amount: the order amount
:param order_type: the type of order to create (MARKET, LIMIT, LIMIT_MAKER)
:param price: the order price
"""
trading_rule: TradingRule = self._trading_rules[trading_pair]
price = self.quantize_order_price(trading_pair, price)
quantize_amount_price = Decimal("0") if price.is_nan() else price
amount = self.quantize_order_amount(trading_pair=trading_pair, amount=amount, price=quantize_amount_price)
self.start_tracking_order(
order_id=order_id,
exchange_order_id=None,
trading_pair=trading_pair,
trade_type=trade_type,
price=price,
amount=amount,
order_type=order_type)
if amount < trading_rule.min_order_size:
self.logger().warning(f"{trade_type.name.title()} order amount {amount} is lower than the minimum order"
f" size {trading_rule.min_order_size}. The order will not be created.")
order_update: OrderUpdate = OrderUpdate(
client_order_id=order_id,
trading_pair=trading_pair,
update_timestamp=self.current_timestamp,
new_state=OrderState.FAILED,
)
self._order_tracker.process_order_update(order_update)
return
order_result = None
amount_str = f"{amount:f}"
type_str = CoinflexExchange.coinflex_order_type(order_type)
side_str = CONSTANTS.SIDE_BUY if trade_type is TradeType.BUY else CONSTANTS.SIDE_SELL
symbol = await CoinflexAPIOrderBookDataSource.exchange_symbol_associated_to_pair(
trading_pair=trading_pair,
domain=self._domain,
api_factory=self._api_factory,
throttler=self._throttler)
api_params = {"responseType": "FULL"}
order_params = {"marketCode": symbol,
"side": side_str,
"quantity": amount_str,
"orderType": type_str,
"clientOrderId": order_id}
if order_type is not OrderType.MARKET:
order_params["price"] = f"{price:f}"
if order_type is OrderType.LIMIT:
order_params["timeInForce"] = CONSTANTS.TIME_IN_FORCE_GTC
elif order_type is OrderType.LIMIT_MAKER:
order_params["timeInForce"] = CONSTANTS.TIME_IN_FORCE_MAK
api_params["orders"] = [order_params]
try:
result = await self._api_request(
method=RESTMethod.POST,
path_url=CONSTANTS.ORDER_CREATE_PATH_URL,
data=api_params,
is_auth_required=True,
disable_retries=True)
order_result = result["data"][0]
exchange_order_id = str(order_result["orderId"])
order_update: OrderUpdate = OrderUpdate(
client_order_id=order_id,
exchange_order_id=exchange_order_id,
trading_pair=trading_pair,
update_timestamp=int(order_result["timestamp"]) * 1e-3,
new_state=OrderState.OPEN,
)
self._order_tracker.process_order_update(order_update)
except asyncio.CancelledError:
raise
except Exception as e:
self.logger().network(
f"Error submitting {side_str} {type_str} order to CoinFLEX for "
f"{amount} {trading_pair} "
f"{price}.",
exc_info=True,
app_warning_msg=str(e)
)
order_update: OrderUpdate = OrderUpdate(
client_order_id=order_id,
trading_pair=trading_pair,
update_timestamp=self.current_timestamp,
new_state=OrderState.FAILED,
)
self._order_tracker.process_order_update(order_update)
async def _execute_cancel(self, trading_pair: str, order_id: str):
"""
Requests the exchange to cancel an active order
:param trading_pair: the trading pair the order to cancel operates with
:param order_id: the client id of the order to cancel
"""
tracked_order = self._order_tracker.fetch_tracked_order(order_id)
if tracked_order is not None:
try:
symbol = await CoinflexAPIOrderBookDataSource.exchange_symbol_associated_to_pair(
trading_pair=trading_pair,
domain=self._domain,
api_factory=self._api_factory,
throttler=self._throttler)
api_params = {
"responseType": "FULL",
}
cancel_params = {
"marketCode": symbol,
"clientOrderId": order_id,
}
api_params["orders"] = [cancel_params]
try:
result = await self._api_request(
method=RESTMethod.DELETE,
path_url=CONSTANTS.ORDER_CANCEL_PATH_URL,
data=api_params,
is_auth_required=True)
cancel_result = result["data"][0]
except web_utils.CoinflexAPIError as e:
# Catch order not found as cancelled.
cancel_result = {}
if e.error_payload.get("errors") == CONSTANTS.ORDER_NOT_FOUND_ERROR:
cancel_result = e.error_payload["data"][0]
else:
self.logger().error(f"Unhandled error canceling order: {order_id}. Error: {e.error_payload}", exc_info=True)
if cancel_result.get("status") in CONSTANTS.ORDER_CANCELED_STATES:
cancelled_timestamp = cancel_result.get("timestamp", result.get("timestamp"))
order_update: OrderUpdate = OrderUpdate(
client_order_id=order_id,
trading_pair=tracked_order.trading_pair,
update_timestamp=int(cancelled_timestamp) * 1e-3 if cancelled_timestamp else self.current_timestamp,
new_state=OrderState.CANCELED,
)
self._order_tracker.process_order_update(order_update)
else:
if not self._process_order_not_found(order_id, tracked_order):
raise IOError
return cancel_result
except asyncio.CancelledError:
raise
except Exception:
self.logger().exception(f"There was a an error when requesting cancelation of order {order_id}")
raise
async def _status_polling_loop(self):
"""
        Performs all required operations to keep the connector updated and synchronized with the exchange.
        It contains the backup logic to update status using API requests in case the main update source (the user stream
        data source websocket) fails.
        It also updates the time synchronizer. This is necessary because CoinFLEX requires the time of the client to be
        the same as the time in the exchange.
Executes when the _poll_notifier event is enabled by the `tick` function.
"""
while True:
try:
await self._poll_notifier.wait()
await safe_gather(
self._update_balances(),
)
await self._update_order_status()
self._last_poll_timestamp = self.current_timestamp
except asyncio.CancelledError:
raise
except Exception:
self.logger().network("Unexpected error while fetching account updates.", exc_info=True,
app_warning_msg="Could not fetch account updates from CoinFLEX. "
"Check API key and network connection.")
await asyncio.sleep(0.5)
finally:
self._poll_notifier = asyncio.Event()
async def _trading_rules_polling_loop(self):
"""
Updates the trading rules by requesting the latest definitions from the exchange.
Executes regularly every 30 minutes
"""
while True:
try:
await safe_gather(
self._update_trading_rules(),
)
await asyncio.sleep(30 * 60)
except asyncio.CancelledError:
raise
except Exception:
self.logger().network("Unexpected error while fetching trading rules.", exc_info=True,
app_warning_msg="Could not fetch new trading rules from CoinFLEX. "
"Check network connection.")
await asyncio.sleep(0.5)
async def _update_trading_rules(self):
exchange_info = await self._api_request(
method=RESTMethod.GET,
path_url=CONSTANTS.EXCHANGE_INFO_PATH_URL)
trading_rules_list = await self._format_trading_rules(exchange_info)
self._trading_rules.clear()
for trading_rule in trading_rules_list:
self._trading_rules[trading_rule.trading_pair] = trading_rule
async def _format_trading_rules(self, exchange_info_dict: Dict[str, Any]) -> List[TradingRule]:
"""
Example:
{
"marketId": "2001000000000",
"marketCode": "BTC-USD",
"name": "BTC/USD",
"referencePair": "BTC/USD",
"base": "BTC",
"counter": "USD",
"type": "SPOT",
"tickSize": "1",
"qtyIncrement": "0.001",
"marginCurrency": "USD",
"contractValCurrency": "BTC",
"upperPriceBound": "41580",
"lowerPriceBound": "38380",
"marketPrice": "39980",
"markPrice": null,
"listingDate": 1593316800000,
"endDate": 0,
"marketPriceLastUpdated": 1645265706110,
"markPriceLastUpdated": 0,
}
"""
trading_pair_rules = exchange_info_dict.get("data", [])
retval = []
for rule in filter(coinflex_utils.is_exchange_information_valid, trading_pair_rules):
try:
trading_pair = await CoinflexAPIOrderBookDataSource.trading_pair_associated_to_exchange_symbol(
symbol=rule.get("marketCode"),
domain=self._domain,
api_factory=self._api_factory,
throttler=self._throttler)
min_order_size = Decimal(rule.get("qtyIncrement"))
tick_size = Decimal(rule.get("tickSize"))
retval.append(
TradingRule(trading_pair,
min_order_size=min_order_size,
min_price_increment=tick_size,
min_base_amount_increment=min_order_size))
except Exception:
self.logger().exception(f"Error parsing the trading pair rule {rule}. Skipping.")
return retval
async def _user_stream_event_listener(self):
"""
        This function runs in the background, continuously processing the events received from the exchange by the user
stream data source. It keeps reading events from the queue until the task is interrupted.
The events received are balance updates, order updates and trade events.
"""
async for event_message in self._iter_user_event_queue():
try:
event_type = event_message.get("table")
if event_type == "order":
order_data = event_message["data"][0]
client_order_id = order_data.get("clientOrderId")
tracked_order = self.in_flight_orders.get(client_order_id)
if tracked_order is not None:
async with timeout(self._sleep_time(5)):
await tracked_order.get_exchange_order_id()
exec_amt_base = coinflex_utils.decimal_val_or_none(order_data.get("matchQuantity"))
if exec_amt_base:
fill_price = coinflex_utils.decimal_val_or_none(order_data.get("matchPrice"))
exec_amt_quote = exec_amt_base * fill_price if exec_amt_base and fill_price else None
fee_paid = coinflex_utils.decimal_val_or_none(order_data.get("fees"))
if fee_paid:
fee = TradeFeeBase.new_spot_fee(
fee_schema=self.trade_fee_schema(),
trade_type=tracked_order.trade_type,
percent_token=order_data.get("feeInstrumentId"),
flat_fees=[TokenAmount(amount=fee_paid, token=order_data.get("feeInstrumentId"))]
)
else:
fee = self.get_fee(base_currency=tracked_order.base_asset,
quote_currency=tracked_order.quote_asset,
order_type=tracked_order.order_type,
order_side=tracked_order.trade_type,
amount=tracked_order.amount,
price=tracked_order.price,
is_maker=True)
trade_update = TradeUpdate(
trading_pair=tracked_order.trading_pair,
trade_id=int(order_data["matchId"]),
client_order_id=client_order_id,
exchange_order_id=str(order_data["orderId"]),
fill_timestamp=int(order_data["timestamp"]) * 1e-3,
fill_price=fill_price,
fill_base_amount=exec_amt_base,
fill_quote_amount=exec_amt_quote,
fee=fee,
)
self._order_tracker.process_trade_update(trade_update=trade_update)
order_update = OrderUpdate(
trading_pair=tracked_order.trading_pair,
update_timestamp=int(order_data["timestamp"]) * 1e-3,
new_state=CONSTANTS.ORDER_STATE[order_data["status"]],
client_order_id=client_order_id,
exchange_order_id=str(order_data["orderId"]),
)
self._order_tracker.process_order_update(order_update=order_update)
elif event_type == "balance":
self._process_balance_message(event_message)
except asyncio.CancelledError:
raise
except Exception:
self.logger().error("Unexpected error in user stream listener loop.", exc_info=True)
await asyncio.sleep(5.0)
async def _update_order_fills_from_trades(self, tracked_order, order_update):
"""
This is intended to be a backup measure to get filled events from order status
in case CoinFLEX's user stream events are not working.
"""
fee_collected = False
for match_data in order_update["matchIds"]:
for trade_id in match_data.keys():
trade_data = match_data[trade_id]
exec_amt_base = coinflex_utils.decimal_val_or_none(trade_data.get("matchQuantity"))
fill_price = coinflex_utils.decimal_val_or_none(trade_data.get("matchPrice"))
exec_amt_quote = exec_amt_base * fill_price if exec_amt_base and fill_price else None
if not fee_collected and len(order_update.get("fees", {})):
fee_collected = True
fee_data = order_update.get("fees")
fee_token = list(fee_data.keys())[0]
fee_paid = coinflex_utils.decimal_val_or_none(fee_data[fee_token])
else:
fee_token = tracked_order.quote_asset
fee_paid = s_decimal_0
fee = TradeFeeBase.new_spot_fee(
fee_schema=self.trade_fee_schema(),
trade_type=tracked_order.trade_type,
percent_token=fee_token,
flat_fees=[TokenAmount(amount=fee_paid, token=fee_token)]
)
trade_update = TradeUpdate(
trading_pair=tracked_order.trading_pair,
trade_id=int(trade_id),
client_order_id=tracked_order.client_order_id,
exchange_order_id=str(order_update["orderId"]),
fill_timestamp=int(trade_data["timestamp"]) * 1e-3,
fill_price=fill_price,
fill_base_amount=exec_amt_base,
fill_quote_amount=exec_amt_quote,
fee=fee,
)
self._order_tracker.process_trade_update(trade_update=trade_update)
def _process_order_not_found(self,
client_order_id: str,
tracked_order: InFlightOrder) -> bool:
self._order_not_found_records[client_order_id] = (
self._order_not_found_records.get(client_order_id, 0) + 1)
if (self._order_not_found_records[client_order_id] >=
self.MAX_ORDER_UPDATE_RETRIEVAL_RETRIES_WITH_FAILURES):
            # Wait until the order-not-found error has repeated a few times before actually treating
# it as failed. See: https://github.com/CoinAlpha/hummingbot/issues/601
order_update: OrderUpdate = OrderUpdate(
client_order_id=client_order_id,
trading_pair=tracked_order.trading_pair,
update_timestamp=self.current_timestamp,
new_state=OrderState.FAILED,
)
self._order_tracker.process_order_update(order_update)
return True
return False
async def _fetch_order_status(self, tracked_order) -> Dict[str, Any]:
"""
Helper function to fetch order status.
Returns a dictionary with the response.
"""
order_params = {
"marketCode": await CoinflexAPIOrderBookDataSource.exchange_symbol_associated_to_pair(
trading_pair=tracked_order.trading_pair,
domain=self._domain,
api_factory=self._api_factory,
throttler=self._throttler)
}
# If we get the exchange order id, use that, otherwise use client order id.
try:
async with timeout(self._sleep_time(1)):
await tracked_order.get_exchange_order_id()
order_params["orderId"] = tracked_order.exchange_order_id
except asyncio.TimeoutError:
order_params["clientOrderId"] = tracked_order.client_order_id
return await self._api_request(
method=RESTMethod.GET,
path_url=CONSTANTS.ORDER_PATH_URL,
params=order_params,
is_auth_required=True,
endpoint_api_version="v2.1")
async def _update_order_status(self):
"""
This is intended to be a backup measure to close straggler orders, in case CoinFLEX's user stream events
are not working.
The minimum poll interval for order status is 10 seconds.
"""
last_tick = self._last_poll_timestamp / self.UPDATE_ORDER_STATUS_MIN_INTERVAL
current_tick = self.current_timestamp / self.UPDATE_ORDER_STATUS_MIN_INTERVAL
tracked_orders: List[InFlightOrder] = list(self.in_flight_orders.values())
if current_tick > last_tick and len(tracked_orders) > 0:
tasks = [self._fetch_order_status(o) for o in tracked_orders]
self.logger().debug(f"Polling for order status updates of {len(tasks)} orders.")
results = await safe_gather(*tasks, return_exceptions=True)
for order_result, tracked_order in zip(results, tracked_orders):
client_order_id = tracked_order.client_order_id
# If the order has already been cancelled or has failed do nothing
if client_order_id not in self.in_flight_orders:
continue
if isinstance(order_result, Exception) or not order_result.get("data"):
if not isinstance(order_result, web_utils.CoinflexAPIError) or order_result.error_payload.get("errors") == CONSTANTS.ORDER_NOT_FOUND_ERROR:
self.logger().network(
f"Error fetching status update for the order {client_order_id}, marking as not found: {order_result}.",
app_warning_msg=f"Failed to fetch status update for the order {client_order_id}."
)
self._process_order_not_found(client_order_id, tracked_order)
else:
self.logger().network(
f"Error fetching status update for the order {client_order_id}: {order_result}.",
app_warning_msg=f"Failed to fetch status update for the order {client_order_id}."
)
else:
order_update = order_result["data"][0]
# Update order execution status
new_state = CONSTANTS.ORDER_STATE[order_update["status"]]
# Deprecated
# # Get total fees from order data, should only be one fee asset.
# order_fees = order_update.get("fees")
# fee_asset = None
# cumulative_fee_paid = None
# if order_fees:
# for fee_asset in order_fees.keys():
# cumulative_fee_paid = coinflex_utils.decimal_val_or_none(order_fees[fee_asset])
# break
order_update_timestamp = order_update.get("timestamp",
order_update.get("orderOpenedTimestamp",
order_result.get("timestamp")))
update = OrderUpdate(
client_order_id=client_order_id,
exchange_order_id=str(order_update["orderId"]),
trading_pair=tracked_order.trading_pair,
update_timestamp=int(order_update_timestamp) * 1e-3,
new_state=new_state,
)
self._order_tracker.process_order_update(update)
# Fill missing trades from order status.
if len(order_update.get("matchIds", [])):
await self._update_order_fills_from_trades(tracked_order, order_update)
async def _iter_user_event_queue(self) -> AsyncIterable[Dict[str, any]]:
while True:
try:
yield await self._user_stream_tracker.user_stream.get()
except asyncio.CancelledError:
raise
except Exception:
self.logger().network(
"Unknown error. Retrying after 1 seconds.",
exc_info=True,
app_warning_msg="Could not fetch user events from CoinFLEX. Check API key and network connection."
)
await asyncio.sleep(1.0)
async def _update_balances(self):
try:
account_info = await self._api_request(
method=RESTMethod.GET,
path_url=CONSTANTS.ACCOUNTS_PATH_URL,
is_auth_required=True)
self._process_balance_message(account_info)
except Exception:
self.logger().exception("Error getting account balances from server")
def _process_balance_message(self, account_info):
local_asset_names = set(self._account_balances.keys())
remote_asset_names = set()
balances = account_info["data"]
for balance_entry in balances:
asset_name = balance_entry["instrumentId"]
free_balance = Decimal(balance_entry["available"])
total_balance = Decimal(balance_entry["total"])
self._account_available_balances[asset_name] = free_balance
self._account_balances[asset_name] = total_balance
remote_asset_names.add(asset_name)
asset_names_to_remove = local_asset_names.difference(remote_asset_names)
for asset_name in asset_names_to_remove:
del self._account_available_balances[asset_name]
del self._account_balances[asset_name]
async def _api_request(self,
method: RESTMethod,
path_url: str,
params: Optional[Dict[str, Any]] = None,
data: Optional[Dict[str, Any]] = None,
is_auth_required: bool = False,
domain_api_version: str = None,
endpoint_api_version: str = None,
disable_retries: bool = False) -> Dict[str, Any]:
return await web_utils.api_request(
path=path_url,
api_factory=self._api_factory,
throttler=self._throttler,
domain=self._domain,
params=params,
data=data,
method=method,
is_auth_required=is_auth_required,
domain_api_version=domain_api_version,
endpoint_api_version=endpoint_api_version,
disable_retries=disable_retries
)
|
test/hummingbot/connector/exchange/coinzoom/test_coinzoom_auth.py | BGTCapital/hummingbot | 3,027 | 11144320 | from unittest import TestCase
from hummingbot.connector.exchange.coinzoom.coinzoom_auth import CoinzoomAuth
class CoinzoomAuthTests(TestCase):
def setUp(self) -> None:
super().setUp()
self._api_key = 'testApiKey'
self._secret_key = 'testSecretKey'
self._username = 'testUserName'
self.auth = CoinzoomAuth(
api_key=self._api_key,
secret_key=self._secret_key,
username=self._username
)
def test_get_ws_params(self):
params = self.auth.get_ws_params()
self.assertEqual(self._api_key, params["apiKey"])
self.assertEqual(self._secret_key, params["secretKey"])
def test_get_headers(self):
headers = self.auth.get_headers()
self.assertEqual("application/json", headers["Content-Type"])
self.assertEqual(self._api_key, headers["Coinzoom-Api-Key"])
self.assertEqual(self._secret_key, headers["Coinzoom-Api-Secret"])
self.assertEqual(f"hummingbot ZoomMe: {self._username}", headers["User-Agent"])
|
models/nvidia_pn.py | cclauss/DBNet | 188 | 11144328 | <gh_stars>100-1000
import os
import sys
import tensorflow as tf
import scipy
import numpy as np
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(BASE_DIR, '../utils'))
import tf_util
import pointnet
def placeholder_inputs(batch_size, img_rows=66, img_cols=200, points=16384, separately=False):
imgs_pl = tf.placeholder(tf.float32, shape=(batch_size, img_rows, img_cols, 3))
pts_pl = tf.placeholder(tf.float32, shape=(batch_size, points, 3))
if separately:
speeds_pl = tf.placeholder(tf.float32, shape=(batch_size))
angles_pl = tf.placeholder(tf.float32, shape=(batch_size))
labels_pl = [speeds_pl, angles_pl]
labels_pl = tf.placeholder(tf.float32, shape=(batch_size, 2))
return imgs_pl, pts_pl, labels_pl
def get_model(net, is_training, bn_decay=None, separately=False):
""" NVIDIA regression model, input is BxWxHx3, output Bx2"""
batch_size = net[0].get_shape()[0].value
img_net, pt_net = net[0], net[1]
for i, dim in enumerate([24, 36, 48, 64, 64]):
scope = "conv" + str(i + 1)
img_net = tf_util.conv2d(img_net, dim, [5, 5],
padding='VALID', stride=[1, 1],
bn=True, is_training=is_training,
scope=scope, bn_decay=bn_decay)
img_net = tf.reshape(img_net, [batch_size, -1])
img_net = tf_util.fully_connected(img_net, 256, bn=True,
is_training=is_training,
scope='img_fc0',
bn_decay=bn_decay)
with tf.variable_scope('pointnet'):
pt_net = pointnet.get_model(pt_net, tf.constant(True))
net = tf.reshape(tf.stack([img_net, pt_net], axis=2), [batch_size, 512])
for i, dim in enumerate([256, 128, 16]):
fc_scope = "fc" + str(i + 1)
dp_scope = "dp" + str(i + 1)
net = tf_util.fully_connected(net, dim, bn=True,
is_training=is_training,
scope=fc_scope,
bn_decay=bn_decay)
net = tf_util.dropout(net, keep_prob=0.7,
is_training=is_training,
scope=dp_scope)
net = tf_util.fully_connected(net, 2, activation_fn=None, scope='fc5')
return net
def get_loss(pred, label, l2_weight=0.0001):
diff = tf.square(tf.subtract(pred, label))
train_vars = tf.trainable_variables()
l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in train_vars[1:]]) * l2_weight
loss = tf.reduce_mean(diff + l2_loss)
tf.summary.scalar('l2 loss', l2_loss * l2_weight)
tf.summary.scalar('loss', loss)
return loss
def summary_scalar(pred, label):
    thresholds = [5, 4, 3, 2, 1, 0.5]
    angles = [float(t) / 180 * scipy.pi for t in thresholds]
    speeds = [float(t) / 20 for t in thresholds]
    for i in range(len(thresholds)):
        scalar_angle = "angle(" + str(angles[i]) + ")"
        scalar_speed = "speed(" + str(speeds[i]) + ")"
        ac_angle = tf.abs(tf.subtract(pred[:, 1], label[:, 1])) < thresholds[i]
        ac_speed = tf.abs(tf.subtract(pred[:, 0], label[:, 0])) < thresholds[i]
ac_angle = tf.reduce_mean(tf.cast(ac_angle, tf.float32))
ac_speed = tf.reduce_mean(tf.cast(ac_speed, tf.float32))
tf.summary.scalar(scalar_angle, ac_angle)
tf.summary.scalar(scalar_speed, ac_speed)
if __name__ == '__main__':
with tf.Graph().as_default():
inputs = [tf.zeros((32, 66, 200, 3)), tf.zeros((32, 16384, 3))]
outputs = get_model(inputs, tf.constant(True))
print(outputs)
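        # Hypothetical extension of this smoke test (dummy labels only): also build the loss and the
        # accuracy summaries defined above to confirm the full graph wires together.
        labels = tf.zeros((32, 2))
        loss = get_loss(outputs, labels)
        summary_scalar(outputs, labels)
        print(loss)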
|
semantic_release/helpers.py | Agilicus/python-semantic-release | 445 | 11144333 | import functools
from typing import Union
from requests import Session
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
def format_arg(value):
if type(value) == str:
return f"'{value.strip()}'"
else:
return str(value)
def build_requests_session(
raise_for_status=True, retry: Union[bool, int, Retry] = True
) -> Session:
"""
Create a requests session.
    :param raise_for_status: If True, a hook to invoke raise_for_status will be installed
    :param retry: If True, the default Retry configuration is used. If an integer, the default Retry
        configuration is used with the given integer as the total retry count. If a Retry instance, that instance is used.
:return: configured requests Session
"""
session = Session()
if raise_for_status:
session.hooks = {"response": [lambda r, *args, **kwargs: r.raise_for_status()]}
if retry:
if isinstance(retry, bool):
retry = Retry()
elif isinstance(retry, int):
retry = Retry(retry)
elif not isinstance(retry, Retry):
raise ValueError("retry should be a bool, int or Retry instance.")
adapter = HTTPAdapter(max_retries=retry)
session.mount("http://", adapter)
session.mount("https://", adapter)
return session
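# Illustrative usage (the endpoint is an assumption; any HTTPS URL behaves the same way):
#
#     session = build_requests_session(raise_for_status=True, retry=3)
#     response = session.get("https://api.github.com")  # raises on 4xx/5xx, retries up to 3 times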
class LoggedFunction:
"""
Decorator which adds debug logging to a function.
The input arguments are logged before the function is called, and the
return value is logged once it has completed.
:param logger: Logger to send output to.
"""
def __init__(self, logger):
self.logger = logger
def __call__(self, func):
@functools.wraps(func)
def logged_func(*args, **kwargs):
# Log function name and arguments
self.logger.debug(
"{function}({args}{kwargs})".format(
function=func.__name__,
args=", ".join([format_arg(x) for x in args]),
kwargs="".join(
[f", {k}={format_arg(v)}" for k, v in kwargs.items()]
),
)
)
# Call function
result = func(*args, **kwargs)
# Log result
if result is not None:
self.logger.debug(f"{func.__name__} -> {result}")
return result
return logged_func
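# Illustrative usage of LoggedFunction (assumes a configured logging logger; `add` is a made-up example):
#
#     import logging
#     logger = logging.getLogger(__name__)
#
#     @LoggedFunction(logger)
#     def add(a, b):
#         return a + b
#
#     add(1, b=2)  # logs "add(1, b=2)" and then "add -> 3"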
|
machine-learning/optical-character-recognition/extracting_text.py | caesarcc/python-code-tutorials | 1,059 | 11144345 | import pytesseract
import cv2
import matplotlib.pyplot as plt
import sys
from PIL import Image
# read the image using OpenCV
# from the command line first argument
image = cv2.imread(sys.argv[1])
# or you can use Pillow
# image = Image.open(sys.argv[1])
# get the string
string = pytesseract.image_to_string(image)
# print it
print(string)
# get all data
# data = pytesseract.image_to_data(image)
# print(data) |