max_stars_repo_path (string, 4 to 245 chars) | max_stars_repo_name (string, 7 to 115 chars) | max_stars_count (int64, 101 to 368k) | id (string, 2 to 8 chars) | content (string, 6 to 1.03M chars)
---|---|---|---|---|
libsortvis/algos/oddevensort.py | tknuth/sortvis | 117 | 11184171 | <reponame>tknuth/sortvis
def oddevensort(lst, nloops=2):
finished = False
while not finished:
finished = True
for n in xrange(nloops):
for i in xrange(n, len(lst) - 1, nloops):
if lst[i] > lst[i + 1]:
lst[i], lst[i + 1] = lst[i + 1], lst[i]
lst.log()
finished = False
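# Hedged illustration (not part of the original sortvis module): the same
# odd-even transposition pass on a plain Python list, using range() instead of
# xrange() and dropping the lst.log() call that sortvis's tracked list provides.
def _oddevensort_plain_example(lst, nloops=2):
    finished = False
    while not finished:
        finished = True
        for n in range(nloops):
            for i in range(n, len(lst) - 1, nloops):
                if lst[i] > lst[i + 1]:
                    lst[i], lst[i + 1] = lst[i + 1], lst[i]
                    finished = False
    return lst
# Example: _oddevensort_plain_example([3, 1, 2]) returns [1, 2, 3].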
|
test/outputs/test_commentator.py | fetus-hina/IkaLog | 285 | 11184179 | <filename>test/outputs/test_commentator.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# IkaLog
# ======
# Copyright (C) 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Unittests for commentator.py.
# Usage:
# python ./test_commentator.py
# or
# py.test ./test_commentator.py
import unittest
import os.path
import sys
# Append the Ikalog root dir to sys.path to import Commentator.
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
from ikalog.outputs.commentator import Commentator
class TestIkaUtils(unittest.TestCase):
def test__death_reason_label(self):
# _death_reason_label is only for Japanese at this moment.
commentator = Commentator()
# Custom read
self.assertEqual('ごーにーガロン',
commentator._death_reason_label('52gal'))
# Weapons
self.assertEqual('わかばシューター',
commentator._death_reason_label('wakaba'))
# Sub weapons
self.assertEqual('チェイスボム',
commentator._death_reason_label('chasebomb'))
# Special weapons
self.assertEqual('トルネード',
commentator._death_reason_label('tornado'))
# Hurtable objects
self.assertEqual('プロペラから飛び散ったインク',
commentator._death_reason_label('propeller'))
# OOB is treated in a different function.
# Unknown
self.assertEqual('未知の武器',
commentator._death_reason_label('530000gal'))
if __name__ == '__main__':
unittest.main()
|
datasets/kitti_raw_monodepth.py | NIRVANALAN/self-mono-sf | 213 | 11184210 | from __future__ import absolute_import, division, print_function
import os.path
import torch
import torch.utils.data as data
import numpy as np
from torchvision import transforms as vision_transforms
from .common import read_image_as_byte, read_calib_into_dict
from .common import kitti_crop_image_list, kitti_adjust_intrinsic
from .common import intrinsic_scale
class KITTI_Raw(data.Dataset):
def __init__(self,
args,
images_root=None,
preprocessing_crop=False,
crop_size=[370, 1224],
num_examples=-1,
index_file=None):
self._args = args
self._seq_len = 1
self._preprocessing_crop = preprocessing_crop
self._crop_size = crop_size
path_dir = os.path.dirname(os.path.realpath(__file__))
path_index_file = os.path.join(path_dir, index_file)
if not os.path.exists(path_index_file):
raise ValueError("Index File '%s' not found!", path_index_file)
index_file = open(path_index_file, 'r')
## loading image -----------------------------------
if not os.path.isdir(images_root):
raise ValueError("Image directory '%s' not found!")
filename_list = [line.rstrip().split(' ') for line in index_file.readlines()]
self._image_list = []
view1 = 'image_02/data'
view2 = 'image_03/data'
ext = '.jpg'
for item in filename_list:
date = item[0][:10]
scene = item[0]
idx_src = item[1]
for ii in range(self._seq_len):
idx_tgt = '%.10d' % (int(idx_src) + ii + 1)
name_l1 = os.path.join(images_root, date, scene, view1, idx_src) + ext
name_r1 = os.path.join(images_root, date, scene, view2, idx_src) + ext
if os.path.isfile(name_l1) and os.path.isfile(name_r1):
self._image_list.append([name_l1, name_r1])
if num_examples > 0:
self._image_list = self._image_list[:num_examples]
self._size = len(self._image_list)
## loading calibration matrix
self.intrinsic_dict_l = {}
self.intrinsic_dict_r = {}
self.intrinsic_dict_l, self.intrinsic_dict_r = read_calib_into_dict(path_dir)
# ----------------------------------------------------------
# Image resize only
# ----------------------------------------------------------
self._resize_to_tensor = vision_transforms.Compose([
vision_transforms.ToPILImage(),
vision_transforms.Resize((256, 512)),
vision_transforms.transforms.ToTensor()
])
self._to_tensor = vision_transforms.Compose([
vision_transforms.transforms.ToTensor()
])
def __getitem__(self, index):
index = index % self._size
im_l1_filename = self._image_list[index][0]
im_r1_filename = self._image_list[index][1]
# read float32 images and flow
im_l1_np = read_image_as_byte(im_l1_filename)
im_r1_np = read_image_as_byte(im_r1_filename)
# example filename
basename = os.path.basename(im_l1_filename)[:6]
dirname = os.path.dirname(im_l1_filename)[-51:]
datename = dirname[:10]
k_l1 = torch.from_numpy(self.intrinsic_dict_l[datename]).float()
k_r1 = torch.from_numpy(self.intrinsic_dict_r[datename]).float()
k_l1_orig = k_l1.clone()
h_orig, w_orig, _ = im_l1_np.shape
input_im_size = torch.from_numpy(np.array([h_orig, w_orig])).float()
# resizing image
if not self._preprocessing_crop:
# No Geometric Augmentation, Resizing to 256 x 512 here
# resizing input images
im_l1 = self._resize_to_tensor(im_l1_np)
im_r1 = self._resize_to_tensor(im_r1_np)
# resizing intrinsic matrix
k_l1 = intrinsic_scale(k_l1, im_l1.size(1) / h_orig, im_l1.size(2) / w_orig)
k_r1 = intrinsic_scale(k_r1, im_r1.size(1) / h_orig, im_r1.size(2) / w_orig)
else:
# For Geometric Augmentation, first crop the images to 370 x 1224 here,
# then do the augmentation in augmentation.py
# get starting positions
crop_height = self._crop_size[0]
crop_width = self._crop_size[1]
x = np.random.uniform(0, w_orig - crop_width + 1)
y = np.random.uniform(0, h_orig - crop_height + 1)
crop_info = [int(x), int(y), int(x + crop_width), int(y + crop_height)]
# cropping images and adjust intrinsic accordingly
im_l1_np, im_r1_np = kitti_crop_image_list([im_l1_np, im_r1_np], crop_info)
im_l1 = self._to_tensor(im_l1_np)
im_r1 = self._to_tensor(im_r1_np)
k_l1, k_r1 = kitti_adjust_intrinsic(k_l1, k_r1, crop_info)
# For CamConv
k_r1_flip = k_r1.clone()
k_r1_flip[0, 2] = im_r1.size(2) - k_r1_flip[0, 2]
example_dict = {
"input_l1": im_l1,
"input_r1": im_r1,
"index": index,
"basename": basename,
"datename": datename,
"input_k_l1_orig": k_l1_orig,
"input_k_l1": k_l1,
"input_k_r1": k_r1,
"input_k_r1_flip": k_r1_flip,
"input_size": input_im_size
}
return example_dict
def __len__(self):
return self._size
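# Hedged sketch (not part of the original dataset code): what the imported
# intrinsic_scale() helper is assumed to do in __getitem__ above, i.e. rescale
# the focal lengths and principal point of a 3x3 intrinsic matrix by the same
# vertical/horizontal factors used to resize the image.
def _intrinsic_scale_example(k, sy, sx):
    k = k.clone()
    k[0, 0] *= sx  # fx scales with image width
    k[0, 2] *= sx  # cx scales with image width
    k[1, 1] *= sy  # fy scales with image height
    k[1, 2] *= sy  # cy scales with image height
    return k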
class KITTI_Raw_KittiSplit_Train(KITTI_Raw):
def __init__(self,
args,
root,
preprocessing_crop=False,
crop_size=[370, 1224],
num_examples=-1):
super(KITTI_Raw_KittiSplit_Train, self).__init__(
args,
images_root=root,
preprocessing_crop=preprocessing_crop,
crop_size=crop_size,
num_examples=num_examples,
index_file="index_txt/kitti_train.txt")
class KITTI_Raw_KittiSplit_Valid(KITTI_Raw):
def __init__(self,
args,
root,
preprocessing_crop=False,
crop_size=[370, 1224],
num_examples=-1):
super(KITTI_Raw_KittiSplit_Valid, self).__init__(
args,
images_root=root,
preprocessing_crop=preprocessing_crop,
crop_size=crop_size,
num_examples=num_examples,
index_file="index_txt/kitti_valid.txt") |
src/olympia/scanners/migrations/0021_auto_20200122_1347.py | shashwatsingh/addons-server | 843 | 11184216 | <gh_stars>100-1000
# Generated by Django 2.2.9 on 2020-01-22 13:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('scanners', '0020_auto_20200116_1250'),
]
operations = [
migrations.AlterField(
model_name='scannerqueryresult',
name='scanner',
field=models.PositiveSmallIntegerField(choices=[(1, 'customs'), (2, 'wat'), (3, 'yara'), (4, 'ml_api')]),
),
migrations.AlterField(
model_name='scannerqueryrule',
name='scanner',
field=models.PositiveSmallIntegerField(choices=[(1, 'customs'), (2, 'wat'), (3, 'yara'), (4, 'ml_api')]),
),
migrations.AlterField(
model_name='scannerresult',
name='scanner',
field=models.PositiveSmallIntegerField(choices=[(1, 'customs'), (2, 'wat'), (3, 'yara'), (4, 'ml_api')]),
),
migrations.AlterField(
model_name='scannerrule',
name='scanner',
field=models.PositiveSmallIntegerField(choices=[(1, 'customs'), (2, 'wat'), (3, 'yara'), (4, 'ml_api')]),
),
]
|
impala/_thrift_api.py | wzhou-code/impyla | 661 | 11184229 | # Copyright 2015 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This package's main goal in the past was to clean up references to thrift, because
# we were using thriftpy2 for Py3. This is no longer necessary since upgrading to
# Thrift 0.11.0, as Thrift supports Python 3 since 0.10.0. Now there are only some
# leftover utility classes and functions.
# pylint: disable=wrong-import-position
from __future__ import absolute_import
import base64
import datetime
import getpass
import os
import os.path
from collections import namedtuple
from io import BytesIO
from six.moves import urllib, http_client
import warnings
import six
import ssl
import sys
from impala.error import HttpError
from impala.util import get_logger_and_init_null
from impala.util import get_all_matching_cookies, get_cookie_expiry
# Declare namedtuple for Cookie with named fields - cookie and expiry_time
Cookie = namedtuple('Cookie', ['cookie', 'expiry_time'])
log = get_logger_and_init_null(__name__)
# pylint: disable=import-error,unused-import
# import Apache Thrift code
from thrift.transport.TSocket import TSocket
from thrift.transport.TTransport import (
TBufferedTransport, TTransportException, TTransportBase)
# import HS2 codegen objects
from impala._thrift_gen.ImpalaService import ImpalaHiveServer2Service
ThriftClient = ImpalaHiveServer2Service.Client
# ImpalaHttpClient is copied from Impala Shell.
# The implementations should be kept in sync as much as possible.
class ImpalaHttpClient(TTransportBase):
"""Http implementation of TTransport base."""
# When sending requests larger than this size, include the 'Expect: 100-continue' header
# to indicate to the server to validate the request before reading the contents. This
# value was chosen to match curl's behavior. See Section 8.2.3 of RFC2616.
MIN_REQUEST_SIZE_FOR_EXPECT = 1024
def __init__(self, uri_or_host, port=None, path=None, cafile=None, cert_file=None,
key_file=None, ssl_context=None, http_cookie_names=None):
"""ImpalaHttpClient supports two different types of construction:
ImpalaHttpClient(host, port, path) - deprecated
ImpalaHttpClient(uri, [port=<n>, path=<s>, cafile=<filename>, cert_file=<filename>,
key_file=<filename>, ssl_context=<context>, http_cookie_names=<cookienamelist>])
Only the second supports https. To properly authenticate against the server,
provide the client's identity by specifying cert_file and key_file. To properly
authenticate the server, specify either cafile or ssl_context with a CA defined.
NOTE: if both cafile and ssl_context are defined, ssl_context will override cafile.
http_cookie_names is used to specify the list of possible cookie names used for
cookie-based authentication or session management. If there's only one name in the
cookie name list, a str value can be specified instead of the list. If a cookie with
one of these names is returned in an http response by the server or an intermediate
proxy then it will be included in each subsequent request for the same connection.
"""
if port is not None:
warnings.warn(
"Please use the ImpalaHttpClient('http{s}://host:port/path') constructor",
DeprecationWarning,
stacklevel=2)
self.host = uri_or_host
self.port = port
assert path
self.path = path
self.scheme = 'http'
else:
parsed = urllib.parse.urlparse(uri_or_host)
self.scheme = parsed.scheme
assert self.scheme in ('http', 'https')
if self.scheme == 'http':
self.port = parsed.port or http_client.HTTP_PORT
elif self.scheme == 'https':
self.port = parsed.port or http_client.HTTPS_PORT
self.certfile = cert_file
self.keyfile = key_file
self.context = ssl.create_default_context(cafile=cafile) \
if (cafile and not ssl_context) else ssl_context
self.host = parsed.hostname
self.path = parsed.path
if parsed.query:
self.path += '?%s' % parsed.query
try:
proxy = urllib.request.getproxies()[self.scheme]
except KeyError:
proxy = None
else:
if urllib.request.proxy_bypass(self.host):
proxy = None
if proxy:
parsed = urllib.parse.urlparse(proxy)
self.realhost = self.host
self.realport = self.port
self.host = parsed.hostname
self.port = parsed.port
self.proxy_auth = self.basic_proxy_auth_header(parsed)
else:
self.realhost = self.realport = self.proxy_auth = None
if not http_cookie_names:
# 'http_cookie_names' was explicitly set as an empty value ([], or '') in connect().
self.__http_cookie_dict = None
self.__auth_cookie_names = None
else:
if isinstance(http_cookie_names, six.string_types):
http_cookie_names = [http_cookie_names]
# Build a dictionary that maps cookie name to namedtuple.
self.__http_cookie_dict = \
{ cn: Cookie(cookie=None, expiry_time=None) for cn in http_cookie_names }
# Store the auth cookie names in __auth_cookie_names.
# Assume auth cookie names end with ".auth".
self.__auth_cookie_names = \
[ cn for cn in http_cookie_names if cn.endswith(".auth") ]
# Set __are_matching_cookies_found as True if matching cookies are found in response.
self.__are_matching_cookies_found = False
self.__wbuf = BytesIO()
self.__http = None
self.__http_response = None
self.__timeout = None
# __custom_headers is used to store HTTP headers which are generated at runtime
# for each new request.
self.__custom_headers = None
self.__get_custom_headers_func = None
@staticmethod
def basic_proxy_auth_header(proxy):
if proxy is None or not proxy.username:
return None
ap = "%s:%s" % (urllib.parse.unquote(proxy.username),
urllib.parse.unquote(proxy.password))
cr = base64.b64encode(ap).strip()
return "Basic " + cr
def using_proxy(self):
return self.realhost is not None
def open(self):
if self.scheme == 'http':
self.__http = http_client.HTTPConnection(self.host, self.port,
timeout=self.__timeout)
elif self.scheme == 'https':
self.__http = http_client.HTTPSConnection(self.host, self.port,
key_file=self.keyfile,
cert_file=self.certfile,
timeout=self.__timeout,
context=self.context)
if self.using_proxy():
self.__http.set_tunnel(self.realhost, self.realport,
{"Proxy-Authorization": self.proxy_auth})
def close(self):
self.__http.close()
self.__http = None
self.__http_response = None
def isOpen(self):
return self.__http is not None
def is_open(self):
return self.__http is not None
def setTimeout(self, ms):
if ms is None:
self.__timeout = None
else:
self.__timeout = ms / 1000.0
def setCustomHeaders(self, headers):
self.__custom_headers = headers
# Set a callback function which generates HTTP headers for a specific auth mechanism.
def setGetCustomHeadersFunc(self, func):
self.__get_custom_headers_func = func
# Update HTTP headers based on the saved cookies and auth mechanism.
def refreshCustomHeaders(self):
if self.__get_custom_headers_func:
cookie_header, has_auth_cookie = self.getHttpCookieHeaderForRequest()
self.__custom_headers = \
self.__get_custom_headers_func(cookie_header, has_auth_cookie)
# Return two values. The first is a cookie list for the Cookie header: name-value
# pairs in the form of <cookie-name>=<cookie-value>, with pairs separated by a
# semicolon and a space ('; ').
# The second is True if the cookie list contains an auth cookie.
def getHttpCookieHeaderForRequest(self):
if (self.__http_cookie_dict is None) or not self.__are_matching_cookies_found:
return None, False
cookie_headers = []
has_auth_cookie = False
for cn, c_tuple in self.__http_cookie_dict.items():
if c_tuple.cookie:
if c_tuple.expiry_time and c_tuple.expiry_time <= datetime.datetime.now():
self.__http_cookie_dict[cn] = Cookie(cookie=None, expiry_time=None)
else:
cookie_header = c_tuple.cookie.output(attrs=['value'], header='').strip()
cookie_headers.append(cookie_header)
if not has_auth_cookie and self.__auth_cookie_names \
and cn in self.__auth_cookie_names:
has_auth_cookie = True
if not cookie_headers:
self.__are_matching_cookies_found = False
return None, False
else:
return '; '.join(cookie_headers), has_auth_cookie
# Extract cookies from response and save those cookies for which the cookie names
# are in the cookie name list specified in the connect() API.
def extractHttpCookiesFromResponse(self):
if self.__http_cookie_dict is not None:
matching_cookies = get_all_matching_cookies(
self.__http_cookie_dict.keys(), self.path, self.headers)
if matching_cookies:
self.__are_matching_cookies_found = True
for c in matching_cookies:
self.__http_cookie_dict[c.key] = Cookie(c, get_cookie_expiry(c))
# Return True if there are any saved cookies which were sent in a previous request.
def areHttpCookiesSaved(self):
return self.__are_matching_cookies_found
# Clean all saved cookies.
def cleanHttpCookies(self):
if (self.__http_cookie_dict is not None) and self.__are_matching_cookies_found:
self.__are_matching_cookies_found = False
self.__http_cookie_dict = \
{ cn: Cookie(cookie=None, expiry_time=None) for cn in self.__http_cookie_dict }
def read(self, sz):
return self.__http_response.read(sz)
def readBody(self):
return self.__http_response.read()
def write(self, buf):
self.__wbuf.write(buf)
def flush(self):
# Send HTTP request and receive response.
# Return True if the client should retry this method.
def sendRequestRecvResp(data):
if self.isOpen():
self.close()
self.open()
# HTTP request
if self.using_proxy() and self.scheme == "http":
# need full URL of real host for HTTP proxy here (HTTPS uses CONNECT tunnel)
self.__http.putrequest('POST', "http://%s:%s%s" %
(self.realhost, self.realport, self.path))
else:
self.__http.putrequest('POST', self.path)
# Write headers
self.__http.putheader('Content-Type', 'application/x-thrift')
data_len = len(data)
self.__http.putheader('Content-Length', str(data_len))
if data_len > ImpalaHttpClient.MIN_REQUEST_SIZE_FOR_EXPECT:
# Add the 'Expect' header to large requests. Note that we do not explicitly wait
# for the '100 continue' response before sending the data - HTTPConnection simply
# ignores these types of responses, but we'll get the right behavior anyway.
self.__http.putheader("Expect", "100-continue")
if self.using_proxy() and self.scheme == "http" and self.proxy_auth is not None:
self.__http.putheader("Proxy-Authorization", self.proxy_auth)
self.refreshCustomHeaders()
if not self.__custom_headers or 'User-Agent' not in self.__custom_headers:
user_agent = 'Python/ImpalaHttpClient'
script = os.path.basename(sys.argv[0])
if script:
user_agent = '%s (%s)' % (user_agent, urllib.parse.quote(script))
self.__http.putheader('User-Agent', user_agent)
if self.__custom_headers:
for key, val in six.iteritems(self.__custom_headers):
self.__http.putheader(key, val)
self.__http.endheaders()
# Write payload
self.__http.send(data)
# Get reply to flush the request
self.__http_response = self.__http.getresponse()
self.code = self.__http_response.status
self.message = self.__http_response.reason
self.headers = self.__http_response.msg
# A '401 Unauthorized' response might mean that we tried cookie-based
# authentication with one or more expired cookies.
# Delete the cookies and try again.
if self.code == 401 and self.areHttpCookiesSaved():
self.cleanHttpCookies()
return True
else:
self.extractHttpCookiesFromResponse()
return False
# Pull data out of buffer
data = self.__wbuf.getvalue()
self.__wbuf = BytesIO()
retry = sendRequestRecvResp(data)
if retry:
log.debug('Received "401 Unauthorized" response. '
'Delete HTTP cookies and then retry.')
sendRequestRecvResp(data)
if self.code >= 300:
# Report any http response code that is not 1XX (informational response) or
# 2XX (successful).
body = self.readBody()
raise HttpError(self.code, self.message, body, self.headers)
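# Hedged usage sketch (illustration only, not part of the original module):
# constructing ImpalaHttpClient from a full URI as the class docstring above
# describes. The URL, CA bundle path and cookie name are made-up examples.
def _example_impala_http_client():
    client = ImpalaHttpClient(
        'https://impala-coordinator.example.com:28443/cliservice',
        cafile='/etc/ssl/certs/ca-bundle.crt',
        http_cookie_names=['impala.auth'])
    client.setTimeout(30 * 1000)  # setTimeout() expects milliseconds
    return client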
def get_socket(host, port, use_ssl, ca_cert):
# based on the Impala shell impl
log.debug('get_socket: host=%s port=%s use_ssl=%s ca_cert=%s',
host, port, use_ssl, ca_cert)
if use_ssl:
from thrift.transport.TSSLSocket import TSSLSocket
if ca_cert is None:
return TSSLSocket(host, port, validate=False)
else:
return TSSLSocket(host, port, validate=True, ca_certs=ca_cert)
else:
return TSocket(host, port)
def get_http_transport(host, port, http_path, timeout=None, use_ssl=False,
ca_cert=None, auth_mechanism='NOSASL', user=None,
password=None, kerberos_host=None, kerberos_service_name=None,
http_cookie_names=None, jwt=None):
# TODO: support timeout
if timeout is not None:
log.error('get_http_transport does not support a timeout')
if use_ssl:
ssl_ctx = ssl.create_default_context(cafile=ca_cert)
if ca_cert:
ssl_ctx.verify_mode = ssl.CERT_REQUIRED
else:
ssl_ctx.check_hostname = False # Mandated by the SSL lib for CERT_NONE mode.
ssl_ctx.verify_mode = ssl.CERT_NONE
url = 'https://%s:%s/%s' % (host, port, http_path)
log.debug('get_http_transport url=%s', url)
# TODO(#362): Add server authentication with thrift 0.12.
transport = ImpalaHttpClient(url, ssl_context=ssl_ctx,
http_cookie_names=http_cookie_names)
else:
url = 'http://%s:%s/%s' % (host, port, http_path)
log.debug('get_http_transport url=%s', url)
transport = ImpalaHttpClient(url, http_cookie_names=http_cookie_names)
if auth_mechanism in ['PLAIN', 'LDAP']:
# Set defaults for PLAIN SASL / LDAP connections.
if user is None:
user = getpass.getuser()
log.debug('get_http_transport: user=%s', user)
if password is None:
if auth_mechanism == 'LDAP':
password = ''
else:
# PLAIN always requires a password for HS2.
password = 'password'
log.debug('get_http_transport: password=%s', password)
auth_mechanism = 'PLAIN' # sasl doesn't know mechanism LDAP
# Set the BASIC auth header
user_password = '%s:%s'.encode() % (user.encode(), password.encode())
try:
auth = base64.encodebytes(user_password).decode().strip('\n')
except AttributeError:
auth = base64.encodestring(user_password).decode().strip('\n')
def get_custom_headers(cookie_header, has_auth_cookie):
custom_headers = {}
if cookie_header:
log.debug('add cookies to HTTP header')
custom_headers['Cookie'] = cookie_header
# Add the 'Authorization' header to the request even if the auth cookie is
# present, to avoid a round trip in case the cookie has expired by the time
# the server receives the request. Since the 'auth' value is calculated only
# once, this does not cause a performance issue.
custom_headers['Authorization'] = "Basic " + auth
return custom_headers
transport.setGetCustomHeadersFunc(get_custom_headers)
elif auth_mechanism == 'GSSAPI':
# For GSSAPI over http we need to dynamically generate custom request headers.
def get_custom_headers(cookie_header, has_auth_cookie):
import kerberos
custom_headers = {}
if cookie_header:
log.debug('add cookies to HTTP header')
custom_headers['Cookie'] = cookie_header
if not has_auth_cookie:
_, krb_context = kerberos.authGSSClientInit("%s@%s" %
(kerberos_service_name, kerberos_host))
kerberos.authGSSClientStep(krb_context, "")
negotiate_details = kerberos.authGSSClientResponse(krb_context)
custom_headers['Authorization'] = "Negotiate " + negotiate_details
return custom_headers
transport.setGetCustomHeadersFunc(get_custom_headers)
elif auth_mechanism == 'JWT':
# For JWT authentication, the JWT is sent on the Authorization Bearer
# HTTP header.
def get_custom_headers(cookie_header, has_auth_cookie):
custom_headers = {}
if cookie_header:
log.debug('add cookies to HTTP header')
custom_headers['Cookie'] = cookie_header
custom_headers['Authorization'] = "Bearer " + jwt
return custom_headers
transport.setGetCustomHeadersFunc(get_custom_headers)
elif auth_mechanism == 'NOSASL':
def get_custom_headers(cookie_header, has_auth_cookie):
custom_headers = {}
if cookie_header:
log.debug('add cookies to HTTP header')
custom_headers['Cookie'] = cookie_header
return custom_headers
transport.setGetCustomHeadersFunc(get_custom_headers)
# Without buffering Thrift would call socket.recv() each time it deserializes
# something (e.g. a member in a struct).
transport = TBufferedTransport(transport)
return transport
def get_transport(socket, host, kerberos_service_name, auth_mechanism='NOSASL',
user=None, password=None):
"""
Creates a new Thrift Transport using the specified auth_mechanism.
Supported auth_mechanisms are:
- None or 'NOSASL' - returns simple buffered transport (default)
- 'PLAIN' - returns a SASL transport with the PLAIN mechanism
- 'GSSAPI' - returns a SASL transport with the GSSAPI mechanism
"""
log.debug('get_transport: socket=%s host=%s kerberos_service_name=%s '
'auth_mechanism=%s user=%s password=<PASSWORD>', socket, host,
kerberos_service_name, auth_mechanism, user)
if auth_mechanism == 'NOSASL':
return TBufferedTransport(socket)
# Set defaults for PLAIN SASL / LDAP connections.
if auth_mechanism in ['LDAP', 'PLAIN']:
if user is None:
user = getpass.getuser()
log.debug('get_transport: user=%s', user)
if password is None:
if auth_mechanism == 'LDAP':
password = ''
else:
# PLAIN always requires a password for HS2.
password = 'password'
log.debug('get_transport: password=%s', password)
auth_mechanism = 'PLAIN' # sasl doesn't know mechanism LDAP
# Initializes a sasl client
from thrift_sasl import TSaslClientTransport
from impala.sasl_compat import PureSASLClient
def sasl_factory():
return PureSASLClient(host, username=user, password=password,
service=kerberos_service_name)
return TSaslClientTransport(sasl_factory, auth_mechanism, socket)
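# Hedged usage sketch (illustration only, not part of the original module):
# an LDAP-authenticated HTTP connection built with get_http_transport() above.
# Host, port, path, CA bundle and credentials are made-up examples.
def _example_ldap_http_transport():
    transport = get_http_transport(
        'impala-coordinator.example.com', 28443, 'cliservice',
        use_ssl=True, ca_cert='/etc/ssl/certs/ca-bundle.crt',
        auth_mechanism='LDAP', user='alice', password='secret',
        http_cookie_names=['impala.auth'])
    transport.open()  # TBufferedTransport wrapping ImpalaHttpClient
    return transport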
|
leo/external/leoserver/leoserver.py | ATikhonov2/leo-editor | 1,550 | 11184230 | #@+leo-ver=5-thin
#@+node:ekr.20180216124241.1: * @file c:/leo.repo/leo-editor/leo/external/leoserver/leoserver.py
#@@language python
#@@tabwidth -4
#@+<< imports >>
#@+node:ekr.20180216124319.1: ** << imports >>
import json
import webbrowser
from http.server import HTTPServer, BaseHTTPRequestHandler
import leo.core.leoBridge as leoBridge
#@-<< imports >>
#@+others
#@+node:ekr.20180216124319.2: ** class LeoHTTPRequestHandler
class LeoHTTPRequestHandler(BaseHTTPRequestHandler):
#@+others
#@+node:ekr.20180216124319.3: *3* do_GET
def do_GET(self):
self.send_response(200)
self.end_headers()
if self.path in STATIC_FILES:
# g.trace('%s => %s' % (self.path, STATIC_FILES.get(self.path)))
self.wfile.write(open(STATIC_FILES[self.path], 'rb').read())
else:
assert self.path == '/get_tree'
c = self.server.namespace['c']
nodes = [{'h':i.h, 'b':i.b} for i in c.p.self_and_siblings()]
# for i in c.all_positions()
response = {'nodes': nodes}
self.wfile.write(json.dumps(response).encode('utf-8'))
#@+node:ekr.20180216124319.4: *3* do_POST
def do_POST(self):
self.send_response(200)
self.end_headers()
content_length = int(self.headers['content-length'])
data = self.rfile.read(content_length).decode('utf-8')
data = json.loads(data)
command = data['cmd']
if not command:
return
if command[0] == ':':
# A statement.
exec(data['cmd'][1:], self.server.namespace)
response = {'answer': 'OK\n'}
else:
# An expression.
result = eval(command, self.server.namespace)
response = {'answer': repr(result)+'\n'}
s = json.dumps(response).encode('utf-8')
self.wfile.write(s)
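# Hedged illustration (not part of the original file) of the JSON protocol
# do_POST expects: a 'cmd' starting with ':' is exec()'d as a statement in
# the server namespace {'c': c, 'g': g}, anything else is eval()'d as an
# expression and the repr() of the result is returned. Made-up payloads:
#   {"cmd": ":g.es('hello')"}  ->  {"answer": "OK\n"}
#   {"cmd": "c.p.h"}           ->  {"answer": "'<headline of current node>'\n"}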
#@-others
#@+node:ekr.20180216124319.5: ** open_bridge
def open_bridge():
'''Open the Leo bridge and return (controller, g).'''
print('opening leoBridge...')
controller = leoBridge.controller(
gui='nullGui',
loadPlugins=False, # True: attempt to load plugins.
readSettings=False, # True: read standard settings files.
silent=True, # True: don't print signon messages.
verbose=False, # True: print informational messages.
)
g = controller.globals()
return controller, g
#@-others
controller, g = open_bridge()
join = g.os_path_finalize_join
loadDir = g.app.loadDir
#@+<< define STATIC_FILES >>
#@+node:ekr.20180216125137.1: ** << define STATIC_FILES >>
STATIC_FILES = {
# '/favicon.ico': 'leo/Icons/LeoApp.ico',
'/favicon.ico': join(loadDir, '..', 'Icons', 'LeoApp.ico'),
# '/index.html': 'leoserver.html',
'/index.html': join(loadDir, '..', 'external', 'leoserver', 'leoserver.html'),
# '/leoserver.js': 'leoserver.js',
# '/leoserver.js': 'c:/test/Terry/leoserver.js',
'/leoserver.js': join(loadDir,'..', 'external', 'leoserver', 'leoserver.js'),
# '/leoserver.css': 'leoserver.css',
# '/leoserver.css': 'c:/test/Terry/leoserver.css',
'/leoserver.css': join(loadDir,'..', 'external', 'leoserver', 'leoserver.css'),
}
#@-<< define STATIC_FILES >>
path = join(loadDir, '..', 'doc', 'LeoDocs.leo')
c = controller.openLeoFile(path)
server = HTTPServer(('127.0.0.1', 8370), LeoHTTPRequestHandler)
server.namespace = {'c': c, 'g': g}
webbrowser.open("http://127.0.0.1:8370/index.html")
try:
server.serve_forever()
except KeyboardInterrupt:
print('Keyboard interrupt. Bye')
#@-leo
|
youtube_dl/extractor/tiktok.py | thename2468/youtube-dl-commithistory88 | 927 | 11184235 | <reponame>thename2468/youtube-dl-commithistory88
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
compat_str,
ExtractorError,
int_or_none,
str_or_none,
try_get,
url_or_none,
)
class TikTokBaseIE(InfoExtractor):
def _extract_aweme(self, data):
video = data['video']
description = str_or_none(try_get(data, lambda x: x['desc']))
width = int_or_none(try_get(data, lambda x: video['width']))
height = int_or_none(try_get(data, lambda x: video['height']))
format_urls = set()
formats = []
for format_id in (
'play_addr_lowbr', 'play_addr', 'play_addr_h264',
'download_addr'):
for format in try_get(
video, lambda x: x[format_id]['url_list'], list) or []:
format_url = url_or_none(format)
if not format_url:
continue
if format_url in format_urls:
continue
format_urls.add(format_url)
formats.append({
'url': format_url,
'ext': 'mp4',
'height': height,
'width': width,
})
self._sort_formats(formats)
thumbnail = url_or_none(try_get(
video, lambda x: x['cover']['url_list'][0], compat_str))
uploader = try_get(data, lambda x: x['author']['nickname'], compat_str)
timestamp = int_or_none(data.get('create_time'))
comment_count = int_or_none(data.get('comment_count')) or int_or_none(
try_get(data, lambda x: x['statistics']['comment_count']))
repost_count = int_or_none(try_get(
data, lambda x: x['statistics']['share_count']))
aweme_id = data['aweme_id']
return {
'id': aweme_id,
'title': uploader or aweme_id,
'description': description,
'thumbnail': thumbnail,
'uploader': uploader,
'timestamp': timestamp,
'comment_count': comment_count,
'repost_count': repost_count,
'formats': formats,
}
class TikTokIE(TikTokBaseIE):
_VALID_URL = r'''(?x)
https?://
(?:
(?:m\.)?tiktok\.com/v|
(?:www\.)?tiktok\.com/share/video
)
/(?P<id>\d+)
'''
_TESTS = [{
'url': 'https://m.tiktok.com/v/6606727368545406213.html',
'md5': 'd584b572e92fcd48888051f238022420',
'info_dict': {
'id': '6606727368545406213',
'ext': 'mp4',
'title': 'Zureeal',
'description': '#bowsette#mario#cosplay#uk#lgbt#gaming#asian#bowsettecosplay',
'thumbnail': r're:^https?://.*~noop.image',
'uploader': 'Zureeal',
'timestamp': 1538248586,
'upload_date': '20180929',
'comment_count': int,
'repost_count': int,
}
}, {
'url': 'https://www.tiktok.com/share/video/6606727368545406213',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(
'https://m.tiktok.com/v/%s.html' % video_id, video_id)
data = self._parse_json(self._search_regex(
r'\bdata\s*=\s*({.+?})\s*;', webpage, 'data'), video_id)
return self._extract_aweme(data)
class TikTokUserIE(TikTokBaseIE):
_VALID_URL = r'''(?x)
https?://
(?:
(?:m\.)?tiktok\.com/h5/share/usr|
(?:www\.)?tiktok\.com/share/user
)
/(?P<id>\d+)
'''
_TESTS = [{
'url': 'https://m.tiktok.com/h5/share/usr/188294915489964032.html',
'info_dict': {
'id': '188294915489964032',
},
'playlist_mincount': 24,
}, {
'url': 'https://www.tiktok.com/share/user/188294915489964032',
'only_matching': True,
}]
def _real_extract(self, url):
user_id = self._match_id(url)
data = self._download_json(
'https://m.tiktok.com/h5/share/usr/list/%s/' % user_id, user_id,
query={'_signature': '_'})
entries = []
for aweme in data['aweme_list']:
try:
entry = self._extract_aweme(aweme)
except ExtractorError:
continue
entry['extractor_key'] = TikTokIE.ie_key()
entries.append(entry)
return self.playlist_result(entries, user_id)
|
albu/src/eval.py | chritter/kaggle_carvana_segmentation | 447 | 11184244 | import os
import cv2
import numpy as np
from scipy.spatial.distance import dice
import torch
import torch.nn.functional as F
import torch.nn as nn
# torch.backends.cudnn.benchmark = True
import tqdm
from dataset.neural_dataset import ValDataset, SequentialDataset
from torch.utils.data.dataloader import DataLoader as PytorchDataLoader
from utils import heatmap
class flip:
FLIP_NONE=0
FLIP_LR=1
FLIP_FULL=2
def flip_tensor_lr(batch):
columns = batch.data.size()[-1]
return batch.index_select(3, torch.LongTensor(list(reversed(range(columns)))).cuda())
def flip_tensor_ud(batch):
rows = batch.data.size()[-2]
return batch.index_select(2, torch.LongTensor(list(reversed(range(rows)))).cuda())
def to_numpy(batch):
if isinstance(batch, tuple):
batch = batch[0]
return F.sigmoid(batch).data.cpu().numpy()
def predict(model, batch, flips=flip.FLIP_NONE):
pred1 = model(batch)
if flips > flip.FLIP_NONE:
pred2 = flip_tensor_lr(model(flip_tensor_lr(batch)))
masks = [pred1, pred2]
if flips > flip.FLIP_LR:
pred3 = flip_tensor_ud(model(flip_tensor_ud(batch)))
pred4 = flip_tensor_ud(flip_tensor_lr(model(flip_tensor_ud(flip_tensor_lr(batch)))))
masks.extend([pred3, pred4])
new_mask = torch.mean(torch.stack(masks, 0), 0)
return to_numpy(new_mask)
return to_numpy(pred1)
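# Hedged usage sketch (not part of the original file): full test-time
# augmentation with predict(). Raw outputs for the original, LR-flipped,
# UD-flipped and doubly flipped batches are averaged before the sigmoid.
# model and batch are assumed to be a CUDA segmentation model and a CUDA
# image tensor, as in Evaluator.predict_samples above.
def _example_tta(model, batch):
    return predict(model, batch, flips=flip.FLIP_FULL)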
def read_model(weights_path, project, fold):
model = nn.DataParallel(torch.load(os.path.join(weights_path, project, 'fold{}_best.pth'.format(fold))).module)
model.eval()
return model
class Evaluator:
def __init__(self, config, ds, folds, test=False, flips=0, num_workers=0, border=12):
self.config = config
self.ds = ds
self.folds = folds
self.test = test
self.flips = flips
self.num_workers = num_workers
self.full_image = None
self.full_mask = None
self.current_mask = None
self.full_pred = None
self.border = border
self.folder = config.folder
self.prev_name = None
self.on_new = False
self.show_mask = config.dbg
self.need_dice = False
self.dice = []
if self.config.save_images:
os.makedirs(os.path.join('..', 'results', self.config.folder), exist_ok=True)
def visualize(self, show_light=False, show_base=True):
dsize = None
hmap = heatmap(self.full_pred)
if self.full_image is not None and show_light:
light_heat = cv2.addWeighted(self.full_image[:,:,:3], 0.6, hmap, 0.4, 0)
if dsize:
light_heat = cv2.resize(light_heat, (dsize, dsize))
cv2.imshow('light heat', light_heat)
if self.full_mask is not None and self.show_mask:
light_mask = cv2.addWeighted(self.full_image[:,:,:3], 0.6, cv2.cvtColor(self.full_mask, cv2.COLOR_GRAY2BGR), 0.4, 0)
if dsize:
light_mask = cv2.resize(light_mask, (dsize, dsize))
cv2.imshow('light mask', light_mask)
if self.full_image is not None and show_base:
if dsize:
cv2.imshow('image', cv2.resize(self.full_image[:,:,:3], (dsize, dsize)))
else:
cv2.imshow('image', self.full_image[:,:,:3])
if dsize:
hmap = cv2.resize(hmap, (dsize, dsize))
cv2.imshow('heatmap', hmap)
if self.full_mask is not None and self.show_mask:
if dsize:
cv2.imshow('mask', cv2.resize(self.full_mask, (dsize, dsize)))
else:
cv2.imshow('mask', self.full_mask)
if show_light or show_base:
cv2.waitKey()
def predict(self, skip_folds=None):
for fold, (train_index, val_index) in enumerate(self.folds):
prefix = ('fold' + str(fold) + "_") if self.test else ""
if skip_folds is not None:
if fold in skip_folds:
continue
self.prev_name = None
ds_cls = ValDataset if not self.test else SequentialDataset
val_dataset = ds_cls(self.ds, val_index, stage='test', config=self.config)
val_dl = PytorchDataLoader(val_dataset, batch_size=self.config.predict_batch_size, num_workers=self.num_workers, drop_last=False)
weights_path = os.path.join(self.config.models_dir, 'albu')
model = read_model(weights_path, self.folder, fold)
pbar = val_dl if self.config.dbg else tqdm.tqdm(val_dl, total=len(val_dl))
for data in pbar:
self.show_mask = 'mask' in data and self.show_mask
if 'mask' not in data:
self.need_dice = False
predicted = self.predict_samples(model, data)
self.process_data(predicted, model, data, prefix=prefix)
if not self.config.dbg and self.need_dice:
pbar.set_postfix(dice="{:.5f}".format(np.mean(self.dice)))
if self.config.use_crop:
self.on_image_constructed(prefix=prefix)
def cut_border(self, image):
return image if not self.border else image[self.border:-self.border, self.border:-self.border, ...]
def on_image_constructed(self, prefix=""):
self.full_pred = self.cut_border(self.full_pred)
if self.full_image is not None:
self.full_image = self.cut_border(self.full_image)
if self.full_mask is not None:
self.full_mask = self.cut_border(self.full_mask)
if np.any(self.full_pred>.5) or np.any(self.full_mask>=1):
d = 1 - dice(self.full_pred.flatten() > .5, self.full_mask.flatten() >= 1)
self.dice.append(d)
if self.config.dbg:
print(self.prev_name, ' dice: ', d)
else:
return
# print(self.prev_name)
if self.config.dbg:
self.visualize(show_light=True)
if self.config.save_images:
self.save(self.prev_name, prefix=prefix)
def predict_samples(self, model, data):
samples = torch.autograd.Variable(data['image'].cuda(), volatile=True)
predicted = predict(model, samples, flips=self.flips)
return predicted
def get_data(self, data):
names = data['image_name']
samples = data['image'].numpy()
if self.need_dice or self.show_mask:
masks = data['mask'].numpy()
masks = np.moveaxis(masks, 1, -1)
else:
masks = None
if self.config.dbg:
samples = np.moveaxis(samples, 1, -1)
else:
samples = None
return names, samples, masks
def save(self, name, prefix=""):
raise NotImplementedError
def process_data(self, predicted, model, data, prefix=""):
raise NotImplementedError
|
netrd/dynamics/voter.py | sdmccabe/netrd | 116 | 11184261 | """
voter.py
--------
Implementation of voter model dynamics on a network.
author: <NAME>
Submitted as part of the 2019 NetSI Collabathon.
"""
from netrd.dynamics import BaseDynamics
import numpy as np
import networkx as nx
from ..utilities import unweighted
class VoterModel(BaseDynamics):
"""Voter dynamics."""
@unweighted
def simulate(self, G, L, noise=None):
r"""Simulate voter-model-style dynamics on a network.
Nodes are randomly assigned a state in :math:`\{-1, 1\}`; at each
time step all nodes asynchronously update by choosing their new
state uniformly from their neighbors. Generates an :math:`N \times
L` time series.
The results dictionary also stores the ground truth network as
`'ground_truth'`.
Parameters
----------
G (nx.Graph)
the input (ground-truth) graph with `N` nodes.
L (int)
the length of the desired time series.
noise (float, str or None)
if noise is present, with this probability a node's state will
be randomly redrawn from :math:`\{-1, 1\}` independent of its
neighbors' states. If 'automatic', set noise to :math:`1/N`.
Returns
-------
TS (np.ndarray)
an :math:`N \times L` array of synthetic time series data.
"""
N = G.number_of_nodes()
if noise is None:
noise = 0
elif noise == 'automatic' or noise == 'auto':
noise = 1 / N
elif not isinstance(noise, (int, float)):
raise ValueError("noise must be a number, 'automatic', or None")
transitions = nx.to_numpy_array(G)
transitions = transitions / np.sum(transitions, axis=0)
TS = np.zeros((N, L))
TS[:, 0] = [1 if x < 0.5 else -1 for x in np.random.rand(N)]
indices = np.arange(N)
for t in range(1, L):
np.random.shuffle(indices)
TS[:, t] = TS[:, t - 1]
for i in indices:
TS[i, t] = np.random.choice(TS[:, t], p=transitions[:, i])
if np.random.rand() < noise:
TS[i, t] = 1 if np.random.rand() < 0.5 else -1
self.results['ground_truth'] = G
self.results['TS'] = TS
return TS
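# Hedged usage sketch (not part of the original module): running the voter
# dynamics above on a small, connected example graph.
def _example_voter_run():
    G = nx.karate_club_graph()  # 34-node connected graph shipped with networkx
    TS = VoterModel().simulate(G, 50, noise='automatic')
    return TS  # array of shape (34, 50) with entries in {-1, 1}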
|
capstone/capdb/migrations/0028_auto_20180319_2038.py | rachelaus/capstone | 134 | 11184276 | # Generated by Django 2.0.2 on 2018-03-19 20:38
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('capdb', '0027_auto_20180314_2100'),
]
operations = [
migrations.RemoveField(
model_name='casexml',
name='case_id',
),
migrations.RemoveField(
model_name='historicalcasexml',
name='case_id',
),
]
|
webhooks/fail2ban/setup.py | hamptons/alerta-contrib | 114 | 11184287 | <reponame>hamptons/alerta-contrib
from setuptools import setup, find_packages
version = '1.0.0'
setup(
name="alerta-fail2ban",
version=version,
description='Alerta Webhook for Fail2Ban',
url='https://github.com/alerta/alerta-contrib',
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
packages=find_packages(),
py_modules=['alerta_fail2ban'],
install_requires=[],
include_package_data=True,
zip_safe=True,
entry_points={
'alerta.webhooks': [
'fail2ban = alerta_fail2ban:Fail2BanWebhook'
]
}
)
|
tests/perf/adam_test1.py | ConnollyLeon/DeepSpeed | 6,728 | 11184345 | import torch
from deepspeed.ops.adam import DeepSpeedCPUAdam
import time
device = 'cpu'
model_size = 1 * 1024**3
param = torch.nn.Parameter(torch.ones(model_size, device=device))
param_fp16 = torch.nn.Parameter(torch.ones(model_size,
dtype=torch.half,
device='cuda:0'))
optimizer = DeepSpeedCPUAdam([param])
#torch.set_num_threads(128)
param.grad = torch.ones(model_size, device=device)
avg = 0
for i in range(100):
start = time.time()
optimizer.step(fp16_param_groups=[param_fp16])
stop = time.time()
avg += (stop - start)
param.grad = torch.ones(model_size, device=device) * 2
print("Elapsed Time is ", avg / 100)
|
brew/generation/bagging.py | va26/brew | 344 | 11184391 | import numpy as np
from sklearn.ensemble import BaggingClassifier
from brew.base import Ensemble
from brew.combination.combiner import Combiner
import sklearn
from .base import PoolGenerator
class Bagging(PoolGenerator):
def __init__(self,
base_classifier=None,
n_classifiers=100,
combination_rule='majority_vote'):
self.base_classifier = base_classifier
self.n_classifiers = n_classifiers
self.ensemble = None
self.combiner = Combiner(rule=combination_rule)
def fit(self, X, y):
self.ensemble = Ensemble()
for _ in range(self.n_classifiers):
# bootstrap
idx = np.random.choice(X.shape[0], X.shape[0], replace=True)
data, target = X[idx, :], y[idx]
classifier = sklearn.base.clone(self.base_classifier)
classifier.fit(data, target)
self.ensemble.add(classifier)
return
def predict(self, X):
out = self.ensemble.output(X)
return self.combiner.combine(out)
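# Hedged usage sketch (not part of the original module): fitting the Bagging
# pool above with a scikit-learn base classifier; X and y are assumed to be
# numpy arrays of features and labels.
def _example_bagging(X, y):
    from sklearn.tree import DecisionTreeClassifier
    pool = Bagging(base_classifier=DecisionTreeClassifier(), n_classifiers=10)
    pool.fit(X, y)
    return pool.predict(X)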
class BaggingSK(PoolGenerator):
""""
This class should not be used, use brew.generation.bagging.Bagging instead.
"""
def __init__(self,
base_classifier=None,
n_classifiers=100,
combination_rule='majority_vote'):
self.base_classifier = base_classifier
self.n_classifiers = n_classifiers
# using the sklearn implementation of bagging for now
self.sk_bagging = BaggingClassifier(base_estimator=base_classifier,
n_estimators=n_classifiers,
max_samples=1.0,
max_features=1.0)
self.ensemble = Ensemble()
self.combiner = Combiner(rule=combination_rule)
def fit(self, X, y):
self.sk_bagging.fit(X, y)
self.ensemble.add_classifiers(self.sk_bagging.estimators_)
# self.classes_ = set(y)
def predict(self, X):
out = self.ensemble.output(X)
return self.combiner.combine(out)
|
Chapter05/sift_detect.py | debojyoti007/OpenCV | 105 | 11184400 | <gh_stars>100-1000
import cv2
import numpy as np
input_image = cv2.imread('images/fishing_house.jpg')
gray_image = cv2.cvtColor(input_image, cv2.COLOR_BGR2GRAY)
# For version opencv < 3.0.0, use cv2.SIFT()
sift = cv2.xfeatures2d.SIFT_create()
keypoints = sift.detect(gray_image, None)
cv2.drawKeypoints(input_image, keypoints, input_image, flags = cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv2.imshow('SIFT features', input_image)
cv2.waitKey() |
observations/r/co2.py | hajime9652/observations | 199 | 11184406 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def co2(path):
"""Carbon Dioxide Uptake in Grass Plants
The `CO2` data frame has 84 rows and 5 columns of data from an
experiment on the cold tolerance of the grass species *Echinochloa
crus-galli*.
An object of class
`c("nfnGroupedData", "nfGroupedData", "groupedData", "data.frame")`
containing the following columns:
Plant
an ordered factor with levels `Qn1` < `Qn2` < `Qn3` < ... <
`Mc1` giving a unique identifier for each plant.
Type
a factor with levels `Quebec` `Mississippi` giving the origin of
the plant
Treatment
a factor with levels `nonchilled` `chilled`
conc
a numeric vector of ambient carbon dioxide concentrations (mL/L).
uptake
a numeric vector of carbon dioxide uptake rates (*umol/m^2* sec).
<NAME>., <NAME>. and <NAME>. (1990) “The statistical
analysis of ecophysiological response curves obtained from experiments
involving repeated measures”, *Ecology*, **71**, 1389–1400.
<NAME>. and <NAME>. (2000) *Mixed-effects Models in S and
S-PLUS*, Springer.
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `co2.csv`.
Returns:
Tuple of np.ndarray `x_train` with 84 rows and 5 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'co2.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/datasets/CO2.csv'
maybe_download_and_extract(path, url,
save_file_name='co2.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
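# Hedged usage sketch (not part of the original module); the path is made up:
#   x_train, metadata = co2('~/data')
#   print(x_train.shape, metadata['columns'])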
|
plugin.video.yatp/site-packages/hachoir_parser/video/avchd.py | mesabib/kodi.yatp | 194 | 11184428 | """
Parser for AVCHD/Blu-ray formats
Notice: This parser is based on reverse-engineering efforts.
It is NOT based on official specifications, and is subject to change as
more information becomes available. There's a lot of guesswork here, so if you find
that something disagrees with an official specification, please change it.
Notice: This parser has NOT been tested on Blu-ray disc data, only on files
taken from AVCHD camcorders.
Author: <NAME>
Creation: December 30, 2010
References:
- Wikipedia: http://en.wikipedia.org/wiki/AVCHD
- European patent EP1821310: http://www.freepatentsonline.com/EP1821310.html
"""
"""
File structure:
Root (/PRIVATE/AVCHD, /AVCHD, /, etc.)
AVCHDTN/: (AVCHD only)
THUMB.TDT: Thumbnail Data: stored as a series of 16KiB pages, where each thumbnail starts on a page boundary
THUMB.TID: Thumbnail Index (TIDX), unknown format
BDMV/:
INDEX.BDM|index.bdmv: Bluray Disc Metadata (INDX): Clip index file
MOVIEOBJ.BDM|MovieObject.bdmv: Bluray Disc Metadata (MOBJ): Clip description file
AUXDATA/: (Optional, Blu-ray only)
sound.bdmv: Sound(s) associated with HDMV Interactive Graphic streams applications
?????.otf: Font(s) associated with Text subtitle applications
BACKUP/: (Optional)
[Copies of *.bdmv, CLIPINF/* and PLAYLIST/*]
CLIPINF/:
?????.CPI/?????.clpi: Clip information (HDMV)
PLAYLIST/:
?????.MPL/?????.mpls: Movie Playlist information (MPLS)
STREAM/:
?????.MTS|?????.m2ts: BDAV MPEG-2 Transport Stream (video file)
SSIF/: (Blu-ray 3D only)
?????.ssif: Stereoscopic Interleaved file
IISVPL/: (Optional?, AVCHD only?)
?????.VPL: Virtual Playlist? (MPLS)
"""
from hachoir_parser import HachoirParser
from hachoir_core.field import (RootSeekableFieldSet, FieldSet,
RawBytes, Bytes, String, Bits, UInt8, UInt16, UInt32, PascalString8, Enum)
from hachoir_core.endian import BIG_ENDIAN
from hachoir_core.iso639 import ISO639_2
from hachoir_core.text_handler import textHandler, hexadecimal
from datetime import datetime
def fromhex(field):
return int('%x'%field.value)
class AVCHDTimestamp(FieldSet):
static_size = 8*8
def createFields(self):
yield textHandler(UInt8(self, "unknown", description="0x1E"), hexadecimal)
yield textHandler(UInt8(self, "century"), hexadecimal)
yield textHandler(UInt8(self, "year"), hexadecimal)
yield textHandler(UInt8(self, "month"), hexadecimal)
yield textHandler(UInt8(self, "day"), hexadecimal)
yield textHandler(UInt8(self, "hour"), hexadecimal)
yield textHandler(UInt8(self, "minute"), hexadecimal)
yield textHandler(UInt8(self, "second"), hexadecimal)
def createValue(self):
return datetime(fromhex(self['century'])*100 + fromhex(self['year']),
fromhex(self['month']), fromhex(self['day']),
fromhex(self['hour']), fromhex(self['minute']), fromhex(self['second']))
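# Hedged worked example (not part of the original parser) of the BCD decoding
# done by fromhex() above: each byte stores two decimal digits, so 0x20 maps to
# 20 because int('%x' % 0x20) == 20. A timestamp with century=0x20, year=0x10,
# month=0x12, day=0x30 therefore decodes to the date 2010-12-30.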
class AVCHDGenericChunk(FieldSet):
def createFields(self):
yield UInt32(self, "size")
self._size = (self['size'].value+4)*8
yield RawBytes(self, "raw[]", self['size'].value)
class AVCHDINDX_0(FieldSet):
def createFields(self):
yield UInt32(self, "size")
self._size = (self['size'].value+4)*8
yield RawBytes(self, "unknown[]", 22)
yield UInt32(self, "count")
for i in xrange(self['count'].value):
yield RawBytes(self, "data[]", 12)
class AVCHDIDEX_0(FieldSet):
def createFields(self):
yield UInt32(self, "size")
self._size = (self['size'].value+4)*8
yield RawBytes(self, "unknown[]", 40)
yield AVCHDTimestamp(self, "last_modified")
yield RawBytes(self, "unknown[]", self._size//8-52)
class AVCHDMOBJ_Chunk(FieldSet):
def createFields(self):
yield UInt32(self, "unknown[]")
yield UInt32(self, "index")
yield UInt32(self, "unknown[]")
yield textHandler(UInt32(self, "unknown_id"), hexadecimal)
yield UInt32(self, "unknown[]")
yield textHandler(UInt32(self, "playlist_id"), lambda field: '%05d'%field.value)
yield UInt32(self, "unknown[]")
class AVCHDMPLS_StreamEntry(FieldSet):
ENTRYTYPE = {1:'PlayItem on disc',
2:'SubPath on disc',
3:'PlayItem in local storage',
4:'SubPath in local storage'}
def createFields(self):
yield UInt8(self, "size")
self._size = (self['size'].value+1)*8
yield Enum(UInt8(self, "type"), self.ENTRYTYPE)
if self['type'].value in (1,3):
yield textHandler(UInt16(self, "pid", "PID of item in clip stream m2ts file"), hexadecimal)
else: # 2,4
'''
The patent says:
ref_to_SubPath_id
ref_to_SubClip_entry_id
ref_to_Stream_PID_of_subClip
Sizes aren't given, though, so I cannot determine the format without a sample.
'''
pass
class AVCHDMPLS_StreamAttribs(FieldSet):
STREAMTYPE = {
0x01: "V_MPEG1",
0x02: "V_MPEG2",
0x1B: "V_AVC",
0xEA: "V_VC1",
0x03: "A_MPEG1",
0x04: "A_MPEG2",
0x80: "A_LPCM",
0x81: "A_AC3",
0x84: "A_AC3_PLUS",
0xA1: "A_AC3_PLUS_SEC",
0x83: "A_TRUEHD",
0x82: "A_DTS",
0x85: "A_DTS-HD",
0xA2: "A_DTS-HD_SEC",
0x86: "A_DTS-MA",
0x90: "S_PGS",
0x91: "S_IGS",
0x92: "T_SUBTITLE",
}
# Enumerations taken from "ClownBD's CLIPINF Editor". Values may not be accurate.
def createFields(self):
yield UInt8(self, "size")
self._size = (self['size'].value+1)*8
yield Enum(UInt8(self, "type"), self.STREAMTYPE)
if self['type'].display.startswith('V'): # Video
yield Enum(Bits(self, "resolution", 4), {1:'480i', 2:'576i', 3:'480p', 4:'1080i', 5:'720p', 6:'1080p', 7:'576p'})
yield Enum(Bits(self, "fps", 4), {1:'24/1.001', 2:'24', 3:'25', 4:'30/1.001', 6:'50', 7:'60/1.001'})
yield Enum(UInt8(self, "aspect_ratio"), {0x20:'4:3', 0x30:'16:9'})
elif self['type'].display.startswith('A'): # Audio
yield Enum(Bits(self, "channel_layout", 4), {1:'Mono', 3:'Stereo', 6:'Multi', 12:'Combi'})
yield Enum(Bits(self, "sample_rate", 4), {1:'48KHz', 4:'96KHz', 5:'192KHz', 12:'48-192KHz', 14:'48-96KHz'})
yield Enum(String(self, "language", 3), ISO639_2)
elif self['type'].display.startswith('T'): # Text subtitle
yield UInt8(self, "unknown[]")
yield Enum(String(self, "language", 3), ISO639_2)
elif self['type'].display.startswith('S'): # Graphics
yield Enum(String(self, "language", 3), ISO639_2)
else:
pass
class AVCHDMPLS_Stream(FieldSet):
def createFields(self):
yield AVCHDMPLS_StreamEntry(self, "entry")
yield AVCHDMPLS_StreamAttribs(self, "attribs")
class AVCHDMPLS_PlayItem(FieldSet):
def createFields(self):
yield UInt32(self, "size")
self._size = (self['size'].value+4)*8
yield UInt16(self, "unknown[]")
yield UInt8(self, "video_count", "Number of video stream entries")
yield UInt8(self, "audio_count", "Number of video stream entries")
yield UInt8(self, "subtitle_count", "Number of presentation graphics/text subtitle entries")
yield UInt8(self, "ig_count", "Number of interactive graphics entries")
yield RawBytes(self, "unknown[]", 8)
for i in xrange(self['video_count'].value):
yield AVCHDMPLS_Stream(self, "video[]")
for i in xrange(self['audio_count'].value):
yield AVCHDMPLS_Stream(self, "audio[]")
for i in xrange(self['subtitle_count'].value):
yield AVCHDMPLS_Stream(self, "subtitle[]")
for i in xrange(self['ig_count'].value):
yield AVCHDMPLS_Stream(self, "ig[]")
class AVCHDMPLS_0_Chunk(FieldSet):
def createFields(self):
yield UInt16(self, "size")
self._size = (self['size'].value+2)*8
yield Bytes(self, "clip_id", 5)
yield Bytes(self, "clip_type", 4)
yield RawBytes(self, "unknown[]", 3)
yield UInt32(self, "clip_start_time[]", "clip start time (units unknown)")
yield UInt32(self, "clip_end_time[]", "clip end time (units unknown)")
yield RawBytes(self, "unknown[]", 10)
yield AVCHDMPLS_PlayItem(self, "playitem")
class AVCHDMPLS_0(FieldSet):
def createFields(self):
yield UInt32(self, "size")
self._size = (self['size'].value+4)*8
yield UInt32(self, "count")
yield UInt16(self, "unknown[]")
for i in xrange(self['count'].value):
yield AVCHDMPLS_0_Chunk(self, "chunk[]")
class AVCHDMPLS_PlayItemMark(FieldSet):
def createFields(self):
yield UInt16(self, "unknown[]")
yield UInt16(self, "playitem_idx", "Index of the associated PlayItem")
yield UInt32(self, "mark_time", "Marker time in clip (units unknown)")
yield RawBytes(self, "unknown", 6)
class AVCHDMPLS_1(FieldSet):
def createFields(self):
yield UInt32(self, "size")
self._size = (self['size'].value+4)*8
yield UInt16(self, "count")
for i in xrange(self['count'].value):
yield AVCHDMPLS_PlayItemMark(self, "chunk[]")
class AVCHDPLEX_1_Chunk(FieldSet):
static_size = 66*8
def createFields(self):
yield RawBytes(self, "unknown[]", 10)
yield AVCHDTimestamp(self, "date")
yield RawBytes(self, "unknown[]", 1)
yield PascalString8(self, "date")
def createValue(self):
return self['date'].value
class AVCHDPLEX_0(FieldSet):
def createFields(self):
yield UInt32(self, "size")
self._size = (self['size'].value+4)*8
yield RawBytes(self, "unknown[]", 10)
yield AVCHDTimestamp(self, "last_modified")
yield RawBytes(self, "unknown[]", 2)
yield PascalString8(self, "date")
class AVCHDPLEX_1(FieldSet):
def createFields(self):
yield UInt32(self, "size")
self._size = (self['size'].value+4)*8
yield UInt16(self, "count")
for i in xrange(self['count'].value):
yield AVCHDPLEX_1_Chunk(self, "chunk[]")
class AVCHDCLPI_1(FieldSet):
def createFields(self):
yield UInt32(self, "size")
self._size = (self['size'].value+4)*8
yield RawBytes(self, "unknown[]", 10)
yield textHandler(UInt16(self, "video_pid", "PID of video data in stream file"), hexadecimal)
yield AVCHDMPLS_StreamAttribs(self, "video_attribs")
yield textHandler(UInt16(self, "audio_pid", "PID of audio data in stream file"), hexadecimal)
yield AVCHDMPLS_StreamAttribs(self, "audio_attribs")
def AVCHDIDEX(self):
yield AVCHDIDEX_0(self, "chunk[]")
yield AVCHDGenericChunk(self, "chunk[]")
def AVCHDPLEX(self):
yield AVCHDPLEX_0(self, "chunk[]")
yield AVCHDPLEX_1(self, "chunk[]")
yield AVCHDGenericChunk(self, "chunk[]")
def AVCHDCLEX(self):
yield AVCHDGenericChunk(self, "chunk[]")
yield AVCHDGenericChunk(self, "chunk[]")
class AVCHDChunkWithHeader(FieldSet):
TYPES = {'IDEX': AVCHDIDEX,
'PLEX': AVCHDPLEX,
'CLEX': AVCHDCLEX,}
def createFields(self):
yield UInt32(self, "size")
self._size = (self['size'].value+4)*8
yield UInt32(self, "unknown[]", "24")
yield UInt32(self, "unknown[]", "1")
yield UInt32(self, "unknown[]", "0x10000100")
yield UInt32(self, "unknown[]", "24")
yield UInt32(self, "size2")
assert self['size'].value == self['size2'].value+20
yield Bytes(self, "magic", 4)
yield RawBytes(self, "unknown[]", 36)
for field in self.TYPES[self['magic'].value](self):
yield field
class AVCHDINDX(HachoirParser, RootSeekableFieldSet):
endian = BIG_ENDIAN
MAGIC = "INDX0"
PARSER_TAGS = {
"id": "bdmv_index",
"category": "video",
"file_ext": ("bdm","bdmv"),
"magic": ((MAGIC, 0),),
"min_size": 8, # INDX0?00
"description": "INDEX.BDM",
}
def __init__(self, stream, **args):
RootSeekableFieldSet.__init__(self, None, "root", stream, None, stream.askSize(self))
HachoirParser.__init__(self, stream, **args)
def validate(self):
if self.stream.readBytes(0, len(self.MAGIC)) != self.MAGIC:
return "Invalid magic"
return True
def createFields(self):
yield Bytes(self, "filetype", 4, "File type (INDX)")
yield Bytes(self, "fileversion", 4, "File version (0?00)")
yield UInt32(self, "offset[0]")
yield UInt32(self, "offset[1]")
self.seekByte(self['offset[0]'].value)
yield AVCHDINDX_0(self, "chunk[]")
self.seekByte(self['offset[1]'].value)
yield AVCHDChunkWithHeader(self, "chunk[]")
class AVCHDMOBJ(HachoirParser, RootSeekableFieldSet):
endian = BIG_ENDIAN
MAGIC = "MOBJ0"
PARSER_TAGS = {
"id": "bdmv_mobj",
"category": "video",
"file_ext": ("bdm","bdmv"),
"magic": ((MAGIC, 0),),
"min_size": 8, # MOBJ0?00
"description": "MOVIEOBJ.BDM",
}
def __init__(self, stream, **args):
RootSeekableFieldSet.__init__(self, None, "root", stream, None, stream.askSize(self))
HachoirParser.__init__(self, stream, **args)
def validate(self):
if self.stream.readBytes(0, len(self.MAGIC)) != self.MAGIC:
return "Invalid magic"
return True
def createFields(self):
yield Bytes(self, "filetype", 4, "File type (MOBJ)")
yield Bytes(self, "fileversion", 4, "File version (0?00)")
yield RawBytes(self, "unknown[]", 32)
yield UInt32(self, "size")
yield UInt32(self, "unknown[]")
yield UInt16(self, "count")
yield textHandler(UInt32(self, "unknown_id"), hexadecimal)
for i in xrange(1, self['count'].value):
yield AVCHDMOBJ_Chunk(self, "movie_object[]")
class AVCHDMPLS(HachoirParser, RootSeekableFieldSet):
endian = BIG_ENDIAN
MAGIC = "MPLS0"
PARSER_TAGS = {
"id": "bdmv_mpls",
"category": "video",
"file_ext": ("mpl","mpls","vpl"),
"magic": ((MAGIC, 0),),
"min_size": 8, # MPLS0?00
"description": "MPLS",
}
def __init__(self, stream, **args):
RootSeekableFieldSet.__init__(self, None, "root", stream, None, stream.askSize(self))
HachoirParser.__init__(self, stream, **args)
def validate(self):
if self.stream.readBytes(0, len(self.MAGIC)) != self.MAGIC:
return "Invalid magic"
return True
def createFields(self):
yield Bytes(self, "filetype", 4, "File type (MPLS)")
yield Bytes(self, "fileversion", 4, "File version (0?00)")
yield UInt32(self, "offset[0]")
yield UInt32(self, "offset[1]")
yield UInt32(self, "offset[2]")
self.seekByte(self['offset[0]'].value)
yield AVCHDMPLS_0(self, "chunk[]")
self.seekByte(self['offset[1]'].value)
yield AVCHDMPLS_1(self, "chunk[]")
self.seekByte(self['offset[2]'].value)
yield AVCHDChunkWithHeader(self, "chunk[]")
class AVCHDCLPI(HachoirParser, RootSeekableFieldSet):
endian = BIG_ENDIAN
MAGIC = "HDMV0"
PARSER_TAGS = {
"id": "bdmv_clpi",
"category": "video",
"file_ext": ("cpi","clpi"),
"magic": ((MAGIC, 0),),
"min_size": 8, # HDMV0?00
"description": "HDMV",
}
def __init__(self, stream, **args):
RootSeekableFieldSet.__init__(self, None, "root", stream, None, stream.askSize(self))
HachoirParser.__init__(self, stream, **args)
def validate(self):
if self.stream.readBytes(0, len(self.MAGIC)) != self.MAGIC:
return "Invalid magic"
return True
def createFields(self):
yield Bytes(self, "filetype", 4, "File type (HDMV)")
yield Bytes(self, "fileversion", 4, "File version (0?00)")
yield UInt32(self, "offset[]")
yield UInt32(self, "offset[]")
yield UInt32(self, "offset[]")
yield UInt32(self, "offset[]")
yield UInt32(self, "offset[]")
self.seekByte(self['offset[0]'].value)
yield AVCHDGenericChunk(self, "chunk[]")
self.seekByte(self['offset[1]'].value)
yield AVCHDCLPI_1(self, "chunk[]")
self.seekByte(self['offset[2]'].value)
yield AVCHDGenericChunk(self, "chunk[]")
self.seekByte(self['offset[3]'].value)
yield AVCHDGenericChunk(self, "chunk[]")
self.seekByte(self['offset[4]'].value)
yield AVCHDChunkWithHeader(self, "chunk[]")
|
apps/invitations/admin.py | goztrk/django-htk | 206 | 11184430 | <gh_stars>100-1000
# Python Standard Library Imports
# Django Imports
from django.contrib import admin
class HtkInvitationAdmin(admin.ModelAdmin):
list_display = (
'id',
'email',
'first_name',
'last_name',
'invited_by',
'campaign',
'notes',
'user',
'status',
'created_at',
'timestamp',
)
list_filter = (
'campaign',
'invited_by',
'status',
)
|
tests/files.py | sourya-deepsource/pdf-annotate | 137 | 11184439 | <reponame>sourya-deepsource/pdf-annotate<gh_stars>100-1000
import os.path
dirname, _ = os.path.split(os.path.abspath(__file__))
SIMPLE = os.path.join(dirname, 'pdfs', 'simple.pdf')
ROTATED_90 = os.path.join(dirname, 'pdfs', 'rotated_90.pdf')
ROTATED_180 = os.path.join(dirname, 'pdfs', 'rotated_180.pdf')
ROTATED_270 = os.path.join(dirname, 'pdfs', 'rotated_270.pdf')
BINARIZED_PNG = os.path.join(dirname, 'images', 'binarized.png')
GRAYSCALE_PNG = os.path.join(dirname, 'images', 'grayscale.png')
RGB_PNG = os.path.join(dirname, 'images', 'rgb.png')
ALPHA_PNG = os.path.join(dirname, 'images', 'rgba.png')
PNG_FILES = [
BINARIZED_PNG,
GRAYSCALE_PNG,
RGB_PNG,
ALPHA_PNG,
]
GRAYSCALE_JPEG = os.path.join(dirname, 'images', 'grayscale.jpeg')
RGB_JPEG = os.path.join(dirname, 'images', 'rgb.jpeg')
CMYK_JPEG = os.path.join(dirname, 'images', 'cmyk.jpeg')
JPEG_FILES = [
GRAYSCALE_JPEG,
RGB_JPEG,
CMYK_JPEG,
]
RGB_GIF = os.path.join(dirname, 'images', 'rgb.gif')
GRAYSCALE_GIF = os.path.join(dirname, 'images', 'grayscale.gif')
GIF_FILES = [
GRAYSCALE_GIF,
RGB_GIF,
]
|
webcompat/db/__init__.py | Rahib777-7/webcompat.com | 298 | 11184447 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""Database initialization."""
from hashlib import sha512
import os
from uuid import uuid4
from sqlalchemy import Column
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Integer
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import sessionmaker
from sqlalchemy import String
from webcompat import app
from webcompat.helpers import to_bytes
session_engine = create_engine('sqlite:///' + os.path.join(
app.config['DATA_PATH'], 'session.db'))
session_db = scoped_session(sessionmaker(autocommit=False,
autoflush=False,
bind=session_engine))
site_engine = create_engine('sqlite:///' + os.path.join(
app.config['DATA_PATH'], 'topsites.db'))
site_db = scoped_session(sessionmaker(autocommit=False,
autoflush=False,
bind=site_engine))
UsersBase = declarative_base()
UsersBase.query = session_db.query_property()
class User(UsersBase):
"""Define the user DB holding the sessions."""
__tablename__ = 'users'
user_id = Column(String(128), unique=True, primary_key=True)
access_token = Column(String(128), unique=True)
def __init__(self, access_token):
"""Initialize the user db parameters."""
self.access_token = access_token
# We use the user_id in the session cookie to identify auth'd users.
# Here we salt and hash the GitHub access token so you can't get
# back to the auth token if the session cookie was ever compromised.
self.user_id = sha512(
to_bytes(access_token + uuid4().hex)).hexdigest()[0:128]
UsersBase.metadata.create_all(bind=session_engine)
SiteBase = declarative_base()
SiteBase.query = site_db.query_property()
class Site(SiteBase):
"""SQLAchemy base object for an Alexa top site."""
__tablename__ = 'topsites'
url = Column(String, primary_key=True)
priority = Column(Integer)
country_code = Column(String)
ranking = Column(Integer)
def __init__(self, url, priority, country_code, ranking):
self.url = url
self.priority = priority
self.country_code = country_code
self.ranking = ranking
SiteBase.metadata.create_all(bind=site_engine)
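# A minimal usage sketch, assuming a GitHub OAuth access token is at hand; only
# the salted user_id hash is meant to end up in the session cookie.
#     user = User(access_token="<token>")
#     session_db.add(user)
#     session_db.commit()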
|
python/linkedlist/leetcode/delete_nth_node.py | googege/algo-learn | 153 | 11184486 | <reponame>googege/algo-learn<gh_stars>100-1000
# 删除链表倒数第N个节点
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
# 先计数,再删除
def removeNthFromEnd_1(self, head: ListNode, n: int) -> ListNode:
def length(node: ListNode) -> int:
k = 0
while node:
k, node = k + 1, node.next
return k
dummy = ListNode(0, head)
length = length(head)
cur = dummy
for _ in range(1, length - n + 1):
cur = cur.next
cur.next = cur.next.next
return dummy.next
# 快慢指针,一次遍历
def removeNthFromEnd_2(self, head: ListNode, n: int) -> ListNode:
dummy = ListNode(0, head)
slow, fast = dummy, head
for _ in range(n):
fast = fast.next
while fast:
slow, fast = slow.next, fast.next
slow.next = slow.next.next
return dummy.next
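# A minimal usage sketch, assuming the ListNode/Solution classes above:
# build 1->2->3->4->5, remove the 2nd node from the end, expect 1->2->3->5.
def _demo_remove_nth_from_end():
    head = None
    for val in reversed([1, 2, 3, 4, 5]):
        head = ListNode(val, head)
    head = Solution().removeNthFromEnd_2(head, 2)
    values = []
    while head:
        values.append(head.val)
        head = head.next
    return values  # expected: [1, 2, 3, 5]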
|
app/controllers/config/account/profile.py | ctxis/crackerjack | 237 | 11184524 | from .. import bp
from flask import current_app
from flask_login import login_required, current_user
from flask import render_template, redirect, url_for, flash, request
from app.lib.base.provider import Provider
@bp.route('/profile', methods=['GET'])
@bp.route('/profile/<int:user_id>', methods=['GET'])
@login_required
def profile(user_id=None):
if user_id is None:
user_id = current_user.id
elif user_id != current_user.id:
flash('Access denied', 'error')
return redirect(url_for('home.index'))
provider = Provider()
users = provider.users()
user = users.get_by_id(user_id)
return render_template(
'config/account/profile.html',
user=user
)
@bp.route('/profile/<int:user_id>/save', methods=['POST'])
@login_required
def profile_save(user_id):
if user_id != current_user.id:
flash('Access denied', 'error')
return redirect(url_for('home.index'))
provider = Provider()
users = provider.users()
user = users.get_by_id(current_user.id)
if not user.ldap:
existing_password = request.form['existing_password'].strip()
new_password = request.form['new_password'].strip()
confirm_password = request.form['confirm_password'].strip()
if len(existing_password) == 0:
flash('Please enter your existing password', 'error')
return redirect(url_for('config.profile', user_id=user_id))
elif len(new_password) == 0:
flash('Please enter your new password', 'error')
return redirect(url_for('config.profile', user_id=user_id))
elif len(confirm_password) == 0:
flash('Please confirm your new password', 'error')
return redirect(url_for('config.profile', user_id=user_id))
elif new_password != confirm_password:
flash('Passwords do not match', 'error')
return redirect(url_for('config.profile', user_id=user_id))
elif not users.validate_user_password(user_id, existing_password):
flash('Existing password is invalid', 'error')
return redirect(url_for('config.profile', user_id=user_id))
elif not users.password_complexity.meets_requirements(new_password):
flash(
'Password does not meet complexity requirements: ' + users.password_complexity.get_requirement_description(),
'error')
return redirect(url_for('config.profile', user_id=user_id))
users.update_password(user_id, new_password)
flash('Settings updated', 'success')
return redirect(url_for('config.profile', user_id=user_id))
|
tools/SeeDot/seedot/compiler/ONNX/onnx_test_run.py | Shenzhen-Cloudatawalk-Technology-Co-Ltd/EdgeML | 719 | 11184529 | <gh_stars>100-1000
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
import numpy as np
import onnxruntime
import common
import os, sys
import onnx
from onnx import helper
file_path = '../../../model/lenet/cifar-multiclass/input.onnx'
model = onnx.load(file_path)
sess = onnxruntime.InferenceSession(file_path)
dataset_path = '../../../datasets/lenet/cifar-multiclass/test_onnx.npy'
test = np.load(dataset_path)
run_all = True
intermediate = None
correct = 0
total = 0
for i in range(test.shape[0] if run_all else 1):
x = test[i,1:].reshape(-1,1)
# x = test[i,1:].reshape(1,32,32,3).transpose(0,3,1,2).reshape(-1,1)
output = test[i,0]
# print(x.shape)
# print(output)
input_name = model.graph.input[0].name
x = x.astype(np.float32)
if (intermediate is not None):
intermediate_layer_value_info = helper.ValueInfoProto()
intermediate_layer_value_info.name = intermediate
model.graph.output.extend([intermediate_layer_value_info])
onnx.save(model, file_path + '_1')
sess = onnxruntime.InferenceSession(file_path + '_1')
pred = sess.run([intermediate_layer_value_info.name], {input_name: x})
# np.save('debug/' + model_name + '/' + model_name + '_debug', pred)
# with open('debug/onnx_debug.txt', 'w') as f:
# f.write(common.numpy_float_array_to_float_val_str(pred))
# print("Saving the onnx runtime intermediate output for " + intermediate_layer_value_info.name)
print(len(pred))
print(pred[0])
exit()
pred = sess.run(None, {input_name: x})
predicted_class = pred[0][0] + 1
print(predicted_class)
print(int(output))
correct += (predicted_class == int(output))
total += 1
# np.save('debug/' + model_name + '/' + model_name + '_output', pred)
# with open('debug/onnx_output.txt', 'w') as f:
# f.write(common.numpy_float_array_to_float_val_str(pred))
# output_dims = common.proto_val_to_dimension_tuple(model.graph.output[0])
# print("Saving the onnx runtime output of dimension " + str(output_dims))
print(str((float(correct)*100)/float(total)) + '% is the accuracy')
|
google_or_tools/coins3_sat.py | tias/hakank | 279 | 11184562 | # Copyright 2021 <NAME> <EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Coin application in Google CP Solver.
From 'Constraint Logic Programming using ECLiPSe'
pages 99f and 234 ff.
The solution in ECLiPSe is at page 236.
'''
What is the minimum number of coins that allows one to pay _exactly_
any amount smaller than one Euro? Recall that there are six different
euro cents, of denomination 1, 2, 5, 10, 20, 50
'''
This is a port of my old OR-tools CP model coins3.py
This model was created by <NAME> (<EMAIL>)
Also see my other OR-tools models: http://www.hakank.org/or_tools/
"""
from __future__ import print_function
from ortools.sat.python import cp_model as cp
import math, sys
from cp_sat_utils import scalar_product, SimpleSolutionPrinter
def main():
model = cp.CpModel()
#
# data
#
n = 6 # number of different coins
variables = [1, 2, 5, 10, 25, 50]
# declare variables
x = [model.NewIntVar(0, 99, "x%i" % i) for i in range(n)]
num_coins = model.NewIntVar(0, 99, "num_coins")
#
# constraints
#
# number of used coins, to be minimized
model.Add(num_coins == sum(x))
# Check that all changes from 1 to 99 can be made.
for j in range(1, 100):
tmp = [model.NewIntVar(0, 99, "b%i" % i) for i in range(n)]
model.Add(j == cp.LinearExpr.ScalProd(tmp,variables))
# scalar_product(model,tmp,variables,j)
[model.Add(tmp[i] <= x[i]) for i in range(n)]
# objective
# objective = solver.Minimize(num_coins, 1)
model.Minimize(num_coins)
#
# solution and search
#
solver = cp.CpSolver()
# solver.parameters.search_branching = cp.PORTFOLIO_SEARCH
solver.parameters.cp_model_presolve = False
solver.parameters.linearization_level = 0
solver.parameters.cp_model_probing_level = 0
# status = solver.Solve(model)
solution_printer = SimpleSolutionPrinter([num_coins])
status = solver.SolveWithSolutionCallback(model, solution_printer)
if status == cp.OPTIMAL:
print("x: ", [solver.Value(x[i]) for i in range(n)])
print("num_coins:", solver.Value(num_coins))
print()
print("NumConflicts:", solver.NumConflicts())
print("NumBranches:", solver.NumBranches())
print("WallTime:", solver.WallTime())
if __name__ == "__main__":
main()
|
example/functions/add.py | osblinnikov/pytorch-binary | 293 | 11184595 | # functions/add.py
import torch
from torch.autograd import Function
from _ext import my_lib
class MyAddFunction(Function):
def forward(self, input1, input2):
output = input1.new()
if not input1.is_cuda:
my_lib.my_lib_add_forward(input1, input2, output)
else:
my_lib.my_lib_add_forward_cuda(input1, input2, output)
return output
def backward(self, grad_output):
grad_input = grad_output.new()
if not grad_output.is_cuda:
my_lib.my_lib_add_backward(grad_output, grad_input)
else:
my_lib.my_lib_add_backward_cuda(grad_output, grad_input)
return grad_input
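# A minimal usage sketch, assuming the compiled `my_lib` C extension from this
# example repo has been built; with old-style autograd Functions the instance
# itself is called on the input tensors.
#     a = torch.randn(4)
#     b = torch.randn(4)
#     out = MyAddFunction()(a, b)  # expected to equal a + b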
|
src/python/nimbusml/feature_extraction/image/__init__.py | michaelgsharp/NimbusML | 134 | 11184726 | from .loader import Loader
from .pixelextractor import PixelExtractor
from .resizer import Resizer
__all__ = [
'Loader',
'PixelExtractor',
'Resizer'
]
|
src/test/tests/operators/multires.py | visit-dav/vis | 226 | 11184738 | # ----------------------------------------------------------------------------
# CLASSES: nightly
#
# Test Case: multires.py
#
# Programmer: <NAME>
# Date: August 6, 2010
#
# ----------------------------------------------------------------------------
ds = data_path("Chombo_test_data/chombo.visit")
OpenDatabase(ds)
AddPlot("Pseudocolor", "Scalar_1")
AddOperator("MultiresControl")
att = MultiresControlAttributes()
att.resolution = 0
SetOperatorOptions(att)
DrawPlots()
Test("multires_0")
att.resolution = 1
SetOperatorOptions(att)
DrawPlots()
Test("multires_1")
DeleteAllPlots()
CloseDatabase(ds)
Exit()
|
scripts/parse_atmo_status_logs.py | simpsonw/atmosphere | 197 | 11184778 | #!/usr/bin/env python
import csv
from datetime import datetime
def _parse_logs(filename):
user_history = {}
pending_instances = {}
with open(filename, 'r') as the_file:
csvreader = csv.reader(the_file, delimiter=',')
for row in csvreader:
try:
(
timestamp, username, instance_id, machine_id, size_id,
status_name
) = row
except:
print 'Could not parse row:\n%s' % row
continue
if status_name == 'Request Received':
pending_instances[(username, machine_id, size_id)] = row
else:
first_row = pending_instances.pop(
(username, machine_id, size_id), None
)
user_instance_history = user_history.get(username, {})
instance_history = user_instance_history.get(instance_id, [])
if first_row:
instance_history.append(first_row)
instance_history.append(row)
user_instance_history[instance_id] = instance_history
user_history[username] = user_instance_history
print "Username,Instance ID, Machine ID, Size ID, Request Time, Launch Time, Networking Time, Deployment Time, Request-to-launch, launch-to-deploy"
for username, instance_history in user_history.items():
for instance_id, history in instance_history.items():
request_time = None
launch_time = None
network_time = None
deploy_time = None
for row in history:
status = row[5]
if not request_time and 'Request Received' in status:
request_time = get_time(row[0])
elif not launch_time and 'Launching Instance' in status:
launch_time = get_time(row[0])
elif not network_time and 'Networking Complete' in status:
network_time = get_time(row[0])
elif not deploy_time and 'Deploy Finished' in status:
deploy_time = get_time(row[0])
if not launch_time or not request_time:
total_launch_time = "N/A"
else:
total_launch_time = launch_time - request_time
if not launch_time or not deploy_time:
total_deploy_time = "N/A"
else:
total_deploy_time = deploy_time - launch_time
print "%s,%s,%s,%s,%s,%s,%s,%s,%s,%s"\
% (username, instance_id, row[3], row[4], request_time, launch_time, network_time, deploy_time, total_launch_time, total_deploy_time)
def get_time(time_str):
return datetime.strptime(time_str, '%Y-%m-%d %H:%M:%S')
if __name__ == "__main__":
_parse_logs("logs/atmosphere_status.log")
|
ts_datasets/ts_datasets/anomaly/synthetic.py | ankitakashyap05/Merlion | 2,215 | 11184779 | #
# Copyright (c) 2021 salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
#
import glob
import os
import pandas as pd
from ts_datasets.anomaly.base import TSADBaseDataset
class Synthetic(TSADBaseDataset):
"""
Wrapper to load a sythetically generated dataset.
The dataset was generated using three base time series, each of which
was separately injected with shocks, spikes, dips and level shifts, making
a total of 15 time series (including the base time series without anomalies).
    Subsets are defined by the base time series used ("horizontal",
"seasonal", "upward_downward"), or the type of injected anomaly ("shock",
"spike", "dip", "level"). The "anomaly" subset refers to all times series with
injected anomalies (12) while "base" refers to all time series without them (3).
"""
base_ts_subsets = ["horizontal", "seasonal", "upward_downward"]
anomaly_subsets = ["shock", "spike", "dip", "level", "trend"]
valid_subsets = ["anomaly", "all", "base"] + base_ts_subsets + anomaly_subsets
def __init__(self, subset="anomaly", rootdir=None):
super().__init__()
assert subset in self.valid_subsets, f"subset should be in {self.valid_subsets}, but got {subset}"
self.subset = subset
if rootdir is None:
fdir = os.path.dirname(os.path.abspath(__file__))
merlion_root = os.path.abspath(os.path.join(fdir, "..", "..", ".."))
rootdir = os.path.join(merlion_root, "data", "synthetic_anomaly")
csvs = sorted(glob.glob(f"{rootdir}/*.csv"))
if subset == "base":
csvs = [csv for csv in csvs if "anom" not in os.path.basename(csv)]
elif subset != "all":
csvs = [csv for csv in csvs if "anom" in os.path.basename(csv)]
if subset in self.base_ts_subsets + self.anomaly_subsets:
csvs = [csv for csv in csvs if subset in os.path.basename(csv)]
for csv in csvs:
df = pd.read_csv(csv)
df["timestamp"] = pd.to_datetime(df["timestamp"], unit="s")
df = df.set_index("timestamp")
ts = df[df.columns[0:1]]
metadata = pd.DataFrame(
{
"anomaly": df["anomaly"].astype(bool) if df.shape[1] > 1 else [False] * len(df),
"trainval": [j < len(df) * 0.5 for j in range(len(df))],
},
index=df.index,
)
self.time_series.append(ts)
self.metadata.append(metadata)
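# A short usage sketch, assuming the synthetic CSVs exist under
# data/synthetic_anomaly as expected by the constructor above:
#     dataset = Synthetic(subset="dip")
#     ts = dataset.time_series[0]   # univariate pd.DataFrame
#     md = dataset.metadata[0]      # "anomaly" and "trainval" flags
#     anomalous_times = md.index[md["anomaly"]]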
|
Candidates/trainCandidates.py | shv07/microexpnet | 150 | 11184790 | '''
Title :trainCandidates.py
Description :This script trains Candidate nets, plots learning curves
and saves corresponding Tensorflow models
Author :<NAME> & <NAME>
Date Created :28-04-2017
Date Modified :09-06-2019
version :1.3
python_version :2.7.11
'''
from __future__ import print_function
from time import gmtime, strftime
from Preprocessing import *
from CandidateExpNet_v import *
from CandidateExpNet_p1 import *
from CandidateExpNet_p2 import *
from CandidateExpNet_p12 import *
import matplotlib
matplotlib.use('Agg')
import matplotlib.pylab as plt
import matplotlib.image as mpimg
import tensorflow as tf
import numpy as np
import cv2
import sys
import os
if __name__ == '__main__':
# Static parameters
imgXdim = 84
imgYdim = 84
nInput = imgXdim*imgYdim # Since RGB is transformed to Grayscale
nClasses = 8
dropout = 0.5
batchSize = 64
#learningRate = 1e-04
stepSize = 50000
epochs = 1000
testStep = 20
displayStep = 20
'''
mode : "-v" -> CandidateExpNet_v,
"-p1" -> CandidateExpNet_p1
"-p2" -> CandidateExpNet_p2
"-p12" -> CandidateExpNet_p12
valSet : Index of the chosen test batch (10 batches in total) or file path of the test labels
labelPath : Absolute path of the label file
outputGraphName : Name of the learning curve graph
outputModelName : Name of the Tensorflow model file
squeezeCoefficient : Model compression parameter
'''
if len(sys.argv) != 8:
print("Usage: python trainCandidates.py <mode> <valSet> <labelPath> <outputGraphName> <outputModelName> <learningRate> <squeezeCoefficient>")
else:
# Dynamic parameters
mode = str(sys.argv[1])
if sys.argv[2].isdigit():
valSet = int(sys.argv[2])
else:
valSet = str(sys.argv[2])
labelPath = str(sys.argv[3])
outputGraphName = str(sys.argv[4])
outputModelName = str(sys.argv[5])
learningRate = float(sys.argv[6])
squeezeCoefficient = int(sys.argv[7])
if mode == "-v":
print("[" + get_time() + "] " + "Mode: CandidateExpNet_v Training")
elif mode == "-p1":
print("[" + get_time() + "] " + "Mode: CandidateExpNet_p1 Training")
elif mode == "-p2":
print("[" + get_time() + "] " + "Mode: CandidateExpNet_p2 Training")
else:
print("[" + get_time() + "] " + "Mode: CandidateExpNet_p12 Training")
# Deploy images and their labels
print("[" + get_time() + "] " + "Deploying images...")
trainX, trainY, teacherLogits = deployImages(labelPath, None)
# Produce one-hot labels
print("[" + get_time() + "] " + "Producing one-hot labels...")
trainY = produceOneHot(trainY, nClasses)
print("[" + get_time() + "] " + "Start training for val[" + str(valSet) + "]")
print("[" + get_time() + "] " + "Initializing batches...")
batches = []
test_batches = []
if type(valSet) == type("str"):
testX, testY, _ = deployImages(valSet, None)
testY = produceOneHot(testY, nClasses)
batches.extend(produceBatch(trainX, trainY, teacherLogits, batchSize))
test_batches.extend(produceBatch(testX, testY, None, batchSize))
else:
# Produce 10 folds for training & validation
folds = produce10foldCrossVal(trainX, trainY, teacherLogits, labelPath)
for i in range(10):
if i != valSet:
batches.extend(produceBatch(folds[i]['x'], folds[i]['y'], folds[i]['teacherLogits'], batchSize))
else:
test_batches.extend(produceBatch(folds[i]['x'], folds[i]['y'], folds[i]['teacherLogits'], batchSize))
print("[" + get_time() + "] " + "Initializing placeholders...")
# tf Graph input
x = tf.placeholder(tf.float32, shape=[None, nInput])
lr = tf.placeholder(tf.float32)
keepProb = tf.placeholder(tf.float32)
y = tf.placeholder(tf.int32, shape=[None, nClasses])
# Loss values for plotting
train_loss_vals = []
train_acc_vals = []
train_iter_num = []
test_loss_vals = []
test_acc_vals = []
test_iter_num = []
fin_accuracy = 0
classifier = None
# Construct model
if mode == "-v":
classifier = CandidateExpNet_v(x, y, lr, nClasses, imgXdim, imgYdim, batchSize, keepProb, squeezeCoefficient)
elif mode == "-p1":
classifier = CandidateExpNet_p1(x, y, lr, nClasses, imgXdim, imgYdim, batchSize, keepProb, squeezeCoefficient)
elif mode == "-p2":
classifier = CandidateExpNet_p2(x, y, lr, nClasses, imgXdim, imgYdim, batchSize, keepProb, squeezeCoefficient)
else:
classifier = CandidateExpNet_p12(x, y, lr, nClasses, imgXdim, imgYdim, batchSize, keepProb, squeezeCoefficient)
# Deploy weights and biases for the model saver
model_saver = tf.train.Saver()
weights_biases_deployer = tf.train.Saver({"wc1": classifier.w["wc1"], \
"wc2": classifier.w["wc2"], \
"wfc": classifier.w["wfc"], \
"wo": classifier.w["out"], \
"bc1": classifier.b["bc1"], \
"bc2": classifier.b["bc2"], \
"bfc": classifier.b["bfc"], \
"bo": classifier.b["out"]})
with tf.Session() as sess:
# Initializing the variables
sess.run(tf.global_variables_initializer())
print("[" + get_time() + "] " + "Training is started...")
step = 0
# Keep training until each max iterations
while step <= epochs:
total_batch = len(batches)
total_test_batch = len(test_batches)
for i in range(total_batch):
batch_x = batches[i]['x']
batch_y = batches[i]['y']
# Run optimization op (backprop)
sess.run(classifier.optimizer, feed_dict={x: batch_x, y: batch_y, lr: learningRate, keepProb: dropout})
if step % displayStep == 0:
avg_cost = 0
avg_perf = 0
for i in range(total_batch):
batch_x = batches[i]['x']
batch_y = batches[i]['y']
c, p = sess.run([classifier.cost, classifier.accuracy], feed_dict={x: batch_x, y: batch_y, lr: learningRate, keepProb: 1.0})
avg_cost += c
avg_perf += p
avg_cost /= float(total_batch)
avg_perf /= float(total_batch)
train_loss_vals.append(avg_cost)
train_acc_vals.append(avg_perf)
train_iter_num.append(step)
print("[" + get_time() + "] [Iter " + str(step) + "] Training Loss: " + \
"{:.6f}".format(avg_cost) + " Training Accuracy: " + "{:.5f}".format(avg_perf))
if avg_cost < -1:
break
if step % testStep == 0:
avg_cost = 0
fin_accuracy = 0
for i in range(total_test_batch):
testX = test_batches[i]['x']
testY = test_batches[i]['y']
c, f = sess.run([classifier.cost, classifier.accuracy], feed_dict={x: testX, y: testY, lr: learningRate, keepProb: 1.0})
avg_cost += c
fin_accuracy += f
avg_cost /= float(total_test_batch)
fin_accuracy /= float(total_test_batch)
test_loss_vals.append(avg_cost)
test_acc_vals.append(fin_accuracy)
test_iter_num.append(step)
print("[" + get_time() + "] [Iter " + str(step) + "] Testing Loss: " + \
"{:.6f}".format(avg_cost) + " Testing Accuracy: " + "{:.5f}".format(fin_accuracy))
if step % stepSize == 0:
learningRate /= 10
step += 1
model_saver.save(sess, outputModelName)
print("[" + get_time() + "] [Iter " + str(step) + "] Weights & Biases are saved.")
# Print final accuracy independent of the mode
print ("[" + get_time() + "] Test Accuracy: " + str(fin_accuracy))
print ("[" + get_time() + "] Training for val[" + str(valSet) + "] is completed.")
# Starting building the learning curve graph
fig, ax1 = plt.subplots()
# Plotting training and test losses
train_loss, = ax1.plot(train_iter_num, train_loss_vals, color='red', alpha=.5)
test_loss, = ax1.plot(test_iter_num, test_loss_vals, linewidth=2, color='green')
ax1.set_xlabel('Epochs', fontsize=15)
ax1.set_ylabel('Loss', fontsize=15)
ax1.tick_params(labelsize=15)
# Plotting test accuracy
ax2 = ax1.twinx()
test_accuracy, = ax2.plot(test_iter_num, test_acc_vals, linewidth=2, color='blue')
train_accuracy, = ax2.plot(train_iter_num, train_acc_vals, linewidth=1, color='orange')
ax2.set_ylim(ymin=0, ymax=1)
ax2.set_ylabel('Accuracy', fontsize=15)
ax2.tick_params(labelsize=15)
# Adding legend
plt.legend([train_loss, test_loss, test_accuracy, train_accuracy], ['Training Loss', 'Test Loss', 'Test Accuracy', 'Training Accuracy'], bbox_to_anchor=(1, 0.8))
plt.title('Learning Curve', fontsize=18)
# Saving learning curve
plt.savefig(outputGraphName)
|
rastervision/data/raster_transformer/raster_transformer.py | carderne/raster-vision | 1,577 | 11184865 | <gh_stars>1000+
from abc import (ABC, abstractmethod)
class RasterTransformer(ABC):
"""Transforms raw chips to be input to a neural network."""
@abstractmethod
def transform(self, chip, channel_order=None):
"""Transform a chip of a raster source.
Args:
chip: ndarray of shape [height, width, channels] This is assumed to already
have the channel_order applied to it if channel_order is set. In other
words, channels should be equal to len(channel_order).
channel_order: list of indices of channels that were extracted from the
raw imagery.
Returns:
[height, width, channels] numpy array
"""
pass
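# A minimal concrete sketch of the contract described above; it assumes numpy
# chips and is illustrative only, not one of Raster Vision's own transformers.
class IdentityRasterTransformer(RasterTransformer):
    """Return chips unchanged, optionally cast to a target dtype."""
    def __init__(self, dtype=None):
        self.dtype = dtype
    def transform(self, chip, channel_order=None):
        # chip is [height, width, channels]; channel_order is assumed to have
        # been applied upstream, so it is not used here.
        if self.dtype is not None:
            return chip.astype(self.dtype)
        return chip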
|
transformations/factive_verb_transformation/transformation.py | JerryX1110/NL-Augmenter | 583 | 11184866 | <filename>transformations/factive_verb_transformation/transformation.py
from interfaces.SentenceOperation import SentenceOperation
from tasks.TaskTypes import TaskType
import random
from typing import List
import spacy
from initialize import spacy_nlp
def extract_names_and_titles():
"""
Extract names, titles, profession and relation of males and females
"""
SINGULAR_GENDER_PAIRS = {
"manager": "manageress",
"nephew": "niece",
"prince": "princess",
"baron": "baroness",
"wizard": "witch",
"father": "mother",
"sons-in-law": "daughters-in-law",
"boyfriend": "girlfriend",
"dad": "mom",
"shepherd": "shepherdess",
"beau": "belle",
"hunter": "huntress",
"step-son": "step-daughter",
"policeman": "policewoman",
"brother": "sister",
"grandfather": "grandmother",
"priest": "priestess",
"landlord": "landlady",
"husband": "wife",
"poet": "poetess",
"masseur": "masseuse",
"usher": "usherette",
"hero": "heroine",
"stepson": "stepdaughter",
"postman": "postwoman",
"god": "goddess",
"grandpa": "grandma",
"murderer": "murderess",
"manservant": "maidservant",
"host": "hostess",
"masseurs": "masseuses",
"boy": "girl",
"male": "female",
"son-in-law": "daughter-in-law",
"waiter": "waitress",
"bachelor": "spinster",
"millionaire": "millionairess",
"steward": "stewardess",
"congressman": "congresswoman",
"emperor": "empress",
"duke": "duchess",
"sire": "dam",
"son": "daughter",
"widower": "widow",
"proprietor": "proprietress",
"monk": "nun",
"heir": "heiress",
"gentleman": "lady",
"lord": "lady",
"uncle": "aunt",
"he": "she",
"king": "queen",
"governor": "matron",
"fiance": "fiancee",
"step-father": "step-mother",
"mr": "mrs",
"stepfather": "stepmother",
"daddy": "mummy",
"father-in-law": "mother-in-law",
"abbot": "abbess",
"sir": "madam",
"actor": "actress",
"mr.": "mrs.",
"chairman": "chairwoman",
"sorcerer": "sorceress",
"postmaster": "postmistress",
"lad": "lass",
"headmaster": "headmistress",
"papa": "mama",
"milkman": "milkmaid",
"man": "woman",
"grandson": "granddaughter",
"groom": "bride",
"businessman": "businesswoman",
"his": "her",
"he": "she",
}
male_name_file = open("gender.male.names", "r")
male_names = []
for line in male_name_file:
male_names.append(line.strip())
female_name_file = open("gender.female.names", "r")
female_names = []
for line in female_name_file:
female_names.append(line.strip())
male_names.extend(SINGULAR_GENDER_PAIRS.keys()) # adding male title, profession and relation
female_names.extend(SINGULAR_GENDER_PAIRS.values()) # adding female title, profession and relation
return male_names, female_names
def is_candidate_word(word):
"""
    check whether a word is a suitable candidate for pronoun identification
"""
discarded_words = ["a", "an", "the"] # can enhance this list
if len(word)<=2 or word.lower() in discarded_words:
return False
return True
def extract_nsubj_phrase(parse):
"""
extract phrase from nsubj subtree
"""
nsubj_phrase = []
for token in parse:
if token.dep_ == "nsubj" and token.head.dep_ == "ROOT":
nsubj_phrase.append(token.text)
if token.head.dep_ == "nsubj" or token.head.head.dep_ == "nsubj":
nsubj_phrase.append(token.text)
return " ".join(nsubj_phrase)
def map_pronoun(word, male_names, female_names):
"""
    map a word against male and female names, professions, titles, etc.
"""
pronoun = ""
if word in male_names or word.lower() in male_names:
pronoun = "he"
elif word in female_names or word.lower() in female_names:
pronoun = "she"
return pronoun
def fetch_corresponding_pronoun(nsubj_phrase, male_names, female_names):
"""
Fetch pronoun of nsubj phrase
"""
if nsubj_phrase.lower() in ["i", "you", "we", "he", "she", "they"]:
return nsubj_phrase.lower()
if len(nsubj_phrase.split(" ")) > 1:
for ph in nsubj_phrase.split(" "): # if nsubj phrase contains multiple words.
if is_candidate_word(ph):
pronoun = map_pronoun(ph, male_names, female_names)
if(pronoun != ""):
return pronoun
return "they" # default pronoun
else:
        return map_pronoun(nsubj_phrase, male_names, female_names)
def get_transformation(sentence, nlp, factive_verbs, non_factive_verbs, initial_verbs, male_names, female_names, seed):
"""
    transform an input sentence by adding a factive verb
"""
parse = nlp(sentence)
nsubj_phrase = extract_nsubj_phrase(parse)
pronoun = fetch_corresponding_pronoun(nsubj_phrase, male_names, female_names)
random.seed(0)
verb = random.choice(factive_verbs + non_factive_verbs) # pick random verb
#initial_verb = random.choice(initial_verbs) # TODO:
sentence = sentence.replace(nsubj_phrase, pronoun)
return f"{nsubj_phrase} {verb} that, {sentence}"#, f"{nsubj} didn't {verb} that, {sentence}"
class FactiveVerbTransformation(SentenceOperation):
tasks = [
TaskType.TEXT_CLASSIFICATION,
TaskType.TEXT_TO_TEXT_GENERATION,
TaskType.SENTIMENT_ANALYSIS,
]
languages = ["en"]
heavy = False
def __init__(self, seed=1, max_outputs=1):
super().__init__(seed, max_outputs=max_outputs)
self.nlp = spacy_nlp if spacy_nlp else spacy.load("en_core_web_sm")
self.initial_verbs = ["", "have to", "has to", "need to"] # TODO: use this in next push after discussion
#TODO: we can add third person variation like (after discussion)
# "Peter published a research paper. => John revealed that, Peter published a research paper."
self.male_names, self.female_names = extract_names_and_titles()
self.factive_verbs = ["accept", "accepts", "accepted",
"establish", "establishes", "established",
"note", "notes", "noted",
"reveal", "reveals", "revealed",
"acknowledge","acknowledges", "acknowledged",
"explain", "explains", "explained",
"observe", "observes", "observed",
"see", "saw", "seen",
"know", "knows", "knew",
"prove", "proves", "proved",
"show", "shows", "showed",
"demonstrate", "demonstrates","demonstrated",
"learn", "learns", "learnt",
"recognise", "recognises", "recognised",
"inform", "informs", "informed",
"understand", "understands", "understood"
"confirm", "confirms", "confirmed"] # more verbs can be added
self.non_factive_verbs = ["argue", "argues", "argued",
"doubt", "doubts", "doubted",
"hypothesise", "hypothesises", "hypothesised",
"recommend", "recommends", "recommended",
"assume", "assumes", "assumed",
"estimate", "estimates", "estimated",
"imply", "implies", "implied",
"suggest", "suggests", "suggested",
"believe", "believes", "believed",
"expect", "expects", "expected",
"predict", "predicts", "predicted",
"suspect", "suspects", "suspected",
"claim", "claims", "claimed",
"foresee", "foresaw", "foreseen",
"presume", "presumes", "presumed",
"think", "thinks", "thought"]
def generate(self, sentence: str) -> List[str]:
transformed_sentences = []
for _ in range(self.max_outputs):
transformed_sentence = get_transformation(sentence, self.nlp,
self.factive_verbs, self.non_factive_verbs,
self.initial_verbs, self.male_names,
self.female_names, self.seed)
transformed_sentences.append(transformed_sentence)
return transformed_sentences
# if __name__ == "__main__":
# import json
# from TestRunner import convert_to_snake_case
#
# tf = FactiveVerbTransformation()
# test_cases=[]
# input_sent = ["He killed a street dog yesterday.",
# "An actress made her debut in hollywood.",
# "<NAME> was enjoying the summer in Baker street.",
# "The lady doctor made a huge mistake during the operation.",
# "Mr. <NAME> won the Quidditch championship.",
# "A small group of researchers found a new variant of Coivd-19."]
# # for i, sentence in enumerate(input_sent):
# # transformed_sentence = tf.generate(sentence)
# # test_cases.append({
# # "class": tf.name(),
# # "inputs": {"sentence": sentence},
# # "outputs": [],}
# # )
# # for trans_sentence in transformed_sentence:
# # test_cases[i]["outputs"].append({"sentence":trans_sentence})
# # json_file = {"type":convert_to_snake_case("factive_verb_transformation"),
# # "test_cases": test_cases}
# # print(json.dumps(json_file))
# for ip in input_sent:
# print(ip)
# trans_sent = tf.generate(ip)
# print(trans_sent)
|
eosfactory/core/utils.py | tuan-tl/eosfactory | 255 | 11184910 | <reponame>tuan-tl/eosfactory
import time
import os
import shutil
import threading
import subprocess
import eosfactory.core.errors as errors
def wslMapLinuxWindows(path, back_slash=True):
if not path or path.find("/mnt/") != 0:
return path
path = path[5].upper() + ":" + path[6:]
if back_slash:
path = path.replace("/", r"\\")
return path
def wslMapWindowsLinux(path):
if path.find(":") == -1:
return path
path = path.replace("\\", "/")
drive = path[0]
return path.replace(drive + ":/", "/mnt/" + drive.lower() + "/")
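# A small self-check sketch for the two path mappers above; illustrative only,
# assuming the usual WSL layout where drive C: is mounted at /mnt/c.
def _wsl_path_mapping_examples():
    assert wslMapWindowsLinux("C:\\Workspaces\\EOS") == "/mnt/c/Workspaces/EOS"
    assert wslMapLinuxWindows("/mnt/c/Workspaces/EOS", back_slash=False) == "C:/Workspaces/EOS"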
def heredoc(message):
from textwrap import dedent
message = dedent(message).strip()
    message = message.replace("<br>", "\n")
return message
def spawn(command_line, error_message='', shell=False, raise_exception=True):
stdout = None
stderr = None
try:
p = subprocess.run(
command_line,
shell=shell,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout = p.stdout.decode("ISO-8859-1").strip()
stderr = p.stderr.decode("ISO-8859-1").strip()
except Exception as e:
stderr = str(e)
if raise_exception:
if stderr:
raise errors.Error('''
{}
command line:
=============
{}
error message:
==============
{}
'''.format(error_message, " ".join(command_line), stderr),
translate=False)
return stdout
else:
return (stdout, stderr)
UBUNTU = "Ubuntu"
DARWIN = "Darwin"
OTHER_OS = None
def os_version():
version = spawn(["uname", "-v"])
if "Microsoft" in version or "ubuntu" in version:
return UBUNTU
if "Darwin" in version:
return DARWIN
return OTHER_OS
def is_windows_ubuntu():
return "Microsoft" in spawn(["uname", "-v"])
def which(file_path):
return spawn("which {}".format(file_path), shell=True)
def long_process(command_line, build_dir=None, is_verbose=True, prompt=None,
shell=False):
stop = False
PERIOD = 2
def thread_function():
if prompt:
print("{}: ".format(prompt), end="", flush=True)
while True:
print(".", end="", flush=True)
time.sleep(PERIOD)
if stop:
break
cwd = None
if build_dir:
cwd = os.path.join(build_dir, "cwd")
if os.path.exists(cwd):
try:
shutil.rmtree(cwd)
except Exception as e:
raise errors.Error('''
Cannot remove the directory {}.
error message:
==============
{}
'''.format(cwd, str(e)))
os.mkdir(cwd)
threading.Thread(target=thread_function).start()
try:
p = subprocess.run(
command_line,
cwd=cwd,
shell=shell,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except Exception as e:
stop = True
time.sleep(PERIOD)
print(str(e))
exit()
stop = True
time.sleep(PERIOD)
print()
stdout = p.stdout.decode("ISO-8859-1")
stderr = p.stderr.decode("ISO-8859-1")
returncode = p.returncode
if is_verbose:
print(stdout)
if cwd:
shutil.rmtree(cwd)
if returncode:
raise errors.Error('''
command line:
=============
{}
error message:
==============
{}
'''.format(" ".join(command_line), stderr))
return p
def locate(start_dir, partial_path):
cl = ["find", start_dir, "-wholename", '"*{}"'.format(partial_path)]
p = long_process(" ".join(cl), None, False,
"locating '{}'".format(partial_path), True)
stdout = p.stdout.decode("ISO-8859-1")
if stdout:
return stdout.strip()
return ""
def project_zip():
# from zipfile_infolist import print_info
import zipfile
zip_file = '/mnt/c/Workspaces/EOS/contracts/examples/project_zip.zip'
with zipfile.ZipFile(zip_file, mode='w') as zf:
print('adding README.txt')
zf.write('/mnt/c/Workspaces/EOS/contracts/examples/fund/build/fund.abi')
# print('creating archive')
# zf = zipfile.ZipFile(zip_file, mode='w')
# try:
# for f in "/mnt/c/Workspaces/EOS/contracts/examples/fund/build":
# print("adding {}".format(f))
# zf.write(f)
# finally:
# print('closing')
# zf.close()
# print(print_info(zip_file))
# zip_file = "/mnt/c/Workspaces/EOS/contracts/examples/project_zip.zip"
# zip_object = zipfile.ZipFile(zip_file, 'w')
# for f in "/mnt/c/Workspaces/EOS/contracts/examples/fund":
# zip_object.write(f)
|
Twitter_Scraper_without_API/display_hashtags.py | avinashkranjan/PraticalPythonProjects | 930 | 11184927 | <reponame>avinashkranjan/PraticalPythonProjects<filename>Twitter_Scraper_without_API/display_hashtags.py
import sqlite3
import os
def sql_connection():
"""
Establishes a connection to the SQL file database
:return connection object:
"""
path = os.path.abspath('./Twitter_Scraper_without_API/TwitterDatabase.db')
con = sqlite3.connect(path)
return con
def sql_fetcher(con):
"""
Fetches all the tweets with the given hashtag from our database
:param con:
:return:
"""
hashtag = input("\nEnter hashtag to search: #")
hashtag = '#' + hashtag
count = 0
cur = con.cursor()
cur.execute('SELECT * FROM tweets') # SQL search query
rows = cur.fetchall()
for r in rows:
if hashtag in r:
count += 1
print(f'USERNAME: {r[1]}\nTWEET CONTENT: {r[2]}\nURL: {r[3]}\n')
if count:
print(f'{count} tweets fetched from database')
else:
print('No tweets available for this hashtag')
con = sql_connection()
while 1:
sql_fetcher(con)
ans = input('Press (y) to continue or any other key to exit: ').lower()
if ans == 'y':
continue
else:
print('Exiting..')
break
|
contrib/tools/python/src/Lib/plat-mac/lib-scriptpackages/Terminal/Terminal_Suite.py | HeyLey/catboost | 6,989 | 11184932 | <filename>contrib/tools/python/src/Lib/plat-mac/lib-scriptpackages/Terminal/Terminal_Suite.py
"""Suite Terminal Suite: Terms and Events for controlling the Terminal application
Level 1, version 1
Generated from /Applications/Utilities/Terminal.app
AETE/AEUT resource version 1/0, language 0, script 0
"""
import aetools
import MacOS
_code = 'trmx'
class Terminal_Suite_Events:
def GetURL(self, _object, _attributes={}, **_arguments):
"""GetURL: Opens a telnet: URL
Required argument: the object for the command
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'GURL'
_subcode = 'GURL'
if _arguments: raise TypeError, 'No optional args expected'
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_do_script = {
'in_' : 'kfil',
'with_command' : 'cmnd',
}
def do_script(self, _object, _attributes={}, **_arguments):
"""do script: Run a UNIX shell script or command
Required argument: the object for the command
Keyword argument in_: the window in which to execute the command
Keyword argument with_command: data to be passed to the Terminal application as the command line, deprecated, use direct parameter
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: the reply for the command
"""
_code = 'core'
_subcode = 'dosc'
aetools.keysubst(_arguments, self._argmap_do_script)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
class application(aetools.ComponentItem):
"""application - The Terminal program """
want = 'capp'
class _Prop__3c_Inheritance_3e_(aetools.NProperty):
"""<Inheritance> - All of the properties of the superclass. """
which = 'c@#^'
want = 'capp'
_3c_Inheritance_3e_ = _Prop__3c_Inheritance_3e_()
class _Prop_properties(aetools.NProperty):
"""properties - every property of the Terminal program """
which = 'pALL'
want = '****'
properties = _Prop_properties()
# element 'cwin' as ['name', 'indx', 'rele', 'rang', 'test', 'ID ']
# element 'docu' as ['name', 'indx', 'rele', 'rang', 'test']
applications = application
class window(aetools.ComponentItem):
"""window - A Terminal window """
want = 'cwin'
class _Prop_background_color(aetools.NProperty):
"""background color - the background color for the window """
which = 'pbcl'
want = '****'
class _Prop_bold_text_color(aetools.NProperty):
"""bold text color - the bold text color for the window """
which = 'pbtc'
want = '****'
class _Prop_bounds(aetools.NProperty):
"""bounds - the boundary rectangle for the window, relative to the upper left corner of the screen """
which = 'pbnd'
want = '****'
class _Prop_busy(aetools.NProperty):
"""busy - Is the window busy running a process? """
which = 'busy'
want = 'bool'
class _Prop_contents(aetools.NProperty):
"""contents - the currently visible contents of the window """
which = 'pcnt'
want = 'utxt'
class _Prop_cursor_color(aetools.NProperty):
"""cursor color - the cursor color for the window """
which = 'pcuc'
want = '****'
class _Prop_custom_title(aetools.NProperty):
"""custom title - the custom title for the window """
which = 'titl'
want = 'utxt'
class _Prop_frame(aetools.NProperty):
"""frame - the origin and size of the window """
which = 'pfra'
want = '****'
class _Prop_frontmost(aetools.NProperty):
"""frontmost - Is the window in front of the other Terminal windows? """
which = 'pisf'
want = 'bool'
class _Prop_history(aetools.NProperty):
"""history - the contents of the entire scrolling buffer of the window """
which = 'hist'
want = 'utxt'
class _Prop_normal_text_color(aetools.NProperty):
"""normal text color - the normal text color for the window """
which = 'ptxc'
want = '****'
class _Prop_number_of_columns(aetools.NProperty):
"""number of columns - the number of columns in the window """
which = 'ccol'
want = 'long'
class _Prop_number_of_rows(aetools.NProperty):
"""number of rows - the number of rows in the window """
which = 'crow'
want = 'long'
class _Prop_origin(aetools.NProperty):
"""origin - the lower left coordinates of the window, relative to the lower left corner of the screen """
which = 'pori'
want = '****'
class _Prop_position(aetools.NProperty):
"""position - the upper left coordinates of the window, relative to the upper left corner of the screen """
which = 'ppos'
want = '****'
class _Prop_processes(aetools.NProperty):
"""processes - a list of the currently running processes """
which = 'prcs'
want = 'utxt'
class _Prop_size(aetools.NProperty):
"""size - the width and height of the window """
which = 'psiz'
want = '****'
class _Prop_title_displays_custom_title(aetools.NProperty):
"""title displays custom title - Does the title for the window contain a custom title? """
which = 'tdct'
want = 'bool'
class _Prop_title_displays_device_name(aetools.NProperty):
"""title displays device name - Does the title for the window contain the device name? """
which = 'tddn'
want = 'bool'
class _Prop_title_displays_file_name(aetools.NProperty):
"""title displays file name - Does the title for the window contain the file name? """
which = 'tdfn'
want = 'bool'
class _Prop_title_displays_shell_path(aetools.NProperty):
"""title displays shell path - Does the title for the window contain the shell path? """
which = 'tdsp'
want = 'bool'
class _Prop_title_displays_window_size(aetools.NProperty):
"""title displays window size - Does the title for the window contain the window size? """
which = 'tdws'
want = 'bool'
windows = window
application._superclassnames = []
import Standard_Suite
application._privpropdict = {
'_3c_Inheritance_3e_' : _Prop__3c_Inheritance_3e_,
'properties' : _Prop_properties,
}
application._privelemdict = {
'document' : Standard_Suite.document,
'window' : window,
}
window._superclassnames = []
window._privpropdict = {
'_3c_Inheritance_3e_' : _Prop__3c_Inheritance_3e_,
'background_color' : _Prop_background_color,
'bold_text_color' : _Prop_bold_text_color,
'bounds' : _Prop_bounds,
'busy' : _Prop_busy,
'contents' : _Prop_contents,
'cursor_color' : _Prop_cursor_color,
'custom_title' : _Prop_custom_title,
'frame' : _Prop_frame,
'frontmost' : _Prop_frontmost,
'history' : _Prop_history,
'normal_text_color' : _Prop_normal_text_color,
'number_of_columns' : _Prop_number_of_columns,
'number_of_rows' : _Prop_number_of_rows,
'origin' : _Prop_origin,
'position' : _Prop_position,
'processes' : _Prop_processes,
'properties' : _Prop_properties,
'size' : _Prop_size,
'title_displays_custom_title' : _Prop_title_displays_custom_title,
'title_displays_device_name' : _Prop_title_displays_device_name,
'title_displays_file_name' : _Prop_title_displays_file_name,
'title_displays_shell_path' : _Prop_title_displays_shell_path,
'title_displays_window_size' : _Prop_title_displays_window_size,
}
window._privelemdict = {
}
#
# Indices of types declared in this module
#
_classdeclarations = {
'capp' : application,
'cwin' : window,
}
_propdeclarations = {
'busy' : _Prop_busy,
'c@#^' : _Prop__3c_Inheritance_3e_,
'ccol' : _Prop_number_of_columns,
'crow' : _Prop_number_of_rows,
'hist' : _Prop_history,
'pALL' : _Prop_properties,
'pbcl' : _Prop_background_color,
'pbnd' : _Prop_bounds,
'pbtc' : _Prop_bold_text_color,
'pcnt' : _Prop_contents,
'pcuc' : _Prop_cursor_color,
'pfra' : _Prop_frame,
'pisf' : _Prop_frontmost,
'pori' : _Prop_origin,
'ppos' : _Prop_position,
'prcs' : _Prop_processes,
'psiz' : _Prop_size,
'ptxc' : _Prop_normal_text_color,
'tdct' : _Prop_title_displays_custom_title,
'tddn' : _Prop_title_displays_device_name,
'tdfn' : _Prop_title_displays_file_name,
'tdsp' : _Prop_title_displays_shell_path,
'tdws' : _Prop_title_displays_window_size,
'titl' : _Prop_custom_title,
}
_compdeclarations = {
}
_enumdeclarations = {
}
|
appengine/gallery_api/submit.py | bharati-software/blockly-games-Kannada | 1,184 | 11184946 | <reponame>bharati-software/blockly-games-Kannada
"""Blockly Games: Gallery
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""Submit to gallery with App Engine.
"""
__author__ = "<EMAIL> (<NAME>)"
import cgi
import storage
from gallery_api import *
print("Content-Type: text/plain\n")
forms = cgi.FieldStorage()
xml = forms["xml"].value
uuid = storage.xmlToKey(xml)
print("XML saved as %s." % uuid)
app = forms["app"].value
thumb = forms["thumb"].value
title = forms["title"].value
art = Art(uuid=uuid, app=app, thumb=thumb, title=title, public=False)
art.put()
print("Submitted to %s as %s." % (app, uuid))
|
alipay/aop/api/domain/CarRentalVehicleInfo.py | antopen/alipay-sdk-python-all | 213 | 11184959 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class CarRentalVehicleInfo(object):
def __init__(self):
self._car_no = None
self._vehicle_capacity = None
self._vehicle_color = None
self._vehicle_models = None
self._vehicle_number = None
self._vehicle_operation_type = None
self._vehicle_seats = None
self._vehicle_type = None
@property
def car_no(self):
return self._car_no
@car_no.setter
def car_no(self, value):
self._car_no = value
@property
def vehicle_capacity(self):
return self._vehicle_capacity
@vehicle_capacity.setter
def vehicle_capacity(self, value):
self._vehicle_capacity = value
@property
def vehicle_color(self):
return self._vehicle_color
@vehicle_color.setter
def vehicle_color(self, value):
self._vehicle_color = value
@property
def vehicle_models(self):
return self._vehicle_models
@vehicle_models.setter
def vehicle_models(self, value):
self._vehicle_models = value
@property
def vehicle_number(self):
return self._vehicle_number
@vehicle_number.setter
def vehicle_number(self, value):
self._vehicle_number = value
@property
def vehicle_operation_type(self):
return self._vehicle_operation_type
@vehicle_operation_type.setter
def vehicle_operation_type(self, value):
self._vehicle_operation_type = value
@property
def vehicle_seats(self):
return self._vehicle_seats
@vehicle_seats.setter
def vehicle_seats(self, value):
self._vehicle_seats = value
@property
def vehicle_type(self):
return self._vehicle_type
@vehicle_type.setter
def vehicle_type(self, value):
self._vehicle_type = value
def to_alipay_dict(self):
params = dict()
if self.car_no:
if hasattr(self.car_no, 'to_alipay_dict'):
params['car_no'] = self.car_no.to_alipay_dict()
else:
params['car_no'] = self.car_no
if self.vehicle_capacity:
if hasattr(self.vehicle_capacity, 'to_alipay_dict'):
params['vehicle_capacity'] = self.vehicle_capacity.to_alipay_dict()
else:
params['vehicle_capacity'] = self.vehicle_capacity
if self.vehicle_color:
if hasattr(self.vehicle_color, 'to_alipay_dict'):
params['vehicle_color'] = self.vehicle_color.to_alipay_dict()
else:
params['vehicle_color'] = self.vehicle_color
if self.vehicle_models:
if hasattr(self.vehicle_models, 'to_alipay_dict'):
params['vehicle_models'] = self.vehicle_models.to_alipay_dict()
else:
params['vehicle_models'] = self.vehicle_models
if self.vehicle_number:
if hasattr(self.vehicle_number, 'to_alipay_dict'):
params['vehicle_number'] = self.vehicle_number.to_alipay_dict()
else:
params['vehicle_number'] = self.vehicle_number
if self.vehicle_operation_type:
if hasattr(self.vehicle_operation_type, 'to_alipay_dict'):
params['vehicle_operation_type'] = self.vehicle_operation_type.to_alipay_dict()
else:
params['vehicle_operation_type'] = self.vehicle_operation_type
if self.vehicle_seats:
if hasattr(self.vehicle_seats, 'to_alipay_dict'):
params['vehicle_seats'] = self.vehicle_seats.to_alipay_dict()
else:
params['vehicle_seats'] = self.vehicle_seats
if self.vehicle_type:
if hasattr(self.vehicle_type, 'to_alipay_dict'):
params['vehicle_type'] = self.vehicle_type.to_alipay_dict()
else:
params['vehicle_type'] = self.vehicle_type
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = CarRentalVehicleInfo()
if 'car_no' in d:
o.car_no = d['car_no']
if 'vehicle_capacity' in d:
o.vehicle_capacity = d['vehicle_capacity']
if 'vehicle_color' in d:
o.vehicle_color = d['vehicle_color']
if 'vehicle_models' in d:
o.vehicle_models = d['vehicle_models']
if 'vehicle_number' in d:
o.vehicle_number = d['vehicle_number']
if 'vehicle_operation_type' in d:
o.vehicle_operation_type = d['vehicle_operation_type']
if 'vehicle_seats' in d:
o.vehicle_seats = d['vehicle_seats']
if 'vehicle_type' in d:
o.vehicle_type = d['vehicle_type']
return o
|
fastweb/test/interactive.py | BSlience/fastweb | 123 | 11184977 | import code
import readline
import atexit
import os
class HistoryConsole(code.InteractiveConsole):
def __init__(self, locals=None, filename="<console>",
histfile=os.path.expanduser("~/.console-history")):
code.InteractiveConsole.__init__(self, locals, filename)
self.init_history(histfile)
def init_history(self, histfile):
readline.parse_and_bind("tab: complete")
if hasattr(readline, "read_history_file"):
try:
readline.read_history_file(histfile)
except IOError:
pass
atexit.register(self.save_history, histfile)
def save_history(self, histfile):
readline.set_history_length(1000)
readline.write_history_file(histfile)
if __name__ == '__main__':
while True:
c = HistoryConsole().raw_input()
print(c)
|
commands/type_lookup.py | ikust/omnisharp-sublime | 424 | 11184995 | import os
import sublime
import sublime_plugin
from ..lib import helpers
from ..lib import omnisharp
class OmniSharpTypeLookup(sublime_plugin.TextCommand):
outputpanel = None
def run(self, edit):
sublime.active_window().run_command("hide_panel",{"panel": "output.variable_get"})
self.outputpanel = self.view.window().create_output_panel("variable_get")
self.outputpanel.run_command('erase_view')
params = {}
params["includedocumentation"] = True
omnisharp.get_response(self.view, '/typelookup', self._handle_typelookup, params)
def is_enabled(self):
return helpers.is_csharp(sublime.active_window().active_view())
def _handle_typelookup(self, data):
sublime.status_message('' if data["Type"] is None else data["Type"])
if data["Documentation"] is not None:
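            # Build the output panel text; parentheses keep each conditional expression scoped to its own field.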
            self.outputpanel.run_command('append', {'characters': "Type : " + ('' if data["Type"] is None else data["Type"]) + "\n" + "Documentation : " + ('' if data["Documentation"] is None else data["Documentation"])})
self.view.window().run_command("show_panel", {"panel": "output.variable_get"})
|
tools/generate-smtp.py | drewm27/misp-warninglists | 277 | 11185043 | <reponame>drewm27/misp-warninglists<filename>tools/generate-smtp.py
#!/usr/bin/env python3
import multiprocessing.dummy
from generator import get_version, write_to_file, Dns, consolidate_networks, create_resolver
# Source: https://github.com/mailcheck/mailcheck/wiki/List-of-Popular-Domains
domains = [
# Default domains included
"aol.com", "att.net", "comcast.net", "facebook.com", "gmail.com", "gmx.com", "googlemail.com",
"google.com", "hotmail.com", "hotmail.co.uk", "mac.com", "me.com", "mail.com", "msn.com",
"live.com", "sbcglobal.net", "verizon.net", "yahoo.com", "yahoo.co.uk",
# Other global domains
"email.com", "fastmail.fm", "games.com", "gmx.net", "hush.com", "hushmail.com", "icloud.com",
"iname.com", "inbox.com", "lavabit.com",
"love.com", "outlook.com", "pobox.com", "protonmail.ch", "protonmail.com", "tutanota.de", "tutanota.com",
"tutamail.com", "tuta.io",
"keemail.me", "rocketmail.com", "safe-mail.net", "wow.com", "ygm.com",
"ymail.com", "zoho.com", "yandex.com",
# United States ISP domains
"bellsouth.net", "charter.net", "cox.net", "earthlink.net", "juno.com",
# British ISP domains
"btinternet.com", "virginmedia.com", "blueyonder.co.uk", "live.co.uk",
"ntlworld.com", "orange.net", "sky.com", "talktalk.co.uk", "tiscali.co.uk",
"virgin.net", "bt.com",
# Domains used in Asia
"sina.com", "sina.cn", "qq.com", "naver.com", "hanmail.net", "daum.net", "nate.com", "yahoo.co.jp", "yahoo.co.kr",
"yahoo.co.id", "yahoo.co.in", "yahoo.com.sg", "yahoo.com.ph", "163.com", "yeah.net", "126.com", "21cn.com",
"aliyun.com", "foxmail.com",
# French ISP domains
"hotmail.fr", "live.fr", "laposte.net", "yahoo.fr", "wanadoo.fr", "orange.fr", "gmx.fr", "sfr.fr", "neuf.fr",
"free.fr",
# German ISP domains
"gmx.de", "hotmail.de", "live.de", "online.de", "t-online.de", "web.de", "yahoo.de",
# Italian ISP domains
"libero.it", "virgilio.it", "hotmail.it", "aol.it", "tiscali.it",
"alice.it", "live.it", "yahoo.it", "email.it", "tin.it", "poste.it", "teletu.it",
# Russian ISP domains
"bk.ru", "inbox.ru", "list.ru", "mail.ru", "rambler.ru", "yandex.by", "yandex.com", "yandex.kz", "yandex.ru",
"yandex.ua", "ya.ru",
# Belgian ISP domains
"hotmail.be", "live.be", "skynet.be", "voo.be", "tvcablenet.be", "telenet.be",
# Argentinian ISP domains
"hotmail.com.ar", "live.com.ar", "yahoo.com.ar", "fibertel.com.ar", "speedy.com.ar", "arnet.com.ar",
# Domains used in Mexico
"yahoo.com.mx", "live.com.mx", "hotmail.es", "hotmail.com.mx", "prodigy.net.mx",
# Domains used in Canada
"yahoo.ca", "hotmail.ca", "bell.net", "shaw.ca", "sympatico.ca", "rogers.com",
# Domains used in Brazil
"yahoo.com.br", "hotmail.com.br", "outlook.com.br", "uol.com.br", "bol.com.br", "terra.com.br", "ig.com.br",
"r7.com", "zipmail.com.br", "globo.com", "globomail.com", "oi.com.br",
# Custom extension
# Domains used in Czechia
"seznam.cz", "atlas.cz", "centrum.cz",
]
if __name__ == '__main__':
dns = Dns(create_resolver())
spf_ranges = []
p = multiprocessing.dummy.Pool(40)
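    # Resolve the SPF records of every domain in parallel and flatten the advertised sender IP ranges.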
for domain_ranges in p.map(lambda d: dns.get_ip_ranges_from_spf(d), domains):
spf_ranges.extend(domain_ranges)
warninglist = {
'name': "List of known SMTP sending IP ranges",
'version': get_version(),
'description': "List of IP ranges for known SMTP servers.",
'matching_attributes': ["ip-src", "ip-dst", "domain|ip"],
'type': 'cidr',
'list': consolidate_networks(spf_ranges),
}
write_to_file(warninglist, "smtp-sending-ips")
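    # Second list: resolve the MX hosts of the same domains and collect the receiving server IP addresses.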
mx_ips = []
for domain_ranges in p.map(lambda d: dns.get_mx_ips_for_domain(d), domains):
mx_ips.extend(domain_ranges)
warninglist = {
'name': "List of known SMTP receiving IP addresses",
'version': get_version(),
'description': "List of IP addresses for known SMTP servers.",
'matching_attributes': ["ip-src", "ip-dst", "domain|ip"],
'type': 'cidr',
        'list': list(map(str, mx_ips)),
}
write_to_file(warninglist, "smtp-receiving-ips")
|
macaw/core/interaction_handler/user_requests_db.py | SouvickG/macaw | 146 | 11185057 | """
The conversation (or interaction) database implemented using MongoDB.
Authors: <NAME> (<EMAIL>)
"""
from pymongo import MongoClient
from macaw import util
from macaw.core.interaction_handler.msg import Message
class InteractionDB:
def __init__(self, host, port, dbname):
self.client = MongoClient(host, port)
self.db = self.client[dbname]
self.col = self.db['macaw_msgs']
def insert_one(self, msg):
if msg.user_id is None or msg.text is None or msg.timestamp is None or msg.user_interface is None:
raise Exception('Each message should include a user_interface, user_id, text, and timestamp.')
self.col.insert_one(msg.__dict__)
def get_all(self):
print('Using get_all is only recommended for development purposes. It is not efficient!')
return self.dict_list_to_msg_list(self.col.find({}))
def get_conv_history(self, user_id, max_time, max_count):
if max_time is None:
res = self.col.find({'user_id': user_id}).sort([('timestamp', -1)])
else:
res = self.col.find({'user_id': user_id,
'timestamp': {'$gt': util.current_time_in_milliseconds() - max_time}}).sort([('timestamp', -1)])
if max_count is not None:
res = res.limit(max_count)
return self.dict_list_to_msg_list(res)
def close(self):
self.client.close()
@staticmethod
def dict_list_to_msg_list(msg_dict_list):
return [Message.from_dict(msg_dict) for msg_dict in msg_dict_list]
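# Hedged usage sketch (not part of the original module): assumes a local MongoDB on the default
# port and that Message accepts these keyword arguments.
#
#   db = InteractionDB('localhost', 27017, 'macaw_test')
#   db.insert_one(Message(user_interface='cli', user_id='u1', text='hello',
#                         timestamp=util.current_time_in_milliseconds()))
#   history = db.get_conv_history('u1', max_time=None, max_count=10)
#   db.close()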
|
spectacles/types.py | felipefrancisco/spectacles | 150 | 11185098 | from typing import Dict, Any
from typing_extensions import Literal
QueryMode = Literal["batch", "hybrid", "single"]
JsonDict = Dict[str, Any]
|
docs/code/buffer_single_side.py | Jeremiah-England/Shapely | 2,382 | 11185155 | from matplotlib import pyplot
from shapely.geometry import LineString
from descartes import PolygonPatch
from figures import SIZE, BLUE, GRAY, set_limits, plot_line
line = LineString([(0, 0), (1, 1), (0, 2), (2, 2), (3, 1), (1, 0)])
fig = pyplot.figure(1, figsize=SIZE, dpi=90)
# 1
ax = fig.add_subplot(121)
plot_line(ax, line)
left_hand_side = line.buffer(0.5, single_sided=True)
patch1 = PolygonPatch(left_hand_side, fc=BLUE, ec=BLUE, alpha=0.5, zorder=2)
ax.add_patch(patch1)
ax.set_title('a) left hand buffer')
set_limits(ax, -1, 4, -1, 3)
#2
ax = fig.add_subplot(122)
plot_line(ax, line)
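# A negative distance with single_sided=True buffers the right-hand side of the line.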
right_hand_side = line.buffer(-0.3, single_sided=True)
patch2 = PolygonPatch(right_hand_side, fc=GRAY, ec=GRAY, alpha=0.5, zorder=1)
ax.add_patch(patch2)
ax.set_title('b) right hand buffer')
set_limits(ax, -1, 4, -1, 3)
pyplot.show()
|
utils/imdb_data_util.py | dujiaxin/graph_star | 112 | 11185219 | import numpy as np
import os
import torch
import random
from bert_serving.client import BertClient
random.seed(31415926)
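# Encode each review with bert-as-service and group reviews that share the same IMDb title id
# (parsed from urls_pos.txt/urls_neg.txt) into per-title graphs with fully connected and hub ("bug") edges.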
def build_imdb_npy(imdb_path):
for file_path in [os.path.join(imdb_path, "train"),
os.path.join(imdb_path, "test")]:
txt_list = []
y = []
for label in ["pos", "neg"]:
train_path = os.path.join(file_path, label)
for fname in os.listdir(train_path):
f = open(os.path.join(train_path, fname))
txt = ""
for l in f.readlines():
txt += (l + " ")
txt_list.append(txt)
if label == "pos":
y.append(1)
else:
y.append(0)
y = np.array(y)
bc = BertClient()
res = bc.encode(txt_list)
np.save(os.path.join(file_path, "bert_large_encode_res.npy"), res)
np.save(os.path.join(file_path, "y.npy"), y)
# res = np.load(os.path.join(file_path, "all_bert_fine_tuning_encode_res.npy"))
# y = np.load(os.path.join(file_path, "all_y.npy"))
topic_dic = dict()
lines = []
f = open(os.path.join(file_path, "urls_pos.txt"))
lines.extend([x[26:35] for x in f.readlines()])
f = open(os.path.join(file_path, "urls_neg.txt"))
lines.extend([x[26:35] for x in f.readlines()])
s_edge = []
s_bug_edge = []
s_be = []
s_y = []
t_idx = 0
for idx, id in enumerate(lines):
if id not in topic_dic:
topic_dic[id] = len(topic_dic)
s_edge.append([])
s_bug_edge.append([])
s_be.append([res[idx]])
s_y.append([y[idx]])
# t_idx += 1
else:
t_idx = topic_dic[id]
new_idx = len(s_be[t_idx])
for i in range(len(s_be[t_idx])):
s_edge[t_idx].append([i, new_idx])
s_edge[t_idx].append([new_idx, i])
s_bug_edge[t_idx].append([0, new_idx])
s_bug_edge[t_idx].append([new_idx, 0])
s_be[t_idx].append(res[idx])
s_y[t_idx].append(y[idx])
np.save(os.path.join(file_path, "split_bert_large_encode_res.npy"), s_be)
np.save(os.path.join(file_path, "split_edge.npy"), s_edge)
np.save(os.path.join(file_path, "split_bug_edge.npy"), s_bug_edge)
np.save(os.path.join(file_path, "split_y.npy"), s_y)
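# Re-pack the per-title graphs into shuffled batches of at most 2000 nodes and save them as split_2k_*.npy files.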
def load_data(filepath):
bert_encode_res = np.load(os.path.join(filepath, "split_bert_large_encode_res.npy"),allow_pickle=True) # 25000,768
y = np.load(os.path.join(filepath, "split_y.npy"),allow_pickle=True) # 25000
edge = np.load(os.path.join(filepath, "split_edge.npy"),allow_pickle=True) # 2,edge_num*2
bug_edge = np.load(os.path.join(filepath, "split_bug_edge.npy"),allow_pickle=True) # 2,edge_num*2
datas = []
for x, y, e, eb in zip(bert_encode_res, y, edge, bug_edge):
x = np.array([_.tolist() for _ in x], dtype=np.float)
y = np.array(y, dtype=np.long)
if len(e) == 0:
e = np.empty((0, 2), dtype=np.long).transpose()
eb = np.empty((0, 2), dtype=np.long).transpose()
else:
e = np.array(e).transpose()
eb = np.array(eb).transpose()
datas.append((x, y, e, eb))
random.shuffle(datas)
max_node_num = 2000
def empty():
x = np.empty((0, 1024), dtype=np.float)
y = np.empty(0, dtype=np.long)
e = np.empty((0, 2), dtype=np.long).transpose()
eb = np.empty((0, 2), dtype=np.long).transpose()
return x, y, e, eb
new_res = []
n_x, n_y, n_e, n_eb = empty()
for x, y, e, eb in datas:
if len(n_x) + len(x) > max_node_num:
new_res.append((n_x, n_y, n_e, n_eb))
n_x, n_y, n_e, n_eb = empty()
if len(e) > 0:
e = e + len(n_x)
eb = eb + len(n_x)
n_e = np.concatenate((n_e, e), axis=1)
n_eb = np.concatenate((n_eb, eb), axis=1)
n_x = np.concatenate((n_x, x), axis=0)
n_y = np.concatenate((n_y, y), axis=0)
if len(n_x) > 0:
new_res.append((n_x, n_y, n_e, n_eb))
# print(new_res)
xx = []
yy = []
ee = []
eebb = []
for x, y, e, eb in new_res:
xx.append(x)
yy.append(y)
ee.append(e.transpose())
eebb.append(eb.transpose())
np.save(os.path.join(filepath, "split_2k_bert_large_encode_res.npy"), xx)
np.save(os.path.join(filepath, "split_2k_edge.npy"), ee)
np.save(os.path.join(filepath, "split_2k_bug_edge.npy"), eebb)
np.save(os.path.join(filepath, "split_2k_y.npy"), yy)
imdb_path = "/mnt/nas1/NLP/public_dataset/TC/imdb/aclImdb"
build_imdb_npy(imdb_path)
load_data(os.path.join(imdb_path,"train"))
load_data(os.path.join(imdb_path,"test"))
|
hpccm/building_blocks/kokkos.py | robertmaynard/hpc-container-maker | 340 | 11185228 | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""Kokkos building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from distutils.version import StrictVersion
import hpccm.config
import hpccm.templates.downloader
import hpccm.templates.envvars
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.generic_cmake import generic_cmake
from hpccm.building_blocks.packages import packages
from hpccm.common import linux_distro
from hpccm.primitives.comment import comment
class kokkos(bb_base, hpccm.templates.downloader, hpccm.templates.envvars):
"""The `kokkos` building block downloads and installs the
[Kokkos](https://github.com/kokkos/kokkos) component.
The [CMake](#cmake) building block should be installed prior to
this building block.
# Parameters
annotate: Boolean flag to specify whether to include annotations
(labels). The default is False.
arch: List of target architectures to build. If set adds
`-DKokkos_ARCH_<value>=ON` to the list of CMake options. The
default value is `VOLTA70`, i.e., sm_70. If a CUDA aware build is
not selected, then a non-default value should be used.
branch: The git branch to clone. Only recognized if the
`repository` parameter is specified. The default is empty, i.e.,
use the default branch for the repository.
check: Boolean flag to specify whether the build should be
checked. If True, adds `-DKokkos_ENABLE_TESTS=ON` to the list of
CMake options. The default is False.
cmake_opts: List of options to pass to `cmake`. The default is
`-DCMAKE_BUILD_TYPE=RELEASE`.
commit: The git commit to clone. Only recognized if the
`repository` parameter is specified. The default is empty, i.e.,
use the latest commit on the default branch for the repository.
cuda: Flag to control whether a CUDA aware build is performed. If
True, adds `-DKokkos_ENABLE_CUDA=ON` and
`-DCMAKE_CXX_COMPILER=$(pwd)/../bin/nvcc_wrapper` to the list of
CMake options. The default value is True.
environment: Boolean flag to specify whether the environment
(`LD_LIBRARY_PATH` and `PATH`) should be modified to include
Kokkos. The default is True.
hwloc: Flag to control whether a hwloc aware build is performed.
If True, adds `-DKokkos_ENABLE_HWLOC=ON` to the list of CMake
options. The default value is True.
ospackages: List of OS packages to install prior to building. For
Ubuntu, the default values are `gzip`, `libhwloc-dev`, `make`,
`tar`, and `wget`. For RHEL-based Linux distributions the default
values are `gzip`, `hwloc-devel`, `make`, `tar`, and `wget`.
prefix: The top level installation location. The default value
is `/usr/local/kokkos`.
    repository: The location of the git repository that should be used to
    build Kokkos. If True, then use the default
    `https://github.com/kokkos/kokkos.git` repository. The default is empty,
    i.e., use the release package specified by `version`.
url: The location of the tarball that should be used to build
Kokkos. The default is empty, i.e., use the release package
specified by `version`.
version: The version of Kokkos source to download. The default
value is `3.2.00`.
# Examples
```python
kokkos(prefix='/opt/kokkos/3.1.01', version='3.1.01')
```
"""
def __init__(self, **kwargs):
"""Initialize building block"""
super(kokkos, self).__init__(**kwargs)
self.__arch = kwargs.pop('arch', ['VOLTA70'])
self.__baseurl = kwargs.pop('baseurl',
'https://github.com/kokkos/kokkos/archive')
self.__check = kwargs.pop('check', False)
self.__cmake_opts = kwargs.pop('cmake_opts',
['-DCMAKE_BUILD_TYPE=RELEASE'])
self.__cuda = kwargs.pop('cuda', True)
self.__default_repository = 'https://github.com/kokkos/kokkos.git'
self.__hwloc = kwargs.pop('hwloc', True)
self.__ospackages = kwargs.pop('ospackages', [])
self.__powertools = False # enable the CentOS PowerTools repo
self.__prefix = kwargs.pop('prefix', '/usr/local/kokkos')
self.__version = kwargs.pop('version', '3.2.00')
if self.repository:
self.__directory = ''
else:
self.__directory = kwargs.pop('directory',
'kokkos-{}'.format(self.__version))
# Set the CMake options
self.__cmake()
# Set the Linux distribution specific parameters
self.__distro()
# Set the download specific parameters
self.__download()
kwargs['repository'] = self.repository
kwargs['url'] = self.url
# Setup the environment variables
self.environment_variables['PATH'] = '{}/bin:$PATH'.format(
self.__prefix)
# Setup build configuration
self.__bb = generic_cmake(
annotations={'version': self.__version},
base_annotation=self.__class__.__name__,
cmake_opts=self.__cmake_opts,
comment=False,
devel_environment=self.environment_variables,
directory=self.__directory,
prefix=self.__prefix,
runtime_environment=self.environment_variables,
**kwargs)
# Container instructions
self += comment('Kokkos version {}'.format(self.__version))
self += packages(ospackages=self.__ospackages,
powertools=self.__powertools)
self += self.__bb
def __cmake(self):
"""Set CMake options based on user input"""
# Set options
if self.__arch:
for arch in self.__arch:
self.__cmake_opts.append('-DKokkos_ARCH_{}=ON'.format(
arch.upper()))
if self.__check:
self.__cmake_opts.append('-DKokkos_ENABLE_TESTS=ON')
if self.__cuda:
self.__cmake_opts.append('-DKokkos_ENABLE_CUDA=ON')
self.__cmake_opts.append(
'-DCMAKE_CXX_COMPILER=$(pwd)/../bin/nvcc_wrapper')
if self.__hwloc:
self.__cmake_opts.append('-DKokkos_ENABLE_HWLOC=ON')
def __distro(self):
"""Based on the Linux distribution, set values accordingly. A user
specified value overrides any defaults."""
if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
if not self.__ospackages:
self.__ospackages = ['libhwloc-dev', 'make']
elif hpccm.config.g_linux_distro == linux_distro.CENTOS:
if not self.__ospackages:
self.__ospackages = ['hwloc-devel', 'make']
if hpccm.config.g_linux_version >= StrictVersion('8.0'):
# hwloc-devel is in the CentOS powertools repository
self.__powertools = True
else: # pragma: no cover
raise RuntimeError('Unknown Linux distribution')
if self.repository:
self.__ospackages.extend(['ca-certificates', 'git'])
else:
self.__ospackages.extend(['gzip', 'tar', 'wget'])
def __download(self):
"""Set download source based on user parameters"""
# Use the default repository if set to True
if self.repository is True:
self.repository = self.__default_repository
if not self.repository and not self.url:
self.url='{0}/{1}.tar.gz'.format(self.__baseurl, self.__version)
def runtime(self, _from='0'):
"""Generate the set of instructions to install the runtime specific
components from a build in a previous stage.
# Examples
```python
k = kokkos(...)
Stage0 += k
Stage1 += k.runtime()
```
"""
self.rt += comment('Kokkos')
self.rt += self.__bb.runtime(_from=_from)
return str(self.rt)
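# Hedged usage sketch (assumes an hpccm recipe context; not part of the original module):
#
#   Stage0 += kokkos(version='3.2.00', prefix='/opt/kokkos/3.2.00')  # CUDA-aware, VOLTA70 arch by default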
|
python/fetch.py | npmmirror/notes | 1,384 | 11185237 | #!/usr/bin/env python
# encoding: utf-8
import os
from contextlib import closing
from urllib.parse import urlparse
import requests
# Adapted from https://www.zhihu.com/question/41132103/answer/93438156
def wget(url, file_name):
with closing(requests.get(url, stream=True)) as response:
        chunk_size = 1024  # maximum size of a single chunk request
        content_size = int(response.headers['content-length'])  # total size of the content body
        progress = ProgressBar(file_name, total=content_size,
                               unit="KB", chunk_size=chunk_size, run_status="downloading", fin_status="download complete")
with open(file_name, "wb") as file:
for data in response.iter_content(chunk_size=chunk_size):
file.write(data)
progress.refresh(count=len(data))
class ProgressBar(object):
def __init__(self, title,
count=0.0,
run_status=None,
fin_status=None,
total=100.0,
unit='', sep='/',
chunk_size=1.0):
super(ProgressBar, self).__init__()
self.info = "【%s】%s %.2f %s %s %.2f %s"
self.title = title
self.total = total
self.count = count
self.chunk_size = chunk_size
self.status = run_status or ""
self.fin_status = fin_status or " " * len(self.status)
self.unit = unit
self.seq = sep
def __get_info(self):
        # [title] status progress unit separator total unit
_info = self.info % (self.title, self.status,
self.count / self.chunk_size, self.unit, self.seq, self.total / self.chunk_size, self.unit)
return _info
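    # Advance the counter by the number of bytes just written and redraw the bar in place; end with a newline once complete.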
def refresh(self, count=1, status=None):
self.count += count
# if status is not None:
self.status = status or self.status
end_str = "\r"
if self.count >= self.total:
end_str = '\n'
self.status = status or self.fin_status
print(self.__get_info(), end=end_str)
if __name__ == "__main__":
    print("Please enter the URL of the file to download: ")
url = input()
    print("Please enter the file name to save to: ")
f = input()
wget(url,f)
|
benchmark/scripts/benchmark_resnet.py | kalyc/keras-apache-mxnet | 300 | 11185250 | <filename>benchmark/scripts/benchmark_resnet.py
'''Trains a ResNet on the ImageNet/CIFAR10 dataset.
Credit:
Script modified from examples/cifar10_resnet.py
Reference:
ResNet v1
[a] Deep Residual Learning for Image Recognition
https://arxiv.org/pdf/1512.03385.pdf
ResNet v2
[b] Identity Mappings in Deep Residual Networks
https://arxiv.org/pdf/1603.05027.pdf
'''
from __future__ import print_function
import argparse
import logging
import math
import os
import random
import time
import numpy as np
from logging_metrics import LoggingMetrics
from models.resnet import get_resnet_model
from models.timehistory import TimeHistory
import keras
from keras import backend as K
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau
from keras.datasets import cifar10
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
from keras.utils import multi_gpu_model
parser = argparse.ArgumentParser()
parser.add_argument('--dataset',
help='Dataset for training: cifar10 or imagenet')
parser.add_argument('--version',
help='Provide resnet version: 1 or 2')
parser.add_argument('--layers',
help='Provide number of layers: 20, 56 or 110')
parser.add_argument('--gpus',
help='Number of GPUs to use')
parser.add_argument('--train_mode',
help='Required for imagenet: train_on_batch or fit_generator')
parser.add_argument('--data_path',
help='Required for imagenet: path_to_imagenet_data')
parser.add_argument('--epoch', default=200, type=int,
help='Number of epoch')
args = parser.parse_args()
# Check args
if args.dataset not in ['cifar10', 'imagenet']:
raise ValueError('Only support cifar10 or imagenet data set')
if args.version not in ['1', '2']:
raise ValueError('Provide resnet version: 1 or 2')
if args.layers not in ['20', '56', '110']:
raise ValueError('Provide number of layers: 20, 56 or 110')
if args.dataset == 'imagenet':
if not args.train_mode or not args.data_path:
raise ValueError('Need to provide training mode(train_on_batch or fit_generator) '
'and data path to imagenet dataset')
if args.train_mode not in ['train_on_batch', 'fit_generator']:
raise ValueError('Only support train_on_batch or fit_generator training mode')
if args.gpus is None or int(args.gpus) < 1:
num_gpus = 0
else:
num_gpus = int(args.gpus)
# Training parameters
batch_size = 32 * num_gpus if num_gpus > 0 else 32
epochs = int(args.epoch)
num_classes = 1000 if args.dataset == 'imagenet' else 10
data_format = K.image_data_format()
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
# prepare logging
# file name: backend_data_format_dataset_model_batch_size_gpus.log
log_file = K.backend() + '_' + K.image_data_format() + '_' + args.dataset + '_resnet_v' + args.version + '_' + args.layers + '_batch_size' + str(batch_size) + '_' + str(num_gpus) + 'gpus' # nopep8
logFormatter = logging.Formatter('%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s')
rootLogger = logging.getLogger()
fileHandler = logging.FileHandler('{0}/{1}.log'.format('./', log_file))
fileHandler.setFormatter(logFormatter)
rootLogger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
rootLogger.addHandler(consoleHandler)
rootLogger.setLevel(logging.INFO)
rootLogger.info('saving log file to {0}/{1}.log'.format('./', log_file))
rootLogger.info('using image format: %s' % data_format)
# Prepare Training Data
# CIFAR10 data set
if args.dataset == 'cifar10':
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Input image dimensions.
input_shape = x_train.shape[1:]
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
rootLogger.info('x_train shape: %s' % str(x_train.shape))
rootLogger.info('%d train_samples' % x_train.shape[0])
rootLogger.info('%d test samples' % x_test.shape[0])
rootLogger.info('y_train shape: %s' % str(y_train.shape))
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# ImageNet Dataset
if args.dataset == 'imagenet':
input_shape = (256, 256, 3) if data_format == 'channels_last' else (3, 256, 256)
if args.train_mode == 'fit_generator':
train_datagen = ImageDataGenerator(
rescale=1. / 255)
test_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
args.data_path,
target_size=(256, 256),
batch_size=batch_size)
else:
# Load the imagenet data.
train_images = []
train_labels = []
label_counter = 0
for subdir, dirs, files in os.walk(args.data_path):
for folder in dirs:
for folder_subdir, folder_dirs, folder_files in \
os.walk(os.path.join(subdir, folder)):
for file in folder_files:
train_images.append(os.path.join(folder_subdir, file))
train_labels.append(label_counter)
label_counter = label_counter + 1
# shuffle data
perm = list(range(len(train_images)))
random.shuffle(perm)
train_images = [train_images[index] for index in perm]
train_labels = [train_labels[index] for index in perm]
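        # Largest multiple of batch_size that fits in the training set; used to report the number of batches per epoch.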
nice_n = math.floor(len(train_images) / batch_size) * batch_size
# process batch data for imagenet
def get_batch():
index = 1
global current_index
B = np.zeros(shape=(batch_size, input_shape[0], input_shape[1], input_shape[2]))
L = np.zeros(shape=(batch_size))
while index < batch_size:
try:
img = load_img(train_images[current_index].rstrip(),
target_size=(256, 256, 3))
B[index] = img_to_array(img)
B[index] /= 255
L[index] = train_labels[current_index]
index = index + 1
current_index = current_index + 1
except:
rootLogger.info('Ignore image {}'.format(train_images[current_index]))
current_index = current_index + 1
return B, keras.utils.to_categorical(L, num_classes)
# Prepare Model
# Model parameter
# ----------------------------------------------------------------------------
# | | 200-epoch | Orig Paper| 200-epoch | Orig Paper| sec/epoch
# Model | n | ResNet v1 | ResNet v1 | ResNet v2 | ResNet v2 | GTX1080Ti
# |v1(v2)| %Accuracy | %Accuracy | %Accuracy | %Accuracy | v1 (v2)
# ----------------------------------------------------------------------------
# ResNet20 | 3 (2)| 92.16 | 91.25 | ----- | ----- | 35 (---)
# ResNet32 | 5(NA)| 92.46 | 92.49 | NA | NA | 50 ( NA)
# ResNet44 | 7(NA)| 92.50 | 92.83 | NA | NA | 70 ( NA)
# ResNet56 | 9 (6)| 92.71 | 93.03 | 93.01 | NA | 90 (100)
# ResNet110 |18(12)| 92.65 | 93.39+-.16| 93.15 | 93.63 | 165(180)
# ResNet164 |27(18)| ----- | 94.07 | ----- | 94.54 | ---(---)
# ResNet1001| (111)| ----- | 92.39 | ----- | 95.08+-.14| ---(---)
# ---------------------------------------------------------------------------
# Model version
# Orig paper: version = 1 (ResNet v1), Improved ResNet: version = 2 (ResNet v2)
version = int(args.version)
# Computed depth from supplied model parameter n
depth = int(args.layers)
# Model name, depth and version
model_type = 'ResNet%dv%d' % (depth, version)
def lr_schedule(epoch):
'''Learning Rate Schedule
Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs.
Called automatically every epoch as part of callbacks during training.
# Arguments
epoch (int): The number of epochs
# Returns
lr (float32): learning rate
'''
lr = 1e-3
if epoch > 180:
lr *= 0.5e-3
elif epoch > 160:
lr *= 1e-3
elif epoch > 120:
lr *= 1e-2
elif epoch > 80:
lr *= 1e-1
return lr
# get the model base on configs
model = get_resnet_model(version=version, input_shape=input_shape,
depth=depth, num_classes=num_classes)
# use multi gpu model for multi gpus
if num_gpus > 1:
if K.backend() == 'mxnet':
# MXNet merge weights on GPU by default
model = multi_gpu_model(model, gpus=num_gpus)
else:
# merge weights on GPU
model = multi_gpu_model(model, gpus=num_gpus, cpu_merge=False)
# compile the model
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=lr_schedule(0)),
metrics=['accuracy'])
model.summary()
rootLogger.info('Training using: ' + model_type)
# Prepare model saving directory.
save_dir = os.path.join(os.getcwd(), 'saved_models')
model_name = 'imagenet_%s_model.{epoch:03d}.h5' % model_type
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
filepath = os.path.join(save_dir, model_name)
# Prepare callbacks for model saving and for learning rate adjustment.
checkpoint = ModelCheckpoint(filepath=filepath,
monitor='val_acc',
verbose=1,
save_best_only=True)
lr_scheduler = LearningRateScheduler(lr_schedule)
lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
cooldown=0,
patience=5,
min_lr=0.5e-6)
time_callback = TimeHistory()
callbacks = [checkpoint, lr_reducer, lr_scheduler, time_callback]
# Run training, without data augmentation.
if args.dataset == 'imagenet':
rootLogger.info('Not using data augmentation.')
if args.train_mode == 'train_on_batch':
for i in range(0, epochs):
current_index = 0
total_time = 0
rootLogger.info('starting epoch {}/{}'.format(i, epochs))
while current_index + batch_size < len(train_images):
b, l = get_batch()
# only record training time
start_time = time.time()
loss, accuracy = model.train_on_batch(b, l)
end_time = time.time()
total_time += 1000 * (end_time - start_time)
batch_time = 1000 * (end_time - start_time)
speed = batch_size * 1000.0 / batch_time if batch_time != 0 else 0
rootLogger.info('batch {}/{} loss: {} accuracy: {} '
'time: {}ms speed: {}'.format(int(current_index / batch_size),
int(nice_n / batch_size), loss, accuracy,
batch_time, speed))
rootLogger.info('finish epoch {}/{} total epoch time: {}ms'.format(i, epochs, total_time))
else:
model.fit_generator(train_generator, epochs=epochs)
else:
history_callback = model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test),
shuffle=True,
verbose=0,
callbacks=[lr_reducer, lr_scheduler, time_callback])
logg = LoggingMetrics(history_callback, time_callback)
logg.save_metrics_to_log(rootLogger)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
rootLogger.info('Test loss: %.4f' % scores[0])
rootLogger.info('Test accuracy: %.4f' % scores[1])
|
okta/models/log_security_context.py | corylevine/okta-sdk-python | 145 | 11185261 | # flake8: noqa
"""
Copyright 2020 - Present Okta, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# AUTO-GENERATED! DO NOT EDIT FILE DIRECTLY
# SEE CONTRIBUTOR DOCUMENTATION
from okta.okta_object import OktaObject
class LogSecurityContext(
OktaObject
):
"""
A class for LogSecurityContext objects.
"""
def __init__(self, config=None):
super().__init__(config)
if config:
self.as_number = config["asNumber"]\
if "asNumber" in config else None
self.as_org = config["asOrg"]\
if "asOrg" in config else None
self.domain = config["domain"]\
if "domain" in config else None
self.is_proxy = config["isProxy"]\
if "isProxy" in config else None
self.isp = config["isp"]\
if "isp" in config else None
else:
self.as_number = None
self.as_org = None
self.domain = None
self.is_proxy = None
self.isp = None
def request_format(self):
parent_req_format = super().request_format()
current_obj_format = {
"asNumber": self.as_number,
"asOrg": self.as_org,
"domain": self.domain,
"isProxy": self.is_proxy,
"isp": self.isp
}
parent_req_format.update(current_obj_format)
return parent_req_format
|
ebcli/controllers/setenv.py | senstb/aws-elastic-beanstalk-cli | 110 | 11185270 | <gh_stars>100-1000
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from ebcli.core.abstractcontroller import AbstractBaseController
from ebcli.resources.strings import strings, flag_text
from ebcli.core import fileoperations, io
from ebcli.lib import elasticbeanstalk
from ebcli.operations import envvarops
class SetEnvController(AbstractBaseController):
class Meta:
label = 'setenv'
description = strings['setenv.info']
usage = 'eb setenv [VAR_NAME=KEY ...] [-e environment] [options ...]'
arguments = [
(
['varKey'],
dict(
action='store',
nargs='+',
default=[],
help=flag_text['setenv.vars']
)
),
(
['-e', '--environment'],
dict(
dest='environment_name',
help=flag_text['setenv.env']
)
),
(
['--timeout'],
dict(
type=int,
help=flag_text['general.timeout']
)
),
]
epilog = strings['setenv.epilog']
def do_command(self):
app_name = self.get_app_name()
env_name = self.get_env_name()
var_list = self.app.pargs.varKey
timeout = self.app.pargs.timeout
envvarops.setenv(app_name, env_name, var_list, timeout)
|
sdk/python/tests/compiler/testdata/many_results_with_warnings.py | shrivs3/kfp-tekton | 102 | 11185276 | <reponame>shrivs3/kfp-tekton<gh_stars>100-1000
# Copyright 2021 kubeflow.org
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import kfp.dsl as dsl
import kfp.components as comp
from typing import NamedTuple
import json
def print4results() -> NamedTuple('taskOutput', [('param1', str), ('param2', str), ('param3', str),
('superlongnamesuperlongnamesuperlongnamesuperlongnamesuperlongnamesuperlongnamesuperlongname', str)]):
"""Print 4 long results"""
a = 'a' * 2500
b = 'b' * 700
c = 'c' * 500
d = 'd' * 900
from collections import namedtuple
task_output = namedtuple('taskOutput', ['param1', 'param2', 'param3',
'superlongnamesuperlongnamesuperlongnamesuperlongnamesuperlongnamesuperlongnamesuperlongname'])
return task_output(a, b, c, d)
print_op = comp.func_to_container_op(print4results)
@dsl.pipeline(
name='many-results-pipeline',
description='A pipeline that produce many results.'
)
def many_results_pipeline(
):
output_estimation_json = {'param1': 2500, 'param23': 700, 'param3': 500,
'superlongnamesuperlongnamesuperlongnamesuperlongnamesuperlongnamesuperlongnamesuperlongname': 900}
print_task = print_op().add_pod_annotation('tekton-result-sizes', json.dumps(output_estimation_json))
if __name__ == '__main__':
from kfp_tekton.compiler import TektonCompiler
TektonCompiler().compile(many_results_pipeline, __file__.replace('.py', '.yaml'))
|
Python/QuantLib/__init__.py | yrtf/QuantLib-SWIG | 231 | 11185291 | # -*- coding: iso-8859-1 -*-
"""
Copyright (C) 2000, 2001, 2002, 2003 RiskMap srl
This file is part of QuantLib, a free-software/open-source library
for financial quantitative analysts and developers - http://quantlib.org/
QuantLib is free software: you can redistribute it and/or modify it
under the terms of the QuantLib license. You should have received a
copy of the license along with this program; if not, please email
<<EMAIL>>. The license is also available online at
<http://quantlib.org/license.shtml>.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the license for more details.
"""
import sys
if sys.version_info.major >= 3:
from .QuantLib import *
from .QuantLib import _QuantLib
else:
from QuantLib import *
from QuantLib import _QuantLib
del sys
__author__ = 'The QuantLib Group'
__email__ = '<EMAIL>'
if hasattr(_QuantLib,'__version__'):
__version__ = _QuantLib.__version__
elif hasattr(_QuantLib.cvar,'__version__'):
__version__ = _QuantLib.cvar.__version__
else:
print('Could not find __version__ attribute')
if hasattr(_QuantLib,'__hexversion__'):
__hexversion__ = _QuantLib.__hexversion__
elif hasattr(_QuantLib.cvar,'__hexversion__'):
__hexversion__ = _QuantLib.cvar.__hexversion__
else:
print('Could not find __hexversion__ attribute')
|
deep_qa/layers/__init__.py | richarajpal/deep_qa | 459 | 11185310 | <reponame>richarajpal/deep_qa<gh_stars>100-1000
# Individual layers.
from .additive import Additive
from .bigru_index_selector import BiGRUIndexSelector
from .complex_concat import ComplexConcat
from .highway import Highway
from .l1_normalize import L1Normalize
from .masked_layer import MaskedLayer
from .noisy_or import BetweenZeroAndOne, NoisyOr
from .option_attention_sum import OptionAttentionSum
from .overlap import Overlap
from .vector_matrix_merge import VectorMatrixMerge
from .vector_matrix_split import VectorMatrixSplit
|
tests/test_utils.py | MZehren/msaf | 372 | 11185321 | # Run me as follows:
# cd tests/
# nosetests -v -s test_utils.py
import copy
import librosa
import numpy as np
import os
# Msaf imports
import msaf
# Global vars
audio_file = os.path.join("fixtures", "chirp.mp3")
sr = msaf.config.sample_rate
audio, fs = librosa.load(audio_file, sr=sr)
y_harmonic, y_percussive = librosa.effects.hpss(audio)
def test_synchronize_labels():
old_bound_idxs = [0, 82, 150, 268, 342, 353, 463, 535, 616, 771, 833, 920,
979, 1005]
new_bound_idxs = [0, 229, 337, 854, 929, 994, 1004]
labels = [4, 6, 2, 0, 0, 2, 5, 3, 0, 5, 1, 5, 1]
N = 1005
new_labels = msaf.utils.synchronize_labels(new_bound_idxs,
old_bound_idxs,
labels,
N)
assert len(new_labels) == len(new_bound_idxs) - 1
def test_get_num_frames():
dur = 320.2
anal = {"sample_rate": 22050, "hop_size": 512}
n_frames = msaf.utils.get_num_frames(dur, anal)
assert n_frames == int(dur * anal["sample_rate"] / anal["hop_size"])
def test_get_time_frames():
dur = 1
anal = {"sample_rate": 22050, "hop_size": 512}
n_frames = msaf.utils.get_time_frames(dur, anal)
assert n_frames.shape[0] == 43
assert n_frames[0] == 0.0
assert n_frames[-1] == 1.0
def test_align_end_hierarchies():
def _test_equal_hier(hier_orig, hier_new):
for layer_orig, layer_new in zip(hier_orig, hier_new):
assert layer_orig == layer_new
hier1 = [[0, 10, 20, 30], [0, 30]]
hier2 = [[0, 5, 40, 50], [0, 50]]
hier1_orig = copy.deepcopy(hier1)
hier2_orig = copy.deepcopy(hier2)
msaf.utils.align_end_hierarchies(hier1, hier2)
yield (_test_equal_hier, hier1_orig, hier1)
yield (_test_equal_hier, hier2_orig, hier2)
def test_lognormalize():
# Just check that we're not overwriting data
X = np.random.random((300, 10))
Y = msaf.utils.lognormalize(X)
assert not np.array_equal(X, Y)
def test_min_max_normalize():
# Just check that we're not overwriting data
X = np.random.random((300, 10))
Y = msaf.utils.min_max_normalize(X)
assert not np.array_equal(X, Y)
|
Dynamic Programming/877. Stone Game.py | beckswu/Leetcode | 138 | 11185333 | <gh_stars>100-1000
class Solution:
def stoneGame(self, piles: List[int]) -> bool:
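        # dp[i][j]: best score difference (current player minus opponent) achievable on piles[i..j].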
n = len(piles)
dp = [[0] * n for i in range(n)]
for i in range(n): dp[i][i] = piles[i]
for d in range(1, n):
for i in range(n - d):
dp[i][i + d] = max(piles[i] - dp[i + 1][i + d], piles[i + d] - dp[i][i + d - 1])
return dp[0][-1] > 0
# O(n) space
class Solution:
def stoneGame(self, piles: List[int]) -> bool:
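        # Same recurrence with the table collapsed to one dimension: dp[i] holds the best difference for the window of length d + 1 starting at i.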
n = len(piles)
dp = piles[:]
for d in range(1, n):
for i in range(n - d):
dp[i] = max(piles[i] - dp[i + 1], piles[i + d] - dp[i])
return dp[0] > 0
class Solution:
def stoneGame(self, piles: List[int]) -> bool:
cache = {}
piles = tuple(piles)
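        # firstscore(i, j): the most the player to move can collect from piles[i:j] under optimal play.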
def firstscore(i,j):
if i>=j: return 0
if j==i+1 and j < len(piles): return piles[i]
if (i,j) in cache: return cache[i,j]
res = max(piles[i]+min(firstscore(i+2,j), firstscore(i+1,j-1)) , piles[j-1] + min(firstscore(i+1,j-1), firstscore(i,j-2)))
cache[i,j] = res
return res
Alex = firstscore(0,len(piles))
Lee = sum(piles) - Alex
return Alex > Lee |
packages/pyright-internal/src/tests/samples/assert1.py | sasano8/pyright | 4,391 | 11185366 | # This sample tests the ability to detect errant assert calls
# that are always true - the "reportAssertAlwaysTrue" option.
from typing import Any, Tuple
# This should generate a warning.
assert (1 != 2, "Error message")
def foo(a: Tuple[int, ...]):
assert a
b = ()
assert b
c = (2, 3)
# This should generate a warning.
assert c
|
idaes/tests/test_docs.py | eslickj/idaes-pse | 112 | 11185387 | #################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
Test doc build.
This does *not* try to build the docs. Instead it looks
for the log of the build and looks for errors in it.
"""
# stdlib
import logging
import os
from subprocess import Popen
# third-party
import pytest
_log = logging.getLogger(__name__)
@pytest.fixture
def docs_path():
"""Find the docs.
We have to assume that the docs are somewhere
relative to either this file or the user's current
working directory.
Start with the working directory.
Returns:
str: Doc path or None if not found.
"""
cwd = os.path.realpath(os.getcwd())
# look for "./docs"
path = os.path.join(cwd, "docs")
if os.path.exists(path):
return path
# look for (last) "docs/" in full current working dir
cwd_comp = cwd.split(os.path.sep)
comp_cnt = len(cwd_comp)
try:
idx = list(reversed(cwd_comp)).index("docs")
return os.path.sep.join(cwd_comp[: comp_cnt - idx])
except ValueError:
pass
# look for docs relative to this file
try:
path = os.path.join(os.path.dirname(__file__), "..", "..", "docs")
# look for {this file}/../../docs
if os.path.exists(path):
return path
# look for "docs/" in file's full path
file_comp = path.split(os.path.sep)
comp_cnt = len(file_comp)
try:
idx = list(reversed(file_comp)).index("docs")
return os.path.sep.join(file_comp[: comp_cnt - idx])
except ValueError:
pass
except NameError:
pass # __file__ not defined(?)
ERRLOG = "sphinx-errors.txt"
@pytest.mark.unit
def test_sphinx_build_log(docs_path):
""" Check the sphinx log for errors or warnings. """
_log.info('docs path = "{}"'.format(docs_path))
if docs_path is None:
_log.warning('Could not find "docs" directory')
return
log_path = os.path.join(docs_path, ERRLOG)
if not os.path.exists(log_path):
_log.warning(
'Could not find "{}" in docs directory: {}'.format(ERRLOG, log_path)
)
return
if os.stat(log_path).st_size == 0: # file is empty - good
return
# Dump contents to stdout
err_count = 0
with open(log_path) as log:
for line in log:
err_count += 1
print(line, end='')
assert False, f"{err_count} Errors and/or Warnings found in {log_path}"
def _have_sphinx():
"""Test if a working 'sphinx-build' command exists.
"""
have_sphinx = True
try:
Popen(["sphinx-build", "--version"]).wait()
except:
have_sphinx = False
return have_sphinx
@pytest.mark.component
def test_doctests(docs_path):
if _have_sphinx():
build_path = os.path.join(docs_path, "build")
command = ["sphinx-build", "-M", "doctest", docs_path, build_path]
proc = Popen(command)
proc.wait(600)
assert proc.returncode == 0
|
Day2/Python/String_and_pallindrome.py | Grace0Hud/dailycodebase | 249 | 11185389 | """
* @author Shashank
* @date 21/12/2018
"""
a=input("Enter the input string:")
d=a.replace(" ","")
c=list(d)
c.reverse()
e="".join(c)
if d==e:
print("String is pallindrome")
else:
print("Not a pallindrome")
|
tests/columns/test_choices.py | 0scarB/piccolo | 750 | 11185492 | from tests.base import DBTestCase
from tests.example_apps.music.tables import Shirt
class TestChoices(DBTestCase):
def _insert_shirts(self):
Shirt.insert(
Shirt(size=Shirt.Size.small),
Shirt(size=Shirt.Size.medium),
Shirt(size=Shirt.Size.large),
).run_sync()
def test_save(self):
"""
Make sure saving works, when setting a value as an Enum.
"""
shirt = Shirt(size=Shirt.Size.large)
shirt.save().run_sync()
def test_default(self):
"""
Make sure the default works correctly, when the default is an Enum.
"""
Shirt().save().run_sync()
shirt = Shirt.objects().first().run_sync()
self.assertEqual(shirt.size, "l")
def test_update(self):
"""
Make sure rows can be updated using Enums.
"""
self._insert_shirts()
Shirt.update({Shirt.size: Shirt.Size.large}).where(
Shirt.size == Shirt.Size.small
).run_sync()
shirts = (
Shirt.select(Shirt.size)
.output(as_list=True)
.order_by(Shirt._meta.primary_key)
.run_sync()
)
self.assertEqual(shirts, ["l", "m", "l"])
def test_select_where(self):
"""
Make sure Enums can be used in the where clause of select queries.
"""
self._insert_shirts()
shirts = (
Shirt.select(Shirt.size)
.where(Shirt.size == Shirt.Size.small)
.run_sync()
)
self.assertEqual(shirts, [{"size": "s"}])
def test_objects_where(self):
"""
Make sure Enums can be used in the where clause of objects queries.
"""
self._insert_shirts()
shirts = (
Shirt.objects().where(Shirt.size == Shirt.Size.small).run_sync()
)
self.assertEqual(len(shirts), 1)
self.assertEqual(shirts[0].size, "s")
|
mobility_scraper/__init__.py | ActiveConclusion/COVID19_mobility | 239 | 11185495 | <gh_stars>100-1000
from .paths_and_URLs import *
from .download_files import *
from .utils import *
from .mobility_processing import (
google_mobility,
apple_mobility,
waze_mobility,
tomtom_mobility,
merge_reports,
) |
tensorflow_model_analysis/evaluators/analysis_table_evaluator.py | jaymessina3/model-analysis | 1,118 | 11185499 | # Lint as: python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Public API for creating analysis table."""
from __future__ import absolute_import
from __future__ import division
# Standard __future__ imports
from __future__ import print_function
from typing import Any, Dict, Iterable, Optional, Text, Union
import apache_beam as beam
from tensorflow_model_analysis import constants
from tensorflow_model_analysis import types
from tensorflow_model_analysis.evaluators import evaluator
from tensorflow_model_analysis.extractors import extractor
def AnalysisTableEvaluator( # pylint: disable=invalid-name
key: Text = constants.ANALYSIS_KEY,
run_after: Text = extractor.LAST_EXTRACTOR_STAGE_NAME,
include: Optional[Union[Iterable[Text], Dict[Text, Any]]] = None,
exclude: Optional[Union[Iterable[Text],
Dict[Text, Any]]] = None) -> evaluator.Evaluator:
"""Creates an Evaluator for returning Extracts data for analysis.
If both include and exclude are None then tfma.INPUT_KEY extracts will be
excluded by default.
Args:
key: Name to use for key in Evaluation output.
run_after: Extractor to run after (None means before any extractors).
include: List or map of keys to include in output. Keys starting with '_'
are automatically filtered out at write time. If a map of keys is passed
then the keys and sub-keys that exist in the map will be included in the
output. An empty dict behaves as a wildcard matching all keys or the value
itself. Since matching on feature values is not currently supported, an
empty dict must be used to represent the leaf nodes.
For example: {'key1': {'key1-subkey': {}}, 'key2': {}}.
exclude: List or map of keys to exclude from output. If a map of keys is
passed then the keys and sub-keys that exist in the map will be excluded
from the output. An empty dict behaves as a wildcard matching all keys or
the value itself. Since matching on feature values is not currently
supported, an empty dict must be used to represent the leaf nodes.
For example: {'key1': {'key1-subkey': {}}, 'key2': {}}.
Returns:
Evaluator for collecting analysis data. The output is stored under the key
'analysis'.
Raises:
ValueError: If both include and exclude are used.
"""
# pylint: disable=no-value-for-parameter
return evaluator.Evaluator(
stage_name='EvaluateExtracts',
run_after=run_after,
ptransform=EvaluateExtracts(key=key, include=include, exclude=exclude))
# pylint: enable=no-value-for-parameter
@beam.ptransform_fn
@beam.typehints.with_input_types(types.Extracts)
@beam.typehints.with_output_types(Any)
def EvaluateExtracts( # pylint: disable=invalid-name
extracts: beam.pvalue.PCollection,
key: Text = constants.ANALYSIS_KEY,
include: Optional[Union[Iterable[Text], Dict[Text, Any]]] = None,
exclude: Optional[Union[Iterable[Text],
Dict[Text, Any]]] = None) -> evaluator.Evaluation:
"""Creates Evaluation output for extracts.
If both include and exclude are None then tfma.INPUT_KEY extracts will be
excluded by default.
Args:
extracts: PCollection of Extracts.
key: Name to use for key in Evaluation output.
include: List or map of keys to include in output. Keys starting with '_'
are automatically filtered out at write time. If a map of keys is passed
then the keys and sub-keys that exist in the map will be included in the
output. An empty dict behaves as a wildcard matching all keys or the value
itself. Since matching on feature values is not currently supported, an
empty dict must be used to represent the leaf nodes.
For example: {'key1': {'key1-subkey': {}}, 'key2': {}}.
exclude: List or map of keys to exclude from output. If a map of keys is
passed then the keys and sub-keys that exist in the map will be excluded
from the output. An empty dict behaves as a wildcard matching all keys or
the value itself. Since matching on feature values is not currently
supported, an empty dict must be used to represent the leaf nodes.
For example: {'key1': {'key1-subkey': {}}, 'key2': {}}.
Returns:
Evaluation containing PCollection of Extracts.
"""
if include is None and exclude is None:
exclude = [constants.INPUT_KEY]
filtered = extracts
if include or exclude:
filtered = extracts | extractor.Filter(include=include, exclude=exclude)
return {key: filtered}
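# Hedged usage sketch (not part of the original module); assumes a typical TFMA evaluation setup:
#
#   evaluators = [AnalysisTableEvaluator(include=[constants.FEATURES_KEY])]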
|
scripts/python/qliblabels.py | JosephSilvermanArt/qLib | 572 | 11185500 | <filename>scripts/python/qliblabels.py
"""
@file qliblabels.py
@author xy
@since 2014-08-09
@brief qLib, node tagging/labeling functions (semantics).
"""
import hou
import re
import traceback
import datetime
import sys
labels = {
# a completed partial-result
'waypoint': {
'cats': ('sop', 'dop', ),
'color': (1.0, 0.6, 0.6),
'prefix': 'WP_',
'prefix.sop': 'GEO_',
'prefix.dop': 'DOP_',
},
# fetch-like op (ObjectMerge, etc.)
'fetch': {
'cats': ('sop', ),
'color': (0.8, 1.0, 0.8),
'prefix': 'IN_',
},
# export-like op (an output point for another fetch)
'export': {
'cats': ('sop', ),
'color': (0.0, 0.3, 0.0),
'prefix': 'OUT_',
},
# RENDER op
'render': {
'cats': ('sop', ),
'color': (0.4, 0.2, 0.6),
'name': 'RENDER',
},
# DISPLAY op
'display': {
'cats': ('sop', ),
'color': (0.0, 0.4, 1.0),
'name': 'DISPLAY',
},
'out': {
'cats': ('sop', ),
'color': (0.9, 0.9, 0.9),
'name': 'OUT',
},
# ----
# default op settings (do not delete this)
'default': {
'color': (0.8, 0.8, 0.8),
},
}
def msg(m):
msg = "[%s qliblabels.py] %s" % (
datetime.datetime.now().strftime("%y%m%d %H:%M.%S"), str(m), )
sys.stderr.write(msg+"\n")
#print msg
def warn(m):
msg('WARNING: %s' % m)
def err(m):
msg('ERROR: %s' % m)
def dbg(m):
msg('[debug] %s' % m)
dbg('LOADED')
# module-wide constants
#
# name/label for tags parameter
n_tags = '__tags'
l_tags = 'tags (semantics)'
# tag list separator
tags_s = ' '
def has_tags_parm(node):
'''.'''
#assert type(node) is hou.Node
r = node.parm(n_tags) is not None
return r
def add_tags_parm(node):
'''.'''
#assert type(node) is hou.Node
if not has_tags_parm(node):
pass # add tags parm
#hou_parm_template_group = hou.ParmTemplateGroup()
hou_parm_template_group = node.parmTemplateGroup()
# Code for parameter template
hou_parm_template = hou.StringParmTemplate(n_tags, l_tags,
1, default_value=([""]), naming_scheme=hou.parmNamingScheme.Base1,
string_type=hou.stringParmType.Regular,
menu_items=([]),
menu_labels=([]),
icon_names=([]),
item_generator_script="",
item_generator_script_language=hou.scriptLanguage.Python,
menu_type=hou.menuType.Normal)
hou_parm_template_group.append(hou_parm_template)
node.setParmTemplateGroup(hou_parm_template_group)
else:
dbg('%s already has tags parm' % node.path())
return node.parm(n_tags)
def rem_tags_parm(node):
'''.'''
# assert
if has_tags_parm(node):
g = node.parmTemplateGroup()
t = g.find(n_tags)
if t:
g.remove(t)
node.setParmTemplateGroup(g)
else:
dbg('%s -- no tags parm found' % node.path())
else:
dbg('%s has no tags parm' % node.path())
def tags_parm(node):
'''.'''
return add_tags_parm(node)
def get_tag_list(node):
'''.'''
r = []
if has_tags_parm(node):
p = tags_parm(node)
r = p.evalAsString().lower()
# strip of spaces (in case separator is not a space)
r = [n.strip() for n in r.split(tags_s)]
return r
def set_tag_list(node, taglist):
'''.'''
assert type(taglist) is list, 'expecting a list of strings'
l = [n.strip() for n in taglist if n.strip() != '']
dbg(' -- tags: %s' % str(l))
l = tags_s.join(l)
dbg(' -- writing tags: "%s"' % l)
if len(l) > 0:
p = tags_parm(node)
p.set(l)
else:
dbg('(removing parm)')
rem_tags_parm(node)
def get_label_table():
'''.'''
return labels
def find_all_prefixes(labels):
'''.'''
r = set()
for L in labels:
E = labels[L]
cats = E['cats'] if 'cats' in E else []
if 'prefix' in E:
r.add(E['prefix'])
for c in cats:
p = 'prefix.%s' % c
if p in E:
r.add(E[p])
r = list(r)
return r
def get_label_data(label):
L = get_label_table()
r = L[label] if label in L else {}
return r
def apply_color(node):
'''.'''
pass
# TODO: finish this
def apply_naming(node):
'''.'''
"""
TODO: make sure to
- remove _all_ known prefixes as well, when necessary
- rename to a default name if previous tagging had an explicit naming scheme
"""
pass
c = node.type().category().name().lower() # 'sop', etc.
def process_op(node, tags, tags_prev=[]):
'''.'''
# TODO: set color, prefix/name, etc.
had_prev_tags = len(tags_prev) > 0
if len(tags):
pass # TODO: apply new color, replace prefix, etc.
else:
pass # TODO: reset op to its defaults
def uimsg(msg, sev=hou.severityType.Message):
'''.'''
hou.ui.setStatusMessage('[qLib | semantics] %s' % str(msg), severity=sev)
def uiwarn(msg):
uimsg(msg, sev=hou.severityType.Warning)
def shelfToolClicked(kwargs):
'''.'''
dbg('shelfToolClicked(): %s' % str(kwargs))
assert type(kwargs) is dict, 'expecting a dict for kwargs'
try:
label = kwargs['toolname'].lower()
label = re.search('[a-z]*$', label).group(0)
nodes = hou.selectedNodes()
add_mode = kwargs['shiftclick'] is True
clear_mode = kwargs['altclick'] is True
if label:
uimsg("%s node(s) to '%s'" %
('added label to' if add_mode else 'labeled', label, ))
for n in nodes:
dbg(" -- %s" % n.path())
tags_prev = get_tag_list(n)
tags = tags_prev if add_mode else []
if label not in tags:
tags.append(label)
if clear_mode:
tags = []
set_tag_list(n, tags)
process_op(n, tags, tags_prev)
else:
uiwarn("couldn't determine label from shelf tool '%s'" %
kwargs['toolname'])
except:
err('shelfToolClicked() failed')
traceback.print_exc()
#dbg('%s' % str( traceback.format_exc() ) )
pass
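
# Minimal usage sketch (assumes a live Houdini session where `hou` is available;
# the node path below is hypothetical):
#
#   node = hou.node('/obj/geo1')
#   prev = get_tag_list(node)            # e.g. [] on an untagged node
#   set_tag_list(node, prev + ['fx'])    # creates/updates the tags spare parm
#   process_op(node, get_tag_list(node), tags_prev=prev)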
|
env/Lib/site-packages/OpenGL/GLES1/EXT/discard_framebuffer.py | 5gconnectedbike/Navio2 | 210 | 11185571 | '''OpenGL extension EXT.discard_framebuffer
This module customises the behaviour of the
OpenGL.raw.GLES1.EXT.discard_framebuffer to provide a more
Python-friendly API
Overview (from the spec)
This extension provides a new command, DiscardFramebufferEXT, which
causes the contents of the named framebuffer attachable images to become
undefined. The contents of the specified buffers are undefined until a
subsequent operation modifies the content, and only the modified region
is guaranteed to hold valid content. Effective usage of this command
may provide an implementation with new optimization opportunities.
Some OpenGL ES implementations cache framebuffer images in a small pool
of fast memory. Before rendering, these implementations must load the
existing contents of one or more of the logical buffers (color, depth,
stencil, etc.) into this memory. After rendering, some or all of these
buffers are likewise stored back to external memory so their contents can
be used again in the future. In many applications, some or all of the
logical buffers are cleared at the start of rendering. If so, the
effort to load or store those buffers is wasted.
Even without this extension, if a frame of rendering begins with a full-
screen Clear, an OpenGL ES implementation may optimize away the loading
of framebuffer contents prior to rendering the frame. With this extension,
an application can use DiscardFramebufferEXT to signal that framebuffer
contents will no longer be needed. In this case an OpenGL ES
implementation may also optimize away the storing back of framebuffer
contents after rendering the frame.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/discard_framebuffer.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES1 import _types, _glgets
from OpenGL.raw.GLES1.EXT.discard_framebuffer import *
from OpenGL.raw.GLES1.EXT.discard_framebuffer import _EXTENSION_NAME
def glInitDiscardFramebufferEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glDiscardFramebufferEXT.attachments size not checked against numAttachments
glDiscardFramebufferEXT=wrapper.wrapper(glDiscardFramebufferEXT).setInputArraySize(
'attachments', None
)
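
# Hypothetical usage sketch (not part of the autogenerated wrapper): assumes a
# current GLES1 context exposing this extension, and that the GL_FRAMEBUFFER,
# GL_COLOR_EXT and GL_DEPTH_EXT tokens from the EXT_discard_framebuffer spec are
# available from the raw binding.
#
#   if glInitDiscardFramebufferEXT():
#       # hint that the default framebuffer's color and depth contents may be
#       # thrown away once the frame has been presented
#       glDiscardFramebufferEXT(GL_FRAMEBUFFER, 2, [GL_COLOR_EXT, GL_DEPTH_EXT])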
### END AUTOGENERATED SECTION |
tests/monitor_temp_file.py | ebourg/isign | 666 | 11185596 | """
isign creates big temporary files, using the standard tempfile library.
If they are not cleaned up, they can fill up the disk. This has
already happened in production. :(
This library monkey-patches tempfile to use our own temporary
directory, so it's easy to test that we aren't leaving any temp files behind.
"""
import os
import shutil
import tempfile
class MonitorTempFile(object):
TEMP_DIR = None
@classmethod
def mkdtemp(cls, *args, **kwargs):
""" ensure temp directories are subdirs of TEMP_DIR """
kwargs['dir'] = MonitorTempFile.TEMP_DIR
return tempfile._original_mkdtemp(*args, **kwargs)
@classmethod
def mkstemp(cls, *args, **kwargs):
""" ensure temp files are within TEMP_DIR """
kwargs['dir'] = MonitorTempFile.TEMP_DIR
return tempfile._original_mkstemp(*args, **kwargs)
@classmethod
def NamedTemporaryFile(cls, *args, **kwargs):
""" ensure named temp files are within TEMP_DIR """
kwargs['dir'] = MonitorTempFile.TEMP_DIR
return tempfile._original_NamedTemporaryFile(*args, **kwargs)
@classmethod
def start(cls):
""" swap a few methods in tempfile with our versions that limit them
to a particular directory """
if hasattr(tempfile, '_is_patched') and tempfile._is_patched:
raise Exception("need tempfile to be in unpatched state!")
cls.TEMP_DIR = tempfile.mkdtemp(prefix='isign-test-run-')
tempfile._original_mkdtemp = tempfile.mkdtemp
tempfile.mkdtemp = MonitorTempFile.mkdtemp
tempfile._original_mkstemp = tempfile.mkstemp
tempfile.mkstemp = MonitorTempFile.mkstemp
tempfile._original_NamedTemporaryFile = tempfile.NamedTemporaryFile
tempfile.NamedTemporaryFile = MonitorTempFile.NamedTemporaryFile
tempfile._is_patched = True
@classmethod
def stop(cls):
""" restore a few methods in tempfile. opposite of _tempfile_patch """
tempfile.mkdtemp = tempfile._original_mkdtemp
tempfile.mkstemp = tempfile._original_mkstemp
tempfile.NamedTemporaryFile = tempfile._original_NamedTemporaryFile
tempfile._is_patched = False
shutil.rmtree(cls.TEMP_DIR)
cls.TEMP_DIR = None
@classmethod
def get_temp_files(cls):
return os.listdir(cls.TEMP_DIR)
@classmethod
def has_no_temp_files(cls):
""" check if this test has created any temp files which
aren't cleaned up """
if cls.TEMP_DIR is None:
raise Exception("temp dir is None. Maybe call patch() first?")
return len(cls.get_temp_files()) == 0
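
# Typical usage in a test (a sketch; the monitored block stands in for any code
# that creates temporary files through the tempfile module):
#
#   MonitorTempFile.start()
#   try:
#       run_code_that_uses_tempfile()           # hypothetical helper
#       assert MonitorTempFile.has_no_temp_files()
#   finally:
#       MonitorTempFile.stop()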
|
qtl/leafcutter/src/cluster_prepare_fastqtl.py | cjops/gtex-pipeline | 247 | 11185598 | <filename>qtl/leafcutter/src/cluster_prepare_fastqtl.py
from __future__ import print_function
import pandas as pd
import numpy as np
import argparse
import subprocess
import os
import gzip
import contextlib
from datetime import datetime
import tempfile
import shutil
import glob
from sklearn.decomposition import PCA
@contextlib.contextmanager
def cd(cd_path):
saved_path = os.getcwd()
os.chdir(cd_path)
yield
os.chdir(saved_path)
def gtf_to_bed(annotation_gtf, feature='gene'):
"""
Parse genes from GTF, create placeholder DataFrame for BED output
"""
chrom = []
start = []
end = []
gene_id = []
with open(annotation_gtf, 'r') as gtf:
for row in gtf:
row = row.strip().split('\t')
if row[0][0]=='#' or row[2]!=feature: continue # skip header
chrom.append(row[0])
# TSS: gene start (0-based coordinates for BED)
if row[6]=='+':
start.append(np.int64(row[3])-1)
end.append(np.int64(row[3]))
elif row[6]=='-':
start.append(np.int64(row[4])-1) # last base of gene
end.append(np.int64(row[4]))
else:
raise ValueError('Strand not specified.')
gene_id.append(row[8].split(';',1)[0].split(' ')[1].replace('"',''))
bed_df = pd.DataFrame(
data={'chr':chrom, 'start':start, 'end':end, 'gene_id':gene_id},
columns=['chr', 'start', 'end', 'gene_id'],
index=gene_id)
return bed_df
def write_bed(bed_df, output_name):
"""Write DataFrame to BED"""
bgzip = subprocess.Popen('bgzip -c > '+output_name,
stdin=subprocess.PIPE, shell=True)
bed_df.to_csv(bgzip.stdin, sep='\t', index=False)
stdout, stderr = bgzip.communicate()
subprocess.check_call('tabix -f '+output_name, shell=True)
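
# Hedged example of the two helpers above (the GTF filename is hypothetical, and
# write_bed additionally requires bgzip and tabix on PATH):
#
#   tss_bed = gtf_to_bed('gencode.genes.gtf', feature='gene')
#   write_bed(tss_bed, 'gene_tss.bed.gz')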
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Run leafcutter clustering, prepare for FastQTL')
parser.add_argument('junc_files_list', help='File with paths to ${sample_id}.junc files')
parser.add_argument('exons', help='Exon definitions file, with columns: chr, start, end, strand, gene_id, gene_name')
parser.add_argument('genes_gtf', help='Collapsed gene annotation in GTF format')
parser.add_argument('prefix', help='Prefix for output files (sample set ID)')
parser.add_argument('--min_clu_reads', default='50', type=str, help='Minimum number of reads supporting each cluster')
parser.add_argument('--min_clu_ratio', default='0.001', type=str, help='Minimum fraction of reads in a cluster that support a junction')
parser.add_argument('--max_intron_len', default='500000', type=str, help='Maximum intron length')
parser.add_argument('--num_pcs', default=10, type=int, help='Number of principal components to calculate')
parser.add_argument('--leafcutter_dir', default='/opt/leafcutter',
help="leafcutter directory, containing 'clustering' directory")
parser.add_argument('-o', '--output_dir', default='.', help='Output directory')
args = parser.parse_args()
print('['+datetime.now().strftime("%b %d %H:%M:%S")+'] leafcutter clustering')
if not os.path.exists(args.output_dir):
os.mkdir(args.output_dir)
with cd(args.output_dir):
print(' * decompressing and renaming junc files')
with open(args.junc_files_list) as f:
junc_files = f.read().strip().split('\n')
junc_dir = os.path.join(args.output_dir, 'junc_files')
if not os.path.exists(junc_dir):
os.mkdir(junc_dir)
sample_ids = []
for f in junc_files:
sample_id = os.path.split(f)[1].split('.')[0]
sample_ids.append(sample_id)
# shutil.move(f, os.path.join(junc_dir, sample_id+'.junc.gz'))
shutil.copy2(f, os.path.join(junc_dir, sample_id+'.junc.gz'))
subprocess.check_call('gunzip -f '+os.path.join(junc_dir, '*.junc.gz'), shell=True)
junc_files = sorted([os.path.join(junc_dir, i+'.junc') for i in sample_ids])
print(' * running leafcutter clustering')
# generates ${prefix}_perind_numers.counts.gz and ${prefix}_perind.counts.gz
with tempfile.NamedTemporaryFile(dir=args.output_dir) as tmp:
with open(tmp.name, 'w') as f:
f.write('\n'.join(junc_files)+'\n')
subprocess.check_call(
'python '+os.path.join(args.leafcutter_dir, 'clustering', 'leafcutter_cluster.py' \
+' --juncfiles '+tmp.name \
+' --outprefix '+args.prefix \
+' --minclureads '+args.min_clu_reads \
+' --mincluratio '+args.min_clu_ratio \
+' --maxintronlen '+args.max_intron_len), shell=True)
print(' * compressing outputs')
subprocess.check_call('gzip {}_pooled'.format(args.prefix), shell=True)
subprocess.check_call('gzip {}_refined'.format(args.prefix), shell=True)
print(' * mapping clusters to genes')
subprocess.check_call(
'Rscript' \
+' '+os.path.abspath(os.path.join(os.path.dirname(__file__), 'map_clusters_to_genes.R')) \
+' '+os.path.join(args.output_dir, args.prefix+'_perind.counts.gz') \
+' '+args.exons \
+' '+args.prefix + '.leafcutter.clusters_to_genes.txt', shell=True)
print(' * filtering counts')
counts_df = pd.read_csv(os.path.join(args.output_dir, args.prefix+'_perind.counts.gz'), sep='\s+').set_index('chrom')
calculate_frac = lambda x: float(x[0])/float(x[1]) if x[1]>0 else 0
frac_df = counts_df.applymap(lambda x: calculate_frac([int(i) for i in x.split('/')]))
pct_zero = (frac_df==0).sum(axis=1) / frac_df.shape[1] # for zero counts, frac is zero
n_unique = frac_df.apply(lambda x: len(x.unique()), axis=1)
zscore_df = ((frac_df.T-frac_df.mean(1)) / frac_df.std(1)).T
# filter out introns with low counts or low complexity
n = np.floor(frac_df.shape[1]*0.1)
if n<10:
n = 10
mask = (pct_zero <= 0.5) & (n_unique >= n)
# additional filter for low complexity
ns = zscore_df.shape[1]
mask2 = ((zscore_df.abs()<0.25).sum(1)>=ns-3) & ((zscore_df.abs()>6).sum(1)<=3)
if np.any(mask & mask2):
print(' ** dropping {} introns with low variation'.format(np.sum(mask & mask2)))
mask = mask & ~mask2
filtered_counts_df = counts_df.loc[mask]
cluster_ids = np.unique(counts_df.index.map(lambda x: x.split(':')[-1]))
filtered_cluster_ids = np.unique(filtered_counts_df.index.map(lambda x: x.split(':')[-1]))
print(' ** dropping {} introns with counts in fewer than 50% of samples\n'
' {}/{} introns remain ({}/{} clusters)'.format(
counts_df.shape[0]-filtered_counts_df.shape[0], filtered_counts_df.shape[0], counts_df.shape[0], len(filtered_cluster_ids), len(cluster_ids)
))
filtered_counts_file = os.path.join(args.output_dir, args.prefix+'_perind.counts.filtered.gz')
with gzip.open(filtered_counts_file, 'wt') as f:
filtered_counts_df.to_csv(f, sep=' ')
print(' * preparing phenotype table')
subprocess.check_call(
'python '+os.path.join(args.leafcutter_dir, 'scripts', 'prepare_phenotype_table.py') \
+' '+filtered_counts_file \
+' -p '+str(args.num_pcs), shell=True)
print(' * concatenating BED files')
bed_files = sorted(glob.glob(os.path.join(args.output_dir, '*_perind.counts.filtered.gz.qqnorm_*')))
bed_df = []
for f in bed_files:
bed_df.append(pd.read_csv(f, sep='\t', dtype=str))
bed_df = pd.concat(bed_df, axis=0)
print(' ** sorting')
# leafcutter doesn't produce output for chrX --> numeric sort
for c in ['#Chr', 'start', 'end']:
bed_df[c] = bed_df[c].astype(int)
bed_df = bed_df.sort_values(['#Chr', 'start', 'end'])
print(' ** writing BED')
bed_file = os.path.join(args.output_dir, args.prefix+'.perind.counts.filtered.qqnorm.bed.gz')
bgzip = subprocess.Popen('bgzip -c > '+bed_file, stdin=subprocess.PIPE, shell=True, universal_newlines=True)
stdout, stderr = bgzip.communicate(bed_df.to_csv(sep='\t', index=False))
print(' ** indexing')
subprocess.check_call('tabix '+bed_file, shell=True)
print(' * converting cluster coordinates to gene coordinates')
tss_df = gtf_to_bed(args.genes_gtf)
cluster2gene_dict = pd.read_csv(os.path.join(args.output_dir, args.prefix + '.leafcutter.clusters_to_genes.txt'),
sep='\t', index_col=0, squeeze=True).to_dict()
# add 'chr' prefix
bed_df['#Chr'] = 'chr'+bed_df['#Chr'].astype(str)
bed_df['ID'] = 'chr'+bed_df['ID']
print(' ** assigning introns to gene mapping(s)')
n = 0
gene_bed_df = []
group_s = {}
for _,r in bed_df.iterrows():
s = r['ID'].split(':')
cluster_id = s[0]+':'+s[-1]
if cluster_id in cluster2gene_dict:
gene_ids = cluster2gene_dict[cluster_id].split(',')
for g in gene_ids:
gi = r['ID']+':'+g
gene_bed_df.append(tss_df.loc[g, ['chr', 'start', 'end']].tolist() + [gi] + r.iloc[4:].tolist())
group_s[gi] = g
else:
n += 1
if n>0:
print(' ** discarded {} introns without gene mapping'.format(n))
print(' * writing FastQTL inputs')
gene_bed_df = pd.DataFrame(gene_bed_df, columns=bed_df.columns)
gene_bed_df = gene_bed_df.groupby('#Chr', sort=False, group_keys=False).apply(lambda x: x.sort_values('start'))
# change sample IDs to participant IDs
gene_bed_df.rename(columns={i:'-'.join(i.split('-')[:2]) for i in gene_bed_df.columns[4:]}, inplace=True)
write_bed(gene_bed_df, os.path.join(args.output_dir, args.prefix+'.leafcutter.bed.gz'))
pd.Series(group_s).sort_values().to_csv(os.path.join(args.output_dir, args.prefix+'.leafcutter.phenotype_groups.txt'), sep='\t')
print(' * calculating PCs')
pca = PCA(n_components=args.num_pcs)
pca.fit(bed_df[bed_df.columns[4:]])
        pc_df = pd.DataFrame(pca.components_, index=['PC{}'.format(i) for i in range(1, args.num_pcs+1)],
columns=['-'.join(i.split('-')[:2]) for i in bed_df.columns[4:]])
pc_df.index.name = 'ID'
pc_df.to_csv(args.prefix+'.leafcutter.PCs.txt', sep='\t')
print('['+datetime.now().strftime("%b %d %H:%M:%S")+'] done')
|
nlpaug/model/spectrogram/time_warping.py | techthiyanes/nlpaug | 3,121 | 11185599 | # import numpy as np
#
# from nlpaug.model import Spectrogram
#
#
# class TimeWarping(Spectrogram):
# def __init__(self, time_warp):
# super(TimeWarping, self).__init__()
#
# self.time_warp = time_warp
#
# # TODO
# def mask(self, mel_spectrogram):
# """
# From: https://arxiv.org/pdf/1904.08779.pdf,
# Time warping is applied via the function
# sparse image warp of tensorflow. Given
# a log mel spectrogram with t time steps, we view it
# as an image where the time axis is horizontal and the
# frequency axis is vertical. A random point along the
# horizontal line passing through the center of the image
# within the time steps (W, t - W) is to be warped
# either to the left or right by a distance w chosen from a
# uniform distribution from 0 to the time warp parameter
# W along that line.
# :return:
# """
#
# time_range = mel_spectrogram.shape[1]
# self.w = np.random.randint(self.time_warp)
#
# center_point = np.random.randint(self.time_warp, time_range-self.time_warp)
# distance = np.random.randint(-self.w, self.w)
#
# # self.w0 = np.random.randint(time_range - self.t)
# #
# # augmented_mel_spectrogram = mel_spectrogram.copy()
# # augmented_mel_spectrogram[:, self.time_warp:self.time_range-self.time_warp] = 0
# # return augmented_mel_spectrogram
# return mel_spectrogram
|
benchmarks/benchmarks/cluster_hierarchy_disjoint_set.py | Ennosigaeon/scipy | 9,095 | 11185600 | import numpy as np
try:
from scipy.cluster.hierarchy import DisjointSet
except ImportError:
pass
from .common import Benchmark
class Bench(Benchmark):
params = [[100, 1000, 10000]]
param_names = ['n']
def setup(self, n):
# Create random edges
rng = np.random.RandomState(seed=0)
self.edges = rng.randint(0, 10 * n, (n, 2))
self.nodes = np.unique(self.edges)
self.disjoint_set = DisjointSet(self.nodes)
self.pre_merged = DisjointSet(self.nodes)
for a, b in self.edges:
self.pre_merged.merge(a, b)
self.pre_merged_found = DisjointSet(self.nodes)
for a, b in self.edges:
self.pre_merged_found.merge(a, b)
for x in self.nodes:
self.pre_merged_found[x]
def time_merge(self, n):
dis = self.disjoint_set
for a, b in self.edges:
dis.merge(a, b)
def time_merge_already_merged(self, n):
dis = self.pre_merged
for a, b in self.edges:
dis.merge(a, b)
def time_find(self, n):
dis = self.pre_merged
return [dis[i] for i in self.nodes]
def time_find_already_found(self, n):
dis = self.pre_merged_found
return [dis[i] for i in self.nodes]
def time_contains(self, n):
assert self.nodes[0] in self.pre_merged
assert self.nodes[n // 2] in self.pre_merged
assert self.nodes[-1] in self.pre_merged
def time_absence(self, n):
# Test for absence
assert None not in self.pre_merged
assert "dummy" not in self.pre_merged
assert (1, 2, 3) not in self.pre_merged
|
tests/test_cursor.py | AlekseyMolchanov/aioodbc | 251 | 11185602 | <reponame>AlekseyMolchanov/aioodbc<gh_stars>100-1000
import pyodbc
import pytest
@pytest.mark.parametrize('db', pytest.db_list)
@pytest.mark.asyncio
async def test_cursor_with(conn, table):
ret = []
# regular cursor usage
cur = await conn.cursor()
await cur.execute('SELECT * FROM t1;')
assert not cur.closed
assert not cur.echo
# cursor should be closed
async with cur:
assert not cur.echo
async for i in cur:
ret.append(i)
expected = [tuple(r) for r in ret]
assert [(1, '123.45'), (2, 'foo')] == expected
assert cur.closed
@pytest.mark.parametrize('db', pytest.db_list)
@pytest.mark.asyncio
async def test_cursor_lightweight(conn, table):
cur = await conn.cursor()
ex_cursor = await cur.execute('SELECT * FROM t1;')
assert ex_cursor is cur
assert not cur.closed
async with cur:
pass
assert cur.closed
@pytest.mark.parametrize('db', pytest.db_list)
@pytest.mark.asyncio
async def test_cursor_await(conn, table):
async with conn.cursor() as cur:
await cur.execute('SELECT * FROM t1;')
assert not cur.closed
assert cur.closed
@pytest.mark.parametrize('db', pytest.db_list)
@pytest.mark.asyncio
async def test_cursor(conn):
cur = await conn.cursor()
assert cur.connection is conn
assert cur._loop, conn.loop
assert cur.arraysize == 1
assert cur.rowcount == -1
r = await cur.setinputsizes()
assert r is None
await cur.setoutputsize()
assert r is None
await cur.close()
@pytest.mark.parametrize('db', pytest.db_list)
@pytest.mark.asyncio
async def test_execute_on_closed_cursor(conn):
cur = await conn.cursor()
await cur.close()
with pytest.raises(pyodbc.OperationalError):
await cur.execute('SELECT 1;')
@pytest.mark.parametrize('db', pytest.db_list)
@pytest.mark.asyncio
async def test_close(conn):
cur = await conn.cursor()
assert not cur.closed
await cur.close()
await cur.close()
assert cur.closed
@pytest.mark.parametrize('db', ['sqlite'])
@pytest.mark.asyncio
async def test_description(conn):
cur = await conn.cursor()
assert cur.description is None
await cur.execute('SELECT 1;')
expected = (('1', float, None, 54, 54, 0, True), )
assert cur.description == expected
await cur.close()
@pytest.mark.parametrize('db', pytest.db_list)
@pytest.mark.asyncio
async def test_description_with_real_table(conn, table):
cur = await conn.cursor()
await cur.execute("SELECT * FROM t1;")
expected = (('n', int, None, 10, 10, 0, True),
('v', str, None, 10, 10, 0, True))
assert cur.description == expected
await cur.close()
@pytest.mark.parametrize('db', pytest.db_list)
@pytest.mark.asyncio
async def test_rowcount_with_table(conn, table):
cur = await conn.cursor()
await cur.execute("SELECT * FROM t1;")
await cur.fetchall()
# sqlite does not provide working rowcount attribute
# http://stackoverflow.com/questions/4911404/in-pythons-sqlite3-
# module-why-cant-cursor-rowcount-tell-me-the-number-of-ro
# TODO: figure out for proper test
assert cur.rowcount in (0, 2)
await cur.close()
@pytest.mark.parametrize('db', pytest.db_list)
@pytest.mark.asyncio
async def test_arraysize(conn):
cur = await conn.cursor()
assert 1 == cur.arraysize
cur.arraysize = 10
assert 10 == cur.arraysize
await cur.close()
@pytest.mark.parametrize('db', pytest.db_list)
@pytest.mark.asyncio
async def test_fetchall(conn, table):
cur = await conn.cursor()
await cur.execute("SELECT * FROM t1;")
resp = await cur.fetchall()
expected = [(1, '123.45'), (2, 'foo')]
for row, exp in zip(resp, expected):
assert exp == tuple(row)
await cur.close()
@pytest.mark.parametrize('db', pytest.db_list)
@pytest.mark.asyncio
async def test_fetchmany(conn, table):
cur = await conn.cursor()
await cur.execute("SELECT * FROM t1;")
resp = await cur.fetchmany(1)
expected = [(1, '123.45')]
for row, exp in zip(resp, expected):
assert exp == tuple(row)
await cur.close()
@pytest.mark.parametrize('db', pytest.db_list)
@pytest.mark.asyncio
async def test_fetchone(conn, table):
cur = await conn.cursor()
await cur.execute("SELECT * FROM t1;")
resp = await cur.fetchone()
expected = (1, '123.45')
assert expected == tuple(resp)
await cur.close()
@pytest.mark.parametrize('db', ['sqlite'])
@pytest.mark.asyncio
async def test_tables(conn, table):
cur = await conn.cursor()
await cur.tables()
resp = await cur.fetchall()
    expected = (None, None, 't1', 'TABLE', None)
    assert len(resp) == 1, resp
    assert expected == tuple(resp[0]), resp
@pytest.mark.parametrize('db', pytest.db_list)
@pytest.mark.asyncio
async def test_cursor_rollback(conn, table):
cur = await conn.cursor()
await cur.execute("INSERT INTO t1 VALUES (3, '123.45');")
await cur.execute("SELECT v FROM t1 WHERE n=3;")
(value, ) = await cur.fetchone()
assert value == '123.45'
await cur.rollback()
await cur.execute("SELECT v FROM t1 WHERE n=3;")
value = await cur.fetchone()
assert value is None
@pytest.mark.parametrize('db', ['sqlite'])
@pytest.mark.asyncio
async def test_columns(conn, table):
cur = await conn.cursor()
await cur.columns()
resp = await cur.fetchall()
    expected = [('', '', 't1', 'n', 4, 'INT', 9, 10, 10, 0, 1, None,
                 'NULL', 4, None, 16384, 1, 'YES'),
                ('', '', 't1', 'v', 12, 'VARCHAR(10)', 10, 10, 10, 0, 1, None,
                 'NULL', 12, None, 16384, 2, 'YES')]
    columns = [tuple(r) for r in resp]
    assert expected == columns
@pytest.mark.parametrize('db', pytest.db_list)
@pytest.mark.asyncio
async def test_executemany(conn):
cur = await conn.cursor()
await cur.execute("CREATE TABLE t1(a int, b VARCHAR(10))")
# TODO: figure out why it is possible to insert only strings... but not int
params = [(str(i), str(i)) for i in range(1, 6)]
await cur.executemany("INSERT INTO t1(a, b) VALUES (?, ?)", params)
await cur.execute("SELECT COUNT(*) FROM t1")
count = await cur.fetchone()
assert count[0] == len(params)
await cur.execute("SELECT a, b FROM t1 ORDER BY a")
rows = await cur.fetchall()
assert count[0] == len(rows)
for param, row in zip(params, rows):
assert int(param[0]) == row[0]
assert param[1] == row[1]
await cur.execute("DROP TABLE t1;")
@pytest.mark.parametrize('db', ['sqlite'])
@pytest.mark.asyncio
async def test_procedures_empty(conn, table):
cur = await conn.cursor()
await cur.procedures()
resp = await cur.fetchall()
assert resp == []
@pytest.mark.parametrize('db', ['sqlite'])
@pytest.mark.asyncio
async def test_procedureColumns_empty(conn, table):
cur = await conn.cursor()
await cur.procedureColumns()
resp = await cur.fetchall()
assert resp == []
@pytest.mark.parametrize('db', ['sqlite'])
@pytest.mark.asyncio
async def test_primaryKeys_empty(conn, table):
cur = await conn.cursor()
await cur.primaryKeys('t1', 't1', 't1')
resp = await cur.fetchall()
assert resp == []
@pytest.mark.parametrize('db', ['sqlite'])
@pytest.mark.asyncio
async def test_foreignKeys_empty(conn, table):
cur = await conn.cursor()
await cur.foreignKeys('t1')
resp = await cur.fetchall()
assert resp == []
@pytest.mark.asyncio
async def test_getTypeInfo_empty(conn, table):
cur = await conn.cursor()
await cur.getTypeInfo(pyodbc.SQL_CHAR)
resp = await cur.fetchall()
expected = [('char', 1, 255, "'", "'", 'length', 1, 0, 3, None, 0, 0,
'char', None, None, 1, 0, None, None)]
type_info = [tuple(r) for r in resp]
assert type_info == expected
|
examples/import_yara_ruleset.py | Dmenifee23-star/vt-py | 208 | 11185649 | <reponame>Dmenifee23-star/vt-py
#!/usr/local/bin/python
# Copyright © 2020 The vt-py authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Import YARA rulesets to a VirusTotal account.
This script imports either a ruleset in a file or all ruleset files in a given
directory. These imported YARA rules can be used in VT Hunting.
Read more:
https://www.virustotal.com/gui/hunting-overview
https://developers.virustotal.com/v3.0/reference#livehunt
https://support.virustotal.com/hc/en-us/articles/360000363717-VT-Hunting
"""
import argparse
import asyncio
import os
import vt
import sys
async def get_rules_files(queue, path):
"""Finds which rules will be uploaded to VirusTotal."""
if os.path.isfile(path):
await queue.put(path)
return
with os.scandir(path) as it:
for entry in it:
if not entry.name.startswith('.') and entry.is_file():
await queue.put(entry.path)
async def upload_rules(queue, apikey, enable):
"""Uploads selected files to VirusTotal."""
async with vt.Client(apikey) as client:
while not queue.empty():
file_path = await queue.get()
with open(file_path) as f:
ruleset = vt.Object(
obj_type='hunting_ruleset',
obj_attributes={
'name': os.path.basename(file_path),
'enabled': enable,
'rules': f.read()})
try:
await client.post_object_async(
path='/intelligence/hunting_rulesets', obj=ruleset)
print(f'File {file_path} uploaded.')
except vt.error.APIError as e:
print(f'Error uploading {file_path}: {e}')
queue.task_done()
def main():
parser = argparse.ArgumentParser(
description='Import YARA rules to a VirusTotal account.')
parser.add_argument('--apikey', required=True, help='your VirusTotal API key')
parser.add_argument('--path', required=True,
help='path to the file/directory to upload.')
parser.add_argument('--enable', action='store_true',
help='Whether to enable the YARA rules or not.')
parser.add_argument('--workers', type=int, required=False, default=4,
help='number of concurrent workers')
args = parser.parse_args()
if not os.path.exists(args.path):
print(f'ERROR: file {args.path} not found.')
sys.exit(1)
loop = asyncio.get_event_loop()
queue = asyncio.Queue(loop=loop)
loop.create_task(get_rules_files(queue, args.path))
_worker_tasks = []
for i in range(args.workers):
_worker_tasks.append(
loop.create_task(upload_rules(queue, args.apikey, args.enable)))
# Wait until all worker tasks has completed.
loop.run_until_complete(asyncio.gather(*_worker_tasks))
loop.close()
if __name__ == '__main__':
main()
|
vilya/libs/auth/check_auth.py | mubashshirjamal/code | 1,582 | 11185685 | # -*- coding: utf-8 -*-
from datetime import datetime
from vilya.models.api_key import ApiKey
from vilya.models.api_token import ApiToken
from vilya.libs.auth.oauth import OAuthError
from vilya.libs.auth import errors as err
from vilya.libs.auth import AuthCode
def check_auth(request):
auth_header = request.environ.get('HTTP_AUTHORIZATION')
    # No token in the request: return immediately
if not auth_header:
return
    # Check whether the token format is correct
if not auth_header.startswith('Bearer '):
# raise OAuthError(*err.auth_access_token_is_missing)
        # To stay compatible with qaci, which uses Basic auth, do not raise for now
auth = AuthCode(auth_header)
if auth.confirm():
request.user = auth.user
return
oauth_token = auth_header[7:]
token = ApiToken.get_by_token(oauth_token)
    # Check whether the ApiToken exists
if not token:
raise OAuthError(*err.auth_invalid_access_token)
    # Check whether the ApiKey exists
if not token.key:
raise OAuthError(*err.auth_invalid_apikey)
    # Check whether the ApiKey is usable (not blocked)
if token.key.status == ApiKey.STATUS_BLOCKED:
raise OAuthError(*err.auth_apikey_blocked)
    # Check whether the ApiToken has expired
if datetime.now() > token.expire_time:
raise OAuthError(*err.auth_access_token_has_expired)
request.user = token.user
request.client_id = token.client_id
|
backup/drivers/mysql_base.py | a4913994/openstack_trove | 244 | 11185743 | <gh_stars>100-1000
# Copyright 2020 Catalyst Cloud
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import shutil
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from backup.drivers import base
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class MySQLBaseRunner(base.BaseRunner):
def __init__(self, *args, **kwargs):
self.datadir = kwargs.pop('db_datadir', '/var/lib/mysql/data')
super(MySQLBaseRunner, self).__init__(*args, **kwargs)
@property
def user_and_pass(self):
return ('--user=%(user)s --password=%(password)s --host=%(host)s' %
{'user': CONF.db_user,
'password': CONF.db_password,
'host': CONF.db_host})
@property
def filename(self):
return '%s.xbstream' % self.base_filename
def check_process(self):
"""Check the backup output for 'completed OK!'."""
LOG.debug('Checking backup process output.')
with open(self.backup_log, 'r') as backup_log:
output = backup_log.read()
if not output:
LOG.error("Backup log file %s empty.", self.backup_log)
return False
last_line = output.splitlines()[-1].strip()
if not re.search('completed OK!', last_line):
LOG.error(f"Backup did not complete successfully, last line:\n"
f"{last_line}")
return False
return True
def get_metadata(self):
LOG.debug('Getting metadata for backup %s', self.base_filename)
meta = {}
lsn = re.compile(r"The latest check point \(for incremental\): "
r"'(\d+)'")
with open(self.backup_log, 'r') as backup_log:
output = backup_log.read()
match = lsn.search(output)
if match:
meta = {'lsn': match.group(1)}
LOG.info("Updated metadata for backup %s: %s", self.base_filename,
meta)
return meta
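
    # Illustration of what the LSN regex above captures; the log line is a
    # hypothetical xtrabackup example:
    #   "The latest check point (for incremental): '2512733'"  ->  lsn '2512733'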
def incremental_restore_cmd(self, incremental_dir):
"""Return a command for a restore with a incremental location."""
args = {'restore_location': incremental_dir}
return (self.decrypt_cmd + self.unzip_cmd + self.restore_cmd % args)
def incremental_prepare_cmd(self, incremental_dir):
if incremental_dir is not None:
incremental_arg = '--incremental-dir=%s' % incremental_dir
else:
incremental_arg = ''
args = {
'restore_location': self.restore_location,
'incremental_args': incremental_arg,
}
return self.incremental_prep % args
def incremental_prepare(self, incremental_dir):
prepare_cmd = self.incremental_prepare_cmd(incremental_dir)
LOG.info("Running restore prepare command: %s.", prepare_cmd)
processutils.execute(prepare_cmd, shell=True)
def incremental_restore(self, location, checksum):
"""Recursively apply backups from all parents.
If we are the parent then we restore to the restore_location and
we apply the logs to the restore_location only.
Otherwise if we are an incremental we restore to a subfolder to
prevent stomping on the full restore data. Then we run apply log
with the '--incremental-dir' flag
:param location: The source backup location.
:param checksum: Checksum of the source backup for validation.
"""
metadata = self.storage.load_metadata(location, checksum)
incremental_dir = None
if 'parent_location' in metadata:
LOG.info("Restoring parent: %(parent_location)s, "
"checksum: %(parent_checksum)s.", metadata)
parent_location = metadata['parent_location']
parent_checksum = metadata['parent_checksum']
# Restore parents recursively so backup are applied sequentially
self.incremental_restore(parent_location, parent_checksum)
# for *this* backup set the incremental_dir
# just use the checksum for the incremental path as it is
# sufficiently unique /var/lib/mysql/<checksum>
incremental_dir = os.path.join('/var/lib/mysql', checksum)
os.makedirs(incremental_dir)
command = self.incremental_restore_cmd(incremental_dir)
else:
# The parent (full backup) use the same command from InnobackupEx
# super class and do not set an incremental_dir.
LOG.info("Restoring back to full backup.")
command = self.restore_command
self.restore_content_length += self.unpack(location, checksum, command)
self.incremental_prepare(incremental_dir)
# Delete after restoring this part of backup
if incremental_dir:
shutil.rmtree(incremental_dir)
|
greenbot/exc.py | EMorf/greenbot | 145 | 11185762 | class FailedCommand(Exception):
pass
class UserNotFound(Exception):
pass
class InvalidLogin(Exception):
pass
class InvalidPointAmount(Exception):
pass
class TimeoutException(Exception):
pass
|
pyekaboo/mkpyekaboo.py | SafeBreach-Labs/pyekaboo | 154 | 11185770 | #!/usr/bin/env python
#
# Copyright (c) 2017, SafeBreach
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# 2. Redistributions in binary form must reproduce the
# above copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# 3. Neither the name of the copyright holder
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS
# AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import argparse
import pyekaboo
import inspect
####################
# Global Variables #
####################
__version__ = "1.0"
__author__ = "<NAME>"
__copyright__ = "Copyright 2017, SafeBreach"
START_OF_CODE = "### DO NOT REMOVE THIS LINE! ###"
START_OF_CODE_LEN = 32
###########
# Classes #
###########
# http://stackoverflow.com/questions/3853722/python-argparse-how-to-insert-newline-in-the-help-text
class SmartFormatter(argparse.HelpFormatter):
def _split_lines(self, text, width):
if text.startswith('ML|'):
return text[3:].splitlines()
# this is the RawTextHelpFormatter._split_lines
return argparse.HelpFormatter._split_lines(self, text, width)
#############
# Functions #
#############
def main(args):
parser = argparse.ArgumentParser(prog='mkpyekaboo', description='Python Hooking Library and Tool', formatter_class=SmartFormatter)
parser.add_argument('pymodule', metavar='PYTHON_MODULE_NAME', type=str, help='Python module to be hooked (e.g. string, os, etc.)')
parser.add_argument('-v', '--verbose', action='store_true', help='verbose output')
parser.add_argument('-l', '--hook-level', type=int, default=0, metavar='HOOK_LEVEL', help='ML|Level 0: Empty Pyekaboo Boilerplate (Default)\nLevel 1: Hook all Python Classes in PYTHON_MODULE_NAME\nLevel 2: Hook all Python Functions in PYTHON_MODULE_NAME\nLevel 3: Hook all Python Classes & Functions in PYTHON_MODULE_NAME\nLevel 4: Hook all Python Classes in PYTHON_MODULE_NAME and Enable Trace\nLevel 5: Hook all Python Functions in PYTHON_MODULE_NAME and Enable Trace\nLevel 6: Hook all Python Classes & Functions in PYTHON_MODULE_NAME and Enable Trace')
args = parser.parse_args(args=args[1:])
mod = None
tot_hooked_classes = 0
tot_hooked_fcns = 0
with open(args.pymodule + '.py', 'w') as outfile:
if args.verbose:
print "Output Filename: %s" % outfile
buf = open(pyekaboo.__file__.replace('pyc', 'py'), 'r').read()
outfile.write(buf[buf.find(START_OF_CODE)+START_OF_CODE_LEN:].strip())
outfile.write('\n\n###############\n# Entry Point #\n###############\n\n')
if args.verbose:
print "Wrote Pykeaboo Library (%d bytes)" % len(buf)
# Hook all Classes (and maybe Enable Trace?)
if args.hook_level == 1 or args.hook_level == 3 or args.hook_level == 4 or args.hook_level == 6:
if args.verbose:
print "Hooking Classes (hook_level == %d)" % args.hook_level
mod = pyekaboo._load_and_register_as(args.pymodule, [args.pymodule], sys.path[::-1])
if args.verbose:
print "Imported %s as %s ..." % (args.pymodule, mod)
for cls_name in dir(mod):
if args.verbose:
print "Is %s a Class ... " % (cls_name),
cls_obj = getattr(mod, cls_name)
# TODO: Need a better way to handle cases where class is not really a class (i.e. socket.MethodType)
if inspect.isclass(cls_obj) is True and repr(cls_obj).find("class") != -1:
# class _fileobject():
# __metaclass__ = _InstallClsHook
# __trace__ = True?
outfile.write('class ' + cls_name + '():\n')
outfile.write(' __metaclass__ = _InstallClsHook\n')
if args.hook_level == 4 or args.hook_level == 6:
outfile.write(' __trace__ = True\n')
outfile.write('\n')
tot_hooked_classes = tot_hooked_classes + 1
if args.verbose:
print "Yes! (%s)" % cls_obj
else:
if args.verbose:
print "No"
print "[*] Hooked %d Classes!" % tot_hooked_classes
# Hook all Functions (and maybe Enable Trace?)
if args.hook_level == 2 or args.hook_level == 3 or args.hook_level == 5 or args.hook_level == 6:
mod = pyekaboo._load_and_register_as(args.pymodule, [args.pymodule], sys.path[::-1])
if args.verbose:
print "Imported %s as %s ..." % (args.pymodule, mod)
for fcn_name in dir(mod):
if args.verbose:
print "Is %s a Function ... " % (fcn_name),
fcn_obj = getattr(mod, fcn_name)
if inspect.isfunction(fcn_obj) is True or inspect.isroutine(fcn_obj) is True:
dbg_flag = "False"
if args.hook_level == 5 or args.hook_level == 6:
dbg_flag = "True"
# gethostbyname = _InstallFcnHook(gethostbyname, debug=True)
outfile.write('\n%s=_InstallFcnHook(%s, debug=%s)\n' % (fcn_name, fcn_name, dbg_flag))
tot_hooked_fcns = tot_hooked_fcns + 1
if args.verbose:
print "Yes! (%s)" % fcn_obj
else:
if args.verbose:
print "No"
print "[*] Hooked %d Functions!" % tot_hooked_fcns
if __name__ == "__main__":
sys.exit(main(sys.argv))
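
# Example invocation (a sketch; the generated file lands in the current directory):
#   python mkpyekaboo.py socket -l 5 -v
# would write ./socket.py containing the pyekaboo boilerplate plus every function
# of `socket` wrapped as name=_InstallFcnHook(name, debug=True).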
|
python/ql/test/query-tests/Security/CWE-327/InsecureProtocol.py | timoles/codeql | 4,036 | 11185794 | <filename>python/ql/test/query-tests/Security/CWE-327/InsecureProtocol.py
import ssl
from OpenSSL import SSL
from ssl import SSLContext
# insecure versions specified
ssl.wrap_socket(ssl_version=ssl.PROTOCOL_SSLv2)
ssl.wrap_socket(ssl_version=ssl.PROTOCOL_SSLv3)
ssl.wrap_socket(ssl_version=ssl.PROTOCOL_TLSv1)
SSLContext(protocol=ssl.PROTOCOL_SSLv2)
SSLContext(protocol=ssl.PROTOCOL_SSLv3)
SSLContext(protocol=ssl.PROTOCOL_TLSv1)
SSL.Context(SSL.SSLv2_METHOD)
SSL.Context(SSL.SSLv3_METHOD)
SSL.Context(SSL.TLSv1_METHOD)
METHOD = SSL.SSLv2_METHOD
SSL.Context(METHOD)
# importing the protocol constant directly
from ssl import PROTOCOL_SSLv2
ssl.wrap_socket(ssl_version=PROTOCOL_SSLv2)
SSLContext(protocol=PROTOCOL_SSLv2)
# secure versions specified
ssl.wrap_socket(ssl_version=ssl.PROTOCOL_TLSv1_2)
SSLContext(protocol=ssl.PROTOCOL_TLSv1_2)
SSL.Context(SSL.TLSv1_2_METHOD)
# insecure versions allowed by specified range
SSLContext(protocol=ssl.PROTOCOL_SSLv23)
SSLContext(protocol=ssl.PROTOCOL_TLS)
SSLContext(protocol=ssl.PROTOCOL_TLS_CLIENT)
SSLContext(protocol=ssl.PROTOCOL_TLS_SERVER)
SSL.Context(SSL.SSLv23_METHOD)
|
modules/method.py | Mattmess/Test | 638 | 11185800 | import asyncio
from modules.printer import clout
async def test_method(session, url):
try:
response = await session.get(url, allow_redirects=True)
if response.status != 404:
await clout(response.url)
else:
pass
except asyncio.exceptions.TimeoutError:
#print(f'{Y}[!] Timeout :{C} {url}{W}')
return
except Exception as exc:
#print(f'{Y}[!] Exception [test_method] [{url}] :{W} {exc}')
return |
pkg_pytorch/blendtorch/btt/utils.py | kongdai123/pytorch-blender | 381 | 11185827 | <filename>pkg_pytorch/blendtorch/btt/utils.py<gh_stars>100-1000
import socket
def get_primary_ip():
    '''Returns the primary IP address of this machine (the one with the default route).
See https://stackoverflow.com/a/28950776
Returns the IP address with the default route attached or `127.0.0.1`.
'''
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
IP = s.getsockname()[0]
except Exception:
IP = '127.0.0.1'
finally:
s.close()
return IP |
vit/color_mappings.py | kinifwyne/vit | 179 | 11185831 | import re
BRIGHT_REGEX = re.compile('.*bright.*')
def task_256_to_urwid_256():
manual_map = {
'red': 'dark red',
'green': 'dark green',
'blue': 'dark blue',
'cyan': 'dark cyan',
'magenta': 'dark magenta',
'gray': 'light gray',
'yellow': 'brown',
'color0': 'black',
'color1': 'dark red',
'color2': 'dark green',
'color3': 'brown',
'color4': 'dark blue',
'color5': 'dark magenta',
'color6': 'dark cyan',
'color7': 'light gray',
'color8': 'dark gray',
'color9': 'light red',
'color10': 'light green',
'color11': 'yellow',
'color12': 'light blue',
'color13': 'light magenta',
'color14': 'light cyan',
'color15': 'white',
}
manual_map.update(task_color_gray_to_g())
manual_map.update(task_color_to_h())
manual_map.update(task_rgb_to_h())
return manual_map
def task_bright_to_color(color_string):
color_map = {
'bright black': 'color8',
'bright red': 'color9',
'bright green': 'color10',
'bright yellow': 'color11',
'bright blue': 'color12',
'bright magenta': 'color13',
'bright cyan': 'color14',
'bright white': 'color15',
}
if BRIGHT_REGEX.match(color_string):
for bright_color in color_map:
color_string = color_string.replace(bright_color, color_map[bright_color])
return color_string
def task_color_gray_to_g():
color_map = {}
for i in range(0, 24):
gray_key = 'gray%d' % i
color_key = 'color%d' % (i + 232)
# NOTE: This is an approximation of the conversion, close enough!
value = 'g%d' % (i * 4)
color_map[gray_key] = value
color_map[color_key] = value
return color_map
def task_color_to_h():
color_map = {}
for i in range(16, 232):
key = 'color%d' % i
value = 'h%d' % i
color_map[key] = value
return color_map
def task_rgb_to_h():
index_to_hex = [
'0',
'6',
'8',
'a',
'd',
'f',
]
color_map = {}
count = 0
for r in range(0, 6):
for g in range(0, 6):
for b in range(0, 6):
                key = 'rgb%d%d%d' % (r, g, b)  # taskwarrior-style 'rgbNNN' color name
value = '#%s%s%s' % (index_to_hex[r], index_to_hex[g], index_to_hex[b])
color_map[key] = value
count += 1
return color_map
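
# Quick sanity examples of the mappings above (values follow directly from the
# tables in this module):
#   task_256_to_urwid_256()['color9']            -> 'light red'
#   task_bright_to_color('bright red on black')  -> 'color9 on black'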
|
runtime/python/Lib/site-packages/pyvisa/compat/__init__.py | hwaipy/InteractionFreeNode | 154 | 11185882 | # -*- coding: utf-8 -*-
"""
pyvisa.compat
~~~~~~~~~~~~~
Compatibility layer.
:copyright: 2014 by PyVISA Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import (division, unicode_literals, print_function,
absolute_import)
import sys
PYTHON3 = sys.version >= '3'
if PYTHON3:
string_types = str
def u(x):
return x
integer_types = (int, )
input = input
int_to_bytes = int.to_bytes
int_from_bytes = int.from_bytes
else:
string_types = basestring
import codecs
def u(x):
return codecs.unicode_escape_decode(x)[0]
integer_types = (int, long)
input = raw_input
# The 2 following function implementation extracted from the python-future
# project
import collections
def int_to_bytes(integer, length, byteorder, signed=False):
"""
Return an array of bytes representing an integer.
The integer is represented using length bytes. An OverflowError is
raised if the integer is not representable with the given number of
bytes.
The byteorder argument determines the byte order used to represent the
integer. If byteorder is 'big', the most significant byte is at the
beginning of the byte array. If byteorder is 'little', the most
significant byte is at the end of the byte array. To request the
native byte order of the host system, use `sys.byteorder' as the byte
order value.
The signed keyword-only argument determines whether two's complement is
used to represent the integer. If signed is False and a negative
integer is given, an OverflowError is raised.
"""
if length < 0:
raise ValueError("length argument must be non-negative")
if length == 0 and integer == 0:
return bytes()
if signed and integer < 0:
bits = length * 8
num = (2**bits) + integer
if num <= 0:
raise OverflowError("int too smal to convert")
else:
if integer < 0:
raise OverflowError("can't convert negative int to unsigned")
num = integer
if byteorder not in ('little', 'big'):
raise ValueError("byteorder must be either 'little' or 'big'")
h = b'%x' % num
s = bytes((b'0'*(len(h) % 2) + h).zfill(length*2).decode('hex'))
if signed:
high_set = s[0] & 0x80
if integer > 0 and high_set:
raise OverflowError("int too big to convert")
if integer < 0 and not high_set:
raise OverflowError("int too small to convert")
if len(s) > length:
raise OverflowError("int too big to convert")
return s if byteorder == 'big' else s[::-1]
def int_from_bytes(mybytes, byteorder='big', signed=False):
"""
Return the integer represented by the given array of bytes.
The mybytes argument must either support the buffer protocol or be an
iterable object producing bytes. Bytes and bytearray are examples of
built-in objects that support the buffer protocol.
The byteorder argument determines the byte order used to represent the
integer. If byteorder is 'big', the most significant byte is at the
beginning of the byte array. If byteorder is 'little', the most
significant byte is at the end of the byte array. To request the
native byte order of the host system, use `sys.byteorder' as the byte
order value.
The signed keyword-only argument indicates whether two's complement is
used to represent the integer.
"""
if byteorder not in ('little', 'big'):
raise ValueError("byteorder must be either 'little' or 'big'")
if isinstance(mybytes, unicode):
raise TypeError("cannot convert unicode objects to bytes")
# mybytes can also be passed as a sequence of integers on Py3.
# Test for this:
elif isinstance(mybytes, collections.Iterable):
mybytes = bytes(mybytes)
b = mybytes if byteorder == 'big' else mybytes[::-1]
if len(b) == 0:
b = b'\x00'
# The encode() method has been disabled by newbytes, but Py2's
# str has it:
num = int(b.encode('hex'), 16)
if signed and (b[0] & 0x80):
num = num - (2 ** (len(b)*8))
return num
try:
from collections import OrderedDict
except ImportError:
from .ordereddict import OrderedDict
try:
from logging import NullHandler
except ImportError:
from .nullhandler import NullHandler
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, str('temporary_class'), (), {})
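
# Minimal usage sketch for with_metaclass (class names are hypothetical):
#
#   class Meta(type):
#       pass
#
#   class Base(object):
#       pass
#
#   class MyClass(with_metaclass(Meta, Base)):
#       # behaves as if declared with metaclass Meta on both Python 2 and 3
#       pass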
|
tests/test_gammatone_filters.py | glhr/gammatone | 176 | 11185889 | #!/usr/bin/env python3
# Copyright 2014 <NAME>, <EMAIL>
#
# This file is part of the gammatone toolkit, and is licensed under the 3-clause
# BSD license: https://github.com/detly/gammatone/blob/master/COPYING
import nose
import numpy as np
import scipy.io
from pkg_resources import resource_stream
import gammatone.filters
REF_DATA_FILENAME = 'data/test_erb_filter_data.mat'
INPUT_KEY = 'erb_filter_inputs'
RESULT_KEY = 'erb_filter_results'
INPUT_COLS = ('fs', 'cfs')
RESULT_COLS = ('fcoefs',)
def load_reference_data():
""" Load test data generated from the reference code """
# Load test data
with resource_stream(__name__, REF_DATA_FILENAME) as test_data:
data = scipy.io.loadmat(test_data, squeeze_me=False)
zipped_data = zip(data[INPUT_KEY], data[RESULT_KEY])
for inputs, refs in zipped_data:
input_dict = dict(zip(INPUT_COLS, map(np.squeeze, inputs)))
ref_dict = dict(zip(RESULT_COLS, map(np.squeeze, refs)))
yield (input_dict, ref_dict)
def test_make_ERB_filters_known_values():
for inputs, refs in load_reference_data():
args = (
inputs['fs'],
inputs['cfs'],
)
expected = (refs['fcoefs'],)
yield MakeERBFiltersTester(args, expected)
class MakeERBFiltersTester:
def __init__(self, args, expected):
self.fs = args[0]
self.cfs = args[1]
self.expected = expected[0]
self.description = (
"Gammatone filters for {:f}, {:.1f} ... {:.1f}".format(
float(self.fs),
float(self.cfs[0]),
float(self.cfs[-1])
))
def __call__(self):
result = gammatone.filters.make_erb_filters(self.fs, self.cfs)
assert np.allclose(result, self.expected, rtol=1e-6, atol=1e-12)
if __name__ == '__main__':
nose.main()
|
migrations/versions/501983249c94_set_on_delete_cascad.py | vault-the/changes | 443 | 11185891 | """Set ON DELETE CASCADE on Patch.*
Revision ID: 501983249c94
Revises: 403b3fb41569
Create Date: 2013-12-23 16:12:13.610366
"""
# revision identifiers, used by Alembic.
revision = '501983249c94'
down_revision = '403b3fb41569'
from alembic import op
def upgrade():
op.drop_constraint('patch_change_id_fkey', 'patch')
op.create_foreign_key('patch_change_id_fkey', 'patch', 'change', ['change_id'], ['id'], ondelete='CASCADE')
op.drop_constraint('patch_project_id_fkey', 'patch')
op.create_foreign_key('patch_project_id_fkey', 'patch', 'project', ['project_id'], ['id'], ondelete='CASCADE')
op.drop_constraint('patch_repository_id_fkey', 'patch')
op.create_foreign_key('patch_repository_id_fkey', 'patch', 'repository', ['repository_id'], ['id'], ondelete='CASCADE')
def downgrade():
pass
|
app/manifest_worker.py | tappi287/openvr_fsr_app | 146 | 11185895 | import concurrent.futures
import logging
import os
from pathlib import Path
from typing import Optional, List
from .globals import OPEN_VR_DLL, EXE_NAME
from .openvr_mod import get_available_mods
class ManifestWorker:
""" Multi threaded search in steam apps for openvr_api.dll """
max_workers = min(48, int(max(4, os.cpu_count()))) # Number of maximum concurrent workers
chunk_size = 16 # Number of Manifests per worker
@classmethod
def update_steam_apps(cls, steam_apps: dict) -> dict:
app_id_list = list(steam_apps.keys())
# -- Split server addresses into chunks for workers
manifest_ls_chunks = list()
while app_id_list:
# -- Create a list of chunk size number of AppIds
id_chunk_ls = list()
for i in range(min(cls.chunk_size, len(app_id_list))):
id_chunk_ls.append(app_id_list.pop())
# -- Append a list of manifests to search thru in this chunk
manifest_ls_chunks.append(
[steam_apps.get(app_id) for app_id in id_chunk_ls]
)
logging.debug('Using %s worker threads to search for OpenVr Api Dll in %s SteamApps in %s chunks.',
cls.max_workers, len(steam_apps.keys()), len(manifest_ls_chunks))
with concurrent.futures.ThreadPoolExecutor(max_workers=cls.max_workers) as executor:
future_info = {
executor.submit(cls.worker, manifest_ls): manifest_ls for manifest_ls in manifest_ls_chunks
}
for future in concurrent.futures.as_completed(future_info):
manifest_chunk = future_info[future]
try:
manifest_ls = future.result()
except Exception as exc:
if len(manifest_chunk):
logging.error('Chunk %s generated an exception: %s', manifest_chunk[0].get('name'), exc)
else:
logging.error('Worker generated an exception: %s', exc)
else:
if not manifest_ls:
continue
# -- Update SteamApp entries
for manifest in manifest_ls:
steam_apps[manifest.get('appid')] = manifest
return steam_apps
@staticmethod
def worker(manifest_ls):
for manifest in manifest_ls:
manifest['openVrDllPaths'] = list()
manifest['openVrDllPathsSelected'] = list()
manifest['openVr'] = False
# -- Test for valid path
try:
if not manifest['path'] or not Path(manifest['path']).exists():
logging.error('Skipping app with invalid paths: %s', manifest.get('name', 'Unknown'))
continue
except Exception as e:
logging.error('Error reading path for: %s %s', manifest.get('name', 'Unknown'), e)
continue
# -- LookUp OpenVr Api location(s)
try:
open_vr_dll_path_ls = ManifestWorker.find_open_vr_dll(Path(manifest['path']))
except Exception as e:
logging.error('Error locating OpenVR dll for: %s %s', manifest.get('name', 'Unknown'), e)
continue
# -- LookUp Executable location(s)
try:
executable_path_ls = ManifestWorker.find_executables(Path(manifest['path']))
except Exception as e:
logging.error('Error locating Executables for: %s %s', manifest.get('name', 'Unknown'), e)
continue
if open_vr_dll_path_ls:
# -- Add OpenVr path info
manifest['openVrDllPaths'] = [p.as_posix() for p in open_vr_dll_path_ls]
manifest['openVrDllPathsSelected'] = [p.as_posix() for p in open_vr_dll_path_ls]
manifest['openVr'] = True
# -- Add executables path info
manifest['executablePaths'] = [p.as_posix() for p in executable_path_ls]
manifest['executablePathsSelected'] = [p.as_posix() for p in executable_path_ls]
for mod in get_available_mods(manifest):
mod.update_from_disk()
return manifest_ls
@staticmethod
def find_open_vr_dll(base_path: Path) -> List[Optional[Path]]:
open_vr_dll_ls: List[Optional[Path]] = list()
for file in base_path.glob(f'**/{OPEN_VR_DLL}'):
open_vr_dll_ls.append(file)
return open_vr_dll_ls
@staticmethod
def find_executables(base_path: Path) -> List[Optional[Path]]:
executable_ls: List[Optional[Path]] = list()
for file in base_path.glob(f'**/{EXE_NAME}'):
executable_ls.append(file)
return executable_ls
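
# Hedged usage sketch (the SteamApp entry below is made up; real entries come
# from parsed Steam appmanifest files):
#
#   apps = {'620980': {'appid': '620980', 'name': 'Some VR Game',
#                      'path': 'C:/Steam/steamapps/common/SomeVRGame'}}
#   apps = ManifestWorker.update_steam_apps(apps)
#   apps['620980']['openVr']   # True if an openvr_api.dll was found under path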
|
Trakttv.bundle/Contents/Libraries/Shared/oem_framework/models/core/base/mapping.py | disrupted/Trakttv.bundle | 1,346 | 11185925 | from oem_framework.models.core.base.model import Model
from oem_framework.models.core.mixins.names import NamesMixin
class BaseMapping(Model, NamesMixin):
__slots__ = ['collection']
def __init__(self, collection):
self.collection = collection
def to_dict(self, key=None, flatten=True):
raise NotImplementedError
|
happytransformer/wp/__init__.py | TheCuriousNerd/happy-transformer | 277 | 11185935 | from .trainer import WPTrainer
from .default_args import ARGS_WP_TRAIN, ARGS_WP_EVAl, ARGS_WP_TEST
name = "happytransformer.mwp"
|
timemachines/skaters/rvr/rvrsarimax.py | iklasky/timemachines | 253 | 11185944 | from timemachines.skaters.rvr.rvrinclusion import using_river
from timemachines.skatertools.utilities.conventions import Y_TYPE, A_TYPE, R_TYPE, E_TYPE, T_TYPE, wrap
if using_river:
from river import linear_model, optim, preprocessing, time_series
from timemachines.skatertools.components.parade import parade
from timemachines.skatertools.utilities.nonemath import nonecast, nonecenter
def rvr_sarimax_factory(y :Y_TYPE, s, k:int, a:A_TYPE =None,
t:T_TYPE =None, e:E_TYPE =None, r:R_TYPE=None,
p:int=0, d:int=0, q:int=0, m=1, sp:int=0, sq:int=0,
intercept_init=110, optim_sgd=0.01, intercept_lr=0.3 ):
y = wrap(y)
a = wrap(a)
if not s.get('k'):
s['k']=k
s['n'] = len(y)
s['p']={} # parade
s['model']=None
else:
assert len(y) == s['n']
assert k == s['k']
if y is None:
return None, s, None
else:
model = s.get('model')
if model is None:
model = time_series.SNARIMAX(p=p, d=d, q=q, m=m, sp=sp, sq=sq,
regressor=(
preprocessing.StandardScaler() |
linear_model.LinearRegression(
intercept_init=intercept_init,
optimizer=optim.SGD(optim_sgd),
intercept_lr=intercept_lr
)
))
x = model.forecast(horizon=1)
_we_ignore_bias, x_std, s['p'] = parade(p=s['p'], x=x, y=y[0])
x_std_fallback = nonecast(x_std, fill_value=1.0)
model = model.learn_one(x=None,y=y[0])
s['model'] = model
return x, x_std_fallback, s
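
# Hedged usage sketch (requires the optional `river` dependency; the series is
# synthetic and the hyper-parameters are arbitrary):
#
#   s = {}
#   for yi in [1.0, 2.0, 1.5, 2.2, 1.8]:
#       x, x_std, s = rvr_sarimax_factory(y=yi, s=s, k=1, p=1, d=0, q=1)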
|
recipes/Python/162224_Check_HTTP_Content_Type/recipe-162224.py | tdiprima/code | 2,023 | 11186001 | import urllib
from types import *
def iscontenttype(URLorFile,contentType='text'):
"""
Return true or false (1 or 0) based on HTTP Content-Type.
Accepts either a url (string) or a "urllib.urlopen" file.
Defaults to 'text' type.
Only looks at start of content-type, so you can be as vague or precise
as you want.
For example, 'image' will match 'image/gif' or 'image/jpg'.
"""
result = 1
try:
if type(URLorFile) == StringType:
file=urllib.urlopen(URLorFile)
else:
file = URLorFile
testType=file.info().getheader("Content-Type")
if testType and testType.find(contentType) == 0:
result=1
else:
result=0
if type(URLorFile) == StringType:
file.close()
return result
except:
return 0
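
# Example usage (network access assumed; the URL is illustrative):
#   if iscontenttype('http://www.python.org/images/python-logo.gif', 'image'):
#       print 'served with an image content-type'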
|
peering/migrations/0031_auto_20190227_2210.py | schiederme/peering-manager | 173 | 11186010 | <filename>peering/migrations/0031_auto_20190227_2210.py
# Generated by Django 2.1.7 on 2019-02-27 21:10
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("peering", "0030_directpeeringsession_router")]
operations = [
migrations.AlterModelOptions(
name="directpeeringsession",
options={"ordering": ["autonomous_system", "ip_address"]},
),
migrations.AlterModelOptions(
name="internetexchangepeeringsession",
options={"ordering": ["autonomous_system", "ip_address"]},
),
]
|
aliyun-python-sdk-hcs-mgw/aliyunsdkhcs_mgw/request/v20171024/CreateRemoteRequest.py | yndu13/aliyun-openapi-python-sdk | 1,001 | 11186019 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class CreateRemoteRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'hcs-mgw', '2017-10-24', 'CreateRemote')
self.set_method('POST')
def get_RemoteName(self):
return self.get_query_params().get('RemoteName')
def set_RemoteName(self,RemoteName):
self.add_query_param('RemoteName',RemoteName)
def get_MountPoint(self):
return self.get_query_params().get('MountPoint')
def set_MountPoint(self,MountPoint):
self.add_query_param('MountPoint',MountPoint)
def get_Path(self):
return self.get_query_params().get('Path')
def set_Path(self,Path):
self.add_query_param('Path',Path)
def get_Password(self):
return self.get_query_params().get('Password')
def set_Password(self,Password):
self.add_query_param('Password',Password)
def get_RemoteHost(self):
return self.get_query_params().get('RemoteHost')
def set_RemoteHost(self,RemoteHost):
self.add_query_param('RemoteHost',RemoteHost)
def get_VpcId(self):
return self.get_query_params().get('VpcId')
def set_VpcId(self,VpcId):
self.add_query_param('VpcId',VpcId)
def get_MountType(self):
return self.get_query_params().get('MountType')
def set_MountType(self,MountType):
self.add_query_param('MountType',MountType)
def get_RemoteType(self):
return self.get_query_params().get('RemoteType')
def set_RemoteType(self,RemoteType):
self.add_query_param('RemoteType',RemoteType)
def get_UserName(self):
return self.get_query_params().get('UserName')
def set_UserName(self,UserName):
self.add_query_param('UserName',UserName) |
aleph/logic/diagrams.py | tolgatasci/aleph | 1,213 | 11186028 | import logging
from uuid import uuid4
from flask import render_template
from tempfile import NamedTemporaryFile
from pantomime.types import HTML
from aleph import settings
from aleph.core import archive
from aleph.index.entities import entities_by_ids
log = logging.getLogger(__name__)
FIELDS = ["id", "schema", "properties"]
def publish_diagram(entityset):
embed = render_diagram(entityset)
with NamedTemporaryFile("w") as fh:
fh.write(embed)
fh.flush()
publish_id = uuid4().hex
embed_path = f"embeds/{entityset.id}/{publish_id}.html"
url = archive.publish_file(fh.name, embed_path, mime_type=HTML)
return {"embed": embed, "url": url}
def render_diagram(entityset):
"""Generate an HTML snippet from a diagram object."""
entity_ids = entityset.entities
entities = []
for entity in entities_by_ids(entity_ids, cached=True):
for field in list(entity.keys()):
if field not in FIELDS:
entity.pop(field)
entities.append(entity)
# TODO: add viewport
return render_template(
"diagram.html",
data={
"entities": entities,
"layout": entityset.layout,
"viewport": {"center": {"x": 0, "y": 0}},
},
entityset=entityset,
settings=settings,
)
|
python/ql/test/library-tests/frameworks/pycurl/test.py | RoryPreddyGithubEnterprise/codeql | 643 | 11186108 | <gh_stars>100-1000
import pycurl
c = pycurl.Curl()
c.setopt(pycurl.URL, "url") # $ clientRequestUrlPart="url" |
recipes/Python/578988_Money_Game/recipe-578988.py | tdiprima/code | 2,023 | 11186109 | <filename>recipes/Python/578988_Money_Game/recipe-578988.py
"""A simple money counting game for kids."""
import random
import sys
class Money:
def __init__(self):
pass
@staticmethod
def display_intro():
"""Display the introduction at the start of program execution."""
print('*' * 75)
        print('This is a simple money counting game to help kids learn '
'to count money.')
print('The program helps kids learn various change amounts in US '
'currency.')
print('*' * 75)
def start(self):
"""Randomly display an amount of change and ask how many of each coin
type are needed to equal the amount displayed.
"""
self.display_intro()
currency_amt = random.randint(1, 99)
print('\nHow much change is needed to equal .{0} cents?\n'
.format(str(currency_amt)))
your_total_amt = get_input_values(currency_amt)
if sum(your_total_amt) == 0:
print('Thank you for playing.')
sys.exit(0)
else:
if your_total_amt[0] > 1 or your_total_amt[0] == 0:
quarter_spelling = 'quarters'
else:
quarter_spelling = 'quarter'
if your_total_amt[1] > 1 or your_total_amt[1] == 0:
dime_spelling = 'dimes'
else:
dime_spelling = 'dime'
if your_total_amt[2] > 1 or your_total_amt[2] == 0:
nickel_spelling = 'nickels'
else:
nickel_spelling = 'nickel'
if your_total_amt[3] > 1 or your_total_amt[3] == 0:
penny_spelling = 'pennies'
else:
penny_spelling = 'penny'
print('\nCorrect! You entered {0:d} {1}, {2:d} {3},'
' {4:d} {5} and {6:d} {7}.'.format(your_total_amt[0],
quarter_spelling,
your_total_amt[1],
dime_spelling,
your_total_amt[2],
nickel_spelling,
your_total_amt[3],
penny_spelling))
print('Which equals .{0} cents. Nice job!'
.format(str(currency_amt)))
response = input('\nWould you like to try again? ')
if response.lower() != 'y':
print('Thanks for playing.')
sys.exit(0)
self.start()
def get_input_values(currency_amt):
"""Main logic of the program that tallies the value of each entered
coin. Validation on the values entered is also performed.
"""
quarter = 25
dime = 10
nickel = 5
penny = 1
total_amt = 0
total_quarters = 0
total_dimes = 0
total_nickels = 0
total_pennies = 0
print('Enter change in the form of (25 = quarter, 10 = dime,'
' 5 = nickel, 1 = penny)')
coin_value = input('Enter coin amount: ')
while len(coin_value) > 0:
try:
coin_amt = int(coin_value)
            if coin_amt in (quarter, dime, nickel, penny):
if coin_amt < currency_amt or coin_amt < total_amt:
if (coin_amt + total_amt) <= currency_amt:
if (coin_amt + total_amt) != currency_amt:
if coin_amt == 25:
total_quarters += 1
total_amt += quarter
elif coin_amt == 10:
total_dimes += 1
total_amt += dime
elif coin_amt == 5:
total_nickels += 1
total_amt += nickel
elif coin_amt == 1:
total_pennies += 1
total_amt += penny
else:
print('This is not a valid amount!\n')
print('Enter change in the form of (25 = quarter,'
' 10 = dime, 5 = nickel, 1 = penny)')
coin_value = input('\nEnter coin amount: ')
else:
if coin_amt == 25:
total_quarters += 1
elif coin_amt == 10:
total_dimes += 1
elif coin_amt == 5:
total_nickels += 1
elif coin_amt == 1:
total_pennies += 1
break
else:
print('You have entered more than I currently have'
' totalled up!')
print('\nI currently have a total of .{0} and need to get to .{1}'
.format(str(total_amt), str(currency_amt)))
print('Enter change in the form of (25 = quarter,'
' 10 = dime, 5 = nickel, 1 = penny)')
coin_value = input('\nEnter coin amount: ')
else:
if (coin_amt + total_amt) > currency_amt:
print('You entered more than what I need')
print('Enter change in the form of (25 = quarter,'
' 10 = dime, 5 = nickel, 1 = penny)')
coin_value = input('\nEnter coin amount: ')
if (coin_amt + total_amt) != currency_amt:
print('\nEnter change in the form of (25 = quarter,'
' 10 = dime, 5 = nickel, 1 = penny)')
coin_value = input('\nEnter coin amount: ')
else:
if coin_amt == 25:
total_quarters += 1
elif coin_amt == 10:
total_dimes += 1
elif coin_amt == 5:
total_nickels += 1
elif coin_amt == 1:
total_pennies += 1
break
else:
print('This is not a valid amount!\n')
print('\nEnter change in the form of (25 = quarter,'
' 10 = dime, 5 = nickel, 1 = penny)')
coin_value = input('\nEnter coin amount: ')
except ValueError:
print('This is not a valid amount!')
coin_value = input('\nEnter coin amount: ')
currency_totals = (total_quarters, total_dimes, total_nickels,
total_pennies)
return currency_totals
if __name__ == '__main__':
money_game = Money()
money_game.start()
|
benchmarks/tpch-pyarrow-p.py | ritchie46/connector-x | 565 | 11186139 | """
Usage:
tpch-pyarrow-p.py <num>
Options:
-h --help Show this screen.
--version Show version.
"""
import io
import itertools
import os
from multiprocessing import Pool
from typing import Any, List
import numpy as np
import pyarrow as pa
from contexttimer import Timer
from docopt import docopt
from pyarrow import csv
from sqlalchemy import create_engine
def get_sqls(table: str, count: int) -> List[str]:
sqls = []
split = np.linspace(0, 60000000, num=count + 1, endpoint=True, dtype=int)
for i in range(len(split) - 1):
sqls.append(
f"""SELECT
l_orderkey,
l_partkey,
l_suppkey,
l_linenumber,
l_quantity::float8,
l_extendedprice::float8,
l_discount::float8,
l_tax::float8,
l_returnflag,
l_linestatus,
l_shipdate,
l_commitdate,
l_receiptdate,
l_shipinstruct,
l_shipmode,
l_comment
FROM {table}
WHERE l_orderkey > {split[i]} and l_orderkey <= {split[i+1]}"""
)
return sqls
def func(id: int, conn: str, query: str) -> Any:
engine = create_engine(conn)
conn = engine.connect()
cur = conn.connection.cursor()
store = io.BytesIO()
with Timer() as timer:
cur.copy_expert(f"COPY ({query}) TO STDOUT WITH CSV HEADER;", store)
print(f"[Copy {id}] {timer.elapsed:.2f}s")
store.seek(0)
with Timer() as timer:
df = csv.read_csv(store, read_options=csv.ReadOptions(use_threads=False))
print(f"[Read CSV {id}] {timer.elapsed:.2f}s")
return df
if __name__ == "__main__":
args = docopt(__doc__, version="1.0")
conn = os.environ["POSTGRES_URL"]
table = os.environ["POSTGRES_TABLE"]
queries = get_sqls(table, int(args["<num>"]))
print(f"number of threads: {len(queries)}\nsqls: {queries}")
with Timer() as timer, Pool(len(queries)) as pool:
dfs = pool.starmap(
func, zip(range(len(queries)), itertools.repeat(conn), queries)
)
print(f"[All Jobs] {timer.elapsed:.2f}s")
with Timer() as timer:
df = pa.concat_tables(dfs)
print(f"[Concat] {timer.elapsed:.2f}s")
with Timer() as timer:
df = df.to_pandas()
print(f"[To Pandas] {timer.elapsed:.2f}s")
print(df.head())
|
icevision/models/mmdet/lightning/__init__.py | ai-fast-track/mantisshrimp | 580 | 11186177 | from icevision.models.mmdet.lightning.model_adapter import *
|
examples/getting_started/plot_nested_pipelines.py | vincent-antaki/Neuraxle | 519 | 11186209 | """
Create Nested Pipelines in Neuraxle
================================================
You can create pipelines within pipelines using the composition design pattern.
This demonstrates how to create pipelines within pipelines, and how to access the steps and their
attributes in the nested pipelines.
For more info, see the `thread here <https://stackoverflow.com/questions/28822756/getting-model-attributes-from-scikit-learn-pipeline/58359509#58359509>`__.
..
Copyright 2019, Neuraxio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
..
Thanks to Umaneo Technologies Inc. for their contributions to this Machine Learning
project, visit https://www.umaneo.com/ for more information on Umaneo Technologies Inc.
"""
import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from neuraxle.base import Identity
from neuraxle.pipeline import Pipeline
def main():
np.random.seed(42)
X = np.random.randint(5, size=(100, 5))
# Create and fit the pipeline:
pipeline = Pipeline([
StandardScaler(),
Identity(),
Pipeline([
Identity(),
Identity(), # Note: an Identity step is a step that does nothing.
Identity(), # We use it here for demonstration purposes.
Pipeline([
Identity(),
PCA(n_components=2)
])
])
])
pipeline, X_t = pipeline.fit_transform(X)
# Get the components:
pca_components = pipeline["Pipeline"]["Pipeline"][-1].get_wrapped_sklearn_predictor().components_
assert pca_components.shape == (2, 5)
# Discussion:
# https://stackoverflow.com/questions/28822756/getting-model-attributes-from-scikit-learn-pipeline/58359509#58359509
if __name__ == "__main__":
main()
|
tests/api/product_tests.py | prdonahue/overholt | 1,152 | 11186230 | # -*- coding: utf-8 -*-
"""
tests.api.product_tests
~~~~~~~~~~~~~~~~~~~~~~~
api product tests module
"""
from ..factories import CategoryFactory, ProductFactory
from . import OverholtApiTestCase
class ProductApiTestCase(OverholtApiTestCase):
def _create_fixtures(self):
super(ProductApiTestCase, self)._create_fixtures()
self.category = CategoryFactory()
self.product = ProductFactory(categories=[self.category])
def test_get_products(self):
r = self.jget('/products')
self.assertOkJson(r)
def test_get_product(self):
r = self.jget('/products/%s' % self.product.id)
self.assertOkJson(r)
def test_create_product(self):
r = self.jpost('/products', data={
'name': 'New Product',
'categories': [self.category.id]
})
self.assertOkJson(r)
def test_create_invalid_product(self):
r = self.jpost('/products', data={
'categories': [self.category.id]
})
self.assertBadJson(r)
def test_update_product(self):
r = self.jput('/products/%s' % self.product.id, data={
'name': 'New Product'
})
self.assertOkJson(r)
def test_delete_product(self):
r = self.jdelete('/products/%s' % self.product.id)
self.assertStatusCode(r, 204)
|
bench/convert.py | security-geeks/paratext | 1,145 | 11186350 | <reponame>security-geeks/paratext
#!/usr/bin/env python
import pandas
import pickle
import feather
import h5py
import numpy as np
import scipy.io as sio
import os
import sys
def convert_feather(df, output_filename):
feather.write_dataframe(df, output_filename)
def convert_hdf5(df, output_filename):
X = df.values
f = h5py.File(output_filename, "w")
ds=f.create_dataset("mydataset", X.shape, dtype=X.dtype)
ds[...] = X
def convert_npy(df, output_filename):
X = df.values
np.save(output_filename, X)
def convert_pkl(df, output_filename):
fid = open(output_filename, "wb")
pickle.dump(df, fid)
fid.close()
def convert_mat(df, output_filename):
dd = {key: df[key].values.flatten() for key in df.keys()}
sio.savemat(output_filename, dd)
input_filename = sys.argv[1]
output_filenames = sys.argv[2:]
if not input_filename.endswith(".csv"):
print "input must be a CSV file (by extension)"
sys.exit(1)
df = paratext.load_csv_to_pandas(input_filename, allow_quoted_newlines=True)
for output_filename in output_filenames:
_, extension = os.path.splitext(output_filename)
if extension == ".hdf5":
convert_hdf5(df, output_filename)
elif extension == ".feather":
convert_feather(df, output_filename)
elif extension == ".pkl":
convert_pkl(df, output_filename)
elif extension == ".npy":
convert_npy(df, output_filename)
elif extension == ".mat":
convert_mat(df, output_filename)
else:
print "skipping '%s'; invalid output format '%s'" % (output_filename, extension)
|
lib/stripe_lib/enums.py | goztrk/django-htk | 206 | 11186416 | <reponame>goztrk/django-htk
# Python Standard Library Imports
from enum import Enum
class StripeProductType(Enum):
good = 1
service = 2
class StripePlanInterval(Enum):
day = 1
week = 2
month = 3
year = 4
|
lldb/packages/Python/lldbsuite/test/functionalities/data-formatter/varscript_formatting/helperfunc.py | medismailben/llvm-project | 2,338 | 11186424 | import lldb
def f(value, d):
return "pointer type" if value.GetType().GetTemplateArgumentType(
0).IsPointerType() else "non-pointer type"
|
jupyterlab_git/tests/test_single_file_log.py | bsande6/jupyterlab-git | 1,097 | 11186452 | <reponame>bsande6/jupyterlab-git
from pathlib import Path
from unittest.mock import patch
import pytest
from jupyterlab_git.git import Git
from .testutils import maybe_future
@pytest.mark.asyncio
async def test_single_file_log():
with patch("jupyterlab_git.git.execute") as mock_execute:
# Given
process_output = [
"8852729159bef63d7197f8aa26355b387283cb58",
"Lazy Senior Developer",
"2 hours ago",
"Something",
"0 1 folder/test.txt\x00\x00e6d4eed300811e886cadffb16eeed19588eb5eec",
"Lazy Senior Developer",
"18 hours ago",
"move test.txt to folder/test.txt",
"0 0 \x00test.txt\x00folder/test.txt\x00\x00263f762e0aad329c3c01bbd9a28f66403e6cfa5f",
"Lazy Senior Developer",
"18 hours ago",
"append more to test.txt",
"1 0 test.txt\x00\x00d19001d71bb928ec9ed6ae3fe1bfc474e1b771d0",
"Lazy Senior Developer",
"18 hours ago",
"add test.txt to root",
"1 0 test.txt\x00",
]
mock_execute.return_value = maybe_future((0, "\n".join(process_output), ""))
expected_response = {
"code": 0,
"commits": [
{
"commit": "<KEY>",
"author": "Lazy Senior Developer",
"date": "2 hours ago",
"commit_msg": "Something",
"pre_commit": "e6d4eed300811e886cadffb16eeed19588eb5eec",
"is_binary": False,
"file_path": "folder/test.txt",
},
{
"commit": "e6d4eed300811e886cadffb16eeed19588eb5eec",
"author": "Lazy Senior Developer",
"date": "18 hours ago",
"commit_msg": "move test.txt to folder/test.txt",
"pre_commit": "<KEY>",
"is_binary": False,
"file_path": "folder/test.txt",
"previous_file_path": "test.txt",
},
{
"commit": "<KEY>",
"author": "Lazy Senior Developer",
"date": "18 hours ago",
"commit_msg": "append more to test.txt",
"pre_commit": "d19001d71bb928ec9ed6ae3fe1bfc474e1b771d0",
"is_binary": False,
"file_path": "test.txt",
},
{
"commit": "d19001d71bb928ec9ed6ae3fe1bfc474e1b771d0",
"author": "Lazy Senior Developer",
"date": "18 hours ago",
"commit_msg": "add test.txt to root",
"pre_commit": "",
"is_binary": False,
"file_path": "test.txt",
},
],
}
# When
actual_response = await Git().log(
path=str(Path("/bin/test_curr_path")),
history_count=25,
follow_path="folder/test.txt",
)
# Then
mock_execute.assert_called_once_with(
[
"git",
"log",
"--pretty=format:%H%n%an%n%ar%n%s",
"-25",
"-z",
"--numstat",
"--follow",
"--",
"folder/test.txt",
],
cwd=str(Path("/bin") / "test_curr_path"),
)
assert expected_response == actual_response
|
plyer/platforms/android/devicename.py | lcs-d3v/plyer | 1,184 | 11186455 | <gh_stars>1000+
'''
Module of Android API for plyer.devicename.
'''
from jnius import autoclass
from plyer.facades import DeviceName
Build = autoclass('android.os.Build')
class AndroidDeviceName(DeviceName):
'''
Implementation of Android devicename API.
'''
def _get_device_name(self):
"""
Method to get the device name aka model in an android environment.
Changed the implementation from 'android.provider.Settings.Global' to
'android.os.Build' because 'android.provider.Settings.Global' was
introduced in API 17 whereas 'android.os.Build' is present since API 1
Thereby making this method more backward compatible.
"""
return Build.MODEL
def instance():
'''
Instance for facade proxy.
'''
return AndroidDeviceName()
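# Illustrative usage (a sketch, assuming an Android environment where pyjnius can
# reach the android.os.Build Java class; the printed value is hypothetical):
#
#   impl = instance()
#   print(impl._get_device_name())  # e.g. "Pixel 4a"
#
# In application code this is normally reached through the plyer facade
# (plyer.devicename) rather than by instantiating the platform class directly.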
|
osr2mp4/VideoProcess/Setup.py | ADoesGit/osr2mp4-core | 103 | 11186465 | <filename>osr2mp4/VideoProcess/Setup.py
import numpy as np
from PIL import Image
from recordclass import recordclass
FrameInfo = recordclass("FrameInfo", "cur_time index_hitobj info_index osr_index index_fp obj_endtime x_end y_end, break_index")
CursorEvent = recordclass("CursorEvent", "event old_x old_y")
def get_buffer(img, settings):
np_img = np.frombuffer(img, dtype=np.uint8)
np_img = np_img.reshape((settings.height, settings.width, 4))
pbuffer = Image.frombuffer("RGBA", (settings.width, settings.height), np_img, 'raw', "RGBA", 0, 1)
pbuffer.readonly = False
return np_img, pbuffer
|
generalization/utils/client_data_utils_test.py | garyxcheng/federated | 330 | 11186505 | <filename>generalization/utils/client_data_utils_test.py
# Copyright 2021, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for client_data_utils."""
import collections
import math
from absl.testing import parameterized
import tensorflow as tf
import tensorflow_federated as tff
from generalization.utils import client_data_utils
_unpack_fn = lambda fds: [list(ds) for ds in fds]
_unpack_ds_to_list = lambda ds: [t.numpy() for t in ds]
def _create_test_client_data(num_clients=10, samples_per_client=10):
return tff.simulation.datasets.TestClientData({
str(i): [i + j for j in range(samples_per_client)
] for i in range(num_clients)
})
def _build_fake_elem(x):
return collections.OrderedDict([('a', 1 + x), ('b', 3 + x), ('c', 5.0 + x)])
class TestConvertToTensorSlices(tf.test.TestCase):
"""Test for convert_to_tensor_slices."""
def test_convert_list_of_elems_to_tensor_slices(self):
list_of_elems = [_build_fake_elem(x) for x in range(5)]
tensor_slices = client_data_utils.convert_list_of_elems_to_tensor_slices(
list_of_elems)
self.assertEqual(type(tensor_slices), collections.OrderedDict)
self.assertEqual(list(tensor_slices.keys()), ['a', 'b', 'c'])
self.assertAllEqual(tensor_slices['a'], tf.constant(range(1, 6)))
self.assertAllEqual(tensor_slices['b'], tf.constant(range(3, 8)))
self.assertAllEqual(tensor_slices['c'],
tf.constant(range(5, 10), dtype=float))
def test_convert_dataset_to_tensor_slices_reconstructs_client_data(self):
cd = tff.simulation.datasets.TestClientData({
str(i): collections.OrderedDict(a=[i + j for j in range(5)])
for i in range(6)
})
tensor_slices_dict = dict()
for client_id in cd.client_ids:
local_ds = cd.create_tf_dataset_for_client(client_id)
tensor_slices_dict[
client_id] = client_data_utils.convert_list_of_elems_to_tensor_slices(
list(local_ds))
reconstruct_cd = tff.simulation.datasets.TestClientData(tensor_slices_dict)
self.assertCountEqual(reconstruct_cd.client_ids, cd.client_ids)
self.assertEqual(reconstruct_cd.element_type_structure,
cd.element_type_structure)
for client_id in cd.client_ids:
reconstruct_ds = reconstruct_cd.create_tf_dataset_for_client(client_id)
reconstruct_ds_list = [elem['a'].numpy() for elem in reconstruct_ds]
ds = cd.create_tf_dataset_for_client(client_id)
ds_list = [elem['a'].numpy() for elem in ds]
self.assertEqual(reconstruct_ds_list, ds_list)
class TestSubsampleListByProportion(tf.test.TestCase, parameterized.TestCase):
"""Test for subsample_list_by_proportion."""
@parameterized.product(
input_len=[5, 9], proportion=[0.0, 0.5, 1.0], seed=[0, 1, None])
def test_correct_length(self, input_len, proportion, seed):
input_list = list(range(input_len))
result_list = client_data_utils.subsample_list_by_proportion(
input_list, proportion, seed=seed)
self.assertLen(result_list, round(input_len * proportion))
@parameterized.product(
input_len=[5, 9],
proportion=[0.0, 0.5, 1.0],
)
def test_use_seed(self, input_len, proportion):
input_list = list(range(input_len))
result_list_1 = client_data_utils.subsample_list_by_proportion(
input_list, proportion, seed=1)
result_list_2 = client_data_utils.subsample_list_by_proportion(
input_list, proportion, seed=1)
self.assertEqual(result_list_1, result_list_2)
class InterleaveCreateTFDatasetFromAllClients(tf.test.TestCase,
parameterized.TestCase):
"""Test for `interleave_create_tf_dataset_from_all_clients`."""
@parameterized.named_parameters(('seed=None', None), ('seed=1', 1))
def test_interleave_create_tf_dataset_from_all_clients(self, seed):
client_data = _create_test_client_data(5, 4)
tf_dataset = client_data_utils.interleave_create_tf_dataset_from_all_clients(
cd=client_data, seed=seed)
self.assertIsInstance(tf_dataset, tf.data.Dataset)
expected_dataset = client_data.create_tf_dataset_from_all_clients()
self.assertCountEqual([t.numpy() for t in tf_dataset],
[t.numpy() for t in expected_dataset])
def test_interleave_create_tf_dataset_from_all_clients_uses_random_seed(self):
client_data = _create_test_client_data(5, 4)
tf_dataset1 = client_data_utils.interleave_create_tf_dataset_from_all_clients(
client_data, seed=1)
tf_dataset2 = client_data_utils.interleave_create_tf_dataset_from_all_clients(
client_data, seed=1)
tf_dataset3 = client_data_utils.interleave_create_tf_dataset_from_all_clients(
client_data, seed=2)
# We take only the 'x' elements to do exact comparisons.
dataset1_elements = [t.numpy() for t in tf_dataset1]
dataset2_elements = [t.numpy() for t in tf_dataset2]
dataset3_elements = [t.numpy() for t in tf_dataset3]
self.assertAllEqual(dataset1_elements, dataset2_elements)
self.assertNotAllEqual(dataset1_elements, dataset3_elements)
self.assertCountEqual(dataset1_elements, dataset3_elements)
class HorizontalSplitClientDataTest(tf.test.TestCase, parameterized.TestCase):
"""Test for client_data_utils.horizontal_split_client_data."""
@parameterized.named_parameters(
(f'first_second_ratio={ratio}, num_elems_per_client={elems}', ratio,
elems) for ratio, elems in ((1, 2), (3, 2), (3, 6), (4, 10)))
def test_dataset_has_correct_length(self, ratio, num_elems_per_client):
num_clients = 3
cd = _create_test_client_data(num_clients, num_elems_per_client)
first_cd, second_cd = client_data_utils.horizontal_split_client_data(
cd, first_second_ratio_for_each_client=ratio, shuffle_before_split=True)
self.assertListEqual(cd.client_ids, first_cd.client_ids)
self.assertListEqual(cd.client_ids, second_cd.client_ids)
second_cd_expected_elems_per_client = math.ceil(num_elems_per_client /
(ratio + 1))
first_cd_expected_elems_per_client = num_elems_per_client - second_cd_expected_elems_per_client
for client_id in cd.client_ids:
self.assertLen(
list(first_cd.create_tf_dataset_for_client(client_id)),
first_cd_expected_elems_per_client)
self.assertLen(
list(second_cd.create_tf_dataset_for_client(client_id)),
second_cd_expected_elems_per_client)
@parameterized.named_parameters(
(f'first_second_ratio={ratio}', ratio) for ratio in range(1, 4))
def test_remove_single_elem_input_clients(self, ratio):
cd = tff.simulation.datasets.TestClientData({
'1': [1.0],
'2': [1.0, 2.0],
'3': [1.0, 2.0, 3.0]
})
first_cd, second_cd = client_data_utils.horizontal_split_client_data(
cd,
first_second_ratio_for_each_client=ratio,
remove_single_elem_input_clients=True)
self.assertCountEqual(first_cd.client_ids, ['2', '3'])
self.assertCountEqual(second_cd.client_ids, ['2', '3'])
@parameterized.named_parameters(
('unshuffled', False, None, None),
('unshuffled_with_redundant_seed', False, 1, 2),
('shuffle_with_same_seed', True, 1, 1))
def test_split_is_the_same_when_intended(self, shuffle_before_split,
shuffle_seed1, shuffle_seed2):
num_clients = 3
num_elems_per_client = 10
ratio = 3
cd = _create_test_client_data(num_clients, num_elems_per_client)
first_cd1, second_cd1 = client_data_utils.horizontal_split_client_data(
cd,
first_second_ratio_for_each_client=ratio,
shuffle_before_split=shuffle_before_split,
shuffle_seed=shuffle_seed1)
first_cd2, second_cd2 = client_data_utils.horizontal_split_client_data(
cd,
first_second_ratio_for_each_client=ratio,
shuffle_before_split=shuffle_before_split,
shuffle_seed=shuffle_seed2)
for client_id in cd.client_ids:
self.assertListEqual(
list(first_cd1.create_tf_dataset_for_client(client_id)),
list(first_cd2.create_tf_dataset_for_client(client_id)))
self.assertListEqual(
list(second_cd1.create_tf_dataset_for_client(client_id)),
list(second_cd2.create_tf_dataset_for_client(client_id)))
@parameterized.named_parameters(('unshuffled', False, None),
('unshuffled_with_redundant_seed', False, 1),
('shuffle_with_none_seed', True, None),
('shuffle_with_int_seed', True, 1))
def test_not_reshuffled_when_repeated(self, shuffle_before_split,
shuffle_seed):
num_clients = 3
num_elems_per_client = 10
ratio = 3
cd = _create_test_client_data(num_clients, num_elems_per_client)
first_cd, second_cd = client_data_utils.horizontal_split_client_data(
cd,
first_second_ratio_for_each_client=ratio,
shuffle_before_split=shuffle_before_split,
shuffle_seed=shuffle_seed)
first_cd = first_cd.preprocess(lambda ds: ds.repeat(2))
second_cd = second_cd.preprocess(lambda ds: ds.repeat(2))
for client_id in cd.client_ids:
for preproc_cd in (first_cd, second_cd):
ds = preproc_cd.create_tf_dataset_for_client(client_id)
list_of_ds = list(ds)
self.assertListEqual(list_of_ds[:len(list_of_ds) // 2],
list_of_ds[len(list_of_ds) // 2:])
class HorizontalConcatClientDataTest(tf.test.TestCase, parameterized.TestCase):
"""Test for client_data_utils.horizontal_concat_client_data."""
def test_horizontal_concat(self):
cd1 = _create_test_client_data(5, 3)
cd2 = _create_test_client_data(5, 4)
result_cd = client_data_utils.horizontal_concat_client_data(cd1, cd2)
self.assertCountEqual(result_cd.client_ids, cd1.client_ids)
for client_id in result_cd.client_ids:
expected_data = list(cd1.create_tf_dataset_for_client(client_id)) + list(
cd2.create_tf_dataset_for_client(client_id))
self.assertLen(
list(result_cd.create_tf_dataset_for_client(client_id)),
len(expected_data))
@parameterized.named_parameters(
(f'first_second_ratio={ratio}, num_elems_per_client={elems}', ratio,
elems) for ratio, elems in ((1, 2), (3, 2), (3, 6), (4, 10)))
def test_split_and_concat_are_reversible_up_to_local_order(
self, ratio, elems):
original_cd = _create_test_client_data(5, elems)
cd1, cd2 = client_data_utils.horizontal_split_client_data(
original_cd, first_second_ratio_for_each_client=ratio)
concat_cd = client_data_utils.horizontal_concat_client_data(cd1, cd2)
self.assertCountEqual(concat_cd.client_ids, original_cd.client_ids)
for cid in concat_cd.client_ids:
concat_ds = concat_cd.create_tf_dataset_for_client(cid)
original_ds = original_cd.create_tf_dataset_for_client(cid)
self.assertCountEqual([t.numpy() for t in concat_ds],
[t.numpy() for t in original_ds])
def test_raises_value_error_if_client_ids_are_different(self):
cd1 = _create_test_client_data(5, 3)
cd2 = _create_test_client_data(4, 3)
with self.assertRaises(ValueError):
client_data_utils.horizontal_concat_client_data(cd1, cd2)
class VerticalSubsetClientData(tf.test.TestCase, parameterized.TestCase):
"""Test for client_data_utils.vertical_subset_client_data."""
def test_vertical_subset(self):
cd = _create_test_client_data(5, 3)
subset_client_ids = ['0', '1', '2', '3']
subset_cd = client_data_utils.vertical_subset_client_data(
cd, subset_client_ids)
self.assertCountEqual(subset_cd.client_ids, subset_client_ids)
expected_subset_cd = _create_test_client_data(4, 3)
for cid in subset_client_ids:
result_ds = subset_cd.dataset_computation(cid)
expected_ds = expected_subset_cd.create_tf_dataset_for_client(cid)
self.assertCountEqual([t.numpy() for t in result_ds],
[t.numpy() for t in expected_ds])
def test_raises_value_error_if_client_ids_are_not_subset(self):
cd = _create_test_client_data(5, 3)
subset_client_ids = ['1', '6']
with self.assertRaises(ValueError):
client_data_utils.vertical_subset_client_data(cd, subset_client_ids)
class ThreeWaySplitFromHorizontalSplitTest(tf.test.TestCase,
parameterized.TestCase):
"""Test for client_data_utils.construct_three_way_split_from_predefined_horizontal_split."""
@parameterized.product(
include_unpart_train_for_val=[True, False],
max_elements_per_client=[None, 2])
def test_load_partitioned_tff_original_emnist_client_data(
self, include_unpart_train_for_val, max_elements_per_client):
unpart_clients_proportion = 0.5
part_clients_subsampling_rate = 0.5
num_clients = 5
num_train_elems_per_client = 7
num_val_elems_per_client = 3
train_cd_orig = _create_test_client_data(num_clients,
num_train_elems_per_client)
val_cd_orig = _create_test_client_data(num_clients,
num_val_elems_per_client)
(part_train_cd, part_val_cd, unpart_cd
) = client_data_utils.construct_three_way_split_from_predefined_horizontal_split(
train_cd_orig,
val_cd_orig,
unpart_clients_proportion=unpart_clients_proportion,
part_clients_subsampling_rate=part_clients_subsampling_rate,
include_unpart_train_for_val=include_unpart_train_for_val,
max_elements_per_client=max_elements_per_client)
# Assert the returned client_datas have the correct number of clients.
all_client_ids = train_cd_orig.client_ids
total_clients = len(all_client_ids)
expected_unpart_clients = round(total_clients * unpart_clients_proportion)
expected_part_clients = round((total_clients - expected_unpart_clients) *
part_clients_subsampling_rate)
self.assertLen(part_train_cd.client_ids, expected_part_clients)
self.assertLen(part_val_cd.client_ids, expected_part_clients)
self.assertLen(unpart_cd.client_ids, expected_unpart_clients)
# Assert the correctness of client_ids.
self.assertCountEqual(part_train_cd.client_ids, part_val_cd.client_ids)
# Assert detailed equivalence.
test_part_client_id = part_train_cd.client_ids[0]
part_train_cd_ds = part_train_cd.create_tf_dataset_for_client(
test_part_client_id)
expected_len = len(
list(train_cd_orig.create_tf_dataset_for_client(test_part_client_id)))
if max_elements_per_client is not None:
expected_len = min(max_elements_per_client, expected_len)
self.assertLen(list(part_train_cd_ds), expected_len)
part_val_cd_ds = part_val_cd.create_tf_dataset_for_client(
test_part_client_id)
expected_len = len(
list(val_cd_orig.create_tf_dataset_for_client(test_part_client_id)))
if max_elements_per_client is not None:
expected_len = min(max_elements_per_client, expected_len)
self.assertLen(list(part_val_cd_ds), expected_len)
test_unpart_client_id = unpart_cd.client_ids[0]
unpart_cd_ds = unpart_cd.create_tf_dataset_for_client(test_unpart_client_id)
expected_ds = val_cd_orig.create_tf_dataset_for_client(
test_unpart_client_id)
if include_unpart_train_for_val:
expected_ds = expected_ds.concatenate(
train_cd_orig.create_tf_dataset_for_client(test_unpart_client_id))
expected_len = len(list(expected_ds))
if max_elements_per_client is not None:
expected_len = min(max_elements_per_client, expected_len)
self.assertLen(list(unpart_cd_ds), expected_len)
class CanonicalThreeWayPartitionTest(tf.test.TestCase, parameterized.TestCase):
"""Test for client_data_utils.canonical_three_way_partition_client_data."""
@parameterized.product(
part_clients_subsampling_rate=[0.5, 1.0],
include_unpart_train_for_val=[True, False],
max_elements_per_client=[None, 2])
def test_dataset_has_correct_length(self, part_clients_subsampling_rate,
include_unpart_train_for_val,
max_elements_per_client):
num_clients = 5
num_elems_per_client = 7
unpart_clients_proportion = 0.2
train_val_ratio_intra_client = 4
cd = _create_test_client_data(num_clients, num_elems_per_client)
(part_train_cd, part_val_cd,
unpart_cd) = client_data_utils.canonical_three_way_partition_client_data(
cd,
unpart_clients_proportion=unpart_clients_proportion,
train_val_ratio_intra_client=train_val_ratio_intra_client,
part_clients_subsampling_rate=part_clients_subsampling_rate,
include_unpart_train_for_val=include_unpart_train_for_val,
max_elements_per_client=max_elements_per_client,
)
expected_num_unpart_clients = round(num_clients * unpart_clients_proportion)
expected_num_part_clients = round(
(num_clients - expected_num_unpart_clients) *
part_clients_subsampling_rate)
self.assertLen(part_train_cd.client_ids, expected_num_part_clients)
self.assertLen(part_val_cd.client_ids, expected_num_part_clients)
self.assertLen(unpart_cd.client_ids, expected_num_unpart_clients)
self.assertCountEqual(part_train_cd.client_ids, part_val_cd.client_ids)
self.assertEmpty(set(part_train_cd.client_ids) & set(unpart_cd.client_ids))
self.assertTrue(
set(part_train_cd.client_ids + unpart_cd.client_ids).issubset(
cd.client_ids))
# Fine-grained check:
expected_val_per_client = (num_elems_per_client +
train_val_ratio_intra_client) // (
train_val_ratio_intra_client + 1)
expected_train_per_client = num_elems_per_client - expected_val_per_client
if max_elements_per_client is not None:
expected_train_per_client = min(expected_train_per_client,
max_elements_per_client)
expected_val_per_client = min(expected_val_per_client,
max_elements_per_client)
for client_id in part_train_cd.client_ids:
part_train_ds = part_train_cd.create_tf_dataset_for_client(client_id)
part_val_ds = part_val_cd.create_tf_dataset_for_client(client_id)
part_train_ds_list = list(part_train_ds)
part_val_ds_list = list(part_val_ds)
self.assertLen(part_train_ds_list, expected_train_per_client)
self.assertLen(part_val_ds_list, expected_val_per_client)
if include_unpart_train_for_val:
expected_unpart_len = num_elems_per_client
else:
expected_unpart_len = expected_val_per_client
if max_elements_per_client is not None:
expected_unpart_len = min(max_elements_per_client, expected_unpart_len)
for client_id in unpart_cd.client_ids:
unpart_ds = unpart_cd.create_tf_dataset_for_client(client_id)
unpart_ds_list = list(unpart_ds)
self.assertLen(unpart_ds_list, expected_unpart_len)
@parameterized.product(
part_clients_subsampling_rate=[0.5, 1.0],
include_unpart_train_for_val=[True, False],
max_elements_per_client=[None, 2])
def test_three_way_partition_use_seed(self, part_clients_subsampling_rate,
include_unpart_train_for_val,
max_elements_per_client):
num_clients = 6
num_elems_per_client = 7
unpart_clients_proportion = 0.2
train_val_ratio_intra_client = 4
cd = _create_test_client_data(num_clients, num_elems_per_client)
(part_train_cd, part_val_cd,
unpart_cd) = client_data_utils.canonical_three_way_partition_client_data(
cd,
unpart_clients_proportion=unpart_clients_proportion,
train_val_ratio_intra_client=train_val_ratio_intra_client,
part_clients_subsampling_rate=part_clients_subsampling_rate,
include_unpart_train_for_val=include_unpart_train_for_val,
max_elements_per_client=max_elements_per_client,
seed=1)
part_train_cd_same, part_val_cd_same, unpart_cd_same = client_data_utils.canonical_three_way_partition_client_data(
cd,
unpart_clients_proportion=unpart_clients_proportion,
train_val_ratio_intra_client=train_val_ratio_intra_client,
part_clients_subsampling_rate=part_clients_subsampling_rate,
include_unpart_train_for_val=include_unpart_train_for_val,
max_elements_per_client=max_elements_per_client,
seed=1)
for cd1, cd2 in zip([part_train_cd, part_val_cd, unpart_cd],
[part_train_cd_same, part_val_cd_same, unpart_cd_same]):
self.assertCountEqual(cd1.client_ids, cd2.client_ids)
for client_id in cd1.client_ids:
ds1 = cd1.create_tf_dataset_for_client(client_id)
ds2 = cd2.create_tf_dataset_for_client(client_id)
self.assertCountEqual(_unpack_ds_to_list(ds1), _unpack_ds_to_list(ds2))
class ConstructClientDataFromMappingTest(tf.test.TestCase):
"""Test for `construct_client_data_from_mapping`."""
def test_mapping_with_various_lengths(self):
test_mapping = {str(i): tf.data.Dataset.range(i) for i in range(1, 8, 3)}
cd = client_data_utils.construct_client_data_from_mapping(test_mapping)
self.assertCountEqual(cd.client_ids, [str(i) for i in range(1, 8, 3)])
for cid in cd.client_ids:
local_ds = cd.dataset_computation(cid)
self.assertLen(list(local_ds), int(cid))
self.assertEqual([t.numpy() for t in local_ds], list(range(int(cid))))
global_ds = cd.create_tf_dataset_from_all_clients()
self.assertLen(list(global_ds), 1 + 4 + 7)
self.assertCountEqual([t.numpy() for t in global_ds],
list(range(1)) + list(range(4)) + list(range(7)))
def test_dataset_with_nested_structure(self):
test_data = collections.OrderedDict(
label=([tf.constant(0, dtype=tf.int32)]),
pixels=([tf.zeros((28, 28), dtype=tf.float32)]),
)
ds = tf.data.Dataset.from_tensor_slices(test_data)
test_mapping = {'1': ds, '2': ds}
cd = client_data_utils.construct_client_data_from_mapping(test_mapping)
self.assertCountEqual(cd.client_ids, ('1', '2'))
for cid in cd.client_ids:
local_ds = cd.dataset_computation(cid)
self.assertLen(list(local_ds), 1)
local_data = next(iter(local_ds))
self.assertEqual(local_ds.element_spec, ds.element_spec)
self.assertCountEqual(list(local_data.keys()), list(test_data.keys()))
self.assertAllEqual(local_data['label'], test_data['label'][0])
self.assertAllEqual(local_data['pixels'], test_data['pixels'][0])
def test_dataset_with_very_large_cardinality(self):
"""Test the dataset will not be eagerly computed unexpectedly."""
test_mapping = {
'short': tf.data.Dataset.range(10),
'long': tf.data.Dataset.range(9999999999999999)
}
cd = client_data_utils.construct_client_data_from_mapping(test_mapping)
self.assertCountEqual(cd.client_ids, ('short', 'long'))
short_ds = cd.create_tf_dataset_for_client('short')
self.assertEqual([t.numpy() for t in short_ds], list(range(10)))
long_ds = cd.create_tf_dataset_for_client('long')
long_ds_take_10 = long_ds.take(10)
self.assertEqual([t.numpy() for t in long_ds_take_10], list(range(10)))
class CacheClientDataTest(tf.test.TestCase):
"""Tests for cache_client_data."""
def test_cached_cd_has_same_elements(self):
cd = _create_test_client_data(5, 5)
cached_cd = client_data_utils.cache_client_data(cd)
self.assertCountEqual(cached_cd.client_ids, cd.client_ids)
for cid in cached_cd.client_ids:
cached_cd_ds = cached_cd.create_tf_dataset_for_client(cid)
expected_cd_ds = cd.create_tf_dataset_for_client(cid)
self.assertCountEqual(
_unpack_ds_to_list(cached_cd_ds), _unpack_ds_to_list(expected_cd_ds))
class FederatedDatasetSamplerTest(tf.test.TestCase, parameterized.TestCase):
"""Tests for iterator class FederatedDatasetSampler."""
@parameterized.product(resample=[True, False])
def test_len_with_specified_num_sample_clients(self, resample):
"""Test sampler returns the correct length."""
cd = _create_test_client_data(10, 10)
num_sample_clients = 5
it = client_data_utils.FederatedDatasetSampler(
cd, num_sample_clients, resample=resample)
self.assertLen(next(it), num_sample_clients)
@parameterized.product(resample=[True, False])
def test_len_with_unspecified_num_sample_clients(self, resample):
"""Test sampler returns the correct length when num_sample_clients is not specified."""
cd = _create_test_client_data(10, 10)
it = client_data_utils.FederatedDatasetSampler(
cd, num_sample_clients=None, resample=resample)
self.assertLen(next(it), 10)
@parameterized.product(use_cache=[True, False])
def test_sample_emits_the_same_datasets_if_no_resample(self, use_cache):
"""Test sampler emits the same dataset if resample is False."""
cd = _create_test_client_data(10, 10)
it = client_data_utils.FederatedDatasetSampler(
cd, 5, resample=False, seed=1, use_cache=use_cache)
self.assertListEqual(_unpack_fn(next(it)), _unpack_fn(next(it)))
@parameterized.product(use_cache=[True, False])
def test_sample_emits_different_datasets_if_resample(self, use_cache):
"""Test sampler emits a different dataset if resample is True."""
cd = _create_test_client_data(100, 2)
# This should not be flaky given the seed.
it = client_data_utils.FederatedDatasetSampler(
cd, 50, resample=True, seed=1, use_cache=use_cache)
self.assertNotEqual(_unpack_fn(next(it)), _unpack_fn(next(it)))
@parameterized.product(resample=[True, False], use_cache=[True, False])
def test_two_samplers_with_the_same_int_seed_emit_the_same_datasets(
self, resample, use_cache):
"""Test two samplers emit the same datasets if the seeds are the same integer."""
cd = _create_test_client_data(10, 10)
it1 = client_data_utils.FederatedDatasetSampler(
cd, 5, resample=resample, seed=1, use_cache=use_cache)
it2 = client_data_utils.FederatedDatasetSampler(
cd, 5, resample=resample, seed=1, use_cache=use_cache)
for _ in range(3):
self.assertListEqual(_unpack_fn(next(it1)), _unpack_fn(next(it2)))
@parameterized.product(resample=[True, False], use_cache=[True, False])
def test_two_samplers_with_none_seed_emit_different_datasets(
self, resample, use_cache):
"""Test two samplers emit different datasets if seed is the None."""
cd = _create_test_client_data(100, 2)
it1 = client_data_utils.FederatedDatasetSampler(
cd, 50, resample=resample, seed=None, use_cache=use_cache)
it2 = client_data_utils.FederatedDatasetSampler(
cd, 50, resample=resample, seed=None, use_cache=use_cache)
for _ in range(3):
      # This test may be flaky. With <1e-29 probability this test may fail.
self.assertNotEqual(_unpack_fn(next(it1)), _unpack_fn(next(it2)))
@parameterized.product(resample=[True, False], use_cache=[True, False])
def test_two_samplers_with_different_seeds_emit_different_datasets(
self, resample, use_cache):
"""Test two samplers emit different datasets if seeds are different."""
cd = _create_test_client_data(100, 2)
it1 = client_data_utils.FederatedDatasetSampler(
cd, 50, resample=resample, seed=0, use_cache=use_cache)
it2 = client_data_utils.FederatedDatasetSampler(
cd, 50, resample=resample, seed=1, use_cache=use_cache)
for _ in range(3):
self.assertNotEqual(_unpack_fn(next(it1)), _unpack_fn(next(it2)))
if __name__ == '__main__':
tf.test.main()
|
scripts/run_parsers.py | joye1503/cocrawler | 166 | 11186521 | <reponame>joye1503/cocrawler
'''
Runs all of the available parsers over a tree of html
Accumulate cpu time
Compare counts of urls and embeds
'''
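# Example invocation (paths are placeholders): each argument may be a single
# .html/.htm file or a directory tree that is walked recursively, e.g.
#
#   python run_parsers.py /path/to/crawled/html page.html
#
# Timing and link-count comparisons are printed via cocrawler.stats at the end.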
import sys
import os
import logging
import functools
from bs4 import BeautifulSoup
import cocrawler.stats as stats
import cocrawler.parse as parse
def parse_all(name, string):
all_links = []
# warmup
head, body = parse.split_head_body(string)
links, embeds = parse.find_html_links_re(string) # embeds is empty here by design
links, embeds = parse.find_body_links_re(body)
head_soup = BeautifulSoup(head, 'lxml')
body_soup = BeautifulSoup(body, 'lxml')
links, embeds = parse.find_head_links_soup(head_soup)
links, embeds = parse.find_body_links_soup(body_soup)
# measurement
with stats.record_burn('split_head_body', url=name):
head, body = parse.split_head_body(string)
with stats.record_burn('find_html_links_re', url=name):
links, embeds = parse.find_html_links_re(string) # embeds is empty here by design
all_links.append(links.union(embeds))
with stats.record_burn('head_soup', url=name):
head_soup = BeautifulSoup(head, 'lxml')
with stats.record_burn('find_head_links_soup', url=name):
head_links, head_embeds = parse.find_head_links_soup(head_soup)
body = '<html>' + body # because a closing tag at the start of body screws up lxml
with stats.record_burn('find_body_links_re', url=name):
links, embeds = parse.find_body_links_re(body)
all_links.append(links.union(embeds).union(head_links).union(head_embeds))
with stats.record_burn('body_soup', url=name):
body_soup = BeautifulSoup(body, 'lxml')
with stats.record_burn('find_body_links_soup', url=name):
links, embeds = parse.find_body_links_soup(body_soup)
all_links.append(links.union(embeds).union(head_links).union(head_embeds))
# evaluation
biggest = functools.reduce(max, [len(x) for x in all_links])
for i, v in enumerate(all_links):
if len(v) == biggest:
biggest_index = i
biggest_links = v
names = 'find_html_links_re', 'find_body_links_re', 'find_body_links_soup'
for i, v in enumerate(all_links):
if len(v) != biggest:
print('{} had different link counts of {} and {}'.format(name, biggest, len(v)))
extra1 = v.difference(biggest_links)
extra2 = biggest_links.difference(v)
if extra1:
print(' extra in {}: {!r}'.format(names[i], extra1))
else:
print(' count was {} for {}'.format(len(v), names[i]))
if extra2:
print(' extra in {}: {!r}'.format(names[biggest_index], extra2))
else:
print(' count was {} for {}'.format(len(biggest_links), names[biggest_index]))
LOGGER = logging.getLogger(__name__)
levels = [logging.ERROR, logging.WARN, logging.INFO, logging.DEBUG]
logging.basicConfig(level=levels[3])
for d in sys.argv[1:]:
if os.path.isfile(d):
with open(d, 'r', errors='ignore') as fi:
parse_all(d, fi.read())
continue
for root, _, files in os.walk(d):
for f in files:
if f.endswith('.html') or f.endswith('.htm'):
expanded = os.path.join(root, f)
with open(expanded, 'r', errors='ignore') as fi:
parse_all(expanded, fi.read())
stats.report()
|