max_stars_repo_path (string, 4–245 chars) | max_stars_repo_name (string, 7–115 chars) | max_stars_count (int64, 101–368k) | id (string, 2–8 chars) | content (string, 6–1.03M chars)
---|---|---|---|---|
Script/reflect/tool_helper.py | ValtoGameEngines/Fish-Engine | 240 | 12730251 |
from __future__ import print_function
import re
import os

'''CamelCase -> Camel Case
m_fieldOfView -> Field Of View
'''
def CamelCaseToReadable(string):
    # http://stackoverflow.com/questions/1175208/elegant-python-function-to-convert-camelcase-to-snake-case
    if string.startswith('m_'):
        string = string[2:]
    if string[0].islower():
        string = string[0].upper() + string[1:]
    return re.sub('((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))', r' \1', string)


def unittest_CamelCaseToReadable():
    for s in ("m_fieldOfView", "CamelCase", "Camel2Camel2Case", "getHTTPResponseCode", "get2HTTPResponse123Code", "HTTPResponseCodeXYZ"):
        print(s, '==>', CamelCaseToReadable(s))
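# Editor's addition (illustrative only, not in the original file): a minimal check of
# the conversions produced by the regex-based CamelCaseToReadable above.
def _editor_example_expected_output():
    assert CamelCaseToReadable("m_fieldOfView") == "Field Of View"
    assert CamelCaseToReadable("CamelCase") == "Camel Case"
    assert CamelCaseToReadable("getHTTPResponseCode") == "Get HTTP Response Code"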
def UpdateFile(out_path, content):
    need_update = True
    directory = os.path.dirname(out_path)
    if not os.path.exists(directory):
        os.makedirs(directory)
    if os.path.exists(out_path):
        with open(out_path) as f:
            old_content = f.read()
        need_update = (content != old_content)
    if need_update:
        print("update", out_path)
        with open(out_path, 'w') as f:
            f.write(content)
    else:
        print("no update", out_path)


if __name__ == "__main__":
    unittest_CamelCaseToReadable() |
examples/package_example.py | ywchiu/flasgger | 1,696 | 12730263 | # coding: utf-8
"""
This tests the use of a view coming from an installed
package.
"""
from flask import Flask, jsonify

from flasgger import Swagger
from flasgger_package import package_view

app = Flask(__name__)
swag = Swagger(app)

app.add_url_rule(
    '/v1/decorated/<username>',
    view_func=package_view
)


@app.route('/v2/decorated/<username>')
def package_view_2(username):
    """
    This is the summary defined in the yaml file
    The first line is the summary
    All following lines until the hyphens are added to the description
    the format of the first lines until the 3 hyphens will not be yaml compliant
    but everything below the 3 hyphens should be.
    ---
    tags:
      - users
    import: "flasgger_package/parameters.yml"
    responses:
      200:
        description: A single user item
        schema:
          id: rec_username
          properties:
            username:
              type: string
              description: The name of the user
              default: 'steve-harris 2'
    """
    return jsonify({'username': username})
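# --- Editor's usage sketch (not part of the original example) ---
# Shows how the docstring convention above surfaces in the generated spec. The
# '/apispec_1.json' endpoint name is an assumption based on Flasgger's defaults.
def _editor_example_inspect_spec():
    with app.test_client() as http:
        spec = http.get('/apispec_1.json').get_json()  # default Flasgger specs route (assumed)
        assert '/v2/decorated/{username}' in spec['paths']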
def test_swag(client, specs_data):
    """
    This test runs automatically in Travis CI
    :param client: Flask app test client
    :param specs_data: {'url': {swag_specs}} for every spec in app
    """
    for url, spec in specs_data.items():
        assert 'rec_username' in spec['definitions']
        assert 'users' in spec['paths'][
            '/v1/decorated/{username}'
        ]['get']['tags']


if __name__ == "__main__":
    app.run(debug=True)
|
dirigible/sheet/cell.py | EnoX1/dirigible-spreadsheet | 168 | 12730274 | # Copyright (c) 2010 Resolver Systems Ltd, PythonAnywhere LLP
# See LICENSE.md
#
from .formula_interpreter import (
get_dependencies_from_parse_tree,
get_python_formula_from_parse_tree
)
from .parser import FormulaError, parser
class Undefined(object):
def __repr__(self):
return "<undefined>"
undefined = Undefined()
class Cell(object):
def __init__(self):
self.clear()
def _set_formula(self, value):
self._python_formula = None
if value is None:
self._formula = None
elif type(value) == str or type(value) == unicode:
self._formula = value
if value.startswith('='):
try:
parsed_formula = parser.parse(value)
self.dependencies = get_dependencies_from_parse_tree(parsed_formula)
self._python_formula = get_python_formula_from_parse_tree(parsed_formula)
except FormulaError as e:
self.dependencies = []
self._python_formula = '_raise(FormulaError("{}"))'.format(e)
else:
raise TypeError('cell formula must be str or unicode')
def _get_formula(self):
return self._formula
formula = property(_get_formula, _set_formula)
def _set_python_formula(self, value):
if type(value) == str or type(value) == unicode:
self._python_formula = value
else:
raise TypeError('cell python_formula must be str or unicode')
def _get_python_formula(self):
return self._python_formula
python_formula = property(_get_python_formula, _set_python_formula)
def _set_value(self, value):
self._value = value
if value is undefined:
self._set_formatted_value(u'')
else:
self._set_formatted_value(unicode(value))
def _get_value(self):
return self._value
value = property(_get_value, _set_value)
def clear_value(self):
self._value = undefined
def _set_formatted_value(self, value):
if value is None:
self._formatted_value = u''
elif type(value) == str or type(value) == unicode:
self._formatted_value = value
else:
raise TypeError('cell formatted_value must be str or unicode')
def _get_formatted_value(self):
return self._formatted_value
formatted_value = property(_get_formatted_value, _set_formatted_value)
def clear(self):
self._value = undefined
self._formula = None
self._python_formula = None
self.dependencies = []
self._formatted_value = u''
self.error = None
def __repr__(self):
error = ""
if self.error:
error = " error=%r" % (self.error,)
return '<Cell formula=%s value=%r formatted_value=%r%s>' % \
(self.formula, self._value, self.formatted_value, error)
def __eq__(self, other):
return (
isinstance(other, Cell) and
self._formula == other.formula and
self._value == other.value and
self._formatted_value == other.formatted_value and
self.error == other.error
)
def __ne__(self, other):
return not self.__eq__(other)
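# --- Editor's illustrative sketch (not from the original module) ---
# How the `formula` property behaves, inferred from _set_formula above: only strings
# beginning with '=' are parsed; anything else is stored verbatim.
#
#   c = Cell()
#   c.formula = u'hello'      # plain text: stored as-is, python_formula stays None
#   c.formula = u'=A1 + B2'   # parsed: dependencies and python_formula are populated,
#                             # or python_formula becomes a _raise(FormulaError(...)) call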
|
lpot/experimental/metric/bleu.py | intelkevinputnam/lpot-docs | 172 | 12730293 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Official evaluation script for v1.1 of the SQuAD dataset.
https://github.com/allenai/bi-att-flow/blob/master/squad/evaluate-v1.1.py """
import re
import six
import sys
import numpy as np
import pandas as pd
import unicodedata
from .bleu_util import compute_bleu
from .metric import metric_registry
class UnicodeRegex(object):
def __init__(self):
punctuation = self.property_chars("P")
self.nondigit_punct_re = re.compile(r"([^\d])([" + punctuation + r"])")
self.punct_nondigit_re = re.compile(r"([" + punctuation + r"])([^\d])")
self.symbol_re = re.compile("([" + self.property_chars("S") + "])")
def property_chars(self, prefix):
return "".join(six.unichr(x) for x in range(sys.maxunicode)
if unicodedata.category(six.unichr(x)).startswith(prefix))
uregex = UnicodeRegex()
def bleu_tokenize(string):
string = uregex.nondigit_punct_re.sub(r"\1 \2 ", string)
string = uregex.punct_nondigit_re.sub(r" \1 \2", string)
string = uregex.symbol_re.sub(r" \1 ", string)
return string.split()
@metric_registry('BLEU', 'tensorflow')
class BLEU(object):
"""Computes Bilingual Evaluation Understudy Score
BLEU score computation between labels and predictions. An approximate BLEU scoring
method since we do not glue word pieces or decode the ids and tokenize the output.
By default, we use ngram order of 4 and use brevity penalty. Also, this does not
have beam search
"""
def __init__(self):
self.translations = []
self.labels = []
def reset(self):
"""clear preds and labels storage"""
self.translations = []
self.labels = []
def update(self, pred, label):
"""add preds and labels to storage"""
if len(label) != len(pred):
raise ValueError("Reference and translation files have different number "
"of lines. If training only a few steps (100-200), the "
"translation may be empty.")
label = [x.lower() for x in label]
pred = [x.lower() for x in pred]
label = [bleu_tokenize(x) for x in label]
pred = [bleu_tokenize(x) for x in pred]
self.labels.extend(label)
self.translations.extend(pred)
def result(self):
"""calculate metric"""
return compute_bleu(self.labels, self.translations) * 100
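# --- Editor's usage sketch (not part of the original module) ---
# bleu_tokenize("Hello, world!") -> ['Hello', ',', 'world', '!']
# Typical flow for the registered metric:
#   metric = BLEU()
#   metric.update(pred=["the cat sat on the mat"], label=["the cat sat on the mat"])
#   metric.result()   # 100.0 for an exact match (the score is scaled to 0-100)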
|
PS4Joystick/PS4Joystick.py | jensanjo/QuadrupedRobot | 545 | 12730303 |
import sys
import time
import subprocess
import math
from threading import Thread
from collections import OrderedDict, deque
from ds4drv.actions import ActionRegistry
from ds4drv.backends import BluetoothBackend, HidrawBackend
from ds4drv.config import load_options
from ds4drv.daemon import Daemon
from ds4drv.eventloop import EventLoop
from ds4drv.exceptions import BackendError
from ds4drv.action import ReportAction
from ds4drv.__main__ import create_controller_thread
class ActionShim(ReportAction):
""" intercepts the joystick report"""
def __init__(self, *args, **kwargs):
super(ActionShim, self).__init__(*args, **kwargs)
self.timer = self.create_timer(0.02, self.intercept)
self.values = None
self.timestamps = deque(range(10), maxlen=10)
def enable(self):
self.timer.start()
def disable(self):
self.timer.stop()
self.values = None
def load_options(self, options):
pass
def deadzones(self,values):
deadzone = 0.14
if math.sqrt( values['left_analog_x'] ** 2 + values['left_analog_y'] ** 2) < deadzone:
values['left_analog_y'] = 0.0
values['left_analog_x'] = 0.0
if math.sqrt( values['right_analog_x'] ** 2 + values['right_analog_y'] ** 2) < deadzone:
values['right_analog_y'] = 0.0
values['right_analog_x'] = 0.0
return values
def intercept(self, report):
new_out = OrderedDict()
for key in report.__slots__:
value = getattr(report, key)
new_out[key] = value
for key in ["left_analog_x", "left_analog_y",
"right_analog_x", "right_analog_y",
"l2_analog", "r2_analog"]:
new_out[key] = 2*( new_out[key]/255 ) - 1
new_out = self.deadzones(new_out)
self.timestamps.append(new_out['timestamp'])
if len(set(self.timestamps)) <= 1:
self.values = None
else:
self.values = new_out
return True
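# --- Editor's note (illustrative, not part of the original driver) ---
# The 2*(raw/255) - 1 rescaling above maps the 8-bit analog range onto [-1, 1]:
#   raw 0   -> -1.0  (full negative deflection)
#   raw 128 -> ~0.004 (near centre; zeroed by the 0.14 stick deadzone when both axes are centred)
#   raw 255 -> +1.0  (full positive deflection)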
class Joystick:
def __init__(self):
self.thread = None
options = load_options()
if options.hidraw:
raise ValueError("HID mode not supported")
backend = HidrawBackend(Daemon.logger)
else:
subprocess.run(["hciconfig", "hciX", "up"])
backend = BluetoothBackend(Daemon.logger)
backend.setup()
self.thread = create_controller_thread(1, options.controllers[0])
self.thread.controller.setup_device(next(backend.devices))
self.shim = ActionShim(self.thread.controller)
self.thread.controller.actions.append(self.shim)
self.shim.enable()
self._color = (None, None, None)
self._rumble = (None, None)
self._flash = (None, None)
# ensure we get a value before returning
while self.shim.values is None:
pass
def close(self):
if self.thread is None:
return
self.thread.controller.exit("Cleaning up...")
self.thread.controller.loop.stop()
def __del__(self):
self.close()
@staticmethod
def map(val, in_min, in_max, out_min, out_max):
""" helper static method that helps with rescaling """
in_span = in_max - in_min
out_span = out_max - out_min
value_scaled = float(val - in_min) / float(in_span)
value_mapped = (value_scaled * out_span) + out_min
if value_mapped < out_min:
value_mapped = out_min
if value_mapped > out_max:
value_mapped = out_max
return value_mapped
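# --- Editor's worked example (illustrative) ---
# Joystick.map(128, 0, 255, -1.0, 1.0) -> ~0.0039   (rescale an 8-bit value to [-1, 1])
# Joystick.map(300, 0, 255, 0.0, 1.0)  -> 1.0       (out-of-range input is clamped)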
def get_input(self):
""" returns ordered dict with state of all inputs """
if self.thread.controller.error:
raise IOError("Encountered error with controller")
if self.shim.values is None:
raise TimeoutError("Joystick hasn't updated values in last 200ms")
return self.shim.values
def led_color(self, red=0, green=0, blue=0):
""" set RGB color in range 0-255"""
color = (int(red),int(green),int(blue))
if( self._color == color ):
return
self._color = color
self.thread.controller.device.set_led( *self._color )
def rumble(self, small=0, big=0):
""" rumble in range 0-255 """
rumble = (int(small),int(big))
if( self._rumble == rumble ):
return
self._rumble = rumble
self.thread.controller.device.rumble( *self._rumble )
def led_flash(self, on=0, off=0):
""" flash led: on and off times in range 0 - 255 """
flash = (int(on),int(off))
if( self._flash == flash ):
return
self._flash = flash
if( self._flash == (0,0) ):
self.thread.controller.device.stop_led_flash()
else:
self.thread.controller.device.start_led_flash( *self._flash )
if __name__ == "__main__":
j = Joystick()
while 1:
for key, value in j.get_input().items():
print(key,value)
print()
time.sleep(0.1)
|
tests/plugins/trackers/__init__.py | DmitryRibalka/monitorrent | 465 | 12730444 | from monitorrent.plugins.trackers import TrackerSettings
class TrackerSettingsMock(TrackerSettings):
    def get_requests_kwargs(self):
        result = super(TrackerSettingsMock, self).get_requests_kwargs()
        result.pop('timeout')
        result['verify'] = False
        return result
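# Editor's note (inferred, not in the original file): the mock strips the request
# timeout and sets verify=False so tracker tests can talk to local/self-signed fixtures.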
|
pyjobs/marketing/newsletter.py | Mdslino/PyJobs | 132 | 12730477 | from django.conf import settings
import requests
import json

from mailchimp3 import MailChimp


def subscribe_user_to_mailer(profile):
    status = True
    if not settings.MAILERLITE_API_KEY:
        return

    content = json.dumps({"email": profile.user.email})
    headers = {
        "content-type": "application/json",
        "x-mailerlite-apikey": settings.MAILERLITE_API_KEY,
    }

    try:
        req = requests.post(
            "https://api.mailerlite.com/api/v2/subscribers",
            data=content,
            headers=headers,
        )
    except:  # TODO specify which errors can be raised at this point
        status = False

    return status


def subscribe_user_to_chimp(profile):
    status = True
    configs = (
        settings.MAILCHIMP_API_KEY,
        settings.MAILCHIMP_USERNAME,
        settings.MAILCHIMP_LIST_KEY,
    )
    if not all(configs):
        return False

    try:
        client = MailChimp(settings.MAILCHIMP_API_KEY, settings.MAILCHIMP_USERNAME)
        client.lists.members.create(
            settings.MAILCHIMP_LIST_KEY,
            {"status": "subscribed", "email_address": profile.user.email},
        )
    except:  # TODO specify which errors can be raised at this point
        status = False

    return status
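# Editor's usage sketch (illustrative; `profile` is any object exposing .user.email):
#   ok = subscribe_user_to_mailer(profile)
#   # True on success, False if the POST raised, None when MAILERLITE_API_KEY is unset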
|
tests/commands/test_config.py | kapb14/hatch | 2,549 | 12730483 | from click.testing import CliRunner
from hatch.cli import hatch
from hatch.settings import (
SETTINGS_FILE, copy_default_settings, load_settings, save_settings
)
from hatch.utils import temp_chdir, temp_move_path
def test_show_location():
with temp_chdir():
runner = CliRunner()
result = runner.invoke(hatch, ['config'])
assert result.exit_code == 0
assert 'Settings location: ' in result.output
assert 'settings.json' in result.output
def test_restore():
with temp_chdir() as d:
runner = CliRunner()
with temp_move_path(SETTINGS_FILE, d):
result = runner.invoke(hatch, ['config', '--restore'])
assert result.exit_code == 0
assert 'Settings were successfully restored.' in result.output
assert load_settings() == copy_default_settings()
def test_update():
with temp_chdir() as d:
runner = CliRunner()
with temp_move_path(SETTINGS_FILE, d):
new_settings = copy_default_settings()
new_settings.pop('email')
new_settings['new setting'] = ''
save_settings(new_settings)
assert load_settings() == new_settings
result = runner.invoke(hatch, ['config', '-u'])
updated_settings = load_settings()
assert result.exit_code == 0
assert 'Settings were successfully updated.' in result.output
assert 'email' in updated_settings
assert 'new setting' in updated_settings
def test_update_config_not_exist():
with temp_chdir() as d:
runner = CliRunner()
with temp_move_path(SETTINGS_FILE, d):
result = runner.invoke(hatch, ['config', '-u'])
assert result.exit_code == 0
assert 'Settings were successfully restored.' in result.output
assert load_settings() == copy_default_settings()
|
tests/test_core_ants_image_io.py | xemio/ANTsPy | 338 | 12730490 |
"""
Test ants_image.py
nptest.assert_allclose
self.assertEqual
self.assertTrue
"""
import os
import unittest
from common import run_tests
from tempfile import mktemp
import numpy as np
import nibabel as nib
import numpy.testing as nptest
import ants
class TestModule_ants_image_io(unittest.TestCase):
def setUp(self):
img2d = ants.image_read(ants.get_ants_data('r16')).clone('float')
img3d = ants.image_read(ants.get_ants_data('mni')).clone('float')
arr2d = np.random.randn(69,70).astype('float32')
arr3d = np.random.randn(69,70,71).astype('float32')
vecimg2d = ants.from_numpy(np.random.randn(69,70,4), has_components=True)
vecimg3d = ants.from_numpy(np.random.randn(69,70,71,2), has_components=True)
self.imgs = [img2d, img3d]
self.arrs = [arr2d, arr3d]
self.vecimgs = [vecimg2d, vecimg3d]
self.pixeltypes = ['unsigned char', 'unsigned int', 'float']
def tearDown(self):
pass
def test_from_numpy(self):
self.setUp()
# no physical space info
for arr in self.arrs:
img = ants.from_numpy(arr)
self.assertTrue(img.dimension, arr.ndim)
self.assertTrue(img.shape, arr.shape)
self.assertTrue(img.dtype, arr.dtype.name)
nptest.assert_allclose(img.numpy(), arr)
new_origin = tuple([6.9]*arr.ndim)
new_spacing = tuple([3.6]*arr.ndim)
new_direction = np.eye(arr.ndim)*9.6
img2 = ants.from_numpy(arr, origin=new_origin, spacing=new_spacing, direction=new_direction)
self.assertEqual(img2.origin, new_origin)
self.assertEqual(img2.spacing, new_spacing)
nptest.assert_allclose(img2.direction, new_direction)
# test with components
arr2d_components = np.random.randn(69,70,4).astype('float32')
img = ants.from_numpy(arr2d_components, has_components=True)
self.assertEqual(img.components, arr2d_components.shape[-1])
nptest.assert_allclose(arr2d_components, img.numpy())
def test_make_image(self):
self.setUp()
for arr in self.arrs:
voxval = 6.
img = ants.make_image(arr.shape, voxval=voxval)
self.assertTrue(img.dimension, arr.ndim)
self.assertTrue(img.shape, arr.shape)
nptest.assert_allclose(img.mean(), voxval)
new_origin = tuple([6.9]*arr.ndim)
new_spacing = tuple([3.6]*arr.ndim)
new_direction = np.eye(arr.ndim)*9.6
img2 = ants.make_image(arr.shape, voxval=voxval, origin=new_origin, spacing=new_spacing, direction=new_direction)
self.assertTrue(img2.dimension, arr.ndim)
self.assertTrue(img2.shape, arr.shape)
nptest.assert_allclose(img2.mean(), voxval)
self.assertEqual(img2.origin, new_origin)
self.assertEqual(img2.spacing, new_spacing)
nptest.assert_allclose(img2.direction, new_direction)
for ptype in self.pixeltypes:
img = ants.make_image(arr.shape, voxval=1., pixeltype=ptype)
self.assertEqual(img.pixeltype, ptype)
# test with components
img = ants.make_image((69,70,4), has_components=True)
self.assertEqual(img.components, 4)
self.assertEqual(img.dimension, 2)
nptest.assert_allclose(img.mean(), 0.)
img = ants.make_image((69,70,71,4), has_components=True)
self.assertEqual(img.components, 4)
self.assertEqual(img.dimension, 3)
nptest.assert_allclose(img.mean(), 0.)
# set from image
for img in self.imgs:
mask = ants.image_clone( img > img.mean(), pixeltype = 'float' )
arr = img[mask]
img2 = ants.make_image(mask, voxval=arr)
nptest.assert_allclose(img2.numpy(), (img*mask).numpy())
self.assertTrue(ants.image_physical_space_consistency(img2,mask))
# set with arr.ndim > 1
img2 = ants.make_image(mask, voxval=np.expand_dims(arr,-1))
nptest.assert_allclose(img2.numpy(), (img*mask).numpy())
self.assertTrue(ants.image_physical_space_consistency(img2,mask))
#with self.assertRaises(Exception):
# # wrong number of non-zero voxels
# img3 = ants.make_image(img, voxval=arr)
def test_matrix_to_images(self):
# def matrix_to_images(data_matrix, mask):
for img in self.imgs:
imgmask = ants.image_clone( img > img.mean(), pixeltype = 'float' )
data = img[imgmask]
dataflat = data.reshape(1,-1)
mat = np.vstack([dataflat,dataflat]).astype('float32')
imglist = ants.matrix_to_images(mat, imgmask)
nptest.assert_allclose((img*imgmask).numpy(), imglist[0].numpy())
nptest.assert_allclose((img*imgmask).numpy(), imglist[1].numpy())
self.assertTrue(ants.image_physical_space_consistency(img,imglist[0]))
self.assertTrue(ants.image_physical_space_consistency(img,imglist[1]))
# go back to matrix
mat2 = ants.images_to_matrix(imglist, imgmask)
nptest.assert_allclose(mat, mat2)
# test with matrix.ndim > 2
img = img.clone()
img.set_direction(img.direction*2)
imgmask = ants.image_clone( img > img.mean(), pixeltype = 'float' )
arr = (img*imgmask).numpy()
arr = arr[arr>=0.5]
arr2 = arr.copy()
mat = np.stack([arr,arr2])
imglist = ants.matrix_to_images(mat, imgmask)
for im in imglist:
self.assertTrue(ants.allclose(im, imgmask*img))
self.assertTrue(ants.image_physical_space_consistency(im, imgmask))
# test for wrong number of voxels
#with self.assertRaises(Exception):
# arr = (img*imgmask).numpy()
# arr = arr[arr>0.5]
# arr2 = arr.copy()
# mat = np.stack([arr,arr2])
# imglist = ants.matrix_to_images(mat, img)
def test_images_to_matrix(self):
# def images_to_matrix(image_list, mask=None, sigma=None, epsilon=0):
for img in self.imgs:
mask = ants.image_clone( img > img.mean(), pixeltype = 'float' )
imglist = [img.clone(),img.clone(),img.clone()]
imgmat = ants.images_to_matrix(imglist, mask=mask)
self.assertTrue(imgmat.shape[0] == len(imglist))
self.assertTrue(imgmat.shape[1] == (mask>0).sum())
# go back to images
imglist2 = ants.matrix_to_images(imgmat, mask)
for i1,i2 in zip(imglist,imglist2):
self.assertTrue(ants.image_physical_space_consistency(i1,i2))
nptest.assert_allclose(i1.numpy()*mask.numpy(),i2.numpy())
if img.dimension == 2:
# with sigma
mask = ants.image_clone( img > img.mean(), pixeltype = 'float' )
imglist = [img.clone(),img.clone(),img.clone()]
imgmat = ants.images_to_matrix(imglist, mask=mask, sigma=2.)
# with no mask
mask = ants.image_clone( img > img.mean(), pixeltype = 'float' )
imglist = [img.clone(),img.clone(),img.clone()]
imgmat = ants.images_to_matrix(imglist)
# with mask of different shape
s = [65]*img.dimension
mask2 = ants.from_numpy(np.random.randn(*s))
mask2 = mask2 > mask2.mean()
imgmat = ants.images_to_matrix(imglist, mask=mask2)
def test_image_header_info(self):
# def image_header_info(filename):
for img in self.imgs:
img.set_spacing([6.9]*img.dimension)
img.set_origin([3.6]*img.dimension)
tmpfile = mktemp(suffix='.nii.gz')
ants.image_write(img, tmpfile)
info = ants.image_header_info(tmpfile)
self.assertEqual(info['dimensions'], img.shape)
nptest.assert_allclose(info['direction'], img.direction)
self.assertEqual(info['nComponents'], img.components)
self.assertEqual(info['nDimensions'], img.dimension)
self.assertEqual(info['origin'], img.origin)
self.assertEqual(info['pixeltype'], img.pixeltype)
self.assertEqual(info['pixelclass'], 'vector' if img.has_components else 'scalar')
self.assertEqual(info['spacing'], img.spacing)
try:
os.remove(tmpfile)
except:
pass
# test on vector image
img = ants.from_numpy(np.random.randn(69,60,4).astype('float32'), has_components=True)
tmpfile = mktemp(suffix='.nii.gz')
ants.image_write(img, tmpfile)
info = ants.image_header_info(tmpfile)
self.assertEqual(info['dimensions'], img.shape)
nptest.assert_allclose(info['direction'], img.direction)
self.assertEqual(info['nComponents'], img.components)
self.assertEqual(info['nDimensions'], img.dimension)
self.assertEqual(info['origin'], img.origin)
self.assertEqual(info['pixeltype'], img.pixeltype)
self.assertEqual(info['pixelclass'], 'vector' if img.has_components else 'scalar')
self.assertEqual(info['spacing'], img.spacing)
img = ants.from_numpy(np.random.randn(69,60,70,2).astype('float32'), has_components=True)
tmpfile = mktemp(suffix='.nii.gz')
ants.image_write(img, tmpfile)
info = ants.image_header_info(tmpfile)
self.assertEqual(info['dimensions'], img.shape)
nptest.assert_allclose(info['direction'], img.direction)
self.assertEqual(info['nComponents'], img.components)
self.assertEqual(info['nDimensions'], img.dimension)
self.assertEqual(info['origin'], img.origin)
self.assertEqual(info['pixeltype'], img.pixeltype)
self.assertEqual(info['pixelclass'], 'vector' if img.has_components else 'scalar')
self.assertEqual(info['spacing'], img.spacing)
# non-existant file
with self.assertRaises(Exception):
tmpfile = mktemp(suffix='.nii.gz')
ants.image_header_info(tmpfile)
def test_image_clone(self):
for img in self.imgs:
img = ants.image_clone(img, 'unsigned char')
orig_ptype = img.pixeltype
for ptype in self.pixeltypes:
imgcloned = ants.image_clone(img, ptype)
self.assertTrue(ants.image_physical_space_consistency(img,imgcloned))
nptest.assert_allclose(img.numpy(), imgcloned.numpy())
self.assertEqual(imgcloned.pixeltype, ptype)
self.assertEqual(img.pixeltype, orig_ptype)
for img in self.vecimgs:
img = img.clone('unsigned char')
orig_ptype = img.pixeltype
for ptype in self.pixeltypes:
imgcloned = ants.image_clone(img, ptype)
self.assertTrue(ants.image_physical_space_consistency(img,imgcloned))
self.assertEqual(imgcloned.components, img.components)
nptest.assert_allclose(img.numpy(), imgcloned.numpy())
self.assertEqual(imgcloned.pixeltype, ptype)
self.assertEqual(img.pixeltype, orig_ptype)
def test_nibabel(self):
fn = ants.get_ants_data( 'mni' )
ants_img = ants.image_read( fn )
nii_mni = nib.load( fn )
ants_mni = ants_img.to_nibabel()
self.assertTrue( ( ants_mni.get_qform() == nii_mni.get_qform() ).all() )
temp = ants.from_nibabel( nii_mni )
self.assertTrue(ants.image_physical_space_consistency(ants_img,temp))
def test_image_read_write(self):
# def image_read(filename, dimension=None, pixeltype='float'):
# def image_write(image, filename):
# test scalar images
for img in self.imgs:
img = (img - img.min()) / (img.max() - img.min())
img = img * 255.
img = img.clone('unsigned char')
for ptype in self.pixeltypes:
img = img.clone(ptype)
tmpfile = mktemp(suffix='.nii.gz')
ants.image_write(img, tmpfile)
img2 = ants.image_read(tmpfile)
self.assertTrue(ants.image_physical_space_consistency(img,img2))
self.assertEqual(img2.components, img.components)
nptest.assert_allclose(img.numpy(), img2.numpy())
# unsupported ptype
with self.assertRaises(Exception):
ants.image_read(tmpfile, pixeltype='not-suppoted-ptype')
# test vector images
for img in self.vecimgs:
img = (img - img.min()) / (img.max() - img.min())
img = img * 255.
img = img.clone('unsigned char')
for ptype in self.pixeltypes:
img = img.clone(ptype)
tmpfile = mktemp(suffix='.nii.gz')
ants.image_write(img, tmpfile)
img2 = ants.image_read(tmpfile)
self.assertTrue(ants.image_physical_space_consistency(img,img2))
self.assertEqual(img2.components, img.components)
nptest.assert_allclose(img.numpy(), img2.numpy())
# test saving/loading as npy
for img in self.imgs:
tmpfile = mktemp(suffix='.npy')
ants.image_write(img, tmpfile)
img2 = ants.image_read(tmpfile)
self.assertTrue(ants.image_physical_space_consistency(img,img2))
self.assertEqual(img2.components, img.components)
nptest.assert_allclose(img.numpy(), img2.numpy())
# with no json header
arr = img.numpy()
tmpfile = mktemp(suffix='.npy')
np.save(tmpfile, arr)
img2 = ants.image_read(tmpfile)
nptest.assert_allclose(img.numpy(), img2.numpy())
# non-existant file
with self.assertRaises(Exception):
tmpfile = mktemp(suffix='.nii.gz')
ants.image_read(tmpfile)
if __name__ == '__main__':
run_tests()
|
corehq/apps/auditcare/tests/data/auditcare_migration.py | akashkj/commcare-hq | 471 | 12730513 |
from datetime import datetime
navigation_test_docs = [
{
'description': 'Test User',
'extra': {},
'status_code': 200,
'user': '<EMAIL>',
'session_key': '14f8fb95aece47d8341dc561dfd108df',
'ip_address': '0.0.0.0',
'request_path': '/a/test-domain/reports/',
'view_kwargs': {
'domain': 'test-domain'
},
'doc_type': 'NavigationEventAudit',
'headers': {
'REQUEST_METHOD': 'GET',
'SERVER_PORT': '443',
},
'base_type': 'AuditEvent',
'user_agent': 'Mozilla/5.0 (Windows NT 5.1)',
'event_date': '2021-06-01T00:13:01Z',
'view': 'corehq.apps.reports.views.default'
},
{
'description': 'Test User',
'extra': {},
'status_code': 200,
'user': '<EMAIL>',
'session_key': '14f8fb95aece47d8341dc561dfd108df',
'ip_address': '0.0.0.0',
'request_path': '/a/test-domain/reports/',
'view_kwargs': {
'domain': 'test-domain'
},
'doc_type': 'NavigationEventAudit',
'headers': {
'REQUEST_METHOD': 'GET',
'SERVER_PORT': '443',
},
'base_type': 'AuditEvent',
'user_agent': 'Mozilla/5.0 (Windows NT 5.1)',
'event_date': '2021-06-01T01:13:01Z',
'view': 'corehq.apps.reports.views.default'
},
{
'description': 'Test User',
'extra': {},
'status_code': 200,
'user': '<EMAIL>',
'session_key': '14f8fb95aece47d8341dc561dfd108df',
'ip_address': '0.0.0.0',
'request_path': '/a/test-domain/reports/',
'view_kwargs': {
'domain': 'test-domain'
},
'doc_type': 'NavigationEventAudit',
'headers': {
'SERVER_NAME': 'www.commcarehq.org',
'HTTP_ACCEPT_LANGUAGE': 'en-US,en;q=0.8',
'REQUEST_METHOD': 'GET',
'HTTP_ACCEPT_ENCODING': 'gzip,deflate,sdch'
},
'base_type': 'AuditEvent',
'user_agent': 'Mozilla/5.0 (Windows NT 5.1)',
'event_date': '2021-06-01T00:01:00Z',
'view': 'corehq.apps.reports.views.default'
}
]
audit_test_docs = [
{
'http_accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'doc_type': 'AccessAudit',
'description': 'Login Success',
'get_data': [],
'access_type': 'login',
'base_type': 'AuditEvent',
'post_data': [],
'user_agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64)',
'failures_since_start': None,
'event_date': '2021-06-15T04:23:32Z',
'path_info': '/accounts/login/',
'session_key': 'sess_key',
'ip_address': '0.0.0.0',
'user': '<EMAIL>',
'headers': {
'SERVER_NAME': 'www.commcarehq.org',
'HTTP_ACCEPT_LANGUAGE': 'en-US,en;q=0.8',
'REQUEST_METHOD': 'GET',
'HTTP_ACCEPT_ENCODING': 'gzip,deflate,sdch'
},
},
{
'access_type': 'logout',
'ip_address': '0.0.0.0',
'session_key': 'sess_key',
'user_agent': None,
'get_data': [],
'post_data': [],
'http_accept': None,
'path_info': None,
'failures_since_start': None,
'doc_type': 'AccessAudit',
'user': '<EMAIL>',
'base_type': 'AuditEvent',
'event_date': '2021-06-24T00:00:00.15Z',
'description': 'Logout test',
'headers': {}
}
]
failed_docs = [
{
'http_accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'doc_type': 'AccessAudit',
'description': 'Login Success',
'get_data': [],
'access_type': 'login',
'base_type': 'AuditEvent',
'post_data': [],
'user_agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64)',
'failures_since_start': None,
'event_date': '2021-05-15T04:23:32Z',
'path_info': '/accounts/login/',
'session_key': 'sess_key',
'ip_address': '0.0.0.0',
'user': '<EMAIL>',
},
{
'description': 'Test User',
'extra': {},
'status_code': 200,
'user': '<EMAIL>',
'session_key': '14f8fb95aece47d8341dc561dfd108df',
'ip_address': '0.0.0.0',
'request_path': '/a/test-domain/reports/',
'view_kwargs': {
'domain': 'test-domain'
},
'doc_type': 'NavigationEventAudit',
'headers': {
'SERVER_NAME': 'www.commcarehq.org',
'HTTP_ACCEPT_LANGUAGE': 'en-US,en;q=0.8',
'REQUEST_METHOD': 'GET',
'HTTP_ACCEPT_ENCODING': 'gzip,deflate,sdch'
},
'base_type': 'AuditEvent',
'user_agent': 'Mozilla/5.0 (Windows NT 5.1)',
'event_date': '2021-05-01T00:01:00Z',
'view': 'corehq.apps.reports.views.default'
}
]
task_docs = [
{
'doc_type': 'NavigationEventAudit',
'user': '<EMAIL>',
'event_date': datetime(2021, 1, 1).strftime("%Y-%m-%dT%H:%M:%SZ"),
'description': 'User Name',
'extra': {},
'headers': {
'REQUEST_METHOD': 'GET',
},
'ip_address': '10.1.2.3',
'request_path': '/a/delmar/phone/restore/?version=2.0&since=...',
'session_key': 'abc123',
'status_code': 200,
'view_kwargs': {'domain': 'delmar'},
'view': 'corehq.apps.ota.views.restore',
},
{
'doc_type': 'NavigationEventAudit',
'user': '<EMAIL>',
'event_date': datetime(2021, 2, 1, 2).strftime("%Y-%m-%dT%H:%M:%SZ"),
'description': 'User Name',
'extra': {},
'headers': {
'REQUEST_METHOD': 'GET',
},
'ip_address': '10.1.2.3',
'request_path': '/a/test-space/phone/restore/?version=2.0&since=...',
'session_key': 'abc123',
'status_code': 200,
'view_kwargs': {'domain': 'test-space'},
'view': 'corehq.apps.ota.views.restore',
},
{
'doc_type': 'NavigationEventAudit',
'user': '<EMAIL>',
'event_date': datetime(2021, 2, 1, 2, 1).strftime("%Y-%m-%dT%H:%M:%SZ"),
'description': 'User Name',
'extra': {},
'headers': {
'REQUEST_METHOD': 'GET',
},
'ip_address': '10.1.2.3',
'request_path': '/a/random/phone/restore/?version=2.0&since=...',
'session_key': 'abc123',
'status_code': 200,
'view_kwargs': {'domain': 'random'},
'view': 'corehq.apps.ota.views.restore',
},
{
'doc_type': "AccessAudit",
'user': '<EMAIL>',
'event_date': datetime(2021, 2, 1, 3).strftime("%Y-%m-%dT%H:%M:%SZ"),
'access_type': 'login',
'description': 'Login Success',
'failures_since_start': None,
'get_data': [],
'http_accept': 'text/html',
'ip_address': '10.1.3.2',
'path_info': '/a/delmar/login/',
'post_data': [],
'session_key': 'abc123',
'user_agent': 'Mozilla/5.0',
},
{
'doc_type': 'NavigationEventAudit',
'user': '<EMAIL>',
'event_date': datetime(2021, 2, 2).strftime("%Y-%m-%dT%H:%M:%SZ"),
'description': 'User Name',
'extra': {},
'headers': {
'REQUEST_METHOD': 'GET',
},
'ip_address': '10.1.2.3',
'request_path': '/a/sandwich/phone/restore/?version=2.0&since=...&db=/etc/passwd\x00',
'session_key': 'abc123',
'status_code': 200,
'view_kwargs': {'domain': 'sandwich'},
'view': 'corehq.apps.ota.views.restore',
}
]
|
memcnn/config/tests/test_config.py | classner/memcnn | 224 | 12730538 | import unittest
import json
import os
from memcnn.experiment.factory import load_experiment_config, experiment_config_parser
from memcnn.config import Config
import memcnn.config
class ConfigTestCase(unittest.TestCase):
class ConfigTest(Config):
@staticmethod
def get_filename():
return os.path.join(Config.get_dir(), "config.json.example")
def setUp(self):
self.config = ConfigTestCase.ConfigTest()
self.config_fname = os.path.join(os.path.dirname(__file__), "..", "config.json.example")
self.experiments_fname = os.path.join(os.path.dirname(__file__), "..", "experiments.json")
def load_json_file(fname):
with open(fname, 'r') as f:
data = json.load(f)
return data
self.load_json_file = load_json_file
def test_loading_main_config(self):
self.assertTrue(os.path.exists(self.config.get_filename()))
data = self.config
self.assertTrue(isinstance(data, dict))
self.assertTrue("data_dir" in data)
self.assertTrue("results_dir" in data)
def test_loading_experiments_config(self):
self.assertTrue(os.path.exists(self.experiments_fname))
data = self.load_json_file(self.experiments_fname)
self.assertTrue(isinstance(data, dict))
def test_experiment_configs(self):
data = self.load_json_file(self.experiments_fname)
config = self.config
keys = data.keys()
for key in keys:
result = load_experiment_config(self.experiments_fname, [key])
self.assertTrue(isinstance(result, dict))
if "dataset" in result:
experiment_config_parser(result, config['data_dir'])
def test_config_get_filename(self):
self.assertEqual(Config.get_filename(), os.path.join(os.path.dirname(memcnn.config.__file__), "config.json"))
def test_config_get_dir(self):
self.assertEqual(Config.get_dir(), os.path.dirname(memcnn.config.__file__))
def test_verbose(self):
ConfigTestCase.ConfigTest(verbose=True)
if __name__ == '__main__':
unittest.main()
|
dialogue-engine/test/programytest/security/authorise/test_usergroups.py | cotobadesign/cotoba-agent-oss | 104 | 12730557 |
"""
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import unittest
from programy.security.authorise.usergroups import User
from programy.security.authorise.usergroups import Group
from programy.security.authorise.usergroups import Authorisable
class UserGroupTests(unittest.TestCase):
def test_users(self):
user = User("keith")
self.assertEqual("keith", user.userid)
user.roles.append("admin1")
self.assertTrue(user.has_role("admin1"))
self.assertFalse(user.has_role("adminx"))
group = Group("sysadmin")
self.assertFalse(group.has_user("keith"))
self.assertEqual([], user.groups)
user.add_to_group(group)
self.assertTrue(group.has_user("keith"))
self.assertEqual([group], user.groups)
user.add_to_group(group)
self.assertTrue(group.has_user("keith"))
self.assertEqual([group], user.groups)
def test_groups(self):
group = Group("sysadmin")
self.assertEqual("sysadmin", group.groupid)
self.assertFalse(group.has_role("admin2"))
group.roles.append("admin2")
self.assertTrue(group.has_role("admin2"))
self.assertEqual([], group.users)
self.assertFalse(group.has_user("keith"))
self.assertFalse(group.has_user("fred"))
user = User("keith")
group.add_user(user)
self.assertEqual([user], group.users)
self.assertTrue(group.has_user("keith"))
self.assertFalse(group.has_user("fred"))
group.add_user(user)
self.assertEqual([user], group.users)
def test_users_and_groups(self):
user1 = User("keith")
user1.roles.append("admin1")
self.assertTrue(user1.has_role("admin1"))
self.assertFalse(user1.has_role("adminx"))
group1 = Group("sysadmin")
group1.roles.append("admin2")
self.assertTrue(group1.has_role("admin2"))
group2 = Group("operations")
group2.roles.append("audit")
group1.groups.append(group2)
user2 = User("fred")
user2.groups.append(group1)
user2.roles.append("admin3")
self.assertTrue(user2.has_group("sysadmin"))
self.assertTrue(user2.has_role("admin2"))
self.assertTrue(user2.has_role("admin3"))
self.assertFalse(user2.has_role("adminx"))
def test_authorisable(self):
authorisable = Authorisable("testid")
self.assertEqual("testid", authorisable._id)
self.assertEqual([], authorisable.roles)
self.assertEqual([], authorisable.groups)
self.assertEqual([], authorisable.available_roles())
self.assertFalse(authorisable.has_role("user"))
self.assertFalse(authorisable.has_role("admin"))
self.assertFalse(authorisable.has_group("sysadmin"))
self.assertEqual([], authorisable.roles)
authorisable.add_role("user")
self.assertEqual(['user'], authorisable.roles)
authorisable.add_role("user")
self.assertEqual(['user'], authorisable.roles)
self.assertTrue(authorisable.has_role("user"))
group = Group("sysadmin")
group.roles.append("admin")
self.assertEqual([], authorisable.groups)
authorisable.add_group(group)
self.assertEqual([group], authorisable.groups)
authorisable.add_group(group)
self.assertEqual([group], authorisable.groups)
self.assertTrue(authorisable.has_group("sysadmin"))
self.assertTrue(authorisable.has_role("admin"))
self.assertEqual(['user', 'admin'], authorisable.available_roles())
group2 = Group("root")
self.assertFalse(authorisable.has_group("root"))
group.add_group(group2)
self.assertTrue(authorisable.has_group("root"))
|
observations/r/morley.py | hajime9652/observations | 199 | 12730598 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def morley(path):
"""Michelson Speed of Light Data
A classical data of Michelson (but not this one with Morley) on
measurements done in 1879 on the speed of light. The data consists of
five experiments, each consisting of 20 consecutive ‘runs’. The response
is the speed of light measurement, suitably coded (km/sec, with
`299000` subtracted).
A data frame with 100 observations on the following 3 variables.
`Expt`
The experiment number, from 1 to 5.
`Run`
The run number within each experiment.
`Speed`
Speed-of-light measurement.
Details
~~~~~~~
The data is here viewed as a randomized block experiment with
‘experiment’ and ‘run’ as the factors. ‘run’ may also be considered a
quantitative variate to account for linear (or polynomial) changes in
the measurement over the course of a single experiment.
<NAME> (1986) *A Genstat Primer*. London: <NAME>.
<NAME> (1977) Do robust estimators work with real data? *Annals
of Statistics* **5**, 1055–1098. (See Table 6.)
<NAME> (1882) Experimental determination of the velocity of
light made at the United States Naval Academy, Annapolis. *Astronomic
Papers* **1** 135–8. U.S. Nautical Almanac Office. (See Table 24.)
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `morley.csv`.
Returns:
Tuple of np.ndarray `x_train` with 100 rows and 3 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'morley.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/datasets/morley.csv'
maybe_download_and_extract(path, url,
save_file_name='morley.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
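# --- Editor's usage sketch (not part of the original module) ---
# x, metadata = morley('~/observations_data')   # downloads morley.csv on first call
# x.shape             -> (100, 3)                # columns: Expt, Run, Speed
# metadata['columns'] -> the three column names above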
|
mayan/apps/documents/tests/test_document_views.py | atitaya1412/Mayan-EDMS | 343 | 12730623 |
from django.test import override_settings
from ..events import event_document_type_changed, event_document_viewed
from ..permissions import (
permission_document_properties_edit, permission_document_view
)
from .base import GenericDocumentViewTestCase
from .mixins.document_mixins import DocumentViewTestMixin
class DocumentViewTestCase(
DocumentViewTestMixin, GenericDocumentViewTestCase
):
auto_upload_test_document = False
def setUp(self):
super().setUp()
self._create_test_document_stub()
def test_document_properties_view_no_permission(self):
self._clear_events()
response = self._request_test_document_properties_view()
self.assertEqual(response.status_code, 404)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_properties_view_with_access(self):
self.grant_access(
obj=self.test_document, permission=permission_document_view
)
self._clear_events()
response = self._request_test_document_properties_view()
self.assertContains(
response=response, status_code=200, text=self.test_document.label
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_trashed_document_properties_view_with_access(self):
self.grant_access(
obj=self.test_document, permission=permission_document_view
)
self.test_document.delete()
self._clear_events()
response = self._request_test_document_properties_view()
self.assertEqual(response.status_code, 404)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_properties_edit_get_view_no_permission(self):
self._clear_events()
response = self._request_test_document_properties_edit_get_view()
self.assertEqual(response.status_code, 404)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_properties_edit_get_view_with_access(self):
self.grant_access(
permission=permission_document_properties_edit,
obj=self.test_document_type
)
self._clear_events()
response = self._request_test_document_properties_edit_get_view()
self.assertEqual(response.status_code, 200)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_trashed_document_properties_edit_get_view_with_access(self):
self.grant_access(
permission=permission_document_properties_edit,
obj=self.test_document_type
)
self.test_document.delete()
self._clear_events()
response = self._request_test_document_properties_edit_get_view()
self.assertEqual(response.status_code, 404)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
@override_settings(DOCUMENTS_LANGUAGE='fra')
def test_document_properties_view_setting_non_us_language_with_access(self):
self.grant_access(
obj=self.test_document, permission=permission_document_view
)
self._clear_events()
response = self._request_test_document_properties_view()
self.assertContains(
response=response, status_code=200, text=self.test_document.label
)
self.assertContains(
response=response, status_code=200,
text='Language:</label>\n \n \n English'
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
@override_settings(DOCUMENTS_LANGUAGE='fra')
def test_document_properties_edit_get_view_setting_non_us_language_with_access(self):
self.grant_access(
permission=permission_document_properties_edit,
obj=self.test_document_type
)
self._clear_events()
response = self._request_test_document_properties_edit_get_view()
self.assertContains(
response=response, status_code=200,
text='<option value="eng" selected>English</option>',
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_list_view_no_permission(self):
self._clear_events()
response = self._request_test_document_list_view()
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['object_list'].count(), 0)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_list_view_with_access(self):
self.grant_access(
obj=self.test_document, permission=permission_document_view
)
self._clear_events()
response = self._request_test_document_list_view()
self.assertContains(
response=response, status_code=200, text=self.test_document.label
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_trashed_document_list_view_with_access(self):
self.grant_access(
obj=self.test_document, permission=permission_document_view
)
self.test_document.delete()
self._clear_events()
response = self._request_test_document_list_view()
self.assertNotContains(
response=response, status_code=200, text=self.test_document.label
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_type_change_post_view_no_permission(self):
self._create_test_document_type()
document_type = self.test_document.document_type
self._clear_events()
response = self._request_test_document_type_change_post_view()
self.assertEqual(response.status_code, 404)
self.test_document.refresh_from_db()
self.assertEqual(
self.test_document.document_type, document_type
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_type_change_post_view_with_access(self):
self._create_test_document_type()
document_type = self.test_document.document_type
self.grant_access(
obj=self.test_document,
permission=permission_document_properties_edit
)
self._clear_events()
response = self._request_test_document_type_change_post_view()
self.assertEqual(response.status_code, 302)
self.test_document.refresh_from_db()
self.assertNotEqual(
self.test_document.document_type, document_type
)
events = self._get_test_events()
self.assertEqual(events.count(), 1)
self.assertEqual(events[0].action_object, self.test_document_types[1])
self.assertEqual(events[0].actor, self._test_case_user)
self.assertEqual(events[0].target, self.test_document)
self.assertEqual(events[0].verb, event_document_type_changed.id)
def test_trashed_document_document_type_change_post_view_with_access(self):
self._create_test_document_type()
document_type = self.test_document.document_type
self.grant_access(
obj=self.test_document,
permission=permission_document_properties_edit
)
self.test_document.delete()
self._clear_events()
response = self._request_test_document_type_change_post_view()
self.assertEqual(response.status_code, 404)
self.test_document.refresh_from_db()
self.assertEqual(
self.test_document.document_type, document_type
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_type_change_view_get_no_permission(self):
self._create_test_document_type()
document_type = self.test_document.document_type
self._clear_events()
response = self._request_test_document_type_change_get_view()
self.assertEqual(response.status_code, 404)
self.test_document.refresh_from_db()
self.assertEqual(
self.test_document.document_type, document_type
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_type_change_view_get_with_access(self):
self.grant_access(
obj=self.test_document,
permission=permission_document_properties_edit
)
self._clear_events()
response = self._request_test_document_type_change_get_view()
self.assertEqual(response.status_code, 200)
self.test_document.refresh_from_db()
self.assertEqual(
self.test_document.document_type, self.test_document_type
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_trashed_document_type_change_view_get_with_access(self):
self.grant_access(
obj=self.test_document,
permission=permission_document_properties_edit
)
self._create_test_document_type()
document_type = self.test_document.document_type
self.test_document.delete()
self._clear_events()
response = self._request_test_document_type_change_get_view()
self.assertEqual(response.status_code, 404)
self.test_document.refresh_from_db()
self.assertEqual(
self.test_document.document_type, document_type
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_multiple_document_type_change_view_no_permission(self):
self._create_test_document_type()
document_type = self.test_document.document_type
self._clear_events()
response = self._request_test_document_multiple_type_change()
self.assertEqual(response.status_code, 404)
self.test_document.refresh_from_db()
self.assertEqual(
self.test_document.document_type, document_type
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_multiple_document_type_change_view_with_permission(self):
self.grant_access(
obj=self.test_document,
permission=permission_document_properties_edit
)
self._create_test_document_type()
document_type = self.test_document.document_type
self._clear_events()
response = self._request_test_document_multiple_type_change()
self.assertEqual(response.status_code, 302)
self.test_document.refresh_from_db()
self.assertNotEqual(
self.test_document.document_type, document_type
)
events = self._get_test_events()
self.assertEqual(events.count(), 1)
self.assertEqual(events[0].action_object, self.test_document_types[1])
self.assertEqual(events[0].actor, self._test_case_user)
self.assertEqual(events[0].target, self.test_document)
self.assertEqual(events[0].verb, event_document_type_changed.id)
def test_document_preview_view_no_permission(self):
self._clear_events()
response = self._request_test_document_preview_view()
self.assertEqual(response.status_code, 404)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_document_preview_view_with_access(self):
self.grant_access(
obj=self.test_document, permission=permission_document_view
)
self._clear_events()
response = self._request_test_document_preview_view()
self.assertContains(
response=response, status_code=200, text=self.test_document.label
)
events = self._get_test_events()
self.assertEqual(events.count(), 1)
self.assertEqual(events[0].action_object, None)
self.assertEqual(events[0].actor, self._test_case_user)
self.assertEqual(events[0].target, self.test_document)
self.assertEqual(events[0].verb, event_document_viewed.id)
def test_trashed_document_preview_view_with_access(self):
self.grant_access(
obj=self.test_document, permission=permission_document_view
)
self.test_document.delete()
self._clear_events()
response = self._request_test_document_preview_view()
self.assertEqual(response.status_code, 404)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
|
test/test_consistency.py | olliethomas/torchtyping | 881 | 12730626 | import pytest
from torch import rand
from torchtyping import TensorType
from typeguard import typechecked
x = y = None
def test_single():
@typechecked
def func1(x: TensorType["x"], y: TensorType["x"]):
pass
@typechecked
def func2(x: TensorType["x"], y: TensorType["x"]) -> TensorType["x"]:
return x + y
@typechecked
def func3(x: TensorType["x"], y: TensorType["x"]) -> TensorType["x", "x"]:
return x + y
@typechecked
def func4(x: TensorType["x"], y: TensorType["x"]) -> TensorType["x", "x"]:
return x.unsqueeze(0) + y.unsqueeze(1)
@typechecked
def func5(x: TensorType["x"], y: TensorType["x"]) -> TensorType["x", "y"]:
return x
@typechecked
def func6(x: TensorType["x"], y: TensorType["x"]) -> TensorType["y", "x"]:
return x
@typechecked
def func7(x: TensorType["x"]) -> TensorType["x"]:
assert x.shape != (1,)
return rand((1,))
func1(rand(2), rand(2))
func2(rand(2), rand(2))
with pytest.raises(TypeError):
func3(rand(2), rand(2))
func4(rand(2), rand(2))
with pytest.raises(TypeError):
func5(rand(2), rand(2))
with pytest.raises(TypeError):
func6(rand(2), rand(2))
with pytest.raises(TypeError):
func7(rand(3))
def test_multiple():
# Fun fact, this "wrong" func0 is actually a mistype of func1, that torchtyping
# caught for me when I ran the tests!
@typechecked
def func0(x: TensorType["x"], y: TensorType["y"]) -> TensorType["x", "y"]:
return x.unsqueeze(0) + y.unsqueeze(1)
@typechecked
def func1(x: TensorType["x"], y: TensorType["y"]) -> TensorType["x", "y"]:
return x.unsqueeze(1) + y.unsqueeze(0)
@typechecked
def func2(x: TensorType["x", "x"]):
pass
@typechecked
def func3(x: TensorType["x", "x", "x"]):
pass
@typechecked
def func4(x: TensorType["x"], y: TensorType["x", "y"]):
pass
@typechecked
def func5(x: TensorType["x", "y"], y: TensorType["y", "x"]):
pass
@typechecked
def func6(x: TensorType["x"], y: TensorType["y"]) -> TensorType["x", "y"]:
assert not (x.shape == (2,) and y.shape == (3,))
return rand(2, 3)
func0(rand(2), rand(2)) # can't catch this
with pytest.raises(TypeError):
func0(rand(2), rand(3))
with pytest.raises(TypeError):
func0(rand(10), rand(0))
func1(rand(2), rand(2))
func1(rand(2), rand(3))
func1(rand(10), rand(0))
func2(rand(0, 0))
func2(rand(2, 2))
func2(rand(9, 9))
with pytest.raises(TypeError):
func2(rand(0, 4))
func2(rand(1, 4))
func2(rand(3, 4))
func3(rand(0, 0, 0))
func3(rand(2, 2, 2))
func3(rand(9, 9, 9))
with pytest.raises(TypeError):
func3(rand(0, 4, 4))
func3(rand(1, 4, 4))
func3(rand(3, 3, 4))
func4(rand(3), rand(3, 4))
with pytest.raises(TypeError):
func4(rand(3), rand(4, 3))
func5(rand(2, 3), rand(3, 2))
func5(rand(0, 5), rand(5, 0))
func5(rand(2, 2), rand(2, 2))
with pytest.raises(TypeError):
func5(rand(2, 3), rand(2, 3))
func5(rand(2, 3), rand(2, 2))
with pytest.raises(TypeError):
func6(rand(5), rand(3))
|
03-machine-learning-tabular-crossection/06 - Clustering/01/solutions/solutions/solution_05.py | abefukasawa/datascience_course | 331 | 12730629 |
from sklearn.datasets import make_blobs  # import needed for this snippet to run

# `clusters` is assumed to be defined earlier in the exercise notebook (number of centers).
X, _ = make_blobs(n_samples=800, centers=clusters, n_features=3) |
checkmate/checkmate_schedule.py | stjordanis/MONeT-1 | 161 | 12730649 | import torch
from collections import namedtuple
import monet.lm_ops as lm_ops
import numpy as np
from monet.graph import *
from checkmate.checkmate_solver import *
from monet.solver_info import *
from monet.pipelined_solver_info import *
from models.unet import UNet
ScheduleType = namedtuple('ScheduleType', 'recompute store_output delete_nodes store_intermediate')
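# Editor's note (inferred from how the fields are used below): per solver timestep,
# `recompute` -- rerun this node's forward op, `store_output` -- keep its output for a
# later timestep, `delete_nodes` -- solver nodes whose cached outputs can be freed,
# `store_intermediate` -- also keep the op's internal buffers for the backward pass.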
KEEP_FWDOP = False
class Schedule(Graph):
def __init__(self, graph: Graph, info: SolverInfo):
self.si = info
self._nodes = graph.nodes
self.lennodes = len(self.nodes)
self._outputs = graph._outputs
self._op = [] # List of operations
self.bs = -1
# Stored tensors
self._stored = [None for i in range(self.lennodes)]
self._stored_intermediate = [None for i in range(self.lennodes)]
self._bwd_stored = [None for i in range(self.lennodes)]
# Parameters list
self._args = []
self.args_updated = []
# Preprocessing
self.computeInstance = []
for k, n in enumerate(self.nodes):
if isinstance(n,ComputeNode) and not n.op=="aten::t":
self.computeInstance.append(k)
def init_schedule(self, solution: CheckmateSolution, mode):
T = len(self.si.nodes)
# Create main structures
self._op = [None for i in range(self.lennodes)] # List of operations
self._fwd_schedule = [[] for i in range(T)] # Forward schedule
self._bwd_schedule = [[] for i in range(T)] # Backward schedule
self.fwdargs = [None for i in range(self.lennodes)] # Index to forward node input tensor
self.bwdargs = [None for i in range(self.lennodes)] # Index to backward node input tensors
# Initialize forward pass structures
for t in range(T):
for i, n in enumerate(self.nodes):
if isinstance(n, ComputeNode) and n.op != "aten::t":
j = self.si.graph_to_solver[i]
ops_list = lm_ops.list_ops(self.si.mode, n.op)
if isinstance(self.si, PipelinedSolverInfo) and self.si.nodes[j].has_intermediates:
op = ops_list[-1]() # Select intermediate-computing and intermediate-activated operator implementation
else:
op = ops_list[0]() # Select the default operator implementations
if n.is_depthwise:
op.is_depthwise = True
s = solution.s[t+1][j] if t<T-1 else False
r = solution.r[t][j]
f = solution.f[t][j]
schedule_intermediate = False
storage = op.backward_storage
if not isinstance(storage, list):
storage = [storage]
for store in storage:
if isinstance(store, lm_ops.IntermediateStorage):
schedule_intermediate = True
if r or len(f) or s:
self._fwd_schedule[t].append((i,ScheduleType(r, s, f, schedule_intermediate), n.op))
self._op[i] = op
self.fwdargs[i] = [(a.value,None) if isinstance(a, ComputeNode.V) else (a.index,a.requires_grad) for a in n.args]
elif isinstance(n, ComputeNode) and n.op == "aten::t":
pass
else:
# Node represents a parameter
self._fwd_schedule[t].append((i,None,None))
self._op[i] = None
# Initialize backward pass structures
for k, m in reversed(list(enumerate(self.nodes))):
# Create backward dependencies
if isinstance(m, ComputeNode) and m.op != "aten::t":
j = self.si.fwd_to_bwd[self.si.graph_to_solver[k]]
n = self.si.nodes[j]
assert isinstance(n, BwdNode)
self.bwdargs[k] = {'param':[], 'ip':[]}
storage_list = self._op[k].backward_storage
if not isinstance(storage_list, list):
storage_list = [storage_list]
for storage in storage_list:
if isinstance(storage, lm_ops.InputStorage):
for posi, i in enumerate(storage.ids):
idx = m.args[i].index
if (((m.op == "aten::_convolution" and not m.is_depthwise) or m.op == "aten::addmm") and n.bwd_op == "ip_grad"):
self.bwdargs[k]['param'].append((idx, True, False))
if posi == 0:
self.bwdargs[k]['ip'].append((idx,False,False)) # Input tensor for conv/addmm ip grad need not be stored
else:
self.bwdargs[k]['ip'].append((idx,True,False))
else:
self.bwdargs[k]['ip'].append((idx,True,False))
elif isinstance(storage, lm_ops.OutputStorage):
self.bwdargs[k]['ip'].append((k,True,False))
elif isinstance(storage, lm_ops.IntermediateStorage):
self.bwdargs[k]['ip'].append((k,True,True))
# Create backward schedule
for t in range(T):
if isinstance(m, ComputeNode) and m.op != "aten::t":
j = self.si.fwd_to_bwd[self.si.graph_to_solver[k]]
n = self.si.nodes[j]
assert isinstance(n, BwdNode)
s = solution.s[t+1][j] if t<T-1 else False
r = solution.r[t][j]
f = solution.f[t][j]
if (((m.op == "aten::_convolution" and not m.is_depthwise) or m.op == "aten::addmm") and n.bwd_op == "ip_grad"):
s1 = solution.s[t+1][j-1] if t<T-1 else False
if solution.r[t][j-1] or len(solution.f[t][j-1]) or s1:
self._bwd_schedule[t].append((k,ScheduleType(solution.r[t][j-1], s1, solution.f[t][j-1], False),"param"))
if r or len(f) or s:
self._bwd_schedule[t].append((k,ScheduleType(r, s, f, False),"ip"))
elif isinstance(m, ComputeNode) and m.op == "aten::t":
pass
else:
self._bwd_schedule[t].append((k,None,"grad"))
self.opshapes = defaultdict()
for k in self._outputs:
self.opshapes[k] = [self.bs if dim==-1 else dim for dim in self._nodes[k].shape]
def _forward(self, t):
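        # Execute the ops scheduled for solver timestep t: recompute or reuse stored
        # forward activations, free tensors marked for deletion, and run any backward
        # ops interleaved into this timestep.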
tensors = self._stored
bw_tensors = self._bwd_stored
self._stored = [None] * self.lennodes
self._bwd_stored = [None] * self.lennodes
if len(self._fwd_schedule[t]):
for (k,schedule,op_name) in self._fwd_schedule[t]:
if schedule == None:
tensors[k] = self._args[k]
else:
recompute, s, f, si = schedule
if recompute:
args = [a if b==None else tensors[a].requires_grad_(b) for (a,b) in self.fwdargs[k]]
# Checkmate does not reuse params for BN
if op_name == "aten::batch_norm":
self._op[k].params = None
if si:
                            tensors[k], self._stored_intermediate[k] = self._op[k].forward(*args)
else:
tensors[k] = self._op[k].forward(*args)
assert tensors[k] is not None
del args
for u in f:
assert u < self.si.loss
graphu = self.si.solver_to_graph[u]
tensors[graphu] = None
if s:
self._stored[k] = tensors[k]
if len(self._bwd_schedule[t]):
for (k,schedule,optype) in self._bwd_schedule[t]:
if schedule == None:
if bw_tensors[k] is not None and k not in self.args_updated and self._args[k].requires_grad:
assert len(bw_tensors[k]) == 1
for u in bw_tensors[k]:
self._args[k].backward(bw_tensors[k][u])
bw_tensors[k] = None
# self._bwd_stored[k] = None
self.args_updated.append(k)
else:
recompute, s, f, si = schedule
if recompute:
for (idx, checkNone, intmd) in self.bwdargs[k][optype]:
if checkNone:
if intmd:
assert self._stored_intermediate[idx] is not None
else:
assert tensors[idx] is not None
                    stored = [tensors[idx] if not intmd else self._stored_intermediate[idx] for (idx,_,intmd) in self.bwdargs[k][optype]]
grad_nd = self._op[k]
m = self.nodes[k]
if ((m.op == "aten::_convolution" and not m.is_depthwise) or m.op == "aten::addmm"):
if optype == "param":
grad_nd.algorithm = 0
elif optype == "ip":
grad_nd.algorithm = 10
# Call backward
bwd_in = None
if k in self._outputs:
s = [val if val>0 else self.bs for val in list(self.opshapes[k])]
bw_tensors[k] = {-1: torch.ones(s, device=self._args[0].device)}
with torch.no_grad():
assert bw_tensors[k] is not None, "k, t: %d %d " % (k, t)
for u in bw_tensors[k]:
assert bw_tensors[k][u] is not None, "k, u, t: %d %d %s %d " % (k, u, self.si.nodes[self.si.graph_to_solver[k]], t)
                            if bwd_in is None:
bwd_in = bw_tensors[k][u]
else:
bwd_in += bw_tensors[k][u]
assert bwd_in is not None
bw_outs = grad_nd.backward(bwd_in, stored)
del bwd_in
if not isinstance(bw_outs, (list, tuple)):
bw_outs = (bw_outs,)
assert len(bw_outs) == len(self.nodes[k].dependencies), \
"Require the same number of grad outputs as forward inputs" \
" %s (%d) , %s (%d)" % (
repr(bw_outs), len(bw_outs),
repr(self.nodes[k].dependencies), len(self.nodes[k].dependencies))
# Accumulate the backward gradient
for (i, r), o in zip(self.nodes[k].dependencies, bw_outs):
if r:
if o is not None:
if bw_tensors[i] is None:
bw_tensors[i] = {k: o}
else:
bw_tensors[i][k] = o
del grad_nd, bw_outs, o
for u in f:
if u < self.si.loss:
graphu = self.si.solver_to_graph[u]
tensors[graphu] = None
elif u == self.si.loss:
pass
# Do not delete loss nodes
else:
graphu = self.si.solver_to_graph[self.si.bwd_to_fwd[u]]
unode = self.si.nodes[self.si.bwd_to_fwd[u]].gnode
for (i,r) in unode.dependencies:
if isinstance(self.nodes[i], ComputeNode):
bw_tensors[i][graphu] = None
if s:
for (i,r) in self.nodes[k].dependencies:
if r:
if isinstance(self._nodes[i], ComputeNode):
if optype != "param":
assert bw_tensors[i] is not None
assert bw_tensors[i][k] is not None, "%d (%s) should have bwd input from %d (%s)" % (self.si.graph_to_solver[i], self.si.nodes[self.si.graph_to_solver[i]], self.si.graph_to_solver[k], self.si.nodes[self.si.graph_to_solver[k]])
if self._bwd_stored[i] is None:
self._bwd_stored[i] = {k: bw_tensors[i][k]}
else:
self._bwd_stored[i][k] = bw_tensors[i][k]
del tensors, bw_tensors
def forward(self, *args):
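        # Run the full checkpointing schedule one solver timestep at a time; model
        # parameters are passed positionally and looked up through self._args.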
self._args = args
T = self.si.size
fwd_output = None
for t in range(T):
self._forward(t)
for k, n in enumerate(self._nodes):
if k in self.computeInstance and n.op == "aten::batch_norm":
self._op[k].params = None
self.args_updated = []
return fwd_output
def disable_dropout(model):
for m in model.modules():
if isinstance(m, torch.nn.Dropout):
m.p = 0.0
def load_solution(filename):
import pickle
print(f'Loading solver_info, solution from {filename}')
with open(filename, 'rb') as f:
si, solution = pickle.load(f)
return si, solution
if __name__ == '__main__':
import argparse
import torchvision
from time import time
from pathlib import Path
from monet.cvxpy_solver import Solution
parser = argparse.ArgumentParser()
parser.add_argument('model')
parser.add_argument('bs')
parser.add_argument('budget')
parser.add_argument('mode')
parser.add_argument('solver')
parser.add_argument(
"--solution_file", type=str, default="",
help="If specified, load stored solution file.")
parser.add_argument(
"--check_diff", action="store_true",
help="Compute the output (gradient) difference between ours and normal model.")
parser.add_argument(
"--check_runtime", action="store_true",
help="Compute the runtime difference between ours and normal model.")
parser.add_argument(
"--run_bs", action="store_true",
help="Run the given batch size.")
parser.add_argument(
"--pipeline", action="store_true",
help="Pipeline the operator optimization followed by checkpointing")
parser.add_argument(
"--ablation", action="store_true",
help="Do ablation?.")
args = parser.parse_args()
budget = float(args.budget)
import config
config.budget = budget
bs = int(args.bs)
model_name = args.model.split(".")[-1][:-2]
mode = args.mode
print("Memory budget ", budget, " GB")
print("Batch size ", bs)
print("Model", model_name)
print("Mode", mode)
if args.model == 'unet':
height, width = 416, 608
model = UNet(n_channels=3, n_classes=1, height=height, width=width)
else:
height, width = 224, 224
model = eval(args.model, {'torch': torch, 'torchvision': torchvision})
if 'mobilenet_v2' in args.model:
model = torch.nn.Sequential(
model.features,
torch.nn.AdaptiveAvgPool2d((1, 1)), torch.nn.Flatten(start_dim=1),
model.classifier[0], model.classifier[1])
if args.check_diff:
disable_dropout(model)
graph = Graph.create(model, input_shape=(3, height, width))
model.cuda()
solvert = -1
if args.check_diff:
input_ = torch.randn((bs, 3, height, width)).cuda()
if len(args.solution_file) > 0:
solver_info, solution = load_solution(args.solution_file)
else:
solver_info = SolverInfo(bs=bs, model_name=model_name, mode=mode)
solver_info.extract(graph, input_, *list(model.state_dict(keep_vars=True).values()))
solution = solve_ilp_gurobi(solver_info, budget, approx=False, time_limit=86400)
schedule = Schedule(graph, solver_info)
schedule.init_schedule(solution, mode)
x0 = model(input_)
if 'googlenet' in args.model:
(-(x0[0]+x0[1]+x0[2])).sum().backward()
else:
(-x0).sum().backward()
KEEP_FWDOP = True
x1 = schedule.forward(input_, *list(model.state_dict(keep_vars=True).values()))
print('Forward mean absolute difference',
abs(x0[0] - x1).mean() if 'googlenet' in args.model else abs(x0 - x1).mean())
print('original output', x0)
print('ours output', x1)
print('Gradient of normal model',
['{:.5f} {}'.format(float(v.grad.mean()), v.shape)
for v in model.parameters() if v.grad is not None])
if args.check_runtime:
FORWARD_EMPTY_CACHE = False
if len(args.solution_file) > 0:
solver_info, solution = load_solution(args.solution_file)
else:
input_ = torch.randn((bs, 3, height, width)).cuda()
if args.pipeline:
solver_info = PipelinedSolverInfo(bs=bs, model_name=model_name, mode=mode)
else:
solver_info = SolverInfo(bs=bs, model_name=model_name, mode=mode)
solver_info.extract(graph, input_, *list(model.state_dict(keep_vars=True).values()))
solution = solve_ilp_gurobi(solver_info, budget, approx=False, time_limit=86400)
# t0 = time()
# solution = solver_model.solve()
# solvert = time() - t0
del input_
input_ = torch.randn((bs, 3, height, width)).cuda()
torch.cuda.reset_max_memory_allocated()
torch.cuda.synchronize()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
schedule = Schedule(graph, solver_info)
schedule.bs = bs
schedule.init_schedule(solution, mode)
torch.cuda.synchronize()
start_event_monet = torch.cuda.Event(enable_timing=True)
end_event_monet = torch.cuda.Event(enable_timing=True)
for iterid in range(120):
if iterid == 100:
start_event_monet.record()
x1 = schedule.forward(input_, *list(model.state_dict(keep_vars=True).values()))
for v in model.parameters():
v.grad = None
end_event_monet.record()
torch.cuda.synchronize()
del x1
autosave_maxmem = torch.cuda.max_memory_allocated() / 2**20
print("checkmate: %f ms avg, %8.2f MB" % (start_event_monet.elapsed_time(end_event_monet)/20, autosave_maxmem))
exit()
solvert = -1
if args.run_bs:
bs = int(args.bs)
print("Solver trying batch size %d" % bs)
if len(args.solution_file) > 0:
solver_info, solution = load_solution(args.solution_file)
else:
input_ = torch.randn((bs, 3, height, width)).cuda()
if args.pipeline:
solver_info = PipelinedSolverInfo(bs=bs, model_name=model_name, mode=mode)
else:
solver_info = SolverInfo(bs=bs, model_name=model_name, mode=mode)
solver_info.extract(graph, input_, *list(model.state_dict(keep_vars=True).values()))
solution = solve_ilp_gurobi(solver_info, budget, approx=False, time_limit=86400)
del input_
print("Batch size %d feasible" % bs)
print("Solved in %fs with actual opt taking %fs" % (solvert, solution.solve_time))
print("Running schedule for batch size %d" % bs)
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
input_ = torch.randn((bs, 3, height, width)).cuda()
schedule = Schedule(graph, solver_info)
schedule.bs = bs
schedule.init_schedule(solution, mode)
t0 = time()
x1 = schedule.forward(input_, *list(model.state_dict(keep_vars=True).values()))
del input_
del x1
torch.cuda.synchronize()
t1 = time() - t0
print("Ran schedule for batch %d " % bs)
torch.cuda.empty_cache()
mem = torch.cuda.max_memory_allocated() / 2**20
print("Ran batch %d with peak memory %8.2fM, %fs" % (bs, mem, t1))
|
tools/odrive/tests/endstop_manualtest.py | deafloo/ODrive | 1,068 | 12730652 | <reponame>deafloo/ODrive<gh_stars>1000+
import odrive
from odrive.enums import *
from odrive.utils import *
print("finding an odrive...")
odrv0 = odrive.find_any()
print('Odrive found')
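# Manual test setup: configure axis1 for position control and enable the
# min/max endstops (with homing offsets) on both axes.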
odrv0.axis1.controller.config.vel_limit = 50000
odrv0.axis1.controller.config.control_mode = CONTROL_MODE_POSITION_CONTROL
odrv0.axis1.controller.config.input_mode = INPUT_MODE_PASSTHROUGH
odrv0.axis1.encoder.config.cpr = 2400
odrv0.axis1.encoder.config.bandwidth = 1000
odrv0.axis1.motor.config.calibration_current = 5
odrv0.axis1.motor.config.current_lim = 5
odrv0.axis1.controller.config.homing_speed = 5000
odrv0.config.brake_resistance = 0
odrv0.axis0.min_endstop.config.gpio_num = 6
odrv0.axis0.min_endstop.config.enabled = True
odrv0.axis0.min_endstop.config.offset = -1000
odrv0.axis0.max_endstop.config.gpio_num = 5
odrv0.axis0.max_endstop.config.enabled = True
odrv0.axis1.min_endstop.config.gpio_num = 8
odrv0.axis1.min_endstop.config.enabled = True
odrv0.axis1.min_endstop.config.offset = -1000
odrv0.axis1.max_endstop.config.gpio_num = 7
odrv0.axis1.max_endstop.config.enabled = True
odrv0.axis1.config.startup_encoder_offset_calibration = True
odrv0.axis1.config.startup_motor_calibration = True
odrv0.axis1.config.startup_homing = True
odrv0.axis1.config.startup_closed_loop_control = True
|
envi/archs/z80/__init__.py | rnui2k/vivisect | 716 | 12730653 | '''
Z80 architecture base...
'''
|
mods/NERO_Battle/menu.py | SummitChen/opennero | 215 | 12730656 | <filename>mods/NERO_Battle/menu.py<gh_stars>100-1000
import os
import sys
sys.path.append(os.path.join(sys.path[0], os.pardir))
import NERO.menu
import NERO.constants
try:
import wx
except:
import tkMessageBox
tkMessageBox.showwarning('Warning!', 'Could not start the external menu for NERO because wxPython is not installed.')
sys.exit()
class NeroPanel(NERO.menu.NeroPanel):
def __init__(self, parent):
NERO.menu.NeroPanel.__init__(self, parent)
self._sliders['HP'].Enable()
def add_buttons(self):
self.add_button('Load Blue Team', self.OnLoad1)
self.add_button('Load Red Team', self.OnLoad2)
self.add_button('Continue', self.OnPause, disabled=True)
self.add_button('Help', self.OnHelp)
def add_sliders(self):
self.add_slider('Hitpoints', 'HP', span=100, center=0, thumb=NERO.constants.DEFAULT_HITPOINTS)
def OnLoad2(self, event):
dirname = ""
dlg = wx.FileDialog(self, "Red Team: Load Population File", dirname, "", "*.*", wx.FD_OPEN)
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetFilename()
dirname = dlg.GetPath()
self.send("load2 %s" % dirname)
self.loaded2 = True
if self.loaded1 and self.loaded2:
self._buttons['OnPause'].Enable()
if __name__ == '__main__':
print 'creating NERO Battle controls'
app = wx.App(False)
frame = wx.Frame(None, title="NERO Battle Controls", size=(600, 250))
panel = NeroPanel(frame)
frame.Show()
app.MainLoop()
|
code/automate-download/auto-download-cu-covid19.py | eycramer/covid19-forecast-hub | 428 | 12730664 | # Before executing the script, we need urllib3. Run `pip install urllib3`
# A simple script for automatically download the ihme-covid19.zip file and extract it
import requests
import os
import sys
import calendar
import datetime
def download_file_by_date(path, date):
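    # Build the raw.githubusercontent.com URL for each projection scenario on the
    # given date and download whichever files actually exist (HTTP 200).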
# metadata
prefix = "https://raw.githubusercontent.com/shaman-lab/COVID-19Projection/master/Projection_"
suffix = "/cdc_hosp/state_cdchosp_"
raw_list = ["60contact.csv", "70contact.csv", "80contact.csv", "nointerv.csv", "80contact_1x.csv", "80contactw.csv"]
# Check all urls if there's new data on specific date
working_urls = []
for raw_file in raw_list:
url = prefix+date+suffix+raw_file
response = requests.get(url)
working_urls.append(response.status_code)
# Download data
savepath = os.path.join(path, "Projection_"+date+"/cdc_hosp/")
if 200 in working_urls:
if not os.path.exists(savepath):
os.makedirs(savepath)
for raw_file in raw_list:
url = prefix+date+suffix+raw_file
response = requests.get(url)
if response.status_code == 200:
with open(os.path.join(savepath,"state_cdchosp_"+raw_file), "wb") as writer:
for chunk in response:
writer.write(chunk)
writer.close()
return True
else:
return False
def download_recent_CU_data(path):
    # Because the script is run at 1pm every day, it may miss forecasts that
    # are uploaded after 1pm if the script only looks at the current day. We
    # set it up to also look at the day before.
today = datetime.datetime.today() - datetime.timedelta(days=2)
today_date_v1 = calendar.month_name[today.month] + today.strftime('%d')
today_date_v2 = calendar.month_name[today.month] + today.strftime('%d').strip('0')
yesterday = datetime.datetime.today() - datetime.timedelta(days=1)
yesterday_date_v1 = calendar.month_name[yesterday.month] + yesterday.strftime('%d')
yesterday_date_v2 = calendar.month_name[yesterday.month] + yesterday.strftime('%d').strip('0')
# Check for different combination of new data from yesterday (Example: May3 vs May03)
download_with_yesterday_v1_is_successful = download_file_by_date(path, yesterday_date_v1)
download_with_yesterday_v2_is_successful = download_file_by_date(path, yesterday_date_v2)
if (download_with_yesterday_v1_is_successful or download_with_yesterday_v2_is_successful):
print('There is new data from CU on '+yesterday_date_v1)
else:
print('There is no new data from CU on '+yesterday_date_v1)
# Check for different combination of new data from today (Example: May4 vs May04)
download_with_today_v1_is_successful = download_file_by_date(path, today_date_v1)
download_with_today_v2_is_successful = download_file_by_date(path, today_date_v2)
if (download_with_today_v1_is_successful or download_with_today_v2_is_successful):
print('There is new data from CU on '+today_date_v1)
else:
print('There is no new data from CU on '+today_date_v1)
if __name__ == '__main__':
path = sys.argv[1]
download_recent_CU_data(path)
|
examples/stacked_area_examples.py | ahlusar1989/vincent | 1,052 | 12730706 | <reponame>ahlusar1989/vincent
# -*- coding: utf-8 -*-
"""
Vincent Stacked Area Examples
"""
#Build a Stacked Area Chart from scratch
from vincent import *
import pandas as pd
import pandas.io.data as web
all_data = {}
for ticker in ['AAPL', 'GOOG', 'IBM', 'YHOO', 'MSFT']:
all_data[ticker] = web.get_data_yahoo(ticker, '1/1/2010', '1/1/2013')
price = pd.DataFrame({tic: data['Adj Close']
for tic, data in all_data.items()})
vis = Visualization(width=500, height=300)
vis.padding = {'top': 10, 'left': 50, 'bottom': 50, 'right': 100}
data = Data.from_pandas(price)
vis.data['table'] = data
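# Facet by index and compute per-index stats so the y scale can span the stacked totals.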
facets = Transform(type='facet', keys=['data.idx'])
stats = Transform(type='stats', value='data.val')
stat_dat = Data(name='stats', source='table', transform=[facets, stats])
vis.data['stats'] = stat_dat
vis.scales['x'] = Scale(name='x', type='time', range='width',
domain=DataRef(data='table', field="data.idx"))
vis.scales['y'] = Scale(name='y', range='height', type='linear', nice=True,
domain=DataRef(data='stats', field="sum"))
vis.scales['color'] = Scale(name='color', type='ordinal',
domain=DataRef(data='table', field='data.col'),
range='category20')
vis.axes.extend([Axis(type='x', scale='x'),
Axis(type='y', scale='y')])
facet = Transform(type='facet', keys=['data.col'])
stack = Transform(type='stack', point='data.idx', height='data.val')
transform = MarkRef(data='table',transform=[facet, stack])
enter_props = PropertySet(x=ValueRef(scale='x', field="data.idx"),
y=ValueRef(scale='y', field="y"),
interpolate=ValueRef(value='monotone'),
y2=ValueRef(field='y2', scale='y'),
fill=ValueRef(scale='color', field='data.col'))
mark = Mark(type='group', from_=transform,
marks=[Mark(type='area',
properties=MarkProperties(enter=enter_props))])
vis.marks.append(mark)
vis.axis_titles(x='Date', y='Price')
vis.legend(title='Tech Stocks')
vis.to_json('vega.json')
#Convenience method
vis = StackedArea(price)
vis.axis_titles(x='Date', y='Price')
vis.legend(title='Tech Stocks')
vis.colors(brew='Paired')
vis.to_json('vega.json')
|
parameter_search.py | LaudateCorpus1/RIDDLE-1 | 110 | 12730723 | """parameter_search.py
Search for optimal parameters for RIDDLE and various ML classifiers.
Requires: Keras, NumPy, scikit-learn, RIDDLE (and their dependencies)
Author: <NAME>, Rzhetsky Lab
Copyright: 2018, all rights reserved
"""
from __future__ import print_function
import argparse
import os
import pickle
import time
import warnings
import numpy as np
from sklearn.metrics import log_loss
from sklearn.model_selection import RandomizedSearchCV
from riddle import emr
from riddle import tuning
from riddle.models import MLP
from utils import get_param_path
from utils import get_preprocessed_data
from utils import recursive_mkdir
from utils import select_features
from utils import subset_reencode_features
from utils import vectorize_features
SEED = 109971161161043253 % 8085
TUNING_K = 3 # number of partitions to use to evaluate a parameter config
parser = argparse.ArgumentParser(
description='Perform parameter search for various classification methods.')
parser.add_argument(
'--method', type=str, default='riddle',
help='Classification method to use.')
parser.add_argument(
'--data_fn', type=str, default='dummy.txt',
help='Filename of text data file.')
parser.add_argument(
'--prop_missing', type=float, default=0.0,
help='Proportion of feature observations to simulate as missing.')
parser.add_argument(
'--max_num_feature', type=int, default=-1,
    help='Maximum number of features to use; with the default of -1, use all '
'available features')
parser.add_argument(
'--feature_selection', type=str, default='random',
help='Method to use for feature selection.')
parser.add_argument(
'--force_run', type=bool, default=False,
help='Whether to force parameter search to run even if it has been already'
'performed.')
parser.add_argument(
'--max_num_sample', type=int, default=10000,
help='Maximum number of samples to use during parameter tuning.')
parser.add_argument(
'--num_search', type=int, default=5,
help='Number of parameter settings (searches) to try.')
parser.add_argument(
'--data_dir', type=str, default='_data',
help='Directory of data files.')
parser.add_argument(
'--cache_dir', type=str, default='_cache',
help='Directory where to cache files and outputs.')
def loss_scorer(estimator, x, y):
"""Negative log loss scoring function for scikit-learn model selection."""
loss = log_loss(y, estimator.predict_proba(x))
assert loss >= 0
# we want to minimize loss; since scikit-learn model selection tries to
# maximize a given score, return the negative of the loss
return -1 * loss
def run(method, x_unvec, y, idx_feat_dict, num_feature, max_num_feature,
num_class, max_num_sample, feature_selection, k_idx, k, num_search,
perm_indices):
"""Run a parameter search for a single k-fold partitions
Arguments:
method: string
name of classification method; values = {'logit', 'random_forest',
'linear_svm', 'poly_svm', 'rbf_svm', 'gbdt', 'riddle'}
x_unvec: [[int]]
feature indices that have not been vectorized; each inner list
collects the indices of features that are present (binary on)
for a sample
y: [int]
list of class labels as integer indices
idx_feat_dict: {int: string}
dictionary mapping feature indices to features
num_feature: int
number of features present in the dataset
max_num_feature: int
maximum number of features to use
num_class: int
number of classes present
feature_selection: string
feature selection method; values = {'random', 'frequency', 'chi2'}
k_idx: int
index of the k-fold partition to use
k: int
number of partitions for k-fold cross-validation
num_search: int
number of searches (parameter configurations) to try
perm_indices: np.ndarray, int
array of indices representing a permutation of the samples with
shape (num_sample, )
Returns:
best_param: {string: ?}
dictionary mapping parameter names to the best values found
"""
print('-' * 72)
print('Partition k = {}'.format(k_idx))
x_train_unvec, y_train, x_val_unvec, y_val, _, _ = (
emr.get_k_fold_partition(x_unvec, y, k_idx=k_idx, k=k,
perm_indices=perm_indices))
if max_num_feature > 0: # select features and re-encode
feat_encoding_dict, _ = select_features(
x_train_unvec, y_train, idx_feat_dict,
method=feature_selection, num_feature=num_feature,
max_num_feature=max_num_feature)
x_val_unvec = subset_reencode_features(x_val_unvec, feat_encoding_dict)
num_feature = max_num_feature
# cap number of validation samples
    if max_num_sample is not None and len(x_val_unvec) > max_num_sample:
x_val_unvec = x_val_unvec[0:max_num_sample]
y_val = y_val[0:max_num_sample]
start = time.time()
if method == 'riddle':
model_class = MLP
init_args = {'num_feature': num_feature, 'num_class': num_class}
param_dist = {
'num_hidden_layer': 2, # [1, 2]
'num_hidden_node': 512, # [128, 256, 512]
'activation': ['prelu', 'relu'],
'dropout': tuning.Uniform(lo=0.2, hi=0.8),
'learning_rate': tuning.UniformLogSpace(10, lo=-6, hi=-1),
}
best_param = tuning.random_search(
model_class, init_args, param_dist, x_val_unvec, y_val,
num_class=num_class, k=TUNING_K, num_search=num_search)
else: # scikit-learn methods
x_val = vectorize_features(x_val_unvec, num_feature)
if method == 'logit': # logistic regression
from sklearn.linear_model import LogisticRegression
estimator = LogisticRegression(multi_class='multinomial',
solver='lbfgs')
param_dist = {'C': tuning.UniformLogSpace(base=10, lo=-3, hi=3)}
elif method == 'random_forest':
from sklearn.ensemble import RandomForestClassifier
estimator = RandomForestClassifier()
param_dist = {
'max_features': ['sqrt', 'log2', None],
'max_depth': tuning.UniformIntegerLogSpace(base=2, lo=0, hi=7),
'n_estimators': tuning.UniformIntegerLogSpace(base=2, lo=4, hi=8)
}
elif method == 'linear_svm':
from sklearn.svm import SVC
# remark: due to a bug in scikit-learn / libsvm, the sparse 'linear'
# kernel is much slower than the sparse 'poly' kernel, so we use
# the 'poly' kernel with degree=1 over the 'linear' kernel
estimator = SVC(kernel='poly', degree=1, coef0=0., gamma=1.,
probability=True, cache_size=1000)
param_dist = {
'C': tuning.UniformLogSpace(base=10, lo=-2, hi=1)
}
elif method == 'poly_svm':
from sklearn.svm import SVC
estimator = SVC(kernel='poly', probability=True, cache_size=1000)
param_dist = {
'C': tuning.UniformLogSpace(base=10, lo=-2, hi=1),
'degree': [2, 3, 4],
'gamma': tuning.UniformLogSpace(base=10, lo=-5, hi=1)
}
elif method == 'rbf_svm':
from sklearn.svm import SVC
estimator = SVC(kernel='rbf', probability=True, cache_size=1000)
param_dist = {
'C': tuning.UniformLogSpace(base=10, lo=-2, hi=1),
'gamma': tuning.UniformLogSpace(base=10, lo=-5, hi=1)
}
elif method == 'gbdt':
from xgboost import XGBClassifier
estimator = XGBClassifier(objective='multi:softprob')
param_dist = {
'max_depth': tuning.UniformIntegerLogSpace(base=2, lo=0, hi=5),
'n_estimators': tuning.UniformIntegerLogSpace(base=2, lo=4, hi=8),
'learning_rate': tuning.UniformLogSpace(base=10, lo=-3, hi=0)
}
else:
raise ValueError('unknown method: {}'.format(method))
param_search = RandomizedSearchCV(
estimator, param_dist, refit=False, n_iter=num_search,
scoring=loss_scorer)
param_search.fit(x_val, y_val)
best_param = param_search.best_params_
print('Best parameters for {} for k_idx={}: {} found in {:.3f} s'
.format(method, k_idx, best_param, time.time() - start))
return best_param
def run_kfold(data_fn, method='logit', prop_missing=0., max_num_feature=-1,
feature_selection='random', k=10, max_num_sample=10000,
num_search=30, data_dir='_data', cache_dir='_cache',
force_run=False):
"""Run several parameter searches a la k-fold cross-validation.
Arguments:
data_fn: string
data file filename
method: string
name of classification method; values = {'logit', 'random_forest',
'linear_svm', 'poly_svm', 'rbf_svm', 'gbdt', 'riddle'}
prop_missing: float
proportion of feature observations which should be randomly masked;
values in [0, 1)
max_num_feature: int
maximum number of features to use
feature_selection: string
feature selection method; values = {'random', 'frequency', 'chi2'}
k: int
number of partitions for k-fold cross-validation
max_num_sample: int
maximum number of samples to use
num_search: int
number of searches (parameter configurations) to try for each
partition
data_dir: string
directory where data files are located
        cache_dir: string
            directory where cached files (e.g., saved parameters) are located
        force_run: bool
            whether to re-run the search even if cached results already exist
"""
if 'debug' in data_fn:
num_search = 3
# check if already did param search, if so, skip
param_path = get_param_path(cache_dir, method, data_fn, prop_missing,
max_num_feature, feature_selection)
if not force_run and os.path.isfile(param_path):
warnings.warn('Already did search for {}, skipping the search'
.format(method))
return
x_unvec, y, idx_feat_dict, idx_class_dict, _, perm_indices = (
get_preprocessed_data(data_dir, data_fn, prop_missing=prop_missing))
num_feature = len(idx_feat_dict)
num_class = len(idx_class_dict)
params = {}
for k_idx in range(0, k):
params[k_idx] = run(
method, x_unvec, y, idx_feat_dict, num_feature=num_feature,
max_num_feature=max_num_feature, num_class=num_class,
max_num_sample=max_num_sample, feature_selection=feature_selection,
k_idx=k_idx, k=k, num_search=num_search, perm_indices=perm_indices)
    recursive_mkdir(cache_dir)
with open(param_path, 'wb') as f: # save
pickle.dump(params, f)
print('Finished parameter search for method: {}'.format(method))
def main():
"""Main method."""
np.random.seed(SEED) # for reproducibility, must be before Keras imports!
run_kfold(data_fn=FLAGS.data_fn,
method=FLAGS.method,
prop_missing=FLAGS.prop_missing,
max_num_feature=FLAGS.max_num_feature,
feature_selection=FLAGS.feature_selection,
max_num_sample=FLAGS.max_num_sample,
num_search=FLAGS.num_search,
data_dir=FLAGS.data_dir,
cache_dir=FLAGS.cache_dir,
force_run=FLAGS.force_run)
# if run as script, execute main
if __name__ == '__main__':
FLAGS, _ = parser.parse_known_args()
main()
|
alembic/versions/2020-01-20_669e9df34ea7_add_new_netloc_tracking_table.py | fake-name/ReadableWebProxy | 193 | 12730840 | <gh_stars>100-1000
"""add new netloc tracking table
Revision ID: 669e9df34ea7
Revises: <KEY>
Create Date: 2020-01-20 01:36:51.862767
"""
# revision identifiers, used by Alembic.
revision = '669e9df34ea7'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy_utils.types import TSVectorType
from sqlalchemy_searchable import make_searchable
import sqlalchemy_utils
# Patch in knowledge of the citext type, so it reflects properly.
from sqlalchemy.dialects.postgresql.base import ischema_names
import citext
import queue
import datetime
from sqlalchemy.dialects.postgresql import ENUM
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.dialects.postgresql import TSVECTOR
ischema_names['citext'] = citext.CIText
from sqlalchemy.dialects import postgresql
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('seen_netloc_tracker',
sa.Column('id', sa.BigInteger(), nullable=False),
sa.Column('netloc', citext.CIText(), nullable=False),
sa.Column('ignore', sa.Boolean(), nullable=True),
sa.Column('have', sa.Boolean(), nullable=True),
sa.Column('extra', postgresql.JSONB(astext_type=sa.Text()), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_seen_netloc_tracker_netloc'), 'seen_netloc_tracker', ['netloc'], unique=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_seen_netloc_tracker_netloc'), table_name='seen_netloc_tracker')
op.drop_table('seen_netloc_tracker')
# ### end Alembic commands ###
|
src/functionapp/azext_functionapp/_help.py | haroonf/azure-cli-extensions | 207 | 12730843 | # coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.help_files import helps # pylint: disable=unused-import
helps['functionapp devops-pipeline'] = """
type: group
short-summary: Azure Function specific integration with Azure DevOps. Please visit https://aka.ms/functions-azure-devops for more information.
"""
helps['functionapp devops-pipeline create'] = """
type: command
short-summary: Create an Azure DevOps pipeline for a function app.
examples:
- name: create an Azure Pipeline to a function app.
text: >
az functionapp devops-pipeline create --functionapp-name FunctionApp
- name: create an Azure Pipeline from a Github function app repository.
text: >
az functionapp devops-pipeline create --github-repository GithubOrganization/GithubRepository --github-pat GithubPersonalAccessToken
- name: create an Azure Pipeline with specific Azure DevOps organization and project
text: >
az functionapp devops-pipeline create --organization-name AzureDevOpsOrganization --project-name AzureDevOpsProject
"""
|
python_web/manage.py | LouisYZK/Frodo | 123 | 12730880 | import os
import aiofiles
import yaml
import click
import asyncio
from datetime import datetime
from models.user import create_user
from models import Post, User
def run_async(coro):
asyncio.run(coro)
@click.group()
def cli():
...
async def _adduser(**kwargs):
try:
user = await create_user(**kwargs)
except Exception as e:
print(e)
click.echo(str(e))
else:
click.echo(f'User {user.name} created!!! ID: {user.id}')
async def extract_meta(file_path: str):
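    # Split a Hexo-style markdown post into its YAML front matter (the block
    # between the leading '---' markers) and the remaining article body.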
data = ''
data_exist = False
content = ''
async with aiofiles.open(file_path) as fp:
async for line in fp:
if line.strip() == '---' and data_exist:
data_exist = False
continue
if line.strip() == '---':
data_exist = True
continue
if data_exist:
data += line
else:
content += line
return data, content
async def add_post(dct, content, user_id=None):
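    # Create a Post record from the parsed front matter, skipping entries that
    # have no title or already exist.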
title = dct.get('title', '')
tags = dct.get('tags', [])
author_id = user_id
date = dct.get('date', None)
if not title:
return
post = await Post.async_first(title=title)
if post:
return
if date is None:
date = datetime.now()
await Post.acreate(title=title, content=content,
author_id=author_id, slug=title,
summary='',
status=Post.STATUS_ONLINE,
can_comment=True,
type=Post.TYPE_ARTICLE,
created_at=date)
print(f'{title} save...')
async def _hexo_export(dir, uname):
user = await User.async_first(name=uname)
id = user.get('id', '')
if not id:
return
for article in os.listdir(dir):
if not article.endswith('.md'):
continue
else:
file = f'{dir}/{article}'
            metadata, content = await extract_meta(file)
            dct = yaml.safe_load(metadata)
if 'title' not in dct:
title = ' '.join(file.split('-')[3:])
title = title.replace('.md', '')
dct.update(title=title)
asyncio.create_task(add_post(dct, content, user_id=id))
@cli.command()
@click.option('--name', required=True, prompt=True)
@click.option('--email', required=False, default=None, prompt=True)
@click.option('--password', required=True, prompt=True, hide_input=True,
confirmation_prompt=True)
def adduser(name, email, password):
run_async(_adduser(name=name, password=password, email=email))
@cli.command()
@click.option('--dir', required=True)
@click.option('--uname', required=True)
def hexo_export(dir, uname):
run_async(_hexo_export(dir=dir, uname=uname))
click.echo('Export Hexo Finished!')
if __name__ == '__main__':
cli() |
剑指offer/19_MirrorOfBinaryTree(二叉树镜像).py | PegasusWang/python_data_structures_and_algorithms | 2,468 | 12730900 | <reponame>PegasusWang/python_data_structures_and_algorithms<filename>剑指offer/19_MirrorOfBinaryTree(二叉树镜像).py
"""
Implement a function that takes a binary tree as input and outputs its mirror image.
https://leetcode.com/problems/invert-binary-tree/
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class _Solution:
def invertTree(self, root):
"""
:type root: TreeNode
:rtype: TreeNode
"""
if root:
root.left, root.right = root.right, root.left
self.invertTree(root.left)
self.invertTree(root.right)
return root
# Definition for a binary tree node.
class Node:
def __init__(self, x, left=None, right=None):
self.val = x
self.left = left
self.right = right
def __str__(self):
return '{}'.format(self.val)
__repr__ = __str__
from collections import deque
class Stack:
def __init__(self):
self.items = deque()
def push(self, val):
return self.items.append(val)
def pop(self):
return self.items.pop()
def empty(self):
return len(self.items) == 0
class Solution: # https://leetcode.com/problems/symmetric-tree/
def isSymmetric(self, root):
""" use stack """
if not root:
return True
s = Stack()
s.push((root.left, root.right)) # push a tuple
while not s.empty():
top_vals = s.pop()
left_node, right_node = top_vals[0], top_vals[1]
if left_node and right_node:
if left_node.val == right_node.val:
s.push((left_node.left, right_node.right))
s.push((left_node.right, right_node.left))
else:
return False
else:
if left_node != right_node:
return False
return True
def isSymmetric_recursive(self, root):
""" 判断是否是镜像,使用递归的方式
:type root: TreeNode
:rtype: bool
"""
def _check(left, right):
if left and right:
if left.val == right.val:
flag1 = _check(left.left, right.right)
flag2 = _check(left.right, right.left)
return flag1 and flag2
else:
return False
else:
                return left == right  # here either only one of left and right is None, or both are None
if root:
return _check(root.left, root.right)
return True
def isSymmetric_layer(self, root):
""" 判断是否是镜像,使用层序遍历
:type root: TreeNode
:rtype: bool
"""
if not root:
return True
curnodes = [root]
next_nodes = []
while curnodes or next_nodes:
lefts = []
rights = []
for node in curnodes:
lefts.append(node.left.val if node.left else None) # NOTE: append val not node
rights.append(node.right.val if node.right else None)
if node.left:
next_nodes.append(node.left)
if node.right:
next_nodes.append(node.right)
if lefts != rights[::-1]:
return False
curnodes = next_nodes
next_nodes = []
return True
def test():
t = Node(1, Node(2, Node(3), Node(4)), Node(2, Node(4), Node(3)))
s = Solution()
assert s.isSymmetric(t) is True
test()
|
tg/__init__.py | JingMatrix/tg | 558 | 12730903 | """
Terminal client for telegram
"""
__version__ = "0.17.0"
|
boto3_type_annotations_with_docs/boto3_type_annotations/marketplace_entitlement/paginator.py | cowboygneox/boto3_type_annotations | 119 | 12730926 | <filename>boto3_type_annotations_with_docs/boto3_type_annotations/marketplace_entitlement/paginator.py
from typing import Dict
from botocore.paginate import Paginator
class GetEntitlements(Paginator):
def paginate(self, ProductCode: str, Filter: Dict = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`MarketplaceEntitlementService.Client.get_entitlements`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/entitlement.marketplace-2017-01-11/GetEntitlements>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
ProductCode='string',
Filter={
'string': [
'string',
]
},
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'Entitlements': [
{
'ProductCode': 'string',
'Dimension': 'string',
'CustomerIdentifier': 'string',
'Value': {
'IntegerValue': 123,
'DoubleValue': 123.0,
'BooleanValue': True|False,
'StringValue': 'string'
},
'ExpirationDate': datetime(2015, 1, 1)
},
],
}
**Response Structure**
- *(dict) --*
The GetEntitlementsRequest contains results from the GetEntitlements operation.
- **Entitlements** *(list) --*
The set of entitlements found through the GetEntitlements operation. If the result contains an empty set of entitlements, NextToken might still be present and should be used.
- *(dict) --*
An entitlement represents capacity in a product owned by the customer. For example, a customer might own some number of users or seats in an SaaS application or some amount of data capacity in a multi-tenant database.
- **ProductCode** *(string) --*
The product code for which the given entitlement applies. Product codes are provided by AWS Marketplace when the product listing is created.
- **Dimension** *(string) --*
The dimension for which the given entitlement applies. Dimensions represent categories of capacity in a product and are specified when the product is listed in AWS Marketplace.
- **CustomerIdentifier** *(string) --*
The customer identifier is a handle to each unique customer in an application. Customer identifiers are obtained through the ResolveCustomer operation in AWS Marketplace Metering Service.
- **Value** *(dict) --*
The EntitlementValue represents the amount of capacity that the customer is entitled to for the product.
- **IntegerValue** *(integer) --*
The IntegerValue field will be populated with an integer value when the entitlement is an integer type. Otherwise, the field will not be set.
- **DoubleValue** *(float) --*
The DoubleValue field will be populated with a double value when the entitlement is a double type. Otherwise, the field will not be set.
- **BooleanValue** *(boolean) --*
The BooleanValue field will be populated with a boolean value when the entitlement is a boolean type. Otherwise, the field will not be set.
- **StringValue** *(string) --*
The StringValue field will be populated with a string value when the entitlement is a string type. Otherwise, the field will not be set.
- **ExpirationDate** *(datetime) --*
The expiration date represents the minimum date through which this entitlement is expected to remain valid. For contractual products listed on AWS Marketplace, the expiration date is the date at which the customer will renew or cancel their contract. Customers who are opting to renew their contract will still have entitlements with an expiration date.
:type ProductCode: string
:param ProductCode: **[REQUIRED]**
Product code is used to uniquely identify a product in AWS Marketplace. The product code will be provided by AWS Marketplace when the product listing is created.
:type Filter: dict
:param Filter:
Filter is used to return entitlements for a specific customer or for a specific dimension. Filters are described as keys mapped to a lists of values. Filtered requests are *unioned* for each value in the value list, and then *intersected* for each filter key.
- *(string) --*
- *(list) --*
- *(string) --*
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
|
contact/forms.py | sreekanth1990/djangoproject.com | 1,440 | 12730936 | import logging
import django
from contact_form.forms import ContactForm
from django import forms
from django.conf import settings
from django.contrib.sites.models import Site
from django.utils.encoding import force_bytes
from pykismet3 import Akismet, AkismetServerError
logger = logging.getLogger(__name__)
class BaseContactForm(ContactForm):
message_subject = forms.CharField(
max_length=100,
widget=forms.TextInput(attrs={'class': 'required', 'placeholder': 'Message subject'}),
label='Message subject',
)
email = forms.EmailField(widget=forms.TextInput(attrs={'class': 'required', 'placeholder': 'E-mail'}))
name = forms.CharField(widget=forms.TextInput(attrs={'class': 'required', 'placeholder': 'Name'}))
body = forms.CharField(widget=forms.Textarea(attrs={'class': 'required', 'placeholder': 'Your message'}))
def subject(self):
# Strip all linebreaks from the subject string.
subject = ''.join(self.cleaned_data["message_subject"].splitlines())
return "[Contact form] " + subject
def message(self):
return "From: {name} <{email}>\n\n{body}".format(**self.cleaned_data)
def clean_body(self):
"""
Check spam against Akismet.
Backported from django-contact-form pre-1.0; 1.0 dropped built-in
Akismet support.
"""
if 'body' in self.cleaned_data and getattr(settings, 'AKISMET_API_KEY', None):
try:
akismet_api = Akismet(
api_key=settings.AKISMET_API_KEY,
blog_url='http://%s/' % Site.objects.get_current().domain,
user_agent='Django {}.{}.{}'.format(*django.VERSION)
)
akismet_data = {
'user_ip': self.request.META.get('REMOTE_ADDR', ''),
'user_agent': self.request.META.get('HTTP_USER_AGENT', ''),
'referrer': self.request.META.get('HTTP_REFERER', ''),
'comment_content': force_bytes(self.cleaned_data['body']),
'comment_author': self.cleaned_data.get('name', ''),
}
if getattr(settings, 'AKISMET_TESTING', None):
# Adding test argument to the request in order to tell akismet that
# they should ignore the request so that test runs affect the heuristics
akismet_data['test'] = 1
if akismet_api.check(akismet_data):
raise forms.ValidationError("Akismet thinks this message is spam")
except AkismetServerError:
logger.error('Akismet server error')
return self.cleaned_data['body']
class FoundationContactForm(BaseContactForm):
recipient_list = ["<EMAIL>"]
|
tests/resources/test_comment.py | Glushiator/jira | 1,639 | 12730974 | <gh_stars>1000+
from tests.conftest import JiraTestCase
class CommentTests(JiraTestCase):
def setUp(self):
JiraTestCase.setUp(self)
self.issue_1 = self.test_manager.project_b_issue1
self.issue_2 = self.test_manager.project_b_issue2
self.issue_3 = self.test_manager.project_b_issue3
def test_comments(self):
for issue in [self.issue_1, self.jira.issue(self.issue_2)]:
self.jira.issue(issue)
comment1 = self.jira.add_comment(issue, "First comment")
comment2 = self.jira.add_comment(issue, "Second comment")
comments = self.jira.comments(issue)
assert comments[0].body == "First comment"
assert comments[1].body == "Second comment"
comment1.delete()
comment2.delete()
comments = self.jira.comments(issue)
assert len(comments) == 0
def test_expanded_comments(self):
comment1 = self.jira.add_comment(self.issue_1, "First comment")
comment2 = self.jira.add_comment(self.issue_1, "Second comment")
comments = self.jira.comments(self.issue_1, expand="renderedBody")
self.assertTrue(hasattr(comments[0], "renderedBody"))
ret_comment1 = self.jira.comment(
self.issue_1, comment1.id, expand="renderedBody"
)
ret_comment2 = self.jira.comment(self.issue_1, comment2.id)
comment1.delete()
comment2.delete()
self.assertTrue(hasattr(ret_comment1, "renderedBody"))
self.assertFalse(hasattr(ret_comment2, "renderedBody"))
comments = self.jira.comments(self.issue_1)
assert len(comments) == 0
def test_add_comment(self):
comment = self.jira.add_comment(
self.issue_3,
"a test comment!",
visibility={"type": "role", "value": "Administrators"},
)
self.assertEqual(comment.body, "a test comment!")
self.assertEqual(comment.visibility.type, "role")
self.assertEqual(comment.visibility.value, "Administrators")
comment.delete()
def test_add_comment_with_issue_obj(self):
issue = self.jira.issue(self.issue_3)
comment = self.jira.add_comment(
issue,
"a new test comment!",
visibility={"type": "role", "value": "Administrators"},
)
self.assertEqual(comment.body, "a new test comment!")
self.assertEqual(comment.visibility.type, "role")
self.assertEqual(comment.visibility.value, "Administrators")
comment.delete()
def test_update_comment(self):
comment = self.jira.add_comment(self.issue_3, "updating soon!")
comment.update(body="updated!")
self.assertEqual(comment.body, "updated!")
# self.assertEqual(comment.visibility.type, 'role')
# self.assertEqual(comment.visibility.value, 'Administrators')
comment.delete()
|
performance_tests/storage/test_random_access.py | avkudr/aim | 2,195 | 12730975 | <filename>performance_tests/storage/test_random_access.py
from parameterized import parameterized
from aim import Repo
from performance_tests.base import StorageTestBase
from performance_tests.utils import get_baseline, write_baseline
from performance_tests.storage.utils import random_access_metric_values
class TestRandomAccess(StorageTestBase):
@parameterized.expand({0: 50, 1: 250, 2: 500}.items())
def test_random_access(self, test_key, density):
test_name = f'test_random_access_{test_key}'
repo = Repo.default_repo()
query = 'metric.name == "metric 0"'
execution_time = random_access_metric_values(repo, query, density)
baseline = get_baseline(test_name)
if baseline:
self.assertInRange(execution_time, baseline)
else:
write_baseline(test_name, execution_time)
|
common/tests/test_utils.py | krisshol/bach-kmno | 248 | 12730976 | <reponame>krisshol/bach-kmno<gh_stars>100-1000
#
# Copyright (c) 2013-2018 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.
import logging
import unittest
from unittest.mock import Mock, patch
from irma.common.utils.utils import UUID, bytes_to_utf8, save_to_file
from irma.common.base.utils import IrmaFrontendReturn, IrmaTaskReturn, \
IrmaReturnCode, IrmaScanStatus, IrmaValueError, common_celery_options, \
IrmaScanRequest, IrmaProbeType
# =================
# Logging options
# =================
def enable_logging(level=logging.INFO, handler=None, formatter=None):
global log
log = logging.getLogger()
if formatter is None:
formatter = logging.Formatter("%(asctime)s [%(name)s] " +
"%(levelname)s: %(message)s")
if handler is None:
handler = logging.StreamHandler()
handler.setFormatter(formatter)
log.addHandler(handler)
log.setLevel(level)
# ============
# Test Cases
# ============
class TestCommonUtils(unittest.TestCase):
def test_uuid(self):
uuid = UUID.generate()
self.assertTrue(UUID.validate(uuid))
self.assertEqual(len(uuid), 36)
self.assertEqual(uuid.count("-"), 4)
def test_uuid_generate(self):
uuid = UUID.normalize("01234567-abcd-ef01-2345-deadbeaff00d")
self.assertTrue(UUID.validate(uuid))
self.assertEqual(uuid, "01234567-abcd-ef01-2345-deadbeaff00d")
def test_uuid_validate(self):
self.assertFalse(UUID.validate("not a uuid"))
def test_bytes_to_utf8_0(self):
result = bytes_to_utf8(b"something")
self.assertEqual(result, "something")
def test_bytes_to_utf8_1(self):
result = bytes_to_utf8("something")
self.assertIs(result, "something")
def test_bytes_to_utf8_2(self):
result = bytes_to_utf8(["foo", b"bar", (b"baz",)])
self.assertEqual(result, ["foo", "bar", ("baz",)])
def test_bytes_to_utf8_3(self):
result = bytes_to_utf8({"foo": b"bar", b"baz": None})
self.assertDictEqual(result, {"foo": "bar", "baz": None})
@patch("builtins.open")
def test_save_to_file0(self, m_open):
Mockfile = type("Mockfile", (Mock, ),
{"seek": lambda self, pos: setattr(self, "pos", pos)})
fileobj = Mockfile()
fileobj.read.return_value = ""
fileobj.pos = 0
dstobj = Mockfile()
m_open.return_value.__enter__.return_value = dstobj
size = save_to_file(fileobj, "/foo")
m_open.assert_called_once_with("/foo", "wb")
self.assertEqual(fileobj.pos, 0)
self.assertEqual(size, 0)
dstobj.write.assert_not_called()
@patch("builtins.open")
def test_save_to_file1(self, m_open):
Mockfile = type("Mockfile", (Mock, ),
{"seek": lambda self, pos: setattr(self, "pos", pos),
"write": lambda self, buf: setattr(
self, "written", self.written + buf)})
fileobj = Mockfile()
fileobj.read.side_effect = ["foo", "bar", "baz", ""]
fileobj.pos = 0
dstobj = Mockfile()
dstobj.written = ""
m_open.return_value.__enter__.return_value = dstobj
size = save_to_file(fileobj, "/foo")
m_open.assert_called_once_with("/foo", "wb")
self.assertEqual(dstobj.written, "foobarbaz")
self.assertEqual(fileobj.pos, 0)
self.assertEqual(size, 9)
def test_irma_taskreturn_success(self):
ret = IrmaTaskReturn.success("success")
self.assertEqual(ret[0],
IrmaReturnCode.success)
self.assertEqual(ret[1],
"success")
self.assertEqual(type(ret),
tuple)
self.assertEqual(type(ret[0]),
int)
self.assertEqual(type(ret[1]),
str)
def test_irma_taskreturn_warning(self):
ret = IrmaTaskReturn.warning("warning")
self.assertEqual(ret[0],
IrmaReturnCode.warning)
self.assertEqual(ret[1],
"warning")
self.assertEqual(type(ret),
tuple)
self.assertEqual(type(ret[0]),
int)
self.assertEqual(type(ret[1]),
str)
def test_irma_taskreturn_error(self):
ret = IrmaTaskReturn.error("error")
self.assertEqual(ret[0],
IrmaReturnCode.error)
self.assertEqual(ret[1],
"error")
self.assertEqual(type(ret),
tuple)
self.assertEqual(type(ret[0]),
int)
self.assertEqual(type(ret[1]),
str)
def test_irma_frontendreturn_success(self):
f_success = IrmaFrontendReturn.success
ret = f_success(optional={'key': 'value'})
self.assertEqual(ret['code'],
IrmaReturnCode.success)
self.assertEqual(ret['msg'],
"success")
self.assertEqual(type(ret),
dict)
self.assertEqual(type(ret['code']),
int)
self.assertEqual(type(ret['msg']),
str)
self.assertEqual(type(ret['optional']),
dict)
self.assertEqual(ret['optional']['key'],
'value')
def test_irma_frontendreturn_warning(self):
f_warning = IrmaFrontendReturn.warning
ret = f_warning("warning", optional={'key': 'value'})
self.assertEqual(ret['code'],
IrmaReturnCode.warning)
self.assertEqual(ret['msg'],
"warning")
self.assertEqual(type(ret),
dict)
self.assertEqual(type(ret['code']),
int)
self.assertEqual(type(ret['msg']),
str)
self.assertEqual(type(ret['optional']),
dict)
self.assertEqual(ret['optional']['key'],
'value')
def test_irma_frontendreturn_error(self):
f_error = IrmaFrontendReturn.error
ret = f_error("error", optional={'key': 'value'})
self.assertEqual(ret['code'],
IrmaReturnCode.error)
self.assertEqual(ret['msg'],
"error")
self.assertEqual(type(ret),
dict)
self.assertEqual(type(ret['code']),
int)
self.assertEqual(type(ret['msg']),
str)
self.assertEqual(type(ret['optional']),
dict)
self.assertEqual(ret['optional']['key'],
'value')
def test_irmascanstatus_is_error0(self):
self.assertFalse(IrmaScanStatus.is_error(IrmaScanStatus.finished))
def test_irmascanstatus_is_error1(self):
self.assertTrue(IrmaScanStatus.is_error(IrmaScanStatus.error))
def test_irmascanstatus_is_error2(self):
self.assertTrue(IrmaScanStatus.is_error(IrmaScanStatus.error_probe_na))
def test_irmascanstatus_filter_status0(self):
mini, maxi = IrmaScanStatus.launched, IrmaScanStatus.flushed
self.assertIs(IrmaScanStatus.filter_status(
IrmaScanStatus.processed, mini, maxi), None)
def test_irmascanstatus_filter_status1(self):
mini, maxi = IrmaScanStatus.launched, IrmaScanStatus.flushed
with self.assertRaises(IrmaValueError):
IrmaScanStatus.filter_status(IrmaScanStatus.ready, mini, maxi)
def test_irmascanstatus_filter_status2(self):
mini, maxi = IrmaScanStatus.launched, IrmaScanStatus.flushed
with self.assertRaises(IrmaValueError):
IrmaScanStatus.filter_status(IrmaScanStatus.cancelled, mini, maxi)
def test_irmascanstatus_filter_status3(self):
mini, maxi = IrmaScanStatus.launched, IrmaScanStatus.flushed
with self.assertRaises(IrmaValueError):
IrmaScanStatus.filter_status(25, mini, maxi)
def test_irmascanstatus_code_ot_label0(self):
self.assertEqual(
IrmaScanStatus.code_to_label(IrmaScanStatus.finished),
"finished")
def test_irmascanstatus_code_ot_label1(self):
self.assertEqual(
IrmaScanStatus.code_to_label(25),
"Unknown status code")
def test_irmaprobetype_normalize0(self):
self.assertEqual(
IrmaProbeType.normalize("external"),
IrmaProbeType.external)
def test_irmaprobetype_normalize1(self):
self.assertEqual(
IrmaProbeType.normalize("foo"),
IrmaProbeType.unknown)
@patch("irma.common.base.utils.UUID.generate")
def test_common_celery_options0(self, m_generate):
m_generate.return_value = "a-random-uuid"
result = common_celery_options("foo", "bar", 0, 50, 100)
self.assertEqual(result, [
"--app=foo",
"--loglevel=info",
"--without-gossip",
"--without-mingle",
"--without-heartbeat",
"--soft-time-limit=50",
"--time-limit=100",
"--hostname=bar-a-random-uuid"])
@patch("irma.common.base.utils.UUID.generate")
def test_common_celery_options1(self, m_generate):
m_generate.return_value = "a-random-uuid"
result = common_celery_options("foo", "bar", 3, 50, 100)
self.assertEqual(result, [
"--app=foo",
"--loglevel=info",
"--without-gossip",
"--without-mingle",
"--without-heartbeat",
"--concurrency=3",
"--soft-time-limit=50",
"--time-limit=100",
"--hostname=bar-a-random-uuid"])
class TestIrmaScanRequest(unittest.TestCase):
def setUp(self):
self.isr = IrmaScanRequest()
def test_init(self):
isr = IrmaScanRequest({"foo": Mock(), "bar": Mock()})
self.assertEqual(isr.nb_files, 2)
def test_add_file(self):
self.isr.add_file("foo", "probelist", "mimetype")
self.assertDictEqual(
self.isr.request["foo"],
{"probe_list": "probelist", "mimetype": "mimetype"})
self.assertEqual(self.isr.nb_files, 1)
def test_del_file0(self):
self.isr.add_file("foo", "probelist", "mimetype")
self.isr.del_file("foo")
self.assertNotIn("foo", self.isr.request)
self.assertEqual(self.isr.nb_files, 0)
def test_del_file1(self):
self.isr.del_file("foo")
self.assertNotIn("foo", self.isr.request)
self.assertEqual(self.isr.nb_files, 0)
def test_get_probelist(self):
self.isr.add_file("foo", "bar", "mimetype")
result = self.isr.get_probelist("foo")
self.assertEqual(result, "bar")
self.assertEqual(self.isr.nb_files, 1)
def test_set_probelist(self):
self.isr.add_file("foo", "bar", "mimetype")
self.isr.set_probelist("foo", "baz")
self.assertEqual(self.isr.get_probelist("foo"), "baz")
self.assertEqual(self.isr.nb_files, 1)
def test_get_mimetype(self):
self.isr.add_file("foo", "probelist", "bar")
result = self.isr.get_mimetype("foo")
self.assertEqual(result, "bar")
self.assertEqual(self.isr.nb_files, 1)
def test_to_dict(self):
self.assertIs(self.isr.request, self.isr.to_dict())
def test_files(self):
self.assertEqual(self.isr.request.keys(), self.isr.files())
if __name__ == '__main__':
enable_logging()
unittest.main()
|
CONFidence CTF 2014/Main event/Memory/solution/disasm.py | IMULMUL/ctf-tasks | 581 | 12730981 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Filename: disasm.py
# Author: <NAME>
# Task: Memory
# Competition: CONFidence CTF 2014
# Category: Reverse engineering
# Scoring: 50 pts (easy)
# Number of solves: 2 out of 11 participants
import struct
def Disassemble(program):
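    # Note (inferred from the unpacking below, not from a published spec):
    # each instruction is 8 bytes -- a 4-byte little-endian opcode
    # (0..5 -> add/sub/xor/neg/cmp/exit), one byte for the memory-slot index,
    # one byte for the immediate argument, and two trailing bytes that are
    # never read here.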
for offset in xrange(0, len(program), 8):
op = struct.unpack("<I", program[offset: offset + 4])[0]
op = ["add", "sub", "xor", "neg", "cmp", "exit"][op]
idx = ord(program[offset + 4])
arg = ord(program[offset + 5])
if op == "exit":
print "[%.8x] exit" % (offset)
break
elif op == "neg":
print "[%.8x] neg [%2d]" % (offset, idx)
else:
print "[%.8x] %s [%2d], 0x%x" % (offset, op, idx, arg)
def main():
with open("unpacked.exe", "rb") as f:
file_data = f.read()
Disassemble(file_data[0x1440:])
if __name__ == "__main__":
main()
|
hummingbot/connector/exchange/beaxy/beaxy_misc.py | BGTCapital/hummingbot | 3,027 | 12731000 | class BeaxyIOError(IOError):
def __init__(self, msg, response, result, *args, **kwargs):
self.response = response
self.result = result
super(BeaxyIOError, self).__init__(msg, *args, **kwargs)
|
wooey/migrations/0030_populate-sv-m2m.py | fridmundklaus/wooey | 1,572 | 12731026 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2018-01-15 15:43
from __future__ import unicode_literals
from django.db import migrations
def populate_m2m(apps, schema_editor):
ScriptParser = apps.get_model('wooey', 'ScriptParser')
ScriptParameterGroup = apps.get_model('wooey', 'ScriptParameterGroup')
for obj in ScriptParser.objects.all():
obj.new_script_version.add(obj.script_version)
for obj in ScriptParameterGroup.objects.all():
obj.new_script_version.add(obj.script_version)
class Migration(migrations.Migration):
dependencies = [
('wooey', '0029_add-m2m-sv'),
]
operations = [
migrations.RunPython(populate_m2m)
]
|
tests/test_downloadermiddleware_stats.py | HyunTruth/scrapy | 41,267 | 12731048 | <filename>tests/test_downloadermiddleware_stats.py
from unittest import TestCase
from scrapy.downloadermiddlewares.stats import DownloaderStats
from scrapy.http import Request, Response
from scrapy.spiders import Spider
from scrapy.utils.test import get_crawler
class MyException(Exception):
pass
class TestDownloaderStats(TestCase):
def setUp(self):
self.crawler = get_crawler(Spider)
self.spider = self.crawler._create_spider('scrapytest.org')
self.mw = DownloaderStats(self.crawler.stats)
self.crawler.stats.open_spider(self.spider)
self.req = Request('http://scrapytest.org')
self.res = Response('scrapytest.org', status=400)
def assertStatsEqual(self, key, value):
self.assertEqual(
self.crawler.stats.get_value(key, spider=self.spider),
value,
str(self.crawler.stats.get_stats(self.spider))
)
def test_process_request(self):
self.mw.process_request(self.req, self.spider)
self.assertStatsEqual('downloader/request_count', 1)
def test_process_response(self):
self.mw.process_response(self.req, self.res, self.spider)
self.assertStatsEqual('downloader/response_count', 1)
def test_process_exception(self):
self.mw.process_exception(self.req, MyException(), self.spider)
self.assertStatsEqual('downloader/exception_count', 1)
self.assertStatsEqual(
'downloader/exception_type_count/tests.test_downloadermiddleware_stats.MyException',
1
)
def tearDown(self):
self.crawler.stats.close_spider(self.spider, '')
|
client/verta/tests/registry/model_version/test_log_code.py | fool-sec-review/modeldb | 835 | 12731066 | # -*- coding: utf-8 -*-
import pytest
from verta.code import Git, Notebook
pytestmark = pytest.mark.not_oss # skip if run in oss setup. Applied to entire module
class TestLogCode:
def test_log_code(self, model_version):
key1, key2, key3 = "version1", "version2", "version3"
version1 = Git(
repo_url="<EMAIL>:VertaAI/models.git",
commit_hash="52f3d22",
autocapture=False,
)
version2 = Git(
repo_url="<EMAIL>:VertaAI/data-processing.git",
commit_hash="26f9787",
autocapture=False,
)
version3 = Notebook(
"conftest.py", # not a notebook, but fine for testing
_autocapture=False,
)
model_version.log_code_version(key1, version1)
model_version.log_code_versions(
{
key2: version2,
key3: version3,
},
)
assert model_version.get_code_version(key1) == version1
assert model_version.get_code_version(key2) == version2
assert model_version.get_code_version(key3) == version3
assert model_version.get_code_versions() == {
key1: version1,
key2: version2,
key3: version3,
}
|
ansible/roles/monitoring_agent/files/plugins/check_drive_health.py | thogaw/docker-tools | 180 | 12731092 | <reponame>thogaw/docker-tools
#!/usr/bin/env python3
"""check_drive_health
Nagios plugin to check health of SSD and magnetic drives. Examines
SMART attributes and software-RAID status. As a drive ages,
reallocated sector counts may be recorded in SMART attributes - this
plugin provides a way to define per-drive tolerance for nonzero values
reported in SMART attributes, and to warn when new events occur.
Dependencies: python >= 3.6, smartmontools >=7.0, click >= 6.0, mdstat >= 1.0.4
Usage example:
$ check_drive_health.py -w 45 -e drive_tolerate.yaml
/dev/sda OK: temp=38 serial=7E3020001587 cap=0.064T
/dev/sdb OK: temp=42 serial=1632137A883D cap=1.050T
RAID OK: 1 array clean
Error-list example:
Top-level key in drive_tolerate.yaml is drive serial number, second-level
keys are attribute names as reported by smartctl -A:
---
PN1338P4J8MT49:
Reallocated_Sector_Ct: 20
Reallocated_Event_Count: 45
Setup:
# install smartmontools package if 7.1 is available in distro
# or download smartmontools-7.1.tar.gz from
# https://sourceforge.net/projects/smartmontools/files/smartmontools/7.1/
tar xf smartmontools.7.1.tar.gz
cd smartmontools-7.1 && ./configure && make install
pip3 install click==7.1.2 mdstat==1.0.4
Grant this plugin sudo (for smartctl) with an entry in /etc/sudoers.d:
nagios ALL=NOPASSWD: /usr/local/lib/nagios/check_drive_health.py
created 25 oct 2020 by richb at instantlinux.net
homepage https://github.com/instantlinux/docker-tools - find this plugin
under ansible monitoring_agent role
license Apache-2.0
"""
import click
import json
import mdstat
import os
import sys
import yaml
__version__ = '0.1.2'
STATUS_OK = 0
STATUS_WARN = 1
STATUS_CRIT = 2
STATUS_UNK = 3
SMART_ATTR_CHECKS = {
5: dict(name='Reallocated_Sector_Ct', level=STATUS_WARN),
196: dict(name='Reallocated_Event_Count', level=STATUS_WARN),
197: dict(name='Current_Pending_Sector', level=STATUS_WARN),
198: dict(name='Offline_Uncorrectable', level=STATUS_CRIT)}
@click.command(context_settings=dict(help_option_names=['-h', '--help']))
@click.version_option(version=__version__,)
@click.option('--device', '-d', default=['all'],
type=str, multiple=True,
help='Device to check - /dev/xxx or all [default: all]')
@click.option('--error-list', '-e',
type=click.File('r'),
help='Expected errors: list of known problems indexed by '
'device serial number, in YAML format')
@click.option('--raid/--no-raid', default=True,
help='Examine RAID devices found in /proc/mdstat [true]')
@click.option('--warn-temp', '-w', default=50,
type=int,
help='Temperature warning threshold [50]')
@click.option('--crit-temp', '-c', default=65,
type=int,
help='Temperature critical threshold [65]')
@click.option('--warn-spare', default=50,
type=int,
help='Spare-percentage warning threshold for nvme [50]')
def main(device, error_list, raid, warn_temp, crit_temp, warn_spare):
if 'all' in device:
# Get all block storage devices except loopback (major=7)
device = [item['name'] for item in
json.load(os.popen('lsblk -dJ -e 7'))['blockdevices']]
error_items = yaml.safe_load(error_list) if error_list else {}
retval, messages = STATUS_OK, ([], [], [], [])
for drive in device:
status, message = check_smart(drive, error_items, warn_temp,
crit_temp, warn_spare)
messages[status].append(message)
retval = max(retval, status)
if raid and 'active' in open('/proc/mdstat', 'r').read():
status, message = check_raid()
messages[status].append(message)
retval = max(retval, status)
print('\n'.join([msg for sts in reversed(messages) for msg in sts]))
exit(retval)
def check_smart(drive, error_items, warn_temp, crit_temp, warn_spare):
"""Read SMART attributes for a drive, looking for values above
0 or as defined in error_items; also check nvme available-spare blocks
Returns:
tuple(int, str) - status and message
"""
if not drive.startswith('/dev/'):
drive = '/dev/%s' % drive
try:
smart = json.load(os.popen('smartctl -AHij %s' % drive))
except json.JSONDecodeError:
sys.stderr.write('ERR: Please upgrade smartctl to 7.0 or newer\n')
exit(STATUS_UNK)
if dot_get(smart, 'smartctl.exit_status') != 0:
return STATUS_UNK, 'UNK(%s): %s' % (drive, dot_get(
smart, 'smartctl.messages', [{}])[0].get('string'))
status, message = STATUS_OK, ''
attribs = dot_get(smart, 'ata_smart_attributes.table')
capacity = dot_get(smart, 'user_capacity.bytes')
nvme_log = smart.get('nvme_smart_health_information_log')
serial_num = smart.get('serial_number')
temperature = dot_get(smart, 'temperature.current')
tolerated = error_items.get(serial_num, {})
if not dot_get(smart, 'smart_status.passed'):
return STATUS_CRIT, 'CRIT: serial=%s smart_status not OK' % serial_num
if temperature > crit_temp:
return STATUS_CRIT, 'CRIT: %s serial=%s temp=%d exceeds threshold' % (
drive, serial_num, temperature)
elif temperature > warn_temp:
status = STATUS_WARN
message = 'WARN: %s serial=%s, temp=%d exceeds threshold' % (
drive, serial_num, temperature)
if nvme_log:
spare_threshold = max(nvme_log.get('available_spare_threshold', 0),
warn_spare)
available_spare = nvme_log.get('available_spare', 100)
if available_spare < spare_threshold:
status = STATUS_WARN
message = 'WARN: %s serial=%s low available_spare=%d' % (
drive, serial_num, available_spare)
if attribs:
values = {}
for item in attribs:
if item.get('id') in SMART_ATTR_CHECKS.keys():
values[item['name']] = dict(
val=dot_get(item, 'raw.value'),
level=SMART_ATTR_CHECKS[item['id']]['level'])
for key, item in values.items():
if item['val'] > tolerated.get(key, 0):
status = max(item['level'], status)
message = '%s: %s serial=%s %s: %d' % (
'CRIT' if status == STATUS_CRIT else 'WARN',
drive, serial_num, key, item['val'])
if status == STATUS_OK:
message = '%s OK: temp=%d serial=%s cap=%.3fT' % (
drive, temperature, serial_num, capacity / 1e12)
return status, message
def check_raid():
"""Check all RAID devices seen in /proc/mdstat
Returns:
tuple(int, str) - status and message
"""
status, message, count = STATUS_OK, '', 0
for array, state in mdstat.parse().get('devices', {}).items():
for element, values in state.get('disks').items():
if values.get('faulty'):
return STATUS_CRIT, 'CRIT: /dev/%s element=%s faulty' % (
array, element)
# unless monthly checkarray is running, warn if out of sync
action = open('/sys/block/%s/md/sync_action' % array, 'r').read()
if (False in dot_get(state, 'status.synced') or
state.get('resync')) and action.strip() != 'check':
status = STATUS_WARN
message = 'WARN: /dev/%s resync progress=%s finish=%s' % (
array, dot_get(state, 'resync.progress'),
dot_get(state, 'resync.finish'))
count += 1
if status == STATUS_OK:
message = 'RAID OK: %d array%s clean' % (count, 's'[:count - 1])
return status, message
def dot_get(_dict, path, default=None):
"""Fetch item from nested dict; path is a dot-delimited key into
the dictionary
Returns: obj if found, specified default otherwise
"""
for key in path.split('.'):
try:
_dict = _dict[key]
except KeyError:
return default
return _dict
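# Illustrative sketch of dot_get (comments only, not executed; the dict shape
# mirrors the smartctl JSON used above, but the values are made up):
#   dot_get({'smart_status': {'passed': True}}, 'smart_status.passed')  -> True
#   dot_get({'smart_status': {}}, 'smart_status.passed', default=False) -> False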
if __name__ == "__main__":
main()
|
venv/Lib/site-packages/ipywidgets/widgets/tests/test_docutils.py | ajayiagbebaku/NFL-Model | 2,015 | 12731143 | <reponame>ajayiagbebaku/NFL-Model
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from unittest import TestCase
from ipywidgets.widgets.docutils import doc_subst
class TestDocSubst(TestCase):
def test_substitution(self):
snippets = {'key': '62'}
@doc_subst(snippets)
def f():
""" Docstring with value {key} """
assert f.__doc__ == " Docstring with value 62 "
def test_unused_keys(self):
snippets = {'key': '62', 'other-key': 'unused'}
@doc_subst(snippets)
def f():
""" Docstring with value {key} """
assert f.__doc__ == " Docstring with value 62 "
|
zamiaai/skills/food/food.py | 0zAND1z/zamia-ai | 129 | 12731180 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2016, 2017, 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def get_data(k):
k.dte.set_prefixes([u''])
k.dte.dt('en', u"what is wine", u"An alcoholic drink made from fermented grape juice.")
k.dte.dt('de', u"was ist wein", u"Ein alkoholisches Getränk aus fermentiertem Traubensaft.")
|
language/orqa/utils/scann_utils_test.py | Xtuden-com/language | 1,199 | 12731190 | <reponame>Xtuden-com/language
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for scann_utils.py."""
import os
from language.orqa.utils import scann_utils
import numpy as np
import tensorflow.compat.v1 as tf
class ScannUtilsTest(tf.test.TestCase):
def test_scann_searcher(self):
temp_dir = self.create_tempdir().full_path
checkpoint_path = os.path.join(temp_dir, "dummy_db.ckpt")
dummy_db = np.random.uniform(size=[1024, 32]).astype(np.float32)
scann_utils.write_array_to_checkpoint("dummy_db", dummy_db, checkpoint_path)
dummy_queries = np.random.uniform(size=[4, 32]).astype(np.float32)
_, searcher = scann_utils.load_scann_searcher(
var_name="dummy_db", checkpoint_path=checkpoint_path, num_neighbors=10)
distance, index = searcher.search_batched(dummy_queries)
self.assertAllEqual(distance.numpy().shape, [4, 10])
self.assertAllEqual(index.numpy().shape, [4, 10])
if __name__ == "__main__":
tf.test.main()
|
examples/notebooks/solutions/cartpole_analytical_derivatives.py | pFernbach/crocoddyl | 322 | 12731194 | import numpy as np
def cartpole_analytical_derivatives(model, data, x, u=None):
if u is None:
u = model.unone
# Getting the state and control variables
y, th, ydot, thdot = x[0].item(), x[1].item(), x[2].item(), x[3].item()
f = u[0].item()
# Shortname for system parameters
m1, m2, l, g = model.m1, model.m2, model.l, model.g
s, c = np.sin(th), np.cos(th)
m = m1 + m2
mu = m1 + m2 * s**2
w = model.costWeights
# derivative of xddot by x, theta, xdot, thetadot
# derivative of thddot by x, theta, xdot, thetadot
data.Fx[:, :] = np.array(
[[0.0, (m2 * g * c * c - m2 * g * s * s - m2 * l * c * thdot) / mu, 0.0, -m2 * l * s / mu],
[
0.0, ((-s * f / l) + (m * g * c / l) - (m2 * c * c * thdot**2) + (m2 * s * s * thdot**2)) / mu, 0.0,
-2 * m2 * c * s * thdot
]])
# derivative of xddot and thddot by f
data.Fu[:] = np.array([1 / mu, c / (l * mu)])
# first derivative of data.cost by x, theta, xdot, thetadot
data.Lx[:] = np.array([y * w[2]**2, s * ((w[0]**2 - w[1]**2) * c + w[1]**2), ydot * w[3]**2, thdot * w[4]**2])
# first derivative of data.cost by f
data.Lu[:] = np.array([f * w[5]**2])
# second derivative of data.cost by x, theta, xdot, thetadot
data.Lxx[:] = np.array([w[2]**2, w[0]**2 * (c**2 - s**2) + w[1]**2 * (s**2 - c**2 + c), w[3]**2, w[4]**2])
# second derivative of data.cost by f
data.Luu[:] = np.array([w[5]**2])
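# Hypothetical usage sketch: the model/data pair is assumed to come from the
# cartpole action model built earlier in the notebook; names are illustrative.
#   data = model.createData()
#   x = np.array([0.0, 3.14, 0.0, 0.0])   # y, theta, ydot, thetadot
#   u = np.array([0.0])
#   cartpole_analytical_derivatives(model, data, x, u)
#   # fills data.Fx, data.Fu, data.Lx, data.Lu, data.Lxx and data.Luu in place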
|
observations/r/nsw74psid_a.py | hajime9652/observations | 199 | 12731205 | <reponame>hajime9652/observations
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def nsw74psid_a(path):
"""A Subset of the nsw74psid1 Data Set
The `nsw74psidA` data frame has 252 rows and 10 columns. See
`nsw74psid1` for more information.
This data frame contains the following columns:
trt
a numeric vector
age
a numeric vector
educ
a numeric vector
black
a numeric vector
hisp
a numeric vector
marr
a numeric vector
nodeg
a numeric vector
re74
a numeric vector
re75
a numeric vector
re78
a numeric vector
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `nsw74psid_a.csv`.
Returns:
Tuple of np.ndarray `x_train` with 252 rows and 10 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'nsw74psid_a.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/DAAG/nsw74psidA.csv'
maybe_download_and_extract(path, url,
save_file_name='nsw74psid_a.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
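# Minimal usage sketch (the cache path is illustrative; the CSV is downloaded
# on first use and re-read from disk afterwards):
#   x_train, metadata = nsw74psid_a('~/observations_data')
#   x_train.shape           # (252, 10)
#   metadata['columns']     # trt, age, educ, ...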
|
numba/tests/doc_examples/test_llvm_pass_timings.py | auderson/numba | 6,620 | 12731237 | # "magictoken" is used for markers as beginning and ending of example text.
import unittest
from numba.tests.support import captured_stdout, override_config
class DocsLLVMPassTimings(unittest.TestCase):
def test_pass_timings(self):
with override_config('LLVM_PASS_TIMINGS', True):
with captured_stdout() as stdout:
# magictoken.ex_llvm_pass_timings.begin
import numba
@numba.njit
def foo(n):
c = 0
for i in range(n):
for j in range(i):
c += j
return c
foo(10)
md = foo.get_metadata(foo.signatures[0])
print(md['llvm_pass_timings'])
# magictoken.ex_llvm_pass_timings.end
self.assertIn("Finalize object", stdout.getvalue())
if __name__ == '__main__':
unittest.main()
|
tests/unit/alertapi30/test_enums.py | ahertz/pyowm | 799 | 12731241 | <reponame>ahertz/pyowm<filename>tests/unit/alertapi30/test_enums.py
import unittest
from pyowm.alertapi30.enums import AlertChannelsEnum, OperatorsEnum, WeatherParametersEnum
class TestAlertChannelsEnum(unittest.TestCase):
def test_items(self):
alert_channels_enum = AlertChannelsEnum()
self.assertEqual(alert_channels_enum.items(),
[AlertChannelsEnum.OWM_API_POLLING])
class TestOperatorsEnum(unittest.TestCase):
def test_items(self):
operators_enum = OperatorsEnum()
self.assertEqual(sorted(operators_enum.items()), sorted([operators_enum.GREATER_THAN,
operators_enum.GREATER_THAN_EQUAL,
operators_enum.LESS_THAN,
operators_enum.LESS_THAN_EQUAL,
operators_enum.EQUAL,
operators_enum.NOT_EQUAL]))
class TestWeatherParametersEnum(unittest.TestCase):
def test_item(self):
weather_parameters_enum = WeatherParametersEnum()
self.assertEqual(sorted(weather_parameters_enum.items()),
sorted([weather_parameters_enum.CLOUDS, weather_parameters_enum.HUMIDITY,
weather_parameters_enum.PRESSURE, weather_parameters_enum.WIND_DIRECTION,
weather_parameters_enum.WIND_SPEED, weather_parameters_enum.TEMPERATURE]))
|
.venv/lib/python3.8/site-packages/seasonal/__init__.py | eo1989/VectorBTanalysis | 357 | 12731369 | # -*- coding: utf-8 -*-
"""
Seasonal Adjustment
==========================================
Robustly estimate and remove trend and periodicity in a noisy timeseries.
Functions
---------
fit_slope -- estimate slope of a timeseries
fit_seasons -- estimate periodicity and seasonal offsets for a timeseries
adjust_trend -- de-trend a timeseries
adjust_seasons -- de-trend and de-seasonalize a timeseries
periodogram -- compute a periodogram of the data
periodogram_peaks -- return a list of intervals containg high-scoring periods
Author
------
<NAME> (<EMAIL>)
"""
from .version import __version__, VERSION
from .trend import fit_trend
from .seasonal import fit_seasons, adjust_seasons, rsquared_cv
from .periodogram import periodogram, periodogram_peaks
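# Minimal usage sketch (assumes `series` is a 1-D numeric sequence; see
# seasonal.py and trend.py for the exact return values):
#   from seasonal import fit_seasons, adjust_seasons
#   seasons, trend = fit_seasons(series)
#   adjusted = adjust_seasons(series, seasons=seasons)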
|
ground_truth_labeling_jobs/multi_modal_parallel_sagemaker_labeling_workflows_with_step_functions/src/lambda_test/api_batch_create/main_test.py | jerrypeng7773/amazon-sagemaker-examples | 2,610 | 12731385 | import json
import os
import unittest
from unittest import TestCase
from unittest.mock import Mock, patch
from api_batch_create.main import lambda_handler
from botocore.exceptions import ClientError
from test_shared.mock_objects import InputTestData, TestContext
class TestCase(TestCase):
def mock_sagemaker_api_call(self, operation_name, kwarg):
if operation_name == "DescribeLabelingJob":
parsed_response = {"Error": {"Code": "500", "Message": "Error Uploading"}}
raise ClientError(parsed_response, operation_name)
@patch("shared.db.get_batch_metadata")
@patch("botocore.client.BaseClient._make_api_call", new=mock_sagemaker_api_call)
@patch.dict(os.environ, {"DEFAULT_WORKTEAM_ARN": "TEST"})
def test_lambda_handler_happyCase(self, get_batch_metadata_mock):
# Setup
event = Mock()
event.get.return_value = json.dumps(InputTestData.create_batch_request)
get_batch_metadata_mock.return_value = None
# Act
val = lambda_handler(event, TestContext())
# Assert
self.assertEqual(val["statusCode"], 200, "Unexpected status code returned")
if __name__ == "__main__":
unittest.main()
|
scvelo/pp.py | WeilerP/scvelo | 272 | 12731422 | <gh_stars>100-1000
from scvelo.preprocessing import * # noqa
|
tests/test_stringsifter.py | digitalsleuth/stringsifter | 523 | 12731457 | <reponame>digitalsleuth/stringsifter
# Copyright (C) 2019 FireEye, Inc. All Rights Reserved.
import os
import numpy
from io import StringIO
import stringsifter.rank_strings as rank_strings
test_strings = 'testing text\n' \
'nagain\n' \
'wheredoesitgo\n' \
'testing text\n' \
'nagain\n' \
'wheredoesitgo\n' \
'testing text\n' \
'nagain\n' \
'wheredoesitgo\n' \
'testing text\n'
def _get_rank_strings_stdoutput(capsys, kwargs):
rank_strings.main(**kwargs)
stdout = capsys.readouterr().out
return stdout.split('\n')[:-1]
def _get_kwargs(input_strings=test_strings, cutoff=None,
cutoff_score=numpy.nan, scores=False, batch=False):
return {'input_strings': StringIO(input_strings),
'cutoff': cutoff,
'cutoff_score': cutoff_score,
'scores': scores,
'batch': batch}
def test_string_length(featurizer):
test_set = [['', 0],
['foo', 3],
['everybody', 9]]
for s, true_len in test_set:
feat_len = featurizer.string_length(s)
assert feat_len == true_len
def test_default(capsys):
"""
test default processing flow: # strings in == # strings out
"""
output_lines = _get_rank_strings_stdoutput(capsys, _get_kwargs())
assert len(output_lines) == 10
def test_scores(capsys):
scores_value = True
output_lines = _get_rank_strings_stdoutput(
capsys, _get_kwargs(scores=scores_value))
split_output_lines = [output_line.split(",") for output_line
in output_lines]
previous_score = numpy.inf
for output_score, output_string in split_output_lines:
assert(type(output_string) is str)
float_output_score = float(output_score)
assert(type(float_output_score) is float)
assert(previous_score >= float_output_score)
previous_score = float_output_score
def test_cutoff(capsys):
cutoff_value = 5
output_lines = _get_rank_strings_stdoutput(
capsys, _get_kwargs(cutoff=cutoff_value))
assert len(output_lines) == cutoff_value
def test_cutoff_score(capsys):
scores_value = True
cutoff_score_value = 0.0
output_lines = _get_rank_strings_stdoutput(
capsys, _get_kwargs(scores=scores_value,
cutoff_score=cutoff_score_value))
split_output_lines = [output_line.split(",") for output_line
in output_lines]
for output_score, output_string in split_output_lines:
assert float(output_score) >= cutoff_score_value
def test_batch():
batch_value = 'tests/fixtures/'
batch_files = [batch_value + batch_file for batch_file in
os.listdir(batch_value)]
output_lines = rank_strings.main(
**_get_kwargs(batch=batch_value))
for batch_file in batch_files:
ranking_file = batch_file + '.ranked_strings'
assert os.path.isfile(ranking_file) is True
os.remove(ranking_file)
|
src/plugins_/in_file_completions/in_file_completions.py | jcberquist/sublimetext-cfml | 130 | 12731459 | from ... import utils, component_index
def get_script_completions(cfml_view):
completions = component_index.build_file_completions(cfml_view.view_metadata)[
utils.get_setting("cfml_cfc_completions")
]
completions = [
make_completion(completion, cfml_view.file_path)
for completion in completions["functions"]
]
if len(completions) > 0:
return cfml_view.CompletionList(completions, 0, False)
return None
def get_dot_completions(cfml_view):
if len(cfml_view.dot_context) == 0:
return None
for symbol in cfml_view.dot_context:
if not symbol.is_function:
if symbol.name == "this":
completions = component_index.build_file_completions(
cfml_view.view_metadata
)[utils.get_setting("cfml_cfc_completions")]
completions = [
make_completion(completion, cfml_view.file_path)
for completion in completions["functions"]
]
return cfml_view.CompletionList(completions, 0, False)
if len(cfml_view.dot_context) == 1 and symbol.name == "arguments":
current_function_body = utils.get_current_function_body(
cfml_view.view, cfml_view.position, component_method=False
)
if current_function_body:
function = cfml_view.get_function(current_function_body.begin() - 1)
meta = cfml_view.get_string_metadata(
cfml_view.view.substr(function[2]) + "{}"
)
if "functions" in meta and function[0] in meta["functions"]:
args = meta["functions"][function[0]]["meta"]["parameters"]
completions = [
(arg["name"] + "\targuments", arg["name"]) for arg in args
]
return cfml_view.CompletionList(completions, 0, False)
if (
symbol.name == "super"
and cfml_view.project_name
and cfml_view.view_metadata["extends"]
):
comp = component_index.component_index.get_completions_by_dot_path(
cfml_view.project_name, cfml_view.view_metadata["extends"]
)
if not comp and cfml_view.file_path:
extends_file_path = component_index.component_index.resolve_path(
cfml_view.project_name,
cfml_view.file_path,
cfml_view.view_metadata["extends"],
)
comp = component_index.component_index.get_completions_by_file_path(
cfml_view.project_name, extends_file_path
)
if comp:
completions = [
(completion.key + "\t" + completion.hint, completion.content)
for completion in comp["functions"]
]
return cfml_view.CompletionList(completions, 0, False)
return None
def make_completion(comp, file_path):
hint = "this"
if len(comp.file_path) > 0 and comp.file_path != file_path:
hint = comp.hint
return (comp.key + "\t" + hint, comp.content)
|
bin/commands/citations_csv.py | davidmcclure/open-syllabus-project | 220 | 12731481 |
import click
import csv
from osp.citations.models import Text, Citation, Text_Index
from peewee import fn
@click.group()
def cli():
pass
@cli.command()
@click.argument('out_file', type=click.File('w'))
@click.option('--min_count', default=100)
def fuzz(out_file, min_count):
"""
Write a CSV with title and fuzz.
"""
cols = [
'text_id',
'count',
'fuzz',
'surname',
'title',
]
writer = csv.DictWriter(out_file, cols)
writer.writeheader()
count = fn.count(Citation.id)
query = (
Text
.select(Text, count)
.join(Citation)
.where(Text.display==True)
.having(count > min_count)
.group_by(Text.id)
.naive()
)
texts = list(query)
# Sort on fuzz, high -> low.
for t in sorted(texts, key=lambda t: t.fuzz, reverse=True):
writer.writerow(dict(
text_id=t.id,
count=t.count,
fuzz=t.fuzz,
surname=t.surname,
title=t.title,
))
@cli.command()
@click.argument('out_file', type=click.File('w'))
@click.option('--depth', default=1000)
def ranks(out_file, depth):
"""
Write the top N text ranks.
"""
cols = [
'count',
'title',
'author',
]
writer = csv.DictWriter(out_file, cols)
writer.writeheader()
ranks = Text_Index.rank_texts()
ranks = sorted(ranks, key=lambda x: x['rank'])
for r in ranks[:depth]:
text = r['text']
writer.writerow(dict(
count=text.count,
title=text.title,
author=text.authors[0],
))
|
example/extensions/lib_custom_op/test_gemm.py | mozga-intel/incubator-mxnet | 211 | 12731555 | <filename>example/extensions/lib_custom_op/test_gemm.py
#!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=arguments-differ
# This test checks dynamic loading of custom library into MXNet
# and checks end to end compute of a simple 2D gemm custom op
import mxnet as mx
import os
#load library
if (os.name=='posix'):
path = os.path.abspath('libgemm_lib.so')
mx.library.load(path)
elif (os.name=='nt'):
path = os.path.abspath('libgemm_lib.dll')
mx.library.load(path)
a = mx.nd.array([[1,2,3],[4,5,6]])
b = mx.nd.array([[7],[8],[9]])
print("--------start ndarray compute---------")
print(mx.nd.my_gemm(a,b))
print("--------")
print(mx.nd.state_gemm(a,b,test_kw=100))
print("--------start symbolic compute--------")
s = mx.sym.Variable('s')
t = mx.sym.Variable('t')
c = mx.sym.my_gemm(s,t)
d = mx.sym.state_gemm(s,t,test_kw=200)
e = mx.sym.linalg.gemm2(s,t)
out_grad = mx.nd.ones((2,1))
# stateless
block = mx.gluon.nn.SymbolBlock(c,[s,t])
with mx.autograd.record():
a_ = mx.nd.array([[1,2,3],[4,5,6]])
b_ = mx.nd.array([[7],[8],[9]])
a_.attach_grad()
b_.attach_grad()
    # forward
out = block(a_,b_)
print(out)
print('+++++')
# backward
out.backward(out_grad)
print(a_.grad)
print(b_.grad)
print("-------")
# stateful
block2 = mx.gluon.nn.SymbolBlock(d,[s,t])
block2.hybridize(static_alloc=True, static_shape=True)
out2 = block2(a,b)
out2 = block2(a,b)
print(out2)
with mx.autograd.record():
a_ = mx.nd.array([[1,2,3],[4,5,6]])
b_ = mx.nd.array([[7],[8],[9]])
a_.attach_grad()
b_.attach_grad()
# forward
out2 = block2(a_,b_)
print('+++++')
# backward
out2.backward(out_grad)
print(a_.grad)
print(b_.grad)
print("-------")
# baseline
block3 = mx.gluon.nn.SymbolBlock(e,[s,t])
with mx.autograd.record():
a_ = mx.nd.array([[1,2,3],[4,5,6]])
b_ = mx.nd.array([[7],[8],[9]])
a_.attach_grad()
b_.attach_grad()
# forward
out3 = block3(a_,b_)
print(out3)
print('+++++')
# backward
out3.backward(out_grad)
print(a_.grad)
print(b_.grad)
|
lib/python2.7/site-packages/samba/netcmd/gpo.py | abankalarm/pth-toolkit | 480 | 12731560 | <gh_stars>100-1000
# implement samba_tool gpo commands
#
# Copyright <NAME> 2010
# Copyright <NAME> 2011-2012 <<EMAIL>>
#
# based on C implementation by <NAME> and <NAME>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import samba.getopt as options
import ldb
from samba.auth import system_session
from samba.netcmd import (
Command,
CommandError,
Option,
SuperCommand,
)
from samba.samdb import SamDB
from samba import dsdb
from samba.dcerpc import security
from samba.ndr import ndr_unpack
import samba.security
import samba.auth
from samba.auth import AUTH_SESSION_INFO_DEFAULT_GROUPS, AUTH_SESSION_INFO_AUTHENTICATED, AUTH_SESSION_INFO_SIMPLE_PRIVILEGES
from samba.netcmd.common import netcmd_finddc
from samba import policy
from samba import smb
import uuid
from samba.ntacls import dsacl2fsacl
from samba.dcerpc import nbt
from samba.net import Net
def samdb_connect(ctx):
'''make a ldap connection to the server'''
try:
ctx.samdb = SamDB(url=ctx.url,
session_info=system_session(),
credentials=ctx.creds, lp=ctx.lp)
except Exception, e:
raise CommandError("LDAP connection to %s failed " % ctx.url, e)
def attr_default(msg, attrname, default):
'''get an attribute from a ldap msg with a default'''
if attrname in msg:
return msg[attrname][0]
return default
def gpo_flags_string(value):
'''return gpo flags string'''
flags = policy.get_gpo_flags(value)
if not flags:
ret = 'NONE'
else:
ret = ' '.join(flags)
return ret
def gplink_options_string(value):
'''return gplink options string'''
options = policy.get_gplink_options(value)
if not options:
ret = 'NONE'
else:
ret = ' '.join(options)
return ret
def parse_gplink(gplink):
'''parse a gPLink into an array of dn and options'''
ret = []
a = gplink.split(']')
for g in a:
if not g:
continue
d = g.split(';')
if len(d) != 2 or not d[0].startswith("[LDAP://"):
raise RuntimeError("Badly formed gPLink '%s'" % g)
ret.append({ 'dn' : d[0][8:], 'options' : int(d[1])})
return ret
def encode_gplink(gplist):
'''Encode an array of dn and options into gPLink string'''
ret = ''
for g in gplist:
ret += "[LDAP://%s;%d]" % (g['dn'], g['options'])
return ret
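# Illustrative round trip for the two helpers above (the DN is made up;
# options is the integer bitmask built from dsdb.GPLINK_OPT_* flags):
#   parse_gplink("[LDAP://CN={GUID},CN=Policies,CN=System,DC=example,DC=com;0]")
#     -> [{'dn': 'CN={GUID},CN=Policies,CN=System,DC=example,DC=com', 'options': 0}]
#   encode_gplink() of that list reproduces the original string.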
def dc_url(lp, creds, url=None, dc=None):
'''If URL is not specified, return URL for writable DC.
If dc is provided, use that to construct ldap URL'''
if url is None:
if dc is None:
try:
dc = netcmd_finddc(lp, creds)
except Exception, e:
raise RuntimeError("Could not find a DC for domain", e)
url = 'ldap://' + dc
return url
def get_gpo_dn(samdb, gpo):
'''Construct the DN for gpo'''
dn = samdb.get_default_basedn()
dn.add_child(ldb.Dn(samdb, "CN=Policies,CN=System"))
dn.add_child(ldb.Dn(samdb, "CN=%s" % gpo))
return dn
def get_gpo_info(samdb, gpo=None, displayname=None, dn=None,
sd_flags=security.SECINFO_OWNER|security.SECINFO_GROUP|security.SECINFO_DACL|security.SECINFO_SACL):
'''Get GPO information using gpo, displayname or dn'''
policies_dn = samdb.get_default_basedn()
policies_dn.add_child(ldb.Dn(samdb, "CN=Policies,CN=System"))
base_dn = policies_dn
search_expr = "(objectClass=groupPolicyContainer)"
search_scope = ldb.SCOPE_ONELEVEL
if gpo is not None:
search_expr = "(&(objectClass=groupPolicyContainer)(name=%s))" % ldb.binary_encode(gpo)
if displayname is not None:
search_expr = "(&(objectClass=groupPolicyContainer)(displayname=%s))" % ldb.binary_encode(displayname)
if dn is not None:
base_dn = dn
search_scope = ldb.SCOPE_BASE
try:
msg = samdb.search(base=base_dn, scope=search_scope,
expression=search_expr,
attrs=['nTSecurityDescriptor',
'versionNumber',
'flags',
'name',
'displayName',
'gPCFileSysPath'],
controls=['sd_flags:1:%d' % sd_flags])
except Exception, e:
if gpo is not None:
mesg = "Cannot get information for GPO %s" % gpo
else:
mesg = "Cannot get information for GPOs"
raise CommandError(mesg, e)
return msg
def get_gpo_containers(samdb, gpo):
'''lists dn of containers for a GPO'''
search_expr = "(&(objectClass=*)(gPLink=*%s*))" % gpo
try:
msg = samdb.search(expression=search_expr, attrs=['gPLink'])
except Exception, e:
raise CommandError("Could not find container(s) with GPO %s" % gpo, e)
return msg
def del_gpo_link(samdb, container_dn, gpo):
'''delete GPO link for the container'''
# Check if valid Container DN and get existing GPlinks
try:
msg = samdb.search(base=container_dn, scope=ldb.SCOPE_BASE,
expression="(objectClass=*)",
attrs=['gPLink'])[0]
except Exception, e:
raise CommandError("Container '%s' does not exist" % container_dn, e)
found = False
gpo_dn = str(get_gpo_dn(samdb, gpo))
if 'gPLink' in msg:
gplist = parse_gplink(msg['gPLink'][0])
for g in gplist:
if g['dn'].lower() == gpo_dn.lower():
gplist.remove(g)
found = True
break
else:
raise CommandError("No GPO(s) linked to this container")
if not found:
raise CommandError("GPO '%s' not linked to this container" % gpo)
m = ldb.Message()
m.dn = container_dn
if gplist:
gplink_str = encode_gplink(gplist)
m['r0'] = ldb.MessageElement(gplink_str, ldb.FLAG_MOD_REPLACE, 'gPLink')
else:
m['d0'] = ldb.MessageElement(msg['gPLink'][0], ldb.FLAG_MOD_DELETE, 'gPLink')
try:
samdb.modify(m)
except Exception, e:
raise CommandError("Error removing GPO from container", e)
def parse_unc(unc):
'''Parse UNC string into a hostname, a service, and a filepath'''
    if not (unc.startswith('\\\\') or unc.startswith('//')):
raise ValueError("UNC doesn't start with \\\\ or //")
tmp = unc[2:].split('/', 2)
if len(tmp) == 3:
return tmp
tmp = unc[2:].split('\\', 2)
if len(tmp) == 3:
return tmp
raise ValueError("Invalid UNC string: %s" % unc)
def copy_directory_remote_to_local(conn, remotedir, localdir):
if not os.path.isdir(localdir):
os.mkdir(localdir)
r_dirs = [ remotedir ]
l_dirs = [ localdir ]
while r_dirs:
r_dir = r_dirs.pop()
l_dir = l_dirs.pop()
dirlist = conn.list(r_dir)
for e in dirlist:
r_name = r_dir + '\\' + e['name']
l_name = os.path.join(l_dir, e['name'])
if e['attrib'] & smb.FILE_ATTRIBUTE_DIRECTORY:
r_dirs.append(r_name)
l_dirs.append(l_name)
os.mkdir(l_name)
else:
data = conn.loadfile(r_name)
file(l_name, 'w').write(data)
def copy_directory_local_to_remote(conn, localdir, remotedir):
if not conn.chkpath(remotedir):
conn.mkdir(remotedir)
l_dirs = [ localdir ]
r_dirs = [ remotedir ]
while l_dirs:
l_dir = l_dirs.pop()
r_dir = r_dirs.pop()
dirlist = os.listdir(l_dir)
for e in dirlist:
l_name = os.path.join(l_dir, e)
r_name = r_dir + '\\' + e
if os.path.isdir(l_name):
l_dirs.append(l_name)
r_dirs.append(r_name)
conn.mkdir(r_name)
else:
data = file(l_name, 'r').read()
conn.savefile(r_name, data)
def create_directory_hier(conn, remotedir):
elems = remotedir.replace('/', '\\').split('\\')
path = ""
for e in elems:
path = path + '\\' + e
if not conn.chkpath(path):
conn.mkdir(path)
class cmd_listall(Command):
"""List all GPOs."""
synopsis = "%prog [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
}
takes_options = [
Option("-H", "--URL", help="LDB URL for database or target server", type=str,
metavar="URL", dest="H")
]
def run(self, H=None, sambaopts=None, credopts=None, versionopts=None):
self.lp = sambaopts.get_loadparm()
self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
self.url = dc_url(self.lp, self.creds, H)
samdb_connect(self)
msg = get_gpo_info(self.samdb, None)
for m in msg:
self.outf.write("GPO : %s\n" % m['name'][0])
self.outf.write("display name : %s\n" % m['displayName'][0])
self.outf.write("path : %s\n" % m['gPCFileSysPath'][0])
self.outf.write("dn : %s\n" % m.dn)
self.outf.write("version : %s\n" % attr_default(m, 'versionNumber', '0'))
self.outf.write("flags : %s\n" % gpo_flags_string(int(attr_default(m, 'flags', 0))))
self.outf.write("\n")
class cmd_list(Command):
"""List GPOs for an account."""
synopsis = "%prog <username> [options]"
takes_args = ['username']
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
}
takes_options = [
Option("-H", "--URL", help="LDB URL for database or target server",
type=str, metavar="URL", dest="H")
]
def run(self, username, H=None, sambaopts=None, credopts=None, versionopts=None):
self.lp = sambaopts.get_loadparm()
self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
self.url = dc_url(self.lp, self.creds, H)
samdb_connect(self)
try:
msg = self.samdb.search(expression='(&(|(samAccountName=%s)(samAccountName=%s$))(objectClass=User))' %
(ldb.binary_encode(username),ldb.binary_encode(username)))
user_dn = msg[0].dn
except Exception:
raise CommandError("Failed to find account %s" % username)
# check if its a computer account
try:
msg = self.samdb.search(base=user_dn, scope=ldb.SCOPE_BASE, attrs=['objectClass'])[0]
is_computer = 'computer' in msg['objectClass']
except Exception:
raise CommandError("Failed to find objectClass for user %s" % username)
session_info_flags = ( AUTH_SESSION_INFO_DEFAULT_GROUPS |
AUTH_SESSION_INFO_AUTHENTICATED )
# When connecting to a remote server, don't look up the local privilege DB
if self.url is not None and self.url.startswith('ldap'):
session_info_flags |= AUTH_SESSION_INFO_SIMPLE_PRIVILEGES
session = samba.auth.user_session(self.samdb, lp_ctx=self.lp, dn=user_dn,
session_info_flags=session_info_flags)
token = session.security_token
gpos = []
inherit = True
dn = ldb.Dn(self.samdb, str(user_dn)).parent()
while True:
msg = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE, attrs=['gPLink', 'gPOptions'])[0]
if 'gPLink' in msg:
glist = parse_gplink(msg['gPLink'][0])
for g in glist:
if not inherit and not (g['options'] & dsdb.GPLINK_OPT_ENFORCE):
continue
if g['options'] & dsdb.GPLINK_OPT_DISABLE:
continue
try:
sd_flags=security.SECINFO_OWNER|security.SECINFO_GROUP|security.SECINFO_DACL
gmsg = self.samdb.search(base=g['dn'], scope=ldb.SCOPE_BASE,
attrs=['name', 'displayName', 'flags',
'nTSecurityDescriptor'],
controls=['sd_flags:1:%d' % sd_flags])
secdesc_ndr = gmsg[0]['nTSecurityDescriptor'][0]
secdesc = ndr_unpack(security.descriptor, secdesc_ndr)
except Exception:
self.outf.write("Failed to fetch gpo object with nTSecurityDescriptor %s\n" %
g['dn'])
continue
try:
samba.security.access_check(secdesc, token,
security.SEC_STD_READ_CONTROL |
security.SEC_ADS_LIST |
security.SEC_ADS_READ_PROP)
except RuntimeError:
self.outf.write("Failed access check on %s\n" % msg.dn)
continue
# check the flags on the GPO
flags = int(attr_default(gmsg[0], 'flags', 0))
if is_computer and (flags & dsdb.GPO_FLAG_MACHINE_DISABLE):
continue
if not is_computer and (flags & dsdb.GPO_FLAG_USER_DISABLE):
continue
gpos.append((gmsg[0]['displayName'][0], gmsg[0]['name'][0]))
# check if this blocks inheritance
gpoptions = int(attr_default(msg, 'gPOptions', 0))
if gpoptions & dsdb.GPO_BLOCK_INHERITANCE:
inherit = False
if dn == self.samdb.get_default_basedn():
break
dn = dn.parent()
if is_computer:
msg_str = 'computer'
else:
msg_str = 'user'
self.outf.write("GPOs for %s %s\n" % (msg_str, username))
for g in gpos:
self.outf.write(" %s %s\n" % (g[0], g[1]))
class cmd_show(Command):
"""Show information for a GPO."""
synopsis = "%prog <gpo> [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
}
takes_args = ['gpo']
takes_options = [
Option("-H", help="LDB URL for database or target server", type=str)
]
def run(self, gpo, H=None, sambaopts=None, credopts=None, versionopts=None):
self.lp = sambaopts.get_loadparm()
self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
self.url = dc_url(self.lp, self.creds, H)
samdb_connect(self)
try:
msg = get_gpo_info(self.samdb, gpo)[0]
except Exception:
raise CommandError("GPO '%s' does not exist" % gpo)
try:
secdesc_ndr = msg['nTSecurityDescriptor'][0]
secdesc = ndr_unpack(security.descriptor, secdesc_ndr)
secdesc_sddl = secdesc.as_sddl()
except Exception:
secdesc_sddl = "<hidden>"
self.outf.write("GPO : %s\n" % msg['name'][0])
self.outf.write("display name : %s\n" % msg['displayName'][0])
self.outf.write("path : %s\n" % msg['gPCFileSysPath'][0])
self.outf.write("dn : %s\n" % msg.dn)
self.outf.write("version : %s\n" % attr_default(msg, 'versionNumber', '0'))
self.outf.write("flags : %s\n" % gpo_flags_string(int(attr_default(msg, 'flags', 0))))
self.outf.write("ACL : %s\n" % secdesc_sddl)
self.outf.write("\n")
class cmd_getlink(Command):
"""List GPO Links for a container."""
synopsis = "%prog <container_dn> [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
}
takes_args = ['container_dn']
takes_options = [
Option("-H", help="LDB URL for database or target server", type=str)
]
def run(self, container_dn, H=None, sambaopts=None, credopts=None,
versionopts=None):
self.lp = sambaopts.get_loadparm()
self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
self.url = dc_url(self.lp, self.creds, H)
samdb_connect(self)
try:
msg = self.samdb.search(base=container_dn, scope=ldb.SCOPE_BASE,
expression="(objectClass=*)",
attrs=['gPLink'])[0]
except Exception:
raise CommandError("Container '%s' does not exist" % container_dn)
if msg['gPLink']:
self.outf.write("GPO(s) linked to DN %s\n" % container_dn)
gplist = parse_gplink(msg['gPLink'][0])
for g in gplist:
msg = get_gpo_info(self.samdb, dn=g['dn'])
self.outf.write(" GPO : %s\n" % msg[0]['name'][0])
self.outf.write(" Name : %s\n" % msg[0]['displayName'][0])
self.outf.write(" Options : %s\n" % gplink_options_string(g['options']))
self.outf.write("\n")
else:
self.outf.write("No GPO(s) linked to DN=%s\n" % container_dn)
class cmd_setlink(Command):
"""Add or update a GPO link to a container."""
synopsis = "%prog <container_dn> <gpo> [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
}
takes_args = ['container_dn', 'gpo']
takes_options = [
Option("-H", help="LDB URL for database or target server", type=str),
Option("--disable", dest="disabled", default=False, action='store_true',
help="Disable policy"),
Option("--enforce", dest="enforced", default=False, action='store_true',
help="Enforce policy")
]
def run(self, container_dn, gpo, H=None, disabled=False, enforced=False,
sambaopts=None, credopts=None, versionopts=None):
self.lp = sambaopts.get_loadparm()
self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
self.url = dc_url(self.lp, self.creds, H)
samdb_connect(self)
gplink_options = 0
if disabled:
gplink_options |= dsdb.GPLINK_OPT_DISABLE
if enforced:
gplink_options |= dsdb.GPLINK_OPT_ENFORCE
# Check if valid GPO DN
try:
msg = get_gpo_info(self.samdb, gpo=gpo)[0]
except Exception:
raise CommandError("GPO '%s' does not exist" % gpo)
gpo_dn = str(get_gpo_dn(self.samdb, gpo))
# Check if valid Container DN
try:
msg = self.samdb.search(base=container_dn, scope=ldb.SCOPE_BASE,
expression="(objectClass=*)",
attrs=['gPLink'])[0]
except Exception:
raise CommandError("Container '%s' does not exist" % container_dn)
# Update existing GPlinks or Add new one
existing_gplink = False
if 'gPLink' in msg:
gplist = parse_gplink(msg['gPLink'][0])
existing_gplink = True
found = False
for g in gplist:
if g['dn'].lower() == gpo_dn.lower():
g['options'] = gplink_options
found = True
break
if found:
raise CommandError("GPO '%s' already linked to this container" % gpo)
else:
gplist.insert(0, { 'dn' : gpo_dn, 'options' : gplink_options })
else:
gplist = []
gplist.append({ 'dn' : gpo_dn, 'options' : gplink_options })
gplink_str = encode_gplink(gplist)
m = ldb.Message()
m.dn = ldb.Dn(self.samdb, container_dn)
if existing_gplink:
m['new_value'] = ldb.MessageElement(gplink_str, ldb.FLAG_MOD_REPLACE, 'gPLink')
else:
m['new_value'] = ldb.MessageElement(gplink_str, ldb.FLAG_MOD_ADD, 'gPLink')
try:
self.samdb.modify(m)
except Exception, e:
raise CommandError("Error adding GPO Link", e)
self.outf.write("Added/Updated GPO link\n")
cmd_getlink().run(container_dn, H, sambaopts, credopts, versionopts)
class cmd_dellink(Command):
"""Delete GPO link from a container."""
synopsis = "%prog <container_dn> <gpo> [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
}
takes_args = ['container', 'gpo']
takes_options = [
Option("-H", help="LDB URL for database or target server", type=str),
]
def run(self, container, gpo, H=None, sambaopts=None, credopts=None,
versionopts=None):
self.lp = sambaopts.get_loadparm()
self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
self.url = dc_url(self.lp, self.creds, H)
samdb_connect(self)
# Check if valid GPO
try:
get_gpo_info(self.samdb, gpo=gpo)[0]
except Exception:
raise CommandError("GPO '%s' does not exist" % gpo)
container_dn = ldb.Dn(self.samdb, container)
del_gpo_link(self.samdb, container_dn, gpo)
self.outf.write("Deleted GPO link.\n")
cmd_getlink().run(container_dn, H, sambaopts, credopts, versionopts)
class cmd_listcontainers(Command):
"""List all linked containers for a GPO."""
synopsis = "%prog <gpo> [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
}
takes_args = ['gpo']
takes_options = [
Option("-H", help="LDB URL for database or target server", type=str)
]
def run(self, gpo, H=None, sambaopts=None, credopts=None,
versionopts=None):
self.lp = sambaopts.get_loadparm()
self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
self.url = dc_url(self.lp, self.creds, H)
samdb_connect(self)
msg = get_gpo_containers(self.samdb, gpo)
if len(msg):
self.outf.write("Container(s) using GPO %s\n" % gpo)
for m in msg:
self.outf.write(" DN: %s\n" % m['dn'])
else:
self.outf.write("No Containers using GPO %s\n" % gpo)
class cmd_getinheritance(Command):
"""Get inheritance flag for a container."""
synopsis = "%prog <container_dn> [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
}
takes_args = ['container_dn']
takes_options = [
Option("-H", help="LDB URL for database or target server", type=str)
]
def run(self, container_dn, H=None, sambaopts=None, credopts=None,
versionopts=None):
self.lp = sambaopts.get_loadparm()
self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
self.url = dc_url(self.lp, self.creds, H)
samdb_connect(self)
try:
msg = self.samdb.search(base=container_dn, scope=ldb.SCOPE_BASE,
expression="(objectClass=*)",
attrs=['gPOptions'])[0]
except Exception:
raise CommandError("Container '%s' does not exist" % container_dn)
inheritance = 0
if 'gPOptions' in msg:
inheritance = int(msg['gPOptions'][0])
if inheritance == dsdb.GPO_BLOCK_INHERITANCE:
self.outf.write("Container has GPO_BLOCK_INHERITANCE\n")
else:
self.outf.write("Container has GPO_INHERIT\n")
class cmd_setinheritance(Command):
"""Set inheritance flag on a container."""
synopsis = "%prog <container_dn> <block|inherit> [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
}
takes_args = [ 'container_dn', 'inherit_state' ]
takes_options = [
Option("-H", help="LDB URL for database or target server", type=str)
]
def run(self, container_dn, inherit_state, H=None, sambaopts=None, credopts=None,
versionopts=None):
if inherit_state.lower() == 'block':
inheritance = dsdb.GPO_BLOCK_INHERITANCE
elif inherit_state.lower() == 'inherit':
inheritance = dsdb.GPO_INHERIT
else:
raise CommandError("Unknown inheritance state (%s)" % inherit_state)
self.lp = sambaopts.get_loadparm()
self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
self.url = dc_url(self.lp, self.creds, H)
samdb_connect(self)
try:
msg = self.samdb.search(base=container_dn, scope=ldb.SCOPE_BASE,
expression="(objectClass=*)",
attrs=['gPOptions'])[0]
except Exception:
raise CommandError("Container '%s' does not exist" % container_dn)
m = ldb.Message()
m.dn = ldb.Dn(self.samdb, container_dn)
if 'gPOptions' in msg:
m['new_value'] = ldb.MessageElement(str(inheritance), ldb.FLAG_MOD_REPLACE, 'gPOptions')
else:
m['new_value'] = ldb.MessageElement(str(inheritance), ldb.FLAG_MOD_ADD, 'gPOptions')
try:
self.samdb.modify(m)
except Exception, e:
raise CommandError("Error setting inheritance state %s" % inherit_state, e)
class cmd_fetch(Command):
"""Download a GPO."""
synopsis = "%prog <gpo> [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
}
takes_args = ['gpo']
takes_options = [
Option("-H", help="LDB URL for database or target server", type=str),
Option("--tmpdir", help="Temporary directory for copying policy files", type=str)
]
def run(self, gpo, H=None, tmpdir=None, sambaopts=None, credopts=None, versionopts=None):
self.lp = sambaopts.get_loadparm()
self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
# We need to know writable DC to setup SMB connection
if H and H.startswith('ldap://'):
dc_hostname = H[7:]
self.url = H
else:
dc_hostname = netcmd_finddc(self.lp, self.creds)
self.url = dc_url(self.lp, self.creds, dc=dc_hostname)
samdb_connect(self)
try:
msg = get_gpo_info(self.samdb, gpo)[0]
except Exception:
raise CommandError("GPO '%s' does not exist" % gpo)
# verify UNC path
unc = msg['gPCFileSysPath'][0]
try:
[dom_name, service, sharepath] = parse_unc(unc)
except ValueError:
raise CommandError("Invalid GPO path (%s)" % unc)
# SMB connect to DC
try:
conn = smb.SMB(dc_hostname, service, lp=self.lp, creds=self.creds)
except Exception:
raise CommandError("Error connecting to '%s' using SMB" % dc_hostname)
# Copy GPT
if tmpdir is None:
tmpdir = "/tmp"
if not os.path.isdir(tmpdir):
raise CommandError("Temoprary directory '%s' does not exist" % tmpdir)
localdir = os.path.join(tmpdir, "policy")
if not os.path.isdir(localdir):
os.mkdir(localdir)
gpodir = os.path.join(localdir, gpo)
if os.path.isdir(gpodir):
raise CommandError("GPO directory '%s' already exists, refusing to overwrite" % gpodir)
try:
os.mkdir(gpodir)
copy_directory_remote_to_local(conn, sharepath, gpodir)
except Exception, e:
# FIXME: Catch more specific exception
raise CommandError("Error copying GPO from DC", e)
self.outf.write('GPO copied to %s\n' % gpodir)
class cmd_create(Command):
"""Create an empty GPO."""
synopsis = "%prog <displayname> [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
}
takes_args = ['displayname']
takes_options = [
Option("-H", help="LDB URL for database or target server", type=str),
Option("--tmpdir", help="Temporary directory for copying policy files", type=str)
]
def run(self, displayname, H=None, tmpdir=None, sambaopts=None, credopts=None,
versionopts=None):
self.lp = sambaopts.get_loadparm()
self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
net = Net(creds=self.creds, lp=self.lp)
# We need to know writable DC to setup SMB connection
if H and H.startswith('ldap://'):
dc_hostname = H[7:]
self.url = H
flags = (nbt.NBT_SERVER_LDAP |
nbt.NBT_SERVER_DS |
nbt.NBT_SERVER_WRITABLE)
cldap_ret = net.finddc(address=dc_hostname, flags=flags)
else:
flags = (nbt.NBT_SERVER_LDAP |
nbt.NBT_SERVER_DS |
nbt.NBT_SERVER_WRITABLE)
cldap_ret = net.finddc(domain=self.lp.get('realm'), flags=flags)
dc_hostname = cldap_ret.pdc_dns_name
self.url = dc_url(self.lp, self.creds, dc=dc_hostname)
samdb_connect(self)
msg = get_gpo_info(self.samdb, displayname=displayname)
if msg.count > 0:
raise CommandError("A GPO already existing with name '%s'" % displayname)
# Create new GUID
guid = str(uuid.uuid4())
gpo = "{%s}" % guid.upper()
realm = cldap_ret.dns_domain
unc_path = "\\\\%s\\sysvol\\%s\\Policies\\%s" % (realm, realm, gpo)
# Create GPT
if tmpdir is None:
tmpdir = "/tmp"
if not os.path.isdir(tmpdir):
raise CommandError("Temporary directory '%s' does not exist" % tmpdir)
localdir = os.path.join(tmpdir, "policy")
if not os.path.isdir(localdir):
os.mkdir(localdir)
gpodir = os.path.join(localdir, gpo)
if os.path.isdir(gpodir):
raise CommandError("GPO directory '%s' already exists, refusing to overwrite" % gpodir)
try:
os.mkdir(gpodir)
os.mkdir(os.path.join(gpodir, "Machine"))
os.mkdir(os.path.join(gpodir, "User"))
gpt_contents = "[General]\r\nVersion=0\r\n"
            open(os.path.join(gpodir, "GPT.INI"), "w").write(gpt_contents)
        except Exception as e:
            raise CommandError("Error creating GPO files", e)
# Connect to DC over SMB
[dom_name, service, sharepath] = parse_unc(unc_path)
try:
conn = smb.SMB(dc_hostname, service, lp=self.lp, creds=self.creds)
        except Exception as e:
raise CommandError("Error connecting to '%s' using SMB" % dc_hostname, e)
self.samdb.transaction_start()
try:
# Add cn=<guid>
gpo_dn = get_gpo_dn(self.samdb, gpo)
m = ldb.Message()
m.dn = gpo_dn
m['a01'] = ldb.MessageElement("groupPolicyContainer", ldb.FLAG_MOD_ADD, "objectClass")
self.samdb.add(m)
# Add cn=User,cn=<guid>
m = ldb.Message()
m.dn = ldb.Dn(self.samdb, "CN=User,%s" % str(gpo_dn))
m['a01'] = ldb.MessageElement("container", ldb.FLAG_MOD_ADD, "objectClass")
self.samdb.add(m)
# Add cn=Machine,cn=<guid>
m = ldb.Message()
m.dn = ldb.Dn(self.samdb, "CN=Machine,%s" % str(gpo_dn))
m['a01'] = ldb.MessageElement("container", ldb.FLAG_MOD_ADD, "objectClass")
self.samdb.add(m)
# Get new security descriptor
ds_sd_flags = ( security.SECINFO_OWNER |
security.SECINFO_GROUP |
security.SECINFO_DACL )
msg = get_gpo_info(self.samdb, gpo=gpo, sd_flags=ds_sd_flags)[0]
ds_sd_ndr = msg['nTSecurityDescriptor'][0]
ds_sd = ndr_unpack(security.descriptor, ds_sd_ndr).as_sddl()
# Create a file system security descriptor
domain_sid = security.dom_sid(self.samdb.get_domain_sid())
sddl = dsacl2fsacl(ds_sd, domain_sid)
fs_sd = security.descriptor.from_sddl(sddl, domain_sid)
# Copy GPO directory
create_directory_hier(conn, sharepath)
# Set ACL
sio = ( security.SECINFO_OWNER |
security.SECINFO_GROUP |
security.SECINFO_DACL |
security.SECINFO_PROTECTED_DACL )
conn.set_acl(sharepath, fs_sd, sio)
# Copy GPO files over SMB
copy_directory_local_to_remote(conn, gpodir, sharepath)
m = ldb.Message()
m.dn = gpo_dn
m['a02'] = ldb.MessageElement(displayname, ldb.FLAG_MOD_REPLACE, "displayName")
m['a03'] = ldb.MessageElement(unc_path, ldb.FLAG_MOD_REPLACE, "gPCFileSysPath")
m['a05'] = ldb.MessageElement("0", ldb.FLAG_MOD_REPLACE, "versionNumber")
m['a07'] = ldb.MessageElement("2", ldb.FLAG_MOD_REPLACE, "gpcFunctionalityVersion")
m['a04'] = ldb.MessageElement("0", ldb.FLAG_MOD_REPLACE, "flags")
controls=["permissive_modify:0"]
self.samdb.modify(m, controls=controls)
except Exception:
self.samdb.transaction_cancel()
raise
else:
self.samdb.transaction_commit()
self.outf.write("GPO '%s' created as %s\n" % (displayname, gpo))
class cmd_del(Command):
"""Delete a GPO."""
synopsis = "%prog <gpo> [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
}
takes_args = ['gpo']
takes_options = [
Option("-H", help="LDB URL for database or target server", type=str),
]
def run(self, gpo, H=None, sambaopts=None, credopts=None,
versionopts=None):
self.lp = sambaopts.get_loadparm()
self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
# We need to know writable DC to setup SMB connection
if H and H.startswith('ldap://'):
dc_hostname = H[7:]
self.url = H
else:
dc_hostname = netcmd_finddc(self.lp, self.creds)
self.url = dc_url(self.lp, self.creds, dc=dc_hostname)
samdb_connect(self)
# Check if valid GPO
try:
msg = get_gpo_info(self.samdb, gpo=gpo)[0]
unc_path = msg['gPCFileSysPath'][0]
except Exception:
raise CommandError("GPO '%s' does not exist" % gpo)
# Connect to DC over SMB
[dom_name, service, sharepath] = parse_unc(unc_path)
try:
conn = smb.SMB(dc_hostname, service, lp=self.lp, creds=self.creds)
        except Exception as e:
raise CommandError("Error connecting to '%s' using SMB" % dc_hostname, e)
self.samdb.transaction_start()
try:
# Check for existing links
msg = get_gpo_containers(self.samdb, gpo)
if len(msg):
self.outf.write("GPO %s is linked to containers\n" % gpo)
for m in msg:
del_gpo_link(self.samdb, m['dn'], gpo)
self.outf.write(" Removed link from %s.\n" % m['dn'])
# Remove LDAP entries
gpo_dn = get_gpo_dn(self.samdb, gpo)
self.samdb.delete(ldb.Dn(self.samdb, "CN=User,%s" % str(gpo_dn)))
self.samdb.delete(ldb.Dn(self.samdb, "CN=Machine,%s" % str(gpo_dn)))
self.samdb.delete(gpo_dn)
# Remove GPO files
conn.deltree(sharepath)
except Exception:
self.samdb.transaction_cancel()
raise
else:
self.samdb.transaction_commit()
self.outf.write("GPO %s deleted.\n" % gpo)
class cmd_aclcheck(Command):
"""Check all GPOs have matching LDAP and DS ACLs."""
synopsis = "%prog [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
}
takes_options = [
Option("-H", "--URL", help="LDB URL for database or target server", type=str,
metavar="URL", dest="H")
]
def run(self, H=None, sambaopts=None, credopts=None, versionopts=None):
self.lp = sambaopts.get_loadparm()
self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
self.url = dc_url(self.lp, self.creds, H)
# We need to know writable DC to setup SMB connection
if H and H.startswith('ldap://'):
dc_hostname = H[7:]
self.url = H
else:
dc_hostname = netcmd_finddc(self.lp, self.creds)
self.url = dc_url(self.lp, self.creds, dc=dc_hostname)
samdb_connect(self)
msg = get_gpo_info(self.samdb, None)
for m in msg:
# verify UNC path
unc = m['gPCFileSysPath'][0]
try:
[dom_name, service, sharepath] = parse_unc(unc)
except ValueError:
raise CommandError("Invalid GPO path (%s)" % unc)
# SMB connect to DC
try:
conn = smb.SMB(dc_hostname, service, lp=self.lp, creds=self.creds)
except Exception:
raise CommandError("Error connecting to '%s' using SMB" % dc_hostname)
fs_sd = conn.get_acl(sharepath, security.SECINFO_OWNER | security.SECINFO_GROUP | security.SECINFO_DACL, security.SEC_FLAG_MAXIMUM_ALLOWED)
ds_sd_ndr = m['nTSecurityDescriptor'][0]
ds_sd = ndr_unpack(security.descriptor, ds_sd_ndr).as_sddl()
# Create a file system security descriptor
domain_sid = security.dom_sid(self.samdb.get_domain_sid())
expected_fs_sddl = dsacl2fsacl(ds_sd, domain_sid)
if (fs_sd.as_sddl(domain_sid) != expected_fs_sddl):
raise CommandError("Invalid GPO ACL %s on path (%s), should be %s" % (fs_sd.as_sddl(domain_sid), sharepath, expected_fs_sddl))
class cmd_gpo(SuperCommand):
"""Group Policy Object (GPO) management."""
subcommands = {}
subcommands["listall"] = cmd_listall()
subcommands["list"] = cmd_list()
subcommands["show"] = cmd_show()
subcommands["getlink"] = cmd_getlink()
subcommands["setlink"] = cmd_setlink()
subcommands["dellink"] = cmd_dellink()
subcommands["listcontainers"] = cmd_listcontainers()
subcommands["getinheritance"] = cmd_getinheritance()
subcommands["setinheritance"] = cmd_setinheritance()
subcommands["fetch"] = cmd_fetch()
subcommands["create"] = cmd_create()
subcommands["del"] = cmd_del()
subcommands["aclcheck"] = cmd_aclcheck()
|
python/ql/test/library-tests/frameworks/aiohttp/routing_test.py | timoles/codeql | 4,036 | 12731565 | # Inspired by https://docs.aiohttp.org/en/stable/web_quickstart.html
# and https://docs.aiohttp.org/en/stable/web_quickstart.html#resources-and-routes
from aiohttp import web
app = web.Application()
## ================================= ##
## Ways to specify routes / handlers ##
## ================================= ##
## Using coroutines
if True:
# `app.add_routes` with list
async def foo(request): # $ requestHandler
return web.Response(text="foo") # $ HttpResponse
async def foo2(request): # $ requestHandler
return web.Response(text="foo2") # $ HttpResponse
async def foo3(request): # $ requestHandler
return web.Response(text="foo3") # $ HttpResponse
app.add_routes([
web.get("/foo", foo), # $ routeSetup="/foo"
web.route("*", "/foo2", foo2), # $ routeSetup="/foo2"
web.get(path="/foo3", handler=foo3), # $ routeSetup="/foo3"
])
# using decorator
routes = web.RouteTableDef()
@routes.get("/bar") # $ routeSetup="/bar"
async def bar(request): # $ requestHandler
return web.Response(text="bar") # $ HttpResponse
@routes.route("*", "/bar2") # $ routeSetup="/bar2"
async def bar2(request): # $ requestHandler
return web.Response(text="bar2") # $ HttpResponse
@routes.get(path="/bar3") # $ routeSetup="/bar3"
async def bar3(request): # $ requestHandler
return web.Response(text="bar3") # $ HttpResponse
app.add_routes(routes)
# `app.router.add_get` / `app.router.add_route`
async def baz(request): # $ requestHandler
return web.Response(text="baz") # $ HttpResponse
app.router.add_get("/baz", baz) # $ routeSetup="/baz"
async def baz2(request): # $ requestHandler
return web.Response(text="baz2") # $ HttpResponse
app.router.add_route("*", "/baz2", baz2) # $ routeSetup="/baz2"
async def baz3(request): # $ requestHandler
return web.Response(text="baz3") # $ HttpResponse
app.router.add_get(path="/baz3", handler=baz3) # $ routeSetup="/baz3"
## Using classes / views
if True:
# see https://docs.aiohttp.org/en/stable/web_quickstart.html#organizing-handlers-in-classes
class MyCustomHandlerClass:
async def foo_handler(self, request): # $ MISSING: requestHandler
return web.Response(text="MyCustomHandlerClass.foo") # $ HttpResponse
my_custom_handler = MyCustomHandlerClass()
app.router.add_get("/MyCustomHandlerClass/foo", my_custom_handler.foo_handler) # $ routeSetup="/MyCustomHandlerClass/foo"
# Using `web.View`
# ---------------
# `app.add_routes` with list
class MyWebView1(web.View):
async def get(self): # $ requestHandler
return web.Response(text="MyWebView1.get") # $ HttpResponse
app.add_routes([
web.view("/MyWebView1", MyWebView1) # $ routeSetup="/MyWebView1"
])
# using decorator
routes = web.RouteTableDef()
@routes.view("/MyWebView2") # $ routeSetup="/MyWebView2"
class MyWebView2(web.View):
async def get(self): # $ requestHandler
return web.Response(text="MyWebView2.get") # $ HttpResponse
app.add_routes(routes)
# `app.router.add_view`
class MyWebView3(web.View):
async def get(self): # $ requestHandler
return web.Response(text="MyWebView3.get") # $ HttpResponse
app.router.add_view("/MyWebView3", MyWebView3) # $ routeSetup="/MyWebView3"
# no route-setup
class MyWebViewNoRoute(web.View):
async def get(self): # $ requestHandler
return web.Response(text="MyWebViewNoRoute.get") # $ HttpResponse
if len(__name__) < 0: # avoid running, but fool analysis to not consider dead code
# no explicit-view subclass (but route-setup)
class MyWebViewNoSubclassButRoute(somelib.someclass):
async def get(self): # $ requestHandler
return web.Response(text="MyWebViewNoSubclassButRoute.get") # $ HttpResponse
app.router.add_view("/MyWebViewNoSubclassButRoute", MyWebViewNoSubclassButRoute) # $ routeSetup="/MyWebViewNoSubclassButRoute"
# Apparently there is no enforcement that `add_view` is only for views, and vice-versa
# for `add_get` only being for async functions.
if True:
async def no_rules(request): # $ requestHandler
return web.Response(text="no_rules") # $ HttpResponse
app.router.add_view("/no_rules", no_rules) # $ routeSetup="/no_rules"
class NoRulesView(web.View):
async def get(self): # $ requestHandler
return web.Response(text="NoRulesView.get") # $ HttpResponse
app.router.add_get("/NoRulesView", NoRulesView) # $ routeSetup="/NoRulesView"
## =================== ##
## "Routed parameters" ##
## =================== ##
if True:
# see https://docs.aiohttp.org/en/stable/web_quickstart.html#variable-resources
async def matching(request: web.Request): # $ requestHandler
name = request.match_info['name']
number = request.match_info['number']
return web.Response(text="matching name={} number={}".format(name, number)) # $ HttpResponse
app.router.add_get(r"/matching/{name}/{number:\d+}", matching) # $ routeSetup="/matching/{name}/{number:\d+}"
## ======= ##
## subapps ##
## ======= ##
if True:
subapp = web.Application()
async def subapp_handler(request): # $ requestHandler
return web.Response(text="subapp_handler") # $ HttpResponse
subapp.router.add_get("/subapp_handler", subapp_handler) # $ routeSetup="/subapp_handler"
app.add_subapp("/my_subapp", subapp)
# similar behavior is possible with `app.add_domain`, but since I don't think we'll have special handling
# for any kind of subapps, I have not created a test for this.
## ================================ ##
## Constructing UrlDispatcher first ##
## ================================ ##
if True:
async def manual_dispatcher_instance(request): # $ requestHandler
return web.Response(text="manual_dispatcher_instance") # $ HttpResponse
url_dispatcher = web.UrlDispatcher()
url_dispatcher.add_get("/manual_dispatcher_instance", manual_dispatcher_instance) # $ routeSetup="/manual_dispatcher_instance"
subapp2 = web.Application(router=url_dispatcher)
app.add_subapp("/manual_dispatcher_instance_app", subapp2)
## =========== ##
## Run the app ##
## =========== ##
if __name__ == "__main__":
print("For auto-reloading server you can use:")
print(f"aiohttp-devtools runserver {__file__}")
print("after doing `pip install aiohttp-devtools`")
print()
web.run_app(app)
|
parallel/1-Async-Q-Learning/worker.py | g6ling/Pytorch-Cartpole | 116 | 12731652 | <filename>parallel/1-Async-Q-Learning/worker.py
import gym
import torch
import torch.multiprocessing as mp
import numpy as np
from model import QNet
from memory import Memory
from config import env_name, async_update_step, update_target, max_episode, device, log_interval, goal_score
class Worker(mp.Process):
def __init__(self, online_net, target_net, optimizer, global_ep, global_ep_r, res_queue, name):
super(Worker, self).__init__()
self.env = gym.make(env_name)
self.env.seed(500)
self.name = 'w%i' % name
self.global_ep, self.global_ep_r, self.res_queue = global_ep, global_ep_r, res_queue
self.online_net, self.target_net, self.optimizer = online_net, target_net, optimizer
def record(self, score, epsilon, loss):
with self.global_ep.get_lock():
self.global_ep.value += 1
with self.global_ep_r.get_lock():
if self.global_ep_r.value == 0.:
self.global_ep_r.value = score
else:
self.global_ep_r.value = 0.99 * self.global_ep_r.value + 0.01 * score
if self.global_ep.value % log_interval == 0:
print('{} , {} episode | score: {:.2f}, | epsilon: {:.2f}'.format(
self.name, self.global_ep.value, self.global_ep_r.value, epsilon))
self.res_queue.put([self.global_ep.value, self.global_ep_r.value, loss])
def update_target_model(self):
self.target_net.load_state_dict(self.online_net.state_dict())
def get_action(self, state, epsilon):
if np.random.rand() <= epsilon:
return self.env.action_space.sample()
else:
return self.target_net.get_action(state)
def run(self):
epsilon = 1.0
steps = 0
while self.global_ep.value < max_episode:
if self.global_ep_r.value > goal_score:
break
done = False
score = 0
state = self.env.reset()
state = torch.Tensor(state).to(device)
state = state.unsqueeze(0)
memory = Memory(async_update_step)
while not done:
steps += 1
action = self.get_action(state, epsilon)
next_state, reward, done, _ = self.env.step(action)
next_state = torch.Tensor(next_state)
next_state = next_state.unsqueeze(0)
mask = 0 if done else 1
reward = reward if not done or score == 499 else -1
action_one_hot = np.zeros(2)
action_one_hot[action] = 1
memory.push(state, next_state, action_one_hot, reward, mask)
score += reward
state = next_state
epsilon -= 0.00001
epsilon = max(epsilon, 0.1)
if len(memory) == async_update_step or done:
batch = memory.sample()
loss = QNet.train_model(self.online_net, self.target_net, self.optimizer, batch)
memory = Memory(async_update_step)
if done:
self.record(score, epsilon, loss)
break
if steps % update_target == 0:
self.update_target_model()
score = score if score == 500.0 else score + 1
self.res_queue.put(None)
|
models/image_recognition/tensorflow/resnet50v1_5/training/mlperf_compliance/_ssd_tags.py | yangw1234/models-1 | 567 | 12731674 | # Copyright 2018 MLBenchmark Group. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keys which only appear in SSD.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Pretrained classifer model
BACKBONE = "backbone"
FEATURE_SIZES = "feature_sizes"
STEPS = "steps"
SCALES = "scales"
ASPECT_RATIOS = "aspect_ratios"
NUM_DEFAULTS_PER_CELL = "num_defaults_per_cell"
LOC_CONF_OUT_CHANNELS = "loc_conf_out_channels"
NUM_DEFAULTS = "num_default_boxes"
# Overlap threshold for NMS
NMS_THRESHOLD = "nms_threshold"
NMS_MAX_DETECTIONS = "nms_max_detections"
# data pipeline
NUM_CROPPING_ITERATIONS = "num_cropping_iterations"
RANDOM_FLIP_PROBABILITY = "random_flip_probability"
DATA_NORMALIZATION_MEAN = "data_normalization_mean"
DATA_NORMALIZATION_STD = "data_normalization_std"
|
tools/Vitis-AI-Quantizer/vai_q_pytorch/pytorch_binding/pytorch_nndct/utils/__init__.py | bluetiger9/Vitis-AI | 848 | 12731704 | from .torch_op_attr import *
from .nndct2torch_op_map import *
from .op_register import *
from .torch_const import *
from .tensor_utils import *
from .schema import *
|
tests/test_action_interface_vrrp_vip.py | cognifloyd/stackstorm-vdx | 164 | 12731732 | <gh_stars>100-1000
"""Generated test for checking pynos based actions
"""
import xml.etree.ElementTree as ET
from st2tests.base import BaseActionTestCase
from interface_vrrp_vip import interface_vrrp_vip
__all__ = [
'TestInterfaceVrrpVip'
]
class MockCallback(object): # pylint:disable=too-few-public-methods
"""Class to hold mock callback and result
"""
returned_data = None
def callback(self, call, **kwargs): # pylint:disable=unused-argument
"""Mock callback method
"""
xml_result = ET.tostring(call)
self.returned_data = xml_result
class TestInterfaceVrrpVip(BaseActionTestCase):
"""Test holder class
"""
action_cls = interface_vrrp_vip
def test_action(self):
"""Generated test to check action
"""
action = self.get_action_instance()
mock_callback = MockCallback()
kwargs = {
'username': '',
'rbridge_id': '224',
'ip': '',
'vrid': '10',
'vip': '10.9.2.1',
'int_type': 'gigabitethernet',
'password': '',
'port': '22',
'name': '10/0/1',
'test': True,
'callback': mock_callback.callback
}
action.run(**kwargs)
expected_xml = (
'<config><interface xmlns="urn:brocade.com:mgmt:brocade-interface"'
'><gigabitethernet><name>10/0/1</name><vrrp xmlns="urn:brocade.com'
':mgmt:brocade-vrrp"><vrid>10</vrid><version>3</version><virtual-i'
'p><virtual-ipaddr>10.9.2.1</virtual-ipaddr></virtual-ip></vrrp></'
'gigabitethernet></interface></config>'
)
        self.assertEqual(expected_xml, mock_callback.returned_data)
|
tests/test_dithering.py | hamidralmasi/byteps | 3,361 | 12731753 | # Copyright 2020 Amazon Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import copy
import itertools
import unittest
import byteps.mxnet as bps
import mxnet as mx
import mxnet.ndarray as nd
import numpy as np
from gluoncv.model_zoo import get_model
from mxnet import autograd, gluon
from numba import jit
from parameterized import parameterized
from tqdm import tqdm
from meta_test import MetaTest
from utils import bernoulli, fake_data
@jit(nopython=True)
def round_next_pow2(v):
v -= np.uint32(1)
v |= v >> np.uint32(1)
v |= v >> np.uint32(2)
v |= v >> np.uint32(4)
v |= v >> np.uint32(8)
v |= v >> np.uint32(16)
v += np.uint32(1)
return v
def dithering(x, k, state, partition='linear', norm="max"):
y = x.flatten()
if norm == "max":
scale = np.max(np.abs(y))
elif norm == "l2":
scale = np.linalg.norm(y.astype(np.float64), ord=2)
else:
raise ValueError("Unsupported normalization")
y /= scale
sign = np.sign(y)
y = np.abs(y)
    # stochastic rounding
if partition == 'linear':
y *= k
low = np.floor(y)
p = y - low # whether to ceil
y = low + bernoulli(p, state)
y /= k
elif partition == "natural":
y *= 2**(k-1)
low = round_next_pow2((np.ceil(y).astype(np.uint32))) >> 1
length = copy.deepcopy(low)
length[length == 0] = 1
p = (y - low) / length
y = low + length * bernoulli(p, state)
y = y.astype(np.float32)
y /= 2**(k-1)
else:
raise ValueError("Unsupported partition")
y *= sign
y *= scale
return y.reshape(x.shape)
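# A minimal, hedged sketch (not part of the original test) of calling the NumPy
# reference implementation above on a small tensor. The RNG state layout mirrors
# what the test below passes to `dithering`; this helper is never invoked by the
# test runner.
def _dithering_reference_demo(seed=2020):
    state = np.array([seed, seed], dtype=np.uint64)  # same state layout as rngs[i] below
    x = np.random.randn(4, 4).astype(np.float32)
    # 8 quantization levels, linear partition, max-norm scaling
    return dithering(x, 8, state, partition='linear', norm='max')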
class DitheringTestCase(unittest.TestCase, metaclass=MetaTest):
    @parameterized.expand(itertools.product([2, 4, 8], ["linear", "natural"], ["max", "l2"], np.random.randint(0, 2020, size=3).tolist()))
def test_dithering(self, k, ptype, ntype, seed):
ctx = mx.gpu(0)
net = get_model("resnet18_v2")
net.initialize(mx.init.Xavier(), ctx=ctx)
net.summary(nd.ones((1, 3, 224, 224), ctx=ctx))
# hyper-params
batch_size = 32
optimizer_params = {'momentum': 0, 'wd': 0,
'learning_rate': 0.01}
compression_params = {
"compressor": "dithering",
"k": k,
"partition": ptype,
"normalize": ntype,
"seed": seed
}
print(compression_params)
trainer = bps.DistributedTrainer(net.collect_params(
), "sgd", optimizer_params, compression_params=compression_params)
loss_fn = gluon.loss.SoftmaxCrossEntropyLoss()
train_data = fake_data(batch_size=batch_size)
params = {}
rngs = {}
rngs_s = {}
for i, param in enumerate(trainer._params):
if param.grad_req != 'null':
params[i] = param._data[0].asnumpy()
rngs[i] = np.array([seed, seed], dtype=np.uint64)
rngs_s[i] = np.array([seed, seed], dtype=np.uint64)
for it, batch in tqdm(enumerate(train_data)):
data = batch[0].as_in_context(ctx)
label = batch[1].as_in_context(ctx)
with autograd.record():
output = net(data)
loss = loss_fn(output, label)
loss.backward()
gs = {}
xs = {}
for i, param in enumerate(trainer._params):
if param.grad_req != 'null':
gs[i] = param._grad[0].asnumpy()
xs[i] = param._data[0].asnumpy()
trainer.step(batch_size)
for i, param in enumerate(trainer._params):
if param.grad_req != "null":
g = gs[i] / (batch_size * bps.size())
c = dithering(g, k, rngs[i], ptype, ntype)
cs = dithering(c, k, rngs_s[i], ptype, ntype)
c = cs
params[i] -= optimizer_params["learning_rate"] * c
np_g = c.flatten()
mx_g = param._grad[0].asnumpy().flatten()
if not np.allclose(np_g, mx_g, atol=np.finfo(np.float32).eps):
diff = np.abs(np_g - mx_g)
print("np", np_g)
print("mx", mx_g)
print("diff", diff)
print("max diff", np.max(diff))
idx = np.nonzero(diff > 1e-5)
print("idx", idx, np_g[idx], mx_g[idx])
input()
cnt = 0
tot = 0
for i, param in enumerate(trainer._params):
if param.grad_req != "null":
x = param._data[0].asnumpy()
tot += len(x.flatten())
if not np.allclose(params[i], x, atol=np.finfo(np.float32).eps):
diff = np.abs(x.flatten() - params[i].flatten())
idx = np.where(diff > np.finfo(np.float32).eps)
cnt += len(idx[0])
assert cnt == 0, "false/tot=%d/%d=%f" % (cnt, tot, cnt/tot)
if __name__ == '__main__':
unittest.main()
|
html_parsing/Wikipedia__Timeline_of_release_years/Mortal_Kombat__series.py | DazEB2/SimplePyScripts | 117 | 12731778 | <reponame>DazEB2/SimplePyScripts<filename>html_parsing/Wikipedia__Timeline_of_release_years/Mortal_Kombat__series.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# Timeline of game releases
from common import get_parsed_two_column_wikitable
def is_match_table_func(table) -> bool:
return 'TIMELINE OF RELEASE YEARS' in table.caption.text.strip().upper()
url = 'https://en.wikipedia.org/wiki/Mortal_Kombat'
for year, name in get_parsed_two_column_wikitable(url, is_match_table_func):
print(f'{year}: {name}')
# 1992: Mortal Kombat
# 1993: Mortal Kombat II
# 1995: Mortal Kombat 3
# 1995: Ultimate Mortal Kombat 3
# 1996: Mortal Kombat Trilogy
# 1997: Mortal Kombat Mythologies: Sub-Zero
# 1997: Mortal Kombat 4
# 1999: Mortal Kombat Gold
# 2000: Mortal Kombat: Special Forces
# 2002: Mortal Kombat: Deadly Alliance
# 2004: Mortal Kombat: Deception
# 2005: Mortal Kombat: Shaolin Monks
# 2006: Mortal Kombat: Armageddon
# 2006: Mortal Kombat: Unchained
# 2007: Ultimate Mortal Kombat
# 2008: Mortal Kombat vs. DC Universe
# 2011: Mortal Kombat
# 2011: Mortal Kombat Arcade Kollection
# 2012: Mortal Kombat: Komplete Edition
# 2015: Mortal Kombat X
# 2016: Mortal Kombat XL
# 2019: Mortal Kombat 11
|
src/chime_dash/app/components/intro.py | riverkoko/chime | 222 | 12731832 | <gh_stars>100-1000
"""components/intro
initializes the leading text as of right now
currently both classes handle control and view this should be separated
with the logic for dynamic text moving to services.
"""
from typing import List
from dash.development.base_component import ComponentMeta
from dash_core_components import Markdown
from chime_dash.app.components.base import Component
class Intro(Component):
"""
"""
localization_file = "intro.yml"
def get_html(self) -> List[ComponentMeta]: # pylint: disable=W0613
"""Initializes the header dash html
"""
return [Markdown(id="intro", dangerously_allow_html=True, dedent=True)]
def build(self, model, pars):
result = None
if model and pars:
intro = self.content
infected_population_warning_str = (
intro["infected-population-warning"]
if model.infected > pars.population
else ""
)
mitigation_rt_str = (
intro["mitigation-rt-less-than-1"]
if model.r_t < 1
else intro["mitigation-rt-more-than-equal-1"]
)
result = intro["description-total-infection"].format(
total_infections=model.infected,
current_hosp=pars.current_hospitalized,
hosp_rate=pars.hospitalized.rate,
S=pars.population,
market_share=pars.market_share
) + "\n\n" + infected_population_warning_str + "\n\n" + intro["description-doubling-time"].format(
doubling_time=pars.doubling_time,
recovery_days=pars.infectious_days,
r_naught=model.r_naught,
daily_growth=model.daily_growth_rate * 100.0
) + "\n\n" + mitigation_rt_str.format(
relative_contact_rate=pars.relative_contact_rate,
doubling_time_t=model.doubling_time_t,
r_t=model.r_t,
daily_growth_t=model.daily_growth_rate_t * 100.0
)
return [result]
|
tests/test_models.py | kaaass/BGmi | 483 | 12731838 | <filename>tests/test_models.py
from bgmi.lib.models import Filter
from bgmi.website.model import Episode
def test_include():
e = Filter(include="2,3,5").apply_on_episodes(
[
Episode(name="1", title="1", download="1", episode=1),
Episode(name="1", title="1", download="2", episode=1),
Episode(name="2", title="2", download="3", episode=2),
Episode(name="2", title="2", download="4", episode=2),
Episode(name="3", title="3", download="5", episode=3),
Episode(name="5", title="5", download="6", episode=5),
]
)
assert len(e) == 4, e
assert {x.download for x in e} == set("3456")
def test_exclude():
e = Filter(exclude="2,3,5").apply_on_episodes(
[
Episode(title="1", download="1", episode=1),
Episode(title="1", download="2", episode=2),
Episode(title="2", download="3", episode=1),
Episode(title="2", download="4", episode=2),
Episode(title="3", download="5", episode=3),
Episode(title="5", download="6", episode=5),
]
)
assert len(e) == 2, e
assert {x.download for x in e} == {"1", "2"}
|
release/stubs.min/System/__init___parts/ResolveEventArgs.py | htlcnn/ironpython-stubs | 182 | 12731863 | <filename>release/stubs.min/System/__init___parts/ResolveEventArgs.py
class ResolveEventArgs(EventArgs):
"""
 Provides data for loader resolution events, such as the System.AppDomain.TypeResolve, System.AppDomain.ResourceResolve, System.AppDomain.ReflectionOnlyAssemblyResolve, and System.AppDomain.AssemblyResolve events.
ResolveEventArgs(name: str)
ResolveEventArgs(name: str,requestingAssembly: Assembly)
"""
@staticmethod
def __new__(self,name,requestingAssembly=None):
"""
__new__(cls: type,name: str)
__new__(cls: type,name: str,requestingAssembly: Assembly)
"""
pass
Name=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the name of the item to resolve.
Get: Name(self: ResolveEventArgs) -> str
"""
RequestingAssembly=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the assembly whose dependency is being resolved.
Get: RequestingAssembly(self: ResolveEventArgs) -> Assembly
"""
|
tridet/data/augmentations/color_transform.py | flipson/dd3d | 227 | 12731923 | # Copyright 2021 Toyota Research Institute. All rights reserved.
# pylint: disable=unused-argument
from fvcore.transforms.transform import BlendTransform
from detectron2.data.transforms import RandomBrightness as _RandomBrightness
from detectron2.data.transforms import RandomContrast as _RandomContrast
from detectron2.data.transforms import RandomSaturation as _RandomSaturation
def apply_no_op_intrinsics(blend_tfm, intrinsics):
return intrinsics
def apply_no_op_depth(blend_tfm, depth):
return depth
def apply_no_op_box3d(blend_tfm, box3d):
return box3d
# (dennis.park) Augment ResizeTransform to handle intrinsics, depth
BlendTransform.register_type("intrinsics", apply_no_op_intrinsics)
BlendTransform.register_type("depth", apply_no_op_depth)
BlendTransform.register_type("box3d", apply_no_op_box3d)
class RandomContrast(_RandomContrast):
def get_transform(self, image):
tfm = super().get_transform(image)
return BlendTransform(tfm.src_image, tfm.src_weight, tfm.dst_weight)
class RandomBrightness(_RandomBrightness):
def get_transform(self, image):
tfm = super().get_transform(image)
return BlendTransform(tfm.src_image, tfm.src_weight, tfm.dst_weight)
class RandomSaturation(_RandomSaturation):
def get_transform(self, image):
tfm = super().get_transform(image)
return BlendTransform(tfm.src_image, tfm.src_weight, tfm.dst_weight)
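# Hedged usage sketch (not part of the original module): after the register_type
# calls above, the BlendTransform returned by these augmentations also exposes
# apply_intrinsics / apply_depth / apply_box3d as no-ops, so camera intrinsics can
# ride through the same augmentation pipeline unchanged. The image and intrinsics
# below are synthetic placeholders.
if __name__ == "__main__":
    import numpy as np

    aug = RandomBrightness(0.9, 1.1)  # (intensity_min, intensity_max), detectron2-style
    image = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)
    tfm = aug.get_transform(image)
    brightened = tfm.apply_image(image)
    K = np.array([[500.0, 0.0, 32.0], [0.0, 500.0, 32.0], [0.0, 0.0, 1.0]])
    assert np.allclose(tfm.apply_intrinsics(K), K)  # intrinsics pass through untouched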
|
datapackage_pipelines/generators/schedules.py | gperonato/datapackage-pipelines | 109 | 12731981 | SCHEDULE_NONE = None
SCHEDULE_HOURLY = '0 * * * *'
SCHEDULE_DAILY = '0 0 * * *'
SCHEDULE_WEEKLY = '0 0 * * 0'
SCHEDULE_MONTHLY = '0 0 1 * *'
SCHEDULE_YEARLY = '0 0 1 1 *'
|
indra/util/perm_cache.py | zebulon2/indra | 136 | 12731999 | <gh_stars>100-1000
__all__ = ['perm_cache']
import json
import pickle
from functools import update_wrapper
from os.path import exists
def perm_cache(cache_type='pkl', cache_file=None):
class PermCache(object):
_cache_type = cache_type
_cache_file = cache_file
def __init__(self, func):
if self._cache_type not in ['pkl', 'json']:
raise ValueError("Invalid cache type: %s" % self._cache_type)
self._cache_type = self._cache_type
self.func = func
if self._cache_file is None:
self._cache_file = (func.__code__.co_filename
.replace('.py', '.' + self.func.__name__))
self._cache_file += '.cache'
if self._cache_file.endswith('.py'):
self._cache_file = self._cache_file.replace('.py',
'.' + self._cache_type)
else:
self._cache_file += '.' + self._cache_type
if exists(self._cache_file):
if self._cache_type == 'pkl':
with open(self._cache_file, 'rb') as f:
self.cache = pickle.load(f)
elif self._cache_type == 'json':
with open(self._cache_file, 'r') as f:
self.cache = json.load(f)
else:
self.cache = {}
self.__cache_info = dict.fromkeys(['added', 'read', 'total'], 0)
update_wrapper(self, func)
return
def __call__(self, *args, **kwargs):
key = ' '.join(args) \
+ ' '.join(['%s=%s' % (k, v) for k, v in kwargs.items()])
self.__cache_info['total'] += 1
try:
res = self.cache[key]
self.__cache_info['read'] += 1
except KeyError:
res = self.func(*args, **kwargs)
self.cache[key] = res
self.__cache_info['added'] += 1
return res
def cache_info(self):
return self.__cache_info.copy()
def stash_cache(self):
if self._cache_type == 'pkl':
with open(self._cache_file, 'wb') as f:
pickle.dump(self.cache, f)
elif self._cache_type == 'json':
with open(self._cache_file, 'w') as f:
json.dump(self.cache, f, indent=2)
return
return PermCache
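# Hedged usage sketch (not part of the original module): decorate a pure
# function, call it twice, then persist the cache next to the defining file.
# The function name and arguments are illustrative only.
if __name__ == '__main__':
    @perm_cache(cache_type='json')
    def greet(name):
        return 'Hello, %s!' % name

    print(greet('world'))      # computed, then added to the cache
    print(greet('world'))      # served from the in-memory cache
    print(greet.cache_info())  # counts: added=1, read=1, total=2
    greet.stash_cache()        # writes <this_file>.greet.cache.json alongside the module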
|
toy/visualise-dataset.py | Cyanogenoid/vqa-counting | 205 | 12732035 | <reponame>Cyanogenoid/vqa-counting
import sys
import random
import data
import torch
import matplotlib.pyplot as plt
import matplotlib.patches as patches
plt.rc('text', usetex=True)
plt.rc('font', family='serif', serif='Times')
q = float(sys.argv[1])
# guessing seeds for nice looking datasets
torch.manual_seed(int(2 * q) + 10)
random.seed(int(2 * q) + 16)
cm = plt.cm.coolwarm
params = [
(0.05, q),
(0.1, q),
(0.2, q),
(0.3, q),
(0.4, q),
(0.5, q),
]
n = 0
plt.figure(figsize=(4, 11.5), dpi=200)
for coord, noise in params:
dataset = data.ToyTask(10, coord, noise)
a, b, c = next(iter(dataset))
ax_true = plt.subplot(len(params), 2, n + 1, aspect='equal')
ax_data = plt.subplot(len(params), 2, n + 2, aspect='equal')
for i, (weight, box) in enumerate(zip(a, b)):
x = box[0]
y = box[1]
w = box[2] - box[0]
h = box[3] - box[1]
config = {
'alpha': 0.3,
'linewidth': 0,
}
ax_true.add_patch(patches.Rectangle(
(x, y), w, h,
**config,
color=cm(1 - float(i < c))
))
ax_data.add_patch(patches.Rectangle(
(x, y), w, h,
**config,
color=cm(1 - weight)
))
ax_true.axes.get_xaxis().set_visible(False)
ax_data.axes.get_xaxis().set_visible(False)
ax_true.axes.get_yaxis().set_major_locator(plt.NullLocator())
ax_data.axes.get_yaxis().set_visible(False)
ax_true.set_title('Ground truth: {}'.format(c))
ax_data.set_title('Data')
ax_true.set_ylabel('$l = {}$'.format(coord))
n += 2
plt.suptitle('\Large$q = {}$'.format(noise))
plt.subplots_adjust(left=0.1, right=0.9, top=0.96, bottom=0.0, hspace=0)
plt.savefig('dataset-{}.pdf'.format(int(round(10 * q))))
|
pages/cache.py | timbortnik/django-page-cms | 113 | 12732098 | # -*- coding: utf-8 -*-
from django.core.cache import caches
from django.core.cache.backends.base import InvalidCacheBackendError
try:
cache = caches['pages']
except InvalidCacheBackendError:
cache = caches['default']
|
tests/godagtimed_old.py | flying-sheep/goatools | 477 | 12732122 | #!/usr/bin/env python
"""Test deprecated location of GoDagTimed"""
import os
import timeit
from goatools.test_data.godag_timed import GoDagTimed
from goatools.test_data.godag_timed import prt_hms
from goatools.base import download_go_basic_obo
REPO = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")
def test_deprecatedloc_godagtimed():
"""Test deprecated location of GoDagTimed"""
tic = timeit.default_timer()
prt_hms(tic, 'prt_hms TESTED')
fin_go_obo = os.path.join(REPO, "go-basic.obo")
download_go_basic_obo(fin_go_obo, loading_bar=None)
GoDagTimed(fin_go_obo)
if __name__ == '__main__':
test_deprecatedloc_godagtimed()
|
tools/noms/pushd.py | rajeev02101987/noms | 8,126 | 12732137 | #!/usr/bin/python
# Copyright 2016 <NAME>, Inc. All rights reserved.
# Licensed under the Apache License, version 2.0:
# http://www.apache.org/licenses/LICENSE-2.0
import os
from contextlib import contextmanager
@contextmanager
def pushd(path):
  currentDir = os.getcwd()
  os.chdir(path)
  try:
    yield
  finally:
    # Always restore the original working directory, even if the body raises.
    os.chdir(currentDir)
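# Hedged usage sketch (not part of the original module): temporarily switch into
# a directory, do some work, and land back where we started. The path below is a
# placeholder.
if __name__ == '__main__':
  with pushd('/tmp'):
    print('inside:', os.getcwd())
  print('back in:', os.getcwd())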
|
social/backends/beats.py | raccoongang/python-social-auth | 1,987 | 12732147 | <filename>social/backends/beats.py<gh_stars>1000+
from social_core.backends.beats import BeatsOAuth2
|
gateway/lora/aes-python-lib/LoRaWAN/PhyPayload.py | leavitia/ioticd | 654 | 12732166 | <filename>gateway/lora/aes-python-lib/LoRaWAN/PhyPayload.py
#
# lorawan packet: mhdr(1) mac_payload(1..N) mic(4)
#
from MalformedPacketException import MalformedPacketException
from MHDR import MHDR
from Direction import Direction
from MacPayload import MacPayload
class PhyPayload:
def __init__(self, key):
self.key = key
def read(self, packet):
if len(packet) < 12:
            raise MalformedPacketException("Invalid LoRaWAN packet")
self.mhdr = MHDR(packet[0])
self.set_direction()
self.mac_payload = MacPayload()
self.mac_payload.read(self.get_mhdr().get_mtype(), packet[1:-4])
self.mic = packet[-4:]
def create(self, mhdr, args):
self.mhdr = MHDR(mhdr)
self.set_direction()
self.mac_payload = MacPayload()
self.mac_payload.create(self.get_mhdr().get_mtype(), self.key, args)
self.mic = None
def length(self):
return len(self.to_raw())
def to_raw(self):
phy_payload = [self.get_mhdr().to_raw()]
phy_payload += self.mac_payload.to_raw()
phy_payload += self.get_mic()
return phy_payload
def get_mhdr(self):
        return self.mhdr
def set_mhdr(self, mhdr):
self.mhdr = mhdr
def get_direction(self):
return self.direction.get()
def set_direction(self):
self.direction = Direction(self.get_mhdr())
def get_mac_payload(self):
return self.mac_payload
def set_mac_payload(self, mac_payload):
self.mac_payload = mac_payload
def get_mic(self):
if self.mic == None:
self.set_mic(self.compute_mic())
return self.mic
def set_mic(self, mic):
self.mic = mic
def compute_mic(self):
return self.mac_payload.frm_payload.compute_mic(self.key, self.get_direction(), self.get_mhdr())
def valid_mic(self):
return self.get_mic() == self.mac_payload.frm_payload.compute_mic(self.key, self.get_direction(), self.get_mhdr())
def get_payload(self):
return self.mac_payload.frm_payload.decrypt_payload(self.key, self.get_direction())
|
google-api-client-generator/src/googleapis/codegen/unicode_test.py | cclauss/discovery-artifact-manager | 709 | 12732170 | #!/usr/bin/python2.7
# -*- coding: utf-8 -*-
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Unicode handling."""
import json
import os
import gflags as flags
from google.apputils import basetest
from googleapis.codegen import api
FLAGS = flags.FLAGS
class UnicodeTest(basetest.TestCase):
_TEST_DISCOVERY_DOC = 'unicode.json'
def ApiFromDiscoveryDoc(self, path):
"""Load a discovery doc from a file and creates a library Api.
Args:
path: (str) The path to the document.
Returns:
An Api for that document.
"""
with open(os.path.join(os.path.dirname(__file__), 'testdata', path)) as f:
discovery_doc = json.loads(f.read().decode('utf-8'))
return api.Api(discovery_doc)
def testGiveMeAName(self):
an_api = self.ApiFromDiscoveryDoc(self._TEST_DISCOVERY_DOC)
accented = u'\xdaRL' # "URL" with an accent
# An object which holds a count. This is just to have an object to
# increment as a side-effect of a lambda.
class Counter(object):
value = 0
def Increment(self, expr):
self.value += expr or 0
def CheckDescription(counter, x, match):
"""Does a CodeObject object contain a string in its description."""
counter.Increment(match in (x.values.get('description') or ''))
# Look for 'RL' for a baseline
rl_counter = Counter()
an_api.VisitAll(lambda x: CheckDescription(rl_counter, x, 'RL'))
self.assertLess(6, rl_counter.value)
url_counter = Counter()
an_api.VisitAll(lambda x: CheckDescription(url_counter, x, accented))
self.assertEquals(rl_counter.value, url_counter.value)
def CheckEnumDescription(counter, x, match):
enum_type = x.values.get('enumType')
if enum_type:
for _, _, description in enum_type.values.get('pairs') or []:
counter.Increment(match in description)
enum_counter = Counter()
an_api.VisitAll(lambda x: CheckEnumDescription(enum_counter, x, accented))
self.assertEquals(2, enum_counter.value)
if __name__ == '__main__':
basetest.main()
|
tests/test_tower.py | theosech/ec | 290 | 12732191 | <gh_stars>100-1000
import unittest
class TestTowerMain(unittest.TestCase):
def test_imports(self):
try:
from dreamcoder.domains.tower.main import (
Flatten,
TowerCNN,
tower_options,
dreamOfTowers,
visualizePrimitives,
main
)
except Exception:
self.fail('Unable to import tower module')
if __name__ == '__main__':
unittest.main()
|
shynet/core/migrations/0006_service_hide_referrer_regex.py | f97/shynet | 1,904 | 12732201 | # Generated by Django 3.0.6 on 2020-05-07 21:23
from django.db import migrations, models
import core.models
class Migration(migrations.Migration):
dependencies = [
("core", "0005_service_ignored_ips"),
]
operations = [
migrations.AddField(
model_name="service",
name="hide_referrer_regex",
field=models.TextField(
blank=True, default="", validators=[core.models._validate_regex]
),
),
]
|
lithops/util/joblib/lithops_backend.py | kpavel/lithops | 158 | 12732237 | #
# (C) Copyright Cloudlab URV 2021
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import gc
import logging
import pickle
import diskcache
from joblib._parallel_backends import ParallelBackendBase, PoolManagerMixin
from joblib.parallel import register_parallel_backend
from numpy import ndarray
from concurrent.futures import ThreadPoolExecutor
from lithops.multiprocessing import Pool
from lithops.storage import Storage
logger = logging.getLogger(__name__)
def register_lithops():
""" Register Lithops Backend to be called with parallel_backend("lithops"). """
register_parallel_backend("lithops", LithopsBackend)
class LithopsBackend(ParallelBackendBase, PoolManagerMixin):
"""A ParallelBackend which will use a multiprocessing.Pool.
Will introduce some communication and memory overhead when exchanging
    input and output data with the worker Python processes.
However, does not suffer from the Python Global Interpreter Lock.
"""
def __init__(self, nesting_level=None, inner_max_num_threads=None, **pool_kwargs):
super().__init__(nesting_level, inner_max_num_threads, **{})
self.__pool_kwargs = pool_kwargs
# Environment variables to protect against bad situations when nesting
JOBLIB_SPAWNED_PROCESS = "__JOBLIB_SPAWNED_PARALLEL__"
supports_timeout = True
supports_sharedmem = False
def effective_n_jobs(self, n_jobs):
"""Determine the number of jobs which are going to run in parallel.
This also checks if we are attempting to create a nested parallel
loop.
"""
# this must be 1 as we only want to create 1 LithopsExecutor()
return 1
def configure(self, n_jobs=1, parallel=None, prefer=None, require=None,
**memmappingpool_args):
"""Build a process or thread pool and return the number of workers"""
n_jobs = self.effective_n_jobs(n_jobs)
already_forked = int(os.environ.get(self.JOBLIB_SPAWNED_PROCESS, 0))
if already_forked:
raise ImportError(
'[joblib] Attempting to do parallel computing '
'without protecting your import on a system that does '
'not support forking. To use parallel-computing in a '
'script, you must protect your main loop using "if '
"__name__ == '__main__'"
'". Please see the joblib documentation on Parallel '
'for more information')
# Set an environment variable to avoid infinite loops
os.environ[self.JOBLIB_SPAWNED_PROCESS] = '1'
# Make sure to free as much memory as possible before forking
gc.collect()
self._pool = Pool()
self.parallel = parallel
return n_jobs
def terminate(self):
"""Shutdown the process or thread pool"""
super().terminate()
if self.JOBLIB_SPAWNED_PROCESS in os.environ:
del os.environ[self.JOBLIB_SPAWNED_PROCESS]
def compute_batch_size(self):
return int(1e6)
def apply_async(self, func, callback=None):
"""Schedule a func to be run"""
# return self._get_pool().map_async(handle_call, func.items, callback=callback) # bypass
mem_opt_calls = find_shared_objects(func.items)
return self._get_pool().starmap_async(handle_call, mem_opt_calls)
def find_shared_objects(calls):
# find and annotate repeated arguments
record = {}
for i, call in enumerate(calls):
for j, arg in enumerate(call[1]):
if id(arg) in record:
record[id(arg)].append((i, j))
else:
record[id(arg)] = [arg, (i, j)]
for k, v in call[2].items():
if id(v) in record:
record[id(v)].append((i, k))
else:
record[id(v)] = [v, (i, k)]
# If we found multiple occurrences of one object, then
# store it in shared memory, pass a proxy as a value
calls = [list(item) for item in calls]
storage = Storage()
thread_pool = ThreadPoolExecutor(max_workers=len(record))
def put_arg_obj(positions):
obj = positions.pop(0)
if len(positions) > 1 and consider_sharing(obj):
logger.debug('Proxying {}'.format(type(obj)))
obj_bin = pickle.dumps(obj)
cloud_object = storage.put_cloudobject(obj_bin)
for pos in positions:
call_n, idx_or_key = pos
call = calls[call_n]
if isinstance(idx_or_key, str):
call[2][idx_or_key] = cloud_object
else:
args_as_list = list(call[1])
args_as_list[idx_or_key] = cloud_object
call[1] = tuple(args_as_list)
try:
call[3].append(idx_or_key)
except IndexError:
call.append([idx_or_key])
fut = []
for positions in record.values():
f = thread_pool.submit(put_arg_obj, positions)
fut.append(f)
[f.result() for f in fut]
return [tuple(item) for item in calls]
def handle_call(func, args, kwargs, proxy_positions=[]):
if len(proxy_positions) > 0:
args, kwargs = replace_with_values(args, kwargs, proxy_positions)
return func(*args, **kwargs)
def replace_with_values(args, kwargs, proxy_positions):
args_as_list = list(args)
thread_pool = ThreadPoolExecutor(max_workers=len(proxy_positions))
cache = diskcache.Cache('/tmp/lithops/cache')
def get_arg_obj(idx_or_key):
if isinstance(idx_or_key, str):
obj_id = kwargs[idx_or_key]
else:
obj_id = args_as_list[idx_or_key]
if obj_id in cache:
logger.debug('Get {} (arg {}) from cache'.format(obj_id, idx_or_key))
obj = cache[obj_id]
else:
logger.debug('Get {} (arg {}) from storage'.format(obj_id, idx_or_key))
storage = Storage()
obj_bin = storage.get_cloudobject(obj_id)
obj = pickle.loads(obj_bin)
cache[obj_id] = obj
if isinstance(idx_or_key, str):
kwargs[idx_or_key] = obj
else:
args_as_list[idx_or_key] = obj
fut = []
for idx_or_key in proxy_positions:
f = thread_pool.submit(get_arg_obj, idx_or_key)
fut.append(f)
[f.result() for f in fut]
return args_as_list, kwargs
def consider_sharing(obj):
if isinstance(obj, (ndarray, list)): # TODO: some heuristic
return True
return False
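# Hedged usage sketch (not part of the original module): register the backend and
# push a joblib Parallel loop through Lithops. `slow_square` is a placeholder
# workload, and Lithops itself must already be configured for a storage/compute
# backend.
if __name__ == "__main__":
    from joblib import Parallel, delayed, parallel_backend

    register_lithops()

    def slow_square(x):
        return x * x

    with parallel_backend("lithops"):
        results = Parallel()(delayed(slow_square)(i) for i in range(8))
    print(results)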
|
gammapy/astro/darkmatter/utils.py | Rishank2610/gammapy | 155 | 12732238 | <filename>gammapy/astro/darkmatter/utils.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Utilities to compute J-factor maps."""
import astropy.units as u
__all__ = ["JFactory"]
class JFactory:
"""Compute J-Factor maps.
All J-Factors are computed for annihilation. The assumed dark matter
profiles will be centered on the center of the map.
Parameters
----------
geom : `~gammapy.maps.WcsGeom`
Reference geometry
profile : `~gammapy.astro.darkmatter.profiles.DMProfile`
Dark matter profile
distance : `~astropy.units.Quantity`
Distance to convert angular scale of the map
"""
def __init__(self, geom, profile, distance):
self.geom = geom
self.profile = profile
self.distance = distance
def compute_differential_jfactor(self):
r"""Compute differential J-Factor.
.. math::
\frac{\mathrm d J}{\mathrm d \Omega} =
\int_{\mathrm{LoS}} \mathrm d r \rho(r)
"""
# TODO: Needs to be implemented more efficiently
separation = self.geom.separation(self.geom.center_skydir)
rmin = separation.rad * self.distance
rmax = self.distance
val = [self.profile.integral(_, rmax) for _ in rmin.flatten()]
jfact = u.Quantity(val).to("GeV2 cm-5").reshape(rmin.shape)
return jfact / u.steradian
def compute_jfactor(self):
r"""Compute astrophysical J-Factor.
.. math::
J(\Delta\Omega) =
\int_{\Delta\Omega} \mathrm d \Omega^{\prime}
\frac{\mathrm d J}{\mathrm d \Omega^{\prime}}
"""
diff_jfact = self.compute_differential_jfactor()
return diff_jfact * self.geom.to_image().solid_angle()
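# Hedged usage sketch (not part of the original module): build a small map
# geometry, choose a dark matter profile and a distance, then evaluate the
# J-factor map. The profile class, map size and 8.5 kpc distance are
# illustrative assumptions, not values taken from this file.
if __name__ == "__main__":
    from gammapy.maps import WcsGeom
    from gammapy.astro.darkmatter import profiles

    geom = WcsGeom.create(binsz=0.05, width=(3, 3), frame="galactic")
    jfactory = JFactory(geom=geom, profile=profiles.NFWProfile(), distance=8.5 * u.kpc)
    jfact_map = jfactory.compute_jfactor()
    print(jfact_map.sum())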
|
iCaRL-Tensorflow/main_resnet_tf.py | augustoolucas/iCaRL | 215 | 12732314 | <filename>iCaRL-Tensorflow/main_resnet_tf.py<gh_stars>100-1000
import tensorflow as tf
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
import numpy as np
import scipy
import os
import scipy.io
import sys
try:
import cPickle
except:
import _pickle as cPickle
# Syspath for the folder with the utils files
#sys.path.insert(0, "/media/data/srebuffi")
import utils_resnet
import utils_icarl
import utils_data
######### Modifiable Settings ##########
batch_size = 128 # Batch size
nb_val = 50 # Validation samples per class
nb_cl = 10 # Classes per group
nb_groups = 10 # Number of groups
nb_proto = 20 # Number of prototypes per class: total protoset memory/ total number of classes
epochs = 60 # Total number of epochs
lr_old = 2. # Initial learning rate
lr_strat = [20,30,40,50] # Epochs where learning rate gets decreased
lr_factor = 5. # Learning rate decrease factor
gpu = '0' # Used GPU
wght_decay = 0.00001 # Weight Decay
########################################
######### Paths ##########
# Working station
devkit_path = '/home/srebuffi'
train_path = '/data/datasets/imagenets72'
save_path = '/data/srebuffi/backup/'
###########################
#####################################################################################################
### Initialization of some variables ###
class_means = np.zeros((512,nb_groups*nb_cl,2,nb_groups))
loss_batch = []
files_protoset =[]
for _ in range(nb_groups*nb_cl):
files_protoset.append([])
### Preparing the files for the training/validation ###
# Random mixing
print("Mixing the classes and putting them in batches of classes...")
np.random.seed(1993)
order = np.arange(nb_groups * nb_cl)
mixing = np.arange(nb_groups * nb_cl)
np.random.shuffle(mixing)
# Loading the labels
labels_dic, label_names, validation_ground_truth = utils_data.parse_devkit_meta(devkit_path)
# Or you can just do like this
# define_class = ['apple', 'banana', 'cat', 'dog', 'elephant', 'forg']
# labels_dic = {k: v for v, k in enumerate(define_class)}
# Preparing the files per group of classes
print("Creating a validation set ...")
files_train, files_valid = utils_data.prepare_files(train_path, mixing, order, labels_dic, nb_groups, nb_cl, nb_val)
# Pickle order and files lists and mixing
with open(str(nb_cl)+'mixing.pickle','wb') as fp:
cPickle.dump(mixing,fp)
with open(str(nb_cl)+'settings_resnet.pickle','wb') as fp:
cPickle.dump(order,fp)
cPickle.dump(files_valid,fp)
cPickle.dump(files_train,fp)
### Start of the main algorithm ###
for itera in range(nb_groups):
# Files to load : training samples + protoset
print('Batch of classes number {0} arrives ...'.format(itera+1))
# Adding the stored exemplars to the training set
if itera == 0:
files_from_cl = files_train[itera]
else:
files_from_cl = files_train[itera][:]
for i in range(itera*nb_cl):
nb_protos_cl = int(np.ceil(nb_proto*nb_groups*1./itera)) # Reducing number of exemplars of the previous classes
tmp_var = files_protoset[i]
files_from_cl += tmp_var[0:min(len(tmp_var),nb_protos_cl)]
## Import the data reader ##
image_train, label_train = utils_data.read_data(train_path, labels_dic, mixing, files_from_cl=files_from_cl)
image_batch, label_batch_0 = tf.train.batch([image_train, label_train], batch_size=batch_size, num_threads=8)
label_batch = tf.one_hot(label_batch_0,nb_groups*nb_cl)
## Define the objective for the neural network ##
if itera == 0:
# No distillation
variables_graph,variables_graph2,scores,scores_stored = utils_icarl.prepare_networks(gpu,image_batch, nb_cl, nb_groups)
# Define the objective for the neural network: 1 vs all cross_entropy
with tf.device('/gpu:0'):
scores = tf.concat(scores,0)
l2_reg = wght_decay * tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, scope='ResNet18'))
loss_class = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=label_batch, logits=scores))
loss = loss_class + l2_reg
learning_rate = tf.placeholder(tf.float32, shape=[])
opt = tf.train.MomentumOptimizer(learning_rate, 0.9)
train_step = opt.minimize(loss,var_list=variables_graph)
if itera > 0:
# Distillation
variables_graph,variables_graph2,scores,scores_stored = utils_icarl.prepare_networks(gpu,image_batch, nb_cl, nb_groups)
# Copying the network to use its predictions as ground truth labels
op_assign = [(variables_graph2[i]).assign(variables_graph[i]) for i in range(len(variables_graph))]
# Define the objective for the neural network : 1 vs all cross_entropy + distillation
with tf.device('/gpu:0'):
scores = tf.concat(scores,0)
scores_stored = tf.concat(scores_stored,0)
old_cl = (order[range(itera*nb_cl)]).astype(np.int32)
new_cl = (order[range(itera*nb_cl,nb_groups*nb_cl)]).astype(np.int32)
label_old_classes = tf.sigmoid(tf.stack([scores_stored[:,i] for i in old_cl],axis=1))
label_new_classes = tf.stack([label_batch[:,i] for i in new_cl],axis=1)
pred_old_classes = tf.stack([scores[:,i] for i in old_cl],axis=1)
pred_new_classes = tf.stack([scores[:,i] for i in new_cl],axis=1)
l2_reg = wght_decay * tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, scope='ResNet18'))
loss_class = tf.reduce_mean(tf.concat([tf.nn.sigmoid_cross_entropy_with_logits(labels=label_old_classes, logits=pred_old_classes),tf.nn.sigmoid_cross_entropy_with_logits(labels=label_new_classes, logits=pred_new_classes)],1))
loss = loss_class + l2_reg
learning_rate = tf.placeholder(tf.float32, shape=[])
opt = tf.train.MomentumOptimizer(learning_rate, 0.9)
train_step = opt.minimize(loss,var_list=variables_graph)
## Run the learning phase ##
with tf.Session(config=config) as sess:
# Launch the data reader
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
sess.run(tf.global_variables_initializer())
lr = lr_old
# Run the loading of the weights for the learning network and the copy network
if itera > 0:
void0 = sess.run([(variables_graph[i]).assign(save_weights[i]) for i in range(len(variables_graph))])
void1 = sess.run(op_assign)
for epoch in range(epochs):
print("Batch of classes {} out of {} batches".format(
itera + 1, nb_groups))
print('Epoch %i' % epoch)
for i in range(int(np.ceil(len(files_from_cl)/batch_size))):
loss_class_val, _ ,sc,lab = sess.run([loss_class, train_step,scores,label_batch_0], feed_dict={learning_rate: lr})
loss_batch.append(loss_class_val)
# Plot the training error every 10 batches
if len(loss_batch) == 10:
print(np.mean(loss_batch))
loss_batch = []
# Plot the training top 1 accuracy every 80 batches
if (i+1)%80 == 0:
stat = []
stat += ([ll in best for ll, best in zip(lab, np.argsort(sc, axis=1)[:, -1:])])
stat =np.average(stat)
print('Training accuracy %f' %stat)
            # Decrease the learning rate by lr_factor at the epochs listed in lr_strat
if epoch in lr_strat:
lr /= lr_factor
coord.request_stop()
coord.join(threads)
# copy weights to store network
save_weights = sess.run([variables_graph[i] for i in range(len(variables_graph))])
utils_resnet.save_model(save_path+'model-iteration'+str(nb_cl)+'-%i.pickle' % itera, scope='ResNet18', sess=sess)
# Reset the graph
tf.reset_default_graph()
## Exemplars management part ##
nb_protos_cl = int(np.ceil(nb_proto*nb_groups*1./(itera+1))) # Reducing number of exemplars for the previous classes
files_from_cl = files_train[itera]
inits,scores,label_batch,loss_class,file_string_batch,op_feature_map = utils_icarl.reading_data_and_preparing_network(files_from_cl, gpu, itera, batch_size, train_path, labels_dic, mixing, nb_groups, nb_cl, save_path)
with tf.Session(config=config) as sess:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
void3 = sess.run(inits)
# Load the training samples of the current batch of classes in the feature space to apply the herding algorithm
Dtot,processed_files,label_dico = utils_icarl.load_class_in_feature_space(files_from_cl, batch_size, scores, label_batch, loss_class, file_string_batch, op_feature_map, sess)
processed_files = np.array([x.decode() for x in processed_files])
# Herding procedure : ranking of the potential exemplars
print('Exemplars selection starting ...')
for iter_dico in range(nb_cl):
ind_cl = np.where(label_dico == order[iter_dico+itera*nb_cl])[0]
D = Dtot[:,ind_cl]
files_iter = processed_files[ind_cl]
mu = np.mean(D,axis=1)
w_t = mu
step_t = 0
while not(len(files_protoset[itera*nb_cl+iter_dico]) == nb_protos_cl) and step_t<1.1*nb_protos_cl:
tmp_t = np.dot(w_t,D)
ind_max = np.argmax(tmp_t)
w_t = w_t + mu - D[:,ind_max]
step_t += 1
if files_iter[ind_max] not in files_protoset[itera*nb_cl+iter_dico]:
files_protoset[itera*nb_cl+iter_dico].append(files_iter[ind_max])
coord.request_stop()
coord.join(threads)
# Reset the graph
tf.reset_default_graph()
# Class means for iCaRL and NCM
print('Computing theoretical class means for NCM and mean-of-exemplars for iCaRL ...')
for iteration2 in range(itera+1):
files_from_cl = files_train[iteration2]
inits,scores,label_batch,loss_class,file_string_batch,op_feature_map = utils_icarl.reading_data_and_preparing_network(files_from_cl, gpu, itera, batch_size, train_path, labels_dic, mixing, nb_groups, nb_cl, save_path)
with tf.Session(config=config) as sess:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
void2 = sess.run(inits)
Dtot,processed_files,label_dico = utils_icarl.load_class_in_feature_space(files_from_cl, batch_size, scores, label_batch, loss_class, file_string_batch, op_feature_map, sess)
processed_files = np.array([x.decode() for x in processed_files])
for iter_dico in range(nb_cl):
ind_cl = np.where(label_dico == order[iter_dico+iteration2*nb_cl])[0]
D = Dtot[:,ind_cl]
files_iter = processed_files[ind_cl]
current_cl = order[range(iteration2*nb_cl,(iteration2+1)*nb_cl)]
# Normal NCM mean
class_means[:,order[iteration2*nb_cl+iter_dico],1,itera] = np.mean(D,axis=1)
class_means[:,order[iteration2*nb_cl+iter_dico],1,itera] /= np.linalg.norm(class_means[:,order[iteration2*nb_cl+iter_dico],1,itera])
# iCaRL approximated mean (mean-of-exemplars)
# use only the first exemplars of the old classes: nb_protos_cl controls the number of exemplars per class
ind_herding = np.array([np.where(files_iter == files_protoset[iteration2*nb_cl+iter_dico][i])[0][0] for i in range(min(nb_protos_cl,len(files_protoset[iteration2*nb_cl+iter_dico])))])
D_tmp = D[:,ind_herding]
class_means[:,order[iteration2*nb_cl+iter_dico],0,itera] = np.mean(D_tmp,axis=1)
class_means[:,order[iteration2*nb_cl+iter_dico],0,itera] /= np.linalg.norm(class_means[:,order[iteration2*nb_cl+iter_dico],0,itera])
coord.request_stop()
coord.join(threads)
# Reset the graph
tf.reset_default_graph()
# Pickle class means and protoset
with open(str(nb_cl)+'class_means.pickle','wb') as fp:
cPickle.dump(class_means,fp)
with open(str(nb_cl)+'files_protoset.pickle','wb') as fp:
cPickle.dump(files_protoset,fp)
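# --- Illustrative sketch (added for documentation, not part of the original script) ---
# A minimal NumPy-only restatement of the herding selection performed in the
# exemplar-management loop above, assuming D is a (feature_dim, n_samples) matrix
# of features for a single class. It mirrors the while-loop logic: greedily pick
# the sample that keeps the running selection closest to the class mean.
def herding_selection_sketch(D, nb_protos_cl):
    import numpy as np  # numpy is already imported at the top of this script
    mu = np.mean(D, axis=1)
    w_t = mu.copy()
    selected = []
    step_t = 0
    while len(selected) < nb_protos_cl and step_t < 1.1 * nb_protos_cl:
        ind_max = np.argmax(np.dot(w_t, D))  # sample most aligned with the current residual
        w_t = w_t + mu - D[:, ind_max]       # move the residual back toward the class mean
        step_t += 1
        if ind_max not in selected:
            selected.append(ind_max)
    return selected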
|
quarkc/test/ffi/expected/py/dependencies/docs/conf.py | datawire/quark | 112 | 12732336 | # -*- coding: utf-8 -*-
#
# dependencies documentation build configuration file, created by Quark
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon'
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'dependencies'
copyright = u'2015, dependencies authors'
author = u'dependencies authors'
version = '0.0.1'
release = '0.0.1'
language = None
exclude_patterns = ['_build']
pygments_style = 'sphinx'
todo_include_todos = False
html_theme = 'alabaster'
html_static_path = ['_static']
htmlhelp_basename = 'dependenciesdoc'
latex_elements = {}
latex_documents = [
(master_doc, 'dependencies.tex', u'dependencies Documentation',
u'dependencies authors', 'manual'),
]
man_pages = [
(master_doc, 'dependencies', u'dependencies Documentation',
[author], 1)
]
texinfo_documents = [
(master_doc, 'dependencies', u'dependencies Documentation',
author, 'dependencies', 'One line description of dependencies.',
'Miscellaneous'),
]
|
misc/update/python/backfill_threaded.py | sy3kic/nZEDb | 472 | 12732351 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys, os, time
import threading
try:
import queue
except ImportError:
import Queue as queue
import subprocess
import string
import signal
import datetime
import lib.info as info
from lib.info import bcolors
conf = info.readConfig()
cur = info.connect()
start_time = time.time()
pathname = os.path.abspath(os.path.dirname(sys.argv[0]))
print(bcolors.HEADER + "\nBackfill Threaded Started at {}".format(datetime.datetime.now().strftime("%H:%M:%S")) + bcolors.ENDC)
#get values from db
cur[0].execute("SELECT (SELECT value FROM settings WHERE setting = 'backfillthreads') as a, (SELECT value FROM tmux WHERE setting = 'backfill') as c, (SELECT value FROM tmux WHERE setting = 'backfill_groups') as d, (SELECT value FROM tmux WHERE setting = 'backfill_order') as e, (SELECT value FROM tmux WHERE setting = 'backfill_days') as f")
dbgrab = cur[0].fetchall()
run_threads = int(dbgrab[0][0])
type = int(dbgrab[0][1])
groups = int(dbgrab[0][2])
intorder = int(dbgrab[0][3])
intbackfilltype = int(dbgrab[0][4])
#get the correct ORDER BY clause for the query
if intorder == 1:
group = "ORDER BY first_record_postdate DESC"
elif intorder == 2:
group = "ORDER BY first_record_postdate ASC"
elif intorder == 3:
group = "ORDER BY name ASC"
elif intorder == 4:
group = "ORDER BY name DESC"
elif intorder == 5:
group = "ORDER BY first_record DESC"
else:
group = "ORDER BY first_record ASC"
#backfill days or safe backfill date
if intbackfilltype == 1:
backfilldays = "backfill_target"
elif intbackfilltype == 2:
backfilldays = "datediff(curdate(),(SELECT value FROM settings WHERE setting = 'safebackfilldate'))"
#exit if the tmux backfill type is set to safe backfill
if len(sys.argv) == 1 and type == 4:
print(bcolors.ERROR + "Tmux is set for Safe Backfill, no groups to process." + bcolors.ENDC)
info.disconnect(cur[0], cur[1])
sys.exit()
#query to grab backfill groups
if len(sys.argv) > 1 and sys.argv[1] == "all":
# Using string formatting is not the correct way to do this, but using +group is even worse
# removing the % before the variables at the end of the query adds quotes/escapes strings
cur[0].execute("SELECT name, first_record FROM groups WHERE first_record != 0 AND backfill = 1 %s" % (group))
else:
if conf['DB_SYSTEM'] == "mysql":
cur[0].execute("SELECT name, first_record FROM groups WHERE first_record != 0 AND first_record_postdate IS NOT NULL AND backfill = 1 AND (NOW() - interval %s DAY) < first_record_postdate %s LIMIT %s" % (backfilldays, group, groups))
elif conf['DB_SYSTEM'] == "pgsql":
cur[0].execute("SELECT name, first_record FROM groups WHERE first_record != 0 AND first_record_postdate IS NOT NULL AND backfill = 1 AND (NOW() - interval '%s DAYS') < first_record_postdate %s LIMIT %s" % (backfilldays, group, groups))
datas = cur[0].fetchall()
#close connection to mysql
info.disconnect(cur[0], cur[1])
if not datas:
print(bcolors.ERROR + "No Groups enabled for backfill" + bcolors.ENDC)
sys.exit()
my_queue = queue.Queue()
time_of_last_run = time.time()
class queue_runner(threading.Thread):
def __init__(self, my_queue):
threading.Thread.__init__(self)
self.my_queue = my_queue
def run(self):
global time_of_last_run
while True:
try:
my_id = self.my_queue.get(True, 1)
except:
if time.time() - time_of_last_run > 3:
return
else:
if my_id:
time_of_last_run = time.time()
if len(sys.argv) > 1 and sys.argv[1] == "all":
subprocess.call(["php", pathname+"/../nix/multiprocessing/.do_not_run/switch.php", "python backfill_all_quick "+my_id])
else:
subprocess.call(["php", pathname+"/../nix/multiprocessing/.do_not_run/switch.php", "python backfill "+my_id])
time.sleep(.03)
self.my_queue.task_done()
def main(args):
global time_of_last_run
time_of_last_run = time.time()
print(bcolors.HEADER + "We will be using a max of {} threads, a queue of {} groups".format(run_threads, "{:,}".format(len(datas))) + bcolors.ENDC)
time.sleep(2)
def signal_handler(signal, frame):
sys.exit()
signal.signal(signal.SIGINT, signal_handler)
if True:
        #spawn a pool of worker threads
for i in range(run_threads):
p = queue_runner(my_queue)
p.setDaemon(False)
p.start()
#now load some arbitrary jobs into the queue
for gnames in datas:
time.sleep(.03)
my_queue.put("%s %s" % (gnames[0], type))
my_queue.join()
print(bcolors.HEADER + "\nBackfill Threaded Completed at {}".format(datetime.datetime.now().strftime("%H:%M:%S")) + bcolors.ENDC)
print(bcolors.HEADER + "Running time: {}\n\n".format(str(datetime.timedelta(seconds=time.time() - start_time))) + bcolors.ENDC)
if __name__ == '__main__':
main(sys.argv[1:])
|
modules/nltk_contrib/mit/six863/tagging/tagparse.py | h4ck3rm1k3/NLP-project | 123 | 12732377 | <reponame>h4ck3rm1k3/NLP-project<filename>modules/nltk_contrib/mit/six863/tagging/tagparse.py
from nltk.parse import chart
from nltk import cfg
from drawchart import ChartDemo
from nltk.tokenize.regexp import wordpunct
#from nltk_contrib.mit.six863.kimmo import *
import re, pickle
def chart_tagger(tagger):
def insert_tags(thechart, tokens):
"""
Initialize a chart parser based on the results of a tagger.
"""
tagged_tokens = list(tagger.tag(tokens))
for i in range(len(tagged_tokens)):
word, tag = tagged_tokens[i]
leafedge = chart.LeafEdge(word, i)
thechart.insert(chart.TreeEdge((i, i+1),
cfg.Nonterminal(tag), [word], dot=1), [leafedge])
return insert_tags
def chart_kimmo(kimmorules):
def insert_tags(thechart, tokens):
for i in range(len(tokens)):
word = tokens[i]
results = kimmorules.recognize(word.lower())
for surface, feat in results:
match = re.match(r"PREFIX\('.*?'\)(.*?)\(.*", feat)
if match: pos = match.groups()[0]
else: pos = feat.split('(')[0]
print surface, pos
leafedge = chart.LeafEdge(word, i)
thechart.insert(chart.TreeEdge((i, i+1),
cfg.Nonterminal(pos), [word], dot=1), [leafedge])
return insert_tags
def tagged_chart_parse(sentence, grammar, tagger):
tokens = list(wordpunct(sentence))
demo = ChartDemo(grammar, tokens, initfunc=chart_tagger(tagger))
demo.mainloop()
def kimmo_chart_parse(sentence, grammar, kimmo):
tokens = list(wordpunct(sentence))
demo = ChartDemo(grammar, tokens, initfunc=chart_kimmo(kimmo))
demo.mainloop()
def read(filename):
f = open(filename)
return f.read()
def main():
sentence = 'The quick brown fox jumped over the lazy dog'
grammar = cfg.parse_grammar(read('demo.cfg'))
# load from pickle so it's faster
tagger = pickle.load(open('demo_tagger.pickle'))
tagged_chart_parse(sentence, grammar, tagger)
#kimmo = load('english.yaml')
#kimmo_chart_parse(sentence, grammar, kimmo)
if __name__ == '__main__': main()
|
ai_economist/foundation/base/base_agent.py | sarahlc888/ai-economist | 795 | 12732385 | # Copyright (c) 2020, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root
# or https://opensource.org/licenses/BSD-3-Clause
import random
import numpy as np
from ai_economist.foundation.base.registrar import Registry
class BaseAgent:
"""Base class for Agent classes.
Instances of Agent classes are created for each agent in the environment. Agent
instances are stateful, capturing location, inventory, endogenous variables,
and any additional state fields created by environment components during
construction (see BaseComponent.get_additional_state_fields in base_component.py).
They also provide a simple API for getting/setting actions for each of their
registered action subspaces (which depend on the components used to build
the environment).
Args:
idx (int or str): Index that uniquely identifies the agent object amongst the
other agent objects registered in its environment.
multi_action_mode (bool): Whether to allow the agent to take one action for
each of its registered action subspaces each timestep (if True),
or to limit the agent to take only one action each timestep (if False).
"""
name = ""
def __init__(self, idx=None, multi_action_mode=None):
assert self.name
if idx is None:
idx = 0
if multi_action_mode is None:
multi_action_mode = False
if isinstance(idx, str):
self._idx = idx
else:
self._idx = int(idx)
self.multi_action_mode = bool(multi_action_mode)
self.single_action_map = (
{}
) # Used to convert single-action-mode actions to the general format
self.action = dict()
self.action_dim = dict()
self._action_names = []
self._multi_action_dict = {}
self._unique_actions = 0
self._total_actions = 0
self.state = dict(loc=[0, 0], inventory={}, escrow={}, endogenous={})
self._registered_inventory = False
self._registered_endogenous = False
self._registered_components = False
self._noop_action_dict = dict()
# Special flag to allow logic for multi-action-mode agents
# that are not given any actions.
self._passive_multi_action_agent = False
# If this gets set to true, we can make masks faster
self._one_component_single_action = False
self._premask = None
@property
def idx(self):
"""Index used to identify this agent. Must be unique within the environment."""
return self._idx
def register_inventory(self, resources):
"""Used during environment construction to populate inventory/escrow fields."""
assert not self._registered_inventory
for entity_name in resources:
self.inventory[entity_name] = 0
self.escrow[entity_name] = 0
self._registered_inventory = True
def register_endogenous(self, endogenous):
"""Used during environment construction to populate endogenous state fields."""
assert not self._registered_endogenous
for entity_name in endogenous:
self.endogenous[entity_name] = 0
self._registered_endogenous = True
def _incorporate_component(self, action_name, n):
extra_n = (
1 if self.multi_action_mode else 0
        )  # Each sub-action has a NO-OP in multi action mode
self.action[action_name] = 0
self.action_dim[action_name] = n + extra_n
self._action_names.append(action_name)
self._multi_action_dict[action_name] = False
self._unique_actions += 1
if self.multi_action_mode:
self._total_actions += n + extra_n
else:
for action_n in range(1, n + 1):
self._total_actions += 1
self.single_action_map[int(self._total_actions)] = [
action_name,
action_n,
]
def register_components(self, components):
"""Used during environment construction to set up state/action spaces."""
assert not self._registered_components
for component in components:
n = component.get_n_actions(self.name)
if n is None:
continue
# Most components will have a single action-per-agent, so n is an int
if isinstance(n, int):
if n == 0:
continue
self._incorporate_component(component.name, n)
# They can also internally handle multiple actions-per-agent,
# so n is an tuple or list
elif isinstance(n, (tuple, list)):
for action_sub_name, n_ in n:
if n_ == 0:
continue
if "." in action_sub_name:
raise NameError(
"Sub-action {} of component {} "
"is illegally named.".format(
action_sub_name, component.name
)
)
self._incorporate_component(
"{}.{}".format(component.name, action_sub_name), n_
)
# If that's not what we got something is funky.
else:
raise TypeError(
"Received unexpected type ({}) from {}.get_n_actions('{}')".format(
type(n), component.name, self.name
)
)
for k, v in component.get_additional_state_fields(self.name).items():
self.state[k] = v
# Currently no actions are available to this agent. Give it a placeholder.
if len(self.action) == 0 and self.multi_action_mode:
self._incorporate_component("PassiveAgentPlaceholder", 0)
self._passive_multi_action_agent = True
elif len(self.action) == 1 and not self.multi_action_mode:
self._one_component_single_action = True
self._premask = np.ones(1 + self._total_actions, dtype=np.float32)
self._registered_components = True
self._noop_action_dict = {k: v * 0 for k, v in self.action.items()}
verbose = False
if verbose:
print(self.name, self.idx, "constructed action map:")
for k, v in self.single_action_map.items():
print("single action map:", k, v)
for k, v in self.action.items():
print("action:", k, v)
for k, v in self.action_dim.items():
print("action_dim:", k, v)
@property
def action_spaces(self):
"""
if self.multi_action_mode == True:
Returns an integer array with length equal to the number of action
subspaces that the agent registered. The i'th element of the array
indicates the number of actions associated with the i'th action subspace.
In multi_action_mode, each subspace includes a NO-OP.
Note: self._action_names describes which action subspace each element of
the array refers to.
Example:
>> self.multi_action_mode
True
>> self.action_spaces
[2, 5]
>> self._action_names
["Build", "Gather"]
# [1 Build action + Build NO-OP, 4 Gather actions + Gather NO-OP]
if self.multi_action_mode == False:
Returns a single integer equal to the total number of actions that the
agent can take.
Example:
>> self.multi_action_mode
False
>> self.action_spaces
6
>> self._action_names
["Build", "Gather"]
# 1 NO-OP + 1 Build action + 4 Gather actions.
"""
if self.multi_action_mode:
action_dims = []
for m in self._action_names:
action_dims.append(np.array(self.action_dim[m]).reshape(-1))
return np.concatenate(action_dims).astype(np.int32)
n_actions = 1 # (NO-OP)
for m in self._action_names:
n_actions += self.action_dim[m]
return n_actions
@property
def loc(self):
"""2D list of [row, col] representing agent's location in the environment."""
return self.state["loc"]
@property
def endogenous(self):
"""Dictionary representing endogenous quantities (i.e. "Labor").
Example:
>> self.endogenous
{"Labor": 30.25}
"""
return self.state["endogenous"]
@property
def inventory(self):
"""Dictionary representing quantities of resources in agent's inventory.
Example:
>> self.inventory
{"Wood": 3, "Stone": 20, "Coin": 1002.83}
"""
return self.state["inventory"]
@property
def escrow(self):
"""Dictionary representing quantities of resources in agent's escrow.
https://en.wikipedia.org/wiki/Escrow
Escrow is used to manage any portion of the agent's inventory that is
reserved for a particular purpose. Typically, something enters escrow as part
of a contractual arrangement to disburse that something when another
condition is met. An example is found in the ContinuousDoubleAuction
Component class (see ../components/continuous_double_auction.py). When an
agent creates an order to sell a unit of Wood, for example, the component
moves one unit of Wood from the agent's inventory to its escrow. If another
agent buys the Wood, it is moved from escrow to the other agent's inventory. By
placing the Wood in escrow, it prevents the first agent from using it for
something else (i.e. building a house).
Notes:
The inventory and escrow share the same keys. An agent's endowment refers
to the total quantity it has in its inventory and escrow.
Escrow is provided to simplify inventory management but its intended
semantics are not enforced directly. It is up to Component classes to
enforce these semantics.
Example:
>> self.inventory
{"Wood": 0, "Stone": 1, "Coin": 3}
"""
return self.state["escrow"]
def inventory_to_escrow(self, resource, amount):
"""Move some amount of a resource from agent inventory to agent escrow.
Amount transferred is capped to the amount of resource in agent inventory.
Args:
resource (str): The name of the resource to move (i.e. "Wood", "Coin").
amount (float): The amount to be moved from inventory to escrow. Must be
positive.
Returns:
Amount of resource actually transferred. Will be less than amount argument
if amount argument exceeded the amount of resource in the inventory.
Calculated as:
transferred = np.minimum(self.state["inventory"][resource], amount)
"""
assert amount >= 0
transferred = float(np.minimum(self.state["inventory"][resource], amount))
self.state["inventory"][resource] -= transferred
self.state["escrow"][resource] += transferred
return float(transferred)
def escrow_to_inventory(self, resource, amount):
"""Move some amount of a resource from agent escrow to agent inventory.
Amount transferred is capped to the amount of resource in agent escrow.
Args:
resource (str): The name of the resource to move (i.e. "Wood", "Coin").
amount (float): The amount to be moved from escrow to inventory. Must be
positive.
Returns:
Amount of resource actually transferred. Will be less than amount argument
if amount argument exceeded the amount of resource in escrow.
Calculated as:
transferred = np.minimum(self.state["escrow"][resource], amount)
"""
assert amount >= 0
transferred = float(np.minimum(self.state["escrow"][resource], amount))
self.state["escrow"][resource] -= transferred
self.state["inventory"][resource] += transferred
return float(transferred)
def total_endowment(self, resource):
"""Get the combined inventory+escrow endowment of resource.
Args:
resource (str): Name of the resource
Returns:
            The amount of resource in the agent's inventory and escrow.
"""
return self.inventory[resource] + self.escrow[resource]
def reset_actions(self, component=None):
"""Reset all actions to the NO-OP action (the 0'th action index).
If component is specified, only reset action(s) for that component.
"""
if not component:
self.action.update(self._noop_action_dict)
else:
for k, v in self.action.items():
if "." in component:
if k.lower() == component.lower():
self.action[k] = v * 0
else:
base_component = k.split(".")[0]
if base_component.lower() == component.lower():
self.action[k] = v * 0
def has_component(self, component_name):
"""Returns True if the agent has component_name as a registered subaction."""
return bool(component_name in self.action)
def get_random_action(self):
"""
Select a component at random and randomly choose one of its actions (other
than NO-OP).
"""
random_component = random.choice(self._action_names)
component_action = random.choice(
list(range(1, self.action_dim[random_component]))
)
return {random_component: component_action}
def get_component_action(self, component_name, sub_action_name=None):
"""
Return the action(s) taken for component_name component, or None if the
agent does not use that component.
"""
if sub_action_name is not None:
return self.action.get(component_name + "." + sub_action_name, None)
matching_names = [
m for m in self._action_names if m.split(".")[0] == component_name
]
if len(matching_names) == 0:
return None
if len(matching_names) == 1:
return self.action.get(matching_names[0], None)
return [self.action.get(m, None) for m in matching_names]
def set_component_action(self, component_name, action):
"""Set the action(s) taken for component_name component."""
if component_name not in self.action:
raise KeyError(
"Agent {} of type {} does not have {} registered as a subaction".format(
self.idx, self.name, component_name
)
)
if self._multi_action_dict[component_name]:
self.action[component_name] = np.array(action, dtype=np.int32)
else:
self.action[component_name] = int(action)
def populate_random_actions(self):
"""Fill the action buffer with random actions. This is for testing."""
for component, d in self.action_dim.items():
if isinstance(d, int):
self.set_component_action(component, np.random.randint(0, d))
else:
d_array = np.array(d)
self.set_component_action(
component, np.floor(np.random.rand(*d_array.shape) * d_array)
)
def parse_actions(self, actions):
"""Parse the actions array to fill each component's action buffers."""
if self.multi_action_mode:
assert len(actions) == self._unique_actions
if len(actions) == 1:
self.set_component_action(self._action_names[0], actions[0])
else:
for action_name, action in zip(self._action_names, actions):
self.set_component_action(action_name, int(action))
# Single action mode
else:
# Action was supplied as an index of a specific subaction.
# No need to do any lookup.
if isinstance(actions, dict):
if len(actions) == 0:
return
assert len(actions) == 1
action_name = list(actions.keys())[0]
action = list(actions.values())[0]
if action == 0:
return
self.set_component_action(action_name, action)
# Action was supplied as an index into the full set of combined actions
else:
action = int(actions)
# Universal NO-OP
if action == 0:
return
action_name, action = self.single_action_map.get(action)
self.set_component_action(action_name, action)
def flatten_masks(self, mask_dict):
"""Convert a dictionary of component action masks into a single mask vector."""
if self._one_component_single_action:
self._premask[1:] = mask_dict[self._action_names[0]]
return self._premask
no_op_mask = [1]
if self._passive_multi_action_agent:
return np.array(no_op_mask).astype(np.float32)
list_of_masks = []
if not self.multi_action_mode:
list_of_masks.append(no_op_mask)
for m in self._action_names:
if m not in mask_dict:
raise KeyError("No mask provided for {} (agent {})".format(m, self.idx))
if self.multi_action_mode:
list_of_masks.append(no_op_mask)
list_of_masks.append(mask_dict[m])
return np.concatenate(list_of_masks).astype(np.float32)
agent_registry = Registry(BaseAgent)
"""The registry for Agent classes.
This creates a registry object for Agent classes. This registry requires that all
added classes are subclasses of BaseAgent. To make an Agent class available through
the registry, decorate the class definition with @agent_registry.add.
Example:
from ai_economist.foundation.base.base_agent import BaseAgent, agent_registry
@agent_registry.add
class ExampleAgent(BaseAgent):
name = "Example"
pass
assert agent_registry.has("Example")
AgentClass = agent_registry.get("Example")
agent = AgentClass(...)
assert isinstance(agent, ExampleAgent)
Notes:
The foundation package exposes the agent registry as: foundation.agents
An Agent class that is defined and registered following the above example will
only be visible in foundation.agents if defined/registered in a file that is
imported in ../agents/__init__.py.
"""
|
test/tests/test_compatibility.py | nolim1t/specter-diy | 279 | 12732407 | <filename>test/tests/test_compatibility.py
from unittest import TestCase
from apps.compatibility import *
import json
from io import BytesIO
WALLET_SOFTWARE = b'{"label": "blah", "blockheight": 0, "descriptor": "wsh(sortedmulti(1,[fb7c1f11/48h/1h/0h/2h]tpubDExnGppazLhZPNadP8Q5Vgee2QcvbyAf9GvGaEY7ALVJREaG2vdTqv1MHRoDtPaYP3y1DGVx7wrKKhsLhs26GY263uE6Wi3qNbi71AHZ6p7/0/*,[33a2bf0c/48h/1h/0h/2h]tpubDF4cAhFDn6XSPhQtFECSkQm35oEzVyHHAiPa4Qy83fBtPw9nFJAodN6xF6nY7y2xKMGc5nbDFZfAac88oaurVzrCUxyhmc9J8W5tg3N5NkS/0/*))#vk844svv", "devices": [{"type": "specter", "label": "ability"}, {"type": "coldcard", "label": "hox"}]}'
COLDCARD_FILE = """
# Coldcard Multisig setup file (created on Specter Desktop)
#
Name: blah
Policy: 1 of 2
Derivation: m/48'/1'/0'/2'
Format: P2WSH
FB7C1F11: tpubDExnGppazLhZPNadP8Q5Vgee2QcvbyAf9GvGaEY7ALVJREaG2vdTqv1MHRoDtPaYP3y1DGVx7wrKKhsLhs26GY263uE6Wi3qNbi71AHZ6p7
33A2BF0C: tpubDF4cAhFDn6XSPhQtFECSkQm35oEzVyHHAiPa4Qy83fBtPw9nFJAodN6xF6nY7y2xKMGc5nbDFZfAac88oaurVzrCUxyhmc9J8W5tg3N5NkS
"""
EXPECTED = ('blah', 'wsh(sortedmulti(1,[fb7c1f11/48h/1h/0h/2h]tpubDExnGppazLhZPNadP8Q5Vgee2QcvbyAf9GvGaEY7ALVJREaG2vdTqv1MHRoDtPaYP3y1DGVx7wrKKhsLhs26GY263uE6Wi3qNbi71AHZ6p7/{0,1}/*,[33a2bf0c/48h/1h/0h/2h]tpubDF4cAhFDn6XSPhQtFECSkQm35oEzVyHHAiPa4Qy83fBtPw9nFJAodN6xF6nY7y2xKMGc5nbDFZfAac88oaurVzrCUxyhmc9J8W5tg3N5NkS/{0,1}/*))')
class CompatibilityTest(TestCase):
def test_import(self):
self.assertEqual(EXPECTED, parse_software_wallet_json(json.load(BytesIO(WALLET_SOFTWARE))))
self.assertEqual(EXPECTED, parse_cc_wallet_txt(BytesIO(COLDCARD_FILE.encode())))
|
plenum/test/metrics/conftest.py | andkononykhin/plenum | 148 | 12732411 | <gh_stars>100-1000
import pytest
from plenum.common.constants import KeyValueStorageType
from storage.helper import initKeyValueStorage
from storage.kv_store import KeyValueStorage
db_no = 0
@pytest.yield_fixture(params=[KeyValueStorageType.Rocksdb,
KeyValueStorageType.Leveldb,
KeyValueStorageType.BinaryFile])
def storage(request, tdir) -> KeyValueStorage:
global db_no
db = initKeyValueStorage(request.param, tdir, 'metrics_db_{}'.format(db_no))
db_no += 1
yield db
db.close()
|
scitbx/source_generators/lbfgs_fem.py | rimmartin/cctbx_project | 155 | 12732419 | from __future__ import absolute_import, division, print_function
import os
def run():
import libtbx.load_env
src_dir = libtbx.env.under_dist(
module_name="scitbx", path="lbfgs", test=os.path.isdir)
import fable.read
all_fprocs = fable.read.process(
file_names=[os.path.join(src_dir, f) for f in ["sdrive.f", "lbfgs.f"]])
namespace = "scitbx::lbfgs_fem"
functions_public = set(["lbfgs", "blockdata_lb2"])
functions_detail = set(["lb1", "daxpy", "ddot", "mcstep", "mcsrch"])
functions_program = set(["one_pass"])
import fable.cout
functions_hpp = fable.cout.process(
all_fprocs=all_fprocs,
namespace=namespace,
fem_do_safe=False,
suppress_program=True,
suppress_common=False,
suppress_functions=functions_detail.union(functions_program),
suppress_function_definitions=functions_public)
functions_cpp = fable.cout.process(
all_fprocs=all_fprocs,
namespace=namespace,
fem_do_safe=False,
suppress_program=True,
suppress_common=True,
suppress_functions=functions_program)
functions_cpp[0] = "#include <scitbx/lbfgs_fem.hpp>"
sdrive_cpp = fable.cout.process(
all_fprocs=all_fprocs,
namespace=namespace,
fem_do_safe=False,
suppress_common=True,
suppress_functions=functions_detail.union(functions_public))
sdrive_cpp[0] = functions_cpp[0]
#
def make_target_dir(path):
result = libtbx.env.under_build(path=path)
if (not os.path.isdir(result)):
os.makedirs(result)
assert os.path.isdir(result)
return result
target_dir = make_target_dir(path="include/scitbx")
with open(os.path.join(target_dir, "lbfgs_fem.hpp"), "w") as fh:
fh.write("\n".join(functions_hpp))
target_dir = make_target_dir(path="scitbx/lbfgs")
with open(os.path.join(target_dir, "lbfgs_fem.cpp"), "w") as fh:
fh.write("\n".join(functions_cpp))
with open(os.path.join(target_dir, "sdrive_fem.cpp"), "w") as fh:
fh.write("\n".join(sdrive_cpp))
if __name__ == "__main__":
run()
|
python_toolbox/wx_tools/widgets/hue_selection_dialog/textual.py | hboshnak/python_toolbox | 119 | 12732495 | <reponame>hboshnak/python_toolbox
# Copyright 2009-2011 <NAME>.
# This program is distributed under the LGPL2.1 license.
'''
Defines the `Textual` class.
See its documentation for more details.
'''
from __future__ import division
import wx
from python_toolbox import freezing
from python_toolbox import wx_tools
from python_toolbox.wx_tools.widgets.cute_panel import CutePanel
def ratio_to_round_degrees(ratio):
return int(ratio * 360)
def degrees_to_ratio(degrees):
return degrees / 360
class Textual(CutePanel):
'''Display (and allow modifying) the hue as a number 0-359.'''
def __init__(self, hue_selection_dialog):
wx.Panel.__init__(self, parent=hue_selection_dialog, size=(75, 100))
self.set_good_background_color()
self.SetHelpText(
'Set the hue in angles (0°-359°).'
)
self.hue_selection_dialog = hue_selection_dialog
self.hue = hue_selection_dialog.hue
self.main_v_sizer = wx.BoxSizer(wx.VERTICAL)
self.hue_static_text = wx.StaticText(self, label='&Hue:')
self.main_v_sizer.Add(self.hue_static_text, 0,
wx.ALIGN_LEFT | wx.BOTTOM, border=5)
self.h_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.main_v_sizer.Add(self.h_sizer, 0)
self.spin_ctrl = wx.SpinCtrl(self, min=0, max=359,
initial=ratio_to_round_degrees(self.hue),
size=(70, -1), style=wx.SP_WRAP)
if wx_tools.is_mac:
self.spin_ctrl.SetValue(ratio_to_round_degrees(self.hue))
self.h_sizer.Add(self.spin_ctrl, 0)
self.degree_static_text = wx.StaticText(self, label=unichr(176))
self.h_sizer.Add(self.degree_static_text, 0)
self.SetSizerAndFit(self.main_v_sizer)
self.Bind(wx.EVT_SPINCTRL, self._on_spin, source=self.spin_ctrl)
self.Bind(wx.EVT_TEXT, self._on_text, source=self.spin_ctrl)
value_freezer = freezing.FreezerProperty()
def update(self):
'''Update to show the new hue.'''
if not self.value_freezer.frozen and \
self.hue != self.hue_selection_dialog.hue:
self.hue = self.hue_selection_dialog.hue
self.spin_ctrl.SetValue(ratio_to_round_degrees(self.hue))
def _on_spin(self, event):
self.hue_selection_dialog.setter(
degrees_to_ratio(self.spin_ctrl.Value)
)
def _on_text(self, event):
with self.value_freezer:
self.hue_selection_dialog.setter(
degrees_to_ratio(self.spin_ctrl.Value)
)
def set_focus_on_spin_ctrl_and_select_all(self):
'''
The "select all" part works only on Windows and generic `wx.SpinCtrl`
implementations.
'''
self.spin_ctrl.SetFocus()
self.spin_ctrl.SetSelection(-1, -1)
|
dataflows/processors/conditional.py | cschloer/dataflows | 160 | 12732500 | from .. import DataStreamProcessor
class conditional(DataStreamProcessor):
def __init__(self, predicate, flow):
super().__init__()
self.predicate = predicate
self.flow = flow
def _process(self):
ds = self.source._process()
if self.predicate(ds.dp):
if callable(self.flow):
flow = self.flow(ds.dp)
else:
flow = self.flow
return flow.datastream(ds)
else:
return ds
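# --- Usage sketch (illustrative; not taken from the library docs) -------------
# A hedged example of wiring `conditional` into a flow. `Flow` and `printer` are
# standard dataflows helpers; the predicate receives the datapackage (ds.dp above),
# so it can branch on metadata such as the number of resources.
#
#     from dataflows import Flow, printer
#     Flow(
#         [{"a": 1}],                              # any source step
#         conditional(
#             lambda dp: len(dp.resources) > 0,    # run the sub-flow only if this holds
#             Flow(printer()),
#         ),
#     ).process()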
|
caffe2/python/operator_test/string_ops_test.py | KevinKecc/caffe2 | 585 | 12732522 | # Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
def _string_lists(alphabet=None):
return st.lists(
elements=st.text(alphabet=alphabet, average_size=3),
min_size=0,
max_size=3)
class TestStringOps(hu.HypothesisTestCase):
@given(strings=_string_lists())
def test_string_prefix(self, strings):
length = 3
# although we are utf-8 encoding below to avoid python exceptions,
# StringPrefix op deals with byte-length prefixes, which may produce
# an invalid utf-8 string. The goal here is just to avoid python
# complaining about the unicode -> str conversion.
strings = np.array(
[a.encode('utf-8') for a in strings], dtype=np.object
)
def string_prefix_ref(strings):
return (
np.array([a[:length] for a in strings], dtype=object),
)
op = core.CreateOperator(
'StringPrefix',
['strings'],
['stripped'],
length=length)
self.assertReferenceChecks(
hu.cpu_do,
op,
[strings],
string_prefix_ref)
@given(strings=_string_lists())
def test_string_suffix(self, strings):
length = 3
strings = np.array(
[a.encode('utf-8') for a in strings], dtype=np.object
)
def string_suffix_ref(strings):
return (
np.array([a[-length:] for a in strings], dtype=object),
)
op = core.CreateOperator(
'StringSuffix',
['strings'],
['stripped'],
length=length)
self.assertReferenceChecks(
hu.cpu_do,
op,
[strings],
string_suffix_ref)
@given(strings=st.text(alphabet=['a', 'b'], average_size=3))
def test_string_starts_with(self, strings):
prefix = 'a'
strings = np.array(
[str(a) for a in strings], dtype=np.object
)
def string_starts_with_ref(strings):
return (
np.array([a.startswith(prefix) for a in strings], dtype=bool),
)
op = core.CreateOperator(
'StringStartsWith',
['strings'],
['bools'],
prefix=prefix)
self.assertReferenceChecks(
hu.cpu_do,
op,
[strings],
string_starts_with_ref)
@given(strings=st.text(alphabet=['a', 'b'], average_size=3))
def test_string_ends_with(self, strings):
suffix = 'a'
strings = np.array(
[str(a) for a in strings], dtype=np.object
)
def string_ends_with_ref(strings):
return (
np.array([a.endswith(suffix) for a in strings], dtype=bool),
)
op = core.CreateOperator(
'StringEndsWith',
['strings'],
['bools'],
suffix=suffix)
self.assertReferenceChecks(
hu.cpu_do,
op,
[strings],
string_ends_with_ref)
if __name__ == "__main__":
import unittest
unittest.main()
|
tests/integration/cartography/intel/pagerduty/test_schedules.py | ramonpetgrave64/cartography | 2,322 | 12732614 | <reponame>ramonpetgrave64/cartography
import cartography.intel.pagerduty.schedules
import tests.data.pagerduty.schedules
TEST_UPDATE_TAG = 123456789
def test_load_schedule_data(neo4j_session):
schedule_data = tests.data.pagerduty.schedules.LIST_SCHEDULES_DATA
cartography.intel.pagerduty.schedules.load_schedule_data(
neo4j_session,
schedule_data,
TEST_UPDATE_TAG,
)
expected_nodes = {
"PI7DH85",
}
nodes = neo4j_session.run(
"""
MATCH (n:PagerDutySchedule) RETURN n.id;
""",
)
actual_nodes = {n['n.id'] for n in nodes}
assert actual_nodes == expected_nodes
expected_layers = {
"PI7DH85-Night Shift",
}
layers = neo4j_session.run(
"""
MATCH (:PagerDutySchedule{id:"PI7DH85"})-[:HAS_LAYER]->(n:PagerDutyScheduleLayer)
RETURN n.id;
""",
)
actual_layers = {n['n.id'] for n in layers}
assert actual_layers == expected_layers
|
pyclue/tf1/models/engine/__init__.py | CLUEbenchmark/PyCLUE | 122 | 12732618 | <reponame>CLUEbenchmark/PyCLUE
#!/usr/bin/python3
"""
@Author: <NAME>
@Site: https://github.com/liushaoweihua
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
utils/ngworead.py | haygcao/UnicomDailyTask | 148 | 12732621 | <reponame>haygcao/UnicomDailyTask
# -*- coding: utf8 -*-
from utils.config import BASE_DIR
from Crypto.Cipher import AES
from Crypto.Util.Padding import unpad, pad
from hashlib import md5
import base64
import execjs
import json
def auth_sign(appId, timestamp, key='<KEY>'):
return md5(''.join([appId, key, str(timestamp)]).encode('utf8')).hexdigest()
def encrypt(plaintext, accesstoken):
key = accesstoken[16:].encode('utf8')
iv = '16-Bytes--String'.encode('utf8')
data = json.dumps(plaintext, ensure_ascii=False, separators=(',', ':',)).encode('utf8')
data = pad(data, 16)
cipher = AES.new(key, AES.MODE_CBC, iv)
buf = cipher.encrypt(data).hex().encode('utf8')
return base64.b64encode(buf).decode('utf8')
def decrypt(ciphertext, accesstoken):
key = accesstoken[16:].encode('utf8')
iv = '16-Bytes--String'.encode('utf8')
data = bytes.fromhex(base64.b64decode(ciphertext).decode('utf8'))
cipher = AES.new(key, AES.MODE_CBC, iv)
buf = cipher.decrypt(data)
buf = unpad(buf, 16)
return json.loads(buf)
def cryptojs_encrypt(plaintext):
key = 'null'
iv = '16-Bytes--String'
with open(BASE_DIR + '/utils/crypto-js.js', 'r', encoding='utf8') as fp:
script = fp.read()
ctx = execjs.compile(script)
result = ctx.call('encrypt', key, iv, plaintext)
return result
def cryptojs_decrypt(ciphertext):
key = 'null'
iv = '16-Bytes--String'
with open(BASE_DIR + '/utils/crypto-js.js', 'r', encoding='utf8') as fp:
script = fp.read()
ctx = execjs.compile(script)
result = ctx.call('decrypt', key, iv, ciphertext)
return result
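# --- Illustrative round trip (added sketch; the token below is a dummy value) ---
# encrypt()/decrypt() derive the AES key from the last 16 characters of the access
# token and use the fixed IV '16-Bytes--String', so any 32-character placeholder
# token is enough to demonstrate the round trip.
def _aes_roundtrip_demo():
    dummy_token = "0123456789abcdef0123456789abcdef"  # placeholder, not a real token
    payload = {"msg": "hello"}
    ciphertext = encrypt(payload, dummy_token)
    assert decrypt(ciphertext, dummy_token) == payload
    return ciphertext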
if __name__ == '__main__':
pass
|
machina/models/__init__.py | AswinRetnakumar/Machina | 302 | 12732625 | from machina.models.base import BaseModel
from machina.models.deterministic_state_model import DeterministicSModel
|
scripts/staging/sklearn/tests/util.py | mgd-hin/systemds | 372 | 12732636 | # -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
import sys
import os
import subprocess
import difflib
import logging
def get_systemds_root():
try:
return os.environ['SYSTEMDS_ROOT']
except KeyError as error:
raise KeyError(f"SYSTEMDS_ROOT is not set.\nError\n{error}")
def get_sklearn_root():
return f'{get_systemds_root()}/scripts/staging/sklearn'
def invoke_systemds(path):
root = get_systemds_root()
try:
script_path = os.path.relpath(path, os.getcwd())
result = subprocess.run([root + "/bin/systemds", script_path, '-nvargs input_X=tests/input_X.csv input_Y=tests/input_Y.csv'],
check=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
timeout=10000)
logging.debug('*' * 100)
logging.debug('\n' + result.stdout.decode('utf-8'))
logging.debug('\n' + result.stderr.decode('utf-8'))
logging.debug('*' * 100)
# It looks like python does not notice systemds errors
# Is 0 returned in error cases?
# Check if there is any error and raise manually.
if len(result.stderr) != 0 or 'error' in str(result.stdout).lower():
raise subprocess.CalledProcessError(returncode=result.returncode, cmd=result.args,
stderr=result.stderr, output=result.stdout)
except subprocess.CalledProcessError as systemds_error:
logging.error("Failed to run systemds!")
logging.error("Error code: " + str(systemds_error.returncode))
logging.error("Stdout:")
logging.error(systemds_error.output.decode("utf-8"))
logging.error("Stderr:")
logging.error(systemds_error.stderr.decode("utf-8"))
return False
logging.info("Successfully executed script.")
return True
def test_script(path):
logging.info('#' * 30)
logging.info('Running generated script on systemds.')
result = invoke_systemds(path)
logging.info('Finished test.')
return result
# Compares two scripts using diff
def compare_script(actual, expected):
try:
f_expected = open(f'{get_sklearn_root()}/tests/expected/{expected}')
f_actual = open(f'{get_sklearn_root()}/{actual}')
diff = difflib.ndiff(f_actual.readlines(), f_expected.readlines())
changes = [l.strip() for l in diff if not l.startswith(' ')]
logging.info('#' * 30)
if len(changes) == 0:
logging.info('Actual script matches expected script.')
return True
else:
logging.info('Actual script does not match expected script.')
logging.info('Legend:')
logging.info(' "+ " ... line unique to actual script')
logging.info(' "- " ... line unique to expected script')
            logging.info('  "? " ... line not present in either script')
logging.info('#' * 30)
logging.info('\n' + '\n'.join(changes))
logging.info('#' * 30)
return False
except Exception as e:
logging.error('Failed to compare script.')
logging.error(e)
        return False
 |
tests/integration/workflows/go_modules/utils.py | ekmixon/aws-lambda-builders | 180 | 12732678 | from elftools.elf.elffile import ELFFile
def get_executable_arch(path):
"""
Returns the architecture of an executable binary
Parameters
----------
path : str
path to the Go binaries generated
Returns
-------
str
Architecture type of the generated binaries
"""
with open(str(path), "rb") as f:
e = ELFFile(f)
return e.get_machine_arch()
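# --- Usage sketch (illustrative; the binary path below is a placeholder) --------
# For a Go binary built for linux/amd64, pyelftools typically reports "x64", which
# is the kind of value a test would compare against.
#
#     arch = get_executable_arch("./build/bootstrap")  # hypothetical artifact path
#     assert arch == "x64"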
|
tests/core/wrapper/test_solc_wrapper.py | Jonasmpi/py-solc | 153 | 12732681 | from __future__ import unicode_literals
import pytest
import json
import os
from solc import get_solc_version
from solc.wrapper import (
solc_wrapper,
)
def is_benign(err):
return not err or err in (
'Warning: This is a pre-release compiler version, please do not use it in production.\n',
)
def test_help():
output, err, _, _ = solc_wrapper(help=True, success_return_code=1)
assert output
assert 'Solidity' in output
assert is_benign(err)
def test_version():
output, err, _, _ = solc_wrapper(version=True)
assert output
assert 'Version' in output
assert is_benign(err)
def test_providing_stdin(FOO_SOURCE):
output, err, _, _ = solc_wrapper(stdin=FOO_SOURCE, bin=True)
assert output
assert 'Foo' in output
assert is_benign(err)
def test_providing_single_source_file(contracts_dir, FOO_SOURCE):
source_file_path = os.path.join(contracts_dir, 'Foo.sol')
with open(source_file_path, 'w') as source_file:
source_file.write(FOO_SOURCE)
output, err, _, _ = solc_wrapper(source_files=[source_file_path], bin=True)
assert output
assert 'Foo' in output
assert is_benign(err)
def test_providing_multiple_source_files(contracts_dir, FOO_SOURCE, BAR_SOURCE):
source_file_a_path = os.path.join(contracts_dir, 'Foo.sol')
source_file_b_path = os.path.join(contracts_dir, 'Bar.sol')
with open(source_file_a_path, 'w') as source_file:
source_file.write(FOO_SOURCE)
with open(source_file_b_path, 'w') as source_file:
source_file.write(BAR_SOURCE)
output, err, _, _ = solc_wrapper(source_files=[source_file_a_path, source_file_b_path], bin=True)
assert output
assert 'Foo' in output
assert 'Bar' in output
assert is_benign(err)
@pytest.mark.requires_standard_json
def test_providing_standard_json_input(FOO_SOURCE, BAR_SOURCE):
stdin = json.dumps({
"language": "Solidity",
"sources": {
"Foo.sol": {
"content": FOO_SOURCE
},
"Bar.sol": {
"content": BAR_SOURCE
}
},
"settings":
{
"outputSelection": {
"*": {
"*": [ "abi", "evm.bytecode.link_references", "evm.bytecode.object", "devdoc", "metadata", "userdoc" ]
}
}
}
})
output, err, _, _ = solc_wrapper(stdin=stdin, standard_json=True)
output = json.loads(output)
assert output
assert 'Foo.sol' in output['contracts']
assert 'Bar.sol' in output['contracts']
assert is_benign(err)
|
examples/property_prediction/MTL/model/__init__.py | siboehm/dgl-lifesci | 390 | 12732683 | <filename>examples/property_prediction/MTL/model/__init__.py
from .gcn import GCNRegressor, GCNRegressorBypass
from .gat import GATRegressor, GATRegressorBypass
from .mpnn import MPNNRegressor, MPNNRegressorBypass
from .attentivefp import AttentiveFPRegressor, AttentiveFPRegressorBypass
 |
nautobot/core/runner/importer.py | psmware-ltd/nautobot | 384 | 12732698 | <filename>nautobot/core/runner/importer.py<gh_stars>100-1000
"""
logan.importer
~~~~~~~~~~~~~~
:copyright: (c) 2012 <NAME>.
:license: Apache License 2.0, see LICENSE for more details.
"""
from __future__ import absolute_import, unicode_literals
try:
unicode
except NameError:
basestring = unicode = str # Python 3
try:
execfile
except NameError: # Python3
def execfile(afile, globalz=None, localz=None):
with open(afile, "r") as fh:
exec(fh.read(), globalz, localz)
import sys
try:
from django.utils.importlib import import_module # django<=1.9
except ImportError:
from importlib import import_module
from .settings import load_settings, create_module
installed = False
def install(name, config_path, default_settings, **kwargs):
"""Install our custom module importer logic.
Args:
name (str): Module name to handle specially (e.g., "nautobot_config")
config_path (str): Absolute path to the module in question (e.g., "/opt/nautobot/nautobot_config.py")
default_settings (str): Settings module name to inherit settings from (e.g., "nautobot.core.settings")
"""
global installed
if installed:
# TODO: reinstall
return
# Ensure that our custom importer for the config module takes precedence over standard Python import machinery
sys.meta_path.insert(0, LoganImporter(name, config_path, default_settings, **kwargs))
installed = True
class ConfigurationError(Exception):
pass
class LoganImporter(object):
"""Implementation of importlib.abc.MetaPathFinder interface."""
def __init__(self, name, config_path, default_settings=None, allow_extras=True, callback=None):
"""Instantiate the custom meta path finder.
Args:
name (str): Module name to handle specially (e.g., "nautobot_config")
config_path (str): Absolute path to the module in question (e.g., "/opt/nautobot/nautobot_config.py")
default_settings (str): Settings module name to inherit settings from (e.g., "nautobot.core.settings")
allow_extras (bool): Whether to allow extension of settings variables via "EXTRA_<setting>" values
callback (func): Callback function to invoke after loading the module into settings
"""
self.name = name
self.config_path = config_path
self.default_settings = default_settings
self.allow_extras = allow_extras
self.callback = callback
self.validate()
def __repr__(self):
return "<%s for '%s' (%s)>" % (type(self), self.name, self.config_path)
def validate(self):
# TODO(dcramer): is there a better way to handle validation so it
# is lazy and actually happens in LoganLoader?
try:
execfile(self.config_path, {"__file__": self.config_path})
except Exception as e:
exc_info = sys.exc_info()
raise ConfigurationError(unicode(e), exc_info[2])
def find_module(self, fullname, path=None):
"""Meta path finder API function implementation.
Ref: https://docs.python.org/3/library/importlib.html#importlib.abc.MetaPathFinder.find_module
TODO: find_module() API is deprecated, convert this to find_spec() instead.
"""
# Only find/load the module matching self.name - otherwise let the standard Python import machinery handle it
if fullname != self.name:
return
return LoganLoader(
name=self.name,
config_path=self.config_path,
default_settings=self.default_settings,
allow_extras=self.allow_extras,
callback=self.callback,
)
class LoganLoader(object):
"""Implementation of importlib.abc.Loader interface."""
def __init__(self, name, config_path, default_settings=None, allow_extras=True, callback=None):
self.name = name
self.config_path = config_path
self.default_settings = default_settings
self.allow_extras = allow_extras
self.callback = callback
def load_module(self, fullname):
"""Loader API function implementation.
TODO: load_module() API is deprecated, convert this to create_module()/exec_module() instead.
"""
try:
return self._load_module(fullname)
except Exception as e:
exc_info = sys.exc_info()
raise ConfigurationError(unicode(e), exc_info[2])
def _load_module(self, fullname):
# TODO: is this needed?
if fullname in sys.modules:
return sys.modules[fullname] # pragma: no cover
if self.default_settings:
default_settings_mod = import_module(self.default_settings)
else:
default_settings_mod = None
settings_mod = create_module(self.name)
# Django doesn't play too nice without the config file living as a real file, so let's fake it.
settings_mod.__file__ = self.config_path
# install the default settings for this app
load_settings(default_settings_mod, allow_extras=self.allow_extras, settings=settings_mod)
# install the custom settings for this app
load_settings(self.config_path, allow_extras=self.allow_extras, settings=settings_mod)
if self.callback:
self.callback(settings_mod)
return settings_mod
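# --- Usage sketch (illustrative; paths and module names echo the docstrings above) ---
# After install() runs, importing the named module is intercepted by LoganImporter,
# which loads the default settings module first and then overlays the user's file:
#
#     install(
#         "nautobot_config",                      # module name to intercept
#         "/opt/nautobot/nautobot_config.py",     # user configuration file
#         "nautobot.core.settings",               # defaults to inherit from
#     )
#     import nautobot_config                      # resolved by LoganImporter
#     print(nautobot_config.__file__)             # -> /opt/nautobot/nautobot_config.py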
|
pollbot/display/__init__.py | tigerdar004/RweddingPoll | 112 | 12732703 | <reponame>tigerdar004/RweddingPoll
# Import for easier re-export
from .poll import * # noqa
from .settings import * # noqa
|
train.py | DoranLyong/person-reid-tiny-baseline | 202 | 12732779 | import os
from torch.backends import cudnn
from config import Config
from utils.logger import setup_logger
from datasets import make_dataloader
from model import make_model
from solver import make_optimizer, WarmupMultiStepLR
from loss import make_loss
from processor import do_train
if __name__ == '__main__':
cfg = Config()
if not os.path.exists(cfg.LOG_DIR):
os.mkdir(cfg.LOG_DIR)
logger = setup_logger('{}'.format(cfg.PROJECT_NAME), cfg.LOG_DIR)
logger.info("Running with config:\n{}".format(cfg.CFG_NAME))
os.environ['CUDA_VISIBLE_DEVICES'] = cfg.DEVICE_ID
cudnn.benchmark = True
# This flag allows you to enable the inbuilt cudnn auto-tuner to find the best algorithm to use for your hardware.
train_loader, val_loader, num_query, num_classes = make_dataloader(cfg)
model = make_model(cfg, num_class=num_classes)
loss_func, center_criterion = make_loss(cfg, num_classes=num_classes)
optimizer, optimizer_center = make_optimizer(cfg, model, center_criterion)
scheduler = WarmupMultiStepLR(optimizer, cfg.STEPS, cfg.GAMMA,
cfg.WARMUP_FACTOR,
cfg.WARMUP_EPOCHS, cfg.WARMUP_METHOD)
do_train(
cfg,
model,
center_criterion,
train_loader,
val_loader,
optimizer,
optimizer_center,
scheduler, # modify for using self trained model
loss_func,
num_query
)
|