id (string, length 1–8) | text (string, length 6–1.05M) | dataset_id (stringclasses, 1 value)
---|---|---
6673919
|
<reponame>Zadigo/django_template_project<filename>accounts/forms/passwords.py
from django.contrib.auth.forms import PasswordResetForm, SetPasswordForm
from django.forms.fields import CharField, EmailField
from django.forms.widgets import EmailInput, PasswordInput
from django.utils.translation import gettext_lazy as _
class CustomPasswordResetForm(PasswordResetForm):
email = EmailField(
label=_('Email'),
max_length=254,
widget=EmailInput(
attrs={
'class': 'form-control',
'autocomplete': 'email',
'placeholder': 'Email'
}
)
)
def save(self, request, from_email, **kwargs):
super().save(
from_email=from_email,
subject_template_name='includes/emails/password_reset_subject.txt',
email_template_name='includes/emails/password_reset_email.html',
request=request,
**kwargs
)
class CustomSetPasswordForm(SetPasswordForm):
new_password1 = CharField(
label=_('Nouveau mot de passe'),
widget=PasswordInput(
attrs={'class': 'form-control', 'placeholder': 'Nouveau mot de passe','autocomplete': 'off'}
),
strip=False
)
new_password2 = CharField(
label=_('Nouveau mot de passe confirmation'),
widget=PasswordInput(
attrs={'class': 'form-control', 'placeholder': 'Nouveau mot de passe confirmation', 'autocomplete': 'off'}
),
strip=False,
)
class CustomChangePasswordForm(CustomSetPasswordForm):
old_password = CharField(
label=_("Old password"),
strip=False,
widget=PasswordInput(
attrs={
'class': 'form-control',
'placeholder': 'Ancien mot de passe',
'autocomplete': 'current-password', 'autofocus': True
}
),
)
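# Illustrative wiring (not part of this module): these forms can be plugged into
# Django's built-in auth views through their form_class argument; the URL paths
# below are assumptions.
#
#   from django.contrib.auth import views as auth_views
#   from django.urls import path
#   from accounts.forms.passwords import (
#       CustomPasswordResetForm, CustomSetPasswordForm, CustomChangePasswordForm)
#
#   urlpatterns = [
#       path('password-reset/', auth_views.PasswordResetView.as_view(
#           form_class=CustomPasswordResetForm)),
#       path('reset/<uidb64>/<token>/', auth_views.PasswordResetConfirmView.as_view(
#           form_class=CustomSetPasswordForm)),
#       path('password-change/', auth_views.PasswordChangeView.as_view(
#           form_class=CustomChangePasswordForm)),
#   ]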
|
StarcoderdataPython
|
278218
|
import os
import unittest
import numpy as np
from . import semvec_utils as semvec
class TestSemvecUtils(unittest.TestCase):
def setUp(self) -> None:
# These few lines should enable the test setup to find the test data, wherever the test is run from.
this_dir = os.path.dirname(__file__)
semvecpy_root_dir = os.path.split(os.path.split(this_dir)[0])[0]
test_data_dir = os.path.join(semvecpy_root_dir, "test_data")
# vectors trained as follows:
# java -cp semanticvectors-5.9.jar pitt.search.semanticvectors.ESP -luceneindexpath predication_index -vectortype binary -dimension 64 -trainingcycles 8 -mutablepredicatevectors
self.predicate_vectors = semvec.readfile(os.path.join(test_data_dir, "predicatevectors.bin"))
self.semantic_vectors = semvec.readfile(os.path.join(test_data_dir, "semanticvectors.bin"))
self.elemental_vectors = semvec.readfile(os.path.join(test_data_dir, "elementalvectors.bin"))
def test_compareterms1(self):
result = semvec.compare_terms(term1="P(HAS_CURRENCY)", term2="P(CAPITAL_OF)",
elemental_vectors=self.elemental_vectors, semantic_vectors=self.semantic_vectors,
predicate_vectors=self.predicate_vectors)
self.assertEqual(-0.0625, result)
def test_compareterms2(self):
result = semvec.compare_terms(term1="S(pretoria_(executive))", term2="E(south_africa)",
elemental_vectors=self.elemental_vectors, semantic_vectors=self.semantic_vectors,
predicate_vectors=self.predicate_vectors)
self.assertEqual(-0.15625, result)
def test_compareterms3(self):
result = semvec.compare_terms(term1="S(pretoria_(executive))*E(south_africa)", term2="P(CAPITAL_OF)",
elemental_vectors=self.elemental_vectors, semantic_vectors=self.semantic_vectors,
predicate_vectors=self.predicate_vectors)
self.assertEqual(0.84375, result)
def test_compareterms4(self):
with self.assertRaises(semvec.TermNotFoundError):
semvec.compare_terms(term1="S(pretoria_(executive))*E(south_africa)", term2="P(not_a_term)",
elemental_vectors=self.elemental_vectors, semantic_vectors=self.semantic_vectors,
predicate_vectors=self.predicate_vectors)
def test_compareterms5(self):
with self.assertRaises(semvec.TermNotFoundError):
semvec.compare_terms(term1="S(not_a_term)*E(south_africa)", term2="P(CAPITAL_OF)",
elemental_vectors=self.elemental_vectors, semantic_vectors=self.semantic_vectors,
predicate_vectors=self.predicate_vectors)
def test_compareterms6(self):
with self.assertRaises(semvec.TermNotFoundError):
semvec.compare_terms(term1="S(pretoria_(executive))*E(not_a_term)", term2="P(CAPITAL_OF)",
elemental_vectors=self.elemental_vectors, semantic_vectors=self.semantic_vectors,
predicate_vectors=self.predicate_vectors)
def test_compareterms7(self):
with self.assertRaises(semvec.MalformedQueryError):
semvec.compare_terms(term1="F(pretoria_(executive))*E(south_africa)", term2="P(CAPITAL_OF)",
elemental_vectors=self.elemental_vectors, semantic_vectors=self.semantic_vectors,
predicate_vectors=self.predicate_vectors)
def test_comparetermsbatch1(self):
terms = ["P(HAS_CURRENCY)|P(CAPITAL_OF)",
"S(pretoria_(executive))|E(south_africa)",
"S(pretoria_(executive))*E(south_africa)|P(CAPITAL_OF)"]
result = semvec.compare_terms_batch(terms=terms, elemental_vectors=self.elemental_vectors,
semantic_vectors=self.semantic_vectors,
predicate_vectors=self.predicate_vectors)
self.assertListEqual([-0.0625, -0.15625, 0.84375], result)
def test_comparetermsbatch2(self):
terms = []
result = semvec.compare_terms_batch(terms=terms, elemental_vectors=self.elemental_vectors,
semantic_vectors=self.semantic_vectors,
predicate_vectors=self.predicate_vectors)
self.assertListEqual([], result)
def test_search1(self):
result = semvec.search("S(pretoria_(executive))*E(south_africa)",
search_vectors=self.predicate_vectors,
elemental_vectors=self.elemental_vectors,
semantic_vectors=self.semantic_vectors,
predicate_vectors=self.predicate_vectors,
search_type="boundproduct")
self.assertListEqual([
[0.843750, "CAPITAL_OF"],
[0.031250, "CAPITAL_OF-INV"],
[0.000000, "HAS_CURRENCY-INV"],
[-0.031250, "HAS_CURRENCY"],
[-0.187500, "HAS_NATIONAL_ANIMAL"],
[-0.406250, "HAS_NATIONAL_ANIMAL-INV"],
], result)
def test_search2(self):
result = semvec.search("CAPITAL_OF",
search_vectors=self.predicate_vectors,
elemental_vectors=self.elemental_vectors,
semantic_vectors=self.semantic_vectors,
predicate_vectors=self.predicate_vectors,
search_type="single_term")
result2 = semvec.search("P(CAPITAL_OF)",
search_vectors=self.predicate_vectors,
elemental_vectors=self.elemental_vectors,
semantic_vectors=self.semantic_vectors,
predicate_vectors=self.predicate_vectors,
search_type="boundproduct")
self.assertListEqual([
[1.000000, "CAPITAL_OF"],
[0.000000, "CAPITAL_OF-INV"],
[-0.031250, "HAS_CURRENCY-INV"],
[-0.062500, "HAS_CURRENCY"],
[-0.218750, "HAS_NATIONAL_ANIMAL"],
[-0.375000, "HAS_NATIONAL_ANIMAL-INV"],
], result)
self.assertListEqual(result, result2)
def test_pathfinder(self):
        # reference pathfinder data
rog_test = ['0',
'13',
'29 26',
'18 25 47',
'51 44 54 8',
'23 34 43 12 17',
'17 23 43 18 15 27',
'18 15 27 36 62 47 47',
'45 33 55 65 36 67 67 33',
'15 13 39 35 73 44 41 20 20',
'15 11 37 41 73 42 41 24 44 35',
'22 28 35 22 70 32 45 24 47 43 46',
'41 31 48 73 56 65 68 35 49 66 11 63',
'48 35 54 72 56 74 76 35 53 61 21 69 27',
'23 28 51 48 72 48 47 49 80 42 44 43 71 73',
'20 47 73 48 73 55 63 54 64 57 53 64 67 73 52',
'33 65 64 53 53 55 72 59 55 68 59 66 67 66 64 16',
'18 49 74 26 67 26 58 54 71 46 50 43 58 75 51 13 12',
'28 58 76 46 62 37 57 60 69 57 52 58 61 74 58 26 27 11',
'22 49 70 50 62 47 60 60 73 63 55 62 64 75 62 9 23 29 32',
'30 64 47 48 73 49 63 63 75 64 58 70 71 74 63 17 32 35 40 8',
'31 55 78 54 68 53 67 63 72 57 64 66 75 74 63 18 34 34 39 9 19',
'45 53 35 45 45 39 55 56 49 51 50 66 64 64 50 40 38 52 56 27 23 43',
'35 69 76 65 59 74 78 72 75 77 73 75 74 78 24 11 16 17 45 33 60 49 10',
'62 63 15 49 43 27 56 62 58 62 69 70 69 73 78 47 44 61 65 30 14 58 11 32']
        # different 'r' parameters to test
rogrs = [1,1.01,1.05,1.1,1.15,1.2,1.4,1.6,1.8,2,3,4,5,6, np.inf]
        # reference results from the canonical Pathfinder implementation
rogref = [[ 119 , 104 , 103 , 103 ],
[ 102 , 89 , 87 , 87 ],
[ 95 , 83 , 81 , 81 ],
[ 86 , 75 , 70 , 70 ],
[ 76 , 66 , 62 , 61 ],
[ 72 , 65 , 60 , 59 ],
[ 63 , 53 , 53 , 52 ],
[ 56 , 51 , 51 , 50 ],
[ 50 , 47 , 45 , 45 ],
[ 47 , 44 , 42 , 42 ],
[ 39 , 37 , 36 , 34 ],
[ 35 , 31 , 31 , 29 ],
[ 32 , 30 , 29 , 27 ],
[ 32 , 30 , 28 , 26 ],
[ 32 , 28 , 27 , 25 ]]
# fill in rest
        xs = [np.asarray(x.split(' '), dtype=int) for x in rog_test]
xs = np.asarray(xs)
nuxs = []
for x in xs:
toadd = xs.shape[0] - x.shape[0]
            nuxs.append(np.concatenate([np.asarray(x), np.zeros(toadd, dtype=int)]))
nuxs = np.asarray(nuxs)
#make symmetric
nuxs = nuxs + nuxs.T - np.diag(np.diag(nuxs))
#print(nuxs)
#Test cases contributed by <NAME>
        #Test Pathfinder for all combinations of four different 'q' parameter values
#and 15 'r' parameter values, with reference results
#from the canonical implementation
for i, r in enumerate(rogrs):
qs = [np.sum( semvec.pathfinder(q, r, nuxs, cosines=False) > 0) // 2 for q in [2, 3, 4, 24]]
np.testing.assert_almost_equal(qs,rogref[i])
print('r', 'reference:',rogref[i],'this implementation:',qs, 'q of [2,3,4,24]')
testfl = np.asarray([[1, 0.95, 0.24], [0.95, 1, 0.95], [0.24, 0.95, 1]])
ansfl = np.asarray([[1, 0.95, 0],[0.95, 1, 0.95],[0, 0.95 ,1 ]])
        pruned = semvec.pathfinder(8, 1, testfl)
np.testing.assert_almost_equal(pruned,ansfl)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
6594493
|
<filename>utils/q15tofloat.py
arr = input('Enter Q15 vector separated by spaces in hexadecimal: ')
arr = arr.split()
for l in arr:
    i = int(l, 16)
    if i > 0xFFFF:
        print(l + '\033[91m' + ' : Out of range!')
        continue
    if i > 32767:  # negative
        i = i - 0x10000
    num = float(i) / float(32768)
    print(num)
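# Worked examples of the Q15 -> float conversion above:
#   0x4000 -> 16384 / 32768              =  0.5
#   0x7FFF -> 32767 / 32768              =  0.999969482421875
#   0x8000 -> (32768 - 0x10000) / 32768  = -1.0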
|
StarcoderdataPython
|
9623759
|
<gh_stars>0
#!/usr/bin/env python3
import sys
from riley.commands import ListPodcasts, FetchEpisodes, ListEpisodes, Insert, \
DownloadEpisodes, WhereIsConfig, DownloadBest
class ManagementUtility:
subcommands = {
'list': ListEpisodes,
'insert': Insert,
'podcasts': ListPodcasts,
'fetch': FetchEpisodes,
'download': DownloadEpisodes,
'download-best': DownloadBest,
'config': WhereIsConfig,
}
def execute(self, argv):
if len(argv) > 1:
subcommand = argv[1]
else:
subcommand = 'list'
argv += ['list']
try:
command = self.subcommands[subcommand]()
        except KeyError:
sys.exit('%s is not a valid subcommand.' % subcommand)
command.execute(argv)
def main():
ManagementUtility().execute(sys.argv)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
4924574
|
<gh_stars>0
"""Tests for day 10."""
from day_10.solution import (
calculate_syntax_score_of_navigation_subsystem,
calculate_completion_score_of_navigation_subsystem,
)
_TEST_INPUT = """[({(<(())[]>[[{[]{<()<>>
[(()[<>])]({[<{<<[]>>(
{([(<{}[<>[]}>{[]{[(<()>
(((({<>}<{<{<>}{[]{[]{}
[[<[([]))<([[{}[[()]]]
[{[{({}]{}}([{[{{{}}([]
{<[[]]>}<{[{[{[]{()[[[]
[<(<(<(<{}))><([]([]()
<{([([[(<>()){}]>(<<{{
<{([{{}}[<[[[<>{}]]]>[]]"""
def test_part_one_example_solution_is_recovered():
assert calculate_syntax_score_of_navigation_subsystem(_TEST_INPUT) == 26397
def test_part_two_example_solution_is_recovered():
assert calculate_completion_score_of_navigation_subsystem(_TEST_INPUT) == 288957
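# The functions under test live in day_10.solution, which is not part of this
# file. The sketch below is illustrative only (standard stack-based bracket
# matching), not the project's actual implementation; it reproduces the two
# expected scores asserted above.
_PAIRS = {"(": ")", "[": "]", "{": "}", "<": ">"}
_SYNTAX_POINTS = {")": 3, "]": 57, "}": 1197, ">": 25137}
_COMPLETION_POINTS = {")": 1, "]": 2, "}": 3, ">": 4}


def _syntax_score_sketch(text):
    total = 0
    for line in text.splitlines():
        stack = []
        for char in line:
            if char in _PAIRS:
                stack.append(_PAIRS[char])
            elif not stack or char != stack.pop():
                total += _SYNTAX_POINTS[char]  # first illegal closing character
                break
    return total


def _completion_score_sketch(text):
    scores = []
    for line in text.splitlines():
        stack = []
        for char in line:
            if char in _PAIRS:
                stack.append(_PAIRS[char])
            elif not stack or char != stack.pop():
                break  # corrupted line: ignored for completion scoring
        else:
            score = 0
            for expected in reversed(stack):
                score = score * 5 + _COMPLETION_POINTS[expected]
            scores.append(score)
    return sorted(scores)[len(scores) // 2]  # middle (median) completion score


def test_sketch_reproduces_example_scores():
    assert _syntax_score_sketch(_TEST_INPUT) == 26397
    assert _completion_score_sketch(_TEST_INPUT) == 288957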
|
StarcoderdataPython
|
6594588
|
<reponame>superisaac/django-mljson-data
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-15 10:44
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Staff',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, unique=True)),
('boss', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='subordinates', to='staff.Staff')),
],
),
]
|
StarcoderdataPython
|
6444615
|
import pytest
from scout.load.report import load_delivery_report
from scout.exceptions import DataNotFoundError, IntegrityError
def test_load_delivery_report_bad_case_id(adapter):
## GIVEN no cases in database
assert adapter.case_collection.find_one() is None
## WHEN trying to load a report for a case_id that does not exist in the data base
case_id = "id_of_non_existing_case"
report_path = "a_dummy_path"
## THEN an exception should be raised
with pytest.raises(DataNotFoundError):
load_delivery_report(adapter=adapter, case_id=case_id, report_path=report_path)
def test_load_delivery_report_using_case_id_without_update_fail(adapter, case_obj):
adapter.case_collection.insert_one(case_obj)
## GIVEN a case exist, with a delivery report
case_obj = adapter.case_collection.find_one()
assert case_obj.get("delivery_report")
## WHEN trying to load a report for a case_id that does exist in the data base without update
# flag
case_id = case_obj["_id"]
report_path2 = "report_test_path2"
## THEN a report should not have been added to that case
with pytest.raises(IntegrityError):
load_delivery_report(adapter=adapter, case_id=case_id, report_path=report_path2)
updated_case_obj = adapter.case_collection.find_one()
assert updated_case_obj.get("delivery_report") != report_path2
def test_load_delivery_report_using_case_id_with_update_success(adapter, case_obj):
adapter.case_collection.insert_one(case_obj)
## GIVEN a case exist, with a delivery report
case_obj = adapter.case_collection.find_one()
assert case_obj.get("delivery_report")
## WHEN trying to load a report for a case_id that does exist in the data base
case_id = case_obj["_id"]
report_path = "report_test_path"
update = True
load_delivery_report(adapter=adapter, case_id=case_id, report_path=report_path, update=update)
# THEN a report should have been added to that case
updated_case_obj = adapter.case_collection.find_one()
assert updated_case_obj["delivery_report"] == report_path
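# Illustrative only: a rough sketch of the behaviour these tests pin down.
# scout's real load_delivery_report differs in its details; the adapter/query
# calls below (find_one / update_one on case_collection) are assumptions.
def _load_delivery_report_sketch(adapter, case_id, report_path, update=False):
    case_obj = adapter.case_collection.find_one({"_id": case_id})
    if case_obj is None:
        raise DataNotFoundError(f"no case found with id {case_id}")
    if case_obj.get("delivery_report") and not update:
        raise IntegrityError("case already has a delivery report; pass update=True to replace it")
    adapter.case_collection.update_one(
        {"_id": case_id}, {"$set": {"delivery_report": report_path}}
    )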
|
StarcoderdataPython
|
11363564
|
# hello world of python
# vrc6 sawtooth volume map (0..15 -> 0..42)
output = open("sawVolumeMap.txt", "w")
output.write(";--------------------------------------------------------------------------------------------\n")
output.write("@sawVolumeMap:\n")
output.write(";--------------------------------------------------------------------------------------------\n")
output.write("\t.byte ")
for i in range(0, 16):
value = int(round(i * 42.0 / 15.0))
output.write(hex(int(value)).replace("0x", "").upper().zfill(2) + "h")
if (i != 15):
output.write(", ")
output.write("\n")
output.close()
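# For reference, the file generated by the loop above should contain the line
# (values recomputed by hand: round(i * 42 / 15) for i in 0..15):
#   .byte 00h, 03h, 06h, 08h, 0Bh, 0Eh, 11h, 14h, 16h, 19h, 1Ch, 1Fh, 22h, 24h, 27h, 2Ah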
|
StarcoderdataPython
|
1633313
|
def vers():
major = "1"
minor = "0"
release = "0"
pre = "alpha"
version = ''.join([major,".",minor,".",release,":",pre])
return version
|
StarcoderdataPython
|
8002691
|
# -*- coding: utf-8 -*-
from flask import Blueprint
from flask_jwt_extended.exceptions import NoAuthorizationError
from flask_restplus import Api
from jwt import ExpiredSignatureError
from permission import PermissionDeniedException
# create the blueprint for the API
api_bp = Blueprint('api', __name__, url_prefix='/api')
# TODO: adjust so that username and password appear in the docs' authorize dialog
# # define the access rule for the API
# authorizations = {
# 'apikey': {
# 'type': 'apiKey',
# 'in': 'header',
# 'name': 'Authorization'
# }
# }
# api = Api(api_bp, authorizations=authorizations, security=['apikey'])
api = Api(api_bp)
'''
TODO: add an 'action' attribute to the error handlers' responses, so the
application consuming the API knows which action can be taken to resolve
the error
TODO: create a map of possible actions
'''
# no user is logged in
@api.errorhandler(NoAuthorizationError)
def api_authorization_error_handler(error):
return { 'message': 'você não está autorizado para acessar este recurso' }, 401
# the logged-in user does not have permission to use the resource
@api.errorhandler(PermissionDeniedException)
def api_permission_error_handler(error):
return { 'message': error.args[0] }, 401
# the user's session has expired
@api.errorhandler(ExpiredSignatureError)
def api_expired_session_error_handler(error):
return { 'message': 'sessão expirada' }, 401
# catch generic exceptions
@api.errorhandler
def api_error_handler(error):
    # TODO: add logging for errors that reach this handler
return { 'message': error.args[0] }, 500
# register the APIs
from . import metas
# encapsulates the module registration
def init_app(app):
app.register_blueprint(api_bp)
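# Illustrative usage (not part of this module): an application factory would
# import this package and call init_app; the create_app name and package path
# below are assumptions.
#
#   from flask import Flask
#   from myproject import api as api_package
#
#   def create_app():
#       app = Flask(__name__)
#       api_package.init_app(app)
#       return app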
|
StarcoderdataPython
|
3384692
|
#
# This software is delivered under the terms of the MIT License
#
# Copyright (c) 2009 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
#
# Simple base class for plugins implementation
# Ref to http://martyalchin.com/2008/jan/10/simple-plugin-framework/
#
__all__ = ['PluginMount', 'PluginLoader', 'SourceManager']
import os, sys
verbose = 0
class PluginMount(type):
def __init__(cls, name, bases, attrs):
cls.plugin_map = {}
if not hasattr(cls, 'plugins'):
# This branch only executes when processing the mount point itself.
# So, since this is a new plugin type, not an implementation, this
# class shouldn't be registered as a plugin. Instead, it sets up a
# list where plugins can be registered later.
cls.plugins = []
else:
# This must be a plugin implementation, which should be registered.
# Simply appending it to the list is all that's needed to keep
# track of it later.
cls.plugins.append(cls)
def get_plugin(cls, name):
try:
p = cls.plugin_map[name]
except KeyError:
for p in cls.plugins:
if p.plugin_name_ == name:
cls.plugin_map[name] = p
return p
raise Exception, "Plugin not found: " + name
return p
class PluginLoader:
""" PluginLoader is a static class that loads all the availble
plugins from the plugins directory
"""
def __init__(self):
pdir = os.path.dirname(sys.argv[0])
pluginpath = os.path.join(pdir, "plugins")
try: # might not be a filesystem path
files = os.listdir(pluginpath)
sys.path.insert(0,pluginpath)
except OSError:
files = []
for file in files:
if file.endswith('.py'):
name = file.rsplit('.', 1)[0]
if verbose != 0:
print "Loading plugin " + name
__import__(name)
class SourceManager:
""" SourceManager plugins must derive from this class.
Methods that must be implemented by SourceManager plugins are:
name(), get_actual_revision(), get_head_revision(),
extract(), update(), commit(), rebase(), deliver(),
dump(), list().
Class attributes that must be available:
plugin_name_, plugin_description_
"""
__metaclass__ = PluginMount
loader = PluginLoader()
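# Illustrative example (not part of the original module): defining a plugin is
# just a matter of subclassing SourceManager; PluginMount registers every
# subclass in SourceManager.plugins automatically.
class NullSourceManager(SourceManager):
    plugin_name_ = "null"
    plugin_description_ = "Example plugin that manages no sources"

    def name(self):
        return self.plugin_name_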
|
StarcoderdataPython
|
14330
|
<filename>pylinsql/timing.py
import asyncio
import functools
import time
def _log_func_timing(f, args, kw, sec: float):
print("func: %r args: [%r, %r] took: %2.4f sec" % (f.__name__, args, kw, sec))
def timing(f):
"Decorator to log"
if asyncio.iscoroutinefunction(f):
@functools.wraps(f)
async def wrap(*args, **kw):
ts = time.time()
result = await f(*args, **kw)
te = time.time()
_log_func_timing(f, args, kw, te - ts)
return result
else:
@functools.wraps(f)
def wrap(*args, **kw):
ts = time.time()
result = f(*args, **kw)
te = time.time()
_log_func_timing(f, args, kw, te - ts)
return result
return wrap
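# Example usage (illustrative, not part of the original module): the decorator
# handles both synchronous and asynchronous callables.
if __name__ == "__main__":
    @timing
    def busy_sum(n):
        return sum(range(n))

    @timing
    async def delayed_echo(value, delay=0.1):
        await asyncio.sleep(delay)
        return value

    busy_sum(1_000_000)
    asyncio.run(delayed_echo("hello"))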
|
StarcoderdataPython
|
354275
|
#!/usr/bin/env python
"""
PySCeS - Python Simulator for Cellular Systems (http://pysces.sourceforge.net)
Copyright (C) 2004-2017 <NAME>, <NAME>, <NAME> all rights reserved,
<NAME> (<EMAIL>)
Triple-J Group for Molecular Cell Physiology
Stellenbosch University, South Africa
Permission to use, modify, and distribute this software is given under the
terms of the PySceS (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
<NAME>
"""
"""
A setup.py script to use setuptools, which gives egg goodness, etc.
Adapted from the original NumPy src (numpy.scipy.org).
"""
FRYING_EGGS = True
from setuptools import setup
execfile('setup.py')
|
StarcoderdataPython
|
3480973
|
#! /usr/bin/env python2.7
from itertools import ifilter
class Sequence:
def __init__( self, previous, current, operation ):
self._previous, self._current, self._operation = previous, current, operation
self._threshold = 0
def __iter__( self ):
return self
def __call__(self, threshold ):
self._threshold = threshold
return self
def previous( self ):
return self._previous
def current( self ):
return self._current
def next( self ):
result = self._operation( self._previous, self._current )
if result > self._threshold: raise StopIteration
self._previous, self._current = self._current, result
return self._current
__next__ = next
if __name__ == "__main__":
plain_fibonacci = Sequence( 0, 1, lambda previous, current: previous + current )
evens_only_fibonacci = Sequence( 0, 2, lambda previous, current: previous + 4 * current )
sum = lambda validator, sequence, threshold = 4000000, sumator = lambda x, y: x + y: reduce( sumator, map( lambda iterable: reduce( sumator, ifilter( validator, iterable )), [[ sequence.previous(), sequence.current() ], sequence( threshold ) ] ) )
print( "plain fibonacci: " + str( sum( lambda x: 0 == x % 2, plain_fibonacci ) ))
print( "evens only: " + str( sum( lambda x: True, evens_only_fibonacci ) ))
|
StarcoderdataPython
|
9629500
|
import json, os
import numpy as np
from subprocess import call
def make_directory_tree(path_to_make, sep='/'):
"""
Args:
path_to_make (str) - relative path of directory to make
sep (str) - os-dependent path separator
Returns:
None (makes directory of interest)
"""
path_pieces = path_to_make.split(sep)
for i in range(len(path_pieces)):
parent = sep.join(path_pieces[:i+1])
if not os.path.exists(parent):
os.mkdir(parent)
def read_json(fjson):
"""
Args:
fjson (str) - file name of json to read
Returns:
dictionary stored in fjson
"""
with open(fjson) as f:
return json.load(f)
def write_json(d, fjson):
"""
Args:
d (dict) - dictionary to write
fjson (str) - file name of json to write
Returns:
written dictionary
"""
with open(fjson, 'w') as f:
json.dump(d, f)
return d
def gcd(a,b):
"""
Args:
a (float, int) - some number
b (float, int) - another number
Returns:
        greatest common divisor (int) of a and b
"""
while b:
a, b = b, a%b
return a
def list_of_dicts_to_dict(l, major_key, other_keys):
"""
Args:
l (list) - list of dictionaries
major_key (tuple, str, float, int) - key to orient output dictionary on
other_keys (list) - list of keys (tuple, str, float, int) to include in output dictionary
Returns:
dictionary representation of information in l
"""
return {d[major_key] : {other_key : d[other_key] for other_key in other_keys} for d in l}
def H_from_E(els_to_amts, E, mus):
"""
Args:
        els_to_amts (dict) - {element (str) : amount of element in formula (int) for element in formula}
E (float) - total energy per atom
mus (dict) - {el (str) : elemental energy (float)}
Returns:
formation energy per atom (float)
"""
atoms_in_fu = np.sum(list(els_to_amts.values()))
stoich_weighted_elemental_energies = np.sum([mus[el]*els_to_amts[el] for el in els_to_amts])
E_per_fu = E*atoms_in_fu
Ef_per_fu = E_per_fu - stoich_weighted_elemental_energies
return Ef_per_fu / atoms_in_fu
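# Worked example for H_from_E (hypothetical numbers, for illustration only):
#   H_from_E({'Fe': 2, 'O': 3}, E=-8.0, mus={'Fe': -8.3, 'O': -4.9})
#   = (-8.0 * 5 - (2 * -8.3 + 3 * -4.9)) / 5
#   = (-40.0 - (-31.3)) / 5
#   = -1.74  (formation energy per atom)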
def get_pbs_q(f_qstat='qstat.txt', f_jobs='qjobs.txt', username='cbartel'):
"""
Args:
f_qstat (str) - path to write detailed queue information
f_jobs (str) - path to write job IDs
username (str) - user name on HPC
Returns:
list of job IDs in the queue (str)
"""
from subprocess import call
if os.path.exists(f_qstat):
os.remove(f_qstat)
if os.path.exists(f_jobs):
os.remove(f_jobs)
with open(f_qstat, 'wb') as f:
call(['qstat', '-f', '-u', username], stdout=f)
with open(f_jobs, 'wb') as f:
call(['grep', 'Job_Name', f_qstat], stdout=f)
with open(f_jobs) as f:
jobs_in_q = [line.split(' = ')[1][:-1] for line in f]
return jobs_in_q
def is_slurm_job_in_queue(job_name, user_name='tg857781', fqueue='q.out'):
with open(fqueue, 'w') as f:
call(['squeue','-u', user_name, '--name=%s' % job_name], stdout=f)
names_in_q = []
with open(fqueue) as f:
for line in f:
if 'PARTITION' not in line:
names_in_q.append([v for v in line.split(' ') if len(v) > 0][2])
if len(names_in_q) == 0:
return False
else:
return True
def get_stampede2_queue_counts(fqueue):
with open(fqueue, 'w') as f:
call(['squeue','-u', 'tg857781'], stdout=f)
with open(fqueue) as f:
normal = 0
skx = 0
for line in f:
if 'PARTITION' not in line:
queue = [v for v in line.split(' ') if len(v) > 0][1]
if queue == 'normal':
normal += 1
elif queue == 'skx':
skx += 1
total = normal + skx
return {'normal' : normal,
'skx' : skx,
'total' : total}
|
StarcoderdataPython
|
1653726
|
A, B = map(int, input().split())
print(A * B - (A + B - 1))
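# Equivalently (A - 1) * (B - 1), since A*B - (A + B - 1) = A*B - A - B + 1.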
|
StarcoderdataPython
|
6454810
|
import graphene
from .query import Query
from .mutation import Mutation
schema = graphene.Schema(query=Query, mutation=Mutation)
|
StarcoderdataPython
|
8085848
|
<reponame>zmwangx/ncov
#!/usr/bin/env python3
import datetime
import re
import sys
import bs4
from scraper import logger, network_retry, fetch_dom, DataEntry
@network_retry
def get_article(url):
with fetch_dom(url) as dom:
s = bs4.BeautifulSoup(dom, "html.parser")
body = s.select_one("#article-box").get_text().strip()
print(body)
return body
date_pattern = re.compile(r"^\s*2020年(?P<month>\d+)月(?P<day>\d+)日0时?(-|—)24时")
patterns = {
"hb_new_confirmed": r"新增\w+病例(?P<hb_new_confirmed>\d+)例",
"hb_new_death": r"新增(死亡|病亡)(病例)?(?P<hb_new_death>\d+)例",
"hb_new_cured": r"新增出院(病例)?(?P<hb_new_cured>\d+)例",
"hb_remaining_severe": r"(?<!危)重症(病例)?(?P<hb_remaining_severe>\d+)例",
"hb_remaining_critical": r"危重症(病例)?(?P<hb_remaining_critical>\d+)例",
"hb_cured": r"(?<!新增)出院(病例)?(?P<hb_cured>\d+)例",
"hb_death": r"(?<!新增)(死亡|病亡)(病例)?(?P<hb_death>\d+)例",
"hb_total_confirmed": r"累计报告\w+病例(?P<hb_total_confirmed>\d+)例",
"hb_remaining_suspected": r"现有疑似病例(?P<hb_remaining_suspected>\d+)(例|人)",
}
introduced = {
"hb_new_cured": "01-29",
"hb_remaining_suspected": "02-08",
}
def parse_article(body):
m = date_pattern.match(body)
month = int(m["month"])
day = int(m["day"])
date = datetime.date(2020, month, day)
date_str = f"{month:02}-{day:02}"
print(date_str)
data = dict(date=date)
for category, pattern in patterns.items():
if m := re.search(pattern, body, re.M):
if m[category]:
count = int(m[category])
else:
count = int(m[f"{category}2"])
data[category] = count
print(f"{count}\t{category}")
continue
if category in introduced and date_str < introduced[category]:
continue
logger.critical(f"{date}: no match for {category}: {pattern!r}")
if "hb_remaining_critical" in data:
data["hb_remaining_severe"] += data["hb_remaining_critical"]
del data["hb_remaining_critical"]
print(
f"{data['hb_remaining_severe']}\thb_remainig_severe + hb_remaining_critical"
)
else:
del data["hb_remaining_severe"]
if all(k in data for k in ("hb_total_confirmed", "hb_cured", "hb_death")):
data["hb_remaining_confirmed"] = (
data["hb_total_confirmed"] - data["hb_cured"] - data["hb_death"]
)
return data
def main():
for url in (
"http://wjw.hubei.gov.cn/fbjd/dtyw/202002/t20200212_2024650.shtml", # 02-11
"http://wjw.hubei.gov.cn/fbjd/tzgg/202002/t20200211_2023521.shtml", # 02-10
"http://wjw.hubei.gov.cn/fbjd/tzgg/202002/t20200210_2022515.shtml", # 02-09
"http://wjw.hubei.gov.cn/fbjd/tzgg/202002/t20200209_2021933.shtml", # 02-08
"http://wjw.hubei.gov.cn/fbjd/tzgg/202002/t20200208_2021419.shtml", # 02-07
"http://wjw.hubei.gov.cn/fbjd/tzgg/202002/t20200207_2020606.shtml", # 02-06
"http://wjw.hubei.gov.cn/fbjd/tzgg/202002/t20200206_2019848.shtml", # 02-05
"http://wjw.hubei.gov.cn/fbjd/tzgg/202002/t20200205_2019294.shtml", # 02-04
"http://wjw.hubei.gov.cn/fbjd/tzgg/202002/t20200204_2018743.shtml", # 02-03
"http://wjw.hubei.gov.cn/fbjd/tzgg/202002/t20200203_2018273.shtml", # 02-02
"http://wjw.hubei.gov.cn/fbjd/tzgg/202002/t20200202_2017659.shtml", # 02-01
"http://wjw.hubei.gov.cn/fbjd/tzgg/202002/t20200201_2017101.shtml", # 01-31
"http://wjw.hubei.gov.cn/fbjd/tzgg/202001/t20200131_2016681.shtml", # 01-30
"http://wjw.hubei.gov.cn/fbjd/tzgg/202001/t20200130_2016306.shtml", # 01-29
"http://wjw.hubei.gov.cn/fbjd/tzgg/202001/t20200129_2016108.shtml", # 01-28
"http://wjw.hubei.gov.cn/fbjd/tzgg/202001/t20200129_2016107.shtml", # 01-27
"http://wjw.hubei.gov.cn/fbjd/tzgg/202001/t20200129_2016119.shtml", # 01-26
"http://wjw.hubei.gov.cn/fbjd/tzgg/202001/t20200129_2016112.shtml", # 01-25
"http://wjw.hubei.gov.cn/fbjd/tzgg/202001/t20200125_2014856.shtml", # 01-24
"http://wjw.hubei.gov.cn/fbjd/dtyw/202001/t20200124_2014626.shtml", # 01-23
):
body = get_article(url)
data = parse_article(body)
date = data["date"]
print(data)
entry = DataEntry.get(date=date)
for key in patterns:
if key == "hb_remaining_critical":
continue
val = data.get(key)
existing_val = getattr(entry, key)
if existing_val is not None and val is not None and existing_val != val:
logger.critical(
f"{date} {key} discrepancy: NHC value {existing_val}, Hubei HC value {val}"
)
sys.exit(1)
DataEntry.update(**data).where(DataEntry.date == date).execute()
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
5128334
|
import cscraper, time, os, argparse
path_of_folder = os.path.join(os.path.dirname(os.path.realpath(__file__)), "data")
parser = argparse.ArgumentParser(description="Parse sites for contact data - emails and phones.")
parser.add_argument('-q', '--query', help="provide a search query.") # make possible providing multiple queries.
parser.add_argument('-d', '--debug', help="display additional information.", action="store_true")
parser.add_argument('-n', '--num', help="provide a number of searches (defaults to 30).", type=int)
def main():
args = parser.parse_args()
city = "kraków"
unquoted_search = "{0} {1}".format(args.query, city)
if args.num is not None:
links_num = args.num
else:
links_num = 30
links = cscraper.simple_get_links(unquoted_search, links_num)
print("Finding emails and phones...")
data = cscraper.parse_through_sites(links)
print("Writing file...")
cscraper.make_folder(path_of_folder)
cscraper.write_to_csv(data, unquoted_search, args.num, path_of_folder)
print("File written.")
time.sleep(3)
def debug_search_parse_write():
search = "agencje reklamowe kraków"
number_of_links = 70
    links = cscraper.simple_get_links(search, number_of_links)
    print("Finding emails and phones...")
    data = cscraper.parse_through_sites(links)
    print("Writing file...")
    cscraper.make_folder(path_of_folder)
    cscraper.write_to_csv(data, search, number_of_links, path_of_folder)
print("File written.")
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3562038
|
a=1
b=2
c=3
d=b*b -4*a*c
d=d**0.5
r1=(-b + d)/(2*a)
r2=(-b - d)/(2*a)
print(r1,r2)
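# With a=1, b=2, c=3 the discriminant b*b - 4*a*c equals -8, so the roots are
# the complex conjugate pair -1 ± i*sqrt(2) ≈ -1 ± 1.414j.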
|
StarcoderdataPython
|
12860855
|
<reponame>iamvukasin/filminds
from abc import ABC, abstractmethod
import tmdbsimple as tmdb
from django.contrib.auth.decorators import login_required
from django.http import Http404
from django.utils.decorators import method_decorator
from rest_framework.response import Response
from rest_framework.views import APIView
from api.serializers import MovieSerializer
from app.models import Movie, SearchedMovie, User, CollectedMovie
MAX_NUM_CASTS = 4
class AddCollectedMovie(ABC, APIView):
"""
Adds the given movie to the user's favorites or watch list based
on list_type property.
"""
@method_decorator(login_required)
def get(self, request, pk):
user = User.get_user(request.user)
movie = Movie.get_or_create(pk)
if movie is None:
raise Http404
try:
collected_item = CollectedMovie.objects.filter(user=user, movie=movie).get()
collected_item.type = self.list_type
except CollectedMovie.DoesNotExist:
collected_item = CollectedMovie(
user=user,
movie=movie,
type=self.list_type
)
collected_item.save()
# success status
return Response('')
@property
@abstractmethod
def list_type(self):
pass
class MovieAddToFavorites(AddCollectedMovie):
"""
Adds the given movie to the user's favorites list.
"""
list_type = CollectedMovie.TYPE_WISH
class MovieAddToWatched(AddCollectedMovie):
"""
Adds the given movie to the user's watch list.
"""
list_type = CollectedMovie.TYPE_WATCH
class RemoveCollectedMovie(APIView):
"""
Removes the given movie to the user's favorites or watch list.
"""
@method_decorator(login_required)
def get(self, request, pk):
user = User.get_user(request.user)
movie = Movie.get_or_create(pk)
if movie is None:
raise Http404
CollectedMovie.objects.filter(user=user, movie=movie).delete()
# success status
return Response('')
class MovieInfo(APIView):
"""
Returns movie information from the database (data defined in Movie
model + cast information), if the movie has been already added. If
not, gets the information from TMDB, saves to the database and
then returns it.
"""
def get(self, request, pk):
movie = Movie.get_or_create(pk)
if movie is None:
raise Http404
# insert movie into searched movies table
if request.user.is_authenticated:
SearchedMovie.increment_search_count(User.get_user(request.user), movie)
serializer = MovieSerializer(movie)
data = serializer.data
# get actors from TMDB
movie_credits = tmdb.Movies(pk).credits()
data['cast'] = []
for cast in movie_credits['cast'][:MAX_NUM_CASTS]:
cast_data = {k: v for k, v in cast.items() if k in {'character', 'name', 'profile_path'}}
# set default profile photo if no photo is received
# from TMDB
if cast_data['profile_path'] is None:
cast_data['profile_path'] = ''
else:
cast_data['profile_path'] = f'https://image.tmdb.org/t/p/w276_and_h350_face{cast_data["profile_path"]}'
data['cast'].append(cast_data)
return Response(data)
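# Illustrative URL wiring (not part of this module; path strings are assumptions):
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('movies/<int:pk>/', views.MovieInfo.as_view()),
#       path('movies/<int:pk>/favorite/', views.MovieAddToFavorites.as_view()),
#       path('movies/<int:pk>/watched/', views.MovieAddToWatched.as_view()),
#       path('movies/<int:pk>/remove/', views.RemoveCollectedMovie.as_view()),
#   ]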
|
StarcoderdataPython
|
6612194
|
<filename>createInstaller/mac/createCompiledCode.py<gh_stars>1-10
"""
############ R INSTALL#################
cp -R /Library/Frameworks/R.framework.bak /Applications/Red-R.app/R
find ./ -name *.dylib -or -name *.so -exec install_name_tool -change /Library/Frameworks/R.framework/Versions/2.11/Resources/lib/libgfortran.2.dylib /Applications/Red-R.app/R/R.framework/Resources/lib/libgfortran.2.dylib {} \;
find ./ -name *.dylib -or -name *.so -exec install_name_tool -change /Library/Frameworks/R.framework/Versions/2.11/Resources/lib/libreadline.5.2.dylib /Applications/Red-R.app/R/R.framework/Resources/lib/libreadline.5.2.dylib {} \;
find ./ -name *.dylib -or -name *.so -exec install_name_tool -change /Library/Frameworks/R.framework/Versions/2.11/Resources/lib/libRblas.dylib /Applications/Red-R.app/R/R.framework/Resources/lib/libRblas.dylib {} \;
find ./ -name *.dylib -or -name *.so -exec install_name_tool -change /Library/Frameworks/R.framework/Versions/2.11/Resources/lib/libR.dylib /Applications/Red-R.app/R/R.framework/Resources/lib/libR.dylib {} \;
find ./ -name *.dylib -or -name *.so -exec install_name_tool -change /Library/Frameworks/R.framework/Versions/2.11/Resources/lib/libRlapack.dylib /Applications/Red-R.app/R/R.framework/Resources/lib/libRlapack.dylib {} \;
install_name_tool -id /Applications/Red-R.app/R/R.framework/Resources/lib/libR.dylib /Applications/Red-R.app/R/R.framework/Resources/lib/libR.dylib
install_name_tool -id /Applications/Red-R.app/R/R.framework/Resources/lib/libRblas.dylib /Applications/Red-R.app/R/R.framework/Resources/lib/libRblas.0.dylib
install_name_tool -id /Applications/Red-R.app/R/R.framework/Resources/lib/libRblas.dylib /Applications/Red-R.app/R/R.framework/Resources/lib/libRblas.dylib
install_name_tool -id /Applications/Red-R.app/R/R.framework/Resources/lib/libRblas.dylib /Applications/Red-R.app/R/R.framework/Resources/lib/libRblas.vecLib.dylib
install_name_tool -id /Applications/Red-R.app/R/R.framework/Resources/lib/libRlapack.dylib /Applications/Red-R.app/R/R.framework/Resources/lib/libRlapack.dylib
install_name_tool -id /Applications/Red-R.app/R/R.framework/Resources/lib/libgcc_s.1.dylib /Applications/Red-R.app/R/R.framework/Resources/lib/libgcc_s.1.dylib
install_name_tool -id /Applications/Red-R.app/R/R.framework/Resources/lib/libgfortran.2.dylib /Applications/Red-R.app/R/R.framework/Resources/lib/libgfortran.2.dylib
##########################Python INSTALL###################################
python2.6 setup.py build --r-home=/Applications/Red-R.app/R/R.framework/Resources/
import os
os.environ['R_HOME'] = '/Applications/Red-R.app/R/R.framework/Resources'
import rpy3.robjects
rm -rf /Users/anupparikh/redr/trunk/mac/rpy3
cp -R /Users/anupparikh/redr/trunk/rpy3-setup/build/lib.macosx-10.5-i386-2.6/rpy3 /Users/anupparikh/redr/trunk/mac/
"""
import os, sys, shutil, re
#print sys.argv
#base = 'C:/Users/anup/Documents/red/develop/makeInstallers/code/red-trunk'
base = sys.argv[1]
sys.argv = ['createCompiledCode.py','py2app']
from distutils.core import setup
import py2app,shutil
AppDir = '/Applications/Red-R.app/'
##cleanup
shutil.rmtree('build',True)
shutil.rmtree(os.path.join(base,'dist'),True)
#sys.path.insert(0, '/Users/anup/redr/trunk')
sys.path.insert(0, base)
sys.path.insert(0, os.path.join(base,'canvas'))
sys.path.insert(0, os.path.join(base,'mac'))
# sys.path.insert(0, os.path.join(base,'canvas','rpy'))
#print sys.path
info = {}
import glob
files = [os.path.basename(x).split('.')[0] for x in glob.glob(os.path.join(base,'canvas','*.py'))]
#print files
setup(name="Red-R",
version="0.1",
author="<NAME>",
author_email="<EMAIL>",
url="http://www.red-r.org",
license="GNU General Public License (GPL)",
app=[os.path.join(base,"canvas","red-RCanvas.pyw")],
#data_files = dataFiles,
options={"py2app": {
"argv_emulation":1,
"iconfile": '/Users/anupparikh/redr/trunk/canvas/icons/redR.icns',
#"prefer_ppc":1,
"dist_dir": os.path.join(base,'dist'),
"site_packages":1,
"excludes": ['libraries'],
"includes": ["sip",'redrrpy','rpy3', 'PyQt4', 'OWColorPalette','docutils','Image', 'OWGraphTools','PyQt4.QtNetwork','PyQt4', 'PyQt4.Qwt5','PyQt4.QtSvg','PyQt4.Qwt5'] + files
#
}})
#shutil.rmtree(os.path.join(base,'dist','docutils'),True)
shutil.copytree('/Users/anupparikh/redr/R/',os.path.join(base,'dist','Red-R.app','R'))
shutil.copytree(os.path.join(base,'libraries'),
os.path.join(base,'dist','Red-R.app','Contents','libraries'))
shutil.copytree(os.path.join(base,'canvas','icons'),
os.path.join(base,'dist','Red-R.app','Contents','canvas','icons'))
#shutil.copytree(os.path.join(base,'mac'),
#os.path.join(base,'dist','Red-R.app','Contents','mac'))
shutil.copytree('/Users/anupparikh/redr/installIncludes/docutils',
os.path.join(base,'dist','Red-R.app','Contents','Resources','docutils'))
shutil.copyfile(os.path.join(base,'canvas','redRMacUpdater.py'), os.path.join(base,'dist','Red-R.app','Contents','redRMacUpdater.py'))
import datetime
d = datetime.datetime.now()
svn = os.popen("svnversion %s" % base).read()
print svn
m = re.match('\d+:(\d+)',svn)
if m:
svnVersion = m.group(1)
else:
m = re.match('(\d+)',svn)
svnVersion = m.group(1)
fh = open(os.path.join(base,'dist','Red-R.app','Contents','version.txt'),'w')
fh.write("""!define DATE "%s"
!define SVNVERSION "%s"
!define NAME "Red-R"
!define REDRVERSION "%s"
!define OS "mac"
!define TYPE "compiled"
!define RVERSION "R-2.11.1"
""" % (d.strftime('%Y.%m.%d'), svnVersion,'1.85alpha'))
fh.close()
shutil.copyfile(os.path.join(base,'licence.txt'),os.path.join(base,'dist','Red-R.app','Contents','licence.txt'))
shutil.copytree('/Users/anupparikh/redr/installIncludes/qt_menu.nib',
os.path.join(base,'dist','Red-R.app','Contents','Resources','qt_menu.nib'))
shutil.copyfile('/Users/anupparikh/redr/installIncludes/redR.icns',
os.path.join(base,'dist','Red-R.app','Contents','Resources','PythonApplet.icns'))
###########Move to /Applications dir###################
os.system('rm -rf /Applications/Red-R.app/Contents')
os.system('cp -R /Users/anupparikh/redr/trunk/dist/Red-R.app/Contents /Applications/Red-R.app/')
os.system('ln -s /Users/anupparikh/redr/trunk/dist/Red-R.app/Contents/libraries /Users/anupparikh/redr/trunk/dist/Red-R.app/Contents/Resources/libraries')
os.system('cp /usr/local/lib/libgcc_s.1.dylib /Applications/Red-R.app/Contents/Frameworks/libgcc_s.1.dylib')
os.system('mv %s/Contents/MacOS/Red-R %s/Contents/MacOS/Red-R.exe' % (AppDir,AppDir))
fh = open('%s/Contents/MacOS/Red-R' % AppDir,'w')
fh.write("""#!/bin/bash
export R_HOME=/Applications/Red-R.app/R/R.framework/Resources
source /Applications/Red-R.app/R/R.framework/Resources/etc/i386/ldpaths
/Applications/Red-R.app/Contents/MacOS/Red-R.exe
""")
fh.close()
os.system('chmod +x %s/Contents/MacOS/Red-R' % AppDir)
shutil.rmtree('build',True)
|
StarcoderdataPython
|
1707212
|
<gh_stars>1-10
import torch.nn.functional as F
# Default hyperparameters
SEED = 10 # Random seed
NB_EPISODES = 10000 # Max nb of episodes
NB_STEPS = 1000 # Max nb of steps per episodes
UPDATE_EVERY_NB_EPISODE = 4 # Nb of episodes between learning process
MULTIPLE_LEARN_PER_UPDATE = 3 # Nb of multiple learning process performed in a row
BUFFER_SIZE = int(1e5) # Replay buffer size
BATCH_SIZE = 256 # Batch size #128
ACTOR_FC1_UNITS = 512 # Number of units for L1 in the actor model #256
ACTOR_FC2_UNITS = 256 # Number of units for L2 in the actor model #128
CRITIC_FCS1_UNITS = 512 # Number of units for L1 in the critic model #256
CRITIC_FC2_UNITS = 256 # Number of units for L2 in the critic model #128
NON_LIN = F.relu # Non linearity operator used in the model #F.leaky_relu
LR_ACTOR = 1e-4 # Learning rate of the actor #1e-4
LR_CRITIC = 5e-3 # Learning rate of the critic #1e-3
WEIGHT_DECAY = 0 # L2 weight decay #0.0001
GAMMA = 0.995 # Discount factor #0.99
TAU = 1e-3 # For soft update of target parameters
CLIP_CRITIC_GRADIENT = False # Clip gradient during Critic optimization
ADD_OU_NOISE = True # Toggle Ornstein-Uhlenbeck noisy relaxation process
THETA = 0.15 # k/gamma -> spring constant/friction coefficient [Ornstein-Uhlenbeck]
MU = 0. # x_0 -> spring length at rest [Ornstein-Uhlenbeck]
SIGMA = 0.2 # root(2k_B*T/gamma) -> Stokes-Einstein for effective diffision [Ornstein-Uhlenbeck]
NOISE = 1.0 # Initial Noise Amplitude
NOISE_REDUCTION = 0.995 # Noise amplitude decay ratio
|
StarcoderdataPython
|
48302
|
#!/usr/bin/env python3
import unittest
import os
import sys
import requests
import utils_test
from multiprocessing import Process
import time
sys.path.append(os.path.abspath('engram'))
import engram
class TestRedirect(utils_test.EngramTestCase):
def test_index(self):
"""
        Story: Bookmarks page loads.
In order to access the bookmarks
I want to be able to use the endpoint /bookmarks
Scenario: requesting /bookmarks gets a response.
Given a running engram server on localhost:5000
When someone sends /bookmarks
Then the server sends back a html page
And the response has status 200.
"""
index_response = requests.get('http://localhost:5000/', timeout = 10)
assert index_response.status_code == 200
assert index_response.headers['content-type'] == "text/html; charset=utf-8"
if __name__ == '__main__':
    unittest.main()
|
StarcoderdataPython
|
5187065
|
<reponame>arinazorina/PyTeleBot1
# Telegram bot v.004
import telebot # pyTelegramBotAPI 4.3.1
from telebot import types
import botGames  # bot games, file botGames.py
import menuBot
from menuBot import Menu  # this module contains the code that creates instances of the classes describing my menu
import DZ  # homework from the first lesson
import fun  # entertainment
#import speech  # speech handling
#import SECRET  # secret keys, passwords
bot = telebot.TeleBot('5015391881:AAEq_NV09_lq4cPdyBUYlwBu-HtUmF6wpHI')  # Create the bot instance
# -----------------------------------------------------------------------
# Function that handles commands
@bot.message_handler(commands="start")
def command(message):
chat_id = message.chat.id
bot.send_sticker(chat_id, "CAACAgIAAxkBAAIaeWJEeEmCvnsIzz36cM0oHU96QOn7AAJUAANBtVYMarf4xwiNAfojBA")
txt_message = f"Привет, {message.from_user.first_name}! Я тестовый бот для курса программирования на языке Python"
bot.send_message(chat_id, text=txt_message, reply_markup=Menu.getMenu(chat_id, "Главное меню").markup)
# -----------------------------------------------------------------------
# Receiving stickers from the user
@bot.message_handler(content_types=['sticker'])
def get_messages(message):
chat_id = message.chat.id
bot.send_message(chat_id, "Это " + message.content_type)
sticker = message.sticker
bot.send_message(message.chat.id, sticker)
    # deep inspection of the object
# import inspect,pprint
# i = inspect.getmembers(sticker)
# pprint.pprint(i)
# -----------------------------------------------------------------------
# Receiving audio from the user
@bot.message_handler(content_types=['audio'])
def get_messages(message):
chat_id = message.chat.id
bot.send_message(chat_id, "Это " + message.content_type)
audio = message.audio
bot.send_message(chat_id, audio)
# -----------------------------------------------------------------------
# Receiving voice messages from the user
@bot.message_handler(content_types=['voice'])
def get_messages(message):
chat_id = message.chat.id
bot.send_message(chat_id, "Это " + message.content_type)
voice = message.voice
# bot.send_message(message.chat.id, voice)
#import speech
#fileInfo = bot.get_file(voice.file_id)
#audioData = bot.download_file(fileInfo.file_path)
#bot.send_message(chat_id, speech.getTextFromVoice(audioData))
# -----------------------------------------------------------------------
# Receiving photos from the user
@bot.message_handler(content_types=['photo'])
def get_messages(message):
chat_id = message.chat.id
bot.send_message(chat_id, "Это " + message.content_type)
photo = message.photo
bot.send_message(message.chat.id, photo)
# -----------------------------------------------------------------------
# Receiving video from the user
@bot.message_handler(content_types=['video'])
def get_messages(message):
chat_id = message.chat.id
bot.send_message(chat_id, "Это " + message.content_type)
video = message.video
bot.send_message(message.chat.id, video)
# -----------------------------------------------------------------------
# Receiving documents from the user
@bot.message_handler(content_types=['document'])
def get_messages(message):
chat_id = message.chat.id
mime_type = message.document.mime_type
bot.send_message(chat_id, "Это " + message.content_type + " (" + mime_type + ")")
document = message.document
bot.send_message(message.chat.id, document)
if message.document.mime_type == "video/mp4":
bot.send_message(message.chat.id, "This is a GIF!")
# -----------------------------------------------------------------------
# Receiving location coordinates from the user
#<EMAIL>(content_types=['location'])
#def get_messages(message):
#chat_id = message.chat.id
#bot.send_message(chat_id, "Это " + message.content_type)
#location = message.location
#bot.send_message(message.chat.id, location)
#from Weather import WeatherFromPyOWN
#pyOWN = WeatherFromPyOWN()
#bot.send_message(chat_id, pyOWN.getWeatherAtCoords(location.latitude, location.longitude))
#bot.send_message(chat_id, pyOWN.getWeatherForecastAtCoords(location.latitude, location.longitude))
# -----------------------------------------------------------------------
# Receiving contacts from the user
@bot.message_handler(content_types=['contact'])
def get_messages(message):
chat_id = message.chat.id
bot.send_message(chat_id, "Это " + message.content_type)
contact = message.contact
bot.send_message(message.chat.id, contact)
# -----------------------------------------------------------------------
# Receiving text messages from the user
@bot.message_handler(content_types=['text'])
def get_text_messages(message):
chat_id = message.chat.id
ms_text = message.text
cur_user = menuBot.Users.getUser(chat_id)
if cur_user is None:
cur_user = menuBot.Users(chat_id, message.json["from"])
    # check whether we pressed a submenu button or an action button
    subMenu = menuBot.goto_menu(bot, chat_id, ms_text)  # try to use the text as a menu command and enter that menu
    if subMenu is not None:
        return  # we entered a submenu, no further processing is needed
    # check whether the current command text is an action button
cur_menu = Menu.getCurMenu(chat_id)
    if cur_menu is not None and ms_text in cur_menu.buttons:  # make sure the command belongs to the current menu
        module = cur_menu.module
        if module != "":  # if this menu item has a handler in another module, call it there (encapsulation principle)
exec(module + ".get_text_messages(bot, cur_user, message)")
if ms_text == "Помощь":
send_help(bot, chat_id)
# =======================================
elif ms_text.isdigit():
ending_game = None
winner = None
for game in botGames.activeGames.values():
for player in game.players.values():
if (player.id == message.from_user.id):
if (game.winner is None and game.checkEndGame()):
if (int(ms_text) != game.result):
try_message = bot.send_message(chat_id, text="Попробуй еще раз!")
game.message_to_delete.append(try_message)
else:
ending_game = game
winner = player
break
if (ending_game is not None):
ending_game.winner = winner
ending_game.endGame()
    else:  # ======================================= arbitrary text
bot.send_message(chat_id, text="Мне жаль, я не понимаю вашу команду: " + ms_text)
menuBot.goto_menu(bot, chat_id, "Главное меню")
# -----------------------------------------------------------------------
@bot.callback_query_handler(func=lambda call: True)
def callback_worker(call):
    # if one or more parameters need to be passed to a button handler,
    # use the Menu.getExtPar() and Menu.setExtPar() methods
    # call.data is the callback_data we specified when declaring the inline button
    # after handling each request, call answer_callback_query() so Telegram knows the request has been processed
chat_id = call.message.chat.id
message_id = call.message.id
cur_user = menuBot.Users.getUser(chat_id)
if cur_user is None:
cur_user = menuBot.Users(chat_id, call.message.json["from"])
tmp = call.data.split("|")
menu = tmp[0] if len(tmp) > 0 else ""
cmd = tmp[1] if len(tmp) > 1 else ""
par = tmp[2] if len(tmp) > 2 else ""
if menu == "GameRPSm":
        botGames.callback_worker(bot, cur_user, cmd, par, call)  # the game's button handler lives in the game module
# -----------------------------------------------------------------------
def send_help(bot, chat_id):
bot.send_message(chat_id, "Автор: <NAME>")
markup = types.InlineKeyboardMarkup()
btn1 = types.InlineKeyboardButton(text="Напишите автору", url="https://www.instagram.com/mschelou/")
markup.add(btn1)
img = open('я.jpg', 'rb')
bot.send_photo(chat_id, img, reply_markup=markup)
#bot.send_message(chat_id, "Автор: <NAME>")
#markup = types.InlineKeyboardMarkup()
#btn1 = types.InlineKeyboardButton(text="Напишите автору", url="https://t.me/user59387")
#markup.add(btn1)
#img = open('Швец Андрей.png', 'rb')
#bot.send_photo(chat_id, img, reply_markup=markup)
bot.send_message(chat_id, "Активные пользователи чат-бота:")
for el in menuBot.Users.activeUsers:
bot.send_message(chat_id, menuBot.Users.activeUsers[el].getUserHTML(), parse_mode='HTML')
# ---------------------------------------------------------------------
bot.polling(none_stop=True, interval=0)  # Start the bot
|
StarcoderdataPython
|
4988753
|
def make_pizza(size, *toppings):
print("\nMaking a "+str(size)+"-inch size with the following toppings:")
for topping in toppings:
print("-"+topping)
|
StarcoderdataPython
|
6600848
|
<reponame>geekygamer1134/myPythonCode
import pyautogui
import time
import speech_recognition as sr
import os
from pydub import AudioSegment
from pydub.silence import split_on_silence
def test():
im1 = pyautogui.screenshot()
pix1 = im1.getpixel((384,216))
pix2 = im1.getpixel((384*2,216*2))
pix3 = im1.getpixel((384*3,216*3))
pix4 = im1.getpixel((384*4,216*4))
pix5 = im1.getpixel(((384*5)-1,(216*5)-1))
print(pix1,pix2,pix3,pix4,pix5)
#https://www.geeksforgeeks.org/python-speech-recognition-on-large-audio-files/
#TODO CREATE A FUNCTION THAT TAKES IN AUDIO AND MAKES A WAV FILE AND REWRITES OVER IT EVERY 30 SECONDS
#TODO RUN SENTIMENT ANALYSIS ON WORDS
#OR MAYBE JUST CRAWL THROUGH GENIUS TO DOWNLOAD LYRICS OF SONG
def silence_based_conversion(path):
song = AudioSegment.from_wav(path)
fh = open("reconized.txt", "w+")
chunks = split_on_silence(song, min_silence_len=2500, silence_thresh = -32) #for clear audio do 2500, -32
print(chunks)
try:
os.mkdir('audio_chunks')
except(FileExistsError):
print("file exists ln 30")
os.chdir('audio_chunks')
i=0
for chunk in chunks:
chunk_silent = AudioSegment.silent(duration = 1000)
audio_chunk = chunk_silent + chunk + chunk_silent
print(f"saving chunk{i}.wav")
print("saving chunk{0}.wav".format(i))
audio_chunk.export("./chunk{0}.wav".format(i), bitrate ='192k', format ="wav")
filename = 'chunk' + str(i)+'.wav'
print(f'Processing chunk {i}')
file = filename
r = sr.Recognizer()
with sr.AudioFile(file) as source:
audio_listened = r.listen(source)
try:
rec = r.recognize_google(audio_listened)
fh.write(rec+". ")
except sr.UnknownValueError:
print("Could not understand audio")
except sr.RequestError as e:
print("Could not request results. check your internet connection")
i += 1
os.chdir('..')
if __name__ == '__main__':
for x in range(0,100):
test()
time.sleep(.5)
print('Enter the audio file path')
path = input()
silence_based_conversion(path)
|
StarcoderdataPython
|
9644495
|
<gh_stars>1-10
# Problem Link :
# Excel-Sheet Link :
# youtube Video Link :
# o(n^2) O(1)
def Two_No_Sum_1(Array_1, Target_sum):
length = len(Array_1)
for i in range(length):
for j in range(length):
if(Array_1[i]+Array_1[j] == Target_sum and i != j):
return Array_1[i], Array_1[j]
return 0, 0
# o(n) O(n)
def Two_No_Sum_2(Array_1, Target_sum):
length = len(Array_1)
Target = []
for i in range(length):
if(Array_1[i] in Target):
return Array_1[i], (Target_sum - Array_1[i])
else:
Target.append(Target_sum-Array_1[i])
return 0, 0
# O(n log n) with sorting (the two-pointer scan itself is O(n)), O(1) extra space; requires Array_1 sorted ascending
def Two_No_Sum_3(Array_1, Target_sum):
Left_Ptr = 0
Right_Ptr = len(Array_1) - 1
while Left_Ptr < Right_Ptr:
Sum = Array_1[Left_Ptr] + Array_1[Right_Ptr]
if (Sum == Target_sum):
return[Array_1[Left_Ptr], Array_1[Right_Ptr]]
elif (Sum < Target_sum):
Left_Ptr += 1
elif (Sum > Target_sum):
Right_Ptr -= 1
return 0, 0
Array_ip = [2, 5, 7, 10, 13, 17, 19]
Target = 20
a, b = Two_No_Sum_3(Array_ip, Target)
print(f"The value of i is {a} and j is : {b} ")
|
StarcoderdataPython
|
226866
|
<gh_stars>1-10
from bspider.agent import log
from bspider.core.api import BaseService, GetSuccess, PostSuccess, DeleteSuccess, PatchSuccess
from bspider.core import AgentCache
class ProjectService(BaseService):
def __init__(self):
self.cache = AgentCache()
def add_project(self, project_id, name, config, rate, status):
self.cache.set_project(project_id, name, config, rate, status)
log.info(f'add project:project_id->{project_id} project_name->{name} {status} success')
return PostSuccess()
def get_projects(self):
return GetSuccess(data=self.cache.get_projects())
def get_project(self, project_id):
return GetSuccess(data=self.cache.get_project(project_id))
def update_project(self, project_id, changes):
self.cache.update_project(project_id, changes)
log.info(f'update project:{project_id} success')
return PatchSuccess()
def delete_project(self, project_id):
self.cache.delete_project(project_id)
log.info(f'delete project:{project_id} success')
return DeleteSuccess()
|
StarcoderdataPython
|
1693732
|
# Copyright (c) 2013, <NAME> and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import datetime
from frappe.utils import get_url_to_form,cstr
from frappe.utils import date_diff, add_months, today, getdate, add_days, flt, get_last_day
from frappe.core.doctype.sms_settings.sms_settings import send_sms
def execute():
for us in frappe.get_all('User',fields=['name','email','full_name']):
data1=[]
data2=[]
data3=[]
for ds in frappe.get_all('DocShare',filters={'user':us.name,'share_doctype':'Task'}):
doc=frappe.get_doc('DocShare',ds.name)
task,project,status,subject,time,start_date,end_date=frappe.db.get_value('Task',{'name':doc.share_name},['name','project','status','subject','expected_time','exp_start_date','exp_end_date'])
if start_date and end_date:
start_date=start_date.strftime("%d-%m-%Y")
end_date=end_date.strftime("%d-%m-%Y")
else:
start_date='-'
end_date='-'
if not project:
project='-'
if task and status=='Open':
data1.append({'project':project,'task_id':task,'task_name':subject,'start_date':start_date,'end_date':end_date,'status':status,'time':time})
if task and status=='Overdue':
data2.append({'project':project,'task_id':task,'task_name':subject,'start_date':start_date,'end_date':end_date,'status':status})
for todo in frappe.get_all('ToDo',filters={'owner':us.name,'reference_type':'Task'}):
doc=frappe.get_doc('ToDo',todo.name)
task,project,status,subject,time,start_date,end_date=frappe.db.get_value('Task',{'name':doc.reference_name},['name','project','status','subject','expected_time','exp_start_date','exp_end_date'])
if start_date and end_date:
start_date=start_date.strftime("%d-%m-%Y")
end_date=end_date.strftime("%d-%m-%Y")
else:
start_date='-'
end_date='-'
if not project:
project='-'
if task and status=='Open':
data1.append({'project':project,'task_id':task,'task_name':subject,'start_date':start_date,'end_date':end_date,'status':status,'time':time})
if task and status=='Overdue':
data2.append({'project':project,'task_id':task,'task_name':subject,'start_date':start_date,'end_date':end_date,'status':status})
open_task_send_msg(data1,us)
overdue_task_send_msg(data2,us)
for ds in frappe.get_all('DocShare',filters={'user':us.name,'share_doctype':'Issue'}):
doc=frappe.get_doc('DocShare',ds.name)
issue,customer,status,subject,date=frappe.db.get_value('Issue',{'name':doc.share_name},['name','customer','status','subject','opening_date'])
if date:
date=date.strftime("%d-%m-%Y")
else:
date='-'
if not customer:
customer='-'
if issue and status=='Open':
data3.append({'customer':customer,'issue_id':issue,'issue_name':subject,'status':status,'date':date})
for todo in frappe.get_all('ToDo',filters={'owner':us.name,'reference_type':'Issue'}):
doc=frappe.get_doc('ToDo',todo.name)
issue,customer,status,subject,date=frappe.db.get_value('Issue',{'name':doc.reference_name},['name','customer','status','subject','opening_date'])
if date:
date=date.strftime("%d-%m-%Y")
else:
date='-'
if not customer:
customer='-'
if issue and status=='Open':
data3.append({'customer':customer,'issue_id':issue,'issue_name':subject,'status':status,'date':date})
open_issue_send_msg(data3,us)
def open_task_send_msg(data,us):
msg=''
if len(data)>0:
msg="""<p>Hi {0}</p><br>""".format(us.get('full_name'))
msg+="""<b>{0} Task</b><br>""".format(data[0].get('status'))
msg += """</u></b></p><table class='table table-bordered'><tr>
<th>Task ID</th><th>Subject</th><th>Estimated Hrs</th><th>Expected Start Date</th><th>Expected End Date</th><th>Project</th><th>User Name</th>"""
for d in data:
msg += "<tr><td>" + """<a href="{0}">{1}</a>""".format(get_url_to_form('Task',d.get('task_id') ), str(d.get('task_id'))) + "</td><td>" + str(d.get('task_name')) + "</td><td>" + str(d.get('time')) + "</td><td>" + str(d.get('start_date')) + "</td><td>" + str(d.get('end_date')) + "</td><td>" + """<a href="{0}">{1}</a>""".format(get_url_to_form('Project',d.get('project') ), str(d.get('project'))) + "</td><td>" + str(us.get('full_name')) + "</td></tr>"
msg += "</table>"
frappe.sendmail(recipients=us.email,subject='Task Notification',message = msg)
def overdue_task_send_msg(data,us):
msg=''
if len(data)>0:
msg="""<p>Hi {0}</p><br>""".format(us.get('full_name'))
msg+="""<b>{0} Task</b><br>""".format(data[0].get('status'))
msg += """</u></b></p><table class='table table-bordered'><tr>
<th>Task ID</th><th>Subject</th><th>Expected Start Date</th><th>Expected End Date</th><th>Project</th><th>User Name</th>"""
for d in data:
msg += "<tr><td>" + """<a href="{0}">{1}</a>""".format(get_url_to_form('Task',d.get('task_id') ), str(d.get('task_id'))) + "</td><td>" + str(d.get('task_name')) + "</td><td>" + str(d.get('start_date')) + "</td><td>" + str(d.get('end_date')) + "</td><td>" + """<a href="{0}">{1}</a>""".format(get_url_to_form('Project',d.get('project') ), str(d.get('project'))) + "</td><td>" + str(us.get('full_name')) + "</td></tr>"
msg += "</table>"
frappe.sendmail(recipients=us.email,subject='Task Notification',message = msg)
def open_issue_send_msg(data,us):
msg=''
if len(data)>0:
msg="""<p>Hi {0}</p><br>""".format(us.get('full_name'))
msg+="""<b>{0} Issue</b><br>""".format(data[0].get('status'))
msg += """</u></b></p><table class='table table-bordered'><tr>
<th>Issue ID</th><th>Subject</th><th>Date</th><th>Customer</th><th>Project</th>"""
for d in data:
project=frappe.db.get_value('Issue',d.get('issue_id'),'project')
if not project:
project='-'
msg += "<tr><td>" + """<a href="{0}">{1}</a>""".format(get_url_to_form('Issue',d.get('issue_id') ), str(d.get('issue_id'))) + "</td><td>" + str(d.get('issue_name')) + "</td><td>" + str(d.get('date')) + "</td><td>" + """<a href="{0}">{1}</a>""".format(get_url_to_form('Customer',d.get('customer') ), str(d.get('customer'))) + "</td><td>" + """<a href="{0}">{1}</a>""".format(get_url_to_form('Project',project ), str(project)) + "</td></tr>"
msg += "</table>"
frappe.sendmail(recipients=us.email,subject='Issue Notification',message = msg)
def weekly_auto_email():
from datetime import date
import calendar
current_date = date.today()
if calendar.day_name[current_date.weekday()] == 'Sunday':
start_date=add_days(today(), -7)
end_date = today()
next_date=start_date
date_list=[]
while end_date!=next_date:
date_list.append(next_date)
next_date=add_days(next_date, 1)
import datetime
for emp in frappe.get_all('Employee',filters={'status':'Active'}):
timesheet_date=[]
remaining_hr=[]
for t in frappe.get_all('Timesheet',filters={'employee':emp.name,'start_date': ['between', [start_date, end_date]]},fields=['name','start_date','total_hours']):
if t.get('start_date') and t.get('total_hours')>=8:
timesheet_date.append(t.get('start_date').strftime('%Y-%m-%d'))
else:
remaining_hr.append({t.get('start_date').strftime('%Y-%m-%d'):t.get('total_hours')})
email_dates=date_list
if timesheet_date:
diff=list(set(date_list) - set(timesheet_date))
email_dates=sorted(diff, key=lambda x: datetime.datetime.strptime(x, '%Y-%m-%d'))
timesheet_auto_email(emp.name,email_dates,remaining_hr)
def timesheet_auto_email(employee,dates,wk_hr):
msg=''
if len(dates)>0:
from datetime import date
email,name,holiday_list=frappe.db.get_value('Employee',employee,['prefered_email','employee_name','holiday_list'])
leave_dates=[]
for d in frappe.get_all('Leave Application',filters={'employee':employee,'status':'Approved','docstatus':1},fields=['from_date','to_date']):
next_date=getdate(d.get('from_date'))
leave_dates.append(next_date.isoformat())
while getdate(d.get('to_date'))!= next_date:
next_date=add_days(next_date, 1)
leave_dates.append(next_date.isoformat())
msg="""<p>Hi {0}</p><br>""".format(name)
msg+="""<b>Timesheet Records</b><br>"""
msg += """</u></b></p><table class='table table-bordered'><tr>
<th>Date</th><th>Timesheet</th><th>Actual Hours</th>"""
holidays=[]
from datetime import datetime
for hl in frappe.get_all('Holiday',filters={'parent':holiday_list},fields=['holiday_date']):
holidays.append(hl.get('holiday_date').strftime("%Y-%m-%d"))
for d in dates:
if d not in holidays and d not in leave_dates:
hr=0
timesheet=''
for h in wk_hr:
if h.get(d):
timesheet=frappe.db.get_value('Timesheet',{'employee':employee,'start_date':d},'name')
hr=h.get(d)
msg += "<tr><td>" + str(d) + "</td><td>" + """<a href="{0}">{1}</a>""".format(get_url_to_form('Timesheet',str(timesheet) ), str(timesheet)) + "</td><td>" + str(hr) + "</td></tr>"
msg += "</table>"
frappe.sendmail(recipients=email,subject='Timesheet Notification',message = msg)
def sent_mail_from_rent_transaction(doc,method):
if doc.status=='Rent Transffered':
from datetime import datetime
        date = datetime.strptime(doc.posting_date, '%Y-%m-%d')  # '%m' is month; '%M' would parse minutes
msg="""<p>Hello {0} Sir ,</p><br>""".format(doc.owner_name)
msg+="""<p>Homzhub has successfully initiated the transfer of house rent for the month of {0} , {1} , towards the property bearing the address {2} .</p><br>""".format(date.strftime('%B'),date.strftime('%Y'),(doc.property_address_details).replace('<br>',','))
msg+="""<p>The rent transfer was initiated on {0} , {1} {2} , {3}. You can find the rent receipt attached along with this mail for your records.</p><br>""".format(date.strftime('%A'),date.strftime('%B'),date.strftime('%d'),date.strftime('%Y'))
msg+="""<p>Please reply back to this mail if you have any queries, in the next couple of working days.</p>"""
for d in frappe.get_all('Customer',{'name':doc.owner},'email_id'):
            frappe.sendmail(recipients=d.email_id,subject='Your Rent Transferred',message = msg)
def send_sms_after_rent_transaction(doc,method):
for tn in doc.get('tenant_list'):
contact_list=[]
mob=frappe.db.get_value('Customer',tn.tenant,'mobile_no')
if mob:
contact_list.append(mob)
if len(contact_list)>0:
send_sms(contact_list,cstr("Dear {0}, The rent of Rs. {1} will be deducted from your account in the next 3 working days. Please maintain the required balance. - Homzhub".format(tn.tenant_name,doc.rent_amount)))
|
StarcoderdataPython
|
1889939
|
<gh_stars>0
from wildq import wildq
def test_usage():
assert wildq.usage() == 0
|
StarcoderdataPython
|
6698037
|
<gh_stars>1-10
# %%
import networkx as nx
# from networkx.algorithms import centrality
from networkx.readwrite import gexf
import pandas as pd
import matplotlib.pyplot as plt
from networkx.drawing import layout
## https://networkx.github.io/documentation/networkx-1.10/reference/generated/networkx.drawing.nx_agraph.graphviz_layout.html
# %%
print('start')
G = gexf.read_gexf('3mj/subgraph--1-start.gexf')
print('generate layout')
sp = layout.kamada_kawai_layout(G)
degrees = pd.DataFrame(dict(G.degree).items(), columns=['tag', 'degree'])
print(nx.__version__)
print('before')
print(nx.info(G))
nodes_zero_degree = (degrees.query('degree == 0')['tag'].to_list())
G.remove_nodes_from(nodes_zero_degree)
print('after')
print(nx.info(G))
# %%
df_decompose = pd.read_csv('3mj/decomposed_3mj.csv')[['iteration', 'tag','filename']]
# %%
def plot_decompose(r):
ego_node = df_decompose.query(f'iteration == {r.iteration+1}')['tag'].iloc[0]
print(r.iteration, ego_node, sp[ego_node])
g_node = gexf.read_gexf(r.filename)
# sp1 = layout.kamada_kawai_layout(g_node)
nx.draw(g_node, pos=sp, node_color='#0C84B6', node_size=5, with_labels=False )
nx.draw_networkx_edges(g_node, pos=sp, alpha=0.04, edge_color='#D8E7F5')
plt.text(sp[ego_node][0], sp[ego_node][1], r.tag, fontsize=8,
bbox={'facecolor': 'white', 'edgecolor': 'none', 'alpha': 0.5})
nx.draw_networkx_nodes(g_node, pos=sp, nodelist=[ego_node], node_size=10, node_color='r')
plt.axis('off')
# plt.show()
plt.savefig('images/'+r.filename.replace('.gexf','.png'), dpi=500)
plt.show()
# %%
df_decompose.iloc[[1,199],:].apply(plot_decompose, axis=1)
# for r in df_decompose.iloc[0,:]:
# print(type(r.iteration))
# ego_node = 'technical-issues'
# nx.draw(G, pos=sp, node_color='#0C84B6', node_size=10, with_labels=False, edge_color='#D8E7F5')
# # Draw ego as large and red
# nx.draw_networkx_nodes(G, pos=sp, nodelist=[ego_node], node_size=30, node_color='#0E59A2')
# plt.axis('off')
# plt.savefig('images/test.png', dpi=500)
# %%
df_decompose.apply(plot_decompose, axis=1)
# %%
|
StarcoderdataPython
|
11301848
|
<filename>turnovertools/fftools.py
#!/usr/bin/env python3
import datetime
from heapq import heappush, heappop
import itertools
import numpy as np
import os
import subprocess
import signal
import sys
import time
from timeit import timeit
import cv2
import ffmpeg
# from skimage.measure import compare_ssim as ssim
from timecode import Timecode
from turnovertools import fftools
##
# Close any asynchronous subprocesses on sigint
def handle_sigint(signum, frame):
fr = frame
while fr is not None:
for var, value in fr.f_locals.items():
print(var, ':', type(value))
fr = fr.f_back
sys.exit()
# signal.signal(signal.SIGINT, handle_sigint)
##
# ffmpeg functions
def build_ffmpeg(vid, frameno=0, format='image2', dur=None,
scale=None, interval=1, ss=None, **kwargs):
input_args = {}
if ss is not None:
input_args['ss'] = ss
command = ffmpeg.input(vid, **input_args)
output_args = { 'format': format,
'vcodec': 'bmp' }
if dur is not None:
output_args['vframes'] = dur
for key, value in kwargs.items():
output_args[key] = value
if interval > 1:
command = ffmpeg.filter(command, 'select',
'not(mod(n,{}))'.format(interval))
output_args['vsync'] = 0
if scale is not None:
width, height = scale
aspect = width / height
command = (
ffmpeg
.filter(command, 'pad', h='iw/{}'.format(aspect), y='(oh-ih)/2')
.filter('scale', '{}x{}'.format(width, height))
)
command = ffmpeg.output(command, 'pipe:', **output_args)
return command
def extract_frame(vid, frameno=0, format='image2',
dur=None, scale=None, interval=1, **kwargs):
command = build_ffmpeg(vid, frameno=frameno, format=format, dur=1,
scale=scale, **kwargs)
try:
frame, error = ffmpeg.run(command, capture_stdout=True, capture_stderr=True)
    except ffmpeg._run.Error as e:
        print(e.stderr)
        raise  # re-raise so the caller sees the error instead of hitting an unbound 'frame' below
    return frame
def frame_iterator(process, size):
while True:
img = process.stdout.read(size)
if not img:
break
yield img
def interval_stream_frames(vid, interval=2, probe=None, **kwargs):
if probe is None:
probe = ffmpeg.probe(vid)
vidinfo = next((stream for stream in probe['streams'] if
stream['codec_type'] == 'video'), None)
fps = vidinfo['r_frame_rate']
try:
fps = float(fps)
except ValueError:
left, right = fps.split('/')
fps = float(left) / float(right)
ss_interval = interval / fps
if 'nb_frames' in vidinfo:
nb_frames = int(vidinfo['nb_frames'])
else:
nb_frames = int(fps * float(vidinfo['duration']))
if nb_frames < interval:
yield extract_frame(vid, **kwargs)
return
for i in range(nb_frames // interval):
yield extract_frame(vid, ss=ss_interval*i, **kwargs)
def stream_frames(vid, size=None, frameno=0, dur=None, format='image2pipe',
scale=None, **kwargs):
command = build_ffmpeg(vid, frameno=frameno, format=format,
dur=dur, scale=scale, **kwargs)
if size is None:
size = len(extract_frame(vid, scale=scale, **kwargs))
process = ffmpeg.run_async(command, pipe_stdout=True,
pipe_stderr=True)
try:
yield from frame_iterator(process, size)
finally:
print('Calling cleanup.')
process.communicate()
def probe_clip(video):
"""Probes a video file and returns a clip object with various
metadata."""
clip = lambda: None
clip.mediapath = video
probe = ffmpeg.probe(video)
vid_stream = next(stream for stream in probe['streams'] if
stream['codec_type'] == 'video')
clip.framerate = vid_stream['r_frame_rate']
clip.duration = Timecode(clip.framerate,
start_seconds=float(probe['format']['duration']))
clip.duration_seconds = probe['format']['duration']
if 'timecode' in probe['format']['tags']:
clip.src_start_tc = Timecode(
clip.framerate, probe['format']['tags']['timecode'])
else:
clip.src_start_tc = Timecode(clip.framerate, '00:00:00:00')
clip.src_end_tc = clip.src_start_tc + clip.duration
width = vid_stream['width']
height = vid_stream['height']
clip.scale = (int(width), int(height))
aw, ah = vid_stream['display_aspect_ratio'].split(':')
clip.aspect_ratio = float(aw) / float(ah)
clip.bitrate = int(probe['format']['bit_rate'])
return clip
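# Example usage (a sketch; 'example.mov' is a hypothetical path to a file that reports a display aspect ratio):
# clip = probe_clip('example.mov')
# print(clip.framerate, clip.scale, clip.src_start_tc, '->', clip.src_end_tc)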
def probe_timecode(video):
"""Probes a video file and returns the timecode as a Timecode object."""
probe = ffmpeg.probe(video)
tc_string = probe['format']['tags']['timecode']
vid_streams = next(stream for stream in probe['streams'] if
stream['codec_type'] == 'video')
fr = vid_streams['r_frame_rate']
return Timecode(fr, tc_string)
##
# frame processing functions
# removing scikit-image functions for now
#def compare(image, other):
# image = cvdecode(image)
# other = cvdecode(other)
# return ssim(image, other, multichannel=True)
def mse(image, other):
image = cvdecode(image)
other = cvdecode(other)
err = np.sum((image.astype('float') - other.astype('float')) ** 2)
err /= float(image.shape[0] * other.shape[1])
return err
def cvdecode(image):
image = np.frombuffer(image, dtype='uint8')
image = cv2.imdecode(image, cv2.IMREAD_GRAYSCALE)
return image
##
# search functions
def find_frame(frame, vid, interval=1, threshold=150):
min = None
match = None
i = 0
for i, candidate in enumerate(vid):
err = mse(frame, candidate)
if min is None or err < min:
min = err
match = candidate
if err <= threshold:
break
return min, match
def find_in_dir(src, basepath, frame_num=0, pix_fmt='gray9be',
scale=(960,540), interval=64):
goal = extract_frame(src, scale=scale, pix_fmt=pix_fmt)
matches = []
for dirpath, dirs, files in os.walk(basepath):
start = time.time()
print('Searching directory {} containing {} files.'.
format(dirpath, len(files)))
for candidate in (os.path.join(dirpath, x) for x in files):
try:
probe = ffmpeg.probe(candidate)
except ffmpeg.Error:
continue
if interval > 1:
video = interval_stream_frames(candidate,
interval=interval,
probe=probe,
scale=scale,
pix_fmt=pix_fmt)
else:
video = stream_frames(candidate, scale=scale,
pix_fmt=pix_fmt)
err, match = find_frame(goal, video)
if match is None:
continue
try:
heappush(matches, (err,candidate,match))
except TypeError:
print(err, candidate, len(match))
print('Searched in {} seconds.'.format(time.time() - start))
while len(matches) > 0:
yield heappop(matches)
def search_report(src, basepath, outpath, **kwargs):
start = datetime.datetime.today()
reportpath = os.path.join(outpath, 'search_report_{}{:02}{:02}-{:02}{:02}.txt'.
format(start.year, start.month,
start.day, start.hour,
start.minute))
search = find_in_dir(src, basepath, **kwargs)
for i, (err, matchpath, img) in enumerate(search):
imgpath = os.path.join(outpath,
'result_{:08}_{:.2f}.bmp'.format(i, err))
with open(imgpath, 'wb') as fh:
fh.write(img)
with open(reportpath, 'at') as fh:
fh.write('{:08},{:.2f},{}\n'.format(i, err, matchpath))
return reportpath
def profile():
sample = '/Volumes/sync.Cocoon-3_1/Cocoon_Camera Masters/20160408/4-8-2016 5A Mural Wall/Drone Camera/Drone Roll 2/DJI_0036.MOV'
print(timeit(lambda: list(stream_frames(sample, pix_fmt='gray9be',
scale=(960,540))), number=1))
print(timeit(lambda: list(stream_frames(sample, scale=(1920,1080))), number=1))
if __name__ == '__main__':
src = sys.argv[1]
searchpath = sys.argv[2]
outpath = sys.argv[3]
search_report(src, searchpath, outpath, interval=1)
|
StarcoderdataPython
|
4977947
|
from ...imports import *
from ... import utils as U
class ZeroShotClassifier():
"""
interface to Zero Shot Topic Classifier
"""
def __init__(self, model_name='facebook/bart-large-mnli', device=None):
"""
ZeroShotClassifier constructor
Args:
model_name(str): name of a BART NLI model
device(str): device to use (e.g., 'cuda', 'cpu')
"""
if 'mnli' not in model_name and 'xnli' not in model_name:
            raise ValueError('ZeroShotClassifier requires an MNLI or XNLI model')
try:
import torch
except ImportError:
raise Exception('ZeroShotClassifier requires PyTorch to be installed.')
self.torch_device = device
if self.torch_device is None: self.torch_device = 'cuda' if torch.cuda.is_available() else 'cpu'
from transformers import AutoModelForSequenceClassification, AutoTokenizer
self.tokenizer = AutoTokenizer.from_pretrained(model_name)
self.model = AutoModelForSequenceClassification.from_pretrained(model_name).to(self.torch_device)
def predict(self, doc, topic_strings=[], include_labels=False):
"""
zero-shot topic classification
Args:
doc(str): text of document
topic_strings(list): a list of strings representing topics of your choice
Example:
topic_strings=['political science', 'sports', 'science']
Returns:
inferred probabilities
"""
if topic_strings is None or len(topic_strings) == 0:
raise ValueError('topic_strings must be a list of strings')
true_probs = []
for topic_string in topic_strings:
premise = doc
hypothesis = 'This text is about %s.' % (topic_string)
input_ids = self.tokenizer.encode(premise, hypothesis, return_tensors='pt').to(self.torch_device)
logits = self.model(input_ids)[0]
# we throw away "neutral" (dim 1) and take the probability of
# "entailment" (2) as the probability of the label being true
# reference: https://joeddav.github.io/blog/2020/05/29/ZSL.html
entail_contradiction_logits = logits[:,[0,2]]
probs = entail_contradiction_logits.softmax(dim=1)
true_prob = probs[:,1].item()
true_probs.append(true_prob)
if include_labels:
true_probs = list(zip(topic_strings, true_probs))
return true_probs
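# Example usage (a minimal sketch; the document text and topic labels below are arbitrary):
# zsl = ZeroShotClassifier()
# zsl.predict('Elections will be held next month.', topic_strings=['politics', 'sports'], include_labels=True)
# # -> a list of (topic, probability) pairs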
|
StarcoderdataPython
|
1914185
|
# -*- coding: utf-8 -*-
import asyncio
import irc3
from ircb.storeclient import ChannelStore
@irc3.plugin
class IrcbPlugin(object):
def __init__(self, bot):
self.bot = bot
@irc3.event(irc3.rfc.JOIN)
def on_join(self, mask, channel, **kw):
def callback():
yield from ChannelStore.create_or_update(
dict(
channel=channel,
network_id=self.bot.config.id,
status='1'
)
)
asyncio.Task(callback())
@irc3.event(irc3.rfc.PART)
def on_part(self, mask, channel, **kw):
def callback():
yield from ChannelStore.create_or_update(
dict(
channel=channel,
network_id=self.bot.config.id,
status='3'
)
)
asyncio.Task(callback())
|
StarcoderdataPython
|
6484942
|
import datetime
import logging
from sqlalchemy import Column, Integer, String, DateTime
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from finorch.utils.job_status import JobStatus
Base = declarative_base()
class Job(Base):
__tablename__ = 'job'
id = Column(Integer, primary_key=True)
batch_id = Column(Integer, unique=True, nullable=True)
identifier = Column(String(40), unique=True)
start_time = Column(DateTime, default=datetime.datetime.now, nullable=False)
status = Column(Integer, default=JobStatus.PENDING)
class Database:
def __init__(self, exec_path):
"""
Initialises the database.
:param exec_path: The path where the job output is kept. This is where the sqlite database will be stored.
"""
logging.getLogger('sqlalchemy').setLevel(logging.ERROR)
# Set up the sqlite database
self.engine = create_engine(f"sqlite:///{exec_path / 'db.sqlite3'}")
Base.metadata.create_all(self.engine)
Session = sessionmaker(bind=self.engine)
self.session = Session()
def add_job(self, job_identifier, batch_id=None):
"""
Inserts a new job with the specified job identifier
:param job_identifier: The job identifier
:return: None
"""
job = Job(
identifier=job_identifier,
batch_id=batch_id
)
self.session.add(job)
self.session.commit()
return True
def get_job_status(self, job_identifier):
"""
Gets the status of the specified job
:param job_identifier: The identifier of the job
:return: The status of the job (int) if the job was found, otherwise a Tuple of (None, *reason*)
"""
results = self.session.query(Job).filter(Job.identifier == job_identifier)
if results.count() != 1:
return None, f"Job with with identifier {job_identifier} not found"
return results.first().status
def update_job_status(self, job_identifier, new_status):
"""
Updates the status of a specified job
:param job_identifier: The identifier of the job
:param new_status: The new status for the job
:return: None
"""
results = self.session.query(Job).filter(Job.identifier == job_identifier)
if results.count() != 1:
return None, f"Job with with identifier {job_identifier} not found"
results.first().status = new_status
self.session.commit()
return True
def get_jobs(self):
"""
Gets the list of jobs
:return: A list of dictionaries containing job information
"""
data = [r._asdict() for r in self.session.query(Job.id, Job.identifier, Job.start_time, Job.status).all()]
return data
def get_job_batch_id(self, job_identifier):
"""
Gets the batch id of the specified job
:param job_identifier: The identifier of the job
:return: The batch_id of the job or None
"""
results = self.session.query(Job).filter(Job.identifier == job_identifier)
if results.count() != 1:
return None, f"Job with with identifier {job_identifier} not found"
return results.first().batch_id
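# Example usage (a sketch; assumes an existing directory and a pathlib import, which this module does not provide):
# db = Database(pathlib.Path('/tmp/finorch'))
# db.add_job('job-0001', batch_id=7)
# print(db.get_job_status('job-0001'))  # expected to be JobStatus.PENDING for a freshly added job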
|
StarcoderdataPython
|
9749813
|
# %%
#######################################
import pandas

def pandasget_dataframe_info(data_frame: pandas.DataFrame):
    # pandas must be imported at module level: the annotation is evaluated when the def statement runs
    if isinstance(data_frame, pandas.DataFrame):
        return data_frame.info()
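# Example (sketch): pandasget_dataframe_info(pandas.DataFrame({'a': [1, 2]})) prints the usual DataFrame summary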
|
StarcoderdataPython
|
4873553
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
VERSION = '0.3'
setup(
name='mtda',
version=VERSION,
scripts=['mtda-cli'],
packages=find_packages(exclude=["demos"]),
author='<NAME>',
author_email='<EMAIL>',
maintainer='<NAME>',
maintainer_email='<EMAIL>',
description='Mentor Test Device Agent',
long_description='''
mtda is a small agent abstracting hardware controls and interfaces for a
connected test device. The main purpose of this tool is to allow developers
and testers to remotely access and control hardware devices.
''',
url='https://stash.alm.mentorg.com/projects/PSP/repos/mtda',
license='TBD',
keywords='remote test',
classifiers=[
"Topic :: Utilities",
"Environment :: Console",
"Intended Audience :: Developers",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.0",
"Topic :: Software Development :: Embedded Systems",
],
install_requires=[
"pyserial>=2.6",
"python-daemon>=2.0",
"pyusb>=1.0",
"pyzmq>=15.0",
"requests",
"RPi.GPIO",
"zerorpc>=0.6.0"
],
)
|
StarcoderdataPython
|
5019043
|
from u import *
from modules import AdaptiveEmbedding, ProjectedAdaptiveLogSoftmax
mask_type = torch.uint8 if torch.__version__.startswith('1.1') else torch.bool
class Decoder(nn.Module):
def __init__(self, c):
super(Decoder, self).__init__()
n_embed = c.n_embed
self.ln1 = nn.LayerNorm(n_embed)
self.qkv = nn.Linear(n_embed, c.n_head * (2 * c.n_k + c.n_v))
if c.pos_emb == 'trained':
self.pos_emb = nn.Parameter(torch.Tensor(c.n_k, c.n_seq + 1))
nn.init.normal_(self.pos_emb, 0, 0.02)
self.out = nn.Linear(c.n_head * c.n_v, n_embed, bias=False)
self.dropout = nn.Dropout(c.dropout)
self.ln2 = nn.LayerNorm(c.n_embed)
self.fc = nn.Sequential(
nn.Linear(c.n_embed, c.n_inner),
nn.ReLU(inplace=True),
nn.Dropout(c.dropout),
nn.Linear(c.n_inner, c.n_embed),
nn.Dropout(c.dropout),
)
self.c = c
def forward(self, x, prev=None):
# x: (n_group * n_seq, n_batch, n_embed)
# pos_emb: (n_k, n_seq + 1)
# mask: (2 * n_seq, 2 * n_seq) parallelogram
c = self.c
n_s = min(c.n_seq, x.size(0))
n_g = x.size(0) // n_s
n_b = x.size(1)
n_h = c.n_head
n_k = c.n_k
n_v = c.n_v
qkv = self.qkv(self.ln1(x)).reshape(n_g * n_s, n_b * n_h, 2 * n_k + n_v)
q, kv = qkv.split([n_k, n_k + n_v], dim=-1)
q = q.reshape(n_g, n_s, n_b * n_h, n_k)
padding = prev if prev is not None else torch.zeros((n_s, n_b * n_h, n_k + n_v), dtype=kv.dtype, device=kv.device)
kv = torch.cat((padding, kv))
k, v = kv.unfold(0, 2 * n_s, n_s).split([n_k, n_v], dim=2) # (n_g, n_bh, n_kv, 2 * n_s)
qk = torch.einsum('gsbk,gbkt->gbst', q, k) # (n_g, n_bh, n_s, 2 * n_s)
qk = qk.reshape(n_g, n_b * n_h, -1).unfold(2, n_s + 1, 2 * n_s + 1) # (n_g, n_bh, n_s, n_s + 1)
pos_emb = self.pos_emb
qe = torch.einsum('gsbk,kt->gbst', q, pos_emb.to(q.dtype))
attn = qk + qe
attn.mul_(n_k ** -0.5)
if prev is None:
mask = torch.triu(torch.ones(attn.shape[2:], dtype=mask_type, device=attn.device), 1).flip([1])
attn[0].masked_fill_(mask, -np.inf)
attn = attn.softmax(dim=-1)
attn = F.pad(attn, (0, n_s))
attn = attn.reshape(n_g, n_b * n_h, -1).unfold(2, 2 * n_s, 2 * n_s) # (n_g, n_bh, n_s, 2 * n_s)
attnv = torch.einsum('gbst,gbvt->gsbv', attn, v) # (n_g, n_s, n_bh, n_v)
attn_out = self.out(attnv.reshape(n_g * n_s, n_b, n_h * n_v)) # (n_g * n_s, n_b, n_embed)
attn_out = self.dropout(attn_out)
out = x + attn_out
next = kv[-n_s:].detach()
out = out + self.fc(self.ln2(out))
return out, next
class Transformer(nn.Module):
def __init__(self, c):
super(Transformer, self).__init__()
self.c = c.setdefault(quantizing=False)
self.embed = AdaptiveEmbedding(c)
self.dropout = nn.Dropout(c.dropout)
self.layers = nn.ModuleList(Decoder(c) for _ in range(c.n_layers))
self.loss = ProjectedAdaptiveLogSoftmax(c)
# tie output embedding weights to input embedding weights
for layer_embed, layer_loss in zip(self.embed.layers, self.loss.layers):
layer_loss.weight = layer_embed.weight
def forward(self, inputs, labels, prevs=None, soft_labels=None, soft_probs=None, current_step=0.):
# inputs: (n_group * n_seq, n_batch)
# labels: (n_group * n_seq, n_batch)
c = self.c
n_gs = inputs.size(0)
n_s = c.n_seq
if n_gs % n_s != 0:
padding = torch.zeros((n_s - n_gs % n_s, inputs.size(1)), dtype=inputs.dtype, device=inputs.device)
inputs = torch.cat((inputs, padding))
x = self.embed(inputs)
x = self.dropout(x)
prevs = prevs or [None] * c.n_layers
nexts = []
for layer, prev in zip(self.layers, prevs):
x, prev = layer(x, prev=prev)
nexts.append(prev)
x = self.dropout(x)
x = x[:n_gs]
if c.get('distill') and self.training:
soft_labels_reshape = soft_labels.reshape(-1, soft_labels.size(2))
soft_probs_reshape = soft_probs.reshape(-1, soft_probs.size(2))
loss, hiddens = self.loss(hidden=x.reshape(-1, x.size(2)), target=labels.reshape(-1),
soft_labels=soft_labels_reshape, soft_probs=soft_probs_reshape,
current_step=current_step)
loss = loss.reshape(labels.shape)
extras = {}
if c.use_cache:
extras['lambda'] = self.loss.last_lambda
extras['theta'] = self.loss.last_theta
return dict(loss=loss.mean(), state=nexts, hiddens=hiddens, current_step=current_step, **extras)
loss, hiddens = self.loss(x.reshape(-1, x.size(2)), labels.reshape(-1), keep_order=c.get('keep_order', False))
if c.get('gen_soft'):
return loss, hiddens
loss = loss.reshape(labels.shape)
if not c.get('loss_no_mean'):
loss = loss.mean()
extras = {}
if c.use_cache:
extras['lambda'] = self.loss.last_lambda
extras['theta'] = self.loss.last_theta
if c.quantizing:
return loss, nexts
return dict(loss=loss, state=nexts, hiddens=hiddens, **extras)
|
StarcoderdataPython
|
3369922
|
# Copyright 2022 DeepL SE (https://www.deepl.com)
# Use of this source code is governed by an MIT
# license that can be found in the LICENSE file.
import argparse
import deepl
import logging
import os
import pathlib
import sys
from typing import List
# Program name for integration with click.testing
name = "python -m deepl"
env_auth_key = "DEEPL_AUTH_KEY"
env_server_url = "DEEPL_SERVER_URL"
env_proxy_url = "DEEPL_PROXY_URL"
def action_usage(translator: deepl.Translator):
"""Action function for the usage command."""
usage_result = translator.get_usage()
print(usage_result)
def action_languages(translator: deepl.Translator, glossary: bool):
"""Action function for the languages command."""
if glossary:
glossary_languages = translator.get_glossary_languages()
print("Language pairs supported for glossaries: (source, target)")
for language_pair in glossary_languages:
print(f"{language_pair.source_lang}, {language_pair.target_lang}")
else:
source_languages = translator.get_source_languages()
target_languages = translator.get_target_languages()
print("Source languages available:")
for language in source_languages:
print(f"{language.code}: {language.name}")
print("Target languages available:")
for language in target_languages:
if language.supports_formality:
print(f"{language.code}: {language.name} (supports formality)")
else:
print(f"{language.code}: {language.name}")
def action_document(
translator: deepl.Translator, file: List[str], dest: str, **kwargs
):
"""Action function for the document command."""
if not os.path.exists(dest):
os.makedirs(dest, exist_ok=True)
elif not os.path.isdir(dest):
raise Exception("Destination already exists, and is not a directory")
for this_file in file:
output_path = os.path.join(dest, os.path.basename(this_file))
translator.translate_document_from_filepath(
this_file, output_path, **kwargs
)
def action_text(
translator: deepl.Translator,
show_detected_source: bool = False,
**kwargs,
):
"""Action function for the text command."""
output_list = translator.translate_text(**kwargs)
for output in output_list:
if show_detected_source:
print(f"Detected source language: {output.detected_source_lang}")
print(output.text)
def action_glossary(
translator: deepl.Translator,
subcommand: str,
**kwargs,
):
# Call action function corresponding to command with remaining args
globals()[f"action_glossary_{subcommand}"](translator, **kwargs)
pass
def action_glossary_create(
translator: deepl.Translator, entry_list, file, **kwargs
):
if file:
if entry_list:
raise deepl.DeepLException(
"The --file argument cannot be used together with "
"command-line entries"
)
file_contents = pathlib.Path(file).read_text("UTF-8")
entry_dict = deepl.convert_tsv_to_dict(file_contents)
elif entry_list and entry_list[0] == "-":
entry_dict = deepl.convert_tsv_to_dict(sys.stdin.read())
else:
entry_dict = deepl.convert_tsv_to_dict("\n".join(entry_list), "=")
glossary = translator.create_glossary(entries=entry_dict, **kwargs)
print(f"Created {glossary}")
print_glossaries([glossary])
def print_glossaries(glossaries):
headers = [
"Glossary ID",
"Name",
"Ready",
"Source",
"Target",
"Count",
"Created",
]
data = [
[
glossary.glossary_id,
glossary.name,
str(glossary.ready),
glossary.source_lang,
glossary.target_lang,
str(glossary.entry_count),
str(glossary.creation_time),
]
for glossary in glossaries
]
data.insert(0, headers)
col_max_widths = [
max(len(row[col_num]) for row in data)
for col_num in range(len(headers))
]
for row in data:
print(
"\t".join(
[col.ljust(width) for col, width in zip(row, col_max_widths)]
)
)
def action_glossary_list(translator: deepl.Translator):
glossaries = translator.list_glossaries()
print_glossaries(glossaries)
def action_glossary_get(translator: deepl.Translator, **kwargs):
glossary = translator.get_glossary(**kwargs)
print_glossaries([glossary])
def action_glossary_entries(translator: deepl.Translator, glossary_id):
glossary_entries = translator.get_glossary_entries(glossary=glossary_id)
print(deepl.convert_dict_to_tsv(glossary_entries))
def action_glossary_delete(
translator: deepl.Translator, glossary_id_list: str
):
for glossary_id in glossary_id_list:
translator.delete_glossary(glossary_id)
print(f"Glossary with ID {glossary_id} successfully deleted.")
def get_parser(prog_name):
"""Constructs and returns the argument parser for all commands."""
parser = argparse.ArgumentParser(
prog=prog_name,
description="Translate text using the DeepL API "
"(https://www.deepl.com/docs-api).",
epilog="If you encounter issues while using this program, please "
"report them at https://github.com/DeepLcom/deepl-python/issues",
)
parser.add_argument(
"--version",
action="version",
version=f"deepl-python v{deepl.__version__}",
)
parser.add_argument(
"--verbose",
"-v",
action="count",
dest="verbose",
default=0,
help="print additional information, can be supplied multiple times "
"for more verbose output",
)
parser.add_argument(
"--auth-key",
default=None,
help="authentication key as given in your DeepL account; the "
f"{env_auth_key} environment variable is used as secondary fallback",
)
parser.add_argument(
"--server-url",
default=None,
metavar="URL",
help=f"alternative server URL for testing; the {env_server_url} "
f"environment variable may be used as secondary fallback",
)
parser.add_argument(
"--proxy-url",
default=None,
metavar="URL",
help="proxy server URL to use for all connections; the "
f"{env_proxy_url} environment variable may be used as secondary "
"fallback",
)
# Note: add_subparsers param 'required' is not available in py36
subparsers = parser.add_subparsers(metavar="command", dest="command")
def add_common_arguments(subparser: argparse.ArgumentParser):
"""Adds arguments shared between text and document commands to the
subparser."""
subparser.add_argument(
"--to",
"--target-lang",
dest="target_lang",
required=True,
help="language into which the text should be translated",
)
subparser.add_argument(
"--from",
"--source-lang",
dest="source_lang",
help="language of the text to be translated; unless using a "
"glossary, this argument is optional and if it is omitted DeepL "
"will auto-detect the source language.",
)
subparser.add_argument(
"--formality",
type=str,
choices=[enum.value for enum in deepl.Formality],
default=deepl.Formality.DEFAULT.value,
help="desired formality for translation",
)
subparser.add_argument(
"--glossary-id",
dest="glossary",
type=str,
help="ID of glossary to use for translation",
)
# create the parser for the "text" command
parser_text = subparsers.add_parser(
"text", help="translate text(s)", description="translate text(s)"
)
add_common_arguments(parser_text)
parser_text.add_argument(
"--split-sentences",
type=str,
choices=[enum.value for enum in deepl.SplitSentences],
default=deepl.SplitSentences.DEFAULT.value,
help="control sentence splitting before translation, see API for "
"information",
)
parser_text.add_argument(
"--preserve-formatting",
action="store_true",
help="leave original formatting unchanged during translation",
)
parser_text.add_argument(
"text",
nargs="+",
type=str,
help="text to be translated. Wrap text in quotes to prevent the shell "
'from splitting sentences into words. Alternatively, use "-" to read '
"from standard-input.",
)
parser_text.add_argument(
"--show-detected-source",
action="store_true",
help="print detected source language for each text",
)
tag_handling_group = parser_text.add_argument_group(
"tag-handling",
description="Arguments controlling tag handling, for example XML. "
"The -tags arguments accept multiple arguments, as comma-"
"separated lists and as repeated arguments. For example, these are "
'equivalent: "--ignore-tags a --ignore-tags b,c" and "--ignore-tags '
'a,b,c".',
)
tag_handling_group.add_argument(
"--tag-handling",
type=str,
choices=["xml"],
default=None,
help="activate processing of formatting tags, for example 'xml'",
)
tag_handling_group.add_argument(
"--outline-detection-off",
dest="outline_detection",
default=True,
action="store_false",
help="disable automatic tag selection",
)
tag_handling_group.add_argument(
"--non-splitting-tags",
type=str,
action="append",
metavar="tag",
help="specify tags that may occur within sentences",
)
tag_handling_group.add_argument(
"--splitting-tags",
type=str,
action="append",
metavar="tag",
help="specify tags that separate text into sentences",
)
tag_handling_group.add_argument(
"--ignore-tags",
type=str,
action="append",
metavar="tag",
help="specify tags containing text that should not be translated",
)
# create the parser for the "document" command
parser_document = subparsers.add_parser(
"document",
help="translate document(s)",
description="translate document(s)",
)
add_common_arguments(parser_document)
parser_document.add_argument(
"file", nargs="+", help="file(s) to be translated."
)
parser_document.add_argument(
"dest", help="destination directory to store translated files."
)
# create the parser for the "usage" command
usage_help_str = "print usage information for the current billing period"
subparsers.add_parser(
"usage", help=usage_help_str, description=usage_help_str
)
# create the parser for the "languages" command
languages_help_str = "print available languages"
parser_languages = subparsers.add_parser(
"languages", help=languages_help_str, description=languages_help_str
)
parser_languages.add_argument(
"--glossary",
help="list language pairs supported for glossaries.",
action="store_true",
)
# create the parser for the "glossary" command
parser_glossary = subparsers.add_parser(
"glossary",
help="create, list, and remove glossaries",
description="manage glossaries using subcommands",
)
# Note: add_subparsers param 'required' is not available in py36
glossary_subparsers = parser_glossary.add_subparsers(
metavar="subcommand", dest="subcommand"
)
parser_glossary_create = glossary_subparsers.add_parser(
"create",
help="create a new glossary",
description="create a new glossary using entries specified in "
"a TSV file, standard-input, or provided via command-line",
)
parser_glossary_create.add_argument(
"--name", required=True, help="name to be associated with glossary."
)
parser_glossary_create.add_argument(
"--from",
"--source-lang",
dest="source_lang",
required=True,
help="language in which source entries of the glossary are specified.",
)
parser_glossary_create.add_argument(
"--to",
"--target-lang",
dest="target_lang",
required=True,
help="language in which target entries of the glossary are specified.",
)
parser_glossary_create.add_argument(
"entry_list",
nargs="*",
type=str,
metavar="SOURCE=TARGET",
help="one or more entries to add to glossary, may be repeated. "
'Alternatively, use "-" to read entries from standard-input in TSV '
"format (see --file argument). These arguments cannot be used "
"together with the --file argument.",
)
parser_glossary_create.add_argument(
"--file",
type=str,
help="file to read glossary entries from. File must be in "
"tab-separated values (TSV) format: one entry-pair per line, each "
"line contains the source entry, a tab, then the target entry. Empty "
"lines are ignored.",
)
parser_glossary_list = glossary_subparsers.add_parser(
"list",
help="list available glossaries",
description="list available glossaries",
)
_ = parser_glossary_list # Suppress unused variable warning
parser_glossary_get = glossary_subparsers.add_parser(
"get",
help="print details about one glossary",
description="print details about one glossary",
)
parser_glossary_get.add_argument(
"glossary_id",
metavar="id",
type=str,
help="ID of glossary to retrieve",
)
parser_glossary_entries = glossary_subparsers.add_parser(
"entries",
help="get entries contained in a glossary",
description="get entries contained in a glossary, and print them to "
"standard-output in tab-separated values (TSV) format: one entry-pair "
"per line, each line contains the source entry, a tab, then the "
"target entry.",
)
parser_glossary_entries.add_argument(
"glossary_id",
metavar="id",
type=str,
help="ID of glossary to retrieve",
)
parser_glossary_delete = glossary_subparsers.add_parser(
"delete",
help="delete one or more glossaries",
description="delete one or more glossaries",
)
parser_glossary_delete.add_argument(
"glossary_id_list",
metavar="id",
nargs="+",
type=str,
help="ID of glossary to delete",
)
return parser, parser_glossary
def main(args=None, prog_name=None):
parser, parser_glossary = get_parser(prog_name)
args = parser.parse_args(args)
if args.command is None:
# Support for Python 3.6 - subcommands cannot be required
sys.stderr.write("Error: command is required\n")
parser.print_help(sys.stderr)
sys.exit(1)
logger = logging.getLogger("deepl")
if args.verbose == 1:
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
elif args.verbose >= 2:
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler())
else:
logger.setLevel(logging.WARNING)
server_url = args.server_url or os.getenv(env_server_url)
auth_key = args.auth_key or os.getenv(env_auth_key)
proxy_url = args.proxy_url or os.getenv(env_proxy_url)
try:
if auth_key is None:
raise Exception(
f"Please provide authentication key via the {env_auth_key} "
"environment variable or --auth_key argument"
)
# Note: the get_languages() call to verify language codes is skipped
# because the CLI makes one API call per execution.
translator = deepl.Translator(
auth_key=auth_key,
server_url=server_url,
proxy=proxy_url,
skip_language_check=True,
)
if args.command == "text":
if len(args.text) == 1 and args.text[0] == "-":
args.text = [sys.stdin.read()]
elif args.command == "glossary":
if args.subcommand is None:
# Support for Python 3.6 - subcommands cannot be required
sys.stderr.write("Error: glossary subcommand is required\n")
parser_glossary.print_help(sys.stderr)
sys.exit(1)
# Remove global args so they are not unrecognised in action functions
del args.verbose, args.server_url, args.auth_key, args.proxy_url
args = vars(args)
# Call action function corresponding to command with remaining args
command = args.pop("command")
globals()[f"action_{command}"](translator, **args)
except Exception as exception:
sys.stderr.write(f"Error: {exception}\n")
sys.exit(1)
if __name__ == "__main__":
main(prog_name="deepl")
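# Example invocations (a sketch; both require a valid key in the DEEPL_AUTH_KEY environment variable):
#   python -m deepl text --to DE "Hello, world"
#   python -m deepl glossary list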
|
StarcoderdataPython
|
11227007
|
"""Materializing permission
Revision ID: c3a8f8611885
Revises: <PASSWORD>
Create Date: 2016-04-25 08:54:04.303859
"""
# revision identifiers, used by Alembic.
revision = 'c3a8f8611885'
down_revision = '<PASSWORD>'
from alembic import op
import sqlalchemy as sa
from caravel import db
from caravel import models
def upgrade():
bind = op.get_bind()
op.add_column('slices', sa.Column('perm', sa.String(length=2000), nullable=True))
session = db.Session(bind=bind)
for slc in session.query(models.Slice).all():
if slc.datasource:
slc.perm = slc.datasource.perm
session.merge(slc)
session.commit()
db.session.close()
def downgrade():
# Use batch_alter_table because dropping columns is not supported in SQLite
with op.batch_alter_table('slices') as batch_op:
batch_op.drop_column('perm')
|
StarcoderdataPython
|
286303
|
<reponame>mingchen-lab/deeptrio
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 27 19:32:21 2021
@author: zju
"""
import numpy as np
def preprocess(pair_file, seq_file):
with open(pair_file, 'r') as f:
lines = f.readlines()
proteins_1 = [line.strip().split('\t')[0] for line in lines]
proteins_2 = [line.strip().split('\t')[1] for line in lines]
labels = [line.strip().split('\t')[2] for line in lines]
protein_list = list(set(proteins_1 + proteins_2))
protein_seq = {}
with open(seq_file, 'r') as f:
lines = f.readlines()
for i in range(len(lines)):
line = lines[i].strip().split('\t')
protein_seq[line[0]] = line[1]
# return proteins_1, proteins_2, labels, protein_seq
amino_acid ={'A':1,'C':2,'D':3,'E':4,'F':5,
'G':6,'H':7,'I':8,'K':9,'L':10,
'M':11,'N':12,'P':13,'Q':14,'R':15,'S':16,
'T':17,'V':18,'W':19,'Y':20,'U':21,'X':22,'B':0}
# positive and negative setting
k1 = []
k2 = []
k3 = []
k_h = []
for i in range(len(labels)):
protein_1 = proteins_1[i]
protein_2 = proteins_2[i]
label = labels[i]
seq_1 = protein_seq[protein_1]
seq_2 = protein_seq[protein_2]
a1 = np.zeros([1500,], dtype = int)
a2 = np.zeros([1500,], dtype = int)
a3 = np.zeros([3,], dtype = float)
k = 0
for AA in seq_1:
a1[k] = amino_acid[AA]
k += 1
k1.append(a1)
k = 0
for AA in seq_2:
a2[k] = amino_acid[AA]
k += 1
k2.append(a2)
if int(label) == 0:
a3[1] = 1
elif int(label) == 1:
a3[0] = 1
else:
print('error')
break
k3.append(a3)
k_h.append(np.array([protein_1, protein_2]))
m1 = np.stack(k1, axis=0)
m2 = np.stack(k2, axis=0)
m3 = np.stack(k3, axis=0)
m_h = np.stack(k_h, axis=0)
# single protein setting
k1 = []
k2 = []
k3 = []
for protein in protein_list:
seq_1 = protein_seq[protein]
seq_2 = 'B'
label = 2
a1 = np.zeros([1500,], dtype = int)
a2 = np.zeros([1500,], dtype = int)
a3 = np.zeros([3,], dtype = float)
k = 0
for AA in seq_1:
a1[k] = amino_acid[AA]
k += 1
k1.append(a1)
k = 0
for AA in seq_2:
a2[k] = amino_acid[AA]
k += 1
k2.append(a2)
if int(label) == 2:
a3[2] = 1
else:
print('error')
break
k3.append(a3)
n1 = np.stack(k1, axis=0)
n2 = np.stack(k2, axis=0)
n3 = np.stack(k3, axis=0)
return m1, m2, m3, n1, n2, n3, m_h
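# Example usage (a sketch; the file names are hypothetical, in the tab-separated formats parsed above):
# m1, m2, m3, n1, n2, n3, m_h = preprocess('pairs.tsv', 'sequences.tsv')
# print(m1.shape, m3.shape)  # (n_pairs, 1500) and (n_pairs, 3)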
|
StarcoderdataPython
|
129764
|
<reponame>bdewitte123/velbus-aio
"""
:author: <NAME> <<EMAIL>>
"""
from __future__ import annotations
from velbusaio.command_registry import register_command
from velbusaio.message import Message
COMMAND_CODE = 0xE8
class TempSensorSettingsPart1(Message):
def populate(self, priority, address, rtr, data):
"""
:return: None
"""
self.needs_low_priority(priority)
self.needs_no_rtr(rtr)
self.set_attributes(priority, address, rtr)
def data_to_binary(self):
"""
:return: bytes
"""
return bytes([COMMAND_CODE])
register_command(COMMAND_CODE, TempSensorSettingsPart1)
|
StarcoderdataPython
|
3479507
|
<filename>Procesar KW Explorer Ahref.py
#!/usr/bin/env python
# coding: utf-8
# Author: Jlmarin
# Web: https://jlmarin.eu
import argparse
import sys
import pandas as pd
from nltk import SnowballStemmer
import spacy
import es_core_news_sm
from tqdm import tqdm
from unidecode import unidecode
import glob
import re
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--save', help='Nombre del archivo al guardar')
parser.add_argument('-c', '--clean', nargs='?', const=1, type=bool, default=True, help='Elimina todos los duplicados del listado')
parser.add_argument('-i', '--intent', nargs='?', const=1, type=bool, default=True, help='Activa el procesado de las intenciones de busqueda')
parser.add_argument('-l', '--location', nargs='?', const=1, type=bool, default=True, help='Nombre del archivo con la base de datos de las localizaciones')
args = parser.parse_args()
pd.options.mode.chained_assignment = None
nlp = es_core_news_sm.load()
spanishstemmer=SnowballStemmer('spanish')
def normalize(text):
text = unidecode(str(text))
doc = nlp(text)
words = [t.orth_ for t in doc if not t.is_punct | t.is_stop]
lexical_tokens = [t.lower() for t in words if len(t) > 3 and t.isalpha()]
return lexical_tokens
def raiz(kw):
    # Compute the semantic root of the keyword
stems = [spanishstemmer.stem(wd) for wd in kw]
raiz = " ".join(sorted(stems))
return raiz
# Open every CSV file and append it to a single dataframe
archivos=[]
files = glob.glob("entrada/*.csv")
loop = tqdm(total = len(files), position = 0, leave = False)
for f in files:
loop.set_description("Unificando archivos...".format(f))
archivos.append(pd.read_csv(f))
loop.update(1)
df = pd.concat(archivos, ignore_index=True)
loop.close()
print('Archivos cargados... OK')
# Remove duplicates
if args.clean:
df = df.drop_duplicates()
print('Duplicados eliminados... OK')
# Main processing loop
loop = tqdm(total = len(df.index), position = 0, leave = False)
df['Raiz semantica'] = ''
print(df)
for i in df.index:
loop.set_description("Calculando raices...".format(i))
kw_a = normalize(df.loc[i,'Keyword'])
    # Compute the semantic root
df.loc[i,'Raiz semantica'] = raiz(kw_a)
loop.update(1)
#print('Kw ' + str(index) + ' de ' + str(len(df.index)))
loop.close()
print('Calculado raices semanticas... OK')
df = df.sort_values(by=['Raiz semantica', 'Volume'], ascending=[True,False])
df = df.reset_index(drop=True)
# Group the keywords by semantic root and search volume
loop = tqdm(total = len(df.index), position = 0, leave = False)
df['Grupo'] = ''
for i in df.index:
loop.set_description("Agrupando...".format(i))
if i == 0:
df.loc[i,'Grupo'] = df.loc[i,'Keyword']
elif df.loc[i,'Raiz semantica'] == df.loc[i-1,'Raiz semantica']:
df.loc[i,'Grupo'] = df.loc[i-1,'Grupo']
else:
df.loc[i,'Grupo'] = df.loc[i,'Keyword']
loop.update(1)
loop.close()
print('Agrupado... OK')
df.to_csv('kw_procesado.csv', index=False)
print('Archivo kw_procesado.csv creado... OK')
gdf = (df.groupby('Grupo', as_index=False)
.agg({'Volume':'sum','Clicks':'sum','Difficulty':'mean','CPC':'mean','CPS':'mean','Return Rate':'mean','Keyword':' | '.join}))
# Detect the search intent of the keyword: informational, transactional, navigational
if args.intent:
intenciones = pd.read_csv('Data/intenciones.csv')
loop = tqdm(total = len(intenciones.index), position = 0, leave = False)
gdf['Intencion'] = ''
for i in intenciones.index:
loop.set_description("Detectando intenciones de busqueda...".format(i))
row = gdf[gdf['Grupo'].str.match(str(intenciones.loc[i,'Patron']))]
        if not row.empty:
gdf.loc[row.index,'Intencion'] = intenciones.loc[i,'Tipo']
loop.update(1)
loop.close()
print('Intenciones de busqueda... OK')
# Detect the location referenced by the keyword.
if args.location:
ubicaciones = pd.read_csv('Data/ubicaciones.csv')
loop = tqdm(total = len(ubicaciones.index), position = 0, leave = False)
gdf['Ubicacion'] = ''
gdf['Tipo ubicacion'] = ''
for i in ubicaciones.index:
loop.set_description("Detectando ubicaciones...".format(i))
row = gdf[gdf['Grupo'].str.match(str(ubicaciones.loc[i,'Ubicacion']))]
        if not row.empty:
gdf.loc[row.index,'Ubicacion'] = ubicaciones.loc[i,'Ubicacion']
gdf.loc[row.index,'Tipo ubicacion'] = ubicaciones.loc[i,'Tipo']
loop.update(1)
loop.close()
print('Ubicaciones... OK')
gdf.to_csv('kw_agrupado.csv',index=False)
print('Archivo kw_agrupado.csv creado... OK')
print('Proceso finalizado... OK')
|
StarcoderdataPython
|
9733499
|
<filename>setup.py
# --------------------------------------------
# Copyright 2019, <NAME>
# @Author: <NAME>
# @Date: 2019-1-22 13:50:49
# --------------------------------------------
from os import path
from setuptools import setup, find_packages
file_path = path.abspath(path.dirname(__file__))
with open(path.join(file_path, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
package_metadata = {
'name': 'django-react-tools',
'version': '0.2.13',
'description': 'Tools for helping integrate ReactJS into a Django project.',
'long_description': long_description,
'url': 'https://github.com/renderbox/django-react-tools',
'author': '<NAME>',
'author_email': '<EMAIL>',
'license': '',
'classifiers': [
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: Other/Proprietary License',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
}
setup(
**package_metadata,
packages=find_packages(),
python_requires=">=3.6",
install_requires=[
'Django>=2.1',
"requests",
],
extras_require={
'dev': [],
'test': [],
'prod': [],
'build': [],
'docs': [
'coverage==4.4.1',
'Sphinx==1.6.4'],
}
)
|
StarcoderdataPython
|
5152417
|
<reponame>peppelinux/djangosaml2_spid<filename>src/djangosaml2_spid/apps.py
from django.apps import AppConfig
class Djangosaml2SpidConfig(AppConfig):
name = 'djangosaml2_spid'
|
StarcoderdataPython
|
8044000
|
import numpy as _numpy
from fdrtd.plugins.simon.caches.cache import Cache
from fdrtd.plugins.simon.microprotocols.microprotocol import Microprotocol
class MicroprotocolSecureMatrixMultiplication(Microprotocol):
def __init__(self, microservice, properties, myself):
super().__init__(microservice, properties, myself)
self.register_cache('input', Cache())
self.register_cache('dimX', Cache())
self.register_cache('dimY', Cache())
self.register_cache('intermediateV', Cache())
self.register_cache('intermediateW', Cache())
self.register_cache('final', Cache())
self.register_stage(0, ['input'], self.stage_0)
self.register_stage(1, ['dimX', 'dimY'], self.stage_1)
self.register_stage(2, ['intermediateV'], self.stage_2)
self.register_stage(3, ['intermediateW'], self.stage_3)
self.register_stage(4, ['final'], self.stage_4)
self.M = None
self.n = self.p = self.q = 0
self.v = None
def stage_0(self, args):
self.M = _numpy.array(args['input'])
self.network.broadcast(self.M.shape, 'dimX' if self.network.myself == 0 else 'dimY')
return 1, None
def stage_1(self, args):
self.p = args['dimX'][0]
self.n = args['dimX'][1]
if args['dimY'][0] != args['dimX'][1]:
raise RuntimeError("matrix shapes not compatible")
self.q = args['dimY'][1]
if self.network.myself == 0:
xt = self.M.transpose()
q, r = _numpy.linalg.qr(xt, mode='complete')
g = int((self.p*self.n)/(self.p+self.q))
z = q[:, self.p:self.p+g]
self.v = _numpy.identity(self.n) - _numpy.matmul(z, z.transpose())
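            # Columns p..p+g of Q span part of the null space of M (M @ Z == 0), so
            # M @ (I - Z Z^T) @ Y == M @ Y: the exact product is recovered in stage_3
            # even though M itself is never shared with the other party.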
self.network.broadcast(self.v.tolist(), 'intermediateV')
return 2, None
def stage_2(self, args):
if self.network.myself == 1:
v = args['intermediateV']
w = _numpy.matmul(v, self.M)
self.network.broadcast(w.tolist(), 'intermediateW')
return 3, None
def stage_3(self, args):
if self.network.myself == 0:
w = args['intermediateW']
result = _numpy.matmul(self.M, w)
self.network.broadcast(result.tolist(), 'final')
return 4, None
def stage_4(self, args):
return -1, {'inputs': 2, # self.n,
'result': {
'product': args['final']}}
|
StarcoderdataPython
|
4922442
|
# https://leetcode.com/problems/subdomain-visit-count
class Solution:
def subdomainVisits(self, cpdomains):
dic = {}
for cp in cpdomains:
num, domain = cp.split(" ")
domain_list = domain.split(".")
N = len(domain_list)
for i in range(N):
key = ".".join(domain_list[i:N])
if key not in dic.keys():
dic[key] = 0
dic[key] += int(num)
ans = []
for k, v in dic.items():
string = str(v) + " " + k
ans.append(string)
return ans
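# Example (sketch), following the problem statement linked above:
# Solution().subdomainVisits(["9001 discuss.leetcode.com"])
# # -> ["9001 discuss.leetcode.com", "9001 leetcode.com", "9001 com"] (order may vary)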
|
StarcoderdataPython
|
1633545
|
#!/usr/bin/env python3
"""
This example uses a configuration file in JSON format to
process the events and apply pre-selection cuts to the images
(charge and number of pixels).
An HDF5 file is written with image MC and moment parameters
(e.g. length, width, image amplitude, etc.).
"""
import numpy as np
from tqdm import tqdm
from ctapipe.core import Tool
from ctapipe.core.traits import Unicode, List, Dict, Bool
from ctapipe.io import EventSourceFactory, HDF5TableWriter
from ctapipe.calib import CameraCalibrator
from ctapipe.utils.CutFlow import CutFlow
from ctapipe.image import hillas_parameters, tailcuts_clean
class SimpleEventWriter(Tool):
name = 'ctapipe-simple-event-writer'
description = Unicode(__doc__)
infile = Unicode(help='input file to read', default='').tag(config=True)
outfile = Unicode(help='output file name', default_value='output.h5').tag(config=True)
progress = Bool(help='display progress bar', default_value=True).tag(config=True)
aliases = Dict({
'infile': 'EventSourceFactory.input_url',
'outfile': 'SimpleEventWriter.outfile',
'max-events': 'EventSourceFactory.max_events',
'progress': 'SimpleEventWriter.progress'
})
classes = List([EventSourceFactory, CameraCalibrator, CutFlow])
def setup(self):
self.log.info('Configure EventSourceFactory...')
self.event_source = EventSourceFactory.produce(
config=self.config, tool=self, product='HESSIOEventSource'
)
self.event_source.allowed_tels = self.config['Analysis']['allowed_tels']
self.calibrator = CameraCalibrator(
config=self.config, tool=self, eventsource=self.event_source
)
self.writer = HDF5TableWriter(
filename=self.outfile, group_name='image_infos', overwrite=True
)
# Define Pre-selection for images
preselcuts = self.config['Preselect']
self.image_cutflow = CutFlow('Image preselection')
self.image_cutflow.set_cuts(dict(
no_sel=None,
n_pixel=lambda s: np.count_nonzero(s) < preselcuts['n_pixel']['min'],
image_amplitude=lambda q: q < preselcuts['image_amplitude']['min']
))
# Define Pre-selection for events
self.event_cutflow = CutFlow('Event preselection')
self.event_cutflow.set_cuts(dict(
no_sel=None
))
def start(self):
self.log.info('Loop on events...')
for event in tqdm(
self.event_source,
desc='EventWriter',
total=self.event_source.max_events,
                disable=not self.progress):
self.event_cutflow.count('no_sel')
self.calibrator.calibrate(event)
for tel_id in event.dl0.tels_with_data:
self.image_cutflow.count('no_sel')
camera = event.inst.subarray.tel[tel_id].camera
dl1_tel = event.dl1.tel[tel_id]
# Image cleaning
image = dl1_tel.image[0] # Waiting for automatic gain selection
mask = tailcuts_clean(camera, image, picture_thresh=10, boundary_thresh=5)
cleaned = image.copy()
cleaned[~mask] = 0
# Preselection cuts
if self.image_cutflow.cut('n_pixel', cleaned):
continue
if self.image_cutflow.cut('image_amplitude', np.sum(cleaned)):
continue
# Image parametrisation
params = hillas_parameters(camera, cleaned)
# Save Ids, MC infos and Hillas informations
self.writer.write(camera.cam_id, [event.r0, event.mc, params])
def finish(self):
self.log.info('End of job.')
self.image_cutflow()
self.event_cutflow()
self.writer.close()
if __name__ == '__main__':
tool = SimpleEventWriter()
tool.run()
|
StarcoderdataPython
|
1717117
|
<reponame>Yo-main/akingbee.com
class BaseError(Exception):
pass
class NotInitialized(Exception):
pass
class AlreadyInitialized(Exception):
pass
|
StarcoderdataPython
|
1982843
|
# # <NAME>, 2019
# My program reads in a text file and outputs every second line.
# The program was intended to take the filename of the text file from a command-line argument; in this version the filename is hard-coded below.
with open("moby-dick.txt", 'r') as f:
# Opens text file "moby-dick.txt" saved in the pands-problem-set directory, the file is opened in read only
# f = text which is read by the program
count = 1
# Index value is set to 1 adapted from https://stackoverflow.com/a/30551984
for line in f:
        # defines line in f which will meet the conditions below
if not line.isspace():
# If line is not a blank line adapted from https://stackoverflow.com/a/2369538
# isspace() checks whether the string consists of whitespace ref https://www.tutorialspoint.com/python/string_isspace.htm
count = count + 1
# Add 1 to index value
if count % 2 == 0:
# Argument to test if count of line number is divisible by 2 with no remainder # adapted program from solution found in https://stackoverflow.com/a/44425842
                # As every second line will have a count which is an even number, it will be divisible by 2 with no remainder.
print(line)
# Print line if argument is true, if false, go to next line
# The with statement closes the file automatically when the block exits
|
StarcoderdataPython
|
11298588
|
<reponame>MartinXPN/DIIN-in-Keras<filename>preprocess.py
from __future__ import print_function
import argparse
import io
import json
import os
import numpy as np
from keras.preprocessing.sequence import pad_sequences
from tqdm import tqdm
from util import get_snli_file_path, get_word2vec_file_path, ChunkDataManager
def pad(x, maxlen):
if len(x) <= maxlen:
pad_width = ((0, maxlen - len(x)), (0, 0))
return np.pad(x, pad_width=pad_width, mode='constant', constant_values=0)
res = x[:maxlen]
return np.array(res, copy=False)
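# Illustrative note (added, describing assumed intent): pad() either truncates a
# 2-D array to `maxlen` rows or zero-pads it, e.g. an input of shape (3, 5) with
# maxlen=4 comes back with shape (4, 5).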
class BasePreprocessor(object):
def __init__(self):
self.word_to_id = {}
self.char_to_id = {}
self.vectors = []
self.part_of_speech_to_id = {}
self.unique_words = set()
self.unique_parts_of_speech = set()
@staticmethod
def load_data(file_path):
"""
Load jsonl file by default
"""
with open(file_path) as f:
lines = f.readlines()
text = '[' + ','.join(lines) + ']'
return json.loads(text)
@staticmethod
def load_word_vectors(file_path, separator=' ', normalize=True, max_words=None):
"""
:return: words[], np.array(vectors)
"""
seen_words = set()
words = []
vectors = []
vector_size = None
print('Loading', file_path)
with io.open(file_path, mode='r', encoding='utf-8') as f:
for line in tqdm(f):
values = line.replace(' \n', '').split(separator)
word = values[0]
if len(values) < 10 or word in seen_words:
print('Invalid word:', word)
continue
seen_words.add(word)
vec = np.asarray(values[1:], dtype='float32')
if normalize:
vec /= np.linalg.norm(vec, ord=2)
if vector_size is None:
vector_size = len(vec)
elif len(vec) != vector_size:
print('Skipping', word)
continue
words.append(word)
vectors.append(vec)
if max_words and len(words) >= max_words:
break
vectors = np.array(vectors, dtype='float32', copy=False)
return words, vectors
def get_words_with_part_of_speech(self, sentence):
"""
:return: words, parts_of_speech
"""
raise NotImplementedError
def get_sentences(self, sample):
"""
:param sample: sample from data
:return: premise, hypothesis
"""
raise NotImplementedError
def get_all_words_with_parts_of_speech(self, file_paths):
"""
:param file_paths: paths to files where the data is stored
:return: words, parts_of_speech
"""
all_words = []
all_parts_of_speech = []
for file_path in file_paths:
data = self.load_data(file_path=file_path)
for sample in tqdm(data):
premise, hypothesis = self.get_sentences(sample)
premise_words, premise_speech = self.get_words_with_part_of_speech(premise)
hypothesis_words, hypothesis_speech = self.get_words_with_part_of_speech(hypothesis)
all_words += premise_words + hypothesis_words
all_parts_of_speech += premise_speech + hypothesis_speech
self.unique_words = set(all_words)
self.unique_parts_of_speech = set(all_parts_of_speech)
@staticmethod
def get_not_present_word_vectors(not_present_words, word_vector_size, normalize):
res_words = []
res_vectors = []
for word in not_present_words:
vec = np.random.uniform(size=word_vector_size)
if normalize:
vec /= np.linalg.norm(vec, ord=2)
res_words.append(word)
res_vectors.append(vec)
return res_words, res_vectors
def init_word_to_vectors(self, vectors_file_path, needed_words, normalize=False, max_loaded_word_vectors=None):
"""
Initialize:
{word -> vec} mapping
{word -> id} mapping
[vectors] array
:param max_loaded_word_vectors: maximum number of words to load from word-vec file
:param vectors_file_path: file where word-vectors are stored (Glove .txt file)
:param needed_words: words for which to keep word-vectors
:param normalize: normalize word vectors
"""
needed_words = set(needed_words)
words, self.vectors = self.load_word_vectors(file_path=vectors_file_path,
normalize=normalize,
max_words=max_loaded_word_vectors)
word_vector_size = self.vectors.shape[-1]
self.vectors = list(self.vectors)
present_words = needed_words.intersection(words)
not_present_words = needed_words - present_words
print('#Present words:', len(present_words), '\t#Not present words', len(not_present_words))
not_present_words, not_present_vectors = self.get_not_present_word_vectors(not_present_words=not_present_words,
word_vector_size=word_vector_size,
normalize=normalize)
words, self.vectors = zip(*[(word, vec) for word, vec in zip(words, self.vectors) if word in needed_words])
words = list(words) + not_present_words
self.vectors = list(self.vectors) + not_present_vectors
print('Initializing word mappings...')
self.word_to_id = {word: i for i, word in enumerate(words)}
self.vectors = np.array(self.vectors, copy=False)
assert len(self.word_to_id) == len(self.vectors)
print(len(self.word_to_id), 'words in total are now initialized!')
def init_chars(self, words):
"""
Init char -> id mapping
"""
chars = set()
for word in words:
chars = chars.union(set(word))
self.char_to_id = {char: i+1 for i, char in enumerate(chars)}
print('Chars:', chars)
def init_parts_of_speech(self, parts_of_speech):
self.part_of_speech_to_id = {part: i+1 for i, part in enumerate(parts_of_speech)}
print('Parts of speech:', parts_of_speech)
def save_word_vectors(self, file_path):
np.save(file_path, self.vectors)
def get_label(self, sample):
        raise NotImplementedError
def get_labels(self):
raise NotImplementedError
def label_to_one_hot(self, label):
label_set = self.get_labels()
        res = np.zeros(shape=(len(label_set)), dtype=bool)
i = label_set.index(label)
res[i] = 1
return res
def parse_sentence(self, sentence, max_words, chars_per_word):
# Words
words, parts_of_speech = self.get_words_with_part_of_speech(sentence)
word_ids = [self.word_to_id[word] for word in words]
# Syntactical features
syntactical_features = [self.part_of_speech_to_id[part] for part in parts_of_speech]
syntactical_one_hot = np.eye(len(self.part_of_speech_to_id) + 2)[syntactical_features] # Convert to 1-hot
# Chars
chars = [[self.char_to_id[c] for c in word] for word in words]
chars = pad_sequences(chars, maxlen=chars_per_word, padding='post', truncating='post')
return (words, parts_of_speech, np.array(word_ids, copy=False),
syntactical_features, pad(syntactical_one_hot, max_words),
pad(chars, max_words))
def parse_one(self, premise, hypothesis, max_words_p, max_words_h, chars_per_word):
"""
:param premise: sentence
:param hypothesis: sentence
:param max_words_p: maximum number of words in premise
:param max_words_h: maximum number of words in hypothesis
:param chars_per_word: number of chars in each word
:return: (premise_word_ids, hypothesis_word_ids,
premise_chars, hypothesis_chars,
premise_syntactical_one_hot, hypothesis_syntactical_one_hot,
premise_exact_match, hypothesis_exact_match)
"""
(premise_words, premise_parts_of_speech, premise_word_ids,
premise_syntactical_features, premise_syntactical_one_hot,
premise_chars) = self.parse_sentence(sentence=premise, max_words=max_words_p, chars_per_word=chars_per_word)
(hypothesis_words, hypothesis_parts_of_speech, hypothesis_word_ids,
hypothesis_syntactical_features, hypothesis_syntactical_one_hot,
hypothesis_chars) = self.parse_sentence(sentence=hypothesis, max_words=max_words_h, chars_per_word=chars_per_word)
def calculate_exact_match(source_words, target_words):
source_words = [word.lower() for word in source_words]
target_words = [word.lower() for word in target_words]
target_words = set(target_words)
res = [(word in target_words) for word in source_words]
return np.array(res, copy=False)
premise_exact_match = calculate_exact_match(premise_words, hypothesis_words)
hypothesis_exact_match = calculate_exact_match(hypothesis_words, premise_words)
return (premise_word_ids, hypothesis_word_ids,
premise_chars, hypothesis_chars,
premise_syntactical_one_hot, hypothesis_syntactical_one_hot,
premise_exact_match, hypothesis_exact_match)
def parse(self, input_file_path, max_words_p=33, max_words_h=20, chars_per_word=13):
"""
:param input_file_path: file to parse data from
:param max_words_p: maximum number of words in premise
:param max_words_h: maximum number of words in hypothesis
:param chars_per_word: number of chars in each word (padding is applied if not enough)
:return: (premise_word_ids, hypothesis_word_ids,
premise_chars, hypothesis_chars,
premise_syntactical_one_hot, hypothesis_syntactical_one_hot,
premise_exact_match, hypothesis_exact_match)
"""
# res = [premise_word_ids, hypothesis_word_ids, premise_chars, hypothesis_chars,
# premise_syntactical_one_hot, hypothesis_syntactical_one_hot, premise_exact_match, hypothesis_exact_match]
res = [[], [], [], [], [], [], [], [], []]
data = self.load_data(input_file_path)
for sample in tqdm(data):
# As stated in paper: The labels are "entailment", "neutral", "contradiction" and "-".
# "-" shows that annotators can't reach consensus with each other, thus removed during training and testing
label = self.get_label(sample=sample)
if label == '-':
continue
premise, hypothesis = self.get_sentences(sample=sample)
sample_inputs = self.parse_one(premise, hypothesis,
max_words_h=max_words_h, max_words_p=max_words_p,
chars_per_word=chars_per_word)
label = self.label_to_one_hot(label=label)
sample_result = list(sample_inputs) + [label]
for res_item, parsed_item in zip(res, sample_result):
res_item.append(parsed_item)
res[0] = pad_sequences(res[0], maxlen=max_words_p, padding='post', truncating='post', value=0.) # input_word_p
res[1] = pad_sequences(res[1], maxlen=max_words_h, padding='post', truncating='post', value=0.) # input_word_h
res[6] = pad_sequences(res[6], maxlen=max_words_p, padding='post', truncating='post', value=0.) # exact_match_p
res[7] = pad_sequences(res[7], maxlen=max_words_h, padding='post', truncating='post', value=0.) # exact_match_h
return res
class SNLIPreprocessor(BasePreprocessor):
def get_words_with_part_of_speech(self, sentence):
parts = sentence.split('(')
words = []
parts_of_speech = []
for p in parts:
if ')' in p:
res = p.split(' ')
parts_of_speech.append(res[0])
words.append(res[1].replace(')', ''))
return words, parts_of_speech
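    # Hedged illustration (added note): for an SNLI binary-parse string such as
    # "(ROOT (NP (DT the) (NN dog)))" this splitter yields
    # (['the', 'dog'], ['DT', 'NN']).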
def get_sentences(self, sample):
return sample['sentence1_parse'], sample['sentence2_parse']
def get_label(self, sample):
return sample['gold_label']
def get_labels(self):
return 'entailment', 'contradiction', 'neutral'
def preprocess(p, h, chars_per_word, preprocessor, save_dir, data_paths,
word_vector_save_path, normalize_word_vectors, max_loaded_word_vectors=None, word_vectors_load_path=None,
include_word_vectors=True, include_chars=True,
include_syntactical_features=True, include_exact_match=True):
preprocessor.get_all_words_with_parts_of_speech([data_path[1] for data_path in data_paths])
print('Found', len(preprocessor.unique_words), 'unique words')
print('Found', len(preprocessor.unique_parts_of_speech), 'unique parts of speech')
# Init mappings of the preprocessor
preprocessor.init_word_to_vectors(vectors_file_path=get_word2vec_file_path(word_vectors_load_path),
needed_words=preprocessor.unique_words,
normalize=normalize_word_vectors,
max_loaded_word_vectors=max_loaded_word_vectors)
preprocessor.init_chars(words=preprocessor.unique_words)
preprocessor.init_parts_of_speech(parts_of_speech=preprocessor.unique_parts_of_speech)
# Process and save the data
preprocessor.save_word_vectors(word_vector_save_path)
for dataset, input_path in data_paths:
data = preprocessor.parse(input_file_path=input_path,
max_words_p=p,
max_words_h=h,
chars_per_word=chars_per_word)
# Determine which part of data we need to dump
if not include_exact_match: del data[6:8] # Exact match feature
if not include_syntactical_features: del data[4:6] # Syntactical POS tags
if not include_chars: del data[2:4] # Character features
if not include_word_vectors: del data[0:2] # Word vectors
data_saver = ChunkDataManager(save_data_path=os.path.join(save_dir, dataset))
data_saver.save([np.array(item) for item in data])
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--p', default=32, help='Maximum words in premise', type=int)
parser.add_argument('--h', default=32, help='Maximum words in hypothesis', type=int)
parser.add_argument('--chars_per_word', default=16, help='Number of characters in one word', type=int)
parser.add_argument('--max_word_vecs', default=None, help='Maximum number of word vectors', type=int)
parser.add_argument('--save_dir', default='data/', help='Save directory of data', type=str)
parser.add_argument('--dataset', default='snli', help='Which preprocessor to use', type=str)
parser.add_argument('--word_vec_load_path', default=None, help='Path to load word vectors', type=str)
parser.add_argument('--word_vec_save_path', default='data/word-vectors.npy', help='Path to save vectors', type=str)
parser.add_argument('--normalize_word_vectors', action='store_true')
parser.add_argument('--omit_word_vectors', action='store_true')
parser.add_argument('--omit_chars', action='store_true')
parser.add_argument('--omit_syntactical_features', action='store_true')
parser.add_argument('--omit_exact_match', action='store_true')
args = parser.parse_args()
if args.dataset == 'snli':
snli_preprocessor = SNLIPreprocessor()
path = get_snli_file_path()
train_path = os.path.join(path, 'snli_1.0_train.jsonl')
test_path = os.path.join(path, 'snli_1.0_test.jsonl')
dev_path = os.path.join(path, 'snli_1.0_dev.jsonl')
preprocess(p=args.p, h=args.h, chars_per_word=args.chars_per_word,
preprocessor=snli_preprocessor,
save_dir=args.save_dir,
data_paths=[('train', train_path), ('test', test_path), ('dev', dev_path)],
word_vectors_load_path=args.word_vec_load_path,
normalize_word_vectors=args.normalize_word_vectors,
word_vector_save_path=args.word_vec_save_path,
max_loaded_word_vectors=args.max_word_vecs,
include_word_vectors=not args.omit_word_vectors,
include_chars=not args.omit_chars,
include_syntactical_features=not args.omit_syntactical_features,
include_exact_match=not args.omit_exact_match)
else:
raise ValueError('couldn\'t find implementation for specified dataset')
|
StarcoderdataPython
|
3215132
|
<reponame>dtrizna/speakeasy<gh_stars>100-1000
# Copyright (C) 2021 FireEye, Inc. All Rights Reserved.
import os
import sys
import cmd
import shlex
import fnmatch
import logging
import binascii
import argparse
import traceback
import hexdump
import speakeasy
import speakeasy.winenv.arch as e_arch
from speakeasy.errors import SpeakeasyError
if sys.platform != 'win32':
import readline # noqa (used by cmd)
class DebuggerException(Exception):
pass
def get_logger():
"""
Get the default logger for speakeasy
"""
logger = logging.getLogger('sedbg')
if not logger.handlers:
sh = logging.StreamHandler()
logger.addHandler(sh)
logger.setLevel(logging.INFO)
return logger
class Breakpoint(object):
_id = 0
def __init__(self, address):
if isinstance(address, int):
self.address = address
else:
self.address = address.lower()
self.id = Breakpoint._id
Breakpoint._id += 1
class SpeakeasyDebugger(cmd.Cmd):
prompt = '(sedbg) '
file = None
def __init__(self, target=None, is_sc=False, arch=None, data=None, logger=None, se_inst=None):
super(SpeakeasyDebugger, self).__init__()
self.target = target
self.is_sc = is_sc
self.arch = arch
self.logger = logger
if not se_inst:
self.se = speakeasy.Speakeasy(logger=self.logger)
else:
self.se = se_inst
self.loaded_modules = []
self.loaded_shellcode = []
self.targets = []
self.breakpoints = {}
self.init_state()
if self.is_sc and not self.arch:
raise DebuggerException('Architecture required when debugging shellcode')
if self.target:
if not self.is_sc:
# Load the initial target module
self.load_module(self.target)
else:
self.load_shellcode(self.target, self.arch)
def init_state(self):
if self.se:
self.se.add_code_hook(self.code_hook)
self.se.add_api_hook(self.api_hook, '*', '*') # hook every API
self.step = False
self.running = False
self._do_stop = False
self.exit = False
self.step_over = 0
self.next_pc = 0
def error(self, msg):
self.logger.error('[-] ' + msg)
def info(self, msg):
self.logger.info(msg)
def log_disasm(self, addr, size):
ds = self.se.disasm(addr, size, False)[0]
out = '0x%x: %s %s' % (ds.address, ds.mnemonic, ds.op_str)
self.info(out)
def format_hexdump(self, data, address=0):
output = []
for line in hexdump.hexdump(data, result='generator'):
offset = line[: line.find(':')]
rest = line[line.find(':'):]
offset = int.from_bytes(binascii.unhexlify(offset), 'big')
if address > 0xFFFFFFFF:
fmt = r'%016X'
else:
fmt = r'%08X'
addr = fmt % (offset + address)
output.append(addr + rest)
return '\n'.join(output)
def _break(self, addr):
'''
Return execution back to the debugger and do not execute the
current instruction.
'''
self.step = False
self._do_stop = True
self.next_pc = addr
self.se.stop()
def api_hook(self, emu, api_name, func, params):
'''
Hook called for API calls
'''
rv = func(params)
addr = emu.get_ret_address()
bp = self.breakpoints.get(api_name.lower())
if bp:
self.info('\nBreakpoint %d hit for %s' % (bp.id, api_name))
self.step = True
return rv
elif '.' in api_name:
fn = api_name.split('.')[1]
bp = self.breakpoints.get(fn.lower())
if bp:
self.info('\nBreakpoint %d hit for %s' % (bp.id, api_name))
self.step = True
return rv
for addr, bp in self.breakpoints.items():
if not isinstance(addr, int):
if fnmatch.fnmatch(api_name.lower(), addr.lower()):
self.info('\nBreakpoint %d hit for %s' % (bp.id, api_name))
self.step = True
return rv
return rv
def code_hook(self, emu, addr, size, ctx):
'''
Hook called for each instruction while debugging
'''
if self._do_stop:
self.next_pc = addr
self._do_stop = False
return True
if self.breakpoints:
bp = self.breakpoints.get(addr)
if bp:
self.log_disasm(addr, size)
self.info('\nBreakpoint %d hit for 0x%x' % (bp.id, addr))
self._break(addr)
return True
if self.step:
sres, eres = emu.get_reserved_ranges()
if sres < addr < eres:
addr = emu.get_ret_address()
self.log_disasm(addr, size)
self._break(addr)
return True
def stop(self):
'''
Stop running the emulator
'''
self.se.stop()
self.running = False
def convert_bin_str(self, hstr):
'''
Convert a hex string to an int
'''
# Was a register supplied? Read it.
regs = self.se.get_all_registers()
val = regs.get(hstr.lower())
if val:
hstr = val
if hstr.startswith('0x'):
int_val = int(hstr, 16)
else:
int_val = int(hstr, 10)
return int_val
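    # Added note: convert_bin_str accepts either hex ('0x1000' -> 4096) or
    # decimal ('4096' -> 4096) strings; register names are looked up in
    # get_all_registers() before conversion.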
def dump_mem(self, address, length):
'''
Dump memory (until an invalid memory read or max length occurs)
'''
data = []
try:
for i in range(length):
data.append(self.se.mem_read(address + i, 1))
except SpeakeasyError:
self.error("Failed memory read at address: 0x%x" % (address + i))
return b''.join(data)
def write_mem(self, address, data):
'''
Write memory (until an invalid memory read or max length occurs)
'''
try:
for i, b in enumerate(bytes(data)):
self.se.mem_write(address + i, data[i: i + 1])
except Exception:
self.error("Failed memory write at address: 0x%x" % (address + i))
finally:
return
def do_maps(self, args):
'''
Get a list of all memory maps in the emulation space
Usage:
maps
'''
self.info('Base\t\t Size\t Tag')
for mm in self.se.get_mem_maps():
line = '0x%016x 0x%08x %s' % (mm.get_base(), mm.get_size(), mm.get_tag())
self.info(line)
def do_bl(self, args):
'''
List all current breakpoints and their IDs
Usage:
bl
'''
self.info('Breakpoints:')
for addr, bp in self.breakpoints.items():
if isinstance(addr, int):
line = '%d: 0x%016x' % (bp.id, addr)
else:
line = '%d: %s' % (bp.id, addr)
self.info(line)
def do_bp(self, args):
'''
Set a breakpoint at the specified address or API name
Usage:
bp [ <breakpoint_addr> | <api_name> ]
bp 0x10001020
'''
split_args = shlex.split(args)
address = split_args[0]
try:
address = self.convert_bin_str(address)
bp = Breakpoint(address)
msg = '[*] Breakpoint %d set at address 0x%x' % (bp.id, address)
rv = address
except Exception:
orig = address
address = address.lower()
bp = Breakpoint(address)
msg = '[*] Breakpoint %d set at %s' % (bp.id, orig)
rv = None
self.breakpoints.update({address: bp})
self.info(msg)
return rv
def do_bc(self, args):
'''
Remove a breakpoint by ID
Usage:
bc <breakpoint_id>
bc 1
'''
split_args = shlex.split(args)
try:
_id = int(split_args[0])
except Exception:
self.error('Invalid breakpoint id')
return None
for addr, bp in self.breakpoints.items():
if _id == bp.id:
self.info('[*] Removing breakpoint %d' % (_id))
self.breakpoints.pop(addr)
return addr
def do_disas(self, args):
'''
Disassemble an address
Usage:
disas <address> [length]
'''
split_args = shlex.split(args)
if not split_args:
self.error('Invalid arguments: disas <address> [size]')
return
address = ''
length = '0x10'
address = split_args[0]
try:
length = split_args[1]
except IndexError:
# Use the default length
pass
try:
addr = self.convert_bin_str(address)
length = self.convert_bin_str(length)
instrs = self.se.disasm(addr, length, False)
except ValueError:
self.error('Invalid arguments')
return
except SpeakeasyError:
self.error('Failed to disassemble at address: %s' % (address))
return
for i in instrs:
self.info('0x%x: %s %s' % (i.address, i.mnemonic, i.op_str))
def load_module(self, module):
'''
Load a module into the emulation space
'''
if not os.path.exists(module):
self.error('Can\'t find module: %s' % (module))
else:
module = self.se.load_module(module)
self.loaded_modules.append(module)
def load_shellcode(self, sc_path, arch):
'''
Load shellcode into the emulation space
'''
if self.is_sc:
arch = arch.lower()
if arch in ('x86', 'i386'):
arch = e_arch.ARCH_X86
elif arch in ('x64', 'amd64'):
arch = e_arch.ARCH_AMD64
else:
raise Exception('Unsupported architecture: %s' % arch)
if not os.path.exists(sc_path):
self.error('Can\'t find shellcode: %s' % (sc_path))
else:
sc = self.se.load_shellcode(sc_path, arch)
self.loaded_shellcode.append(sc)
return sc
def do_restart(self, arg):
'''
Restart emulation from the entry point
'''
self.se = speakeasy.Speakeasy(logger=self.logger)
if self.target:
if not self.is_sc:
# Load the initial target module
self.load_module(self.target)
else:
self.load_shellcode(self.target, self.arch)
self.init_state()
self.do_run(None)
def do_load_module(self, arg):
'''
Wrapper to load a module
'''
self.load_module(arg)
def do_eb(self, args):
'''
Edit bytes at the specified address
Usage:
eb <address> <byte_string>
Example:
eb 0x401000 9090909090c3
'''
split_args = shlex.split(args)
if len(split_args) < 2:
self.error('Invalid arguments: eb <address> <byte_string>')
return
address = split_args[0]
address = self.convert_bin_str(address)
data = ''.join(split_args[1:])
# Do some basic normalization
if data.startswith('0x'):
data = data[2:]
data = data.replace(' ', '')
if len(data) % 2:
data = '0' + data
data = binascii.unhexlify(data)
self.write_mem(address, data)
def do_db(self, args):
'''
Dump bytes from emulated memory
Usage:
db <address> [length]
Example:
db 0x401000
'''
split_args = shlex.split(args)
if len(split_args) < 1:
self.error('Invalid arguments: db <address> <size>')
return
address = split_args[0]
address = self.convert_bin_str(address)
decoy = self.se.emu.get_mod_from_addr(address)
if decoy:
self.se.emu.map_decoy(decoy)
if len(split_args) == 1:
address = split_args[0]
address = self.convert_bin_str(address)
data = self.dump_mem(address, 0x50)
elif len(split_args) == 2:
address, length = split_args
address = self.convert_bin_str(address)
length = self.convert_bin_str(length)
data = self.dump_mem(address, length)
output = self.format_hexdump(data, address=address)
self.info(output)
def do_lm(self, args):
'''
List user modules loaded into the emulation space
Usage:
lm
'''
ums = self.se.get_user_modules()
self.info('Start\t\t\tEnd\t\t\tName\t\tPath')
for um in ums:
base = '0x%016x' % um.get_base()
end = '0x%016x' % (um.get_base() + um.get_image_size())
name = um.get_base_name().ljust(16)
path = um.get_emu_path()
self.info('%s\t%s\t%s%s' % (base, end, name, path))
def do_lmk(self, args):
'''
List kernel modules loaded into the emulation space
Usage:
lmk
'''
kms = self.se.get_sys_modules()
self.info('Start\t\t\tEnd\t\t\tName\t\tPath')
for km in kms:
base = '0x%016x' % km.get_base()
end = '0x%016x' % (km.get_base() + km.get_image_size())
name = km.get_base_name().ljust(16)
path = km.get_emu_path()
self.info('%s\t%s\t%s%s' % (base, end, name, path))
def do_reg(self, arg):
'''
Read or write the contents of the emulated cpu registers
Usage:
reg
reg <reg_to_read>
reg <reg_to_write>=<value>
'''
# Is the user requesting all registers?
regs = self.se.get_all_registers()
if not arg:
o = ''
for i, (r, v) in enumerate(regs.items()):
o += '%s=%s ' % (r, v)
if not ((i + 1) % 3):
o += '\n'
self.info(o)
return
# Is the user trying to modify a register?
reg_write = [a.strip() for a in arg.split('=')]
if len(reg_write) > 1:
if len(reg_write) != 2:
                self.error('Invalid register write syntax (e.g. eax=0)')
return
reg, val = reg_write
if not regs.get(reg):
self.error('Invalid register: %s' % (reg))
return
try:
int_val = self.convert_bin_str(val)
except ValueError:
self.error('Invalid write value')
return
if int_val is not None:
self.se.reg_write(reg, int_val)
return
val = regs.get(arg.lower())
if not val:
self.error('Invalid register: %s' % (arg))
else:
self.info('%s=%s' % (arg, val))
def do_run(self, arg):
'''Begin emulation of a loaded module'''
if not self.is_sc and not len(self.loaded_modules):
self.error('No modules have been loaded yet')
if not self.running:
if not self.is_sc:
if len(self.loaded_modules) == 1:
self.se.run_module(self.loaded_modules[0],
all_entrypoints=False)
else:
self.se.run_shellcode(self.loaded_shellcode[0], 0)
self.running = True
else:
self.step = False
self.se.resume(self.next_pc, count=-1)
def do_stepi(self, arg):
'''
Step into an instruction
'''
if not self.running:
self.step = True
self.running = True
if not self.is_sc:
self.se.run_module(self.loaded_modules[0],
all_entrypoints=False)
else:
self.se.run_shellcode(self.loaded_shellcode[0], 0)
else:
self.step = True
self.se.resume(self.next_pc, count=1)
def do_stack(self, arg):
'''
Show the current stack layout
'''
stack = self.se.emu.format_stack(16)
ptr_size = self.se.emu.get_ptr_size()
ptr_fmt = '0x%0' + str(ptr_size * 2) + 'x'
for loc in stack:
sp, ptr, tag = loc
if tag:
fmt = 'sp=0x%x:\t' + ptr_fmt + '\t->\t%s'
fmt = fmt % (sp, ptr, tag)
else:
fmt = 'sp=0x%x:\t' + ptr_fmt + '\t'
fmt = fmt % (sp, ptr)
self.info(fmt.expandtabs(5))
def do_strings(self, arg):
'''
Scan all memory segments for strings
'''
tgt_tag_prefixes = ('emu.stack', 'api')
for mmap in self.se.emu.get_mem_maps():
tag = mmap.get_tag()
base = mmap.get_base()
if (tag and tag.startswith(tgt_tag_prefixes) and
tag != self.se.emu.input.get('mem_tag')):
data = self.se.mem_read(mmap.get_base(), mmap.get_size()-1)
ansi_strings = self.se.emu.get_ansi_strings(data)
for offset, astr in ansi_strings:
addr = base + offset
self.info('0x%x: %s' % (addr, astr))
uni_strings = self.se.emu.get_unicode_strings(data)
for offset, wstr in uni_strings:
addr = base + offset
self.info('0x%x: %s' % (addr, wstr))
def do_exit(self, arg):
'''
Quit debugging
'''
self.exit = True
return True
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Debug a Windows binary with speakeasy')
parser.add_argument('-t', '--target', action='store', dest='target',
required=True, help='Path to input file to emulate')
parser.add_argument('-r', '--raw', action='store_true', dest='raw',
required=False, help='Attempt to emulate file as-is '
'with no parsing (e.g. shellcode)')
parser.add_argument('-a', '--arch', action='store', dest='arch',
required=False,
help='Force architecture to use during emulation (for '
'multi-architecture files or shellcode). '
'Supported archs: [ x86 | amd64 ]')
args = parser.parse_args()
dbg = SpeakeasyDebugger(args.target, args.raw, args.arch, logger=get_logger())
dbg.info('Welcome to the speakeasy debugger')
while True:
try:
dbg.cmdloop()
if dbg.exit:
break
except KeyboardInterrupt:
dbg.info('\n[*] User exited')
break
# Catch all other exceptions here
except Exception:
dbg.info(traceback.format_exc())
|
StarcoderdataPython
|
6642837
|
a = 5.3
b = 0.000000003
print(a)
print(b)
c = 1.0 / 3.0
print(c)
d = 10.0 * 0.5
print(d)
e = 6.0 + 10.5
print(e)
f = 19.6 - 4.3
print(f)
g = 4.3 - 19.6
print(g)
print("4.5" * 0.5)
|
StarcoderdataPython
|
11354459
|
<filename>hack/opamps/opamp_spaces.py
import logging
import re
from fonduer.candidates import MentionNgrams
from fonduer.candidates.models.implicit_span_mention import TemporaryImplicitSpanMention
logger = logging.getLogger(__name__)
class MentionNgramsCurrent(MentionNgrams):
def __init__(self, n_max=2, split_tokens=["-", "/"]):
super(MentionNgrams, self).__init__(n_max=n_max, split_tokens=split_tokens)
def apply(self, doc):
for ts in MentionNgrams.apply(self, doc):
m = re.match(r"^(±)?\s*(\d+)\s*(\.)?\s*(\d*)$", ts.get_span())
if m:
# Handle case that random spaces are inserted (e.g. "± 2 . 3")
temp = ""
if m.group(1):
temp += m.group(1)
if m.group(2):
temp += m.group(2)
if m.group(3):
temp += m.group(3)
if m.group(4):
temp += m.group(4)
yield TemporaryImplicitSpanMention(
sentence=ts.sentence,
char_start=ts.char_start,
char_end=ts.char_end,
expander_key="opamp_exp",
position=0,
text=temp,
words=[temp],
lemmas=[temp],
pos_tags=[ts.get_attrib_tokens("pos_tags")[-1]],
ner_tags=[ts.get_attrib_tokens("ner_tags")[-1]],
dep_parents=[ts.get_attrib_tokens("dep_parents")[-1]],
dep_labels=[ts.get_attrib_tokens("dep_labels")[-1]],
page=[ts.get_attrib_tokens("page")[-1]]
if ts.sentence.is_visual()
else [None],
top=[ts.get_attrib_tokens("top")[-1]]
if ts.sentence.is_visual()
else [None],
left=[ts.get_attrib_tokens("left")[-1]]
if ts.sentence.is_visual()
else [None],
bottom=[ts.get_attrib_tokens("bottom")[-1]]
if ts.sentence.is_visual()
else [None],
right=[ts.get_attrib_tokens("right")[-1]]
if ts.sentence.is_visual()
else [None],
meta=None,
)
else:
yield ts
|
StarcoderdataPython
|
8009996
|
<reponame>totalpunch/TPD-Pete
import subprocess
from .boto import BotoTool
class AWSCliTool(object):
@classmethod
def getRegion(cls, profile):
""" Get AWS region of a profile
"""
# Check if the AWS Cli is available
if cls.hasAWSCli() is False:
# Use boto3
return BotoTool.getRegion(profile)
# Open the AWS configuration
command = "cat ~/.aws/config"
# Request the config from AWS Cli
result = subprocess.run(command, shell=True, capture_output=True)
# Check the response
if result.returncode != 0:
raise Exception("Could not successfully get the profiles from AWS Cli. Has you run `aws configure`?")
# Parse the output
lines = (result.stdout).decode()
# Remember the region and profile of the config
region = None
configProfile = None
# Walk throught the lines
for line in lines.split("\n"):
# Check line length
if len(line) == 0:
continue
# Check if the next lines belong to a profile
if line[0] == "[":
# Save the config profile
configProfile = line[1:-1]
# Check if this is the region
elif line[:6] == "region":
# Check the profile
if configProfile == "default" and region is None:
region = line[9:]
elif configProfile == profile:
region = line[9:]
return region
@classmethod
def getProfiles(cls):
""" Get all AWS profiles
"""
# Check if the AWS Cli is available
if cls.hasAWSCli() is False:
# Use boto3
return BotoTool.getProfiles()
# Build the command
command = "cat ~/.aws/credentials | grep -o '\[[^]]*\]'"
# Request all the profiles from AWS Cli
result = subprocess.run(command, shell=True, capture_output=True)
# Check the response
if result.returncode != 0:
raise Exception("Could not successfully get the profiles from AWS Cli. Has you run `aws configure`?")
# Parse the output
output = (result.stdout).decode()
return [(line[1:-1]).strip() for line in output.split("\n")]
@classmethod
def getS3Buckets(cls, profile=None):
""" Get your S3 bucket
"""
# Check if the AWS Cli is available
if cls.hasAWSCli() is False:
# Use boto3
return BotoTool.getS3Buckets(profile)
# Build the command
command = "aws s3 ls"
# Check if there is an profile
if profile is not None:
command = command + " --profile %s" % profile
# Execute the command
result = subprocess.run(command, shell=True, capture_output=True)
# Check the response
if result.returncode != 0:
raise Exception("Could not successfully get the S3 buckets from AWS Cli. Do you have the right permissions?")
# Parse the output
output = (result.stdout).decode()
return [(line[19:]).strip() for line in output.split("\n")]
@classmethod
def hasAWSCli(cls):
""" Check if the AWS Cli is available
"""
# Build the command
command = "aws help"
# Request the config from AWS Cli
result = subprocess.run(command, shell=True, capture_output=True)
# Check the response
if result.returncode != 0:
return False
return True
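# Hedged usage sketch (added; not part of the original module). It assumes a
# configured AWS CLI or boto3 credentials and an existing "default" profile.
if __name__ == "__main__":
    if AWSCliTool.hasAWSCli():
        print(AWSCliTool.getProfiles())
        print(AWSCliTool.getRegion("default"))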
|
StarcoderdataPython
|
8025285
|
import rospy
import smach
import smach_ros
import threading
import time
from apc_msgs.srv import DoSegmentation,DoSegmentationRequest,FillUnfillBinsCollisionModel,FillUnfillBinsCollisionModelRequest
from sensor_msgs.msg import Image
################################################################################
#
# NOTE: Image captured here as the shelf cropping service returns a transformed
# point cloud (instead of kf_world) with respect to camera_rgb_optical_frame and
# image should be captured at this time
#
################################################################################
class ShelfBasedCropCloud(smach.State):
def __init__(self, action='crop_cloud'):
smach.State.__init__(self, input_keys=['cloud_data','next_item_to_pick'],
output_keys=['cloud_data'], outcomes=['succeeded', 'failed'])
# wait for the service to appear
rospy.loginfo('Waiting for /ros_kinfu/get_point_cloud to come up ...')
self.image = None
self.got_image = False
change_bin_srv_name = '/apc_3d_vision/split_cloud_change_bin'
self.action_ = action
srv_name = '/apc_3d_vision/split_cloud'
try:
rospy.wait_for_service(srv_name, timeout=1)
rospy.wait_for_service(change_bin_srv_name, timeout=1)
except:
rospy.logerr('Service of %s not available. Restart and try again.' % srv_name)
# rospy.signal_shutdown('')
if self.action_ == 'crop_kinfu_cloud':
self.image_sub = rospy.Subscriber('/kinect2/qhd/image_color',Image,self.image_callback)
else:
self.image_sub = rospy.Subscriber('/realsense/rgb/image_raw',Image,self.image_callback)
self.service = rospy.ServiceProxy(srv_name, DoSegmentation)
self.change_bin_service = rospy.ServiceProxy(change_bin_srv_name, FillUnfillBinsCollisionModel)
def image_callback(self,image):
if not self.got_image:
self.image = image
# ==========================================================
def execute(self, userdata):
try:
req = FillUnfillBinsCollisionModelRequest()
print self.action_
if self.action_ == 'crop_cloud':
req.bin_id.data = userdata['next_item_to_pick']['bin']
if self.action_ == 'crop_tote':
req.bin_id.data = 'tote'
if self.action_ == 'crop_kinfu_cloud':
req.bin_id.data = 'kinect'
res = self.change_bin_service.call(req)
print "Change bin service response = ", res
print self.action_
req = DoSegmentationRequest()
req.input_cloud = userdata['cloud_data']['full_cloud']
if self.action_ == 'crop_cloud':
req.input_cloud.header.frame_id = 'kf_world' ### BAD, why isn't this set?
if self.action_ == 'crop_tote':
req.input_cloud.header.frame_id = 'kf_world' ### BAD, why isn't this set?
if self.action_ == 'crop_kinfu_cloud':
req.input_cloud.header.frame_id = 'kinect2_rgb_optical_frame'
while self.image is None:
rospy.loginfo('Waiting for image ...')
time.sleep(0.15)
self.got_image = True
print "Seg cloud height: ", req.input_cloud.height, "\nSeg cloud width: ", req.input_cloud.width
res = self.service.call(req)
# print "Crop shelf service response = ", res
# if res.success.data:
userdata['cloud_data']['cropped_cloud'] = res.segmented_cloud
userdata['cloud_data']['image'] = self.image
self.got_image = False
return 'succeeded'
except Exception as e:
print e
print "Shelf crop state failed!!!!"
self.got_image = False
return 'failed'
|
StarcoderdataPython
|
6547520
|
from pytrends.request import TrendReq
import pandas as pd
from datetime import datetime, timedelta
from Preprocessing.helpers import date_to_datestring
from Preprocessing.base_class import Preprocessor
class Searchtrends(Preprocessor):
def __init__(self, interval, start_time, end_time):
"""
Initialise shared parameters.
:interval: the time interval at which the training data will be collected and batched
:start_time: earliest point from which data will be collected, as a datetime object
:end_time: final point at which data will be collected, as a datetime object
"""
self.pytrends = TrendReq(hl='en-US', tz=0)
self.interval = interval
self.start_time = start_time
self.end_time = end_time
def get_training_data(self, topic):
"""
Call API to collect target dataset over the defined time period. Returns fully formatted data as a
dataframe and summarized into intervals.
Note that training data here has not yet been split into data vs. targets
:topic: this will be the API specific target. E.g. a reddit subreddit or GDAX currency pair
"""
# Get topic suggestion
suggestion = self.pytrends.suggestions(topic)[0]['mid']
# For each time period chunk, call downloader
data = pd.DataFrame() # Empty list to append data
delta = timedelta(days=180) # Keep to 6 month periods to ensure daily intervals
slice_start = self.start_time
while slice_start != self.end_time:
slice_end = min(slice_start + delta, self.end_time)
print("downloading {} data from {} to {}".format(topic, slice_start, slice_end))
df = self.trend_downloader(
topic=[topic],
start=slice_start,
end=slice_end,
)
slice_start = slice_end
data = data.append(df)
return data
def trend_downloader(self, topic, start, end):
"""
For a specific time slice, requests search trend data by region normalized to 100 and combines it
:start: in datetime format
:end: in datetime format
"""
timeframe = date_to_datestring(start) + " " + date_to_datestring(end)
# Global data
self.pytrends.build_payload(topic, cat=0, timeframe=timeframe, geo='', gprop='')
WW_data = self.pytrends.interest_over_time()
WW_data.columns= ['Worldwide', 'isPartial']
data = WW_data['Worldwide'].to_frame()
# US Data
self.pytrends.build_payload(topic, cat=0, timeframe=timeframe, geo='US', gprop='')
US_data = self.pytrends.interest_over_time()
US_data.columns= ['US', 'isPartial']
data = data.join(US_data['US'])
# UK Data
self.pytrends.build_payload(topic, cat=0, timeframe=timeframe, geo='GB', gprop='')
GB_data = self.pytrends.interest_over_time()
GB_data.columns= ['GB', 'isPartial']
data = data.join(GB_data['GB'])
        # France Data
self.pytrends.build_payload(topic, cat=0, timeframe=timeframe, geo='FR', gprop='')
FR_data = self.pytrends.interest_over_time()
FR_data.columns= ['FR', 'isPartial']
data = data.join(FR_data['FR'])
# Germany Data
self.pytrends.build_payload(topic, cat=0, timeframe=timeframe, geo='DE', gprop='')
DE_data = self.pytrends.interest_over_time()
DE_data.columns= ['DE', 'isPartial']
data = data.join(DE_data['DE'])
# Russia Data
self.pytrends.build_payload(topic, cat=0, timeframe=timeframe, geo='RU', gprop='')
RU_data = self.pytrends.interest_over_time()
RU_data.columns= ['RU', 'isPartial']
data = data.join(RU_data['RU'])
# Korea Data
self.pytrends.build_payload(topic, cat=0, timeframe=timeframe, geo='KR', gprop='')
KR_data = self.pytrends.interest_over_time()
KR_data.columns= ['KR', 'isPartial']
data = data.join(KR_data['KR'])
return data
def get_test_data(self, topic):
"""
Call API to collect data for 1 time period only starting from now. Returns fully formatted data in dataframe.
Note that this function will be significantly simpler than get_training_data since there is no need to loop through
multiple time periods and aggregate multiple API calls
:topic: this will be the API specific target. E.g. a reddit subreddit or GDAX currency pair
"""
raise NotImplementedError("{} must override step()".format(self.__class__.__name__))
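# Hedged usage sketch (added; it would hit the Google Trends API, so it is left
# commented out; the '1d' interval value is an assumption):
# from datetime import datetime
# trends = Searchtrends(interval='1d', start_time=datetime(2018, 1, 1), end_time=datetime(2018, 12, 31))
# df = trends.get_training_data('Bitcoin')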
|
StarcoderdataPython
|
4915338
|
<filename>tests/models/xapi/fields/test_objects.py
"""Tests for the xAPI object fields"""
from hypothesis import given, provisional, settings
from hypothesis import strategies as st
from ralph.models.xapi.navigation.fields.objects import PageObjectField
@settings(max_examples=1)
@given(st.builds(PageObjectField, id=provisional.urls()))
def test_models_xapi_fields_object_page_object_field(field):
"""Tests that a page object field contains a definition with the expected values."""
assert field.definition.type == "http://activitystrea.ms/schema/1.0/page"
assert field.definition.name == {"en": "page"}
|
StarcoderdataPython
|
9677715
|
from typing import List, Optional
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def binaryTreePaths(self, root: Optional[TreeNode]) -> List[str]:
if root is None:
return []
def traversal(node, path):
if node.left is None and node.right is None:
answer.append("->".join(path))
if node.left:
path.append(str(node.left.val))
traversal(node.left, path)
path.pop()
if node.right:
path.append(str(node.right.val))
traversal(node.right, path)
path.pop()
answer = []
path = [str(root.val)]
traversal(root, path)
return answer
print("*".join(["1", "2"]))
|
StarcoderdataPython
|
12857526
|
<filename>registry/smart_contract/migrations/0009_auto_20180717_1242.py
# Generated by Django 2.0.7 on 2018-07-17 12:42
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('smart_contract', '0008_useraccept_company'),
]
operations = [
migrations.CreateModel(
name='Competence',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('competence_name', models.CharField(max_length=256)),
],
),
migrations.AddField(
model_name='comment',
name='competence',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='smart_contract.Competence'),
),
]
|
StarcoderdataPython
|
5044650
|
"""
********************************************************************************
compas_ags.diagrams
********************************************************************************
.. currentmodule:: compas_ags.diagrams
Graphs
======
.. autosummary::
:toctree: generated/
FormGraph
Diagrams
========
.. autosummary::
:toctree: generated/
Diagram
FormDiagram
ForceDiagram
"""
from __future__ import absolute_import
from .formgraph import * # noqa: F401 F403
from .diagram import * # noqa: F401 F403
from .formdiagram import * # noqa: F401 F403
from .forcediagram import * # noqa: F401 F403
__all__ = [name for name in dir() if not name.startswith('_')]
|
StarcoderdataPython
|
9637379
|
from newsblur_web.celeryapp import app
from utils import log as logging
@app.task()
def IndexSubscriptionsForSearch(user_id):
from apps.search.models import MUserSearch
user_search = MUserSearch.get_user(user_id)
user_search.index_subscriptions_for_search()
@app.task()
def IndexSubscriptionsChunkForSearch(feed_ids, user_id):
logging.debug(" ---> Indexing: %s for %s" % (feed_ids, user_id))
from apps.search.models import MUserSearch
user_search = MUserSearch.get_user(user_id)
user_search.index_subscriptions_chunk_for_search(feed_ids)
@app.task()
def IndexFeedsForSearch(feed_ids, user_id):
from apps.search.models import MUserSearch
MUserSearch.index_feeds_for_search(feed_ids, user_id)
@app.task()
def FinishIndexSubscriptionsForSearch(results, user_id, start):
logging.debug(" ---> Indexing finished for %s" % (user_id))
from apps.search.models import MUserSearch
user_search = MUserSearch.get_user(user_id)
user_search.finish_index_subscriptions_for_search(start)
|
StarcoderdataPython
|
5152654
|
<gh_stars>1-10
# Copyright (c) 2020 Cisco and/or its affiliates.
#
# This software is licensed to you under the terms of the Cisco Sample
# Code License, Version 1.1 (the "License"). You may obtain a copy of the
# License at
#
# https://developer.cisco.com/docs/licenses
#
# All use of the material herein must be in accordance with the terms of
# the License. All rights not expressly granted by the License are
# reserved. Unless required by applicable law or agreed to separately in
# writing, software distributed under the License is distributed on an "AS
# IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied.
from openpyxl import Workbook
from openpyxl.worksheet.table import Table, TableStyleInfo
from openpyxl.utils import get_column_letter
import argparse
import json
import pprint
import sys
import yaml
# Control debug
debug = False
class Logging(Exception):
"""
Exception class handling the exception raised by this script
"""
def fatal(self, msg):
"""
Prints an error message and aborts program execution
"""
sys.stderr.write(msg+"\n")
sys.exit(1)
def warning(self, msg):
"""
Prints a warning message to stderr
"""
sys.stderr.write(msg+"\n")
def print_message(self, msg):
"""
Prints a message to stdout
"""
print(msg)
LOG = Logging()
def read_arguments():
"""
"""
parser = argparse.ArgumentParser("Usage: state-discovery.py")
parser.add_argument("--input", "-i", dest="input_file", help=" Discovery Input File Name", default="discovery_results.json", required=False)
parser.add_argument("--output", "-o", dest="output_file", help=" Output Excel File Name", default="discovery_results_state.xlsx", required=False)
parser.add_argument("--composer", "-c", dest="composer", help=" Excel Composer File", default="./composer.yaml", required=False)
args = parser.parse_args()
return args
def load_yaml(file):
"""
"""
try:
with open(file, "r") as file:
dictionary = yaml.load(file, Loader=yaml.SafeLoader)
file.close()
return(dictionary)
except:
LOG.fatal("Error occured while importing %s as YAML input" % file)
def load_json(file):
"""
"""
try:
with open(file, "r") as file:
dictionary = json.load(file)
file.close()
return(dictionary)
except:
LOG.fatal("Error occured while importing %s as JSON input" % file)
def get_child_headers(headers):
result_headers = []
# loop through entries if provided headers is a dictionary
if type(headers) == dict:
for header_name in headers:
if header_name.startswith("__"):
child_headers = get_child_headers(headers[header_name])
                result_headers = result_headers + child_headers
else:
result_headers.append(header_name)
else:
LOG.warning("Non-dictionary child headers where specivied in excel composer. Entry: \"%s\"" % headers)
return(result_headers)
def get_child_data(input_data, key_dict):
result_entries = []
# loop through entries, if input is a list
if type(input_data) == list:
for entry in input_data:
result_item = {}
for header_name in key_dict:
if header_name.startswith("__"):
parent_header_name = header_name[2:]
child_results = get_child_data(entry[parent_header_name], key_dict[header_name])
if type(child_results) == list:
if len(child_results) == 1:
result_item.update(dict(child_results[0]))
else:
for child_entry in child_results:
child_entry.update(result_item)
result_entries.append(child_entry)
else:
## To be implemented
Logging.warning("Error while getting child data in dict format")
else:
try:
result_item[header_name] = entry[key_dict[header_name]]
except KeyError:
# Handle cases where the dictionary key are not defined
result_item[header_name] = ""
result_entries.append(result_item)
elif type(input_data) == dict:
result_item = {}
for header_name in key_dict:
if header_name.startswith("__"):
parent_header_name = header_name[2:]
child_results = get_child_data(input_data[parent_header_name], key_dict[header_name])
result_entries = result_entries + child_results
else:
try:
result_item[header_name] = input_data[key_dict[header_name]]
except KeyError:
# Handle cases where the dictionary key are not defined
result_item[header_name] = ""
result_entries.append(result_item)
else:
for entry in input_data:
result_item = {}
for header_name in key_dict.keys():
if header_name.startswith("__"):
parent_header_name = header_name[2:]
child_results = get_child_data(entry[parent_header_name], entry[key_dict[header_name]])
result_entries = result_entries + child_results
else:
try:
result_item[header_name] = entry[key_dict[header_name]]
except KeyError:
# Handle cases where the dictionary key are not defined
result_item[header_name] = ""
result_entries.append(result_item)
return(result_entries)
def flatten_list(baseline, input, entry_name):
result = []
if len(baseline) > 0:
for b in baseline:
for i in input:
entry = {}
entry = b.copy()
if type(i) == dict:
entry.update(i)
else:
tmp_dict = {}
tmp_dict[entry_name] = i
entry.update(tmp_dict)
result.append(entry)
else:
for i in input:
entry = {}
if type(i) == dict:
entry.update(i)
else:
tmp_dict = {}
tmp_dict[entry_name] = i
entry.update(tmp_dict)
result.append(entry)
return(result)
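# Added illustration (assumed intent): flatten_list produces a cartesian-style
# merge, e.g. baseline=[{'device': 'sw1'}], input=['Gi1', 'Gi2'],
# entry_name='interface' yields
# [{'device': 'sw1', 'interface': 'Gi1'}, {'device': 'sw1', 'interface': 'Gi2'}].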
def analyse_data(composer, sheet_name, discovered_state):
analysed_data = []
# loop through discovered_state on a per-device basis
for device in discovered_state:
device_analysed_data = []
LOG.print_message(" - Crunshing data from device \"%s\"" % device)
data_entries = {}
analyzed_entry = {}
# Loop through the sheet keys and gather data
for header_name in composer[sheet_name].keys():
data_field_name = composer[sheet_name][header_name]
# Handle special data field named __device_name__
if data_field_name == "__device_name__":
data_entries["device_name"] = device
# Handle nested data fields (keys starting with "__")
elif header_name.startswith("__"):
parent_header_name = header_name[2:]
child_data = get_child_data(discovered_state[device][parent_header_name], data_field_name)
data_entries[header_name] = child_data
else:
data_entries[header_name] = discovered_state[device][data_field_name]
# Check if any of the found data entries are lists
list_entries = []
non_list_entries = []
for entry in data_entries.keys():
if type(data_entries[entry]) == list:
list_entries.append(entry)
else:
non_list_entries.append(entry)
# Build analysed data entry
if len(non_list_entries) > 0: # Starting with non-list entries, as these doesn't need to be flattend
for entry in non_list_entries:
analyzed_entry[entry] = data_entries[entry]
device_analysed_data.append(analyzed_entry)
if len(list_entries) > 0:
for entry in list_entries: # Flatten list entries
flattend_data = flatten_list(device_analysed_data, data_entries[entry], entry)
device_analysed_data = flattend_data
analysed_data = analysed_data + device_analysed_data
return(analysed_data)
def create_worksheet(excel_workbook,sheet_name,headers,analysed_data):
"""
"""
sheet = excel_workbook.create_sheet(title = sheet_name)
if len(headers) > 0:
for i in range(0,len(headers)):
sheet.cell(column=i+1, row=1 , value = headers[i])
row_id = 2
for element in analysed_data:
for i in range(0,len(headers)):
                try:
                    sheet.cell(column = i+1, row = row_id, value = "{0}".format(str(element[headers[i]])))
                except:
                    sheet.cell(column = i+1, row = row_id, value = "")
row_id = row_id + 1
    table_cells = "A1:" + get_column_letter(len(headers)) + str(row_id - 1)
style = TableStyleInfo(name = "TableStyleMedium9" , showRowStripes="True" )
table = Table(displayName = sheet_name , ref = table_cells)
table.tableStyleInfo = style
sheet.add_table(table)
return excel_workbook
def create_workbook(composer, discovered_state):
excel_workbook = Workbook()
position = 0
for sheet in composer.keys():
LOG.print_message("Defining headers for sheet \"%s\"" % sheet)
# Define sheet headers
headers = []
for key in composer[sheet].keys():
# Skip entries starting with "__" as this indicates that attributes are nested in the structure
if not key.startswith("__"):
headers.append(key)
else:
child_headers = get_child_headers(composer[sheet][key])
headers = headers + child_headers
# Analyze data
LOG.print_message("Analyzing data for sheet \"%s\"" % sheet)
analysed_data = analyse_data(composer, sheet, discovered_state)
# Create worksheet
LOG.print_message("Creating workbook for sheet \"%s\"" % sheet)
excel_workbook = create_worksheet(excel_workbook, sheet, headers, analysed_data)
return excel_workbook
def main():
# Read arguments
args = read_arguments()
# Define dictionary to store command output
output = {}
# Read list of commands and inforamtion about how to compose the Excel
LOG.print_message("Reading discovery output")
discovery_output = load_json(args.input_file)
LOG.print_message("Reading Excel composer")
excel_composer = load_yaml(args.composer)
# Compose Excel with discovered output
excel_workbook = create_workbook(excel_composer, discovery_output)
# Saving Excel to disk
LOG.print_message("Saving workbook to disk")
excel_workbook.save(args.output_file)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
5109445
|
import xml.etree.ElementTree as ET
from programy.parser.template.nodes.base import TemplateNode
from programy.parser.template.nodes.log import TemplateLogNode
from programytest.parser.template.graph_tests.graph_test_client import TemplateGraphTestClient
class TemplateGraphLogTests(TemplateGraphTestClient):
def test_log_node_from_xml_default_values(self):
template = ET.fromstring("""
<template>
<log>Text</log>
</template>
""")
root = self.parser.parse_template_expression(template)
self.assertIsNotNone(root)
self.assertIsInstance(root, TemplateNode)
self.assertIsNotNone(root.children)
self.assertEqual(len(root.children), 1)
node = root.children[0]
self.assertIsNotNone(node)
self.assertIsInstance(node, TemplateLogNode)
def test_log_node_from_xml_logging(self):
template = ET.fromstring("""
<template>
<log output="logging">Text</log>
</template>
""")
root = self.parser.parse_template_expression(template)
self.assertIsNotNone(root)
self.assertIsInstance(root, TemplateNode)
self.assertIsNotNone(root.children)
self.assertEqual(len(root.children), 1)
node = root.children[0]
self.assertIsNotNone(node)
self.assertIsInstance(node, TemplateLogNode)
def test_log_node_from_xml_logging_level(self):
template = ET.fromstring("""
<template>
<log output="logging" level="debug>Text</log>
</template>
""")
root = self.parser.parse_template_expression(template)
self.assertIsNotNone(root)
self.assertIsInstance(root, TemplateNode)
self.assertIsNotNone(root.children)
self.assertEqual(len(root.children), 1)
node = root.children[0]
self.assertIsNotNone(node)
self.assertIsInstance(node, TemplateLogNode)
def test_log_node_from_xml_print(self):
template = ET.fromstring("""
<template>
<log output="print">Text</log>
</template>
""")
root = self.parser.parse_template_expression(template)
self.assertIsNotNone(root)
self.assertIsInstance(root, TemplateNode)
self.assertIsNotNone(root.children)
self.assertEqual(len(root.children), 1)
node = root.children[0]
self.assertIsNotNone(node)
self.assertIsInstance(node, TemplateLogNode)
|
StarcoderdataPython
|
6577243
|
"""Handle the loading and initialization of game sessions."""
from __future__ import annotations
from typing import Optional
import copy
import lzma
import pickle
import traceback
from PIL import Image # type: ignore
import tcod
from engine import Engine
from game_map import GameWorld
import color
import entity_factories
import input_handlers
import components.ai as ai
import components.config as cfg
# Load the background image. Pillow returns an object convertible into a NumPy array.
background_image = Image.open("data/menu_background.png")
def new_game() -> Engine:
"""Return a brand new game session as an Engine instance."""
map_width = cfg.w #80
map_height = cfg.h #43
room_max_size = 18 #10
room_min_size = 18 #6
max_rooms = 1 # 30
if entity_factories.player.ai.is_dqn:
player = entity_factories.player # for dqn agent
else:
player = copy.deepcopy(entity_factories.player)
engine = Engine(player=player)
engine.game_world = GameWorld(
engine=engine,
max_rooms=max_rooms,
room_min_size=room_min_size,
room_max_size=room_max_size,
map_width=map_width,
map_height=map_height,
)
engine.game_world.generate_floor()
engine.update_fov()
engine.message_log.add_message("Hello and welcome, adventurer, to yet another dungeon!", color.welcome_text)
dagger = copy.deepcopy(entity_factories.dagger)
leather_armor = copy.deepcopy(entity_factories.leather_armor)
dagger.parent = player.inventory
leather_armor.parent = player.inventory
player.inventory.items.append(dagger)
player.equipment.toggle_equip(dagger, add_message=False)
player.inventory.items.append(leather_armor)
player.equipment.toggle_equip(leather_armor, add_message=False)
# auto movements
#n = 0
prev_score = 0
game = True
while game:
if engine.player.is_alive:
engine.handle_player_turns()
engine.handle_enemy_turns()
engine.update_fov()
engine.game_map.explored |= engine.game_map.visible
for i in engine.game_map.entities:
# run 100 rounds on each map
if i.name == cfg.prey and i.ai.is_new_round() and i.ai.get_rounds()%3==0:
print('==================',i.ai.get_rounds(),'======================')
# win rate every 100 rounds
print('win rate', (i.ai.get_score()-prev_score)/3)
prev_score = i.ai.get_score()
#print('thief score:',i.ai.get_score())
engine.player.ai.save_agent()
if i.ai.get_rounds()%3==0:
print('------------------------------END--------------------------')
game = False
break
if engine.player.level.requires_level_up:
level_up = input_handlers.LevelUpEventHandler(engine)
else:
engine.update_fov()
break
### aie
return engine
def load_game(filename: str) -> Engine:
"""Load an Engine instance from a file."""
with open(filename, "rb") as f:
engine = pickle.loads(lzma.decompress(f.read()))
assert isinstance(engine, Engine)
return engine
class MainMenu(input_handlers.BaseEventHandler):
"""Handle the main menu rendering and input."""
def on_render(self, console: tcod.Console) -> None:
"""Render the main menu on a background image."""
console.draw_semigraphics(background_image, 0, 0)
console.print(
console.width // 2,
console.height // 2 - 4,
"TOMBS OF THE ANCIENT KINGS",
fg=color.menu_title,
alignment=tcod.CENTER,
)
console.print(
console.width // 2,
console.height - 2,
"By <NAME>",
fg=color.menu_title,
alignment=tcod.CENTER,
)
menu_width = 24
for i, text in enumerate(["[N] Play a new game", "[C] Continue last game", "[Q] Quit"]):
console.print(
console.width // 2,
console.height // 2 - 2 + i,
text.ljust(menu_width),
fg=color.menu_text,
bg=color.black,
alignment=tcod.CENTER,
bg_blend=tcod.BKGND_ALPHA(64),
)
def ev_keydown(self, event: tcod.event.KeyDown) -> Optional[input_handlers.BaseEventHandler]:
if event.sym in (tcod.event.K_q, tcod.event.K_ESCAPE):
raise SystemExit()
elif event.sym == tcod.event.K_c:
try:
return input_handlers.MainGameEventHandler(load_game("savegame.sav"))
except FileNotFoundError:
return input_handlers.PopupMessage(self, "No saved game to load.")
except Exception as exc:
traceback.print_exc() # Print to stderr.
return input_handlers.PopupMessage(self, f"Failed to load save:\n{exc}")
elif event.sym == tcod.event.K_n:
return input_handlers.MainGameEventHandler(new_game())
return None
|
StarcoderdataPython
|
9697213
|
<reponame>natgeosociety/marapp-metrics
"""
Copyright 2018-2020 National Geographic Society
Use of this software does not constitute endorsement by National Geographic
Society (NGS). The NGS name and NGS logo may not be used for any purpose without
written permission from NGS.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License. You may obtain a copy of the
License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import os
import ee
GOOGLE_SERVICE_ACCOUNT = os.environ.get("GOOGLE_SERVICE_ACCOUNT")
def initialize_google_ee():
"""Initialize the EE library."""
if GOOGLE_SERVICE_ACCOUNT:
credentials = ee.ServiceAccountCredentials(
None, key_data=GOOGLE_SERVICE_ACCOUNT
)
ee.Initialize(credentials)
else:
ee.Initialize()
def map_function(image, scale, reducers, keep_geom, best_effort, max_pixels, band=True):
def reducer_wrapper(feat):
geom = feat.geometry()
for key, reducer in reducers.items():
result = image.reduceRegion(
reducer=reducer,
geometry=geom,
scale=scale,
maxPixels=max_pixels,
bestEffort=best_effort,
crs="EPSG:4326",
)
if not keep_geom:
feat = feat.setGeometry(None)
if band:
result = result.get(key)
feat = feat.set({key: result})
return feat
return reducer_wrapper
def simple_mask_function(im, mask_im, **kwargs):
"""
Applies a simple mask onto im with a single QA value from mask_im.
"""
mask = None
for k, v in kwargs.items():
if str(k) == "gt":
mask = mask_im.gt(v)
elif str(k) == "gte":
mask = mask_im.gte(v)
elif str(k) == "lt":
mask = mask_im.lt(v)
elif str(k) == "lte":
mask = mask_im.lte(v)
elif str(k) == "eq":
mask = mask_im.eq(v)
elif str(k) == "eq_or":
v = list(v)
mask = mask_im.eq(v[0]).Or(mask_im.eq(v[1]))
elif str(k) == "range":
v = list(v)
mask = mask_im.gte(v[0]).And(mask_im.lt(v[1]))
if mask is not None:
return im.updateMask(mask)
def filter_fires(im):
"""
Earth engine QA filter for fires
"""
burn_dates = im.select("BurnDate")
valid_dates = burn_dates.gt(0).And(burn_dates.lt(367))
valid_qa = im.select("QA").lte(4)
# keep QA values 1-4 (5 is detection over agricultural areas)
mask = valid_dates.And(valid_qa)
return im.updateMask(mask)
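# A hedged usage sketch of the helpers above: it wires initialize_google_ee and
# map_function together for a zonal mean. The image asset, band name, feature
# collection path, scale and max_pixels are illustrative placeholders, not values
# used elsewhere in this module.
def example_zonal_mean():
    initialize_google_ee()
    image = ee.Image("UMD/hansen/global_forest_change_2019_v1_7")  # placeholder asset
    features = ee.FeatureCollection("users/example/protected_areas")  # placeholder asset
    # Key the reducers dict by the band name so reduceRegion's result can be read
    # back with the same key inside map_function's wrapper.
    reducers = {"treecover2000": ee.Reducer.mean()}
    wrapper = map_function(
        image.select("treecover2000"),
        scale=30,
        reducers=reducers,
        keep_geom=False,
        best_effort=True,
        max_pixels=1e9,
    )
    return features.map(wrapper).getInfo()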
|
StarcoderdataPython
|
1729750
|
<reponame>ForrestPi/FaceProjects
import setuptools
setuptools.setup(
name = "qualityface",
version = "1.0.3",
author = "<NAME>",
author_email = "<EMAIL>",
description="Quality face in Pytorch",
long_description="Quality Face model which decides how suitable of an input face for face recognition system",
long_description_content_type='text/markdown',
url = "https://github.com/siriusdemon/pytorch-QualityFace",
packages=setuptools.find_packages(),
package_data = {
'qualityface': ['checkpoints/last.pth'],
},
classifiers = [
"Programming Language :: Python :: 3",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
],
install_requires = [
'opencv-python',
'numpy',
'siriusbackbone',
'pillow',
]
)
|
StarcoderdataPython
|
8018518
|
# Copyright (c) 2016. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
from .species import split_species_prefix
from .allele_name import parse_allele_name, AlleleName
from .allele_parse_error import AlleleParseError
def infer_alpha_chain(beta):
"""
Given a parsed beta chain of a class II MHC, infer the most frequent
corresponding alpha chain.
"""
if beta.gene.startswith("DRB"):
return AlleleName(species="HLA", gene="DRA1", allele_family="01", allele_code="01")
elif beta.gene.startswith("DPB"):
# Most common alpha chain for DP is DPA*01:03 but we really
# need to change this logic to use a lookup table of pairwise
# frequencies for inferring the alpha-beta pairing
return AlleleName(
species="HLA", gene="DPA1", allele_family="01", allele_code="03")
elif beta.gene.startswith("DQB"):
# Most common DQ alpha (according to wikipedia)
# DQA1*01:02
return AlleleName(
species="HLA", gene="DQA1", allele_family="01", allele_code="02")
return None
def parse_classi_or_classii_allele_name(name, infer_pair=True):
"""
Handle different forms of both single and alpha-beta allele names.
Alpha-beta alleles may look like:
DPA10105-DPB110001
HLA-DPA1*01:05-DPB1*100:01
hla-dpa1*0105-dpb1*10001
dpa1*0105-dpb1*10001
HLA-DPA1*01:05/DPB1*100:01
Other class II alleles may look like:
DRB1_0102
DRB101:02
HLA-DRB1_0102
"""
species, name = split_species_prefix(name)
# Handle the case where alpha/beta pairs are separated with a /.
name = name.replace("/", "-")
    # Ignore underscores, such as in DRB1_0102
name = name.replace("_", "*")
parts = name.split("-")
if len(parts) == 2:
alpha_string, beta_string = parts
alpha = parse_allele_name(alpha_string)
beta = parse_allele_name(beta_string)
return (alpha, beta)
elif len(parts) == 1:
parsed = parse_allele_name(name, species)
if parsed.species == "HLA" and infer_pair:
alpha = infer_alpha_chain(parsed)
if alpha is not None:
return (alpha, parsed)
return (parsed,)
else:
raise AlleleParseError(
"Allele has too many parts: %s" % name)
|
StarcoderdataPython
|
9627381
|
<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 13 12:43:34 2019
@author: bendowdell
"""
# =============================================================================
# Part A: House Hunting
# You have graduated from MIT and now have a great job! You move to the San Francisco Bay Area and
# decide that you want to start saving to buy a house. As housing prices are very high in the Bay Area,
# you realize you are going to have to save for several years before you can afford to make the down
# payment on a house. In Part A, we are going to determine how long it will take you to save enough
# money to make the down payment given the following assumptions:
# 1. Call the cost of your dream home total_cost .
# 2. Call the portion of the cost needed for a down payment portion_down_payment . For
# simplicity, assume that portion_down_payment = 0.25 (25%).
# 3. Call the amount that you have saved thus far current_savings . You start with a current
# savings of $0.
# 4. Assume that you invest your current savings wisely, with an annual return of r (in other words,
# at the end of each month, you receive an additional current_savings*r/12 funds to put into
# your savings – the 12 is because r is an annual rate). Assume that your investments earn a
# return of r = 0.04 (4%).
# 5. Assume your annual salary is annual_salary .
# 6. Assume you are going to dedicate a certain amount of your salary each month to saving for
# the down payment. Call that portion_saved . This variable should be in decimal form (i.e. 0.1
# for 10%).
# 7. At the end of each month, your savings will be increased by the return on your investment,
# plus a percentage of your monthly salary (annual salary / 12).
# Write a program to calculate how many months it will take you to save up enough money for a down
# payment. You will want your main variables to be floats, so you should cast user inputs to floats.
# Your program should ask the user to enter the following variables:
# 1. The starting annual salary (annual_salary)
# 2. The portion of salary to be saved (portion_saved)
# 3. The cost of your dream home (total_cost)
#
# Test Case 1
# >>>
# Enter your annual salary: 120000
# Enter the percent of your salary to save, as a decimal: .10
# Enter the cost of your dream home: 1000000
# Number of months: 183
# >>>
# Test Case 2
# >>>
# Enter your annual salary: 80000
# Enter the percent of your salary to save, as a decimal: .15
# Enter the cost of your dream home: 500000
# Number of months: 105
# >>>
# =============================================================================
# Begin by getting the basic input from the user
# Ask the user for their annual_salary and cast it as a float
annual_salary = float(input('Enter your annual salary: '))
# Ask the user for the percentage of annual_salary to save
portion_saved = float(input('Enter the percent of your salary to save, as a decimal: '))
# Ask the user for the cost of the house to purchase
total_cost = float(input('Enter the cost of your dream home: '))
# Now calculate how many months it will take to save up for the down payment
portion_down_payment = 0.25*total_cost
current_savings = 0.00
# calculate monthly salary
monthly_salary = annual_salary / 12
# calculate monthly savings
monthly_savings = monthly_salary * portion_saved
# initialize a counter for months
months = 0
# current_savings are invested on a monthly basis with a rate of return r
r = 0.04
# use a while loop to iteratively add to current_savings to determine
# how many months are required to save up to portion_down_payment
# 7. At the end of each month, your savings will be increased by the return on your investment,
# plus a percentage of your monthly salary (annual salary / 12).
while current_savings < portion_down_payment:
current_savings += (current_savings*r/12) + monthly_savings
months += 1
print('Number of months: {}'.format(months))
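# A hedged sketch of the same monthly-compounding loop wrapped in a function, so the
# two test cases from the header comment can be checked without typing at the prompt.
# The function name and default arguments are illustrative additions, not part of the
# assignment's required solution.
def months_to_down_payment(annual_salary, portion_saved, total_cost,
                           r=0.04, portion_down_payment=0.25):
    target = portion_down_payment * total_cost
    monthly_deposit = (annual_salary / 12) * portion_saved
    savings = 0.0
    months = 0
    while savings < target:
        # End of month: investment return on the current balance plus the new deposit.
        savings += savings * r / 12 + monthly_deposit
        months += 1
    return months

# The header's test cases give 183 and 105 months respectively.
assert months_to_down_payment(120000, 0.10, 1000000) == 183
assert months_to_down_payment(80000, 0.15, 500000) == 105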
|
StarcoderdataPython
|
342963
|
#!/usr/bin/env python
"""This module contains the PathSet Class.
Working with ISIS can result in a lot of files to keep track of.
The PathSet Class is simply a mutable set that only takes
:class:`pathlib.Path` objects. If you need to keep track of a
bunch of files (typically to delete them after a set of processing
calls), you can use a :class:`.PathSet` to keep track of them, and then
delete them, like so::
import kalasiris as isis
to_delete = isis.PathSet()
input_p = Path('some.fits')
output_p = input_p.with_suffix('.done.cub')
isis_cub = to_delete.add(input_p.with_suffix('.cub'))
    isis.lorri2isis(input_p, to=isis_cub)
    first = to_delete.add(input_p.with_suffix('.1.cub'))
    isis.some_program(isis_cub, to=first)
    second = to_delete.add(input_p.with_suffix('.2.cub'))
    isis.some_program(first, to=second)
isis.final_step(second, to=output_p)
to_delete.unlink()
"""
# Copyright 2019-2020, <NAME> (<EMAIL>)
#
# Reuse is permitted under the terms of the license.
# The AUTHORS file and the LICENSE file are at the
# top level of this library.
from pathlib import Path
class PathSet(set):
"""A class for containing a set of :class:`pathlib.Path` objects."""
def __init__(self, iterable=None):
if iterable:
for value in iterable:
if not isinstance(value, Path):
raise TypeError("only accepts pathlib.Path objects")
super().__init__(iterable)
else:
super().__init__()
def add(self, elem) -> Path:
"""This variation on add() returns the element."""
if not isinstance(elem, Path):
raise TypeError("only accepts pathlib.Path objects")
if elem in self:
raise ValueError(
f"The {elem} object is already a member of the PathSet."
)
super().add(elem)
return elem
def unlink(self):
"""Just runs Path.unlink() on all members."""
for p in self:
p.unlink()
|
StarcoderdataPython
|
8062426
|
<gh_stars>1-10
#!/usr/bin/python3.4
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
import sys
import os
import platform
from glob import glob
from setuptools import setup, find_packages
NAME = "pycopia3-QA"
VERSION = "1.0"
CACHEDIR="/var/cache/pycopia"
ISLINUX = platform.system() == "Linux"
if ISLINUX:
DISTNAME, _version, _distid = platform.linux_distribution()
else:
DISTNAME = ""
# Some services, such as the Pyro nameserver, are set up to run as the
# "tester" psuedo-user. This also creates the "testers" group that testing
# personnel should also be a member of.
def system_setup():
if ISLINUX:
import os, pwd, grp
if os.getuid() == 0:
if DISTNAME.startswith("Gentoo"):
try:
pwent = pwd.getpwnam("tester")
except KeyError:
os.system("groupadd testers")
os.system("useradd -c Tester -g testers "
"-G users.uucp,audio,cdrom,dialout,video,games,usb,crontab,messagebus,plugdev "
"-m tester")
print ("Remember to change password for new user tester.")
#os.system("passwd tester")
pwent = pwd.getpwnam("tester")
if not os.path.isdir(CACHEDIR):
tgrp = grp.getgrnam("testers")
os.mkdir(CACHEDIR)
os.chown(CACHEDIR, pwent.pw_uid, tgrp.gr_gid)
os.chmod(CACHEDIR, 0o770)
if ISLINUX:
DATA_FILES = [
('/etc/pycopia', glob("etc/*.dist")),
]
if DISTNAME.startswith("Gentoo"):
DATA_FILES.append(('/etc/init.d', glob("etc/init.d/gentoo/*")))
elif DISTNAME.startswith("Red") or DISTNAME.startswith("Cent"):
DATA_FILES.append(('/etc/init.d', glob("etc/init.d/redhat/*")))
if os.path.isdir("/etc/systemd/system"):
DATA_FILES.append(('/etc/systemd/system', glob("etc/systemd/system/*")))
SCRIPTS = glob("bin/*")
WEBSITE = os.environ.get("WEBSITE", "localhost")
DATA_FILES.extend([
#(os.path.join("/var", "www", WEBSITE, 'htdocs'), glob("doc/html/*.html")),
#(os.path.join("/var", "www", WEBSITE, 'cgi-bin'), glob("doc/html/cgi-bin/*.py")),
(os.path.join("/var", "www", WEBSITE, 'media', 'js'), glob("media/js/*.js")),
(os.path.join("/var", "www", WEBSITE, 'media', 'css'), glob("media/css/*.css")),
#(os.path.join("/var", "www", WEBSITE, 'media', 'images'), glob("media/images/*.png")),
])
else:
DATA_FILES = []
SCRIPTS = []
setup (name=NAME, version=VERSION,
namespace_packages = ["pycopia"],
packages = find_packages(),
# install_requires = [
# 'pycopia3-CLI',
# 'chardet>=2.2',
# ],
dependency_links = [
"http://www.pycopia.net/download/"
],
scripts = SCRIPTS,
data_files = DATA_FILES,
package_data = {"": ['*.glade']},
test_suite = "test.QATests",
description = "Pycopia packages to support professional QA roles.",
long_description = """Pycopia packages to support professional QA roles.
A basic QA automation framework. Provides base classes for test cases,
test suites, test runners, reporting, lab models, terminal emulators,
remote control, and other miscellaneous functions.
""",
license = "LGPL",
author = "<NAME>",
author_email = "<EMAIL>",
keywords = "pycopia QA framework",
url = "http://www.pycopia.net/",
#download_url = "ftp://ftp.pycopia.net/pub/python/%s.%s.tar.gz" % (NAME, VERSION),
classifiers = ["Operating System :: POSIX",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Software Development :: Quality Assurance",
"Intended Audience :: Developers"],
)
if "install" in sys.argv:
system_setup()
|
StarcoderdataPython
|
6565120
|
<reponame>mariuslihet/CRM<filename>common/views.py
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect
from django.http.response import Http404
from django.contrib.auth import logout, authenticate, login
from django.contrib.auth.hashers import check_password
from common.models import User
from common.forms import UserForm, LoginForm, ChangePasswordForm
def admin_required(function):
def wrapper(request, *args, **kwargs):
user = request.user
if user.is_authenticated:
if user.role == "ADMIN":
return function(request, *args, **kwargs)
else:
raise Http404
else:
return redirect("common:login")
return wrapper
@login_required
def home(request):
return render(request, 'index.html')
@login_required
def change_pass(request):
error, errors = "", ""
form = ChangePasswordForm()
if request.method == 'POST':
form = ChangePasswordForm(request.POST)
if form.is_valid():
user = request.user
if not check_password(request.POST['CurrentPassword'], user.password):
error = "Invalid old password"
else:
user.set_password(request.POST.get('Newpassword'))
user.is_active = True
user.save()
return HttpResponseRedirect('/')
else:
errors = form.errors
return render(request, "change_password.html", {'error': error,
'errors': errors})
@login_required
def profile(request):
user = request.user
user_obj = User.objects.get(id=user.id)
return render(request, "profile.html", {'user_obj': user_obj})
def login_crm(request):
if request.user.is_authenticated:
return HttpResponseRedirect('/')
if request.method == 'POST':
form = LoginForm(request.POST, request=request)
if form.is_valid():
user = authenticate(username=request.POST.get('email'), password=request.POST.get('password'))
if user is not None:
if user.is_active:
login(request, user)
return HttpResponseRedirect('/')
else:
return render(request, "login.html", {"error": True, "message": "Your Account is InActive. Please Contact Administrator"})
else:
return render(request, "login.html", {"error": True, "message": "Your Account is not Found. Please Contact Administrator"})
else:
return render(request, "login.html", {"error": True, "message": "Your username and password didn't match. Please try again."})
return render(request, 'login.html')
def forgot_password(request):
return render(request, 'forgot_password.html')
def logout_crm(request):
logout(request)
request.session.flush()
return redirect("common:login")
@admin_required
def users_list(request):
users_list = User.objects.all()
page = request.POST.get('per_page')
first_name = request.POST.get('first_name')
last_name = request.POST.get('last_name')
username = request.POST.get('username')
email = request.POST.get('email')
active_users = User.objects.filter(is_active=True)
inactive_users = User.objects.filter(is_active=False)
if first_name:
users_list = users_list.filter(first_name__icontains=first_name)
if last_name:
users_list = users_list.filter(last_name__icontains=last_name)
if username:
users_list = users_list.filter(username__icontains=username)
if email:
users_list = users_list.filter(email__icontains=email)
return render(request, "list.html", {
'users': users_list, 'active_users': active_users,
'per_page': page, 'inactive_users': inactive_users
})
@admin_required
def create_user(request):
user_form = UserForm()
if request.method == 'POST':
user_form = UserForm(request.POST)
if user_form.is_valid():
user = user_form.save(commit=False)
if request.POST.get("password"):
user.set_password(request.POST.get("password"))
user.save()
return redirect("common:users_list")
else:
return render(request, 'create.html', {'user_form': user_form, "errors": user_form.errors})
else:
return render(request, 'create.html', {'user_form': user_form})
@admin_required
def view_user(request, user_id):
users_list = User.objects.all()
user_obj = User.objects.filter(id=user_id)
active_users = User.objects.filter(is_active=True)
inactive_users = User.objects.filter(is_active=False)
return render(request, "list.html", {
'users': users_list, 'user_obj': user_obj,
'active_users': active_users, 'inactive_users': inactive_users
})
def edit_user(request, user_id):
user_obj = get_object_or_404(User, id=user_id)
user_form = UserForm(instance=user_obj)
if request.method == 'POST':
user_form = UserForm(request.POST, instance=user_obj)
if user_form.is_valid():
user_form.save()
return redirect("common:profile")
else:
return render(request, 'create.html', {'user_form': user_form, "errors": user_form.errors})
else:
return render(request, 'create.html', {'user_form': user_form, 'user_obj': user_obj})
@admin_required
def remove_user(request, user_id):
user_obj = get_object_or_404(User, id=user_id)
user_obj.delete()
return redirect("common:users_list")
|
StarcoderdataPython
|
9715945
|
from machin.model.nets.base import static_module_wrapper as smw
from machin.frame.algorithms.a2c import A2C
from machin.utils.learning_rate import gen_learning_rate_func
from machin.utils.logging import default_logger as logger
from machin.utils.helper_classes import Counter
from machin.utils.conf import Config
from machin.env.utils.openai_gym import disable_view_window
from torch.optim.lr_scheduler import LambdaLR
from torch.distributions import Categorical
import pytest
import torch as t
import torch.nn as nn
import gym
from test.frame.algorithms.utils import unwrap_time_limit, Smooth
from test.util_fixtures import *
from test.util_platforms import linux_only
class Actor(nn.Module):
def __init__(self, state_dim, action_num):
super().__init__()
self.fc1 = nn.Linear(state_dim, 16)
self.fc2 = nn.Linear(16, 16)
self.fc3 = nn.Linear(16, action_num)
def forward(self, state, action=None):
a = t.relu(self.fc1(state))
a = t.relu(self.fc2(a))
probs = t.softmax(self.fc3(a), dim=1)
dist = Categorical(probs=probs)
act = action if action is not None else dist.sample()
act_entropy = dist.entropy()
act_log_prob = dist.log_prob(act.flatten())
return act, act_log_prob, act_entropy
class Critic(nn.Module):
def __init__(self, state_dim):
super().__init__()
self.fc1 = nn.Linear(state_dim, 16)
self.fc2 = nn.Linear(16, 16)
self.fc3 = nn.Linear(16, 1)
def forward(self, state):
v = t.relu(self.fc1(state))
v = t.relu(self.fc2(v))
v = self.fc3(v)
return v
class TestA2C:
# configs and definitions
@pytest.fixture(scope="class")
def train_config(self):
disable_view_window()
c = Config()
        # Note: online policy algorithms such as PPO and A2C do not
# work well in Pendulum (reason unknown)
# and MountainCarContinuous (sparse returns)
c.env_name = "CartPole-v0"
c.env = unwrap_time_limit(gym.make(c.env_name))
c.observe_dim = 4
c.action_num = 2
c.max_episodes = 1000
c.max_steps = 200
c.replay_size = 10000
c.solved_reward = 150
c.solved_repeat = 5
return c
@pytest.fixture(scope="function")
def a2c(self, train_config, device, dtype):
c = train_config
actor = smw(
Actor(c.observe_dim, c.action_num).type(dtype).to(device), device, device
)
critic = smw(Critic(c.observe_dim).type(dtype).to(device), device, device)
a2c = A2C(
actor,
critic,
t.optim.Adam,
nn.MSELoss(reduction="sum"),
replay_device="cpu",
replay_size=c.replay_size,
)
return a2c
@pytest.fixture(scope="function")
def a2c_vis(self, train_config, device, dtype, tmpdir):
# not used for training, only used for testing apis
c = train_config
tmp_dir = tmpdir.make_numbered_dir()
actor = smw(
Actor(c.observe_dim, c.action_num).type(dtype).to(device), device, device
)
critic = smw(Critic(c.observe_dim).type(dtype).to(device), device, device)
a2c = A2C(
actor,
critic,
t.optim.Adam,
nn.MSELoss(reduction="sum"),
replay_device="cpu",
replay_size=c.replay_size,
visualize=True,
visualize_dir=str(tmp_dir),
)
return a2c
@pytest.fixture(scope="function")
def a2c_lr(self, train_config, device, dtype):
# not used for training, only used for testing apis
c = train_config
actor = smw(
Actor(c.observe_dim, c.action_num).type(dtype).to(device), device, device
)
critic = smw(Critic(c.observe_dim).type(dtype).to(device), device, device)
lr_func = gen_learning_rate_func([(0, 1e-3), (200000, 3e-4)], logger=logger)
with pytest.raises(TypeError, match="missing .+ positional argument"):
_ = A2C(
actor,
critic,
t.optim.Adam,
nn.MSELoss(reduction="sum"),
replay_device="cpu",
replay_size=c.replay_size,
lr_scheduler=LambdaLR,
)
a2c = A2C(
actor,
critic,
t.optim.Adam,
nn.MSELoss(reduction="sum"),
replay_device="cpu",
replay_size=c.replay_size,
lr_scheduler=LambdaLR,
lr_scheduler_args=((lr_func,), (lr_func,)),
)
return a2c
@pytest.fixture(scope="function")
def a2c_train(self, train_config):
c = train_config
# cpu is faster for testing full training.
actor = smw(Actor(c.observe_dim, c.action_num), "cpu", "cpu")
critic = smw(Critic(c.observe_dim), "cpu", "cpu")
a2c = A2C(
actor,
critic,
t.optim.Adam,
nn.MSELoss(reduction="sum"),
replay_device="cpu",
replay_size=c.replay_size,
)
return a2c
########################################################################
# Test for A2C acting
########################################################################
def test_act(self, train_config, a2c, dtype):
c = train_config
state = t.zeros([1, c.observe_dim], dtype=dtype)
a2c.act({"state": state})
########################################################################
# Test for A2C action evaluation
########################################################################
def test_eval_action(self, train_config, a2c, dtype):
c = train_config
state = t.zeros([1, c.observe_dim], dtype=dtype)
action = t.zeros([1, 1], dtype=t.int)
a2c._eval_act({"state": state}, {"action": action})
########################################################################
# Test for A2C criticizing
########################################################################
def test__criticize(self, train_config, a2c, dtype):
c = train_config
state = t.zeros([1, c.observe_dim], dtype=dtype)
a2c._criticize({"state": state})
########################################################################
# Test for A2C storage
########################################################################
def test_store_step(self, train_config, a2c, dtype):
c = train_config
old_state = state = t.zeros([1, c.observe_dim], dtype=dtype)
action = t.zeros([1, 1], dtype=t.int)
a2c.store_transition(
{
"state": {"state": old_state},
"action": {"action": action},
"next_state": {"state": state},
"reward": 0,
"value": 0,
"gae": 0,
"terminal": False,
}
)
@pytest.mark.parametrize("gae_lambda", [0.0, 0.5, 1.0])
def test_store_episode(self, train_config, a2c, dtype, gae_lambda):
c = train_config
old_state = state = t.zeros([1, c.observe_dim], dtype=dtype)
action = t.zeros([1, 1], dtype=t.int)
episode = [
{
"state": {"state": old_state},
"action": {"action": action},
"next_state": {"state": state},
"reward": 0,
"terminal": False,
}
for _ in range(3)
]
a2c.gae_lambda = gae_lambda
a2c.store_episode(episode)
########################################################################
# Test for A2C update
########################################################################
def test_update(self, train_config, a2c_vis, dtype):
c = train_config
old_state = state = t.zeros([1, c.observe_dim], dtype=dtype)
action = t.zeros([1, 1], dtype=t.int)
a2c_vis.store_episode(
[
{
"state": {"state": old_state},
"action": {"action": action},
"next_state": {"state": state},
"reward": 0,
"terminal": False,
}
for _ in range(3)
]
)
a2c_vis.update(
update_value=True, update_policy=True, concatenate_samples=True,
)
a2c_vis.entropy_weight = 1e-3
a2c_vis.store_episode(
[
{
"state": {"state": old_state},
"action": {"action": action},
"next_state": {"state": state},
"reward": 0,
"terminal": False,
}
for _ in range(3)
]
)
a2c_vis.update(
update_value=False, update_policy=False, concatenate_samples=True,
)
########################################################################
# Test for A2C save & load
########################################################################
# Skipped, it is the same as base
########################################################################
# Test for A2C lr_scheduler
########################################################################
def test_lr_scheduler(self, train_config, a2c_lr, dtype):
a2c_lr.update_lr_scheduler()
########################################################################
# Test for A2C config & init
########################################################################
def test_config_init(self, train_config):
c = train_config
config = A2C.generate_config({})
config["frame_config"]["models"] = ["Actor", "Critic"]
config["frame_config"]["model_kwargs"] = [
{"state_dim": c.observe_dim, "action_num": c.action_num},
{"state_dim": c.observe_dim},
]
a2c = A2C.init_from_config(config)
old_state = state = t.zeros([1, c.observe_dim], dtype=t.float32)
action = t.zeros([1, 1], dtype=t.int)
a2c.store_episode(
[
{
"state": {"state": old_state},
"action": {"action": action},
"next_state": {"state": state},
"reward": 0,
"terminal": False,
}
for _ in range(3)
]
)
a2c.update()
########################################################################
# Test for A2C full training.
########################################################################
@linux_only
@pytest.mark.parametrize("gae_lambda", [0.0, 0.5, 1.0])
def test_full_train(self, train_config, a2c_train, gae_lambda):
c = train_config
a2c_train.gae_lambda = gae_lambda
# begin training
episode, step = Counter(), Counter()
reward_fulfilled = Counter()
smoother = Smooth()
terminal = False
env = c.env
while episode < c.max_episodes:
episode.count()
# batch size = 1
total_reward = 0
state = t.tensor(env.reset(), dtype=t.float32)
tmp_observations = []
while not terminal and step <= c.max_steps:
step.count()
with t.no_grad():
old_state = state
# agent model inference
action = a2c_train.act({"state": old_state.unsqueeze(0)})[0]
state, reward, terminal, _ = env.step(action.item())
state = t.tensor(state, dtype=t.float32).flatten()
total_reward += float(reward)
tmp_observations.append(
{
"state": {"state": old_state.unsqueeze(0)},
"action": {"action": action},
"next_state": {"state": state.unsqueeze(0)},
"reward": float(reward),
"terminal": terminal or step == c.max_steps,
}
)
# update
a2c_train.store_episode(tmp_observations)
a2c_train.update()
smoother.update(total_reward)
step.reset()
terminal = False
logger.info(f"Episode {episode} total reward={smoother.value:.2f}")
if smoother.value > c.solved_reward:
reward_fulfilled.count()
if reward_fulfilled >= c.solved_repeat:
logger.info("Environment solved!")
return
else:
reward_fulfilled.reset()
pytest.fail("A2C Training failed.")
|
StarcoderdataPython
|
3279083
|
import logging
from httpclient.client import Client
from openmanoapi.config import BASE_URL
logger = logging.getLogger(__name__)
class Tenant(object):
""" Class for Tenant API
See more: https://osm.etsi.org/wikipub/index.php/RO_Northbound_Interface#Tenants
"""
def __init__(self):
self.__client = Client(verify_ssl_cert=True)
def get_list(self, headers=None, query_params=None):
"""Fetch the list of Openmano tenants
Args:
headers (dict, optional): the required HTTP headers, e.g., Accept: application/json
query_params (dict, optional): Additional arguments will be passed to the request.
Returns:
obj: a requests object
Examples:
>>> from httpclient.client import Client
>>> from openmanoapi.tenants import Tenant
>>> tn = Tenant()
>>> tenants = tn.get_list()
>>> print(int(tenants.status_code))
200
>>> print(tenants.json())
{"tenants": [{"created_at": "2018-05-03T16:00:04", "description": null, "uuid": "f35d06af-ed24-40ca-87c1-4e6ae81008b4", "name": "osm"} ] }
Openmano cli:
$ openmano tenant-list -d
"""
endpoint = '{}/tenants'.format(BASE_URL)
response = self.__client.get(endpoint, headers=headers, query_params=query_params)
logger.debug("Request `GET {}` returns HTTP status `{}`, headers `{}` and body `{}`."
"".format(response.url, response.status_code, response.headers, response.text))
return response
def get(self, openmano_tenant_id, headers=None, query_params=None):
"""Fetch details for an Openmano tenant by given tenant ID
Args:
openmano_tenant_id (str): The tenant UUID
headers (dict, optional): the required HTTP headers, e.g., Accept: application/json
query_params (dict, optional): Additional arguments will be passed to the request.
Returns:
obj: a requests object
Examples:
>>> from httpclient.client import Client
>>> from openmanoapi.tenants import Tenant
>>> tn = Tenant()
>>> tenant = tn.get('f35d06af-ed24-40ca-87c1-4e6ae81008b4')
>>> print(int(tenant.status_code))
200
>>> print(tenant.json())
Openmano cli:
$ openmano tenant-list {openmano_tenant_id} -d
"""
endpoint = '{}/tenants/{}'.format(BASE_URL, openmano_tenant_id)
response = self.__client.get(endpoint, headers=headers, query_params=query_params)
logger.debug("Request `GET {}` returns HTTP status `{}`, headers `{}` and body `{}`."
"".format(response.url, response.status_code, response.headers, response.text))
return response
|
StarcoderdataPython
|
6401752
|
<gh_stars>0
from django.apps import AppConfig
class PathsConfig(AppConfig):
name = 'paths'
|
StarcoderdataPython
|
1855838
|
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from django.core.exceptions import ValidationError
from todo.models import Todo
class RegistrationForm(UserCreationForm):
email = forms.EmailField(required=True)
def clean_email(self):
email = self.cleaned_data.get("email")
username = self.cleaned_data.get("username")
if (
email
and User.objects.filter(email=email).exclude(username=username).exists()
):
raise forms.ValidationError(
"A user with this email address already exists."
)
return email
class Meta:
model = User
fields = (
"username",
"first_name",
"last_name",
"email",
"<PASSWORD>",
"<PASSWORD>",
)
class TodoForm(forms.ModelForm):
def __init__(self, user, *args, **kwargs):
self.user = user
        self.instance = kwargs.get("instance")
super(TodoForm, self).__init__(*args, **kwargs)
self.fields["assignee"].queryset = User.objects.filter(
profile__domain=self.user.profile.domain, profile__is_approved=True
)
def clean(self):
super(TodoForm, self).clean()
status = self.cleaned_data.get("status")
assignee = self.cleaned_data.get("assignee")
if status == Todo.DONE and assignee != self.user:
raise ValidationError(
"ERROR: You are not allowed to mark this task complete."
)
return self.cleaned_data
def save(self, commit=True):
todo = super(TodoForm, self).save(commit=False)
if not todo.id:
todo.assignor = self.user
if commit:
todo.save()
return todo
class Meta:
model = Todo
fields = "__all__"
exclude = ("assignor",)
|
StarcoderdataPython
|
8127815
|
from main.models import Societe, Facture, Dossier
from datetime import datetime
def Greetings():
new_societe = Societe(
nom="karlson",
localisation="Douala Cameroon",
active=True,
telephone="654451039",
ville="Douala",
pays="Cameroon",
code_postal="100245",
address=" Ange Raphael"
)
new_societe.save()
def updateFacture():
    today = datetime.now()
    all_dossier = Dossier.objects.all()
    for dossier in all_dossier:
        end_date = dossier.facture.date
        # Compare the full (year, month, day) tuples; comparing each component
        # separately would mark e.g. 2022-01-05 as earlier than 2021-12-20.
        if (end_date.year, end_date.month, end_date.day) >= (today.year, today.month, today.day):
            dossier.statut = 'R'
            dossier.save()
|
StarcoderdataPython
|
330484
|
from abc import ABCMeta, abstractmethod
class INoise(metaclass=ABCMeta):
def __init__(self):
raise NotImplementedError("This object is an interface that has no implementation.")
@property
@abstractmethod
def NOISE_LIST(self):
raise NotImplementedError("This object is an interface that has no implementation.")
@abstractmethod
def noise1d(self, point, frequency):
raise NotImplementedError("This object is an interface that has no implementation.")
@abstractmethod
def noise2d(self, point, frequency):
raise NotImplementedError("This object is an interface that has no implementation.")
@abstractmethod
def noise3d(self, point, frequency):
raise NotImplementedError("This object is an interface that has no implementation.")
|
StarcoderdataPython
|
4801334
|
<reponame>GlenDC/threefold-wallet-electron<filename>src/tfchain/polyfill/http.py
def http_get(address, endpoint, headers=None):
request = None
resource = address+endpoint
__pragma__("js", "{}", """
request = new Request(resource, {method: 'GET'});
""")
if isinstance(headers, dict):
for key, value in headers.items():
__pragma__("js", "{}", """
request.headers.append(key, value);
""")
p = None
__pragma__("js", "{}", """
p = fetch(request).then(function(response) {
if (response.status === 200) {
return response.json().then(function(data) {
return {
code: 200,
address: address,
endpoint: endpoint,
data: data,
};
});
}
return response.text().then(function(data) {
let message;
try {
message = JSON.parse(data).message;
} catch(e) { console.debug("failed to parse (GET) error message", e); };
return {
code: response.status,
address: address,
endpoint: endpoint,
data: message || ("GET request to " + resource + " failed with status code " + response.status),
};
}).catch(() => {
return {
code: response.status,
address: address,
endpoint: endpoint,
data: "GET request to " + resource + " failed with status code " + response.status,
};
});
});
""")
return p
def http_post(address, endpoint, data, headers=None):
request = None
resource = address+endpoint
__pragma__("js", "{}", """
request = new Request(resource, {method: 'POST', body: data});
""")
if isinstance(headers, dict):
for key, value in headers.items():
__pragma__("js", "{}", """
request.headers.append(key, value);
""")
p = None
__pragma__("js", "{}", """
p = fetch(request).then(function(response) {
if (response.status === 200) {
return response.json().then(function(data) {
return {
code: 200,
address: address,
endpoint: endpoint,
data: data,
};
});
}
return response.text().then(function(data) {
let message;
try {
message = JSON.parse(data).message;
} catch(e) { console.debug("failed to parse (POST) error message", e); };
return {
code: response.status,
address: address,
endpoint: endpoint,
data: message || ("POST request to " + resource + " failed with status code " + response.status),
};
}).catch(() => {
return {
code: response.status,
address: address,
endpoint: endpoint,
data: "POST request to " + resource + " failed with status code " + response.status,
};
});
});
""")
return p
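# A hedged usage sketch (Transcrypt context assumed, as in the functions above): the
# address and endpoint are placeholders, and the returned promise is consumed with
# .then exactly like a JS fetch result. The result fields match the objects built in
# http_get's handlers.
def fetch_status_example():
    def on_result(result):
        if result.code == 200:
            console.log("status:", result.data)
        else:
            console.log("request failed:", result.code, result.data)
        return result
    return http_get(
        "https://explorer.example.com",  # placeholder address
        "/explorer/status",              # placeholder endpoint
        headers={"Content-Type": "application/json"},
    ).then(on_result)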
|
StarcoderdataPython
|
1688556
|
import torch.nn as nn
from HeadNeRFOptions import BaseOptions
from RenderUtils import ExtractLandMarkPosition, SoftSimpleShader
import torch
import torch.nn.functional as F
import FaceModels
from pytorch3d.structures import Meshes
from pytorch3d.renderer import (
PerspectiveCameras, RasterizationSettings, TexturesVertex, PointLights, blending, MeshRenderer, MeshRasterizer, HardPhongShader)
import numpy as np
class NL3DMMRenderer(nn.Module):
def __init__(self, img_size, opt: BaseOptions):
super().__init__()
self.opt = opt
self.img_h = img_size
self.img_w = img_size
self.build_info()
self.build_nl3dmm()
self.build_tool_funcs()
self.set_3dmmdecoder_eval()
def build_nl3dmm(self):
self.decoder_3dmm = FaceModels.Linear_3DMM(self.opt)
self.decoder_nl3dmm_new = FaceModels.NonLinear_3DMM(self.opt)
def build_info(self):
topo_info = np.load("ConfigFiles/nl_3dmm_topo_info.npz")
tris = torch.as_tensor(topo_info['fv_indices']).long()
vert_tris = torch.as_tensor(topo_info['corr_vf_indices']).long()
self.register_buffer("tris", tris)
self.register_buffer("corr_vf_indices", vert_tris)
self.a0 = np.pi
self.a1 = 2 * np.pi / np.sqrt(3.0)
self.a2 = 2 * np.pi / np.sqrt(8.0)
self.c0 = 1 / np.sqrt(4 * np.pi)
self.c1 = np.sqrt(3.0) / np.sqrt(4 * np.pi)
self.c2 = 3 * np.sqrt(5.0) / np.sqrt(12 * np.pi)
self.d0 = 0.5/ np.sqrt(3.0)
def build_tool_funcs(self):
self.extract_lm3d_func = ExtractLandMarkPosition()
def set_3dmmdecoder_eval(self):
self.decoder_3dmm.eval()
self.decoder_nl3dmm_new.eval()
def train(self, mode=True):
r"""Sets the module in training mode."""
self.training = mode
for module in self.children():
module.train(mode)
self.set_3dmmdecoder_eval()
return self
def calc_geometry_Albedo(self, iden_codes, text_codes, expr_codes):
batch_vps = self.decoder_nl3dmm_new(iden_codes, expr_codes)
batch_vcs = self.decoder_3dmm(text_codes)
return batch_vps, batch_vcs
def calc_normal(self, geometry):
vert_1 = geometry[:, self.tris[:, 0], :]
vert_2 = geometry[:, self.tris[:, 1], :]
vert_3 = geometry[:, self.tris[:, 2], :]
nnorm = torch.cross(vert_2 - vert_1, vert_3 - vert_1, 2)
tri_normal = F.normalize(nnorm, dim=2)
tri_normal = F.pad(tri_normal, [0, 0, 0, 1, 0, 0], mode="constant", value=0)
v_norm = tri_normal[:, self.corr_vf_indices, :].sum(2)
vert_normal = F.normalize(v_norm, dim=-1)
return vert_normal
def build_color(self, batch_vcolor, batch_norm, batch_gamma):
"""
batch_vcolor: [1, n_v, 3]
batch_norm: [B, n_v, 3]
batch_gamma: [B, 27]
"""
# n_b, num_vertex, _ = batch_vcolor.size()
n_b, num_vertex, _ = batch_norm.size()
gamma = batch_gamma.view(-1, 9, 3)
norm = batch_norm.view(-1, 3)
nx, ny, nz = norm[:, 0], norm[:, 1], norm[:, 2]
Y0 = torch.ones_like(nx) * self.a0 * self.c0
arrH = []
arrH.append(Y0)
arrH.append(-self.a1 * self.c1 * ny)
arrH.append(self.a1 * self.c1 * nz)
arrH.append(-self.a1 * self.c1 * nx)
arrH.append(self.a2 * self.c2 * nx * ny)
arrH.append(-self.a2 * self.c2 * ny * nz)
arrH.append(self.a2 * self.c2 * self.d0 * (3 * nz.pow(2) - 1))
arrH.append(-self.a2 * self.c2 * nx * nz)
arrH.append(self.a2 * self.c2 * 0.5 * (nx.pow(2) - ny.pow(2)))
H = torch.stack(arrH, 1)
Y = H.view(n_b, num_vertex, 9)
lighting = Y.bmm(gamma)
face_color = batch_vcolor * lighting
return face_color
def calc_ProjUV(self, cam_vps, batch_inmat):
tv = cam_vps[:, :, 2:3] + 1e-7
temp_uvs = cam_vps / tv
uv = torch.bmm(temp_uvs, batch_inmat.permute(0, 2, 1))
# uv = bmm_self_define_dim3(temp_uvs, batch_inmat, mat_2_is_trans=True)
return uv[:, :, :2]
def generate_renderer(self, batch_inmats):
cur_device = batch_inmats.device
batch_size = batch_inmats.size(0)
cur_dtype = batch_inmats.dtype
#cameras:
half_w = self.img_w * 0.5
half_h = self.img_h * 0.5
focal_info = torch.stack([batch_inmats[:, 0, 0] / half_w, batch_inmats[:, 1, 1] / half_w], dim=-1)
center_info = torch.stack([batch_inmats[:, 0, 2] / half_w - 1.0, batch_inmats[:, 1, 2] / half_h - 1.0], dim=-1)
iden_mat = torch.eye(3)
iden_mat[0, 0] = -1.0
iden_mat[1, 1] = -1.0
temp_Rmat = iden_mat.unsqueeze(0).expand(batch_size, -1, -1)
temp_Vec = torch.zeros((batch_size, 3), dtype=cur_dtype)
cameras = PerspectiveCameras(
focal_length=focal_info,
principal_point=center_info,
R=temp_Rmat,
T=temp_Vec,
device=cur_device
)
# focal_info = torch.stack([batch_inmats[:, 0, 0], batch_inmats[:, 1, 1]], dim=-1)
# center_info = torch.stack([batch_inmats[:, 0, 2], batch_inmats[:, 1, 2]], dim=-1)
# iden_mat = torch.eye(3)
# iden_mat[0, 0] = -1.0
# iden_mat[1, 1] = -1.0
# temp_Rmat = iden_mat.unsqueeze(0).expand(batch_size, -1, -1)
# temp_Vec = torch.zeros((batch_size, 3), dtype=cur_dtype)
# cameras = PerspectiveCameras(
# focal_length=focal_info,
# principal_point=center_info,
# R=temp_Rmat,
# T=temp_Vec,
# in_ndc=False,
# image_size = [[self.img_h, self.img_w] * batch_size],
# device=cur_device
# )
# light
lights = PointLights(
location=[[0.0, 0.0, 1e5]],
ambient_color=[[1, 1, 1]],
specular_color=[[0., 0., 0.]],
diffuse_color=[[0., 0., 0.]], device=cur_device
)
raster_settings = RasterizationSettings(
image_size=(self.img_h, self.img_w),
# blur_radius=0.000001,
# faces_per_pixel=10,
blur_radius=0,
faces_per_pixel=1,
)
blend_params = blending.BlendParams(background_color=[0, 0, 0])
renderer = MeshRenderer(
rasterizer=MeshRasterizer(
raster_settings=raster_settings,
cameras=cameras
),
shader=SoftSimpleShader(
lights=lights,
blend_params=blend_params,
cameras=cameras
),
).to(cur_device)
return renderer
def render_img(self,
batch_vps, batch_vcs, illu_sh,
c2l_Scales, c2l_Rmats, c2l_Tvecs,
batch_Rmats, batch_Tvecs, batch_inmats,
):
batch_size = batch_vps.size(0)
live_vps = torch.bmm(c2l_Scales * batch_vps, c2l_Rmats.permute(0, 2, 1)) + c2l_Tvecs.view(-1, 1, 3)
cam_vps = torch.bmm(live_vps, batch_Rmats.permute(0, 2, 1)) + batch_Tvecs.view(-1, 1, 3)
vns = self.calc_normal(cam_vps)
sh_vcs = self.build_color(batch_vcs, vns, illu_sh)
face_color = TexturesVertex(sh_vcs)
meshes = Meshes(cam_vps, self.tris.unsqueeze(0).expand(batch_size, -1, -1), face_color)
cur_renderer = self.generate_renderer(batch_inmats)
rendered_res = cur_renderer(meshes)
rendered_res /= 255.0
mask_c3b = (rendered_res[:, :, :, 3:]).detach().expand(-1, -1, -1, 3) > 0.0001
rendered_img = rendered_res[:, :, :, :3]
rendered_img = torch.clamp(rendered_img, min=0.0, max=1.0)
lm_3d_posi = self.extract_lm3d_func(cam_vps)
proj_lm2d = self.calc_ProjUV(lm_3d_posi, batch_inmats)
return rendered_img, mask_c3b, proj_lm2d, sh_vcs
def generate_renderer_for_eval(self, batch_inmats):
cur_device = batch_inmats.device
batch_size = batch_inmats.size(0)
cur_dtype = batch_inmats.dtype
#cameras:
# half_w = self.img_w * 0.5
# half_h = self.img_h * 0.5
focal_info = torch.stack([batch_inmats[:, 0, 0], batch_inmats[:, 1, 1]], dim=-1)
center_info = torch.stack([batch_inmats[:, 0, 2], batch_inmats[:, 1, 2]], dim=-1)
iden_mat = torch.eye(3)
iden_mat[0, 0] = -1.0
iden_mat[1, 1] = -1.0
temp_Rmat = iden_mat.unsqueeze(0).expand(batch_size, -1, -1)
temp_Vec = torch.zeros((batch_size, 3), dtype=cur_dtype)
cameras = PerspectiveCameras(
focal_length=focal_info,
principal_point=center_info,
R=temp_Rmat,
T=temp_Vec,
in_ndc=False,
image_size = [[self.img_h, self.img_w] * batch_size],
device=cur_device
)
# light
lights = PointLights(
location=[[0.0, 0.0, 1e5]],
ambient_color=[[1, 1, 1]],
specular_color=[[0., 0., 0.]],
diffuse_color=[[0., 0., 0.]], device=cur_device
)
raster_settings = RasterizationSettings(
image_size=(self.img_h, self.img_w),
# blur_radius=0.000001,
# faces_per_pixel=10,
blur_radius=0,
faces_per_pixel=1,
)
blend_params = blending.BlendParams(background_color=[0, 0, 0])
renderer = MeshRenderer(
rasterizer=MeshRasterizer(
raster_settings=raster_settings,
cameras=cameras
),
shader=SoftSimpleShader(
lights=lights,
blend_params=blend_params,
cameras=cameras
),
).to(cur_device)
lights_phong = PointLights(
location=[[0.0, 0.0, -1e5]],
ambient_color=[[0.5, 0.5, 0.5]],
specular_color=[[0.2, 0.2, 0.2]],
diffuse_color=[[0.3, 0.3, 0.3]], device=cur_device
)
renderer_phong = MeshRenderer(
rasterizer=MeshRasterizer(
raster_settings=raster_settings,
cameras=cameras
),
shader=HardPhongShader(
lights=lights_phong,
blend_params=blend_params,
cameras=cameras
),
).to(cur_device)
return renderer, renderer_phong
def render_img_for_eval(self,
batch_vps, batch_vcs, illu_sh,
batch_Rmats, batch_Tvecs, batch_inmats
):
batch_size = batch_vps.size(0)
cam_vps = torch.bmm(batch_vps, batch_Rmats.permute(0, 2, 1)) + batch_Tvecs.view(-1, 1, 3)
vns = self.calc_normal(cam_vps)
sh_vcs = self.build_color(batch_vcs, vns, illu_sh)
face_color = TexturesVertex(sh_vcs)
meshes = Meshes(cam_vps, self.tris.unsqueeze(0).expand(batch_size, -1, -1), face_color)
cur_renderer, renderer_phong = self.generate_renderer_for_eval(batch_inmats)
rendered_res = cur_renderer(meshes)
rendered_res /= 255.0
mask_c3b = (rendered_res[:, :, :, 3:]).detach().expand(-1, -1, -1, 3) > 0.0001
rendered_img = rendered_res[:, :, :, :3]
rendered_img = torch.clamp(rendered_img, min=0.0, max=1.0)
lm_3d_posi = self.extract_lm3d_func(cam_vps)
proj_lm2d = self.calc_ProjUV(lm_3d_posi, batch_inmats)
color_phong = torch.ones_like(cam_vps)
color_phong = TexturesVertex(color_phong)
meshes_phong = Meshes(cam_vps, self.tris.unsqueeze(0).expand(batch_size, -1, -1), color_phong)
rendered_phong = renderer_phong(meshes_phong)
phong_mask_c3b = (rendered_phong[:, :, :, 3:]).detach().expand(-1, -1, -1, 3) > 0.0001
rendered_phong = rendered_phong[:, :, :, :3]
return rendered_img, mask_c3b, proj_lm2d, sh_vcs, rendered_phong, phong_mask_c3b
def forward(self,
iden_codes, text_codes, expr_codes, cur_sh,
batch_Rmats, batch_Tvecs, batch_inmats, eval = False, **kwargs
):
batch_vps = self.decoder_nl3dmm_new(iden_codes, expr_codes, scale = 0.01)
batch_vcs = self.decoder_3dmm(text_codes)
if eval:
return self.render_img_for_eval(batch_vps, batch_vcs, cur_sh,
batch_Rmats, batch_Tvecs, batch_inmats)
else:
c2l_Scales, c2l_Rmats, c2l_Tvecs = kwargs["c2l_Scales"], kwargs["c2l_Rmats"], kwargs["c2l_Tvecs"]
return self.render_img(batch_vps, batch_vcs, cur_sh,
c2l_Scales, c2l_Rmats, c2l_Tvecs,
batch_Rmats, batch_Tvecs, batch_inmats)
|
StarcoderdataPython
|
4867817
|
<reponame>BDAthlon/2017-Triple_Helix-1
# -*- coding: utf-8 -*-
"""Comment models."""
from glyphrepository.database import Column, Model, SurrogatePK, db, reference_col, relationship
class Comment(SurrogatePK, Model):
"""A comment."""
__tablename__ = 'comments'
name = Column(db.String(80), unique=False, nullable=False)
rating = Column(db.Integer, unique=False, nullable=False)
comment = Column(db.String, unique=False, nullable=True)
glyph_id = reference_col('glyphs', nullable=True)
glyph = relationship('Glyph', backref='comments')
user_id = reference_col('users', nullable=True)
user = relationship('User', backref='comments')
def __init__(self, name, **kwargs):
"""Create instance."""
db.Model.__init__(self, name=name, **kwargs)
def __repr__(self):
"""Represent instance as a unique string."""
        return '<Comment({name})>'.format(name=self.name)
def get_display_rating(self):
ratings = {"-1": "not rated", "1": "rated 1/5", "2": "rated 2/5", "3": "rated 3/5", "4": "rated 4/5",
"5": "rated 5/5"}
return ratings[str(self.rating)]
|
StarcoderdataPython
|
6704038
|
<filename>tests/test_plugin.py<gh_stars>0
import json
import os
import shutil
import subprocess
import unittest
import sys
TAGS = ("python2", "python3")
TESTDIR = "/tmp/test-linuxdeploy-plugin-python"
ROOTDIR = os.path.realpath(os.path.dirname(__file__) + "/..").strip()
_is_python2 = sys.version_info[0] == 2
def get_version(recipe):
path = os.path.join(ROOTDIR, "appimage", "recipes", recipe + ".sh")
with open(path) as f:
for line in f:
if line.startswith("export PYTHON_VERSION="):
version = line.split("=")[-1]
return version.strip().replace('"', "").replace("'", "")
else:
raise ValueError("version not found")
def system(command, env=None):
"""Wrap system calls
"""
p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, env=env)
out, _ = p.communicate()
if not _is_python2:
out = out.decode()
if p.returncode != 0:
raise RuntimeError(os.linesep.join(
("", "COMMAND:", command, "OUTPUT:", out)))
return out
class PluginTest(unittest.TestCase):
"""Unit tests for the python plugin
"""
def __init__(self, *args, **kwargs):
if _is_python2:
super(PluginTest, self).__init__(*args, **kwargs)
else:
super().__init__(*args, **kwargs)
# Configure the test environment
if not "ARCH" in os.environ:
os.environ["ARCH"] = system("arch").strip()
user = os.getenv("USER")
if (user is None) or (user == "root"):
user = "beta"
home = "/tmp/home/" + user
os.environ["USER"] = user
os.environ["HOME"] = home
if not os.path.exists(home):
for tag in TAGS:
version = get_version(tag)
os.makedirs(os.path.join(home, ".local", "lib",
"python" + version[:3], "site-packages"))
bindir = os.path.join(home, ".local", "bin")
os.environ["PATH"] = ":".join((bindir, os.environ["PATH"]))
if not os.path.exists(TESTDIR):
os.makedirs(TESTDIR)
os.chdir(TESTDIR)
for tag in TAGS:
appimage = "{:}-{:}.AppImage".format(tag, os.environ["ARCH"])
shutil.copy(
os.path.join(ROOTDIR, "appimage", appimage),
os.path.join(TESTDIR, appimage))
def test_python3_base(self):
"""Test the base functionalities of a Python 3 AppImage
"""
self.check_base("python3")
def test_python3_modules(self):
"""Test the modules availability of a Python 3 AppImage
"""
self.check_modules("python3")
def test_python3_venv(self):
"""Test venv from a Python 3 AppImage
"""
self.check_venv("python3")
def test_python2_base(self):
"""Test the base functionalities of a Python 2 AppImage
"""
self.check_base("python2")
def check_base(self, tag):
"""Check the base functionalities of a Python AppImage
"""
version = get_version(tag)
appimage = "python{:}-{:}.AppImage".format(
version[0], os.getenv("ARCH"))
# Check the Python system configuration
python = os.path.join(TESTDIR, appimage)
cfg = self.get_python_config(python)
v = [int(vi) for vi in version.split(".")]
self.assertEqual(cfg["version"][:3], v)
self.assertEqual(cfg["executable"], python)
self.assertEqual(cfg["prefix"], os.path.join(cfg["appdir"], "usr",
"python"))
site_packages = os.path.join("lib",
"python{:}.{:}".format(*cfg["version"][:2]), "site-packages")
self.assertEqual(cfg["path"][-1], os.path.join(cfg["appdir"],
"usr", "python", site_packages))
user_packages = os.path.join(cfg["home"], ".local", site_packages)
self.assertTrue(user_packages in cfg["path"])
# Check pip install
system("./{:} -m pip uninstall test-pip-install -y || exit 0".format(
appimage))
r = system("./{:} -m pip install --user test-pip-install".format(
appimage))
r = system("test-pip-install").strip()
self.assertEqual(r, "running Python {:} from {:}".format(
version, os.path.join(TESTDIR, appimage)))
def check_venv(self, tag):
"""Check venv from a Python AppImage
"""
version = get_version(tag)
appimage = "python{:}-{:}.AppImage".format(
version[0], os.getenv("ARCH"))
# Generate a virtual environment
if os.path.exists("ENV"):
shutil.rmtree("ENV")
system("./{:} -m venv ENV".format(appimage))
envdir = TESTDIR + "/ENV"
python = envdir + "/bin/python"
self.assertTrue(os.path.exists(python))
# Bootstrap pip
def bash(cmd):
return system("/bin/bash -c '. ENV/bin/activate; {:}'".format(cmd))
bash("python -m ensurepip")
pip = "pip" + version[0]
self.assertTrue(os.path.exists("ENV/bin/" + pip))
# Check the Python system configuration
cfg = self.get_python_config("python", setup="ENV/bin/activate")
v = [int(vi) for vi in version.split(".")]
self.assertEqual(cfg["version"][:3], v)
self.assertEqual(cfg["executable"], str(python))
self.assertEqual(cfg["prefix"], str(envdir))
site_packages = os.path.join("lib",
"python{:}.{:}".format(*cfg["version"][:2]), "site-packages")
self.assertEqual(cfg["path"][-1], str(os.path.join(envdir,
site_packages)))
self.assertTrue(os.path.join(cfg["home"], ".local",
site_packages) not in cfg["path"])
# Check pip install
system("{:} uninstall test-pip-install -y || exit 0".format(pip))
bash("{:} uninstall test-pip-install -y".format(pip))
bash("{:} install test-pip-install".format(pip))
r = bash("test-pip-install").strip()
bash("{:} uninstall test-pip-install -y".format(pip))
self.assertEqual(r, "running Python {:} from {:}".format(
version, str(python)))
def check_modules(self, tag):
"""Check the modules availability of a Python AppImage
"""
version = get_version(tag)
appimage = "python{:}-{:}.AppImage".format(
version[0], os.getenv("ARCH"))
def import_(module):
system("./{:} -c 'import {:}'".format(appimage, module))
modules = {
"a": ["abc", "aifc", "argparse", "array", "ast", "asynchat",
"asyncio", "asyncore", "atexit", "audioop"],
"b": ["base64", "bdb", "binascii", "binhex", "bisect", "builtins",
"bz2"],
"c": ["calendar", "cgi", "cgitb", "chunk", "cmath", "cmd", "code",
"codecs", "codeop", "collections", "colorsys", "compileall",
"concurrent", "configparser", "contextlib", "contextvars",
"copy", "copyreg", "cProfile", "crypt", "csv", "ctypes",
"curses"],
"d": ["dataclasses", "datetime", "dbm", "decimal", "difflib",
"dis", "distutils", "doctest"],
"e": ["email", "encodings", "ensurepip", "enum", "errno"],
"f": ["faulthandler", "fcntl", "filecmp", "fileinput", "fnmatch",
"fractions", "ftplib", "functools"],
"g": ["gc", "getopt", "getpass", "gettext", "glob", "grp", "gzip"],
"h": ["hashlib", "heapq", "hmac", "html", "http"],
"i": ["imaplib", "imghdr", "importlib", "inspect", "io",
"ipaddress", "itertools"],
"j": ["json"],
"k": ["keyword"],
"l": ["lib2to3", "linecache", "locale", "logging", "lzma"],
"m": ["mailbox", "mailcap", "marshal", "math", "mimetypes", "mmap",
"modulefinder", "multiprocessing"],
"n": ["netrc", "nis", "nntplib", "numbers"],
"t": ["tkinter"]
}
for sublist in modules.values():
for module in sublist:
import_(module)
def get_python_config(self, python, setup=None):
"""Get the config loaded by the given Python instance
"""
cfg_file = "cfg.json"
if os.path.exists(cfg_file):
os.remove(cfg_file)
script = """\
import json
import os
import sys
with open("{:}", "w+") as f:
json.dump({{"path": sys.path, "executable": sys.executable,
"prefix": sys.prefix, "user": os.getenv("USER"),
"home": os.getenv("HOME"), "version": tuple(sys.version_info),
"appdir": os.getenv("APPDIR")}}, f)
""".format(cfg_file)
script_file = "script.py"
with open(script_file, "w") as f:
f.write(script)
if setup:
system("/bin/bash -c '. {:}; {:} {:}'".format(
setup, python, script_file))
else:
system("{:} {:}".format(python, script_file))
with open(cfg_file) as f:
return json.load(f)
if __name__ == "__main__":
unittest.main(failfast=True)
|
StarcoderdataPython
|
272860
|
from django import forms
from django.contrib import admin
from .models import Attachment, Property, Session, Upload
from .utils import import_class
class AttachmentAdmin (admin.ModelAdmin):
list_display = ('file_path', 'file_name', 'file_size', 'content_type', 'context', 'date_created')
readonly_fields = ('data',)
class PropertyForm (forms.ModelForm):
def clean_model(self):
model = self.cleaned_data.get('model')
if model:
try:
import_class(model)
except ImportError:
raise forms.ValidationError("Improper path to lookup model.")
return self.cleaned_data['model']
class PropertyAdmin (admin.ModelAdmin):
form = PropertyForm
list_display = ('label', 'slug', 'data_type', 'choices', 'model', 'required')
prepopulated_fields = {'slug': ('label',)}
filter_horizontal = ('content_type',)
class UploadInline (admin.TabularInline):
model = Upload
extra = 0
class SessionAdmin (admin.ModelAdmin):
list_display = ('uuid', 'user', 'template', 'context', 'date_created')
inlines = (UploadInline,)
admin.site.register(Attachment, AttachmentAdmin)
admin.site.register(Session, SessionAdmin)
admin.site.register(Property, PropertyAdmin)
|
StarcoderdataPython
|
12842055
|
<filename>TFQ/barren_plateaus/bp_tfq.py
import tensorflow as tf
import tensorflow_quantum as tfq
import cirq
import sympy
import numpy as np
import matplotlib.pyplot as plt
# https://www.tensorflow.org/quantum/tutorials/barren_plateaus#2_generating_random_circuits
def generate_circuit(qubits, depth, param):
circuit = cirq.Circuit()
for qubit in qubits:
circuit += cirq.ry(np.pi / 4.0)(qubit)
for d in range(depth):
for i, qubit in enumerate(qubits):
random_n = np.random.uniform()
random_rot = np.random.uniform(0, 2.0 * np.pi) if i != 0 or d != 0 else param
if random_n > 2. / 3.:
circuit += cirq.rz(random_rot)(qubit)
elif random_n > 1. / 3.:
circuit += cirq.ry(random_rot)(qubit)
else:
circuit += cirq.rx(random_rot)(qubit)
for src, dest in zip(qubits, qubits[1:]):
circuit += cirq.CZ(src, dest)
return circuit
def grad_variance(circuits, qubits, symbol, reps, ops):
if ops == "all":
readout_ops = sum([cirq.Z(i) for i in qubits])
else:
readout_ops = [cirq.Z(qubits[0]) * cirq.Z(qubits[1])]
rep = reps
diff = tfq.differentiators.ParameterShift()
expectation = tfq.layers.SampledExpectation(differentiator=diff)
circuit_tensor = tfq.convert_to_tensor(circuits)
values_tensor = tf.convert_to_tensor(np.random.uniform(0, 2 * np.pi, (len(circuits), 1)).astype(np.float32))
with tf.GradientTape() as tape:
tape.watch(values_tensor)
forward = expectation(circuit_tensor, operators=readout_ops, repetitions=rep, symbol_names=[symbol], symbol_values=values_tensor)
grads = tape.gradient(forward, values_tensor)
grad_var = tf.math.reduce_std(grads, axis=0)
return grad_var.numpy()[0]
def q_loop(range_q, depth, n_cir, reps, op):
varss = []
for i in range(2, range_q//2):
i = 2 * i
print(i, range_q)
qubits = [cirq.GridQubit(0, j) for j in range(i)]
symbol = sympy.symbols("param")
circuits = [generate_circuit(qubits, depth, symbol) for _ in range(n_cir)]
varss.append(grad_variance(circuits, qubits, symbol, reps, op))
return varss
def d_loop(q, range_d, n_cir, reps, op):
varss = []
for i in range(1, range_d//20):
i = 20 * i
print(i, range_d)
qubits = [cirq.GridQubit(0, j) for j in range(q)]
symbol = sympy.symbols("param")
circuits = [generate_circuit(qubits, i, symbol) for _ in range(n_cir)]
varss.append(grad_variance(circuits, qubits, symbol, reps, op))
return varss
n_cir = 100
qs = 16
d = 100
results_all = q_loop(qs, d, n_cir, 1000, "all")
results_paper = q_loop(qs, d, n_cir, 1000, "one")
xs = [i * 2 for i in range(2, qs//2)]
plt.plot(xs, results_all, label="sum(Z) Measure")
plt.plot(xs, results_paper, label="ZZ Measure")
plt.xlabel("Number of Qubits")
plt.ylabel("Variance of Gradients")
plt.legend()
plt.show()
ds = 160
n_cir = 100
op = "one"
results_12 = d_loop(12, ds, n_cir, 1000, op)
results_6 = d_loop(6, ds, n_cir, 1000, op)
xs = [20 * i for i in range(1, ds//20)]
plt.plot(xs, results_12, label='12 Qubits')
plt.plot(xs, results_6, label='6 Qubits')
plt.xlabel("Depth")
plt.ylabel("Variance of Gradients")
plt.legend()
plt.show()
|
StarcoderdataPython
|
11283703
|
import sys, os
from io import BytesIO
import sympy
from PIL import Image, ImageOps, ImageChops
rootdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
srcdir = os.path.join(rootdir, 'src')
sys.path.insert(0, srcdir)
latexsources = []
import fitfunctions
for name in fitfunctions.__all__:
cls = getattr(fitfunctions, name)
latexsources.append(('fitfunc_%s' % cls.name, cls.expr_latex))
with open(os.path.join(srcdir, 'lateximgs.py'), 'w') as f:
for name, src in latexsources:
buf = BytesIO()
sympy.preview('\\[\n%s\n\\]' % src.strip(),
output='png',
viewer='BytesIO',
outputbuffer=buf,
euler=False)
im = Image.open(buf)
alpha = ImageOps.invert(im.convert('L'))
im = ImageChops.constant(im, 0)
im.putalpha(alpha)
buf2 = BytesIO()
im.save(buf2, 'png')
f.write('%s = %s\n' % (name, repr(buf2.getvalue())))
|
StarcoderdataPython
|
261569
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'Zirx'
from http.cookiejar import CookieJar
from urllib.parse import urlencode, unquote
from urllib.error import URLError
from urllib.request import HTTPCookieProcessor, build_opener, Request
from bs4 import BeautifulSoup
import re
import sys
import xml.dom.minidom
TELECON_ADDR = 'http://59.173.2.28/'
UNICOM_ADDR = 'http://192.168.127.12/'
DEFAULT_PAGE = 'default3.aspx'
MAIN_PAGE = 'xsmainfs.aspx'
GRADE_PAGE = 'xscj.aspx'
TIMETABLE_PAGE = 'xsgrkb.aspx'
INFO_PAGE = 'xstop.aspx'
TIMEOUT = 6
class Content:
def __init__(self, num, password, serverno):
self.num = num
self.password = password
self.warning = False
self.wrong_password = False
self.system_error = False
self.timeout = False
self.stuinfo_ok = False
self.timetable_ok = False
self.transcript_ok = False
self.stuinfo = []
self.timetable = []
self.transcript = []
self.cj = CookieJar()
self.opener = build_opener(HTTPCookieProcessor(self.cj))
if not self.num or not self.password:
self.warning = True
self.opener.close()
return
if serverno == 0:
self.addr = TELECON_ADDR
elif serverno == 1:
self.addr = UNICOM_ADDR
else:
self.warning = True
self.opener.close()
return
if self.check_password():
self.get_stuinfo()
self.get_timetable()
self.get_transcript()
else:
#"INCORRECT PASSWORD ,set the flag and do it in generator"
self.wrong_password = True
self.opener.close()
return
def check_password(self):
try:
open_handler = self.opener.open(self.addr, timeout=TIMEOUT)
except URLError as e:
if str(e.reason) == "timed out":
self.timeout = True
self.opener.close()
return
recv = open_handler.read().decode('GB2312')
#it's hard to test system error
if recv.find("系统警告") != -1:
self.system_error = True
self.opener.close()
return
else:
"get VIEWSTATE"
soup = BeautifulSoup(recv, from_encoding='GB2312')
viewstateblock = soup.find(attrs={"name": "__VIEWSTATE"})
viewstate = viewstateblock['value']
"post and get return info"
data = urlencode({'tbYHM': self.num,
'tbPSW': self.password,
'ddlSF': '学生'.encode('GB2312'),
'__VIEWSTATE': viewstate,
'imgDL.x': '0',
'imgDL.y': '0'})
data = data.encode('GB2312')
request = Request(self.addr + DEFAULT_PAGE)
try:
recv_index = self.opener.open(request, data, timeout=TIMEOUT)
except URLError as e:
if str(e.reason) == "timed out":
self.timeout = True
self.opener.close()
return
res = recv_index.read().decode('GB2312')
if res.find("密码不正确") != -1:
self.wrong_password = True
self.opener.close()
return False
elif res.find("系统警告") != -1:
self.system_error = True
self.opener.close()
return
else:
return True
def get_stuinfo(self):
try:
info = self.opener.open(self.addr + INFO_PAGE, timeout=TIMEOUT)
except URLError as e:
if str(e.reason) == "timed out":
self.timeout = True
self.opener.close()
return
info_s = info.read().decode("GBK")
if info_s.find("系统警告") != -1:
self.system_error = True
self.opener.close()
return
si = BeautifulSoup(info_s, from_encoding='GB2312')
soup = BeautifulSoup(si.prettify(), from_encoding='GB2312')
info_block = soup.find(attrs={'id': '_div'})
temp = info_block.find_all('span')
for x in temp:
self.stuinfo.append(x.string.strip())
if self.stuinfo:
self.stuinfo_ok = True
#"FINFISH"
def get_timetable(self):
try:
timeable_info = self.opener.open(self.addr + TIMETABLE_PAGE + "?xh=" + self.num + "&type=xs",
timeout=TIMEOUT)
except URLError as e:
if str(e.reason) == "timed out":
self.timeout = True
self.opener.close()
return
timeable_info_s = timeable_info.read().decode("GBK")
if timeable_info_s.find("系统警告") != -1:
self.system_error = True
self.opener.close()
return
soup = BeautifulSoup(timeable_info_s, from_encoding='GB2312')
table = soup.find(attrs={'id': 'table6'})
table = str(table).replace('<br>', '\n')
table = str(table).replace('<br/>', '\n')
table = str(table).replace('</br>', '')
table = re.sub(r'<td.*?>第.+?节</td>', '', table)
soup = BeautifulSoup(table)
total = []
item = []
for row in soup.find_all('tr')[2:-8]:
for col in row.find_all('td'):
p = col.string.strip()
item.append(p)
total.append(item)
item = []
tables = []
tables.append(total[0])
tables.append(total[2])
tables.append(total[5])
tables.append(total[7])
monday = []
tuesday = []
wednesday = []
thursday = []
friday = []
for i in tables:
monday.append(i[0])
tuesday.append(i[1])
wednesday.append(i[2])
thursday.append(i[3])
friday.append(i[4])
res = []
res.append(monday)
res.append(tuesday)
res.append(wednesday)
res.append(thursday)
res.append(friday)
self.timetable = res
self.timetable_ok = True
        # some students have an empty timetable,
        # so this check is useless
#if self.timetable[0][0]:
# self.timetable_ok = True
def get_transcript(self):
try:
grade_info = self.opener.open(self.addr + GRADE_PAGE + "?xh=" + self.num, timeout=TIMEOUT)
except URLError as e:
if str(e.reason) == "timed out":
self.timeout = True
self.opener.close()
return
#
#GBK???
#
grade_info_s = grade_info.read().decode("GBK")
if grade_info_s.find("系统警告") != -1:
self.system_error = True
self.opener.close()
return
        soup = BeautifulSoup(grade_info_s, from_encoding='GB2312')
s = soup.find(attrs={"id": "DataGrid1"})
rs = BeautifulSoup(s.prettify())
tab = rs.find_all('tr')
table = []
item = []
for row in tab:
for col in row.find_all('td'):
item.append(col.string.strip())
table.append(item)
item = []
        # the first row is the table header
self.transcript = table[1:]
if self.transcript:
self.transcript_ok = True
class XMLGenerator:
def __init__(self, content):
self.content = content
self.mainnode = xml.dom.minidom.Document()
self.node = self.mainnode.createElement("root")
        # mainnode is the root of the XML document;
        # to add a node to the root, pass the root as an argument to the function
self.add_server_status()
self.add_status()
if self.content.stuinfo_ok:
            self.add_stuinfo()
if self.content.timetable_ok:
self.add_timetable()
if self.content.transcript_ok:
self.add_transcript()
self.mainnode.appendChild(self.node)
    # if the server is full, the CGI server generates this information and
    # transfers it to the client, so this function on the Python side always
    # returns the normal status
def add_server_status(self):
temp = self.mainnode.createElement("serverstatus")
temp.setAttribute("value", "ok")
self.node.appendChild(temp)
    # warning info indicates a serious error; processing is interrupted
def add_status(self):
temp = self.mainnode.createElement("status")
temp.setAttribute("syserror", str(self.content.system_error))
temp.setAttribute("password", str(<PASSWORD>))
temp.setAttribute("stuinfo", str(self.content.stuinfo_ok))
temp.setAttribute("timeable", str(self.content.timetable_ok))
temp.setAttribute("transcript", str(self.content.transcript_ok))
self.node.appendChild(temp)
    def add_stuinfo(self):
temp = self.mainnode.createElement("stuinfo")
temp.setAttribute("name", self.content.stuinfo[0])
temp.setAttribute("major", self.content.stuinfo[1])
temp.setAttribute("class", self.content.stuinfo[2])
temp.setAttribute("id", self.content.stuinfo[3])
self.node.appendChild(temp)
def add_timetable(self):
        # every element in timetable is a single string,
        # so it needs to be parsed again here
x = self.mainnode.createElement("timetable_root")
for item in self.content.timetable:
#just like ['土力学与基础工程I', '2节/周', '苏明会z[01-15]', '2-407']
for subitem in item:
temparray = subitem.split("\n")
temp = self.mainnode.createElement("timetable")
if temparray[0]:
temp.setAttribute("classname", temparray[0])
temp.setAttribute("times", temparray[1])
temp.setAttribute("teacher", temparray[2])
temp.setAttribute("classroom", temparray[3])
x.appendChild(temp)
else:
temp.setAttribute("classname", "")
temp.setAttribute("times", "")
temp.setAttribute("teacher", "")
temp.setAttribute("classroom", "")
x.appendChild(temp)
self.node.appendChild(x)
def add_transcript(self):
#example ['2011-2012', '1', '中国近现代史纲要', '必修课', '刘娟z', '考查', '89', '', '', '3.9', '2']
x = self.mainnode.createElement("transcript_root")
for item in self.content.transcript:
temp = self.mainnode.createElement("transcript")
temp.setAttribute("academic_year", item[0])
temp.setAttribute("semester", item[1])
temp.setAttribute("course_name", item[2])
temp.setAttribute("course_type", item[3])
temp.setAttribute("course_teacher", item[4])
temp.setAttribute("assessment_methods", item[5])
temp.setAttribute("total_mark", item[6])
temp.setAttribute("makeup_mark", item[7])
temp.setAttribute("rebuild_mark", item[8])
temp.setAttribute("grade_point", item[9])
temp.setAttribute("credit", item[10])
x.appendChild(temp)
self.node.appendChild(x)
def add_data(self):
return self.mainnode
if __name__ == "__main__":
a = sys.argv[1]
b = unquote(sys.argv[2])
c = sys.argv[3]
s = Content(a, b, int(c))
#print(s.num)
#print(s.password)
#print(s.wrong_password)
#print(s.addr)
#print(s.stuinfo_ok)
#print(s.stuinfo)
#print(s.timetable_ok)
#print(s.timetable)
#print(s.transcript_ok)
#print(s.transcript)
#print('HERE:')
#for x in s.transcript:
#print(x)
#with open("info.txt", "w", encoding="UTF-8") as tmp:
#tmp.write(str(s.stuinfo))
#tmp.write("\r\n")
#tmp.write(str(s.timetable))
#tmp.write("\r\n")
#tmp.write(str(s.transcript))
x = XMLGenerator(s)
outbyte = x.mainnode.toprettyxml(encoding="UTF-8")
out = str(outbyte, encoding = "UTF-8")
print("HTTP/1.1 200 OK\r\nServer: Zirconi's\r\nContent-Type: application/xml; charset=UTF-8\r\nContent-length:%d\r\n" % len(outbyte))
print(out)
#with open(r"./final.xml", "wb") as q:
# q.write(x.mainnode.toprettyxml(encoding="UTF-8"))
|
StarcoderdataPython
|
108303
|
# fortune_docker/users/urls.py
|
StarcoderdataPython
|
27194
|
import torch
from torch import dtype, nn
import torch.nn.functional as F
class PAM_Module(nn.Module):
def __init__(self, num, sizes,mode=None):
super(PAM_Module, self).__init__()
self.sizes = sizes
self.mode = mode
for i in range(num):
setattr(self, "query" + str(i),
nn.Conv2d(in_channels=sizes[1], out_channels=sizes[1], kernel_size=1))
setattr(self, "value" + str(i),
nn.Conv2d(in_channels=sizes[1], out_channels=sizes[1], kernel_size=1))
setattr(self, "key" + str(i),
nn.Conv2d(in_channels=sizes[1], out_channels=sizes[1], kernel_size=1))
def forward(self, feat_sources, feat_targets):
"""calculate the attention weight and alpha"""
ret_feats, ret_alphas = [], []
for i, query in enumerate(feat_targets):
Bt, Ct, Ht, Wt = query.size()
pro_query = getattr(self, "query"+str(i)
)(query).view(Bt, -1, Ht*Wt).permute(0, 2, 1)
attentions, means = [], []
for j, key in enumerate(feat_sources):
pro_key = getattr(self, "key" + str(j))(key).view(Bt, -1, Ht * Wt)
energy = torch.bmm(pro_query, pro_key)
means.append(energy.mean().item())
attentions.append(torch.softmax(energy, dim=-1))
if self.mode.find('alpha')>=0:
ret_alphas.append(torch.softmax(torch.tensor(means), dim=0))
else:
ret_alphas.append(torch.tensor(means).mean())
if self.mode in ['all', 'pam', 'cam', 'alpha_cam', 'alpha_cam', 'alpha_all']:
attention = torch.stack(attentions, dim=0).sum(0)
value = getattr(self, "value" + str(i))(query).view(Bt, -1, Ht * Wt)
out = torch.bmm(value, attention.permute(0, 2, 1)).view(Bt, Ct, Ht, Wt)
ret_feats.append(out)
if self.mode.find('alpha') >= 0:
ret_alphas = torch.stack(ret_alphas, dim=0)
else:
ret_alphas = torch.softmax(torch.tensor(ret_alphas), dim=0)
return ret_feats, ret_alphas
class CAM_Module(nn.Module):
def __init__(self, num, sizes, mode=None):
super(CAM_Module, self).__init__()
self.sizes = sizes
self.mode = mode
for i in range(num):
setattr(self, "value" + str(i),
nn.Conv2d(in_channels=sizes[1], out_channels=sizes[1], kernel_size=1))
def forward(self, feat_sources, feat_targets):
ret_feats, ret_alphas = [], []
for i, query in enumerate(feat_targets):
Bt, Ct, Ht, Wt = query.size()
pro_query = query.view(Bt, Ct, -1)
attentions, means = [], []
for j, key in enumerate(feat_sources):
pro_key = key.view(Bt, Ct, -1).permute(0, 2, 1)
energy = torch.bmm(pro_query, pro_key)
means.append(energy.mean().item())
attentions.append(torch.softmax(energy, dim=-1))
if self.mode.find('alpha') >= 0:
ret_alphas.append(torch.softmax(torch.tensor(means), dim=0))
else:
ret_alphas.append(torch.tensor(means).mean())
if self.mode in ['all', 'pam', 'cam', 'alpha_cam', 'alpha_cam', 'alpha_all']:
attention = torch.stack(attentions, dim=0).sum(0)
value = getattr(self, "value"+str(i))(query).view(Bt, Ct, -1)
out = torch.bmm(attention, value).view(Bt, Ct, Ht, Wt)
ret_feats.append(out)
if self.mode.find('alpha') >= 0:
ret_alphas = torch.stack(ret_alphas, dim=0)
else:
ret_alphas = torch.softmax(torch.tensor(ret_alphas), dim=0)
return ret_feats, ret_alphas
class ConvReg(nn.Module):
def __init__(self, s_shape, t_shape, factor=1):
super(ConvReg, self).__init__()
s_N, s_C, s_H, s_W = s_shape
t_N, t_C, t_H, t_W = t_shape
if s_H == 2 * t_H:
self.conv = nn.Conv2d(
s_C, t_C // factor, kernel_size=3, stride=2, padding=1)
elif s_H * 2 == t_H:
self.conv = nn.ConvTranspose2d(
s_C, t_C // factor, kernel_size=4, stride=2, padding=1)
elif s_H >= t_H:
self.conv = nn.Conv2d(
s_C, t_C//factor, kernel_size=(1 + s_H - t_H, 1 + s_W - t_W))
else:
            raise NotImplementedError(
                'student size {}, teacher size {}'.format(s_H, t_H))
def forward(self, x):
x = self.conv(x)
return x
class Fit(nn.Module):
def __init__(self, s_shape, t_shape, factor=1):
super(Fit, self).__init__()
_, s_C, s_H, s_W = s_shape
_, t_C, t_H, t_W = t_shape
if s_H == 2*t_H:
self.conv = nn.Conv2d(
s_C, t_C//factor, kernel_size=3, stride=2, padding=1)
elif s_H * 2 == t_H:
self.conv = nn.ConvTranspose2d(
s_C, t_C//factor, kernel_size=4, stride=2, padding=1)
elif s_H == t_H:
self.conv = nn.Conv2d(
s_C, t_C//factor, kernel_size=1, stride=1, padding=0)
else:
self.conv = nn.Conv2d(
s_C, t_C//factor, kernel_size=(1+s_H-t_H, 1 + s_W-t_W))
# if channels:
# self.conv = nn.Conv2d(s_C,channels,kernel_size=(1+s_H-t_H, 1+s_W-t_W))
# else:
# self.conv = nn.Conv2d(s_C,t_C//factor,kernel_size=(1+s_H-t_H, 1+s_W-t
def forward(self, x):
x = self.conv(x)
return x
# torch.Size([16, 128, 28, 28]) torch.Size([16, 256, 14, 14]) torch.Size([16, 512, 7, 7])
class Project(nn.Module):
def __init__(self, origin_sizes, new_size=torch.Size([-1, 16, 14, 14]), factor=1):
super(Project, self).__init__()
for i, size_o in enumerate(origin_sizes):
setattr(self, "target"+str(i),
Fit(size_o, new_size, factor=factor))
setattr(self, "source"+str(i),
Fit(size_o, new_size, factor=factor))
def forward(self, feat_sources, feat_targets):
new_feat_sources, new_feat_targets = [], []
for i, source in enumerate(feat_sources):
new_feat_sources.append(getattr(self, "source" + str(i))(source))
for i, target in enumerate(feat_targets):
new_feat_targets.append(getattr(self, "target" + str(i))(target))
return new_feat_sources, new_feat_targets
class DAAttention(nn.Module):
def __init__(self, origin_sizes, new_size=torch.Size([-1, 32, 7, 7]), factor=1, mode="all"):
super(DAAttention, self).__init__()
self.pro = Project(origin_sizes, new_size=new_size, factor=factor)
self.mode = mode
self.layer_num = len(origin_sizes)
if mode in ['all', 'alpha', 'pam', 'alpha_pam', 'alpha_all']:
self.pam = PAM_Module(self.layer_num, new_size, self.mode)
if mode in ['all', 'alpha', 'cam', 'alpha_cam', 'alpha_all']:
self.cam = CAM_Module(self.layer_num, new_size, self.mode)
self.C = new_size[1]
self.H = new_size[2]
self.W = new_size[3]
def forward(self, feat_sources, feat_targets):
new_feat_sources, new_feat_targets = self.pro(
feat_sources, feat_targets)
if self.mode in ['pam', 'all', 'alpha', 'alpha_pam', 'alpha_all']:
feat_pam, alpha_pam = self.pam(new_feat_sources, new_feat_targets)
if self.mode in ['cam', 'all', 'alpha', 'alpha_cam', 'alpha_all']:
feat_cam, alpha_cam = self.cam(new_feat_sources, new_feat_targets)
ret_alpha = None
ret_targets, ret_sources = [], []
for i in range(self.layer_num):
if self.mode in ['all', 'alpha_all']:
ret_targets.append(((feat_pam[i] + feat_cam[i]) * 0.5).view(-1, self.C * self.H * self.W))
ret_alpha = (alpha_cam+alpha_pam) * 0.5
elif self.mode == 'cam':
ret_targets.append(feat_cam[i].view(-1, self.C * self.H * self.W))
ret_alpha = alpha_cam
elif self.mode == 'pam':
ret_targets.append(feat_pam[i].view(-1, self.C * self.H * self.W))
ret_alpha = alpha_pam
elif self.mode in ['alpha', 'alpha_pam', 'alpha_cam']:
if self.mode == 'alpha':ret_alpha = (alpha_pam + alpha_cam) * 0.5
elif self.mode == 'alpha_cam': ret_alpha = alpha_cam
elif self.mode == 'alpha_pam': ret_alpha = alpha_pam
elif self.mode[:3] == 'noa':
ret_targets.append(new_feat_targets[i].view(-1, self.C * self.H * self.W))
ret_sources.append(new_feat_sources[i].view(-1, self.C * self.H * self.W))
return ret_sources, ret_alpha, ret_targets
if __name__ == '__main__':
# feat_source1 = torch.rand((16,512,28,28))
# feat_source2 = torch.rand((16,1024,14,14))
# feat_source3 = torch.rand((16,2048,7,7))
# feat_target1 = torch.rand((16, 512, 28, 28))
# feat_target2 = torch.rand((16, 1024, 14, 14))
# feat_target3 = torch.rand((16, 2048, 7, 7))
# att = DAAttention([feat_source1.size(),feat_source2.size(),feat_source3.size()])
# out,alpha = att([feat_source1,feat_source2,feat_source3],[feat_target1,feat_target2,feat_target3])
# print(out[0].size(),alpha.size())
# print(out[1].size(),alpha.size())
# print(out[2].size(),alpha.size())
# import sys
# sys.path.append('../..')
# sys.path.append('..')
# from models.fullnet import FLDGFullNet
# from models.backbone import resnet18
# backbone = resnet18()
# net = FLDGFullNet(backbone, 7)
# data = torch.rand((16, 3, 224, 224))
# a, b, c, d, e = net(data)
# print(c.size(), d.size(), e.size())
# torch.Size([16, 128, 28, 28]) torch.Size([16, 256, 14, 14]) torch.Size([16, 512, 7, 7])
import torch
a = torch.rand(3, 3)
print(a, a[0, 0].item())
|
StarcoderdataPython
|
6683321
|
import matplotlib.pyplot as plt
import numpy as np
#I hold x as a line while defining new values for each y
x = np.linspace(0, 20, 2000)
#1*x[1] + 0*x[2] <= 5
#y0*0=5-x #No initialization with respect to y0 because it is zero.
#0*x[1] + 1*x[2] <= 5
y1=5+x*0
#1*x[1] + 0*x[2] >= 1
#y2*0=1-x #No initialization
#0*x[1] + 1*x[2] >= 1
y3=1-x*0
#1*x[1] + 1*x[2] <= 6
y4=6-x
#TODO: HOW TO DRAW THE ABOVE LINEAR EQUATIONS ELEGANTLY in matplotlib?
#Drawing the lines by end points because of the zeroes.
plt.plot(x,y4,label=r'$x[1]+x[2]<=6$')
plt.plot([5,5],[10,-10]) #x < 5
plt.plot([10,-2],[5,5]) #y2 < 5
plt.plot([1,1],[10,-10], 'r-') #x >= 1
plt.plot([10,-2],[1,1],'b--') #y3 >= 1
#Fill the feasible band between the lower bound y3 (= 1) and the upper bound y4 (= 6 - x)
# http://benalexkeen.com/linear-programming-with-python-and-pulp-part-1/
plt.fill_between(x, y3, y4, where=y4 > y3, color='grey', alpha=0.5)
plt.show()
|
StarcoderdataPython
|
151046
|
# The isBadVersion API is already defined for you.
# def isBadVersion(version: int) -> bool:
class Solution:
def firstBadVersion(self, n: int) -> int:
start, end = 1, n
while start < end:
mid = start + (end - start) // 2
check = isBadVersion(mid)
if check:
end = mid
else:
start = mid + 1
return start
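# Minimal local harness, assuming versions 4 and above are "bad"; on LeetCode
# the isBadVersion API is supplied by the judge instead of this stub.
def isBadVersion(version: int) -> bool:
    return version >= 4

if __name__ == "__main__":
    # Binary search converges on the first bad version, i.e. 4.
    assert Solution().firstBadVersion(10) == 4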
|
StarcoderdataPython
|
6575763
|
<filename>manager.py
# General Package
import os, sys
# Set the path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
# General Packages
from flask_script import Manager, Server
# User Pacakages
import settings
from application import create_app, create_db, config_app
from utilities.databaselayer import DatabaseLayer
# App
app = create_app()
# Database
settings.database = DatabaseLayer(create_db(app))
# Config App
config_app(app)
# Manager
manager = Manager(app)
# Turn on debugger by default and reloader
manager.add_command("runserver", Server(
use_debugger = True,
use_reloader = True,
host = os.getenv('IP', '0.0.0.0'),
port = int(os.getenv('PORT', 5000)))
)
if __name__ == "__main__":
manager.run()
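# Typical invocation (flask_script dispatches the sub-command):
#   python manager.py runserver
# which serves the app with the debugger and reloader enabled on $IP:$PORT.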
|
StarcoderdataPython
|
12854488
|
<gh_stars>0
print("Thank you Jesus")
# Read a value from standard input
# input("Thank you")
# Evaluate expression
x = 1
print(x)
x += 3
print(x)
# Conditionals and loops
if x > 1:
print("great than 1")
else:
print("less than 1")
n = 3
while n > 1:
print(n)
n -= 1
# Arithmetic operator
print({100 % 3}, {100 / 3})
z = 1
print(z, type(z))
z = "dsf"
print(z, type(z))
print(17 // 3) # floor division discards the fractional part
print(5 ** 2) # 5 squared
# use raw strings by adding an r before the first quote
print(r'C:\some\name')
# String literals can span multiple lines. One way is using triple-quotes: """...""" or '''...'''
print("""\
dsfdsfds
ddsf
dfdf
dfdfds
""")
w = 'thanks'
print(w[:3] + w[3:])
# All slice operations return a new list containing the requested elements.
# This means that the following slice returns a new (shallow) copy of the list:
squares = [1, 4, 9, 16, 25]
for n in squares:
print(n, end='-')
if n == 16:
squares.insert(n, 100)
print('\n')
print(squares[:])
squares.append(10)
print(squares[:])
squares = []
print(squares[:])
# Fibonacci
a, b = 0, 1
while b < 25:
print(b)
a, b = b, a + b
# Fibonacci
a, b = 0, 1
while b < 1000:
print(b, end=',')
a, b = b, a + b
print('\n')
# Loops
for i in range(3, 15, 4):
print(i)
a = ['Mary', 'had', 'a', 'little', 'lamb']
for i in range(len(a)):
print(i, a[i])
print(list(range(5)))
def f(ab, l=[]):
l.append(ab)
return l
print(f(1))
print(f(2))
print(f(3))
# if __name__ == "__main__":
# import sys
#
# print(int(sys.argv[1]))
import sys
print(dir(sys))
print('12'.zfill(5))
print('We are the {} who say "{}!"'.format('knights', 'Ni'))
print('{0} and {1}'.format('spam', 'eggs'))
# Formatting: https://docs.python.org/3.6/tutorial/inputoutput.html
quit(1)
|
StarcoderdataPython
|
11304437
|
<reponame>J03D03/VaRA-Tool-Suite
"""Module for the :class:`BugProvider`."""
import logging
import typing as tp
from benchbuild.project import Project
import varats.provider.bug.bug as bug
from varats.project.project_util import (
get_primary_project_source,
is_git_source,
)
from varats.provider.provider import Provider
from varats.utils.github_util import get_github_repo_name_for_project
LOG = logging.getLogger(__name__)
class BugProvider(Provider):
"""Provides bug information for a project."""
def __init__(
self, project: tp.Type[Project], github_project_name: tp.Optional[str]
) -> None:
super().__init__(project)
self.__github_project_name = github_project_name
@classmethod
def create_provider_for_project(
cls, project: tp.Type[Project]
) -> tp.Optional['BugProvider']:
primary_source = get_primary_project_source(project.NAME)
if is_git_source(primary_source):
# If project has Github repo, pass name as second arg, None ow.
return BugProvider(
project, get_github_repo_name_for_project(project)
)
return None
@classmethod
def create_default_provider(
cls, project: tp.Type[Project]
) -> 'BugProvider':
return BugDefaultProvider(project)
def find_all_pygit_bugs(self) -> tp.FrozenSet[bug.PygitBug]:
"""
Creates a set for all bugs of the provider's project.
Returns:
A set of PygitBugs.
"""
resulting_bugs: tp.Set[bug.PygitBug] = set()
if self.__github_project_name:
resulting_bugs = resulting_bugs.union(
bug.find_all_issue_pygit_bugs(self.project.NAME)
)
resulting_bugs = resulting_bugs.union(
bug.find_all_commit_message_pygit_bugs(self.project.NAME)
)
return frozenset(resulting_bugs)
def find_all_raw_bugs(self) -> tp.FrozenSet[bug.RawBug]:
"""
Creates a set for all bugs of the provider's project.
Returns:
A set of RawBugs.
"""
resulting_bugs: tp.Set[bug.RawBug] = set()
if self.__github_project_name:
resulting_bugs = resulting_bugs.union(
bug.find_all_issue_raw_bugs(self.project.NAME)
)
resulting_bugs = resulting_bugs.union(
bug.find_all_commit_message_raw_bugs(self.project.NAME)
)
return frozenset(resulting_bugs)
def find_pygit_bug_by_fix(self,
fixing_commit: str) -> tp.FrozenSet[bug.PygitBug]:
"""
        Find the bug associated with some fixing commit in the provider's project,
if there is any.
Args:
fixing_commit: Commit Hash of the potentially fixing commit
Returns:
A set of PygitBugs fixed by fixing_commit
"""
resulting_bugs: tp.Set[bug.PygitBug] = set()
if self.__github_project_name:
resulting_bugs = resulting_bugs.union(
bug.find_issue_pygit_bugs_by_fix(
self.project.NAME, fixing_commit
)
)
resulting_bugs = resulting_bugs.union(
bug.find_commit_message_pygit_bugs_by_fix(
self.project.NAME, fixing_commit
)
)
return frozenset(resulting_bugs)
def find_raw_bug_by_fix(self,
fixing_commit: str) -> tp.FrozenSet[bug.RawBug]:
"""
        Find the bug associated with some fixing commit in the provider's project,
if there is any.
Args:
fixing_commit: Commit Hash of the potentially fixing commit
Returns:
A set of RawBugs fixed by fixing_commit
"""
resulting_bugs: tp.Set[bug.RawBug] = set()
if self.__github_project_name:
resulting_bugs = resulting_bugs.union(
bug.find_issue_raw_bugs_by_fix(
self.project.NAME, fixing_commit
)
)
resulting_bugs = resulting_bugs.union(
bug.find_commit_message_raw_bugs_by_fix(
self.project.NAME, fixing_commit
)
)
return frozenset(resulting_bugs)
def find_pygit_bug_by_introduction(
self, introducing_commit: str
) -> tp.FrozenSet[bug.PygitBug]:
"""
Create a (potentially empty) list of bugs introduced by a certain commit
to the provider's project.
Args:
introducing_commit: commit hash of the introducing commit to look
for
Returns:
A set of PygitBugs introduced by introducing_commit
"""
resulting_bugs: tp.Set[bug.PygitBug] = set()
if self.__github_project_name:
resulting_bugs = resulting_bugs.union(
bug.find_issue_pygit_bugs_by_introduction(
self.project.NAME, introducing_commit
)
)
resulting_bugs = resulting_bugs.union(
bug.find_commit_message_pygit_bugs_by_introduction(
self.project.NAME, introducing_commit
)
)
return frozenset(resulting_bugs)
def find_raw_bug_by_introduction(
self, introducing_commit: str
) -> tp.FrozenSet[bug.RawBug]:
"""
Create a (potentially empty) list of bugs introduced by a certain
commit.
Args:
introducing_commit: commit hash of the introducing commit to look
for
Returns:
A set of RawBugs introduced by introducing_commit
"""
resulting_bugs: tp.Set[bug.RawBug] = set()
if self.__github_project_name:
resulting_bugs = resulting_bugs.union(
bug.find_issue_raw_bugs_by_introduction(
self.project.NAME, introducing_commit
)
)
resulting_bugs = resulting_bugs.union(
bug.find_commit_message_raw_bugs_by_introduction(
self.project.NAME, introducing_commit
)
)
return frozenset(resulting_bugs)
class BugDefaultProvider(BugProvider):
"""Default implementation of the :class:`Bug provider` for projects that do
not (yet) support bugs."""
def __init__(self, project: tp.Type[Project]) -> None:
# pylint: disable=E1003
super(BugProvider, self).__init__(project)
def find_all_pygit_bugs(self) -> tp.FrozenSet[bug.PygitBug]:
return frozenset()
def find_all_raw_bugs(self) -> tp.FrozenSet[bug.RawBug]:
return frozenset()
def find_pygit_bug_by_fix(self,
fixing_commit: str) -> tp.FrozenSet[bug.PygitBug]:
return frozenset()
def find_raw_bug_by_fix(self,
fixing_commit: str) -> tp.FrozenSet[bug.RawBug]:
return frozenset()
def find_pygit_bug_by_introduction(
self, introducing_commit: str
) -> tp.FrozenSet[bug.PygitBug]:
return frozenset()
def find_raw_bug_by_introduction(
self, introducing_commit: str
) -> tp.FrozenSet[bug.RawBug]:
return frozenset()
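# Rough usage sketch (MyProject is a hypothetical benchbuild Project subclass):
#
#   provider = BugProvider.create_provider_for_project(MyProject)
#   if provider is None:
#       provider = BugProvider.create_default_provider(MyProject)
#   fixed = provider.find_raw_bug_by_fix("<fixing commit hash>")
#
# Projects without a git primary source fall back to the empty default provider.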
|
StarcoderdataPython
|
11233588
|
''' from __nonstandard__ import where_clause
shows how one could use `where` as a keyword to introduce a code
block that would be ignored by Python. The idea was to use this as
a _pythonic_ notation as an alternative for the optional type hinting described
in PEP484. **This idea has been rejected; it is included just for fun.**
Note that this transformation **cannot** be used in the console.
For more details, please see two of my recent blog posts:
https://aroberge.blogspot.ca/2015/12/revisiting-old-friend-yet-again.html
https://aroberge.blogspot.ca/2015/01/type-hinting-in-python-focus-on.html
I first suggested this idea more than 12 years ago! ;-)
https://aroberge.blogspot.ca/2005/01/where-keyword-and-python-as-pseudo.html
'''
from io import StringIO
import tokenize
def transform_source(text):
'''removes a "where" clause which is identified by the use of "where"
as an identifier and ends at the first DEDENT (i.e. decrease in indentation)'''
toks = tokenize.generate_tokens(StringIO(text).readline)
result = []
where_clause = False
for toktype, tokvalue, _, _, _ in toks:
if toktype == tokenize.NAME and tokvalue == "where":
where_clause = True
elif where_clause and toktype == tokenize.DEDENT:
where_clause = False
continue
if not where_clause:
result.append((toktype, tokvalue))
return tokenize.untokenize(result)
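# Small illustrative check (not part of the original module): the `where`
# block below is stripped by transform_source, leaving only the function body.
if __name__ == "__main__":
    sample = (
        "def add(a, b):\n"
        "    where:\n"
        "        a: int\n"
        "        b: int\n"
        "    return a + b\n"
    )
    print(transform_source(sample))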
|
StarcoderdataPython
|
289332
|
"""
Tests for dit.math.sampling.
"""
from __future__ import division
import pytest
import numpy as np
import dit.math.sampling as module
import dit.example_dists
from dit.exceptions import ditException
#sample(dist, size=None, rand=None, prng=None):
def test_sample1():
# Basic sample
d = dit.example_dists.Xor()
dit.math.prng.seed(0)
x = module.sample(d)
assert x == '101'
# with log dist
dit.math.prng.seed(0)
d.set_base(3.5)
x = module.sample(d)
assert x == '101'
def test_sample2():
# Specified prng
d = dit.example_dists.Xor()
dit.math.prng.seed(0)
x = module.sample(d, prng=dit.math.prng)
assert x == '101'
def test_sample3():
# Specified rand number
d = dit.example_dists.Xor()
x = module.sample(d, rand=.3)
assert x == '011'
def test_sample4():
# More than one random number
d = dit.example_dists.Xor()
dit.math.prng.seed(0)
x = module.sample(d, 6)
assert x == ['101', '101', '101', '101', '011', '101']
def test_sample5():
# Bad prng
d = dit.example_dists.Xor()
with pytest.raises(ditException):
module.sample(d, prng=3)
def test_sample6():
# Not enough rands
d = dit.example_dists.Xor()
with pytest.raises(ditException):
module.sample(d, 5, rand=[.1]*3)
def test_sample_discrete_python1():
# Specified rand number
d = dit.example_dists.Xor()
x = module._sample_discrete__python(d.pmf, .5)
assert x == 2
def test_sample_discrete_python2():
# Specified rand number
d = dit.example_dists.Xor()
x = module._samples_discrete__python(d.pmf, np.array([.5, .3, .2]))
assert np.allclose(x, np.array([2, 1, 0]))
def test_sample_discrete_python3():
# Specified rand number
d = dit.example_dists.Xor()
out = np.zeros((3,))
module._samples_discrete__python(d.pmf, np.array([.5, .3, .2]), out=out)
assert np.allclose(out, np.array([2, 1, 0]))
def test_ball_smoke():
dit.math.prng.seed(0)
x = module.ball(3)
x_ = np.array([ 0.21324626, 0.4465436 , -0.65226253])
assert np.allclose(x, x_)
dit.math.prng.seed(0)
x = module.ball(3, prng=dit.math.prng)
assert np.allclose(x, x_)
def test_base_with_size():
# size 3, 1
dit.math.prng.seed(0)
x = module.ball(3, 1)
x_ = np.array([[ 0.21324626, 0.4465436 , -0.65226253]])
assert np.allclose(x, x_)
# size 4, 4
dit.math.prng.seed(0)
x = module.ball(4, 4)
x_ = np.array([
[ 0.69375635, -0.36303705, 0.35293677, -0.05622584],
[-0.06238751, 0.24817385, 0.08706278, 0.87899164],
[ 0.70592518, 0.1128636 , 0.41171971, 0.30951042],
[ 0.72885111, -0.1000816 , 0.15272267, -0.41665039]
])
assert np.allclose(x, x_)
def test_2ball():
dit.math.prng.seed(0)
x = module._2ball(3, dit.math.prng)
x_ = np.array([
[-0.93662222, -0.29662586],
[-0.14853979, -0.66826269],
[-0.31184301, -0.23494632]
])
assert x.shape == (3, 2)
assert np.allclose(x, x_)
def test_3ball_cylinder():
dit.math.prng.seed(0)
x = module._3ball_cylinder(3, dit.math.prng)
x_ = np.array([
[-0.7520198 , 0.31101413, 0.08976637],
[ 0.68515146, -0.55406718, -0.1526904 ],
[ 0.77215823, -0.17942272, 0.29178823]
])
assert x.shape == (3, 3)
assert np.allclose(x, x_)
def test_norm_smoketest():
d = np.array([.2, .3, .5])
# prng is None
dit.math.prng.seed(0)
x = module.norm(d)
x_ = np.array([ 0.49606291, 0.13201838, 0.37191871])
assert np.allclose(x, x_)
# prng is not None
dit.math.prng.seed(0)
x = module.norm(d, prng=dit.math.prng)
assert np.allclose(x, x_)
# size is not None
dit.math.prng.seed(0)
x = module.norm(d, size=1)
assert np.allclose(x, np.asarray([x_]))
def test_norm_spherical_cov():
d = np.array([.2, .3, .5])
dit.math.prng.seed(0)
x = module.norm(d, .3)
x_ = np.array([ 0.34790127, 0.20240029, 0.44969844])
assert np.allclose(x, x_)
def test_norm_diagonal_cov():
d = np.array([.2, .3, .5])
dit.math.prng.seed(0)
x = dit.math.norm(d, np.array([.3, .5]))
x_ = np.array([ 0.33458841, 0.40485058, 0.26056101])
assert np.allclose(x, x_)
def test_norm_cov():
d = np.array([.2, .3, .5])
dit.math.prng.seed(0)
ilrcov = np.array([[.3, .2],[.2, .4]])
x = dit.math.norm(d, ilrcov, size=3)
x_ = np.array([
[ 0.07400846, 0.27603643, 0.64995511],
[ 0.09984353, 0.44856934, 0.45158713],
[ 0.07260608, 0.18948779, 0.73790613]
])
assert np.allclose(x, x_)
def test_norm_badshape_cov():
d = np.array([.2, .3, .5])
dit.math.prng.seed(0)
ilrcov = np.array([[.3, .5, .1],[.5, .4, .3], [.2, .5, .3]])
with pytest.raises(ditException):
dit.math.norm(d, ilrcov)
ilrcov = np.array([.3, .5, .1])
with pytest.raises(ditException):
dit.math.norm(d, ilrcov)
ilrcov = np.array(np.random.rand(16).reshape(2,2,4))
with pytest.raises(ditException):
dit.math.norm(d, ilrcov)
def test_norm_toomany():
d = np.array([[.2, .3, .5], [.5, .2, .3]])
with pytest.raises(ditException):
dit.math.norm(d)
def test__annulus2_nosize():
# When size is None, it should just return the sample.
prng = np.random.RandomState()
samples = dit.math.sampling._annulus2(0, 1, size=None, prng=prng)
assert samples.shape == (2,)
def test__annulus2_size():
# When size is not None, it should return an array of the samples.
prng = np.random.RandomState()
samples = dit.math.sampling._annulus2(0, 1, size=1, prng=prng)
assert samples.shape == (1,2)
def test_annulus2_nosize():
# When size is not None, it should return an array of the samples.
dit.math.prng.seed(0)
pmf = np.array([1/3, 1/3, 1/3])
sample = dit.math.sampling.annulus2(pmf, 0, 1, size=None)
assert sample == pytest.approx(0.2398208154908835)
def test_annulus2_size():
# When size is not None, it should return an array of the samples.
prng = np.random.RandomState()
pmf = np.array([1/3, 1/3, 1/3])
samples = dit.math.sampling.annulus2(pmf, 0, 1, size=3, prng=prng)
assert samples.shape == (3,3)
|
StarcoderdataPython
|
3576038
|
from gui.vlist.vlist import VList
|
StarcoderdataPython
|
3262506
|
<gh_stars>10-100
# Copyright 2015 Brocade Communications System, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from oslo_log import log as logging
from oslo_utils import strutils
from tackerclient.v1_0 import client as tacker_client
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api import base
from openstack_dashboard.contrib.developer.profiler import api as profiler
LOG = logging.getLogger(__name__)
SUPPORTED_VIM_TYPES = (
('openstack', 'OpenStack'),
('kubernetes', 'Kubernetes')
)
AUTH_METHODS = (
('basic', _('Basic')),
('bearer_token', _('Bearer Token'))
)
CERT_TRUE_TYPE = 'True'
CERT_FALSE_TYPE = 'False'
CERT_VERIFY_TYPES = (
(CERT_TRUE_TYPE, _("True")),
(CERT_FALSE_TYPE, _("False"))
)
@memoized
def tackerclient(request):
insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
c = tacker_client.Client(
token=request.user.token.id,
auth_url=base.url_for(request, 'identity'),
endpoint_url=base.url_for(request, 'nfv-orchestration'),
insecure=insecure, ca_cert=cacert)
return c
@profiler.trace
def vnf_list(request, **params):
LOG.debug("vnf_list(): params=%s", params)
vnfs = tackerclient(request).list_vnfs(**params).get('vnfs')
return vnfs
@profiler.trace
def vnfd_list(request, **params):
LOG.debug("vnfd_list(): params=%s", params)
vnfds = tackerclient(request).list_vnfds(**params).get('vnfds')
return vnfds
@profiler.trace
def create_vnfd(request, tosca_body=None, **params):
LOG.debug("create_vnfd(): params=%s", params)
vnfd_instance = tackerclient(request).create_vnfd(body=tosca_body)
return vnfd_instance
@profiler.trace
def create_vnf(request, vnf_arg, **params):
LOG.debug("create_vnf(): vnf_arg=%s", str(vnf_arg))
vnf_instance = tackerclient(request).create_vnf(body=vnf_arg)
return vnf_instance
@profiler.trace
def get_vnfd(request, vnfd_id):
LOG.debug("vnfd_get(): vnfd_id=%s", str(vnfd_id))
vnfd = tackerclient(request).show_vnfd(vnfd_id)
return vnfd
@profiler.trace
def get_vnf(request, vnf_id):
LOG.debug("vnf_get(): vnf_id=%s", str(vnf_id))
vnf_instance = tackerclient(request).show_vnf(vnf_id)
return vnf_instance
@profiler.trace
def delete_vnf(request, vnf_id):
LOG.debug("delete_vnf():vnf_id=%s", str(vnf_id))
tackerclient(request).delete_vnf(vnf_id)
@profiler.trace
def delete_vnfd(request, vnfd_id):
LOG.debug("delete_vnfd():vnfd_id=%s", str(vnfd_id))
tackerclient(request).delete_vnfd(vnfd_id)
@profiler.trace
def create_vim(request, vim_arg):
LOG.debug("create_vim(): vim_arg=%s", strutils.mask_password(vim_arg))
vim_instance = tackerclient(request).create_vim(body=vim_arg)
return vim_instance
@profiler.trace
def get_vim(request, vim_id):
LOG.debug("vim_get(): vim_id=%s", str(vim_id))
vim_instance = tackerclient(request).show_vim(vim_id)
return vim_instance
@profiler.trace
def delete_vim(request, vim_id):
LOG.debug("delete_vim():vim_id=%s", str(vim_id))
tackerclient(request).delete_vim(vim_id)
@profiler.trace
def vim_list(request, **params):
LOG.debug("vim_list(): params=%s", params)
vims = tackerclient(request).list_vims(**params).get('vims')
return vims
@profiler.trace
def events_list(request, resource_id):
params = {'resource_id': resource_id}
events = tackerclient(request).list_events(**params).get('events')
LOG.debug("events_list() params=%s events=%s l=%s", params, events,
len(events))
return events
@profiler.trace
def vnffg_list(request, **params):
LOG.debug("vnffg_list(): params=%s", params)
vnffgs = tackerclient(request).list_vnffgs(**params).get('vnffgs')
return vnffgs
@profiler.trace
def vnffgd_list(request, **params):
LOG.debug("vnffgd_list(): params=%s", params)
vnffgds = tackerclient(request).list_vnffgds(**params).get('vnffgds')
return vnffgds
@profiler.trace
def create_vnffgd(request, tosca_body=None, **params):
LOG.debug("create_vnffgd(): params=%s", params)
vnffgd_instance = tackerclient(request).create_vnffgd(body=tosca_body)
return vnffgd_instance
@profiler.trace
def create_vnffg(request, vnffg_arg, **params):
LOG.debug("create_vnffg(): vnf_arg=%s", str(vnffg_arg))
vnffg_instance = tackerclient(request).create_vnffg(body=vnffg_arg)
return vnffg_instance
@profiler.trace
def get_vnffgd(request, vnffgd_id):
LOG.debug("vnffgd_get(): vnffgd_id=%s", str(vnffgd_id))
vnffgd = tackerclient(request).show_vnffgd(vnffgd_id)
return vnffgd
@profiler.trace
def get_vnffg(request, vnffg_id):
LOG.debug("vnffg_get(): vnffg_id=%s", str(vnffg_id))
vnffg_instance = tackerclient(request).show_vnffg(vnffg_id)
return vnffg_instance
@profiler.trace
def delete_vnffg(request, vnffg_id):
LOG.debug("delete_vnffg():vnffg_id=%s", str(vnffg_id))
tackerclient(request).delete_vnffg(vnffg_id)
@profiler.trace
def delete_vnffgd(request, vnffgd_id):
LOG.debug("delete_vnffgd():vnffgd_id=%s", str(vnffgd_id))
tackerclient(request).delete_vnffgd(vnffgd_id)
@profiler.trace
def create_nsd(request, tosca_body=None, **params):
LOG.debug("create_nsd(): params=%s", params)
nsd_instance = tackerclient(request).create_nsd(body=tosca_body)
return nsd_instance
@profiler.trace
def nsd_list(request, **params):
LOG.debug("nsd_list(): params=%s", params)
nsds = tackerclient(request).list_nsds(**params).get('nsds')
return nsds
@profiler.trace
def get_nsd(request, nsd_id):
LOG.debug("nsd_get(): nsd_id=%s", str(nsd_id))
nsd = tackerclient(request).show_nsd(nsd_id)
return nsd
@profiler.trace
def delete_nsd(request, nsd_id):
LOG.debug("delete_nsd():nsd_id=%s", str(nsd_id))
tackerclient(request).delete_nsd(nsd_id)
@profiler.trace
def get_ns(request, ns_id):
LOG.debug("ns_get(): ns_id=%s", str(ns_id))
ns_instance = tackerclient(request).show_ns(ns_id)
return ns_instance
@profiler.trace
def delete_ns(request, ns_id):
LOG.debug("delete_ns():ns_id=%s", str(ns_id))
tackerclient(request).delete_ns(ns_id)
@profiler.trace
def ns_list(request, **params):
LOG.debug("ns_list(): params=%s", params)
nss = tackerclient(request).list_nss(**params).get('nss')
return nss
@profiler.trace
def create_ns(request, ns_arg, **params):
LOG.debug("create_ns(): ns_arg=%s", str(ns_arg))
ns_instance = tackerclient(request).create_ns(body=ns_arg)
return ns_instance
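# Rough usage sketch from a Horizon view (illustrative only; `request` is the
# incoming Django/Horizon request carrying the user's keystone token):
#
#   vnfds = vnfd_list(request)
#   vnfs = vnf_list(request)
#   events = events_list(request, vnfs[0]['id'])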
|
StarcoderdataPython
|
275100
|
from globals import *
import alife
import logging
import random
import os
def prettify_string_array(array, max_length):
"""Returns a human readable string from an array of strings."""
_string = ''
_i = 0
for entry in array:
if len(_string) > max_length:
_string += ', and %s more.' % (_i+1)
break
if _i == 0:
_string += entry
elif 0<_i<len(array)-1:
_string += ', %s' % entry
elif _i == len(array)-1:
_string += ' and %s.' % entry
_i += 1
return _string
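# Illustrative behaviour (assumed example, not from the original module):
#   prettify_string_array(['apples', 'pears', 'plums'], 80)
#     -> 'apples, pears and plums.'
# When the accumulated string exceeds max_length, the remaining entries are
# summarized with a trailing ', and N more.'.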
def get_name(life):
return ' '.join(life['name'])
def get_real_direction(direction, short=False):
if abs(direction)<22 or abs(direction-360)<22:
if short:
return 'e'
return 'east'
elif abs(direction-45)<22:
if short:
return 'ne'
return 'northeast'
elif abs(direction-90)<22:
if short:
return 'n'
return 'north'
elif abs(direction-135)<22:
if short:
return 'nw'
return 'northwest'
elif abs(direction-180)<22:
if short:
return 'w'
return 'west'
elif abs(direction-225)<22:
if short:
return 'sw'
return 'southwest'
elif abs(direction-270)<22:
if short:
return 's'
return 'south'
elif abs(direction-315)<22:
if short:
return 'se'
return 'southeast'
else:
if short:
return 'e'
return 'east'
def get_real_distance(distance):
"""Returns the real-life representation of a distance."""
if SETTINGS['distance unit'] == 'Yards':
return distance*YARDS
else:
return distance*METERS
def get_real_distance_string(distance, round_up=False):
_distance = get_real_distance(distance)
_mods = ''
if round_up:
_distance = int(round(_distance))
if not _distance == 1:
_mods = 's'
if SETTINGS['distance unit'] == 'Yards':
return '%s yd%s' % (_distance, _mods)
return '%s m%s' % (_distance, _mods)
def get_name_ownership(life, pronoun=False):
if pronoun:
if life['type'] == 'humanoid':
return 'his'
else:
return 'its'
return '%s\'s' % ' '.join(life['name'])
def get_introduction(life, posession=False):
if 'player' in life:
if posession:
return 'Your'
return 'You'
if life['type'] == 'humanoid':
if posession:
return '%s\'s' % get_name(life)
else:
return get_name(life)
else:
#TODO: Check limb conditions
if posession:
return 'The %s\'s' % life['species']
else:
return 'The %s' % life['species']
def _load_strings(a, directory, filenames):
for filename in [f for f in filenames if f.count('.txt')]:
_map_name = filename.strip('.txt')
TEXT_MAP[_map_name] = []
with open(os.path.join(directory, filename), 'r') as e:
TEXT_MAP[_map_name].extend([line.strip() for line in e.readlines()])
def load_strings():
#TODO: Use better walk, like one in profiles.py
try:
os.path.walk(TEXT_DIR, _load_strings, None)
load_dialog()
except Exception, e:
raise Exception(e)
def load_dialog():
with open(os.path.join(TEXT_DIR, 'dialog.txt')) as f:
for line in f.readlines():
line = line.rstrip()
if not line or line.startswith('#'):
continue
try:
_gist, _requirements, _text, _result = line.split(':')
except:
raise Exception('Error in dialog (wrong number of arguments): %s' % line)
_dialog = {'gist': _gist,
'requirements': _requirements.split(','),
'text': _text,
'result': _result}
if _gist in DIALOG_TOPICS:
DIALOG_TOPICS[_gist].append(_dialog)
else:
DIALOG_TOPICS[_gist] = [_dialog]
logging.debug('Loaded dialog.')
def generate_place_name():
if not TEXT_MAP['places']:
return 'Zoolandia %s' % WORLD_INFO['ticks']
return TEXT_MAP['places'].pop(random.randint(0, len(TEXT_MAP['places'])-1))
def generate_scheme_title():
return TEXT_MAP['nouns'][random.randint(0, len(TEXT_MAP['nouns'])-1)]
def generate_first_and_last_name_from_species(species):
_map_first_names = '%s_first_names' % species
_map_last_names = '%s_last_names' % species
if not TEXT_MAP[_map_first_names] or not TEXT_MAP[_map_last_names]:
return ('Wayne', 'Brady')
_first_name = TEXT_MAP[_map_first_names].pop(random.randint(0, len(TEXT_MAP[_map_first_names])-1))
_last_name = TEXT_MAP[_map_last_names].pop(random.randint(0, len(TEXT_MAP[_map_last_names])-1))
return (_first_name, _last_name)
def format_injury(injury):
if injury['lodged_item']:
return 'a %s lodged in the %s' % (ITEMS[injury['lodged_item']]['name'], injury['limb'])
elif injury['artery_ruptured']:
return 'a ruptured artery in the %s' % injury['limb']
elif injury['cut']:
return 'a cut to the %s' % injury['limb']
return 'nothing in particular.'
def generate_memory_phrase(memory):
_details = [key for key in memory.keys() if not key == 'text']
_memory_age = WORLD_INFO['ticks']-memory['time_created']
_topic = memory['text']
if _topic == 'friendly':
return '%s seems like a good guy.' % (' '.join(LIFE[memory['target']]['name']))
else:
print 'DIDNT HAVE A PHRASE FOR',_topic
|
StarcoderdataPython
|
6586793
|
<filename>utils/make_syngcn_data.py<gh_stars>0
import argparse
import spacy
import bisect
from pathlib import Path
from tqdm import tqdm
from file_loader import Fileloader
class Text2format:
def __init__(self, voc2id, id2freq, max_len):
self.did = 0
self.voc2id = voc2id
self.id2freq = id2freq
self.de2id = dict()
self.max_len = max_len
def text2format(self, texts: list) -> list:
nlp = spacy.load('ja_ginza')
docs = nlp.pipe(texts, disable=['ner'])
output = []
for doc in docs:
if len(doc) < 2:
print("skip column because token length is less than 2")
continue
elif len(doc) > self.max_len:
print("skip column because token length is larger than max_len:{}".format(self.max_len))
continue
doc_info = []
token_ids = []
token_deps = []
token_index_memorizer = []
for token in doc:
if token.text not in self.voc2id:
token_index_memorizer.append(token.i)
continue
token_ids.append(str(self.voc2id[token.text]))
for token in doc:
if token.dep_ == "ROOT" or str(token.head) not in self.voc2id or str(token.text) not in self.voc2id:
if str(token.head) not in self.voc2id:
print(len(doc), token.i, token.text, token.head)
continue
if token.dep_ not in self.de2id:
self.de2id[token.dep_] = self.did
self.did += 1
dep = '{}|{}|{}'.format(token_index_adjuster(token.head.i, token_index_memorizer),
token_index_adjuster(token.i, token_index_memorizer),
self.de2id[token.dep_])
token_deps.append(dep)
doc_info.append(str(len(token_ids)))
doc_info.append(str(len(token_deps)))
doc_info.extend(token_ids)
doc_info.extend(token_deps)
output.append(' '.join(doc_info))
return output
def text2format_all(self, file_pathes: list, format: str, text_fields: list = None):
output_all = []
loader = Fileloader(format, text_fields)
for file_path in tqdm(file_pathes):
texts = loader.load(file_path)
output = self.text2format(texts)
output_all.extend(output)
return output_all
def token_index_adjuster(n: int, memorizer: list) -> int:
if not(memorizer):
return n
else:
minus = bisect.bisect_right(memorizer, n)
return n - minus
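# Example (assumed, for illustration): with skipped token indices [1, 4],
#   token_index_adjuster(0, [1, 4]) -> 0
#   token_index_adjuster(5, [1, 4]) -> 3   (shifted left past both gaps)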
def read_suppliment_file(voc2id_file: Path, id2freq_file: Path) -> (dict, dict):
voc2id = dict()
id2freq = dict()
with voc2id_file.open(mode='r') as f:
for line in f:
vi = line.strip().split()
i = vi[-1]
voc = ' '.join(vi[:-1])
voc2id[voc] = int(i)
with id2freq_file.open(mode='r') as f:
for line in f:
i, f = line.strip().split()
id2freq[int(i)] = int(f)
return voc2id, id2freq
def save(output_dir: Path, output: list, formatter: Text2format):
output_dir.mkdir(parents=True, exist_ok=True)
output_file = output_dir / 'data.txt'
de2id_file = output_dir / 'de2id.txt'
with output_file.open(mode='w') as f:
for line in output:
print(line, file=f)
with de2id_file.open(mode='w') as f:
for k, v in formatter.de2id.items():
print('{}\t{}'.format(k, str(v)), file=f)
def main(args):
target_dir = Path(args.indir)
output_dir = Path(args.outdir)
voc2id_file = Path(args.v2id)
id2freq_file = Path(args.id2f)
file_format = args.format
text_fields = args.text_fields.strip().split(',')
max_len = args.max_len
file_pathes = list(target_dir.glob("**/*"))
voc2id, id2freq = read_suppliment_file(voc2id_file, id2freq_file)
formatter = Text2format(voc2id, id2freq, max_len)
output = formatter.text2format_all(file_pathes, file_format, text_fields)
save(output_dir, output, formatter)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-i', dest='indir', help="target dir")
parser.add_argument('-v', dest='v2id', help="file path to vocab2id")
parser.add_argument('-f', dest='id2f', help="file path to id2freq")
parser.add_argument('-o', dest='outdir', help="output file name")
parser.add_argument('--format', default='txt', help="select file format txt or jsonl")
parser.add_argument('--text_fields', help="set json's textfields as csv")
parser.add_argument('--max_len', default=80, type=int, help='set maximum length of a sentence')
args = parser.parse_args()
main(args)
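# Example invocation (all paths and field names are placeholders, not from the
# original repository):
#   python make_syngcn_data.py -i corpus_dir/ -v voc2id.txt -f id2freq.txt \
#       -o out_dir/ --format jsonl --text_fields text,title --max_len 80
# This writes out_dir/data.txt in the SynGCN input format and out_dir/de2id.txt
# mapping dependency relation labels to ids.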
|
StarcoderdataPython
|