[Dataset index header. Each record in this dump carries: hexsha | size | ext | lang | repo path, repo name, repo head hexsha and licenses (repeated for the max-stars, max-issues and max-forks variants) | star/issue/fork counts with min/max event datetimes | the file content | avg_line_length | max_line_length | alphanum_fraction | counts and scores for classes, generators, decorators, async functions and documentation.]
--- code/service/flaskapp/flaskapp.py | Wyss/evolvulator | MIT | 4,217 bytes ---
"""
Copyright (c) 2012 Wyss Institute at Harvard University
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
http://www.opensource.org/licenses/mit-license.php
"""
"""
flaskapp.py
"""
from flask import Flask, g, request, render_template, jsonify, make_response, send_from_directory
from werkzeug.exceptions import HTTPException, NotFound
from os.path import dirname, basename, split, abspath
from os.path import join as op_join
import random
import sys
import inspect
from experimentcore.exp_dbifc import setupExperiment
exp_dict = setupExperiment('evolvulator')
app = Flask(__name__)
app.config.from_object(__name__)
app.wsport = 9000 # default
@app.route('/favicon.ico')
def favicon():
return send_from_directory(op_join(app.root_path, 'static'),
'faviconSQ.ico', mimetype='image/png')
#end def
@app.route('/')
def index():
# simply render all of the available jobs
job_list = [{'jobname':key} for key in exp_dict.keys()] # create
return render_template('index.html', joblist=job_list)
# end def
@app.route('/experiment/_update_parameters/<job_name>')
def update_parameters(job_name=""):
print "Received parameters"
if job_name in exp_dict:
dbifc = exp_dict[job_name] # get the correct database interface
new_params = request.args
updatedindex = dbifc.updateParamDict(new_params)
print "we've updated", updatedindex
return jsonify(result="updated"+str(updatedindex))
else:
return request.args
# end def
@app.route("/experiment/<job_name>")
def show_experiment(job_name=""):
# print "begin show experiment"
if job_name in exp_dict:
dbifc = exp_dict[job_name] # get the correct database interface
# print "begin listify params"
params = dbifc.jsonListifyParameters()
core_params = {'jobname':job_name, 'url':dbifc.getURL()}
print "Show experiment"
try:
return render_template('experiment.html', core=core_params, parameters=params, wsport=app.wsport)
except:
print "Unexpected error:", sys.exc_info()[0]
print "In: ", inspect.stack()[1][3]
print "Bad rendering of Template"
return
else:
return page_not_found(NotFound(), errormessage="This ain't no job")
# end def
@app.errorhandler(404)
def page_not_found(error, errormessage=""):
return render_template('error404.html', message=errormessage), 404
@app.errorhandler(500)
def server_problem(error):
return render_template('error500.html', message=error), 500
@app.route('/experiment/_get_data/<job_name>')
def get_data(job_name=""):
print "Get data"
if job_name in exp_dict:
dbifc = exp_dict[job_name] # get the correct database interface
print "getting data"
data = edb.getDataDict(g.db)
return jsonify(data)
# end def
# @app.before_request
# def before_request():
# """Make sure we are connected to the database each request."""
# g.db = edb.connectToDB(thedatabase)
#
#
# @app.teardown_request
# def teardown_request(exception):
# """Closes the database again at the end of the request."""
# if hasattr(g, 'db'):
# g.db.close()
# # end def

--- ena-dts/framework/rst.py | amzn/amzn-ec2-ena-utilities | MIT-0 | 5,383 bytes ---
# BSD LICENSE
#
# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import shutil
import re
from exception import VerifyFailure
"""
Generate Rst Test Result Report
Example:
import rst
rst.write_title("Test Case: " + test_case.__name__)
out = table.draw()
rst.write_text('\n' + out + '\n\n')
rst.write_result("PASS")
Result:
<copyright>
<Prerequisites>
Test Case: CASE
---------------
Result: PASS
"""
path2Plan = 'test_plans'
path2Result = 'output'
class RstReport(object):
def __init__(self, crbName, target, nic, suite, perf=False):
"""
copy desc from #Name#_test_plan.rst to TestResult_#Name#.rst
"""
try:
path = [path2Result, crbName, target, nic]
# ensure the level folder exist
for node in range(0, len(path)):
if not os.path.exists('/'.join(path[:node + 1])):
for level in range(node, len(path)):
os.mkdir('/'.join(path[:level + 1]))
break
self.rstName = "%s/TestResult_%s.rst" % ('/'.join(path), suite)
rstReport = open(self.rstName, 'w')
if perf is True:
self.rstAnnexName = "%s/TestResult_%s_Annex.rst" % (
'/'.join(path), suite)
rstAnnexReport = open(self.rstAnnexName, 'w')
f = open("%s/%s_test_plan.rst" % (path2Plan, suite), 'r')
for line in f:
if line[:13] == "Prerequisites":
break
rstReport.write(line)
if perf is True:
rstAnnexReport.write(line)
f.close()
rstReport.close()
except Exception as e:
raise VerifyFailure("RST Error: " + str(e))
def clear_all_rst(self, crbName, target):
path = [path2Result, crbName, target]
shutil.rmtree('/'.join(path), True)
def write_title(self, text):
"""
write case title Test Case: #Name#
-----------------
"""
line = "\n%s\n" % text
with open(self.rstName, "a") as f:
f.write(line)
f.write('-' * len(line) + '\n')
def write_annex_title(self, text):
"""
write annex to test case title Annex to #Name#
-----------------
"""
line = "\n%s\n" % text
with open(self.rstAnnexName, "a") as f:
f.write(line)
f.write('-' * len(line) + '\n')
def write_text(self, text, annex=False):
rstFile = self.rstAnnexName if annex else self.rstName
with open(rstFile, "a") as f:
f.write(text)
def write_frame(self, text, annex=False):
self.write_text("\n::\n\n", annex)
parts = re.findall(r'\S+', text)
text = ""
length = 0
for part in parts:
if length + len(part) > 75:
text = text + "\n" + " " + part
length = len(part)
else:
length = length + len(part)
text = text + " " + part
self.write_text(text, annex)
self.write_text("\n\n", annex)
def write_result(self, result):
with open(self.rstName, "a") as f:
f.write("\nResult: " + result + "\n")
def include_image(self, image, width=90):
"""
Includes an image in the RST file.
The argument must include path, name and extension.
"""
with open(self.rstName, "a") as f:
f.write(".. image:: %s\n :width: %d%%\n\n" % (image, width))
def report(self, text, frame=False, annex=False):
"""
Save report text into rst file.
"""
if frame:
self.write_frame(text, annex)
else:
self.write_text(text, annex)

--- tests/test_models.py | kajigga/dj-pylti | BSD-3-Clause | 331 bytes ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_dj-pylti
------------
Tests for `dj-pylti` models module.
"""
from django.test import TestCase
from dj_pylti import models
class TestDj_pylti(TestCase):
def setUp(self):
pass
def test_something(self):
pass
def tearDown(self):
pass

--- mutation.py | nklapste/mutation | MIT | 30,467 bytes ---
"""Mutation.
Usage:
mutation play [--verbose] [--exclude=<globs>] [--only-deadcode-detection] [--include=<globs>] [--sampling=<s>] [--randomly-seed=<n>] [--max-workers=<n>] [<file-or-directory> ...] [-- TEST-COMMAND ...]
mutation replay [--verbose] [--max-workers=<n>]
mutation list
mutation show MUTATION
mutation apply MUTATION
mutation (-h | --help)
mutation --version
Options:
--verbose Show more information.
-h --help Show this screen.
--version Show version.
"""
import asyncio
import fnmatch
import functools
import itertools
import os
import random
import re
import shlex
import sys
import time
from ast import Constant
from concurrent import futures
from contextlib import contextmanager
from copy import deepcopy
from datetime import timedelta
from difflib import unified_diff
from uuid import UUID
import lexode
import parso
import pygments
import pygments.formatters
import pygments.lexers
import zstandard as zstd
from aiostream import pipe, stream
from astunparse import unparse
from coverage import Coverage
from docopt import docopt
from humanize import precisedelta
from loguru import logger as log
from lsm import LSM
from pathlib3x import Path
from termcolor import colored
from tqdm import tqdm
from ulid import ULID
__version__ = (0, 4, 4)
MINUTE = 60 # seconds
HOUR = 60 * MINUTE
DAY = 24 * HOUR
MONTH = 31 * DAY
def humanize(seconds):
if seconds < 1:
precision = "seconds"
elif seconds // DAY != 0:
precision = "days"
    elif seconds // HOUR != 0:
        precision = "hours"
    elif seconds // MINUTE != 0:
        precision = "minutes"
else:
precision = "seconds"
return precisedelta(timedelta(seconds=seconds), minimum_unit=precision)
PRONOTION = "https://youtu.be/ihZEaj9ml4w?list=PLOSNaPJYYhrtliZqyEWDWL0oqeH0hOHnj"
log.remove()
if os.environ.get("DEBUG", False):
log.add(
sys.stdout,
format="<level>{level}</level> {message}",
level="TRACE",
colorize=True,
enqueue=True,
)
else:
log.add(
sys.stdout,
format="<level>{level}</level> {message}",
level="INFO",
colorize=True,
enqueue=True,
)
# The function patch was taken somewhere over the rainbow...
_hdr_pat = re.compile(r"^@@ -(\d+),?(\d+)? \+(\d+),?(\d+)? @@$")
def patch(diff, source):
"""Apply unified diff patch to string s to recover newer string. If
revert is True, treat s as the newer string, recover older string.
"""
s = source.splitlines(True)
p = diff.splitlines(True)
t = ""
i = sl = 0
(midx, sign) = (1, "+")
while i < len(p) and p[i].startswith(("---", "+++")):
i += 1 # skip header lines
while i < len(p):
m = _hdr_pat.match(p[i])
if not m:
raise Exception("Cannot process diff")
i += 1
l = int(m.group(midx)) - 1 + (m.group(midx + 1) == "0")
t += "".join(s[sl:l])
sl = l
while i < len(p) and p[i][0] != "@":
if i + 1 < len(p) and p[i + 1][0] == "\\":
line = p[i][:-1]
i += 2
else:
line = p[i]
i += 1
if len(line) > 0:
if line[0] == sign or line[0] == " ":
t += line[1:]
sl += line[0] != sign
t += "\n" + "".join(s[sl:])
return t
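# Illustrative round-trip (example values only, not from the original project):
#
#     old = "x = 1\ny = 2\n"
#     new = "x = 1\ny = 3\n"
#     d = diff(old, new, "demo.py")   # `diff` is defined further down
#     patched = patch(d, old)         # rebuilds the mutated source from the diff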
def glob2predicate(patterns):
def regex_join(regexes):
"""Combine a list of regexes into one that matches any of them."""
return "|".join("(?:%s)" % r for r in regexes)
regexes = (fnmatch.translate(pattern) for pattern in patterns)
regex = re.compile(regex_join(regexes))
def predicate(path):
return regex.match(path) is not None
return predicate
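# For example (illustrative): glob2predicate(["*.py", "src/*"]) returns a
# predicate that is True for "foo.py" or "src/bar" and False otherwise.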
def node_iter(node, level=1):
yield node
for child in node.children:
if not getattr(child, "children", False):
yield child
continue
yield from node_iter(child, level + 1)
def node_copy_tree(node, index):
root = node.get_root_node()
root = deepcopy(root)
iterator = itertools.dropwhile(
lambda x: x[0] != index, zip(itertools.count(0), node_iter(root))
)
index, node = next(iterator)
return root, node
@contextmanager
def timeit():
start = time.perf_counter()
yield lambda: time.perf_counter() - start
class Mutation(type):
ALL = set()
DEADCODE = set()
deadcode_detection = False
def __init__(cls, *args, **kwargs):
super().__init__(*args, **kwargs)
obj = cls()
type(cls).ALL.add(obj)
if cls.deadcode_detection:
type(cls).DEADCODE.add(obj)
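# Registry note (summary of the metaclass behaviour above): declaring a class
# with `metaclass=Mutation` instantiates it once and stores the instance in
# Mutation.ALL (and in Mutation.DEADCODE when the class sets
# deadcode_detection = True), so mutation_create() discovers every mutation
# defined below automatically. A hypothetical extra mutation would plug in as:
#
#     class DropDecorator(metaclass=Mutation):
#         def predicate(self, node): ...
#         def mutate(self, node, index): ...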
class StatementDrop(metaclass=Mutation):
deadcode_detection = True
NEWLINE = "a = 42\n"
def predicate(self, node):
return "stmt" in node.type and node.type != "expr_stmt"
def mutate(self, node, index):
root, new = node_copy_tree(node, index)
index = new.parent.children.index(new)
passi = parso.parse("pass").children[0]
passi.prefix = new.get_first_leaf().prefix
new.parent.children[index] = passi
newline = parso.parse(type(self).NEWLINE).children[0].children[1]
new.parent.children.insert(index + 1, newline)
yield root, new
class DefinitionDrop(metaclass=Mutation):
deadcode_detection = True
def predicate(self, node):
# There is also node.type = 'lambdadef' but lambadef are
# always part of a assignation statement. So, that case is
# handled in StatementDrop.
return node.type in ("classdef", "funcdef")
def mutate(self, node, index):
root, new = node_copy_tree(node, index)
new.parent.children.remove(new)
yield root, new
def chunks(iterable, n):
"""Yield successive n-sized chunks from iterable."""
it = iter(iterable)
while chunk := tuple(itertools.islice(it, n)):
yield chunk
class MutateNumber(metaclass=Mutation):
COUNT = 5
def predicate(self, node):
return node.type == "number"
def mutate(self, node, index):
value = eval(node.value)
if isinstance(value, int):
def randomize(x):
return random.randint(0, x)
else:
def randomize(x):
return random.random() * x
for size in range(8, 32):
if value < 2 ** size:
break
count = 0
while count != self.COUNT:
count += 1
root, new = node_copy_tree(node, index)
new.value = str(randomize(2 ** size))
if new.value == node.value:
continue
yield root, new
class MutateString(metaclass=Mutation):
def predicate(self, node):
# str or bytes.
return node.type == "string"
def mutate(self, node, index):
root, new = node_copy_tree(node, index)
value = eval(new.value)
if isinstance(value, bytes):
value = b"coffeebad" + value
else:
value = "mutated string " + value
value = Constant(value=value, kind="")
value = unparse(value).strip()
new.value = value
yield root, new
class MutateKeyword(metaclass=Mutation):
KEYWORDS = set(["continue", "break", "pass"])
SINGLETON = set(["True", "False", "None"])
# Support xor operator ^
BOOLEAN = set(["and", "or"])
TARGETS = KEYWORDS | SINGLETON | BOOLEAN
def predicate(self, node):
return node.type == "keyword" and node.value in type(self).TARGETS
def mutate(self, node, index):
value = node.value
for targets in [self.KEYWORDS, self.SINGLETON, self.BOOLEAN]:
if value in targets:
break
else:
raise NotImplementedError
for target in targets:
if target == value:
continue
root, new = node_copy_tree(node, index)
new.value = target
yield root, new
class Comparison(metaclass=Mutation):
def predicate(self, node):
        return node.type == "comparison"
def mutate(self, node, index):
root, new = node_copy_tree(node, index)
not_test = parso.parse("not ({})".format(new.get_code()))
index = new.parent.children.index(new)
new.parent.children[index] = not_test
        yield root, new
class MutateOperator(metaclass=Mutation):
BINARY = ["+", "-", "%", "|", "&", "//", "/", "*", "^", "**", "@"]
BITWISE = ["<<", ">>"]
COMPARISON = ["<", "<=", "==", "!=", ">=", ">"]
ASSIGNEMENT = ["="] + [x + "=" for x in BINARY + BITWISE]
# TODO support OPERATORS_CONTAINS = ["in", "not in"]
OPERATORS = [
BINARY,
BITWISE,
BITWISE,
COMPARISON,
ASSIGNEMENT,
]
def predicate(self, node):
return node.type == "operator"
def mutate(self, node, index):
for operators in type(self).OPERATORS:
if node.value not in operators:
continue
for new_operator in operators:
if node.value == new_operator:
continue
root, new = node_copy_tree(node, index)
new.value = new_operator
yield root, new
def diff(source, target, filename=""):
lines = unified_diff(
source.split("\n"), target.split("\n"), filename, filename, lineterm=""
)
out = "\n".join(lines)
return out
def mutate(node, index, mutations):
for mutation in mutations:
if not mutation.predicate(node):
continue
yield from mutation.mutate(node, index)
def interesting(new_node, coverage):
if getattr(new_node, "line", False):
return new_node.line in coverage
return new_node.get_first_leaf().line in coverage
def deltas_compute(source, path, coverage, mutations):
ast = parso.parse(source)
ignored = 0
for (index, node) in zip(itertools.count(0), node_iter(ast)):
for root, new_node in mutate(node, index, mutations):
if not interesting(new_node, coverage):
ignored += 1
continue
target = root.get_code()
delta = diff(source, target, path)
yield delta
if ignored > 1:
msg = "Ignored {} mutations from file at {}"
msg += " because there is no associated coverage."
log.trace(msg, ignored, path)
async def pool_for_each_par_map(loop, pool, f, p, iterator):
zx = stream.iterate(iterator)
zx = zx | pipe.map(lambda x: loop.run_in_executor(pool, p, x))
async with zx.stream() as streamer:
limit = pool._max_workers
unfinished = []
while True:
tasks = []
for i in range(limit):
try:
task = await streamer.__anext__()
except StopAsyncIteration:
limit = 0
else:
tasks.append(task)
tasks = tasks + list(unfinished)
if not tasks:
break
finished, unfinished = await asyncio.wait(
tasks, return_when=asyncio.FIRST_COMPLETED
)
for finish in finished:
out = finish.result()
f(out)
limit = pool._max_workers - len(unfinished)
def mutation_create(item):
path, source, coverage, mutation_predicate = item
if not coverage:
msg = "Ignoring file {} because there is no associated coverage."
log.trace(msg, path)
return []
log.trace("Mutating file: {}...", path)
mutations = [m for m in Mutation.ALL if mutation_predicate(m)]
deltas = deltas_compute(source, path, coverage, mutations)
# return the compressed deltas to save some time in the
# mainthread.
out = [(path, zstd.compress(x.encode("utf8"))) for x in deltas]
log.trace("There is {} mutations for the file `{}`", len(out), path)
return out
def install_module_loader(uid):
db = LSM(".mutation.okvslite")
mutation_show(uid.hex)
path, diff = lexode.unpack(db[lexode.pack([1, uid])])
diff = zstd.decompress(diff).decode("utf8")
with open(path) as f:
source = f.read()
patched = patch(diff, source)
import imp
components = path[:-3].split("/")
    module_path = None
    while components:
for pythonpath in sys.path:
filepath = os.path.join(pythonpath, "/".join(components))
filepath += ".py"
ok = os.path.exists(filepath)
if ok:
module_path = ".".join(components)
break
else:
components.pop()
continue
break
if module_path is None:
raise Exception("sys.path oops!")
patched_module = imp.new_module(module_path)
try:
exec(patched, patched_module.__dict__)
except Exception:
# TODO: syntaxerror, do not produce those mutations
exec("", patched_module.__dict__)
sys.modules[module_path] = patched_module
def pytest_configure(config):
mutation = config.getoption("mutation", default=None)
if mutation is not None:
uid = UUID(hex=mutation)
install_module_loader(uid)
def pytest_addoption(parser, pluginmanager):
parser.addoption("--mutation", dest="mutation", type=str)
def for_each_par_map(loop, pool, inc, proc, items):
out = []
for item in items:
item = proc(item)
item = inc(item)
out.append(item)
return out
def mutation_pass(args): # TODO: rename
command, uid, timeout = args
command = command + ["--mutation={}".format(uid.hex)]
out = run(command, timeout=timeout, silent=True)
if out == 0:
msg = "no error with mutation: {} ({})"
log.trace(msg, " ".join(command), out)
with database_open(".") as db:
db[lexode.pack([2, uid])] = b"\x00"
return False
else:
# TODO: pass root path...
with database_open(".") as db:
del db[lexode.pack([2, uid])]
return True
PYTEST = "pytest --exitfirst --no-header --tb=no --quiet --assert=plain"
PYTEST = shlex.split(PYTEST)
def coverage_read(root):
coverage = Coverage(".coverage") # use pathlib
coverage.load()
data = coverage.get_data()
filepaths = data.measured_files()
out = dict()
root = root.resolve()
for filepath in filepaths:
key = str(Path(filepath).relative_to(root))
value = set(data.lines(filepath))
print(key)
out[key] = value
return out
def database_open(root, recreate=False):
root = root if isinstance(root, Path) else Path(root)
db = root / ".mutation.okvslite"
if recreate and db.exists():
log.trace("Deleting existing database...")
for file in root.glob(".mutation.okvslite*"):
file.unlink()
if not recreate and not db.exists():
log.error("No database, can not proceed!")
sys.exit(1)
db = LSM(str(db))
return db
def run(command, timeout=None, silent=True):
if timeout and timeout < 60:
timeout = 60
if timeout:
command.insert(0, "timeout {}".format(timeout))
command.insert(0, "PYTHONDONTWRITEBYTECODE=1")
if silent and not os.environ.get("DEBUG"):
command.append("> /dev/null 2>&1")
return os.system(" ".join(command))
def sampling_setup(sampling, total):
if sampling is None:
return lambda x: x, total
if sampling.endswith("%"):
# randomly choose percent mutations
cutoff = float(sampling[:-1]) / 100
def sampler(iterable):
for item in iterable:
value = random.random()
if value < cutoff:
yield item
total = int(total * cutoff)
elif sampling.isdigit():
# otherwise, it is the first COUNT mutations that are used.
total = int(sampling)
def sampler(iterable):
remaining = total
for item in iterable:
yield item
remaining -= 1
if remaining == 0:
return
else:
msg = "Sampling passed via --sampling option must be a positive"
msg += " integer or a percentage!"
log.error(msg)
sys.exit(2)
if sampling:
log.info("Taking into account sampling there is {} mutations.", total)
return sampler, total
# TODO: the `command` is a hack, maybe there is a way to avoid the
# following code: `if command is not None.
def check_tests(root, seed, arguments, command=None):
max_workers = arguments["--max-workers"] or (os.cpu_count() - 1) or 1
max_workers = int(max_workers)
log.info("Let's check that the tests are green...")
if arguments["<file-or-directory>"] and arguments["TEST-COMMAND"]:
log.error("<file-or-directory> and TEST-COMMAND are exclusive!")
sys.exit(1)
if command is not None:
command = list(command)
if max_workers > 1:
command.extend(
[
# Use pytest-xdist to make sure it is possible to run the
# tests in parallel
"--numprocesses={}".format(max_workers),
]
)
else:
if arguments["TEST-COMMAND"]:
command = list(arguments["TEST-COMMAND"])
else:
command = list(PYTEST)
command.extend(arguments["<file-or-directory>"])
if max_workers > 1:
command.append(
# Use pytest-xdist to make sure it is possible to run
# the tests in parallel
"--numprocesses={}".format(max_workers)
)
command.extend(
[
# Setup coverage options to only mutate what is tested.
"--cov=.",
"--cov-branch",
"--no-cov-on-fail",
# Pass random seed
"--randomly-seed={}".format(seed),
]
)
with timeit() as alpha:
out = run(command)
if out == 0:
log.info("Tests are green 💚")
alpha = alpha() * max_workers
else:
msg = "Tests are not green... return code is {}..."
log.warning(msg, out)
log.warning("I tried the following command: `{}`", " ".join(command))
# Same command without parallelization
if arguments["TEST-COMMAND"]:
command = list(arguments["TEST-COMMAND"])
else:
command = list(PYTEST)
command.extend(arguments["<file-or-directory>"])
command += [
# Setup coverage options to only mutate what is tested.
"--cov=.",
"--cov-branch",
"--no-cov-on-fail",
# Pass random seed
"--randomly-seed={}".format(seed),
]
with timeit() as alpha:
out = run(command)
if out != 0:
msg = "Tests are definitly red! Return code is {}!!"
log.error(msg, out)
log.error("I tried the following command: `{}`", " ".join(command))
sys.exit(2)
# Otherwise, it is possible to run the tests but without
# parallelization.
msg = "Setting max_workers=1 because tests do not pass in parallel"
log.warning(msg)
max_workers = 1
alpha = alpha()
msg = "Time required to run the tests once: {}..."
log.info(msg, humanize(alpha))
return alpha, max_workers
def mutation_only_deadcode(x):
return getattr(x, "deadcode_detection", False)
def mutation_all(x):
return True
async def play_create_mutations(loop, root, db, max_workers, arguments):
# Go through all files, and produce mutations, take into account
# include pattern, and exclude patterns. Also, exclude what has
# no coverage.
include = arguments.get("--include") or "*.py"
include = include.split(",")
include = glob2predicate(include)
exclude = arguments.get("--exclude") or "*test*"
exclude = exclude.split(",")
exclude = glob2predicate(exclude)
filepaths = root.rglob("*.py")
filepaths = (x for x in filepaths if include(str(x)) and not exclude(str(x)))
# setup coverage support
coverage = coverage_read(root)
only_dead_code = arguments["--only-deadcode-detection"]
if only_dead_code:
mutation_predicate = mutation_only_deadcode
else:
mutation_predicate = mutation_all
def make_item(filepath):
with filepath.open() as f:
content = f.read()
out = (
str(filepath),
content,
coverage.get(str(filepath), set()),
mutation_predicate,
)
return out
items = (make_item(x) for x in filepaths if coverage.get(str(x), set()))
# Start with biggest files first, because that is those that will
# take most time, that way, it will make most / best use of the
# workers.
items = sorted(items, key=lambda x: len(x[1]), reverse=True)
# prepare to create mutations
total = 0
log.info("Crafting mutations from {} files...", len(items))
with tqdm(total=len(items), desc="Files") as progress:
def on_mutations_created(items):
nonlocal total
progress.update()
total += len(items)
for path, delta in items:
# TODO: replace ULID with a content addressable hash.
uid = ULID().to_uuid()
# delta is a compressed unified diff
db[lexode.pack([1, uid])] = lexode.pack([path, delta])
with timeit() as delta:
with futures.ProcessPoolExecutor(max_workers=max_workers) as pool:
await pool_for_each_par_map(
loop, pool, on_mutations_created, mutation_create, items
)
log.info("It took {} to compute mutations...", humanize(delta()))
log.info("The number of mutation is {}!", total)
return total
async def play_mutations(loop, db, seed, alpha, total, max_workers, arguments):
# prepare to run tests against mutations
command = list(arguments["TEST-COMMAND"] or PYTEST)
command.append("--randomly-seed={}".format(seed))
command.extend(arguments["<file-or-directory>"])
eta = humanize(alpha * total / max_workers)
log.success("It will take at most {} to run the mutations", eta)
timeout = alpha * 2
uids = db[lexode.pack([1]) : lexode.pack([2])]
uids = ((command, lexode.unpack(key)[1], timeout) for (key, _) in uids)
# sampling
sampling = arguments["--sampling"]
sampler, total = sampling_setup(sampling, total)
uids = sampler(uids)
step = 10
gamma = time.perf_counter()
remaining = total
log.info("Testing mutations in progress...")
with tqdm(total=100) as progress:
def on_progress(_):
nonlocal remaining
nonlocal step
nonlocal gamma
remaining -= 1
if (remaining % step) == 0:
percent = 100 - ((remaining / total) * 100)
now = time.perf_counter()
delta = now - gamma
eta = (delta / step) * remaining
progress.update(int(percent))
progress.set_description("ETA {}".format(humanize(eta)))
msg = "Mutation tests {:.2f}% done..."
log.debug(msg, percent)
log.debug("ETA {}...", humanize(eta))
for speed in [10_000, 1_000, 100, 10, 1]:
if total // speed == 0:
continue
step = speed
break
gamma = time.perf_counter()
with timeit() as delta:
with futures.ThreadPoolExecutor(max_workers=max_workers) as pool:
await pool_for_each_par_map(
loop, pool, on_progress, mutation_pass, uids
)
errors = len(list(db[lexode.pack([2]) : lexode.pack([3])]))
if errors > 0:
msg = "It took {} to compute {} mutation failures!"
log.error(msg, humanize(delta()), errors)
else:
msg = "Checking that the test suite is strong against mutations took:"
msg += " {}... And it is a success 💚"
log.info(msg, humanize(delta()))
return errors
async def play(loop, arguments):
root = Path(".")
seed = arguments["--randomly-seed"] or int(time.time())
log.info("Using random seed: {}".format(seed))
random.seed(seed)
alpha, max_workers = check_tests(root, seed, arguments)
with database_open(root, recreate=True) as db:
# store arguments used to execute command
if arguments["TEST-COMMAND"]:
command = list(arguments["TEST-COMMAND"])
else:
command = list(PYTEST)
command += arguments["<file-or-directory>"]
command = dict(
command=command,
seed=seed,
)
value = list(command.items())
db[lexode.pack((0, "command"))] = lexode.pack(value)
# let's create mutations!
count = await play_create_mutations(loop, root, db, max_workers, arguments)
# Let's run tests against mutations!
await play_mutations(loop, db, seed, alpha, count, max_workers, arguments)
def mutation_diff_size(db, uid):
_, diff = lexode.unpack(db[lexode.pack([1, uid])])
out = len(zstd.decompress(diff))
return out
def replay_mutation(db, uid, alpha, seed, max_workers, command):
log.info("* Use Ctrl+C to exit.")
command = list(command)
command.append("--randomly-seed={}".format(seed))
max_workers = 1
if max_workers > 1:
command.append("--numprocesses={}".format(max_workers))
timeout = alpha * 2
while True:
ok = mutation_pass((command, uid, timeout))
if not ok:
mutation_show(uid.hex)
msg = "* Type 'skip' to go to next mutation or just enter to retry."
log.info(msg)
skip = input().startswith("s")
if skip:
db[lexode.pack([2, uid])] = b"\x01"
return
# Otherwise loop to re-test...
else:
del db[lexode.pack([2, uid])]
return
def replay(arguments):
root = Path(".")
with database_open(root) as db:
command = db[lexode.pack((0, "command"))]
command = lexode.unpack(command)
command = dict(command)
seed = command.pop("seed")
random.seed(seed)
command = command.pop("command")
alpha, max_workers = check_tests(root, seed, arguments, command)
with database_open(root) as db:
while True:
uids = (
lexode.unpack(k)[1] for k, v in db[lexode.pack([2]) :] if v == b"\x00"
)
uids = sorted(
uids,
key=functools.partial(mutation_diff_size, db),
reverse=True,
)
if not uids:
log.info("No mutation failures 👍")
sys.exit(0)
while uids:
uid = uids.pop(0)
replay_mutation(db, uid, alpha, seed, max_workers, command)
def mutation_list():
with database_open(".") as db:
uids = ((lexode.unpack(k)[1], v) for k, v in db[lexode.pack([2]) :])
uids = sorted(uids, key=lambda x: mutation_diff_size(db, x[0]), reverse=True)
if not uids:
log.info("No mutation failures 👍")
sys.exit(0)
for (uid, type) in uids:
log.info("{}\t{}".format(uid.hex, "skipped" if type == b"\x01" else ""))
def mutation_show(uid):
uid = UUID(hex=uid)
log.info("mutation show {}", uid.hex)
log.info("")
with database_open(".") as db:
path, diff = lexode.unpack(db[lexode.pack([1, uid])])
diff = zstd.decompress(diff).decode("utf8")
terminal256 = pygments.formatters.get_formatter_by_name("terminal256")
python = pygments.lexers.get_lexer_by_name("python")
print(diff)
for line in diff.split("\n"):
if line.startswith("+++"):
delta = colored("+++", "green", attrs=["bold"])
highlighted = pygments.highlight(line[3:], python, terminal256)
log.info(delta + highlighted.rstrip())
elif line.startswith("---"):
delta = colored("---", "red", attrs=["bold"])
highlighted = pygments.highlight(line[3:], python, terminal256)
log.info(delta + highlighted.rstrip())
elif line.startswith("+"):
delta = colored("+", "green", attrs=["bold"])
highlighted = pygments.highlight(line[1:], python, terminal256)
log.info(delta + highlighted.rstrip())
elif line.startswith("-"):
delta = colored("-", "red", attrs=["bold"])
highlighted = pygments.highlight(line[1:], python, terminal256)
log.info(delta + highlighted.rstrip())
else:
highlighted = pygments.highlight(line, python, terminal256)
log.info(highlighted.rstrip())
def mutation_apply(uid):
uid = UUID(hex=uid)
with database_open(".") as db:
path, diff = lexode.unpack(db[lexode.pack([1, uid])])
diff = zstd.decompress(diff).decode("utf8")
with open(path, "r") as f:
source = f.read()
patched = patch(diff, source)
with open(path, "w") as f:
f.write(patched)
def main():
arguments = docopt(__doc__, version=__version__)
if arguments.get("--verbose", False):
log.remove()
log.add(
sys.stdout,
format="<level>{level}</level> {message}",
level="DEBUG",
colorize=True,
enqueue=True,
)
log.debug("Mutation at {}", PRONOTION)
log.trace(arguments)
if arguments["replay"]:
replay(arguments)
sys.exit(0)
if arguments.get("list", False):
mutation_list()
sys.exit(0)
if arguments.get("show", False):
mutation_show(arguments["MUTATION"])
sys.exit(0)
if arguments.get("apply", False):
mutation_apply(arguments["MUTATION"])
sys.exit(0)
# Otherwise run play.
loop = asyncio.get_event_loop()
loop.run_until_complete(play(loop, arguments))
loop.close()
if __name__ == "__main__":
main()

--- main.py | bijilap/ColorRecognition | Apache-2.0 | 1,058 bytes ---
import argparse
from ColorDetector import ColorDetector
def main():
detector = ColorDetector()
parser = argparse.ArgumentParser()
# --k : number of clusters, --image: image path, --debug: debug level
parser.add_argument("--k", nargs=1, type=int, help='maximum number of colors to be identified. Default:10')
parser.add_argument("--n", nargs=1, type=int, help='number of top dominant colors to be displayed')
parser.add_argument("--image", nargs=1, required=True, help='full path of image to be processed')
parser.add_argument("--debug", nargs=1, type=int, help='debug level: 1 for debug mode, 0: no log messages')
args = parser.parse_args()
img_name = None
n = 4
if args.k:
detector.NUM_OF_CLUSTERS = int(args.k[0])
if args.image:
img_name = args.image[0]
if args.debug:
detector.log_level = int(args.debug[0])
if args.n:
n = int(args.n[0])
image = detector.readImage(img_name)
detector.getDominantColors(image, n)
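# Example invocation (illustrative; the image path is a placeholder):
#   python main.py --image sample.jpg --k 8 --n 3 --debug 1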
if __name__ == "__main__":
main()

--- desafio64.py | DantonMatheus/desafios-python | MIT | 286 bytes ---
print('===== DESAFIO 64 =====')
num = 0
cont = 0
soma = 0
num = int(input('Digite um número [999 para SAIR]: '))
while num != 999:
soma += num
cont += 1
num = int(input('Digite um número [999 para SAIR]: '))
print(f'Você digitou {cont} números! A soma entre eles é {soma}')

--- app/app.py | tigpt/docker-flask-postgres | MIT | 2,062 bytes ---
from elasticapm.contrib.flask import ElasticAPM
import os
from flask import Flask, request, render_template
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
APP = Flask(__name__)
APP.config['ELASTIC_APM'] = {
}
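# The dict is left empty in this file; typical keys for the Flask integration
# are 'SERVICE_NAME', 'SERVER_URL' and 'SECRET_TOKEN', and the agent can also
# read them from ELASTIC_APM_* environment variables (added context, not part
# of the original source).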
apm = ElasticAPM(APP)
APP.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
APP.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql+psycopg2://%s:%s@%s/%s' % (
# ARGS.dbuser, ARGS.dbpass, ARGS.dbhost, ARGS.dbname
os.environ['DBUSER'], os.environ['DBPASS'], os.environ['DBHOST'], os.environ['DBNAME']
)
# initialize the database connection
DB = SQLAlchemy(APP)
# initialize database migration management
MIGRATE = Migrate(APP, DB)
from models import *
@APP.route('/')
def view_registered_guests():
guests = Guest.query.all()
return render_template('guest_list.html', guests=guests)
@APP.route('/register', methods = ['GET'])
def view_registration_form():
return render_template('guest_registration.html')
@APP.route('/register', methods = ['POST'])
def register_guest():
name = request.form.get('name')
email = request.form.get('email')
partysize = request.form.get('partysize')
if not partysize or partysize=='':
partysize = 1
guest = Guest(name, email, partysize)
DB.session.add(guest)
DB.session.commit()
return render_template('guest_confirmation.html',
name=name, email=email, partysize=partysize)
# bad query
@APP.route('/bad_query')
def view_registered_guests_bad_query():
for _ in range(20):
guests = Guest.query.all()
return render_template('guest_list.html', guests=guests)
# error message
@APP.route('/hello')
def apm_message_hello():
apm.capture_message('hello, world!')
return render_template('apm_hello.html')
# Error
@APP.route('/error')
def apm_error():
try:
1 / 0
except ZeroDivisionError:
apm.capture_exception()
return render_template('apm_error.html')
# Unhandled error
@APP.route('/fatal_error')
def apm_fatal_error():
1 / 0
    return render_template('apm_error.html')

--- pytify/strategy.py | EngineeringIsLife/Pytify | MIT | 325 bytes ---
# -*- coding: utf-8 -*-
from sys import platform
def get_pytify_class_by_platform():
if 'linux' in platform:
from linux import Linux
return Linux
elif 'darwin' in platform:
from darwin import Darwin
return Darwin
else:
raise Exception('%s is not supported.' % platform)

--- scrapers/meetings/fetch_meetings.py | spudmind/spud | MIT | 8,836 bytes ---
# -*- coding: utf-8 -*-
from datetime import datetime
import logging
import os.path
import requests
import time
import urllib
from bs4 import BeautifulSoup
from utils import mongo
class FetchMeetings:
def __init__(self, **kwargs):
# fetch the logger
self._logger = logging.getLogger("spud")
self.BASE_URL = "https://www.gov.uk"
# initial search query stuff
self.search_term = "meetings"
self.search_filter = "transparency-data"
# database stuff
self.db = mongo.MongoInterface()
self.COLLECTION_NAME = "meetings_fetch"
if kwargs["refreshdb"]:
self.db.drop(self.COLLECTION_NAME)
# local directory to save fetched files to
self.STORE_DIR = "store"
# get the current path
self.current_path = os.path.dirname(os.path.abspath(__file__))
# if True, avoid downloading where possible
self.dryrun = kwargs["dryrun"]
def fetch_all_publications(self):
self._logger.debug("Searching %s for '%s' with filter '%s' ..." % (self.BASE_URL, self.search_term, self.search_filter))
search_tmpl = "%s/government/publications?keywords=%s&publication_filter_option=%s&page=%%d" % (self.BASE_URL, urllib.quote_plus(self.search_term), self.search_filter)
page = 1
total_pages = "unknown"
collections = {}
publications = {}
while True:
if total_pages != "unknown" and page > total_pages:
# no more search results
break
# search gov.uk for results
self._logger.debug(" Fetching results page %d / %s ..." % (page, total_pages))
r = requests.get(search_tmpl % page)
time.sleep(0.5)
soup = BeautifulSoup(r.text)
if total_pages == "unknown":
total_pages = int(soup.find(class_="page-numbers").text[5:])
publication_soups = soup.find_all(class_="document-row")
for pub_soup in publication_soups:
# find collections (we'll use these to find more publications)
collection_soup = pub_soup.find(class_="document-collections")
if collection_soup:
collection_text = collection_soup.a.text
collection_url = "%s%s" % (self.BASE_URL, collection_soup.a["href"])
if collection_url not in collections and self.search_term in collection_text.lower():
collections[collection_url] = {
"url": collection_url,
"name": collection_text,
}
continue
# any remaining publications are not part of a collection
pub_title = pub_soup.h3.a
pub_url = "%s%s" % (self.BASE_URL, pub_title["href"])
if self.search_term in pub_title.text.lower() and pub_url not in publications:
department = pub_soup.find(class_="organisations")
if department.abbr is not None:
department = department.abbr["title"]
else:
department = department.text
publications[pub_url] = {
"source": {
"linked_from_url": pub_url,
},
"collection": None,
"title": pub_title.text,
"published_at": pub_soup.find(class_="public_timestamp").text.strip(),
"department": department,
}
page += 1
self._logger.debug("Found %d collections, and %d publications not part of collections." % (len(collections), len(publications)))
publications = self.fetch_pubs_from_collections(collections.values(), publications)
return publications.values()
def fetch_pubs_from_collections(self, collections, publications={}):
self._logger.debug("Searching %d collections for more publications ..." % len(collections))
for collection in collections:
r = requests.get(collection["url"])
time.sleep(0.5)
soup = BeautifulSoup(r.text)
department = soup.find(class_="organisation-link").text
publication_soups = soup.find_all(class_="publication")
for pub_soup in publication_soups:
pub_title = pub_soup.h3.a
pub_url = "%s%s" % (self.BASE_URL, pub_title["href"])
if self.search_term in pub_title.text.lower() and pub_url not in publications:
publications[pub_url] = {
"source": {
"linked_from_url": pub_url,
},
"collection": collection["name"],
"title": pub_title.text,
"published_at": pub_soup.find(class_="public_timestamp").text,
"department": department,
}
self._logger.debug("Done searching.")
return publications
def fetch_file(self, url, filename):
self._logger.debug(" Fetching: %s" % url)
full_path = os.path.join(self.current_path, self.STORE_DIR, filename)
urllib.urlretrieve(url, full_path)
time.sleep(0.5)
def save_to_db(self, publication):
publication["source"]["fetched"] = False
# existing = self.db.find_one(self.COLLECTION_NAME, {"url": publication["source"]["url"]})
# if existing is None:
self.db.save(self.COLLECTION_NAME, publication, manipulate=False)
def get_all_unfetched(self):
all_not_fetched = []
page = 1
while True:
not_fetched, meta = self.db.query(self.COLLECTION_NAME, query={"source.fetched": False}, page=page)
all_not_fetched += not_fetched
page += 1
if not meta["has_more"]:
return all_not_fetched
def run(self):
publications = self.fetch_all_publications()
self._logger.debug("Searching %d publication pages for attachments ..." % len(publications))
for pub in publications:
r = requests.get(pub["source"]["linked_from_url"])
time.sleep(0.5)
soup = BeautifulSoup(r.text)
attachment_soups = soup.find_all(class_="attachment")
for attachment_soup in attachment_soups:
attachment_title = attachment_soup.h2.text
if self.search_term not in attachment_title.lower():
continue
attachment = pub.copy()
attachment["title"] = attachment_title
download_soup = attachment_soup.find(class_="download")
if download_soup is not None:
# download link (usually to a csv) is available
rel_url = download_soup.a["href"]
attachment["file_type"] = rel_url.split(".")[-1].upper()
elif attachment_soup.h2.a is not None:
# heading link (usually to a pdf)
rel_url = attachment_soup.h2.a["href"]
attachment["file_type"] = attachment_soup.find(class_="type").text
else:
self._logger.error(attachment_soup)
raise Exception("Unknown attachment type.")
attachment["source"]["url"] = "%s%s" % (self.BASE_URL, rel_url)
attachment["filename"] = os.path.join("-".join(rel_url.split("/")[-2:]))
self.save_to_db(attachment)
if attachment_soups == []:
# the data is inline - embedded in the page.
# NB this is very unusual.
pub["source"]["url"] = pub["source"]["linked_from_url"]
pub["filename"] = os.path.join("%s.html" % pub["source"]["url"].split("/")[-1])
pub["file_type"] = "HTML"
self.save_to_db(pub)
self._logger.debug("Found %d attachments in total." % self.db.count(self.COLLECTION_NAME))
if not self.dryrun:
not_fetched = self.get_all_unfetched()
self._logger.debug("Fetching %d attachments ..." % len(not_fetched))
for pub in not_fetched:
self.fetch_file(pub["source"]["url"], pub["filename"])
pub["source"]["fetched"] = str(datetime.now())
self.db.update(self.COLLECTION_NAME, {"source.url": pub["source"]["url"]}, pub)
self._logger.debug("Attachments fetched.")
def fetch(**kwargs):
# TODO! this is temporary!
# import requests_cache
# requests_cache.install_cache("meetings")
FetchMeetings(**kwargs).run()

--- django_mediamosa/templatetags/mediamosa_extras.py | UGentPortaal/django-mediamosa | BSD-3-Clause | 282 bytes ---
from django import template
register = template.Library()
@register.filter
def mimetype(value, mime_type):
mediafiles = []
for mediafile in value:
if mediafile.metadata.get('mime_type') == mime_type:
mediafiles.append(mediafile)
return mediafiles
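# Illustrative template usage (hypothetical variable names):
#   {% load mediamosa_extras %}
#   {% for mf in asset.mediafiles|mimetype:"video/mp4" %} ... {% endfor %}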

--- tests/settings.py | managedbyq/mbq.atomiq | Apache-2.0 | 561 bytes ---
import os
import boto3
import dj_database_url
from mbq import env, metrics
SECRET_KEY = 'fake-key'
DEBUG = True
ATOMIQ = {
'env': 'Test',
'service': 'test-service',
}
database_url = os.environ.get('DATABASE_URL', 'mysql://root:@mysql:3306/atomiqdb')
DATABASES = {
'default': dj_database_url.parse(database_url),
}
INSTALLED_APPS = [
'mbq.atomiq',
]
USE_TZ = True
boto3.setup_default_session(
region_name='us-east-1',
)
ENV = env.get_environment("ENV_NAME")
metrics.init('mbq.atomiq', env=ENV, constant_tags={"env": ENV.long_name})

--- backend/util.py | ahangchen/Rasp-Person-Sensor | MIT | 720 bytes ---
import json
import requests
def upload_file(upload_url, file_path):
files = {'file': open(file_path, 'rb')}
response = requests.post(upload_url, files=files)
ret = response.content.decode('utf-8')
ret_json = json.loads(ret)
print ret_json
return ret_json['data']
def post_json(post_url, post_data):
headers = {'content-type': 'application/json'}
response = requests.post(post_url, data=json.dumps(post_data), headers=headers)
return response.content.decode('utf-8')
def post_form(post_url, post_data):
headers = {'content-type': 'x-www-form-urlencoded'}
response = requests.post(post_url, params=post_data, headers=headers)
return response.content.decode('utf-8')
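# Illustrative calls (hypothetical URLs and payloads):
#   upload_file('http://example.com/upload', '/tmp/frame.jpg')
#   post_json('http://example.com/api/person', {'count': 2})
#   post_form('http://example.com/api/person', {'count': 2})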

--- hard-gists/2338529/snippet.py | jjhenkel/dockerizeme | Apache-2.0 | 4,761 bytes ---
"""
Tools for creating a CA cert and signed server certs.
Divined from http://svn.osafoundation.org/m2crypto/trunk/tests/test_x509.py
The mk_temporary_xxx calls return a NamedTemporaryFile with certs.
Usage ;
# Create a temporary CA cert and it's private key
cacert, cakey = mk_temporary_cacert()
# Create a temporary server cert+key, signed by the CA
server_cert = mk_temporary_cert(cacert.name, cakey.name, '*.server.co.uk')
"""
from tempfile import NamedTemporaryFile as namedtmp
import time
from M2Crypto import X509, EVP, RSA, ASN1
__author__ = '[email protected]'
__all__ = ['mk_temporary_cacert', 'mk_temporary_cert']
def mk_ca_issuer():
"""
Our default CA issuer name.
"""
issuer = X509.X509_Name()
issuer.C = "US"
issuer.CN = "ca_testing_server"
issuer.ST = 'CA'
issuer.L = 'San Francisco'
issuer.O = 'ca_yelp'
issuer.OU = 'ca_testing'
return issuer
def mk_cert_valid(cert, days=365):
"""
Make a cert valid from now and til 'days' from now.
Args:
cert -- cert to make valid
days -- number of days cert is valid for from now.
"""
t = long(time.time())
now = ASN1.ASN1_UTCTIME()
now.set_time(t)
expire = ASN1.ASN1_UTCTIME()
expire.set_time(t + days * 24 * 60 * 60)
cert.set_not_before(now)
cert.set_not_after(expire)
def mk_request(bits, cn='localhost'):
"""
Create a X509 request with the given number of bits in they key.
Args:
bits -- number of RSA key bits
cn -- common name in the request
Returns a X509 request and the private key (EVP)
"""
pk = EVP.PKey()
x = X509.Request()
rsa = RSA.gen_key(bits, 65537, lambda: None)
pk.assign_rsa(rsa)
x.set_pubkey(pk)
name = x.get_subject()
name.C = "US"
name.CN = cn
name.ST = 'CA'
name.O = 'yelp'
name.OU = 'testing'
x.sign(pk,'sha1')
return x, pk
def mk_cacert():
"""
Make a CA certificate.
Returns the certificate, private key and public key.
"""
req, pk = mk_request(1024)
pkey = req.get_pubkey()
cert = X509.X509()
cert.set_serial_number(1)
cert.set_version(2)
mk_cert_valid(cert)
cert.set_issuer(mk_ca_issuer())
cert.set_subject(cert.get_issuer())
cert.set_pubkey(pkey)
cert.add_ext(X509.new_extension('basicConstraints', 'CA:TRUE'))
cert.add_ext(X509.new_extension('subjectKeyIdentifier', cert.get_fingerprint()))
cert.sign(pk, 'sha1')
return cert, pk, pkey
def mk_cert():
"""
Make a certificate.
Returns a new cert.
"""
cert = X509.X509()
cert.set_serial_number(2)
cert.set_version(2)
mk_cert_valid(cert)
cert.add_ext(X509.new_extension('nsComment', 'SSL sever'))
return cert
def mk_casigned_cert():
"""
Create a CA cert + server cert + server private key.
"""
# unused, left for history.
cacert, pk1, _ = mk_cacert()
cert_req, pk2 = mk_request(1024, cn='testing_server')
cert = mk_cert(cacert)
cert.set_subject(cert_req.get_subject())
cert.set_pubkey(cert_req.get_pubkey())
cert.sign(pk1, 'sha1')
return cacert, cert, pk2
def mk_temporary_cacert():
"""
Create a temporary CA cert.
Returns a tuple of NamedTemporaryFiles holding the CA cert and private key.
"""
cacert, pk1, pkey = mk_cacert()
cacertf = namedtmp()
cacertf.write(cacert.as_pem())
cacertf.flush()
pk1f = namedtmp()
pk1f.write(pk1.as_pem(None))
pk1f.flush()
return cacertf, pk1f
def mk_temporary_cert(cacert_file, ca_key_file, cn):
"""
Create a temporary certificate signed by the given CA, and with the given common name.
If cacert_file and ca_key_file is None, the certificate will be self-signed.
Args:
cacert_file -- file containing the CA certificate
ca_key_file -- file containing the CA private key
cn -- desired common name
Returns a namedtemporary file with the certificate and private key
"""
cert_req, pk2 = mk_request(1024, cn=cn)
if cacert_file and ca_key_file:
cacert = X509.load_cert(cacert_file)
pk1 = EVP.load_key(ca_key_file)
else:
cacert = None
pk1 = None
cert = mk_cert()
cert.set_subject(cert_req.get_subject())
cert.set_pubkey(cert_req.get_pubkey())
if cacert and pk1:
cert.set_issuer(cacert.get_issuer())
cert.sign(pk1, 'sha1')
else:
cert.set_issuer(cert.get_subject())
cert.sign(pk2, 'sha1')
certf = namedtmp()
certf.write(cert.as_pem())
certf.write(pk2.as_pem(None))
certf.flush()
return certf
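# Illustrative usage sketch combining the helpers above (the host name and the
# ssl wiring mentioned below are assumptions for illustration only):
#
#     cacert, cakey = mk_temporary_cacert()
#     server_cert = mk_temporary_cert(cacert.name, cakey.name, 'localhost')
#     # server_cert.name is a PEM file holding both certificate and private key,
#     # so it can be handed to e.g. ssl.SSLContext.load_cert_chain().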
if __name__ == '__main__':
cacert, cert, pk = mk_casigned_cert()
with open('cacert.crt', 'w') as f:
f.write(cacert.as_pem())
with open('cert.crt', 'w') as f:
f.write(cert.as_pem())
f.write(pk.as_pem(None))
# Sanity checks...
cac = X509.load_cert('cacert.crt')
    print(cac.verify(), cac.check_ca())
    cc = X509.load_cert('cert.crt')
    print(cc.verify(cac.get_pubkey()))
# protips
# openssl verify -CAfile cacert.crt cacert.crt cert.crt
# openssl x509 -in cert.crt -noout -text
# openssl x509 -in cacert.crt -noout -text
| 23.924623 | 87 | 0.710985 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,057 | 0.432052 |
ef25471191ad1db593810b69150f45edb9dc331e | 2,615 | py | Python | WickContractions/ops/indexed.py | chrisculver/WickContractions | a36af32bdd049789faf42d24d168c4073fc45ed0 | ["MIT"] | 2 | 2021-08-03T17:32:09.000Z | 2021-08-03T18:28:31.000Z | WickContractions/ops/indexed.py | chrisculver/WickContractions | a36af32bdd049789faf42d24d168c4073fc45ed0 | ["MIT"] | null | null | null | WickContractions/ops/indexed.py | chrisculver/WickContractions | a36af32bdd049789faf42d24d168c4073fc45ed0 | ["MIT"] | null | null | null | from collections import deque
class IndexedObject:
"""Container for an object that has indices
:param name: Name of the object
:param indices: Indices attached to it
"""
def __init__(self,name,indices):
"""Constructor
"""
self.name = name
self.indices = indices
def cyclic_permute_indices(self):
"""Return the object with it's indices cyclicly permuted once.
"""
tmp=deque(self.indices)
tmp.rotate(1)
self.indices=list(tmp)
def __str__(self):
"""String printer
"""
idx_str = ''
for i in range(len(self.indices)):
idx_str += self.indices[i]
if(i!=len(self.indices)-1):
idx_str += ' '
return self.name + '_{' + idx_str + '}'
def __eq__(self, other):
"""Equality comparison
"""
return (self.name == other.name) and (self.indices==other.indices)
def __lt__(self, other):
"""Less then operator
"""
if(self.name != other.name):
return (self.name < other.name)
else:
return (self.indices < other.indices)
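# Illustrative sketch of IndexedObject (the name and indices are made up):
#
#     q = IndexedObject('Q', ['a', 'b', 'c'])
#     str(q)                       # 'Q_{a b c}'
#     q.cyclic_permute_indices()
#     str(q)                       # 'Q_{c a b}'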
class IndexedFunction(IndexedObject):
"""Container for an object with indices and arguments
:param name: Name of the object
:param indices: Indices attached to the argument
:param arguments: Arguments the object depends on
"""
def __init__(self, name, indices, arguments):
"""Constructor
"""
self.name = name
self.indices = indices
self.arguments = arguments
def __str__(self):
"""String printer
"""
idx_str = ''
for i in range(len(self.indices)):
idx_str += self.indices[i]
if(i!=len(self.indices)-1):
idx_str += ' '
arg_str = ''
for i in range(len(self.arguments)):
arg_str += self.arguments[i]
if(i!=len(self.arguments)-1):
arg_str += ','
return self.name + '(' + arg_str + ')_{' + idx_str + '}'
def __eq__(self, other):
"""Equality comparison
"""
        return (self.name == other.name) and (self.indices==other.indices) and (self.arguments==other.arguments)
def __lt__(self, other):
"""Less then operator
"""
if(self.name != other.name):
return (self.name < other.name)
else:
self_strings = self.indices + self.arguments
other_strings = other.indices + other.arguments
return (self_strings < other_strings) | 30.406977 | 111 | 0.549522 | 2,581 | 0.986998 | 0 | 0 | 0 | 0 | 0 | 0 | 705 | 0.269598 |
ef25c53ea4c0fb58041ed1cd6cded53b4e340d23 | 10,942 | py | Python | v0/aia_eis_v0/ml_sl/rf/dt_main.py | DreamBoatOve/aia_eis | 458b4d29846669b10db4da1b3e86c0b394614ceb | ["MIT"] | 1 | 2022-03-02T12:57:19.000Z | 2022-03-02T12:57:19.000Z | v0/aia_eis_v0/ml_sl/rf/dt_main.py | DreamBoatOve/aia_eis | 458b4d29846669b10db4da1b3e86c0b394614ceb | ["MIT"] | null | null | null | v0/aia_eis_v0/ml_sl/rf/dt_main.py | DreamBoatOve/aia_eis | 458b4d29846669b10db4da1b3e86c0b394614ceb | ["MIT"] | null | null | null | import copy
from utils.file_utils.dataset_reader_pack.ml_dataset_reader import get_TV_T_dataset, get_T_V_T_dataset
from ml_sl.rf.dt_0 import Node, save_node, load_node
from ml_sl.ml_data_wrapper import pack_list_2_list, single_point_list_2_list, reform_labeled_dataset_list
from ml_sl.ml_data_wrapper import split_labeled_dataset_list
from utils.file_utils.filename_utils import get_date_prefix
from ml_sl.ml_critrions import cal_accuracy, cal_kappa, cal_accuracy_on_2, cal_accuracy_on_3
label_list = [2, 4, 5, 6, 7, 8, 9]
# Import dataset (Training, validation, Test)
ml_dataset_pickle_file_path = '../../datasets/ml_datasets/normed'
tr_dataset, va_dataset, te_dataset = get_T_V_T_dataset(file_path=ml_dataset_pickle_file_path)
tr_va_dataset, test_dataset = get_TV_T_dataset(file_path=ml_dataset_pickle_file_path)
tr_label_list, tr_data_list = split_labeled_dataset_list(tr_dataset)
va_label_list, va_data_list = split_labeled_dataset_list(va_dataset)
tr_va_label_list, tr_va_data_list = split_labeled_dataset_list(tr_va_dataset)
te_label_list, te_data_list = split_labeled_dataset_list(te_dataset)
# --------------------- 1-No Pruning ---------------------
def dt_no_pruning(training_dataset, validation_dataset, test_dataset, label_list=[2,4,5,6,7,8,9]):
tr_va_dataset = training_dataset + validation_dataset
reformed_tr_va_dataset = reform_labeled_dataset_list(tr_va_dataset)
# 1.1- Use [training+validation]-dataset to train a Decision Tree (DT), DT0,
dt = Node(reformed_labeled_dataset_list = reformed_tr_va_dataset, level = 0)
dt.create_child_node()
# 1.2- save DT
dt_file_name = get_date_prefix() + 'dt_no_pruning_pickle.file'
save_node(node=dt, file_name=dt_file_name)
# 1.3- Test the performance(accuracy, kappa) of DT0 on test-dataset
test_label_list = [t[0] for t in test_dataset]
sample_label_prob_dict_list = []
empty_sample_label_prob_dict = {}
for label in label_list:
empty_sample_label_prob_dict[label] = 0.0
for t_d in test_dataset:
t_d = single_point_list_2_list(t_d[1])
pre = dt.classify(unlabeled_data_list=t_d)
sample_label_prob_dict = copy.deepcopy(empty_sample_label_prob_dict)
sample_label_prob_dict[pre] += 1
sample_label_prob_dict_list.append(sample_label_prob_dict)
acc = cal_accuracy(sample_label_prob_dict_list, test_label_list)
kappa = cal_kappa(sample_label_prob_dict_list, test_label_list)
return acc, kappa
#------------- Train on tr, tested on va #-------------
# acc,kappa = dt_no_pruning(training_dataset=tr_dataset, validation_dataset=[], test_dataset=tr_dataset)
# print(acc,kappa) # --> 1.0 1.0
#------------- Train on tr, tested on va #-------------
# if __name__ == '__main__':
# training_dataset, validation_dataset, test_dataset = get_T_V_T_dataset(file_path='../../datasets/ml_datasets/normed')
# Running condition-1
# acc, kappa = dt_no_pruning(training_dataset, validation_dataset, test_dataset)
# print('Accuracy: {0}, Kappa: {1}'.format(acc, kappa))
# Running condition-2
# acc, kappa = dt_no_pruning(training_dataset, validation_dataset=[], test_dataset=validation_dataset)
# print('Accuracy: {0}, Kappa: {1}'.format(acc, kappa))
"""
Running condition-1
Train on [Training+validation]-dataset
Test on test-dataset
1-Accuracy: 0.45054945054945056, Kappa: 0.3173293323330833
2-Accuracy: 0.45054945054945056, Kappa: 0.3173293323330833
Running condition-2
Train on [Training]-dataset
Test on validation-dataset
1-Accuracy: 0.5319148936170213, Kappa: 0.42762247439800716
2-Accuracy: 0.5319148936170213, Kappa: 0.42762247439800716
"""
def load_dt_no_pruning(training_dataset, validation_dataset, test_dataset, label_list=[2,4,5,6,7,8,9]):
dt = load_node(file_name='2020_04_11_dt_no_pruning_pickle.file', file_path='dt_res')
# 1.3- Test the performance(accuracy, kappa) of DT0 on test-dataset
test_label_list = [t[0] for t in test_dataset]
sample_label_prob_dict_list = []
empty_sample_label_prob_dict = {}
for label in label_list:
empty_sample_label_prob_dict[label] = 0.0
for t_d in test_dataset:
t_d = single_point_list_2_list(t_d[1])
pre = dt.classify(unlabeled_data_list=t_d)
sample_label_prob_dict = copy.deepcopy(empty_sample_label_prob_dict)
sample_label_prob_dict[pre] += 1
sample_label_prob_dict_list.append(sample_label_prob_dict)
acc = cal_accuracy(sample_label_prob_dict_list, test_label_list)
acc_on_2 = cal_accuracy_on_2(sample_label_prob_dict_list, test_label_list)
acc_on_3 = cal_accuracy_on_3(sample_label_prob_dict_list, test_label_list)
kappa = cal_kappa(sample_label_prob_dict_list, test_label_list)
print('Decision Tree with no pruning: Accuracy on 1 = {0}, Accuracy on 2 = {1}, Accuracy on 3 = {2}, Kappa={3}'.format(
acc, acc_on_2, acc_on_3, kappa))
# training_dataset, validation_dataset, test_dataset = get_T_V_T_dataset(file_path='../../datasets/ml_datasets/normed')
# load_dt_no_pruning(training_dataset, validation_dataset, test_dataset, label_list=[2,4,5,6,7,8,9])
# Decision Tree with no pruning: Accuracy on 1 = 0.4945054945054945, Accuracy on 2 = 0.5164835164835165,
# Accuracy on 3 = 0.6923076923076923, Kappa=0.3706209592542475
# --------------------- 1-No Pruning ---------------------
"""
EA-Revise
用于产生EA-Revise时要求的结果, DT在GS阶段只有 【no pruning / posterior pruning】,在文中没提到DT的GS结果,只需计算Final res
而 DT 的 final config 是 no pruning,在 tr+va 上训练,在te上测试
"""
def dtFinalRes():
# load dt model
dt = load_node(file_name='2020_04_11_dt_no_pruning_pickle.file', file_path='dt_res')
# Test the performance(accuracy, kappa) of DT-final on TrVa-dataset
trVaSample_label_prob_dict_list = []
teSample_label_prob_dict_list = []
empty_sample_label_prob_dict = {}
for label in label_list:
empty_sample_label_prob_dict[label] = 0.0
# tested on trVa-dataset
for t_d in tr_va_dataset:
t_d = single_point_list_2_list(t_d[1])
pre = dt.classify(unlabeled_data_list=t_d)
sample_label_prob_dict = copy.deepcopy(empty_sample_label_prob_dict)
sample_label_prob_dict[pre] += 1
trVaSample_label_prob_dict_list.append(sample_label_prob_dict)
# tested on te-dataset
for t_d in test_dataset:
t_d = single_point_list_2_list(t_d[1])
pre = dt.classify(unlabeled_data_list=t_d)
sample_label_prob_dict = copy.deepcopy(empty_sample_label_prob_dict)
sample_label_prob_dict[pre] += 1
teSample_label_prob_dict_list.append(sample_label_prob_dict)
trVaAcc = cal_accuracy(trVaSample_label_prob_dict_list, tr_va_label_list)
trVaKappa = cal_kappa(trVaSample_label_prob_dict_list, tr_va_label_list)
teAcc = cal_accuracy(teSample_label_prob_dict_list, te_label_list)
teKappa = cal_kappa(teSample_label_prob_dict_list, te_label_list)
print('Final res: trVaAcc={0}, trVaKappa={1},trVaAK={2},teAcc={3},teKappa={4},teAK={5}'.format(
trVaAcc, trVaKappa, trVaAcc+trVaKappa,
teAcc,teKappa,teAcc+teKappa
))
# dtFinalRes()
"""
node = pickle.load(file) ModuleNotFoundError: No module named 'ml_sl'
Final res:
trVaAcc=0.9163568773234201, trVaKappa=0.897055384288296, trVaAK=1.813412261611716,
teAcc=0.4945054945054945, teKappa=0.3706209592542475, teAK=0.8651264537597421
"""
# --------------------- 2-Pruning ---------------------
def dt_pruning(training_dataset, validation_dataset, test_dataset, label_list=[2,4,5,6,7,8,9]):
reformed_tr_dataset_list = reform_labeled_dataset_list(training_dataset)
# 2.1- Use training-dataset to train a Decision Tree, DT
dt = Node(reformed_labeled_dataset_list=reformed_tr_dataset_list, level=0)
dt.create_child_node()
# 2.2- Use validation-dataset to prune DT1
dt.post_pruning_1(reform_labeled_dataset_list(validation_dataset))
# 2.3- save model
dt_file_name = get_date_prefix() + 'dt_pruning_pickle.file'
save_node(node=dt, file_name=dt_file_name)
# 2.4- Test the performance(accuracy, kappa) of DT on test-dataset
test_label_list = [t[0] for t in test_dataset]
sample_label_prob_dict_list = []
empty_sample_label_prob_dict = {}
for label in label_list:
empty_sample_label_prob_dict[label] = 0.0
for t_d in test_dataset:
t_d = single_point_list_2_list(t_d[1])
pre = dt.classify(unlabeled_data_list=t_d)
sample_label_prob_dict = copy.deepcopy(empty_sample_label_prob_dict)
sample_label_prob_dict[pre] += 1
sample_label_prob_dict_list.append(sample_label_prob_dict)
acc = cal_accuracy(sample_label_prob_dict_list, test_label_list)
kappa = cal_kappa(sample_label_prob_dict_list, test_label_list)
return acc, kappa
# if __name__ == '__main__':
# training_dataset, validation_dataset, test_dataset = get_T_V_T_dataset(file_path='../../datasets/ml_datasets/normed')
# acc, kappa = dt_pruning(training_dataset, validation_dataset, test_dataset, label_list=[2, 4, 5, 6, 7, 8, 9])
# print('Accuracy: {0}, Kappa: {1}'.format(acc, kappa))
"""
1- Accuracy: 0.4835164835164835, Kappa: 0.3591549295774648
2- Accuracy: 0.4835164835164835, Kappa: 0.3591549295774648
"""
def load_dt_pruning(test_dataset, label_list=[2,4,5,6,7,8,9]):
dt = load_node(file_name='2020_04_11_dt_pruning_pickle_1.file', file_path='dt_res')
# 2.4- Test the performance(accuracy, kappa) of DT on test-dataset
test_label_list = [t[0] for t in test_dataset]
sample_label_prob_dict_list = []
empty_sample_label_prob_dict = {}
for label in label_list:
empty_sample_label_prob_dict[label] = 0.0
for t_d in test_dataset:
t_d = single_point_list_2_list(t_d[1])
pre = dt.classify(unlabeled_data_list=t_d)
sample_label_prob_dict = copy.deepcopy(empty_sample_label_prob_dict)
sample_label_prob_dict[pre] += 1
sample_label_prob_dict_list.append(sample_label_prob_dict)
acc = cal_accuracy(sample_label_prob_dict_list, test_label_list)
acc_on_2 = cal_accuracy_on_2(sample_label_prob_dict_list, test_label_list)
acc_on_3 = cal_accuracy_on_3(sample_label_prob_dict_list, test_label_list)
kappa = cal_kappa(sample_label_prob_dict_list, test_label_list)
print('Decision Tree with pruning: Accuracy on 1 = {0}, Accuracy on 2 = {1}, Accuracy on 3 = {2}, Kappa={3}'.format(
acc, acc_on_2, acc_on_3, kappa))
# training_dataset, validation_dataset, test_dataset = get_T_V_T_dataset(file_path='../../datasets/ml_datasets/normed')
# load_dt_pruning(test_dataset, label_list=[2,4,5,6,7,8,9])
# Decision Tree with pruning: Accuracy on 1 = 0.4835164835164835, Accuracy on 2 = 0.5054945054945055,
# Accuracy on 3 = 0.6703296703296703, Kappa = 0.3591549295774648
# --------------------- 2-Pruning --------------------- | 46.961373 | 123 | 0.732681 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,324 | 0.392022 |
ef29d7cb4df5849c15653808babb4473a2403757 | 874 | py | Python | python/sagiri-bot/SAGIRIBOT/data_manage/update_data/update_setting.py | GG-yuki/bugs | aabd576e9e57012a3390007af890b7c6ab6cdda8 | ["MIT"] | null | null | null | python/sagiri-bot/SAGIRIBOT/data_manage/update_data/update_setting.py | GG-yuki/bugs | aabd576e9e57012a3390007af890b7c6ab6cdda8 | ["MIT"] | null | null | null | python/sagiri-bot/SAGIRIBOT/data_manage/update_data/update_setting.py | GG-yuki/bugs | aabd576e9e57012a3390007af890b7c6ab6cdda8 | ["MIT"] | null | null | null | from SAGIRIBOT.basics.aio_mysql_excute import execute_sql
async def update_setting(group_id, setting_name, new_setting_value) -> None:
"""
Update setting to database
Args:
group_id: Group id
setting_name: Setting name
new_setting_value: New setting value
Examples:
await update_setting(12345678, "setu", True)
Return:
None
"""
str_key_word = ["speakMode", "switch", "music", "r18Process"]
sql_key_word = ["repeat", "real", "limit"]
if setting_name in sql_key_word:
setting_name = '`'+setting_name+'`'
if setting_name in str_key_word:
sql = "UPDATE setting SET %s='%s' WHERE groupId=%d" % (setting_name, new_setting_value, group_id)
else:
sql = "UPDATE setting SET %s=%s WHERE groupId=%d" % (setting_name, new_setting_value, group_id)
await execute_sql(sql)
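# Illustrative sketch: for the docstring example update_setting(12345678, "setu", True),
# "setu" is neither a quoted (string-valued) setting nor a reserved word here, so the
# generated statement is
#     UPDATE setting SET setu=True WHERE groupId=12345678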
| 31.214286 | 105 | 0.662471 | 0 | 0 | 0 | 0 | 0 | 0 | 813 | 0.930206 | 407 | 0.465675 |
ef2afd3b3d3cc23390816b111f6a8ec32454a594 | 486 | py | Python | setup.py | fmaida/caro-diario | adc5018f2ef716b49db39aa9189ab1e803fcd357 | ["MIT"] | null | null | null | setup.py | fmaida/caro-diario | adc5018f2ef716b49db39aa9189ab1e803fcd357 | ["MIT"] | null | null | null | setup.py | fmaida/caro-diario | adc5018f2ef716b49db39aa9189ab1e803fcd357 | ["MIT"] | null | null | null | from distutils.core import setup
setup(
name = 'caro-diario',
packages = ['caro-diario'], # this must be the same as the name above
version = '0.1',
description = 'Diario',
author = 'Francesco Maida',
author_email = '[email protected]',
url = 'https://github.com/fmaida/caro-diario.git', # use the URL to the github repo
download_url = '', # I'll explain this in a second
keywords = ['diario', 'logging', 'esempio'], # arbitrary keywords
classifiers = [],
)
| 34.714286 | 85 | 0.67284 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 278 | 0.572016 |
ef2c168f7b4d969663dc1ed93f01785a68c36dd1 | 3,695 | py | Python | cVQE/operators/converters/tensoredop_distributor.py | gblazq/cVQE | 5a566103c35696ec0cf2b016c38d71de696e0e29 | ["Apache-2.0"] | 1 | 2021-09-16T12:43:21.000Z | 2021-09-16T12:43:21.000Z | cVQE/operators/converters/tensoredop_distributor.py | gblazq/cVQE | 5a566103c35696ec0cf2b016c38d71de696e0e29 | ["Apache-2.0"] | null | null | null | cVQE/operators/converters/tensoredop_distributor.py | gblazq/cVQE | 5a566103c35696ec0cf2b016c38d71de696e0e29 | ["Apache-2.0"] | null | null | null | # Copyright 2021 Guillermo Blázquez
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import reduce
from qiskit.aqua.operators.converters import ConverterBase
from qiskit.aqua.operators.list_ops import TensoredOp, SummedOp
from qiskit.aqua.operators.primitive_ops import PauliOp
class TensoredOpDistributor(ConverterBase):
"""
A converter that applies the distributive property of tensor products.
Only works with operators that consist on TensoredOps, SummedOps or PauliOps.
E.g. if op = (I - Z)^(X + 2*Y), the converter will return
    (I^X) + 2(I^Y) - (Z^X) - 2(Z^Y)
"""
#TODO: check coefficients
def convert(self, operator):
"""
Apply the distributive property to TensoredOps. If the operator is a SummedOp, apply
it to each of the summands. If it's a PauliOp, return the operator.
Args:
operator: the Operator to convert.
Returns:
the converted Operator
Raises:
TypeError: if the operator is not a TensoredOp, SummedOp or PauliOp
"""
if isinstance(operator, TensoredOp):
return reduce(self._convert, operator).reduce()
elif isinstance(operator, SummedOp):
return SummedOp([self.convert(op) for op in operator], coeff=operator.coeff).reduce()
elif isinstance(operator, PauliOp):
return operator
else:
raise TypeError('TensoredOpDistributor can only distribute TensoredOps, SummedOps or PauliOps')
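    # Illustrative sketch for the docstring example above (assumes the standard
    # qiskit.aqua operator globals I, X, Y, Z are importable):
    #
    #     from qiskit.aqua.operators import I, X, Y, Z
    #     op = (I - Z) ^ (X + 2 * Y)
    #     TensoredOpDistributor().convert(op)
    #     # -> (I ^ X) + 2 * (I ^ Y) - (Z ^ X) - 2 * (Z ^ Y)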
def _convert(self, op1, op2):
"""
Distribute the tensor product over two operands in a TensoredOp
Args:
op1: the first operator in the tensor product
op2: the second operator in the tensor product
Returns:
the result of op1^op2, distributing the tensor product if possible
Raises:
TypeError: if any of the two operators is not a TensoredOp, SummedOp or PauliOp
"""
if isinstance(op1, PauliOp) and isinstance(op2, PauliOp):
return op1^op2
elif (isinstance(op1, PauliOp) or isinstance(op1, TensoredOp)) and isinstance(op2, SummedOp):
return SummedOp([self._convert(op1, o) for o in op2.oplist], coeff=op2.coeff)
elif isinstance(op1, PauliOp) and isinstance(op2, TensoredOp):
return self._convert(op1, self.convert(op2))
elif isinstance(op1, SummedOp) and (isinstance(op2, PauliOp) or isinstance(op2, TensoredOp)):
return SummedOp([self._convert(o, op2) for o in op1.oplist], coeff=op1.coeff)
elif isinstance(op1, SummedOp) and isinstance(op2, SummedOp):
return SummedOp([self._convert(o1, o2) for o1 in op1.oplist for o2 in op2.oplist])
elif isinstance(op1, TensoredOp) and isinstance(op2, PauliOp):
return self._convert(self.convert(op1), op2)
elif isinstance(op1, TensoredOp) and isinstance(op2, TensoredOp):
return self._convert(self.convert(op1), self.convert(op2))
else:
raise TypeError('TensoredOpDistributor can only distribute operators consisting on PauliOps, SummedOps or TensoredOps')
| 43.988095 | 131 | 0.677402 | 2,901 | 0.784903 | 0 | 0 | 0 | 0 | 0 | 0 | 1,881 | 0.508929 |
ef313c50d5c6317ec48b8b4af0c2b6702fb01991 | 8,027 | py | Python | tests/test_core_deformation.py | matmodlab/matmodlab2 | 97bb858e2b625cca5f3291db5d50bdbb6352e976 | ["BSD-3-Clause"] | 6 | 2017-02-14T02:04:56.000Z | 2022-02-03T04:53:32.000Z | tests/test_core_deformation.py | tjfulle/matmodlab2 | 97bb858e2b625cca5f3291db5d50bdbb6352e976 | ["BSD-3-Clause"] | 10 | 2017-01-21T00:00:06.000Z | 2017-01-22T07:39:44.000Z | tests/test_core_deformation.py | tjfulle/matmodlab2 | 97bb858e2b625cca5f3291db5d50bdbb6352e976 | ["BSD-3-Clause"] | 3 | 2018-10-20T22:53:59.000Z | 2022-01-13T07:17:24.000Z | # -*- coding: utf-8 -*-
"""
This file contains tests for tensor.py
"""
import sys
import pathlib
import pytest
import numpy as np
from testing_utils import isclose
# Ensure that 'matmodlab2' is imported from parent directory.
sys.path.insert(0, str(pathlib.Path(__file__).absolute().parent.parent))
import matmodlab2
import matmodlab2.core.deformation as df
def vec_isclose(name, comp, gold, rtol=1.0e-12, atol=1.0e-12):
print("===== {0}".format(name))
print("comp:", comp)
print("gold:", gold)
print("diff:", gold - comp)
PASS = np.allclose(comp, gold, rtol=rtol, atol=atol)
print("PASS" if PASS else "FAIL")
return PASS
deformation_measures_db = [
{"name": "Uniaxial Extension",
"eps": np.array([0.042857142857142857143,0,0,0,0,0]),
"depsdt": np.array([0.10000000000000000000,0,0,0,0,0]),
"subtests": [
{
"k": 2,
"u": np.array([1.0419761445034553738,1.0000000000000000000,1.0000000000000000000,0,0,0]),
"dudt": np.array([0.095971486993739310740,0,0,0,0,0]),
"d": np.array([0.092105263157894736842,0,0,0,0,0]),
},
{
"k": 1,
"u": np.array([1.0428571428571428571,1.0000000000000000000,1.0000000000000000000,0,0,0]),
"dudt": np.array([0.10000000000000000000,0,0,0,0,0]),
"d": np.array([0.095890410958904109589,0,0,0,0,0]),
},
{
"k": 0,
"u": np.array([1.0437887715175541853,1.0000000000000000000,1.0000000000000000000,0,0,0]),
"dudt": np.array([0.10437887715175541853,0,0,0,0,0]),
"d": np.array([0.10000000000000000000,0,0,0,0,0]),
},
{
"k": -1,
"u": np.array([1.0447761194029850746,1.0000000000000000000,1.0000000000000000000,0,0,0]),
"dudt": np.array([0.10915571396747605257,0,0,0,0,0]),
"d": np.array([0.10447761194029850746,0,0,0,0,0]),
},
{
"k": -2,
"u": np.array([1.0458250331675944350,1.0000000000000000000,1.0000000000000000000,0,0,0]),
"dudt": np.array([0.11438711300270564133,0,0,0,0,0]),
"d": np.array([0.10937500000000000000,0,0,0,0,0]),
},
],
},
{"name": "Uniaxial Extension with rotation",
"eps": np.array([0.026196877156206737235,0.016660265700936119908,0,0.020891312403896220150,0,0]),
"depsdt": np.array([-0.0045059468741139683829,0.10450594687411396838,0,0.063726469853100399588,0,0]),
"subtests": [
{
"k": 2,
"u": np.array([1.0256583576911247384,1.0163177868123306353,1.0000000000000000000,0.020461857461098139159,0,0]),
"dudt": np.array([-0.0056192451222061811013,0.10159073211594549184,0,0.061454775148472809312,0,0]),
"d": np.array([-0.0066876940055755266344,0.098792957163470263477,0,0.059274595960483676859,0,0]),
},
{
"k": 1,
"u": np.array([1.0261968771562067372,1.0166602657009361199,1.0000000000000000000,0.020891312403896220150,0,0]),
"dudt": np.array([-0.0045059468741139683829,0.10450594687411396838,0,0.063726469853100399588,0,0]),
"d": np.array([-0.0056693735828201687630,0.10155978454172427835,0,0.061415383576480024658,0,0]),
},
{
"k": 0,
"u": np.array([1.0267663449262200007,1.0170224265913341846,1.0000000000000000000,0.021345447796308002806,0,0]),
"dudt": np.array([-0.0032560207940279426371,0.10763489794578336117,0,0.066186651517750065998,0,0]),
"d": np.array([-0.0045260401459293278687,0.10452604014592932787,0,0.063731056011271402912,0,0]),
},
{
"k": -1,
"u": np.array([1.0273698716557383822,1.0174062477472466924,1.0000000000000000000,0.021826744302578140456,0,0]),
"dudt": np.array([-0.0018481668596687927090,0.11100388082714484528,0,0.068860299997538432155,0,0]),
"d": np.array([-0.0032383326989564664762,0.10771594463925497394,0,0.066244519079865882721,0,0]),
},
{
"k": -2,
"u": np.array([1.0280110311733133167,1.0178140019942811183,1.0000000000000000000,0.022338051955872830687,0,0]),
"dudt": np.array([-0.00025673980976010909772,0.11464385281246575042,0,0.071777050608761226760,0,0]),
"d": np.array([-0.0017829682784827673453,0.11115796827848276735,0,0.068982906840537447349,0,0]),
},
],
},
]
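# Each subtest above lists, for one value of the strain-measure parameter kappa,
# the reference stretch U, its rate dU/dt and the rate of deformation D that
# test_deformation_measures_from_strain_db below checks against.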
@pytest.mark.parametrize('a', np.linspace(0.0, 1.0e+1, 10))
@pytest.mark.parametrize('t', np.linspace(0.0, 1.0e+1, 10))
def test_deformation_measures_from_strain_uni_strain(a, t):
""" Verify that we are converting from strain to D correctly. """
# Setup
eps = np.array([a * t, 0.0, 0.0, 0.0, 0.0, 0.0])
depsdt = np.array([a, 0.0, 0.0, 0.0, 0.0, 0.0])
d_g = np.array([a, 0.0, 0.0, 0.0, 0.0, 0.0])
# Test
d = df.rate_of_strain_to_rate_of_deformation(depsdt, eps, 0)
assert vec_isclose("D", d, d_g)
# Teardown
pass
def test_deformation_measures_from_strain_dissertation_test():
""" Verify that we are converting from strain to D correctly. """
a = 0.5
t = 0.1
# Setup (inputs)
st = np.sin(np.pi * t)
ct = np.cos(np.pi * t)
sht = np.sinh(a * t)
eat = np.exp(a * t)
eps = np.array([a * t * np.cos(np.pi * t / 2.0) ** 2,
a * t * np.sin(np.pi * t / 2.0) ** 2,
0.0,
a * t * np.sin(np.pi * t) / 2.0,
0.0, 0.0])
depsdt = np.array([a / 2.0 * (1.0 + ct - np.pi * t * st),
a / 2.0 * (1.0 - ct + np.pi * t * st),
0.0,
a / 2.0 * (np.pi * t * ct + st),
0.0, 0.0])
# Setup (expected outputs)
d_g = np.array([(a + a * ct - np.pi * st * sht) / 2.0,
(a - a * ct + np.pi * st * sht) / 2.0,
0.0,
(a * st + np.pi * ct * sht) / 2.0,
0.0, 0.0])
# Test
d = df.rate_of_strain_to_rate_of_deformation(depsdt, eps, 0)
assert vec_isclose("D", d, d_g)
# Teardown
pass
def test_deformation_measures_from_strain_dissertation_static():
""" Verify that we are converting from strain to D correctly. """
# Setup (inputs)
eps=np.array([2.6634453918413015230,0.13875241035650067478,0,0.60791403008229297100,0,0])
depsdt=np.array([-0.66687706806142212351,1.9745693757537298158,0,4.2494716756395844993,0,0])
# Setup (expected outputs)
d_g=np.array([-4.3525785227788080461,5.6602708304711157384,0,11.902909607738023219,0,0])
# Test
d = df.rate_of_strain_to_rate_of_deformation(depsdt, eps, 0)
assert vec_isclose("D", d, d_g)
# Teardown
pass
@pytest.mark.parametrize('db', deformation_measures_db)
@pytest.mark.parametrize('idx', [0, 1, 2, 3, 4])
def test_deformation_measures_from_strain_db(db, idx):
"""
Test the deformation measures for various values of kappa.
"""
# Setup (inputs)
kappa = db['subtests'][idx]['k']
print("Test name, kappa: {0}, {1}".format(db['name'], kappa))
eps = db['eps']
depsdt = db['depsdt']
# Setup (expected outputs)
d_g = db['subtests'][idx]['d']
print("kappa=", kappa)
# Test
d = df.rate_of_strain_to_rate_of_deformation(depsdt, eps, kappa)
assert vec_isclose("D", d, d_g)
# Teardown
pass
def test_scalar_volume_strain_to_tensor():
ev = 1.
kappa = 0
e = df.scalar_volume_strain_to_tensor(ev, kappa)
assert isclose(np.sum(e[:3]), ev)
kappa = 1
e = df.scalar_volume_strain_to_tensor(ev, kappa)
eij = ((kappa * ev + 1.) ** (1. / 3.) - 1.) / kappa
assert isclose(e[0], eij)
kappa = -2
try:
e = df.scalar_volume_strain_to_tensor(ev, kappa)
raise Exception('Expected ValueError')
except ValueError as e:
assert e.args[0] == '1 + kappa * ev must be positive'
| 38.042654 | 123 | 0.590133 | 0 | 0 | 0 | 0 | 1,199 | 0.149371 | 0 | 0 | 1,064 | 0.132553 |
ef3678c7e21e6c165bc6c6b597bc9cfc9cfa52bc | 10,380 | py | Python | examples/tutorial/example4.py | sathiscode/trumania | bcf21c4f9e1ff0fe03fd9cbe2dc367f0df033fbc | ["Apache-2.0"] | 97 | 2018-01-15T19:29:31.000Z | 2022-03-11T00:27:34.000Z | examples/tutorial/example4.py | sathiscode/trumania | bcf21c4f9e1ff0fe03fd9cbe2dc367f0df033fbc | ["Apache-2.0"] | 10 | 2018-01-15T22:44:55.000Z | 2022-02-18T09:44:10.000Z | examples/tutorial/example4.py | sathiscode/trumania | bcf21c4f9e1ff0fe03fd9cbe2dc367f0df033fbc | ["Apache-2.0"] | 33 | 2018-01-15T19:34:23.000Z | 2022-03-05T22:39:33.000Z | from trumania.core import circus
import trumania.core.population as population
import trumania.core.random_generators as gen
import trumania.core.operations as ops
import trumania.core.story as story
import trumania.components.time_patterns.profilers as profilers
import trumania.core.util_functions as util_functions
import trumania.components.db as DB
import pandas as pd
# each step?() function below implement one step of the fourth example of the
# tutorial documented at
# https://realimpactanalytics.atlassian.net/wiki/display/LM/Data+generator+tutorial
# this is essentially a modification of example3, with some supplementary
# features demonstrating persistence
def build_music_repo():
# this time we create a "detached" population, not connected to a circus
repo = population.Population(
circus=None,
size=5,
ids_gen=gen.SequencialGenerator(prefix="GENRE_"))
repo.create_attribute(
name="genre_name",
init_values=["blues", "jazz", "electro", "pop", "rock"])
repo.create_relationship(name="songs", seed=18)
return repo
def add_song_to_repo(repo_population):
songs = population.Population(
circus=None,
size=0,
ids_gen=gen.SequencialGenerator(prefix="SONG_"))
    # since the size of the population is 0, we can create attributes without
# providing any initialization
songs.create_attribute(name="artist_name")
songs.create_attribute(name="song_genre")
songs.create_attribute(name="title")
songs.create_attribute(name="duration_seconds")
songs.create_attribute(name="recording_year")
song_id_gen = gen.SequencialGenerator(prefix="S_")
# generate artist names from a list of randomly generated ones, so we have
# some redundancy in the generated dataset
artist_name_gen = gen.NumpyRandomGenerator(
method="choice",
a=gen.FakerGenerator(
method="name",
seed=1234).generate(size=200),
seed=5678)
title_gen = gen.FakerGenerator(method="sentence",
seed=78961,
nb_words=4,
variable_nb_words=True)
# generates recording years within a desired date range
year_gen = gen.FakerGenerator(
method="date_time_between_dates",
seed=184,
datetime_start=pd.Timestamp("1910-10-20"),
datetime_end=pd.Timestamp("2016-12-02")) \
.map(f=lambda d: d.year)
duration_gen = gen.ParetoGenerator(xmin=60,
seed=9874,
force_int=True,
a=1.2)
repo_genre_rel = repo_population.get_attribute("genre_name")
for genre_id, genre_name in repo_genre_rel.get_values().items():
# an operation capable of creating songs of that genre
init_attribute = ops.Chain(
artist_name_gen.ops.generate(named_as="artist_name"),
title_gen.ops.generate(named_as="title"),
year_gen.ops.generate(named_as="recording_year"),
duration_gen.ops.generate(named_as="duration_seconds"),
gen.ConstantGenerator(value=genre_name).ops.generate(named_as="song_genre")
)
        # dataframe of empty songs: just with one SONG_ID column for now
song_ids = song_id_gen.generate(size=1000)
emtpy_songs = story.Story.init_story_data(
member_id_field_name="SONG_ID",
active_ids=song_ids
)
        # we can already add the generated songs to the music repo relationship
repo_population.get_relationship("songs").add_grouped_relations(
from_ids=[genre_id],
grouped_ids=[song_ids]
)
# here we generate all desired columns in the dataframe
initialized_songs, _ = init_attribute(emtpy_songs)
initialized_songs.drop(["SONG_ID"], axis=1, inplace=True)
# this works because the columns of init_attribute match exactly the
# ones of the attributes of the populations
songs.update(initialized_songs)
# makes sure year and duration are handled as integer
songs.get_attribute("recording_year").transform_inplace(int)
songs.get_attribute("duration_seconds").transform_inplace(int)
return songs
def build_circus(name):
return circus.Circus(
name=name,
master_seed=12345,
start=pd.Timestamp("1 Jan 2017 00:00"),
step_duration=pd.Timedelta("1h"))
def add_listener(the_circus):
users = the_circus.create_population(
name="user", size=5,
ids_gen=gen.SequencialGenerator(prefix="user_"))
users.create_attribute(
name="FIRST_NAME",
init_gen=gen.FakerGenerator(method="first_name",
seed=next(the_circus.seeder)))
users.create_attribute(
name="LAST_NAME",
init_gen=gen.FakerGenerator(method="last_name",
seed=next(the_circus.seeder)))
def add_listen_and_share_stories_with_details(the_circus):
users = the_circus.populations["user"]
    # using this timer means users are more likely to trigger listen/share
    # events during day hours rather than at night.
timer_gen = profilers.HighWeekDaysTimerGenerator(
clock=the_circus.clock, seed=next(the_circus.seeder))
# this generate activity level distributed as a "truncated normal
# distribution", i.e. very high and low activities are prevented.
bounded_gaussian_activity_gen = gen.NumpyRandomGenerator(
method="normal",
seed=next(the_circus.seeder),
loc=timer_gen.activity(n=20, per=pd.Timedelta("1 day")),
scale=5
).map(ops.bound_value(lb=10, ub=30))
listen = the_circus.create_story(
name="listen_events",
initiating_population=users,
member_id_field="UID",
timer_gen=timer_gen,
activity_gen=bounded_gaussian_activity_gen
)
share = the_circus.create_story(
name="share_events",
initiating_population=users,
member_id_field="UID",
timer_gen=timer_gen,
activity_gen=bounded_gaussian_activity_gen
)
repo = the_circus.populations["music_repository"]
songs = the_circus.populations["songs"]
select_genre_and_song = ops.Chain(
users.ops.lookup(
id_field="UID",
select={
"FIRST_NAME": "USER_FIRST_NAME",
"LAST_NAME": "USER_LAST_NAME",
}
),
# picks a genre at random
repo.ops.select_one(named_as="GENRE"),
# picks a song at random for that genre
repo.get_relationship("songs").ops.select_one(
from_field="GENRE",
named_as="SONG_ID"),
# now also reporting details of listened or shared songs
songs.ops.lookup(
id_field="SONG_ID",
select={
"artist_name": "SONG_ARTIST",
"title": "SONG_TITLE",
"recording_year": "SONG_YEAR",
"duration_seconds": "SONG_DURATION",
}
),
)
listen.set_operations(
select_genre_and_song,
ops.FieldLogger("listen_events")
)
share.set_operations(
select_genre_and_song,
# picks a user this song is shared to
users.ops.select_one(named_as="SHARED_TO_UID"),
# note we could post-check when user shared a song to their own uid
# here, in which case we can use DropRow to discard that share event
ops.FieldLogger("share_events")
)
def step1():
# this creates 2 populations: music_repo and songs
music_repo = build_music_repo()
songs = add_song_to_repo(music_repo)
# saves them to persistence
DB.remove_namespace(namespace="tutorial_example4")
DB.save_population(music_repo, namespace="tutorial_example4",
population_id="music_repository")
DB.save_population(songs, namespace="tutorial_example4",
population_id="songs")
# build a new circus then loads and attach the persisted population to it
example4_circus = build_circus(name="example4_circus")
example4_circus.load_population(namespace="tutorial_example4",
population_id="music_repository")
example4_circus.load_population(namespace="tutorial_example4",
population_id="songs")
add_listener(example4_circus)
def step2():
# this creates 2 populations: music_repo and songs
music_repo = build_music_repo()
songs = add_song_to_repo(music_repo)
# saves them to persistence
DB.remove_namespace(namespace="tutorial_example4")
DB.save_population(music_repo, namespace="tutorial_example4",
population_id="music_repository")
DB.save_population(songs, namespace="tutorial_example4",
population_id="songs")
# build a new circus then loads and attach the persisted population to it
example4_circus = build_circus(name="example4_circus")
example4_circus.load_population(namespace="tutorial_example4",
population_id="music_repository")
example4_circus.load_population(namespace="tutorial_example4",
population_id="songs")
add_listener(example4_circus)
# This saves the whole circus to persistence, with all its populations,
# relationships, generators,...
# This is independent from the 2 populations saved above: this time we no longer
# have direct control on the namespace: the persistence mechanism use the
# circus name as namespace
example4_circus.save_to_db(overwrite=True)
# example4bis should be an exact deep copy of example4_circus
example4bis = circus.Circus.load_from_db(circus_name="example4_circus")
# Stories are not serialized to CSV but rather serialized in code,
# using humans as transducers
add_listen_and_share_stories_with_details(example4bis)
example4bis.run(
duration=pd.Timedelta("5 days"),
log_output_folder="output/example4",
delete_existing_logs=True)
if __name__ == "__main__":
util_functions.setup_logging()
step2()
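# Illustrative note (sketch): step2() persists the whole circus, reloads an
# identical copy from the DB, re-attaches the listen/share stories in code and
# runs it for 5 simulated days, writing the event logs under output/example4.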
| 34.832215 | 87 | 0.657225 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,450 | 0.33237 |
ef377a0c8139bd037fffc10567802d319f904716 | 1,104 | py | Python | Hackerrank/Python/class-1-dealing-with-complex-numbers.py | PROxZIMA/Competitive-Coding | ba6b365ea130b6fcaa15c5537b530ed363bab793 | ["MIT"] | 1 | 2021-01-10T13:29:21.000Z | 2021-01-10T13:29:21.000Z | Hackerrank/Python/class-1-dealing-with-complex-numbers.py | PROxZIMA/Competitive-Coding | ba6b365ea130b6fcaa15c5537b530ed363bab793 | ["MIT"] | null | null | null | Hackerrank/Python/class-1-dealing-with-complex-numbers.py | PROxZIMA/Competitive-Coding | ba6b365ea130b6fcaa15c5537b530ed363bab793 | ["MIT"] | null | null | null | import math
class Complex(object):
def __init__(self, real, imaginary):
self.real = real
self.imaginary = imaginary
def __add__(self, no):
return (Complex(self.real + no.real, self.imaginary + no.imaginary))
def __sub__(self, no):
return (Complex(self.real - no.real, self.imaginary - no.imaginary))
def __mul__(self, no):
return (Complex(self.real * no.real - self.imaginary * no.imaginary, self.real * no.imaginary + self.imaginary * no.real))
def __truediv__(self, no):
product = self * Complex(no.real, -no.imaginary)
sq = no.real**2 + no.imaginary**2
return (Complex(product.real/sq, product.imaginary/sq))
def mod(self):
return (Complex((self.real**2 + self.imaginary**2)**0.5, 0))
def __str__(self):
return f'{self.real:.2f}{self.imaginary:+.2f}i'
if __name__ == '__main__':
c = map(float, input().split())
d = map(float, input().split())
x = Complex(*c)
y = Complex(*d)
print(*map(str, [x+y, x-y, x*y, x/y, x.mod(), y.mod()]), sep='\n') | 33.454545 | 130 | 0.588768 | 880 | 0.797101 | 0 | 0 | 0 | 0 | 0 | 0 | 54 | 0.048913 |
ef3d18dad9fb4f3ea7850ca0af729153b0fd6bb6 | 1,828 | py | Python | hyperparameter_tuner/run_command_generator.py | chutien/zpp-mem | 470dec89dda475f7272b876f191cef9f8266a6dc | ["MIT"] | 1 | 2019-10-22T11:33:23.000Z | 2019-10-22T11:33:23.000Z | hyperparameter_tuner/run_command_generator.py | chutien/zpp-mem | 470dec89dda475f7272b876f191cef9f8266a6dc | ["MIT"] | null | null | null | hyperparameter_tuner/run_command_generator.py | chutien/zpp-mem | 470dec89dda475f7272b876f191cef9f8266a6dc | ["MIT"] | null | null | null | from itertools import product
from hyperparameter_tuner.single_parameter_generator import single_parameter_generator as sgen
class run_command_generator():
def __init__(self, single_parameter_generator_list, command_prefix="python ../experiment.py",
output_path="./results"):
for gen in single_parameter_generator_list:
assert isinstance(gen, sgen)
self.single_parameter_generator_list = single_parameter_generator_list
self.run_command = command_prefix
self.output_path = output_path
def run_commands(self):
        all_params_generator = self.single_parameter_generator_list[0].params()
        for p in self.single_parameter_generator_list[1:]:
            all_params_generator = product(all_params_generator, p.params())
        for train_params in all_params_generator:
command = str(train_params).replace('(', '').replace(')', '').replace('\'', '').replace(',', '')
stripped_command = command.replace(' ', '_').replace('-', '').replace('.', '')
output_path = f"{self.output_path}/{stripped_command}"
command = f"{self.run_command} {command} >{output_path}.out 2>{output_path}.err"
yield command
def default_commands_generator(command_prefix="python experiment.py", output_path="./hyperparameter_tuner/results"):
return run_command_generator([sgen("name", ["vgg_16"]),
sgen("learning_rate", [0.001, 0.005, 0.01, 0.03, 0.07, 0.1, 0.5, 1]),
sgen("batch_size", [20, 25, 30, 35, 50, 75]),
], command_prefix=command_prefix, output_path=output_path).run_commands()
if __name__ == '__main__':
commands = default_commands_generator()
for c in commands:
print(c)
| 46.871795 | 116 | 0.650438 | 1,115 | 0.609956 | 685 | 0.374726 | 0 | 0 | 0 | 0 | 289 | 0.158096 |
ef3d7706ee027142a3cc848598e7a4e1a2e3f600 | 1,718 | py | Python | utils/storage/redisPSCO/python/storage/storage_object.py | TANGO-Project/compss-tango | d9e007b6fe4f8337d4f267f95f383d8962602ab8 | ["Apache-2.0"] | 3 | 2018-03-05T14:52:22.000Z | 2019-02-08T09:58:24.000Z | utils/storage/redisPSCO/python/storage/storage_object.py | TANGO-Project/compss-tango | d9e007b6fe4f8337d4f267f95f383d8962602ab8 | ["Apache-2.0"] | null | null | null | utils/storage/redisPSCO/python/storage/storage_object.py | TANGO-Project/compss-tango | d9e007b6fe4f8337d4f267f95f383d8962602ab8 | ["Apache-2.0"] | null | null | null | #
# Copyright 2017 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''Redis Storage Object implementation for the PyCOMPSs Python Binding
@author: srodrig1
'''
import uuid
import storage.api
class storage_object(object):
'''Storage Object
'''
def __init__(self):
'''Constructor method
'''
# Id will be None until persisted
self.pycompss_psco_identifier = None
def makePersistent(self, identifier = None):
'''Stores the object in the Redis database
'''
storage.api.makePersistent(self, identifier)
def make_persistent(self, identifier = None):
'''Support for underscore notation
'''
self.makePersistent(identifier)
def deletePersistent(self):
'''Deletes the object from the Redis database
'''
storage.api.deletePersistent(self)
def delete_persistent(self):
'''Support for underscore notation
'''
self.deletePersistent()
def getID(self):
'''Gets the ID of the object
'''
return self.pycompss_psco_identifier
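# Illustrative sketch (hypothetical subclass, for illustration only):
#
#     class Counter(storage_object):
#         def __init__(self):
#             super(Counter, self).__init__()
#             self.count = 0
#
#     c = Counter()
#     c.make_persistent()        # stores the object in the Redis database
#     print(c.getID())           # the persistent identifier (None until persisted)
#     c.delete_persistent()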
'''Add support for camelCase
'''
StorageObject = storage_object
| 28.163934 | 75 | 0.679278 | 904 | 0.526193 | 0 | 0 | 0 | 0 | 0 | 0 | 1,065 | 0.619907 |
ef3ec4855031980afb1650987b97c64ce63c1807 | 5,476 | py | Python | origin_response_test.py | dnsinogeorgos/lambdas | 4294089b311585c18e101e776aa2e8ca211413cd | ["Apache-2.0"] | null | null | null | origin_response_test.py | dnsinogeorgos/lambdas | 4294089b311585c18e101e776aa2e8ca211413cd | ["Apache-2.0"] | null | null | null | origin_response_test.py | dnsinogeorgos/lambdas | 4294089b311585c18e101e776aa2e8ca211413cd | ["Apache-2.0"] | null | null | null | # pylint: disable=C0114
import unittest
from origin_response import lambda_handler
event = {
"Records": [
{
"cf": {
"config": {"requestId": "thisfakeidisthisfakeidisthisfakeidis"},
"request": {"uri": ""},
"response": {"headers": {}, "status": 0},
}
}
]
}
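# Note: this mirrors (in trimmed form) the shape of a CloudFront Lambda@Edge
# origin-response event; each test below fills in the request "uri" and the
# response "status" before invoking lambda_handler.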
class TestLambdaHandler(unittest.TestCase): # pylint: disable=C0115
def test_success(self):
"""
This sends a success response
Should return identical result with injected security headers
"""
event["Records"][0]["cf"]["request"]["uri"] = "/v2/examplefolder/"
response = event["Records"][0]["cf"]["response"]
result = lambda_handler(event, None)
headers = response["headers"]
headers["strict-transport-security"] = [
{"value": "max-age=63072000; includeSubdomains; preload"}
]
headers["content-security-policy"] = [{"value": "default-src 'self' https://*"}]
headers["x-content-type-options"] = [{"value": "nosniff"}]
headers["x-frame-options"] = [{"value": "DENY"}]
headers["x-xss-protection"] = [{"value": "1; mode=block"}]
headers["referrer-policy"] = [{"value": "strict-origin-when-cross-origin"}]
self.assertEqual(result, response)
def test_not_found_with_slash(self):
"""
This sends a not found response for a URI with trailing slash
Should return identical result with injected security headers
"""
event["Records"][0]["cf"]["request"]["uri"] = "/v2/examplefolder/"
response = event["Records"][0]["cf"]["response"]
response["status"] = 404
result = lambda_handler(event, None)
headers = response["headers"]
headers["strict-transport-security"] = [
{"value": "max-age=63072000; includeSubdomains; preload"}
]
headers["content-security-policy"] = [
{
"value": "default-src 'self' https://*.everypay.gr:443; script-src 'self' 'unsafe-inline' 'unsafe-eval' https://*.everypay.gr:443 http://www.google-analytics.com https://www.google-analytics.com https://ssl.google-analytics.com http://html5shim.googlecode.com https://html5shim.googlecode.com https://ajax.googleapis.com https://netdna.bootstrapcdn.com https://code.jquery.com https://*.doubleclick.net https://www.googletagmanager.com https://connect.facebook.net; img-src 'self' data: https://*.everypay.gr:443 http://www.google-analytics.com https://www.google-analytics.com https://ssl.google-analytics.com https://*.doubleclick.net; style-src 'self' 'unsafe-inline' 'unsafe-eval' https://*.everypay.gr:443 https://fonts.googleapis.com; font-src 'self' https://*.everypay.gr:443 https://themes.googleusercontent.com https://fonts.gstatic.com; frame-src 'self' https://*; object-src 'self' https://*.everypay.gr:443" # pylint: disable=C0301 # noqa: E501
}
]
headers["x-content-type-options"] = [{"value": "nosniff"}]
headers["x-frame-options"] = [{"value": "DENY"}]
headers["x-xss-protection"] = [{"value": "1; mode=block"}]
headers["referrer-policy"] = [{"value": "strict-origin-when-cross-origin"}]
self.assertEqual(result, response)
def test_not_found_without_slash(self):
"""
This sends a not found response for a URI without trailing slash
        Should return 302 redirect result to the original URI appended
with "/" and injected security headers
"""
event["Records"][0]["cf"]["request"]["uri"] = "/v2/examplefolder"
response = event["Records"][0]["cf"]["response"]
response["status"] = 404
result = lambda_handler(event, None)
headers = response["headers"]
headers["strict-transport-security"] = [
{"value": "max-age=63072000; includeSubdomains; preload"}
]
headers["content-security-policy"] = [
{
"value": "default-src 'self' https://*.everypay.gr:443; script-src 'self' 'unsafe-inline' 'unsafe-eval' https://*.everypay.gr:443 http://www.google-analytics.com https://www.google-analytics.com https://ssl.google-analytics.com http://html5shim.googlecode.com https://html5shim.googlecode.com https://ajax.googleapis.com https://netdna.bootstrapcdn.com https://code.jquery.com https://*.doubleclick.net https://www.googletagmanager.com https://connect.facebook.net; img-src 'self' data: https://*.everypay.gr:443 http://www.google-analytics.com https://www.google-analytics.com https://ssl.google-analytics.com https://*.doubleclick.net; style-src 'self' 'unsafe-inline' 'unsafe-eval' https://*.everypay.gr:443 https://fonts.googleapis.com; font-src 'self' https://*.everypay.gr:443 https://themes.googleusercontent.com https://fonts.gstatic.com; frame-src 'self' https://*; object-src 'self' https://*.everypay.gr:443" # pylint: disable=C0301 # noqa: E501
}
]
headers["x-content-type-options"] = [{"value": "nosniff"}]
headers["x-frame-options"] = [{"value": "DENY"}]
headers["x-xss-protection"] = [{"value": "1; mode=block"}]
headers["referrer-policy"] = [{"value": "strict-origin-when-cross-origin"}]
headers["location"] = [{"key": "Location", "value": "/v2/examplefolder/"}]
response["status"] = 302
self.assertEqual(result, response)
if __name__ == "__main__":
unittest.main()
| 58.255319 | 974 | 0.626004 | 5,070 | 0.925858 | 0 | 0 | 0 | 0 | 0 | 0 | 3,726 | 0.680424 |
ef3f29141380c4970504779ca0adbe37edfcc48e | 377 | py | Python | lang/Python/abstract-type-2.py | ethansaxenian/RosettaDecode | 8ea1a42a5f792280b50193ad47545d14ee371fb7 | ["MIT"] | null | null | null | lang/Python/abstract-type-2.py | ethansaxenian/RosettaDecode | 8ea1a42a5f792280b50193ad47545d14ee371fb7 | ["MIT"] | null | null | null | lang/Python/abstract-type-2.py | ethansaxenian/RosettaDecode | 8ea1a42a5f792280b50193ad47545d14ee371fb7 | ["MIT"] | null | null | null | from abc import ABCMeta, abstractmethod
class BaseQueue(metaclass=ABCMeta):
"""Abstract Class
"""
def __init__(self):
self.contents = list()
@abstractmethod
def Enqueue(self, item):
pass
@abstractmethod
def Dequeue(self):
pass
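    # Illustrative sketch: a concrete subclass must override both abstract
    # methods before it can be instantiated (otherwise TypeError is raised):
    #
    #     class FifoQueue(BaseQueue):
    #         def Enqueue(self, item):
    #             self.contents.append(item)
    #         def Dequeue(self):
    #             return self.contents.pop(0)
    #
    #     q = FifoQueue()
    #     q.Enqueue(1); q.Enqueue(2)
    #     q.Print_Contents()   # prints: 1 2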
def Print_Contents(self):
for i in self.contents:
print(i, end=' ')
| 17.136364 | 39 | 0.594164 | 334 | 0.885942 | 0 | 0 | 108 | 0.286472 | 0 | 0 | 28 | 0.074271 |
ef4114aeaf1e0c3215bf5aee9d278bc0e2171dca | 338 | py | Python | apps/permissions/router.py | yhkl-dev/JAutoOps | e42342fc6d814813dcac2e0154cd5dfdc1adf4c1 | ["MIT"] | null | null | null | apps/permissions/router.py | yhkl-dev/JAutoOps | e42342fc6d814813dcac2e0154cd5dfdc1adf4c1 | ["MIT"] | null | null | null | apps/permissions/router.py | yhkl-dev/JAutoOps | e42342fc6d814813dcac2e0154cd5dfdc1adf4c1 | ["MIT"] | null | null | null | from rest_framework.routers import DefaultRouter
from .views import PermissionsViewset, GroupPermissionsViewset
permission_router = DefaultRouter()
permission_router.register(r'permissions', PermissionsViewset, basename="permissions")
permission_router.register(r'grouppermissions', GroupPermissionsViewset, basename="grouppermissions") | 48.285714 | 101 | 0.866864 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 64 | 0.189349 |
ef41254ab69ff27661576195222b554a1c94e4da | 6,158 | py | Python | src/inscriptis/model/canvas/__init__.py | rlskoeser/inscriptis | e23f79a4ad561f53943c3c6dd70a7d4981b0e0fb | ["Apache-2.0"] | 90 | 2016-01-29T15:09:21.000Z | 2022-03-08T15:08:57.000Z | src/inscriptis/model/canvas/__init__.py | rlskoeser/inscriptis | e23f79a4ad561f53943c3c6dd70a7d4981b0e0fb | ["Apache-2.0"] | 27 | 2016-01-14T10:30:10.000Z | 2022-03-24T08:00:31.000Z | src/inscriptis/model/canvas/__init__.py | rlskoeser/inscriptis | e23f79a4ad561f53943c3c6dd70a7d4981b0e0fb | ["Apache-2.0"] | 20 | 2016-01-14T12:50:55.000Z | 2022-03-04T07:26:30.000Z | #!/usr/bin/env python
# encoding: utf-8
"""Classes used for rendering (parts) of the canvas.
Every parsed :class:`~inscriptis.model.html_element.HtmlElement` writes its
textual content to the canvas which is managed by the following three classes:
- :class:`Canvas` provides the drawing board on which the HTML page is
serialized and annotations are recorded.
- :class:`~inscriptis.model.canvas.block.Block` contains the current line to
which text is written.
- :class:`~inscriptis.model.canvas.prefix.Prefix` handles indentation
and bullets that prefix a line.
"""
from inscriptis.annotation import Annotation
from inscriptis.html_properties import WhiteSpace, Display
from inscriptis.model.canvas.block import Block
from inscriptis.model.html_element import HtmlElement
from inscriptis.model.canvas.prefix import Prefix
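# Illustrative sketch (Canvas is normally driven by the Inscriptis HTML parser
# rather than called directly; `element` stands for a parsed HtmlElement):
#
#     canvas = Canvas()
#     canvas.open_tag(element)
#     canvas.write(element, 'some text')
#     canvas.close_tag(element)
#     text = canvas.get_text()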
class Canvas:
r"""The text Canvas on which Inscriptis writes the HTML page.
Attributes:
margin: the current margin to the previous block (this is required to
ensure that the `margin_after` and `margin_before` constraints of
HTML block elements are met).
current_block: A :class:`~inscriptis.model.canvas.block.Block` which
merges the input text into a block (i.e., line).
blocks: a list of strings containing the completed blocks (i.e.,
text lines). Each block spawns at least one line.
annotations: the list of recorded
:class:`~inscriptis.annotation.Annotation`\s.
_open_annotations: a map of open tags that contain annotations.
"""
__slots__ = ('annotations', 'blocks', 'current_block', '_open_annotations',
'margin')
def __init__(self):
self.margin = 1000 # margin to the previous block
self.current_block = Block(0, Prefix())
self.blocks = []
self.annotations = []
self._open_annotations = {}
def open_tag(self, tag: HtmlElement) -> None:
"""Register that a tag is opened.
Args:
tag: the tag to open.
"""
if tag.annotation:
self._open_annotations[tag] = self.current_block.idx
if tag.display == Display.block:
self.open_block(tag)
def open_block(self, tag: HtmlElement):
"""Open an HTML block element."""
# write missing bullets, if no content has been written
if not self._flush_inline() and tag.list_bullet:
self.write_unconsumed_bullet()
self.current_block.prefix.register_prefix(tag.padding_inline,
tag.list_bullet)
# write the block margin
required_margin = max(tag.previous_margin_after, tag.margin_before)
if required_margin > self.margin:
required_newlines = required_margin - self.margin
self.current_block.idx += required_newlines
self.blocks.append('\n' * (required_newlines - 1))
self.margin = required_margin
def write_unconsumed_bullet(self):
"""Write unconsumed bullets to the blocks list."""
bullet = self.current_block.prefix.unconsumed_bullet
if bullet:
self.blocks.append(bullet)
self.current_block.idx += len(bullet)
self.current_block = self.current_block.new_block()
self.margin = 0
def write(self, tag: HtmlElement, text: str,
whitespace: WhiteSpace = None) -> None:
"""Write the given text to the current block."""
self.current_block.merge(text, whitespace or tag.whitespace)
def close_tag(self, tag: HtmlElement) -> None:
"""Register that the given tag tag is closed.
Args:
tag: the tag to close.
"""
if tag.display == Display.block:
# write missing bullets, if no content has been written so far.
if not self._flush_inline() and tag.list_bullet:
self.write_unconsumed_bullet()
self.current_block.prefix.remove_last_prefix()
self.close_block(tag)
if tag in self._open_annotations:
start_idx = self._open_annotations.pop(tag)
# do not record annotations with no content
if start_idx == self.current_block.idx:
return
for annotation in tag.annotation:
self.annotations.append(
Annotation(start_idx, self.current_block.idx, annotation))
def close_block(self, tag: HtmlElement):
"""Close the given HtmlElement by writing its bottom margin.
Args:
tag: the HTML Block element to close
"""
if tag.margin_after > self.margin:
required_newlines = tag.margin_after - self.margin
self.current_block.idx += required_newlines
self.blocks.append('\n' * (required_newlines - 1))
self.margin = tag.margin_after
def write_newline(self):
if not self._flush_inline():
self.blocks.append('')
self.current_block = self.current_block.new_block()
def get_text(self) -> str:
"""Provide a text representation of the Canvas."""
self._flush_inline()
return '\n'.join(self.blocks)
def _flush_inline(self) -> bool:
"""Attempt to flush the content in self.current_block into a new block.
Notes:
- If self.current_block does not contain any content (or only
whitespaces) no changes are made.
- Otherwise the content of current_block is added to blocks and a
new current_block is initialized.
Returns:
True if the attempt was successful, False otherwise.
"""
if not self.current_block.is_empty():
self.blocks.append(self.current_block.content)
self.current_block = self.current_block.new_block()
self.margin = 0
return True
return False
@property
def left_margin(self) -> int:
"""Return the length of the current line's left margin."""
return self.current_block.prefix.current_padding
| 38.248447 | 79 | 0.636733 | 5,311 | 0.862455 | 0 | 0 | 167 | 0.027119 | 0 | 0 | 2,610 | 0.423839 |
ef4351fb100c957415ebe720f79b5a02ebc2c300 | 9,324 | py | Python | tests/webtests/test_admin.py | zodman/ZoomFoundry | 87a69f519a2ab6b63aeec0a564ce41259e64f88d | ["MIT"] | 8 | 2017-04-10T09:53:15.000Z | 2020-08-16T09:53:14.000Z | tests/webtests/test_admin.py | zodman/ZoomFoundry | 87a69f519a2ab6b63aeec0a564ce41259e64f88d | ["MIT"] | 49 | 2017-04-13T22:51:48.000Z | 2019-08-15T22:53:25.000Z | tests/webtests/test_admin.py | zodman/ZoomFoundry | 87a69f519a2ab6b63aeec0a564ce41259e64f88d | ["MIT"] | 12 | 2017-04-11T04:16:47.000Z | 2019-08-10T21:41:54.000Z |
# -*- coding: utf-8 -*-
"""
zoom.tests.webdriver_tests.test_admin
test admin app functions
"""
from zoom.testing.webtest import AdminTestCase
class SystemTests(AdminTestCase):
"""MyApp system tests"""
def add_user(self, first_name, last_name, email, username):
self.get('/admin')
self.get('/admin/users')
self.get('/admin/users/new')
self.fill(
dict(
first_name=first_name,
last_name=last_name,
email=email,
username=username,
)
)
self.chosen('memberships', ['managers'])
self.click('create_button')
def delete_user(self, username):
self.get('/admin')
self.get('/admin/users')
self.click_link(username)
self.click('id=delete-action')
self.click('name=delete_button')
def add_group(self, name, description):
self.get('/admin/groups/new')
self.fill(
dict(
name=name,
)
)
element = self.find('//*[@id="description"]')
element.send_keys(description)
self.click('create_button')
def delete_group(self, name):
self.get('/admin/groups')
self.click_link(name)
self.click('id=delete-action')
self.click('name=delete_button')
def test_admin_add_remove_user(self):
self.get('/admin/users')
self.assertDoesNotContain('sally')
self.add_user('Sally', 'Jones', '[email protected]', 'sally')
self.get('/admin/users')
self.assertContains('sally')
self.delete_user('sally')
self.get('/admin/users')
self.assertDoesNotContain('sally')
def test_deactivate_activate_user(self):
self.get('/admin/users')
self.assertDoesNotContain('sally')
try:
self.add_user('Sally', 'Jones', '[email protected]', 'sally')
self.assertContains('sally')
self.get('/admin/users/sally')
self.assertContains('[email protected]')
self.assertContains('Deactivate')
self.click('Deactivate')
self.assertNotContains('Deactivate')
self.assertContains('[email protected]')
self.assertContains('Activate')
self.click('Activate')
self.assertNotContains('Activate')
self.assertContains('Deactivate')
finally:
self.delete_user('sally')
self.get('/admin/users')
self.assertDoesNotContain('sally')
def test_index_search(self):
self.get('/admin')
self.fill(dict(q='sally'))
self.click('search-button')
self.assertContains('no records found')
self.add_user('Sally', 'Jones', '[email protected]', 'sally')
self.get('/admin')
self.fill(dict(q='sally'))
self.click('search-button')
self.assertNotContains('no records found')
self.assertContains('[email protected]')
self.delete_user('sally')
self.get('/admin')
self.fill(dict(q='sally'))
self.click('search-button')
self.assertContains('no records found')
def test_change_group_admin(self):
self.get('/admin/groups')
self.assertDoesNotContain('special_group')
self.add_group('special_group', 'special test group')
try:
self.get('/admin/groups')
self.assertContains('special_group')
self.get('/admin/groups')
self.find('//*[@name="link-to-special_group"]').click()
self.find('//*[@name="link-to-administrators"]')
self.find('//*[@id="edit-action"]').click()
self.click("//select[@id='admin_group_id']/option[text()='users']")
self.click('save_button')
self.find('//*[@name="link-to-users"]')
self.find('//*[@id="edit-action"]').click()
self.click("//select[@id='admin_group_id']/option[text()='administrators']")
self.click('save_button')
self.find('//*[@name="link-to-administrators"]')
finally:
self.delete_group('special_group')
self.get('/admin/groups')
self.assertDoesNotContain('special_group')
def test_add_remove_subgroup(self):
self.get('/admin')
# group 5 = content managers
self.get('/admin/groups')
self.assertContains('link-to-guests')
self.get('/admin/groups/5')
self.assertDoesNotContain('link-to-guests')
self.get('/admin/groups/5/edit')
self.assertDoesNotContain('link-to-guests')
try:
self.get('/admin/groups/5/edit')
self.chosen('subgroups', ['guests'])
self.click('id=save_button')
self.assertContains('link-to-guests')
finally:
# remove the subgroup we just added
self.get('/admin/groups/5/edit')
element = self.find('//*[@id="subgroups_chosen"]/ul/li[2]/a')
element.click()
self.click('id=save_button')
self.get('/admin/groups/5')
self.assertDoesNotContain('link-to-guests')
def test_add_remove_role(self):
self.get('/admin')
# group 5 = content managers
self.get('/admin/groups')
self.assertContains('link-to-guests')
self.get('/admin/groups/5')
self.assertDoesNotContain('link-to-guests')
self.get('/admin/groups/5/edit')
self.assertDoesNotContain('link-to-guests')
try:
self.get('/admin/groups/5/edit')
self.chosen('subgroups', ['guests'])
self.click('id=save_button')
self.assertContains('link-to-guests')
finally:
# remove the role we just added
self.get('/admin/groups/5/edit')
element = self.find('//*[@id="subgroups_chosen"]/ul/li[2]/a')
element.click()
self.click('id=save_button')
self.get('/admin/groups/5')
self.assertDoesNotContain('link-to-guests')
def test_add_remove_app(self):
# group 5 = content managers
self.get('/admin/groups/5')
self.assertDoesNotContain('Register')
try:
self.get('/admin/groups/5/edit')
self.chosen('apps', ['Register'])
self.click('id=save_button')
self.assertContains('Register')
finally:
# remove the app we just added
self.get('/admin/groups/5/edit')
element = self.find('//*[@id="apps_chosen"]/ul/li[1]/a')
element.click()
self.click('id=save_button')
self.get('/admin/groups/5')
self.assertDoesNotContain('Register')
def test_add_remove_several_apps(self):
# group 5 = content managers
self.get('/admin/groups/5')
self.assertDoesNotContain('Register')
# add an app
self.get('/admin/groups/5/edit')
self.chosen('apps', ['Register'])
self.click('id=save_button')
self.assertContains('Register')
# add another app
self.get('/admin/groups/5/edit')
self.chosen('apps', ['Forgot'])
self.click('id=save_button')
self.assertContains('Register')
self.assertContains('Forgot')
# add one more app
self.get('/admin/groups/5/edit')
self.chosen('apps', ['Sample'])
self.click('id=save_button')
self.assertContains('Register')
self.assertContains('Forgot')
self.assertContains('Sample')
# remove one of the apps we just added
self.get('/admin/groups/5/edit')
element = self.find('//*[@id="apps_chosen"]/ul/li[2]/a')
element.click()
self.click('id=save_button')
self.assertContains('Forgot')
self.assertDoesNotContain('Register')
self.assertContains('Sample')
# remove another one of the apps we just added
self.get('/admin/groups/5/edit')
element = self.find('//*[@id="apps_chosen"]/ul/li[2]/a')
element.click()
self.click('id=save_button')
self.assertContains('Forgot')
self.assertDoesNotContain('Register')
self.assertDoesNotContain('Sample')
# remove final app added
self.get('/admin/groups/5/edit')
element = self.find('//*[@id="apps_chosen"]/ul/li[1]/a')
element.click()
self.click('id=save_button')
self.assertDoesNotContain('Forgot')
self.assertDoesNotContain('Register')
self.assertDoesNotContain('Sample')
def test_add_remove_user(self):
# group 5 = content managers
self.get('/admin/groups/5')
self.assertDoesNotContain('link-to-guest')
try:
self.get('/admin/groups/5/edit')
self.chosen('users', ['guest'])
self.click('id=save_button')
self.assertContains('link-to-guest')
finally:
# remove the user we just added
self.get('/admin/groups/5/edit')
element = self.find('//*[@id="users_chosen"]/ul/li[1]/a')
element.click()
self.click('id=save_button')
self.get('/admin/groups/5')
self.assertDoesNotContain('link-to-guests')
| 30.976744 | 88 | 0.569284 | 9,166 | 0.983054 | 0 | 0 | 0 | 0 | 0 | 0 | 3,350 | 0.359288 |
ef44efdf1df1a7a380310f517a87f13a57e2f804 | 1,832 | py | Python | server/app.py | Catsvilles/Lofi | f3a783a5ba3e80e6c8f958990f6f09767d25a48e | ["Apache-2.0"] | 27 | 2021-07-14T17:12:29.000Z | 2022-03-18T16:15:18.000Z | server/app.py | Catsvilles/Lofi | f3a783a5ba3e80e6c8f958990f6f09767d25a48e | ["Apache-2.0"] | 3 | 2021-08-29T11:22:04.000Z | 2022-02-16T23:20:04.000Z | server/app.py | Catsvilles/Lofi | f3a783a5ba3e80e6c8f958990f6f09767d25a48e | ["Apache-2.0"] | 4 | 2021-07-25T09:55:09.000Z | 2022-03-25T17:16:18.000Z |
import json
import torch
from flask import Flask, request, jsonify
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address
from model.lofi2lofi_model import Decoder as Lofi2LofiDecoder
from model.lyrics2lofi_model import Lyrics2LofiModel
from server.lofi2lofi_generate import decode
from server.lyrics2lofi_predict import predict
device = "cpu"
app = Flask(__name__)
limiter = Limiter(
app,
key_func=get_remote_address,
default_limits=["30 per minute"]
)
lofi2lofi_checkpoint = "checkpoints/lofi2lofi_decoder.pth"
print("Loading lofi model...", end=" ")
lofi2lofi_model = Lofi2LofiDecoder(device=device)
lofi2lofi_model.load_state_dict(torch.load(lofi2lofi_checkpoint, map_location=device))
print(f"Loaded {lofi2lofi_checkpoint}.")
lofi2lofi_model.to(device)
lofi2lofi_model.eval()
lyrics2lofi_checkpoint = "checkpoints/lyrics2lofi.pth"
print("Loading lyrics2lofi model...", end=" ")
lyrics2lofi_model = Lyrics2LofiModel(device=device)
lyrics2lofi_model.load_state_dict(torch.load(lyrics2lofi_checkpoint, map_location=device))
print(f"Loaded {lyrics2lofi_checkpoint}.")
lyrics2lofi_model.to(device)
lyrics2lofi_model.eval()
@app.route('/')
def home():
return 'Server running'
@app.route('/decode', methods=['GET'])
def decode_input():
input = request.args.get('input')
number_list = json.loads(input)
json_output = decode(lofi2lofi_model, torch.tensor([number_list]).float())
response = jsonify(json_output)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
@app.route('/predict', methods=['GET'])
def lyrics_to_track():
input = request.args.get('input')
json_output = predict(lyrics2lofi_model, input)
response = jsonify(json_output)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
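# Example request (sketch; assumes the app is served, e.g. with `flask run` on the
# default port, and that the checkpoint files loaded above are present):
# curl "http://localhost:5000/decode?input=[0.1,-0.2,0.3]"
# The endpoint parses the JSON list, runs the lofi2lofi decoder on it and returns the
# generated track as JSON with the Access-Control-Allow-Origin header set.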
| 29.548387 | 90 | 0.771288 | 0 | 0 | 0 | 0 | 654 | 0.356987 | 0 | 0 | 337 | 0.183952 |
ef473c6a7f8ab89bcd75652de804e2198dfb2d97 | 1,153 | py | Python | cw-bitcoin-price.py | buraktokman/Crypto-Exchange-Data-Fetcher | 23e6ba542ff7a862af3247db2c04c2c10a5f3edf | ["MIT"] | 1 | 2021-08-09T07:22:25.000Z | 2021-08-09T07:22:25.000Z | cw-bitcoin-price.py | buraktokman/Crypto-Exchange-Data-Fetcher | 23e6ba542ff7a862af3247db2c04c2c10a5f3edf | ["MIT"] | null | null | null | cw-bitcoin-price.py | buraktokman/Crypto-Exchange-Data-Fetcher | 23e6ba542ff7a862af3247db2c04c2c10a5f3edf | ["MIT"] | null | null | null |
#!/usr/bin/env python3
'''
Cryptowat.ch API
https://cryptowat.ch/docs/api
https://api.cryptowat.ch/markets/prices '''
import urllib.request, json, datetime, time
from urllib.request import urlopen
from pathlib import Path
csv_file_price = Path(__file__).parents[0] / 'data' / 'cryptowatch-bitcoin-price2.csv'
def request(url):
with urllib.request.urlopen(url) as response:
data = json.loads(response.read().decode())
print(data)
return data['result']['price']['last'], data['result']['volume']
def main():
current_time = datetime.datetime.now(datetime.timezone.utc)
unix_timestamp = current_time.timestamp()
print(int(unix_timestamp))
url = 'https://api.cryptowat.ch/markets/prices'
try:
price, volume = request(url)
except Exception as e:
print(e)
#with open(csv_file_price, 'a') as f:
# f.write(str(int(unix_timestamp)) + ',' + price + '\n')
if __name__ == '__main__':
#main()
while True:
now = datetime.datetime.now()
while (now.second % 5):
now = datetime.datetime.now()
print(now.second)
time.sleep(0.5)
main()
| 26.813953 | 86 | 0.633998 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 344 | 0.298352 |
ef477b67fc29e51e58555a187fcad861bf802178 | 3,516 | py | Python | Actor_critic/actor_critic_test.py | aniketSanap/RL-session | 68243121277c24509585f51fd01f53fe8d41f119 | ["MIT"] | null | null | null | Actor_critic/actor_critic_test.py | aniketSanap/RL-session | 68243121277c24509585f51fd01f53fe8d41f119 | ["MIT"] | null | null | null | Actor_critic/actor_critic_test.py | aniketSanap/RL-session | 68243121277c24509585f51fd01f53fe8d41f119 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""actor_critic.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/17Gpya9yswf-xonOvKhoHpmQGhCYpq8x4
"""
# !pip install box2d-py
# !pip install gym[Box_2D]
import torch
from torch import nn
from torch.nn import functional as F
import numpy as np
import gym
import os
class ActorCritic(nn.Module):
def __init__(self, num_actions):
super().__init__()
self.fc2 = nn.Linear(8, 2048)
self.fc3 = nn.Linear(2048, 512)
self.pi = nn.Linear(512, num_actions)
self.v = nn.Linear(512, 1)
def forward(self, x):
# x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
pi = self.pi(x)
v = self.v(x)
return pi, v
class Agent():
def __init__(self):
self.gamma = 0.99
self.log_probs = None
self.env = gym.make('LunarLander-v2')
num_actions = self.env.action_space.n
self.ac_network = ActorCritic(num_actions=num_actions).cuda()
self.MODEL_PATH = 'ac_model.pth'
if os.path.exists(self.MODEL_PATH):
print('Existing model found!')
self.ac_network.load_state_dict(torch.load(self.MODEL_PATH))
self.ac_network.eval()
else:
print('No existing model.')
self.optimizer = torch.optim.Adam(self.ac_network.parameters(), lr=1e-5)
self.MAX_FRAMES = 10000
self.NUM_EPISODES = 3000
def choose_action(self, state):
policy, _ = self.ac_network(torch.tensor(state).cuda())
probabilities = F.softmax(policy, dim=-1)
action_distribution = torch.distributions.Categorical(probabilities)
action = action_distribution.sample()
self.log_probs = action_distribution.log_prob(action)
return action.item()
def learn(self, state, next_state, reward, done):
# with torch.no_grad():
self.optimizer.zero_grad()
_, curr_value = self.ac_network(torch.tensor(state).cuda())
_, next_value = self.ac_network(torch.tensor(next_state).cuda())
reward = torch.tensor(reward).cuda()
if done:
advantage = reward - curr_value
else:
advantage = reward + self.gamma * next_value - curr_value
actor_loss = -self.log_probs * advantage
critic_loss = advantage ** 2
loss = actor_loss + critic_loss
# loss.item()
loss.backward()
self.optimizer.step()
return loss.item()
def play(self):
for i_episode in range(self.NUM_EPISODES):
observation = self.env.reset()
curr_state = observation
done = False
total_rewards = 0
total_loss = 0
num_frames = 0
while not done:
self.env.render()
action = self.choose_action(curr_state)
observation, reward, done, info = self.env.step(action)
# loss = self.learn(state=curr_state, next_state=observation, reward=reward, done=done)
curr_state = observation
total_rewards += reward
# total_loss += loss
num_frames += 1
print(f'Episode # {i_episode} done, total reward: {total_rewards}')
self.env.close()
agent = Agent()
agent.play()
torch.save(agent.ac_network.state_dict(), agent.MODEL_PATH)
| 32.859813 | 103 | 0.593003 | 3,060 | 0.870307 | 0 | 0 | 0 | 0 | 0 | 0 | 571 | 0.1624 |
ef488748bc20e35c68916d75dae55ef743e1069d | 6,145 | py | Python | python/orz/sta2json.py | ViewFaceCore/OpenRoleZoo | 19cef3cdc5238374cedcf7068dc7a6ad8448c21b | ["BSD-2-Clause"] | null | null | null | python/orz/sta2json.py | ViewFaceCore/OpenRoleZoo | 19cef3cdc5238374cedcf7068dc7a6ad8448c21b | ["BSD-2-Clause"] | null | null | null | python/orz/sta2json.py | ViewFaceCore/OpenRoleZoo | 19cef3cdc5238374cedcf7068dc7a6ad8448c21b | ["BSD-2-Clause"] | null | null | null |
#!/usr/bin/env python
# coding: UTF-8
import os
import struct
from .sta import *
import json
import copy
import base64
from collections import OrderedDict
class Stream:
def __init__(self, byte):
self.byte = byte
self.index = 0
def read(self, size=None):
data = ''
if size is None:
data = self.byte[self.index:]
else:
data = self.byte[self.index:self.index+size]
self.index += len(data)
return data
def unpack_nil(stream, **kwargs):
stream.read(1)
return None
def unpack_int(stream, **kwargs):
return struct.unpack('=i', stream.read(4))[0]
def unpack_float(stream, **kwargs):
return struct.unpack('=f', stream.read(4))[0]
def unpack_string(stream, **kwargs):
length = struct.unpack('=i', stream.read(4))[0]
s = struct.unpack('=%ds' % length, stream.read(length))[0].decode()
return s
def unpack_binary(stream, **kwargs):
length = struct.unpack('=i', stream.read(4))[0]
s = struct.unpack('=%ds' % length, stream.read(length))[0]
mode = 0
if 'binary_mode' in kwargs:
mode = kwargs['binary_mode']
if mode == 0:
return '@base64@%s' % base64.b64encode(s).decode('ascii')
elif mode == 1:
# save file
if 'getway' not in kwargs:
raise Exception("getway must be set.")
if 'workshop' not in kwargs:
raise Exception("workshop must be set.")
filename_ext = kwargs['getway'] + '.bin'
binary_filename = os.path.join(kwargs['workshop'], filename_ext)
# bytes are immutable, so patch index 8 on a mutable copy before writing it out
s = bytearray(s)
s[8] = 1
with open(binary_filename, 'wb') as f:
f.write(s)
return '@file@%s' % filename_ext
elif mode == 2:
return '@binary@%d' % length
else:
return binary(s)
def unpack_list(stream, **kwargs):
local_kwargs = copy.copy(kwargs)
if 'getway' not in local_kwargs:
local_kwargs['getway'] = ''
getway = local_kwargs['getway']
obj = []
length = struct.unpack('=i', stream.read(4))[0]
for i in range(length):
local_kwargs['getway'] = getway + '_' + str(i)
obj.append(unpack_obj(stream, **local_kwargs))
return obj
def unpack_dict(stream, **kwargs):
local_kwargs = copy.copy(kwargs)
if 'getway' not in local_kwargs:
local_kwargs['getway'] = ''
getway = local_kwargs['getway']
obj = {}
length = struct.unpack('=i', stream.read(4))[0]
for i in range(length):
key = unpack_string(stream, **kwargs)
local_kwargs['getway'] = getway + '_' + key
value = unpack_obj(stream, **local_kwargs)
obj[key] = value
obj = OrderedDict(sorted(obj.items(), key=lambda item: item[0]))
return obj
def unpack_obj(stream, **kwargs):
"""
Convert an stream(sta format) to object(json format)
:param stream: Stream of binary sta file
:param workshop: path to write binary file
:param getway: the getway to all values
:param binary_mode: 0(default): means write @base64@...
1: means @file@path
2: means write @binary@size
3: means str for binary memory
:return: unpacked object
"""
mark = struct.unpack('=b', stream.read(1))[0]
if mark == STA_NIL:
return unpack_nil(stream, **kwargs)
elif mark == STA_INT:
return unpack_int(stream, **kwargs)
elif mark == STA_FLOAT:
return unpack_float(stream, **kwargs)
elif mark == STA_STRING:
return unpack_string(stream, **kwargs)
elif mark == STA_BINARY:
return unpack_binary(stream, **kwargs)
elif mark == STA_LIST:
return unpack_list(stream, **kwargs)
elif mark == STA_DICT:
return unpack_dict(stream, **kwargs)
else:
raise Exception("Unsupported mark type: ", type(mark))
def sta2obj(sta_filename, **kwargs):
"""
Convert filename.sta to object
:param sta_filename: input sta filename
:param binary_mode: 0(default): means write @base64@...
1: means @file@path
2: means write @binary@size
3: means str for binary memory
:return:
"""
byte = ''
with open(sta_filename, 'rb') as ifile:
byte = ifile.read()
stream = Stream(byte)
mark = struct.unpack('=i', stream.read(4))[0]
if mark != STA_MARK:
raise Exception("%s is not a valid sta file." % sta_filename)
# kwargs = {}
if 'binary_mode' not in kwargs:
kwargs['binary_mode'] = 0
obj = unpack_obj(stream, **kwargs)
return obj
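# Example (sketch; 'model.sta' is a hypothetical input file):
# obj = sta2obj('model.sta') # binary payloads come back as '@base64@...' strings
# obj = sta2obj('model.sta', binary_mode=3) # keep binary payloads in memory instead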
def sta2json(sta_filename, json_filename=None, **kwargs):
"""
Convert filename.sta to filename.json.
:param sta_filename: input sta filename
:param json_filename: output json filename or path
:param binary_mode: 0(default): means write @base64@...
1: means @file@path
2: means write @binary@size
3: means str for binary memory
:return:
"""
filepath, filename_ext = os.path.split(sta_filename)
filename, ext = os.path.splitext(filename_ext)
if json_filename is None:
json_filename = os.path.join(filepath, filename + ".json")
if os.path.isdir(json_filename):
json_filename = os.path.join(json_filename, filename + ".json")
workshop, getway_ext = os.path.split(json_filename)
getway = os.path.splitext(getway_ext)[0]
if len(workshop) > 0 and not os.path.isdir(workshop):
raise Exception("%s/ is not a valid path." % workshop)
with open(json_filename, 'w') as ofile:
byte = ''
with open(sta_filename, 'rb') as ifile:
byte = ifile.read()
stream = Stream(byte)
mark = struct.unpack('=i', stream.read(4))[0]
if mark != STA_MARK:
raise Exception("%s is not a valid sta file." % sta_filename)
kwargs['workshop'] = workshop
kwargs['getway'] = getway
if 'binary_mode' not in kwargs:
kwargs['binary_mode'] = 1
obj = unpack_obj(stream, **kwargs)
json.dump(obj, ofile, indent=2)
| 28.449074 | 73 | 0.593979 | 331 | 0.053865 | 0 | 0 | 0 | 0 | 0 | 0 | 1,677 | 0.272905 |
ef4888a9795dbbe5df0abc36429c88521fbd3e99 | 1,494 | py | Python | 872 Leaf-Similar Trees.py | krishna13052001/LeetCode | cd6ec626bea61f0bd9e8493622074f9e69a7a1c3 | ["MIT"] | 872 | 2015-06-15T12:02:41.000Z | 2022-03-30T08:44:35.000Z | 872 Leaf-Similar Trees.py | nadeemshaikh-github/LeetCode | 3fb14aeea62a960442e47dfde9f964c7ffce32be | ["MIT"] | 8 | 2015-06-21T15:11:59.000Z | 2022-02-01T11:22:34.000Z | 872 Leaf-Similar Trees.py | nadeemshaikh-github/LeetCode | 3fb14aeea62a960442e47dfde9f964c7ffce32be | ["MIT"] | 328 | 2015-06-28T03:10:35.000Z | 2022-03-29T11:05:28.000Z |
#!/usr/bin/python3
"""
Consider all the leaves of a binary tree. From left to right order, the values
of those leaves form a leaf value sequence.
For example, in the given tree above, the leaf value sequence is (6, 7, 4, 9,
8).
Two binary trees are considered leaf-similar if their leaf value sequence is the
same.
Return true if and only if the two given trees with head nodes root1 and root2
are leaf-similar.
Note:
Both of the given trees will have between 1 and 100 nodes.
"""
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def leafSimilar(self, root1: TreeNode, root2: TreeNode) -> bool:
"""
brute force, get all the leaf and then compare
to save space, use generator
O(lg n) space for the stack
"""
itr1 = self.dfs(root1)
itr2 = self.dfs(root2)
while True:
a = next(itr1, None)
b = next(itr2, None)
if a != b:
return False
if a is None and b is None:
break
return True
def dfs(self, node):
stk = [node]
# pre-order
while stk:
cur = stk.pop()
if not cur:
continue
if not cur.left and not cur.right:
yield cur.val
else:
stk.append(cur.right)
stk.append(cur.left)
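# Example (sketch): two single-node trees with the same leaf value are leaf-similar.
# r1, r2 = TreeNode(1), TreeNode(1)
# assert Solution().leafSimilar(r1, r2) is True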
| 25.758621 | 80 | 0.566934 | 965 | 0.645917 | 327 | 0.218876 | 0 | 0 | 0 | 0 | 675 | 0.451807 |
ef49bf29b2b0c7109f207a688a3d0065e565a25b | 332 | py | Python | skp_edu_docker/code/chatbot/common/chat_knowledge_mem_dict.py | TensorMSA/hoyai_docker | 12f0041e6306d8a6421585a4b51666bad30be442 | ["MIT"] | 8 | 2017-06-16T00:19:12.000Z | 2020-08-13T03:15:57.000Z | skp_edu_docker/code/chatbot/common/chat_knowledge_mem_dict.py | TensorMSA/tensormsa_docker | 12f0041e6306d8a6421585a4b51666bad30be442 | ["MIT"] | 21 | 2017-06-09T10:15:14.000Z | 2018-03-29T07:51:02.000Z | skp_edu_docker/code/chatbot/common/chat_knowledge_mem_dict.py | TensorMSA/hoyai_docker | 12f0041e6306d8a6421585a4b51666bad30be442 | ["MIT"] | 4 | 2017-10-25T09:59:53.000Z | 2020-05-07T09:51:11.000Z |
class ChatKnowledgeMemDict:
"""
class for storing dict data in Django process memory
to speed up dict search operations
"""
# cc_id -> entity_id -> value list
ngram = {}
ngram_order = {}
ngram_conf = {}
data = {}
data_conf = {}
data_order = {} #ordered proper_noun
synonym = {}
conf = {}
| 23.714286 | 53 | 0.581325 | 332 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 154 | 0.463855 |
ef4f605e514f18c935ef699c3ca9417a54b457c9 | 2,465 | py | Python | apollo/auth.py | sorinbiriescu/Apollo_backend | b6fb68a26487a138e7efd691e7fdffaa5042a155 | ["Apache-2.0"] | null | null | null | apollo/auth.py | sorinbiriescu/Apollo_backend | b6fb68a26487a138e7efd691e7fdffaa5042a155 | ["Apache-2.0"] | null | null | null | apollo/auth.py | sorinbiriescu/Apollo_backend | b6fb68a26487a138e7efd691e7fdffaa5042a155 | ["Apache-2.0"] | null | null | null |
from datetime import datetime, timedelta
from typing import Optional
from fastapi import Depends, HTTPException, status
from fastapi.security import OAuth2PasswordBearer
from jose import JWTError, jwt
from passlib.context import CryptContext
from apollo.crud import query_first_user
from apollo.main import site_settings
from apollo.schemas import TokenData, UserModel
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="api/token")
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
def verify_password(plain_password, hashed_password):
return pwd_context.verify(plain_password, hashed_password)
def get_password_hash(password):
return pwd_context.hash(password)
def get_user(username: str):
user = query_first_user(username)
if user:
return UserModel.from_orm(user)
def authenticate_user(username: str, password: str):
user = query_first_user(username)
if not user:
return False
if not verify_password(password, user.password):
return False
return user
def create_access_token(data: dict, expires_delta: Optional[timedelta] = None):
to_encode = data.copy()
if expires_delta:
expire = datetime.utcnow() + expires_delta
else:
expire = datetime.utcnow() + timedelta(minutes=15)
to_encode.update({"exp": expire})
encoded_jwt = jwt.encode(to_encode, site_settings.SECRET_KEY, algorithm = site_settings.ALGORITHM)
return encoded_jwt
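# Example (sketch; "alice" is a hypothetical username):
# token = create_access_token({"sub": "alice"}, expires_delta=timedelta(minutes=30))
# The "sub" claim is what get_current_user() later reads back from the decoded token.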
async def get_current_user(token: str = Depends(oauth2_scheme)):
credentials_exception = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Could not validate credentials",
headers={"WWW-Authenticate": "Bearer"},
)
try:
payload = jwt.decode(token, site_settings.SECRET_KEY, algorithms=[site_settings.ALGORITHM])
username: str = payload.get("sub")
if username is None:
raise credentials_exception
token_data = TokenData(username=username)
except JWTError:
raise credentials_exception
user = get_user(username = token_data.username)
if user is None:
raise credentials_exception
return user
async def get_current_active_user(current_user: UserModel = Depends(get_current_user)):
if current_user == 1:
raise HTTPException(status_code=400, detail="Inactive user")
return current_user
| 30.060976 | 103 | 0.710345 | 0 | 0 | 0 | 0 | 0 | 0 | 951 | 0.385801 | 108 | 0.043813 |
ef519f677beac77f2c2e144f66d4be64d1cbd341 | 200 | py | Python | main.py | Gabriel-ino/Automated-Sticker-Hero | d76952cc35f051b7d9562912f0a063fed6f75068 | ["MIT"] | null | null | null | main.py | Gabriel-ino/Automated-Sticker-Hero | d76952cc35f051b7d9562912f0a063fed6f75068 | ["MIT"] | null | null | null | main.py | Gabriel-ino/Automated-Sticker-Hero | d76952cc35f051b7d9562912f0a063fed6f75068 | ["MIT"] | null | null | null |
from App import App
from utils.get_screen_size import get_screen_size
if __name__ == "__main__":
app = App()
h, w, ch = get_screen_size()
while True:
app.proccessing(h, w, ch)
| 16.666667 | 49 | 0.655 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.05 |
ef51f52103e4f12bc37feb84f6d66527017f0c6d | 1,836 | py | Python | dgp/core/config_validators.py | dataspot/dgp | 553a255a4884b935cf2efecdc761050232f0f066 | ["MIT"] | 1 | 2019-07-17T11:34:27.000Z | 2019-07-17T11:34:27.000Z | dgp/core/config_validators.py | datahq/dgp | f39592ce20ba67b73b08188f14585b6eb3d43f96 | ["MIT"] | 2 | 2019-04-30T12:32:32.000Z | 2019-04-30T12:35:26.000Z | dgp/core/config_validators.py | dataspot/dgp | 553a255a4884b935cf2efecdc761050232f0f066 | ["MIT"] | null | null | null |
class BaseValidator():
MISSING = 0
INVALID = 1
def check(self, config):
raise NotImplementedError()
class Validator(BaseValidator):
def __init__(self, *validators):
self._validators = validators
def check(self, config):
errors = []
for validator in self._validators:
errors.extend(validator.check(config))
return errors
class ConfigValidationError():
def __init__(self, code, key, **kw):
self.code = code
self.key = key
self.options = kw
def __str__(self):
if self.code == BaseValidator.MISSING:
return 'Missing configuration: {} ({})'.format(self.options.get('description') or self.key, self.key)
elif self.code == BaseValidator.INVALID:
return 'Invalid configuration: {}'.format(self.options.get('description') or self.key)
def __iter__(self):
return iter((self.code, self.key, self.options))
class Required(BaseValidator):
def __init__(self, key, description=None):
self.key = key
self.description = description
def check(self, config):
if self.key in config:
return []
else:
return [ConfigValidationError(self.MISSING, self.key, description=self.description)]
class Empty(BaseValidator):
def __init__(self, key, description=None):
self.key = key
self.description = description
def check(self, config):
if self.key in config:
if not config[self.key]:
return []
else:
return [ConfigValidationError(self.INVALID, self.key,
description=self.description)]
else:
return [ConfigValidationError(self.MISSING, self.key,
description=self.description)]
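# Example (sketch; the config keys below are hypothetical):
# errors = Validator(
# Required('source', description='source connector'),
# Empty('errors', description='previous errors'),
# ).check({'errors': ['boom']})
# -> one MISSING error for 'source' and one INVALID error for 'errors'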
| 27.402985 | 113 | 0.606209 | 1,823 | 0.992919 | 0 | 0 | 0 | 0 | 0 | 0 | 85 | 0.046296 |
ef53ba7f982e4f61582b4dfc595af89608ab9da3 | 3,695 | py | Python | third_party/graphy/graphy/common_test.py | tingshao/catapult | a8fe19e0c492472a8ed5710be9077e24cc517c5c | ["BSD-3-Clause"] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | third_party/graphy/graphy/common_test.py | tingshao/catapult | a8fe19e0c492472a8ed5710be9077e24cc517c5c | ["BSD-3-Clause"] | 4,640 | 2015-07-08T16:19:08.000Z | 2019-12-02T15:01:27.000Z | third_party/graphy/graphy/common_test.py | tingshao/catapult | a8fe19e0c492472a8ed5710be9077e24cc517c5c | ["BSD-3-Clause"] | 698 | 2015-06-02T19:18:35.000Z | 2022-03-29T16:57:15.000Z |
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for common.py."""
import warnings
from graphy import common
from graphy import graphy_test
from graphy.backends import google_chart_api
class CommonTest(graphy_test.GraphyTest):
def setUp(self):
self.chart = google_chart_api.LineChart()
def tearDown(self):
warnings.resetwarnings()
def testDependentAxis(self):
self.assertTrue(self.chart.left is self.chart.GetDependentAxis())
self.assertTrue(self.chart.bottom is self.chart.GetIndependentAxis())
def testAxisAssignment(self):
"""Make sure axis assignment works properly"""
new_axis = common.Axis()
self.chart.top = new_axis
self.assertTrue(self.chart.top is new_axis)
new_axis = common.Axis()
self.chart.bottom = new_axis
self.assertTrue(self.chart.bottom is new_axis)
new_axis = common.Axis()
self.chart.left = new_axis
self.assertTrue(self.chart.left is new_axis)
new_axis = common.Axis()
self.chart.right = new_axis
self.assertTrue(self.chart.right is new_axis)
def testAxisConstruction(self):
axis = common.Axis()
self.assertTrue(axis.min is None)
self.assertTrue(axis.max is None)
axis = common.Axis(-2, 16)
self.assertEqual(axis.min, -2)
self.assertEqual(axis.max, 16)
def testGetDependentIndependentAxes(self):
c = self.chart
self.assertEqual([c.left, c.right], c.GetDependentAxes())
self.assertEqual([c.top, c.bottom], c.GetIndependentAxes())
right2 = c.AddAxis(common.AxisPosition.RIGHT, common.Axis())
bottom2 = c.AddAxis(common.AxisPosition.BOTTOM, common.Axis())
self.assertEqual([c.left, c.right, right2], c.GetDependentAxes())
self.assertEqual([c.top, c.bottom, bottom2], c.GetIndependentAxes())
# TODO: remove once AddSeries is deleted
def testAddSeries(self):
warnings.filterwarnings('ignore')
chart = common.BaseChart()
chart.AddSeries(points=[1, 2, 3], style='foo',
markers='markers', label='label')
series = chart.data[0]
self.assertEqual(series.data, [1, 2, 3])
self.assertEqual(series.style, 'foo')
self.assertEqual(series.markers, 'markers')
self.assertEqual(series.label, 'label')
# TODO: remove once the deprecation warning is removed
def testDataSeriesStyles(self):
# Deprecated approach
warnings.filterwarnings('error')
self.assertRaises(DeprecationWarning, common.DataSeries, [1, 2, 3],
color='0000FF')
warnings.filterwarnings('ignore')
d = common.DataSeries([1, 2, 3], color='0000FF')
self.assertEqual('0000FF', d.color)
d.color = 'F00'
self.assertEqual('F00', d.color)
# TODO: remove once the deprecation warning is removed
def testDataSeriesArgumentOrder(self):
# Deprecated approach
warnings.filterwarnings('error')
self.assertRaises(DeprecationWarning, common.DataSeries, [1, 2, 3],
'0000FF', 'style')
# New order
style = common._BasicStyle('0000FF')
d = common.DataSeries([1, 2, 3], 'label', style)
self.assertEqual('label', d.label)
self.assertEqual(style, d.style)
if __name__ == '__main__':
graphy_test.main()
| 33.899083 | 74 | 0.707984 | 2,896 | 0.783762 | 0 | 0 | 0 | 0 | 0 | 0 | 1,010 | 0.273342 |
ef53e0e036cb078d36e154064142222b1dfe4d85 | 608 | py | Python | projects/utils_func/fetch_data.py | blitty-codes/ml-proyects | 97d41757cfb45209bbbb09e4c3b51e20c4328a30 | ["Apache-2.0"] | null | null | null | projects/utils_func/fetch_data.py | blitty-codes/ml-proyects | 97d41757cfb45209bbbb09e4c3b51e20c4328a30 | ["Apache-2.0"] | null | null | null | projects/utils_func/fetch_data.py | blitty-codes/ml-proyects | 97d41757cfb45209bbbb09e4c3b51e20c4328a30 | ["Apache-2.0"] | null | null | null |
# Download the data you need
import os
import tarfile
import requests
def fetch_data(dataset_url):
file_name = dataset_url.split('/')[-1]
dataset_path = os.path.join("datasets", file_name.split('.')[0])
print(dataset_path)
print(f"File name: {file_name.split('.')[0]}")
os.makedirs(dataset_path, exist_ok=True)
data = requests.get(dataset_url)
tgz_path = os.path.join(dataset_path, f"{file_name}")
with open(tgz_path, 'wb') as file:
file.write(data.content)
dataset_tgz = tarfile.open(tgz_path)
dataset_tgz.extractall(path=dataset_path)
dataset_tgz.close()
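# Example (sketch; the URL is hypothetical -- any reachable .tgz archive works):
# fetch_data("https://example.com/datasets/housing.tgz")
# downloads to datasets/housing/housing.tgz and extracts it into the same folder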
| 27.636364 | 68 | 0.692434 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 96 | 0.157895 |
ef54bb20c88dda93a302698251aa2e77667dc8a2 | 4,526 | py | Python | xpython/builtins.py | pmp-p/x-python | e5bdc15af1bf9cf696b2d9a8e1a02a4863b1fb8a | ["MIT"] | null | null | null | xpython/builtins.py | pmp-p/x-python | e5bdc15af1bf9cf696b2d9a8e1a02a4863b1fb8a | ["MIT"] | null | null | null | xpython/builtins.py | pmp-p/x-python | e5bdc15af1bf9cf696b2d9a8e1a02a4863b1fb8a | ["MIT"] | null | null | null |
"""
A place to implement built-in functions.
We use the bytecode for these when doing cross-version interpreting
"""
from xpython.pyobj import Function, Cell, make_cell
from xdis import codeType2Portable, PYTHON_VERSION, IS_PYPY
def func_code(func):
if hasattr(func, "func_code"):
return func.func_code
else:
assert hasattr(func, "__code__"), "%s should be a function type; is %s" % (
func,
type(func),
)
return func.__code__
# This code was originally written by Darius Bacon,
# but follows code from PEP 3115 listed below.
# Rocky Bernstein did the xdis adaptions and
# added a couple of bug fixes.
def build_class(opc, func, name, *bases, **kwds):
"""
Like built-in __build_class__() in bltinmodule.c, but running in the
byterun VM.
See also: PEP 3115: https://www.python.org/dev/peps/pep-3115/ and
https://mail.python.org/pipermail/python-3000/2007-March/006338.html
"""
# Parameter checking...
if not (isinstance(func, Function)):
raise TypeError("func must be a PyVM function")
if not isinstance(name, str):
raise TypeError("name is not a string")
metaclass = kwds.pop("metaclass", None)
if metaclass is None:
metaclass = type(bases[0]) if bases else type
if isinstance(metaclass, type):
metaclass = calculate_metaclass(metaclass, bases)
if hasattr(metaclass, "__prepare__"):
prepare = metaclass.__prepare__
namespace = prepare(name, bases, **kwds)
else:
namespace = {}
python_implementation = "PyPy" if IS_PYPY else "CPython"
if not (
opc.version == PYTHON_VERSION
and python_implementation == opc.python_implementation
):
# convert code to xdis's portable code type.
class_body_code = codeType2Portable(func_code(func))
else:
class_body_code = func.func_code
# Execute the body of func. This is the step that would go wrong if
# we tried to use the built-in __build_class__, because __build_class__
# does not call func, it magically executes its body directly, as we
# do here (except we invoke our PyVM instead of CPython's).
#
# This behavior when interpreting bytecode that isn't the same as
# the bytecode using in the running Python can cause a SEGV, specifically
# between Python 3.5 running 3.4 or earlier.
frame = func._vm.make_frame(
code=class_body_code,
f_globals=func.func_globals,
f_locals=namespace,
closure=func.__closure__,
)
# rocky: cell is the return value of a function where?
cell = func._vm.eval_frame(frame)
# Add any class variables that may have been added in running class_body_code.
# See test_attribute_access.py for a simple example that needs the update below.
namespace.update(frame.f_locals)
# If metaclass is builtin "type", it can't deal with a xpython.pyobj.Cell object
# but needs a builtin cell object. make_cell() can do this.
if "__classcell__" in namespace and metaclass == type:
namespace["__classcell__"] = make_cell(namespace["__classcell__"].get())
try:
cls = metaclass(name, bases, namespace)
except TypeError:
# For mysterious reasons the above can raise a:
# __init__() takes *n* positional arguments but *n+1* were given.
# In particular for:
# class G(Generic[T]):
# pass
import types
cls = types.new_class(name, bases, kwds, exec_body=lambda ns: namespace)
pass
if isinstance(cell, Cell):
cell.set(cls)
return cls
# From Pypy 3.6
# def find_metaclass(bases, namespace, globals, builtin):
# if '__metaclass__' in namespace:
# return namespace['__metaclass__']
# elif len(bases) > 0:
# base = bases[0]
# if hasattr(base, '__class__'):
# return base.__class__
# else:
# return type(base)
# elif '__metaclass__' in globals:
# return globals['__metaclass__']
# else:
# try:
# return builtin.__metaclass__
# except AttributeError:
# return type
def calculate_metaclass(metaclass, bases):
"Determine the most derived metatype."
winner = metaclass
for base in bases:
t = type(base)
if issubclass(t, winner):
winner = t
elif not issubclass(winner, t):
raise TypeError("metaclass conflict", winner, t)
return winner
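# Example (sketch): with plain bases the most derived metatype is just ``type``.
# assert calculate_metaclass(type, (object,)) is type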
| 32.328571 | 84 | 0.650685 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,350 | 0.519222 |
ef58bac3885ae00f40f0903957d207828fe3e0c6 | 857 | py | Python | config/object_detection_retinanet_config.py | kadirtereci/Keras-retinanet-Training-on-custom-datasets-for-Object-Detection-- | 5baacf4475f3679b96ea2001994a575ec0a72bf0 | ["Apache-2.0"] | null | null | null | config/object_detection_retinanet_config.py | kadirtereci/Keras-retinanet-Training-on-custom-datasets-for-Object-Detection-- | 5baacf4475f3679b96ea2001994a575ec0a72bf0 | ["Apache-2.0"] | null | null | null | config/object_detection_retinanet_config.py | kadirtereci/Keras-retinanet-Training-on-custom-datasets-for-Object-Detection-- | 5baacf4475f3679b96ea2001994a575ec0a72bf0 | ["Apache-2.0"] | null | null | null |
# import the necessary packages
import os
# Set the dataset base path here
BASE_PATH = "/content/Keras-retinanet-Training-on-custom-datasets-for-Object-Detection--/dataset"
# build the path to the annotations and input images
ANNOT_PATH = os.path.sep.join([BASE_PATH, 'annotations'])
IMAGES_PATH = os.path.sep.join([BASE_PATH, 'images'])
# define the training/testing split
# If you have only training dataset then put here TRAIN_TEST_SPLIT = 1
TRAIN_TEST_SPLIT = 0.80
# build the path to the output training and test .csv files
TRAIN_CSV = os.path.sep.join([BASE_PATH, 'train.csv'])
TEST_CSV = os.path.sep.join([BASE_PATH, 'test.csv'])
# build the path to the output classes CSV files
CLASSES_CSV = os.path.sep.join([BASE_PATH, 'classes.csv'])
# build the path to the output predictions dir
OUTPUT_DIR = os.path.sep.join([BASE_PATH, 'predictions'])
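# Example (sketch; the import path assumes this file lives in a 'config' package as in
# the repository layout):
# from config import object_detection_retinanet_config as cfg
# print(cfg.TRAIN_CSV) # -> ".../dataset/train.csv"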
| 35.708333 | 97 | 0.757293 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 527 | 0.614936 |
ef593e9168b64350b18b0f9f56ed9f30d578e6cf | 4,199 | py | Python | CiTOCrawler/OC/script/static_lode.py | patmha/CiTOCrawler | 6c5027f42aacc2d250305e5e877bc271470acde5 | ["BSD-3-Clause"] | null | null | null | CiTOCrawler/OC/script/static_lode.py | patmha/CiTOCrawler | 6c5027f42aacc2d250305e5e877bc271470acde5 | ["BSD-3-Clause"] | null | null | null | CiTOCrawler/OC/script/static_lode.py | patmha/CiTOCrawler | 6c5027f42aacc2d250305e5e877bc271470acde5 | ["BSD-3-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2016, Silvio Peroni <[email protected]>
#
# Permission to use, copy, modify, and/or distribute this software for any purpose
# with or without fee is hereby granted, provided that the above copyright notice
# and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT,
# OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
# DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
# SOFTWARE.
__author__ = 'essepuntato'
import os
import requests
import codecs
import argparse
import re
class StaticLODE(object):
def __init__(self, doc_dir, onto_map, lang="en", imported_url=None,
lode_url="http://eelst.cs.unibo.it/apps/LODE", repl=None):
self.doc_dir = doc_dir
self.lode_url = lode_url + "/extract?owlapi=true&lang=%s&url=" % lang
self.imported_basepath = lode_url
self.imported_url = imported_url
self.onto_map = onto_map
self.repl = repl
def create_documentation(self):
for acronym in self.onto_map:
print "Prepare the documentation of '%s'" % acronym
ontology_url = self.onto_map[acronym]
print self.lode_url + ontology_url
cur_doc = requests.get(self.lode_url + ontology_url).text
if self.imported_url is not None:
cur_doc = cur_doc.replace(self.imported_basepath, self.imported_url)
if self.repl is not None:
orig_repl = self.repl.split("->")
cur_doc = re.sub(orig_repl[0], orig_repl[1], cur_doc)
cur_doc = re.sub("<dl><dt>Other visualisation:</dt><dd>"
"<a href=\"[^\"]+\">Ontology source</a></dd></dl>", "", cur_doc)
if not os.path.exists(self.doc_dir):
os.makedirs(self.doc_dir)
with codecs.open(self.doc_dir + os.sep + acronym + ".html", mode="w", encoding="utf-8") as f:
f.write(cur_doc)
print "\t ... done!"
if __name__ == "__main__":
arg_parser = argparse.ArgumentParser("static_lode.py")
arg_parser.add_argument("-pu", "--prefix-url", dest="prefurl", required=True,
help="The prefix followed by a ':' plus the URL of the ontology to convert.")
arg_parser.add_argument("-o", "--output-dir", dest="output_dir", required=True,
help="The directory where to store the documentation files created.")
arg_parser.add_argument("-s", "--source-material-url", dest="source_material_url",
help="The directory that contains all the LODE related files for "
"presentation on the browser.")
arg_parser.add_argument("-l", "--lode-url", dest="lode_url",
default="http://eelst.cs.unibo.it/apps/LODE",
help="The URL where LODE is available.")
arg_parser.add_argument("-lang", "--language", dest="language", default="en",
help="The ISO code of the language used to retrieve the documentation "
"(default: 'en?).")
arg_parser.add_argument("-repl", "--string-replace", dest="string_replace",
help="A 'source->replace' regular expression for replacement of strings.")
args = arg_parser.parse_args()
all_ontologies_url = {}
split_input = args.prefurl.split(":", 1)
all_ontologies_url.update({split_input[0]: split_input[1]})
sl = StaticLODE(args.output_dir, all_ontologies_url, args.language,
args.source_material_url, args.lode_url, args.string_replace)
sl.create_documentation()
# How to call it for a specific ontology:
# python static_lode.py -pu fabio:http://purl.org/spar/fabio -o spar/ontology_documentations -s /static/lode
| 49.988095 | 112 | 0.635389 | 1,448 | 0.344844 | 0 | 0 | 0 | 0 | 0 | 0 | 1,870 | 0.445344 |
ef59c84efb2830bb4da68800485a32f52a474ab9 | 14,738 | py | Python | src/c4/cmany/cmake.py | biojppm/cmany | b20c24169d60077122ae29a0c09526913340fd5c | ["MIT"] | 20 | 2017-05-17T18:43:08.000Z | 2021-02-13T16:20:53.000Z | src/c4/cmany/cmake.py | biojppm/cmany | b20c24169d60077122ae29a0c09526913340fd5c | ["MIT"] | 8 | 2017-06-04T17:01:06.000Z | 2022-03-17T12:43:32.000Z | src/c4/cmany/cmake.py | biojppm/cmany | b20c24169d60077122ae29a0c09526913340fd5c | ["MIT"] | 1 | 2017-06-04T13:09:19.000Z | 2017-06-04T13:09:19.000Z |
import re
import os
from collections import OrderedDict as odict
from .conf import USER_DIR
from .util import cacheattr, setcwd, runsyscmd, logdbg
from . import util
from . import err
_cache_entry = r'^(.*?)(:.*?)=(.*)$'
def hascache(builddir):
c = os.path.join(builddir, 'CMakeCache.txt')
if os.path.exists(c):
return c
return None
def setcachevar(builddir, var, value):
setcachevars(builddir, odict([(var, value)]))
def getcachevar(builddir, var):
v = getcachevars(builddir, [var])
return v[var]
def setcachevars(builddir, varvalues):
with setcwd(builddir, silent=True):
with open('CMakeCache.txt', 'r') as f:
ilines = f.readlines()
olines = []
for l in ilines:
for k, v in varvalues.items():
if l.startswith(k + ':'):
n = re.sub(_cache_entry, r'\1\2=' + v, l)
l = n
olines.append(l)
with open('CMakeCache.txt', 'w') as f:
f.writelines(olines)
def getcachevars(builddir, varlist):
vlist = [v + ':' for v in varlist]
values = odict()
with setcwd(builddir, silent=True):
with open('CMakeCache.txt') as f:
for line in f:
for v in vlist:
if line.startswith(v):
ls = line.strip()
vt = re.sub(_cache_entry, r'\1', ls)
values[vt] = re.sub(_cache_entry, r'\3', ls)
return values
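# Example (sketch; 'build/' is a hypothetical CMake build directory that already
# contains a CMakeCache.txt):
# setcachevar('build', 'CMAKE_BUILD_TYPE', 'Release')
# getcachevar('build', 'CMAKE_BUILD_TYPE') # -> 'Release'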
def loadvars(builddir):
"""if builddir does not exist or does not have a cache, returns an
empty odict"""
v = odict()
if builddir is None or not os.path.exists(builddir):
return v
c = os.path.join(builddir, 'CMakeCache.txt')
if os.path.exists(c):
with open(c, 'r') as f:
for line in f:
# logdbg("loadvars0", line.strip())
if not re.match(_cache_entry, line):
continue
ls = line.strip()
name = re.sub(_cache_entry, r'\1', ls)
vartype = re.sub(_cache_entry, r'\2', ls)[1:]
value = re.sub(_cache_entry, r'\3', ls)
# logdbg("loadvars1", name, vartype, value)
v[name] = CMakeCacheVar(name, value, vartype)
return v
# -----------------------------------------------------------------------------
class CMakeCache(odict):
def __init__(self, builddir=None):
super().__init__(loadvars(builddir))
self.dirty = False
self.cache_file = None
if builddir:
self.cache_file = os.path.join(builddir, 'CMakeCache.txt')
def __eq__(self, other):
"""code quality checkers complain that this class adds attributes
without overriding __eq__. So just fool them!"""
return super().__init__(other)
def getvars(self, names):
out = odict()
for n in names:
v = self.get(n)
out[n] = v
return out
def b(self, name, val, **kwargs):
"""set a boolean"""
return self.setvar(name, val, "BOOL", **kwargs)
def s(self, name, val, **kwargs):
"""set a string"""
return self.setvar(name, val, "STRING", **kwargs)
def p(self, name, val, **kwargs):
"""set a path to a dir"""
if util.in_windows():
val = re.sub(r'\\', r'/', val)
return self.setvar(name, val, "PATH", **kwargs)
def f(self, name, val, **kwargs):
"""set a path to a file"""
if util.in_windows():
val = re.sub(r'\\', r'/', val)
return self.setvar(name, val, "FILEPATH", **kwargs)
def i(self, name, val, **kwargs):
"""set a cmake internal var"""
return self.setvar(name, val, "INTERNAL", **kwargs)
def setvar(self, name, val, vartype=None, **kwargs):
v = self.get(name)
if v is not None:
changed = v.reset(val, vartype, **kwargs)
self.dirty |= changed
return changed
else:
v = CMakeCacheVar(name, val, vartype, dirty=True, **kwargs)
self[name] = v
self.dirty = True
return True
def commit(self, builddir):
if (not self.dirty
or builddir is None
or not os.path.exists(builddir)
or not os.path.exists(os.path.join(builddir, 'CMakeCache.txt'))):
return False
tmp = odict()
for _, v in self.items():
if not v.dirty:
continue
tmp[v.name] = v.val
setcachevars(builddir, tmp)
for _, v in self.items():
v.dirty = False
self.dirty = False
return True
# -------------------------------------------------------------------------
class CMakeCacheVar:
def __init__(self, name, val, vartype=None, dirty=False, from_input=False):
self.name = name
self.val = val
self.vartype = self._guess_var_type(name, val, vartype)
self.dirty = dirty
self.from_input = from_input
def _guess_var_type(self, name, val, vartype):
"""make an informed guess of the var type
@todo: add a test for this"""
if vartype is not None:
return vartype
elif val.upper() in ("ON", "OFF", "NO", "YES", "1", "0", "TRUE", "FALSE", "T", "F", "N", "Y"):
# https://cmake.org/pipermail/cmake/2007-December/018548.html
return "BOOL"
elif os.path.isfile(val) or "PATH" in name.upper():
return "FILEPATH"
elif os.path.isdir(val) or "DIR" in name.upper() or os.path.isabs(val):
return "PATH"
else:
return "STRING"
def reset(self, val, vartype='', **kwargs):
"""
:param val:
:param vartype:
:param kwargs:
force_dirty, defaults to False
from_input, defaults to None
:return:
"""
force_dirty = kwargs.get('force_dirty', False)
from_input = kwargs.get('from_input')
if from_input is not None:
self.from_input = from_input
if vartype == 'STRING' or (vartype is None and self.vartype == 'STRING'):
candidates = (val, val.strip("'"), val.strip('"'))
equal = False
for c in candidates:
if c == self.val:
equal = True
break
else:
equal = (self.val == val)
if not equal or (vartype is not None and vartype != self.vartype):
self.val = val
self.vartype = vartype if vartype is not None else self.vartype
self.dirty = True
return True
if force_dirty:
self.dirty = True
return force_dirty
def __repr__(self):
return self.name + ':' + self.vartype + '=' + self.val
def __str__(self):
return self.name + ':' + self.vartype + '=' + self.val
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
class CMakeSysInfo:
"""encapsulates the results returned from
`cmake [-G <which_generator>][-T <toolset>][-A <architecture>] --system-information`.
This is used for selecting default values for system, compiler,
generator, etc."""
@staticmethod
def generator():
return cacheattr(__class__, '_generator_default',
lambda: __class__._getstr('CMAKE_GENERATOR', 'default'))
@staticmethod
def system_name(which_generator="default"):
return __class__.var('CMAKE_SYSTEM_NAME', which_generator, lambda v: v.lower())
@staticmethod
def architecture(which_generator="default"):
return __class__.var('CMAKE_SYSTEM_PROCESSOR', which_generator, lambda v: v.lower())
@staticmethod
def cxx_compiler(which_generator="default"):
return __class__.var('CMAKE_CXX_COMPILER', which_generator)
@staticmethod
def c_compiler(which_generator="default"):
return __class__.var('CMAKE_C_COMPILER', which_generator)
@staticmethod
def var(var_name, which_generator="default", transform_fn=lambda x: x):
gs = __class__._getstr
return cacheattr(__class__, '_{}_{}'.format(var_name, _genid(which_generator)),
lambda: transform_fn(gs(var_name, which_generator)))
@staticmethod
def info(which_generator="default"):
return cacheattr(__class__, '_info_' + _genid(which_generator),
lambda: __class__.system_info(which_generator))
@staticmethod
def _getstr(var_name, which_generator):
regex = r'^{} "(.*)"'.format(var_name)
for l in __class__.info(which_generator):
#logdbg(l.strip("\n"), l.startswith(var_name), var_name)
if l.startswith(var_name):
l = l.strip("\n").lstrip(" ").rstrip(" ")
#logdbg(var_name, "startswith :", l)
if re.match(regex, l):
s = re.sub(regex, r'\1', l)
#logdbg(var_name, "result: '" + s + "'")
return s
#logdbg("--------------------------------------\n", __class__.info(which_generator))
msg = "could not find variable {} in the output of `cmake --system-information -G '{}'`"
raise err.Error(msg, var_name, which_generator)
@staticmethod
def system_info(gen):
"""gen can be a string or a cmany.Generator object"""
from .generator import Generator
logdbg("CMakeSystemInfo: asked info for", gen)
p = _genid(gen)
d = os.path.join(USER_DIR, 'cmake_info', p)
p = os.path.join(d, 'info')
logdbg("CMakeSystemInfo: path=", p)
# https://stackoverflow.com/questions/7015587/python-difference-of-2-datetimes-in-months
if os.path.exists(p) and util.time_since_modification(p).months < 1:
logdbg("CMakeSystemInfo: asked info for", gen, "... found", p)
with open(p, "r") as f:
i = f.readlines()
if i:
return i
else:
logdbg("CMakeSystemInfo: info for gen", gen, "is empty...")
#
if isinstance(gen, Generator):
cmd = ['cmake'] + gen.configure_args() + ['--system-information']
logdbg("CMakeSystemInfo: from generator! '{}' ---> cmd={}".format(gen, cmd))
else:
if gen == "default" or gen == "":
logdbg("CMakeSystemInfo: default! '{}'".format(gen))
cmd = ['cmake', '--system-information']
else:
logdbg("CMakeSystemInfo: assume vs! '{}'".format(gen))
from . import vsinfo
gen = vsinfo.to_gen(gen)
if isinstance(gen, list):
cmd = ['cmake', '-G'] + gen + ['--system-information']
else:
if not (gen.startswith('vs') or gen.startswith('Visual Studio')):
raise Exception("unknown generator: {}".format(gen))
cmd = ['cmake', '-G', gen, '--system-information']
# remove export build commands as cmake reacts badly to it,
# generating an empty info string
_remove_invalid_args_from_sysinfo_cmd(cmd)
print("\ncmany: CMake information for generator '{}' was not found. Creating and storing... cmd={}".format(gen, cmd))
#
if not os.path.exists(d):
os.makedirs(d)
with setcwd(d):
out = runsyscmd(cmd, echo_output=False, capture_output=True)
logdbg("cmany: finished generating information for generator '{}'\n".format(gen), out, cmd)
out = out.strip()
if not out:
from .err import InvalidGenerator
raise InvalidGenerator(gen, "for --system-information. cmd='{}'".format(cmd))
with open(p, "w") as f:
f.write(out)
i = out.split("\n")
return i
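# Example (sketch): query the defaults CMake reports for the default generator.
# CMakeSysInfo.generator() # e.g. 'Unix Makefiles'
# CMakeSysInfo.cxx_compiler() # e.g. '/usr/bin/c++'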
def _remove_invalid_args_from_sysinfo_cmd(cmd):
gotit = None
# remove compile commands args
for i, elm in enumerate(cmd):
if 'CMAKE_EXPORT_COMPILE_COMMANDS' in elm:
# can't strip out if compile commands is not given as one,
# because the command will become malformed when we remove
if elm not in ('-DCMAKE_EXPORT_COMPILE_COMMANDS=ON', '-DCMAKE_EXPORT_COMPILE_COMMANDS=OFF'):
raise Exception("malformed command")
gotit = i
if gotit is not None:
del cmd[gotit]
# remove architecture args
if '-A' in cmd:
i = cmd.index('-A')
del cmd[i+1]
del cmd[i]
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def _genid(gen):
from .generator import Generator
p = gen.sysinfo_name if isinstance(gen, Generator) else gen
if isinstance(gen, list): p = " ".join(p)
p = re.sub(r'[() ]', '_', p)
return p
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# def get_toolchain_cache(toolchain):
# d = os.path.join(USER_DIR, 'toolchains', re.sub(os.sep, '+', toolchain))
# logdbg("toolchain cache: USER_DIR=", USER_DIR)
# logdbg("toolchain cache: d=", d)
# bd = os.path.join(d, 'build')
# logdbg("toolchain cache: bd=", bd)
# if not os.path.exists(d):
# os.makedirs(d)
# with setcwd(d):
# with open('main.cpp', 'w') as f:
# f.write("int main() {}")
# with open('CMakeLists.txt', 'w') as f:
# f.write("""
# cmake_minimum_required(VERSION 2.6)
# project(toolchain_test)
# add_executable(main main.cpp)
# """)
# if not os.path.exists(bd):
# os.makedirs(bd)
# with setcwd(bd):
# cmd = ['cmake', '-DCMAKE_TOOLCHAIN_FILE='+toolchain, '..']
# runsyscmd(cmd, echo_output=True)
# return loadvars(bd)
def extract_toolchain_compilers(toolchain):
with open(toolchain) as f:
lines = f.readlines()
out = odict()
for l in lines:
res = re.search(r'(set|SET)\ ?\(\ ?(CMAKE_.*?_COMPILER) (.*?)\ ?\)', l)
if res:
res = res.groups()
out[res[1]] = res[2]
return out
| 36.937343 | 125 | 0.519677 | 9,409 | 0.638418 | 0 | 0 | 4,619 | 0.313408 | 0 | 0 | 4,848 | 0.328946 |
ef5a62962aed890737736832f581c39140877b07 | 2,130 | py | Python | Python/Searching/2/quick_select.py | Tikam02/Data_Structure_Algorithms | 7c17f744975a72fa42f0f3f892c0b7e041cdef0c | ["MIT"] | 5 | 2017-08-03T06:33:49.000Z | 2021-08-06T13:20:57.000Z | Python/Searching/2/quick_select.py | Tikam02/Data_Structure_Algorithms | 7c17f744975a72fa42f0f3f892c0b7e041cdef0c | ["MIT"] | null | null | null | Python/Searching/2/quick_select.py | Tikam02/Data_Structure_Algorithms | 7c17f744975a72fa42f0f3f892c0b7e041cdef0c | ["MIT"] | 6 | 2017-04-27T13:30:49.000Z | 2020-11-01T20:28:55.000Z |
#!/usr/bin/env python
__author__ = "bt3"
import random
''' The simplest way...'''
def quickSelect(seq, k):
# this part is the same as quick sort
len_seq = len(seq)
if len_seq < 2: return seq
# we could use a random choice here doing
#pivot = random.choice(seq)
ipivot = len_seq // 2
pivot = seq[ipivot]
# O(n)
smallerList = [x for i,x in enumerate(seq) if x <= pivot and i != ipivot]
largerList = [x for i,x in enumerate(seq) if x > pivot and i != ipivot]
# here starts the different part
m = len(smallerList)
if k == m:
return pivot
elif k < m:
return quickSelect(smallerList, k)
else:
return quickSelect(largerList, k-m-1)
''' If you don't want to use pythons feature at all and
also select pivot randomly'''
def swap(seq, x, y):
tmp = seq[x]
seq[x] = seq[y]
seq[y] = tmp
def quickSelectHard(seq, k, left=None, right=None):
left = 0 if left is None else left
right = len(seq) - 1 if right is None else right
# pick the pivot inside the current [left, right] window
# (a fixed len(seq)//2 can fall outside the window on recursive calls)
#ipivot = random.randint(left, right)
ipivot = left + (right - left) // 2
pivot = seq[ipivot]
# Move pivot out of the sorting range
swap(seq, ipivot, right)
swapIndex, i = left, left
while i < right:
if seq[i] < pivot:
swap(seq, i, swapIndex)
swapIndex += 1
i += 1
# Move pivot to final position
swap(seq, right, swapIndex)
# Check if pivot matches, else recurse on the correct half
rank = len(seq) - swapIndex
if k == rank:
return seq[swapIndex]
elif k < rank:
return quickSelectHard(seq, k, swapIndex+1, right)
else:
return quickSelectHard(seq, k, left, swapIndex-1)
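
# -----------------------------------------------------------------------------
# Illustrative sketch (added, not part of the original snippet): the two
# selectors use different conventions. quickSelect(seq, k) returns the k-th
# smallest element with k counted from 0, while quickSelectHard(seq, k)
# returns the k-th largest element with k counted from 1 and partitions the
# list in place. The values below are made-up examples.
def _example_quickselect():
    data = [9, 1, 8, 2, 7, 3, 6]                # sorted: [1, 2, 3, 6, 7, 8, 9]
    assert quickSelect(list(data), 3) == 6      # 4th smallest (0-indexed k=3)
    assert quickSelect(list(data), 5) == 8      # 6th smallest (0-indexed k=5)
    assert quickSelectHard(list(data), 1) == 9  # largest (1-indexed k=1)
    assert quickSelectHard(list(data), 4) == 6  # 4th largest, i.e. the median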
if __name__ == '__main__':
# Checking the Answer
seq = [10, 60, 100, 50, 60, 75, 31, 50, 30, 20, 120, 170, 200]
#seq = [3, 7, 2, 1, 4, 6, 5, 10, 9, 11]
# we want the middle element
k = len(seq) // 2
# Note that this only work for odd arrays, since median in
# even arrays is the mean of the two middle elements
print(quickSelect(seq, k))
print(quickSelectHard(seq, k))
import numpy
print(numpy.median(seq)) | 23.932584 | 78 | 0.597653 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 654 | 0.307042 |
ef5b7b88dd380eec142de24fd5621ee02381ea01 | 3,744 | py | Python | RGB_extraction_maize_diversity.py | xiangjunli/Maize_Phenotype_Map | 15765c1a9a58bdf5cfca5602e09e9cbe74d12b98 | [
"BSD-3-Clause"
]
| 4 | 2018-02-06T21:15:31.000Z | 2018-07-28T14:00:17.000Z | RGB_extraction_maize_diversity.py | xiangjunli/Maize_Phenotype_Map | 15765c1a9a58bdf5cfca5602e09e9cbe74d12b98 | [
"BSD-3-Clause"
]
| null | null | null | RGB_extraction_maize_diversity.py | xiangjunli/Maize_Phenotype_Map | 15765c1a9a58bdf5cfca5602e09e9cbe74d12b98 | [
"BSD-3-Clause"
]
| 2 | 2020-02-07T18:26:09.000Z | 2020-10-16T15:52:56.000Z | import numpy as np
import cv2
import sys
import os
#######################RGB Image Data Analysis############################################################
###Should follow the data structure of image data: Genotype --> Replicates (Plants) --> Different Views --> Image captured by each Day###
# mfold defines the folder name that stores the data in our data structure
mfold = sys.argv[1]
# The ratio between pixel sizes at the further and the closer zoom level is 1:2.02, and each pixel at the closer zoom level is 0.746 mm. This script generates values based on pixel counts.
# the binary function extracts green pixels using the threshold (2*G)/(R+B) > 1.15
def binary(pic,upper,bottom,left,right):
mypic = []
myl = np.shape(pic)[0]
myw = np.shape(pic)[1]
x1 = left
x2 = right
y1 = upper
y2 = bottom
for iind,i in enumerate(pic):
if iind < y1 or iind > y2:
n = [0]*myw
else:
n = []
for jind,j in enumerate(i):
if j > 1.15:
if jind < x1 or jind > x2:
t = 0
else:
t = 255
else:
t = 0
n.append(t)
mypic.append(n)
mypic = np.array(mypic)
return mypic
# create a function to extract values of plant height, plant width and plant area pixel counts
def call_numeric(thresh):
hh = 0
ww = 0
aa = 0
areas = []
contours,hierarchy = cv2.findContours(thresh, 1, 2)
for c in contours:
areas.append(cv2.contourArea(c))
people = np.array(contours)
ages = np.array(areas)
inds = ages.argsort()
sortedcontours = people[inds]
cnt = sortedcontours[-1]
hull = cv2.convexHull(cnt)
x,y,w,h = cv2.boundingRect(cnt)
hh = str(h)
ww = str(w)
aa = str(cv2.contourArea(hull))
return hh,ww,aa,areas
whole = os.listdir(mfold)
# two zoom levels were applied to the RGB images on different days, so plant images are analysed separately for each zoom level
close = set([])
far = set([])
for i in range(1,27):
close.add('Day_'+str(i).zfill(3))
close.remove('Day_'+str(11).zfill(3))
for i in range(27,33):
far.add('Day_'+str(i).zfill(3))
far.add('Day_'+str(11).zfill(3))
# out is the file with extracted numeric values from RGB images
out = open('RGB_extraction.csv','w')
# create this file to log image files that fail to load, so that the whole loop can keep running
error = open('RGB_extraction_error.csv','w')
out.write('PlantID'+'\t'+'Date'+'\t'+'View'+'\t'+'Plant Height'+'\t'+'Plant Width'+'\t'+'Projected Plant Area'+'\n')
views = ['VIS SV 0','VIS SV 90']
for j1 in sorted(whole):
if j1 == 'Genotype_ZL022':continue
for i1 in os.listdir('{0}/{1}'.format(mfold,j1)):
for v in views:
for d1 in sorted(os.listdir('{0}/{1}/{2}/{3}/'.format(mfold,j1,i1,v))):
nlist = [i1,d1.replace('.png','')]
myview = 'View'+v.replace('VIS SV ','')
na = [myview,'NA','NA','NA']
date = d1.replace('.png','')
try:
abc = cv2.imread('{0}/{1}/{2}/{3}/{4}'.format(mfold,j1,i1,v,d1))
abc = abc.astype(np.float)
imgreen = (2*abc[:,:,1])/(abc[:,:,0]+abc[:,:,2])
if date in close:
thresh = binary(imgreen,50,1950,335,2280)
elif date in far:
thresh = binary(imgreen,50,1450,815,1780)
cv2.imwrite('test.jpg',thresh)
thresh = cv2.imread("test.jpg",cv2.CV_LOAD_IMAGE_GRAYSCALE)
h,w,area,areas0 = call_numeric(thresh)
total = max(areas0)
k = areas0.index(total)
del areas0[k]
for i in areas0:
total -= i
nlist.append(myview)
if date in far:
nlist.append(str(float(h)*2.02))
nlist.append(str(float(w)*2.02))
nlist.append(str(float(total)))
else:
nlist.append(h)
nlist.append(w)
nlist.append(total)
except:
nlist.extend(na)
error.write(j1+':'+i1+':'+v+':'+d1+'\n')
out.write('\t'.join(nlist)+'\n')
out.close()
error.close()
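
# ---------------------------------------------------------------------------
# Illustrative helper (added, not part of the original script): converts the
# pixel counts written above into millimetres using the constants stated in
# the header comment (0.746 mm per pixel at the closer zoom level; values from
# "far" days are already rescaled by the 1:2.02 ratio into closer-zoom pixels).
def pixels_to_mm(pixels, mm_per_pixel=0.746):
    return float(pixels) * mm_per_pixel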
| 32 | 174 | 0.626603 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,321 | 0.352831 |
ef5c0e5ff1790c1367e3395cb63ad1ddf91375ef | 4,620 | py | Python | cgtools/skinning.py | tneumann/cgtools | 8f77b6a4642fe79ac85b8449ebd3f72ea0e56032 | [
"MIT"
]
| 10 | 2019-05-02T14:08:32.000Z | 2021-03-15T16:07:19.000Z | cgtools/skinning.py | tneumann/cgtools | 8f77b6a4642fe79ac85b8449ebd3f72ea0e56032 | [
"MIT"
]
| null | null | null | cgtools/skinning.py | tneumann/cgtools | 8f77b6a4642fe79ac85b8449ebd3f72ea0e56032 | [
"MIT"
]
| 3 | 2019-05-02T14:08:33.000Z | 2021-02-10T03:47:29.000Z | import numpy as np
from . import vector as V
def rbm_to_dualquat(rbm):
import cgkit.cgtypes as cg
q0 = cg.quat().fromMat(cg.mat3(rbm[:3,:3].T.tolist()))
q0 = q0.normalize()
q0 = np.array([q0.w, q0.x, q0.y, q0.z])
t = rbm[:3, 3]
q1 = np.array([
-0.5*( t[0]*q0[1] + t[1]*q0[2] + t[2]*q0[3]),
0.5*( t[0]*q0[0] + t[1]*q0[3] - t[2]*q0[2]),
0.5*(-t[0]*q0[3] + t[1]*q0[0] + t[2]*q0[1]),
0.5*( t[0]*q0[2] - t[1]*q0[1] + t[2]*q0[0]) ])
return np.array(q0.tolist() + q1.tolist())
def dualquats_to_rbms(blendq):
qn = blendq[:,:4]
qd = blendq[:,4:]
len2 = np.sum(qn**2, axis=1)
w, x, y, z = qn[:,0], qn[:,1], qn[:,2], qn[:,3]
t0, t1, t2, t3 = qd[:,0], qd[:,1], qd[:,2], qd[:,3]
M = np.empty((len(blendq), 4, 4))
M[:,0,0] = w*w + x*x - y*y - z*z
M[:,0,1] = 2*x*y - 2*w*z
M[:,0,2] = 2*x*z + 2*w*y
M[:,1,0] = 2*x*y + 2*w*z
M[:,1,1] = w*w + y*y - x*x - z*z
M[:,1,2] = 2*y*z - 2*w*x;
M[:,2,0] = 2*x*z - 2*w*y
M[:,2,1] = 2*y*z + 2*w*x
M[:,2,2] = w*w + z*z - x*x - y*y
M[:,0,3] = -2*t0*x + 2*w*t1 - 2*t2*z + 2*y*t3
M[:,1,3] = -2*t0*y + 2*t1*z - 2*x*t3 + 2*w*t2
M[:,2,3] = -2*t0*z + 2*x*t2 + 2*w*t3 - 2*t1*y
M[:,3] = 0
M[:,3,3] = len2
M /= len2[:,np.newaxis,np.newaxis]
return M
def dq_skinning(pts, BW, dqs):
from scipy import weave
blendq = np.sum(BW[:,:,np.newaxis] * dqs[np.newaxis], axis=1)
code = """
using namespace blitz;
float M00, M01, M02, M03;
float M10, M11, M12, M13;
float M20, M21, M22, M23;
for (int i=0; i<num_pts; i++) {
float w = blendq(i,0);
float x = blendq(i,1);
float y = blendq(i,2);
float z = blendq(i,3);
float t0 = blendq(i,4);
float t1 = blendq(i,5);
float t2 = blendq(i,6);
float t3 = blendq(i,7);
float len2 = 1. / (w*w + x*x + y*y + z*z);
M00 = (w*w + x*x - y*y - z*z) * len2;
M01 = (2*x*y - 2*w*z) * len2;
M02 = (2*x*z + 2*w*y) * len2;
M10 = (2*x*y + 2*w*z) * len2;
M11 = (w*w + y*y - x*x - z*z) * len2;
M12 = (2*y*z - 2*w*x) * len2;
M20 = (2*x*z - 2*w*y) * len2;
M21 = (2*y*z + 2*w*x) * len2;
M22 = (w*w + z*z - x*x - y*y) * len2;
M03 = (-2*t0*x + 2*w*t1 - 2*t2*z + 2*y*t3) * len2;
M13 = (-2*t0*y + 2*t1*z - 2*x*t3 + 2*w*t2) * len2;
M23 = (-2*t0*z + 2*x*t2 + 2*w*t3 - 2*t1*y) * len2;
pts_transformed(i,0) = M00 * pts(i,0) + M01 * pts(i,1) + M02 * pts(i,2) + M03;
pts_transformed(i,1) = M10 * pts(i,0) + M11 * pts(i,1) + M12 * pts(i,2) + M13;
pts_transformed(i,2) = M20 * pts(i,0) + M21 * pts(i,1) + M22 * pts(i,2) + M23;
}
"""
pts_transformed = np.empty_like(pts)
num_pts = len(blendq)
num_bws = BW.shape[1]
weave.inline(code,
["num_pts", "num_bws", "blendq", "pts_transformed", "pts", "BW"],
type_converters=weave.converters.blitz)
return pts_transformed
def dq_skinning_py(pts, BW, dqs, inverse=False):
# blend in dual quaternion space
blendq = np.sum(BW[:,:,np.newaxis] * dqs[np.newaxis], axis=1)
# convert them back to rigid body motion (4x4)
M = dualquats_to_rbms(blendq)
if inverse == True:
print(M)
M = np.array(list(map(np.linalg.inv, M)))
# transform points with final matrix
return V.dehom( np.sum(M * V.hom(pts)[:,np.newaxis,:], axis=2) )
def blend_skinning(pts, BW, rbms, method='lbs'):
"""
perform blend skinning of pts given blend weights BW and the 4x4 rigid body motions in rbms
pts should be an array of points, so the shape should be (num_points, 3)
BW should be an array of blendweights, so the shape should be (num_points, num_rbms)
where num_rbms give the number of rigid body motion parts (joints)
rbms should be an array of shape (num_rbms, 4, 4) - one rigid body motions for each column in BW
supported methods are "lbs" (linear blend skinning)
and "dq" (dual quaternion skinning)
"""
# TODO use masked arrays to accellerate?
if method == 'lbs':
transformed_pts = np.tensordot(V.hom(pts), rbms, axes=(1, 2))
if transformed_pts.shape[-1] == 4:
transformed_pts = V.dehom(transformed_pts)
return np.sum(BW[:,:,np.newaxis] * transformed_pts, axis=1)
elif method == 'dq':
rbms = np.asanyarray(rbms)
dqs = np.array(list(map(rbm_to_dualquat, rbms)))
return dq_skinning(pts, BW, dqs)
else:
raise ValueError("Unknown skinning method")
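
# -----------------------------------------------------------------------------
# Minimal usage sketch (added, not part of the original module): skin a few
# points with two joints using linear blend skinning. The point coordinates,
# blend weights and the +1 z-translation below are made-up example values.
def _example_blend_skinning():
    pts = np.array([[0., 0., 0.], [1., 0., 0.], [2., 0., 0.]])
    # one 4x4 rigid body motion per joint: identity, and a translation in z
    rbms = np.array([np.eye(4), np.eye(4)])
    rbms[1, 2, 3] = 1.0
    # every point is influenced half by each joint
    BW = np.full((len(pts), 2), 0.5)
    return blend_skinning(pts, BW, rbms, method='lbs')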
| 37.868852 | 104 | 0.515368 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,094 | 0.453247 |
ef5cca29cfc460b593d8a2ef7fb0d7625f148237 | 2,214 | py | Python | methods/self_attention.py | uyplayer/machine_learning_notice | 9f6c4a9a5e278321611d9be1e8fa46bf9a1bd416 | [
"Apache-2.0"
]
| 1 | 2019-12-10T12:27:33.000Z | 2019-12-10T12:27:33.000Z | methods/self_attention.py | uyplayer/machine_learning_notice | 9f6c4a9a5e278321611d9be1e8fa46bf9a1bd416 | [
"Apache-2.0"
]
| null | null | null | methods/self_attention.py | uyplayer/machine_learning_notice | 9f6c4a9a5e278321611d9be1e8fa46bf9a1bd416 | [
"Apache-2.0"
]
| null | null | null | # coding: utf-8
# Team : uyplayer team
# Author: uyplayer
# Date :2019/11/20 4:22 PM
# Tool :PyCharm
'''
https://blog.csdn.net/c9Yv2cf9I06K2A9E/article/details/79739287
https://msd.misuland.com/pd/13340603045208861
'''

import torch
import torch.nn as nn
import torch.nn.functional as F

# MAX_LENGTH and device are used below but were not defined in this snippet;
# the values here are assumptions taken from the PyTorch seq2seq tutorial that
# this decoder follows.
MAX_LENGTH = 10
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


class AttnDecoderRNN(nn.Module):
def __init__(self, hidden_size, output_size, dropout_p=0.1, max_length=MAX_LENGTH):
super(AttnDecoderRNN, self).__init__()
self.hidden_size = hidden_size
self.output_size = output_size # vocabulary size of the target language
self.dropout_p = dropout_p
self.max_length = max_length
self.embedding = nn.Embedding(self.output_size, self.hidden_size)
self.attn = nn.Linear(self.hidden_size * 2, self.max_length)
self.attn_combine = nn.Linear(self.hidden_size * 2, self.hidden_size)
self.dropout = nn.Dropout(self.dropout_p)
self.gru = nn.GRU(self.hidden_size, self.hidden_size)
self.out = nn.Linear(self.hidden_size, self.output_size)
def forward(self, input, hidden, encoder_outputs): # the forward arguments are the decoder inputs
# the decoder input is a token of the target language: either the target
# token (teacher forcing) or the most probable token from the previous
# step's output
# the initial hidden state is the encoder's last hidden output
embedded = self.embedding(input).view(1, 1, -1)
embedded = self.dropout(embedded)
# concatenate the 256-dim embedding with the 256-dim hidden state into a
# 512-dim vector, then a fully-connected layer maps it to 10 dims (the
# maximum sentence length) before applying the softmax
attn_weight = F.softmax(
self.attn(torch.cat((embedded[0], hidden[0]), 1)), dim=1
)
# torch.cat concatenates tensors; dim=1 concatenates along dimension 1
# torch.bmm is a batched matrix multiplication; here the attention weights
# are multiplied with the encoder outputs
# bmm: (1,1,10)*(1,10,256), weights * vectors, giving the attention vector
# unsqueeze inserts an extra dimension (reshapes the tensor)
attn_applied = torch.bmm(attn_weight.unsqueeze(0),
encoder_outputs.unsqueeze(0))
output = torch.cat((embedded[0], attn_applied[0]), 1)
output = self.attn_combine(output).unsqueeze(0)
output = F.relu(output)
output, hidden = self.gru(output, hidden)
output = F.log_softmax(self.out(output[0]), dim=1)
return output, hidden, attn_weight
def initHidden(self):
return torch.zeros(1, 1, self.hidden_size, device=device) | 41.773585 | 88 | 0.653117 | 2,271 | 0.902623 | 0 | 0 | 0 | 0 | 0 | 0 | 897 | 0.356518 |
ef5e5867ee1d6b8b8d8f0bd5472d8f25ae61b5ab | 497 | py | Python | Aniyom Ebenezer/phase 1/python 2 basis/Day_21_Challenge_Solution/Question 6 Solution.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
]
| 6 | 2020-05-23T19:53:25.000Z | 2021-05-08T20:21:30.000Z | Aniyom Ebenezer/phase 1/python 2 basis/Day_21_Challenge_Solution/Question 6 Solution.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
]
| 8 | 2020-05-14T18:53:12.000Z | 2020-07-03T00:06:20.000Z | Aniyom Ebenezer/phase 1/python 2 basis/Day_21_Challenge_Solution/Question 6 Solution.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
]
| 39 | 2020-05-10T20:55:02.000Z | 2020-09-12T17:40:59.000Z | """
Write a Python program that reads a date (from 2016/1/1 to 2016/12/31) and prints the day of the date.
Jan. 1, 2016, is Friday.
Note that 2016 is a leap year.
"""
from datetime import date
print("Input month and date(separated by a single space): ")
m, d = map(int, input().split())
weeks = {1: "Monday", 2: "Tuesday", 3: "Wednesday", 4: "Thursday", 5: "Friday", 6: "Saturday", 7: "Sunday"}
w = date.isoweekday(date(2016, m, d))
print("Name of the date: ", weeks[w])
#Reference: w3resources | 33.133333 | 106 | 0.668008 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 327 | 0.657948 |
ef5e8dee6b61a5247d6e4659a6ab926d4b74a1e7 | 347 | py | Python | test15.py | cherytony/test1 | 506ce4cab6f641beff817c81d7a616db29a7131d | [
"Apache-2.0"
]
| null | null | null | test15.py | cherytony/test1 | 506ce4cab6f641beff817c81d7a616db29a7131d | [
"Apache-2.0"
]
| null | null | null | test15.py | cherytony/test1 | 506ce4cab6f641beff817c81d7a616db29a7131d | [
"Apache-2.0"
]
| null | null | null | """
Problem description
Given n strings, sort the n strings in lexicographical (dictionary) order.
Input description:
The first line is a positive integer n (1 <= n <= 1000); the next n lines are n strings (each of length <= 100) containing only upper- and lower-case letters.
Output description:
Print n lines: the strings sorted in lexicographical order.
Example 1
Input
9
cap
to
cat
card
two
too
up
boat
boot
Output
boat
boot
cap
card
cat
to
too
two
up
"""
list = []
n = int(input())
for i in range(0, n):
s = input()
list.append(s)
list.sort()
for i in list:
print(i)
| 8.069767 | 58 | 0.674352 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 422 | 0.76588 |
ef5eca1f199c276a6c88e8c9f58fbad72f2074cb | 1,340 | py | Python | sim/cell.py | raalesir/sim | 9bd994b1dedd05ca88ab9f25cbca3bc28cadc04b | [
"MIT"
]
| null | null | null | sim/cell.py | raalesir/sim | 9bd994b1dedd05ca88ab9f25cbca3bc28cadc04b | [
"MIT"
]
| null | null | null | sim/cell.py | raalesir/sim | 9bd994b1dedd05ca88ab9f25cbca3bc28cadc04b | [
"MIT"
]
| null | null | null |
class CubicCell:
"""
represents confining cell for the polymer
"""
def __init__(self, a,b,c):
"""
init the cell
:param a: ``OX`` dimension
:type a: int
:param b: ``OY`` dimension
:type b: int
:param c: ``OZ`` dimension
:type c: int
"""
self.A = a
self.B = b
self.C = c
self.center = self.set_center()
def __str__(self):
return "the cell is a parallelogram with the size (X,Y,Z) = (%i, %i, %i)"%(self.A, self.B, self.C)
def set_center(self):
"""
calculates the center of the cell
:return: list of 3 numbers
:rtype: list
"""
return [self.A//2, self.B//2, self.C//2]
def get_center(self):
"""
returns the center of the cell
:return: list of integers
:rtype: list
"""
return self.center
class ForceCubicCell(CubicCell):
"""
cubic cell with force field
"""
def __init__(self, a,b ,c, f_f):
self.f_f = f_f
super().__init__(a,b,c)
def __str__(self):
return """the cell is a parallelogram with the size (X,Y,Z) = (%i, %i, %i) and %s force field with the origin at: %s"""\
%(self.A, self.B, self.C, self.f_f, self.f_f.origin)
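
# Minimal usage sketch (added, not part of the original module). The force
# field passed to ForceCubicCell only needs an ``origin`` attribute for
# printing; _DummyForceField below is a made-up stand-in.
class _DummyForceField:
    def __init__(self, origin=(0, 0, 0)):
        self.origin = origin

    def __str__(self):
        return "dummy"


def _example_cells():
    cell = CubicCell(10, 10, 10)
    print(cell)                  # size of the confining cell
    print(cell.get_center())     # [5, 5, 5]
    f_cell = ForceCubicCell(10, 10, 10, _DummyForceField())
    print(f_cell)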
| 20.30303 | 129 | 0.506716 | 1,331 | 0.993284 | 0 | 0 | 0 | 0 | 0 | 0 | 712 | 0.531343 |
ef5fbbee42c9df1a0ff003ab57c38b8bb1ccfe30 | 2,558 | py | Python | 0-EXP-TIRA-C10.py | webis-de/Luyckx2008 | a7b2711a354a71ba326ddb1e495a8343091e4d8c | [
"Unlicense"
]
| null | null | null | 0-EXP-TIRA-C10.py | webis-de/Luyckx2008 | a7b2711a354a71ba326ddb1e495a8343091e4d8c | [
"Unlicense"
]
| null | null | null | 0-EXP-TIRA-C10.py | webis-de/Luyckx2008 | a7b2711a354a71ba326ddb1e495a8343091e4d8c | [
"Unlicense"
]
| null | null | null | import jsonhandler
from LuyckxFeatures import *
import timblClassification as timbl
import os
import numpy as np
from collections import Counter
def parseC10(c10_path):
jsonhandler.loadJson(c10_path)
jsonhandler.loadTraining()
candidates = jsonhandler.candidates
unknowns = jsonhandler.unknowns
files = list()
for cand in candidates:
for fileName in jsonhandler.trainings[cand]:
files.append('%s/%s/%s' % (c10_path, cand, fileName) )
for unknown in unknowns:
files.append('%s/unknown/%s' % (c10_path, unknown) )
parseCorpus(files)
dictPath = "c10"
jsonhandler.loadJson(dictPath)
jsonhandler.loadTraining()
candidates = jsonhandler.candidates
unknowns = jsonhandler.unknowns
authors = list()
uAuthors = list()
for cand in candidates:
a = author(cand)
for fileName in jsonhandler.trainings[cand]:
fName = '%s/%s/%s' % (dictPath, cand, fileName)
pName = '%s/%s/%s' % (dictPath, cand, os.path.splitext(fileName)[0] + '.mbsp')
a.addDoc(fName, pName)
authors.append(a)
for unknown in unknowns:
fName = '%s/unknown/%s' % (dictPath, unknown)
pName = '%s/unknown/%s' % (dictPath, os.path.splitext(unknown)[0] + '.mbsp')
a = author(os.path.splitext(unknown)[0])
a.addDoc(fName, pName)
uAuthors.append(a)
docs = getAllDocuments(authors + uAuthors)
globalFeatures = dict.fromkeys((docs[0].features.keys()))
accuracy = dict.fromkeys((docs[0].features.keys()))
predict = dict.fromkeys((docs[0].features.keys()))
for idk, key in enumerate(globalFeatures.keys()):
globalFeatures[key] = globalFeature(key, docs)
train_fName = '%s/%s_training.c5' % (dictPath, key)
test_fName = '%s/%s_test.c5' % (dictPath, key)
exportC5(getAllDocuments(authors), authors, globalFeatures[key], 50, train_fName)
exportC5(getAllDocuments(uAuthors), uAuthors, globalFeatures[key], 50, test_fName)
noFeatures = len(Counter(globalFeatures[key].chi2).most_common(50))
predict[key] = timbl.classify(train_fName, test_fName, noFeatures)
os.remove(train_fName)
os.remove(test_fName)
# jsonhandler.storeJson(unknowns, predict)
jsonhandler.loadGroundTruth()
with open('%s/results' % dictPath, 'w') as rHandle:
for key in globalFeatures.keys():
cMatrix = timbl.confusionMatrix(jsonhandler.trueAuthors, predict[key])
accuracy[key] = np.sum(np.diag(cMatrix)) / np.sum(cMatrix)
rHandle.write('%s \t %.4f \n' % (key, accuracy[key]))
| 38.179104 | 86 | 0.670837 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 203 | 0.079359 |
ef6043c616af761fa9470ba29ff276fd15c95e0d | 3,133 | py | Python | bus.py | resc863/Kakao_Chatbot | fe4a038de323ad733cd49e69c7ceb283a36bef0c | [
"MIT"
]
| 1 | 2020-08-01T13:42:26.000Z | 2020-08-01T13:42:26.000Z | bus.py | resc863/Kakao_Chatbot | fe4a038de323ad733cd49e69c7ceb283a36bef0c | [
"MIT"
]
| null | null | null | bus.py | resc863/Kakao_Chatbot | fe4a038de323ad733cd49e69c7ceb283a36bef0c | [
"MIT"
]
| 1 | 2021-08-24T14:02:32.000Z | 2021-08-24T14:02:32.000Z | from bs4 import BeautifulSoup
from multiprocessing import Pool
import requests
def lineid(lineno):
lineurl = "http://61.43.246.153/openapi-data/service/busanBIMS2/busInfo?lineno="+lineno+"&serviceKey=0XeO7nbthbiRoMUkYGGah20%2BfXizwc0A6BfjrkL6qhh2%2Fsl8j9PzfSLGKnqR%2F1v%2F%2B6AunxntpLfoB3Ryd3OInQ%3D%3D"
lineid2 = requests.get(lineurl).text
lineid1 = BeautifulSoup(lineid2, "html.parser")
lineid0 = lineid1.find('item')
lineid = lineid0.lineid.string
return lineid
def nextstop(l):
no = l[0]
lineno = l[1]
lineid1 = lineid(lineno)
url = "http://61.43.246.153/openapi-data/service/busanBIMS2/busInfoRoute?lineid="+lineid1+"&serviceKey=0XeO7nbthbiRoMUkYGGah20%2BfXizwc0A6BfjrkL6qhh2%2Fsl8j9PzfSLGKnqR%2F1v%2F%2B6AunxntpLfoB3Ryd3OInQ%3D%3D"
text = requests.get(url).text
soup = BeautifulSoup(text, "html.parser")
nextidx = 0
for item in soup.findAll('item'):
bstop = ""
if item.arsno == None:
bstop = "정보가 없습니다."
else:
bstop = item.arsno.string
curidx = int(item.bstopidx.string)
if bstop == no:
nextidx = curidx
nextidx = nextidx + 1
elif curidx == nextidx:
nextstop = item.bstopnm.string
return nextstop
def getinfo(x):
bus1="186190402"
bus2="186210101"
url1 = 'http://61.43.246.153/openapi-data/service/busanBIMS2/stopArr?serviceKey=ExhrDuBJZ28eMHPRIyFToDuqoT1Lx3ViPoI3uKVLS%2FyucnbaLbQISs4%2FSJWf0AzAV1gkbbtZK5GWvO9clF%2B1aQ%3D%3D&bstopid='+bus1
url2 = 'http://61.43.246.153/openapi-data/service/busanBIMS2/stopArr?serviceKey=ExhrDuBJZ28eMHPRIyFToDuqoT1Lx3ViPoI3uKVLS%2FyucnbaLbQISs4%2FSJWf0AzAV1gkbbtZK5GWvO9clF%2B1aQ%3D%3D&bstopid='+bus2
if x == '0':
html = requests.get(url1).text
else:
html = requests.get(url2).text
return html
def process(b):
result = b.lineno.string + "번 버스" + "\n"
lineno = b.lineno.string
if b.arsno == None:
no = "정보가 없습니다."
else:
no = b.arsno.string
if no == "정보가 없습니다.":
nextstop1 = None
else:
l = [no, lineno]
nextstop1 = nextstop(l)
if nextstop1 == None:
result = result + "다음역: 정보가 없습니다.\n"
else:
result = result + "다음역:" + nextstop1 + "\n"
if b.min1==None:
result = result + "현재 최근버스시간이 존재하지않습니다.\n\n"
else:
result = result + b.min1.string + "분 뒤 도착" + "\n\n"
return result
def bus():
result = "양운고 앞 대림1차아파트 정보\n\n"
pool = Pool(processes=2)
html = pool.map(getinfo, '0')[0]
print("00000")
html1 = pool.map(getinfo, '1')[0]
print("22222")
soup = BeautifulSoup(html, "html.parser")
soup1 = BeautifulSoup(html1, "html.parser")
item=soup.findAll('item')
for b in item:
r = process(b)
result = result + r
print("111111")
result = result + "\n\n"
item=soup1.findAll('item')
for b in item:
r = process(b)
result = result + r
return result
if __name__ == "__main__":
print(bus())
| 27.243478 | 210 | 0.616981 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,158 | 0.353804 |
ef60ce6fc063e157d7dfaad93f8114a633854b16 | 4,256 | py | Python | model_training.py | PatriceC/MLProjectISDP2020 | 64e83824690ccde2714d915c70fb00b20aa66a42 | [
"MIT"
]
| 1 | 2021-01-23T01:04:00.000Z | 2021-01-23T01:04:00.000Z | model_training.py | cor3ntino/Time-Series-Prediction-with-Deep-Learning-for-Road-Trafic-Data | e8eefdf2e630a53e09f88550357b67732f2bccd0 | [
"MIT"
]
| null | null | null | model_training.py | cor3ntino/Time-Series-Prediction-with-Deep-Learning-for-Road-Trafic-Data | e8eefdf2e630a53e09f88550357b67732f2bccd0 | [
"MIT"
]
| 1 | 2021-01-19T16:57:27.000Z | 2021-01-19T16:57:27.000Z | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 23 13:54:58 2020
@author: Patrice CHANOL & Corentin MORVAN--CHAUMEIL
"""
import numpy as np
import torch
import time
import visualisation
from datetime import datetime
def main(model, criterion, optimizer, scheduler, data_train_loader, data_test_loader, num_epochs, input_window, output_window, batch_size):
"""
Train the model and compute the test loss.
Parameters
----------
model : TYPE
DESCRIPTION. model to train
criterion : TYPE
DESCRIPTION. criterion (loss function) to compute
optimizer : TYPE
DESCRIPTION.
scheduler : TYPE
DESCRIPTION.
data_train_loader : TYPE
DESCRIPTION. train set
data_test_loader : TYPE
DESCRIPTION. test set
num_epochs : TYPE
DESCRIPTION. number of epochs to compute
input_window : TYPE
DESCRIPTION. input window length
output_window : TYPE
DESCRIPTION. output window length
batch_size : TYPE
DESCRIPTION. batch size
Returns
-------
model : TYPE
DESCRIPTION. trained model
test_loss_list : TYPE
DESCRIPTION. test loss
"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dateTimeObj = datetime.now()
print('Début Entrainement : ', dateTimeObj.hour, 'H', dateTimeObj.minute)
test_loss_list = []
n_batches = len(data_train_loader)
# Train the model num_epochs times
for epoch in range(1, num_epochs + 1):
# Epoch timer
epoch_start_time = time.time()
dateTimeObj = datetime.now()
print('Début epoch', epoch, ':', dateTimeObj.hour, 'H', dateTimeObj.minute)
# Put the model in training mode
model.train()
# Fraction of the dataset processed so far
pourcentage = 0.
# Losses of the current batch
test_loss_batch = []
# Time needed to process 10% of the data
start_time = time.time()
for batch, ((day_of_week, serie_input), serie_output) in enumerate(data_train_loader):
# Initializing a gradient as 0 so there is no mixing of gradient among the batches
optimizer.zero_grad()
# Forward pass
output = model.forward(day_of_week.to(device), serie_input.float().to(device))
loss = criterion(output, serie_output.float().to(device))
# Propagating the error backward
loss.backward()
# Clip the gradients when the model is a Transformer
if model.name_model == 'Transformer':
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.7)
# Optimizing the parameters
optimizer.step()
# Actual fraction of the dataset processed
count_pourcentage = batch / n_batches
# Every additional 10% of the dataset, evaluate on the test set
if count_pourcentage >= pourcentage:
# Time taken for the last 10%
T = time.time() - start_time
# Evaluate the model
model.eval()
with torch.no_grad():
for ((day_of_week_t, serie_input_t), serie_output_t) in data_test_loader:
output_t = model.forward(day_of_week_t.to(device), serie_input_t.float().to(device))
loss_t = criterion(output_t, serie_output_t.float().to(device))
test_loss_batch.append(loss_t.item())
test_loss = np.mean(test_loss_batch)
test_loss_list.append(test_loss)
print('-'*10)
print("Pourcentage: {}%, Test Loss : {}, Epoch: {}, Temps : {}s".format(round(100*pourcentage), test_loss, epoch, round(T)))
print('-'*10)
# Visualization
visualisation.pred_vs_reality(model, input_window, output_window, epoch=epoch, pourcentage=round(100*pourcentage))
pourcentage += 0.1
start_time = time.time()
model.train()
print('Fin epoch : {}, Temps de l\'epoch : {}s'.format(epoch, round(time.time() - epoch_start_time)))
visualisation.forecast(model, input_window, output_window, epoch=epoch)
scheduler.step()
model.save()
return model, test_loss_list
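
# Minimal wiring sketch (added, not part of the original module): how main()
# is typically called. ``MyModel`` stands for one of the project's networks
# (it must expose ``name_model``, ``save()`` and ``forward(day_of_week,
# serie_input)``), and the loaders must yield ((day_of_week, serie_input),
# serie_output) batches; all names and hyper-parameters below are placeholders.
def _example_run(MyModel, data_train_loader, data_test_loader,
                 input_window=24, output_window=6, batch_size=32, num_epochs=5):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = MyModel().to(device)
    criterion = torch.nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9)
    return main(model, criterion, optimizer, scheduler,
                data_train_loader, data_test_loader,
                num_epochs, input_window, output_window, batch_size)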
| 34.322581 | 140 | 0.608083 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,593 | 0.373243 |
ef61b3b08001b19237e5f7463a25cc96b621c9fe | 3,679 | py | Python | process_data.py | johnnyp2587/fx-drqn | 0ea8a4ad673a1883dd4630a69629c75c8f49148c | [
"MIT"
]
| 1 | 2021-01-30T11:50:54.000Z | 2021-01-30T11:50:54.000Z | process_data.py | johnnyp2587/fx-drqn | 0ea8a4ad673a1883dd4630a69629c75c8f49148c | [
"MIT"
]
| null | null | null | process_data.py | johnnyp2587/fx-drqn | 0ea8a4ad673a1883dd4630a69629c75c8f49148c | [
"MIT"
]
| 2 | 2021-01-30T11:50:57.000Z | 2021-02-04T15:43:54.000Z | import numpy as np
import pandas as pd
import datetime
def gen_cols(Pad, cur, lag):
currency = list(np.sort(Pad['currency pair'].unique()))
tmp = Pad[Pad['currency pair'] == cur].sort_values(by=['timestamp'])
for i in range(1,lag+1):
colname1 = 'bid_lag_' + str(i)
colname2 = 'ask_lag_' + str(i)
tmp[colname1] = np.log(tmp['bid price']) - np.log(tmp['bid price'].shift(i))
tmp[colname2] = np.log(tmp['ask price']) - np.log(tmp['ask price'].shift(i))
for ccy in currency:
if ccy == cur:
pass
else:
_tmp = Pad[Pad['currency pair'] == ccy].sort_values(by=['timestamp'])
mid = pd.DataFrame(np.mean(np.asarray([_tmp['bid price'].values,_tmp['ask price'].values]), axis=0))
for i in range(1,lag+1):
colname3 = ccy + '_lag_' + str(i)
tmp[colname3] = np.log(mid) - np.log(mid.shift(i))
tmp['date'] = tmp['timestamp'].astype(str).str[0:10]
tmp['dow'] = pd.to_datetime(tmp['date']).dt.dayofweek
tmp['hh'] = tmp['timestamp'].astype(str).str[11:13]
tmp['mm'] = tmp['timestamp'].astype(str).str[14:16]
tmp['ss'] = tmp['timestamp'].astype(str).str[17:19]
tmp['time_1'] = np.sin(np.pi*tmp['dow'].values/7)
tmp['time_2'] = np.sin(np.pi*tmp['hh'].astype('int64').values/24)
tmp['time_3'] = np.sin(np.pi*tmp['mm'].astype('int64').values/60)
tmp['time_4'] = np.sin(np.pi*tmp['ss'].astype('int64').values/60)
tmp = tmp.drop(['date', 'dow','hh','mm','ss'], axis=1)
tmp = tmp.reset_index(drop=True)
tmp = tmp[lag:]
return tmp
def CreateFeature(cur, lag, week_num):
date_list = ['0201','0203','0204','0205',
'0206','0207','0208','0210',
'0211','0212','0213','0214',
'0215','0217','0218','0219',
'0220','0221','0222','0224',
'0225','0226','0227','0228','0301']
train_week_1 = date_list[0:4]
train_week_2 = date_list[4:8]
train_week_3 = date_list[8:12]
train_week_4 = date_list[12:16]
train_week_5 = date_list[16:20]
eval_week_1 = date_list[4:6]
eval_week_2 = date_list[8:10]
eval_week_3 = date_list[12:14]
eval_week_4 = date_list[16:18]
eval_week_5 = date_list[20:22]
if week_num == 1:
train_week = train_week_1
eval_week = eval_week_1
elif week_num == 2:
train_week = train_week_2
eval_week = eval_week_2
elif week_num == 3:
train_week = train_week_3
eval_week = eval_week_3
elif week_num == 4:
train_week = train_week_4
eval_week = eval_week_4
elif week_num == 5:
train_week = train_week_5
eval_week = eval_week_5
Pad_train = None
Pad_eval = None
for train_date in train_week:
filename = '../pad/pad-' + train_date + '.csv'
tmp = pd.read_csv(filename)
if Pad_train is not None:
Pad_train = Pad_train.append(tmp)
else:
Pad_train = tmp
final_train = gen_cols(Pad_train,cur,lag)
trainname = './data/train_' + cur + '_lag_' + str(lag) + '_week' + str(week_num) + '.csv'
final_train.to_csv(trainname,index=False)
for eval_date in eval_week:
filename = '../pad/pad-' + eval_date + '.csv'
tmp = pd.read_csv(filename)
if Pad_eval is not None:
Pad_eval = Pad_eval.append(tmp)
else:
Pad_eval = tmp
final_eval = gen_cols(Pad_eval,cur,lag)
evalname = './data/eval_' + cur + '_lag_' + str(lag) + '_week' + str(week_num) + '.csv'
final_eval.to_csv(evalname,index=False)
if __name__=='__main__':
CreateFeature('EURUSD', 16, 1)
| 37.927835 | 113 | 0.580864 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 601 | 0.16336 |
ef625fbf84f8e46aa31c085f3762960c2186790e | 3,863 | py | Python | benchmark.py | tgisaturday/minGPT | 3ff862f7fac8adbc3dcdf0693d996468fd4c3f7b | [
"MIT"
]
| null | null | null | benchmark.py | tgisaturday/minGPT | 3ff862f7fac8adbc3dcdf0693d996468fd4c3f7b | [
"MIT"
]
| null | null | null | benchmark.py | tgisaturday/minGPT | 3ff862f7fac8adbc3dcdf0693d996468fd4c3f7b | [
"MIT"
]
| null | null | null | import math
import os
from argparse import ArgumentParser
import numpy as np
import torch
from pytorch_lightning import Trainer
from pytorch_lightning import seed_everything
from pytorch_lightning.utilities import rank_zero_info
from pytorch_lightning.callbacks import XLAStatsMonitor
from torch.utils.data import Dataset, DataLoader
from pytorch_lightning import LightningDataModule
from mingpt.lr_decay import LearningRateDecayCallback
from mingpt.model import GPT
class CharDataset(Dataset):
def __init__(self, data, block_size):
chars = list(set(data))
data_size, vocab_size = len(data), len(chars)
rank_zero_info('data has %d characters, %d unique.' % (data_size, vocab_size))
self.stoi = {ch: i for i, ch in enumerate(chars)}
self.itos = {i: ch for i, ch in enumerate(chars)}
self.block_size = block_size
self.vocab_size = vocab_size
self.data = data
def __len__(self):
return math.ceil(len(self.data) / (self.block_size + 1))
def __getitem__(self, idx):
# we're actually going to "cheat" and pick a spot in the dataset at random
i = np.random.randint(0, len(self.data) - (self.block_size + 1))
chunk = self.data[i:i + self.block_size + 1]
dix = [self.stoi[s] for s in chunk]
x = torch.tensor(dix[:-1], dtype=torch.long)
y = torch.tensor(dix[1:], dtype=torch.long)
return x, y
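
# ---------------------------------------------------------------------------
# Illustrative sketch (added, not part of the original script): what a
# CharDataset sample looks like on a tiny made-up corpus. x and y are the same
# block_size-long chunk of characters shifted by one position.
def _example_chardataset():
    ds = CharDataset("hello world, hello benchmark", block_size=8)
    x, y = ds[0]
    assert x.shape == (8,) and y.shape == (8,)
    return ds.vocab_size, x, y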
class CharDataModule(LightningDataModule):
def __init__(self, batch_size, num_workers, block_size):
super().__init__()
self.batch_size = batch_size
self.num_workers = num_workers
self.block_size = block_size
def setup(self, stage=None):
if not os.path.exists("input.txt"):
os.system("wget https://raw.githubusercontent.com/karpathy/char-rnn/master/data/tinyshakespeare/input.txt")
# you can download this file at https://github.com/karpathy/char-rnn/blob/master/data/tinyshakespeare/input.txt
text = open('input.txt', 'r').read() # don't worry we won't run out of file handles
self.train_dataset = CharDataset(text, self.block_size) # one line of poem is roughly 50 characters
def train_dataloader(self):
return DataLoader(self.train_dataset, batch_size=self.batch_size, num_workers=self.num_workers)
if __name__ == '__main__':
seed_everything(42)
parser = ArgumentParser()
parser = Trainer.add_argparse_args(parser)
parser.add_argument('--n_layer', default=22, type=int)
parser.add_argument('--n_head', default=16, type=int)
parser.add_argument('--n_embd', default=720, type=int)
parser.add_argument('--learning_rate', default=6e-4, type=float)
parser.add_argument('--block_size', default=128, type=int)
parser.add_argument('--batch_size', default=64, type=int)
parser.add_argument('--num_workers', default=16, type=int)
args = parser.parse_args()
if not os.path.exists("input.txt"):
os.system("wget https://raw.githubusercontent.com/karpathy/char-rnn/master/data/tinyshakespeare/input.txt")
dm = CharDataModule(args.batch_size, args.num_workers, args.block_size)
dm.setup()
model = GPT(
vocab_size=dm.train_dataset.vocab_size,
block_size=dm.train_dataset.block_size,
n_layer=args.n_layer,
n_head=args.n_head,
n_embd=args.n_embd,
learning_rate=args.learning_rate
)
lr_decay = LearningRateDecayCallback(
learning_rate=6e-4,
warmup_tokens=512 * 20,
final_tokens=2 * len(dm.train_dataset) * args.block_size
)
trainer = Trainer.from_argparse_args(
args,
max_epochs=5,
tpu_cores=8,
gradient_clip_val=1.0,
callbacks=[lr_decay, XLAStatsMonitor()],
)
trainer.fit(model, datamodule = dm )
| 36.443396 | 119 | 0.681077 | 1,898 | 0.491328 | 0 | 0 | 0 | 0 | 0 | 0 | 639 | 0.165415 |
ef62a93780f5d22fd2c5c963cb04b78649fda229 | 2,059 | py | Python | weather.py | corgiclub/CorgiBot_telegram | a63d91a74ee497b9a405e93bd3b303367ef95268 | [
"MIT"
]
| null | null | null | weather.py | corgiclub/CorgiBot_telegram | a63d91a74ee497b9a405e93bd3b303367ef95268 | [
"MIT"
]
| null | null | null | weather.py | corgiclub/CorgiBot_telegram | a63d91a74ee497b9a405e93bd3b303367ef95268 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*
import requests
import json
def get_weather(city: str) -> json:
req = requests.get("https://free-api.heweather.net/s6/weather?location="
"{}&key=89d6bbc3861844d59a6313c16448d293".format(city))
json_data = json.loads(req.text, encoding="UTF8")
return json_data
def get_info(city: str):
try:
resp = get_weather(city)
resp_basic = resp['HeWeather6'][0]['basic']
resp_update = resp['HeWeather6'][0]['update']
resp_now = resp['HeWeather6'][0]['now']
# resp_hourly = resp['HeWeather6'][0]['hourly']
resp_daily_forecast = resp['HeWeather6'][0]['daily_forecast']
resp_today = resp_daily_forecast[0]
resp_tomorrow = resp_daily_forecast[1]
status = resp['HeWeather6'][0]['status']
str_weather = ""
str_weather += "当前城市:{area}-{city}-{loc}\n".format(
area=resp_basic['admin_area'], city=resp_basic['parent_city'], loc=resp_basic['location'])
str_weather += "当前时间:{}\n".format(resp_update['loc'])
str_weather += "当前天气:{},温度:{}℃,体感温度:{}℃\n".format(resp_now['cond_txt'], resp_now['tmp'], resp_now['fl'])
str_weather += \
"今日天气:{d},温度:{min}~{max}℃ 风力:{sc}级 相对湿度:{hum}% 降水概率:{pop}% 紫外线强度:{uv}\n". \
format(d=resp_today['cond_txt_d'], min=resp_today['tmp_min'], max=resp_today['tmp_max'],
sc=resp_today['wind_sc'], hum=resp_today['hum'],
pop=resp_today['pop'], uv=resp_today['uv_index'])
str_weather += "明日天气:{d},温度:{min}~{max}℃ 风力:{sc}级 相对湿度:{hum}% 降水概率:{pop}% 紫外线强度:{uv}\n". \
format(d=resp_tomorrow['cond_txt_d'], min=resp_tomorrow['tmp_min'], max=resp_tomorrow['tmp_max'],
sc=resp_tomorrow['wind_sc'], hum=resp_tomorrow['hum'],
pop=resp_tomorrow['pop'], uv=resp_tomorrow['uv_index'])
str_weather += "NM$L天气预报播报完毕"
except Exception as e:
print(f"Exception: {e}")
status = -1
str_weather = None
return status, str_weather
| 44.76087 | 112 | 0.594463 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 841 | 0.38106 |
ef63d9fcd4c7ced9c5506a721a486919e70bacc7 | 2,536 | py | Python | paz/datasets/ferplus.py | niqbal996/paz | f27205907367415d5b21f90e1a1d1d1ce598e889 | [
"MIT"
]
| 300 | 2020-10-29T08:02:05.000Z | 2022-03-30T21:47:32.000Z | paz/datasets/ferplus.py | albertofernandezvillan/paz | 9fbd50b993f37e1e807297a29c6044c09967c9cc | [
"MIT"
]
| 30 | 2020-10-29T12:40:32.000Z | 2022-03-31T14:06:35.000Z | paz/datasets/ferplus.py | albertofernandezvillan/paz | 9fbd50b993f37e1e807297a29c6044c09967c9cc | [
"MIT"
]
| 62 | 2020-10-29T12:34:13.000Z | 2022-03-29T05:21:45.000Z | import os
import numpy as np
from .utils import get_class_names
from ..abstract import Loader
from ..backend.image import resize_image
# IMAGES_PATH = '../datasets/fer2013/fer2013.csv'
# LABELS_PATH = '../datasets/fer2013/fer2013new.csv'
class FERPlus(Loader):
"""Class for loading FER2013 emotion classification dataset.
with FERPlus labels.
# Arguments
path: String. Path to directory that has inside the files:
`fer2013.csv` and `fer2013new.csv`
split: String. Valid option contain 'train', 'val' or 'test'.
class_names: String or list: If 'all' then it loads all default
class names.
image_size: List of length two. Indicates the shape in which
the image will be resized.
# References
- [FerPlus](https://www.kaggle.com/c/challenges-in-representation-\
learning-facial-expression-recognition-challenge/data)
- [FER2013](https://arxiv.org/abs/1608.01041)
"""
def __init__(self, path, split='train', class_names='all',
image_size=(48, 48)):
if class_names == 'all':
class_names = get_class_names('FERPlus')
super(FERPlus, self).__init__(path, split, class_names, 'FERPlus')
self.image_size = image_size
self.images_path = os.path.join(self.path, 'fer2013.csv')
self.labels_path = os.path.join(self.path, 'fer2013new.csv')
self.split_to_filter = {
'train': 'Training', 'val': 'PublicTest', 'test': 'PrivateTest'}
def load_data(self):
data = np.genfromtxt(self.images_path, str, '#', ',', 1)
data = data[data[:, -1] == self.split_to_filter[self.split]]
faces = np.zeros((len(data), *self.image_size))
for sample_arg, sample in enumerate(data):
face = np.array(sample[1].split(' '), dtype=int).reshape(48, 48)
face = resize_image(face, self.image_size)
faces[sample_arg, :, :] = face
emotions = np.genfromtxt(self.labels_path, str, '#', ',', 1)
emotions = emotions[emotions[:, 0] == self.split_to_filter[self.split]]
emotions = emotions[:, 2:10].astype(float)
N = np.sum(emotions, axis=1)
mask = N != 0
N, faces, emotions = N[mask], faces[mask], emotions[mask]
emotions = emotions / np.expand_dims(N, 1)
data = []
for face, emotion in zip(faces, emotions):
sample = {'image': face, 'label': emotion}
data.append(sample)
return data
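
# Minimal usage sketch (added, not part of the original module). The path
# below is a placeholder: it must point to a directory containing
# ``fer2013.csv`` and ``fer2013new.csv``.
def _example_load_ferplus(path='datasets/fer2013/'):
    data_manager = FERPlus(path, split='train', image_size=(48, 48))
    data = data_manager.load_data()
    # each sample is {'image': HxW array, 'label': 8-dim probability vector}
    return len(data)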
| 39.015385 | 79 | 0.613565 | 2,293 | 0.90418 | 0 | 0 | 0 | 0 | 0 | 0 | 969 | 0.382098 |
ef651d134e566a45ca23483fc6b3987d980d24af | 863 | py | Python | code/array/container-with-most-water.py | windsuzu/leetcode-python | 240ca747d58eb78b08dedf4d5a1fdc0fe0b0c6bf | [
"MIT"
]
| 1 | 2021-09-29T11:05:07.000Z | 2021-09-29T11:05:07.000Z | code/array/container-with-most-water.py | windsuzu/leetcode-python | 240ca747d58eb78b08dedf4d5a1fdc0fe0b0c6bf | [
"MIT"
]
| null | null | null | code/array/container-with-most-water.py | windsuzu/leetcode-python | 240ca747d58eb78b08dedf4d5a1fdc0fe0b0c6bf | [
"MIT"
]
| 1 | 2021-09-29T11:06:32.000Z | 2021-09-29T11:06:32.000Z | from typing import List
class Solution:
def maxArea(self, height: List[int]) -> int:
# We can create "left" and "right" pointers
# the initial width between "l" and "r" is already the maximum
l, r = 0, len(height) - 1
width = r - l
# We can use greedy method to move the lower line to the next line
# For example, if height[l] < height[r], then we move "l" to "l+1"
# if height[l] > height[r], then we move "r" to "r-1"
# if they are the same, then it's ok to move either one
res = 0
while l < r:
res = max(res, width * min(height[l], height[r]))
if height[l] <= height[r]:
l += 1
else:
r -= 1
width -= 1
return res | 30.821429 | 74 | 0.468134 | 838 | 0.971031 | 0 | 0 | 0 | 0 | 0 | 0 | 345 | 0.399768 |
ef68897796bf15cfbe41f5e79ff37ee0aa7a33e6 | 3,578 | py | Python | src/python/DipSimUtilities.py | ndeybach/DipSim | 091f147f933b000b6ab829ec7d10eef985c260b2 | [
"MIT"
]
| null | null | null | src/python/DipSimUtilities.py | ndeybach/DipSim | 091f147f933b000b6ab829ec7d10eef985c260b2 | [
"MIT"
]
| null | null | null | src/python/DipSimUtilities.py | ndeybach/DipSim | 091f147f933b000b6ab829ec7d10eef985c260b2 | [
"MIT"
]
| null | null | null | # This Python file uses the following encoding: utf-8
"""
MIT License
Copyright (c) 2020 Nils DEYBACH & Léo OUDART
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
"""
It serves as a containers for various utility functions. They can be useful in a multitude of cases.
"""
from math import cos, sin, radians, degrees, acos, atan2, pi
from PySide2.QtCore import QRandomGenerator
from PySide2.QtGui import QVector3D, QColor, QQuaternion
######## NUMBER GENERATION #########
"""
Return randomly -1 or 1 as a random sign generator.
"""
def randomSignGenerator():
rndNum = QRandomGenerator.global_().bounded(0, 2)
if(rndNum == 0):
return -1.0
else: return 1.0
######## ANGLES CONVERTIONS #########
"""
Returns rotated quaternion from a rotation (theta) applied to original
direction around specified axis.
"""
def quaternionfromAxisAndAngle(theta, qvector3D=QVector3D(0, 0, 0)):
provVect = (qvector3D.normalized())
s = sin(radians(theta/2))
directionRot = QVector3D(s*provVect.x(), s*provVect.y(), s*provVect.z())
quat = QQuaternion(cos(radians(theta/2)), directionRot.x(), directionRot.y(), directionRot.z())
return quat
"""
Returns quaternion rotation from spherical position (following physics convention) with
a (1,0,0) oriention initialy.
phi, theta: angles in physics convention in degrees.
"""
def anglesSphToQuaternion(phi, theta):
x = sin(radians(theta))*cos(radians(phi))
y = sin(radians(theta))*sin(radians(phi))
z = cos(radians(theta))
fromVec = QVector3D(1, 0, 0)
toVec = QVector3D(x, y, z)
return QQuaternion.rotationTo(fromVec, toVec)
"""
Returns orientation (following physics convention) to a quaternion representing the rotation
needed to get a vector to follow the orientation
"""
def anglesQuaternionToSph(quaternion):
fromVect = QVector3D(1, 0, 0)
toVect = quaternion.rotatedVector(fromVect)
phi = atan2(toVect.y(), toVect.x())
theta = acos(toVect.z()/toVect.length())
return [phi, theta]
######## COLORS #########
def quaternionToColor(quaternion):
sphAngles = anglesQuaternionToSph(quaternion)
return angleSphToColor(sphAngles[0], sphAngles[1])
"""
Returns a color from a 3D vector of angles.
phi, theta: angles in physics convention in radians.
"""
def angleSphToColor(phi, theta):
return QColor.fromHsl(degrees(phi)%360, 255, (degrees(pi - theta)%181)*255/180)
"""
Returns a random color.
"""
def rndColorGenerator():
return QColor(QRandomGenerator.global_().bounded(0, 256), QRandomGenerator.global_().bounded(0, 256), QRandomGenerator.global_().bounded(0, 256))
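
# Minimal usage sketch (added, not part of the original module): round-trip a
# spherical orientation through a quaternion and back. Angles are given in
# degrees on the way in (anglesSphToQuaternion) and returned in radians on the
# way out (anglesQuaternionToSph), as stated in the docstrings above.
def _example_orientation_roundtrip(phi_deg=45.0, theta_deg=60.0):
    quat = anglesSphToQuaternion(phi_deg, theta_deg)
    phi_rad, theta_rad = anglesQuaternionToSph(quat)
    color = quaternionToColor(quat)
    return degrees(phi_rad), degrees(theta_rad), color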
| 35.425743 | 149 | 0.734768 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,982 | 0.553786 |
3226aa7f7ea523e5b462c538450fa0bfe4a22a9b | 1,503 | py | Python | clusterresults/rundr12xpdf10k.py | rohinkumar/CorrelCalc | d7887448af8d3dc3170c00c0aae6ee2561b8a3d5 | [
"MIT"
]
| null | null | null | clusterresults/rundr12xpdf10k.py | rohinkumar/CorrelCalc | d7887448af8d3dc3170c00c0aae6ee2561b8a3d5 | [
"MIT"
]
| null | null | null | clusterresults/rundr12xpdf10k.py | rohinkumar/CorrelCalc | d7887448af8d3dc3170c00c0aae6ee2561b8a3d5 | [
"MIT"
]
| null | null | null | from correlcalc import *
bins = np.arange(0.002,0.062,0.002)
#corrdr12flcdmls=tpcf('/usr3/vstr/yrohin/Downloads/galaxy_DR12v5_CMASS_North.fits',bins,randfile='/usr3/vstr/yrohin/randcat_dr12cmn_2x_pdf10k.dat',estimator='ls',cosmology='lcdm',weights='eq')
print("--------------------------------------------")
corrdr12flcls=tpcf('/usr3/vstr/yrohin/Downloads/galaxy_DR12v5_CMASS_North.fits',bins,randfile='/usr3/vstr/yrohin/randcat_dr12cmn_2x_pdf10k.dat',estimator='ls',cosmology='lcdm',weights=True)
print("--------------------------------------------")
#corrdr12flcls=tpcf('/usr3/vstr/yrohin/Downloads/galaxy_DR12v5_CMASS_North.fits',bins,randfile='/usr3/vstr/yrohin/randcat_dr12cmn_2x_pdf10k.dat',estimator='ls',cosmology='lc',weights='eq')
#print("--------------------------------------------")
#corrdr12olcls=tpcf('/usr3/vstr/yrohin/Downloads/galaxy_DR12v5_CMASS_North.fits',bins,randfile='/usr3/vstr/yrohin/randcat_dr12cmn_2x_pdf10k.dat',estimator='ls',cosmology='lc',weights='eq',geometry='open')
print("--------------------------------------------")
corrdr12flclsw=tpcf('/usr3/vstr/yrohin/Downloads/galaxy_DR12v5_CMASS_North.fits',bins,randfile='/usr3/vstr/yrohin/randcat_dr12cmn_2x_pdf10k.dat',estimator='ls',cosmology='lc',weights=True)
print("--------------------------------------------")
corrdr12flolsw=tpcf('/usr3/vstr/yrohin/Downloads/galaxy_DR12v5_CMASS_North.fits',bins,randfile='/usr3/vstr/yrohin/randcat_dr12cmn_2x_pdf10k.dat',estimator='ls',cosmology='lc',weights=True,geometry='open')
| 107.357143 | 204 | 0.685961 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,181 | 0.785762 |
3227a055c835557ad7f0f841ab6676069d791695 | 10,965 | py | Python | verify/imagenet.py | CAS-LRJ/DeepPAC | 75059572c23474d32a762aca5640f4d799fd992a | [
"Apache-2.0"
]
| null | null | null | verify/imagenet.py | CAS-LRJ/DeepPAC | 75059572c23474d32a762aca5640f4d799fd992a | [
"Apache-2.0"
]
| null | null | null | verify/imagenet.py | CAS-LRJ/DeepPAC | 75059572c23474d32a762aca5640f4d799fd992a | [
"Apache-2.0"
]
| null | null | null | import torch
from torchvision import transforms
from PIL import Image
import numpy as np
import math
from sklearn.linear_model import LinearRegression
from .grid import Grid, grid_split
import torch.backends.cudnn as cudnn
'''
Global Constants:
TASK_NAME: Name of the verification task (deprecated)
PATH: The path of the model file. (Initialized in imagenet_verify)
mean, stdvar: The normalization parameters of the data. (Initialized in imagenet_verify, default mean=(0.485, 0.456, 0.406) stdvar=(0.229, 0.224, 0.225))
delta: The radius of the L-inf Ball. (Initialized in imagenet_verify, default 4/255)
significance, error: The significance and the error rate of the PAC-Model. (Initialized in imagenet_verify, default significance 0.001 and error 0.01)
final_samples: The number of samples needed to calculate the final margin. (Initialized in imagenet_verify, default 1600, according to default error rate and significance)
Batchsize: The batchsize of sampling procedure. (Initialized in imagenet_verify, default 200)
device: Which device to be utilised by Pytorch. (Initialized in imagenet_verify, default 'cuda')
model: The Pytorch Network to be verified. (Initialized in imagenet_verify)
pretrans: The torchvision transform to process the image. (Resize and Tensorize)
normalization_trans: The normalization transform to normalize the data. (Initialized in imagenet_verify)
sampling_budget: The sampling limit for each stepwise splitting. (Initialized in imagenet_verify)
init_grid: The Grid for Imagenet Data (224*224)
Functions:
grid_batch_sample: Grid-based Sampling for Scenario Optimization (Untargetted)
scenario_optimization: Main Verification Function (Focused Learning, Stepwise-Splitting)
imagenet_verify: Entry Function
'''
pretrans = transforms.Compose([transforms.Resize((224, 224)),
transforms.ToTensor(),
])
mean = (0.485, 0.456, 0.406)
stdvar = (0.229, 0.224, 0.225)
normalization_trans = transforms.Normalize(mean, stdvar)
sampling_budget = 20000
delta = 4/255
error = 1e-2
significance = 1e-3
Batchsize = 200
device = 'cuda'
init_grid = [Grid(0, 0, 224, 224)]
PATH = './models/imagenet_linf_4.pth'
def grid_batch_sample(grid_list, n_sample, batch_num, lower, upper, model, fixed_coeff=None, label=0):
global normalization_trans, device
feature_final = []
result_final = []
fixed_features = []
# Calculate the Iteration Number
n_iter = math.ceil(n_sample/batch_num)
model.eval()
for iter in range(n_iter):
samples = np.random.uniform(lower, upper, (batch_num,)+lower.shape)
samples_ = normalization_trans(
torch.tensor(samples)).float().to(device)
with torch.no_grad():
results_ = model(samples_).cpu().detach().numpy()
# Calculate the Untargeted Score Difference
results_ = np.max(np.delete(results_, label, 1),
1) - results_[:, label]
results_ = results_.reshape(batch_num, -1)
result_final.append(results_)
# Calculate the Fixed Constant
fixed_result_i = (samples.reshape(batch_num, -1) @
fixed_coeff.reshape(-1)).reshape((batch_num, -1))
fixed_features.append(fixed_result_i)
# Calculate the Grid Sum
feature_iter_i = []
for grid in grid_list:
for channel in range(3):
grid_data = samples[:, channel, grid.leftup_x:grid.rightdown_x,
grid.leftup_y:grid.rightdown_y]
grid_sum = np.sum(grid_data, axis=1, keepdims=True)
grid_sum = np.sum(grid_sum, axis=2, keepdims=True)
grid_sum = grid_sum.reshape(-1, 1)
feature_iter_i.append(grid_sum)
# Merge the Grid Sums
feature_iter_i = np.hstack(feature_iter_i)
feature_final.append(feature_iter_i)
# Merge the Batch Results
feature_final = np.vstack(feature_final)
result_final = np.vstack(result_final)
fixed_features = np.vstack(fixed_features)
return feature_final, result_final, fixed_features
def scenario_optimization(image, label):
global significance, error, init_grid, model, sampling_budget, delta
global pretrans, normalization_trans, Batchsize, final_samples
# Split into 7x7 small grids (32x32 split)
grid_list = grid_split(init_grid, 32, 32)
img = pretrans(image)
img_np = img.detach().numpy()
# Calculate the Lower and Upper Bounds
img_upper = np.clip(img_np+delta, 0., 1.)
img_lower = np.clip(img_np-delta, 0., 1.)
fixed_coeff = np.zeros((3, 224, 224))
# Grid Refinement Procedure
n_refine = 5
for refine_step in range(n_refine):
print('Stepwise Splitting #', refine_step, 'Start')
print('Sampling... (%d samples)' % sampling_budget)
features, scores, fixed_constant = grid_batch_sample(
grid_list, sampling_budget, Batchsize, img_lower, img_upper, model, fixed_coeff, label)
print('Constructing Template...')
# Linear Regression to construct the Coarse Model for Stepwise Splitting
reg = LinearRegression(fit_intercept=True).fit(
features, scores-fixed_constant)
coeff = np.array(reg.coef_).reshape(-1, 3)
# Use the L2 Norm to Identify the Important Grids
coeff_l2 = np.sqrt(np.sum(coeff*coeff, axis=1))
coeff_l2_index = np.argsort(coeff_l2)
coeff_l2_index_low = coeff_l2_index[:math.ceil(
len(coeff_l2_index)*0.75)]
coeff_l2_index_high = coeff_l2_index[math.ceil(
len(coeff_l2_index)*0.75):]
# Fix the Less Important Grids
for index in coeff_l2_index_low:
grid = grid_list[index]
fixed_coeff[0, grid.leftup_x:grid.rightdown_x,
grid.leftup_y:grid.rightdown_y] = coeff[index, 0]
fixed_coeff[1, grid.leftup_x:grid.rightdown_x,
grid.leftup_y:grid.rightdown_y] = coeff[index, 1]
fixed_coeff[2, grid.leftup_x:grid.rightdown_x,
grid.leftup_y:grid.rightdown_y] = coeff[index, 2]
# Split the Rest Grids
print('Splitting...')
grid_list_i = []
for index in coeff_l2_index_high:
grid_list_i.append(grid_list[index])
grid_list = grid_split(grid_list_i, 2, 2)
del features, scores, fixed_constant
# Last Step, To Fix the Rest Grids
print('Last Step...')
features, scores, fixed_constant = grid_batch_sample(
grid_list, sampling_budget, Batchsize, img_lower, img_upper, model, fixed_coeff, label)
reg = LinearRegression(fit_intercept=True).fit(
features, scores-fixed_constant)
intercept = reg.intercept_
coeff = np.array(reg.coef_).reshape(-1, 3)
for index in range(len(coeff)):
grid = grid_list[index]
fixed_coeff[0, grid.leftup_x:grid.rightdown_x,
grid.leftup_y:grid.rightdown_y] = coeff[index, 0]
fixed_coeff[1, grid.leftup_x:grid.rightdown_x,
grid.leftup_y:grid.rightdown_y] = coeff[index, 1]
fixed_coeff[2, grid.leftup_x:grid.rightdown_x,
grid.leftup_y:grid.rightdown_y] = coeff[index, 2]
del features, scores, fixed_constant
# Calculate the Margin
features, scores, fixed_constant = grid_batch_sample(
init_grid, final_samples, Batchsize, img_lower, img_upper, model, fixed_coeff, label)
eps_max = np.max(np.abs(scores-fixed_constant-intercept))
print('Margin: ', eps_max)
del features, scores, fixed_constant
safe = True
unsafe = False
# Calculate the Maximum of the Learned Model, Find The Potential Counter-Example
val_max = fixed_coeff[fixed_coeff < 0]@img_lower[fixed_coeff < 0] + \
fixed_coeff[fixed_coeff > 0]@img_upper[fixed_coeff > 0] + \
intercept+eps_max
print('Evaluated Delta Max Value: ', val_max)
if val_max > 0:
print('Potential Counter-example Found')
safe = False
# Examine the Potential Counter-Example
ce = np.zeros_like(img_lower)
ce[fixed_coeff <= 0] = img_lower[fixed_coeff <= 0]
ce[fixed_coeff > 0] = img_upper[fixed_coeff > 0]
with torch.no_grad():
ce = normalization_trans(torch.tensor(ce).unsqueeze(0)).to(device)
scores = model(ce)[0]
print('True Label: ', torch.argmax(scores), 'Score: ', torch.max(
scores), 'Original Label:', label, 'Scores: ', scores[label])
if torch.argmax(scores) != label:
unsafe = True
print('Counter-example Confirmed')
if safe:
print('Network is PAC-model robust with error rate',
error, 'and confidence level', 1-significance)
return 1
elif unsafe:
print('Unsafe. Adversarial Example Found.')
return 0
print('Unknown. Potential Counter-Example exists.')
return 2
def imagenet_verify(model_class, args):
global delta, PATH, error, significance, final_samples, normalization_trans, mean, stdvar, dataset, device, model, Batchsize, sampling_budget
PATH = args.model
delta = args.radius/255.
error = args.epsilon
significance = args.eta
Batchsize = args.batchsize
image_path = args.image
final_samples = math.ceil(2/error*(math.log(1/significance)+1))
final_samples = math.ceil(final_samples/Batchsize)*Batchsize
model = model_class()
model.load_state_dict(torch.load(PATH))
if getattr(args, 'mean') != None:
mean = args.mean
if getattr(args, 'std') != None:
stdvar = args.std
if getattr(args, 'budget') != None:
sampling_budget = args.budget
normalization_trans = transforms.Normalize(mean, stdvar)
if args.gpu == False:
device = 'cpu'
np.random.seed(0)
if device == 'cuda':
cudnn.deterministic = True
cudnn.benchmark = False
model = model.to(device)
model.eval()
image = Image.open(image_path).convert('RGB')
if getattr(args, 'label') != None:
label = args.label
else:
label = int(torch.argmax(model(normalization_trans(
pretrans(image)).unsqueeze(0).to(device))[0]).cpu())
print('True Label: ', label)
try:
print('Verification Radius(L-inf): ', args.radius)
print('Mean: ', mean)
print('Std: ', stdvar)
return scenario_optimization(image, label)
except Exception as err:
print('Error: Verification Failed')
print(err)
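
# -----------------------------------------------------------------------------
# Minimal wiring sketch (added, not part of the original module): driving
# imagenet_verify() programmatically. ``MyNet`` is a placeholder for the model
# class whose weights live at ``model``; every path and value below is an
# example, not a project default.
def _example_verify(MyNet):
    from argparse import Namespace
    args = Namespace(model='./models/imagenet_linf_4.pth', radius=4,
                     epsilon=1e-2, eta=1e-3, batchsize=200,
                     image='./images/example.jpg', gpu=True,
                     mean=None, std=None, budget=None, label=None)
    return imagenet_verify(MyNet, args)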
| 43.685259 | 188 | 0.639216 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,908 | 0.265207 |
322854f1b6ad1bef2a63f035b0bf9ea507c22498 | 5,537 | py | Python | src/main.py | ronikleyton/script-backup-switch-huawei | 80c990afa3561c350823cb96e25174262d8d4ab1 | [
"MIT"
]
| null | null | null | src/main.py | ronikleyton/script-backup-switch-huawei | 80c990afa3561c350823cb96e25174262d8d4ab1 | [
"MIT"
]
| null | null | null | src/main.py | ronikleyton/script-backup-switch-huawei | 80c990afa3561c350823cb96e25174262d8d4ab1 | [
"MIT"
]
| null | null | null | from telnetlib import Telnet
from exception.exceptions import *
from datetime import date
import time
import os
from dotenv import load_dotenv
import json
load_dotenv()
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
f = open(f'{ROOT_DIR}/equipamentos.json')
equipamentos = json.load(f)['equipamentos']
def main(equipamento):
IP_SERVER_FTP = os.environ.get('IP_SERVER_FTP')
USER_FTP = os.environ.get('USER_FTP')
PASS_FTP = os.environ.get('PASS_FTP')
data_atual = date.today()
data_em_texto ="{}-{}-{}".format(data_atual.day, data_atual.month,data_atual.year)
r = '\r'
r = r.encode('ascii')
try:
equipamento.connection = Telnet(equipamento.ip, equipamento.port)
# Performing the login
index, match_obj, text = equipamento.connection.expect(["Username:".encode('latin-1')], timeout=2)
if not match_obj:
raise CommandError(f"Falha na conexão, EQUIPAMENTO RESPONSE: {text}")
equipamento.connection.write(f"{equipamento.user}\r".encode('latin-1'))
index, match_obj, text = equipamento.connection.expect(["Password:".encode('latin-1')], timeout=2)
if not match_obj:
raise CommandError(f"Falha no usuário, EQUIPAMENTO RESPONSE: {text}")
equipamento.connection.write(f"{equipamento.password}\r".encode('latin-1'))
index, match_obj, text = equipamento.connection.expect([">".encode('latin-1')], timeout=2)
if not match_obj:
raise CommandError("Falha ao informar a senha")
equipamento.connection.write(b"save\r")
equipamento.connection.write(b"Y\r")
index, match_obj, text = equipamento.connection.expect([">".encode('latin-1')], timeout=2)
print("Acessou o switch.")
time.sleep(3)
index, match_obj, text = equipamento.connection.expect([">".encode('latin-1')], timeout=2)
ftp = "ftp -a %s %s"%(equipamento.ip,IP_SERVER_FTP)
ftp = ftp.encode('ascii')
equipamento.connection.write(ftp + r)
index, match_obj, text = equipamento.connection.expect([":".encode('latin-1')], timeout=2)
if not match_obj:
raise CommandError("Falha ao executar comando de conectar no ftp ")
equipamento.connection.write(USER_FTP.encode('ascii') + r)
index, match_obj, text = equipamento.connection.expect(["password:".encode('latin-1')], timeout=2)
if not match_obj:
raise CommandError("Falha ao Acessar o FTP-SERVER verifique a conexão e credenciais")
equipamento.connection.write(PASS_FTP.encode('ascii') + r)
index, match_obj, text = equipamento.connection.expect(["[ftp]".encode('latin-1')], timeout=2)
if not match_obj:
raise CommandError("Falha ao Acessar o FTP-SERVER")
equipamento.connection.write(b"binary\r")
index, match_obj, text = equipamento.connection.expect(["[ftp]".encode('latin-1')], timeout=2)
if not match_obj:
raise CommandError("Falha ao mudar ftp para binary")
equipamento.connection.write(b"cd backups\r")
index, match_obj, text = equipamento.connection.expect(["[ftp]".encode('latin-1')], timeout=2)
if not match_obj:
raise CommandError("Falha ao entrar na pasta Backups")
equipamento.connection.write(b"cd huawei\r")
index, match_obj, text = equipamento.connection.expect(["[ftp]".encode('latin-1')], timeout=2)
if not match_obj:
raise CommandError("Falha ao Entrar na pasta huawei")
criarPasta = "mkdir %s"%(equipamento.hostname)
criarPasta = criarPasta.encode('ascii')
equipamento.connection.write(criarPasta + r)
index, match_obj, text = equipamento.connection.expect(["[ftp]".encode('latin-1')], timeout=2)
if not match_obj:
raise CommandError("Falha ao Entrar na pasta huawei")
pasta = "cd %s"%(equipamento.hostname)
pasta = pasta.encode('ascii')
equipamento.connection.write(pasta + r)
index, match_obj, text = equipamento.connection.expect(["[ftp]".encode('latin-1')], timeout=2)
if not match_obj:
raise CommandError("Falha ao Entrar na pasta do switch")
put = "put vrpcfg.zip vrpcfg-%s.zip"%(data_em_texto)
put = put.encode('ascii')
equipamento.connection.write(put + r)
index, match_obj, text = equipamento.connection.expect(["[ftp]".encode('latin-1')], timeout=2)
if not match_obj:
raise CommandError("Falha ao salvar o arquivo de configuração no servidor.")
time.sleep(1.5)
#print (equipamento.connection.read_eager())
#print (equipamento.connection.read_all())
print('BackupFinalizado')
equipamento.connection.close()
except:
equipamento.connection.close()
raise ConnectionError()
class Equipamento:
def __init__(self,hostname, ip,port, user, password):
self.connection = None
self.hostname = hostname
self.ip = ip
self.port = port
self.user = user
self.password = password
for switch in equipamentos:
try:
USER = os.environ.get('USER')
PASS = os.environ.get('PASS')
PORT_TELNET = os.environ.get('PORT_TELNET')
print(f"Iniciando Backup no Switch {switch['hostname']}")
equipamento = Equipamento(switch['hostname'],switch['ip'],PORT_TELNET,USER,PASS)
main(equipamento)
except:
pass | 35.722581 | 106 | 0.641683 | 244 | 0.044027 | 0 | 0 | 0 | 0 | 0 | 0 | 1,274 | 0.229881 |
3228d6088055f54b7b82121a3d3e109e936942b3 | 1,623 | py | Python | setup.py | cakebread/musubi | 5b5f1bdf65fe07c14ff7bb2252c278f6ca0c903c | [
"BSD-2-Clause"
]
| 5 | 2015-05-18T13:18:26.000Z | 2020-01-14T08:24:08.000Z | setup.py | cakebread/musubi | 5b5f1bdf65fe07c14ff7bb2252c278f6ca0c903c | [
"BSD-2-Clause"
]
| null | null | null | setup.py | cakebread/musubi | 5b5f1bdf65fe07c14ff7bb2252c278f6ca0c903c | [
"BSD-2-Clause"
]
| null | null | null | #!/usr/bin/env python
PROJECT = 'musubi'
VERSION = '0.2'
import distribute_setup
distribute_setup.use_setuptools()
from setuptools import setup, find_packages
try:
long_description = open('README.rst', 'rt').read()
except IOError:
long_description = 'Uh oh, we may need a new hard drive.'
setup(
name=PROJECT,
version=VERSION,
description='Musubi is a command-line DNSBL checker and MX toolkit.',
long_description=long_description,
author='Rob Cakebread',
author_email='[email protected]',
url='https://github.com/cakebread/musubi',
download_url='https://github.com/cakebread/musubi/tarball/master',
classifiers=['Development Status :: 3 - Alpha',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Intended Audience :: Developers',
'Environment :: Console',
],
platforms=['Any'],
scripts=[],
provides=[],
install_requires=['requests', 'dnspython', 'IPy', 'distribute',
'cliff', 'cliff-tablib', 'gevent', 'greenlet'],
namespace_packages=[],
packages=find_packages(),
include_package_data=True,
entry_points={
'console_scripts': [
'musubi = musubi.main:main'
],
'musubi.cli': [
'ips = musubi.ips:GetIPs',
'mx = musubi.mx:GetMX',
'spf = musubi.spf:GetSPF',
'scan = musubi.scan:Scan',
],
},
zip_safe=False,
)
| 29.509091 | 73 | 0.590265 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 742 | 0.457178 |
3229164df79c432f6f7ad72e86350bc6d3ce6e18 | 1,048 | py | Python | airflow_ml_dags/images/airflow-preprocess/preprocess.py | made-ml-in-prod-2021/holyketzer | f693f2d5fce8cced03873e2b89cbe10617996c64 | [
"MIT"
]
| null | null | null | airflow_ml_dags/images/airflow-preprocess/preprocess.py | made-ml-in-prod-2021/holyketzer | f693f2d5fce8cced03873e2b89cbe10617996c64 | [
"MIT"
]
| 2 | 2021-05-21T09:09:23.000Z | 2021-06-05T08:13:40.000Z | airflow_ml_dags/images/airflow-preprocess/preprocess.py | made-ml-in-prod-2021/holyketzer | f693f2d5fce8cced03873e2b89cbe10617996c64 | [
"MIT"
]
| null | null | null | import os
import pandas as pd
import click
from datetime import date
@click.command("preprocess")
@click.option("--input-dir")
@click.option("--output-dir")
@click.option("--mode")
def preprocess(input_dir: str, output_dir, mode):
if mode == "data":
data = pd.read_csv(os.path.join(input_dir, "data.csv"))
data["FirstLength"] = data["First"].apply(len)
data["LastLength"] = data["Last"].apply(len)
file = "data.csv"
elif mode == "target":
data = pd.read_csv(os.path.join(input_dir, "target.csv"))
today = date.today()
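        # Age in whole years: subtract one if this year's birthday has not happened yet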
data["Age"] = pd.to_datetime(data["Birthdate"]).apply(
lambda born: today.year - born.year - ((today.month, today.day) < (born.month, born.day))
)
data.drop(columns=["Birthdate"], inplace=True)
file = "target.csv"
else:
raise ValueError(f"unknown mode: '{mode}'")
os.makedirs(output_dir, exist_ok=True)
data.to_csv(os.path.join(output_dir, file), index=False)
if __name__ == '__main__':
preprocess()
| 29.942857 | 101 | 0.621183 | 0 | 0 | 0 | 0 | 929 | 0.88645 | 0 | 0 | 205 | 0.195611 |
32298c15e29bc9b924d33fac9a984d4c8170430a | 581 | py | Python | estrutura_while/barra-de-progresso.py | BEp0/Estudos_de_Python | da32a01d3f4462b3e6b1b6035106895afe9c7627 | [
"MIT"
]
| 1 | 2021-02-15T19:14:44.000Z | 2021-02-15T19:14:44.000Z | estrutura_while/barra-de-progresso.py | BEp0/Estudos_de_Python | da32a01d3f4462b3e6b1b6035106895afe9c7627 | [
"MIT"
]
| null | null | null | estrutura_while/barra-de-progresso.py | BEp0/Estudos_de_Python | da32a01d3f4462b3e6b1b6035106895afe9c7627 | [
"MIT"
]
| null | null | null | from time import sleep
from sys import stdout
def barra(v):
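    # Draw a simple text progress bar: one dash per unit of v, with a short delay between dashes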
v = int(v)
print('[ ', end='')
for v in range(0, v):
print(f'-', end='', flush=True)
sleep(0.1)
print(' ]', end='\n')
def calcularNotas():
soma = 0
v = 0
for i in range(0, 2):
nota = float(input(f'\n{i + 1}º nota : '))
soma += nota
v = soma // 2
print('\nCALCULANDO: ', end='\b')
barra(v)
return print(f'MÉDIA FOI: {soma / 2}')
def main():
calcularNotas()
sleep(1)
print('\n__FIM__\n')
if __name__ == "__main__":
main()
| 17.088235 | 50 | 0.504303 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 111 | 0.190395 |
3229bb9f7088946e3efcc3fcbb6cba8d90bd5930 | 4,329 | py | Python | models/show.py | wanderindev/fyyur | acf3a44ce7fae6b24576a320afd447c0595d76e5 | [
"MIT"
]
| null | null | null | models/show.py | wanderindev/fyyur | acf3a44ce7fae6b24576a320afd447c0595d76e5 | [
"MIT"
]
| null | null | null | models/show.py | wanderindev/fyyur | acf3a44ce7fae6b24576a320afd447c0595d76e5 | [
"MIT"
]
| 2 | 2020-07-16T22:02:13.000Z | 2020-11-22T21:16:28.000Z | from datetime import datetime
from sqlalchemy import or_
from app import db
from .mixin import ModelMixin
class Show(db.Model, ModelMixin):
__tablename__ = "shows"
id = db.Column(db.Integer, primary_key=True)
start_time = db.Column(db.DateTime, nullable=False)
artist_id = db.Column(
db.Integer, db.ForeignKey("artists.id"), nullable=False
)
venue_id = db.Column(
db.Integer, db.ForeignKey("venues.id"), nullable=False
)
def __init__(self, **kwargs):
super(Show, self).__init__(**kwargs)
@classmethod
def upcoming_shows_by_venue(cls, _venue_id):
shows = cls.query.filter(
cls.venue_id == _venue_id, Show.start_time > datetime.now()
).all()
return [
{
"artist_id": show.artist.id,
"artist_name": show.artist.name,
"artist_image_link": show.artist.image_link,
"start_time": show.start_time.isoformat(),
}
for show in shows
]
@classmethod
def past_shows_by_venue(cls, _venue_id):
shows = cls.query.filter(
cls.venue_id == _venue_id, Show.start_time < datetime.now()
).all()
return [
{
"artist_id": show.artist.id,
"artist_name": show.artist.name,
"artist_image_link": show.artist.image_link,
"start_time": show.start_time.isoformat(),
}
for show in shows
]
@classmethod
def upcoming_shows_by_artist(cls, _artist_id):
shows = cls.query.filter(
cls.artist_id == _artist_id, Show.start_time > datetime.now()
).all()
return [
{
"venue_id": show.venue.id,
"venue_name": show.venue.name,
"venue_image_link": show.venue.image_link,
"start_time": show.start_time.isoformat(),
}
for show in shows
]
@classmethod
def past_shows_by_artist(cls, _artist_id):
shows = cls.query.filter(
cls.artist_id == _artist_id, Show.start_time < datetime.now()
).all()
return [
{
"venue_id": show.venue.id,
"venue_name": show.venue.name,
"venue_image_link": show.venue.image_link,
"start_time": show.start_time.isoformat(),
}
for show in shows
]
@classmethod
def get_by_id(cls, _id):
return cls.query.filter_by(id=_id).first()
@classmethod
def get_show(cls, _id):
show = cls.get_by_id(_id)
return {
"venue_id": show.venue.id,
"venue_name": show.venue.name,
"artist_id": show.artist.id,
"artist_name": show.artist.name,
"artist_image_link": show.artist.image_link,
"start_time": show.start_time.isoformat(),
}
@classmethod
def get_shows(cls):
return [
{
"venue_id": show.venue.id,
"venue_name": show.venue.name,
"artist_id": show.artist.id,
"artist_name": show.artist.name,
"artist_image_link": show.artist.image_link,
"start_time": show.start_time.isoformat(),
}
for show in cls.query.all()
]
@classmethod
def search(cls, search_term):
from .artist import Artist
from .venue import Venue
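        # Case-insensitive match of the search term against either the venue name or the artist name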
shows = (
cls.query.join(Venue)
.join(Artist)
.filter(
or_(
Venue.name.ilike(f"%{search_term}%"),
Artist.name.ilike(f"%{search_term}%"),
)
)
.all()
)
return {
"data": [
{
"id": show.id,
"venue_name": show.venue.name,
"artist_name": show.artist.name,
"start_time": show.start_time,
}
for show in shows
],
"count": len(shows),
}
| 30.921429 | 74 | 0.495033 | 4,213 | 0.973204 | 0 | 0 | 3,697 | 0.854008 | 0 | 0 | 488 | 0.112728 |
322b0d39d0e86bb9ee65efcc180b2518cde85315 | 2,141 | py | Python | backend/sponsors/migrations/0001_initial.py | marcoacierno/pycon | 2b7b47598c4929769cc73e322b3fce2c89151e21 | [
"MIT"
]
| 56 | 2018-01-20T17:18:40.000Z | 2022-03-28T22:42:04.000Z | backend/sponsors/migrations/0001_initial.py | marcoacierno/pycon | 2b7b47598c4929769cc73e322b3fce2c89151e21 | [
"MIT"
]
| 2,029 | 2018-01-20T11:37:24.000Z | 2022-03-31T04:10:51.000Z | backend/sponsors/migrations/0001_initial.py | marcoacierno/pycon | 2b7b47598c4929769cc73e322b3fce2c89151e21 | [
"MIT"
]
| 17 | 2018-03-17T09:44:28.000Z | 2021-12-27T19:57:35.000Z | # Generated by Django 2.2.4 on 2019-08-30 21:56
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
('conferences', '0007_auto_20190811_1953'),
]
operations = [
migrations.CreateModel(
name='SponsorLevel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.PositiveIntegerField(db_index=True, editable=False, verbose_name='order')),
('name', models.CharField(max_length=20, verbose_name='name')),
('conference', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sponsor_levels', to='conferences.Conference', verbose_name='conference')),
],
options={
'ordering': ('order',),
'abstract': False,
'unique_together': {('name', 'conference')},
},
),
migrations.CreateModel(
name='Sponsor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('name', models.CharField(max_length=200, verbose_name='name')),
('link', models.URLField(blank=True, verbose_name='published')),
('image', models.ImageField(blank=True, null=True, upload_to='sponsors', verbose_name='image')),
('level', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sponsors', to='sponsors.SponsorLevel', verbose_name='level')),
],
options={
'abstract': False,
},
),
]
| 44.604167 | 182 | 0.611397 | 1,960 | 0.91546 | 0 | 0 | 0 | 0 | 0 | 0 | 424 | 0.198038 |
322bb384475b3968baa795c394e1297ef1e165d8 | 156 | py | Python | __init__.py | sacherjj/python-AlienRFID | aaddd846d46cca533dca43c256890c072e8f5ec5 | [
"MIT"
]
| 1 | 2021-03-21T13:52:00.000Z | 2021-03-21T13:52:00.000Z | __init__.py | sacherjj/python-AlienRFID | aaddd846d46cca533dca43c256890c072e8f5ec5 | [
"MIT"
]
| null | null | null | __init__.py | sacherjj/python-AlienRFID | aaddd846d46cca533dca43c256890c072e8f5ec5 | [
"MIT"
]
| 2 | 2015-10-12T10:02:50.000Z | 2020-03-09T13:30:12.000Z | from .alien_config import AlienConfig
from .alien_connection import AlienConnection
from .alien_tag import AlienTag
from .alien_tag_list import AlienTagList | 39 | 45 | 0.878205 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
322bb4e6bc6b91b44404b73d00ac6be4830c39c7 | 658 | py | Python | 01_Hello_PGP/solution.py | 3-24/id0-rsa.pub | 633e974a330d0dc09d37e423168974b7fba69830 | [
"MIT"
]
| 1 | 2020-03-29T16:10:54.000Z | 2020-03-29T16:10:54.000Z | 01_Hello_PGP/solution.py | 3-24/id0-rsa.pub | 633e974a330d0dc09d37e423168974b7fba69830 | [
"MIT"
]
| null | null | null | 01_Hello_PGP/solution.py | 3-24/id0-rsa.pub | 633e974a330d0dc09d37e423168974b7fba69830 | [
"MIT"
]
| null | null | null | from subprocess import run, PIPE
def check(password,filedata):
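    # Try to decrypt the file with gpg; a zero exit status means the passphrase is correct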
print("Trying passphrase={:s}".format(password))
cmd = run("gpg --pinentry-mode loopback --passphrase '{:s}' -d {:s}".format(password,filedata), shell=True, stdout=PIPE)
if cmd.returncode == 0:
output = cmd.stdout.decode('utf-8')
print('plaintext:')
print(output)
return True
else:
return False
def main():
f = open('/usr/share/dict/words','r')
lines = f.readlines()
for word in lines:
if "'" in word:
continue
word = word.strip()
if check(word,'message.txt'):
break
main()
| 24.37037 | 124 | 0.575988 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 143 | 0.217325 |
322c0212f8148c0b38508aaf2672d99f9c4007b4 | 8,524 | py | Python | src/apodeixi/text_layout/tests_unit/test_column_layout.py | ChateauClaudia-Labs/apodeixi | dd668e210e92cabc2682ad3049781c06e58e3101 | [
"MIT"
]
| null | null | null | src/apodeixi/text_layout/tests_unit/test_column_layout.py | ChateauClaudia-Labs/apodeixi | dd668e210e92cabc2682ad3049781c06e58e3101 | [
"MIT"
]
| null | null | null | src/apodeixi/text_layout/tests_unit/test_column_layout.py | ChateauClaudia-Labs/apodeixi | dd668e210e92cabc2682ad3049781c06e58e3101 | [
"MIT"
]
| null | null | null | import sys as _sys
import pandas as _pd
from apodeixi.testing_framework.a6i_unit_test import ApodeixiUnitTest
from apodeixi.util.formatting_utils import DictionaryFormatter
from apodeixi.util.a6i_error import ApodeixiError, FunctionalTrace
from apodeixi.text_layout.column_layout import ColumnWidthCalculator
class Test_ColumnWidthCalculator(ApodeixiUnitTest):
def setUp(self):
super().setUp()
def test_sparse_layout(self):
self._shell_test_case('test_sparse_layout', viewport_width=50, viewport_height=40, max_word_length=20)
def test_thick_layout(self):
self._shell_test_case('test_thick_layout', viewport_width=100, viewport_height=40, max_word_length=20)
def _shell_test_case(self, name, viewport_width, viewport_height, max_word_length):
INPUT_FOLDER = self.input_data
INPUT_FILE = name + '_INPUT.csv'
OUTPUT_FOLDER = self.output_data
OUTPUT_FILE = name + '_OUTPUT.csv'
EXPECTED_FOLDER = self.expected_data
EXPECTED_FILE = name + '_EXPECTED.csv'
OUTPUT_COMPARISON_FILE = name + '_comparison_OUTPUT.txt'
EXPECTED_COMPARISON_FILE = name + '_comparison_EXPECTED.txt'
OUTPUT_EXPLAIN_FILE = name + '_explain_OUTPUT.txt'
EXPECTED_EXPLAIN_FILE = name + '_explain_EXPECTED.txt'
OUTPUT_RESULTS_FILE = name + '_results_OUTPUT.txt'
EXPECTED_RESULTS_FILE = name + '_results_EXPECTED.txt'
try:
root_trace = FunctionalTrace(parent_trace=None, path_mask=self._path_mask).doing("Testing computation of column widths")
data_df = self.load_csv(root_trace, INPUT_FOLDER + '/' + INPUT_FILE)
calc = ColumnWidthCalculator( data_df = data_df,
viewport_width = viewport_width,
viewport_height = viewport_height,
max_word_length = max_word_length)
result_dict = calc.calc(root_trace)
output_df = calc.analysis_df
output_explain = '\n'.join(calc.explanations)
# Save DataFrame, explain and results in case the assertion below fails, so that we can do
# a visual comparison of OUTPUT vs EXPECTED csv files
output_df.to_csv(OUTPUT_FOLDER + '/' + OUTPUT_FILE)
with open(OUTPUT_FOLDER + '/' + OUTPUT_EXPLAIN_FILE, 'w') as file:
file .write(output_explain)
# Make results readable by creating a pretty
result_nice = DictionaryFormatter().dict_2_nice(parent_trace = root_trace, a_dict = result_dict)
with open(OUTPUT_FOLDER + '/' + OUTPUT_RESULTS_FILE, 'w') as file:
file .write(result_nice)
# Load the output we just saved, which we'll use for regression comparison since in Pandas the act of loading will
# slightly change formats and we want to apply the same such changes as were applied to the expected output,
# to avoid frivolous differences that don't deserve to cause this test to fail
loaded_output_df = self.load_csv(root_trace, OUTPUT_FOLDER + '/' + OUTPUT_FILE)
# Now load the expected output.
expected_df = self.load_csv(root_trace, EXPECTED_FOLDER + '/' + EXPECTED_FILE)
check, comparison_dict = self._compare_dataframes( df1 = loaded_output_df,
df1_name = "output",
df2 = expected_df,
df2_name = "expected")
df_comparison_nice = DictionaryFormatter().dict_2_nice(parent_trace = root_trace,
a_dict = comparison_dict,
flatten=True)
with open(OUTPUT_FOLDER + '/' + OUTPUT_COMPARISON_FILE, 'w') as file:
file .write(df_comparison_nice)
with open(EXPECTED_FOLDER + '/' + EXPECTED_COMPARISON_FILE, 'r') as file:
expected_df_comparison = file.read()
with open(EXPECTED_FOLDER + '/' + EXPECTED_EXPLAIN_FILE, 'r') as file:
expected_explain = file.read()
with open(EXPECTED_FOLDER + '/' + EXPECTED_RESULTS_FILE, 'r') as file:
expected_result = file.read()
except ApodeixiError as ex:
print(ex.trace_message())
self.assertTrue(1==2)
self.assertEqual(df_comparison_nice, expected_df_comparison)
self.assertTrue(check)
self.assertEqual(output_explain, expected_explain)
self.assertEqual(result_nice, expected_result)
def _compare_dataframes(self, df1, df2, df1_name, df2_name):
'''
Helper method used in lieu of dataframe.equals, which fails for spurious reasons.
Under this method's policy, two dataframes are equal if they have the same columns, indices, and are
point-wise equal.
Method returns two things: a boolean result of the comparison, and a dictionary to pin point where there are
differences, if any
'''
# Prepare an explanation of where the dataframes differ, if they do differ. This visibility helps with debugging
comparison_dict = {}
cols_1 = set(df1.columns)
cols_2 = set(df2.columns)
# Ensure determinism with sort
common_cols = list(cols_1.intersection(cols_2))
common_cols.sort()
missing_in_1 = list(cols_2.difference(cols_1))
missing_in_1.sort()
missing_in_2 = list(cols_1.difference(cols_2))
missing_in_2.sort()
comparison_dict[df1_name + ' shape'] = str(df1.shape)
comparison_dict[df2_name + ' shape'] = str(df2.shape)
if len(missing_in_1) > 0:
comparison_dict[df1_name + ' missing columns'] = '\n'.join(missing_in_1)
if len(missing_in_2) > 0:
comparison_dict[df2_name + ' missing columns'] = '\n'.join(missing_in_2)
        # Initialize to true until proven false
check = True
if not df1.index.equals(df2.index):
check = False
else: # Compare element by element for the common_cols
cell_dict = {}
for row in df1.iterrows():
row1_nb = row[0]
row1_data = row[1]
for col in common_cols: # use common_cols that is a deterministic list
val1 = row1_data[col]
val2 = df2.iloc[row1_nb][col]
if val1 != val2:
check = False
coords = col + '.row' + str(row1_nb)
cell_dict[coords] = "values differ"
cell_dict[coords + '.' + df1_name] = str(val1)
cell_dict[coords + '.' + df2_name] = str(val2)
comparison_dict['elt-by-elt comparison'] = cell_dict
if check:
comparison_dict['Result of elt-by-elt comparison'] = "Everything matches"
return check, comparison_dict
if __name__ == "__main__":
# execute only if run as a script
def main(args):
T = Test_ColumnWidthCalculator()
T.setUp()
what_to_do = args[1]
if what_to_do=='sparse_layout':
            T.test_sparse_layout()
main(_sys.argv) | 51.660606 | 141 | 0.530737 | 7,859 | 0.921985 | 0 | 0 | 0 | 0 | 0 | 0 | 1,725 | 0.20237 |
322c5954da97025867a532a5c2f025836a221df3 | 944 | py | Python | evolute/operators/mate.py | ysglh/evolute | ea868e5d04e6bb59760a9b6dec709303637b9f10 | [
"MIT"
]
| 174 | 2018-08-15T21:48:30.000Z | 2022-03-13T01:34:48.000Z | evolute/operators/mate.py | ysglh/evolute | ea868e5d04e6bb59760a9b6dec709303637b9f10 | [
"MIT"
]
| null | null | null | evolute/operators/mate.py | ysglh/evolute | ea868e5d04e6bb59760a9b6dec709303637b9f10 | [
"MIT"
]
| 27 | 2018-05-16T16:25:36.000Z | 2021-11-02T20:51:38.000Z | import numpy as np
class MateBase:
def apply(self, ind1, ind2):
pass
def __call__(self, ind1, ind2):
return self.apply(ind1, ind2)
class LambdaMate(MateBase):
def __init__(self, function_ref, **kw):
self.kwargs = kw
self.apply = lambda ind1, ind2: function_ref(ind1, ind2, **self.kwargs)
class RandomPickMate(MateBase):
def apply(self, ind1, ind2):
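        # Uniform crossover: each element is picked from ind1 or ind2 with equal probability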
return np.where(np.random.uniform(size=ind1.shape) < 0.5, ind1, ind2)
class SmoothMate(MateBase):
def apply(self, ind1, ind2):
return np.mean((ind1, ind2), axis=0)
DefaultMate = RandomPickMate
class ScatterMateWrapper(MateBase):
def __init__(self, base=DefaultMate, stdev=1.):
if isinstance(base, type):
base = base()
self.base = base
self.stdev = stdev
def apply(self, ind1, ind2):
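        # Mate with the wrapped operator, then add Gaussian noise with the configured standard deviation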
return self.base(ind1, ind2) + np.random.randn(*ind1.shape) * self.stdev
| 20.977778 | 80 | 0.635593 | 879 | 0.931144 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
322da51e0820f1bb72e55d0a9cb187b9bcde3c32 | 223 | py | Python | LandingPage/forms.py | Mihai925/EduCoding-Legacy | 7c6de105deb186c3442f8d7f9f1b9f99708f8fb6 | [
"MIT"
]
| null | null | null | LandingPage/forms.py | Mihai925/EduCoding-Legacy | 7c6de105deb186c3442f8d7f9f1b9f99708f8fb6 | [
"MIT"
]
| null | null | null | LandingPage/forms.py | Mihai925/EduCoding-Legacy | 7c6de105deb186c3442f8d7f9f1b9f99708f8fb6 | [
"MIT"
]
| null | null | null | __author__ = 'varun'
from django import forms
class ContactUsForm(forms.Form):
name = forms.CharField()
email = forms.CharField()
phone = forms.CharField()
message = forms.CharField(widget=forms.Textarea) | 22.3 | 52 | 0.713004 | 174 | 0.780269 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 0.03139 |
322e21d79121fc682dbbeaf19bfb0822ed607a7a | 4,236 | py | Python | pru/db/geo/geo_admin.py | euctrl-pru/rt-python | da5d0040e250bd159845a0d43bf0b73eab368863 | [
"MIT"
]
| null | null | null | pru/db/geo/geo_admin.py | euctrl-pru/rt-python | da5d0040e250bd159845a0d43bf0b73eab368863 | [
"MIT"
]
| null | null | null | pru/db/geo/geo_admin.py | euctrl-pru/rt-python | da5d0040e250bd159845a0d43bf0b73eab368863 | [
"MIT"
]
| null | null | null | #
# Copyright (c) 2018 Via Technology Ltd. All Rights Reserved.
# Consult your license regarding permissions and restrictions.
#
"""
Administration operations for the geo db.
"""
import os
import socket
import time
from pru.db.geo.geo_init import load_airspace, remove_all_sectors, tear_down
from pru.db.geo.geo_init import load_airports, remove_all_airports
from pru.db.geo.geo_init import load_user_airspace, remove_all_user_defined_sectors
from pru.db.common_init import create as create_db, DB_TYPE_GEO
from pru.db.geo.geo_init import create as create_geo_db
from pru.logger import logger
import pru.db.context as ctx
log = logger(__name__)
def remove_geo_db():
"""
Remove the db
"""
remove_all_sectors()
remove_all_airports()
remove_all_user_defined_sectors()
tear_down()
def create_geo_database():
"""
Create a geo db.
"""
log.info("Starting to create the geo db")
log.info("Waiting for the database to be ready")
log.info(f"Testing connection on host: {ctx.geo_db_hostname} and port {ctx.geo_db_port}")
    # We need to sleep and retry until the db wakes up
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
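    # Poll the database port until it accepts TCP connections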
while True:
try:
s.connect((ctx.geo_db_hostname, int(ctx.geo_db_port)))
s.close()
break
except socket.error as ex:
log.debug("Database not ready..")
time.sleep(5) # 5 seconds between tests
log.info("Geo database is now ready.")
if create_db(DB_TYPE_GEO):
if create_geo_db():
log.info("Geo database creation is complete.")
return True
else:
log.info("Failed to make the airspace db, could not create the tables.")
else:
log.info("Failed to make the airspace db, could not create the database.")
def initialise_airspace(sector_file_path, reset=False):
"""
Uses the provided file path to load the sectors file,
may be csv or geojson.
If no sectors file is found we return false.
Reset=True Remove all and replace with this file.
Reset=False Add these sectors to the sectors table. Note,
this is not an update.
return True if we succeeded
A tuple of (False, message) if we fail
"""
connection = ctx.get_connection(ctx.CONTEXT, ctx.DB_USER)
context = ctx.CONTEXT
if os.path.exists(sector_file_path):
if reset:
remove_all_sectors()
load_airspace(sector_file_path, context, connection)
return True
else:
return (False, "Path not found " + sector_file_path)
def initialise_airports(airports_file_path, reset=False):
"""
Uses the provided file path to load an airports file,
must be csv.
If no airports file is found we return false.
Reset=True Remove all and replace with this file.
Reset=False Add these airports to the sectors table. Note,
this is not an update.
return True if we succeeded
A tuple of (False, message) if we fail
"""
connection = ctx.get_connection(ctx.CONTEXT, ctx.DB_USER)
context = ctx.CONTEXT
if os.path.exists(airports_file_path):
if reset:
remove_all_airports()
load_airports(airports_file_path, context, connection)
return True
else:
return (False, "Path not found " + airports_file_path)
def initialise_user_airspace(user_sector_file_path, reset=False):
"""
Uses the provided file path to load the users sectors file,
may be csv or geojson.
If no sectors file is found we return false.
Reset=True Remove all and replace with this file.
Reset=False Add these sectors to the user sectors table. Note,
this is not an update.
return True if we succeeded
A tuple of (False, message) if we fail
"""
connection = ctx.get_connection(ctx.CONTEXT, ctx.DB_USER)
context = ctx.CONTEXT
if os.path.exists(user_sector_file_path):
if reset:
remove_all_user_defined_sectors()
load_user_airspace(user_sector_file_path, context, connection)
return True
else:
return (False, "Path not found " + user_sector_file_path)
| 31.377778 | 93 | 0.674929 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,890 | 0.446176 |
322f9af92fcd6688ac16683be314d7931fa1f2eb | 4,040 | py | Python | tests/test_autogeometry.py | fabiommendes/easymunk | 420dfc4a006997c47887f6876876249674feb3cd | [
"MIT"
]
| 1 | 2021-07-02T11:59:07.000Z | 2021-07-02T11:59:07.000Z | tests/test_autogeometry.py | fabiommendes/easymunk | 420dfc4a006997c47887f6876876249674feb3cd | [
"MIT"
]
| null | null | null | tests/test_autogeometry.py | fabiommendes/easymunk | 420dfc4a006997c47887f6876876249674feb3cd | [
"MIT"
]
| 1 | 2022-01-14T20:18:35.000Z | 2022-01-14T20:18:35.000Z | from typing import List, Tuple
import easymunk as a
from easymunk import BB, Vec2d
class TestAutoGeometry:
def test_is_closed(self) -> None:
not_closed: List[Tuple[float, float]] = [(0, 0), (1, 1), (0, 1)]
closed: List[Tuple[float, float]] = [(0, 0), (1, 1), (0, 1), (0, 0)]
assert not a.is_closed(not_closed)
assert a.is_closed(closed)
def test_simplify_curves(self) -> None:
p1: List[Tuple[float, float]] = [(0, 0), (0, 10), (5, 11), (10, 10), (0, 10)]
expected = [(0, 0), (0, 10), (10, 10), (0, 10)]
actual = a.simplify_curves(p1, 1)
assert actual == expected
def test_simplify_vertexes(self) -> None:
p1: List[Tuple[float, float]] = [(0, 0), (0, 10), (5, 11), (10, 10), (0, 10)]
expected = [(0, 0), (0, 10), (10, 10), (0, 10)]
actual = a.simplify_vertexes(p1, 1)
assert actual == expected
def test_to_convex_hull(self) -> None:
p1: List[Tuple[float, float]] = [(0, 0), (0, 10), (5, 5), (10, 10), (10, 0)]
expected = [(0, 0), (10, 0), (10, 10), (0, 10), (0, 0)]
actual = a.to_convex_hull(p1, 1)
assert actual == expected
def test_convex_decomposition(self) -> None:
# TODO: Use a more complicated polygon as test case
p1: List[Tuple[float, float]] = [
(0, 0),
(5, 0),
(10, 10),
(20, 20),
(5, 5),
(0, 10),
(0, 0),
]
expected = [
[(5.0, 5.0), (6.25, 2.5), (20.0, 20.0), (5.0, 5.0)],
[(0.0, 0.0), (5.0, 0.0), (6.25, 2.5), (5.0, 5.0), (0.0, 10.0), (0.0, 0.0)],
]
actual = a.convex_decomposition(p1, 0.1)
actual.sort(key=len)
# TODO: The result of convex_decomposition is not stable between
        # environments, so we can't have this assert here.
# assert actual == expected
def test_march_soft(self) -> None:
img = [
" xx ",
" xx ",
" xx ",
" xx ",
" xx ",
" xxxxx",
" xxxxx",
]
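        # Sampling function: 1 where the ASCII art above has an "x", 0 elsewhere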
def sample_func(point: Tuple[float, float]) -> float:
x = int(point[0])
y = int(point[1])
if img[y][x] == "x":
return 1
return 0
pl_set = a.march_soft(BB(0, 0, 6, 6), 7, 7, 0.5, sample_func)
expected = [
[
(1.5, 6.0),
(1.5, 5.0),
(1.5, 4.0),
(1.5, 3.0),
(1.5, 2.0),
(1.5, 1.0),
(1.5, 0.0),
],
[
(3.5, 0.0),
(3.5, 1.0),
(3.5, 2.0),
(3.5, 3.0),
(3.5, 4.0),
(4.0, 4.5),
(5.0, 4.5),
(6.0, 4.5),
],
]
assert list(pl_set) == expected
def test_march_hard(self) -> None:
img = [
" xx ",
" xx ",
" xx ",
" xx ",
" xx ",
" xxxxx",
" xxxxx",
]
def sample_func(point: Tuple[float, float]) -> float:
x = int(point[0])
y = int(point[1])
if img[y][x] == "x":
return 1
return 0
actual = list(a.march_hard(BB(0, 0, 6, 6), 7, 7, 0.5, sample_func))
expected = [
[
(1.5, 6.0),
(1.5, 5.0),
(1.5, 4.0),
(1.5, 3.0),
(1.5, 2.0),
(1.5, 1.0),
(1.5, 0.0),
],
[
(3.5, 0.0),
(3.5, 1.0),
(3.5, 2.0),
(3.5, 3.0),
(3.5, 4.0),
(3.5, 4.5),
(4.0, 4.5),
(5.0, 4.5),
(6.0, 4.5),
],
]
assert actual == expected
| 28.652482 | 87 | 0.366832 | 3,953 | 0.978465 | 0 | 0 | 0 | 0 | 0 | 0 | 323 | 0.07995 |
32305cc75fdcf32a5f4bd15262b34af9e4f413d5 | 384 | py | Python | api/tests/test_app.py | guyskk/purepage | 64e2180b751e787f9fe477f9b212b31c84d34cfb | [
"MIT"
]
| 13 | 2016-05-03T07:56:43.000Z | 2019-08-03T05:58:58.000Z | api/tests/test_app.py | guyskk/purepage | 64e2180b751e787f9fe477f9b212b31c84d34cfb | [
"MIT"
]
| null | null | null | api/tests/test_app.py | guyskk/purepage | 64e2180b751e787f9fe477f9b212b31c84d34cfb | [
"MIT"
]
| 3 | 2016-06-04T12:49:34.000Z | 2019-04-24T08:51:34.000Z |
def test_client(client):
assert client.get("/").status_code == 200
def test_root(root):
me = root.user.get_me()
assert me["id"] == "root"
assert me["role"] == "root"
def test_user(user):
res = user("guyskk", email="[email protected]")
me = res.user.get_me()
assert me["id"] == "guyskk"
assert me["role"] == "normal"
assert "email" not in me
| 21.333333 | 53 | 0.596354 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 87 | 0.226563 |
32306c14bb390e41af15482d3244081bad57ece0 | 13,144 | py | Python | darshan-util/pydarshan/darshan/backend/cffi_backend.py | gaocegege/darshan | 2d54cd8ec96d26db23e9ca421df48d2031a4c55e | [
"mpich2"
]
| null | null | null | darshan-util/pydarshan/darshan/backend/cffi_backend.py | gaocegege/darshan | 2d54cd8ec96d26db23e9ca421df48d2031a4c55e | [
"mpich2"
]
| null | null | null | darshan-util/pydarshan/darshan/backend/cffi_backend.py | gaocegege/darshan | 2d54cd8ec96d26db23e9ca421df48d2031a4c55e | [
"mpich2"
]
| null | null | null | # -*- coding: utf-8 -*-
import cffi
import ctypes
import numpy as np
import pandas as pd
from darshan.api_def_c import load_darshan_header
from darshan.discover_darshan import find_utils
from darshan.discover_darshan import check_version
API_def_c = load_darshan_header()
ffi = cffi.FFI()
ffi.cdef(API_def_c)
libdutil = None
libdutil = find_utils(ffi, libdutil)
def log_open(filename):
"""
Opens a darshan logfile.
Args:
filename (str): Path to a darshan log file
Return:
log handle
"""
b_fname = filename.encode()
handle = libdutil.darshan_log_open(b_fname)
log = {"handle": handle, 'modules': None, 'name_records': None}
return log
def log_close(log):
"""
Closes the logfile and releases allocated memory.
"""
libdutil.darshan_log_close(log['handle'])
#modules = {}
return
def log_get_job(log):
"""
Returns a dictionary with information about the current job.
"""
job = {}
jobrec = ffi.new("struct darshan_job *")
libdutil.darshan_log_get_job(log['handle'], jobrec)
job['uid'] = jobrec[0].uid
job['start_time'] = jobrec[0].start_time
job['end_time'] = jobrec[0].end_time
job['nprocs'] = jobrec[0].nprocs
job['jobid'] = jobrec[0].jobid
mstr = ffi.string(jobrec[0].metadata).decode("utf-8")
md = {}
for kv in mstr.split('\n')[:-1]:
k,v = kv.split('=', maxsplit=1)
md[k] = v
job['metadata'] = md
return job
def log_get_exe(log):
"""
Get details about the executable (path and arguments)
Args:
log: handle returned by darshan.open
Return:
        string: executable path and arguments
"""
exestr = ffi.new("char[]", 4096)
libdutil.darshan_log_get_exe(log['handle'], exestr)
return ffi.string(exestr).decode("utf-8")
def log_get_mounts(log):
"""
Returns a list of available mounts recorded for the log.
Args:
log: handle returned by darshan.open
"""
mntlst = []
mnts = ffi.new("struct darshan_mnt_info **")
cnt = ffi.new("int *")
libdutil.darshan_log_get_mounts(log['handle'], mnts, cnt)
for i in range(0, cnt[0]):
mntlst.append((ffi.string(mnts[0][i].mnt_path).decode("utf-8"),
ffi.string(mnts[0][i].mnt_type).decode("utf-8")))
return mntlst
def log_get_modules(log):
"""
Return a dictionary containing available modules including information
about the contents available for each module in the current log.
Args:
log: handle returned by darshan.open
Return:
dict: Modules with additional info for current log.
"""
# use cached module index if already present
if log['modules'] != None:
return log['modules']
modules = {}
mods = ffi.new("struct darshan_mod_info **")
cnt = ffi.new("int *")
libdutil.darshan_log_get_modules(log['handle'], mods, cnt)
for i in range(0, cnt[0]):
modules[ffi.string(mods[0][i].name).decode("utf-8")] = \
{'len': mods[0][i].len, 'ver': mods[0][i].ver, 'idx': mods[0][i].idx}
# add to cache
log['modules'] = modules
return modules
def log_get_name_records(log):
"""
    Return a dictionary resolving hash to string (typically a filepath).
Args:
log: handle returned by darshan.open
hash: hash-value (a number)
Return:
dict: the name records
"""
    # use cached name_records if already present
if log['name_records'] != None:
return log['name_records']
name_records = {}
nrecs = ffi.new("struct darshan_name_record **")
cnt = ffi.new("int *")
libdutil.darshan_log_get_name_records(log['handle'], nrecs, cnt)
for i in range(0, cnt[0]):
name_records[nrecs[0][i].id] = ffi.string(nrecs[0][i].name).decode("utf-8")
# add to cache
log['name_records'] = name_records
return name_records
def log_lookup_name_records(log, ids=[]):
"""
    Resolve a single hash to its name record string (typically a filepath).
Args:
log: handle returned by darshan.open
hash: hash-value (a number)
Return:
dict: the name records
"""
name_records = {}
#cids = ffi.new("darshan_record_id *") * len(ids)
whitelist = (ctypes.c_ulonglong * len(ids))(*ids)
whitelist_cnt = len(ids)
whitelistp = ffi.from_buffer(whitelist)
nrecs = ffi.new("struct darshan_name_record **")
cnt = ffi.new("int *")
libdutil.darshan_log_get_filtered_name_records(log['handle'], nrecs, cnt, ffi.cast("darshan_record_id *", whitelistp), whitelist_cnt)
for i in range(0, cnt[0]):
name_records[nrecs[0][i].id] = ffi.string(nrecs[0][i].name).decode("utf-8")
# add to cache
log['name_records'] = name_records
return name_records
def log_get_dxt_record(log, mod_name, mod_type, reads=True, writes=True, mode='dict'):
"""
Returns a dictionary holding a dxt darshan log record.
Args:
log: Handle returned by darshan.open
mod_name (str): Name of the Darshan module
mod_type (str): String containing the C type
Return:
dict: generic log record
Example:
        The typical darshan log record provides two arrays, one for integer counters
and one for floating point counters:
>>> darshan.log_get_dxt_record(log, "DXT_POSIX", "struct dxt_file_record **")
{'rank': 0, 'read_count': 11, 'read_segments': array([...]), ...}
"""
modules = log_get_modules(log)
#name_records = log_get_name_records(log)
rec = {}
buf = ffi.new("void **")
r = libdutil.darshan_log_get_record(log['handle'], modules[mod_name]['idx'], buf)
if r < 1:
return None
filerec = ffi.cast(mod_type, buf)
clst = []
rec['id'] = filerec[0].base_rec.id
rec['rank'] = filerec[0].base_rec.rank
rec['hostname'] = ffi.string(filerec[0].hostname).decode("utf-8")
#rec['filename'] = name_records[rec['id']]
wcnt = filerec[0].write_count
rcnt = filerec[0].read_count
rec['write_count'] = wcnt
rec['read_count'] = rcnt
rec['write_segments'] = []
rec['read_segments'] = []
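    # The per-operation trace segments are packed directly after the fixed-size record header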
size_of = ffi.sizeof("struct dxt_file_record")
segments = ffi.cast("struct segment_info *", buf[0] + size_of )
for i in range(wcnt):
seg = {
"offset": segments[i].offset,
"length": segments[i].length,
"start_time": segments[i].start_time,
"end_time": segments[i].end_time
}
rec['write_segments'].append(seg)
for i in range(rcnt):
i = i + wcnt
seg = {
"offset": segments[i].offset,
"length": segments[i].length,
"start_time": segments[i].start_time,
"end_time": segments[i].end_time
}
rec['read_segments'].append(seg)
if mode == "pandas":
rec['read_segments'] = pd.DataFrame(rec['read_segments'])
rec['write_segments'] = pd.DataFrame(rec['write_segments'])
return rec
def log_get_generic_record(log, mod_name, mod_type, mode='numpy'):
"""
Returns a dictionary holding a generic darshan log record.
Args:
log: Handle returned by darshan.open
mod_name (str): Name of the Darshan module
mod_type (str): String containing the C type
Return:
dict: generic log record
Example:
        The typical darshan log record provides two arrays, one for integer counters
and one for floating point counters:
>>> darshan.log_get_generic_record(log, "POSIX", "struct darshan_posix_file **")
{'counters': array([...], dtype=int64), 'fcounters': array([...])}
"""
modules = log_get_modules(log)
rec = {}
buf = ffi.new("void **")
r = libdutil.darshan_log_get_record(log['handle'], modules[mod_name]['idx'], buf)
if r < 1:
return None
rbuf = ffi.cast(mod_type, buf)
rec['id'] = rbuf[0].base_rec.id
rec['rank'] = rbuf[0].base_rec.rank
clst = []
for i in range(0, len(rbuf[0].counters)):
clst.append(rbuf[0].counters[i])
rec['counters'] = np.array(clst, dtype=np.int64)
cdict = dict(zip(counter_names(mod_name), rec['counters']))
flst = []
for i in range(0, len(rbuf[0].fcounters)):
flst.append(rbuf[0].fcounters[i])
rec['fcounters'] = np.array(flst, dtype=np.float64)
fcdict = dict(zip(fcounter_names(mod_name), rec['fcounters']))
if mode == "dict":
rec = {'counters': cdict, 'fcounter': fcdict}
if mode == "pandas":
rec = {
'counters': pd.DataFrame(cdict, index=[0]),
'fcounters': pd.DataFrame(fcdict, index=[0])
}
return rec
def counter_names(mod_name, fcnts=False):
"""
Returns a list of available counter names for the module.
By default only integer counter names are listed, unless fcnts is set to
true in which case only the floating point counter names are listed.
Args:
mod_name (str): Name of the module to return counter names.
fcnts (bool): Switch to request floating point counters instead of integer. (Default: False)
Return:
list: Counter names as strings.
"""
if mod_name == 'MPI-IO':
mod_name = 'MPIIO'
names = []
i = 0
if fcnts:
F = "f_"
else:
F = ""
end = "{0}_{1}NUM_INDICES".format(mod_name.upper(), F.upper())
var_name = "{0}_{1}counter_names".format(mod_name.lower(), F.lower())
while True:
try:
var = getattr(libdutil, var_name)
except:
var = None
if not var:
return None
name = ffi.string(var[i]).decode("utf-8")
if name == end:
break
names.append(name)
i += 1
return names
def fcounter_names(mod_name):
"""
Returns a list of available floating point counter names for the module.
Args:
mod_name (str): Name of the module to return counter names.
Return:
        list: Available floating point counter names as strings.
"""
return counter_names(mod_name, fcnts=True)
def log_get_bgq_record(log):
"""
Returns a darshan log record for BG/Q.
Args:
log: handle returned by darshan.open
"""
return log_get_generic_record(log, "BG/Q", "struct darshan_bgq_record **")
def log_get_hdf5_file_record(log):
"""
Returns a darshan log record for an HDF5 file.
Args:
log: handle returned by darshan.open
"""
return log_get_generic_record(log, "H5F", "struct darshan_hdf5_file **")
def log_get_hdf5_dataset_record(log):
"""
Returns a darshan log record for an HDF5 dataset.
Args:
log: handle returned by darshan.open
"""
return log_get_generic_record(log, "H5D", "struct darshan_hdf5_dataset **")
def log_get_lustre_record(log):
"""
Returns a darshan log record for Lustre.
Args:
log: handle returned by darshan.open
"""
modules = log_get_modules(log)
rec = {}
buf = ffi.new("void **")
r = libdutil.darshan_log_get_record(log['handle'], modules['LUSTRE']['idx'], buf)
if r < 1:
return None
rbuf = ffi.cast("struct darshan_lustre_record **", buf)
rec['id'] = rbuf[0].base_rec.id
rec['rank'] = rbuf[0].base_rec.rank
clst = []
for i in range(0, len(rbuf[0].counters)):
clst.append(rbuf[0].counters[i])
rec['counters'] = np.array(clst, dtype=np.int64)
cdict = dict(zip(counter_names('LUSTRE'), rec['counters']))
# FIXME
ostlst = []
for i in range(0, cdict['LUSTRE_STRIPE_WIDTH']):
print(rbuf[0].ost_ids[i])
rec['ost_ids'] = np.array(ostlst, dtype=np.int64)
print(rec['ost_ids'])
sys.exit()
if mode == "dict":
rec = {'counters': cdict, 'fcounter': fcdict}
if mode == "pandas":
rec = {
'counters': pd.DataFrame(cdict, index=[0]),
'fcounters': pd.DataFrame(fcdict, index=[0])
}
return rec
def log_get_mpiio_record(log):
"""
Returns a darshan log record for MPI-IO.
Args:
log: handle returned by darshan.open
Returns:
dict: log record
"""
return log_get_generic_record(log, "MPI-IO", "struct darshan_mpiio_file **")
def log_get_pnetcdf_record(log):
"""
Returns a darshan log record for PnetCDF.
Args:
log: handle returned by darshan.open
Returns:
dict: log record
"""
return log_get_generic_record(log, "PNETCDF", "struct darshan_pnetcdf_file **")
def log_get_posix_record(log):
"""
    Returns a darshan log record for POSIX.
Args:
log: handle returned by darshan.open
Returns:
dict: log record
"""
return log_get_generic_record(log, "POSIX", "struct darshan_posix_file **")
def log_get_stdio_record(log):
"""
Returns a darshan log record for STDIO.
Args:
log: handle returned by darshan.open
Returns:
dict: log record
"""
return log_get_generic_record(log, "STDIO", "struct darshan_stdio_file **")
| 24.295749 | 137 | 0.614197 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,012 | 0.457395 |
32316e929a4d5ae59c28e0cfefeaa04b18e91623 | 1,017 | py | Python | authApp/views/userDetailView.py | juan-skill/django_vue_bank | 109f3b84086f4520a5220c311d9d3403a7adc3a2 | [
"MIT"
]
| null | null | null | authApp/views/userDetailView.py | juan-skill/django_vue_bank | 109f3b84086f4520a5220c311d9d3403a7adc3a2 | [
"MIT"
]
| null | null | null | authApp/views/userDetailView.py | juan-skill/django_vue_bank | 109f3b84086f4520a5220c311d9d3403a7adc3a2 | [
"MIT"
]
| null | null | null | from django.conf import settings
from rest_framework import generics, status
from rest_framework.response import Response
from rest_framework_simplejwt.backends import TokenBackend
from rest_framework.permissions import IsAuthenticated
from authApp.models.user import User
from authApp.serializers.userSerializer import UserSerializer
class UserDetailView(generics.RetrieveAPIView):
queryset = User.objects.all()
serializer_class = UserSerializer
permission_classes = (IsAuthenticated,)
def get(self, request, *args, **kwargs):
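        # Decode the JWT from the Authorization header and ensure it belongs to the requested user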
token = request.META.get('HTTP_AUTHORIZATION')[7:]
tokenBackend = TokenBackend(algorithm=settings.SIMPLE_JWT['ALGORITHM'])
valid_data = tokenBackend.decode(token,verify=False)
if valid_data['user_id'] != kwargs['pk']:
stringResponse = {'detail':'Unauthorized Request'}
return Response(stringResponse, status=status.HTTP_401_UNAUTHORIZED)
return super().get(request, *args, **kwargs)
| 39.115385 | 80 | 0.738446 | 680 | 0.668633 | 0 | 0 | 0 | 0 | 0 | 0 | 74 | 0.072763 |
32363a369f2abd8123a3c352cf5267f2cd8f6e3e | 882 | py | Python | pluggklockan.py | Vforsh03/Pluggklockan | 845dbe82476ad3ecd8664b7cd99ce74311b92830 | [
"MIT"
]
| null | null | null | pluggklockan.py | Vforsh03/Pluggklockan | 845dbe82476ad3ecd8664b7cd99ce74311b92830 | [
"MIT"
]
| null | null | null | pluggklockan.py | Vforsh03/Pluggklockan | 845dbe82476ad3ecd8664b7cd99ce74311b92830 | [
"MIT"
]
| null | null | null | import time
def countdown(time_sec, to_do):
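    # Count down second by second, rewriting the same line, then print the to-do list when time runs out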
while time_sec:
mins, secs = divmod(time_sec, 60)
timeformat = '{:02d}:{:02d}'.format(mins, secs)
print(timeformat, end='\r')
time.sleep(1)
time_sec -= 1
if time_sec == 0:
print("Det här har du att göra: ")
for sak in to_do:
print(sak)
def main():
to_do = []
saker = int(input("Hur många saker ska du lägga till på listan?: "))
for _ in range(saker):
to_do.append(input("Sak: "))
while len(to_do) > 0:
tid = int(input("Hur många sekunder vill du tima: "))
countdown(tid, to_do)
to_do.remove(input("Vilken sak vill du ta bort? "))
print(to_do)
if len(to_do) == 0:
print("Du har inget att göra, gör vad fan du vill")
if __name__ == "__main__":
main()
| 25.941176 | 73 | 0.538549 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 228 | 0.25618 |
32364b003eb60db5ffb76e4251c347561207ed8b | 1,397 | py | Python | gallery/views.py | mkbeh/Site-Nordic-Walking- | ba98f41db09ed448ecc4db175f65ef4fa2d64979 | [
"MIT"
]
| null | null | null | gallery/views.py | mkbeh/Site-Nordic-Walking- | ba98f41db09ed448ecc4db175f65ef4fa2d64979 | [
"MIT"
]
| 8 | 2021-04-08T21:57:55.000Z | 2022-03-12T00:50:38.000Z | gallery/views.py | mkbeh/Site-Nordic-Walking- | ba98f41db09ed448ecc4db175f65ef4fa2d64979 | [
"MIT"
]
| null | null | null | from django.shortcuts import render, get_object_or_404
from django.views.decorators.cache import cache_page
from .models import PhotoAlbum, VideoAlbum
from blog.utils import get_pagination_page
def albums_list(request):
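    # Pick the album model and page title from the URL segment ('photo' or 'video')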
album_specific_data = {'photo': (PhotoAlbum, 'Фото альбомы'), 'video': (VideoAlbum, 'Видео альбомы')}
album_type = request.path.split('/')[2]
album_obj, album_type = album_specific_data.get(album_type)
albums = album_obj.objects.all().order_by('-created')
page = get_pagination_page(request, albums)
return render(
request,
'gallery/album.html',
{'albums': page.object_list, 'page': page, 'album_type': album_type}
)
@cache_page(10*60)
def album_detail(request, album_type, album_name):
album_specific_data = {'photo': (PhotoAlbum, 50), 'video': (VideoAlbum, 4)}
album_obj, num_pages = album_specific_data.get(album_type)
obj = get_object_or_404(album_obj, name=album_name)
if album_type == 'photo':
files = obj.images_set.all()
template = 'gallery/photo_detail.html'
else:
files = obj.videos_set.all()
template = 'gallery/video_detail.html'
page = get_pagination_page(request, files, num_pages)
return render(
request,
template,
{'album_name': album_name, 'files': page.object_list, 'page': page, 'total_files': len(files)}
)
| 32.488372 | 105 | 0.689334 | 0 | 0 | 0 | 0 | 700 | 0.492958 | 0 | 0 | 238 | 0.167606 |
3236d1e8e71e93e12b492398d92736947474b9fb | 2,134 | py | Python | test/test_post.py | enjoy233/zhihu-py3 | bcb4aa8325f8b54d3b44bd0bdc959edd9761fcfc | [
"MIT"
]
| 1,321 | 2015-02-16T13:19:42.000Z | 2022-03-25T15:03:58.000Z | test/test_post.py | fru1tw4ter/zhihu-py3 | bcb4aa8325f8b54d3b44bd0bdc959edd9761fcfc | [
"MIT"
]
| 64 | 2015-07-03T12:30:08.000Z | 2022-03-01T00:55:50.000Z | test/test_post.py | fru1tw4ter/zhihu-py3 | bcb4aa8325f8b54d3b44bd0bdc959edd9761fcfc | [
"MIT"
]
| 551 | 2015-02-22T11:21:40.000Z | 2022-03-25T13:22:13.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import print_function, division, unicode_literals
import unittest
import os
import json
from zhihu import Post
from test_utils import TEST_DATA_PATH
class ColumnTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
url = 'http://zhuanlan.zhihu.com/xiepanda/20202275'
post_path = os.path.join(TEST_DATA_PATH, 'column_post.json')
with open(post_path, 'r') as f:
post_json = json.load(f)
post_saved_path = os.path.join(TEST_DATA_PATH, 'post.md')
with open(post_saved_path, 'rb') as f:
cls.post_saved = f.read()
cls.post = Post(url)
cls.post.soup = post_json
cls.expected = {'column_in_name': 'xiepanda', 'slug': 20202275,
'column_name': '谢熊猫出没注意',
'author_name': '谢熊猫君', 'author_id': 'xiepanda',
'title': '为了做一个称职的吃货,他决定连着吃一百天转基因食物',
'upvote_num': 963, 'comment_num': 199}
def test_column_in_name(self):
self.assertEqual(self.expected['column_in_name'],
self.post.column_in_name)
def test_slug(self):
self.assertEqual(self.expected['slug'], self.post.slug)
def test_author(self):
self.assertEqual(self.expected['author_name'], self.post.author.name)
self.assertEqual(self.expected['author_id'], self.post.author.id)
def test_title(self):
self.assertEqual(self.expected['title'], self.post.title)
def test_upvote_num(self):
self.assertEqual(self.expected['upvote_num'], self.post.upvote_num)
def test_comment_num(self):
self.assertEqual(self.expected['comment_num'], self.post.comment_num)
def test_save(self):
save_name = 'post_save'
self.post.save(filepath=TEST_DATA_PATH, filename=save_name)
post_saved_path = os.path.join(TEST_DATA_PATH, save_name + '.md')
with open(post_saved_path, 'rb') as f:
post_saved = f.read()
os.remove(post_saved_path)
self.assertEqual(self.post_saved, post_saved)
| 34.419355 | 77 | 0.638238 | 1,989 | 0.901632 | 0 | 0 | 851 | 0.385766 | 0 | 0 | 447 | 0.202629 |
32370b765a15f6662dcf75810cbf2bc84feab958 | 69 | py | Python | tensorflow_toolkit/lpr/lpr/__init__.py | morkovka1337/openvino_training_extensions | 846db45c264d6b061505213f51763520b9432ba9 | [
"Apache-2.0"
]
| 256 | 2020-09-09T03:27:57.000Z | 2022-03-30T10:06:06.000Z | tensorflow_toolkit/lpr/lpr/__init__.py | morkovka1337/openvino_training_extensions | 846db45c264d6b061505213f51763520b9432ba9 | [
"Apache-2.0"
]
| 604 | 2020-09-08T12:29:49.000Z | 2022-03-31T21:51:08.000Z | tensorflow_toolkit/lpr/lpr/__init__.py | morkovka1337/openvino_training_extensions | 846db45c264d6b061505213f51763520b9432ba9 | [
"Apache-2.0"
]
| 160 | 2020-09-09T14:06:07.000Z | 2022-03-30T14:50:48.000Z | from tfutils.helpers import import_transformer
import_transformer()
| 17.25 | 46 | 0.869565 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
3239f81ec2f0770c90334bbc02e94fc7a5de13e9 | 354 | py | Python | torch_lazy/nn/__init__.py | simaki/torch-lazy | e3ce23b118bdf36a019c029a67bf5ec84f89a4d7 | [
"BSD-3-Clause"
]
| null | null | null | torch_lazy/nn/__init__.py | simaki/torch-lazy | e3ce23b118bdf36a019c029a67bf5ec84f89a4d7 | [
"BSD-3-Clause"
]
| 18 | 2021-04-01T08:24:48.000Z | 2022-03-28T20:18:28.000Z | torch_lazy/nn/__init__.py | simaki/torch-lazy | e3ce23b118bdf36a019c029a67bf5ec84f89a4d7 | [
"BSD-3-Clause"
]
| 1 | 2021-07-22T19:29:12.000Z | 2021-07-22T19:29:12.000Z | from .modules.linear import LazyBilinear
from .modules.mlp import MLP
from .modules.mlp import LazyMLP
from .modules.normalization import LazyBatchNorm
from .modules.normalization import LazyBatchNorm1d
from .modules.normalization import LazyBatchNorm2d
from .modules.normalization import LazyBatchNorm3d
from .modules.normalization import LazyLayerNorm
| 39.333333 | 50 | 0.864407 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
323a9cf1657540b38e66a69c1561146bd14bceb9 | 874 | py | Python | functest/lmfunctest.py | mitsuba-rei/lightmetrica-v3 | db5b7d5a9a245fb7c0d25124433c38d09b62813e | [
"MIT"
]
| 1 | 2019-11-20T13:24:58.000Z | 2019-11-20T13:24:58.000Z | functest/lmfunctest.py | mitsuba-rei/lightmetrica-v3 | db5b7d5a9a245fb7c0d25124433c38d09b62813e | [
"MIT"
]
| null | null | null | functest/lmfunctest.py | mitsuba-rei/lightmetrica-v3 | db5b7d5a9a245fb7c0d25124433c38d09b62813e | [
"MIT"
]
| null | null | null | import sys
import json
import numpy as np
import imageio
from argparse import Namespace
def loadenv(config_path):
"""Load configuration file of Lightmetrica environment"""
# Open config file
with open(config_path) as f:
config = json.load(f)
# Add root directory and binary directory to sys.path
if config['path'] not in sys.path:
sys.path.insert(0, config['path'])
if config['bin_path'] not in sys.path:
sys.path.insert(0, config['bin_path'])
return Namespace(**config)
# Environment configuration
env = loadenv('.lmenv')
def save(path, img):
"""Save image"""
imageio.imwrite(path, np.clip(np.power(img, 1/2.2) * 256, 0, 255).astype(np.uint8))
def rmse(img1, img2):
return np.sqrt(np.mean((img1 - img2) ** 2))
def rmse_pixelwised(img1, img2):
return np.sqrt(np.sum((img1 - img2) ** 2, axis=2) / 3) | 26.484848 | 87 | 0.662471 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 211 | 0.241419 |
323ae527f5aea6328f8ca830f729b3e6114a8c51 | 503 | py | Python | algorithm implement (python)/mergesort.py | yedkk/algorithm-design | 433b70e8302ec91b74542e9144dd93fdb5b0f8d3 | [
"MIT"
]
| 2 | 2021-06-01T02:31:06.000Z | 2021-06-01T02:39:45.000Z | algorithm implement (python)/mergesort.py | yedkk/algorithm-design | 433b70e8302ec91b74542e9144dd93fdb5b0f8d3 | [
"MIT"
]
| null | null | null | algorithm implement (python)/mergesort.py | yedkk/algorithm-design | 433b70e8302ec91b74542e9144dd93fdb5b0f8d3 | [
"MIT"
]
| null | null | null | def getArray():
line = input()
line = line.strip().split(' ')[1:]
s = []
for x in line:
s.append(int(x))
return s
def merge(s1, s2):
n1 = len(s1)
n2 = len(s2)
p1 = 0
p2 = 0
s = []
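    # Merge step: repeatedly take the smaller head element until both input lists are exhausted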
while(p1 < n1 or p2 < n2):
if(p1 < n1 and (p2 >= n2 or s1[p1] < s2[p2])):
s.append(s1[p1])
p1 += 1
else:
s.append(s2[p2])
p2 += 1
return s
def output(s):
print (len(s), end = ' ')
print (' '.join(map(str, s)), end = '')
s1 = getArray()
s2 = getArray()
s = merge(s1, s2)
output(s)
| 13.236842 | 48 | 0.508946 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 0.021869 |
323b7d2cb5ec3fee745d90ccfecbe50bdd67fcc2 | 1,276 | py | Python | src/CSVtoJSON.py | CloudSevenConsulting/DustyDynamo | 335e9a2efc71ccf42cf9dfc7c13fcf62cd5d9453 | [
"MIT"
]
| null | null | null | src/CSVtoJSON.py | CloudSevenConsulting/DustyDynamo | 335e9a2efc71ccf42cf9dfc7c13fcf62cd5d9453 | [
"MIT"
]
| null | null | null | src/CSVtoJSON.py | CloudSevenConsulting/DustyDynamo | 335e9a2efc71ccf42cf9dfc7c13fcf62cd5d9453 | [
"MIT"
]
| null | null | null | import csv
import json
from pprint import pprint
import os
stockData = ['RIO']
for i in range(0,len(stockData)):
csvfile = open(stockData[i]+'.csv', 'r')
fieldnames = ("NetworkTime","StockID","Open","High", "Low", "Close", "Adj Close", "Volume")
reader = csv.DictReader( csvfile, fieldnames)
data = open(stockData[i]+'.json', 'w')
data.write('[\n')
for row in reader:
data.write('{ \n' \
+ '"MoteTimestamp": "%s",' %row['NetworkTime'] \
+ '\n"MoteID": %s,' %row['StockID'] \
+ '\n "StockData":{' \
+ '\n "OpenPrice": %s,' %row['Open'] \
+ '\n "HighPrice": %s,' %row['High'] \
+ '\n "LowPrice": %s,' %row['Low'] \
+ '\n "ClosePrice": %s,' %row['Close'] \
+ '\n "Adj Close": %s,' %row['Adj Close'] \
+ '\n "VolumeNumber": %s' %row['Volume'] \
+ '\n }' \
+ '\n},\n'
)
data.close()
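    # Remove the trailing "," separator left after the last record (assumes Windows-style line endings) before closing the JSON array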
with open(stockData[i]+'.json', 'rb+') as filehandle:
filehandle.seek(-3, os.SEEK_END)
filehandle.truncate()
filehandle.close()
with open(stockData[i]+'.json', 'a') as filehandle:
filehandle.write("\n]")
| 29.674419 | 95 | 0.462382 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 409 | 0.320533 |
323d0642bd0b2e71b6ea4028021ab212c0e0889f | 700 | py | Python | core/api.py | rastos/Mi-Fit-and-Zepp-workout-exporter | e05dd7321b71dff6a4e2f4794d0d66d4eee2cbfa | [
"MIT"
]
| 13 | 2021-04-13T14:27:58.000Z | 2022-02-09T18:32:37.000Z | core/api.py | rastos/Mi-Fit-and-Zepp-workout-exporter | e05dd7321b71dff6a4e2f4794d0d66d4eee2cbfa | [
"MIT"
]
| 3 | 2021-06-03T20:27:34.000Z | 2021-06-04T06:24:18.000Z | core/api.py | rastos/Mi-Fit-and-Zepp-workout-exporter | e05dd7321b71dff6a4e2f4794d0d66d4eee2cbfa | [
"MIT"
]
| 2 | 2021-06-03T20:29:54.000Z | 2021-08-13T22:28:59.000Z | import requests
class Api:
def __init__(self, token):
self.token = token
def get_history(self):
r = requests.get('https://api-mifit-de2.huami.com/v1/sport/run/history.json', headers={
'apptoken': self.token
}, params={
'source': 'run.mifit.huami.com',
})
r.raise_for_status()
return r.json()
def get_detail(self, track_id, source):
r = requests.get('https://api-mifit-de2.huami.com/v1/sport/run/detail.json', headers={
'apptoken': self.token
}, params={
'trackid': track_id,
'source': source,
})
r.raise_for_status()
return r.json()
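# Minimal usage sketch. The token below is a placeholder: a real "apptoken" has to be
# obtained from the Mi Fit / Zepp login flow, which this module does not cover.
if __name__ == '__main__':
    api = Api('PASTE_APP_TOKEN_HERE')
    print(api.get_history())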
| 24.137931 | 95 | 0.547143 | 681 | 0.972857 | 0 | 0 | 0 | 0 | 0 | 0 | 183 | 0.261429 |
323d1294966a8fc8cdc72a192c1cd2b6b80bbc84 | 1,431 | py | Python | lib/tools/tools_watch_cub.py | galena503/SCR | d5b6581808b4f2fac775e7ff48b3eef548164ca1 | [
"MIT"
]
| null | null | null | lib/tools/tools_watch_cub.py | galena503/SCR | d5b6581808b4f2fac775e7ff48b3eef548164ca1 | [
"MIT"
]
| null | null | null | lib/tools/tools_watch_cub.py | galena503/SCR | d5b6581808b4f2fac775e7ff48b3eef548164ca1 | [
"MIT"
]
| null | null | null |
import time,sys,os
import subprocess
class Tools_watch_sub():
def check_sub_process(self, SCR, SCRfield):
subs = SCR['sub_process']
command = SCRfield['command']
        cmd_l = ['tasklist','/fo','csv']
        bash_l = ['ps', '-ef']  # assumed definition; 'bash_l' was undefined in the original
pid_set = set('')
if command == 'bash':
ps_str = subprocess.Popen(bash_l, shell=True, stdout=subprocess.PIPE)
elif command == 'cmd':
ps_str = subprocess.Popen(cmd_l, shell=True, stdout=subprocess.PIPE)
for line in ps_str.stdout:
hexline = str(line)
pid = hexline.replace('\"','').split(',')[1]
pid_set.add(pid)
if pid == "PID":
continue
else:
if int(pid) in subs:
SCR.state[pid] = 1
SCR['sub_process'] = subs & pid_set
return SCR
def task_throw_sub(self, count):
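        # NOTE: unfinished helper: it spawns a single process listing and then idles;
        # the matching logic was left commented out at the bottom of this file.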
st = 0
cmd = ['tasklist','/fo','csv']
subs = set('')
        # for the Windows case
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
while True:
if st == 0:
st = 1
time.sleep(1)
#for sub_pid in subs:
# ps_line = line.split(',').replace('\"','')
# if str(sub_pid) == :
#        print(str(sub_pid) + ' found it!')
#print(os.getpid())
# log = popen_obj.returncode
#print(log)
#print(type(popen_obj.communicate()))
#print(popen_obj.communicate()) | 23.459016 | 81 | 0.533892 | 856 | 0.577988 | 0 | 0 | 0 | 0 | 0 | 0 | 436 | 0.294396 |
323de0cd069365ae5cc57c4534ae993e3a17cc39 | 7,616 | py | Python | Server/Python/tests/dbsserver_t/unittests/web_t/DBSMigrateModel_t.py | vkuznet/DBS | 14df8bbe8ee8f874fe423399b18afef911fe78c7 | [
"Apache-2.0"
]
| 8 | 2015-08-14T04:01:32.000Z | 2021-06-03T00:56:42.000Z | Server/Python/tests/dbsserver_t/unittests/web_t/DBSMigrateModel_t.py | yuyiguo/DBS | 14df8bbe8ee8f874fe423399b18afef911fe78c7 | [
"Apache-2.0"
]
| 162 | 2015-01-07T21:34:47.000Z | 2021-10-13T09:42:41.000Z | Server/Python/tests/dbsserver_t/unittests/web_t/DBSMigrateModel_t.py | yuyiguo/DBS | 14df8bbe8ee8f874fe423399b18afef911fe78c7 | [
"Apache-2.0"
]
| 16 | 2015-01-22T15:27:29.000Z | 2021-04-28T09:23:28.000Z | #!/usr/bin/env python
"""
DBS 3 Migrate REST model unittests
The DBS3 Migration Service must be stopped before executing the unittests. In addition, make sure
that no other instance is running on the same DB; otherwise individual unittests may fail due to
race conditions with the DBS3 Migration Service.
"""
from dbsserver_t.utils.DBSRestApi import DBSRestApi
from dbsserver_t.utils.DBSDataProvider import DBSBlockDataProvider, create_child_data_provider
from dbsserver_t.utils.TestTools import expectedFailure
from itertools import chain
import os
import socket
import unittest
class DBSMigrateModel_t(unittest.TestCase):
_data_provider = None
_saved_data = {}
def __init__(self, methodName='runTest'):
super(DBSMigrateModel_t, self).__init__(methodName)
if not self._data_provider:
self.setUpClass()
@classmethod
def setUpClass(cls):
cls._data_provider = DBSBlockDataProvider(num_of_blocks=1, num_of_files=10, num_of_runs=10, num_of_lumis=10)
### According to https://svnweb.cern.ch/trac/CMSDMWM/ticket/4068, blocks and dataset migration should use
### separate input data. _independent(_child)_data_provider will provide them.
cls._independent_data_provider = DBSBlockDataProvider(num_of_blocks=5, num_of_files=10, num_of_runs=10,
num_of_lumis=10)
cls._parent_data_provider = DBSBlockDataProvider(num_of_blocks=1, num_of_files=10,
num_of_runs=10, num_of_lumis=10)
cls._child_data_provider = create_child_data_provider(cls._parent_data_provider)
cls._independent_child_data_provider = create_child_data_provider(cls._independent_data_provider)
config = os.environ['DBS_TEST_CONFIG']
service = os.environ.get("DBS_TEST_SERVICE", "DBSMigrate")
#Use one specific database cms_dbs3_local3@int2r for migration unittests when migration_test=True
cls._migrate_api = DBSRestApi(config, service, migration_test=True)
cls._migration_url = 'https://%s/dbs/dev/global/DBSWriter' % (socket.getfqdn())
cls._writer_api = DBSRestApi(config, 'DBSWriter')
def setUp(self):
pass
@expectedFailure
def test_01_migration_removal(self):
"""test01: Clean-up old migration requests. Test to remove migration requests between different DBS instances\n"""
for status in sorted(self._migrate_api.list('status'), key=lambda status: status['migration_request_id']):
data = {'migration_rqst_id': status['migration_request_id']}
if status['migration_status'] in (0, 3, 9) and status['create_by'] == os.getlogin():
self._migrate_api.insert('remove', data)
else:
self.assertRaises(Exception, self._migrate_api.insert, 'remove', data)
def test_02_migration_request(self):
"""test02: Negative test to request a migration between different DBS instances before injecting data.
This is a negative test because the block was not inserted into the source DB.\n"""
for block_name in (block['block']['block_name'] for block in self._child_data_provider.block_dump()):
toMigrate = {'migration_url' : self._migration_url,
'migration_input' : block_name}
self.assertRaises(Exception, self._migrate_api.insert, 'submit', toMigrate)
def test_03_insert_data_to_migrate(self):
"""test03: Insert data to migrate into source DBS instance. This is has to be done for the next several tests.\n"""
for block in chain(self._data_provider.block_dump(),
self._independent_data_provider.block_dump(),
self._parent_data_provider.block_dump(),
self._child_data_provider.block_dump(),
self._independent_child_data_provider.block_dump()):
self._writer_api.insert('bulkblocks', block)
def test_04_migration_request(self):
"""test04: Test to request a migration between different DBS instances by block.\n"""
for block_name in (block['block']['block_name'] for block in self._child_data_provider.block_dump()):
toMigrate = {'migration_url' : self._migration_url,
'migration_input' : block_name}
result = self._migrate_api.insert('submit', toMigrate)
self._saved_data.setdefault('migration_rqst_ids', []).append(result['migration_details']['migration_request_id'])
self._saved_data.setdefault('migration_inputs', []).append(block_name)
def test_05_migration_request(self):
"""test05: Test to request a migration between different DBS instances by dataset.\n"""
datasets = set((block['dataset']['dataset']
for block in chain(self._child_data_provider.block_dump(),
self._independent_child_data_provider.block_dump())))
for dataset in datasets:
toMigrate = {'migration_url' : self._migration_url,
'migration_input' : dataset}
result = self._migrate_api.insert('submit', toMigrate)
self._saved_data.setdefault('migration_rqst_ids', []).append(result['migration_details']['migration_request_id'])
def test_06_migration_status(self):
"""test06: Test to check the status of an ongoing migration between different DBS instances by id. \n"""
status = self._migrate_api.list('status')
self.assertTrue(isinstance(status, list))
for migration_rqst_id in self._saved_data['migration_rqst_ids']:
status = self._migrate_api.list('status', migration_rqst_id)
self.assertEqual(len(status), 1)
def test_07_migration_status(self):
"""test07: Test to check the status of an ongoing migration between different DBS instances by block. \n"""
for migration_input in self._saved_data['migration_inputs']:
status = self._migrate_api.list('status', block_name=migration_input)
self.assertEqual(len(status), 1)
def test_08_migration_status(self):
"""test08: Test to check the status of an ongoing migration between different DBS instances by dataset. \n"""
datasets = set((block_name.split('#', 1)[0] for block_name in self._saved_data['migration_inputs']))
for dataset in datasets:
status = self._migrate_api.list('status', dataset=dataset)
self.assertTrue(len(status)>=1)
def test_09_migration_removal(self):
"test09: Test to remove a pending migration request between different DBS instances. \n"
for migration_rqst_id in self._saved_data['migration_rqst_ids']:
data = {'migration_rqst_id': migration_rqst_id}
self._migrate_api.insert('remove', data)
def test_99_save_data_to_disk(self):
"""test99: Save data to disk to re-use data for migration server unittests. \n"""
self._data_provider.save('migration_unittest_data.pkl')
self._independent_data_provider.save('migration_unittest_independent_data.pkl')
self._parent_data_provider.save('migration_unittest_parent_data.pkl')
self._independent_child_data_provider.save('migration_unittest_independent_child_data.pkl')
self._child_data_provider.save('migration_unittest_child_data.pkl')
if __name__ == "__main__":
SUITE = unittest.TestLoader().loadTestsFromTestCase(DBSMigrateModel_t)
unittest.TextTestRunner(verbosity=2).run(SUITE)
| 55.591241 | 125 | 0.689207 | 6,880 | 0.903361 | 0 | 0 | 1,991 | 0.261423 | 0 | 0 | 2,553 | 0.335215 |
323e018247ff04ecd6fd2937c2a4145cd45afc55 | 844 | py | Python | setup.py | sgang007/audio_chat_client | e2c1caf6ec1a781be0d22f516e55434099514da1 | [
"MIT"
]
| null | null | null | setup.py | sgang007/audio_chat_client | e2c1caf6ec1a781be0d22f516e55434099514da1 | [
"MIT"
]
| null | null | null | setup.py | sgang007/audio_chat_client | e2c1caf6ec1a781be0d22f516e55434099514da1 | [
"MIT"
]
| null | null | null | from setuptools import setup, find_packages
# from distutils.core import setup
# import py2exe
# import sys
import os
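# Deleting os.link makes setuptools copy files instead of hard-linking them
# (a common workaround for filesystems without hard-link support).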
del os.link
# sys.setrecursionlimit(5000)
with open('requirements.txt') as f:
required = f.read().splitlines()
def readme():
with open('README.md') as f:
return f.read()
setup(name='varta-chat',
version='1.0',
description='Audio Chat framework',
long_description=readme(),
url='https://github.com/sgang007/audio_chat_client',
author='Shubhojyoti Ganguly',
author_email='[email protected]',
license='MIT',
packages=find_packages(),
install_requires=required,
entry_points={
'console_scripts': [
'varta = client.__main__:key_listener',
]
},
include_package_data=True,
zip_safe=True)
| 23.444444 | 58 | 0.64455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 314 | 0.372038 |
323e28eb5aa06c996913613c2bfc7c17a0e85d7c | 2,334 | py | Python | kglib/tests/end_to_end/kgcn/diagnosis_debug.py | graknlabs/research | ae3ee07106739efd10f0627058210038ab5956d3 | [
"Apache-2.0"
]
| 13 | 2018-09-25T13:29:08.000Z | 2018-12-10T11:04:38.000Z | kglib/tests/end_to_end/kgcn/diagnosis_debug.py | graknlabs/research | ae3ee07106739efd10f0627058210038ab5956d3 | [
"Apache-2.0"
]
| 23 | 2018-09-17T20:31:44.000Z | 2018-12-14T11:21:52.000Z | kglib/tests/end_to_end/kgcn/diagnosis_debug.py | graknlabs/research | ae3ee07106739efd10f0627058210038ab5956d3 | [
"Apache-2.0"
]
| 1 | 2018-09-25T15:56:32.000Z | 2018-09-25T15:56:32.000Z | #
# Copyright (C) 2021 Vaticle
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import sys
import unittest
from kglib.kgcn_tensorflow.examples.diagnosis.diagnosis import diagnosis_example
class TestDiagnosisExampleDebug(unittest.TestCase):
"""
A copy of the end-to-end test for local debugging. Requires a TypeDB server to be started in the background
manually. Run with:
bazel test //kglib/tests/end_to_end:diagnosis --test_output=streamed --spawn_strategy=standalone --action_env=PATH --test_arg=--<path/to/your/typedb/directory>
"""
def setUp(self):
self._typedb_binary_location = sys.argv.pop()
base_dir = os.getenv("TEST_SRCDIR") + "/" + os.getenv("TEST_WORKSPACE")
self._data_file_location = base_dir + sys.argv.pop()
self._schema_file_location = base_dir + sys.argv.pop()
def test_learning_is_done(self):
solveds_tr, solveds_ge = diagnosis_example(self._typedb_binary_location,
schema_file_path=self._schema_file_location,
seed_data_file_path=self._data_file_location)
self.assertGreaterEqual(solveds_tr[-1], 0.7)
self.assertLessEqual(solveds_tr[-1], 0.99)
self.assertGreaterEqual(solveds_ge[-1], 0.7)
self.assertLessEqual(solveds_ge[-1], 0.99)
if __name__ == "__main__":
    # This handles the additional arguments that are supplied by our py_test definition
# https://stackoverflow.com/a/38012249
unittest.main(argv=['ignored-arg'])
| 42.436364 | 163 | 0.707798 | 1,167 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 1,312 | 0.562125 |
324140adbf8ce6a27b7f51c371562021ff506dae | 1,668 | py | Python | python/math_utils.py | PROrock/codin-game-puzzles | a0444719f9a629fc97b1da6f175ecd462a9ff59b | [
"MIT"
]
| 1 | 2021-06-16T02:33:57.000Z | 2021-06-16T02:33:57.000Z | python/math_utils.py | PROrock/codin-game-puzzles | a0444719f9a629fc97b1da6f175ecd462a9ff59b | [
"MIT"
]
| null | null | null | python/math_utils.py | PROrock/codin-game-puzzles | a0444719f9a629fc97b1da6f175ecd462a9ff59b | [
"MIT"
]
| null | null | null | def signum(x):
if x > 0: return 1
if x < 0: return -1
return 0
# copy of Python 3.5 implementation - probably not needed
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
def gcd(a, b):
"""Greatest common divisor"""
return _gcd_internal(abs(a), abs(b))
def _gcd_internal(a, b):
"""Greatest common divisor internal"""
    # Impl. notes: Euclidean algorithm; both a and b are assumed non-negative here
# There exists faster algorithm (which uses division by 2, which is faster)
# -> Stein's algorithm https://en.wikipedia.org/wiki/Binary_GCD_algorithm
# print a, b
if a == b:
return a
if b == 1:
return 1
if a == 0 or b == 0:
return max(a, b)
return gcd(b, a % b)
def combinations_generator(n, k):
"""Generates all combinations of list of length n with k ones (lexicographically sorted).
Storing only one indices and creating the combination list might be more performant.
"""
combination = [1 if i >= n - k else 0 for i in xrange(n)]
while True:
yield combination
        combination = list(combination)  # shallow copy (the original called copy() without importing it)
# get first one with zero before it
one_indices = [idx for idx, value in enumerate(combination) if value]
for one_idx_idx, one_idx in enumerate(one_indices):
combination[one_idx] = 0
if one_idx > 0 and one_idx - 1 != one_indices[one_idx_idx - 1]:
for i in xrange(one_idx_idx + 1):
combination[one_idx - i - 1] = 1
break
else:
# all combinations generated, breaking
break
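# Worked example: combinations_generator(3, 2) yields [0, 1, 1], [1, 0, 1], [1, 1, 0] and then stops.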
| 32.076923 | 93 | 0.607914 | 0 | 0 | 871 | 0.522182 | 0 | 0 | 0 | 0 | 604 | 0.36211 |
32426c09b1bd20f92239fee3f6494dab7ae72789 | 2,477 | py | Python | BASS_2_OM_testOnSyntheticData.py | oliviermirat/BASS | fe595fdc60795b09bb6c264b6da914a6e8e0c415 | [
"MIT"
]
| 1 | 2020-10-10T11:20:32.000Z | 2020-10-10T11:20:32.000Z | BASS_2_OM_testOnSyntheticData.py | oliviermirat/BASS | fe595fdc60795b09bb6c264b6da914a6e8e0c415 | [
"MIT"
]
| null | null | null | BASS_2_OM_testOnSyntheticData.py | oliviermirat/BASS | fe595fdc60795b09bb6c264b6da914a6e8e0c415 | [
"MIT"
]
| null | null | null | import sys
sys.path.insert(1, './GR_BASS/BASS_only_original/')
sys.path.insert(1, './GR_BASS/')
import bass as md
import numpy as np
import sys
import bassLibrary as bl
# BASS algorithm parameters
eps = 0.1
p_d = 0.2
Jthr = 0.15
seed = 0
# Creating synthetic data to run BASS on and to learn the GMM model
nbClasses = 5
classNames = ['a', 'b', 'c', 'd', 'e']
nbInstDataAnalyze = 4000
probElemDictAppear = 0.05
[dataToAnalyze1, dataForLearn] = bl.createSyntheticDataSet(nbClasses, nbInstDataAnalyze, [[3, 2, 1, 0], [0, 1, 2, 3]], [probElemDictAppear, probElemDictAppear])
l = int(len(dataToAnalyze1)/4)
lengths_data1 = np.array([l, l, l, l])
# Learning the model with the data previously created
model_fit = md.GMM_model(nbClasses)
model_fit.solve(dataForLearn)
# Launch BASS on the synthetic data previously created
posteriorProb1 = bl.getPosteriorProbabilities(dataToAnalyze1, lengths_data1, model_fit)
[P_w1, nbInstances1, w_dict1] = bl.launchBASS(posteriorProb1, lengths_data1, model_fit, eps, p_d, Jthr, seed)
[transmat_, stationary_probs_, a, b, c] = bl.launchMarkovianCompare(posteriorProb1, lengths_data1, model_fit, eps, p_d, Jthr, seed, w_dict1, classNames, 0, {'nameOfFile' : 'syntheticDataTest'})
# Comparing different datasets with different amounts of insertions
for idx, probElemDictAppear2 in enumerate([0.1, 0.05]):
print("Comparing two different dataset with SAME amounts of insertions. Probability: ", probElemDictAppear2)
[dataToAnalyze2, dataForLearn2] = bl.createSyntheticDataSet(nbClasses, nbInstDataAnalyze, [[3, 2, 1, 0], [0, 1, 2, 3]], [probElemDictAppear2, probElemDictAppear2])
l = int(len(dataToAnalyze2)/4)
lengths_data2 = np.array([l, l, l, l])
posteriorProb2 = bl.getPosteriorProbabilities(dataToAnalyze2, lengths_data2, model_fit)
[P_w2, nbInstances2, w_dict2] = bl.launchBASS(posteriorProb2, lengths_data2, model_fit, eps, p_d, Jthr, seed)
w_thr = 1e-4
p_ins = 0.2
mu = 1.0
H_beta_fac = 0
Sigma = dataToAnalyze1.shape[1]
std = 0.05
params = np.array([eps,p_d,p_ins, mu, w_thr,H_beta_fac, Jthr, Sigma, std], dtype =float)
bl.compareTwoBASSresults(w_dict1, w_dict2, params, model_fit, dataToAnalyze1, lengths_data1, dataToAnalyze2, lengths_data2, {'nameOfFile' : 'syntheticDataTest'}, classNames, str(idx)) # TODO: change compareTwoBASSresults for it to accept the posterior probabilities posteriorProb1 and posteriorProb2 instead of the data dataToAnalyze1 and dataToAnalyze2
| 43.45614 | 355 | 0.749697 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 641 | 0.258781 |
3242f191734e1ec3faebeb7b0fb07f008db4254c | 108 | py | Python | auth/view/resource/create_reset_password_request.py | nicolaszein/auth | 90112f1a4d6f368714b19daad7e8a4226594b383 | [
"MIT"
]
| null | null | null | auth/view/resource/create_reset_password_request.py | nicolaszein/auth | 90112f1a4d6f368714b19daad7e8a4226594b383 | [
"MIT"
]
| null | null | null | auth/view/resource/create_reset_password_request.py | nicolaszein/auth | 90112f1a4d6f368714b19daad7e8a4226594b383 | [
"MIT"
]
| null | null | null | from pydantic import BaseModel, EmailStr
class CreateResetPasswordRequest(BaseModel):
email: EmailStr
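# Usage sketch: CreateResetPasswordRequest(email="user@example.com") validates the payload;
# an invalid address raises pydantic.ValidationError.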
| 18 | 44 | 0.814815 | 64 | 0.592593 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
3247a207cdb1e57a605f9bb8949d6c37632fda73 | 3,707 | py | Python | pymt/grids/map.py | mwtoews/pymt | 81a8469b0d0d115d21186ec1d1c9575690d51850 | [
"MIT"
]
| null | null | null | pymt/grids/map.py | mwtoews/pymt | 81a8469b0d0d115d21186ec1d1c9575690d51850 | [
"MIT"
]
| null | null | null | pymt/grids/map.py | mwtoews/pymt | 81a8469b0d0d115d21186ec1d1c9575690d51850 | [
"MIT"
]
| null | null | null | #! /bin/env python
"""
Examples
========
**Rectilinear**
Create a rectilinear grid that is 2x3::
(0) --- (1) --- (2)
| | |
| | |
| [0] | [1] |
| | |
| | |
(3) --- (4) --- (5)
Numbers in parens are node IDs, and numbers in square brackets are
cell IDs.
>>> g = RectilinearMap ([0, 2], [0, 1, 2])
>>> g.get_x ()
array([ 0., 1., 2., 0., 1., 2.])
>>> g.get_y ()
array([ 0., 0., 0., 2., 2., 2.])
Node 1 is shared by both cells 0 and 1; node 5 is part of cell 1 only.
>>> g.get_shared_cells (1)
[0, 1]
>>> g.get_shared_cells (5)
[1]
Point (.5, 1.) is contained only within cell 0.
>>> g.is_in_cell (.5, 1., 0)
True
>>> g.is_in_cell (.5, 1., 1)
False
Point (1., 1.) is on a border and so is contained by both cells.
>>> g.is_in_cell (1, 1., 0)
True
>>> g.is_in_cell (1, 1., 1)
True
"""
from shapely.geometry import Point, asLineString, asPoint, asPolygon
from pymt.grids import (
Rectilinear,
Structured,
UniformRectilinear,
Unstructured,
UnstructuredPoints,
)
class UnstructuredMap(Unstructured):
name = "Unstructured"
def __init__(self, *args, **kwargs):
super(UnstructuredMap, self).__init__(*args, **kwargs)
self._point = {}
last_offset = 0
for (cell_id, offset) in enumerate(self._offset):
cell = self._connectivity[last_offset:offset]
last_offset = offset
for point_id in cell:
try:
self._point[point_id].append(cell_id)
except KeyError:
self._point[point_id] = [cell_id]
(point_x, point_y) = (self.get_x(), self.get_y())
self._polys = []
last_offset = 0
for (cell_id, offset) in enumerate(self._offset):
cell = self._connectivity[last_offset:offset]
last_offset = offset
(x, y) = (point_x.take(cell), point_y.take(cell))
if len(x) > 2:
self._polys.append(asPolygon(zip(x, y)))
elif len(x) == 2:
self._polys.append(asLineString(zip(x, y)))
else:
self._polys.append(asPoint(zip(x, y)))
def get_shared_cells(self, point_id):
"""
Parameters
----------
point_id: int
ID of a point in the grid.
Returns
-------
ndarray of int
Indices to cells that share a given node.
"""
return self._point[point_id]
def is_in_cell(self, x, y, cell_id):
"""Check if a point is in a cell.
Parameters
----------
x: float
x-coordinate of point to check.
y: float
y-coordinate of point to check.
cell_id: int
ID of the cell in the grid.
Returns
-------
bool
True if the point (x, y) is contained in the cell.
"""
pt = Point((x, y))
return self._polys[cell_id].contains(pt) or self._polys[cell_id].touches(pt)
class UnstructuredPointsMap(UnstructuredPoints):
name = "UnstructuredPoints"
def get_shared_cells(self, point_id): # pylint: disable=no-self-use
return []
def is_in_cell(self, x, y, cell_id): # pylint: disable=no-self-use
return False
class StructuredMap(Structured, UnstructuredMap):
name = "Structured"
class RectilinearMap(Rectilinear, UnstructuredMap):
name = "Rectilinear"
class UniformRectilinearMap(UniformRectilinear, UnstructuredMap):
name = "UniformRectilinear"
if __name__ == "__main__":
import doctest
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
| 23.916129 | 84 | 0.555705 | 2,498 | 0.67386 | 0 | 0 | 0 | 0 | 0 | 0 | 1,631 | 0.439978 |
3247e08ee12a7d9414679491f0b3e5ad060be2e8 | 27,447 | py | Python | jogo.py | AdamastorLinsFrancaNetto/jogo-academic-journey | ad312d255abe23e243ba39732e972cf45f092b08 | [
"MIT"
]
| null | null | null | jogo.py | AdamastorLinsFrancaNetto/jogo-academic-journey | ad312d255abe23e243ba39732e972cf45f092b08 | [
"MIT"
]
| null | null | null | jogo.py | AdamastorLinsFrancaNetto/jogo-academic-journey | ad312d255abe23e243ba39732e972cf45f092b08 | [
"MIT"
]
| null | null | null | import pygame
from conteudo import Conteudo, Nave, Tiro
import random
class Jogo:
def __init__(self):
self.fundo1 = Conteudo("arquivos/espaco.png", 0, 0)
self.fundo2 = Conteudo("arquivos/espaco.png", 0, -960)
self.nave = Nave("arquivos/nave1.png",630,750)
self.comando = Conteudo("arquivos/comando1.png", 40, 960)
self.comandoo = Conteudo("arquivos/comandoo1.png", 40, 960)
self.comandooo = Conteudo("arquivos/comandooo1.png", 40, 900)
self.comandoooo = Conteudo("arquivos/comandoooo1.png", 40, 960)
self.dialogo1 = Conteudo("arquivos/dialogo1.png", 330, 120)
self.dialogo2 = Conteudo("arquivos/dialogo4.png", 330, 120)
self.dialogo3 = Conteudo("arquivos/dialogo6.png", 330, 120)
self.dialogo4 = Conteudo("arquivos/dialogo8.png", 330, 120)
self.dialogo5 = Conteudo("arquivos/dialogo9.png", 330, 120)
self.armadura5 = Conteudo("arquivos/armadura5.png", 10, 10)
self.armadura4 = Conteudo("arquivos/armadura4.png", 10, 10)
self.armadura3 = Conteudo("arquivos/armadura3.png", 10, 10)
self.armadura2 = Conteudo("arquivos/armadura2.png", 10, 10)
self.armadura1 = Conteudo("arquivos/armadura1.png", 10, 10)
self.discernimento0 = Conteudo("arquivos/disc0.png", 820, 10)
self.discernimento1 = Conteudo("arquivos/disc1.png", 820, 10)
self.discernimento2 = Conteudo("arquivos/disc2.png", 820, 10)
self.discernimento3 = Conteudo("arquivos/disc3.png", 820, 10)
self.discernimento4 = Conteudo("arquivos/disc4.png", 820, 10)
self.discernimento5 = Conteudo("arquivos/disc5.png", 820, 10)
self.planetainimigo = Conteudo("arquivos/pr1.png", 910, -320)
self.planetaaliado = Conteudo("arquivos/pb1.png", 10, -600)
self.resiliencia0 = Conteudo("arquivos/resili0.png", 824, 56)
self.resiliencia1 = Conteudo("arquivos/resili1.png", 824, 56)
self.resiliencia2 = Conteudo("arquivos/resili2.png", 824, 56)
self.resiliencia3 = Conteudo("arquivos/resili3.png", 824, 56)
self.resiliencia4 = Conteudo("arquivos/resili4.png", 824, 56)
self.resiliencia5 = Conteudo("arquivos/resili5.png", 824, 56)
self.condecoracoes = Conteudo("arquivos/condecoracoes.png", 1010, 755)
self.condecoracao1 = Conteudo("arquivos/condecoracao1.png", 1010, 790)
self.condecoracao2 = Conteudo("arquivos/condecoracao2.png", 1100, 790)
self.condecoracao3 = Conteudo("arquivos/condecoracao3.png", 1190, 790)
self.destreza0 = Conteudo("arquivos/destreza0.png", 821, 104)
self.destreza1 = Conteudo("arquivos/destreza1.png", 821, 104)
self.destreza2 = Conteudo("arquivos/destreza2.png", 821, 104)
self.destreza3 = Conteudo("arquivos/destreza3.png", 821, 104)
self.destreza4 = Conteudo("arquivos/destreza4.png", 821, 104)
self.destreza5 = Conteudo("arquivos/destreza5.png", 821, 104)
self.gggg = Conteudo("arquivos/gggg1.png", 1000, -230)
self.ggg = Conteudo("arquivos/ggg1.png", 700, -180)
self.gg = Conteudo("arquivos/gg1.png", 400, -130)
self.g = Conteudo("arquivos/g1.png", 100, -100)
self.r = Conteudo("arquivos/r.png", 600, -50)
self.tiro = Tiro("arquivos/x1.png", -100,-100)
self.aste1 = Conteudo("arquivos/aste1.png", 840, -50)
self.aste2 = Conteudo("arquivos/aste2.png", 640, -120)
self.aste3 = Conteudo("arquivos/aste3.png", 440, -190)
self.aste11 = Conteudo("arquivos/aste11.png", 840, -50)
self.aste22 = Conteudo("arquivos/aste22.png", 640, -120)
self.aste33 = Conteudo("arquivos/aste33.png", 440, -190)
self.p1 = Conteudo("arquivos/p1.png", 0, -40)
self.p2 = Conteudo("arquivos/p2.png", 427, -40)
self.p3 = Conteudo("arquivos/p3.png", 854, -40)
self.i1 = Conteudo("arquivos/i1.png", 0, -40)
self.i2 = Conteudo("arquivos/i2.png", 427, -40)
self.i3 = Conteudo("arquivos/i3.png", 854, -40)
self.w1 = Conteudo("arquivos/w1.png", 0, -40)
self.w2 = Conteudo("arquivos/w2.png", 427, -40)
self.w3 = Conteudo("arquivos/w3.png", 854, -40)
self.f1 = Conteudo("arquivos/f1.png", 0, -40)
self.f2 = Conteudo("arquivos/f2.png", 427, -40)
self.f3 = Conteudo("arquivos/f3.png", 854, -40)
self.d1 = Conteudo("arquivos/d1.png", 0, -40)
self.d2 = Conteudo("arquivos/d2.png", 427, -40)
self.d3 = Conteudo("arquivos/d3.png", 854, -40)
self.fim = Conteudo("arquivos/fim.png", 0, 0)
self.boleana_dialogo = False
self.mudar_cena = False
self.foi = False
self.contagem_resili = 0
self.contagem_destre = 0
self.contagem_dialogo1 = 1
self.contagem_paliados = 1
self.contagem_pinimigos = 1
self.inicio_asteroides = 0
self.contagem_gggg = 1
self.contagem_ggg = 1
self.contagem_gg = 1
self.contagem_g = 1
self.contagem_r = 1
self.contagem_barreira = 0
self.inicio = 0
self.inicio1 = 0
self.final = 0
def draw(self, tela):
self.fundo1.draw(tela)
self.fundo2.draw(tela)
self.tiro.draw(tela)
self.nave.draw(tela)
self.comando.draw(tela)
if self.nave.contagem_discernimento == 5 and self.contagem_paliados == 6:
self.comandoo.draw(tela)
if self.nave.contagem_resiliencia == 5 and self.contagem_gggg == 6:
self.comandooo.draw(tela)
if self.inicio == 1:
self.comandoooo.draw(tela)
if self.comandoooo.personagens.rect[1] == 370:
self.dialogo4.draw(tela)
if self.inicio1 == 1:
self.condecoracao3.draw(tela)
if self.contagem_dialogo1 == 9:
self.dialogo5.draw(tela)
if self.final == 1:
self.fim.draw(tela)
self.armadura5.draw(tela)
self.armadura4.draw(tela)
self.armadura3.draw(tela)
self.armadura2.draw(tela)
self.armadura1.draw(tela)
if self.contagem_dialogo1 == 3:
self.planetaaliado.draw(tela)
if self.contagem_dialogo1 == 3:
self.planetainimigo.draw(tela)
self.aste1.draw(tela)
self.aste2.draw(tela)
self.aste3.draw(tela)
if self.contagem_dialogo1 == 3:
self.discernimento0.draw(tela)
if self.nave.contagem_discernimento == 1:
self.discernimento1.draw(tela)
if self.nave.contagem_discernimento == 2:
self.discernimento2.draw(tela)
if self.nave.contagem_discernimento == 3:
self.discernimento3.draw(tela)
if self.nave.contagem_discernimento == 4:
self.discernimento4.draw(tela)
if self.nave.contagem_discernimento == 5:
self.discernimento5.draw(tela)
if self.nave.contagem_resiliencia == 1:
self.resiliencia1.draw(tela)
if self.nave.contagem_resiliencia == 2:
self.resiliencia2.draw(tela)
if self.nave.contagem_resiliencia == 3:
self.resiliencia3.draw(tela)
if self.nave.contagem_resiliencia == 4:
self.resiliencia4.draw(tela)
if self.nave.contagem_resiliencia == 5:
self.resiliencia5.draw(tela)
if self.nave.contagem_destreza == 1:
self.destreza1.draw(tela)
if self.nave.contagem_destreza == 2:
self.destreza2.draw(tela)
if self.nave.contagem_destreza == 3:
self.destreza3.draw(tela)
if self.nave.contagem_destreza == 4:
self.destreza4.draw(tela)
if self.nave.contagem_destreza == 5:
self.destreza5.draw(tela)
if self.comando.personagens.rect[1] == 370:
self.dialogo1.draw(tela)
if self.comandoo.personagens.rect[1] == 370:
self.dialogo2.draw(tela)
if self.comandooo.personagens.rect[1] == 370:
self.dialogo3.draw(tela)
if self.contagem_resili == 1:
self.condecoracoes.draw(tela)
self.condecoracao1.draw(tela)
self.resiliencia0.draw(tela)
if self.contagem_destre == 1:
self.condecoracao2.draw(tela)
self.destreza0.draw(tela)
if self.inicio_asteroides == 1:
self.gggg.draw(tela)
self.ggg.draw(tela)
self.gg.draw(tela)
self.g.draw(tela)
self.r.draw(tela)
self.aste11.draw(tela)
self.aste22.draw(tela)
self.aste33.draw(tela)
if self.nave.contagem_resiliencia == 6:
self.comandooo.draw(tela)
if self.contagem_dialogo1 == 7:
self.p1.draw(tela)
self.p2.draw(tela)
self.p3.draw(tela)
if self.contagem_barreira == 1:
self.i1.draw(tela)
self.i2.draw(tela)
self.i3.draw(tela)
if self.contagem_barreira == 2:
self.w1.draw(tela)
self.w2.draw(tela)
self.w3.draw(tela)
if self.contagem_barreira == 3:
self.f1.draw(tela)
self.f2.draw(tela)
self.f3.draw(tela)
if self.contagem_barreira == 4:
self.d1.draw(tela)
self.d2.draw(tela)
self.d3.draw(tela)
def atualizacoes(self):
self.movimento_fundo()
self.nave.animacoes("nave", 2, 2)
self.comando.animacoes("comando", 2, 2)
self.comandoo.animacoes("comandoo", 2, 2)
self.comandooo.animacoes("comandooo", 2, 2)
self.comandoooo.animacoes("comandoooo", 2, 2)
self.tiro.animacoes("x",2,2)
self.planetas_inimigos()
self.planetas_aliados()
self.nave.colisao_planetas(self.planetainimigo.group, "planetainimigos")
self.nave.colisao_planetas(self.aste1.group, "aste1")
self.nave.colisao_planetas(self.aste2.group, "aste2")
self.nave.colisao_planetas(self.aste3.group, "aste3")
self.nave.colisao_planetas(self.planetaaliado.group, "planetaaliados")
self.tiro.colisao_tiro(self.planetainimigo.group, "planetainimigos")
self.tiro.colisao_tiro(self.planetaaliado.group, "planetaaliados")
self.tiro.colisao_tiroast1(self.aste1.group, "aste1")
self.tiro.colisao_tiroast1(self.aste2.group, "aste2")
self.tiro.colisao_tiroast1(self.aste3.group, "aste3")
self.nave.colisao_asteroides(self.gggg.group, "gggg")
self.nave.colisao_asteroides(self.ggg.group, "ggg")
self.nave.colisao_asteroides(self.gg.group, "gg")
self.nave.colisao_asteroides(self.g.group, "g")
self.nave.colisao_asteroides(self.r.group, "r")
self.nave.colisao_asteroides(self.aste11.group, "aste11")
self.nave.colisao_asteroides(self.aste22.group, "aste22")
self.nave.colisao_asteroides(self.aste33.group, "aste33")
self.tiro.colisao_tiroo(self.gggg.group, "gggg")
self.tiro.colisao_tiroo(self.ggg.group, "ggg")
self.tiro.colisao_tiroo(self.gg.group, "gg")
self.tiro.colisao_tiroo(self.g.group, "g")
self.tiro.colisao_tiroast2(self.aste11.group, "aste11")
self.tiro.colisao_tiroast2(self.aste22.group, "aste22")
self.tiro.colisao_tiroast2(self.aste33.group, "aste33")
self.nave.colisao_barreira(self.p1.group, "p1")
self.nave.colisao_barreira(self.p2.group, "p2")
self.nave.colisao_barreira(self.p3.group, "p3")
self.nave.colisao_barreira(self.i1.group, "i1")
self.nave.colisao_barreira(self.i2.group, "i2")
self.nave.colisao_barreira(self.i3.group, "i3")
self.nave.colisao_barreira(self.w1.group, "w1")
self.nave.colisao_barreira(self.w2.group, "w2")
self.nave.colisao_barreira(self.w3.group, "w3")
self.nave.colisao_barreira(self.f1.group, "f1")
self.nave.colisao_barreira(self.f2.group, "f2")
self.nave.colisao_barreira(self.f3.group, "f3")
self.nave.colisao_barreira(self.d1.group, "d1")
self.nave.colisao_barreira(self.d2.group, "d2")
self.nave.colisao_barreira(self.d3.group, "d3")
self.tiro.colisao_barreirat(self.p1.group, "p1")
self.tiro.colisao_barreirat(self.p2.group, "p2")
self.tiro.colisao_barreirat(self.p3.group, "p3")
self.tiro.colisao_barreirat(self.i1.group, "i1")
self.tiro.colisao_barreirat(self.i2.group, "i2")
self.tiro.colisao_barreirat(self.i3.group, "i3")
self.tiro.colisao_barreirat(self.w1.group, "w1")
self.tiro.colisao_barreirat(self.w2.group, "w2")
self.tiro.colisao_barreirat(self.w3.group, "w3")
self.tiro.colisao_barreirat(self.f1.group, "f1")
self.tiro.colisao_barreirat(self.f2.group, "f2")
self.tiro.colisao_barreirat(self.f3.group, "f3")
self.tiro.colisao_barreirat(self.d1.group, "d1")
self.tiro.colisao_barreirat(self.d2.group, "d2")
self.tiro.colisao_barreirat(self.d3.group, "d3")
self.quantidade_armadura()
self.quantidade_disernimento()
self.quantidade_resiliencia()
self.quantidade_destreza()
self.movimento_primeira()
self.movimento_segunda()
self.movimento_terceira()
self.asteroides()
self.disparado()
self.barreira()
self.movimento_quarta()
def movimento_primeira(self):
if self.nave.contagem_enter == 1:
self.comando.personagens.rect[1] -= 3
if self.comando.personagens.rect[1] <= 370:
self.comando.personagens.rect[1] = 370
self.nave.contagem_enter += 1
pygame.mixer.init()
self.som_dialogo = pygame.mixer.Sound("arquivos/gravacao1.mpeg")
self.som_dialogo.play()
if self.contagem_dialogo1 == 3:
self.comando.personagens.rect[1] += 6
if self.comando.personagens.rect[1] >= 960:
self.comando.personagens.rect[1] = 960
self.comando.personagens.kill()
def movimento_segunda(self):
if self.nave.contagem_discernimento == 5 and self.contagem_paliados == 6 and self.contagem_dialogo1 == 3:
self.comandoo.personagens.rect[1] -= 3
if self.comandoo.personagens.rect[1] <= 370:
self.comandoo.personagens.rect[1] = 370
self.contagem_resili += 1
self.contagem_dialogo1 += 1
pygame.mixer.init()
self.som_dialogo = pygame.mixer.Sound("arquivos/gravacao3.mpeg")
self.som_dialogo.play()
if self.contagem_dialogo1 == 5:
self.comandoo.personagens.rect[1] += 6
if self.comandoo.personagens.rect[1] >= 960:
self.comandoo.personagens.rect[1] = 960
self.comandoo.personagens.kill()
def movimento_terceira(self):
if self.nave.contagem_resiliencia == 5 and self.contagem_gggg == 6 and self.contagem_dialogo1 == 5:
self.comandooo.personagens.rect[1] -= 3
if self.comandooo.personagens.rect[1] <= 370:
self.comandooo.personagens.rect[1] = 370
pygame.mixer.init()
self.som_dialogo = pygame.mixer.Sound("arquivos/gravacao4.mpeg")
self.som_dialogo.play()
self.contagem_destre += 1
self.contagem_dialogo1 += 1
if self.contagem_dialogo1 == 7:
self.comandooo.personagens.rect[1] += 6
if self.comandooo.personagens.rect[1] >= 960:
self.comandooo.personagens.rect[1] = 960
self.comandooo.personagens.kill()
def movimento_fundo(self):
self.fundo1.personagens.rect[1] += 4
self.fundo2.personagens.rect[1] += 4
if self.fundo1.personagens.rect[1] >= 960:
self.fundo1.personagens.rect[1] = 0
if self.fundo2.personagens.rect[1] >= 0:
self.fundo2.personagens.rect[1] = -960
def quantidade_armadura(self):
if self.nave.contagem_armadura == 4:
self.armadura5.personagens.kill()
if self.nave.contagem_armadura == 3:
self.armadura4.personagens.kill()
if self.nave.contagem_armadura == 2:
self.armadura3.personagens.kill()
if self.nave.contagem_armadura == 1:
self.armadura2.personagens.kill()
if self.nave.contagem_armadura == 0:
self.mudar_cena = True
def quantidade_disernimento(self):
if self.nave.contagem_discernimento == 1:
self.discernimento0.personagens.kill()
if self.nave.contagem_discernimento == 2:
self.discernimento1.personagens.kill()
if self.nave.contagem_discernimento == 3:
self.discernimento2.personagens.kill()
if self.nave.contagem_discernimento == 4:
self.discernimento3.personagens.kill()
if self.nave.contagem_discernimento == 5:
self.discernimento4.personagens.kill()
def quantidade_resiliencia(self):
if self.nave.contagem_resiliencia == 1:
self.resiliencia0.personagens.kill()
if self.nave.contagem_resiliencia == 2:
self.resiliencia1.personagens.kill()
if self.nave.contagem_resiliencia == 3:
self.resiliencia2.personagens.kill()
if self.nave.contagem_resiliencia == 4:
self.resiliencia3.personagens.kill()
if self.nave.contagem_resiliencia == 5:
self.resiliencia4.personagens.kill()
def quantidade_destreza(self):
if self.nave.contagem_destreza == 1:
self.destreza0.personagens.kill()
if self.nave.contagem_destreza == 2:
self.destreza1.personagens.kill()
if self.nave.contagem_destreza == 3:
self.destreza2.personagens.kill()
if self.nave.contagem_destreza == 4:
self.destreza3.personagens.kill()
if self.nave.contagem_destreza == 5:
self.destreza4.personagens.kill()
def dialogo(self, event):
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_KP_ENTER:
self.boleana_dialogo = True
if event.type == pygame.KEYUP:
if event.key == pygame.K_KP_ENTER:
self.boleana_dialogo = False
if self.boleana_dialogo:
self.dialogo1.personagens.kill()
self.contagem_dialogo1 +=1
self.dialogo1 = Conteudo("arquivos/dialogo" + str(self.contagem_dialogo1) + ".png", 330, 120)
if self.contagem_dialogo1 <= 2:
pygame.mixer.init()
self.som_dialogo = pygame.mixer.Sound("arquivos/gravacao2.mpeg")
self.som_dialogo.play()
print("Nº dialogo:", self.contagem_dialogo1)
if self.contagem_dialogo1 == 5:
self.dialogo2.personagens.kill()
self.inicio_asteroides = 1
def planetas_inimigos(self):
if self.comando.personagens.rect[1] == 960 and self.contagem_pinimigos <= 5:
self.planetainimigo.personagens.rect[1] += 6
self.aste1.personagens.rect[1] += 7
self.aste2.personagens.rect[1] += 7
self.aste3.personagens.rect[1] += 7
if self.aste1.personagens.rect[1] >= 960 and self.contagem_pinimigos <= 5:
self.aste1.personagens.kill()
if self.contagem_pinimigos <= 4:
self.aste1 = Conteudo("arquivos/aste1.png", random.randrange(50, 1000), -50)
if self.aste2.personagens.rect[1] >= 960 and self.contagem_pinimigos <= 5:
self.aste2.personagens.kill()
if self.contagem_pinimigos <= 4:
self.aste2 = Conteudo("arquivos/aste2.png", random.randrange(50, 1000), -120)
if self.aste3.personagens.rect[1] >= 960 and self.contagem_pinimigos <= 5:
self.aste3.personagens.kill()
if self.contagem_pinimigos <= 4:
self.aste3 = Conteudo("arquivos/aste3.png", random.randrange(50, 1000), -190)
if self.planetainimigo.personagens.rect[1] >= 960 and self.contagem_pinimigos <= 5:
self.planetainimigo.personagens.kill()
self.contagem_pinimigos += 1
if self.contagem_pinimigos <= 5:
self.planetainimigo = Conteudo("arquivos/pr" + str(self.contagem_pinimigos) + ".png", random.randrange(50, 900), -320)
def planetas_aliados(self):
if self.comando.personagens.rect[1] == 960 and self.contagem_paliados <= 5:
self.planetaaliado.personagens.rect[1] += 6
if self.planetaaliado.personagens.rect[1] >= 960 and self.contagem_paliados <= 5:
self.planetaaliado.personagens.kill()
self.contagem_paliados += 1
if self.contagem_paliados <= 5:
self.planetaaliado = Conteudo("arquivos/pb" + str(self.contagem_paliados) + ".png", random.randrange(50, 900), -440)
def asteroides(self):
if self.inicio_asteroides == 1:
self.gggg.personagens.rect[1] += 4
self.ggg.personagens.rect[1] += 4
self.gg.personagens.rect[1] += 4
self.g.personagens.rect[1] += 4
self.r.personagens.rect[1] += 4
self.aste11.personagens.rect[1] += 7
self.aste22.personagens.rect[1] += 7
self.aste33.personagens.rect[1] += 7
if self.aste11.personagens.rect[1] >= 960 and self.contagem_gggg <= 4:
self.aste11.personagens.kill()
if self.contagem_gggg <= 4:
self.aste11 = Conteudo("arquivos/aste11.png", random.randrange(50, 1000), -50)
if self.aste22.personagens.rect[1] >= 960 and self.contagem_gggg <= 4:
self.aste22.personagens.kill()
if self.contagem_gggg <= 4:
self.aste22 = Conteudo("arquivos/aste22.png", random.randrange(50, 1000), -120)
if self.aste33.personagens.rect[1] >= 960 and self.contagem_gggg <= 4:
self.aste33.personagens.kill()
if self.contagem_gggg <= 4:
self.aste33 = Conteudo("arquivos/aste33.png", random.randrange(50, 1000), -190)
if self.gggg.personagens.rect[1] >= 960 and self.contagem_gggg <= 5:
self.gggg.personagens.kill()
self.contagem_gggg += 1
if self.contagem_gggg <= 5:
self.gggg = Conteudo("arquivos/gggg" + str(self.contagem_gggg) + ".png", random.randrange(50, 1000), -230)
if self.ggg.personagens.rect[1] >= 960 and self.contagem_ggg <= 5:
self.ggg.personagens.kill()
self.contagem_ggg += 1
if self.contagem_ggg <= 5:
self.ggg = Conteudo("arquivos/ggg" + str(self.contagem_ggg) + ".png", random.randrange(50, 1000), -180)
if self.gg.personagens.rect[1] >= 960 and self.contagem_gg <= 5:
self.gg.personagens.kill()
self.contagem_gg += 1
if self.contagem_gg <= 5:
self.gg = Conteudo("arquivos/gg" + str(self.contagem_gg) + ".png", random.randrange(50, 1000), -130)
if self.g.personagens.rect[1] >= 960 and self.contagem_g <= 5:
self.g.personagens.kill()
self.contagem_g += 1
if self.contagem_g <= 5:
self.g = Conteudo("arquivos/g" + str(self.contagem_g) + ".png", random.randrange(50, 1000), -100)
if self.r.personagens.rect[1] >= 960 and self.contagem_r <= 5:
self.r.personagens.kill()
self.contagem_r += 1
if self.contagem_r <= 5:
self.r = Conteudo("arquivos/r.png", random.randrange(50, 1000), -50)
def disparado(self):
if self.tiro.tiro:
self.tiro.personagens.rect[1] = (self.nave.personagens.rect[1] + 30)
self.tiro.personagens.rect[0] = (self.nave.personagens.rect[0] + 62)
self.foi = True
if self.foi:
if self.tiro.personagens.rect[1] >= 0:
self.tiro.personagens.rect[1] -= 15
if self.tiro.personagens.rect[1] == 0:
self.tiro.personagens.kill()
self.tiro.personagens.rect[1] = -200
self.tiro.personagens.rect[0] = -200
self.foi = False
if self.tiro.tiro:
self.tiro = Tiro("arquivos/x1.png", self.nave.personagens.rect[0] + 62, self.nave.personagens.rect[1] + 30)
def barreira(self):
if self.contagem_dialogo1 == 7:
self.p1.personagens.rect[1] += 3
self.p2.personagens.rect[1] += 3
self.p3.personagens.rect[1] += 3
if self.p1.personagens.rect[1] >= 960:
self.p1.personagens.kill()
self.p2.personagens.kill()
self.p3.personagens.kill()
self.contagem_barreira = 1
if self.contagem_barreira == 1:
self.i1.personagens.rect[1] += 3
self.i2.personagens.rect[1] += 3
self.i3.personagens.rect[1] += 3
if self.i1.personagens.rect[1] >= 960:
self.i1.personagens.kill()
self.i2.personagens.kill()
self.i3.personagens.kill()
self.contagem_barreira = 2
if self.contagem_barreira == 2:
self.w1.personagens.rect[1] += 3
self.w2.personagens.rect[1] += 3
self.w3.personagens.rect[1] += 3
if self.w1.personagens.rect[1] >= 960:
self.w1.personagens.kill()
self.w2.personagens.kill()
self.w3.personagens.kill()
self.contagem_barreira = 3
if self.contagem_barreira == 3:
self.f1.personagens.rect[1] += 3
self.f2.personagens.rect[1] += 3
self.f3.personagens.rect[1] += 3
if self.f2.personagens.rect[1] >= 960:
self.f1.personagens.kill()
self.f2.personagens.kill()
self.f3.personagens.kill()
self.contagem_barreira = 4
if self.contagem_barreira == 4:
self.d1.personagens.rect[1] += 3
self.d2.personagens.rect[1] += 3
self.d3.personagens.rect[1] += 3
if self.d1.personagens.rect[1] >= 960:
self.contagem_barreira = 5
self.d1.personagens.kill()
self.d2.personagens.kill()
self.d3.personagens.kill()
self.inicio = 1
def movimento_quarta(self):
if self.inicio == 1:
self.comandoooo.personagens.rect[1] -= 3
if self.comandoooo.personagens.rect[1] <= 370:
self.comandoooo.personagens.rect[1] = 370
pygame.mixer.init()
self.som_dialogo = pygame.mixer.Sound("arquivos/gravacao5.mpeg")
self.som_dialogo.play()
self.inicio1 = 1
if self.contagem_dialogo1 == 8:
self.dialogo4.personagens.kill()
if self.contagem_dialogo1 == 9:
pygame.mixer.init()
self.som_dialogo = pygame.mixer.Sound("arquivos/gravacao6.mpeg")
self.som_dialogo.play()
self.dialogo5.personagens.kill()
if self.contagem_dialogo1 == 10:
self.comandoooo.personagens.rect[1] += 6
if self.comandoooo.personagens.rect[1] >= 960:
self.comandoooo.personagens.rect[1] = 960
self.comandoooo.personagens.kill()
self.final = 1
| 47.651042 | 134 | 0.603709 | 27,372 | 0.997231 | 0 | 0 | 0 | 0 | 0 | 0 | 2,268 | 0.082629 |
32485d3d2f97d8719c9ad7891c585aced9f9c6ac | 1,308 | py | Python | xpresso/binders/dependants.py | adriangb/xpresso | 43fcc360f7b19c00e0b78480f96390bcb4d28053 | [
"MIT"
]
| 75 | 2022-01-18T02:17:57.000Z | 2022-03-24T02:30:04.000Z | xpresso/binders/dependants.py | adriangb/xpresso | 43fcc360f7b19c00e0b78480f96390bcb4d28053 | [
"MIT"
]
| 73 | 2022-01-18T03:01:27.000Z | 2022-03-27T16:41:38.000Z | xpresso/binders/dependants.py | adriangb/xpresso | 43fcc360f7b19c00e0b78480f96390bcb4d28053 | [
"MIT"
]
| 3 | 2022-01-18T22:47:06.000Z | 2022-01-25T02:03:53.000Z | import inspect
import typing
from di.api.dependencies import CacheKey
from di.dependant import Dependant, Marker
from xpresso._utils.typing import Protocol
from xpresso.binders.api import SupportsExtractor, SupportsOpenAPI
T = typing.TypeVar("T", covariant=True)
class SupportsMarker(Protocol[T]):
def register_parameter(self, param: inspect.Parameter) -> T:
...
class Binder(Dependant[typing.Any]):
def __init__(
self,
*,
openapi: SupportsOpenAPI,
extractor: SupportsExtractor,
) -> None:
super().__init__(call=extractor.extract, scope="connection")
self.openapi = openapi
self.extractor = extractor
@property
def cache_key(self) -> CacheKey:
return self.extractor
class BinderMarker(Marker):
def __init__(
self,
*,
extractor_marker: SupportsMarker[SupportsExtractor],
openapi_marker: SupportsMarker[SupportsOpenAPI],
) -> None:
self.extractor_marker = extractor_marker
self.openapi_marker = openapi_marker
def register_parameter(self, param: inspect.Parameter) -> Binder:
return Binder(
openapi=self.openapi_marker.register_parameter(param),
extractor=self.extractor_marker.register_parameter(param),
)
| 26.693878 | 70 | 0.683486 | 1,033 | 0.789755 | 0 | 0 | 76 | 0.058104 | 0 | 0 | 15 | 0.011468 |
3248e7edee7a47a71c97765cef8dd8859b78769c | 3,698 | py | Python | test/test_grid_to_triple.py | NCAR/geocat-f2py | fee07e680f61ca2ebfbb33f1554d9d85271fa32a | [
"Apache-2.0"
]
| 4 | 2021-02-20T20:02:11.000Z | 2021-11-24T13:35:32.000Z | test/test_grid_to_triple.py | NCAR/geocat-f2py | fee07e680f61ca2ebfbb33f1554d9d85271fa32a | [
"Apache-2.0"
]
| 27 | 2020-12-07T17:00:05.000Z | 2022-03-24T16:42:17.000Z | test/test_grid_to_triple.py | NCAR/geocat-f2py | fee07e680f61ca2ebfbb33f1554d9d85271fa32a | [
"Apache-2.0"
]
| 4 | 2021-01-07T01:50:11.000Z | 2021-07-07T13:05:42.000Z | import sys
import unittest as ut
import numpy as np
import xarray as xr
# Import from directory structure if coverage test, or from installed
# packages otherwise
if "--cov" in str(sys.argv):
from src.geocat.f2py import grid_to_triple
else:
from geocat.f2py import grid_to_triple
# Size of the grids
ny = 2
mx = 3
# Nominal input
data = np.asarray([2.740655, 2.745848, 4.893587, 2.965059, 1.707929,
0.746007]).reshape((ny, mx))
# Missing value = np.nan input
data_nan = data.copy()
data_nan[0, 1] = np.nan
data_nan[1, 2] = np.nan
# Missing value = -99 input
data_msg = data_nan.copy()
data_msg[np.isnan(data_msg)] = -99
# Coordinates
x = np.asarray([1.0, 3.0, 5.0])
y = np.asarray([2.0, 4.0])
# Expected output
out_expected = np.asarray([1, 3, 5, 1, 3, 5, 2, 2, 2, 4, 4, 4, 2.740655, 2.745848, 4.893587, 2.965059, 1.707929, 0.746007])\
.reshape((3, ny * mx))
out_expected_msg = np.asarray([1, 5, 1, 3, 2, 2, 4, 4, 2.740655, 4.893587, 2.965059, 1.707929])\
.reshape((3, 4))
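# Each expected array has three rows: the x coordinates, the y coordinates and the data values of the grid points.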
class Test_grid_to_triple_float64(ut.TestCase):
def test_grid_to_triple_float64(self):
out = grid_to_triple(data, x, y)
np.testing.assert_array_equal(out_expected, out.values)
def test_grid_to_triple_float64_xr(self):
data_xr = xr.DataArray(
data,
coords={
'lat': y,
'lon': x,
},
dims=['lat', 'lon'],
)
out = grid_to_triple(data_xr, x, y)
np.testing.assert_array_equal(out_expected, out.values)
def test_grid_to_triple_float64_xr_x_y(self):
data_xr = xr.DataArray(data)
out = grid_to_triple(data_xr, x, y)
np.testing.assert_array_equal(out_expected, out.values)
def test_grid_to_triple_float64_nan(self):
out = grid_to_triple(data_nan, x, y)
np.testing.assert_array_equal(out_expected_msg, out.values)
def test_grid_to_triple_float64_nan_2(self):
out = grid_to_triple(data_nan, x, y, msg_py=np.nan)
np.testing.assert_array_equal(out_expected_msg, out.values)
def test_grid_to_triple_float64_msg(self):
out = grid_to_triple(data_msg, x, y, msg_py=-99)
np.testing.assert_array_equal(out_expected_msg, out.values)
class Test_grid_to_triple_float32(ut.TestCase):
def test_grid_to_triple_float32(self):
data_asfloat32 = data.astype(np.float32)
out = grid_to_triple(data_asfloat32, x.astype(np.float32),
y.astype(np.float32))
np.testing.assert_array_equal(out_expected.astype(np.float32), out)
def test_grid_to_triple_float32_nan(self):
data_asfloat32_nan = data_nan.astype(np.float32)
out = grid_to_triple(data_asfloat32_nan, x.astype(np.float32),
y.astype(np.float32))
np.testing.assert_array_equal(out_expected_msg.astype(np.float32), out)
def test_grid_to_triple_float32_nan_2(self):
data_asfloat32_nan = data_nan.astype(np.float32)
out = grid_to_triple(data_asfloat32_nan,
x.astype(np.float32),
y.astype(np.float32),
msg_py=np.nan)
np.testing.assert_array_equal(out_expected_msg.astype(np.float32), out)
def test_grid_to_triple_float32_msg(self):
data_asfloat32_msg = data_msg.astype(np.float32)
out = grid_to_triple(data_asfloat32_msg,
x.astype(np.float32),
y.astype(np.float32),
msg_py=-99)
np.testing.assert_array_equal(out_expected_msg.astype(np.float32), out)
| 32.156522 | 124 | 0.635479 | 2,648 | 0.716063 | 0 | 0 | 0 | 0 | 0 | 0 | 237 | 0.064089 |
3249b98ec0603abf9f97a5033a897bd1e2965b76 | 440 | py | Python | Cisco/Python/Modulo_3/for/exercicio1.py | ThiagoKS-7/Python_Essencials_1_cisco | a417747e873f69bb307c4d36205797b191b5b45a | [
"MIT"
]
| null | null | null | Cisco/Python/Modulo_3/for/exercicio1.py | ThiagoKS-7/Python_Essencials_1_cisco | a417747e873f69bb307c4d36205797b191b5b45a | [
"MIT"
]
| null | null | null | Cisco/Python/Modulo_3/for/exercicio1.py | ThiagoKS-7/Python_Essencials_1_cisco | a417747e873f69bb307c4d36205797b191b5b45a | [
"MIT"
]
| null | null | null | def main():
import time
# Write a for loop that counts to five.
# Body of the loop - print the loop iteration number and the word "Mississippi".
# Body of the loop - use: time.sleep(1)
# Write a print function with the final message.
for i in range(5):
        print(f'{i + 1} Mississippi')
time.sleep(1)
print("Ready or not, here i come!")
if __name__ == '__main__':
main() | 27.5 | 89 | 0.584091 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 269 | 0.611364 |
324db02ef7101b8e262f2ae0d6adf964eaf48e55 | 1,252 | py | Python | scripts/pegasus/build_test_sample_spm_no_bos.py | liminghao1630/transformers | 207594be81b8e5a8589c8b11c3b236924555d806 | [
"Apache-2.0"
]
| 50,404 | 2019-09-26T09:55:55.000Z | 2022-03-31T23:07:49.000Z | scripts/pegasus/build_test_sample_spm_no_bos.py | liminghao1630/transformers | 207594be81b8e5a8589c8b11c3b236924555d806 | [
"Apache-2.0"
]
| 13,179 | 2019-09-26T10:10:57.000Z | 2022-03-31T23:17:08.000Z | scripts/pegasus/build_test_sample_spm_no_bos.py | liminghao1630/transformers | 207594be81b8e5a8589c8b11c3b236924555d806 | [
"Apache-2.0"
]
| 13,337 | 2019-09-26T10:49:38.000Z | 2022-03-31T23:06:17.000Z | #!/usr/bin/env python
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script builds a small sample spm file tests/fixtures/test_sentencepiece_no_bos.model, with features needed by pegasus
# 1. pip install sentencepiece
#
# 2. wget https://raw.githubusercontent.com/google/sentencepiece/master/data/botchan.txt
# 3. build
import sentencepiece as spm
# pegasus:
# 1. no bos
# 2. eos_id is 1
# 3. unk_id is 2
# build a sample spm file accordingly
spm.SentencePieceTrainer.train('--input=botchan.txt --model_prefix=test_sentencepiece_no_bos --bos_id=-1 --unk_id=2 --eos_id=1 --vocab_size=1000')
# 4. now update the fixture
# mv test_sentencepiece_no_bos.model ../../tests/fixtures/
| 36.823529 | 148 | 0.761182 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,160 | 0.926518 |
3252c61f7a71dbc22f9e4a1f7ba0cf98c90f9ea0 | 8,931 | py | Python | pytorch-transformers-extensions/examples/run_inference.py | deepchatterjeevns/nlp_projects | 8ea4a846138da0bcee2970907ea3340b1cdc74cb | [
"MIT"
]
| 21 | 2019-07-25T08:39:56.000Z | 2020-12-14T09:59:06.000Z | pytorch-transformers-extensions/examples/run_inference.py | deepchatterjeevns/nlp_projects | 8ea4a846138da0bcee2970907ea3340b1cdc74cb | [
"MIT"
]
| 1 | 2019-08-05T03:23:54.000Z | 2019-08-05T03:24:39.000Z | pytorch-transformers-extensions/examples/run_inference.py | deepchatterjeevns/nlp_projects | 8ea4a846138da0bcee2970907ea3340b1cdc74cb | [
"MIT"
]
| 15 | 2019-07-31T13:37:14.000Z | 2021-09-28T19:01:27.000Z | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Running inference for sequence classification on various datasets (Bert, XLM, XLNet)."""
from __future__ import absolute_import, division, print_function
import argparse
import logging
import os
import numpy as np
from scipy.special import softmax
import torch
from torch.utils.data import (DataLoader, SequentialSampler, TensorDataset)
from tqdm import tqdm, trange
from pytorch_transformers import (WEIGHTS_NAME, BertConfig,
BertForSequenceClassification, BertTokenizer,
XLMConfig, XLMForSequenceClassification,
XLMTokenizer, XLNetConfig,
XLNetForSequenceClassification,
XLNetTokenizer)
from utils_dataset import (compute_metrics, convert_examples_to_features,
output_modes, processors, InputExample)
logger = logging.getLogger(__name__)
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (BertConfig, XLNetConfig, XLMConfig)), ())
MODEL_CLASSES = {
'bert': (BertConfig, BertForSequenceClassification, BertTokenizer),
'xlnet': (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),
'xlm': (XLMConfig, XLMForSequenceClassification, XLMTokenizer),
}
def inference(args, model, tokenizer, prefix=""):
inf_task = args.task_name
inf_dataset = load_example(args, inf_task, tokenizer)
inf_sampler = SequentialSampler(inf_dataset)
inf_dataloader = DataLoader(inf_dataset, sampler=inf_sampler, batch_size=1)
# Inference!
logger.info("***** Running inference {} *****".format(prefix))
preds = None
out_label_ids = None
for batch in tqdm(inf_dataloader, desc="Inferencing"):
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
                      'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None,  # XLM doesn't use segment_ids
'labels': batch[3]}
outputs = model(**inputs)
inf_loss, logits = outputs[:2]
pred_arr = logits.detach().cpu().numpy()
out_label_ids = inputs['labels'].detach().cpu().numpy()
logger.info("pred_arr: %s", pred_arr)
pred_prob = np.squeeze(softmax(pred_arr, axis=1))
logger.info("[0]: %s, [1]: %s", pred_prob[0], pred_prob[1])
if args.output_mode == "classification":
pred = np.argmax(pred_arr, axis=1)
elif args.output_mode == "regression":
pred = np.squeeze(pred_arr)
        if pred == 0:
            logger.info("Text is negative with confidence: %.1f%%", pred_prob[0]*100)
        else:
            logger.info("Text is positive with confidence: %.1f%%", pred_prob[1]*100)
def load_example(args, task, tokenizer):
processor = processors[task]()
output_mode = output_modes[task]
logger.info("Creating features from input")
label_list = processor.get_labels()
examples = [InputExample(guid=0, text_a=args.text, text_b=None, label='1')]
features = convert_examples_to_features(examples, label_list, args.max_seq_length, tokenizer, output_mode,
cls_token_at_end=bool(args.model_type in ['xlnet']), # xlnet has a cls token at the end
cls_token=tokenizer.cls_token,
sep_token=tokenizer.sep_token,
cls_token_segment_id=2 if args.model_type in ['xlnet'] else 1,
pad_on_left=bool(args.model_type in ['xlnet']), # pad on the left for xlnet
pad_token_segment_id=4 if args.model_type in ['xlnet'] else 0)
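    # Illustrative layout implied by the flags above (a sketch; exact token ids and the
    # remaining defaults come from utils_dataset.convert_examples_to_features):
    #   bert/xlm: [CLS] tok ... tok [SEP] <pad> ... <pad>   (cls first, padded on the right)
    #   xlnet:    <pad> ... <pad> tok ... tok <sep> <cls>   (cls last, padded on the left,
    #             cls segment id 2, pad segment id 4)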
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
if output_mode == "classification":
all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
elif output_mode == "regression":
all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.float)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
return dataset
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--model_type", default=None, type=str, required=True,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS))
parser.add_argument("--task_name", default=None, type=str, required=True,
help="The name of the task to train selected in the list: " + ", ".join(processors.keys()))
parser.add_argument("--text", default="None", type=str, required=True,
help="text to analyze")
## Other parameters
parser.add_argument("--config_name", default="", type=str,
help="Pretrained config name or path if not the same as model_name")
parser.add_argument("--tokenizer_name", default="", type=str,
help="Pretrained tokenizer name or path if not the same as model_name")
parser.add_argument("--max_seq_length", default=128, type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--no_cuda", action='store_true',
help="Avoid using CUDA when available")
parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
args = parser.parse_args()
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
# Setup logging
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger.warning("device: %s, ", args.device)
# Prepare task
args.task_name = args.task_name.lower()
if args.task_name not in processors:
raise ValueError("Task not found: %s" % (args.task_name))
processor = processors[args.task_name]()
args.output_mode = output_modes[args.task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name)
tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case)
model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config)
model.to(args.device)
logger.info("Inference parameters %s", args)
# Inference
inference(args, model, tokenizer)
if __name__ == "__main__":
main()
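# Example invocation (a sketch; the checkpoint path and task name below are placeholders;
# substitute a model fine-tuned with the matching training script and a task registered in
# utils_dataset.processors):
#
#   python run_inference.py \
#       --model_type bert \
#       --model_name_or_path ./output \
#       --task_name sst-2 \
#       --do_lower_case \
#       --text "This movie was surprisingly good."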
| 46.759162 | 163 | 0.665659 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,532 | 0.283507 |