Dataset column schema (one row per field, in the order the data records below are laid out; for string and list columns min/max are lengths, ⌀ marks columns that may be null):

| column | dtype | min | max | nullable |
|---|---|---|---|---|
| hexsha | string | 40 | 40 | no |
| size | int64 | 5 | 2.06M | no |
| ext | string (10 classes) | — | — | no |
| lang | string (1 class) | — | — | no |
| max_stars_repo_path | string | 3 | 248 | no |
| max_stars_repo_name | string | 5 | 125 | no |
| max_stars_repo_head_hexsha | string | 40 | 78 | no |
| max_stars_repo_licenses | list | 1 | 10 | no |
| max_stars_count | int64 | 1 | 191k | yes (⌀) |
| max_stars_repo_stars_event_min_datetime | string | 24 | 24 | yes (⌀) |
| max_stars_repo_stars_event_max_datetime | string | 24 | 24 | yes (⌀) |
| max_issues_repo_path | string | 3 | 248 | no |
| max_issues_repo_name | string | 5 | 125 | no |
| max_issues_repo_head_hexsha | string | 40 | 78 | no |
| max_issues_repo_licenses | list | 1 | 10 | no |
| max_issues_count | int64 | 1 | 67k | yes (⌀) |
| max_issues_repo_issues_event_min_datetime | string | 24 | 24 | yes (⌀) |
| max_issues_repo_issues_event_max_datetime | string | 24 | 24 | yes (⌀) |
| max_forks_repo_path | string | 3 | 248 | no |
| max_forks_repo_name | string | 5 | 125 | no |
| max_forks_repo_head_hexsha | string | 40 | 78 | no |
| max_forks_repo_licenses | list | 1 | 10 | no |
| max_forks_count | int64 | 1 | 105k | yes (⌀) |
| max_forks_repo_forks_event_min_datetime | string | 24 | 24 | yes (⌀) |
| max_forks_repo_forks_event_max_datetime | string | 24 | 24 | yes (⌀) |
| content | string | 5 | 2.06M | no |
| avg_line_length | float64 | 1 | 1.02M | no |
| max_line_length | int64 | 3 | 1.03M | no |
| alphanum_fraction | float64 | 0 | 1 | no |
| count_classes | int64 | 0 | 1.6M | no |
| score_classes | float64 | 0 | 1 | no |
| count_generators | int64 | 0 | 651k | no |
| score_generators | float64 | 0 | 1 | no |
| count_decorators | int64 | 0 | 990k | no |
| score_decorators | float64 | 0 | 1 | no |
| count_async_functions | int64 | 0 | 235k | no |
| score_async_functions | float64 | 0 | 1 | no |
| count_documentation | int64 | 0 | 1.04M | no |
| score_documentation | float64 | 0 | 1 | no |
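Records with this schema can be loaded and inspected with the Hugging Face `datasets` library. The snippet below is a minimal sketch, not part of the original dump: the file name `train.jsonl` and the 0.3 documentation-score threshold are illustrative assumptions; only the column names come from the schema above.

```python
# Minimal sketch for inspecting rows that follow the schema above.
# Assumptions (not from the original dump): the records are stored as
# line-delimited JSON in "train.jsonl"; 0.3 is an arbitrary threshold.
from datasets import load_dataset

ds = load_dataset("json", data_files="train.jsonl", split="train")

# Keep only files with a reasonable documentation score (0..1 per the schema).
documented = ds.filter(lambda row: row["score_documentation"] > 0.3)

# Print basic metadata and the start of the source for the first few files.
for row in documented.select(range(min(3, len(documented)))):
    print(row["max_stars_repo_name"], row["max_stars_repo_path"])
    print("stars:", row["max_stars_count"], "| size:", row["size"], "bytes")
    print(row["content"][:200])
    print("-" * 60)
```

Each record also carries the per-file statistics listed at the end of the schema (line lengths, alphanumeric fraction, class/generator/decorator/async counts and scores), so similar filters can be built on any of those columns.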
29392d7c293c0b529284bdff29493ae4994d22ba | 206 | py | Python | example.py | n0emis/pycodimd | cec7135babe63f0c40fdb9eac7ede50e145cd512 | ["MIT"] | 1 | 2020-04-20T22:06:49.000Z | 2020-04-20T22:06:49.000Z | example.py | n0emis/pycodimd | cec7135babe63f0c40fdb9eac7ede50e145cd512 | ["MIT"] | null | null | null | example.py | n0emis/pycodimd | cec7135babe63f0c40fdb9eac7ede50e145cd512 | ["MIT"] | null | null | null |
from pycodimd import CodiMD
cmd = CodiMD('https://md.noemis.me')
#cmd.login('[email protected]','CorrectHorseBatteryStaple')
cmd.load_cookies()
print(cmd.history()[-1]['text']) # Print Name of latest Note
| 29.428571 | 61 | 0.73301 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 113 | 0.548544 |
293ac2ae42d575f893f18bae2751d93e4e138ae8 | 75 | py | Python | PP4E-Examples-1.4/Examples/PP4E/System/Environment/echoenv.py | AngelLiang/PP4E | 3a7f63b366e1e4700b4d2524884696999a87ba9d | ["MIT"] | null | null | null | PP4E-Examples-1.4/Examples/PP4E/System/Environment/echoenv.py | AngelLiang/PP4E | 3a7f63b366e1e4700b4d2524884696999a87ba9d | ["MIT"] | null | null | null | PP4E-Examples-1.4/Examples/PP4E/System/Environment/echoenv.py | AngelLiang/PP4E | 3a7f63b366e1e4700b4d2524884696999a87ba9d | ["MIT"] | null | null | null |
import os
print('echoenv...', end=' ')
print('Hello,', os.environ['USER'])
| 18.75 | 35 | 0.613333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 29 | 0.386667 |
293afc12acd3adc92103d2c686f2476332649203 | 4,137 | py | Python | plix/displays.py | freelan-developers/plix | 69114b3e522330af802800e09a432c1a84220f88 | ["MIT"] | 1 | 2017-05-22T11:52:01.000Z | 2017-05-22T11:52:01.000Z | plix/displays.py | freelan-developers/plix | 69114b3e522330af802800e09a432c1a84220f88 | ["MIT"] | 4 | 2015-03-12T16:59:36.000Z | 2015-03-12T17:34:15.000Z | plix/displays.py | freelan-developers/plix | 69114b3e522330af802800e09a432c1a84220f88 | ["MIT"] | 1 | 2018-03-04T21:43:33.000Z | 2018-03-04T21:43:33.000Z |
"""
Display command results.
"""
from __future__ import unicode_literals
from contextlib import contextmanager
from argparse import Namespace
from io import BytesIO
from colorama import AnsiToWin32
from chromalog.stream import stream_has_color_support
from chromalog.colorizer import Colorizer
from chromalog.mark.helpers.simple import (
warning,
important,
success,
error,
)
class BaseDisplay(object):
"""
Provides general display logic to its subclasses.
"""
@contextmanager
def command(self, index, command):
"""
Contextmanager that wraps calls to :func:`start_command` and
:func:`stop_command`.
:param index: The index of the command.
        :param command: The command that is about to be executed, as a unicode
string.
"""
self.start_command(
index=index,
command=command,
)
result = Namespace(returncode=None)
try:
yield result
finally:
self.stop_command(
index=index,
command=command,
returncode=result.returncode,
)
class StreamDisplay(BaseDisplay):
"""
Displays commands output to an output stream.
"""
def __init__(self, stream, colorizer=Colorizer()):
"""
Initialize the :class:`StreamDisplay`.
:param stream: The stream to be attached too.
"""
super(StreamDisplay, self).__init__()
self.colorizer = colorizer
self.output_map = {}
if stream_has_color_support(stream):
self.stream = AnsiToWin32(stream).stream
else:
self.stream = stream
# Python 3 differentiates binary streams.
if hasattr(stream, 'buffer'):
self.binary_stream = stream.buffer
else:
self.binary_stream = stream
def format_output(self, message, *args, **kwargs):
"""
Format some output in regards to the output stream color-capability.
:param message: A message.
:returns: The formatted message.
"""
if stream_has_color_support(self.stream):
return self.colorizer.colorize_message(message, *args, **kwargs)
else:
return message.format(*args, **kwargs)
def set_context(self, commands):
"""
Set the context for display.
:param commands: The list of commands to be executed.
"""
self.longest_len = max(map(len, commands))
def start_command(self, index, command):
"""
        Indicate that a command started.
:param index: The index of the command.
        :param command: The command that is about to be executed, as a unicode
string.
"""
self.stream.write(self.format_output(
"{}) {}",
warning(important(index + 1)),
command,
))
self.stream.flush()
self.output_map[index] = BytesIO()
def stop_command(self, index, command, returncode):
"""
Indicate that a command stopped.
:param index: The index of the command.
        :param command: The command that was executed, as a unicode string.
:param returncode: The exit status.
"""
self.stream.write(self.format_output(
"{}\t[{}]\n",
" " * (self.longest_len - len(command)),
success("success") if returncode == 0 else error("failed"),
))
if returncode != 0:
self.binary_stream.write(self.output_map[index].getvalue())
self.stream.write(self.format_output(
"{}) {} {}\n",
warning(important(index + 1)),
error("Command exited with"),
important(error(returncode)),
))
del self.output_map[index]
def command_output(self, index, data):
"""
Add some output for a command.
:param index: The index of the command.
:param data: The output data (as bytes).
"""
self.output_map[index].write(data)
| 27.397351 | 79 | 0.583273 | 3,735 | 0.902828 | 653 | 0.157844 | 673 | 0.162678 | 0 | 0 | 1,528 | 0.36935 |
293dd5d900ef2c6130d4549dd1b873aa939a8cba | 6,167 | py | Python | plugins/Autocomplete/plugin.py | mogad0n/Limnoria | f31e5c4b9a77e30918d6b93f69d69f3b8f910e3c | ["BSD-3-Clause"] | 476 | 2015-01-04T17:42:59.000Z | 2021-08-13T07:40:54.000Z | plugins/Autocomplete/plugin.py | mogad0n/Limnoria | f31e5c4b9a77e30918d6b93f69d69f3b8f910e3c | ["BSD-3-Clause"] | 491 | 2015-01-01T04:12:23.000Z | 2021-08-12T19:24:47.000Z | plugins/Autocomplete/plugin.py | mogad0n/Limnoria | f31e5c4b9a77e30918d6b93f69d69f3b8f910e3c | ["BSD-3-Clause"] | 203 | 2015-01-02T18:29:43.000Z | 2021-08-15T12:52:22.000Z |
###
# Copyright (c) 2020-2021, The Limnoria Contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from supybot import conf, ircutils, ircmsgs, callbacks
from supybot.i18n import PluginInternationalization
_ = PluginInternationalization("Autocomplete")
REQUEST_TAG = "+draft/autocomplete-request"
RESPONSE_TAG = "+draft/autocomplete-response"
def _commonPrefix(L):
"""Takes a list of lists, and returns their longest common prefix."""
assert L
if len(L) == 1:
return L[0]
for n in range(1, max(map(len, L)) + 1):
prefix = L[0][:n]
for item in L[1:]:
if prefix != item[:n]:
return prefix[0:-1]
assert False
def _getAutocompleteResponse(irc, msg, payload):
"""Returns the value of the +draft/autocomplete-response tag for the given
+draft/autocomplete-request payload."""
tokens = callbacks.tokenize(
payload, channel=msg.channel, network=irc.network
)
normalized_payload = " ".join(tokens)
candidate_commands = _getCandidates(irc, normalized_payload)
if len(candidate_commands) == 0:
# No result
return None
elif len(candidate_commands) == 1:
# One result, return it directly
commands = candidate_commands
else:
# Multiple results, return only the longest common prefix + one word
tokenized_candidates = [
callbacks.tokenize(c, channel=msg.channel, network=irc.network)
for c in candidate_commands
]
common_prefix = _commonPrefix(tokenized_candidates)
words_after_prefix = {
candidate[len(common_prefix)] for candidate in tokenized_candidates
}
commands = [
" ".join(common_prefix + [word]) for word in words_after_prefix
]
# strip what the user already typed
assert all(command.startswith(normalized_payload) for command in commands)
normalized_payload_length = len(normalized_payload)
response_items = [
command[normalized_payload_length:] for command in commands
]
return "\t".join(sorted(response_items))
def _getCandidates(irc, normalized_payload):
"""Returns a list of commands starting with the normalized_payload."""
candidates = set()
for cb in irc.callbacks:
cb_commands = cb.listCommands()
# copy them with the plugin name (optional when calling a command)
# at the beginning
plugin_name = cb.canonicalName()
cb_commands += [plugin_name + " " + command for command in cb_commands]
candidates |= {
command
for command in cb_commands
if command.startswith(normalized_payload)
}
return candidates
class Autocomplete(callbacks.Plugin):
"""Provides command completion for IRC clients that support it."""
def _enabled(self, irc, msg):
return (
conf.supybot.protocols.irc.experimentalExtensions()
and self.registryValue("enabled", msg.channel, irc.network)
)
def doTagmsg(self, irc, msg):
if REQUEST_TAG not in msg.server_tags:
return
if "msgid" not in msg.server_tags:
return
if not self._enabled(irc, msg):
return
msgid = msg.server_tags["msgid"]
text = msg.server_tags[REQUEST_TAG]
# using callbacks._addressed instead of callbacks.addressed, as
# callbacks.addressed would tag the m
payload = callbacks._addressed(irc, msg, payload=text)
if not payload:
# not addressed
return
# marks used by '_addressed' are usually prefixes (char, string,
# nick), but may also be suffixes (with
# supybot.reply.whenAddressedBy.nick.atEnd); but there is no way to
# have it in the middle of the message AFAIK.
assert payload in text
if not text.endswith(payload):
# If there is a suffix, it means the end of the text is used to
# address the bot, so it can't be a method to be completed.
return
autocomplete_response = _getAutocompleteResponse(irc, msg, payload)
if not autocomplete_response:
return
target = msg.channel or ircutils.nickFromHostmask(msg.prefix)
irc.queueMsg(
ircmsgs.IrcMsg(
server_tags={
"+draft/reply": msgid,
RESPONSE_TAG: autocomplete_response,
},
command="TAGMSG",
args=[target],
)
)
Class = Autocomplete
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| 34.071823 | 79 | 0.66564 | 1,852 | 0.300308 | 0 | 0 | 0 | 0 | 0 | 0 | 2,765 | 0.448354 |
293e981880ad85e96c9f610aaeaa19c42550d236 | 2,237 | py | Python | utils/preprocess_twitter.py | arnavk/tumblr-emotions | 0ed03201ab833b8b400cb0cff6c5b064fac5edfb | ["Apache-2.0"] | null | null | null | utils/preprocess_twitter.py | arnavk/tumblr-emotions | 0ed03201ab833b8b400cb0cff6c5b064fac5edfb | ["Apache-2.0"] | null | null | null | utils/preprocess_twitter.py | arnavk/tumblr-emotions | 0ed03201ab833b8b400cb0cff6c5b064fac5edfb | ["Apache-2.0"] | null | null | null |
"""
preprocess-twitter.py
python preprocess-twitter.py "Some random text with #hashtags, @mentions and http://t.co/kdjfkdjf (links). :)"
Script for preprocessing tweets by Romain Paulus
with small modifications by Jeffrey Pennington
with translation to Python by Motoki Wu
Translation of Ruby script to create features for GloVe vectors for Twitter data.
http://nlp.stanford.edu/projects/glove/preprocess-twitter.rb
"""
import sys
import regex as re
FLAGS = re.MULTILINE | re.DOTALL
def hashtag(text):
text = text.group()
hashtag_body = text[1:]
if hashtag_body.isupper():
result = " {} ".format(hashtag_body.lower())
else:
result = " ".join(["<hashtag>"] + re.split(r"(?=[A-Z])", hashtag_body, flags=FLAGS))
return result
def allcaps(text):
text = text.group()
return text.lower() + " <allcaps>"
def tokenize(text):
# Different regex parts for smiley faces
eyes = r"[8:=;]"
nose = r"['`\-]?"
# function so code less repetitive
def re_sub(pattern, repl):
return re.sub(pattern, repl, text, flags=FLAGS)
text = re_sub(r"https?:\/\/\S+\b|www\.(\w+\.)+\S*", "<url>")
text = re_sub(r"@\w+", "<user>")
text = re_sub(r"{}{}[)dD]+|[)dD]+{}{}".format(eyes, nose, nose, eyes), "<smile>")
text = re_sub(r"{}{}p+".format(eyes, nose), "<lolface>")
text = re_sub(r"{}{}\(+|\)+{}{}".format(eyes, nose, nose, eyes), "<sadface>")
text = re_sub(r"{}{}[\/|l*]".format(eyes, nose), "<neutralface>")
text = re_sub(r"/"," / ")
text = re_sub(r"<3","<heart>")
text = re_sub(r"[-+]?[.\d]*[\d]+[:,.\d]*", "<number>")
text = re_sub(r"#\S+", hashtag)
text = re_sub(r"([!?.]){2,}", r"\1 <repeat>")
text = re_sub(r"\b(\S*?)(.)\2{2,}\b", r"\1\2 <elong>")
## -- I just don't understand why the Ruby script adds <allcaps> to everything so I limited the selection.
# text = re_sub(r"([^a-z0-9()<>'`\-]){2,}", allcaps)
text = re_sub(r"([A-Z]){2,}", allcaps)
return text.lower()
if __name__ == '__main__':
_, text = sys.argv
if text == "test":
text = "I TEST alllll kinds of #hashtags and #HASHTAGS, @mentions and 3000 (http://t.co/dkfjkdf). w/ <3 :) haha!!!!!"
tokens = tokenize(text)
print(tokens)
| 34.415385 | 125 | 0.591417 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,155 | 0.516316 |
2940e9042fa0fc027376618fe6d76d1057e9e9bd | 37,124 | py | Python | pyPLANES/pw/pw_classes.py | matael/pyPLANES | 7f591090446303884c9a3d049e42233efae0b7f4 | ["MIT"] | null | null | null | pyPLANES/pw/pw_classes.py | matael/pyPLANES | 7f591090446303884c9a3d049e42233efae0b7f4 | ["MIT"] | null | null | null | pyPLANES/pw/pw_classes.py | matael/pyPLANES | 7f591090446303884c9a3d049e42233efae0b7f4 | ["MIT"] | 1 | 2020-12-15T16:24:08.000Z | 2020-12-15T16:24:08.000Z |
#! /usr/bin/env python
# -*- coding:utf8 -*-
#
# pw_classes.py
#
# This file is part of pyplanes, a software distributed under the MIT license.
# For any question, please contact one of the authors cited below.
#
# Copyright (c) 2020
# Olivier Dazel <[email protected]>
# Mathieu Gaborit <[email protected]>
# Peter Göransson <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
import numpy as np
import numpy.linalg as LA
import matplotlib.pyplot as plt
from mediapack import from_yaml
from mediapack import Air, PEM, EqFluidJCA
from pyPLANES.utils.io import initialisation_out_files_plain
from pyPLANES.core.calculus import PwCalculus
from pyPLANES.core.multilayer import MultiLayer
from pyPLANES.pw.pw_layers import FluidLayer
from pyPLANES.pw.pw_interfaces import FluidFluidInterface, RigidBacking
Air = Air()
# def initialise_PW_solver(L, b):
# nb_PW = 0
# dofs = []
# for _layer in L:
# if _layer.medium.MODEL == "fluid":
# dofs.append(nb_PW+np.arange(2))
# nb_PW += 2
# elif _layer.medium.MODEL == "pem":
# dofs.append(nb_PW+np.arange(6))
# nb_PW += 6
# elif _layer.medium.MODEL == "elastic":
# dofs.append(nb_PW+np.arange(4))
# nb_PW += 4
# interface = []
# for i_l, _layer in enumerate(L[:-1]):
# interface.append((L[i_l].medium.MODEL, L[i_l+1].medium.MODEL))
# return nb_PW, interface, dofs
class PwProblem(PwCalculus, MultiLayer):
"""
Plane Wave Problem
"""
def __init__(self, **kwargs):
PwCalculus.__init__(self, **kwargs)
termination = kwargs.get("termination","rigid")
        self.method = kwargs.get("method", "global")
MultiLayer.__init__(self, **kwargs)
self.kx, self.ky, self.k = None, None, None
self.shift_plot = kwargs.get("shift_pw", 0.)
self.plot = kwargs.get("plot_results", [False]*6)
self.result = {}
self.outfiles_directory = False
if self.method == "global":
self.layers.insert(0,FluidLayer(Air,1.e-2))
if self.layers[1].medium.MEDIUM_TYPE == "fluid":
self.interfaces.append(FluidFluidInterface(self.layers[0],self.layers[1]))
self.nb_PW = 0
for _layer in self.layers:
if _layer.medium.MODEL == "fluid":
_layer.dofs = self.nb_PW+np.arange(2)
self.nb_PW += 2
elif _layer.medium.MODEL == "pem":
_layer.dofs = self.nb_PW+np.arange(6)
self.nb_PW += 6
elif _layer.medium.MODEL == "elastic":
_layer.dofs = self.nb_PW+np.arange(4)
self.nb_PW += 4
def update_frequency(self, f):
PwCalculus.update_frequency(self, f)
MultiLayer.update_frequency(self, f, self.k, self.kx)
def create_linear_system(self, f):
self.A = np.zeros((self.nb_PW-1, self.nb_PW), dtype=complex)
i_eq = 0
# Loop on the interfaces
for _int in self.interfaces:
if self.method == "global":
i_eq = _int.update_M_global(self.A, i_eq)
# for i_inter, _inter in enumerate(self.interfaces):
# if _inter[0] == "fluid":
# if _inter[1] == "fluid":
# i_eq = self.interface_fluid_fluid(i_eq, i_inter, Layers, dofs, M)
# if _inter[1] == "pem":
# i_eq = self.interface_fluid_pem(i_eq, i_inter, Layers, dofs, M)
# if _inter[1] == "elastic":
# i_eq = self.interface_fluid_elastic(i_eq, i_inter, Layers, dofs, M)
# elif _inter[0] == "pem":
# if _inter[1] == "fluid":
# i_eq = self.interface_pem_fluid(i_eq, i_inter, Layers, dofs, M)
# if _inter[1] == "pem":
# i_eq = self.interface_pem_pem(i_eq, i_inter, Layers, dofs, M)
# if _inter[1] == "elastic":
# i_eq = self.interface_pem_elastic(i_eq, i_inter, Layers, dofs, M)
# elif _inter[0] == "elastic":
# if _inter[1] == "fluid":
# i_eq = self.interface_elastic_fluid(i_eq, i_inter, Layers, dofs, M)
# if _inter[1] == "pem":
# i_eq = self.interface_elastic_pem(i_eq, i_inter, Layers, dofs, M)
# if _inter[1] == "elastic":
# i_eq = self.interface_elastic_elastic(i_eq, i_inter, Layers, dofs, M)
# if self.backing == backing.rigid:
# if Layers[-1].medium.MODEL == "fluid":
# i_eq = self.interface_fluid_rigid(M, i_eq, Layers[-1], dofs[-1] )
# elif Layers[-1].medium.MODEL == "pem":
# i_eq = self.interface_pem_rigid(M, i_eq, Layers[-1], dofs[-1])
# elif Layers[-1].medium.MODEL == "elastic":
# i_eq = self.interface_elastic_rigid(M, i_eq, Layers[-1], dofs[-1])
# elif self.backing == "transmission":
# i_eq = self.semi_infinite_medium(M, i_eq, Layers[-1], dofs[-1] )
self.F = -self.A[:, 0]*np.exp(1j*self.ky*self.layers[0].d) # - is for transposition, exponential term is for the phase shift
self.A = np.delete(self.A, 0, axis=1)
# print(self.A)
X = LA.solve(self.A, self.F)
# print(X)
# R_pyPLANES_PW = X[0]
# if self.backing == "transmission":
# T_pyPLANES_PW = X[-2]
# else:
# T_pyPLANES_PW = 0.
# X = np.delete(X, 0)
# del(dofs[0])
# for i, _ld in enumerate(dofs):
# dofs[i] -= 2
# if self.plot:
# self.plot_sol_PW(X, dofs)
# out["R"] = R_pyPLANES_PW
# out["T"] = T_pyPLANES_PW
# return out
# class Solver_PW(PwCalculus):
# def __init__(self, **kwargs):
# PwCalculus.__init__(self, **kwargs)
# ml = kwargs.get("ml")
# termination = kwargs.get("termination")
# self.layers = []
# for _l in ml:
# if _l[0] == "Air":
# mat = Air
# else:
# mat = from_yaml(_l[0]+".yaml")
# d = _l[1]
# self.layers.append(Layer(mat,d))
# if termination in ["trans", "transmission","Transmission"]:
# self.backing = "Transmission"
# else:
# self.backing = backing.rigid
# self.kx, self.ky, self.k = None, None, None
# self.shift_plot = kwargs.get("shift_pw", 0.)
# self.plot = kwargs.get("plot_results", [False]*6)
# self.result = {}
# self.outfiles_directory = False
# initialisation_out_files_plain(self)
# def write_out_files(self, out):
# self.out_file.write("{:.12e}\t".format(self.current_frequency))
# abs = 1-np.abs(out["R"])**2
# self.out_file.write("{:.12e}\t".format(abs))
# self.out_file.write("\n")
# def interface_fluid_fluid(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = fluid_SV(self.kx, self.k, L[iinter].medium.K)
# SV_2, k_y_2 = fluid_SV(self.kx, self.k, L[iinter+1].medium.K)
# M[ieq, d[iinter+0][0]] = SV_1[0, 0]*np.exp(-1j*k_y_1*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[0, 1]
# M[ieq, d[iinter+1][0]] = -SV_2[0, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[0, 1]*np.exp(-1j*k_y_2*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = SV_1[1, 0]*np.exp(-1j*k_y_1*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[1, 1]
# M[ieq, d[iinter+1][0]] = -SV_2[1, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[1, 1]*np.exp(-1j*k_y_2*L[iinter+1].thickness)
# ieq += 1
# return ieq
# def interface_fluid_rigid(self, M, ieq, L, d):
# SV, k_y = fluid_SV(self.kx, self.k, L.medium.K)
# M[ieq, d[0]] = SV[0, 0]*np.exp(-1j*k_y*L.thickness)
# M[ieq, d[1]] = SV[0, 1]
# ieq += 1
# return ieq
# def semi_infinite_medium(self, M, ieq, L, d):
# M[ieq, d[1]] = 1.
# ieq += 1
# return ieq
# def interface_pem_pem(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = PEM_SV(L[iinter].medium, self.kx)
# SV_2, k_y_2 = PEM_SV(L[iinter+1].medium, self.kx)
# for _i in range(6):
# M[ieq, d[iinter+0][0]] = SV_1[_i, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[_i, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[_i, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = SV_1[_i, 3]
# M[ieq, d[iinter+0][4]] = SV_1[_i, 4]
# M[ieq, d[iinter+0][5]] = SV_1[_i, 5]
# M[ieq, d[iinter+1][0]] = -SV_2[_i, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[_i, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[_i, 2]
# M[ieq, d[iinter+1][3]] = -SV_2[_i, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = -SV_2[_i, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = -SV_2[_i, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# return ieq
# def interface_fluid_pem(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = fluid_SV(self.kx, self.k, L[iinter].medium.K)
# SV_2, k_y_2 = PEM_SV(L[iinter+1].medium,self.kx)
# # print(k_y_2)
# M[ieq, d[iinter+0][0]] = SV_1[0, 0]*np.exp(-1j*k_y_1*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[0, 1]
# M[ieq, d[iinter+1][0]] = -SV_2[2, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[2, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[2, 2]
# M[ieq, d[iinter+1][3]] = -SV_2[2, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = -SV_2[2, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = -SV_2[2, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = SV_1[1, 0]*np.exp(-1j*k_y_1*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[1, 1]
# M[ieq, d[iinter+1][0]] = -SV_2[4, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[4, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[4, 2]
# M[ieq, d[iinter+1][3]] = -SV_2[4, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = -SV_2[4, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = -SV_2[4, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+1][0]] = SV_2[0, 0]
# M[ieq, d[iinter+1][1]] = SV_2[0, 1]
# M[ieq, d[iinter+1][2]] = SV_2[0, 2]
# M[ieq, d[iinter+1][3]] = SV_2[0, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = SV_2[0, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = SV_2[0, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+1][0]] = SV_2[3, 0]
# M[ieq, d[iinter+1][1]] = SV_2[3, 1]
# M[ieq, d[iinter+1][2]] = SV_2[3, 2]
# M[ieq, d[iinter+1][3]] = SV_2[3, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = SV_2[3, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = SV_2[3, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# return ieq
# def interface_elastic_pem(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = elastic_SV(L[iinter].medium,self.kx, self.omega)
# SV_2, k_y_2 = PEM_SV(L[iinter+1].medium,self.kx)
# # print(k_y_2)
# M[ieq, d[iinter+0][0]] = -SV_1[0, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[0, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[0, 2]
# M[ieq, d[iinter+0][3]] = -SV_1[0, 3]
# M[ieq, d[iinter+1][0]] = SV_2[0, 0]
# M[ieq, d[iinter+1][1]] = SV_2[0, 1]
# M[ieq, d[iinter+1][2]] = SV_2[0, 2]
# M[ieq, d[iinter+1][3]] = SV_2[0, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = SV_2[0, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = SV_2[0, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = -SV_1[1, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[1, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[1, 2]
# M[ieq, d[iinter+0][3]] = -SV_1[1, 3]
# M[ieq, d[iinter+1][0]] = SV_2[1, 0]
# M[ieq, d[iinter+1][1]] = SV_2[1, 1]
# M[ieq, d[iinter+1][2]] = SV_2[1, 2]
# M[ieq, d[iinter+1][3]] = SV_2[1, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = SV_2[1, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = SV_2[1, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = -SV_1[1, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[1, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[1, 2]
# M[ieq, d[iinter+0][3]] = -SV_1[1, 3]
# M[ieq, d[iinter+1][0]] = SV_2[2, 0]
# M[ieq, d[iinter+1][1]] = SV_2[2, 1]
# M[ieq, d[iinter+1][2]] = SV_2[2, 2]
# M[ieq, d[iinter+1][3]] = SV_2[2, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = SV_2[2, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = SV_2[2, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = -SV_1[2, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[2, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[2, 2]
# M[ieq, d[iinter+0][3]] = -SV_1[2, 3]
# M[ieq, d[iinter+1][0]] = (SV_2[3, 0]-SV_2[4, 0])
# M[ieq, d[iinter+1][1]] = (SV_2[3, 1]-SV_2[4, 1])
# M[ieq, d[iinter+1][2]] = (SV_2[3, 2]-SV_2[4, 2])
# M[ieq, d[iinter+1][3]] = (SV_2[3, 3]-SV_2[4, 3])*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = (SV_2[3, 4]-SV_2[4, 4])*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = (SV_2[3, 5]-SV_2[4, 5])*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = -SV_1[3, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[3, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[3, 2]
# M[ieq, d[iinter+0][3]] = -SV_1[3, 3]
# M[ieq, d[iinter+1][0]] = SV_2[5, 0]
# M[ieq, d[iinter+1][1]] = SV_2[5, 1]
# M[ieq, d[iinter+1][2]] = SV_2[5, 2]
# M[ieq, d[iinter+1][3]] = SV_2[5, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = SV_2[5, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = SV_2[5, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# return ieq
# def interface_pem_elastic(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = PEM_SV(L[iinter].medium,self.kx)
# SV_2, k_y_2 = elastic_SV(L[iinter+1].medium,self.kx, self.omega)
# # print(k_y_2)
# M[ieq, d[iinter+0][0]] = SV_1[0, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[0, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[0, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = SV_1[0, 3]
# M[ieq, d[iinter+0][4]] = SV_1[0, 4]
# M[ieq, d[iinter+0][5]] = SV_1[0, 5]
# M[ieq, d[iinter+1][0]] = -SV_2[0, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[0, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[0, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = -SV_2[0, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = SV_1[1, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[1, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[1, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = SV_1[1, 3]
# M[ieq, d[iinter+0][4]] = SV_1[1, 4]
# M[ieq, d[iinter+0][5]] = SV_1[1, 5]
# M[ieq, d[iinter+1][0]] = -SV_2[1, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[1, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[1, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = -SV_2[1, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = SV_1[2, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[2, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[2, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = SV_1[2, 3]
# M[ieq, d[iinter+0][4]] = SV_1[2, 4]
# M[ieq, d[iinter+0][5]] = SV_1[2, 5]
# M[ieq, d[iinter+1][0]] = -SV_2[1, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[1, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[1, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = -SV_2[1, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = (SV_1[3, 0]-SV_1[4, 0])*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = (SV_1[3, 1]-SV_1[4, 1])*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = (SV_1[3, 2]-SV_1[4, 2])*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = (SV_1[3, 3]-SV_1[4, 3])
# M[ieq, d[iinter+0][4]] = (SV_1[3, 4]-SV_1[4, 4])
# M[ieq, d[iinter+0][5]] = (SV_1[3, 5]-SV_1[4, 5])
# M[ieq, d[iinter+1][0]] = -SV_2[2, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[2, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[2, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = -SV_2[2, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = SV_1[5, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[5, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[5, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = SV_1[5, 3]
# M[ieq, d[iinter+0][4]] = SV_1[5, 4]
# M[ieq, d[iinter+0][5]] = SV_1[5, 5]
# M[ieq, d[iinter+1][0]] = -SV_2[3, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[3, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[3, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = -SV_2[3, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# return ieq
# def interface_elastic_elastic(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = elastic_SV(L[iinter].medium,self.kx, self.omega)
# SV_2, k_y_2 = elastic_SV(L[iinter+1].medium,self.kx, self.omega)
# for _i in range(4):
# M[ieq, d[iinter+0][0]] = SV_1[_i, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[_i, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[_i, 2]
# M[ieq, d[iinter+0][3]] = SV_1[_i, 3]
# M[ieq, d[iinter+1][0]] = -SV_2[_i, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[_i, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[_i, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = -SV_2[_i, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# return ieq
# def interface_fluid_elastic(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = fluid_SV(self.kx, self.k, L[iinter].medium.K)
# SV_2, k_y_2 = elastic_SV(L[iinter+1].medium, self.kx, self.omega)
# # Continuity of u_y
# M[ieq, d[iinter+0][0]] = SV_1[0, 0]*np.exp(-1j*k_y_1*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[0, 1]
# M[ieq, d[iinter+1][0]] = -SV_2[1, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[1, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[1, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = -SV_2[1, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# # sigma_yy = -p
# M[ieq, d[iinter+0][0]] = SV_1[1, 0]*np.exp(-1j*k_y_1*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[1, 1]
# M[ieq, d[iinter+1][0]] = SV_2[2, 0]
# M[ieq, d[iinter+1][1]] = SV_2[2, 1]
# M[ieq, d[iinter+1][2]] = SV_2[2, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = SV_2[2, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# # sigma_xy = 0
# M[ieq, d[iinter+1][0]] = SV_2[0, 0]
# M[ieq, d[iinter+1][1]] = SV_2[0, 1]
# M[ieq, d[iinter+1][2]] = SV_2[0, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = SV_2[0, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# return ieq
# def interface_pem_fluid(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = PEM_SV(L[iinter].medium, self.kx)
# SV_2, k_y_2 = fluid_SV(self.kx, self.k, L[iinter+1].medium.K)
# # print(k_y_2)
# M[ieq, d[iinter+0][0]] = -SV_1[2, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[2, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[2, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = -SV_1[2, 3]
# M[ieq, d[iinter+0][4]] = -SV_1[2, 4]
# M[ieq, d[iinter+0][5]] = -SV_1[2, 5]
# M[ieq, d[iinter+1][0]] = SV_2[0, 0]
# M[ieq, d[iinter+1][1]] = SV_2[0, 1]*np.exp(-1j*k_y_2*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = -SV_1[4, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[4, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[4, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = -SV_1[4, 3]
# M[ieq, d[iinter+0][4]] = -SV_1[4, 4]
# M[ieq, d[iinter+0][5]] = -SV_1[4, 5]
# M[ieq, d[iinter+1][0]] = SV_2[1, 0]
# M[ieq, d[iinter+1][1]] = SV_2[1, 1]*np.exp(-1j*k_y_2*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = SV_1[0, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[0, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[0, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = SV_1[0, 3]
# M[ieq, d[iinter+0][4]] = SV_1[0, 4]
# M[ieq, d[iinter+0][5]] = SV_1[0, 5]
# ieq += 1
# M[ieq, d[iinter+0][0]] = SV_1[3, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[3, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[3, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = SV_1[3, 3]
# M[ieq, d[iinter+0][4]] = SV_1[3, 4]
# M[ieq, d[iinter+0][5]] = SV_1[3, 5]
# ieq += 1
# return ieq
# def interface_elastic_fluid(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = elastic_SV(L[iinter].medium, self.kx, self.omega)
# SV_2, k_y_2 = fluid_SV(self.kx, self.k, L[iinter+1].medium.K)
# # Continuity of u_y
# M[ieq, d[iinter+0][0]] = -SV_1[1, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[1, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[1, 2]
# M[ieq, d[iinter+0][3]] = -SV_1[1, 3]
# M[ieq, d[iinter+1][0]] = SV_2[0, 0]
# M[ieq, d[iinter+1][1]] = SV_2[0, 1]*np.exp(-1j*k_y_2*L[iinter+1].thickness)
# ieq += 1
# # sigma_yy = -p
# M[ieq, d[iinter+0][0]] = SV_1[2, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[2, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[2, 2]
# M[ieq, d[iinter+0][3]] = SV_1[2, 3]
# M[ieq, d[iinter+1][0]] = SV_2[1, 0]
# M[ieq, d[iinter+1][1]] = SV_2[1, 1]*np.exp(-1j*k_y_2*L[iinter+1].thickness)
# ieq += 1
# # sigma_xy = 0
# M[ieq, d[iinter+0][0]] = SV_1[0, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[0, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[0, 2]
# M[ieq, d[iinter+0][3]] = SV_1[0, 3]
# ieq += 1
# return ieq
# def interface_elastic_rigid(self, M, ieq, L, d):
# SV, k_y = elastic_SV(L.medium,self.kx, self.omega)
# M[ieq, d[0]] = SV[1, 0]*np.exp(-1j*k_y[0]*L.thickness)
# M[ieq, d[1]] = SV[1, 1]*np.exp(-1j*k_y[1]*L.thickness)
# M[ieq, d[2]] = SV[1, 2]
# M[ieq, d[3]] = SV[1, 3]
# ieq += 1
# M[ieq, d[0]] = SV[3, 0]*np.exp(-1j*k_y[0]*L.thickness)
# M[ieq, d[1]] = SV[3, 1]*np.exp(-1j*k_y[1]*L.thickness)
# M[ieq, d[2]] = SV[3, 2]
# M[ieq, d[3]] = SV[3, 3]
# ieq += 1
# return ieq
# def interface_pem_rigid(self, M, ieq, L, d):
# SV, k_y = PEM_SV(L.medium, self.kx)
# M[ieq, d[0]] = SV[1, 0]*np.exp(-1j*k_y[0]*L.thickness)
# M[ieq, d[1]] = SV[1, 1]*np.exp(-1j*k_y[1]*L.thickness)
# M[ieq, d[2]] = SV[1, 2]*np.exp(-1j*k_y[2]*L.thickness)
# M[ieq, d[3]] = SV[1, 3]
# M[ieq, d[4]] = SV[1, 4]
# M[ieq, d[5]] = SV[1, 5]
# ieq += 1
# M[ieq, d[0]] = SV[2, 0]*np.exp(-1j*k_y[0]*L.thickness)
# M[ieq, d[1]] = SV[2, 1]*np.exp(-1j*k_y[1]*L.thickness)
# M[ieq, d[2]] = SV[2, 2]*np.exp(-1j*k_y[2]*L.thickness)
# M[ieq, d[3]] = SV[2, 3]
# M[ieq, d[4]] = SV[2, 4]
# M[ieq, d[5]] = SV[2, 5]
# ieq += 1
# M[ieq, d[0]] = SV[5, 0]*np.exp(-1j*k_y[0]*L.thickness)
# M[ieq, d[1]] = SV[5, 1]*np.exp(-1j*k_y[1]*L.thickness)
# M[ieq, d[2]] = SV[5, 2]*np.exp(-1j*k_y[2]*L.thickness)
# M[ieq, d[3]] = SV[5, 3]
# M[ieq, d[4]] = SV[5, 4]
# M[ieq, d[5]] = SV[5, 5]
# ieq += 1
# return ieq
# def plot_sol_PW(self, X, dofs):
# x_start = self.shift_plot
# for _l, _layer in enumerate(self.layers):
# x_f = np.linspace(0, _layer.thickness,200)
# x_b = x_f-_layer.thickness
# if _layer.medium.MODEL == "fluid":
# SV, k_y = fluid_SV(self.kx, self.k, _layer.medium.K)
# pr = SV[1, 0]*np.exp(-1j*k_y*x_f)*X[dofs[_l][0]]
# pr += SV[1, 1]*np.exp( 1j*k_y*x_b)*X[dofs[_l][1]]
# ut = SV[0, 0]*np.exp(-1j*k_y*x_f)*X[dofs[_l][0]]
# ut += SV[0, 1]*np.exp( 1j*k_y*x_b)*X[dofs[_l][1]]
# if self.plot[2]:
# plt.figure(2)
# plt.plot(x_start+x_f, np.abs(pr), 'r')
# plt.plot(x_start+x_f, np.imag(pr), 'm')
# plt.title("Pressure")
# # plt.figure(5)
# # plt.plot(x_start+x_f,np.abs(ut),'b')
# # plt.plot(x_start+x_f,np.imag(ut),'k')
# if _layer.medium.MODEL == "pem":
# SV, k_y = PEM_SV(_layer.medium, self.kx)
# ux, uy, pr, ut = 0*1j*x_f, 0*1j*x_f, 0*1j*x_f, 0*1j*x_f
# for i_dim in range(3):
# ux += SV[1, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]]
# ux += SV[1, i_dim+3]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+3]]
# uy += SV[5, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]]
# uy += SV[5, i_dim+3]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+3]]
# pr += SV[4, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]]
# pr += SV[4, i_dim+3]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+3]]
# ut += SV[2, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]]
# ut += SV[2, i_dim+3]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+3]]
# if self.plot[0]:
# plt.figure(0)
# plt.plot(x_start+x_f, np.abs(uy), 'r')
# plt.plot(x_start+x_f, np.imag(uy), 'm')
# plt.title("Solid displacement along x")
# if self.plot[1]:
# plt.figure(1)
# plt.plot(x_start+x_f, np.abs(ux), 'r')
# plt.plot(x_start+x_f, np.imag(ux), 'm')
# plt.title("Solid displacement along y")
# if self.plot[2]:
# plt.figure(2)
# plt.plot(x_start+x_f, np.abs(pr), 'r')
# plt.plot(x_start+x_f, np.imag(pr), 'm')
# plt.title("Pressure")
# if _layer.medium.MODEL == "elastic":
# SV, k_y = elastic_SV(_layer.medium, self.kx, self.omega)
# ux, uy, pr, sig = 0*1j*x_f, 0*1j*x_f, 0*1j*x_f, 0*1j*x_f
# for i_dim in range(2):
# ux += SV[1, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]]
# ux += SV[1, i_dim+2]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+2]]
# uy += SV[3, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]]
# uy += SV[3, i_dim+2]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+2]]
# pr -= SV[2, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]]
# pr -= SV[2, i_dim+2]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+2]]
# sig -= SV[0, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]]
# sig -= SV[0, i_dim+2]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+2]]
# if self.plot[0]:
# plt.figure(0)
# plt.plot(x_start+x_f, np.abs(uy), 'r')
# plt.plot(x_start+x_f, np.imag(uy), 'm')
# plt.title("Solid displacement along x")
# if self.plot[1]:
# plt.figure(1)
# plt.plot(x_start+x_f, np.abs(ux), 'r')
# plt.plot(x_start+x_f, np.imag(ux), 'm')
# plt.title("Solid displacement along y")
# # if self.plot[2]:
# # plt.figure(2)
# # plt.plot(x_start+x_f, np.abs(pr), 'r')
# # plt.plot(x_start+x_f, np.imag(pr), 'm')
# # plt.title("Sigma_yy")
# # if self.plot[2]:
# # plt.figure(3)
# # plt.plot(x_start+x_f, np.abs(sig), 'r')
# # plt.plot(x_start+x_f, np.imag(sig), 'm')
# # plt.title("Sigma_xy")
# x_start += _layer.thickness
# def PEM_SV(mat,ky):
# ''' S={0:\hat{\sigma}_{xy}, 1:u_y^s, 2:u_y^t, 3:\hat{\sigma}_{yy}, 4:p, 5:u_x^s}'''
# kx_1 = np.sqrt(mat.delta_1**2-ky**2)
# kx_2 = np.sqrt(mat.delta_2**2-ky**2)
# kx_3 = np.sqrt(mat.delta_3**2-ky**2)
# kx = np.array([kx_1, kx_2, kx_3])
# delta = np.array([mat.delta_1, mat.delta_2, mat.delta_3])
# alpha_1 = -1j*mat.A_hat*mat.delta_1**2-1j*2*mat.N*kx[0]**2
# alpha_2 = -1j*mat.A_hat*mat.delta_2**2-1j*2*mat.N*kx[1]**2
# alpha_3 = -2*1j*mat.N*kx[2]*ky
# SV = np.zeros((6,6), dtype=complex)
# SV[0:6, 0] = np.array([-2*1j*mat.N*kx[0]*ky, kx[0], mat.mu_1*kx[0], alpha_1, 1j*delta[0]**2*mat.K_eq_til*mat.mu_1, ky])
# SV[0:6, 3] = np.array([ 2*1j*mat.N*kx[0]*ky,-kx[0],-mat.mu_1*kx[0], alpha_1, 1j*delta[0]**2*mat.K_eq_til*mat.mu_1, ky])
# SV[0:6, 1] = np.array([-2*1j*mat.N*kx[1]*ky, kx[1], mat.mu_2*kx[1],alpha_2, 1j*delta[1]**2*mat.K_eq_til*mat.mu_2, ky])
# SV[0:6, 4] = np.array([ 2*1j*mat.N*kx[1]*ky,-kx[1],-mat.mu_2*kx[1],alpha_2, 1j*delta[1]**2*mat.K_eq_til*mat.mu_2, ky])
# SV[0:6, 2] = np.array([1j*mat.N*(kx[2]**2-ky**2), ky, mat.mu_3*ky, alpha_3, 0., -kx[2]])
# SV[0:6, 5] = np.array([1j*mat.N*(kx[2]**2-ky**2), ky, mat.mu_3*ky, -alpha_3, 0., kx[2]])
# return SV, kx
# def elastic_SV(mat,ky, omega):
# ''' S={0:\sigma_{xy}, 1: u_y, 2 \sigma_{yy}, 3 u_x}'''
# P_mat = mat.lambda_ + 2.*mat.mu
# delta_p = omega*np.sqrt(mat.rho/P_mat)
# delta_s = omega*np.sqrt(mat.rho/mat.mu)
# kx_p = np.sqrt(delta_p**2-ky**2)
# kx_s = np.sqrt(delta_s**2-ky**2)
# kx = np.array([kx_p, kx_s])
# alpha_p = -1j*mat.lambda_*delta_p**2 - 2j*mat.mu*kx[0]**2
# alpha_s = 2j*mat.mu*kx[1]*ky
# SV = np.zeros((4, 4), dtype=np.complex)
# SV[0:4, 0] = np.array([-2.*1j*mat.mu*kx[0]*ky, kx[0], alpha_p, ky])
# SV[0:4, 2] = np.array([ 2.*1j*mat.mu*kx[0]*ky, -kx[0], alpha_p, ky])
# SV[0:4, 1] = np.array([1j*mat.mu*(kx[1]**2-ky**2), ky,-alpha_s, -kx[1]])
# SV[0:4, 3] = np.array([1j*mat.mu*(kx[1]**2-ky**2), ky, alpha_s, kx[1]])
# return SV, kx
# def fluid_SV(kx, k, K):
# ''' S={0:u_y , 1:p}'''
# ky = np.sqrt(k**2-kx**2)
# SV = np.zeros((2, 2), dtype=complex)
# SV[0, 0:2] = np.array([ky/(1j*K*k**2), -ky/(1j*K*k**2)])
# SV[1, 0:2] = np.array([1, 1])
# return SV, ky
# def resolution_PW_imposed_displacement(S, p):
# # print("k={}".format(p.k))
# Layers = S.layers.copy()
# n, interfaces, dofs = initialise_PW_solver(Layers, S.backing)
# M = np.zeros((n, n), dtype=complex)
# i_eq = 0
# # Loop on the layers
# for i_inter, _inter in enumerate(interfaces):
# if _inter[0] == "fluid":
# if _inter[1] == "fluid":
# i_eq = interface_fluid_fluid(i_eq, i_inter, Layers, dofs, M, p)
# if _inter[1] == "pem":
# i_eq = interface_fluid_pem(i_eq, i_inter, Layers, dofs, M, p)
# elif _inter[0] == "pem":
# if _inter[1] == "fluid":
# i_eq = interface_pem_fluid(i_eq, i_inter, Layers, dofs, M, p)
# if _inter[1] == "pem":
# i_eq = interface_pem_pem(i_eq, i_inter, Layers, dofs, M, p)
# if S.backing == backing.rigid:
# if Layers[-1].medium.MODEL == "fluid":
# i_eq = interface_fluid_rigid(M, i_eq, Layers[-1], dofs[-1], p)
# elif Layers[-1].medium.MODEL == "pem":
# i_eq = interface_pem_rigid(M, i_eq, Layers[-1], dofs[-1], p)
# if Layers[0].medium.MODEL == "fluid":
# F = np.zeros(n, dtype=complex)
# SV, k_y = fluid_SV(p.kx, p.k, Layers[0].medium.K)
# M[i_eq, dofs[0][0]] = SV[0, 0]
# M[i_eq, dofs[0][1]] = SV[0, 1]*np.exp(-1j*k_y*Layers[0].thickness)
# F[i_eq] = 1.
# elif Layers[0].medium.MODEL == "pem":
# SV, k_y = PEM_SV(Layers[0].medium, p.kx)
# M[i_eq, dofs[0][0]] = SV[2, 0]
# M[i_eq, dofs[0][1]] = SV[2, 1]
# M[i_eq, dofs[0][2]] = SV[2, 2]
# M[i_eq, dofs[0][3]] = SV[2, 3]*np.exp(-1j*k_y[0]*Layers[0].thickness)
# M[i_eq, dofs[0][4]] = SV[2, 4]*np.exp(-1j*k_y[1]*Layers[0].thickness)
# M[i_eq, dofs[0][5]] = SV[2, 5]*np.exp(-1j*k_y[2]*Layers[0].thickness)
# F = np.zeros(n, dtype=complex)
# F[i_eq] = 1.
# i_eq +=1
# M[i_eq, dofs[0][0]] = SV[0, 0]
# M[i_eq, dofs[0][1]] = SV[0, 1]
# M[i_eq, dofs[0][2]] = SV[0, 2]
# M[i_eq, dofs[0][3]] = SV[0, 3]*np.exp(-1j*k_y[0]*Layers[0].thickness)
# M[i_eq, dofs[0][4]] = SV[0, 4]*np.exp(-1j*k_y[1]*Layers[0].thickness)
# M[i_eq, dofs[0][5]] = SV[0, 5]*np.exp(-1j*k_y[2]*Layers[0].thickness)
# i_eq += 1
# M[i_eq, dofs[0][0]] = SV[3, 0]
# M[i_eq, dofs[0][1]] = SV[3, 1]
# M[i_eq, dofs[0][2]] = SV[3, 2]
# M[i_eq, dofs[0][3]] = SV[3, 3]*np.exp(-1j*k_y[0]*Layers[0].thickness)
# M[i_eq, dofs[0][4]] = SV[3, 4]*np.exp(-1j*k_y[1]*Layers[0].thickness)
# M[i_eq, dofs[0][5]] = SV[3, 5]*np.exp(-1j*k_y[2]*Layers[0].thickness)
# X = LA.solve(M, F)
# # print("|R pyPLANES_PW| = {}".format(np.abs(X[0])))
# print("R pyPLANES_PW = {}".format(X[0]))
# plot_sol_PW(S, X, dofs, p)
| 48.911726 | 132 | 0.502721 | 4,392 | 0.118303 | 0 | 0 | 0 | 0 | 0 | 0 | 33,888 | 0.912808 |
29419686dd2aebba28a504da3cc741b420dcf049 | 9,001 | py | Python | mmtbx/conformation_dependent_library/mcl.py | pcxod/cctbx_project | d43dfb157cd7432292b30c0329b7491d5a356657 | ["BSD-3-Clause-LBNL"] | null | null | null | mmtbx/conformation_dependent_library/mcl.py | pcxod/cctbx_project | d43dfb157cd7432292b30c0329b7491d5a356657 | ["BSD-3-Clause-LBNL"] | 1 | 2020-05-26T17:46:17.000Z | 2020-05-26T17:55:19.000Z | mmtbx/conformation_dependent_library/mcl.py | pcxod/cctbx_project | d43dfb157cd7432292b30c0329b7491d5a356657 | ["BSD-3-Clause-LBNL"] | 1 | 2022-02-08T10:11:07.000Z | 2022-02-08T10:11:07.000Z |
from __future__ import absolute_import, division, print_function
import sys
import time
from cctbx.array_family import flex
from scitbx.math import superpose
from mmtbx.conformation_dependent_library import mcl_sf4_coordination
from six.moves import range
from mmtbx.conformation_dependent_library import metal_coordination_library
def get_pdb_hierarchy_from_restraints(code):
from mmtbx.monomer_library import server
from iotbx import pdb
mon_lib_server = server.server()
path = mon_lib_server.get_comp_comp_id_direct(code, return_filename=True)
cif_obj = server.read_cif(path)
ligand_inp=pdb.pdb_input(source_info="Model from %s" % path,
lines=flex.split_lines(""))
ligand_hierarchy = ligand_inp.construct_hierarchy()
model=pdb.hierarchy.model()
chain=pdb.hierarchy.chain()
chain.id='Z'
rg=pdb.hierarchy.residue_group()
ag=pdb.hierarchy.atom_group()
for block, loops in cif_obj.blocks.items():
if block=='comp_list': continue
for loop in loops.iterloops():
for row in loop.iterrows():
if '_chem_comp_atom.comp_id' not in row: break
ag.resname = row['_chem_comp_atom.comp_id']
atom = pdb.hierarchy.atom()
atom.name = row['_chem_comp_atom.atom_id']
atom.element = '%2s' % row['_chem_comp_atom.type_symbol']
atom.xyz = (
float(row['_chem_comp_atom.x']),
float(row['_chem_comp_atom.y']),
float(row['_chem_comp_atom.z']),
)
ag.append_atom(atom)
rg.append_atom_group(ag)
chain.append_residue_group(rg)
model.append_chain(chain)
ligand_hierarchy.append_model(model)
ligand_hierarchy.atoms().reset_i_seq()
return ligand_hierarchy
def update(grm,
pdb_hierarchy,
link_records=None,
log=sys.stdout,
verbose=False,
):
def _atom_id(a, show_i_seq=False):
if show_i_seq:
return '%s (%5d)' % (a.id_str(), a.i_seq)
else:
return '%s' % (a.id_str())
if link_records is None: link_records={}
link_records.setdefault('LINK', [])
hooks = [
["Iron sulfur cluster coordination",
mcl_sf4_coordination.get_sulfur_iron_cluster_coordination,
mcl_sf4_coordination.get_all_proxies,
],
['Zn2+ tetrahedral coordination',
metal_coordination_library.get_metal_coordination_proxies,
metal_coordination_library.get_proxies,
],
]
outl = ''
outl_debug = ''
for label, get_coordination, get_all_proxies in hooks:
rc = get_coordination(
pdb_hierarchy=pdb_hierarchy,
nonbonded_proxies=grm.pair_proxies(
sites_cart=pdb_hierarchy.atoms().extract_xyz()).nonbonded_proxies,
verbose=verbose,
)
bproxies, aproxies = get_all_proxies(rc)
if bproxies is None: continue
if len(bproxies):
outl += ' %s\n' % label
outl += ' %s\n' % label
atoms = pdb_hierarchy.atoms()
sf4_coordination = {}
for bp in bproxies:
sf4_ag = atoms[bp.i_seqs[0]].parent()
sf4_coordination.setdefault(sf4_ag.id_str(), [])
sf4_coordination[sf4_ag.id_str()].append((atoms[bp.i_seqs[0]],
atoms[bp.i_seqs[1]]))
link = (atoms[bp.i_seqs[0]], atoms[bp.i_seqs[1]], 'x,y,z')
if link not in link_records: link_records['LINK'].append(link)
for sf4, aas in sorted(sf4_coordination.items()):
outl += '%spdb="%s"\n' % (' '*6, sf4)
outl_debug += '%spdb="%s"\n' % (' '*6, sf4)
for aa in sorted(aas):
outl += '%s%s - %s\n' % (' '*8, _atom_id(aa[0]), _atom_id(aa[1]))
outl_debug += '%s%s - %s\n' % (' '*8,
_atom_id(aa[0], True),
_atom_id(aa[1], True))
if bproxies:
try:
grm.add_new_bond_restraints_in_place(
proxies=bproxies,
sites_cart=pdb_hierarchy.atoms().extract_xyz(),
)
except RuntimeError as e:
print('\n\n%s' % outl_debug)
raise e
#
done = []
remove = []
for i, angle in enumerate(aproxies):
i_seqs = list(angle.i_seqs)
i_seqs.sort()
if i_seqs in done:
remove.append(i)
else:
done.append(i_seqs)
if remove:
remove.reverse()
for r in remove:
del aproxies[r]
#
if aproxies:
outl += '%s%s' % (' '*6, 'Number of angles added : %d\n' % len(aproxies))
grm.add_angles_in_place(aproxies)
if outl:
print(' Dynamic metal coordination', file=log)
print(outl, file=log)
def _extract_sites_cart(ag, element=None):
selection = []
for atom in ag.atoms():
if element and atom.element.upper().strip()!=element.upper().strip():
continue
selection.append(atom.xyz)
return flex.vec3_double(selection)
def generate_sites_fixed(pdb_hierarchy, resname, element=None):
for ag in pdb_hierarchy.atom_groups():
if ag.resname.strip().upper()==resname.upper():
yield _extract_sites_cart(ag, element), ag
def superpose_ideal_residue_coordinates(pdb_hierarchy,
resname,
superpose_element=None,
):
element_lookup = {'SF4' : 'Fe',
'F3S' : 'S',
#'F4S' : 'S', # not done yet
#'CLF' : 'Fe', # too flexible
'DVT' : 'V',
}
from mmtbx.monomer_library import pdb_interpretation
t0=time.time()
rmsd_list = {}
if superpose_element is None:
superpose_element = element_lookup.get(resname, None)
if resname in pdb_interpretation.ideal_ligands:
ideal_hierarchy = get_pdb_hierarchy_from_restraints(resname)
else:
assert 0
sites_moving = _extract_sites_cart(ideal_hierarchy, superpose_element)
assert len(sites_moving), 'No atoms %s found' % superpose_element
for ideal_ag in ideal_hierarchy.atom_groups(): break
for sites_fixed, ag in generate_sites_fixed(pdb_hierarchy,
resname,
superpose_element,
):
assert sites_fixed.size() == sites_moving.size(), '%(resname)s residue is missing atoms' % locals()
import random
min_rmsd = 1e9
min_sites_cart = None
for i in range(100):
random.shuffle(sites_moving)
lsq_fit = superpose.least_squares_fit(
reference_sites = sites_fixed,
other_sites = sites_moving)
new_atoms = ideal_ag.detached_copy().atoms()
sites_new = new_atoms.extract_xyz()
sites_new = lsq_fit.r.elems * sites_new + lsq_fit.t.elems
rmsd = sites_fixed.rms_difference(lsq_fit.other_sites_best_fit())
if rmsd<min_rmsd:
min_rmsd=rmsd
min_sites_cart = sites_new
rmsd_list[ag.id_str()] = min_rmsd
sites_new = min_sites_cart
new_atoms.set_xyz(sites_new)
for atom1 in ag.atoms():
for atom2 in new_atoms:
if atom1.name.strip()==atom2.name.strip():
atom1.xyz=atom2.xyz
break
else:
assert 0, 'not all atoms updated - missing %s' % atom1.quote()
outl = ''
if rmsd_list:
outl = '\n %(resname)s Regularisation' % locals()
outl+= '\n residue rmsd'
for id_str, rmsd in sorted(rmsd_list.items()):
outl += '\n "%s" %0.1f' % (id_str, rmsd)
outl += '\n Time to superpose : %0.2fs\n' % (time.time()-t0)
return outl
def superpose_ideal_ligand_on_poor_ligand(ideal_hierarchy,
poor_hierarchy,
):
"""Function superpose an ideal ligand onto the mangled ligand from a
ligand fitting procedure
Args:
ideal_hierarchy (pdb_hierarchy): Ideal ligand
poor_hierarchy (pdb_hierarchy): Poor ligand with correct c.o.m. and same
atom names in order. Could become more sophisticated.
"""
sites_moving = flex.vec3_double()
sites_fixed = flex.vec3_double()
for atom1, atom2 in zip(ideal_hierarchy.atoms(), poor_hierarchy.atoms()):
assert atom1.name==atom2.name, '%s!=%s' % (atom1.quote(),atom2.quote())
sites_moving.append(atom1.xyz)
sites_fixed.append(atom2.xyz)
lsq_fit = superpose.least_squares_fit(
reference_sites = sites_fixed,
other_sites = sites_moving)
sites_new = ideal_hierarchy.atoms().extract_xyz()
sites_new = lsq_fit.r.elems * sites_new + lsq_fit.t.elems
# rmsd = sites_fixed.rms_difference(lsq_fit.other_sites_best_fit())
ideal_hierarchy.atoms().set_xyz(sites_new)
return ideal_hierarchy
if __name__=="__main__":
from iotbx import pdb
ideal_inp=pdb.pdb_input(sys.argv[1])
ideal_hierarchy = ideal_inp.construct_hierarchy()
poor_inp=pdb.pdb_input(sys.argv[2])
poor_hierarchy = poor_inp.construct_hierarchy()
ideal_hierarchy = superpose_ideal_ligand_on_poor_ligand(ideal_hierarchy, poor_hierarchy)
ideal_hierarchy.write_pdb_file('new.pdb')
| 37.504167 | 103 | 0.63093 | 0 | 0 | 205 | 0.022775 | 0 | 0 | 0 | 0 | 1,155 | 0.128319 |
294225b79ce42a07375fda887c5ff1ca0b02cbd1 | 15,778 | py | Python | tests/test_install.py | dfroger/conda | c0f99ff46b217d081501e66f4dcd7bcdb5d9c6aa | ["BSD-3-Clause"] | null | null | null | tests/test_install.py | dfroger/conda | c0f99ff46b217d081501e66f4dcd7bcdb5d9c6aa | ["BSD-3-Clause"] | null | null | null | tests/test_install.py | dfroger/conda | c0f99ff46b217d081501e66f4dcd7bcdb5d9c6aa | ["BSD-3-Clause"] | null | null | null |
from contextlib import contextmanager
import random
import shutil
import stat
import tempfile
import unittest
from os.path import join
from conda import install
from conda.install import (PaddingError, binary_replace, update_prefix,
warn_failed_remove, duplicates_to_remove)
from .decorators import skip_if_no_mock
from .helpers import mock
patch = mock.patch if mock else None
def generate_random_path():
return '/some/path/to/file%s' % random.randint(100, 200)
class TestBinaryReplace(unittest.TestCase):
def test_simple(self):
self.assertEqual(
binary_replace(b'xxxaaaaaxyz\x00zz', b'aaaaa', b'bbbbb'),
b'xxxbbbbbxyz\x00zz')
def test_shorter(self):
self.assertEqual(
binary_replace(b'xxxaaaaaxyz\x00zz', b'aaaaa', b'bbbb'),
b'xxxbbbbxyz\x00\x00zz')
def test_too_long(self):
self.assertRaises(PaddingError, binary_replace,
b'xxxaaaaaxyz\x00zz', b'aaaaa', b'bbbbbbbb')
def test_no_extra(self):
self.assertEqual(binary_replace(b'aaaaa\x00', b'aaaaa', b'bbbbb'),
b'bbbbb\x00')
def test_two(self):
self.assertEqual(
binary_replace(b'aaaaa\x001234aaaaacc\x00\x00', b'aaaaa',
b'bbbbb'),
b'bbbbb\x001234bbbbbcc\x00\x00')
def test_spaces(self):
self.assertEqual(
binary_replace(b' aaaa \x00', b'aaaa', b'bbbb'),
b' bbbb \x00')
def test_multiple(self):
self.assertEqual(
binary_replace(b'aaaacaaaa\x00', b'aaaa', b'bbbb'),
b'bbbbcbbbb\x00')
self.assertEqual(
binary_replace(b'aaaacaaaa\x00', b'aaaa', b'bbb'),
b'bbbcbbb\x00\x00\x00')
self.assertRaises(PaddingError, binary_replace,
b'aaaacaaaa\x00', b'aaaa', b'bbbbb')
class FileTests(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
self.tmpfname = join(self.tmpdir, 'testfile')
def tearDown(self):
shutil.rmtree(self.tmpdir)
def test_default_text(self):
with open(self.tmpfname, 'w') as fo:
fo.write('#!/opt/anaconda1anaconda2anaconda3/bin/python\n'
'echo "Hello"\n')
update_prefix(self.tmpfname, '/usr/local')
with open(self.tmpfname, 'r') as fi:
data = fi.read()
self.assertEqual(data, '#!/usr/local/bin/python\n'
'echo "Hello"\n')
def test_binary(self):
with open(self.tmpfname, 'wb') as fo:
fo.write(b'\x7fELF.../some-placeholder/lib/libfoo.so\0')
update_prefix(self.tmpfname, '/usr/local',
placeholder='/some-placeholder', mode='binary')
with open(self.tmpfname, 'rb') as fi:
data = fi.read()
self.assertEqual(
data,
b'\x7fELF.../usr/local/lib/libfoo.so\0\0\0\0\0\0\0\0'
)
class remove_readonly_TestCase(unittest.TestCase):
def test_takes_three_args(self):
with self.assertRaises(TypeError):
install._remove_readonly()
with self.assertRaises(TypeError):
install._remove_readonly(True)
with self.assertRaises(TypeError):
install._remove_readonly(True, True)
with self.assertRaises(TypeError):
install._remove_readonly(True, True, True, True)
@skip_if_no_mock
def test_calls_os_chmod(self):
some_path = generate_random_path()
with patch.object(install.os, 'chmod') as chmod:
install._remove_readonly(mock.Mock(), some_path, {})
chmod.assert_called_with(some_path, stat.S_IWRITE)
@skip_if_no_mock
def test_calls_func(self):
some_path = generate_random_path()
func = mock.Mock()
with patch.object(install.os, 'chmod'):
install._remove_readonly(func, some_path, {})
func.assert_called_with(some_path)
class rm_rf_file_and_link_TestCase(unittest.TestCase):
@contextmanager
def generate_mock_islink(self, value):
with patch.object(install, 'islink', return_value=value) as islink:
yield islink
@contextmanager
def generate_mock_isdir(self, value):
with patch.object(install, 'isdir', return_value=value) as isdir:
yield isdir
@contextmanager
def generate_mock_isfile(self, value):
with patch.object(install, 'isfile', return_value=value) as isfile:
yield isfile
@contextmanager
def generate_mock_os_access(self, value):
with patch.object(install.os, 'access', return_value=value) as os_access:
yield os_access
@contextmanager
def generate_mock_unlink(self):
with patch.object(install.os, 'unlink') as unlink:
yield unlink
@contextmanager
def generate_mock_rmtree(self):
with patch.object(install.shutil, 'rmtree') as rmtree:
yield rmtree
@contextmanager
def generate_mock_sleep(self):
with patch.object(install.time, 'sleep') as sleep:
yield sleep
@contextmanager
def generate_mock_log(self):
with patch.object(install, 'log') as log:
yield log
@contextmanager
def generate_mock_on_win(self, value):
original = install.on_win
install.on_win = value
yield
install.on_win = original
@contextmanager
def generate_mock_check_call(self):
with patch.object(install.subprocess, 'check_call') as check_call:
yield check_call
@contextmanager
def generate_mocks(self, islink=True, isfile=True, isdir=True, on_win=False, os_access=True):
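        # Stack all of the individual patch context managers so each test receives one dict holding every mock it may need.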
with self.generate_mock_islink(islink) as mock_islink:
with self.generate_mock_isfile(isfile) as mock_isfile:
with self.generate_mock_os_access(os_access) as mock_os_access:
with self.generate_mock_isdir(isdir) as mock_isdir:
with self.generate_mock_unlink() as mock_unlink:
with self.generate_mock_rmtree() as mock_rmtree:
with self.generate_mock_sleep() as mock_sleep:
with self.generate_mock_log() as mock_log:
with self.generate_mock_on_win(on_win):
with self.generate_mock_check_call() as check_call:
yield {
'islink': mock_islink,
'isfile': mock_isfile,
'isdir': mock_isdir,
'os_access': mock_os_access,
'unlink': mock_unlink,
'rmtree': mock_rmtree,
'sleep': mock_sleep,
'log': mock_log,
'check_call': check_call,
}
def generate_directory_mocks(self, on_win=False):
return self.generate_mocks(islink=False, isfile=False, isdir=True,
on_win=on_win)
def generate_all_false_mocks(self):
return self.generate_mocks(False, False, False)
@property
def generate_random_path(self):
return generate_random_path()
@skip_if_no_mock
def test_calls_islink(self):
with self.generate_mocks() as mocks:
some_path = self.generate_random_path
install.rm_rf(some_path)
mocks['islink'].assert_called_with(some_path)
@skip_if_no_mock
def test_calls_unlink_on_true_islink(self):
with self.generate_mocks() as mocks:
some_path = self.generate_random_path
install.rm_rf(some_path)
mocks['unlink'].assert_called_with(some_path)
@skip_if_no_mock
def test_calls_unlink_on_os_access_false(self):
with self.generate_mocks(os_access=False) as mocks:
some_path = self.generate_random_path
install.rm_rf(some_path)
mocks['unlink'].assert_called_with(some_path)
@skip_if_no_mock
def test_does_not_call_isfile_if_islink_is_true(self):
with self.generate_mocks() as mocks:
some_path = self.generate_random_path
install.rm_rf(some_path)
self.assertFalse(mocks['isfile'].called)
@skip_if_no_mock
def test_calls_isfile_with_path(self):
with self.generate_mocks(islink=False, isfile=True) as mocks:
some_path = self.generate_random_path
install.rm_rf(some_path)
mocks['isfile'].assert_called_with(some_path)
@skip_if_no_mock
def test_calls_unlink_on_false_islink_and_true_isfile(self):
with self.generate_mocks(islink=False, isfile=True) as mocks:
some_path = self.generate_random_path
install.rm_rf(some_path)
mocks['unlink'].assert_called_with(some_path)
@skip_if_no_mock
def test_does_not_call_unlink_on_false_values(self):
with self.generate_mocks(islink=False, isfile=False) as mocks:
some_path = self.generate_random_path
install.rm_rf(some_path)
self.assertFalse(mocks['unlink'].called)
@skip_if_no_mock
def test_does_not_call_shutil_on_false_isdir(self):
with self.generate_all_false_mocks() as mocks:
some_path = self.generate_random_path
install.rm_rf(some_path)
self.assertFalse(mocks['rmtree'].called)
@skip_if_no_mock
def test_calls_rmtree_at_least_once_on_isdir_true(self):
with self.generate_directory_mocks() as mocks:
some_path = self.generate_random_path
install.rm_rf(some_path)
mocks['rmtree'].assert_called_with(
some_path, onerror=warn_failed_remove, ignore_errors=False)
@skip_if_no_mock
def test_calls_rmtree_only_once_on_success(self):
with self.generate_directory_mocks() as mocks:
some_path = self.generate_random_path
install.rm_rf(some_path)
self.assertEqual(1, mocks['rmtree'].call_count)
@skip_if_no_mock
def test_raises_final_exception_if_it_cant_remove(self):
with self.generate_directory_mocks() as mocks:
mocks['rmtree'].side_effect = OSError
some_path = self.generate_random_path
with self.assertRaises(OSError):
install.rm_rf(some_path)
@skip_if_no_mock
def test_retries_six_times_to_ensure_it_cant_really_remove(self):
with self.generate_directory_mocks() as mocks:
mocks['rmtree'].side_effect = OSError
some_path = self.generate_random_path
with self.assertRaises(OSError):
install.rm_rf(some_path)
self.assertEqual(6, mocks['rmtree'].call_count)
@skip_if_no_mock
def test_retries_as_many_as_max_retries_plus_one(self):
max_retries = random.randint(7, 10)
with self.generate_directory_mocks() as mocks:
mocks['rmtree'].side_effect = OSError
some_path = self.generate_random_path
with self.assertRaises(OSError):
install.rm_rf(some_path, max_retries=max_retries)
self.assertEqual(max_retries + 1, mocks['rmtree'].call_count)
@skip_if_no_mock
def test_stops_retrying_after_success(self):
with self.generate_directory_mocks() as mocks:
mocks['rmtree'].side_effect = [OSError, OSError, None]
some_path = self.generate_random_path
install.rm_rf(some_path)
self.assertEqual(3, mocks['rmtree'].call_count)
@skip_if_no_mock
def test_pauses_for_same_number_of_seconds_as_max_retries(self):
with self.generate_directory_mocks() as mocks:
mocks['rmtree'].side_effect = OSError
max_retries = random.randint(1, 10)
with self.assertRaises(OSError):
install.rm_rf(self.generate_random_path,
max_retries=max_retries)
expected = [mock.call(i) for i in range(max_retries)]
mocks['sleep'].assert_has_calls(expected)
@skip_if_no_mock
def test_logs_messages_generated_for_each_retry(self):
with self.generate_directory_mocks() as mocks:
random_path = self.generate_random_path
mocks['rmtree'].side_effect = OSError(random_path)
max_retries = random.randint(1, 10)
with self.assertRaises(OSError):
install.rm_rf(random_path, max_retries=max_retries)
log_template = "\n".join([
"Unable to delete %s" % random_path,
"%s" % OSError(random_path),
"Retrying after %d seconds...",
])
expected_call_list = [mock.call(log_template % i)
for i in range(max_retries)]
mocks['log'].debug.assert_has_calls(expected_call_list)
@skip_if_no_mock
def test_tries_extra_kwarg_on_windows(self):
with self.generate_directory_mocks(on_win=True) as mocks:
random_path = self.generate_random_path
mocks['rmtree'].side_effect = [OSError, None]
install.rm_rf(random_path)
expected_call_list = [
mock.call(random_path, ignore_errors=False, onerror=warn_failed_remove),
mock.call(random_path, onerror=install._remove_readonly)
]
mocks['rmtree'].assert_has_calls(expected_call_list)
self.assertEqual(2, mocks['rmtree'].call_count)
class duplicates_to_remove_TestCase(unittest.TestCase):
def test_1(self):
linked = ['conda-3.18.8-py27_0', 'conda-3.19.0',
'python-2.7.10-2', 'python-2.7.11-0',
'zlib-1.2.8-0']
keep = ['conda-3.19.0', 'python-2.7.11-0']
self.assertEqual(duplicates_to_remove(linked, keep),
['conda-3.18.8-py27_0', 'python-2.7.10-2'])
def test_2(self):
linked = ['conda-3.19.0',
'python-2.7.10-2', 'python-2.7.11-0',
'zlib-1.2.7-1', 'zlib-1.2.8-0', 'zlib-1.2.8-4']
keep = ['conda-3.19.0', 'python-2.7.11-0']
self.assertEqual(duplicates_to_remove(linked, keep),
['python-2.7.10-2', 'zlib-1.2.7-1', 'zlib-1.2.8-0'])
def test_3(self):
linked = ['python-2.7.10-2', 'python-2.7.11-0', 'python-3.4.3-1']
keep = ['conda-3.19.0', 'python-2.7.11-0']
self.assertEqual(duplicates_to_remove(linked, keep),
['python-2.7.10-2', 'python-3.4.3-1'])
def test_nokeep(self):
linked = ['python-2.7.10-2', 'python-2.7.11-0', 'python-3.4.3-1']
self.assertEqual(duplicates_to_remove(linked, []),
['python-2.7.10-2', 'python-2.7.11-0'])
def test_misc(self):
d1 = 'a-1.3-0'
self.assertEqual(duplicates_to_remove([], []), [])
self.assertEqual(duplicates_to_remove([], [d1]), [])
self.assertEqual(duplicates_to_remove([d1], [d1]), [])
self.assertEqual(duplicates_to_remove([d1], []), [])
d2 = 'a-1.4-0'
li = set([d1, d2])
self.assertEqual(duplicates_to_remove(li, [d2]), [d1])
self.assertEqual(duplicates_to_remove(li, [d1]), [d2])
self.assertEqual(duplicates_to_remove(li, []), [d1])
self.assertEqual(duplicates_to_remove(li, [d1, d2]), [])
if __name__ == '__main__':
unittest.main()
| 38.20339 | 97 | 0.609266 | 15,214 | 0.964254 | 2,936 | 0.186082 | 9,855 | 0.624604 | 0 | 0 | 1,693 | 0.107301 |
29422d091e83652a21c0e3588c5f7b69d97c82a9 | 728 | py | Python | django_elastic_appsearch/slicer.py | CorrosiveKid/django_elastic_appsearch | 85da7642aac566164b8bc06894e97a048fd3116e | [
"MIT"
]
| 11 | 2019-08-07T01:31:42.000Z | 2021-02-02T08:12:24.000Z | django_elastic_appsearch/slicer.py | CorrosiveKid/django_elastic_appsearch | 85da7642aac566164b8bc06894e97a048fd3116e | [
"MIT"
]
| 148 | 2019-08-01T04:22:28.000Z | 2021-05-10T19:06:31.000Z | django_elastic_appsearch/slicer.py | infoxchange/django_elastic_appsearch | 65229586f0392d8d8cb143ab625081c89fa4cb64 | [
"MIT"
]
| 6 | 2019-08-26T10:00:42.000Z | 2021-02-01T03:54:02.000Z | """A Queryset slicer for Django."""
def slice_queryset(queryset, chunk_size):
"""Slice a queryset into chunks."""
start_pk = 0
queryset = queryset.order_by('pk')
while True:
# No entry left
if not queryset.filter(pk__gt=start_pk).exists():
break
try:
# Fetch chunk_size entries if possible
end_pk = queryset.filter(pk__gt=start_pk).values_list(
'pk', flat=True)[chunk_size - 1]
# Fetch rest entries if less than chunk_size left
except IndexError:
end_pk = queryset.values_list('pk', flat=True).last()
yield queryset.filter(pk__gt=start_pk).filter(pk__lte=end_pk)
start_pk = end_pk
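# Example usage (illustrative; assumes a Django model named Item):
#   for chunk in slice_queryset(Item.objects.all(), 1000):
#       process(chunk)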
| 28 | 69 | 0.60989 | 0 | 0 | 689 | 0.946429 | 0 | 0 | 0 | 0 | 184 | 0.252747 |
29424d0f4478d5925df5fb2792f4b3b4b39494a0 | 402 | py | Python | newsite/news/urls.py | JasperStfun/Django_C | 1307f2e9c827f751e8640f50179f1b744c222d63 | [
"Unlicense"
]
| null | null | null | newsite/news/urls.py | JasperStfun/Django_C | 1307f2e9c827f751e8640f50179f1b744c222d63 | [
"Unlicense"
]
| null | null | null | newsite/news/urls.py | JasperStfun/Django_C | 1307f2e9c827f751e8640f50179f1b744c222d63 | [
"Unlicense"
]
| null | null | null | from django.urls import path
from . import views
urlpatterns = [
path('', views.news_home, name='news_home'),
path('create', views.create, name='create'),
path('<int:pk>', views.NewsDetailView.as_view(), name='news-detail'),
path('<int:pk>/update', views.NewsUpdateView.as_view(), name='news-update'),
path('<int:pk>/delete', views.NewsDeleteView.as_view(), name='news-delete'),
] | 36.545455 | 80 | 0.674129 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 112 | 0.278607 |
29428a3c880266295d54c48af9bca30d4cdda98d | 412 | py | Python | module/phase_one/headers.py | cqr-cryeye-forks/Florid | 21ea7abbe5448dca0c485232bdcf870ba2648d68 | [
"Apache-2.0"
]
| 7 | 2020-03-22T02:44:26.000Z | 2022-02-23T01:57:29.000Z | module/phase_one/headers.py | h4zze1/Florid-Scanner | 0a8600ce2bdd24f16e45504b00c714ecbb8930af | [
"Apache-2.0"
]
| 1 | 2019-02-07T13:41:47.000Z | 2019-02-07T13:41:47.000Z | module/phase_one/headers.py | h4zze1/Florid-Scanner | 0a8600ce2bdd24f16e45504b00c714ecbb8930af | [
"Apache-2.0"
]
| 3 | 2020-03-22T02:44:27.000Z | 2021-08-03T00:52:38.000Z | import requests
import lib.common
MODULE_NAME = 'headers'
def run():
r = requests.get(lib.common.SOURCE_URL)
# X-Forwarded-By:
if 'X-Powered-By' in r.headers:
lib.common.RESULT_ONE_DICT['X-Powered-By'] = r.headers['X-Powered-By']
# Server:
if 'Server' in r.headers:
lib.common.RESULT_ONE_DICT['Server'] = r.headers['Server']
lib.common.ALIVE_LINE[MODULE_NAME] += 1
| 20.6 | 78 | 0.652913 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 101 | 0.245146 |
2942faf9418139b387fac9d36b23ead11b7dcd5e | 1,234 | py | Python | ekorpkit/io/fetch/edgar/edgar.py | entelecheia/ekorpkit | 400cb15005fdbcaa2ab0c311e338799283f28fe0 | [
"CC-BY-4.0"
]
| 4 | 2022-02-26T10:54:16.000Z | 2022-02-26T11:01:56.000Z | ekorpkit/io/fetch/edgar/edgar.py | entelecheia/ekorpkit | 400cb15005fdbcaa2ab0c311e338799283f28fe0 | [
"CC-BY-4.0"
]
| 1 | 2022-03-25T06:37:12.000Z | 2022-03-25T06:45:53.000Z | ekorpkit/io/fetch/edgar/edgar.py | entelecheia/ekorpkit | 400cb15005fdbcaa2ab0c311e338799283f28fe0 | [
"CC-BY-4.0"
]
| null | null | null | import os
import requests
from bs4 import BeautifulSoup
from ekorpkit import eKonf
from ekorpkit.io.download.web import web_download, web_download_unzip
class EDGAR:
def __init__(self, **args):
self.args = eKonf.to_config(args)
self.base_url = self.args.base_url
self.url = self.args.url
self.output_dir = self.args.output_dir
os.makedirs(self.output_dir, exist_ok=True)
self.force_download = self.args.force_download
self.name = self.args.name
self.build()
def build(self):
if self.force_download or not os.listdir(self.output_dir):
self.download_edgar()
else:
print(f"{self.name} is already downloaded")
def download_edgar(self):
user_agent = "Mozilla/5.0"
headers = {"User-Agent": user_agent}
page = requests.get(self.url, headers=headers)
soup = BeautifulSoup(page.content, "html.parser")
filelist = soup.find_all("a", class_="filename")
for file in filelist:
link = self.base_url + file.get("href")
file_path = self.output_dir + "/" + file.get_text().strip()
web_download(link, file_path, self.name, self.force_download)
| 31.641026 | 73 | 0.644246 | 1,078 | 0.873582 | 0 | 0 | 0 | 0 | 0 | 0 | 96 | 0.077796 |
2944646b37b0ab25dfa73f854ed036b7d6e77c63 | 3,470 | py | Python | HARK/ConsumptionSaving/tests/test_PerfForesightConsumerType.py | michiboo/HARK | de2aab467de19da2ce76de1b58fb420f421bc85b | [
"Apache-2.0"
]
| null | null | null | HARK/ConsumptionSaving/tests/test_PerfForesightConsumerType.py | michiboo/HARK | de2aab467de19da2ce76de1b58fb420f421bc85b | [
"Apache-2.0"
]
| null | null | null | HARK/ConsumptionSaving/tests/test_PerfForesightConsumerType.py | michiboo/HARK | de2aab467de19da2ce76de1b58fb420f421bc85b | [
"Apache-2.0"
]
| null | null | null | from HARK.ConsumptionSaving.ConsIndShockModel import PerfForesightConsumerType
import numpy as np
import unittest
class testPerfForesightConsumerType(unittest.TestCase):
def setUp(self):
self.agent = PerfForesightConsumerType()
self.agent_infinite = PerfForesightConsumerType(cycles=0)
PF_dictionary = {
'CRRA' : 2.5,
'DiscFac' : 0.96,
'Rfree' : 1.03,
'LivPrb' : [0.98],
'PermGroFac' : [1.01],
'T_cycle' : 1,
'cycles' : 0,
'AgentCount' : 10000
}
self.agent_alt = PerfForesightConsumerType(
**PF_dictionary)
def test_default_solution(self):
self.agent.solve()
c = self.agent.solution[0].cFunc
self.assertEqual(c.x_list[0], -0.9805825242718447)
self.assertEqual(c.x_list[1], 0.01941747572815533)
self.assertEqual(c.y_list[0], 0)
self.assertEqual(c.y_list[1], 0.511321002804608)
self.assertEqual(c.decay_extrap, False)
def test_another_solution(self):
self.agent_alt.DiscFac = 0.90
self.agent_alt.solve()
self.assertAlmostEqual(
self.agent_alt.solution[0].cFunc(10).tolist(),
3.9750093524820787)
def test_checkConditions(self):
self.agent_infinite.checkConditions()
self.assertTrue(self.agent_infinite.AIC)
self.assertTrue(self.agent_infinite.GICPF)
self.assertTrue(self.agent_infinite.RIC)
self.assertTrue(self.agent_infinite.FHWC)
def test_simulation(self):
self.agent_infinite.solve()
# Create parameter values necessary for simulation
SimulationParams = {
"AgentCount" : 10000, # Number of agents of this type
"T_sim" : 120, # Number of periods to simulate
"aNrmInitMean" : -6.0, # Mean of log initial assets
"aNrmInitStd" : 1.0, # Standard deviation of log initial assets
"pLvlInitMean" : 0.0, # Mean of log initial permanent income
"pLvlInitStd" : 0.0, # Standard deviation of log initial permanent income
"PermGroFacAgg" : 1.0, # Aggregate permanent income growth factor
"T_age" : None, # Age after which simulated agents are automatically killed
}
self.agent_infinite(**SimulationParams) # This implicitly uses the assignParameters method of AgentType
# Create PFexample object
self.agent_infinite.track_vars = ['mNrmNow']
self.agent_infinite.initializeSim()
self.agent_infinite.simulate()
self.assertAlmostEqual(
np.mean(self.agent_infinite.mNrmNow_hist,axis=1)[40],
-23.008063500363942
)
self.assertAlmostEqual(
np.mean(self.agent_infinite.mNrmNow_hist,axis=1)[100],
-27.164608851546927
)
## Try now with the manipulation at time step 80
self.agent_infinite.initializeSim()
self.agent_infinite.simulate(80)
self.agent_infinite.aNrmNow += -5. # Adjust all simulated consumers' assets downward by 5
self.agent_infinite.simulate(40)
self.assertAlmostEqual(
np.mean(self.agent_infinite.mNrmNow_hist,axis=1)[40],
-23.008063500363942
)
self.assertAlmostEqual(
np.mean(self.agent_infinite.mNrmNow_hist,axis=1)[100],
-29.140261331951606
)
| 35.408163 | 111 | 0.625072 | 3,354 | 0.966571 | 0 | 0 | 0 | 0 | 0 | 0 | 738 | 0.21268 |
2944814c5ae01dfc5daf1a2ce4f89caabba6e70c | 3,893 | py | Python | src-gen/openapi_server/models/config.py | etherisc/bima-bolt-api | 14201a3055d94ff9c42afbb755109a69e77248f4 | [
"Apache-2.0"
]
| null | null | null | src-gen/openapi_server/models/config.py | etherisc/bima-bolt-api | 14201a3055d94ff9c42afbb755109a69e77248f4 | [
"Apache-2.0"
]
| null | null | null | src-gen/openapi_server/models/config.py | etherisc/bima-bolt-api | 14201a3055d94ff9c42afbb755109a69e77248f4 | [
"Apache-2.0"
]
| null | null | null | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi_server.models.base_model_ import Model
from openapi_server.models.component import Component
from openapi_server import util
from openapi_server.models.component import Component # noqa: E501
class Config(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, mongo=None, s3=None, arc2=None, created_at=None): # noqa: E501
"""Config - a model defined in OpenAPI
:param mongo: The mongo of this Config. # noqa: E501
:type mongo: Component
:param s3: The s3 of this Config. # noqa: E501
:type s3: Component
:param arc2: The arc2 of this Config. # noqa: E501
:type arc2: Component
:param created_at: The created_at of this Config. # noqa: E501
:type created_at: datetime
"""
self.openapi_types = {
'mongo': Component,
's3': Component,
'arc2': Component,
'created_at': datetime
}
self.attribute_map = {
'mongo': 'mongo',
's3': 's3',
'arc2': 'arc2',
'created_at': 'created_at'
}
self._mongo = mongo
self._s3 = s3
self._arc2 = arc2
self._created_at = created_at
@classmethod
def from_dict(cls, dikt) -> 'Config':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The Config of this Config. # noqa: E501
:rtype: Config
"""
return util.deserialize_model(dikt, cls)
@property
def mongo(self):
"""Gets the mongo of this Config.
:return: The mongo of this Config.
:rtype: Component
"""
return self._mongo
@mongo.setter
def mongo(self, mongo):
"""Sets the mongo of this Config.
:param mongo: The mongo of this Config.
:type mongo: Component
"""
if mongo is None:
raise ValueError("Invalid value for `mongo`, must not be `None`") # noqa: E501
self._mongo = mongo
@property
def s3(self):
"""Gets the s3 of this Config.
:return: The s3 of this Config.
:rtype: Component
"""
return self._s3
@s3.setter
def s3(self, s3):
"""Sets the s3 of this Config.
:param s3: The s3 of this Config.
:type s3: Component
"""
if s3 is None:
raise ValueError("Invalid value for `s3`, must not be `None`") # noqa: E501
self._s3 = s3
@property
def arc2(self):
"""Gets the arc2 of this Config.
:return: The arc2 of this Config.
:rtype: Component
"""
return self._arc2
@arc2.setter
def arc2(self, arc2):
"""Sets the arc2 of this Config.
:param arc2: The arc2 of this Config.
:type arc2: Component
"""
if arc2 is None:
raise ValueError("Invalid value for `arc2`, must not be `None`") # noqa: E501
self._arc2 = arc2
@property
def created_at(self):
"""Gets the created_at of this Config.
Creation timestamp, omit this property for post requests # noqa: E501
:return: The created_at of this Config.
:rtype: datetime
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""Sets the created_at of this Config.
Creation timestamp, omit this property for post requests # noqa: E501
:param created_at: The created_at of this Config.
:type created_at: datetime
"""
self._created_at = created_at
| 25.444444 | 96 | 0.579245 | 3,532 | 0.907269 | 0 | 0 | 2,348 | 0.603134 | 0 | 0 | 2,196 | 0.564089 |
2944fda074b1c1551c4b520622df91dd49749873 | 1,003 | py | Python | txt_annotation.py | bubbliiiing/classification-keras | b914c5d8526cccbeb3ae8d8f2fea4c8bbabf1d94 | [
"MIT"
]
| 30 | 2021-01-23T15:51:20.000Z | 2022-03-26T13:37:49.000Z | txt_annotation.py | PARILM/classification-keras | 91e558b5a128449b81acc4f6983f01e420b2039d | [
"MIT"
]
| 4 | 2021-01-22T08:58:57.000Z | 2022-03-17T14:21:07.000Z | txt_annotation.py | PARILM/classification-keras | 91e558b5a128449b81acc4f6983f01e420b2039d | [
"MIT"
]
| 10 | 2021-01-31T01:23:35.000Z | 2022-02-17T11:53:05.000Z | import os
from os import getcwd
#---------------------------------------------#
#   Before training, be sure to modify classes
#   The class order must match the txt files under model_data
#---------------------------------------------#
classes = ["cat", "dog"]
sets = ["train", "test"]
wd = getcwd()
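# Each line written below has the form "<class_id>;<absolute path to image>".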
for se in sets:
list_file = open('cls_' + se + '.txt', 'w')
datasets_path = "datasets/" + se
types_name = os.listdir(datasets_path)
for type_name in types_name:
if type_name not in classes:
continue
cls_id = classes.index(type_name)
photos_path = os.path.join(datasets_path, type_name)
photos_name = os.listdir(photos_path)
for photo_name in photos_name:
_, postfix = os.path.splitext(photo_name)
if postfix not in ['.jpg', '.png', '.jpeg']:
continue
list_file.write(str(cls_id) + ";" + '%s/%s'%(wd, os.path.join(photos_path, photo_name)))
list_file.write('\n')
list_file.close()
| 31.34375 | 101 | 0.521436 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 271 | 0.25933 |
29451165b051aed5989a0318d992368267c8109d | 5,272 | py | Python | S4/S4 Library/simulation/relationships/sim_knowledge.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
]
| 1 | 2021-05-20T19:33:37.000Z | 2021-05-20T19:33:37.000Z | S4/S4 Library/simulation/relationships/sim_knowledge.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
]
| null | null | null | S4/S4 Library/simulation/relationships/sim_knowledge.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
]
| null | null | null | from protocolbuffers import SimObjectAttributes_pb2 as protocols
from careers.career_unemployment import CareerUnemployment
import services
import sims4
logger = sims4.log.Logger('Relationship', default_owner='jjacobson')
class SimKnowledge:
__slots__ = ('_rel_data', '_known_traits', '_knows_career', '_known_stats', '_knows_major')
def __init__(self, rel_data):
self._rel_data = rel_data
self._known_traits = None
self._knows_career = False
self._known_stats = None
self._knows_major = False
def add_known_trait(self, trait, notify_client=True):
if trait.is_personality_trait:
if self._known_traits is None:
self._known_traits = set()
self._known_traits.add(trait)
if notify_client:
self._rel_data.relationship.send_relationship_info()
else:
logger.error("Try to add non personality trait {} to Sim {}'s knowledge about to Sim {}", trait, self._rel_data.sim_id, self._rel_data.target_sim_id)
@property
def known_traits(self):
if self._known_traits is None:
return ()
return self._known_traits
@property
def knows_career(self):
return self._knows_career
def add_knows_career(self, notify_client=True):
self._knows_career = True
if notify_client:
self._rel_data.relationship.send_relationship_info()
def remove_knows_career(self, notify_client=True):
self._knows_career = False
if notify_client:
self._rel_data.relationship.send_relationship_info()
def get_known_careers(self):
if self._knows_career:
target_sim_info = self._rel_data.find_target_sim_info()
if target_sim_info is not None:
if target_sim_info.career_tracker.has_career:
careers = tuple(career for career in target_sim_info.careers.values() if career.is_visible_career if not career.is_course_slot)
if careers:
return careers
if target_sim_info.career_tracker.retirement is not None:
return (target_sim_info.career_tracker.retirement,)
else:
return (CareerUnemployment(target_sim_info),)
return ()
def get_known_careertrack_ids(self):
return (career_track.current_track_tuning.guid64 for career_track in self.get_known_careers())
def add_known_stat(self, stat, notify_client=True):
if self._known_stats is None:
self._known_stats = set()
self._known_stats.add(stat)
if notify_client:
self._rel_data.relationship.send_relationship_info()
def get_known_stats(self):
return self._known_stats
@property
def knows_major(self):
return self._knows_major
def add_knows_major(self, notify_client=True):
self._knows_major = True
if notify_client:
self._rel_data.relationship.send_relationship_info()
def remove_knows_major(self, notify_client=True):
self._knows_major = False
if notify_client:
self._rel_data.relationship.send_relationship_info()
def get_known_major(self):
if self._knows_major:
target_sim_info = self._rel_data.find_target_sim_info()
if target_sim_info is not None and target_sim_info.degree_tracker:
return target_sim_info.degree_tracker.get_major()
def get_known_major_career(self):
if self._knows_major:
target_sim_info = self._rel_data.find_target_sim_info()
if target_sim_info is not None and target_sim_info.career_tracker.has_career:
careers = tuple(career for career in target_sim_info.careers.values() if career.is_visible_career if career.is_course_slot)
if careers:
return careers
return ()
def get_save_data(self):
save_data = protocols.SimKnowledge()
for trait in self.known_traits:
save_data.trait_ids.append(trait.guid64)
save_data.knows_career = self._knows_career
if self._known_stats is not None:
for stat in self._known_stats:
save_data.stats.append(stat.guid64)
save_data.knows_major = self._knows_major
return save_data
def load_knowledge(self, save_data):
trait_manager = services.get_instance_manager(sims4.resources.Types.TRAIT)
stat_manager = services.get_instance_manager(sims4.resources.Types.STATISTIC)
for trait_inst_id in save_data.trait_ids:
trait = trait_manager.get(trait_inst_id)
if trait is not None:
if self._known_traits is None:
self._known_traits = set()
self._known_traits.add(trait)
for stat_id in save_data.stats:
if self._known_stats is None:
self._known_stats = set()
stat = stat_manager.get(stat_id)
if stat is not None:
self._known_stats.add(stat)
self._knows_career = save_data.knows_career
if hasattr(save_data, 'knows_major'):
self._knows_major = save_data.knows_major
| 39.939394 | 161 | 0.660281 | 5,048 | 0.957511 | 0 | 0 | 272 | 0.051593 | 0 | 0 | 182 | 0.034522 |
29452ec5be15d28b45cb5711c4822ec7f8c5c51e | 1,001 | py | Python | 233_number_of_digt_one.py | gengwg/leetcode | 0af5256ec98149ef5863f3bba78ed1e749650f6e | [
"Apache-2.0"
]
| 2 | 2018-04-24T19:17:40.000Z | 2018-04-24T19:33:52.000Z | 233_number_of_digt_one.py | gengwg/leetcode | 0af5256ec98149ef5863f3bba78ed1e749650f6e | [
"Apache-2.0"
]
| null | null | null | 233_number_of_digt_one.py | gengwg/leetcode | 0af5256ec98149ef5863f3bba78ed1e749650f6e | [
"Apache-2.0"
]
| 3 | 2020-06-17T05:48:52.000Z | 2021-01-02T06:08:25.000Z | # Given an integer n, count the total number of digit 1 appearing
# in all non-negative integers less than or equal to n.
#
# For example:
# Given n = 13,
# Return 6, because digit 1 occurred in the following numbers:
# 1, 10, 11, 12, 13.
#
class Solution:
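    # Brute force: count the '1' digits of every number from 1 to n, so roughly O(n * number of digits).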
def countDigitOne(self, n):
"""
:type n: int
:rtype: int
"""
# sum all the '1's inside the n numbers
count = 0
for i in range(1, n+1): # count including n
count += self.numberOfDigitOne(i)
return count
def numberOfDigitOne(self, n):
"""
function to count number of digit ones in a number n.
mod by 10 to test if 1st digit is 1;
then divide by 10 to get next digit;
next test if next digit is 1.
"""
result = 0
while n:
if n % 10 == 1:
result += 1
            n = n // 10
return result
if __name__ == "__main__":
    print(Solution().countDigitOne(13))
| 22.75 | 65 | 0.548452 | 689 | 0.688312 | 0 | 0 | 0 | 0 | 0 | 0 | 565 | 0.564436 |
29458e025d37036dcc3d6da38653c530afc7e75e | 13,954 | py | Python | Search Algorithms.py | fzehracetin/A-Star-and-Best-First-Search | 78be430f0c3523aa78d9822ec8aa19615fd3e500 | [
"Apache-2.0"
]
| 1 | 2021-02-24T10:13:22.000Z | 2021-02-24T10:13:22.000Z | Search Algorithms.py | fzehracetin/A-Star-and-Best-First-Search | 78be430f0c3523aa78d9822ec8aa19615fd3e500 | [
"Apache-2.0"
]
| null | null | null | Search Algorithms.py | fzehracetin/A-Star-and-Best-First-Search | 78be430f0c3523aa78d9822ec8aa19615fd3e500 | [
"Apache-2.0"
]
| null | null | null | from PIL import Image
from math import sqrt
import numpy as np
import time
import matplotlib.backends.backend_tkagg
import matplotlib.pyplot as plt
class Point:
x: float
y: float
f: float
h: float
g: float
def __init__(self, x, y, f):
self.x = x
self.y = y
self.f = f
self.g = 0
self.h = 0
self.parent = None
def equal(self, other):
if self.x == other.x and self.y == other.y:
return True
class Output:
result_image: Image
total_time: float
n_elements: int
max_elements: int
def __init__(self, result_image, total_time, n_elements, max_elements):
self.result_image = result_image
self.total_time = total_time
self.n_elements = n_elements
self.max_elements = max_elements
self.name = None
def plot_times(self, other1, other2, other3):
fig, ax = plt.subplots()
ax.bar([self.name, other1.name, other2.name, other3.name],
[self.total_time, other1.total_time, other2.total_time, other3.total_time])
fig.suptitle("Toplam Zamanlar")
fname = image_name.split('.')
plt.savefig(fname[0] + "times.png")
plt.show()
def plot_n_elements(self, other1, other2, other3):
fig, ax = plt.subplots()
ax.bar([self.name, other1.name, other2.name, other3.name],
[self.n_elements, other1.n_elements, other2.n_elements, other3.n_elements])
fig.suptitle("Stack'ten Çekilen Toplam Eleman Sayısı")
fname = image_name.split('.')
plt.savefig(fname[0] + "n_elements.png")
plt.show()
def plot_max_elements(self, other1, other2, other3):
fig, ax = plt.subplots()
ax.bar([self.name, other1.name, other2.name, other3.name],
[self.max_elements, other1.max_elements, other2.max_elements, other3.max_elements])
fig.suptitle("Stack'te Bulunan Maksimum Eleman Sayısı")
fname = image_name.split('.')
plt.savefig(fname[0] + "max_elements.png")
plt.show()
def distance(point, x, y):
return sqrt((point.x - x)**2 + (point.y - y)**2)
def insert_in_heap(heap, top, point):
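    # Append the new point and sift it up until the min-heap property (smallest f at index 0) is restored.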
heap.append(point)
i = top
parent = (i - 1)/2
while i >= 1 and heap[int(i)].f < heap[int(parent)].f:
heap[int(i)], heap[int(parent)] = heap[int(parent)], heap[int(i)] # swap
i = parent
parent = (i - 1) / 2
return
def calculate_weight(x, y, liste, top, point, visited, index1, index2):
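    # Cost model: brighter pixels (larger red value) are cheaper to cross. h is the Euclidean distance
    # to the goal scaled by (256 - r); for A* (index1 == 1) g additionally accumulates (256 - r) per step.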
if visited[int(x)][int(y)] == 0:
r, g, b = image.getpixel((x, y))
if x == end.x and y == end.y:
print("Path found.")
        if r == 0:
r = 1
new_point = Point(x, y, 0)
new_point.parent = point
new_point.h = distance(end, x, y) * (256 - r)
new_point.g = 0
if index1 == 1: # a_star
new_point.g = new_point.parent.g + 256 - r
new_point.f = new_point.h + new_point.g # bfs'de g = 0
if index2 == 0: # stack
liste.append(new_point)
else: # heap
insert_in_heap(liste, top, new_point)
top += 1
visited[int(x)][int(y)] = 1
return top
def add_neighbours(point, liste, top, visited, index1, index2):
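    # Expand the valid 8-connected neighbours, handling image corners and edges separately so indices stay in bounds.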
# print(point.x, point.y)
if (point.x == width - 1 and point.y == height - 1) or (point.x == 0 and point.y == 0) or \
(point.x == 0 and point.y == height - 1) or (point.x == width - 1 and point.y == 0):
# print("first if")
if point.x == width - 1 and point.y == height - 1:
constx = -1
consty = -1
elif point.x == 0 and point.y == 0:
constx = 1
consty = 1
elif point.x == width - 1 and point.y == 0:
constx = 1
consty = -1
else:
constx = -1
consty = 1
top = calculate_weight(point.x + constx, point.y, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x, point.y + consty, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x + constx, point.y + consty, liste, top, point, visited, index1, index2)
elif point.x == 0 or point.x == width - 1:
# print("nd if")
top = calculate_weight(point.x, point.y - 1, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x, point.y + 1, liste, top, point, visited, index1, index2)
if point.x == 0:
const = 1
else:
const = -1
top = calculate_weight(point.x + const, point.y - 1, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x + const, point.y + 1, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x + const, point.y, liste, top, point, visited, index1, index2)
elif point.y == 0 or point.y == height - 1:
# print("3rd if")
top = calculate_weight(point.x - 1, point.y, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x + 1, point.y, liste, top, point, visited, index1, index2)
if point.y == 0:
const = 1
else:
const = -1
top = calculate_weight(point.x - 1, point.y + const, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x + 1, point.y + const, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x, point.y + const, liste, top, point, visited, index1, index2)
else:
# print("4th if")
top = calculate_weight(point.x - 1, point.y, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x - 1, point.y - 1, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x - 1, point.y + 1, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x + 1, point.y - 1, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x + 1, point.y, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x + 1, point.y + 1, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x, point.y + 1, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x, point.y - 1, liste, top, point, visited, index1, index2)
return top
def paint(point):
yol = []
while not point.equal(start):
yol.append(point)
image.putpixel((int(point.x), int(point.y)), (60, 255, 0))
point = point.parent
end_time = time.time()
# image.show()
'''print("--------------YOL------------------")
for i in range(len(yol)):
print("x: {}, y:{}, distance:{}".format(yol[i].x, yol[i].y, yol[i].f))
print("------------------------------------")'''
return image, (end_time - start_time)
def bfs_and_a_star_with_stack(index):
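    # index == 0 runs Best-First Search (f = h only); index == 1 runs A* (f = g + h).
    # The frontier is a plain list that is re-sorted by f after every expansion.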
stack = []
top = 0
found = False
point = None
stack.append(start)
visited = np.zeros((width, height))
visited[int(start.x)][int(start.y)] = 1
j = 0
max_element = 0
while stack and not found:
point = stack.pop(top)
# print("x: {}, y:{}, f:{}".format(point.x, point.y, point.f))
top -= 1
if point.equal(end):
found = True
else:
top = add_neighbours(point, stack, top, visited, index, 0)
stack.sort(key=lambda point: point.f, reverse=True)
if len(stack) > max_element:
max_element = len(stack)
j += 1
if found:
result_image, total_time = paint(point)
# print("Stackten çekilen eleman sayısı: ", j)
# print("Stackteki maksimum eleman sayısı: ", max_element)
return result_image, total_time, j, max_element
def find_smallest_child(heap, i, top):
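    # Return the index of i's child with the smaller f in the array-backed binary heap, or 0 if i has no child.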
if 2 * i + 2 < top: # has two child
if heap[2*i + 1].f < heap[2*i + 2].f:
return 2*i + 1
else:
return 2*i + 2
elif 2*i + 1 < top: # has one child
return 2*i + 1
else: # has no child
return 0
def remove_min(heap, top):
if top == 0:
return None
min_point = heap[0]
top -= 1
heap[0] = heap[top]
del heap[top]
i = 0
index = find_smallest_child(heap, i, top)
while index != 0 and heap[i].f > heap[index].f:
heap[i], heap[index] = heap[index], heap[i]
i = index
index = find_smallest_child(heap, i, top)
return min_point, top
def bfs_and_a_star_with_heap(index):
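    # Same search as the stack version, but the frontier is an array-backed binary min-heap keyed on f.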
heap = []
found = False
yol = []
point = None
heap.append(start)
visited = np.zeros((width, height))
visited[int(start.x)][int(start.y)] = 1
j = 0
top = 1
max_element = 0
while heap and not found:
point, top = remove_min(heap, top)
# print("x: {}, y:{}, f:{}".format(point.x, point.y, point.f))
if point.equal(end):
found = True
else:
top = add_neighbours(point, heap, top, visited, index, 1)
if len(heap) > max_element:
max_element = len(heap)
j += 1
if found:
result_image, total_time = paint(point)
else:
return
return result_image, total_time, j, max_element
if __name__ == "__main__":
print("UYARI: Seçilecek görüntü exe dosyası ile aynı klasörde olmalıdır.")
image_name = input("Algoritmanın üzerinde çalışacağı görüntünün ismini giriniz (Örnek input: image.png): ")
print(image_name)
print("-------------------Algoritmalar------------------")
print("1- Best First Search with Stack")
print("2- Best First Search with Heap")
print("3- A* with Stack")
print("4- A* with Heap")
print("5- Analiz (tüm algoritmaların çalışmalarını ve kıyaslamalarını gör)")
alg = input("Algoritmayı ve veri yapısının numarasını seçiniz (Örnek input: 1): ")
image = Image.open(image_name)
width, height = image.size
image = image.convert('RGB')
print("Görüntünün genişliği: {}, yüksekliği: {}".format(width, height))
print("NOT: Başlangıç ve bitiş noktasının koordinatları genişlik ve uzunluktan küçük olmalıdır.")
sx, sy = input("Başlangıç noktasının x ve y piksel koordinatlarını sırasıyla giriniz (Örnek input: 350 100): ").split()
ex, ey = input("Bitiş noktasının x ve y piksel koordinatlarını sırasıyla giriniz (Örnek input: 200 700): ").split()
start = Point(int(sx), int(sy), -1)
start.parent = -1
end = Point(int(ex), int(ey), -1)
start_time = time.time()
if int(alg) == 1:
result_image, total_time, n_elements, max_elements = bfs_and_a_star_with_stack(0)
elif int(alg) == 2:
result_image, total_time, n_elements, max_elements = bfs_and_a_star_with_heap(0)
elif int(alg) == 3:
result_image, total_time, n_elements, max_elements = bfs_and_a_star_with_stack(1)
elif int(alg) == 4:
result_image, total_time, n_elements, max_elements = bfs_and_a_star_with_heap(1)
elif int(alg) == 5:
result_image, total_time, n_elements, max_elements = bfs_and_a_star_with_stack(0)
output1 = Output(result_image, total_time, n_elements, max_elements)
print(n_elements, total_time, max_elements)
output1.name = "BFS with Stack"
print("1/4")
image = Image.open(image_name)
width, height = image.size
image = image.convert('RGB')
start_time = time.time()
result_image, total_time, n_elements, max_elements = bfs_and_a_star_with_heap(0)
output2 = Output(result_image, total_time, n_elements, max_elements)
print(n_elements, total_time, max_elements)
output2.name = "BFS with Heap"
print("2/4")
image = Image.open(image_name)
width, height = image.size
image = image.convert('RGB')
start_time = time.time()
result_image, total_time, n_elements, max_elements = bfs_and_a_star_with_stack(1)
output3 = Output(result_image, total_time, n_elements, max_elements)
output3.name = "A* with Stack"
print(n_elements, total_time, max_elements)
print("3/4")
image = Image.open(image_name)
width, height = image.size
image = image.convert('RGB')
start_time = time.time()
result_image, total_time, n_elements, max_elements = bfs_and_a_star_with_heap(1)
output4 = Output(result_image, total_time, n_elements, max_elements)
output4.name = "A* with Heap"
print("4/4")
output1.plot_times(output2, output3, output4)
output1.plot_max_elements(output2, output3, output4)
output1.plot_n_elements(output2, output3, output4)
print("Bastırılan görüntüler sırasıyla BFS stack, BFS heap, A* stack ve A* heap şeklindedir.")
fname = image_name.split('.')
output1.result_image.show()
output1.result_image.save(fname[0] + "BFS_stack.png")
output2.result_image.show()
output2.result_image.save(fname[0] + "BFS_heap.png")
output3.result_image.show()
output3.result_image.save(fname[0] + "A_star_stack.png")
output4.result_image.show()
output4.result_image.save(fname[0] + "A_star_heap.png")
exit(0)
else:
print("Algoritma numarası hatalı girildi, tekrar deneyin.")
exit(0)
print("Stackten çekilen eleman sayısı: ", n_elements)
print("Stackteki maksimum eleman sayısı: ", max_elements)
print("Toplam süre: ", total_time)
result_image.show()
| 35.68798 | 124 | 0.580264 | 1,985 | 0.141191 | 0 | 0 | 0 | 0 | 0 | 0 | 2,083 | 0.148161 |
2945bb791202db0434b867efcbc0fdb23fb1256d | 624 | py | Python | time_test.py | Shb742/rnnoise_python | e370e85984d5909111c9e6e7e4a627bf4de76648 | [
"BSD-3-Clause"
]
| 32 | 2019-05-24T08:51:36.000Z | 2022-03-10T06:10:08.000Z | time_test.py | Shb742/rnnoise_python | e370e85984d5909111c9e6e7e4a627bf4de76648 | [
"BSD-3-Clause"
]
| 3 | 2020-08-06T09:40:51.000Z | 2021-04-21T08:50:20.000Z | time_test.py | Shb742/rnnoise_python | e370e85984d5909111c9e6e7e4a627bf4de76648 | [
"BSD-3-Clause"
]
| 5 | 2019-09-19T05:54:33.000Z | 2021-04-21T08:50:29.000Z | #Author Shoaib Omar
import time
import rnnoise
import numpy as np
def time_rnnoise(rounds=1000):
a = rnnoise.RNNoise()
timer = 0.0
st = time.time()
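    # Baseline pass: time only the random-byte generation so it can be subtracted from the processing loop below.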
for i in range(rounds):
inp = np.random.bytes(960)
timer = (time.time() - st)
print(timer)
st = time.time()
for i in range(rounds):
inp = np.random.bytes(960)
va,out = a.process_frame(inp)
time_taken_per_frame = ((time.time()-st)-timer) /rounds
print("time taken for one frame - " + str(time_taken_per_frame ))
print("time in a frame - " +str(480.0/48000.0))
print(str((480.0/48000.0)/time_taken_per_frame )+"X faster than real")
a.destroy()
time_rnnoise() | 28.363636 | 71 | 0.692308 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 88 | 0.141026 |
29461dc478380b16ce5a78cc8afb8aa1b8e6189a | 1,092 | py | Python | tests/test_shell.py | jakubtyniecki/pact | c23547a2aed1d612180528e33ec1ce021f9badb6 | [
"MIT"
]
| 2 | 2017-01-12T10:24:31.000Z | 2020-06-11T16:05:05.000Z | tests/test_shell.py | jakubtyniecki/pact | c23547a2aed1d612180528e33ec1ce021f9badb6 | [
"MIT"
]
| null | null | null | tests/test_shell.py | jakubtyniecki/pact | c23547a2aed1d612180528e33ec1ce021f9badb6 | [
"MIT"
]
| null | null | null |
""" shell sort tests module """
import unittest
import random
from sort import shell
from tests import helper
class ShellSortTests(unittest.TestCase):
""" shell sort unit tests class """
max = 100
arr = []
def setUp(self):
""" setting up for the test """
self.arr = random.sample(range(self.max), self.max)
def test_null_input(self):
""" should raise when input array is None """
# arrange
inp = None
# act
with self.assertRaises(TypeError) as ex:
shell.sort(inp)
# assert
self.assertEqual("'NoneType' object is not iterable", str(ex.exception))
def test_empty_input(self):
""" should return [] when input array is empty """
# arrange
inp = []
# act
res = shell.sort(inp)
# assert
self.assertEqual(len(inp), len(res))
def test_sort_a_given_array(self):
""" should sort a given array """
# act
res = shell.sort(self.arr[:])
# assert
self.assertTrue(helper.is_sorted(res))
| 22.285714 | 80 | 0.574176 | 977 | 0.894689 | 0 | 0 | 0 | 0 | 0 | 0 | 317 | 0.290293 |
2946888881fb3eee8c4a9270d71f7bab3158abad | 666 | py | Python | k8s_apps/admin/dump_inventory_file.py | AkadioInc/firefly | d6c48ff9999ffedcaa294fcd956eb97b90408583 | [
"BSD-2-Clause"
]
| null | null | null | k8s_apps/admin/dump_inventory_file.py | AkadioInc/firefly | d6c48ff9999ffedcaa294fcd956eb97b90408583 | [
"BSD-2-Clause"
]
| null | null | null | k8s_apps/admin/dump_inventory_file.py | AkadioInc/firefly | d6c48ff9999ffedcaa294fcd956eb97b90408583 | [
"BSD-2-Clause"
]
| null | null | null | import h5pyd
from datetime import datetime
import tzlocal
BUCKET="firefly-hsds"
inventory_domain = "/FIREfly/inventory.h5"
def formatTime(timestamp):
local_timezone = tzlocal.get_localzone() # get pytz timezone
local_time = datetime.fromtimestamp(timestamp, local_timezone)
return local_time
f = h5pyd.File(inventory_domain, "r", bucket=BUCKET)
table = f["inventory"]
for row in table:
filename = row[0].decode('utf-8')
if row[1]:
start = formatTime(row[1])
else:
start = 0
if row[2]:
stop = formatTime(row[2])
else:
stop = 0
print(f"{filename}\t{start}\t{stop}")
print(f"{table.nrows} rows")
| 22.965517 | 66 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 128 | 0.192192 |
2946dbe0237daa4f111129ff8959628dbb456b22 | 2,640 | py | Python | enaml/qt/qt_timer.py | xtuzy/enaml | a1b5c0df71c665b6ef7f61d21260db92d77d9a46 | [
"BSD-3-Clause-Clear"
]
| 1,080 | 2015-01-04T14:29:34.000Z | 2022-03-29T05:44:51.000Z | enaml/qt/qt_timer.py | xtuzy/enaml | a1b5c0df71c665b6ef7f61d21260db92d77d9a46 | [
"BSD-3-Clause-Clear"
]
| 308 | 2015-01-05T22:44:13.000Z | 2022-03-30T21:19:18.000Z | enaml/qt/qt_timer.py | xtuzy/enaml | a1b5c0df71c665b6ef7f61d21260db92d77d9a46 | [
"BSD-3-Clause-Clear"
]
| 123 | 2015-01-25T16:33:48.000Z | 2022-02-25T19:57:10.000Z | #------------------------------------------------------------------------------
# Copyright (c) 2013-2017, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
#------------------------------------------------------------------------------
from atom.api import Typed
from enaml.widgets.timer import ProxyTimer
from .QtCore import QTimer
from .qt_toolkit_object import QtToolkitObject
class QtTimer(QtToolkitObject, ProxyTimer):
""" A Qt implementation of an Enaml ProxyTimer.
"""
#: A reference to the widget created by the proxy.
widget = Typed(QTimer)
#--------------------------------------------------------------------------
# Initialization
#--------------------------------------------------------------------------
def create_widget(self):
""" Create the underlying timer object.
"""
self.widget = QTimer()
def init_widget(self):
""" Initialize the widget.
"""
super(QtTimer, self).init_widget()
d = self.declaration
self.set_interval(d.interval)
self.set_single_shot(d.single_shot)
self.widget.timeout.connect(self.on_timeout)
def destroy(self):
""" A reimplemented destructor.
This stops the timer before invoking the superclass destructor.
"""
self.widget.stop()
super(QtTimer, self).destroy()
#--------------------------------------------------------------------------
# Signal Handlers
#--------------------------------------------------------------------------
def on_timeout(self):
""" Handle the timeout signal for the timer.
"""
d = self.declaration
if d is not None:
d.timeout()
#--------------------------------------------------------------------------
# ProxyTimer API
#--------------------------------------------------------------------------
def set_interval(self, interval):
""" Set the interval on the timer.
"""
self.widget.setInterval(interval)
def set_single_shot(self, single_shot):
""" Set the single shot flag on the timer.
"""
self.widget.setSingleShot(single_shot)
def start(self):
""" Start or restart the timer.
"""
self.widget.start()
def stop(self):
""" Stop the timer.
"""
self.widget.stop()
def is_running(self):
""" Get whether or not the timer is running.
"""
return self.widget.isActive()
| 27.789474 | 79 | 0.46553 | 2,139 | 0.810227 | 0 | 0 | 0 | 0 | 0 | 0 | 1,449 | 0.548864 |
29487962f697ad1bbd8acf9245d0ea5da17bae4f | 12,488 | py | Python | mindhome_alpha/erpnext/erpnext_integrations/doctype/mpesa_settings/test_mpesa_settings.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
]
| 1 | 2021-04-29T14:55:29.000Z | 2021-04-29T14:55:29.000Z | mindhome_alpha/erpnext/erpnext_integrations/doctype/mpesa_settings/test_mpesa_settings.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
]
| null | null | null | mindhome_alpha/erpnext/erpnext_integrations/doctype/mpesa_settings/test_mpesa_settings.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
]
| 1 | 2021-04-29T14:39:01.000Z | 2021-04-29T14:39:01.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
from json import dumps
import frappe
import unittest
from erpnext.erpnext_integrations.doctype.mpesa_settings.mpesa_settings import process_balance_info, verify_transaction
from erpnext.accounts.doctype.pos_invoice.test_pos_invoice import create_pos_invoice
class TestMpesaSettings(unittest.TestCase):
def tearDown(self):
frappe.db.sql('delete from `tabMpesa Settings`')
frappe.db.sql('delete from `tabIntegration Request` where integration_request_service = "Mpesa"')
def test_creation_of_payment_gateway(self):
create_mpesa_settings(payment_gateway_name="_Test")
mode_of_payment = frappe.get_doc("Mode of Payment", "Mpesa-_Test")
self.assertTrue(frappe.db.exists("Payment Gateway Account", {'payment_gateway': "Mpesa-_Test"}))
self.assertTrue(mode_of_payment.name)
self.assertEquals(mode_of_payment.type, "Phone")
def test_processing_of_account_balance(self):
mpesa_doc = create_mpesa_settings(payment_gateway_name="_Account Balance")
mpesa_doc.get_account_balance_info()
callback_response = get_account_balance_callback_payload()
process_balance_info(**callback_response)
integration_request = frappe.get_doc("Integration Request", "AG_20200927_00007cdb1f9fb6494315")
# test integration request creation and successful update of the status on receiving callback response
self.assertTrue(integration_request)
self.assertEquals(integration_request.status, "Completed")
# test formatting of account balance received as string to json with appropriate currency symbol
mpesa_doc.reload()
self.assertEquals(mpesa_doc.account_balance, dumps({
"Working Account": {
"current_balance": "Sh 481,000.00",
"available_balance": "Sh 481,000.00",
"reserved_balance": "Sh 0.00",
"uncleared_balance": "Sh 0.00"
}
}))
integration_request.delete()
def test_processing_of_callback_payload(self):
create_mpesa_settings(payment_gateway_name="Payment")
mpesa_account = frappe.db.get_value("Payment Gateway Account", {"payment_gateway": 'Mpesa-Payment'}, "payment_account")
frappe.db.set_value("Account", mpesa_account, "account_currency", "KES")
frappe.db.set_value("Customer", "_Test Customer", "default_currency", "KES")
pos_invoice = create_pos_invoice(do_not_submit=1)
pos_invoice.append("payments", {'mode_of_payment': 'Mpesa-Payment', 'account': mpesa_account, 'amount': 500})
pos_invoice.contact_mobile = "093456543894"
pos_invoice.currency = "KES"
pos_invoice.save()
pr = pos_invoice.create_payment_request()
# test payment request creation
self.assertEquals(pr.payment_gateway, "Mpesa-Payment")
# submitting payment request creates integration requests with random id
integration_req_ids = frappe.get_all("Integration Request", filters={
'reference_doctype': pr.doctype,
'reference_docname': pr.name,
}, pluck="name")
callback_response = get_payment_callback_payload(Amount=500, CheckoutRequestID=integration_req_ids[0])
verify_transaction(**callback_response)
# test creation of integration request
integration_request = frappe.get_doc("Integration Request", integration_req_ids[0])
# test integration request creation and successful update of the status on receiving callback response
self.assertTrue(integration_request)
self.assertEquals(integration_request.status, "Completed")
pos_invoice.reload()
integration_request.reload()
self.assertEquals(pos_invoice.mpesa_receipt_number, "LGR7OWQX0R")
self.assertEquals(integration_request.status, "Completed")
frappe.db.set_value("Customer", "_Test Customer", "default_currency", "")
integration_request.delete()
pr.reload()
pr.cancel()
pr.delete()
pos_invoice.delete()
def test_processing_of_multiple_callback_payload(self):
create_mpesa_settings(payment_gateway_name="Payment")
mpesa_account = frappe.db.get_value("Payment Gateway Account", {"payment_gateway": 'Mpesa-Payment'}, "payment_account")
frappe.db.set_value("Account", mpesa_account, "account_currency", "KES")
frappe.db.set_value("Mpesa Settings", "Payment", "transaction_limit", "500")
frappe.db.set_value("Customer", "_Test Customer", "default_currency", "KES")
pos_invoice = create_pos_invoice(do_not_submit=1)
pos_invoice.append("payments", {'mode_of_payment': 'Mpesa-Payment', 'account': mpesa_account, 'amount': 1000})
pos_invoice.contact_mobile = "093456543894"
pos_invoice.currency = "KES"
pos_invoice.save()
pr = pos_invoice.create_payment_request()
# test payment request creation
self.assertEquals(pr.payment_gateway, "Mpesa-Payment")
# submitting payment request creates integration requests with random id
integration_req_ids = frappe.get_all("Integration Request", filters={
'reference_doctype': pr.doctype,
'reference_docname': pr.name,
}, pluck="name")
# create random receipt nos and send it as response to callback handler
mpesa_receipt_numbers = [frappe.utils.random_string(5) for d in integration_req_ids]
integration_requests = []
for i in range(len(integration_req_ids)):
callback_response = get_payment_callback_payload(
Amount=500,
CheckoutRequestID=integration_req_ids[i],
MpesaReceiptNumber=mpesa_receipt_numbers[i]
)
# handle response manually
verify_transaction(**callback_response)
# test completion of integration request
integration_request = frappe.get_doc("Integration Request", integration_req_ids[i])
self.assertEquals(integration_request.status, "Completed")
integration_requests.append(integration_request)
# check receipt number once all the integration requests are completed
pos_invoice.reload()
self.assertEquals(pos_invoice.mpesa_receipt_number, ', '.join(mpesa_receipt_numbers))
frappe.db.set_value("Customer", "_Test Customer", "default_currency", "")
[d.delete() for d in integration_requests]
pr.reload()
pr.cancel()
pr.delete()
pos_invoice.delete()
def test_processing_of_only_one_succes_callback_payload(self):
create_mpesa_settings(payment_gateway_name="Payment")
mpesa_account = frappe.db.get_value("Payment Gateway Account", {"payment_gateway": 'Mpesa-Payment'}, "payment_account")
frappe.db.set_value("Account", mpesa_account, "account_currency", "KES")
frappe.db.set_value("Mpesa Settings", "Payment", "transaction_limit", "500")
frappe.db.set_value("Customer", "_Test Customer", "default_currency", "KES")
pos_invoice = create_pos_invoice(do_not_submit=1)
pos_invoice.append("payments", {'mode_of_payment': 'Mpesa-Payment', 'account': mpesa_account, 'amount': 1000})
pos_invoice.contact_mobile = "093456543894"
pos_invoice.currency = "KES"
pos_invoice.save()
pr = pos_invoice.create_payment_request()
# test payment request creation
self.assertEquals(pr.payment_gateway, "Mpesa-Payment")
# submitting payment request creates integration requests with random id
integration_req_ids = frappe.get_all("Integration Request", filters={
'reference_doctype': pr.doctype,
'reference_docname': pr.name,
}, pluck="name")
# create random receipt nos and send it as response to callback handler
mpesa_receipt_numbers = [frappe.utils.random_string(5) for d in integration_req_ids]
callback_response = get_payment_callback_payload(
Amount=500,
CheckoutRequestID=integration_req_ids[0],
MpesaReceiptNumber=mpesa_receipt_numbers[0]
)
# handle response manually
verify_transaction(**callback_response)
# test completion of integration request
integration_request = frappe.get_doc("Integration Request", integration_req_ids[0])
self.assertEquals(integration_request.status, "Completed")
# now one request is completed
# second integration request fails
# now retrying payment request should make only one integration request again
pr = pos_invoice.create_payment_request()
new_integration_req_ids = frappe.get_all("Integration Request", filters={
'reference_doctype': pr.doctype,
'reference_docname': pr.name,
'name': ['not in', integration_req_ids]
}, pluck="name")
self.assertEquals(len(new_integration_req_ids), 1)
frappe.db.set_value("Customer", "_Test Customer", "default_currency", "")
frappe.db.sql("delete from `tabIntegration Request` where integration_request_service = 'Mpesa'")
pr.reload()
pr.cancel()
pr.delete()
pos_invoice.delete()
def create_mpesa_settings(payment_gateway_name="Express"):
if frappe.db.exists("Mpesa Settings", payment_gateway_name):
return frappe.get_doc("Mpesa Settings", payment_gateway_name)
doc = frappe.get_doc(dict( #nosec
doctype="Mpesa Settings",
payment_gateway_name=payment_gateway_name,
consumer_key="5sMu9LVI1oS3oBGPJfh3JyvLHwZOdTKn",
consumer_secret="VI1oS3oBGPJfh3JyvLHw",
online_passkey="LVI1oS3oBGPJfh3JyvLHwZOd",
till_number="174379"
))
doc.insert(ignore_permissions=True)
return doc
def get_test_account_balance_response():
"""Response received after calling the account balance API."""
return {
"ResultType":0,
"ResultCode":0,
"ResultDesc":"The service request has been accepted successfully.",
"OriginatorConversationID":"10816-694520-2",
"ConversationID":"AG_20200927_00007cdb1f9fb6494315",
"TransactionID":"LGR0000000",
"ResultParameters":{
"ResultParameter":[
{
"Key":"ReceiptNo",
"Value":"LGR919G2AV"
},
{
"Key":"Conversation ID",
"Value":"AG_20170727_00004492b1b6d0078fbe"
},
{
"Key":"FinalisedTime",
"Value":20170727101415
},
{
"Key":"Amount",
"Value":10
},
{
"Key":"TransactionStatus",
"Value":"Completed"
},
{
"Key":"ReasonType",
"Value":"Salary Payment via API"
},
{
"Key":"TransactionReason"
},
{
"Key":"DebitPartyCharges",
"Value":"Fee For B2C Payment|KES|33.00"
},
{
"Key":"DebitAccountType",
"Value":"Utility Account"
},
{
"Key":"InitiatedTime",
"Value":20170727101415
},
{
"Key":"Originator Conversation ID",
"Value":"19455-773836-1"
},
{
"Key":"CreditPartyName",
"Value":"254708374149 - John Doe"
},
{
"Key":"DebitPartyName",
"Value":"600134 - Safaricom157"
}
]
},
"ReferenceData":{
"ReferenceItem":{
"Key":"Occasion",
"Value":"aaaa"
}
}
}
def get_payment_request_response_payload(Amount=500):
"""Response received after successfully calling the stk push process request API."""
CheckoutRequestID = frappe.utils.random_string(10)
return {
"MerchantRequestID": "8071-27184008-1",
"CheckoutRequestID": CheckoutRequestID,
"ResultCode": 0,
"ResultDesc": "The service request is processed successfully.",
"CallbackMetadata": {
"Item": [
{ "Name": "Amount", "Value": Amount },
{ "Name": "MpesaReceiptNumber", "Value": "LGR7OWQX0R" },
{ "Name": "TransactionDate", "Value": 20201006113336 },
{ "Name": "PhoneNumber", "Value": 254723575670 }
]
}
}
def get_payment_callback_payload(Amount=500, CheckoutRequestID="ws_CO_061020201133231972", MpesaReceiptNumber="LGR7OWQX0R"):
"""Response received from the server as callback after calling the stkpush process request API."""
return {
"Body":{
"stkCallback":{
"MerchantRequestID":"19465-780693-1",
"CheckoutRequestID":CheckoutRequestID,
"ResultCode":0,
"ResultDesc":"The service request is processed successfully.",
"CallbackMetadata":{
"Item":[
{ "Name":"Amount", "Value":Amount },
{ "Name":"MpesaReceiptNumber", "Value":MpesaReceiptNumber },
{ "Name":"Balance" },
{ "Name":"TransactionDate", "Value":20170727154800 },
{ "Name":"PhoneNumber", "Value":254721566839 }
]
}
}
}
}
def get_account_balance_callback_payload():
"""Response received from the server as callback after calling the account balance API."""
return {
"Result":{
"ResultType": 0,
"ResultCode": 0,
"ResultDesc": "The service request is processed successfully.",
"OriginatorConversationID": "16470-170099139-1",
"ConversationID": "AG_20200927_00007cdb1f9fb6494315",
"TransactionID": "OIR0000000",
"ResultParameters": {
"ResultParameter": [
{
"Key": "AccountBalance",
"Value": "Working Account|KES|481000.00|481000.00|0.00|0.00"
},
{ "Key": "BOCompletedTime", "Value": 20200927234123 }
]
},
"ReferenceData": {
"ReferenceItem": {
"Key": "QueueTimeoutURL",
"Value": "https://internalsandbox.safaricom.co.ke/mpesa/abresults/v1/submit"
}
}
}
} | 35.177465 | 124 | 0.738629 | 7,996 | 0.640295 | 0 | 0 | 0 | 0 | 0 | 0 | 5,594 | 0.44795 |
2948b21202accf70d658d0b73f9aafb72b41be55 | 114 | py | Python | b2accessdeprovisioning/configparser.py | EUDAT-B2ACCESS/b2access-deprovisioning-report | 2260347a4e1f522386c188c0dfae2e94bc5b2a40 | [
"Apache-2.0"
]
| null | null | null | b2accessdeprovisioning/configparser.py | EUDAT-B2ACCESS/b2access-deprovisioning-report | 2260347a4e1f522386c188c0dfae2e94bc5b2a40 | [
"Apache-2.0"
]
| null | null | null | b2accessdeprovisioning/configparser.py | EUDAT-B2ACCESS/b2access-deprovisioning-report | 2260347a4e1f522386c188c0dfae2e94bc5b2a40 | [
"Apache-2.0"
]
| 2 | 2017-10-05T07:26:39.000Z | 2017-10-05T07:27:54.000Z | from __future__ import absolute_import
import yaml
with open("config.yml", "r") as f:
config = yaml.load(f)
| 16.285714 | 38 | 0.710526 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 0.131579 |
2948ba6edb0a75f155add6e7fa7726939cd6ba56 | 3,870 | py | Python | res_mods/mods/packages/xvm_main/python/vehinfo_tiers.py | peterbartha/ImmunoMod | cbf8cd49893d7082a347c1f72c0e39480869318a | [
"MIT"
]
| null | null | null | res_mods/mods/packages/xvm_main/python/vehinfo_tiers.py | peterbartha/ImmunoMod | cbf8cd49893d7082a347c1f72c0e39480869318a | [
"MIT"
]
| 1 | 2016-04-03T13:31:39.000Z | 2016-04-03T16:48:26.000Z | res_mods/mods/packages/xvm_main/python/vehinfo_tiers.py | peterbartha/ImmunoMod | cbf8cd49893d7082a347c1f72c0e39480869318a | [
"MIT"
]
| null | null | null | """ XVM (c) www.modxvm.com 2013-2017 """
# PUBLIC
def getTiers(level, cls, key):
return _getTiers(level, cls, key)
# PRIVATE
from logger import *
from gui.shared.utils.requesters import REQ_CRITERIA
from helpers import dependency
from skeletons.gui.shared import IItemsCache
_special = {
# Data from http://forum.worldoftanks.ru/index.php?/topic/41221-
# Last update: 23.05.2017
# level 2
'germany:G53_PzI': [ 2, 2 ],
'uk:GB76_Mk_VIC': [ 2, 2 ],
'usa:A19_T2_lt': [ 2, 4 ],
'usa:A93_T7_Combat_Car': [ 2, 2 ],
# level 3
'germany:G36_PzII_J': [ 3, 4 ],
'japan:J05_Ke_Ni_B': [ 3, 4 ],
'ussr:R34_BT-SV': [ 3, 4 ],
'ussr:R50_SU76I': [ 3, 4 ],
'ussr:R56_T-127': [ 3, 4 ],
'ussr:R67_M3_LL': [ 3, 4 ],
'ussr:R86_LTP': [ 3, 4 ],
# level 4
'france:F14_AMX40': [ 4, 6 ],
'germany:G35_B-1bis_captured': [ 4, 4 ],
'japan:J06_Ke_Ho': [ 4, 6 ],
'uk:GB04_Valentine': [ 4, 6 ],
'uk:GB60_Covenanter': [ 4, 6 ],
'ussr:R12_A-20': [ 4, 6 ],
'ussr:R31_Valentine_LL': [ 4, 4 ],
'ussr:R44_T80': [ 4, 6 ],
'ussr:R68_A-32': [ 4, 5 ],
# level 5
'germany:G104_Stug_IV': [ 5, 6 ],
'germany:G32_PzV_PzIV': [ 5, 6 ],
'germany:G32_PzV_PzIV_ausf_Alfa': [ 5, 6 ],
'germany:G70_PzIV_Hydro': [ 5, 6 ],
'uk:GB20_Crusader': [ 5, 7 ],
'uk:GB51_Excelsior': [ 5, 6 ],
'uk:GB68_Matilda_Black_Prince': [ 5, 6 ],
'usa:A21_T14': [ 5, 6 ],
'usa:A44_M4A2E4': [ 5, 6 ],
'ussr:R32_Matilda_II_LL': [ 5, 6 ],
'ussr:R33_Churchill_LL': [ 5, 6 ],
'ussr:R38_KV-220': [ 5, 6 ],
'ussr:R38_KV-220_beta': [ 5, 6 ],
'ussr:R78_SU_85I': [ 5, 6 ],
# level 6
'germany:G32_PzV_PzIV_CN': [ 6, 7 ],
'germany:G32_PzV_PzIV_ausf_Alfa_CN': [ 6, 7 ],
'uk:GB63_TOG_II': [ 6, 7 ],
# level 7
'germany:G48_E-25': [ 7, 8 ],
'germany:G78_Panther_M10': [ 7, 8 ],
'uk:GB71_AT_15A': [ 7, 8 ],
'usa:A86_T23E3': [ 7, 8 ],
'ussr:R98_T44_85': [ 7, 8 ],
'ussr:R99_T44_122': [ 7, 8 ],
# level 8
'china:Ch01_Type59': [ 8, 9 ],
'china:Ch03_WZ-111': [ 8, 9 ],
'china:Ch14_T34_3': [ 8, 9 ],
'china:Ch23_112': [ 8, 9 ],
'france:F65_FCM_50t': [ 8, 9 ],
'germany:G65_JagdTiger_SdKfz_185': [ 8, 9 ],
'usa:A45_M6A2E1': [ 8, 9 ],
'usa:A80_T26_E4_SuperPershing': [ 8, 9 ],
'ussr:R54_KV-5': [ 8, 9 ],
'ussr:R61_Object252': [ 8, 9 ],
'ussr:R61_Object252_BF': [ 8, 9 ],
}
def _getTiers(level, cls, key):
if key in _special:
return _special[key]
# HT: (=T4 max+1)
if level == 4 and cls == 'heavyTank':
return (4, 5)
# default: (<T3 max+1) & (>=T3 max+2) & (>T9 max=11)
return (level, level + 1 if level < 3 else 11 if level > 9 else level + 2)
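# Illustrative values produced by the default rule above (example added for
# clarity; 'some_vehicle' is not a real vehicle key):
#   _getTiers(2, 'lightTank', 'some_vehicle')  -> (2, 3)
#   _getTiers(5, 'mediumTank', 'some_vehicle') -> (5, 7)
#   _getTiers(10, 'heavyTank', 'some_vehicle') -> (10, 11)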
def _test_specials():
for veh_name in _special.keys():
itemsCache = dependency.instance(IItemsCache)
if not itemsCache.items.getVehicles(REQ_CRITERIA.VEHICLE.SPECIFIC_BY_NAME(veh_name)):
warn('vehinfo_tiers: vehicle %s declared in _special does not exist!' % veh_name)
| 36.857143 | 93 | 0.445478 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,424 | 0.367959 |
294933d7ee4435c7faf58b9337983fadc1b0d19b | 6,099 | py | Python | pypy/module/cpyext/test/test_pystrtod.py | m4sterchain/mesapy | ed546d59a21b36feb93e2309d5c6b75aa0ad95c9 | [
"Apache-2.0",
"OpenSSL"
]
| 381 | 2018-08-18T03:37:22.000Z | 2022-02-06T23:57:36.000Z | pypy/module/cpyext/test/test_pystrtod.py | m4sterchain/mesapy | ed546d59a21b36feb93e2309d5c6b75aa0ad95c9 | [
"Apache-2.0",
"OpenSSL"
]
| 16 | 2018-09-22T18:12:47.000Z | 2022-02-22T20:03:59.000Z | pypy/module/cpyext/test/test_pystrtod.py | m4sterchain/mesapy | ed546d59a21b36feb93e2309d5c6b75aa0ad95c9 | [
"Apache-2.0",
"OpenSSL"
]
| 30 | 2018-08-20T03:16:34.000Z | 2022-01-12T17:39:22.000Z | import math
from pypy.module.cpyext import pystrtod
from pypy.module.cpyext.test.test_api import BaseApiTest, raises_w
from rpython.rtyper.lltypesystem import rffi
from rpython.rtyper.lltypesystem import lltype
from pypy.module.cpyext.pystrtod import PyOS_string_to_double
class TestPyOS_string_to_double(BaseApiTest):
def test_simple_float(self, space):
s = rffi.str2charp('0.4')
null = lltype.nullptr(rffi.CCHARPP.TO)
r = PyOS_string_to_double(space, s, null, None)
assert r == 0.4
rffi.free_charp(s)
def test_empty_string(self, space):
s = rffi.str2charp('')
null = lltype.nullptr(rffi.CCHARPP.TO)
with raises_w(space, ValueError):
PyOS_string_to_double(space, s, null, None)
rffi.free_charp(s)
def test_bad_string(self, space):
s = rffi.str2charp(' 0.4')
null = lltype.nullptr(rffi.CCHARPP.TO)
with raises_w(space, ValueError):
PyOS_string_to_double(space, s, null, None)
rffi.free_charp(s)
def test_overflow_pos(self, space):
s = rffi.str2charp('1e500')
null = lltype.nullptr(rffi.CCHARPP.TO)
r = PyOS_string_to_double(space, s, null, None)
assert math.isinf(r)
assert r > 0
rffi.free_charp(s)
def test_overflow_neg(self, space):
s = rffi.str2charp('-1e500')
null = lltype.nullptr(rffi.CCHARPP.TO)
r = PyOS_string_to_double(space, s, null, None)
assert math.isinf(r)
assert r < 0
rffi.free_charp(s)
def test_overflow_exc(self, space):
s = rffi.str2charp('1e500')
null = lltype.nullptr(rffi.CCHARPP.TO)
with raises_w(space, ValueError):
PyOS_string_to_double(space, s, null, space.w_ValueError)
rffi.free_charp(s)
def test_endptr_number(self, space):
s = rffi.str2charp('0.4')
endp = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw')
r = PyOS_string_to_double(space, s, endp, None)
assert r == 0.4
endp_addr = rffi.cast(rffi.LONG, endp[0])
s_addr = rffi.cast(rffi.LONG, s)
assert endp_addr == s_addr + 3
rffi.free_charp(s)
lltype.free(endp, flavor='raw')
def test_endptr_tail(self, space):
s = rffi.str2charp('0.4 foo')
endp = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw')
r = PyOS_string_to_double(space, s, endp, None)
assert r == 0.4
endp_addr = rffi.cast(rffi.LONG, endp[0])
s_addr = rffi.cast(rffi.LONG, s)
assert endp_addr == s_addr + 3
rffi.free_charp(s)
lltype.free(endp, flavor='raw')
def test_endptr_no_conversion(self, space):
s = rffi.str2charp('foo')
endp = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw')
with raises_w(space, ValueError):
PyOS_string_to_double(space, s, endp, None)
endp_addr = rffi.cast(rffi.LONG, endp[0])
s_addr = rffi.cast(rffi.LONG, s)
assert endp_addr == s_addr
rffi.free_charp(s)
lltype.free(endp, flavor='raw')
class TestPyOS_double_to_string(BaseApiTest):
def test_format_code(self, api):
ptype = lltype.malloc(rffi.INTP.TO, 1, flavor='raw')
r = api.PyOS_double_to_string(150.0, 'e', 1, 0, ptype)
assert '1.5e+02' == rffi.charp2str(r)
type_value = rffi.cast(lltype.Signed, ptype[0])
assert pystrtod.Py_DTST_FINITE == type_value
rffi.free_charp(r)
lltype.free(ptype, flavor='raw')
def test_precision(self, api):
ptype = lltype.malloc(rffi.INTP.TO, 1, flavor='raw')
r = api.PyOS_double_to_string(3.14159269397, 'g', 5, 0, ptype)
assert '3.1416' == rffi.charp2str(r)
type_value = rffi.cast(lltype.Signed, ptype[0])
assert pystrtod.Py_DTST_FINITE == type_value
rffi.free_charp(r)
lltype.free(ptype, flavor='raw')
def test_flags_sign(self, api):
ptype = lltype.malloc(rffi.INTP.TO, 1, flavor='raw')
r = api.PyOS_double_to_string(-3.14, 'g', 3, 1, ptype)
assert '-3.14' == rffi.charp2str(r)
type_value = rffi.cast(lltype.Signed, ptype[0])
assert pystrtod.Py_DTST_FINITE == type_value
rffi.free_charp(r)
lltype.free(ptype, flavor='raw')
def test_flags_add_dot_0(self, api):
ptype = lltype.malloc(rffi.INTP.TO, 1, flavor='raw')
r = api.PyOS_double_to_string(3, 'g', 5, 2, ptype)
assert '3.0' == rffi.charp2str(r)
type_value = rffi.cast(lltype.Signed, ptype[0])
assert pystrtod.Py_DTST_FINITE == type_value
rffi.free_charp(r)
lltype.free(ptype, flavor='raw')
def test_flags_alt(self, api):
ptype = lltype.malloc(rffi.INTP.TO, 1, flavor='raw')
r = api.PyOS_double_to_string(314., 'g', 3, 4, ptype)
assert '314.' == rffi.charp2str(r)
type_value = rffi.cast(lltype.Signed, ptype[0])
assert pystrtod.Py_DTST_FINITE == type_value
rffi.free_charp(r)
lltype.free(ptype, flavor='raw')
def test_ptype_nan(self, api):
ptype = lltype.malloc(rffi.INTP.TO, 1, flavor='raw')
r = api.PyOS_double_to_string(float('nan'), 'g', 3, 4, ptype)
assert 'nan' == rffi.charp2str(r)
type_value = rffi.cast(lltype.Signed, ptype[0])
assert pystrtod.Py_DTST_NAN == type_value
rffi.free_charp(r)
lltype.free(ptype, flavor='raw')
def test_ptype_infinity(self, api):
ptype = lltype.malloc(rffi.INTP.TO, 1, flavor='raw')
r = api.PyOS_double_to_string(1e200 * 1e200, 'g', 0, 0, ptype)
assert 'inf' == rffi.charp2str(r)
type_value = rffi.cast(lltype.Signed, ptype[0])
assert pystrtod.Py_DTST_INFINITE == type_value
rffi.free_charp(r)
lltype.free(ptype, flavor='raw')
def test_ptype_null(self, api):
ptype = lltype.nullptr(rffi.INTP.TO)
r = api.PyOS_double_to_string(3.14, 'g', 3, 0, ptype)
assert '3.14' == rffi.charp2str(r)
assert ptype == lltype.nullptr(rffi.INTP.TO)
rffi.free_charp(r)
| 37.881988 | 70 | 0.624529 | 5,819 | 0.954091 | 0 | 0 | 0 | 0 | 0 | 0 | 234 | 0.038367 |
294a474ec8bf0bc2d0dc645a827ce6425f19ce7f | 3,529 | py | Python | mathics/core/systemsymbols.py | Mathics3/mathics-core | 54dc3c00a42cd893c6430054e125291b6eb55ead | [
"Apache-2.0"
]
| 90 | 2021-09-11T14:14:00.000Z | 2022-03-29T02:08:29.000Z | mathics/core/systemsymbols.py | Mathics3/mathics-core | 54dc3c00a42cd893c6430054e125291b6eb55ead | [
"Apache-2.0"
]
| 187 | 2021-09-13T01:00:41.000Z | 2022-03-31T11:52:52.000Z | mathics/core/systemsymbols.py | Mathics3/mathics-core | 54dc3c00a42cd893c6430054e125291b6eb55ead | [
"Apache-2.0"
]
| 10 | 2021-10-05T15:44:26.000Z | 2022-03-21T12:34:33.000Z | # -*- coding: utf-8 -*-
from mathics.core.symbols import Symbol
# Some other common Symbols. This list is sorted in alphabetic order.
SymbolAssumptions = Symbol("$Assumptions")
SymbolAborted = Symbol("$Aborted")
SymbolAll = Symbol("All")
SymbolAlternatives = Symbol("Alternatives")
SymbolAnd = Symbol("And")
SymbolAppend = Symbol("Append")
SymbolApply = Symbol("Apply")
SymbolAssociation = Symbol("Association")
SymbolAutomatic = Symbol("Automatic")
SymbolBlank = Symbol("Blank")
SymbolBlend = Symbol("Blend")
SymbolByteArray = Symbol("ByteArray")
SymbolCatalan = Symbol("Catalan")
SymbolColorData = Symbol("ColorData")
SymbolComplex = Symbol("Complex")
SymbolComplexInfinity = Symbol("ComplexInfinity")
SymbolCondition = Symbol("Condition")
SymbolConditionalExpression = Symbol("ConditionalExpression")
Symbol_Context = Symbol("$Context")
Symbol_ContextPath = Symbol("$ContextPath")
SymbolCos = Symbol("Cos")
SymbolD = Symbol("D")
SymbolDerivative = Symbol("Derivative")
SymbolDirectedInfinity = Symbol("DirectedInfinity")
SymbolDispatch = Symbol("Dispatch")
SymbolE = Symbol("E")
SymbolEdgeForm = Symbol("EdgeForm")
SymbolEqual = Symbol("Equal")
SymbolExpandAll = Symbol("ExpandAll")
SymbolEulerGamma = Symbol("EulerGamma")
SymbolFailed = Symbol("$Failed")
SymbolFunction = Symbol("Function")
SymbolGamma = Symbol("Gamma")
SymbolGet = Symbol("Get")
SymbolGoldenRatio = Symbol("GoldenRatio")
SymbolGraphics = Symbol("Graphics")
SymbolGreater = Symbol("Greater")
SymbolGreaterEqual = Symbol("GreaterEqual")
SymbolGrid = Symbol("Grid")
SymbolHoldForm = Symbol("HoldForm")
SymbolIndeterminate = Symbol("Indeterminate")
SymbolImplies = Symbol("Implies")
SymbolInfinity = Symbol("Infinity")
SymbolInfix = Symbol("Infix")
SymbolInteger = Symbol("Integer")
SymbolIntegrate = Symbol("Integrate")
SymbolLeft = Symbol("Left")
SymbolLess = Symbol("Less")
SymbolLessEqual = Symbol("LessEqual")
SymbolLog = Symbol("Log")
SymbolMachinePrecision = Symbol("MachinePrecision")
SymbolMakeBoxes = Symbol("MakeBoxes")
SymbolMessageName = Symbol("MessageName")
SymbolMinus = Symbol("Minus")
SymbolMap = Symbol("Map")
SymbolMatrixPower = Symbol("MatrixPower")
SymbolMaxPrecision = Symbol("$MaxPrecision")
SymbolMemberQ = Symbol("MemberQ")
SymbolMinus = Symbol("Minus")
SymbolN = Symbol("N")
SymbolNeeds = Symbol("Needs")
SymbolNIntegrate = Symbol("NIntegrate")
SymbolNone = Symbol("None")
SymbolNot = Symbol("Not")
SymbolNull = Symbol("Null")
SymbolNumberQ = Symbol("NumberQ")
SymbolNumericQ = Symbol("NumericQ")
SymbolOptionValue = Symbol("OptionValue")
SymbolOr = Symbol("Or")
SymbolOverflow = Symbol("Overflow")
SymbolPackages = Symbol("$Packages")
SymbolPattern = Symbol("Pattern")
SymbolPi = Symbol("Pi")
SymbolPiecewise = Symbol("Piecewise")
SymbolPoint = Symbol("Point")
SymbolPossibleZeroQ = Symbol("PossibleZeroQ")
SymbolQuiet = Symbol("Quiet")
SymbolRational = Symbol("Rational")
SymbolReal = Symbol("Real")
SymbolRow = Symbol("Row")
SymbolRowBox = Symbol("RowBox")
SymbolRGBColor = Symbol("RGBColor")
SymbolSuperscriptBox = Symbol("SuperscriptBox")
SymbolRule = Symbol("Rule")
SymbolRuleDelayed = Symbol("RuleDelayed")
SymbolSequence = Symbol("Sequence")
SymbolSeries = Symbol("Series")
SymbolSeriesData = Symbol("SeriesData")
SymbolSet = Symbol("Set")
SymbolSimplify = Symbol("Simplify")
SymbolSin = Symbol("Sin")
SymbolSlot = Symbol("Slot")
SymbolStringQ = Symbol("StringQ")
SymbolStyle = Symbol("Style")
SymbolTable = Symbol("Table")
SymbolToString = Symbol("ToString")
SymbolUndefined = Symbol("Undefined")
SymbolXor = Symbol("Xor")
| 33.932692 | 69 | 0.765373 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,007 | 0.28535 |
294bff20d8c499704a706ccaf6f51e0e5fd8ce4d | 5,821 | py | Python | exercises/ali/cartpole-MCTS/cartpole.py | alik604/ra | 6058a9adb47db93bb86bcb2c224930c5731d663d | [
"Unlicense"
]
| null | null | null | exercises/ali/cartpole-MCTS/cartpole.py | alik604/ra | 6058a9adb47db93bb86bcb2c224930c5731d663d | [
"Unlicense"
]
| 5 | 2021-03-26T01:30:13.000Z | 2021-04-22T22:19:03.000Z | exercises/ali/cartpole-MCTS/cartpole.py | alik604/ra | 6058a9adb47db93bb86bcb2c224930c5731d663d | [
"Unlicense"
]
| 1 | 2021-05-05T00:57:43.000Z | 2021-05-05T00:57:43.000Z | # from https://github.com/kvwoerden/mcts-cartpole
# ---------------------------------------------------------------------------- #
# Imports #
# ---------------------------------------------------------------------------- #
import os
import time
import random
import argparse
from types import SimpleNamespace
import gym
from gym import logger
from gym.wrappers.monitoring.video_recorder import VideoRecorder
from Simple_mcts import MCTSAgent
from Agent import dqn_agent
# ---------------------------------------------------------------------------- #
# Constants #
# ---------------------------------------------------------------------------- #
LOGGER_LEVEL = logger.WARN
args = dict()
args['env_name'] = 'CartPole-v0'
args['episodes'] = 10
args['seed'] = 28
args['iteration_budget'] = 8000 # The number of iterations for each search step. Increasing this should lead to better performance.')
args['lookahead_target'] = 10000 # The target number of steps the agent aims to look forward.'
args['max_episode_steps'] = 1500 # The maximum number of steps to play.
args['video_basepath'] = '.\\video' # './video'
args['start_cp'] = 20 # The start value of C_p, the value that the agent changes to try to achieve the lookahead target. Decreasing this makes the search tree deeper, increasing this makes the search tree wider.
args = SimpleNamespace(**args)
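# SimpleNamespace only provides attribute-style access to the dict above,
# e.g. args.iteration_budget evaluates to 8000.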
# ---------------------------------------------------------------------------- #
# Main loop #
# ---------------------------------------------------------------------------- #
if __name__ == '__main__':
logger.set_level(LOGGER_LEVEL)
random.seed(args.seed)
env = gym.make(args.env_name)
env.seed(args.seed)
Q_net = dqn_agent()
agent = MCTSAgent(args.iteration_budget, env, Q_net)
timestr = time.strftime("%Y%m%d-%H%M%S")
reward = 0
done = False
for i in range(args.episodes):
ob = env.reset()
env._max_episode_steps = args.max_episode_steps
video_path = os.path.join(
args.video_basepath, f"output_{timestr}_{i}.mp4")
# rec = VideoRecorder(env, path=video_path)
try:
sum_reward = 0
node = None
all_nodes = []
C_p = args.start_cp
while True:
print("################")
env.render()
# rec.capture_frame()
action, node, C_p = agent.act(env.state, n_actions=env.action_space.n, node=node, C_p=C_p, lookahead_target=args.lookahead_target)
ob, reward, done, _ = env.step(action)
print("### observed state: ", ob)
sum_reward += reward
print("### sum_reward: ", sum_reward)
if done:
# rec.close()
break
except KeyboardInterrupt as e:
# rec.close()
env.close()
raise e
env.close()
| 39.067114 | 219 | 0.519842 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,522 | 0.433259 |
294c561a401bd6bdb0db578e7797d3a5175a9a58 | 318 | py | Python | wildlifecompliance/components/applications/cron.py | preranaandure/wildlifecompliance | bc19575f7bccf7e19adadbbaf5d3eda1d1aee4b5 | [
"Apache-2.0"
]
| 1 | 2020-12-07T17:12:40.000Z | 2020-12-07T17:12:40.000Z | wildlifecompliance/components/applications/cron.py | preranaandure/wildlifecompliance | bc19575f7bccf7e19adadbbaf5d3eda1d1aee4b5 | [
"Apache-2.0"
]
| 14 | 2020-01-08T08:08:26.000Z | 2021-03-19T22:59:46.000Z | wildlifecompliance/components/applications/cron.py | preranaandure/wildlifecompliance | bc19575f7bccf7e19adadbbaf5d3eda1d1aee4b5 | [
"Apache-2.0"
]
| 15 | 2020-01-08T08:02:28.000Z | 2021-11-03T06:48:32.000Z | from django_cron import CronJobBase, Schedule
class VerifyLicenceSpeciesJob(CronJobBase):
"""
Verifies LicenceSpecies against TSC server.
"""
RUN_AT_TIMES = ['00:00']
schedule = Schedule(run_at_times=RUN_AT_TIMES)
code = 'applications.verify_licence_species'
def do(self):
pass
| 21.2 | 50 | 0.704403 | 269 | 0.845912 | 0 | 0 | 0 | 0 | 0 | 0 | 103 | 0.323899 |
294ddecc4d289926d35a18bd81582fdedcf038ee | 2,999 | py | Python | optional-plugins/CSVPlugin/CSVContext.py | owlfish/pubtal | fb20a0acf2769b2c06012b65bd462f02da12bd1c | [
"BSD-3-Clause"
]
| null | null | null | optional-plugins/CSVPlugin/CSVContext.py | owlfish/pubtal | fb20a0acf2769b2c06012b65bd462f02da12bd1c | [
"BSD-3-Clause"
]
| null | null | null | optional-plugins/CSVPlugin/CSVContext.py | owlfish/pubtal | fb20a0acf2769b2c06012b65bd462f02da12bd1c | [
"BSD-3-Clause"
]
| null | null | null | import ASV
from simpletal import simpleTAL, simpleTALES
try:
import logging
except:
import InfoLogging as logging
import codecs
class ColumnSorter:
def __init__ (self, columnList):
self.columnList = columnList
self.log = logging.getLogger ('ColumnSorter')
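    # columnList is expected to be a list of (columnName, translationMap) pairs,
    # e.g. [("status", {"open": 0, "closed": 1}), ("name", None)]  (illustrative)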
def setup (self, fieldNames):
mapList = []
for columnName, translationMap in self.columnList:
try:
colNum = fieldNames.index (columnName)
mapList.append ((colNum, translationMap))
except ValueError, e:
self.log.error ("No such column name as %s" % name)
raise e
self.mapList = mapList
def sort (self, row1, row2):
result = 0
for colNum, map in self.mapList:
result = self.doSort (row1, row2, colNum, map)
if (result != 0):
return result
return result
def doSort (self, row1, row2, colNum, map):
if (map is None):
col1 = row1[colNum]
col2 = row2[colNum]
else:
try:
col1 = map [row1[colNum]]
except KeyError, e:
self.log.warn ("No key found for key %s - assuming low value" % row1[colNum])
return -1
try:
col2 = map [row2[colNum]]
except KeyError, e:
self.log.warn ("No key found for key %s - assuming low value" % row1[colNum])
return 1
if (col1 < col2):
return -1
if (col1 == col2):
return 0
if (col1 > col2):
return 1
class CsvContextCreator:
def __init__ (self, fileName, fileCodec):
self.log = logging.getLogger ("CSVTemplate.CsvContextCreator")
self.csvData = ASV.ASV()
self.csvData.input_from_file(fileName, ASV.CSV(), has_field_names = 1)
self.fieldNames = self.csvData.get_field_names()
self.conv = fileCodec
def getContextMap (self, sorter=None):
orderList = []
for row in self.csvData:
orderList.append (row)
if (sorter is not None):
sorter.setup (self.fieldNames)
try:
orderList.sort (sorter.sort)
except Exception, e:
self.log.error ("Exception occured executing sorter: " + str (e))
raise e
contextList = []
for row in orderList:
rowMap = {}
colCount = 0
for col in row:
if (col != ""):
rowMap[self.fieldNames[colCount]] = self.conv(col)[0]
colCount += 1
contextList.append (rowMap)
return contextList
def getRawData (self):
return unicode (self.csvData)
class CSVTemplateExpander:
def __init__ (self, sourceFile, name="csvList"):
self.contextFactory = CsvContextCreator (sourceFile)
self.name = name
self.template=None
def expandTemplate (self, templateName, outputName, additionalContext = None, sorter=None):
context = simpleTALES.Context()
context.addGlobal (self.name, self.contextFactory.getContextMap (sorter))
if (additionalContext is not None):
context.addGlobal (additionalContext[0], additionalContext[1])
if (self.template is None):
templateFile = open (templateName, 'r')
self.template = simpleTAL.compileHTMLTemplate (templateFile)
templateFile.close()
outputFile = open (outputName, 'w')
self.template.expand (context, outputFile)
outputFile.close()
| 26.307018 | 92 | 0.686896 | 2,857 | 0.952651 | 0 | 0 | 0 | 0 | 0 | 0 | 219 | 0.073024 |
294e1d0fe03b7258df243ff2841d037d1b8158e8 | 2,484 | py | Python | wagtail/admin/forms/comments.py | stephiescastle/wagtail | 391f46ef91ca4a7bbf339bf9e9a738df3eb8e179 | [
"BSD-3-Clause"
]
| null | null | null | wagtail/admin/forms/comments.py | stephiescastle/wagtail | 391f46ef91ca4a7bbf339bf9e9a738df3eb8e179 | [
"BSD-3-Clause"
]
| null | null | null | wagtail/admin/forms/comments.py | stephiescastle/wagtail | 391f46ef91ca4a7bbf339bf9e9a738df3eb8e179 | [
"BSD-3-Clause"
]
| null | null | null | from django.forms import BooleanField, ValidationError
from django.utils.timezone import now
from django.utils.translation import gettext as _
from .models import WagtailAdminModelForm
class CommentReplyForm(WagtailAdminModelForm):
class Meta:
fields = ("text",)
def clean(self):
cleaned_data = super().clean()
user = self.for_user
if not self.instance.pk:
self.instance.user = user
elif self.instance.user != user:
# trying to edit someone else's comment reply
if any(field for field in self.changed_data):
# includes DELETION_FIELD_NAME, as users cannot delete each other's individual comment replies
# if deleting a whole thread, this should be done by deleting the parent Comment instead
self.add_error(
None, ValidationError(_("You cannot edit another user's comment."))
)
return cleaned_data
class CommentForm(WagtailAdminModelForm):
"""
This is designed to be subclassed and have the user overridden to enable user-based validation within the edit handler system
"""
resolved = BooleanField(required=False)
class Meta:
formsets = {
"replies": {
"form": CommentReplyForm,
"inherit_kwargs": ["for_user"],
}
}
def clean(self):
cleaned_data = super().clean()
user = self.for_user
if not self.instance.pk:
self.instance.user = user
elif self.instance.user != user:
# trying to edit someone else's comment
if any(
field
for field in self.changed_data
if field not in ["resolved", "position"]
):
# users can resolve each other's base comments and change their positions within a field
self.add_error(
None, ValidationError(_("You cannot edit another user's comment."))
)
return cleaned_data
def save(self, *args, **kwargs):
if self.cleaned_data.get("resolved", False):
if not getattr(self.instance, "resolved_at"):
self.instance.resolved_at = now()
self.instance.resolved_by = self.for_user
else:
self.instance.resolved_by = None
self.instance.resolved_at = None
return super().save(*args, **kwargs)
| 34.5 | 129 | 0.594605 | 2,292 | 0.922705 | 0 | 0 | 0 | 0 | 0 | 0 | 667 | 0.268519 |
294e291b1d27799d1015e0d511b66da83b03b728 | 1,039 | py | Python | run_db_data.py | MahirMahbub/email-client | 71ab85f987f783b703b58780444c072bd683927e | [
"MIT"
]
| null | null | null | run_db_data.py | MahirMahbub/email-client | 71ab85f987f783b703b58780444c072bd683927e | [
"MIT"
]
| 4 | 2021-08-01T16:29:48.000Z | 2021-08-01T16:58:36.000Z | run_db_data.py | MahirMahbub/email-client | 71ab85f987f783b703b58780444c072bd683927e | [
"MIT"
]
| null | null | null | import os
from sqlalchemy.orm import Session
from db.database import SessionLocal
class DbData:
def __init__(self):
self.root_directory: str = "db_merge_scripts"
self.scripts = [
"loader.sql"
]
def sync(self, db: Session):
for script in self.scripts:
try:
directory = os.path.join(self.root_directory, script)
print(directory)
sql = open(directory, "r").read()
db.execute(sql)
db.commit()
print(greed("Data file processed: " + directory))
except Exception as e:
print(red("Error to process data file: " + directory))
print(e)
def colored(text, r, g, b):
return "\033[38;2;{};{};{}m{} \033[38;2;255;255;255m".format(r, g, b, text)
def red(text):
return colored(text, 255, 0, 0)
def greed(text):
return colored(text, 0, 255, 0)
def add_master_data():
db = SessionLocal()
DbData().sync(db)
db.close()
| 23.088889 | 79 | 0.549567 | 648 | 0.623677 | 0 | 0 | 0 | 0 | 0 | 0 | 132 | 0.127045 |
295015d1c787b58239c947be3f7d6287049d840f | 6,274 | py | Python | scobra/analysis/compare_elements.py | nihalzp/scobra | de1faa73fb4d186d9567bfa8e174b3fd6f1833ef | [
"MIT"
]
| 7 | 2016-03-16T09:03:41.000Z | 2019-09-20T05:55:02.000Z | scobra/analysis/compare_elements.py | nihalzp/scobra | de1faa73fb4d186d9567bfa8e174b3fd6f1833ef | [
"MIT"
]
| 11 | 2019-10-03T15:04:58.000Z | 2020-05-11T17:27:10.000Z | scobra/analysis/compare_elements.py | nihalzp/scobra | de1faa73fb4d186d9567bfa8e174b3fd6f1833ef | [
"MIT"
]
| 6 | 2016-03-16T09:04:54.000Z | 2021-07-24T15:03:41.000Z |
def compareMetaboliteDicts(d1, d2):
sorted_d1_keys = sorted(d1.keys())
sorted_d2_keys = sorted(d2.keys())
for i in range(len(sorted_d1_keys)):
if not compareMetabolites(sorted_d1_keys[i], sorted_d2_keys[i], naive=True):
return False
elif not d1[sorted_d1_keys[i]] == d2[sorted_d2_keys[i]]:
return False
else:
return True
def compareMetabolites(met1, met2, naive=False):
if isinstance(met1, set):
return compareReactions(list(met1), list(met2), naive)
if isinstance(met1, list):
if not isinstance(met2, list):
return False
elif len(met1) != len(met2):
return False
else:
for i in range(len(met1)):
if not compareMetabolites(met1[i], met2[i], naive):
return False
else:
return True
else:
if not True:
#can never be entered
pass
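        # The "if not True" branch above is a no-op placeholder, seemingly kept so
        # that every real check below can be written as a uniform "elif" clause.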
elif not met1._bound == met2._bound:
return False
elif not met1._constraint_sense == met2._constraint_sense:
return False
#elif not met1.annotation == met2.annotation:
# return False
elif not met1.charge == met2.charge:
return False
elif not met1.compartment == met2.compartment:
return False
elif not met1.name == met2.name:
return False
elif not met1.compartment == met2.compartment:
return False
#elif not met1.notes == met2.notes:
# return False
elif not naive:
if not compareReactions(met1._reaction, met2._reaction, naive=True):
return False
elif not compareModels(met1._model, met2._model, naive=True):
return False
else:
return True
else:
return True
def compareReactions(r1, r2, naive=False):
if isinstance(r1, set):
return compareReactions(list(r1), list(r2), naive)
if isinstance(r1, list):
if not isinstance(r2, list):
return False
elif len(r1) != len(r2):
return False
else:
for i in range(len(r1)):
if not compareReactions(r1[i], r2[i],naive):
return False
else:
return True
else:
if not True:
#can never be entered
pass
#elif not r1._compartments == r2._compartments:
# return False
#elif not r1._forward_variable == r2._forward_variable:
# return False
elif not r1._gene_reaction_rule == r2._gene_reaction_rule:
return False
elif not r1._id == r2._id:
return False
elif not r1._lower_bound == r2._lower_bound:
return False
#elif not r1._model == r2._model:
# return False
#elif not r1._reverse_variable == r2._reverse_variable:
# return False
elif not r1._upper_bound == r2._upper_bound:
return False
#elif not r1.annotation == r2.annotation:
# return False
elif not r1.name== r2.name:
return False
#elif not r1.notes == r2.notes:
# return False
elif not r1.subsystem == r2.subsystem:
return False
elif not r1.variable_kind == r2.variable_kind:
return False
elif not naive:
if not compareMetaboliteDicts(r1._metabolites, r2._metabolites):
return False
elif not compareGenes(r1._genes,r2._genes, naive=True):
return False
else:
return True
else:
return True
def compareGenes(g1, g2, naive=False):
if isinstance(g1, set):
return compareGenes(list(g1), list(g2), naive)
if isinstance(g1, list):
if not isinstance(g2, list):
return False
elif len(g1) != len(g2):
return False
else:
for i in range(len(g1)):
if not compareGenes(g1[i], g2[i], naive):
return False
else:
return True
else:
if not True:
#can never be entered
pass
elif not g1._functional == g2._functional:
return False
elif not g1._id == g2._id:
return False
#elif not g1._model == g2._model:
# return False
elif not g1.annotation == g2.annotation:
return False
elif not g1.name == g2.name:
return False
#elif not g1.notes == g2.notes:
# return False
elif not naive:
if not compareReactions(g1._reaction,g2._reaction, naive=True):
return False
else:
return True
else:
return True
def compareModels(m1, m2, naive=False):
if not True:
#can never be entered
pass
#elif not m1._compartments == m2._compartments:
# return False
#elif not m1._contexts == m2._contexts:
# return False
#elif not m1._solver == m2._solver:
# return False
elif not m1._id == m2._id:
return False
#elif not m1._trimmed == m2.trimmed:
# return False
#elif not m1._trimmed_genes == m2._trimmed_genes:
# return False
#elif not m1._trimmed_reactions == m2._trimmed_reactions:
# return False
#elif not m1.annotation == m2.annotation:
# return False
elif not m1.bounds == m2.bounds:
return False
elif not m1.name == m2.name:
return False
#elif not m1.notes == m2.notes:
# return False
#elif not m1.quadratic_component == m2.quadratic_component:
# return False
elif not naive:
if not compareGenes(m1.genes, m2.genes):
return False
elif not compareMetabolites(m1.metabolites, m2.metabolites):
return False
elif not compareReactions(m1.reactions,m2.reactions):
return False
else:
return True
else:
return True
| 33.021053 | 84 | 0.54176 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,229 | 0.195888 |
29509faf87f0d6a17ff1205ace918609c71b08fe | 1,750 | py | Python | five/five_copy.py | ngd-b/python-demo | 0341c1620bcde1c1d886cb9e75dc6db3722273c8 | [
"MIT"
]
| 1 | 2019-10-09T13:40:13.000Z | 2019-10-09T13:40:13.000Z | five/five_copy.py | ngd-b/python-demo | 0341c1620bcde1c1d886cb9e75dc6db3722273c8 | [
"MIT"
]
| null | null | null | five/five_copy.py | ngd-b/python-demo | 0341c1620bcde1c1d886cb9e75dc6db3722273c8 | [
"MIT"
]
| null | null | null | #!/usr/bin/python
# -*- coding:utf-8 -*-
print("hello world")
f = None
try:
f = open("./hello.txt","r",encoding="utf8")
print(f.read(5),end='')
print(f.read(5),end='')
print(f.read(5))
except IOError as e:
print(e)
finally:
if f:
f.close()
# with auto call the methods' close
with open("./hello.txt","r",encoding="utf8") as f:
print(f.read())
# readlines() reads the file line by line
with open("./hello.txt","r",encoding="utf8") as f:
for line in f.readlines():
print(line.strip())
# write data to files
with open("./hello_1.txt","w",encoding="utf8") as f:
f.write("北京欢迎你!")
with open("./hello.txt","a",encoding="utf8") as f:
f.write("祖国 70!")
# StringIO / BytesIO
from io import StringIO
# create a StringIO initialized with a value
str = StringIO('init')
# read back the value it was initialized with
while True:
s = str.readline()
if s == '':
break
print(s.strip())
# write more data into it
str.write("你好!")
str.write(" 南京")
# get the whole value
print(str.getvalue())
'''
while True:
s = str.readline()
if s == '':
break
print(s.strip())
'''
# write binary data
from io import BytesIO
bi = BytesIO()
bi.write("你好".encode("utf-8"))
print(bi.getvalue())
by = BytesIO(b'\xe4\xbd\xa0\xe5\xa5\xbd')
print(by.read())
# operating-system files and directories: the os module
import os
# current platform name, e.g. 'nt'
print(os.name)
# the path module used by python, e.g. <module 'ntpath' from 'G:\\python-3.7\\lib\\ntpath.py'>
print(os.path)
# environment settings, including system and user environment variables
print(os.environ)
# get the name of the user logged in on the current console, e.g. 'bobol'
print(os.getlogin())
# create a directory
os.mkdir("./foo/")
# remove a directory
os.rmdir("./foo/")
'''
os.path can handle some common path operations
'''
# get the absolute path of the given path, e.g. 'G:\\pythonDemo\\python-demo\\five'
print(os.path.abspath("./"))
# return whether the given path exists on the file system, e.g. False
print(os.path.exists("./foo"))
# get the size of the given path, e.g. 4096
print(os.path.getsize("../"))
# return whether the given path is an absolute path, e.g. False
print(os.path.isabs("../"))
| 19.444444 | 72 | 0.6 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,155 | 0.550524 |
2951af032672242812dfd6a7fa6b331872d27c9b | 210 | py | Python | prepare_sets.py | mechtal/Vaccination_UK | f3ad91c128b8b7711fc03d12a115cbf3e660318f | [
"MIT"
]
| null | null | null | prepare_sets.py | mechtal/Vaccination_UK | f3ad91c128b8b7711fc03d12a115cbf3e660318f | [
"MIT"
]
| 17 | 2022-03-12T11:19:44.000Z | 2022-03-28T08:26:38.000Z | prepare_sets.py | mechtal/Vaccination_UK | f3ad91c128b8b7711fc03d12a115cbf3e660318f | [
"MIT"
]
| null | null | null | def prepare_sets(dataset, feature_columns, y_column):
train_X, val_X, train_y, val_y = train_test_split(dataset[feature_columns], dataset[y_column], random_state=1)
return train_X, val_X, train_y, val_y | 70 | 114 | 0.785714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2951e1c21121343a134fe48bfcc73abc7a482cb1 | 6,355 | py | Python | examples/batch_ts_insert.py | bureau14/qdb-api-python | 2a010df3252d39bc4d529f545547c5cefb9fe86e | [
"BSD-3-Clause"
]
| 9 | 2015-09-02T20:13:13.000Z | 2020-07-16T14:17:36.000Z | examples/batch_ts_insert.py | bureau14/qdb-api-python | 2a010df3252d39bc4d529f545547c5cefb9fe86e | [
"BSD-3-Clause"
]
| 5 | 2018-02-20T10:47:02.000Z | 2020-05-20T10:05:49.000Z | examples/batch_ts_insert.py | bureau14/qdb-api-python | 2a010df3252d39bc4d529f545547c5cefb9fe86e | [
"BSD-3-Clause"
]
| 1 | 2018-04-01T11:12:56.000Z | 2018-04-01T11:12:56.000Z | # Copyright (c) 2009-2020, quasardb SAS. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of quasardb nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY QUASARDB AND CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import print_function
from builtins import range as xrange, int
import os
from socket import gethostname
import sys
import inspect
import traceback
import random
import time
import datetime
import locale
import numpy as np
import quasardb
STOCK_COLUMN = "stock_id"
OPEN_COLUMN = "open"
CLOSE_COLUMN = "close"
HIGH_COLUMN = "high"
LOW_COLUMN = "low"
VOLUME_COLUMN = "volume"
def time_execution(str, f, *args):
print(" - ", str, end='')
start_time = time.time()
res = f(*args)
end_time = time.time()
print(" [duration: {}s]".format(end_time - start_time))
return res
def gen_ts_name():
return "test.{}.{}.{}".format(gethostname(), os.getpid(), random.randint(0, 100000))
def create_ts(q, name):
ts = q.ts(name)
ts.create([quasardb.ColumnInfo(quasardb.ColumnType.Int64, STOCK_COLUMN),
quasardb.ColumnInfo(quasardb.ColumnType.Double, OPEN_COLUMN),
quasardb.ColumnInfo(quasardb.ColumnType.Double, CLOSE_COLUMN),
quasardb.ColumnInfo(quasardb.ColumnType.Double, HIGH_COLUMN),
quasardb.ColumnInfo(quasardb.ColumnType.Double, LOW_COLUMN),
quasardb.ColumnInfo(quasardb.ColumnType.Int64, VOLUME_COLUMN)])
return ts
def create_many_ts(q, names):
return [create_ts(q, x) for x in names]
def generate_prices(price_count):
return np.random.uniform(-100.0, 100.0, price_count)
def generate_points(points_count):
start_time = np.datetime64('2017-01-01', 'ns')
dates = np.array([(start_time + np.timedelta64(i, 'm')) for i in range(points_count)]).astype('datetime64[ns]')
stock_ids = np.random.randint(1, 25, size=points_count)
prices = np.array([generate_prices(60) for i in range(points_count)]).astype('double')
volumes = np.random.randint(0, 10000, points_count)
return (dates, stock_ids, prices, volumes)
def batch_ts_columns(ts_name, prealloc_size):
return (quasardb.BatchColumnInfo(ts_name, STOCK_COLUMN, prealloc_size),
quasardb.BatchColumnInfo(ts_name, OPEN_COLUMN, prealloc_size),
quasardb.BatchColumnInfo(ts_name, CLOSE_COLUMN, prealloc_size),
quasardb.BatchColumnInfo(ts_name, HIGH_COLUMN, prealloc_size),
quasardb.BatchColumnInfo(ts_name, LOW_COLUMN, prealloc_size),
quasardb.BatchColumnInfo(ts_name, VOLUME_COLUMN, prealloc_size))
def calculate_minute_bar(prices):
# Takes all prices for a single minute, and calculate OHLC
return (prices[0], prices[-1], np.amax(prices), np.amin(prices))
def bulk_insert(q, ts_names, dates, stock_ids, prices, volumes):
# We generate a flattened list of columns for each timeseries; for example,
# for 2 columns for 4 timeseries each, we have 8 columns.
columns = [column for nested in (batch_ts_columns(ts_name, len(dates))
for ts_name in ts_names)
for column in nested]
batch_inserter = q.ts_batch(columns)
for i in range(len(stock_ids)):
# We use the known layout of column (2 for each timeseries, alternating with
# STOCK_COLUMN and PRICE_COLUMN) to set the values.
for j in range(0, len(ts_names) * 6, 6):
(o, c, h, l) = calculate_minute_bar(prices[i])
batch_inserter.start_row(dates[i])
batch_inserter.set_int64(j, stock_ids[i]) # set stock_id
batch_inserter.set_double(j + 1, o) # open
batch_inserter.set_double(j + 2, c) # close
batch_inserter.set_double(j + 3, h) # high
batch_inserter.set_double(j + 4, l) # low
batch_inserter.set_int64(j + 5, volumes[i]) # low
batch_inserter.push()
def make_it_so(q, points_count):
ts_names = [gen_ts_name(), gen_ts_name()]
ts = time_execution("Creating a time series with names {}".format(ts_names), create_many_ts, q, ts_names)
(dates, stock_ids, prices, volumes) = time_execution("Generating {:,} points".format(points_count), generate_points, points_count)
time_execution("Inserting {:,} points into timeseries with names {}".format(points_count, ts_names), bulk_insert, q, ts_names, dates, stock_ids, prices, volumes)
return (ts_names, dates, np.unique(stock_ids))
def main(quasardb_uri, points_count):
print("Connecting to: ", quasardb_uri)
q = quasardb.Cluster(uri=quasardb_uri)
print(" *** Inserting {:,} into {}".format(points_count, quasardb_uri))
make_it_so(q, points_count)
if __name__ == "__main__":
try:
if len(sys.argv) != 3:
print("usage: ", sys.argv[0], " quasardb_uri points_count")
sys.exit(1)
main(sys.argv[1], int(sys.argv[2]))
except Exception as ex: # pylint: disable=W0703
print("An error ocurred:", str(ex))
traceback.print_exc()
| 40.737179 | 165 | 0.70181 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,270 | 0.357199 |
295371af41debc44d7d6cd681954bb737b9ceb2b | 2,128 | py | Python | tests/backends/test_flashtext_backend.py | openredact/pii-identifier | 97eaef56d6de59718501095d631a0fb49700e45a | [
"MIT"
]
| 14 | 2020-07-31T18:45:29.000Z | 2022-02-21T13:24:00.000Z | tests/backends/test_flashtext_backend.py | openredact/pii-identifier | 97eaef56d6de59718501095d631a0fb49700e45a | [
"MIT"
]
| 7 | 2020-07-31T06:17:21.000Z | 2021-05-23T08:40:24.000Z | tests/backends/test_flashtext_backend.py | openredact/pii-identifier | 97eaef56d6de59718501095d631a0fb49700e45a | [
"MIT"
]
| 1 | 2020-09-30T01:42:57.000Z | 2020-09-30T01:42:57.000Z | from nerwhal.backends.flashtext_backend import FlashtextBackend
from nerwhal.recognizer_bases import FlashtextRecognizer
def test_single_recognizer(embed):
class TestRecognizer(FlashtextRecognizer):
TAG = "XX"
SCORE = 1.0
@property
def keywords(self):
return ["abc", "cde"]
backend = FlashtextBackend()
backend.register_recognizer(TestRecognizer)
text = "Das ist abc und cde."
ents = backend.run(text)
assert embed(text, ents) == "Das ist XX und XX."
assert ents[0].start_char == 8
assert ents[0].end_char == 11
assert ents[0].tag == "XX"
assert ents[0].text == "abc"
assert ents[0].score == 1.0
assert ents[0].recognizer == "TestRecognizer"
def test_multiple_recognizers(embed):
class TestRecognizerA(FlashtextRecognizer):
TAG = "A"
SCORE = 1.0
@property
def keywords(self):
return ["abc"]
class TestRecognizerB(FlashtextRecognizer):
TAG = "B"
SCORE = 0.5
@property
def keywords(self):
return ["cde"]
backend = FlashtextBackend()
backend.register_recognizer(TestRecognizerA)
backend.register_recognizer(TestRecognizerB)
text = "Das ist abc und cde."
ents = backend.run(text)
assert embed(text, ents) == "Das ist A und B."
assert ents[0].tag == "A"
assert ents[0].score == 1.0
assert ents[1].tag == "B"
assert ents[1].score == 0.5
def test_overlapping_recognizers(embed):
class TestRecognizerA(FlashtextRecognizer):
TAG = "A"
SCORE = 1.0
@property
def keywords(self):
return ["abc", "cde"]
class TestRecognizerB(FlashtextRecognizer):
TAG = "B"
SCORE = 0.5
@property
def keywords(self):
return ["cde", "fgh"]
backend = FlashtextBackend()
backend.register_recognizer(TestRecognizerA)
backend.register_recognizer(TestRecognizerB)
text = "Das ist cde."
ents = backend.run(text)
# Recognizer B overwrites the keyword "cde"
assert embed(text, ents) == "Das ist B."
| 26.6 | 63 | 0.62218 | 796 | 0.37406 | 0 | 0 | 341 | 0.160244 | 0 | 0 | 239 | 0.112312 |
2954339ee63d8f3aeb46e217258769ecc01fa43c | 1,444 | py | Python | new_rdsmysql.py | AdminTurnedDevOps/AWS_Solutions_Architect_Python | 5389f8c9dfbda7b0b49a94a93e9b070420ca9ece | [
"MIT"
]
| 30 | 2019-01-13T20:14:07.000Z | 2022-02-06T15:08:01.000Z | new_rdsmysql.py | AdminTurnedDevOps/AWS_Solutions_Architect_Python | 5389f8c9dfbda7b0b49a94a93e9b070420ca9ece | [
"MIT"
]
| 1 | 2019-01-13T23:52:39.000Z | 2019-01-14T14:39:45.000Z | new_rdsmysql.py | AdminTurnedDevOps/AWS_Solutions_Architect_Python | 5389f8c9dfbda7b0b49a94a93e9b070420ca9ece | [
"MIT"
]
| 26 | 2019-01-13T21:32:23.000Z | 2022-03-20T05:19:03.000Z | import boto3
import sys
import time
import logging
import getpass
def new_rdsmysql(dbname, instanceID, storage, dbInstancetype, dbusername):
masterPass = getpass.getpass('DBMasterPassword: ')
    if len(masterPass) < 10:
        logging.warning('Password is not at least 10 characters. Please try again')
        time.sleep(5)
        sys.exit(1)
try:
rds_instance = boto3.client('rds')
create_instance = rds_instance.create_db_instance(
DBName = dbname,
DBInstanceIdentifier = instanceID,
AllocatedStorage = int(storage),
DBInstanceClass = dbInstancetype,
Engine = 'mysql',
MasterUsername = dbusername,
MasterUserPassword = str(masterPass),
MultiAZ = True,
EngineVersion = '5.7.23',
AutoMinorVersionUpgrade = False,
LicenseModel = 'general-public-license',
PubliclyAccessible = False,
Tags = [
{
'Key': 'Name',
'Value' : dbname
}
]
)
print(create_instance)
except Exception as e:
logging.warning('An error has occured')
print(e)
dbname = sys.argv[1]
instanceID = sys.argv[2]
storage = sys.argv[3]
dbInstancetype = sys.argv[4]
dbusername = sys.argv[5]
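# Example invocation (assumed values, matching the sys.argv order above):
#   python new_rdsmysql.py mydb my-rds-instance 20 db.m5.large admin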
new_rdsmysql(dbname, instanceID, storage, dbInstancetype, dbusername) | 27.769231 | 83 | 0.587258 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 162 | 0.112188 |
295527972ae5a65fd8aad67870244e225d07dc77 | 2,657 | py | Python | src/tzscan/tzscan_block_api.py | Twente-Mining/tezos-reward-distributor | 8df0745fdb44cbd765084303882545202d2427f3 | [
"MIT"
]
| null | null | null | src/tzscan/tzscan_block_api.py | Twente-Mining/tezos-reward-distributor | 8df0745fdb44cbd765084303882545202d2427f3 | [
"MIT"
]
| null | null | null | src/tzscan/tzscan_block_api.py | Twente-Mining/tezos-reward-distributor | 8df0745fdb44cbd765084303882545202d2427f3 | [
"MIT"
]
| null | null | null | import random
import requests
from api.block_api import BlockApi
from exception.tzscan import TzScanException
from log_config import main_logger
logger = main_logger
HEAD_API = {'MAINNET': {'HEAD_API_URL': 'https://api%MIRROR%.tzscan.io/v2/head'},
'ALPHANET': {'HEAD_API_URL': 'http://api.alphanet.tzscan.io/v2/head'},
'ZERONET': {'HEAD_API_URL': 'http://api.zeronet.tzscan.io/v2/head'}
}
REVELATION_API = {'MAINNET': {'HEAD_API_URL': 'https://api%MIRROR%.tzscan.io/v1/operations/%PKH%?type=Reveal'},
'ALPHANET': {'HEAD_API_URL': 'https://api.alphanet.tzscan.io/v1/operations/%PKH%?type=Reveal'},
'ZERONET': {'HEAD_API_URL': 'https://api.zeronet.tzscan.io/v1/operations/%PKH%?type=Reveal'}
}
class TzScanBlockApiImpl(BlockApi):
def __init__(self, nw):
super(TzScanBlockApiImpl, self).__init__(nw)
self.head_api = HEAD_API[nw['NAME']]
if self.head_api is None:
raise Exception("Unknown network {}".format(nw))
self.revelation_api = REVELATION_API[nw['NAME']]
def get_current_level(self, verbose=False):
uri = self.head_api['HEAD_API_URL'].replace("%MIRROR%", str(self.rand_mirror()))
if verbose:
logger.debug("Requesting {}".format(uri))
resp = requests.get(uri)
if resp.status_code != 200:
# This means something went wrong.
raise TzScanException('GET {} {}'.format(uri, resp.status_code))
root = resp.json()
if verbose:
logger.debug("Response from tzscan is: {}".format(root))
current_level = int(root["level"])
return current_level
def get_revelation(self, pkh, verbose=False):
uri = self.revelation_api['HEAD_API_URL'].replace("%MIRROR%", str(self.rand_mirror())).replace("%PKH%", pkh)
if verbose:
logger.debug("Requesting {}".format(uri))
resp = requests.get(uri)
if resp.status_code != 200:
# This means something went wrong.
raise TzScanException('GET {} {}'.format(uri, resp.status_code))
root = resp.json()
if verbose:
logger.debug("Response from tzscan is: {}".format(root))
return len(root) > 0
def rand_mirror(self):
mirror = random.randint(1, 6)
if mirror == 4: # has problem lately
mirror = 3
return mirror
def test_get_revelation():
address_api = TzScanBlockApiImpl({"NAME":"ALPHANET"})
address_api.get_revelation("tz3WXYtyDUNL91qfiCJtVUX746QpNv5i5ve5")
if __name__ == '__main__':
test_get_revelation() | 32.402439 | 116 | 0.621377 | 1,657 | 0.623636 | 0 | 0 | 0 | 0 | 0 | 0 | 802 | 0.301844 |
29554b0b9e721e4b0e9ff426e2c29a4e943ecd1c | 10,086 | py | Python | python/cuxfilter/tests/charts/core/test_core_non_aggregate.py | Anhmike/cuxfilter | a8b25b1c37ac0e5435acb7261f6fcbf677d96bfa | [
"Apache-2.0"
]
| 201 | 2018-12-21T18:32:40.000Z | 2022-03-22T11:50:29.000Z | python/cuxfilter/tests/charts/core/test_core_non_aggregate.py | Anhmike/cuxfilter | a8b25b1c37ac0e5435acb7261f6fcbf677d96bfa | [
"Apache-2.0"
]
| 258 | 2018-12-27T07:37:50.000Z | 2022-03-31T20:01:32.000Z | python/cuxfilter/tests/charts/core/test_core_non_aggregate.py | Anhmike/cuxfilter | a8b25b1c37ac0e5435acb7261f6fcbf677d96bfa | [
"Apache-2.0"
]
| 51 | 2019-01-10T19:03:09.000Z | 2022-03-08T01:37:11.000Z | import pytest
import cudf
import mock
from cuxfilter.charts.core.non_aggregate.core_non_aggregate import (
BaseNonAggregate,
)
from cuxfilter.dashboard import DashBoard
from cuxfilter import DataFrame
from cuxfilter.layouts import chart_view
class TestCoreNonAggregateChart:
def test_variables(self):
bnac = BaseNonAggregate()
# BaseChart variables
assert bnac.chart_type is None
assert bnac.x is None
assert bnac.y is None
assert bnac.aggregate_fn == "count"
assert bnac.color is None
assert bnac.height == 0
assert bnac.width == 0
assert bnac.add_interaction is True
assert bnac.chart is None
assert bnac.source is None
assert bnac.source_backup is None
assert bnac.data_points == 0
assert bnac._library_specific_params == {}
assert bnac.stride is None
assert bnac.stride_type == int
assert bnac.min_value == 0.0
assert bnac.max_value == 0.0
assert bnac.x_label_map == {}
assert bnac.y_label_map == {}
assert bnac.title == ""
# test chart name setter
bnac.x = "x"
bnac.y = "y"
bnac.chart_type = "test_chart_type"
assert bnac.name == "x_y_count_test_chart_type_"
# BaseNonAggregateChart variables
assert bnac.use_data_tiles is False
assert bnac.reset_event is None
assert bnac.x_range is None
assert bnac.y_range is None
assert bnac.aggregate_col is None
def test_label_mappers(self):
bnac = BaseNonAggregate()
library_specific_params = {
"x_label_map": {"a": 1, "b": 2},
"y_label_map": {"a": 1, "b": 2},
}
bnac.library_specific_params = library_specific_params
assert bnac.x_label_map == {"a": 1, "b": 2}
assert bnac.y_label_map == {"a": 1, "b": 2}
@pytest.mark.parametrize("chart, _chart", [(None, None), (1, 1)])
def test_view(self, chart, _chart):
bnac = BaseNonAggregate()
bnac.chart = chart
bnac.width = 400
bnac.title = "test_title"
assert str(bnac.view()) == str(
chart_view(_chart, width=bnac.width, title=bnac.title)
)
def test_get_selection_geometry_callback(self):
bnac = BaseNonAggregate()
df = cudf.DataFrame({"a": [1, 2, 2], "b": [3, 4, 5]})
dashboard = DashBoard(dataframe=DataFrame.from_dataframe(df))
assert (
bnac.get_selection_geometry_callback(dashboard).__name__
== "selection_callback"
)
assert callable(type(bnac.get_selection_geometry_callback(dashboard)))
def test_box_selection_callback(self):
bnac = BaseNonAggregate()
bnac.x = "a"
bnac.y = "b"
bnac.chart_type = "temp"
self.result = None
def t_function(data, patch_update=False):
self.result = data
bnac.reload_chart = t_function
df = cudf.DataFrame({"a": [1, 2, 2], "b": [3, 4, 5]})
dashboard = DashBoard(dataframe=DataFrame.from_dataframe(df))
dashboard._active_view = bnac
class evt:
geometry = dict(x0=1, x1=2, y0=3, y1=4, type="rect")
t = bnac.get_selection_geometry_callback(dashboard)
t(evt)
assert self.result.equals(df.query("1<=a<=2 and 3<=b<=4"))
def test_lasso_selection_callback(self):
bnac = BaseNonAggregate()
bnac.x = "a"
bnac.y = "b"
bnac.chart_type = "temp"
def t_function(data, patch_update=False):
self.result = data
bnac.reload_chart = t_function
df = cudf.DataFrame({"a": [1, 2, 2], "b": [3, 4, 5]})
dashboard = DashBoard(dataframe=DataFrame.from_dataframe(df))
class evt:
geometry = dict(x=[1, 1, 2], y=[1, 2, 1], type="poly")
final = True
t = bnac.get_selection_geometry_callback(dashboard)
with mock.patch("cuspatial.point_in_polygon") as pip:
pip.return_value = cudf.DataFrame(
{"selection": [True, False, True]}
)
t(evt)
assert pip.called
@pytest.mark.parametrize(
"data, _data",
[
(cudf.DataFrame(), cudf.DataFrame()),
(
cudf.DataFrame({"a": [1, 2, 2], "b": [3, 4, 5]}),
cudf.DataFrame({"a": [1, 2, 2], "b": [3, 4, 5]}),
),
],
)
def test_calculate_source(self, data, _data):
"""
Calculate source simply calls the format_source_data function,
which is implemented by the chart types inheriting this class.
"""
bnac = BaseNonAggregate()
self.result = None
def t_function(data, patch_update=False):
self.result = data
bnac.format_source_data = t_function
bnac.calculate_source(data)
assert self.result.equals(_data)
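# A hypothetical sketch (not part of cuxfilter) of what a concrete chart's
# format_source_data could look like, to make the contract tested above concrete:
#
#     def format_source_data(self, data, patch_update=False):
#         # e.g. turn the cudf.DataFrame into the plotting library's
#         # column-data-source layout before rendering
#         self.source = {col: data[col].values_host for col in data.columns}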
@pytest.mark.parametrize(
"x_range, y_range, query, local_dict",
[
(
(1, 2),
(3, 4),
"@x_min<=x<=@x_max and @y_min<=y<=@y_max",
{"x_min": 1, "x_max": 2, "y_min": 3, "y_max": 4},
),
(
(0, 2),
(3, 5),
"@x_min<=x<=@x_max and @y_min<=y<=@y_max",
{"x_min": 0, "x_max": 2, "y_min": 3, "y_max": 5},
),
],
)
def test_compute_query_dict(self, x_range, y_range, query, local_dict):
bnac = BaseNonAggregate()
bnac.chart_type = "test"
bnac.x = "x"
bnac.y = "y"
bnac.x_range = x_range
bnac.y_range = y_range
df = cudf.DataFrame({"x": [1, 2, 2], "y": [3, 4, 5]})
dashboard = DashBoard(dataframe=DataFrame.from_dataframe(df))
bnac.compute_query_dict(
dashboard._query_str_dict, dashboard._query_local_variables_dict
)
bnac_key = (
f"{bnac.x}_{bnac.y}"
f"{'_' + bnac.aggregate_col if bnac.aggregate_col else ''}"
f"_{bnac.aggregate_fn}_{bnac.chart_type}_{bnac.title}"
)
assert dashboard._query_str_dict[bnac_key] == query
for key in local_dict:
assert (
dashboard._query_local_variables_dict[key] == local_dict[key]
)
@pytest.mark.parametrize(
"add_interaction, reset_event, event_1, event_2",
[
(True, None, "selection_callback", None),
(True, "test_event", "selection_callback", "reset_callback"),
(False, "test_event", None, "reset_callback"),
],
)
def test_add_events(self, add_interaction, reset_event, event_1, event_2):
bnac = BaseNonAggregate()
bnac.add_interaction = add_interaction
bnac.reset_event = reset_event
df = cudf.DataFrame({"a": [1, 2, 2], "b": [3, 4, 5]})
dashboard = DashBoard(dataframe=DataFrame.from_dataframe(df))
self.event_1 = None
self.event_2 = None
def t_func(fn):
self.event_1 = fn.__name__
def t_func1(event, fn):
self.event_2 = fn.__name__
bnac.add_selection_geometry_event = t_func
bnac.add_event = t_func1
bnac.add_events(dashboard)
assert self.event_1 == event_1
assert self.event_2 == event_2
def test_add_reset_event(self):
bnac = BaseNonAggregate()
bnac.chart_type = "test"
bnac.x = "a"
bnac.x_range = (0, 2)
bnac.y_range = (3, 5)
df = cudf.DataFrame({"a": [1, 2, 2], "b": [3, 4, 5]})
dashboard = DashBoard(dataframe=DataFrame.from_dataframe(df))
dashboard._active_view = bnac
def t_func1(event, fn):
fn("event")
bnac.add_event = t_func1
bnac.add_reset_event(dashboard)
assert bnac.x_range is None
assert bnac.y_range is None
def test_query_chart_by_range(self):
bnac = BaseNonAggregate()
bnac.chart_type = "test"
bnac.x = "a"
bnac_1 = BaseNonAggregate()
bnac_1.chart_type = "test"
bnac_1.x = "b"
query_tuple = (4, 5)
df = cudf.DataFrame({"a": [1, 2, 3, 4], "b": [3, 4, 5, 6]})
bnac.source = df
self.result = None
self.patch_update = None
def t_func(data, patch_update):
self.result = data
self.patch_update = patch_update
# create a dummy reload_chart fn, as it's not implemented in the core
# non-aggregate chart class
bnac.reload_chart = t_func
bnac.query_chart_by_range(
active_chart=bnac_1, query_tuple=query_tuple, datatile=None
)
assert self.result.to_string() == " a b\n1 2 4\n2 3 5"
assert self.patch_update is False
@pytest.mark.parametrize(
"new_indices, result",
[
([4, 5], " a b\n1 2 4\n2 3 5"),
([], " a b\n0 1 3\n1 2 4\n2 3 5\n3 4 6"),
([3], " a b\n0 1 3"),
],
)
def test_query_chart_by_indices(self, new_indices, result):
bnac = BaseNonAggregate()
bnac.chart_type = "test"
bnac.x = "a"
bnac_1 = BaseNonAggregate()
bnac_1.chart_type = "test"
bnac_1.x = "b"
new_indices = new_indices
df = cudf.DataFrame({"a": [1, 2, 3, 4], "b": [3, 4, 5, 6]})
bnac.source = df
self.result = None
self.patch_update = None
def t_func(data, patch_update):
self.result = data
self.patch_update = patch_update
# create a dummy reload_chart fn, as it's not implemented in the core
# non-aggregate chart class
bnac.reload_chart = t_func
bnac.query_chart_by_indices(
active_chart=bnac_1,
old_indices=[],
new_indices=new_indices,
datatile=None,
)
assert self.result.to_string() == result
assert self.patch_update is False
| 30.288288 | 78 | 0.561967 | 9,836 | 0.975213 | 0 | 0 | 4,746 | 0.470553 | 0 | 0 | 1,395 | 0.138311 |
29560939d9082f0d01fcc95be50270dfe0f453ac | 4,265 | py | Python | tunobase/tagging/migrations/0001_initial.py | unomena/tunobase-core | fd24e378c87407131805fa56ade8669fceec8dfa | [
"BSD-3-Clause"
]
| null | null | null | tunobase/tagging/migrations/0001_initial.py | unomena/tunobase-core | fd24e378c87407131805fa56ade8669fceec8dfa | [
"BSD-3-Clause"
]
| null | null | null | tunobase/tagging/migrations/0001_initial.py | unomena/tunobase-core | fd24e378c87407131805fa56ade8669fceec8dfa | [
"BSD-3-Clause"
]
| null | null | null | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Tag'
db.create_table(u'tagging_tag', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=32, db_index=True)),
('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('site', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sites.Site'], null=True, blank=True)),
))
db.send_create_signal(u'tagging', ['Tag'])
# Adding unique constraint on 'Tag', fields ['title', 'site']
db.create_unique(u'tagging_tag', ['title', 'site_id'])
# Adding model 'ContentObjectTag'
db.create_table(u'tagging_contentobjecttag', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(related_name='content_type_set_for_contentobjecttag', to=orm['contenttypes.ContentType'])),
('object_pk', self.gf('django.db.models.fields.PositiveIntegerField')()),
('site', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sites.Site'])),
('tag', self.gf('django.db.models.fields.related.ForeignKey')(related_name='content_object_tags', to=orm['tagging.Tag'])),
))
db.send_create_signal(u'tagging', ['ContentObjectTag'])
def backwards(self, orm):
# Removing unique constraint on 'Tag', fields ['title', 'site']
db.delete_unique(u'tagging_tag', ['title', 'site_id'])
# Deleting model 'Tag'
db.delete_table(u'tagging_tag')
# Deleting model 'ContentObjectTag'
db.delete_table(u'tagging_contentobjecttag')
models = {
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'sites.site': {
'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'tagging.contentobjecttag': {
'Meta': {'object_name': 'ContentObjectTag'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_contentobjecttag'", 'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_pk': ('django.db.models.fields.PositiveIntegerField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_object_tags'", 'to': u"orm['tagging.Tag']"})
},
u'tagging.tag': {
'Meta': {'unique_together': "[('title', 'site')]", 'object_name': 'Tag'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'})
}
}
complete_apps = ['tagging'] | 56.118421 | 182 | 0.597655 | 4,098 | 0.960844 | 0 | 0 | 0 | 0 | 0 | 0 | 2,565 | 0.601407 |
2957082f2761f3302a5b658af0d68aab4daff24f | 1,004 | py | Python | recogym/envs/session.py | philomenec/reco-gym | f8553d197f42ec2f415aefce48525d0e9b10ddaa | [
"Apache-2.0"
]
| 413 | 2018-09-18T17:49:44.000Z | 2022-03-23T12:25:41.000Z | recogym/envs/session.py | aliang-rec/reco-gym | f8553d197f42ec2f415aefce48525d0e9b10ddaa | [
"Apache-2.0"
]
| 15 | 2018-11-08T17:04:21.000Z | 2021-11-30T19:20:27.000Z | recogym/envs/session.py | aliang-rec/reco-gym | f8553d197f42ec2f415aefce48525d0e9b10ddaa | [
"Apache-2.0"
]
| 81 | 2018-09-22T02:28:55.000Z | 2022-03-30T14:03:01.000Z | class Session(list):
"""Abstract Session class"""
def to_strings(self, user_id, session_id):
"""represent session as list of strings (one per event)"""
user_id, session_id = str(user_id), str(session_id)
session_type = self.get_type()
strings = []
for event, product in self:
columns = [user_id, session_type, session_id, event, str(product)]
strings.append(','.join(columns))
return strings
def get_type(self):
raise NotImplementedError
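# A minimal usage sketch of the concrete subclass below (the `context` object is
# hypothetical and stands in for the simulator context, which exposes time() and user()):
#
#     sessions = OrganicSessions()
#     sessions.next(context, product=42)   # record one organic pageview event
#     sessions.get_type()                  # -> 'organic'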
class OrganicSessions(Session):
def __init__(self):
super(OrganicSessions, self).__init__()
def next(self, context, product):
self.append(
{
't': context.time(),
'u': context.user(),
'z': 'pageview',
'v': product
}
)
def get_type(self):
return 'organic'
def get_views(self):
return [p for _, _, e, p in self if e == 'pageview']
| 27.135135 | 78 | 0.548805 | 1,000 | 0.996016 | 0 | 0 | 0 | 0 | 0 | 0 | 130 | 0.129482 |
29589b62fc1cb372684ea22b3b76f315253769f1 | 219 | py | Python | message_handlers/location_handler.py | pratyushmore/lunch-tag-bot | e0a055a2ee914c3bc5434244b17cfb7e3b1a7a9a | [
"MIT"
]
| null | null | null | message_handlers/location_handler.py | pratyushmore/lunch-tag-bot | e0a055a2ee914c3bc5434244b17cfb7e3b1a7a9a | [
"MIT"
]
| 1 | 2019-10-21T15:15:48.000Z | 2019-10-21T19:28:10.000Z | message_handlers/location_handler.py | pratyushmore/lunch-tag-bot | e0a055a2ee914c3bc5434244b17cfb7e3b1a7a9a | [
"MIT"
]
| null | null | null | def location(messaging_adaptor, user, channel, location):
message = "Your location has been set to `{}`. You are ready to be matched for Lunch Tag :)".format(location)
messaging_adaptor.send_message(channel, message)
| 54.75 | 110 | 0.767123 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 82 | 0.374429 |
2959af169e729db8be7ba1725d5b1686b6c154d4 | 6,462 | py | Python | b.py | lbarchive/b.py | 18b533dc40e5fdf7ba62209b51584927c2dd9ba0 | [
"MIT"
]
| null | null | null | b.py | lbarchive/b.py | 18b533dc40e5fdf7ba62209b51584927c2dd9ba0 | [
"MIT"
]
| null | null | null | b.py | lbarchive/b.py | 18b533dc40e5fdf7ba62209b51584927c2dd9ba0 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
# Copyright (C) 2013-2016 by Yu-Jie Lin
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
============
b.py command
============
Commands
========
============= =======================
command supported services
============= =======================
``blogs`` ``b``
``post`` ``b``, ``wp``
``generate`` ``base``, ``b``, ``wp``
``checklink`` ``base``, ``b``, ``wp``
``search`` ``b``
============= =======================
Descriptions:
``blogs``
list blogs. This can be used for blog IDs lookup.
``post``
post or update a blog post.
``generate``
generate HTML file at ``<TEMP>/draft.html``, where ``<TEMP>`` is the system's
temporary directory.
The generation can also output a preview HTML file at ``<TEMP>/preview.html`` if a
``tmpl.html`` template is present. It will replace ``%%Title%%`` with the post
title and ``%%Content%%`` with the generated HTML.
``checklink``
check links in generated HTML using lnkckr_.
``search``
search blog
.. _lnkckr: https://pypi.python.org/pypi/lnkckr
"""
from __future__ import print_function
import argparse as ap
import codecs
import imp
import logging
import os
import sys
import traceback
from bpy.handlers import handlers
from bpy.services import find_service, services
__program__ = 'b.py'
__description__ = 'Post to Blogger or WordPress in markup language seamlessly'
__copyright__ = 'Copyright 2013-2016, Yu Jie Lin'
__license__ = 'MIT License'
__version__ = '0.11.0'
__website__ = 'http://bitbucket.org/livibetter/b.py'
__author__ = 'Yu-Jie Lin'
__author_email__ = '[email protected]'
# b.py stuff
############
# filename of local configuration without '.py' suffix.
BRC = 'brc'
def parse_args():
p = ap.ArgumentParser()
p.add_argument('--version', action='version',
version='%(prog)s ' + __version__)
p.add_argument('-d', '--debug', action='store_true',
help='turn on debugging messages')
p.add_argument('-s', '--service', default='base',
help='what service to use. (Default: %(default)s)')
sp = p.add_subparsers(help='commands')
pblogs = sp.add_parser('blogs', help='list blogs')
pblogs.set_defaults(subparser=pblogs, command='blogs')
psearch = sp.add_parser('search', help='search for posts')
psearch.add_argument('-b', '--blog', help='Blog ID')
psearch.add_argument('q', nargs='+', help='query text')
psearch.set_defaults(subparser=psearch, command='search')
pgen = sp.add_parser('generate', help='generate html')
pgen.add_argument('filename')
pgen.set_defaults(subparser=pgen, command='generate')
pchk = sp.add_parser('checklink', help='check links in generated html')
pchk.add_argument('filename')
pchk.set_defaults(subparser=pchk, command='checklink')
ppost = sp.add_parser('post', help='post or update a blog post')
ppost.add_argument('filename')
ppost.set_defaults(subparser=ppost, command='post')
args = p.parse_args()
return args
def load_config():
rc = None
try:
search_path = [os.getcwd()]
_mod_data = imp.find_module(BRC, search_path)
print('Loading local configuration...')
try:
rc = imp.load_module(BRC, *_mod_data)
finally:
if _mod_data[0]:
_mod_data[0].close()
except ImportError:
pass
except Exception:
traceback.print_exc()
print('Error in %s, aborted.' % _mod_data[1])
sys.exit(1)
return rc
def main():
args = parse_args()
logging.basicConfig(
format=(
'%(asctime)s '
'%(levelname).4s '
'%(module)5.5s:%(funcName)-10.10s:%(lineno)04d '
'%(message)s'
),
datefmt='%H:%M:%S',
)
if args.debug:
logging.getLogger().setLevel(logging.DEBUG)
encoding = sys.stdout.encoding
if not encoding.startswith('UTF'):
msg = (
'standard output encoding is %s, '
'try to set with UTF-8 if there is output issues.'
)
logging.warning(msg % encoding)
if sys.version_info.major == 2:
sys.stdout = codecs.getwriter(encoding)(sys.stdout, 'replace')
sys.stderr = codecs.getwriter(encoding)(sys.stderr, 'replace')
elif sys.version_info.major == 3:
sys.stdout = codecs.getwriter(encoding)(sys.stdout.buffer, 'replace')
sys.stderr = codecs.getwriter(encoding)(sys.stderr.buffer, 'replace')
rc = load_config()
service_options = {'blog': None}
if rc:
if hasattr(rc, 'handlers'):
for name, handler in rc.handlers.items():
if name in handlers:
handlers[name].update(handler)
else:
handlers[name] = handler.copy()
if hasattr(rc, 'services'):
for name, service in rc.services.items():
if name in services:
services[name].update(service)
else:
services[name] = service.copy()
if hasattr(rc, 'service'):
args.service = rc.service
if hasattr(rc, 'service_options'):
service_options.update(rc.service_options)
if hasattr(args, 'blog') and args.blog is not None:
service_options['blog'] = args.blog
filename = args.filename if hasattr(args, 'filename') else None
service = find_service(args.service, service_options, filename)
if args.command == 'blogs':
service.list_blogs()
elif args.command == 'search':
service.search(' '.join(args.q))
elif args.command == 'generate':
service.generate()
elif args.command == 'checklink':
service.checklink()
elif args.command == 'post':
service.post()
if __name__ == '__main__':
main()
| 29.108108 | 79 | 0.660786 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,125 | 0.483596 |
295a62c87a95c13de9ca2d600326020d699ab2e2 | 8,729 | py | Python | spyder/plugins/outlineexplorer/api.py | suokunlong/spyder | 2d5d450fdcef232fb7f38e7fefc27f0e7f704c9a | [
"MIT"
]
| 1 | 2018-05-03T02:14:15.000Z | 2018-05-03T02:14:15.000Z | spyder/plugins/outlineexplorer/api.py | jastema/spyder | 0ef48ea227c53f57556cd8002087dc404b0108b0 | [
"MIT"
]
| null | null | null | spyder/plugins/outlineexplorer/api.py | jastema/spyder | 0ef48ea227c53f57556cd8002087dc404b0108b0 | [
"MIT"
]
| 1 | 2020-03-05T03:09:11.000Z | 2020-03-05T03:09:11.000Z | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""Outline explorer API.
You need to declare an OutlineExplorerProxy and a function to handle the
edit_goto Signal.
class OutlineExplorerProxyCustom(OutlineExplorerProxy):
...
def handle_go_to(name, line, text):
...
outlineexplorer = OutlineExplorerWidget(None)
oe_proxy = OutlineExplorerProxyCustom(name)
outlineexplorer.set_current_editor(oe_proxy, update=True, clear=False)
outlineexplorer.edit_goto.connect(handle_go_to)
"""
import re
from qtpy.QtCore import Signal, QObject
from qtpy.QtGui import QTextBlock
from spyder.config.base import _
from spyder.config.base import running_under_pytest
def document_cells(block, forward=True):
"""
Get the oedata of cells before or after the given block in the document.
Parameters
----------
forward : bool, optional
Whether to iterate forward or backward from the current block.
"""
if not block.isValid():
# Not a valid block
return
if forward:
block = block.next()
else:
block = block.previous()
while block.isValid():
data = block.userData()
if (data
and data.oedata
and data.oedata.def_type == OutlineExplorerData.CELL):
yield data.oedata
if forward:
block = block.next()
else:
block = block.previous()
def is_cell_header(block):
"""Check if the given block is a cell header."""
if not block.isValid():
return False
data = block.userData()
return (data
and data.oedata
and data.oedata.def_type == OutlineExplorerData.CELL)
def cell_index(block):
"""Get the cell index of the given block."""
index = len(list(document_cells(block, forward=False)))
if is_cell_header(block):
return index + 1
return index
def cell_name(block):
"""
Get the cell name the block is in.
If the cell is unnamed, return the cell index instead.
"""
if is_cell_header(block):
header = block.userData().oedata
else:
try:
header = next(document_cells(block, forward=False))
except StopIteration:
# This cell has no header, so it is the first cell.
return 0
if header.has_name():
return header.def_name
else:
# No name, return the index
return cell_index(block)
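# Usage sketch for the three helpers above (`block` is any QTextBlock taken from
# the editor document; the results follow from the logic above, not a doctest):
#
#     list(document_cells(block, forward=False))  # oedata of cell headers above
#     cell_index(block)   # number of cell headers before the block
#                         # (plus one when the block itself is a header)
#     cell_name(block)    # name of the enclosing cell, or its index if unnamed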
class OutlineExplorerProxy(QObject):
"""
Proxy class between editors and OutlineExplorerWidget.
"""
sig_cursor_position_changed = Signal(int, int)
sig_outline_explorer_data_changed = Signal()
def __init__(self):
super(OutlineExplorerProxy, self).__init__()
self.fname = None
def is_python(self):
"""Return whether the editor is a python file or not."""
raise NotImplementedError
def get_id(self):
"""Return an unique id, used for identify objects in a dict"""
raise NotImplementedError
def give_focus(self):
"""Give focus to the editor, called when toogling visibility of
OutlineExplorerWidget."""
raise NotImplementedError
def get_line_count(self):
"""Return the number of lines of the editor (int)."""
raise NotImplementedError
def parent(self):
"""This is used for diferenciate editors in multi-window mode."""
return None
def get_cursor_line_number(self):
"""Return the cursor line number."""
raise NotImplementedError
def outlineexplorer_data_list(self):
"""Returns a list of outline explorer data."""
raise NotImplementedError
class OutlineExplorerData(QObject):
CLASS, FUNCTION, STATEMENT, COMMENT, CELL = list(range(5))
FUNCTION_TOKEN = 'def'
CLASS_TOKEN = 'class'
# Emitted if the OutlineExplorerData was changed
sig_update = Signal()
def __init__(self, block, text=None, fold_level=None, def_type=None,
def_name=None, color=None):
"""
Args:
text (str)
fold_level (int)
def_type (int): [CLASS, FUNCTION, STATEMENT, COMMENT, CELL]
def_name (str)
color (PyQt.QtGui.QTextCharFormat)
"""
super(OutlineExplorerData, self).__init__()
self.text = text
self.fold_level = fold_level
self.def_type = def_type
self.def_name = def_name
self.color = color
if running_under_pytest():
# block might be a dummy
self.block = block
else:
# Copy the text block to make sure it is not deleted
self.block = QTextBlock(block)
def is_not_class_nor_function(self):
return self.def_type not in (self.CLASS, self.FUNCTION)
def is_class_or_function(self):
return self.def_type in (self.CLASS, self.FUNCTION)
def is_comment(self):
return self.def_type in (self.COMMENT, self.CELL)
def get_class_name(self):
if self.def_type == self.CLASS:
return self.def_name
def get_function_name(self):
if self.def_type == self.FUNCTION:
return self.def_name
def get_token(self):
if self.def_type == self.FUNCTION:
token = self.FUNCTION_TOKEN
elif self.def_type == self.CLASS:
token = self.CLASS_TOKEN
return token
@property
def def_name(self):
"""Get the cell name."""
# Non-cell entries don't need unique names.
if self.def_type != self.CELL:
return self._def_name
def get_name(oedata):
name = oedata._def_name
if not name:
name = _('Unnamed Cell')
return name
self_name = get_name(self)
existing_numbers = []
def check_match(oedata):
# Look for "string"
other_name = get_name(oedata)
pattern = '^' + re.escape(self_name) + r'(?:, #(\d+))?$'
match = re.match(pattern, other_name)
if match:
# Check if already has a number
number = match.groups()[0]
if number:
existing_numbers.append(int(number))
return True
return False
# Count cells
N_prev = 0
for oedata in document_cells(self.block, forward=False):
if check_match(oedata):
N_prev += 1
N_fix_previous = len(existing_numbers)
N_next = 0
for oedata in document_cells(self.block, forward=True):
if check_match(oedata):
N_next += 1
# Get the remaining indexes we can use
free_indexes = [idx for idx in range(N_prev + N_next + 1)
if idx + 1 not in existing_numbers]
idx = free_indexes[N_prev - N_fix_previous]
if N_prev + N_next > 0:
return self_name + ', #{}'.format(idx + 1)
return self_name
@def_name.setter
def def_name(self, value):
"""Set name."""
self._def_name = value
def update(self, other):
"""Try to update to avoid reloading everything."""
if (self.def_type == other.def_type and
self.fold_level == other.fold_level):
self.text = other.text
old_def_name = self._def_name
self._def_name = other._def_name
self.color = other.color
self.sig_update.emit()
if self.def_type == self.CELL:
if self.cell_level != other.cell_level:
return False
# Must update all other cells whose name has changed.
for oedata in document_cells(self.block, forward=True):
if oedata._def_name in [self._def_name, old_def_name]:
oedata.sig_update.emit()
return True
return False
def is_valid(self):
"""Check if the oedata has a valid block attached."""
block = self.block
return (block
and block.isValid()
and block.userData()
and hasattr(block.userData(), 'oedata')
and block.userData().oedata == self
)
def has_name(self):
"""Check if cell has a name."""
if self._def_name:
return True
else:
return False
def get_block_number(self):
"""Get the block number."""
if not self.is_valid():
# Avoid calling blockNumber if not a valid block
return None
return self.block.blockNumber()
| 29.096667 | 74 | 0.595601 | 6,207 | 0.710997 | 721 | 0.082589 | 1,692 | 0.193814 | 0 | 0 | 2,426 | 0.277892 |
295bcd4e3374d50cf1562ad240b9c1e9e4ac0fc7 | 3,132 | py | Python | seamless/core/__init__.py | sjdv1982/seamless | 1b814341e74a56333c163f10e6f6ceab508b7df9 | [
"MIT"
]
| 15 | 2017-06-07T12:49:12.000Z | 2020-07-25T18:06:04.000Z | seamless/core/__init__.py | sjdv1982/seamless | 1b814341e74a56333c163f10e6f6ceab508b7df9 | [
"MIT"
]
| 110 | 2016-06-21T23:20:44.000Z | 2022-02-24T16:15:22.000Z | seamless/core/__init__.py | sjdv1982/seamless | 1b814341e74a56333c163f10e6f6ceab508b7df9 | [
"MIT"
]
| 6 | 2016-06-21T11:19:22.000Z | 2019-01-21T13:45:39.000Z | import weakref
class IpyString(str):
def _repr_pretty_(self, p, cycle):
return p.text(str(self))
class SeamlessBase:
_destroyed = False
_context = None
_cached_path = None
name = None
def _get_macro(self):
return self._context()._macro
@property
def path(self):
if self._cached_path is not None:
return self._cached_path
if self._context is None:
return ()
elif self._context() is None:
return ("<None>", self.name)
elif self._context().path is None:
return ("<None>", self.name)
else:
return self._context().path + (self.name,)
def _validate_path(self, required_path=None):
if required_path is None:
required_path = self.path
else:
assert self.path == required_path, (self.path, required_path)
return required_path
def _set_context(self, context, name):
from .context import Context, UnboundContext
assert isinstance(context, (Context, UnboundContext))
if self._context is not None and self._context() is context:
assert self.name in context._auto
context._children.pop(self.name)
context._auto.discard(self.name)
self.name = name
return
if isinstance(context, UnboundContext):
assert self._context is None
else:
assert self._context is None or isinstance(self._context(), UnboundContext), self._context
ctx = weakref.ref(context)
self._context = ctx
self.name = name
return self
def _get_manager(self):
assert self._context is not None, self.name #worker/cell must have a context
assert self._context() is not None, self.name #worker/cell must have a context
return self._context()._get_manager()
def _root(self):
if self._context is None:
return None
if self._context() is None:
return None
return self._context()._root()
def _format_path(self):
path = self.path
if path is None:
ret = "<None>"
else:
path = [str(p) for p in path]
ret = "." + ".".join(path)
return ret
def __str__(self):
ret = "Seamless object: " + self._format_path()
return ret
def __repr__(self):
return self.__str__()
def _set_macro_object(self, macro_object):
self._macro_object = macro_object
@property
def self(self):
return self
def destroy(self, **kwargs):
self._destroyed = True
from .mount import mountmanager
from .macro_mode import get_macro_mode, macro_mode_on
from . import cell as cell_module
from .cell import Cell, cell
from . import context as context_module
from .context import Context, context
from .worker import Worker
from .transformer import Transformer, transformer
from .structured_cell import StructuredCell, Inchannel, Outchannel
from .macro import Macro, macro, path
from .reactor import Reactor, reactor
from .unilink import unilink | 30.705882 | 102 | 0.628672 | 2,638 | 0.842273 | 0 | 0 | 445 | 0.142082 | 0 | 0 | 113 | 0.036079 |
295bf91559d8557674834d5e4100c334bcac0923 | 11,613 | py | Python | oguilem/configuration/config.py | dewberryants/oGUIlem | 28271fdc0fb6ffba0037f30f9f9858bec32b0d13 | [
"BSD-3-Clause"
]
| 2 | 2022-02-23T13:16:47.000Z | 2022-03-07T09:47:29.000Z | oguilem/configuration/config.py | dewberryants/oGUIlem | 28271fdc0fb6ffba0037f30f9f9858bec32b0d13 | [
"BSD-3-Clause"
]
| null | null | null | oguilem/configuration/config.py | dewberryants/oGUIlem | 28271fdc0fb6ffba0037f30f9f9858bec32b0d13 | [
"BSD-3-Clause"
]
| 1 | 2022-02-23T13:16:49.000Z | 2022-02-23T13:16:49.000Z | import os
import re
import sys
from oguilem.configuration.fitness import OGUILEMFitnessFunctionConfiguration
from oguilem.configuration.ga import OGUILEMGlobOptConfig
from oguilem.configuration.geometry import OGUILEMGeometryConfig
from oguilem.configuration.utils import ConnectedValue, ConfigFileManager
from oguilem.resources import options
class OGUILEMConfig:
def __init__(self):
self.ui = OGUILEMUIConfig()
self.globopt = OGUILEMGlobOptConfig()
self.options = OGUILEMGeneralConfig()
self.geometry = OGUILEMGeometryConfig()
self.fitness = OGUILEMFitnessFunctionConfiguration()
self.file_manager = ConfigFileManager()
def save_to_file(self, path: str):
content = "###OGOLEM###\n"
content += self.globopt.get_finished_config()
content += self.geometry.get_finished_config(path)
content += self.fitness.get_finished_config()
content += self.options.get_finished_config()
with open(path, "w") as conf_file:
conf_file.write(content)
self.file_manager.signal_saved(path)
def load_from_file(self, path: str, preset=False):
self.options.set_to_default()
with open(path, "r") as conf_file:
content = conf_file.readlines()
# Find geometry block and split off
iter_content = iter(content)
geo_block = list()
backend_defs = list()
charge_block = list()
spin_block = list()
offset = 0
# Separate off blocks
for n, line in enumerate(iter_content):
# Charge and Spin Blocks
if line.strip().startswith("<CHARGES>"):
start = n + offset
try:
charge_line = next(iter_content).strip()
except StopIteration:
raise RuntimeError("Config ends after <CHARGES> tag!?")
while not charge_line.startswith("</CHARGES>"):
charge_block.append(charge_line)
try:
charge_line = next(iter_content).strip()
except StopIteration:
raise RuntimeError("Dangling <GEOMETRY> tag in configuration!")
end = start + len(charge_block) + 2
content = content[:start] + content[end:]
offset -= 1
if line.strip().startswith("<SPINS>"):
start = n + offset
try:
spin_line = next(iter_content).strip()
except StopIteration:
raise RuntimeError("Config ends after <SPINS> tag!?")
while not spin_line.startswith("</SPINS>"):
spin_block.append(spin_line)
try:
spin_line = next(iter_content).strip()
except StopIteration:
raise RuntimeError("Dangling <SPINS> tag in configuration!")
end = start + len(spin_block) + 2
content = content[:start] + content[end:]
offset -= 1
# Geometry Block
if line.strip().startswith("<GEOMETRY>"):
start = n + offset
try:
geo_line = next(iter_content).strip()
except StopIteration:
raise RuntimeError("Config ends after <GEOMETRY> tag!?")
while not geo_line.startswith("</GEOMETRY>"):
geo_block.append(geo_line)
try:
geo_line = next(iter_content).strip()
except StopIteration:
raise RuntimeError("Dangling <GEOMETRY> tag in configuration!")
end = start + len(geo_block) + 2
content = content[:start] + content[end:]
offset -= 1
# Any Backend Definitions
if line.strip().startswith("<CLUSTERBACKEND>"):
back_block = list()
start = n + offset
try:
back_line = next(iter_content).strip()
except StopIteration:
raise RuntimeError("Config ends after <CLUSTERBACKEND> tag!?")
while not back_line.startswith("</CLUSTERBACKEND>"):
back_block.append(back_line)
try:
back_line = next(iter_content).strip()
except StopIteration:
raise RuntimeError("Dangling <CLUSTERBACKEND> tag in configuration!")
end = start + len(back_block) + 2
backend_defs.append(back_block)
content = content[:start] + content[end:]
offset -= 1
# Parse them
self.geometry.parse_from_block(geo_block)
self.geometry.parse_charge_block(charge_block)
self.geometry.parse_spin_block(spin_block)
self.fitness.parse_backend_tags(backend_defs)
# Deal with the rest
for line in content:
if line.strip().startswith("LocOptAlgo="):
self.fitness.parse_locopt_algo(line.strip()[11:])
elif line.strip().startswith("GlobOptAlgo="):
self.globopt.parse_globopt_string(line.strip()[12:])
else:
for key in self.options.values:
type = self.options.values[key].type
if re.match(key + "=", line.strip()):
value, index = parse_value(line.strip()[len(key) + 1:], type)
if value is not None:
print("Option {:>30} set to: {:>30}".format(key, str(value)))
self.options.values[key].set(value, index)
else:
print("ERROR: Could not set Option %s. Set to default instead!" % key)
self.options.values[key].set(self.options.defaults[key])
if not preset:
self.file_manager.signal_saved(path)
else:
self.file_manager.signal_modification()
def parse_value(line, type):
value = None
index = -1
work = line.strip()
if type is str:
value = work
elif type is int:
value = int(work)
elif type is float:
value = float(work)
elif type is bool:
value = work.lower() == "true"
elif type is list:
tmp = work.split(";")
value = [float(tmp[0]), float(tmp[1]), float(tmp[2])]
return value, index
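# A few illustrative calls (the results follow directly from the branches above):
#
#     parse_value("42", int)            # -> (42, -1)
#     parse_value("true", bool)         # -> (True, -1)
#     parse_value("0.1;0.2;0.3", list)  # -> ([0.1, 0.2, 0.3], -1)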
class OGUILEMGeneralConfig:
def __init__(self):
self.defaults = dict()
self.values = dict()
for key in options:
type, default = options[key]
if type == "str":
self.defaults[key] = default
elif type == "int":
self.defaults[key] = int(default)
elif type == "float":
self.defaults[key] = float(default)
elif type == "bool":
self.defaults[key] = (default.lower() == "true")
elif type == "3;float":
default = default.strip().split(";")
self.defaults[key] = [float(default[0]), float(default[1]), float(default[2])]
else:
raise IOError("Could not parse xml key %s in general configs!" % key)
self.values[key] = ConnectedValue(self.defaults[key])
def set_to_default(self):
for key in options:
self.values[key].set(self.defaults[key])
def get_finished_config(self) -> str:
content = ""
for key in self.values:
self.values[key].request_update()
value = self.values[key].value
if value != self.defaults[key]:
content += "\n" + key + "=" + str(self.values[key])
return content
def find_config_folder():
if sys.platform == 'Windows':
path = os.path.join(os.environ['APPDATA'], 'oguilem')
else:
path = os.path.join(os.environ['HOME'], '.config', 'oguilem')
if not os.path.isdir(path):
os.mkdir(path)
return path
class OGUILEMUIConfig:
def __init__(self):
self.window_size = None
self.window_position = None
self.java_path = None
self.java_vm_variables = None
self.ogo_path = None
self.ogo_args = None
self.environmental_variables = None
self.recover_from_file()
def get_run_command(self, custom_run_command=""):
run_cmd = custom_run_command if custom_run_command else self.ogo_args
if not all([self.java_path, self.ogo_path, self.ogo_args]):
raise RuntimeError("Cannot run ogolem without knowing java and ogolem paths as well as ogolem arguments!")
if self.java_vm_variables:
return "%s %s -jar %s %s" % (self.java_path, self.java_vm_variables, self.ogo_path, run_cmd)
return "%s -jar %s %s" % (self.java_path, self.ogo_path, run_cmd)
def recover_from_file(self):
path = os.path.join(find_config_folder(), "oguilem.cfg")
try:
with open(path, "r") as config:
lines = config.readlines()
for line in lines:
work = line.strip()
if work.startswith("WINDOWSIZE"):
self.window_size = (int(work.split()[1]), int(work.split()[2]))
elif work.startswith("WINDOWPOS"):
self.window_position = (int(work.split()[1]), int(work.split()[2]))
elif work.startswith("JAVAPATH"):
self.java_path = work[8:].strip()
elif work.startswith("JAVAVM"):
self.java_vm_variables = work[6:].strip()
elif work.startswith("OGOPATH"):
self.ogo_path = work[7:].strip()
elif work.startswith("OGOARGS"):
self.ogo_args = work[7:].strip()
elif work.startswith("ENV"):
self.environmental_variables = work[3:].strip()
except ValueError:
print("There are format errors in the UI config file in '%s'. Using defaults." % find_config_folder())
except IOError:
print("Config file not found. A new one will generate once the program exits.")
def save_to_file(self):
path = os.path.join(find_config_folder(), "oguilem.cfg")
with open(path, "w") as config:
if self.window_size:
config.write("WINDOWSIZE %d %d\n" % (self.window_size[0], self.window_size[1]))
if self.window_position:
config.write("WINDOWPOS %d %d\n" % (self.window_position[0], self.window_position[1]))
if self.java_path:
config.write("JAVAPATH %s\n" % self.java_path)
if self.java_vm_variables:
config.write("JAVAVM %s\n" % self.java_vm_variables)
if self.ogo_path:
config.write("OGOPATH %s\n" % self.ogo_path)
if self.ogo_args:
config.write("OGOARGS %s\n" % self.ogo_args)
if self.java_path:
config.write("ENV %s\n" % self.environmental_variables)
| 43.494382 | 118 | 0.533023 | 10,555 | 0.908895 | 0 | 0 | 0 | 0 | 0 | 0 | 1,341 | 0.115474 |
295cb6523225a5b823029ec4f2d16b55a8369739 | 8,705 | py | Python | xpd_workflow/temp_graph.py | CJ-Wright/xpd_workflow | f3fd84831b86b696631759946c3af9b16b45de26 | [
"BSD-3-Clause"
]
| null | null | null | xpd_workflow/temp_graph.py | CJ-Wright/xpd_workflow | f3fd84831b86b696631759946c3af9b16b45de26 | [
"BSD-3-Clause"
]
| 4 | 2016-08-25T02:59:05.000Z | 2016-09-28T22:32:34.000Z | xpd_workflow/temp_graph.py | CJ-Wright/xpd_workflow | f3fd84831b86b696631759946c3af9b16b45de26 | [
"BSD-3-Clause"
]
| null | null | null | from __future__ import (division, print_function)
import matplotlib.cm as cmx
import matplotlib.colors as colors
from matplotlib import gridspec
from metadatastore.api import db_connect as mds_db_connect
from filestore.api import db_connect as fs_db_connect
fs_db_connect(
**{'database': 'data-processing-dev', 'host': 'localhost', 'port': 27017})
mds_db_connect(
**{'database': 'data-processing-dev', 'host': 'localhost', 'port': 27017})
from databroker import db, get_events
from datamuxer import DataMuxer
from sidewinder_spec.utils.handlers import *
import logging
from xpd_workflow.parsers import parse_xrd_standard
logger = logging.getLogger(__name__)
if __name__ == '__main__':
import os
import numpy as np
import matplotlib.pyplot as plt
save = True
lam = 1.54059
# Standard reflections for sample components
niox_hkl = ['111', '200', '220', '311', '222', '400', '331',
'420', '422', '511']
niox_tth = np.asarray(
[37.44, 43.47, 63.20, 75.37, 79.87, 95.58, 106.72, 111.84,
129.98, 148.68])
pr3_hkl = ['100', '001', '110', '101', '111', '200', '002', '210', '211',
'112', '202']
pr3_tth = np.asarray(
[22.96, 24.33, 32.70, 33.70, 41.18, 46.92, 49.86, 52.86, 59.00, 60.91,
70.87]
)
pr4_hkl = ['111', '113', '008', '117', '200', '119', '028', '0014', '220',
'131', '1115', '0214', '317', '31Na', '2214', '040', '400']
pr4_tth = np.asarray(
[23.43, 25.16, 25.86, 32.62, 33.36, 37.67, 42.19, 46.11, 47.44, 53.18,
55.55, 57.72, 59.10, 59.27, 68.25, 68.71, 70.00]
)
pr2_tth, pr2int, pr2_hkl = parse_xrd_standard(
'/mnt/bulk-data/research_data/Pr2NiO4orthorhombicPDF#97-008-1577.txt')
pr2_tth = pr2_tth[pr2int > 5.]
prox_hkl = ['111', '200', '220', '311', '222', '400', '331', '420', '422',
'511', '440', '531', '600']
prox_tth = np.asarray(
[28.25, 32.74, 46.99, 55.71, 58.43, 68.59, 75.73, 78.08, 87.27,
94.12, 105.63, 112.90, 115.42]
)
standard_names = [
# 'NiO',
'Pr3Ni2O7',
'Pr2NiO4',
# 'Pr4'
'Pr6O11'
]
master_hkl = [
# niox_hkl,
pr3_hkl,
pr2_hkl,
# pr4_hkl
prox_hkl
]
master_tth = [
# niox_tth,
pr3_tth,
pr2_tth,
# pr4_tth
prox_tth
]
color_map = [
# 'red',
'blue',
'black',
'red'
]
line_style = ['--', '-.', ':', ]
ns = [1, 2, 3, 4, 5,
# 18, 20, 22, 16, 28, 29, 27, 26
]
# ns = [26]
ns.sort()
#
for i in ns:
legended_hkl = []
print(i)
folder = '/mnt/bulk-data/research_data/USC_beamtime/APS_March_2016/S' + str(
i) + '/temp_exp'
hdr = db(run_folder=folder)[0]
dm = DataMuxer()
dm.append_events(get_events(hdr))
df = dm.to_sparse_dataframe()
print(df.keys())
binned = dm.bin_on('img', interpolation={'T': 'linear'})
# key_list = [f for f in os.listdir(folder) if
# f.endswith('.gr') and not f.startswith('d')]
key_list = [f for f in os.listdir(folder) if
f.endswith('.chi') and not f.startswith('d') and f.strip(
'0.chi') != '' and int(
f.lstrip('0').strip('.chi')) % 2 == 1]
key_list.sort()
key_list = key_list[:-1]
# key_list2.sort()
idxs = [int(os.path.splitext(f)[0]) for f in key_list]
Ts = binned['T'].values[idxs]
output = os.path.splitext(key_list[0])[-1][1:]
if key_list[0].endswith('.gr'):
offset = .1
skr = 0
else:
skr = 8
offset = .001
data_list = [(np.loadtxt(os.path.join(folder, f),
skiprows=skr
)[:, 0],
np.loadtxt(os.path.join(folder, f),
skiprows=skr
)[:, 1])
for f
in key_list]
ylim_min = None
for xmax, length in zip(
[len(data_list[0][0]) - 1, len(data_list[0][0]) - 1],
['short', 'full']):
fig = plt.figure(figsize=(26, 12))
gs = gridspec.GridSpec(1, 2, width_ratios=[5, 1])
ax1 = plt.subplot(gs[0])
if length == 'short':
ax1.set_xlim(1.5, 4.5)
ax2 = plt.subplot(gs[1], sharey=ax1)
plt.setp(ax2.get_yticklabels(), visible=False)
cm = plt.get_cmap('viridis')
cNorm = colors.Normalize(vmin=0, vmax=len(key_list))
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cm)
for idx in range(len(key_list)):
xnm, y = data_list[idx]
colorVal = scalarMap.to_rgba(idx)
if output == 'chi':
x = xnm / 10.
ax1.plot(x[:xmax], y[:xmax] + idx * offset,
color=colorVal)
ax2.plot(Ts[idx], y[-1] + idx * offset, marker='o',
color=colorVal)
if ylim_min is None or ylim_min > np.min(
y[:xmax] + idx * offset):
ylim_min = np.min(y[:xmax] + idx * offset)
ax2.set_xticklabels([str(f) for f in ax2.get_xticks()],
rotation=90)
if output == 'gr':
bnds = ['O-Pr', 'O-Ni', 'Ni-Ni', 'Pr-Pr', 'Ni-Pr', 'O-Pr',
'O-Ni',
'Ni-Ni-Ni', 'Pr-Ni', 'Pr-Pr', 'Pr-Ni-O', 'Ni-Pr-Ni',
'Pr-Pr', 'Rs:Pr-Pr', 'Rs:Pr_Pr']
bnd_lens = [2.320, 1.955, 3.883, 3.765, 3.186, 2.771, 2.231,
7.767, 4.426, 6.649, 4.989, 5.404, 3.374, 3.910,
8.801]
# ax1.grid(True)
# ax2.grid(True)
for bnd, bnd_len in zip(bnds, bnd_lens):
ax1.axvline(bnd_len, color='grey', linestyle='--')
ax3 = ax1.twiny()
ax3.set_xticks(np.asarray(bnd_lens) / x[xmax])
ax3.set_xticklabels(bnds, rotation=90)
else:
std_axis = []
for n, hkls, tths, color, ls in zip(standard_names, master_hkl,
master_tth,
color_map, line_style):
std_axis.append(ax1.twiny())
ax3 = std_axis[-1]
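# Convert each standard's 2-theta values (degrees) to momentum transfer
# Q = 4*pi*sin(theta)/lam (inverse Angstrom) so the reference reflections
# can be drawn on the same axis as the measured I(Q).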
hkl_q = np.pi * 4 * np.sin(np.deg2rad(tths / 2)) / lam
for k, (hkl, q) in enumerate(zip(hkls, hkl_q)):
if n not in legended_hkl:
ax1.axvline(q, color=color, linestyle=ls,
lw=2,
label=n
)
legended_hkl.append(n)
else:
ax1.axvline(q, color=color, linestyle=ls,
lw=2,
)
a = hkl_q > ax1.get_xlim()[0]
b = hkl_q < ax1.get_xlim()[1]
c = a & b
ax3.set_xticks(list((hkl_q[c] - ax1.get_xlim()[0]) / (
ax1.get_xlim()[1] - ax1.get_xlim()[0])
))
ax3.set_xticklabels(hkls, rotation=90, color=color)
ax2.set_xlabel('Temperature C')
if output == 'gr':
fig.suptitle('S{} PDF'.format(i))
ax1.set_xlabel(r"$r (\AA)$")
ax1.set_ylabel(r"$G (\AA^{-2})$")
elif output == 'chi':
fig.suptitle('S{} I(Q)'.format(i))
ax1.set_xlabel(r"$Q (\AA^{-1})$")
ax1.set_ylabel(r"$I (Q) $")
ax1.set_ylim(ylim_min)
ax1.legend()
gs.tight_layout(fig, rect=[0, 0, 1, .98], w_pad=1e-6)
if save:
fig.savefig(os.path.join('/mnt/bulk-data/Dropbox/',
'S{}_{}_output_{}.png'.format(
i, length, output)))
fig.savefig(os.path.join('/mnt/bulk-data/Dropbox/',
'S{}_{}_output_{}.eps'.format(
i, length, output)))
else:
plt.show()
| 38.688889 | 84 | 0.443538 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,290 | 0.148191 |
295d0342f32753f768fc55a488c38b501e122b06 | 12,101 | py | Python | winnow/core.py | bgschiller/winnow | 0fde7fcc9e2fe3519528feb9115658aa3b3954e5 | [
"MIT"
]
| 3 | 2017-08-10T16:20:29.000Z | 2018-09-19T01:33:13.000Z | winnow/core.py | bgschiller/winnow | 0fde7fcc9e2fe3519528feb9115658aa3b3954e5 | [
"MIT"
]
| null | null | null | winnow/core.py | bgschiller/winnow | 0fde7fcc9e2fe3519528feb9115658aa3b3954e5 | [
"MIT"
]
| 1 | 2019-11-29T20:17:23.000Z | 2019-11-29T20:17:23.000Z | from __future__ import unicode_literals
import copy
import json
from six import string_types
from . import default_operators
from . import sql_prepare
from . import values
from .error import WinnowError
from .templating import SqlFragment
from .templating import WinnowSql
class Winnow(object):
"""
Winnow is a SQL query builder specifically designed for
powerful filtering on a table. It is designed to be
efficient and low-magic.
"""
# Take care here -- In order to avoid mucking up the parent's copy of this
# static value we have to deep copy it to every subclass.
_special_cases = {}
sql_class = WinnowSql
def __init__(self, table, sources):
self.table = table
self.sources = sources
self.sql = self.sql_class()
def prepare_query(self, *args, **kwargs):
"""
Proxy to self.sql
"""
return self.sql.prepare_query(*args, **kwargs)
def resolve(self, filt):
"""
Given a filter, resolve (expand) all it's clauses.
A resolved clause includes information about the
value type of the data source, and how to perform
queries against that data source.
return the modified filter.
"""
filt['logical_op'] = filt.get('logical_op', '&')
if filt['logical_op'] not in '&|':
raise WinnowError("Logical op must be one of &, |. Given: {}".format(
filt['logical_op']))
for ix in range(len(filt['filter_clauses'])):
filt['filter_clauses'][ix] = self.resolve_clause(
filt['filter_clauses'][ix])
return filt
def validate(self, filt):
"""
Make sure a filter is valid (resolves properly), but avoid bulking up
the json object (probably because it's about to go into the db, or
across the network)
"""
self.resolve(copy.deepcopy(filt))
return filt
def resolve_clause(self, filter_clause):
"""
Given a filter_clause, check that it's valid.
Return a dict-style filter_clause with a vivified
value field.
"""
if 'logical_op' in filter_clause:
# nested filter
return self.resolve(filter_clause)
ds, op = self.resolve_components(filter_clause)
value = self.vivify(op['value_type'], filter_clause['value'])
filter_clause['data_source_resolved'] = ds
filter_clause['operator_resolved'] = op
filter_clause['value_vivified'] = value
filter_clause['summary'] = self.summarize(filter_clause)
return filter_clause
def summarize(self, filter_clause):
ds = filter_clause['data_source_resolved']
op = filter_clause['operator_resolved']
value = filter_clause['value_vivified']
cvt = self.coalesce_value_type(op['value_type'])
value_string = value
operator_string = op.get('summary_template') or '{{data_source}} {} {{value}}'.format(op['name'])
if cvt == 'collection':
operator_string, value_string = self.summarize_collection(filter_clause)
elif cvt == 'relative_date':
value_string = value.replace('_', ' ')
elif cvt == 'numeric':
value_string = '{:,}'.format(value)
return operator_string.format(data_source=ds['display_name'], value=value_string)
@classmethod
def coalesce_value_type(cls, value_type):
for op in cls.operators:
if op['value_type'] == value_type:
return op.get('coalesced_value_type', value_type)
return value_type
@classmethod
def summarize_collection(cls, filter_clause):
value = filter_clause['value'] if isinstance(filter_clause['value'], list) else json.loads(filter_clause['value'])
operator_string = '{data_source} any of {value}' if len(value) != 1 else '{data_source} is {value}'
if not value:
value_string = '(none)'
else:
value_string = ', '.join(value)
return operator_string, value_string
@staticmethod
def empty_filter():
return dict(logial_op='&', filter_clauses=[])
@classmethod
def vivify(cls, value_type, value):
"""De-stringify <value> into <value_type>
Raises WinnowError if <value> is not well formatted for that type."""
cvt = cls.coalesce_value_type(value_type)
if cvt == 'string':
return values.vivify_string(value)
elif cvt == 'collection':
return values.vivify_collection(value)
elif cvt in ('numeric', 'string_length'):
return values.vivify_numeric(value)
elif cvt == 'relative_date':
return values.vivify_relative_date(value)
elif cvt == 'absolute_date':
return values.vivify_absolute_date(value)
elif cvt in ('bool', 'nullable'):
return values.vivify_bool(value)
elif cvt == 'single_choice':
return values.vivify_single_choice(value)
else:
raise WinnowError("Unknown value_type, '{}'".format(value_type))
@classmethod
def stringify(cls, value_type, value):
cvt = cls.coalesce_value_type(value_type)
if isinstance(value, string_types):
value = cls.vivify(value_type, value)
if cvt == 'string':
return values.stringify_string(value)
elif cvt == 'collection':
return values.stringify_collection(value)
elif cvt in ('numeric', 'string_length'):
return values.stringify_numeric(value)
elif cvt == 'relative_date':
return values.stringify_relative_date(value)
elif cvt == 'absolute_date':
return values.stringify_absolute_date(value)
elif cvt in ('bool', 'nullable'):
return values.stringify_bool(value)
elif cvt == 'single_choice':
return values.stringify_single_choice(value)
raise WinnowError("Unknown value_type, '{}'".format(value_type))
operators = default_operators.OPERATORS
def resolve_operator(self, op_name, value_types):
'''Given an operator name, return an Op object.
Raise an error if the operator is not found'''
if not isinstance(op_name, string_types):
raise WinnowError("Bad operator type, '{}'. expected string".format(type(op_name)))
op_name = op_name.lower()
matches = [op for op in self.operators
if op['name'].lower() == op_name and op['value_type'] in value_types]
if len(matches) == 0:
raise WinnowError("Unknown operator '{}'".format(op_name))
return matches.pop()
def resolve_source(self, source_name):
"""
Given a source name, return a resolved data source.
Raise an error if the source name is not allowable
"""
matches = [source for source in self.sources
if source['display_name'] == source_name]
if len(matches) == 0:
raise WinnowError("Unknown data source '{}'".format(source_name))
elif len(matches) > 1:
raise WinnowError("Ambiguous data source '{}'".format(source_name))
return matches.pop()
def resolve_components(self, clause):
source = self.resolve_source(clause['data_source'])
operator = self.resolve_operator(clause['operator'],
source['value_types'])
return source, operator
def query(self, filt):
return self.prepare_query(
"SELECT * FROM {{ table | sqlsafe }} WHERE {{ condition }}",
table=self.table,
condition=self.where_clauses(filt))
def strip(self, filt):
"""
Perform the opposite of resolving a filter.
"""
for k in ('data_source_resolved', 'operator_resolved', 'value_vivified'):
filt.pop(k, None)
if 'filter_clauses' in filt:
filt['filter_clauses'] = [self.strip(f) for f in filt['filter_clauses']]
return filt
def where_clauses(self, filt):
'''
Apply a user filter.
Returns a paren-wrapped WHERE clause suitable for using
in a SELECT statement on the opportunity table.
'''
if not filt['filter_clauses']:
return True
filt = self.resolve(filt)
where_clauses = []
for clause in filt['filter_clauses']:
if 'logical_op' in clause:
# nested filter
where_clauses.append(self.where_clauses(clause))
elif 'data_source_resolved' in clause:
where_clauses.append(self._dispatch_clause(clause))
else:
# I don't expect to ever get here, because we should hit this
# issue when we call `filt = self.resolve(filt)`
raise WinnowError("Somehow, this is neither a nested filter, nor a resolved clause")
if not where_clauses:
return True
sep = '\nAND \n ' if filt['logical_op'] == '&' else '\nOR \n '
self.strip(filt)
sql_frag = SqlFragment.join(sep, where_clauses)
sql_frag.query = '(' + sql_frag.query + ')'
return sql_frag
def _dispatch_clause(self, clause):
"""
Evaluates whether a clause is standard, special, or custom
and calls the appropriate specialization function.
Each specialization returns a paren-wrapped WHERE clause, to be AND'd or OR'd
together to produce a final clause."""
for k in ('data_source_resolved', 'operator_resolved', 'value_vivified'):
if k not in clause:
raise WinnowError('failed to resolve component: {}'.format(k))
op = clause['operator_resolved']
special_handler = self.special_case_handler(
source_name=clause['data_source'],
value_type=op['value_type'])
if special_handler is not None:
return special_handler(self, clause)
return self._default_clause(clause)
def where_clause(self, data_source, operator, value):
return sql_prepare.where_clause(data_source['column'], operator, value)
def _default_clause(self, clause):
"""
Given a filter_clause, convert it to a WHERE clause
"""
ds = clause['data_source_resolved']
op = clause['operator_resolved']
value = clause['value_vivified']
return self.where_clause(ds, op, value)
@classmethod
def special_case(cls, source_name, *value_types):
"""
Register a special case handler. A special case handler is a function s:
s(Winnow(), clause) -> WHERE clause string
"""
if cls._special_cases is getattr(super(cls, cls), '_special_cases', None):
raise RuntimeError('Please define your own _special_cases dict, so as to avoid modifying your parent. '
'Note to self: come up with a more durable way to handle this.')
# ideas:
# proxy the _special_cases as the union of own and parent's version.
def decorator(func):
"""
Register a function in the handler table.
"""
for value_type in value_types:
if (source_name, value_type) in cls._special_cases:
raise WinnowError("Conflicting handlers registered for ({},{}): {} and {}".format(
value_type, source_name,
cls._special_cases[(source_name, value_type)].__name__, func.__name__))
cls._special_cases[(source_name, value_type)] = func
return func
return decorator
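# Hypothetical registration sketch (the subclass, column name and handler body are
# made up; the handler signature matches what _dispatch_clause passes in):
#
#     class MyWinnow(Winnow):
#         _special_cases = {}   # own dict, per the RuntimeError guard above
#
#     @MyWinnow.special_case('Tags', 'collection')
#     def handle_tags(winnow, clause):
#         ...  # return a SqlFragment WHERE clause for the tag lookup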
def special_case_handler(self, source_name, value_type):
"""
Check if a given value_type, source_name pair has
a special case handler.
:return: A function handler for that case accepting
the winnow instance and the clause.
"""
return self._special_cases.get((source_name, value_type))
| 37.934169 | 122 | 0.614081 | 11,823 | 0.977027 | 0 | 0 | 3,841 | 0.317412 | 0 | 0 | 4,277 | 0.353442 |
295d64816bed48df8774a68b70c332508540215b | 12,525 | py | Python | ibis/bigquery/client.py | tswast/ibis | 2f6d47e4c33cefd7ea1d679bb1d9253c2245993b | [
"Apache-2.0"
]
| null | null | null | ibis/bigquery/client.py | tswast/ibis | 2f6d47e4c33cefd7ea1d679bb1d9253c2245993b | [
"Apache-2.0"
]
| null | null | null | ibis/bigquery/client.py | tswast/ibis | 2f6d47e4c33cefd7ea1d679bb1d9253c2245993b | [
"Apache-2.0"
]
| null | null | null | import regex as re
import time
import collections
import datetime
import six
import pandas as pd
import google.cloud.bigquery as bq
from multipledispatch import Dispatcher
import ibis
import ibis.common as com
import ibis.expr.operations as ops
import ibis.expr.types as ir
import ibis.expr.schema as sch
import ibis.expr.datatypes as dt
import ibis.expr.lineage as lin
from ibis.compat import parse_version
from ibis.client import Database, Query, SQLClient
from ibis.bigquery import compiler as comp
from google.api.core.exceptions import BadRequest
NATIVE_PARTITION_COL = '_PARTITIONTIME'
def _ensure_split(table_id, dataset_id):
split = table_id.split('.')
if len(split) > 1:
assert len(split) == 2
if dataset_id:
raise ValueError(
"Can't pass a fully qualified table name *AND* a dataset_id"
)
(dataset_id, table_id) = split
return (table_id, dataset_id)
_IBIS_TYPE_TO_DTYPE = {
'string': 'STRING',
'int64': 'INT64',
'double': 'FLOAT64',
'boolean': 'BOOL',
'timestamp': 'TIMESTAMP',
'date': 'DATE',
}
_DTYPE_TO_IBIS_TYPE = {
'INT64': dt.int64,
'FLOAT64': dt.double,
'BOOL': dt.boolean,
'STRING': dt.string,
'DATE': dt.date,
# FIXME: enforce no tz info
'DATETIME': dt.timestamp,
'TIME': dt.time,
'TIMESTAMP': dt.timestamp,
'BYTES': dt.binary,
}
_LEGACY_TO_STANDARD = {
'INTEGER': 'INT64',
'FLOAT': 'FLOAT64',
'BOOLEAN': 'BOOL',
}
@dt.dtype.register(bq.schema.SchemaField)
def bigquery_field_to_ibis_dtype(field):
typ = field.field_type
if typ == 'RECORD':
fields = field.fields
assert fields
names = [el.name for el in fields]
ibis_types = list(map(dt.dtype, fields))
ibis_type = dt.Struct(names, ibis_types)
else:
ibis_type = _LEGACY_TO_STANDARD.get(typ, typ)
ibis_type = _DTYPE_TO_IBIS_TYPE.get(ibis_type, ibis_type)
if field.mode == 'REPEATED':
ibis_type = dt.Array(ibis_type)
return ibis_type
@sch.infer.register(bq.table.Table)
def bigquery_schema(table):
pairs = [(el.name, dt.dtype(el)) for el in table.schema]
try:
if table.list_partitions():
pairs.append((NATIVE_PARTITION_COL, dt.timestamp))
except BadRequest:
pass
return sch.schema(pairs)
class BigQueryCursor(object):
"""Cursor to allow the BigQuery client to reuse machinery in ibis/client.py
"""
def __init__(self, query):
self.query = query
def fetchall(self):
return list(self.query.fetch_data())
@property
def columns(self):
return [field.name for field in self.query.schema]
def __enter__(self):
# For compatibility when constructed from Query.execute()
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
def _find_scalar_parameter(expr):
""":func:`~ibis.expr.lineage.traverse` function to find all
:class:`~ibis.expr.types.ScalarParameter` instances and yield the operation
and the parent expresssion's resolved name.
Parameters
----------
expr : ibis.expr.types.Expr
Returns
-------
Tuple[bool, object]
"""
op = expr.op()
if isinstance(op, ops.ScalarParameter):
result = op, expr.get_name()
else:
result = None
return lin.proceed, result
class BigQueryQuery(Query):
def __init__(self, client, ddl, query_parameters=None):
super(BigQueryQuery, self).__init__(client, ddl)
# self.expr comes from the parent class
query_parameter_names = dict(
lin.traverse(_find_scalar_parameter, self.expr))
self.query_parameters = [
bigquery_param(
param.to_expr().name(query_parameter_names[param]), value
) for param, value in (query_parameters or {}).items()
]
def _fetch(self, cursor):
df = pd.DataFrame(cursor.fetchall(), columns=cursor.columns)
return self.schema().apply_to(df)
def execute(self):
# synchronous by default
with self.client._execute(
self.compiled_sql,
results=True,
query_parameters=self.query_parameters
) as cur:
result = self._fetch(cur)
return self._wrap_result(result)
class BigQueryAPIProxy(object):
def __init__(self, project_id):
self._client = bq.Client(project_id)
@property
def client(self):
return self._client
@property
def project_id(self):
return self.client.project
def get_datasets(self):
return list(self.client.list_datasets())
def get_dataset(self, dataset_id):
return self.client.dataset(dataset_id)
def get_table(self, table_id, dataset_id, reload=True):
(table_id, dataset_id) = _ensure_split(table_id, dataset_id)
table = self.client.dataset(dataset_id).table(table_id)
if reload:
table.reload()
return table
def get_schema(self, table_id, dataset_id):
return self.get_table(table_id, dataset_id).schema
def run_sync_query(self, stmt):
query = self.client.run_sync_query(stmt)
query.use_legacy_sql = False
query.run()
# run_sync_query is not really synchronous: there's a timeout
while not query.job.done():
query.job.reload()
time.sleep(0.1)
return query
class BigQueryDatabase(Database):
pass
bigquery_param = Dispatcher('bigquery_param')
@bigquery_param.register(ir.StructScalar, collections.OrderedDict)
def bq_param_struct(param, value):
field_params = [bigquery_param(param[k], v) for k, v in value.items()]
return bq.StructQueryParameter(param.get_name(), *field_params)
@bigquery_param.register(ir.ArrayValue, list)
def bq_param_array(param, value):
param_type = param.type()
assert isinstance(param_type, dt.Array), str(param_type)
try:
bigquery_type = _IBIS_TYPE_TO_DTYPE[str(param_type.value_type)]
except KeyError:
raise com.UnsupportedBackendType(param_type)
else:
return bq.ArrayQueryParameter(param.get_name(), bigquery_type, value)
@bigquery_param.register(
ir.TimestampScalar,
six.string_types + (datetime.datetime, datetime.date)
)
def bq_param_timestamp(param, value):
assert isinstance(param.type(), dt.Timestamp)
# TODO(phillipc): Not sure if this is the correct way to do this.
timestamp_value = pd.Timestamp(value, tz='UTC').to_pydatetime()
return bq.ScalarQueryParameter(
param.get_name(), 'TIMESTAMP', timestamp_value)
@bigquery_param.register(ir.StringScalar, six.string_types)
def bq_param_string(param, value):
return bq.ScalarQueryParameter(param.get_name(), 'STRING', value)
@bigquery_param.register(ir.IntegerScalar, six.integer_types)
def bq_param_integer(param, value):
return bq.ScalarQueryParameter(param.get_name(), 'INT64', value)
@bigquery_param.register(ir.FloatingScalar, float)
def bq_param_double(param, value):
return bq.ScalarQueryParameter(param.get_name(), 'FLOAT64', value)
@bigquery_param.register(ir.BooleanScalar, bool)
def bq_param_boolean(param, value):
return bq.ScalarQueryParameter(param.get_name(), 'BOOL', value)
@bigquery_param.register(ir.DateScalar, six.string_types)
def bq_param_date_string(param, value):
return bigquery_param(param, pd.Timestamp(value).to_pydatetime().date())
@bigquery_param.register(ir.DateScalar, datetime.datetime)
def bq_param_date_datetime(param, value):
return bigquery_param(param, value.date())
@bigquery_param.register(ir.DateScalar, datetime.date)
def bq_param_date(param, value):
return bq.ScalarQueryParameter(param.get_name(), 'DATE', value)
class BigQueryClient(SQLClient):
sync_query = BigQueryQuery
database_class = BigQueryDatabase
proxy_class = BigQueryAPIProxy
dialect = comp.BigQueryDialect
def __init__(self, project_id, dataset_id):
self._proxy = type(self).proxy_class(project_id)
self._dataset_id = dataset_id
@property
def project_id(self):
return self._proxy.project_id
@property
def dataset_id(self):
return self._dataset_id
@property
def _table_expr_klass(self):
return ir.TableExpr
def table(self, *args, **kwargs):
t = super(BigQueryClient, self).table(*args, **kwargs)
if NATIVE_PARTITION_COL in t.columns:
col = ibis.options.bigquery.partition_col
assert col not in t
return (t
.mutate(**{col: t[NATIVE_PARTITION_COL]})
.drop([NATIVE_PARTITION_COL]))
return t
def _build_ast(self, expr, context):
result = comp.build_ast(expr, context)
return result
    def _execute_query(self, dml, async_=False):
        # 'async' is a reserved word from Python 3.7 on, so the flag is spelled
        # with a trailing underscore here.
        klass = self.async_query if async_ else self.sync_query
inst = klass(self, dml, query_parameters=dml.context.params)
df = inst.execute()
return df
def _fully_qualified_name(self, name, database):
dataset_id = database or self.dataset_id
return dataset_id + '.' + name
def _get_table_schema(self, qualified_name):
return self.get_schema(qualified_name)
def _execute(self, stmt, results=True, query_parameters=None):
# TODO(phillipc): Allow **kwargs in calls to execute
query = self._proxy.client.run_sync_query(stmt)
query.use_legacy_sql = False
query.query_parameters = query_parameters or []
query.run()
# run_sync_query is not really synchronous: there's a timeout
while not query.job.done():
query.job.reload()
time.sleep(0.1)
return BigQueryCursor(query)
def database(self, name=None):
if name is None:
name = self.dataset_id
return self.database_class(name, self)
@property
def current_database(self):
return self.database(self.dataset_id)
def set_database(self, name):
self._dataset_id = name
def exists_database(self, name):
return self._proxy.get_dataset(name).exists()
def list_databases(self, like=None):
results = [dataset.name
for dataset in self._proxy.get_datasets()]
if like:
results = [
dataset_name for dataset_name in results
if re.match(like, dataset_name)
]
return results
def exists_table(self, name, database=None):
(table_id, dataset_id) = _ensure_split(name, database)
return self._proxy.get_table(table_id, dataset_id).exists()
def list_tables(self, like=None, database=None):
dataset = self._proxy.get_dataset(database or self.dataset_id)
result = [table.name for table in dataset.list_tables()]
if like:
result = [
table_name for table_name in result
if re.match(like, table_name)
]
return result
def get_schema(self, name, database=None):
(table_id, dataset_id) = _ensure_split(name, database)
bq_table = self._proxy.get_table(table_id, dataset_id)
return sch.infer(bq_table)
@property
def version(self):
return parse_version(bq.__version__)
_DTYPE_TO_IBIS_TYPE = {
'INT64': dt.int64,
'FLOAT64': dt.double,
'BOOL': dt.boolean,
'STRING': dt.string,
'DATE': dt.date,
# FIXME: enforce no tz info
'DATETIME': dt.timestamp,
'TIME': dt.time,
'TIMESTAMP': dt.timestamp,
'BYTES': dt.binary,
}
_LEGACY_TO_STANDARD = {
'INTEGER': 'INT64',
'FLOAT': 'FLOAT64',
'BOOLEAN': 'BOOL',
}
def _discover_type(field):
typ = field.field_type
if typ == 'RECORD':
fields = field.fields
assert fields
names = [el.name for el in fields]
ibis_types = [_discover_type(el) for el in fields]
ibis_type = dt.Struct(names, ibis_types)
else:
ibis_type = _LEGACY_TO_STANDARD.get(typ, typ)
ibis_type = _DTYPE_TO_IBIS_TYPE.get(ibis_type, ibis_type)
if field.mode == 'REPEATED':
ibis_type = dt.Array(ibis_type)
return ibis_type
def bigquery_table_to_ibis_schema(table):
pairs = [(el.name, _discover_type(el)) for el in table.schema]
try:
if table.list_partitions():
pairs.append((NATIVE_PARTITION_COL, dt.timestamp))
except BadRequest:
pass
return ibis.schema(pairs)
| 28.020134 | 79 | 0.660918 | 6,190 | 0.494212 | 0 | 0 | 3,641 | 0.290699 | 0 | 0 | 1,319 | 0.105309 |
295d6dddae668ee8a211bf176e96dec0fc246700 | 1,583 | py | Python | 5 kyu/Family Tree Ancestors.py | mwk0408/codewars_solutions | 9b4f502b5f159e68024d494e19a96a226acad5e5 | [
"MIT"
]
| 6 | 2020-09-03T09:32:25.000Z | 2020-12-07T04:10:01.000Z | 5 kyu/Family Tree Ancestors.py | mwk0408/codewars_solutions | 9b4f502b5f159e68024d494e19a96a226acad5e5 | [
"MIT"
]
| 1 | 2021-12-13T15:30:21.000Z | 2021-12-13T15:30:21.000Z | 5 kyu/Family Tree Ancestors.py | mwk0408/codewars_solutions | 9b4f502b5f159e68024d494e19a96a226acad5e5 | [
"MIT"
]
| null | null | null | from math import log, ceil
def chart(person):
res=helper(tuple(sorted(person.parents(), key=lambda x: x.sex, reverse=True)), 2, [], 16, 16)
res.append((person.name, 16))
dict={j:i for i,j in res}
dict2=helper2(16)
chart=[]
for i in range(1, 32):
temp=((4-depth(i))*11-1)*" "+("|" if 4-depth(i)!=0 else "")
name=dict.get(i, "_______")
number=str(dict2[i]).rjust(2, "0")
temp+=f"{number} {name}"
chart.append(list(temp))
for index,i in enumerate(chart):
digits=int("".join(j for j in "".join(i) if j.isdigit()))
num=5-ceil(log(digits+1, 2))
if digits==1 or num==0:
continue
for k in range(1, 2**num):
chart[index+(-k if digits%2 else k)][44-num*11-1]="|"
chart[16][32]=" "
chart[14][32]=" "
return "\n".join("".join(i) for i in chart)+"\n"
def helper(person, index, arr, row, rate):
if person==None or index>31:
return
rate//=2
for i,j in enumerate(person):
if (index+i)<=31:
arr.append((j.name, row-rate if j.sex=="M" else row+rate))
helper(tuple(sorted(j.parents(), key=lambda x: x.sex, reverse=True)), (index+i)*2, arr, row-rate if j.sex=="M" else row+rate, rate)
return arr
def depth(num):
total=0
while num%2==0:
num//=2
total+=1
return total
def helper2(num):
start=0
dict={}
while num>0:
increment=2**(start+1)
for i in range(num):
dict[2**start+i*increment]=num+i
start+=1
num//=2
return dict | 32.979167 | 139 | 0.53885 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 67 | 0.042325 |
295da24723071b30363f5dee9937e755f296d5c6 | 690 | py | Python | tests/make_expected_lookup.py | bfis/coffea | e5e67d410e86faee1172fcc864774d7024d97653 | [
"BSD-3-Clause"
]
| 77 | 2019-06-09T14:23:33.000Z | 2022-03-22T21:34:01.000Z | tests/make_expected_lookup.py | bfis/coffea | e5e67d410e86faee1172fcc864774d7024d97653 | [
"BSD-3-Clause"
]
| 353 | 2019-06-05T23:54:39.000Z | 2022-03-31T21:21:47.000Z | tests/make_expected_lookup.py | bfis/coffea | e5e67d410e86faee1172fcc864774d7024d97653 | [
"BSD-3-Clause"
]
| 71 | 2019-06-07T02:04:11.000Z | 2022-03-05T21:03:45.000Z | import numpy as np
import ROOT
from dummy_distributions import dummy_pt_eta
counts, test_in1, test_in2 = dummy_pt_eta()
f = ROOT.TFile.Open("samples/testSF2d.root")
sf = f.Get("scalefactors_Tight_Electron")
xmin, xmax = sf.GetXaxis().GetXmin(), sf.GetXaxis().GetXmax()
ymin, ymax = sf.GetYaxis().GetXmin(), sf.GetYaxis().GetXmax()
test_out = np.empty_like(test_in1)
for i, (eta, pt) in enumerate(zip(test_in1, test_in2)):
if xmax <= eta:
eta = xmax - 1.0e-5
elif eta < xmin:
eta = xmin
if ymax <= pt:
pt = ymax - 1.0e-5
elif pt < ymin:
pt = ymin
ib = sf.FindBin(eta, pt)
test_out[i] = sf.GetBinContent(ib)
print(repr(test_out))
| 24.642857 | 61 | 0.649275 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 52 | 0.075362 |
295e24a9ef2f154bf2eab43ba3f883adfaf8378d | 5,755 | py | Python | engine/sentiment_analysis.py | zgeorg03/nesase | 4dae70994cd0c730a88b4a54e6b8e29868aafb09 | [
"BSD-3-Clause"
]
| 2 | 2020-12-30T18:03:01.000Z | 2021-08-08T21:05:43.000Z | engine/sentiment_analysis.py | zgeorg03/nesase | 4dae70994cd0c730a88b4a54e6b8e29868aafb09 | [
"BSD-3-Clause"
]
| null | null | null | engine/sentiment_analysis.py | zgeorg03/nesase | 4dae70994cd0c730a88b4a54e6b8e29868aafb09 | [
"BSD-3-Clause"
]
| null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 14 17:42:27 2018
@author: zgeorg03
"""
import re
import json # Used for converting json to dictionary
import datetime # Used for date conversions
import matplotlib.pyplot as plt
import numpy as np
from sentiment import Sentiment
import json
class NewsArticle:
def __init__(self,hash,title,author,url,content,date,topics, feed):
self.hash = hash
self.title = title
self.author = author
self.url = url
self.content = content
self.date = datetime.datetime.fromtimestamp(date/1000.0)
self.topics = topics
self.feed = feed
self.sep = re.compile("[.!?]")
def __repr__(self):
return "hash={},title={},author={},date={},topics={}".format(
self.hash, self.title, self.author,
self.date, self.topics, self.feed)
def __str__(self):
return self.__repr__()
def produce_title_scores(self, sentiment):
lines = self.sep.split(self.title)
sentiment.score(lines)
neg,neu,pos,com,count = sentiment.get_avg_scores()
return (float("{0:.2f}".format(neg*100)), float("{0:.2f}".format(neu*100))
, float("{0:.2f}".format(pos*100)), float("{0:.2f}".format(com*100)),count
)
def produce_content_scores(self, sentiment):
lines = self.sep.split(self.content)
sentiment.score(lines)
neg,neu,pos,com,count = sentiment.get_avg_scores()
return (float("{0:.2f}".format(neg*100)), float("{0:.2f}".format(neu*100))
, float("{0:.2f}".format(pos*100)), float("{0:.2f}".format(com*100)),count
)
class Parser:
def __init__(self,file_in,max_articles=None,file_out=None):
        self.file_name = file_in
self.max_articles = max_articles
self.articles = []
self.sentiment = Sentiment()
self.results = []
self.file_out = file_out
def parse(self):
count = 0
with open(self.file_name,"r",encoding="UTF-8") as file:
for line in file:
if line.startswith(','):
continue
self.articles.append(self.parse_news_article(line))
count += 1
if self.max_articles:
if count >= self.max_articles:
break
def write(self):
for i,article in enumerate(self.articles):
if i % 100 == 0:
print('Finished: {} docs'.format(i))
self.write_article(article)
if self.file_out:
with open(self.file_out, 'w') as outfile:
json.dump(self.results, outfile,sort_keys=True,indent=4)
else:
print(json.dumps(self.results,sort_keys=True,indent=4))
def write_article(self,article):
res = {}
res['neg_title'],res['neu_title'],res['pos_title'],res['score_title'], _ = article.produce_title_scores(self.sentiment)
res['neg_content'],res['neu_content'],res['pos_content'],res['score_content'], _ = article.produce_content_scores(self.sentiment)
res['id'] = article.hash
res['title'] = article.title
res['date'] = int(article.date.timestamp())
res['content'] = article.content
res['topics'] = article.topics
res['feed'] = article.feed
res['url'] = article.url
res['author'] = article.author
res['overall_score']= float(res['score_title'])*0.75 + float(res['score_content'])*0.25
overall_score = res['overall_score']
if overall_score <= -50:
res['class']= 'Very Negative'
res['class_code'] = 4
elif overall_score <= 0:
res['class']= 'Negative'
res['class_code'] = 3
elif overall_score <= 50:
res['class']= 'Positive'
res['class_code'] = 2
elif overall_score <= 100:
res['class']= 'Very Positive'
res['class_code'] = 1
self.results.append(res)
def parse_news_article(self, line):
data = json.loads(line)
hash = data['hash']
title = data['title']
author = data['author']
content = data['content']
date = data['date']
topics = list(set(data['topics']))
feed = data['feed']
url = data['link']
return NewsArticle(hash,title,author,url,content,date,topics,feed)
if __name__ == '__main__':
file_name = "./log"
#max_articles = 1000
p = Parser(file_name,file_out='data-26-04.json')
p.parse()
p.write()
print('Finished')
def test():
    # Leftover exploratory code: it assumes `articles`, `sentiment` and
    # `max_articles` already exist at module scope, and the first plotting
    # block below runs before `x` is assigned in this function.
    plt.figure(figsize=(12,9))
plt.title('Articles: {}'.format(max_articles))
plt.plot(x[:,0],'x',label="Negative {0:.2f}".format(np.average(x[:,0])))
plt.plot(x[:,2],'+',label="Positive {0:.2f}".format(np.average(x[:,2])))
plt.plot(x[:,1],'.',label="Neutral {0:.2f}".format(np.average(x[:,1])))
plt.plot(x[:,3],'.',label="Compound {0:.2f}".format(np.average(x[:,3])))
plt.legend()
x = []
for i in range(0,max_articles):
x.append(articles[i].produce_content_scores(sentiment))
x = np.array(x)
print(x[:,0])
plt.figure(figsize=(12,9))
plt.title('Articles: {}'.format(max_articles))
plt.plot(x[:,0],'x',label="Negative {0:.2f}".format(np.average(x[:,0])))
plt.plot(x[:,2],'+',label="Positive {0:.2f}".format(np.average(x[:,2])))
plt.plot(x[:,1],'.',label="Neutral {0:.2f}".format(np.average(x[:,1])))
plt.plot(x[:,3],'.',label="Compound {0:.2f}".format(np.average(x[:,3])))
plt.legend()
| 31.277174 | 137 | 0.559687 | 4,216 | 0.73258 | 0 | 0 | 0 | 0 | 0 | 0 | 985 | 0.171156 |
295e89c3127cdc64f86ba1f4504dbc0c0e95c2df | 1,214 | py | Python | ch05/recursion.py | laszlokiraly/LearningAlgorithms | 032a3cc409546619cf41220821d081cde54bbcce | [
"MIT"
]
| 74 | 2021-05-06T22:03:18.000Z | 2022-03-25T04:37:51.000Z | ch05/recursion.py | laszlokiraly/LearningAlgorithms | 032a3cc409546619cf41220821d081cde54bbcce | [
"MIT"
]
| null | null | null | ch05/recursion.py | laszlokiraly/LearningAlgorithms | 032a3cc409546619cf41220821d081cde54bbcce | [
"MIT"
]
| 19 | 2021-07-16T11:42:00.000Z | 2022-03-22T00:25:49.000Z | """Recursive implementations."""
def find_max(A):
"""invoke recursive function to find maximum value in A."""
def rmax(lo, hi):
"""Use recursion to find maximum value in A[lo:hi+1]."""
if lo == hi: return A[lo]
mid = (lo+hi) // 2
L = rmax(lo, mid)
R = rmax(mid+1, hi)
return max(L, R)
return rmax(0, len(A)-1)
def find_max_with_count(A):
"""Count number of comparisons."""
def frmax(lo, hi):
"""Use recursion to find maximum value in A[lo:hi+1] incl. count"""
if lo == hi: return (0, A[lo])
mid = (lo+hi)//2
ctleft,left = frmax(lo, mid)
ctright,right = frmax(mid+1, hi)
return (1+ctleft+ctright, max(left, right))
return frmax(0, len(A)-1)
def count(A,target):
"""invoke recursive function to return number of times target appears in A."""
def rcount(lo, hi, target):
"""Use recursion to find maximum value in A[lo:hi+1]."""
if lo == hi:
return 1 if A[lo] == target else 0
mid = (lo+hi)//2
left = rcount(lo, mid, target)
right = rcount(mid+1, hi, target)
return left + right
return rcount(0, len(A)-1, target)
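# Minimal usage sketch (added for illustration; not part of the original file).
# Each function splits its range in half and recurses, so the recursion depth
# is O(log n) while the total work stays O(n).
if __name__ == '__main__':
    sample = [3, 1, 4, 1, 5, 9, 2, 6, 5, 3]
    print(find_max(sample))              # 9
    print(find_max_with_count(sample))   # (9, 9): 9 comparisons, max is 9
    print(count(sample, 5))              # 5 occurs twice -> 2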
| 26.977778 | 82 | 0.555189 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 382 | 0.314662 |
295eeef6c40b7545564ffef7ae9d385146c2bde6 | 3,084 | py | Python | setup.py | koonimaru/DeepGMAP | 7daac354229fc25fba81649b741921345dc5db05 | [
"Apache-2.0"
]
| 11 | 2018-06-27T11:45:47.000Z | 2021-07-01T15:32:56.000Z | setup.py | koonimaru/DeepGMAP | 7daac354229fc25fba81649b741921345dc5db05 | [
"Apache-2.0"
]
| 3 | 2020-01-28T21:45:15.000Z | 2020-04-20T02:40:48.000Z | setup.py | koonimaru/DeepGMAP | 7daac354229fc25fba81649b741921345dc5db05 | [
"Apache-2.0"
]
| 1 | 2018-10-19T19:43:27.000Z | 2018-10-19T19:43:27.000Z | #from distutils.core import setup
from setuptools import setup, find_packages
from distutils.extension import Extension
import re
import os
import codecs
here = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
# intentionally *not* adding an encoding option to open, See:
# https://github.com/pypa/virtualenv/issues/201#issuecomment-3145690
with codecs.open(os.path.join(here, *parts), 'r') as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(
r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file,
re.M,
)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
try:
from Cython.Distutils import build_ext
except ImportError:
use_cython = False
else:
use_cython = True
cmdclass = { }
ext_modules = [ ]
if use_cython:
ext_modules += [
Extension("deepgmap.data_preprocessing_tools.seq_to_binary2", [ "deepgmap/data_preprocessing_tools/seq_to_binary2.pyx" ]),
#Extension("data_preprocessing_tools.queue", [ "deepgmap/data_preprocessing_tools/queue.pyx" ],libraries=["calg"]),
Extension("deepgmap.post_train_tools.cython_util", [ "deepgmap/post_train_tools/cython_util.pyx" ]),
]
cmdclass.update({ 'build_ext': build_ext })
else:
ext_modules += [
Extension("deepgmap.data_preprocessing_tools.seq_to_binary2", [ "deepgmap/data_preprocessing_tools/seq_to_binary2.c" ]),
Extension("deepgmap.post_train_tools.cython_util", [ "deepgmap/post_train_tools/cython_util.c" ]),
]
#print(find_version("deepgmap", "__init__.py"))
setup(
name='DeepGMAP',
#version=VERSION,
version=find_version("deepgmap", "__init__.py"),
description='Learning and predicting gene regulatory sequences in genomes',
author='Koh Onimaru',
author_email='[email protected]',
url='',
packages=['deepgmap','deepgmap.train','deepgmap.network_constructors','deepgmap.post_train_tools','deepgmap.data_preprocessing_tools','deepgmap.misc'],
#packages=find_packages('deepgmap'),
#packages=['deepgmap.'],
package_dir={'DeepGMAP':'deepgmap'},
#package_data = {
# '': ['enhancer_prediction/*', '*.pyx', '*.pxd', '*.c', '*.h'],
#},
scripts=['bin/deepgmap',
],
#packages=find_packages(),
cmdclass = cmdclass,
ext_modules=ext_modules,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: Apache Software License ',
'Operating System :: POSIX :: Linux',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
install_requires=['tensorflow>=1.15', 'numpy', 'matplotlib', 'sklearn', 'tornado', 'natsort', 'psutil', 'pyBigWig'],
long_description=open('README.rst').read(),
)
| 34.651685 | 155 | 0.664073 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,693 | 0.548962 |
295f637700f993cfd8e37b0ff39f106d2c2a6469 | 1,716 | py | Python | {{cookiecutter.project_slug}}/api/__init__.py | Steamboat/cookiecutter-devops | 6f07329c9e54b76e671a0308d343d2d9ebff5343 | [
"BSD-3-Clause"
]
| null | null | null | {{cookiecutter.project_slug}}/api/__init__.py | Steamboat/cookiecutter-devops | 6f07329c9e54b76e671a0308d343d2d9ebff5343 | [
"BSD-3-Clause"
]
| null | null | null | {{cookiecutter.project_slug}}/api/__init__.py | Steamboat/cookiecutter-devops | 6f07329c9e54b76e671a0308d343d2d9ebff5343 | [
"BSD-3-Clause"
]
| null | null | null |
import logging
from flask import Flask
from flask_sqlalchemy import SQLAlchemy as _BaseSQLAlchemy
from flask_migrate import Migrate
from flask_cors import CORS
from flask_talisman import Talisman
from flask_ipban import IpBan
from config import Config, get_logger_handler
# database
class SQLAlchemy(_BaseSQLAlchemy):
def apply_pool_defaults(self, app, options):
super(SQLAlchemy, self).apply_pool_defaults(app, options)
options["pool_pre_ping"] = True
db = SQLAlchemy()
migrate = Migrate()
cors = CORS()
talisman = Talisman()
global_config = Config()
ip_ban = IpBan(ban_seconds=200, ban_count=global_config.IP_BAN_LIST_COUNT)
# logging
logger = logging.getLogger('frontend')
def create_app(config_class=None):
app = Flask(__name__)
if config_class is None:
config_class = Config()
app.config.from_object(config_class)
db.init_app(app)
migrate.init_app(app, db)
# TODO - Refine and update when build pipeline is stable. Get from global_config
cors.init_app(app, origins=["http://localhost:5000", "http://localhost:3000", '*'])
if app.config["ENV"] in ("staging", "production"):
# Secure the application and implement best practice https redirects and a content security policy
talisman.init_app(app, content_security_policy=None)
# ip_ban.init_app(app)
# ip_ban.load_nuisances(global_config.IP_BAN_REGEX_FILE)
from api.routes import bp as api_bp
app.register_blueprint(api_bp)
if not app.debug and not app.testing:
app.logger.addHandler(get_logger_handler())
@app.teardown_appcontext
def shutdown_session(exception=None):
db.session.remove()
return app
from api import models
| 32.377358 | 106 | 0.740093 | 189 | 0.11014 | 0 | 0 | 94 | 0.054779 | 0 | 0 | 375 | 0.218531 |
295f7531aae2696a47947cc69a933b6673909fb5 | 4,937 | py | Python | weibospider/pipelines.py | czyczyyzc/WeiboSpider | 41b9c97cb01d41cb4a62efdd452451b5ef25bdbc | [
"MIT"
]
| 2 | 2021-03-26T03:02:52.000Z | 2021-04-01T11:08:46.000Z | weibospider/pipelines.py | czyczyyzc/WeiboSpider | 41b9c97cb01d41cb4a62efdd452451b5ef25bdbc | [
"MIT"
]
| null | null | null | weibospider/pipelines.py | czyczyyzc/WeiboSpider | 41b9c97cb01d41cb4a62efdd452451b5ef25bdbc | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
import os
import csv
import pymongo
from pymongo.errors import DuplicateKeyError
from settings import MONGO_HOST, MONGO_PORT, SAVE_ROOT
class MongoDBPipeline(object):
def __init__(self):
client = pymongo.MongoClient(MONGO_HOST, MONGO_PORT)
db = client['weibo']
self.Users = db["Users"]
self.Tweets = db["Tweets"]
self.Comments = db["Comments"]
self.Relationships = db["Relationships"]
self.Reposts = db["Reposts"]
def process_item(self, item, spider):
if spider.name == 'comment_spider':
self.insert_item(self.Comments, item)
elif spider.name == 'fan_spider':
self.insert_item(self.Relationships, item)
elif spider.name == 'follower_spider':
self.insert_item(self.Relationships, item)
elif spider.name == 'user_spider':
self.insert_item(self.Users, item)
elif spider.name == 'tweet_spider':
self.insert_item(self.Tweets, item)
elif spider.name == 'repost_spider':
self.insert_item(self.Reposts, item)
return item
@staticmethod
def insert_item(collection, item):
try:
collection.insert(dict(item))
except DuplicateKeyError:
pass
class CSVPipeline(object):
def __init__(self):
if not os.path.exists(SAVE_ROOT):
os.makedirs(SAVE_ROOT)
users_file = open(os.path.join(SAVE_ROOT, 'users.csv'), 'w', encoding='utf-8-sig', newline='')
tweets_file = open(os.path.join(SAVE_ROOT, 'tweets.csv'), 'w', encoding='utf-8-sig', newline='')
comments_file = open(os.path.join(SAVE_ROOT, 'comments.csv'), 'w', encoding='utf-8-sig', newline='')
relationships_file = open(os.path.join(SAVE_ROOT, 'relationships.csv'), 'w', encoding='utf-8-sig', newline='')
reposts_file = open(os.path.join(SAVE_ROOT, 'reposts.csv'), 'w', encoding='utf-8-sig', newline='')
self.users_writer = csv.writer(users_file, dialect='excel')
self.tweets_writer = csv.writer(tweets_file, dialect='excel')
self.comments_writer = csv.writer(comments_file, dialect='excel')
self.relationships_writer = csv.writer(relationships_file, dialect='excel')
self.reposts_writer = csv.writer(reposts_file, dialect='excel')
self.users_head = False
self.tweets_head = False
self.comments_head = False
self.relationships_head = False
self.reposts_head = False
self.users_ids = []
self.tweets_ids = []
self.comments_ids = []
self.relationships_ids = []
self.reposts_ids = []
def process_item(self, item, spider):
item = dict(item)
if spider.name == 'comment_spider':
if not self.comments_head:
self.comments_writer.writerow(list(item.keys()))
self.comments_head = True
# if item['_id'] not in self.comments_ids:
self.comments_writer.writerow(list(item.values()))
self.comments_ids.append(item['_id'])
elif spider.name == 'fan_spider':
if not self.relationships_head:
self.relationships_writer.writerow(list(item.keys()))
self.relationships_head = True
# if item['_id'] not in self.relationships_ids:
self.relationships_writer.writerow(list(item.values()))
self.relationships_ids.append(item['_id'])
elif spider.name == 'follower_spider':
if not self.relationships_head:
self.relationships_writer.writerow(list(item.keys()))
self.relationships_head = True
# if item['_id'] not in self.relationships_ids:
self.relationships_writer.writerow(list(item.values()))
self.relationships_ids.append(item['_id'])
elif spider.name == 'user_spider':
if not self.users_head:
self.users_writer.writerow(list(item.keys()))
self.users_head = True
# if item['_id'] not in self.users_ids:
self.users_writer.writerow(list(item.values()))
self.users_ids.append(item['_id'])
elif spider.name == 'tweet_spider':
if not self.tweets_head:
self.tweets_writer.writerow(list(item.keys()))
self.tweets_head = True
# if item['_id'] not in self.tweets_ids:
self.tweets_writer.writerow(list(item.values()))
self.tweets_ids.append(item['_id'])
elif spider.name == 'repost_spider':
if not self.reposts_head:
self.reposts_writer.writerow(list(item.keys()))
self.reposts_head = True
# if item['_id'] not in self.reposts_ids:
self.reposts_writer.writerow(list(item.values()))
self.reposts_ids.append(item['_id'])
return item
| 40.467213 | 118 | 0.611302 | 4,771 | 0.966376 | 0 | 0 | 158 | 0.032003 | 0 | 0 | 723 | 0.146445 |
295f767e353179afb030c3f6f2c390f8073634e9 | 6,020 | py | Python | tests/test_gc3_config.py | ericmharris/gc3-query | 0bf5226130aafbb1974aeb96d93ee1996833e87d | [
"MIT"
]
| null | null | null | tests/test_gc3_config.py | ericmharris/gc3-query | 0bf5226130aafbb1974aeb96d93ee1996833e87d | [
"MIT"
]
| null | null | null | tests/test_gc3_config.py | ericmharris/gc3-query | 0bf5226130aafbb1974aeb96d93ee1996833e87d | [
"MIT"
]
| null | null | null | from pathlib import Path
from requests.auth import _basic_auth_str
import pytest
from bravado_core.formatter import SwaggerFormat, NO_OP
from gc3_query.lib.gc3_config import GC3Config, IDMCredential
TEST_BASE_DIR: Path = Path(__file__).parent.joinpath("GC3Config")
config_dir = TEST_BASE_DIR.joinpath("config")
def test_setup():
assert TEST_BASE_DIR.exists()
assert config_dir.exists()
def test_init():
gc3_config = GC3Config()
assert 'gc30003' in gc3_config['idm']['domains']
assert gc3_config.user.cloud_username == '[email protected]'
def test_set_credential():
gc3_config = GC3Config()
assert 'gc3test' in gc3_config['idm']['domains']
assert gc3_config.user.cloud_username == '[email protected]'
credential = gc3_config.set_credential(idm_domain_name='gc3test', password='Welcome123' )
assert credential
assert credential.password == 'Welcome123'
assert credential.idm_domain_name == 'gc3test'
def test_set_gc3pilot_credential():
gc3_config = GC3Config()
assert 'gc3pilot' in gc3_config['idm']['domains']
assert gc3_config.user.cloud_username == '[email protected]'
credential = gc3_config.set_credential(idm_domain_name='gc3pilot', password='V@nadium123!' )
assert credential
assert credential.password == 'V@nadium123!'
assert credential.idm_domain_name == 'gc3pilot'
@pytest.fixture()
def get_credential_setup() -> IDMCredential:
gc3_config = GC3Config()
assert 'gc3test' in gc3_config['idm']['domains']
assert gc3_config.user.cloud_username == '[email protected]'
credential = gc3_config.set_credential(idm_domain_name='gc3test', password='123Welcome' )
yield (credential)
def test_load_atoml_files_individually(get_credential_setup):
credential = get_credential_setup
gc3_config = GC3Config()
assert 'gc3test' in gc3_config['idm']['domains']
assert gc3_config.user.cloud_username == '[email protected]'
check_credential = gc3_config.get_credential(idm_domain_name='gc3test')
assert check_credential==credential
def test_credential_basic_auth(get_credential_setup):
credential = get_credential_setup
credential_expected_basic_auth =_basic_auth_str('[email protected]', '123Welcome')
gc3_config = GC3Config()
check_credential = gc3_config.get_credential(idm_domain_name='gc30003')
assert gc3_config.user.cloud_username == '[email protected]'
assert check_credential.idm_domain_name=='gc30003'
assert check_credential.basic_auth_str.startswith('Basic')
assert check_credential.basic_auth_str != credential.basic_auth_str
def test_get_main_credential():
gc3_config = GC3Config()
check_credential = gc3_config.get_credential(idm_domain_name='gc30003')
assert gc3_config.user.cloud_username == '[email protected]'
assert check_credential.idm_domain_name=='gc30003'
# @pytest.fixture()
# def get_bravado_config_setup():
# gc3_config = GC3Config()
# assert 'iaas_classic' in gc3_config
# yield (gc3_config)
#
# def test_bravado_client_config(get_bravado_config_setup):
# gc3_config = get_bravado_config_setup
# assert 'iaas_classic' in gc3_config
# bravado_client_config = gc3_config.bravado_client_config
# assert bravado_client_config
# assert 'formats' not in bravado_client_config
# assert not 'include_missing_properties' in bravado_client_config
# assert 'also_return_response' in bravado_client_config
# bravado_client_config_2 = gc3_config.bravado_client_config
# assert bravado_client_config==bravado_client_config_2
# assert bravado_client_config is not bravado_client_config_2
# assert isinstance(bravado_client_config, dict)
#
# def test_bravado_core_config(get_bravado_config_setup):
# gc3_config = get_bravado_config_setup
# assert 'iaas_classic' in gc3_config
# bravado_core_config = gc3_config.bravado_core_config
# assert bravado_core_config
# assert 'formats' in bravado_core_config
# assert 'include_missing_properties' in bravado_core_config
# assert not 'also_return_response' in bravado_core_config
# bravado_core_config_2 = gc3_config.bravado_core_config
# assert bravado_core_config==bravado_core_config_2
# assert bravado_core_config is not bravado_core_config_2
# assert isinstance(bravado_core_config, dict)
# assert isinstance(bravado_core_config['formats'], list)
#
#
#
# def test_bravado_config(get_bravado_config_setup):
# gc3_config = get_bravado_config_setup
# assert 'iaas_classic' in gc3_config
# bravado_config = gc3_config.bravado_config
# assert bravado_config
# assert 'formats' in bravado_config
# assert 'include_missing_properties' in bravado_config
# assert 'also_return_response' in bravado_config
# bravado_config_2 = gc3_config.bravado_config
# assert bravado_config==bravado_config_2
# assert bravado_config is not bravado_config_2
# assert isinstance(bravado_config, dict)
# assert isinstance(bravado_config['formats'], list)
#
@pytest.fixture()
def get_constants_setup():
gc3_config = GC3Config()
assert 'iaas_classic' in gc3_config
yield (gc3_config)
def test_open_api_catalog_dir(get_constants_setup):
gc3_config = get_constants_setup
open_api_catalog_dir = gc3_config.OPEN_API_CATALOG_DIR
assert open_api_catalog_dir
# def test_BRAVADO_CONFIG(get_constants_setup):
# gc3_config = get_constants_setup
# bravado_config = gc3_config.BRAVADO_CONFIG
# assert bravado_config
# assert 'formats' in bravado_config
# assert 'include_missing_properties' in bravado_config
# assert 'also_return_response' in bravado_config
# assert isinstance(bravado_config, dict)
# assert isinstance(bravado_config['formats'], list)
# assert bravado_config['formats']
# formats = [f.format for f in bravado_config['formats']]
# assert 'json-bool' in formats
# assert all([isinstance(i , SwaggerFormat) for i in bravado_config['formats']])
| 39.605263 | 96 | 0.767608 | 0 | 0 | 431 | 0.071595 | 467 | 0.077575 | 0 | 0 | 3,259 | 0.541362 |
2960cfa3589dae062b2a5ee5a75ad678bb175e9d | 2,871 | py | Python | lab6/server/datapredict.py | zhiji95/iot | 4202f00a79b429d5f5083bca6e914fcff09df294 | [
"Apache-2.0"
]
| 2 | 2019-09-20T01:38:40.000Z | 2020-10-13T21:18:18.000Z | lab6/server/datapredict.py | zw2497/4764 | 28caec1947c1b1479d2ec9c8ecba8cd599d66d23 | [
"Apache-2.0"
]
| null | null | null | lab6/server/datapredict.py | zw2497/4764 | 28caec1947c1b1479d2ec9c8ecba8cd599d66d23 | [
"Apache-2.0"
]
| null | null | null | import machine
from machine import *
import ssd1306
import time
import socket
import urequests as requests
import json
word = {'body':8}
labels = ['c', 'o', 'l', 'u', 'm', 'b', 'i', 'a','null']
HOST = '18.218.158.249'
PORT = 8080
flag = 0
stop = False
send = False  # read in the main loop below; must exist before the first pass
data = {}
xdata = []
ydata = []
n = 0
def dp(d):
if (d > 128):
return d - 255
return d
def do_connect():
import network
wlan = network.WLAN(network.STA_IF)
wlan.active(True)
if not wlan.isconnected():
print('connecting to network...')
wlan.connect(b'Columbia University')
while not wlan.isconnected():
pass
print('network config:', wlan.ifconfig())
do_connect()
def http_post(url, d):
r = requests.post(url, data=json.dumps(d))
return r.json()
def sendData():
global label
global xdata
global ydata
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
l = {
"label": 'a',
"n": 0,
"number": len(xdata),
"content": {
"data": {
"x": xdata,
"y": ydata
}
}
}
l = json.dumps(l).encode()
s.sendall(l)
data = s.recv(1024)
data = json.loads(data.decode())
xdata, ydata = [], []
return data
def switchAcallback(p):
global flag
time.sleep(0.1)
if p.value() == 1:
flag = 1
def switchCcallback(p):
    global send
    if p.value() == 1:
        send = True
switchA = machine.Pin(0, machine.Pin.IN, machine.Pin.PULL_UP)
switchA.irq(trigger=machine.Pin.IRQ_RISING, handler=switchAcallback)
switchC = machine.Pin(2, machine.Pin.IN, machine.Pin.PULL_UP)
switchC.irq(trigger=machine.Pin.IRQ_RISING, handler=switchCcallback)
spi = machine.SPI(1, baudrate=2000000, polarity=1, phase=1)
cs = machine.Pin(15, machine.Pin.OUT)
cs.value(0)
spi.write(b'\x2d')
spi.write(b'\x2b')
cs.value(1)
cs.value(0)
spi.write(b'\x31')
spi.write(b'\x0f')
cs.value(1)
i2c = machine.I2C(-1, machine.Pin(5), machine.Pin(4))
oled = ssd1306.SSD1306_I2C(128, 32, i2c)
while True:
x = 0
y = 0
sendstatus = "null"
if (flag):
cs.value(0)
test1 = spi.read(5, 0xf2)
cs.value(1)
cs.value(0)
test2 = spi.read(5, 0xf3)
cs.value(1)
cs.value(0)
test3 = spi.read(5, 0xf4)
cs.value(1)
cs.value(0)
test4 = spi.read(5, 0xf5)
cs.value(1)
x = dp(test2[1])
y = dp(test4[1])
xdata.append(x)
ydata.append(y)
sendstatus = "collect" + str(len(xdata)) + ' '+ ' ' + str(x) + ' ' + str(y)
if send:
word = sendData()
sendstatus = "send success"
flag = 0
send = False
oled.fill(0)
oled.text(labels[word['body']], 0, 0)
oled.text(sendstatus, 0,10)
oled.show()
| 20.804348 | 83 | 0.554859 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 231 | 0.08046 |
2960f549fc004cf3590c25e915c7395ebd3b5e4d | 79 | py | Python | Geometry/VeryForwardGeometry/python/dd4hep/geometryRPFromDD_2021_cfi.py | PKUfudawei/cmssw | 8fbb5ce74398269c8a32956d7c7943766770c093 | [
"Apache-2.0"
]
| 2 | 2020-10-26T18:40:32.000Z | 2021-04-10T16:33:25.000Z | Geometry/VeryForwardGeometry/python/dd4hep/geometryRPFromDD_2021_cfi.py | gartung/cmssw | 3072dde3ce94dcd1791d778988198a44cde02162 | [
"Apache-2.0"
]
| 25 | 2016-06-24T20:55:32.000Z | 2022-02-01T19:24:45.000Z | Geometry/VeryForwardGeometry/python/dd4hep/geometryRPFromDD_2021_cfi.py | gartung/cmssw | 3072dde3ce94dcd1791d778988198a44cde02162 | [
"Apache-2.0"
]
| 8 | 2016-03-25T07:17:43.000Z | 2021-07-08T17:11:21.000Z | from Geometry.VeryForwardGeometry.dd4hep.v5.geometryRPFromDD_2021_cfi import *
| 39.5 | 78 | 0.886076 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2962e10ff2cdb13a6dd7a8ef80474fffa61365b3 | 1,464 | py | Python | examples/plots/warmup_schedule.py | shuoyangd/pytorch_warmup | b3557afa6fcfc04e9ddc6ff08a1ae51e8a0ce5df | [
"MIT"
]
| 170 | 2019-11-03T06:14:42.000Z | 2022-03-18T08:21:44.000Z | examples/plots/warmup_schedule.py | shuoyangd/pytorch_warmup | b3557afa6fcfc04e9ddc6ff08a1ae51e8a0ce5df | [
"MIT"
]
| 5 | 2020-05-18T16:53:33.000Z | 2021-11-12T13:03:14.000Z | examples/plots/warmup_schedule.py | shuoyangd/pytorch_warmup | b3557afa6fcfc04e9ddc6ff08a1ae51e8a0ce5df | [
"MIT"
]
| 21 | 2019-11-06T10:55:21.000Z | 2022-02-23T21:38:12.000Z | import argparse
import matplotlib.pyplot as plt
import torch
from pytorch_warmup import *
def get_rates(warmup_cls, beta2, max_step):
rates = []
p = torch.nn.Parameter(torch.arange(10, dtype=torch.float32))
optimizer = torch.optim.Adam([{'params': p}], lr=1.0, betas=(0.9, beta2))
lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda step: 1.0)
warmup_scheduler = warmup_cls(optimizer)
for step in range(1, max_step+1):
rates.append(optimizer.param_groups[0]['lr'])
optimizer.zero_grad()
optimizer.step()
lr_scheduler.step()
warmup_scheduler.dampen()
return rates
parser = argparse.ArgumentParser(description='Warmup schedule')
parser.add_argument('--output', type=str, default='none',
choices=['none', 'png', 'pdf'],
help='Output file type (default: none)')
args = parser.parse_args()
beta2 = 0.999
max_step = 3000
plt.plot(range(1, max_step+1), get_rates(RAdamWarmup, beta2, max_step), label='RAdam')
plt.plot(range(1, max_step+1), get_rates(UntunedExponentialWarmup, beta2, max_step), label='Untuned Exponential')
plt.plot(range(1, max_step+1), get_rates(UntunedLinearWarmup, beta2, max_step), label='Untuned Linear')
plt.legend()
plt.title('Warmup Schedule')
plt.xlabel('Iteration')
plt.ylabel(r'Warmup factor $(\omega_t)$')
if args.output == 'none':
plt.show()
else:
plt.savefig(f'warmup_schedule.{args.output}')
| 34.857143 | 113 | 0.693306 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 234 | 0.159836 |
2965377859485f3e331393d42e82329e9f5b3052 | 2,107 | py | Python | plugins/httpev.py | wohali/gizzy | c9d4ee9cdcf6fdbf260869365b944f29c660e6aa | [
"Apache-2.0"
]
| 3 | 2015-09-11T23:34:36.000Z | 2018-04-05T21:17:08.000Z | plugins/httpev.py | wohali/gizzy | c9d4ee9cdcf6fdbf260869365b944f29c660e6aa | [
"Apache-2.0"
]
| null | null | null | plugins/httpev.py | wohali/gizzy | c9d4ee9cdcf6fdbf260869365b944f29c660e6aa | [
"Apache-2.0"
]
| null | null | null | """\
This plugin merely enables other plugins to accept data over HTTP. If
a plugin defines a module level function named "httpev" it will be
invoked for POST requests to the url http://$hostname/event/$pluginname.
The function is invoked from the thread in the web.py request context
and as such has access to the full web.py API.
"""
import base64
import json
import threading
import web
web.config.debug = False
class Event(object):
def POST(self, plugin):
self.check_authorized()
func = self.find_handler(plugin)
try:
func()
except web.webapi.HTTPError:
raise
except:
log.exception("Plugin '%s' broke handling HTTP event" % plugin)
raise web.webapi.internalerror()
def check_authorized(self):
auth = web.ctx.env.get('HTTP_AUTHORIZATION')
if auth is None:
raise web.webapi.unauthorized()
if not auth.startswith("Basic "):
raise web.webapi.unauthorized()
try:
auth = auth.split(None, 1)[1]
raw = base64.decodestring(auth)
if tuple(raw.split(":", 1)) == config.httpev["auth"]:
return
except:
raise web.webapi.badrequest("Invalid Authorization header")
raise web.webapi.unauthorized()
def find_handler(self, name):
for p in plugin_manager.plugins:
if p.name == name:
func = p.data.get("httpev")
if callable(func):
return func
raise web.webapi.notfound()
class Server(threading.Thread):
def __init__(self):
super(Server, self).__init__()
self.setDaemon(True)
self.urls = ("/event/(.+)", "Event")
self.app = web.application(self.urls, {"Event": Event})
self.addr = ('0.0.0.0', config.httpev["port"])
self.srv = web.httpserver.WSGIServer(self.addr, self.app.wsgifunc())
def stop(self):
self.srv.stop()
def run(self):
self.srv.start()
def load():
s = Server()
s.start()
return s
def unload(s):
s.stop()
| 26.670886 | 76 | 0.591362 | 1,610 | 0.76412 | 0 | 0 | 0 | 0 | 0 | 0 | 491 | 0.233033 |
29656dc8827f4e4fcb777d91bc04e2895b6de0ad | 773 | py | Python | ex056.py | danilodelucio/Exercicios_Curso_em_Video | d59e1b4efaf27dd0fc828a608201613c69ac333d | [
"MIT"
]
| null | null | null | ex056.py | danilodelucio/Exercicios_Curso_em_Video | d59e1b4efaf27dd0fc828a608201613c69ac333d | [
"MIT"
]
| null | null | null | ex056.py | danilodelucio/Exercicios_Curso_em_Video | d59e1b4efaf27dd0fc828a608201613c69ac333d | [
"MIT"
]
| null | null | null | somaIdade = 0
maiorIdade = 0
nomeVelho = ''
totmulher20 = 0
for p in range(1, 5):
print('---- {}ª PESSOA ----'.format(p))
nome = str(input('Nome: ')).strip()
idade = int(input('Idade: '))
sexo = str(input('Sexo [M/F]: '))
somaIdade += idade
if p == 1 and sexo in 'Mm':
maiorIdade = idade
nomeVelho = nome
if sexo in 'Mm' and idade > maiorIdade:
maiorIdade = idade
nomeVelho = nome
if sexo in 'Ff' and idade < 20:
totmulher20 += 1
mediaIdade = int(somaIdade / 4)
print('A média de idade do grupo de pessoas é de {} anos.'.format(mediaIdade))
print('O homem mais velho tem {} anos e se chama {}.'.format(maiorIdade, nomeVelho))
print('Ao todo são {} mulher com menos de 20 anos.'.format(totmulher20)) | 30.92 | 84 | 0.606727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 215 | 0.276705 |
2966debf755863b57841211c2eb24e99ff45937a | 6,583 | py | Python | python/promort.py | simleo/promort_pipeline | 03b9d3553a3dade57d0007e230230b02dd70832f | [
"MIT"
]
| null | null | null | python/promort.py | simleo/promort_pipeline | 03b9d3553a3dade57d0007e230230b02dd70832f | [
"MIT"
]
| null | null | null | python/promort.py | simleo/promort_pipeline | 03b9d3553a3dade57d0007e230230b02dd70832f | [
"MIT"
]
| 3 | 2020-07-29T15:03:40.000Z | 2020-10-06T11:16:04.000Z | """\
PROMORT example.
"""
import argparse
import random
import sys
import os
import numpy as np
import pyecvl.ecvl as ecvl
import pyeddl.eddl as eddl
from pyeddl.tensor import Tensor
import models
def VGG16(in_layer, num_classes):
x = in_layer
x = eddl.ReLu(eddl.Conv(x, 64, [3, 3]))
x = eddl.MaxPool(eddl.ReLu(eddl.Conv(x, 64, [3, 3])), [2, 2], [2, 2])
x = eddl.ReLu(eddl.Conv(x, 128, [3, 3]))
x = eddl.MaxPool(eddl.ReLu(eddl.Conv(x, 128, [3, 3])), [2, 2], [2, 2])
x = eddl.ReLu(eddl.Conv(x, 256, [3, 3]))
x = eddl.ReLu(eddl.Conv(x, 256, [3, 3]))
x = eddl.MaxPool(eddl.ReLu(eddl.Conv(x, 256, [3, 3])), [2, 2], [2, 2])
x = eddl.ReLu(eddl.Conv(x, 512, [3, 3]))
x = eddl.ReLu(eddl.Conv(x, 512, [3, 3]))
x = eddl.MaxPool(eddl.ReLu(eddl.Conv(x, 512, [3, 3])), [2, 2], [2, 2])
x = eddl.ReLu(eddl.Conv(x, 512, [3, 3]))
x = eddl.ReLu(eddl.Conv(x, 512, [3, 3]))
x = eddl.MaxPool(eddl.ReLu(eddl.Conv(x, 512, [3, 3])), [2, 2], [2, 2])
x = eddl.Reshape(x, [-1])
x = eddl.ReLu(eddl.Dense(x, 256))
x = eddl.Softmax(eddl.Dense(x, num_classes))
return x
def main(args):
num_classes = 2
size = [256, 256] # size of images
in_ = eddl.Input([3, size[0], size[1]])
out = models.VGG16_promort(in_, num_classes)
net = eddl.Model([in_], [out])
eddl.build(
net,
eddl.rmsprop(1e-6),
#eddl.sgd(0.001, 0.9),
["soft_cross_entropy"],
["categorical_accuracy"],
eddl.CS_GPU([1], mem="low_mem") if args.gpu else eddl.CS_CPU()
)
eddl.summary(net)
eddl.setlogfile(net, "promort_VGG16_classification")
training_augs = ecvl.SequentialAugmentationContainer([
ecvl.AugResizeDim(size)
#ecvl.AugMirror(.5),
#ecvl.AugFlip(.5),
#ecvl.AugRotate([-180, 180]),
#ecvl.AugAdditivePoissonNoise([0, 10]),
#ecvl.AugGammaContrast([0.5, 1.5]),
#ecvl.AugGaussianBlur([0, 0.8]),
#ecvl.AugCoarseDropout([0, 0.3], [0.02, 0.05], 0.5)
])
validation_augs = ecvl.SequentialAugmentationContainer([
ecvl.AugResizeDim(size),
])
dataset_augs = ecvl.DatasetAugmentations(
[training_augs, validation_augs, None]
)
print("Reading dataset")
#d = ecvl.DLDataset(args.in_ds, args.batch_size)
d = ecvl.DLDataset(args.in_ds, args.batch_size, dataset_augs)
x = Tensor([args.batch_size, d.n_channels_, size[0], size[1]])
y = Tensor([args.batch_size, len(d.classes_)])
num_samples_train = len(d.GetSplit())
num_batches_train = num_samples_train // args.batch_size
d.SetSplit(ecvl.SplitType.validation)
num_samples_val = len(d.GetSplit())
num_batches_val = num_samples_val // args.batch_size
indices = list(range(args.batch_size))
metric = eddl.getMetric("categorical_accuracy")
print("Starting training")
### Main loop across epochs
for e in range(args.epochs):
print("Epoch {:d}/{:d} - Training".format(e + 1, args.epochs),
flush=True)
if args.out_dir:
current_path = os.path.join(args.out_dir, "Epoch_%d" % e)
for c in d.classes_:
c_dir = os.path.join(current_path, c)
os.makedirs(c_dir, exist_ok=True)
d.SetSplit(ecvl.SplitType.training)
eddl.reset_loss(net)
total_metric = []
s = d.GetSplit()
random.shuffle(s)
d.split_.training_ = s
d.ResetAllBatches()
### Looping across batches of training data
for b in range(num_batches_train):
print("Epoch {:d}/{:d} (batch {:d}/{:d}) - ".format(
e + 1, args.epochs, b + 1, num_batches_train
), end="", flush=True)
d.LoadBatch(x, y)
x.div_(255.0)
tx, ty = [x], [y]
#print (tx[0].info())
eddl.train_batch(net, tx, ty, indices)
#eddl.print_loss(net, b)
instances = (b+1) * args.batch_size
print ("loss = %.3f, acc = %.3f" % (net.fiterr[0]/instances, net.fiterr[1]/instances))
#print()
print("Saving weights")
eddl.save(net, "promort_checkpoint_%s.bin" % e, "bin")
### Evaluation on validation set
print("Epoch %d/%d - Evaluation" % (e + 1, args.epochs), flush=True)
d.SetSplit(ecvl.SplitType.validation)
for b in range(num_batches_val):
n = 0
print("Epoch {:d}/{:d} (batch {:d}/{:d}) - ".format(
e + 1, args.epochs, b + 1, num_batches_val
), end="", flush=True)
d.LoadBatch(x, y)
x.div_(255.0)
eddl.forward(net, [x])
output = eddl.getOutput(out)
sum_ = 0.0
for k in range(args.batch_size):
result = output.select([str(k)])
target = y.select([str(k)])
ca = metric.value(target, result)
total_metric.append(ca)
sum_ += ca
if args.out_dir:
result_a = np.array(result, copy=False)
target_a = np.array(target, copy=False)
classe = np.argmax(result_a).item()
gt_class = np.argmax(target_a).item()
single_image = x.select([str(k)])
img_t = ecvl.TensorToView(single_image)
img_t.colortype_ = ecvl.ColorType.BGR
single_image.mult_(255.)
filename = d.samples_[d.GetSplit()[n]].location_[0]
head, tail = os.path.splitext(os.path.basename(filename))
bname = "%s_gt_class_%s.png" % (head, gt_class)
cur_path = os.path.join(
current_path, d.classes_[classe], bname
)
ecvl.ImWrite(cur_path, img_t)
n += 1
print("categorical_accuracy:", sum_ / args.batch_size)
total_avg = sum(total_metric) / len(total_metric)
print("Total categorical accuracy:", total_avg)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("in_ds", metavar="INPUT_DATASET")
parser.add_argument("--epochs", type=int, metavar="INT", default=50)
parser.add_argument("--batch-size", type=int, metavar="INT", default=32)
parser.add_argument("--gpu", action="store_true")
parser.add_argument("--out-dir", metavar="DIR",
help="if set, save images in this directory")
main(parser.parse_args())
| 37.19209 | 98 | 0.556433 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,058 | 0.160717 |
2967592aac9355f4e077c19d82c1790326f4a71b | 343 | py | Python | src/view/services_update_page.py | nbilbo/services_manager | 74e0471a1101305303a96d39963cc98fc0645a64 | [
"MIT"
]
| null | null | null | src/view/services_update_page.py | nbilbo/services_manager | 74e0471a1101305303a96d39963cc98fc0645a64 | [
"MIT"
]
| null | null | null | src/view/services_update_page.py | nbilbo/services_manager | 74e0471a1101305303a96d39963cc98fc0645a64 | [
"MIT"
]
| null | null | null | from src.view.services_page import ServicesPage
from src.view.services_add_page import ServicesAddPage
class ServicesUpdatePage(ServicesAddPage):
def __init__(self, parent, *args, **kwargs):
super().__init__(parent, *args, **kwargs)
self.set_title("Update service")
self.set_confirm_button_text("Update")
| 34.3 | 54 | 0.723032 | 229 | 0.667638 | 0 | 0 | 0 | 0 | 0 | 0 | 24 | 0.069971 |
2967a056b02745df6754455d5a9a7411cbb1bfd2 | 7,543 | py | Python | Lib/site-packages/wagtail/utils/l18n/translation.py | SyahmiAmin/belikilo | 0a26dadb514683456ea0dbdcbcfcbf65e09d5dbb | [
"bzip2-1.0.6"
]
| null | null | null | Lib/site-packages/wagtail/utils/l18n/translation.py | SyahmiAmin/belikilo | 0a26dadb514683456ea0dbdcbcfcbf65e09d5dbb | [
"bzip2-1.0.6"
]
| null | null | null | Lib/site-packages/wagtail/utils/l18n/translation.py | SyahmiAmin/belikilo | 0a26dadb514683456ea0dbdcbcfcbf65e09d5dbb | [
"bzip2-1.0.6"
]
| null | null | null | import os
import gettext
import bisect
from locale import getdefaultlocale
from collections.abc import MutableMapping
from copy import copy, deepcopy
import six
class Trans:
def __init__(self):
self.registry = {}
self.current = None
self.set(getdefaultlocale()[0])
def __getitem__(self, language):
if language:
try:
return self.registry[language]
except KeyError:
self.registry[language] = gettext.translation(
'l18n',
os.path.join(os.path.dirname(__file__), 'locale'),
languages=[language],
fallback=True
)
return self.registry[language]
else:
return None
def set(self, language):
self.current = self[language]
def gettext(self, s):
try:
return self.current.gettext(s)
except AttributeError:
return s
if six.PY2:
def ugettext(self, s):
try:
return self.current.ugettext(s)
except AttributeError:
return s
_trans = Trans()
def set_language(language=None):
_trans.set(language)
if six.PY2:
def translate(s, utf8=True, trans=_trans):
if trans:
if utf8:
return trans.ugettext(s)
return trans.gettext(s)
else:
return s
else:
def translate(s, utf8=True, trans=_trans):
if trans:
t = trans.gettext(s)
if utf8:
return t
return t.encode()
else:
return s
class L18NLazyObject:
def _value(self, utf8=True):
raise NotImplementedError
def __str__(self):
return self._value(utf8=six.PY3)
def __bytes__(self):
return self._value(utf8=False)
def __unicode__(self):
return self._value(utf8=True)
class L18NLazyString(L18NLazyObject):
def __init__(self, s):
self._str = s
def __copy__(self):
return self.__class__(self._str)
def __deepcopy__(self, memo):
result = self.__copy__()
memo[id(self)] = result
return result
def _value(self, utf8=True):
return translate(self._str, utf8)
def __repr__(self):
return 'L18NLazyString <%s>' % repr(self._str)
def __getattr__(self, name):
# fallback to call the value's attribute in case it's not found in
# L18NLazyString
return getattr(self._value(), name)
class L18NLazyStringsList(L18NLazyObject):
def __init__(self, sep='/', *s):
# we assume that the separator and the strings have the same encoding
# (text_type)
self._sep = sep
self._strings = s
def __copy__(self):
return self.__class__(self._sep, *self._strings)
def __deepcopy__(self, memo):
result = self.__copy__()
memo[id(self)] = result
return result
def _value(self, utf8=True):
sep = self._sep
if utf8 and isinstance(sep, six.binary_type):
sep = sep.decode(encoding='utf-8')
elif not utf8 and isinstance(sep, six.text_type):
sep = sep.encode(encoding='utf-8')
return sep.join([translate(s, utf8)
for s in self._strings])
def __repr__(self):
return 'L18NLazyStringsList <%s>' % self._sep.join([
repr(s) for s in self._strings
])
def __getattr__(self, name):
# fallback to call the value's attribute in case it's not found in
# L18NLazyStringsList
return getattr(self._value(), name)
class L18NBaseMap(MutableMapping):
"""
Generic dictionary that returns lazy string or lazy string lists
"""
def __init__(self, *args, **kwargs):
self.store = dict(*args, **kwargs)
self.sorted = {}
def __copy__(self):
result = self.__class__()
result.store = self.store
result.sorted = self.sorted
return result
def __deepcopy__(self, memo):
result = self.__class__()
memo[id(self)] = result
result.store = deepcopy(self.store, memo)
result.sorted = deepcopy(self.sorted, memo)
return result
def __getitem__(self, key):
raise NotImplementedError
def __setitem__(self, key, value):
self.store[key] = value
for locale, (keys, values) in six.iteritems(self.sorted):
tr = translate(value, trans=_trans[locale])
i = bisect.bisect_left(values, tr)
keys.insert(i, key)
values.insert(i, tr)
def __delitem__(self, key):
del self.store[key]
for keys, values in self.sorted.values():
i = keys.index(key)
del keys[i]
del values[i]
def __iter__(self):
loc = _trans.current._info['language'] if _trans.current else None
try:
return iter(self.sorted[loc][0])
except KeyError:
keys = []
values = []
# we can't use iteritems here, as we need to call __getitem__
# via self[key]
for key in iter(self.store):
value = six.text_type(self[key])
i = bisect.bisect_left(values, value)
keys.insert(i, key)
values.insert(i, value)
self.sorted[loc] = (keys, values)
return iter(keys)
def __len__(self):
return len(self.store)
def subset(self, keys):
"""
Generates a subset of the current map (e.g. to retrieve only tzs in
common_timezones from the tz_cities or tz_fullnames maps)
"""
sub = self.__class__()
self_keys = set(self.store.keys())
subset_keys = self_keys.intersection(keys)
removed_keys = self_keys.difference(subset_keys)
sub.store = {k: self.store[k] for k in subset_keys}
for loc, sorted_items in six.iteritems(self.sorted):
loc_keys = copy(self.sorted[loc][0])
loc_values = copy(self.sorted[loc][1])
for k in removed_keys:
i = loc_keys.index(k)
del loc_keys[i]
del loc_values[i]
sub.sorted[loc] = (loc_keys, loc_values)
return sub
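    # Illustrative usage (added commentary, not part of the original source):
    # given one of the full maps built on these classes (e.g. l18n's
    # ``tz_fullnames``) and an iterable of wanted keys such as pytz's
    # ``common_timezones``, ``subset`` keeps only those entries while carrying
    # over any per-locale sort caches already built:
    #
    #     common = tz_fullnames.subset(pytz.common_timezones)
    #     set_language('fr')
    #     print(common['Europe/Moscow'])   # rendered in French, e.g. 'Europe/Moscou'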
class L18NMap(L18NBaseMap):
def __getitem__(self, key):
return L18NLazyString(self.store[key])
class L18NListMap(L18NBaseMap):
def __init__(self, sep='/', aux=None, *args, **kwargs):
self._sep = sep
self._aux = aux
super(L18NListMap, self).__init__(*args, **kwargs)
def __copy__(self):
result = super(L18NListMap, self).__copy__()
result._sep = self._sep
result._aux = self._aux
return result
def __deepcopy__(self, memo):
result = super(L18NListMap, self).__deepcopy__(memo)
result._sep = self._sep
result._aux = None if self._aux is None else deepcopy(self._aux, memo)
return result
def __getitem__(self, key):
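# split the key on the separator and replace the last segment with the full key,
# so every leading segment plus the complete key gets translated; segments missing
# from this map fall back to the auxiliary map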
strs = key.split(self._sep)
strs[-1] = key
lst = []
for s in strs:
try:
lst.append(self.store[s])
except KeyError:
lst.append(self._aux[s])
return L18NLazyStringsList(self._sep, *lst)
def subset(self, keys):
sub = super(L18NListMap, self).subset(keys)
sub._sep = self._sep
sub._aux = deepcopy(self._aux)
return sub
| 27.32971 | 78 | 0.571258 | 6,854 | 0.908657 | 0 | 0 | 0 | 0 | 0 | 0 | 655 | 0.086835 |
2967c010afb3c90f1b88a872839f1b992255abcc | 272 | py | Python | playground/sockets/server.py | tunki/lang-training | 79b9f59a7187053f540f9057c585747762ca8890 | [
"MIT"
]
| null | null | null | playground/sockets/server.py | tunki/lang-training | 79b9f59a7187053f540f9057c585747762ca8890 | [
"MIT"
]
| 4 | 2020-03-10T19:20:21.000Z | 2021-06-07T15:39:48.000Z | proglangs-learning/python/example_sockets/server.py | helq/old_code | a432faf1b340cb379190a2f2b11b997b02d1cd8d | [
"CC0-1.0"
]
| null | null | null | import socket
s = socket.socket()
s.bind(("localhost", 9999))
s.listen(1)
sc, addr = s.accept()
while True:
recibido = sc.recv(1024)
if recibido == "quit":
break
print "Recibido:", recibido
sc.send(recibido)
print "adios"
sc.close()
s.close()
| 13.6 | 31 | 0.617647 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 35 | 0.128676 |
29682fb767c90bc573a3f797e4f0ca061a3378d9 | 743 | py | Python | examples/example_contour.py | moghimis/geojsoncontour | 23f298cb5c5ae4b7000024423493e109a9cc908d | [
"MIT"
]
| 63 | 2016-10-31T06:55:47.000Z | 2022-02-04T06:47:32.000Z | examples/example_contour.py | moghimis/geojsoncontour | 23f298cb5c5ae4b7000024423493e109a9cc908d | [
"MIT"
]
| 20 | 2016-09-26T15:25:53.000Z | 2020-11-11T18:26:32.000Z | examples/example_contour.py | moghimis/geojsoncontour | 23f298cb5c5ae4b7000024423493e109a9cc908d | [
"MIT"
]
| 26 | 2016-06-15T02:39:10.000Z | 2022-02-04T06:48:15.000Z | import numpy
import matplotlib.pyplot as plt
import geojsoncontour
# Create lat and lon vectors and grid data
grid_size = 1.0
latrange = numpy.arange(-90.0, 90.0, grid_size)
lonrange = numpy.arange(-180.0, 180.0, grid_size)
X, Y = numpy.meshgrid(lonrange, latrange)
Z = numpy.sqrt(X * X + Y * Y)
n_contours = 10
levels = numpy.linspace(start=0, stop=100, num=n_contours)
# Create a contour plot plot from grid (lat, lon) data
figure = plt.figure()
ax = figure.add_subplot(111)
contour = ax.contour(lonrange, latrange, Z, levels=levels, cmap=plt.cm.jet)
# Convert matplotlib contour to geojson
geojsoncontour.contour_to_geojson(
contour=contour,
geojson_filepath='out.geojson',
min_angle_deg=10.0,
ndigits=3,
unit='m'
)
| 26.535714 | 75 | 0.729475 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 151 | 0.20323 |
296855e3082fc927d6f123b69b223d9a6934f75b | 1,861 | py | Python | Graphs/ConnectedComponents.py | PK-100/Competitive_Programming | d0863feaaa99462b2999e85dcf115f7a6c08bb8d | [
"MIT"
]
| 70 | 2018-06-25T21:20:15.000Z | 2022-03-24T03:55:17.000Z | Graphs/ConnectedComponents.py | An3sha/Competitive_Programming | ee7eadf51939a360d0b004d787ebabda583e92f0 | [
"MIT"
]
| 4 | 2018-09-04T13:12:20.000Z | 2021-06-20T08:29:12.000Z | Graphs/ConnectedComponents.py | An3sha/Competitive_Programming | ee7eadf51939a360d0b004d787ebabda583e92f0 | [
"MIT"
]
| 24 | 2018-12-26T05:15:32.000Z | 2022-01-23T23:04:54.000Z | #!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'countGroups' function below.
#
# The function is expected to return an INTEGER.
# The function accepts STRING_ARRAY related as parameter.
#
class Graph:
def __init__(self, V):
self.V = V
self.adj = [[] for i in range(V)]
def addEdge(self, a,b):
self.adj[a].append(b)
self.adj[b].append(a)
def dfs_util(self, temp, node, visited):
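# depth-first traversal: collects every node reachable from 'node' into temp and marks it visited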
visited[node] = True
temp.append(node)
for i in self.adj[node]:
if not visited[i]:
temp = self.dfs_util(temp, i, visited)
return temp
def countGroups(self):
"""
This is the classical concept of connected components in a Graph
"""
visited = [False] * self.V
groups = []
for node in range(self.V):
if not visited[node]:
temp = []
groups.append(self.dfs_util(temp, node, visited))
return groups
def convertMatrixToGraph(mat):
"""
Accept the input which is an adjacency matrix and return a Graph, which is an adjacency list
"""
n = len(mat)
g = Graph(n)
for i in range(n):
for j in range(n):
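# only take the upper triangle (j > i) so each undirected edge is added once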
if j > i and mat[i][j] == '1':
g.addEdge(i,j)
return g
def countGroups(related):
# Write your code here
graph = convertMatrixToGraph(related)
groups = graph.countGroups()
# print(groups)
return len(groups)
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
related_count = int(input().strip())
related = []
for _ in range(related_count):
related_item = input()
related.append(related_item)
result = countGroups(related)
fptr.write(str(result) + '\n')
fptr.close()
| 22.695122 | 96 | 0.576034 | 804 | 0.432026 | 0 | 0 | 0 | 0 | 0 | 0 | 432 | 0.232133 |
29699db1cd4c1b7712dc1f31ae88eb868493c3ed | 1,597 | py | Python | recognition/ml_model.py | hurschler/pig-face-recognition | 5834f3c89448a645ee0eaf2bbdade064f0c4be93 | [
"Apache-2.0"
]
| 1 | 2021-11-19T05:33:39.000Z | 2021-11-19T05:33:39.000Z | recognition/ml_model.py | hurschler/pig-face-recognition | 5834f3c89448a645ee0eaf2bbdade064f0c4be93 | [
"Apache-2.0"
]
| null | null | null | recognition/ml_model.py | hurschler/pig-face-recognition | 5834f3c89448a645ee0eaf2bbdade064f0c4be93 | [
"Apache-2.0"
]
| 1 | 2022-01-05T12:57:12.000Z | 2022-01-05T12:57:12.000Z | import logging.config
import util.logger_init
import numpy as np
import tensorflow as tf
from sklearn.metrics import confusion_matrix
from util.tensorboard_util import plot_confusion_matrix, plot_to_image
from tensorflow.python.keras.callbacks_v1 import TensorBoard
from keras import backend as K
class MlModel:
def get_model(self):
return self.model
def summary_print(self):
self.model.summary()
# Define your scheduling function
def scheduler(self, epoch):
return 0.001 * 0.95 ** epoch
def log_confusion_matrix(self, epoch, logs):
# Use the model to predict the values from the test_images.
test_pred_raw = self.model.predict(self.ml_data.x_test)
test_pred = np.argmax(test_pred_raw, axis=1)
# Calculate the confusion matrix using sklearn.metrics
cm = confusion_matrix(self.ml_data.y_test, test_pred)
figure = plot_confusion_matrix(cm, class_names=self.ml_data.pig_dict.values())
cm_image = plot_to_image(figure)
# Log the confusion matrix as an image summary.
with self.file_writer_cm.as_default():
tf.summary.image("Confusion Matrix", cm_image, step=epoch)
# Define TensorBoard callback child class
class LRTensorBoard(TensorBoard):
def __init__(self, log_dir, **kwargs): # add other arguments to __init__ if you need
super().__init__(log_dir=log_dir, **kwargs)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
logs.update({'lr': K.eval(self.model.optimizer.lr)})
super().on_epoch_end(epoch, logs)
| 31.313725 | 89 | 0.708203 | 1,251 | 0.783344 | 0 | 0 | 0 | 0 | 0 | 0 | 301 | 0.188478 |
296af987b0de54ddfc1944dd10981828dfe9bc78 | 1,803 | py | Python | requests/UpdateSubscriptionRequest.py | divinorum-webb/python-tableau-api | 9d3f130d63b15307ad2b23e2273b52790b8d9018 | [
"Apache-2.0"
]
| 1 | 2019-06-08T22:19:40.000Z | 2019-06-08T22:19:40.000Z | requests/UpdateSubscriptionRequest.py | divinorum-webb/python-tableau-api | 9d3f130d63b15307ad2b23e2273b52790b8d9018 | [
"Apache-2.0"
]
| null | null | null | requests/UpdateSubscriptionRequest.py | divinorum-webb/python-tableau-api | 9d3f130d63b15307ad2b23e2273b52790b8d9018 | [
"Apache-2.0"
]
| null | null | null | from .BaseRequest import BaseRequest
class UpdateSubscriptionRequest(BaseRequest):
"""
Update subscription request for generating API request URLs to Tableau Server.
:param ts_connection: The Tableau Server connection object.
:type ts_connection: class
:param new_subscription_subject: (Optional) A new subject for the subscription.
:type new_subscription_subject: string
:param new_schedule_id: (Optional) The ID of a schedule to associate this subscription with.
:type new_schedule_id: string
"""
def __init__(self,
ts_connection,
new_subscription_subject=None,
new_schedule_id=None):
super().__init__(ts_connection)
self._new_subscription_subject = new_subscription_subject
self._new_schedule_id = new_schedule_id
@property
def base_update_subscription_request(self):
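# only include the fields that were actually provided in the request body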
if self._new_subscription_subject and self._new_schedule_id:
self._request_body.update({
'subscription': {
'subject': self._new_subscription_subject,
'schedule': {'id': self._new_schedule_id}
}
})
elif self._new_subscription_subject and not self._new_schedule_id:
self._request_body.update({
'subscription': {
'subject': self._new_subscription_subject
}
})
else:
self._request_body.update({
'subscription': {
'schedule': {'id': self._new_schedule_id}
}
})
return self._request_body
def get_request(self):
return self.base_update_subscription_request
| 36.795918 | 108 | 0.601775 | 1,763 | 0.977815 | 0 | 0 | 819 | 0.454243 | 0 | 0 | 597 | 0.331115 |
296b3f67c1ceb01054558b270075a2c69d09bb88 | 2,293 | py | Python | doc/examples/cython/cython_main.py | hershg/ray | a1744f67fe954d8408c5b84e28ecccc130157f8e | [
"Apache-2.0"
]
| 2 | 2019-06-17T12:38:24.000Z | 2020-11-11T07:52:26.000Z | doc/examples/cython/cython_main.py | hershg/ray | a1744f67fe954d8408c5b84e28ecccc130157f8e | [
"Apache-2.0"
]
| 3 | 2018-08-15T19:19:25.000Z | 2021-06-30T01:54:46.000Z | doc/examples/cython/cython_main.py | hershg/ray | a1744f67fe954d8408c5b84e28ecccc130157f8e | [
"Apache-2.0"
]
| 2 | 2017-10-31T23:20:07.000Z | 2019-11-13T20:16:03.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ray
import click
import inspect
import numpy as np
import cython_examples as cyth
def run_func(func, *args, **kwargs):
"""Helper function for running examples"""
ray.init()
func = ray.remote(func)
# NOTE: kwargs not allowed for now
result = ray.get(func.remote(*args))
# Inspect the stack to get calling example
caller = inspect.stack()[1][3]
print("%s: %s" % (caller, str(result)))
return result
@click.group(context_settings={"help_option_names": ["-h", "--help"]})
def cli():
"""Working with Cython actors and functions in Ray"""
@cli.command()
def example1():
"""Cython def function"""
run_func(cyth.simple_func, 1, 2, 3)
@cli.command()
def example2():
"""Cython def function, recursive"""
run_func(cyth.fib, 10)
@cli.command()
def example3():
"""Cython def function, built-in typed parameter"""
# NOTE: Cython will attempt to cast argument to correct type
# NOTE: Floats will be cast to int, but string, for example will error
run_func(cyth.fib_int, 10)
@cli.command()
def example4():
"""Cython cpdef function"""
run_func(cyth.fib_cpdef, 10)
@cli.command()
def example5():
"""Cython wrapped cdef function"""
# NOTE: cdef functions are not exposed to Python
run_func(cyth.fib_cdef, 10)
@cli.command()
def example6():
"""Cython simple class"""
ray.init()
cls = ray.remote(cyth.simple_class)
a1 = cls.remote()
a2 = cls.remote()
result1 = ray.get(a1.increment.remote())
result2 = ray.get(a2.increment.remote())
print(result1, result2)
@cli.command()
def example7():
"""Cython with function from BrainIAK (masked log)"""
run_func(cyth.masked_log, np.array([-1.0, 0.0, 1.0, 2.0]))
@cli.command()
def example8():
"""Cython with blas. NOTE: requires scipy"""
# See cython_blas.pyx for argument documentation
mat = np.array(
[[[2.0, 2.0], [2.0, 2.0]], [[2.0, 2.0], [2.0, 2.0]]], dtype=np.float32)
result = np.zeros((2, 2), np.float32, order="C")
run_func(cyth.compute_kernel_matrix, "L", "T", 2, 2, 1.0, mat, 0, 2, 1.0,
result, 2)
if __name__ == "__main__":
cli()
| 20.845455 | 79 | 0.64239 | 0 | 0 | 0 | 0 | 1,670 | 0.728304 | 0 | 0 | 750 | 0.327082 |
296c4de61a467a172ab5bf54ab6a35b296d94e43 | 17,314 | py | Python | Simon/dev/main_age_classification.py | uncharted-distil/simon | 26e4e54e6de455bde8ee1a24634d060e1ec7babb | [
"MIT"
]
| null | null | null | Simon/dev/main_age_classification.py | uncharted-distil/simon | 26e4e54e6de455bde8ee1a24634d060e1ec7babb | [
"MIT"
]
| 2 | 2021-01-27T14:57:26.000Z | 2021-10-07T19:34:09.000Z | Simon/dev/main_age_classification.py | uncharted-distil/simon | 26e4e54e6de455bde8ee1a24634d060e1ec7babb | [
"MIT"
]
| 1 | 2021-03-18T19:13:05.000Z | 2021-03-18T19:13:05.000Z | from DataGenerator import *
from Encoder import *
import pandas as pd
from keras.models import Model
from keras.layers import Dense, Activation, Flatten, Input, Dropout, MaxPooling1D, Convolution1D
from keras.layers import LSTM, Lambda, merge, Masking
from keras.layers import Embedding, TimeDistributed
from keras.layers.normalization import BatchNormalization
from keras.optimizers import SGD
from keras.utils import np_utils
import numpy as np
import tensorflow as tf
import re
from keras import backend as K
import keras.callbacks
import sys
import os
import time
import matplotlib.pyplot as plt
import pickle
def binarize(x, sz=71):
return tf.to_float(tf.one_hot(x, sz, on_value=1, off_value=0, axis=-1))
def custom_multi_label_accuracy(y_true, y_pred):
# need some threshold-specific rounding code here, presently only for 0.5 thresh.
return K.mean(K.round(np.multiply(y_true,y_pred)),axis=0)
def eval_binary_accuracy(y_test, y_pred):
correct_indices = y_test==y_pred
all_correct_predictions = np.zeros(y_test.shape)
all_correct_predictions[correct_indices] = 1
#print("DEBUG::binary accuracy matrix")
#print(all_correct_predictions)
return np.mean(all_correct_predictions),np.mean(all_correct_predictions, axis=0),all_correct_predictions
def eval_confusion(y_test, y_pred):
wrong_indices = y_test!=y_pred
all_wrong_predictions = np.zeros(y_test.shape)
all_wrong_predictions[wrong_indices] = 1
#print("DEBUG::confusion matrix")
#print(all_wrong_predictions)
return np.mean(all_wrong_predictions),np.mean(all_wrong_predictions, axis=0),all_wrong_predictions
def eval_false_positives(y_test, y_pred):
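# false_positive_matrix[j, k] counts how often category j was wrongly predicted
# for a sample whose true label set contains category k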
false_positive_matrix = np.zeros((y_test.shape[1],y_test.shape[1]))
false_positives = np.multiply(y_pred,1-y_test)
# print(precision_matrix)
for i in np.arange(y_test.shape[0]):
for j in np.arange(y_test.shape[1]) :
if(false_positives[i,j]==1): #positive label for ith sample and jth predicted category
for k in np.arange(y_test.shape[1]):
if(y_test[i,k]==1): #positive label for ith sample and kth true category
# print("DEBUG::i,j,k")
# print("%d,%d,%d"%(i,j,k))
false_positive_matrix[j,k] +=1
# print("DEBUG::precision matrix")
# print(precision_matrix)
return np.sum(false_positive_matrix),np.sum(false_positive_matrix, axis=0),false_positive_matrix
def binarize_outshape(in_shape):
return in_shape[0], in_shape[1], 71
def max_1d(x):
return K.max(x, axis=1)
def striphtml(html):
p = re.compile(r'<.*?>')
return p.sub('', html)
def clean(s):
return re.sub(r'[^\x00-\x7f]', r'', s)
def setup_test_sets(X, y):
ids = np.arange(len(X))
np.random.shuffle(ids)
# shuffle
X = X[ids]
y = y[ids]
train_end = int(X.shape[0] * .6)
cross_validation_end = int(X.shape[0] * .3 + train_end)
test_end = int(X.shape[0] * .1 + cross_validation_end)
X_train = X[:train_end]
X_cv_test = X[train_end:cross_validation_end]
X_test = X[cross_validation_end:test_end]
y_train = y[:train_end]
y_cv_test = y[train_end:cross_validation_end]
y_test = y[cross_validation_end:test_end]
data = type('data_type', (object,), {'X_train' : X_train, 'X_cv_test': X_cv_test, 'X_test': X_test, 'y_train': y_train, 'y_cv_test': y_cv_test, 'y_test':y_test})
return data
def generate_model(max_len, max_cells, category_count):
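# character-level model: each cell is one-hot encoded per character, passed through
# stacked Conv1D + max-pooling layers and a bidirectional LSTM to build a sentence
# encoding; TimeDistributed applies that encoder to every cell, and a second
# bidirectional LSTM plus a dense softmax head classifies the document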
filter_length = [1, 3, 3]
nb_filter = [40, 200, 1000]
pool_length = 2
# document input
document = Input(shape=(max_cells, max_len), dtype='int64')
# sentence input
in_sentence = Input(shape=(max_len,), dtype='int64')
# char indices to one hot matrix, 1D sequence to 2D
embedded = Lambda(binarize, output_shape=binarize_outshape)(in_sentence)
# embedded: encodes sentence
for i in range(len(nb_filter)):
embedded = Convolution1D(nb_filter=nb_filter[i],
filter_length=filter_length[i],
border_mode='valid',
activation='relu',
init='glorot_normal',
subsample_length=1)(embedded)
embedded = Dropout(0.1)(embedded)
embedded = MaxPooling1D(pool_length=pool_length)(embedded)
forward_sent = LSTM(256, return_sequences=False, dropout_W=0.2,
dropout_U=0.2, consume_less='gpu')(embedded)
backward_sent = LSTM(256, return_sequences=False, dropout_W=0.2,
dropout_U=0.2, consume_less='gpu', go_backwards=True)(embedded)
sent_encode = merge([forward_sent, backward_sent],
mode='concat', concat_axis=-1)
sent_encode = Dropout(0.3)(sent_encode)
# sentence encoder
encoder = Model(input=in_sentence, output=sent_encode)
print(encoder.summary())
encoded = TimeDistributed(encoder)(document)
# encoded: sentences to bi-lstm for document encoding
forwards = LSTM(128, return_sequences=False, dropout_W=0.2,
dropout_U=0.2, consume_less='gpu')(encoded)
backwards = LSTM(128, return_sequences=False, dropout_W=0.2,
dropout_U=0.2, consume_less='gpu', go_backwards=True)(encoded)
merged = merge([forwards, backwards], mode='concat', concat_axis=-1)
output = Dropout(0.3)(merged)
output = Dense(128, activation='relu')(output)
output = Dropout(0.3)(output)
output = Dense(category_count, activation='softmax')(output)
# output = Activation('softmax')(output)
model = Model(input=document, output=output)
return model
# record history of training
class LossHistory(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.losses = []
self.accuracies = []
def on_batch_end(self, batch, logs={}):
self.losses.append(logs.get('loss'))
self.accuracies.append(logs.get('binary_accuracy'))
def plot_loss(history):
# summarize history for accuracy
plt.subplot('121')
plt.plot(history.history['binary_accuracy'])
plt.plot(history.history['val_binary_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
# summarize history for loss
plt.subplot('122')
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
def train_model(batch_size, checkpoint_dir, model, nb_epoch, data):
print("starting learning")
check_cb = keras.callbacks.ModelCheckpoint(checkpoint_dir + "text-class" + '.{epoch:02d}-{val_loss:.2f}.hdf5',
monitor='val_loss', verbose=0, save_best_only=True, mode='min')
earlystop_cb = keras.callbacks.EarlyStopping(monitor='val_loss', patience=7, verbose=1, mode='auto')
tbCallBack = keras.callbacks.TensorBoard(log_dir='./logs', histogram_freq=0, write_graph=True, write_images=False, embeddings_freq=0,
embeddings_layer_names=None, embeddings_metadata=None)
loss_history = LossHistory()
history = model.fit(data.X_train, data.y_train, validation_data=(data.X_cv_test, data.y_cv_test), batch_size=batch_size,
nb_epoch=nb_epoch, shuffle=True, callbacks=[earlystop_cb, check_cb, loss_history, tbCallBack])
print('losses: ')
print(history.history['loss'])
print('accuracies: ')
# print(history.history['acc'])
print(history.history['val_binary_accuracy'])
plot_loss(history)
def evaluate_model(max_cells, model, data, encoder, p_threshold):
print("Starting predictions:")
start = time.time()
scores = model.evaluate(data.X_test, data.y_test, verbose=0)
end = time.time()
print("Accuracy: %.2f%% \n Time: {0}s \n Time/example : {1}s/ex".format(
end - start, (end - start) / data.X_test.shape[0]) % (scores[1] * 100))
# return all predictions above a certain threshold
# first, the maximum probability/class
probabilities = model.predict(data.X_test, verbose=1)
# print("The prediction probabilities are:")
# print(probabilities)
m = np.amax(probabilities, axis=1)
max_index = np.argmax(probabilities, axis=1)
# print("Associated fixed category indices:")
# print(max_index)
with open('Categories.txt','r') as f:
Categories = f.read().splitlines()
print("Remember that the fixed categories are:")
print(Categories)
print("Most Likely Predicted Category/Labels are: ")
print((np.array(Categories))[max_index])
print("Associated max probabilities/confidences:")
print(m)
# next, all probabilities above a certain threshold
print("DEBUG::y_test:")
print(data.y_test)
prediction_indices = probabilities > p_threshold
y_pred = np.zeros(data.y_test.shape)
y_pred[prediction_indices] = 1
print("DEBUG::y_pred:")
print(y_pred)
print("'Binary' accuracy (true positives + true negatives) is:")
print(eval_binary_accuracy(data.y_test,y_pred))
print("'Binary' confusion (false positives + false negatives) is:")
print(eval_confusion(data.y_test,y_pred))
print("False positive matrix is:")
print(eval_false_positives(data.y_test,y_pred))
def main(checkpoint, data_count, data_cols, should_train, nb_epoch, null_pct, try_reuse_data, batch_size, execution_config):
maxlen = 280
max_cells = 1
p_threshold = 0.5
checkpoint_dir = "checkpoints/age_classification/"
if not os.path.isdir(checkpoint_dir):
os.makedirs(checkpoint_dir)
# read test data
dataset_name = "training_age"
print("Beginning Age Classifier Training...")
raw_data = pd.read_csv('data/age_classification/'+dataset_name+'.csv',
dtype='str', header=0, skip_blank_lines=True)
#print(raw_data)
print(raw_data.shape)
raw_data_low = raw_data.ix[raw_data['label']=='14_17',0]
raw_data_medium = raw_data.ix[raw_data['label']=='18_23',0]
raw_data_high = raw_data.ix[raw_data['label']=='24_plus',0]
print(raw_data_low.shape)
print(raw_data_medium.shape)
print(raw_data_high.shape)
raw_data = np.asarray(raw_data_low)[np.newaxis]
print(raw_data)
print(raw_data.shape)
header = [['14_17'],]*raw_data_low.shape[0]
raw_data = np.column_stack((raw_data,np.asarray(raw_data_medium)[np.newaxis]))
header.extend([['18_23'],]*raw_data_medium.shape[0])
raw_data = np.column_stack((raw_data,np.asarray(raw_data_high)[np.newaxis]))
header.extend([['24_plus'],]*raw_data_high.shape[0])
print("Final raw_data size is:")
print(raw_data.shape)
print("Corresponding header length is:")
print(len(header))
#print(header)
# transpose the data
raw_data = np.char.lower(np.transpose(raw_data).astype('U'))
# do other processing and encode the data
config = {}
if not should_train:
if execution_config is None:
raise TypeError
config = load_config(execution_config, checkpoint_dir)
encoder = config['encoder']
if checkpoint is None:
checkpoint = config['checkpoint']
else:
encoder = Encoder()
encoder.process(raw_data, max_cells)
# encode the data
X, y = encoder.encode_data(raw_data, header, maxlen)
max_cells = encoder.cur_max_cells
data = None
if should_train:
data = setup_test_sets(X, y)
else:
data = type('data_type', (object,), {'X_test': X, 'y_test':y})
print('Sample chars in X:{}'.format(X[2, 0:10]))
print('y:{}'.format(y[2]))
# need to know number of fixed categories to create model
category_count = y.shape[1]
print('Number of fixed categories is :')
print(category_count)
model = generate_model(maxlen, max_cells, category_count)
load_weights(checkpoint, config, model, checkpoint_dir)
model.compile(loss='categorical_crossentropy',
optimizer='adam', metrics=['binary_accuracy'])
if(should_train):
start = time.time()
train_model(batch_size, checkpoint_dir, model, nb_epoch, data)
end = time.time()
print("Time for training is %f sec"%(end-start))
config = { 'encoder' : encoder,
'checkpoint' : get_best_checkpoint(checkpoint_dir) }
save_config(config, checkpoint_dir)
#print("DEBUG::The actual headers are:")
#print(header)
evaluate_model(max_cells, model, data, encoder, p_threshold)
# Now, label unlabeled tweets
dataset_name = "jordans_collection2"
print("Beginning Age Classifier Labeling...")
raw_data = pd.read_csv('data/age_classification/'+dataset_name+'.csv',
dtype='str', header=0, skip_blank_lines=True,lineterminator='\n')
raw_data = raw_data.ix[:,2]
raw_data = np.asarray(raw_data)[np.newaxis]
print(raw_data.shape)
raw_data = np.char.lower(np.transpose(raw_data).astype('U'))
X, y = encoder.encode_data(raw_data, [['14_17'],]*raw_data.shape[0], maxlen)
data = type('data_type', (object,), {'X_test': X, 'y_test':y})
probabilities = model.predict(data.X_test, verbose=1)
print(encoder.reverse_label_encode(probabilities,p_threshold))
def resolve_file_path(filename, dir):
if os.path.isfile(str(filename)):
return str(filename)
elif os.path.isfile(str(dir + str(filename))):
return dir + str(filename)
def load_weights(checkpoint_name, config, model, checkpoint_dir):
if config and not checkpoint_name:
checkpoint_name = config['checkpoint']
if checkpoint_name:
checkpoint_path = resolve_file_path(checkpoint_name, checkpoint_dir)
print("Checkpoint : %s" % str(checkpoint_path))
model.load_weights(checkpoint_path)
def save_config(execution_config, checkpoint_dir):
filename = ""
if not execution_config["checkpoint"] is None:
filename = execution_config["checkpoint"].rsplit( ".", 1 )[ 0 ] + ".pkl"
else:
filename = time.strftime("%Y%m%d-%H%M%S") + ".pkl"
with open(checkpoint_dir + filename, 'wb') as output:
pickle.dump(execution_config, output, pickle.HIGHEST_PROTOCOL)
def load_config(execution_config_path, dir):
execution_config_path = resolve_file_path(execution_config_path, dir)
return pickle.load( open( execution_config_path, "rb" ) )
def get_best_checkpoint(checkpoint_dir):
max_mtime = 0
max_file = ''
for dirname,subdirs,files in os.walk(checkpoint_dir):
for fname in files:
full_path = os.path.join(dirname, fname)
mtime = os.stat(full_path).st_mtime
if mtime > max_mtime:
max_mtime = mtime
max_dir = dirname
max_file = fname
return max_file
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description='attempts to discern data types looking at columns holistically.')
parser.add_argument('--cp', dest='checkpoint',
help='checkpoint to load')
parser.add_argument('--config', dest='execution_config',
help='execution configuration to load. contains max_cells, and encoder config.')
parser.add_argument('--train', dest='should_train', action="store_true",
default="True", help='run training')
parser.add_argument('--no_train', dest='should_train', action="store_false",
default="True", help='do not run training')
parser.set_defaults(should_train=True)
parser.add_argument('--data_count', dest='data_count', action="store", type=int,
default=100, help='number of data rows to create')
parser.add_argument('--data_cols', dest='data_cols', action="store", type=int,
default=10, help='number of data cols to create')
parser.add_argument('--nullpct', dest='null_pct', action="store", type=float,
default=0, help='percent of Nulls to put in each column')
parser.add_argument('--nb_epoch', dest='nb_epoch', action="store", type=int,
default=5, help='number of epochs')
parser.add_argument('--try_reuse_data', dest='try_reuse_data', action="store_true",
default="True", help='loads existing data if the dimensions have been stored')
parser.add_argument('--force_new_data', dest='try_reuse_data', action="store_false",
default="True", help='forces the creation of new data, even if the dimensions have been stored')
parser.add_argument('--batch_size', dest='batch_size', action="store", type=int,
default=64, help='batch size for training')
args = parser.parse_args()
main(args.checkpoint, args.data_count, args.data_cols, args.should_train,
args.nb_epoch, args.null_pct, args.try_reuse_data, args.batch_size, args.execution_config)
| 39.083521 | 165 | 0.661315 | 287 | 0.016576 | 0 | 0 | 0 | 0 | 0 | 0 | 3,843 | 0.221959 |
296d4245a81dda89f65dda7069ff2c63e222474b | 287 | py | Python | desdeo_tools/solver/__init__.py | phoopies/desdeo-tools | d3cb48c16b35114762386ee8368214b4b432eee0 | [
"MIT"
]
| 1 | 2022-03-30T17:24:55.000Z | 2022-03-30T17:24:55.000Z | desdeo_tools/solver/__init__.py | phoopies/desdeo-tools | d3cb48c16b35114762386ee8368214b4b432eee0 | [
"MIT"
]
| 2 | 2022-01-13T04:05:05.000Z | 2022-03-12T01:07:03.000Z | desdeo_tools/solver/__init__.py | phoopies/desdeo-tools | d3cb48c16b35114762386ee8368214b4b432eee0 | [
"MIT"
]
| null | null | null | """This module implements methods for solving scalar valued functions.
"""
__all__ = ["DiscreteMinimizer", "ScalarMethod", "ScalarMinimizer", "ScalarSolverException"]
from desdeo_tools.solver.ScalarSolver import DiscreteMinimizer, ScalarMethod, ScalarMinimizer, ScalarSolverException
| 35.875 | 116 | 0.818815 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 148 | 0.515679 |
296eaf3a25df674b47f03f8d1d4dfa8c276636f7 | 1,661 | py | Python | tests/test_root_to_hdf.py | lundeenj/hawc_hal | 21b7cfd28490e870d1ded39735827e63684556df | [
"BSD-3-Clause"
]
| null | null | null | tests/test_root_to_hdf.py | lundeenj/hawc_hal | 21b7cfd28490e870d1ded39735827e63684556df | [
"BSD-3-Clause"
]
| null | null | null | tests/test_root_to_hdf.py | lundeenj/hawc_hal | 21b7cfd28490e870d1ded39735827e63684556df | [
"BSD-3-Clause"
]
| null | null | null | from hawc_hal.maptree.map_tree import map_tree_factory
from hawc_hal.response import hawc_response_factory
import os
from conftest import check_map_trees, check_responses
def test_root_to_hdf_response(response):
r = hawc_response_factory(response)
test_filename = "response.hd5"
# Make sure it doesn't exist yet; if it does, remove it
if os.path.exists(test_filename):
os.remove(test_filename)
r.write(test_filename)
# Try to open and use it
r2 = hawc_response_factory(test_filename)
check_responses(r, r2)
os.remove(test_filename)
def do_one_test_maptree(geminga_roi,
geminga_maptree,
fullsky=False):
# Test both with a defined ROI and full sky (ROI is None)
if fullsky:
roi_ = None
else:
roi_ = geminga_roi
m = map_tree_factory(geminga_maptree, roi_)
test_filename = "maptree.hd5"
# Make sure it doesn't exist yet; if it does, remove it
if os.path.exists(test_filename):
os.remove(test_filename)
m.write(test_filename)
# Try to open and use it
m2 = map_tree_factory(test_filename, roi_)
check_map_trees(m, m2)
os.remove(test_filename)
def test_root_to_hdf_maptree_roi(geminga_roi,
geminga_maptree):
do_one_test_maptree(geminga_roi,
geminga_maptree,
fullsky=False)
def test_root_to_hdf_maptree_full_sky(geminga_roi,
geminga_maptree):
do_one_test_maptree(geminga_roi,
geminga_maptree,
fullsky=True)
| 24.072464 | 61 | 0.642986 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 240 | 0.144491 |
296f3e0d144e44aa8925c39df61ed0e143aa64d0 | 1,577 | py | Python | clickhouse_driver/compression/zstd.py | risicle/clickhouse-driver | d36569f52d3e62ad2e275b1d63ad79b75a06402d | [
"MIT"
]
| 17 | 2019-12-19T13:37:14.000Z | 2022-03-30T15:43:12.000Z | clickhouse_driver/compression/zstd.py | risicle/clickhouse-driver | d36569f52d3e62ad2e275b1d63ad79b75a06402d | [
"MIT"
]
| null | null | null | clickhouse_driver/compression/zstd.py | risicle/clickhouse-driver | d36569f52d3e62ad2e275b1d63ad79b75a06402d | [
"MIT"
]
| 4 | 2019-05-15T14:58:05.000Z | 2021-10-14T03:24:12.000Z | from __future__ import absolute_import
from io import BytesIO
import zstd
from .base import BaseCompressor, BaseDecompressor
from ..protocol import CompressionMethod, CompressionMethodByte
from ..reader import read_binary_uint32
from ..writer import write_binary_uint32, write_binary_uint8
class Compressor(BaseCompressor):
method = CompressionMethod.ZSTD
method_byte = CompressionMethodByte.ZSTD
def get_compressed_data(self, extra_header_size):
rv = BytesIO()
data = self.get_value()
compressed = zstd.compress(data)
header_size = extra_header_size + 4 + 4 # sizes
write_binary_uint32(header_size + len(compressed), rv)
write_binary_uint32(len(data), rv)
rv.write(compressed)
return rv.getvalue()
class Decompressor(BaseDecompressor):
method = CompressionMethod.ZSTD
method_byte = CompressionMethodByte.ZSTD
def get_decompressed_data(self, method_byte, compressed_hash,
extra_header_size):
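# read the compressed block, rebuild method byte + size header + payload and
# verify its checksum before decompressing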
size_with_header = read_binary_uint32(self.stream)
compressed_size = size_with_header - extra_header_size - 4
compressed = BytesIO(self.stream.read(compressed_size))
block_check = BytesIO()
write_binary_uint8(method_byte, block_check)
write_binary_uint32(size_with_header, block_check)
block_check.write(compressed.getvalue())
self.check_hash(block_check.getvalue(), compressed_hash)
compressed = compressed.read(compressed_size - 4)
return zstd.decompress(compressed)
| 30.326923 | 66 | 0.723526 | 1,279 | 0.811034 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 0.004439 |
2970702f348227149035bca1abec42b22aa901b1 | 1,019 | py | Python | tmdb_client.py | SztMar/movies_catalogue | 8140a37e916d9f67314aa679b46134e1794588e6 | [
"MIT"
]
| null | null | null | tmdb_client.py | SztMar/movies_catalogue | 8140a37e916d9f67314aa679b46134e1794588e6 | [
"MIT"
]
| null | null | null | tmdb_client.py | SztMar/movies_catalogue | 8140a37e916d9f67314aa679b46134e1794588e6 | [
"MIT"
]
| null | null | null | import requests
import json
import os
API_TOKEN = os.environ.get("TMDB_API_TOKEN", "")
def call_tmdb_api(_endpoint):
endpoint = f"https://api.themoviedb.org/3/{_endpoint}"
full_url = f'{endpoint}?api_key={API_TOKEN}'
response = requests.get(full_url)
response.raise_for_status()
return response.json()
def get_popular_movies():
return call_tmdb_api(f"movie/popular")
def get_movies_list(list_type):
return call_tmdb_api(f"movie/{list_type}")
def get_poster_url(poster_api_path, size="w324"):
base_url = "https://image.tmdb.org/t/p/"
return f"{base_url}{size}/{poster_api_path}"
def get_single_movie(movie_id):
return call_tmdb_api(f"movie/{movie_id}")
def get_single_movie_cast(movie_id):
return call_tmdb_api(f"movie/{movie_id}/credits")
def get_movies(how_many, list_type='popular'):
data = get_movies_list(list_type)
return data["results"][:how_many]
def get_movie_images(movie_id):
return call_tmdb_api(f"movie/{movie_id}/images")
| 26.128205 | 58 | 0.723258 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 292 | 0.286555 |
2970d09ae49d5ea0ddb5185266b5f2bed4f79bf9 | 300 | py | Python | hanibal/ans_escuela/tipo_colaborador.py | Christian-Castro/castro_odoo8 | 8247fdb20aa39e043b6fa0c4d0af509462ab3e00 | [
"Unlicense"
]
| null | null | null | hanibal/ans_escuela/tipo_colaborador.py | Christian-Castro/castro_odoo8 | 8247fdb20aa39e043b6fa0c4d0af509462ab3e00 | [
"Unlicense"
]
| null | null | null | hanibal/ans_escuela/tipo_colaborador.py | Christian-Castro/castro_odoo8 | 8247fdb20aa39e043b6fa0c4d0af509462ab3e00 | [
"Unlicense"
]
| null | null | null | # -*- coding: utf-8 -*-
from openerp.osv import fields, osv
from openerp import models, fields, api, _
class Tipo_Colaborador(models.Model):
_name = 'tipo.colaborador'
_rec_name = 'name'
name=fields.Char(string='Nombre')
active=fields.Boolean(string='Activo',default=True)
| 25 | 55 | 0.683333 | 195 | 0.65 | 0 | 0 | 0 | 0 | 0 | 0 | 63 | 0.21 |
2970d64cd60442ad64c58f0629d552521a791dbc | 967 | py | Python | spyder/utils/tests/test_environ.py | Nicztin/spyder | 1c7b2232d2abbf1e101a1e2ab0552b32e8472bd6 | [
"MIT"
]
| 1 | 2019-09-25T15:08:24.000Z | 2019-09-25T15:08:24.000Z | spyder/utils/tests/test_environ.py | jorgeps86/spyder | 1c7b2232d2abbf1e101a1e2ab0552b32e8472bd6 | [
"MIT"
]
| null | null | null | spyder/utils/tests/test_environ.py | jorgeps86/spyder | 1c7b2232d2abbf1e101a1e2ab0552b32e8472bd6 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
#
"""
Tests for environ.py
"""
# Standard library imports
import os
# Test library imports
import pytest
# Third party imports
from qtpy.QtCore import QTimer
# Local imports
from spyder.utils.test import close_message_box
@pytest.fixture
def environ_dialog(qtbot):
"Setup the Environment variables Dialog taking into account the os."
QTimer.singleShot(1000, lambda: close_message_box(qtbot))
if os.name == 'nt':
from spyder.utils.environ import WinUserEnvDialog
dialog = WinUserEnvDialog()
else:
from spyder.utils.environ import EnvDialog
dialog = EnvDialog()
qtbot.addWidget(dialog)
return dialog
def test_environ(environ_dialog, qtbot):
"""Test the environment variables dialog."""
environ_dialog.show()
assert environ_dialog
if __name__ == "__main__":
pytest.main()
| 20.574468 | 72 | 0.713547 | 0 | 0 | 0 | 0 | 432 | 0.446281 | 0 | 0 | 350 | 0.36157 |
2970ef8fae53d89f446cb52a1542393d10955b37 | 4,680 | py | Python | comsole.py | MumuNiMochii/Dumb_Dump | ee431c934a591bc0868792601f5844615f869af1 | [
"MIT"
]
| 1 | 2020-09-21T01:56:41.000Z | 2020-09-21T01:56:41.000Z | comsole.py | MumuNiMochii/Dumb_Dump | ee431c934a591bc0868792601f5844615f869af1 | [
"MIT"
]
| null | null | null | comsole.py | MumuNiMochii/Dumb_Dump | ee431c934a591bc0868792601f5844615f869af1 | [
"MIT"
]
| 1 | 2020-10-09T01:09:20.000Z | 2020-10-09T01:09:20.000Z | import math
def main():
print("""
\tComsole by MumuNiMochii version beta 1.6.23
\t\"Originally made with C\"
\tMAIN MENU
\tWhat do you want to execute and evaluate?
\t1.) Add two addends
\t2.) Subtract a minuend from its subtrahend
\t3.) Multiply a multiplicand to its multiplier
\t4.) Divide a dividend to its divisor
\t5.) Raise to power a base number
\t6.) Get the square root of a number
\t7.) Compare two numbers
\t8.) Compare three numbers
\t9.) Auto-summation up to inputted value
\t10.) Auto-factorial up to inputted value
\t0.) Exit
""")
opt = int(input("\t\tEnter the number of your choice: "))
if opt == 1:
def add():
print("\n\tADD VALUES")
x = float(input("\t1.) Enter a first value: "))
y = float(input("\t2.) Enter an second value: "))
print("\t3.) The number " + str(x) + " is added by " + str(y) + ", and is equals to " + str(float(x + y)))
add()
elif opt == 2:
def sub():
print("\n\tSUBTRACT VALUES")
x = float(input("\t1.) Enter a first value: "))
y = float(input("\t2.) Enter an second value: "))
print("\t3.) The number " + str(x) + " is subtracted by " + str(y) + ", and is equals to " + str(float(x-y)))
sub()
elif opt == 3:
def mul():
print("\n\tMULTIPLY VALUES")
x = float(input("\t1.) Enter a first value: "))
y = float(input("\t2.) Enter an second value: "))
print("\t3.) The number "+str(x)+" is multiplied by "+str(y)+", and is equals to "+str(float(x*y)))
mul()
elif opt == 4:
def div():
print("\n\tDIVIDE VALUES")
x = float(input("\t1.) Enter a first value: "))
y = float(input("\t2.) Enter an second value: "))
print("\t3.) The number "+str(x)+" is divided by "+str(y)+", and is equals to "+str(float(x/y)))
div()
elif opt == 5:
def pow():
print("\n\tPOWERED VALUE")
x = float(input("\t1.) Enter a base value: "))
y = int(input("\t2.) Enter an exponent value: "))
print("\t3.) The number "+str(x)+" is raised to "+str(y)+", and is equals to "+str(math.pow(x, y))+".")
pow()
elif opt == 6:
def sqrt():
print("\n\tRADICAL VALUE")
x = float(input("\t1.) Enter a value: "))
y = math.sqrt(x)
print("\t2.) The number is "+str(int(x))+" and its square root is: "+str(y)+".")
sqrt()
elif opt == 7:
def comp2():
print("\n\tCOMPARE TWO VALUES")
x = int(input("\t1.) Enter a first value: "))
y = int(input("\t2.) Enter a second value: "))
msg = "\t3.) Your numbers are "+str(x)+", and "+str(y)+", where "
if x > y:
print(msg + str(x) + " is greater than " + str(y)+".")
elif y > x:
print(msg + str(y) + " is greater than " + str(x)+".")
else:
print(msg + "both values are equal.")
comp2()
elif opt == 8:
def comp3():
print("\n\tCOMPARE THREE VALUES")
x = int(input("\t1.) Enter a first value: "))
y = int(input("\t2.) Enter a second value: "))
z = int(input("\t3.) Enter a third value: "))
msg = "\t4.) Your numbers are "+str(x)+", "+str(y)+", and "+str(z)+", where "
if x > y and x > z:
print(msg+str(x)+" is greater than the values "+str(y)+" and "+str(z)+".")
elif y > x and y > z:
print(msg+str(y)+" is greater than the values "+str(x)+" and "+str(z)+".")
else:
print(msg+str(z)+" is greater than the values "+str(x)+" and "+str(y)+".")
comp3()
elif opt == 9:
def summ():
print("\n\tSUMMATION UP TO INPUT VALUE")
x = int(input("\t1.) Count up to inputted number: "))
a = list(range(0, x))
a.append(x)
print("\t2.) Summation of numbers: " + str(a))
b = []
b.extend(a)
total = 0
for i in b:
total += i
print("\t3.) Sum: " + str(total))
summ()
elif opt == 10:
def fact():
print("\n\tFACTORIAL INPUT VALUE")
x = int(input("\t1.) Factorial the inputted number: "))
a = list(range(1, x))
a.append(x)
print("\t2.) List of factorials: "+str(a))
b = []
b.extend(a)
total = 1
for i in b:
total *= i
print("\t3.) Product: "+str(total))
fact()
else:
print("Invalid input.")
main()
| 38.04878 | 121 | 0.47906 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,032 | 0.434188 |
2971ec9257acdac3ee8d9c3bae4d48e78eefdf76 | 3,485 | py | Python | test/test_discussions.py | fibasile/ticket-gateway | 811a216281a17150adca3edf691f9cf5a1478d2f | [
"MIT"
]
| null | null | null | test/test_discussions.py | fibasile/ticket-gateway | 811a216281a17150adca3edf691f9cf5a1478d2f | [
"MIT"
]
| null | null | null | test/test_discussions.py | fibasile/ticket-gateway | 811a216281a17150adca3edf691f9cf5a1478d2f | [
"MIT"
]
| null | null | null | import unittest
import json
from server import server
from models.abc import db
from repositories import ChannelRepository, GitlabProvider
from unittest.mock import MagicMock, Mock
# from flask import make_response
# from flask.json import jsonify
from util import test_client
class TestDiscussions(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.client = test_client(server)
cls._getTicket = GitlabProvider.getTicket
cls._addTicketDiscussion = GitlabProvider.addTicketDiscussion
cls._createTicketDiscussion = GitlabProvider.createTicketDiscussion
def setUp(self):
db.create_all()
ChannelRepository.create(
slug='a-channel',
title='test channel',
path='/dummy/path'
)
def tearDown(self):
db.session.remove()
db.drop_all()
cls = TestDiscussions
GitlabProvider.getTicket = cls._getTicket
GitlabProvider.addTicketDiscussion = cls._addTicketDiscussion
GitlabProvider.createTicketDiscussion = cls._createTicketDiscussion
def test_get(self):
"""The GET on `/api/channel/a-channel/tickets/ticket_id/discussions`"""
GitlabProvider.getTicket = MagicMock()
GitlabProvider.getTicket.return_value = Mock(
discussions=Mock(list=Mock(return_value=[
{"id": "3243", "title": "test"}
])))
response = self.client.get(
'/api/channel/a-channel/tickets/some_ticket/discussions')
self.assertEqual(response.status_code, 200)
GitlabProvider.getTicket.assert_called_with(
'/dummy/path', 'some_ticket')
# GitlabProvider.getMembers.assert_called_with('/dummy/path')
response_json = json.loads(response.data.decode('utf-8'))
self.assertEqual(
response_json,
{'data': [{"id": "3243", "title": "test"}]}
)
def test_post_new(self):
"""POST on `/api/channel/a-channel/tickets/ticket_id/discussions`"""
"""should create a comment in a bew discussion """
GitlabProvider.addTicketDiscussion = MagicMock(
name="addTicketDiscussion")
GitlabProvider.addTicketDiscussion.return_value = {"status": "success"}
response = self.client.post(
'/api/channel/a-channel/tickets/some_ticket/discussions',
json={
"discussion_id": "3232",
"user_id": "3234",
"body": "Some comment"
})
self.assertEqual(response.status_code, 201)
GitlabProvider.addTicketDiscussion.assert_called_with(
'/dummy/path', 'some_ticket', '3232', '3234', 'Some comment')
def test_post_existing(self):
"""POST on `/api/channel/a-channel/tickets/ticket_id/discussions`"""
"""should create a comment in an existing discussion """
GitlabProvider.createTicketDiscussion = MagicMock(
name="createTicketDiscussion")
GitlabProvider.createTicketDiscussion.return_value = {
"status": "success"}
response = self.client.post(
'/api/channel/a-channel/tickets/some_ticket/discussions',
json={
"user_id": "3234",
"body": "Some comment"
})
self.assertEqual(response.status_code, 201)
GitlabProvider.createTicketDiscussion.assert_called_with(
'/dummy/path', 'some_ticket', '3234', 'Some comment')
| 37.880435 | 79 | 0.63759 | 3,204 | 0.919369 | 0 | 0 | 274 | 0.078623 | 0 | 0 | 998 | 0.28637 |
29726a4c7ce2a690926dd277befe7e4b116e0d83 | 24,943 | py | Python | cogs/music.py | ETJeanMachine/Pouty-Bot-Discord | 9b005e43885db2e066d525449e05c7eb5d03cd23 | [
"MIT"
]
| null | null | null | cogs/music.py | ETJeanMachine/Pouty-Bot-Discord | 9b005e43885db2e066d525449e05c7eb5d03cd23 | [
"MIT"
]
| null | null | null | cogs/music.py | ETJeanMachine/Pouty-Bot-Discord | 9b005e43885db2e066d525449e05c7eb5d03cd23 | [
"MIT"
]
| null | null | null | """
This is an example cog that shows how you would make use of Lavalink.py.
This example cog requires that you have python 3.6 or higher due to the
f-strings.
"""
import math
import re
import discord
import lavalink
from discord.ext import commands
from discord.ext import menus
from .utils import checks
from typing import List
import asyncio
import logging
url_rx = re.compile('https?:\\/\\/(?:www\\.)?.+') # noqa: W605
class LavalinkVoiceClient(discord.VoiceProtocol):
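"""
Voice protocol that bridges discord.py to Lavalink: it forwards the raw
VOICE_SERVER_UPDATE / VOICE_STATE_UPDATE payloads to the lavalink client.
"""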
def __init__(self, client: discord.Client, channel: discord.abc.Connectable):
self.client = client
self.channel = channel
self.connect_event = asyncio.Event()
async def on_voice_server_update(self, data):
lavalink_data = {
't': 'VOICE_SERVER_UPDATE',
'd': data
}
await self.lavalink.voice_update_handler(lavalink_data)
async def on_voice_state_update(self, data):
lavalink_data = {
't': 'VOICE_STATE_UPDATE',
'd': data
}
await self.lavalink.voice_update_handler(lavalink_data)
async def connect(self, *, timeout: float, reconnect: bool) -> None:
await self.channel.guild.change_voice_state(channel=self.channel)
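# reuse the bot-wide lavalink client if it already exists, otherwise create it and register the local node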
try:
self.lavalink : lavalink.Client = self.client.lavalink
except AttributeError:
self.client.lavalink = self.lavalink = lavalink.Client(self.client.user.id)
self.client.lavalink.add_node(
'localhost',
2333,
'youshallnotpass',
'us',
'default-node')
async def disconnect(self, *, force: bool) -> None:
await self.channel.guild.change_voice_state(channel=None)
player = self.lavalink.player_manager.get(self.channel.guild.id)
if player:
player.channel_id = False
await player.stop()
self.cleanup()
class MusicQueue(menus.ListPageSource):
def __init__(self, data: List[lavalink.AudioTrack] , ctx: commands.Context, player):
self.ctx = ctx
self.player = player
super().__init__(data, per_page=10)
async def format_page(self, menu, entries):
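# offset keeps the 1-based track numbering continuous across pages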
offset = menu.current_page * self.per_page
embed = discord.Embed(title="Queue",
description="\n".join(
f'`{i+1}.` [{v.title}]({v.uri}) requested by **{self.ctx.guild.get_member(v.requester).name}**' for i, v in enumerate(entries, start=offset)
)
)
status = (f"\N{TWISTED RIGHTWARDS ARROWS} Shuffle: {'enabled' if self.player.shuffle else 'disabled'} | "
f"\N{CLOCKWISE RIGHTWARDS AND LEFTWARDS OPEN CIRCLE ARROWS} Repeat: {'enabled' if self.player.repeat else 'disabled'} | "
f"\N{SPEAKER} Volume : {self.player.volume}")
embed.set_footer(text=status)
return embed
def can_stop():
def predicate(ctx):
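# stopping is allowed for moderators/owners, when the bot is not in voice,
# or when the author is (almost) alone in the voice channel with the bot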
if not ctx.guild:
raise commands.CheckFailure("Only usable within a server")
if not ctx.guild.me.voice:
return True
my_voice = ctx.guild.me.voice.channel
try:
if checks.is_owner_or_moderator_check(ctx.message):
return True
except commands.CheckFailure:
pass
if ctx.guild.me.voice:
if len(my_voice.members) == 2 and ctx.author in my_voice.members:
return True
if len(my_voice.members) == 1:
return True
raise commands.CheckFailure(
"Can only use this when nobody or "
"only one user in voice channel with me"
)
return commands.check(predicate)
class Music(commands.Cog):
def __init__(self, bot):
self.bot = bot
if not hasattr(self.bot, 'lavalink'):
self.bot.lavalink = lavalink.Client(self.bot.user.id)
self.bot.lavalink.add_node(
'localhost',
2333,
'youshallnotpass',
'us',
'default-node')
self.bot.lavalink.add_event_hook(self.track_hook)
self.pages = {}
self.skip_votes = {}
def current_voice_channel(self, ctx):
if ctx.guild and ctx.guild.me.voice:
return ctx.guild.me.voice.channel
return None
def cog_unload(self):
self.bot.lavalink._event_hooks.clear()
async def cog_before_invoke(self, ctx):
guild_check = ctx.guild is not None
# This is essentially the same as `@commands.guild_only()`
# except it saves us repeating ourselves (and also a few lines).
if guild_check:
# Ensure that the bot and command author
# share a mutual voicechannel.
await self.ensure_voice(ctx)
return guild_check
async def cog_after_invoke(self,ctx):
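# redraw any open queue menus so they reflect the player state after this command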
for page in self.pages.get(ctx.message.guild.id, []):
await page.show_page(page.current_page)
@commands.Cog.listener()
async def on_voice_state_update(self, member, before, after):
"""
deafen yourself when joining a voice channel
"""
if member.id == member.guild.me.id and after.channel is None:
if member.guild.voice_client:
await member.guild.voice_client.disconnect(force=True)
await self.bot.change_presence(activity=None)
if member.id != member.guild.me.id or not after.channel:
return
my_perms = after.channel.permissions_for(member)
if not after.deaf and my_perms.deafen_members:
await member.edit(deafen=True)
async def track_hook(self, event):
if isinstance(event, lavalink.events.QueueEndEvent):
guild_id = int(event.player.guild_id)
guild : discord.Guild = self.bot.get_guild(guild_id)
await guild.voice_client.disconnect(force=True)
# Disconnect from the channel -- there's nothing else to play.
if isinstance(event, lavalink.events.TrackEndEvent):
# clear any pending skip votes for the guild whose track just ended
guild_id = int(event.player.guild_id)
if guild_id in self.skip_votes:
self.skip_votes[guild_id].clear()
if isinstance(event, lavalink.events.TrackStartEvent):
await self.bot.change_presence(
activity=discord.Activity(type=discord.ActivityType.listening, name=event.player.current.title)
)
if isinstance(event, lavalink.events.TrackExceptionEvent):
channel = event.player.fetch('channel')
await channel.send(f"Error while playing Track: "
f"**{event.track.title}**:"
f"\n`{event.exception}`")
@commands.group(aliases=['p'],invoke_without_command=True)
async def play(self, ctx, *, query: str):
""" Searches and plays a song from a given query. """
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
query = query.strip('<>')
if not url_rx.match(query):
query = f'ytsearch:{query}'
results = await player.node.get_tracks(query)
if not results or not results['tracks']:
return await ctx.send('Nothing found!')
embed = discord.Embed(color=discord.Color.blurple())
if results['loadType'] == 'PLAYLIST_LOADED':
tracks = results['tracks']
for track in tracks:
player.add(requester=ctx.author.id, track=track)
embed.title = 'Playlist Enqueued!'
embed.description = (f'{results["playlistInfo"]["name"]}'
f'- {len(tracks)} tracks')
else:
track = results['tracks'][0]
embed.title = 'Track Enqueued'
embed.description = (f'[{track["info"]["title"]}]'
f'({track["info"]["uri"]})')
player.add(requester=ctx.author.id, track=track)
await ctx.send(embed=embed)
if not player.is_playing:
await player.play()
@play.command("soundcloud", aliases=['sc'])
async def sc_play(self, ctx, *, query: str):
"""
search and play songs from soundcloud
"""
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
query = query.strip('<>')
if not url_rx.match(query):
query = f'scsearch:{query}'
results = await player.node.get_tracks(query)
if not results or not results['tracks']:
return await ctx.send('Nothing found!')
embed = discord.Embed(color=discord.Color.blurple())
if results['loadType'] == 'PLAYLIST_LOADED':
tracks = results['tracks']
for track in tracks:
player.add(requester=ctx.author.id, track=track)
embed.title = 'Playlist Enqueued!'
embed.description = (f'{results["playlistInfo"]["name"]}'
f'- {len(tracks)} tracks')
else:
track = results['tracks'][0]
embed.title = 'Track Enqueued'
embed.description = (f'[{track["info"]["title"]}]'
f'({track["info"]["uri"]})')
player.add(requester=ctx.author.id, track=track)
await ctx.send(embed=embed)
if not player.is_playing:
await player.play()
@commands.command()
async def seek(self, ctx, *, seconds: int):
""" Seeks to a given position in a track. """
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if ctx.author.id != player.current.requester:
return await ctx.send("Only requester can seek.")
track_time = player.position + (seconds * 1000)
await player.seek(track_time)
await ctx.send(
f'Moved track to **{lavalink.utils.format_time(track_time)}**'
)
@commands.command(name="fskip", aliases=['forceskip'])
@checks.is_owner_or_moderator()
async def force_skip(self, ctx):
"""
can only be invoked by moderators,
immediately skips the current song
"""
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.is_playing:
return await ctx.send('Not playing.')
await player.skip()
if ctx.guild.id in self.skip_votes:
self.skip_votes[ctx.guild.id].clear()
await ctx.send("⏭ | Skipped by moderator")
@commands.command()
async def skip(self, ctx):
"""
if invoked by requester skips the current song
otherwise starts a skip vote, use again to remove skip vote
"""
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.is_playing:
return await ctx.send('Not playing.')
current_voice = self.current_voice_channel(ctx)
if (ctx.author.id == player.current.requester
or len(current_voice.members) <= 2):
await player.skip()
if ctx.guild.id in self.skip_votes.keys():
self.skip_votes[ctx.guild.id].clear()
await ctx.send('⏭ | Skipped by requester.')
else:
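# not the requester: toggle this member's skip vote and skip once at least half of the listeners have voted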
if ctx.guild.id not in self.skip_votes.keys():
self.skip_votes[ctx.guild.id] = {ctx.author.id}
else:
if ctx.author.id in self.skip_votes[ctx.guild.id]:
self.skip_votes[ctx.guild.id].remove(ctx.author.id)
else:
self.skip_votes[ctx.guild.id].add(ctx.author.id)
skip_vote_number = len(self.skip_votes[ctx.guild.id])
number_of_users_in_voice = len(current_voice.members)-1
if skip_vote_number >= number_of_users_in_voice / 2:
await player.skip()
self.skip_votes[ctx.guild.id].clear()
await ctx.send('⏭ | Skip vote passed.')
else:
votes_needed = \
math.ceil(number_of_users_in_voice/2) - skip_vote_number
await ctx.send(f"current skip vote: "
f"{votes_needed}"
f"more vote(s) needed "
f"for skip")
@commands.command(aliases=['np', 'n', 'playing'])
async def now(self, ctx):
""" Shows some stats about the currently playing song. """
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.current:
return await ctx.send('Nothing playing.')
position = lavalink.utils.format_time(player.position)
requester = ctx.guild.get_member(player.current.requester)
if player.current.stream:
duration = '🔴 LIVE'
else:
duration = lavalink.utils.format_time(player.current.duration)
song = (f'**[{player.current.title}]({player.current.uri})**\n'
f'({position}/{duration}) '
f'requested by '
f'**{requester.display_name if requester else "?"}**')
embed = discord.Embed(color=discord.Color.blurple(),
title='Now Playing', description=song)
status = (f"\N{TWISTED RIGHTWARDS ARROWS} Shuffle: {'enabled' if player.shuffle else 'disabled'} | "
f"\N{CLOCKWISE RIGHTWARDS AND LEFTWARDS OPEN CIRCLE ARROWS} Repeat: {'enabled' if player.repeat else 'disabled'} | "
f"\N{SPEAKER} Volume : {player.volume}")
embed.set_footer(text=status)
await ctx.send(embed=embed)
@commands.Cog.listener(name="on_reaction_clear")
async def remove_page_on_menu_close(self, message, reactions):
current_pages = self.pages.get(message.guild.id, None)
if not current_pages:
return
found_page = next(filter(lambda p: p.message == message, current_pages), None)
if found_page:
self.pages[message.guild.id].remove(found_page)
@commands.command(aliases=['q', 'playlist'])
async def queue(self, ctx):
""" Shows the player's queue. """
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.queue:
return await ctx.send('Nothing queued.')
pages= menus.MenuPages(source=MusicQueue(player.queue, ctx, player), clear_reactions_after=True)
await pages.start(ctx)
if ctx.guild.id in self.pages:
self.pages[ctx.guild.id].append(pages)
else:
self.pages[ctx.guild.id] = [pages]
@commands.command(aliases=['resume'])
@checks.is_owner_or_moderator()
async def pause(self, ctx):
""" Pauses/Resumes the current track. """
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.is_playing:
return await ctx.send('Not playing.')
if player.paused:
await player.set_pause(False)
await ctx.send('⏯ | Resumed')
else:
await player.set_pause(True)
await ctx.send('⏯ | Paused')
@commands.command(aliases=['vol'])
@checks.is_owner_or_moderator()
async def volume(self, ctx, volume: int = None):
""" Changes the player's volume (0-1000). """
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not volume:
return await ctx.send(f'🔈 | {player.volume}%')
# Lavalink will automatically cap values between, or equal to 0-1000.
await player.set_volume(volume)
await ctx.send(f'🔈 | Set to {player.volume}%')
@commands.command()
async def shuffle(self, ctx):
""" Shuffles the player's queue. """
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.is_playing:
return await ctx.send('Nothing playing.')
player.shuffle = not player.shuffle
await ctx.send(f'🔀 | Shuffle '
f'{"enabled" if player.shuffle else "disabled"}')
@commands.command(aliases=['loop'])
async def repeat(self, ctx):
"""
Repeats the current song until the command is invoked again
or until a new song is queued.
"""
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.is_playing:
return await ctx.send('Nothing playing.')
player.repeat = not player.repeat
await ctx.send('🔁 | Repeat ' + ('enabled' if player.repeat else 'disabled'))
@commands.command()
async def remove(self, ctx, index: int):
""" Removes an item from the player's queue with the given index. """
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
can_remove = False
try:
can_remove = checks.is_owner_or_moderator_check(ctx.message)
except commands.CheckFailure:
pass
if can_remove or ctx.author.id == player.queue[index-1].requester:
if not player.queue:
return await ctx.send('Nothing queued.')
if index > len(player.queue) or index < 1:
return await ctx.send(f'Index has to be **between** 1 and {len(player.queue)}')
removed = player.queue.pop(index - 1) # Account for 0-index.
await ctx.send(f'Removed **{removed.title}** from the queue.')
else:
await ctx.send("Only requester and moderators can remove from the list")
@commands.group(aliases=["search"], invoke_without_command=True)
async def find(self, ctx, *, query):
""" Lists the first 10 search results from a given query.
also allows you to queue one of the results (use p and the index number)
for example p 1 to play the first song in the results.
"""
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
original_query = query
if not query.startswith('ytsearch:') and not query.startswith('scsearch:'):
query = 'ytsearch:' + query
results = await player.node.get_tracks(query)
if not results or not results['tracks']:
return await ctx.send('Nothing found.')
tracks = results['tracks'][:10] # First 10 results
o = (f"The first 10 results found via query `{original_query}`\n"
f"use `queue` or `play` followed by the number of the result to queue that song\n")
for index, track in enumerate(tracks, start=1):
track_title = track['info']['title']
track_uri = track['info']['uri']
o += f'`{index}.` [{track_title}]({track_uri})\n'
embed = discord.Embed(color=discord.Color.blurple(), description=o)
await ctx.send(embed=embed)
def queue_check(message):
if not re.match(r"(q(uery)?|p(lay)?)", message.content):
return False
try:
get_message_numbers = ''.join(c for c in message.content if c.isdigit())
number = int(get_message_numbers)
except ValueError:
raise commands.CommandError("please choose a number between 1 and 10")
            return (1 <= number <= 10) and message.channel == ctx.channel and message.author == ctx.author
try:
msg = await ctx.bot.wait_for("message", check=queue_check, timeout=10.0)
except asyncio.TimeoutError:
return
get_message_numbers = ''.join(c for c in msg.content if c.isdigit())
result_number = int(get_message_numbers)
ctx.command = self.play
await self.cog_before_invoke(ctx)
await ctx.invoke(self.play, query=tracks[result_number-1]['info']['uri'])
@find.group(name="scsearch",aliases=["sc", "soundcloud"], invoke_without_command=True)
async def find_sc(self, ctx, *, query):
""" Lists the first 10 soundcloud search results from a given query.
also allows you to queue one of the results (use p and the index number)
for example p 1 to play the first song in the results.
"""
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
original_query = query
query = 'scsearch:' + query
results = await player.node.get_tracks(query)
if not results or not results['tracks']:
return await ctx.send('Nothing found.')
tracks = results['tracks'][:10] # First 10 results
o = (f"The first 10 results found via query `{original_query}`\n"
f"use `queue` or `play` followed by the number of the result to queue that song\n")
for index, track in enumerate(tracks, start=1):
track_title = track['info']['title']
track_uri = track['info']['uri']
o += f'`{index}.` [{track_title}]({track_uri})\n'
embed = discord.Embed(color=discord.Color.blurple(), description=o)
await ctx.send(embed=embed)
def queue_check(message):
if not re.match(r"(q(uery)?|p(lay)?)", message.content):
return False
try:
get_message_numbers = ''.join(c for c in message.content if c.isdigit())
number = int(get_message_numbers)
except ValueError:
raise commands.CommandError("please choose a number between 1 and 10")
            return (1 <= number <= 10) and message.channel == ctx.channel and message.author == ctx.author
try:
msg = await ctx.bot.wait_for("message", check=queue_check, timeout=10.0)
except asyncio.TimeoutError:
return
get_message_numbers = ''.join(c for c in msg.content if c.isdigit())
result_number = int(get_message_numbers)
ctx.command = self.play
await self.cog_before_invoke(ctx)
await ctx.invoke(self.play, query=tracks[result_number-1]['info']['uri'])
@commands.command(aliases=['dc','stop','leave','quit'])
@can_stop()
async def disconnect(self, ctx: commands.Context):
""" Disconnects the player from the voice channel and clears its queue. """
await ctx.voice_client.disconnect(force=True)
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.is_connected:
return await ctx.send('Not connected.')
player.queue.clear()
await player.stop()
await ctx.send('*⃣ | Disconnected.')
async def ensure_voice(self, ctx):
""" This check ensures that the bot and command author are in the same voicechannel. """
should_connect = ctx.command.name in ('play', 'junbi_ok','soundcloud') # Add commands that require joining voice to work.
if should_connect and self.current_voice_channel(ctx) is None:
if self.bot.lavalink.node_manager.available_nodes:
voice_client = await ctx.author.voice.channel.connect(cls=LavalinkVoiceClient)
player : lavalink.DefaultPlayer = self.bot.lavalink.player_manager.create(ctx.guild.id)
player.store("channel", ctx.channel)
else:
raise commands.CommandError("No audio player nodes available. Please wait a few minutes for a reconnect")
elif self.current_voice_channel(ctx) is not None and not self.bot.lavalink.node_manager.available_nodes:
await ctx.guild.voice_client.disconnect(force=True)
raise commands.CommandError("No audio player nodes available. Please wait a few minutes for a reconnect")
if ctx.command.name in ('find', 'scsearch', 'disconnect', 'now', 'queue'):
return
if not ctx.author.voice or not ctx.author.voice.channel:
raise commands.CheckFailure('Join a voicechannel first.')
permissions = ctx.author.voice.channel.permissions_for(ctx.me)
if not permissions.connect or not permissions.speak: # Check user limit too?
raise commands.CheckFailure('I need the `CONNECT` and `SPEAK` permissions.')
@commands.command(name="lc")
@checks.is_owner()
async def lavalink_reconnect(self, ctx):
self.bot.lavalink.add_node(
'localhost',
2333,
'youshallnotpass',
'us',
'default-node')
async def setup(bot):
await bot.add_cog(Music(bot))
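# Minimal loading sketch (the extension path "cogs.music" and the bot construction below are
# assumptions for illustration; discord.py 2.x awaits the async setup() entry point above):
#
#     bot = commands.Bot(command_prefix="!", intents=discord.Intents.all())
#     await bot.load_extension("cogs.music")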
| 41.092257 | 161 | 0.583611 | 23,599 | 0.945094 | 0 | 0 | 16,823 | 0.673728 | 21,206 | 0.849259 | 5,774 | 0.231237 |
2973ac2848fca5ea3493059da1e9b46b9746f3f2 | 17,005 | py | Python | utils/data_processing.py | LisaAnne/LocalizingMoments | b6a555c8134581305d0ed4716fbc192860e0b88c | [
"BSD-2-Clause"
]
| 157 | 2017-08-04T21:56:46.000Z | 2022-03-05T13:49:02.000Z | utils/data_processing.py | Naman-Bhalla/LocalizingMoments | 618bebfe6c4b897e94770b8011d34aa45c941e62 | [
"BSD-2-Clause"
]
| 19 | 2017-09-26T15:27:47.000Z | 2022-02-27T23:21:00.000Z | utils/data_processing.py | Naman-Bhalla/LocalizingMoments | 618bebfe6c4b897e94770b8011d34aa45c941e62 | [
"BSD-2-Clause"
]
| 48 | 2017-08-08T11:18:39.000Z | 2021-11-15T04:20:33.000Z | import numpy as np
import sys
import os
sys.path.append('utils/')
from config import *
from utils import *
sys.path.append(pycaffe_dir)
import time
import pdb
import random
import pickle as pkl
import caffe
from multiprocessing import Pool
from threading import Thread
import random
import h5py
import itertools
import math
import re
glove_dim = 300
glove_path = 'data/glove.6B.%dd.txt' %glove_dim
#glove_path = 'data/glove_debug_path.txt' #for debugging
if glove_path == 'data/glove_debug_path.txt':
print "continue?"
pdb.set_trace()
possible_segments = [(0,0), (1,1), (2,2), (3,3), (4,4), (5,5)]
for i in itertools.combinations(range(6), 2):
possible_segments.append(i)
length_prep_word = 40
length_prep_character = 250
vocab_file = 'data/vocab_glove_complete.txt'
def word_tokenize(s):
sent = s.lower()
sent = re.sub('[^A-Za-z0-9\s]+',' ', sent)
return sent.split()
def sentences_to_words(sentences):
words = []
for s in sentences:
words.extend(word_tokenize(str(s.lower())))
return words
class glove_embedding(object):
''' Creates glove embedding object
'''
def __init__(self, glove_file=glove_path):
glove_txt = open(glove_file).readlines()
glove_txt = [g.strip() for g in glove_txt]
glove_vector = [g.split(' ') for g in glove_txt]
glove_words = [g[0] for g in glove_vector]
glove_vecs = [g[1:] for g in glove_vector]
glove_array = np.zeros((glove_dim, len(glove_words)))
glove_dict = {}
for i, w in enumerate(glove_words): glove_dict[w] = i
for i, vec in enumerate(glove_vecs):
glove_array[:,i] = np.array(vec)
self.glove_array = glove_array
self.glove_dict = glove_dict
self.glove_words = glove_words
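    # Illustrative sketch (file contents are hypothetical): each line of the GloVe text file
    # parsed above has the form "<word> <v1> <v2> ... <v300>", e.g.
    #     the 0.418 0.24968 -0.41242 ...
    # so that, after __init__, glove_array[:, glove_dict["the"]] holds that word's 300-d vector.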
class zero_language_vector(object):
def __init__(self, data):
self.dim = glove_dim
def get_vector_dim(self):
return self.dim
def get_vocab_size(self):
return 0
def preprocess(self, data):
embedding = np.zeros((self.get_vector_dim(),))
for d in data:
d['language_input'] = embedding
d['gt'] = (d['gt'][0], d['gt'][1])
return data
class recurrent_language(object):
def get_vocab_size(self):
return len(self.vocab_dict.keys())
def preprocess_sentence(self, words):
vector_dim = self.get_vector_dim()
sentence_mat = np.zeros((len(words), vector_dim))
count_words = 0
for i, w in enumerate(words):
try:
sentence_mat[count_words,:] = self.vocab_dict[w]
count_words += 1
except:
if '<unk>' in self.vocab_dict.keys():
sentence_mat[count_words,:] = self.vocab_dict['<unk>']
count_words += 1
else:
pass
sentence_mat = sentence_mat[:count_words]
return sentence_mat
def preprocess(self, data):
for d in data:
words = sentences_to_words([d['description']])
            d['language_input'] = self.preprocess_sentence(words)
return data
class recurrent_word(recurrent_language):
def __init__(self, data):
self.data = data
vocab = open(vocab_file).readlines()
vocab = [v.strip() for v in vocab]
if '<unk>' not in vocab:
vocab.append('<unk>')
vocab_dict = {}
for i, word in enumerate(vocab):
vocab_dict[word] = i
self.vocab_dict = vocab_dict
def get_vector_dim(self):
return 1
class recurrent_embedding(recurrent_language):
def read_embedding(self):
print "Reading glove embedding"
embedding = glove_embedding(glove_path)
self.embedding = embedding
def get_vector_dim(self):
return glove_dim
def __init__(self, data):
self.read_embedding()
embedding = self.embedding
vector_dim = self.get_vector_dim()
self.data = data
vocab = open(vocab_file).readlines()
vocab = [v.strip() for v in vocab]
if '<unk>' in vocab:
vocab.remove('<unk>') #don't have an <unk> vector. Alternatively, could map to random vector...
vocab_dict = {}
for i, word in enumerate(vocab):
try:
vocab_dict[word] = embedding.glove_array[:,embedding.glove_dict[word]]
except:
print "%s not in glove embedding" %word
self.vocab_dict = vocab_dict
def preprocess(self, data):
vector_dim = self.get_vector_dim()
for d in data:
d['language_input'] = sentences_to_words([d['description']])
return data
def get_vocab_dict(self):
return self.vocab_dict
#Methods for extracting visual features
def feature_process_base(start, end, features):
return np.mean(features[start:end+1,:], axis = 0)
def feature_process_norm(start, end, features):
base_feature = np.mean(features[start:end+1,:], axis = 0)
return base_feature/(np.linalg.norm(base_feature) + 0.00001)
def feature_process_context(start, end, features):
feature_dim = features.shape[1]
full_feature = np.zeros((feature_dim*2,))
if np.sum(features[5,:]) > 0:
full_feature[:feature_dim] = feature_process_norm(0,6, features)
else:
full_feature[:feature_dim] = feature_process_norm(0,5, features)
full_feature[feature_dim:feature_dim*2] = feature_process_norm(start, end, features)
return full_feature
feature_process_dict = {'feature_process_base': feature_process_base,
'feature_process_norm': feature_process_norm,
'feature_process_context': feature_process_context,
}
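# Minimal usage sketch (the 6 x 500 array below is made up for illustration): the processor is
# looked up by the name given in params['feature_process'] and applied to a per-video feature
# matrix with one row per temporal segment.
#
#     feats = np.random.rand(6, 500)                     # hypothetical per-segment features
#     fn = feature_process_dict['feature_process_norm']
#     pooled = fn(2, 4, feats)                           # L2-normalized mean of segments 2..4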
class extractData(object):
""" General class to extract data.
"""
def increment(self):
#uses iteration, batch_size, data_list, and num_data to extract next batch identifiers
next_batch = [None]*self.batch_size
if self.iteration + self.batch_size >= self.num_data:
next_batch[:self.num_data-self.iteration] = self.data_list[self.iteration:]
next_batch[self.num_data-self.iteration:] = self.data_list[:self.batch_size -(self.num_data-self.iteration)]
random.shuffle(self.data_list)
self.iteration = self.num_data - self.iteration
else:
next_batch = self.data_list[self.iteration:self.iteration+self.batch_size]
self.iteration += self.batch_size
assert self.iteration > -1
assert len(next_batch) == self.batch_size
return next_batch
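    # Worked example (illustrative numbers): with num_data == 10, batch_size == 4 and
    # iteration == 8, increment() takes the last 2 identifiers, reshuffles data_list, fills
    # the remaining 2 slots from the front of the reshuffled list, and leaves iteration == 2.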
class extractLanguageFeatures(extractData):
def __init__(self, dataset, params, result=None):
self.data_list = range(len(dataset))
self.num_data = len(self.data_list)
self.dataset = dataset
self.iteration = 0
self.vocab_dict = params['vocab_dict']
self.batch_size = params['batch_size']
self.num_glove_centroids = self.vocab_dict.values()[0].shape[0]
self.T = params['sentence_length']
if isinstance(result, dict):
self.result = result
self.query_key = params['query_key']
self.cont_key = params['cont_key']
self.top_keys = [self.query_key, self.cont_key]
self.top_shapes = [(self.T, self.batch_size, self.num_glove_centroids),
(self.T, self.batch_size)]
else:
print "Will only be able to run in test mode"
def get_features(self, query):
feature = np.zeros((self.T, self.num_glove_centroids))
cont = np.zeros((self.T,))
len_query = min(len(query), self.T)
if len_query < len(query):
query = query[:len_query]
for count_word, word in enumerate(query):
try:
feature[-(len_query)+count_word,:] = self.vocab_dict[word]
except:
feature[-(len_query)+count_word,:] = np.zeros((glove_dim,))
cont[-(len_query-1):] = 1
assert np.sum(feature[:-len_query,:]) == 0
return feature, cont
def get_data_test(self, data):
query = data['language_input']
return self.get_features(query)
def get_data(self, next_batch):
data = self.dataset
query_mat = np.zeros((self.T, self.batch_size, self.num_glove_centroids))
cont = np.zeros((self.T, self.batch_size))
for i, nb in enumerate(next_batch):
query = data[nb]['language_input']
query_mat[:,i,:], cont[:,i] = self.get_features(query)
self.result[self.query_key] = query_mat
self.result[self.cont_key] = cont
class extractVisualFeatures(extractData):
def __init__(self, dataset, params, result):
self.data_list = range(len(dataset))
self.feature_process_algo = params['feature_process']
self.loc_feature = params['loc_feature']
self.num_data = len(self.data_list)
self.dataset = dataset
self.iteration = 0
self.loc = params['loc_feature']
loss_type = params['loss_type']
assert loss_type in ['triplet', 'inter', 'intra']
self.inter = False
self.intra = False
if loss_type in ['triplet', 'inter']:
self.inter = True
if loss_type in ['triplet', 'intra']:
self.intra = True
self.batch_size = params['batch_size']
self.num_glove_centroids = params['num_glove_centroids']
features_h5py = h5py.File(params['features'])
features = {}
for key in features_h5py.keys():
features[key] = np.array(features_h5py[key])
features_h5py.close()
self.features = features
assert self.feature_process_algo in feature_process_dict.keys()
self.feature_process = feature_process_dict[self.feature_process_algo]
self.feature_dim = self.feature_process(0,0,self.features[self.dataset[0]['video']]).shape[-1]
self.result = result
self.feature_key_p = params['feature_key_p']
self.feature_time_stamp_p = params['feature_time_stamp_p']
self.feature_time_stamp_n = params['feature_time_stamp_n']
self.top_keys = [self.feature_key_p, self.feature_time_stamp_p, self.feature_time_stamp_n]
self.top_shapes = [(self.batch_size, self.feature_dim),
(self.batch_size, 2),
(self.batch_size,2)]
if self.inter:
self.feature_key_inter = 'features_inter'
self.top_keys.append(self.feature_key_inter)
self.top_shapes.append((self.batch_size, self.feature_dim))
if self.intra:
self.feature_key_intra = 'features_intra'
self.top_keys.append(self.feature_key_intra)
self.top_shapes.append((self.batch_size, self.feature_dim))
self.possible_annotations = possible_segments
def get_data_test(self, d):
video_feats = self.features[d['video']]
features = np.zeros((len(self.possible_annotations), self.feature_dim))
loc_feats = np.zeros((len(self.possible_annotations), 2))
for i, p in enumerate(self.possible_annotations):
features[i,:] = self.feature_process(p[0], p[1], video_feats)
loc_feats[i,:] = [p[0]/6., p[1]/6.]
return features, loc_feats
def get_data(self, next_batch):
feature_process = self.feature_process
data = self.dataset
features_p = np.zeros((self.batch_size, self.feature_dim))
if self.inter: features_inter = np.zeros((self.batch_size, self.feature_dim))
if self.intra: features_intra = np.zeros((self.batch_size, self.feature_dim))
features_time_stamp_p = np.zeros((self.batch_size, 2))
features_time_stamp_n = np.zeros((self.batch_size, 2))
for i, nb in enumerate(next_batch):
rint = random.randint(0,len(data[nb]['times'])-1)
gt_s = data[nb]['times'][rint][0]
gt_e = data[nb]['times'][rint][1]
possible_n = list(set(self.possible_annotations) - set(((gt_s,gt_e),)))
random.shuffle(possible_n)
n = possible_n[0]
assert n != (gt_s, gt_e)
video = data[nb]['video']
feats = self.features[video]
if self.inter:
other_video = data[nb]['video']
while (other_video == video):
other_video_index = int(random.random()*len(data))
other_video = data[other_video_index]['video']
feats_inter = self.features[other_video]
features_p[i,:] = feature_process(gt_s, gt_e, feats)
if self.intra:
features_intra[i,:] = feature_process(n[0], n[1], feats)
if self.inter:
try:
features_inter[i,:] = feature_process(gt_s, gt_e, feats_inter)
except:
pdb.set_trace()
if self.loc:
features_time_stamp_p[i,0] = gt_s/6.
features_time_stamp_p[i,1] = gt_e/6.
features_time_stamp_n[i,0] = n[0]/6.
features_time_stamp_n[i,1] = n[1]/6.
else:
features_time_stamp_p[i,0] = 0
features_time_stamp_p[i,1] = 0
features_time_stamp_n[i,0] = 0
features_time_stamp_n[i,1] = 0
assert not math.isnan(np.mean(self.features[data[nb]['video']][n[0]:n[1]+1,:]))
assert not math.isnan(np.mean(self.features[data[nb]['video']][gt_s:gt_e+1,:]))
self.result[self.feature_key_p] = features_p
self.result[self.feature_time_stamp_p] = features_time_stamp_p
self.result[self.feature_time_stamp_n] = features_time_stamp_n
if self.inter:
self.result[self.feature_key_inter] = features_inter
if self.intra:
self.result[self.feature_key_intra] = features_intra
class batchAdvancer(object):
def __init__(self, extractors):
self.extractors = extractors
self.increment_extractor = extractors[0]
def __call__(self):
#The batch advancer just calls each extractor
next_batch = self.increment_extractor.increment()
for e in self.extractors:
e.get_data(next_batch)
class python_data_layer(caffe.Layer):
""" General class to extract data.
"""
def setup(self, bottom, top):
random.seed(10)
self.params = eval(self.param_str)
params = self.params
assert 'top_names' in params.keys()
#set up prefetching
self.thread_result = {}
self.thread = None
self.setup_extractors()
self.batch_advancer = batchAdvancer(self.data_extractors)
shape_dict = {}
self.top_names = []
for de in self.data_extractors:
for top_name, top_shape in zip(de.top_keys, de.top_shapes):
shape_dict[top_name] = top_shape
self.top_names.append((params['top_names'].index(top_name), top_name))
self.dispatch_worker()
self.top_shapes = [shape_dict[tn[1]] for tn in self.top_names]
print 'Outputs:', self.top_names
if len(top) != len(self.top_names):
raise Exception('Incorrect number of outputs (expected %d, got %d)' %
(len(self.top_names), len(top)))
self.join_worker()
#for top_index, name in enumerate(self.top_names.keys()):
top_count = 0
for top_index, name in self.top_names:
shape = self.top_shapes[top_count]
print 'Top name %s has shape %s.' %(name, shape)
top[top_index].reshape(*shape)
top_count += 1
def reshape(self, bottom, top):
pass
def forward(self, bottom, top):
if self.thread is not None:
self.join_worker()
for top_index, name in self.top_names:
top[top_index].data[...] = self.thread_result[name]
self.dispatch_worker()
def dispatch_worker(self):
assert self.thread is None
self.thread = Thread(target=self.batch_advancer)
self.thread.start()
def join_worker(self):
assert self.thread is not None
self.thread.join()
self.thread = None
def backward(self, top, propoagate_down, bottom):
pass
feature_process_dict = {'feature_process_base': feature_process_base,
'feature_process_norm': feature_process_norm,
'feature_process_context': feature_process_context,
}
language_feature_process_dict = {'zero_language': zero_language_vector,
'recurrent_embedding': recurrent_embedding}
class dataLayer_ExtractPairedLanguageVision(python_data_layer):
def setup_extractors(self):
assert 'top_names' in self.params.keys()
assert 'descriptions' in self.params.keys()
assert 'features' in self.params.keys()
if 'batch_size' not in self.params.keys(): self.params['batch_size'] = 120
self.params['query_key'] = 'query'
self.params['feature_key_n'] = 'features_n'
self.params['feature_key_p'] = 'features_p'
self.params['feature_key_t'] = 'features_t'
self.params['feature_time_stamp_p'] = 'features_time_stamp_p'
self.params['feature_time_stamp_n'] = 'features_time_stamp_n'
self.params['cont_key'] = 'cont'
language_extractor_fcn = extractLanguageFeatures
visual_extractor_fcn = extractVisualFeatures
language_process = recurrent_embedding
data_orig = read_json(self.params['descriptions'])
random.shuffle(data_orig)
language_processor = language_process(data_orig)
data = language_processor.preprocess(data_orig)
self.params['vocab_dict'] = language_processor.vocab_dict
num_glove_centroids = language_processor.get_vector_dim()
self.params['num_glove_centroids'] = num_glove_centroids
visual_feature_extractor = visual_extractor_fcn(data, self.params, self.thread_result)
textual_feature_extractor = language_extractor_fcn(data, self.params, self.thread_result)
self.data_extractors = [visual_feature_extractor, textual_feature_extractor]
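# Minimal sketch of how this layer is typically referenced from a Caffe prototxt; the module
# path, top names and param_str values below are assumptions for illustration only:
#
#     layer {
#       name: "data"
#       type: "Python"
#       top: "query"  top: "cont"  top: "features_p"
#       top: "features_time_stamp_p"  top: "features_time_stamp_n"
#       python_param {
#         module: "data_processing"
#         layer: "dataLayer_ExtractPairedLanguageVision"
#         param_str: "{'top_names': ['query', 'cont', 'features_p', 'features_time_stamp_p', 'features_time_stamp_n'], 'descriptions': 'data/train_data.json', 'features': 'data/average_fc7.h5', 'loss_type': 'inter', 'feature_process': 'feature_process_norm', 'loc_feature': True, 'sentence_length': 50}"
#       }
#     }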
| 32.267552 | 114 | 0.673625 | 14,574 | 0.857042 | 0 | 0 | 0 | 0 | 0 | 0 | 1,843 | 0.10838 |
297530f3746153e98984e1af10f206c3aa39875c | 17,856 | py | Python | old_metrics/bleu.py | Danial-Alh/fast-bleu | d8726ae829b2cc275192e70819ee9c60c9d4bed6 | [
"MIT"
]
| 21 | 2020-08-09T12:06:58.000Z | 2022-02-13T19:15:11.000Z | old_metrics/bleu.py | Danial-Alh/FastBLEU | 182584a6593aa36d57b15705da6f317bdb06f83e | [
"MIT"
]
| 4 | 2020-08-11T22:00:40.000Z | 2021-09-01T07:35:52.000Z | old_metrics/bleu.py | Danial-Alh/FastBLEU | 182584a6593aa36d57b15705da6f317bdb06f83e | [
"MIT"
]
| 2 | 2020-08-12T14:49:18.000Z | 2021-06-25T12:51:42.000Z | import math
import os
from collections import Counter
from fractions import Fraction
import numpy as np
from nltk import ngrams
from nltk.translate.bleu_score import SmoothingFunction
from .utils import get_ngrams, Threader
def corpus_bleu(references,
hypothesis,
reference_max_counts,
ref_lens,
weights=(0.25, 0.25, 0.25, 0.25),
smoothing_function=None,
auto_reweight=False,
):
"""
Calculate a single corpus-level BLEU score (aka. system-level BLEU) for all
the hypotheses and their respective references.
Instead of averaging the sentence level BLEU scores (i.e. marco-average
precision), the original BLEU metric (Papineni et al. 2002) accounts for
the micro-average precision (i.e. summing the numerators and denominators
for each hypothesis-reference(s) pairs before the division).
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'military', 'always',
... 'obeys', 'the', 'commands', 'of', 'the', 'party']
>>> ref1a = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'military', 'will', 'forever',
... 'heed', 'Party', 'commands']
>>> ref1b = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'military', 'forces', 'always',
... 'being', 'under', 'the', 'command', 'of', 'the', 'Party']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'army', 'always', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'party']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> corpus_bleu(list_of_references, hypotheses) # doctest: +ELLIPSIS
0.5920...
The example below show that corpus_bleu() is different from averaging
sentence_bleu() for hypotheses
>>> score1 = sentence_bleu([ref1a, ref1b, ref1c], hyp1)
>>> score2 = sentence_bleu([ref2a], hyp2)
>>> (score1 + score2) / 2 # doctest: +ELLIPSIS
0.6223...
    :param references: a corpus of reference sentences, w.r.t. the hypothesis
    :type references: list(list(str))
    :param hypothesis: a hypothesis sentence
    :type hypothesis: list(str)
    :param reference_max_counts: per ngram order, the maximum reference count of each ngram
    :type reference_max_counts: list(dict)
    :param ref_lens: the lengths of the reference sentences
    :type ref_lens: list(int)
:param weights: weights for unigrams, bigrams, trigrams and so on
:type weights: list(float)
:param smoothing_function:
:type smoothing_function: SmoothingFunction
:param auto_reweight: Option to re-normalize the weights uniformly.
:type auto_reweight: bool
:return: The corpus-level BLEU score.
:rtype: float
"""
# Before proceeding to compute BLEU, perform sanity checks.
p_numerators = Counter() # Key = ngram order, and value = no. of ngram matches.
p_denominators = Counter() # Key = ngram order, and value = no. of ngram in ref.
hyp_lengths, ref_lengths = 0, 0
# Iterate through each hypothesis and their corresponding references.
# For each order of ngram, calculate the numerator and
# denominator for the corpus-level modified precision.
for i, _ in enumerate(weights, start=1):
p_i = modified_precision(reference_max_counts, hypothesis, i)
p_numerators[i] += p_i.numerator
p_denominators[i] += p_i.denominator
# Calculate the hypothesis length and the closest reference length.
# Adds them to the corpus-level hypothesis and reference counts.
hyp_len = len(hypothesis)
hyp_lengths += hyp_len
ref_lengths += closest_ref_length(ref_lens, hyp_len)
# Calculate corpus-level brevity penalty.
bp = brevity_penalty(ref_lengths, hyp_lengths)
# Uniformly re-weighting based on maximum hypothesis lengths if largest
# order of n-grams < 4 and weights is set at default.
if auto_reweight:
if hyp_lengths < 4 and weights == (0.25, 0.25, 0.25, 0.25):
weights = (1 / hyp_lengths,) * hyp_lengths
# Collects the various precision values for the different ngram orders.
p_n = [
Fraction(p_numerators[i], p_denominators[i], _normalize=False)
for i, _ in enumerate(weights, start=1)
]
# Returns 0 if there's no matching n-grams
# We only need to check for p_numerators[1] == 0, since if there's
# no unigrams, there won't be any higher order ngrams.
if p_numerators[1] == 0:
return 0
# If there's no smoothing, set use method0 from SmoothinFunction class.
if not smoothing_function:
smoothing_function = SmoothingFunction().method0
# Smoothen the modified precision.
# Note: smoothing_function() may convert values into floats;
# it tries to retain the Fraction object as much as the
# smoothing method allows.
p_n = smoothing_function(
p_n, references=references, hypothesis=hypothesis, hyp_len=hyp_lengths
)
s = (w_i * math.log(p_i) for w_i, p_i in zip(weights, p_n))
s = bp * math.exp(math.fsum(s))
return s
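# Usage sketch for the signature above (sentences are illustrative): the per-order maximum
# reference counts and the reference lengths are precomputed once, so scoring many hypotheses
# against the same references avoids recounting reference ngrams.
#
#     references = [['the', 'cat', 'is', 'on', 'the', 'mat']]
#     ref_lens = [len(r) for r in references]
#     reference_max_counts = [Counter(ngrams(references[0], n + 1)) for n in range(4)]
#     # (with several references, take the element-wise max across their Counters instead)
#     score = corpus_bleu(references, ['the', 'cat', 'sat', 'on', 'the', 'mat'],
#                         reference_max_counts, ref_lens,
#                         smoothing_function=SmoothingFunction().method1)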
def modified_precision(reference_max_counts, hypothesis, n):
"""
Calculate modified ngram precision.
The normal precision method may lead to some wrong translations with
high-precision, e.g., the translation, in which a word of reference
repeats several times, has very high precision.
This function only returns the Fraction object that contains the numerator
and denominator necessary to calculate the corpus-level precision.
To calculate the modified precision for a single pair of hypothesis and
references, cast the Fraction object into a float.
The famous "the the the ... " example shows that you can get BLEU precision
by duplicating high frequency words.
>>> reference1 = 'the cat is on the mat'.split()
>>> reference2 = 'there is a cat on the mat'.split()
>>> hypothesis1 = 'the the the the the the the'.split()
>>> references = [reference1, reference2]
>>> float(modified_precision(references, hypothesis1, n=1)) # doctest: +ELLIPSIS
0.2857...
In the modified n-gram precision, a reference word will be considered
exhausted after a matching hypothesis word is identified, e.g.
>>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'military', 'will',
... 'forever', 'heed', 'Party', 'commands']
>>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'military', 'forces', 'always',
... 'being', 'under', 'the', 'command', 'of', 'the',
... 'Party']
>>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'army', 'always', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'party']
>>> hypothesis = 'of the'.split()
>>> references = [reference1, reference2, reference3]
>>> float(modified_precision(references, hypothesis, n=1))
1.0
>>> float(modified_precision(references, hypothesis, n=2))
1.0
An example of a normal machine translation hypothesis:
>>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'military', 'always',
... 'obeys', 'the', 'commands', 'of', 'the', 'party']
>>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops',
... 'forever', 'hearing', 'the', 'activity', 'guidebook',
... 'that', 'party', 'direct']
>>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'military', 'will',
... 'forever', 'heed', 'Party', 'commands']
>>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'military', 'forces', 'always',
... 'being', 'under', 'the', 'command', 'of', 'the',
... 'Party']
>>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'army', 'always', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'party']
>>> references = [reference1, reference2, reference3]
>>> float(modified_precision(references, hypothesis1, n=1)) # doctest: +ELLIPSIS
0.9444...
>>> float(modified_precision(references, hypothesis2, n=1)) # doctest: +ELLIPSIS
0.5714...
>>> float(modified_precision(references, hypothesis1, n=2)) # doctest: +ELLIPSIS
0.5882352941176471
>>> float(modified_precision(references, hypothesis2, n=2)) # doctest: +ELLIPSIS
0.07692...
    :param reference_max_counts: Per ngram order, the maximum reference count of each ngram.
    :type reference_max_counts: list(dict)
:param hypothesis: A hypothesis translation.
:type hypothesis: list(str)
:param n: The ngram order.
:type n: int
:return: BLEU's modified precision for the nth order ngram.
:rtype: Fraction
"""
# Extracts all ngrams in hypothesis
# Set an empty Counter if hypothesis is empty.
counts = Counter(ngrams(hypothesis, n)) if len(hypothesis) >= n else Counter()
# Extract a union of references' counts.
# max_counts = reduce(or_, [Counter(ngrams(ref, n)) for ref in references])
max_counts = reference_max_counts[n - 1]
# Assigns the intersection between hypothesis and references' counts.
clipped_counts = {
ngram: min(count, max_counts.get(ngram, 0)) for ngram, count in counts.items()
}
numerator = sum(clipped_counts.values())
# Ensures that denominator is minimum 1 to avoid ZeroDivisionError.
# Usually this happens when the ngram order is > len(reference).
denominator = max(1, sum(counts.values()))
return Fraction(numerator, denominator, _normalize=False)
def closest_ref_length(ref_lens, hyp_len):
"""
This function finds the reference that is the closest length to the
hypothesis. The closest reference length is referred to as *r* variable
from the brevity penalty formula in Papineni et. al. (2002)
    :param ref_lens: The lengths of the reference translations.
    :type ref_lens: list(int)
:param hyp_len: The length of the hypothesis.
:type hyp_len: int
:return: The length of the reference that's closest to the hypothesis.
:rtype: int
"""
closest_ref_len = min(
ref_lens, key=lambda ref_len: (abs(ref_len - hyp_len), ref_len)
)
return closest_ref_len
def brevity_penalty(closest_ref_len, hyp_len):
"""
Calculate brevity penalty.
As the modified n-gram precision still has the problem from the short
length sentence, brevity penalty is used to modify the overall BLEU
score according to length.
An example from the paper. There are three references with length 12, 15
and 17. And a concise hypothesis of the length 12. The brevity penalty is 1.
>>> reference1 = list('aaaaaaaaaaaa') # i.e. ['a'] * 12
>>> reference2 = list('aaaaaaaaaaaaaaa') # i.e. ['a'] * 15
>>> reference3 = list('aaaaaaaaaaaaaaaaa') # i.e. ['a'] * 17
>>> hypothesis = list('aaaaaaaaaaaa') # i.e. ['a'] * 12
>>> references = [reference1, reference2, reference3]
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> brevity_penalty(closest_ref_len, hyp_len)
1.0
In case a hypothesis translation is shorter than the references, penalty is
applied.
>>> references = [['a'] * 28, ['a'] * 28]
>>> hypothesis = ['a'] * 12
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> brevity_penalty(closest_ref_len, hyp_len)
0.2635971381157267
The length of the closest reference is used to compute the penalty. If the
length of a hypothesis is 12, and the reference lengths are 13 and 2, the
penalty is applied because the hypothesis length (12) is less then the
closest reference length (13).
>>> references = [['a'] * 13, ['a'] * 2]
>>> hypothesis = ['a'] * 12
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> brevity_penalty(closest_ref_len, hyp_len) # doctest: +ELLIPSIS
0.9200...
The brevity penalty doesn't depend on reference order. More importantly,
when two reference sentences are at the same distance, the shortest
reference sentence length is used.
>>> references = [['a'] * 13, ['a'] * 11]
>>> hypothesis = ['a'] * 12
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> bp1 = brevity_penalty(closest_ref_len, hyp_len)
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(reversed(references), hyp_len)
>>> bp2 = brevity_penalty(closest_ref_len, hyp_len)
>>> bp1 == bp2 == 1
True
A test example from mteval-v13a.pl (starting from the line 705):
>>> references = [['a'] * 11, ['a'] * 8]
>>> hypothesis = ['a'] * 7
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> brevity_penalty(closest_ref_len, hyp_len) # doctest: +ELLIPSIS
0.8668...
>>> references = [['a'] * 11, ['a'] * 8, ['a'] * 6, ['a'] * 7]
>>> hypothesis = ['a'] * 7
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> brevity_penalty(closest_ref_len, hyp_len)
1.0
:param hyp_len: The length of the hypothesis for a single sentence OR the
sum of all the hypotheses' lengths for a corpus
:type hyp_len: int
:param closest_ref_len: The length of the closest reference for a single
hypothesis OR the sum of all the closest references for every hypotheses.
:type closest_ref_len: int
:return: BLEU's brevity penalty.
:rtype: float
"""
if hyp_len > closest_ref_len:
return 1
# If hypothesis is empty, brevity penalty = 0 should result in BLEU = 0.0
elif hyp_len == 0:
return 0
else:
return math.exp(1 - closest_ref_len / hyp_len)
class Bleu(): # this class speedup computation when reference is same for multisample
# Base on https://www.nltk.org/_modules/nltk/translate/bleu_score.html
def __init__(self, references, weights=np.ones(3) / 3., smoothing_function=SmoothingFunction().method1,
auto_reweight=False, process_num=None, other_instance=None):
self.references = references
self.weights = weights
self.smoothing_function = smoothing_function
self.auto_reweight = auto_reweight
self.max_n = len(weights)
if process_num is None:
self.process_num = os.cpu_count()
else:
self.process_num = process_num
print('bleu{} init!'.format(self.max_n))
if other_instance is None:
self.ref_lens = list(len(reference) for reference in references)
self.references_ngrams = [get_ngrams(references, n + 1) for n in range(self.max_n)]
self.references_counts = [[Counter(l) for l in self.references_ngrams[n]] for n in range(self.max_n)]
self.reference_max_counts = [self.get_reference_max_counts(n) for n in range(self.max_n)]
else:
assert other_instance.max_n >= self.max_n, 'invalid cache!'
assert isinstance(other_instance, Bleu), 'invalid cache!'
ref_lens, \
references_ngrams, \
references_counts, \
reference_max_counts = other_instance.get_cached_fields()
self.ref_lens = ref_lens
self.references_ngrams = references_ngrams[:self.max_n]
self.references_counts = references_counts[:self.max_n]
self.reference_max_counts = reference_max_counts[:self.max_n]
def get_cached_fields(self):
return self.ref_lens, \
self.references_ngrams, \
self.references_counts, \
self.reference_max_counts
def get_score(self, samples, compute_in_parallel=True):
print('evaluating bleu {}!'.format(self.max_n))
if compute_in_parallel:
return Threader(samples, self.tmp_get_score, self.process_num, show_tqdm=False).run()
return [self.tmp_get_score(sample) for sample in samples]
def tmp_get_score(self, item):
return corpus_bleu(self.references, item,
self.reference_max_counts, self.ref_lens, self.weights,
self.smoothing_function, self.auto_reweight)
def get_reference_max_counts(self, n):
print('calculating max counts n = %d!' % ((n + 1),))
ngram_keys = list(set([x for y in self.references_ngrams[n] for x in y]))
return dict(zip(ngram_keys, Threader(ngram_keys, self.tmp_get_reference_max_counts, show_tqdm=True).run()))
# return dict(zip(ngram_keys, multi_run(ngram_keys, self.tmp_get_reference_max_counts, show_tqdm=True)))
def tmp_get_reference_max_counts(self, ngram):
counts = [x.get(ngram, 0) for x in self.references_counts[len(ngram) - 1]]
return np.max(counts)
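# Minimal usage sketch (token lists below are illustrative): the references are tokenized and
# counted once at construction time, then any number of candidate samples can be scored
# against the cached ngram statistics.
#
#     refs = [['a', 'cat', 'sits', 'on', 'the', 'mat'],
#             ['there', 'is', 'a', 'cat', 'on', 'the', 'mat']]
#     bleu3 = Bleu(refs, weights=np.ones(3) / 3.)
#     scores = bleu3.get_score([['a', 'cat', 'is', 'on', 'the', 'mat']], compute_in_parallel=False)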
| 44.19802 | 115 | 0.620128 | 3,041 | 0.170307 | 0 | 0 | 0 | 0 | 0 | 0 | 12,547 | 0.702677 |
2975d99cfa9a3c77e63e816ac890c17807b18244 | 1,880 | py | Python | setup.py | gspracklin/bwtools | 08159b86738fed978ef442a60032cf8b00b5635a | [
"MIT"
]
| 4 | 2021-07-23T02:49:54.000Z | 2022-03-22T19:56:13.000Z | setup.py | gspracklin/bwtools | 08159b86738fed978ef442a60032cf8b00b5635a | [
"MIT"
]
| 1 | 2020-08-24T19:03:46.000Z | 2020-08-24T19:03:46.000Z | setup.py | gspracklin/bwtools | 08159b86738fed978ef442a60032cf8b00b5635a | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import io
import os
import re
from setuptools import setup, find_packages
# classifiers = """\
# Development Status :: 4 - Beta
# Programming Language :: Python
# Programming Language :: Python :: 3
# Programming Language :: Python :: 3.4
# Programming Language :: Python :: 3.5
# Programming Language :: Python :: 3.6
# Programming Language :: Python :: 3.7
# Programming Language :: Python :: 3.8
# """
def _read(*parts, **kwargs):
filepath = os.path.join(os.path.dirname(__file__), *parts)
encoding = kwargs.pop('encoding', 'utf-8')
with io.open(filepath, encoding=encoding) as fh:
text = fh.read()
return text
def get_version():
version = re.search(
r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
_read('bwtools', '__init__.py'),
re.MULTILINE).group(1)
return version
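# Illustrative note: the regex above expects a line like
#     __version__ = '0.1.0'
# near the top of bwtools/__init__.py, and get_version() returns the quoted value ('0.1.0').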
def get_long_description():
return _read('README.md')
def get_requirements(path):
content = _read(path)
return [
req
for req in content.split("\n")
if req != '' and not req.startswith('#')
]
install_requires = get_requirements('requirements.txt')
packages = find_packages()
setup(
name='bwtools',
author='George Spracklin',
author_email='@mit.edu',
version=get_version(),
license='MIT',
description='tools for bigwigs',
long_description=get_long_description(),
long_description_content_type='text/markdown',
keywords=['genomics', 'bioinformatics', 'Hi-C', 'analysis', 'cooler'],
url='https://github.com/gspracklin/bwtools',
zip_safe=False,
# classifiers=[s.strip() for s in classifiers.split('\n') if s],
packages=packages,
install_requires=install_requires,
entry_points={
'console_scripts': [
'bwtools = bwtools.cli:cli',
]
}
) | 24.736842 | 74 | 0.623404 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 788 | 0.419149 |
2976116c00e8fbc7eadad3295e61cb99c5280023 | 3,018 | py | Python | hoogberta/encoder.py | KateSatida/HoogBERTa_SuperAI2 | e903054bc752a50c391ab610507fdeccc4f5d482 | [
"MIT"
]
| null | null | null | hoogberta/encoder.py | KateSatida/HoogBERTa_SuperAI2 | e903054bc752a50c391ab610507fdeccc4f5d482 | [
"MIT"
]
| null | null | null | hoogberta/encoder.py | KateSatida/HoogBERTa_SuperAI2 | e903054bc752a50c391ab610507fdeccc4f5d482 | [
"MIT"
]
| null | null | null | from .trainer.models import MultiTaskTagger
from .trainer.utils import load_dictionaries,Config
from .trainer.tasks.multitask_tagging import MultiTaskTaggingModule
from fairseq.data.data_utils import collate_tokens
from attacut import tokenize
class HoogBERTaEncoder(object):
def __init__(self,layer=12,cuda=False,base_path="."):
args = Config(base_path=base_path)
self.base_path = base_path
self.pos_dict, self.ne_dict, self.sent_dict = load_dictionaries(self.base_path)
self.model = MultiTaskTagger(args,[len(self.pos_dict), len(self.ne_dict), len(self.sent_dict)])
if cuda == True:
self.model = self.model.cuda()
def extract_features(self,sentence):
all_sent = []
sentences = sentence.split(" ")
for sent in sentences:
all_sent.append(" ".join(tokenize(sent)).replace("_","[!und:]"))
sentence = " _ ".join(all_sent)
tokens = self.model.bert.encode(sentence).unsqueeze(0)
all_layers = self.model.bert.extract_features(tokens, return_all_hiddens=True)
return tokens[0], all_layers[-1][0]
def extract_features_batch(self,sentenceL):
inputList = []
for sentX in sentenceL:
sentences = sentX.split(" ")
all_sent = []
for sent in sentences:
all_sent.append(" ".join(tokenize(sent)).replace("_","[!und:]"))
sentence = " _ ".join(all_sent)
inputList.append(sentence)
batch = collate_tokens([self.model.bert.encode(sent) for sent in inputList], pad_idx=1)
#tokens = self.model.bert.encode(inputList)
return self.extract_features_from_tensor(batch)
def extract_features_from_tensor(self,batch):
all_layers = self.model.bert.extract_features(batch, return_all_hiddens=True)
return batch, all_layers[-1]
def extract_features2(self,sentence):
# all_sent = []
# sentences = sentence.split(" ")
# for sent in sentences:
# all_sent.append(" ".join(tokenize(sent)).replace("_","[!und:]"))
# sentence = " _ ".join(all_sent)
tokens = self.model.bert.encode(sentence).unsqueeze(0)
all_layers = self.model.bert.extract_features(tokens, return_all_hiddens=True)
return tokens[0], all_layers[-1][0]
def extract_features_batch2(self,sentenceL):
# inputList = []
# for sentX in sentenceL:
# sentences = sentX.split(" ")
# all_sent = []
# for sent in sentences:
# all_sent.append(" ".join(tokenize(sent)).replace("_","[!und:]"))
# sentence = " _ ".join(all_sent)
# inputList.append(sentence)
batch = collate_tokens([self.model.bert.encode(sent) for sent in sentenceL], pad_idx=1)
#tokens = self.model.bert.encode(inputList)
return self.extract_features_from_tensor(batch)
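# Minimal usage sketch (base_path must point at a directory holding the pretrained HoogBERTa
# checkpoint and dictionaries; the Thai sentences are illustrative):
#
#     encoder = HoogBERTaEncoder(cuda=False, base_path="models")
#     tokens, features = encoder.extract_features("วันนี้อากาศดี")
#     batch_tokens, batch_features = encoder.extract_features_batch(["วันนี้อากาศดี", "สวัสดี"])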
| 38.202532 | 103 | 0.61829 | 2,771 | 0.918158 | 0 | 0 | 0 | 0 | 0 | 0 | 575 | 0.190524 |
29765d30f6c468ab494ba5307c14ec5427857135 | 146 | py | Python | computer_equipment_control/model/__init__.py | hugonp/Proyecto-Triples | ff4c77e37d49a1674d17f3137ef06b5c56ab9c29 | [
"Apache-2.0"
]
| null | null | null | computer_equipment_control/model/__init__.py | hugonp/Proyecto-Triples | ff4c77e37d49a1674d17f3137ef06b5c56ab9c29 | [
"Apache-2.0"
]
| null | null | null | computer_equipment_control/model/__init__.py | hugonp/Proyecto-Triples | ff4c77e37d49a1674d17f3137ef06b5c56ab9c29 | [
"Apache-2.0"
]
| null | null | null | from . import usuario
from . import equipo_cambio
from . import equipo_computo
from . import sucursales
from . import depto
from . import usuario
| 20.857143 | 28 | 0.794521 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
297664fa1ad9becfe5e46a57677c012794d49c7e | 697 | py | Python | gui/window.py | frlnx/melee | db2670453771c6d3635e97e28bb8667b14643b05 | [
"CC0-1.0"
]
| null | null | null | gui/window.py | frlnx/melee | db2670453771c6d3635e97e28bb8667b14643b05 | [
"CC0-1.0"
]
| null | null | null | gui/window.py | frlnx/melee | db2670453771c6d3635e97e28bb8667b14643b05 | [
"CC0-1.0"
]
| null | null | null | from pyglet.window import Window as PygletWindow
from .controllers import ComponentContainerController
from .models.container import ComponentContainerModel
from .views import OrthoViewport
class Window(PygletWindow):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._model = ComponentContainerModel([], 0, self.width, 0, self.height)
self._controller = ComponentContainerController(self._model)
self.push_handlers(self._controller)
self._view = OrthoViewport(self._model)
def add_component(self, model):
self._model.add_component(model)
def on_draw(self):
self.clear()
self._view.draw()
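# Minimal usage sketch (the component model is omitted here; anything accepted by
# ComponentContainerModel.add_component would do):
#
#     import pyglet
#     window = Window(width=800, height=600, caption="melee")
#     pyglet.app.run()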
| 30.304348 | 80 | 0.714491 | 503 | 0.721664 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2979426aee0cb9d3f7a35f048ea9d20f5f7f25ea | 1,806 | py | Python | conductor_calculator.py | aj83854/project-lightning-rod | 77867d6c4ee30650023f3ec2a8318edd92530264 | [
"MIT"
]
| null | null | null | conductor_calculator.py | aj83854/project-lightning-rod | 77867d6c4ee30650023f3ec2a8318edd92530264 | [
"MIT"
]
| null | null | null | conductor_calculator.py | aj83854/project-lightning-rod | 77867d6c4ee30650023f3ec2a8318edd92530264 | [
"MIT"
]
| null | null | null | from pyconductor import load_test_values, calculate_conductance
def conductance_calc():
preloaded_dict = load_test_values()
while preloaded_dict:
print(
"[1] - Show currently available materials in Material Dictionary\n"
"[2] - Add a material (will not be saved upon restart)\n"
"[3] - Quit\n"
"To test the conductive properties of a material, simply type in its name.\n"
"Otherwise, type the corresponding number for an option above.\n"
)
main_prompt = input(">>> ").lower()
if main_prompt == "1":
print(f"\nCurrently contains the following materials:\n{preloaded_dict.keys()}\n")
elif main_prompt == "2":
preloaded_dict.addmat()
elif main_prompt == "3":
quit()
else:
try:
calculate_conductance(preloaded_dict[main_prompt])
while True:
again_prompt = input(
"Would you like to try another calculation? [Y]es or [N]o: ").lower()
if again_prompt in ("y", "yes"):
break
elif again_prompt in ("n", "no"):
print("\nGoodbye!\n")
quit()
except KeyError:
if main_prompt == "":
print("\nNo material specified.\nPlease enter a valid material name "
"listed in option [1], or use option [2] to add your own.\n")
else: # TODO: add logic handling whether user wants to add missing material
print(f"\n{main_prompt} is not a valid material or command!\n")
else:
pass
if __name__ == "__main__":
conductance_calc()
| 42 | 94 | 0.530454 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 719 | 0.398117 |
297a9dbff855963c67a53faea767533cf768f0af | 2,073 | py | Python | compiler/python_compiler/engines/py3_8/Variable.py | unknowncoder05/app-architect | 083278e1386562797614f320649ca85d1c44e009 | [
"MIT"
]
| 3 | 2021-08-12T12:59:27.000Z | 2021-08-29T15:30:49.000Z | compiler/python_compiler/engines/py3_8/Variable.py | unknowncoder05/app-architect | 083278e1386562797614f320649ca85d1c44e009 | [
"MIT"
]
| null | null | null | compiler/python_compiler/engines/py3_8/Variable.py | unknowncoder05/app-architect | 083278e1386562797614f320649ca85d1c44e009 | [
"MIT"
]
| null | null | null | from .Fragment import Fragment
from utils.flags import *
from utils.CustomLogging import CustomLogging
#from python_compiler.engines.utils.types import get_python_type_str, ANY
DEFAULT_ASSIGN_OPERATOR = "="
ASSIGN_OPERATORS = {
"=":"=",
"+=":"+=",
"-=":"-=",
"*=":"*=",
"/=":"/=",
"//=":"//=",
"%=":"%=",
"**=":"**=",
"&=":"&=",
"|=":"|=",
"^=":"^=",
">>=":">>=",
"<<=":"<<=",
}
def get_variable_name(fragment) -> str:
if not (variable_name := fragment.get(ATTRIBUTE_VARIABLE_NAME)):
CustomLogging.critical(f"Fragment type variable '{ATTRIBUTE_VARIABLE_NAME}' attribute does not exist")
return variable_name
def get_variable_type(fragment) -> str:
if not (variable_type := fragment.get(ATTRIBUTE_VARIABLE_TYPE)):
variable_type = ""
else:
variable_type = ":"+variable_type
return variable_type
def get_variable_assign_operator(fragment) -> str:
if not (variable_assign_operator := fragment.get(ATTRIBUTE_VARIABLE_ASSIGN_OPERATOR)):
variable_assign_operator = DEFAULT_ASSIGN_OPERATOR
return ASSIGN_OPERATORS.get(variable_assign_operator)
def get_variable_expression(fragment) -> str:
if not (variable_expression := fragment.get(ATTRIBUTE_VARIABLE_EXPRESSION)):
CustomLogging.critical(f"Fragment type variable '{ATTRIBUTE_VARIABLE_EXPRESSION}' attribute does not exist")
return variable_expression
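# Illustrative sketch (the ATTRIBUTE_* constants come from utils.flags and their actual string
# values are not shown here, so the keys below are placeholders): a blueprint such as
#
#     {"variable_name": "total", "variable_type": "int",
#      "assign_operator": "=", "expression": "price * qty"}
#
# would compile to the single line `total:int = price * qty`.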
class Variable(Fragment):
name:str
variable_type:str
assign_operator:str
expression:str
def __init__(self, blueprint, *args, **kwargs) -> None:
super().__init__(blueprint, *args, **kwargs)
self.name = get_variable_name(blueprint)
self.variable_type = get_variable_type(blueprint)
self.assign_operator = get_variable_assign_operator(blueprint)
self.expression = get_variable_expression(blueprint)
def compile(self)->str:
fragment_build = ""
fragment_build = f"{self.name}{self.variable_type} {self.assign_operator} {self.expression}"
return fragment_build | 33.983607 | 116 | 0.675832 | 642 | 0.309696 | 0 | 0 | 0 | 0 | 0 | 0 | 430 | 0.207429 |
297b1b7d9f0a6e280fa0145471626d7d01aa1943 | 266 | py | Python | test/win/vs-macros/test_exists.py | chlorm-forks/gyp | a8921fcaab1a18c8cf7e4ab09ceb940e336918ec | [
"BSD-3-Clause"
]
| 77 | 2018-07-01T15:55:34.000Z | 2022-03-30T09:16:54.000Z | test/win/vs-macros/test_exists.py | chlorm-forks/gyp | a8921fcaab1a18c8cf7e4ab09ceb940e336918ec | [
"BSD-3-Clause"
]
| 116 | 2021-05-29T16:32:51.000Z | 2021-08-13T16:05:29.000Z | test/win/vs-macros/test_exists.py | chlorm-forks/gyp | a8921fcaab1a18c8cf7e4ab09ceb940e336918ec | [
"BSD-3-Clause"
]
| 53 | 2018-04-13T12:06:06.000Z | 2022-03-25T13:54:38.000Z | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
if not os.path.exists(sys.argv[1]):
raise Exception()
open(sys.argv[2], 'w').close()
| 24.181818 | 72 | 0.718045 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 156 | 0.586466 |
297bd9ae840483948b9e83e2e47fb15cc768315a | 37,440 | py | Python | Lib/site-packages/qwt/scale_draw.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
]
| null | null | null | Lib/site-packages/qwt/scale_draw.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
]
| 20 | 2021-05-03T18:02:23.000Z | 2022-03-12T12:01:04.000Z | Lib/site-packages/qwt/scale_draw.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
]
| null | null | null | # -*- coding: utf-8 -*-
#
# Licensed under the terms of the Qwt License
# Copyright (c) 2002 Uwe Rathmann, for the original C++ code
# Copyright (c) 2015 Pierre Raybaut, for the Python translation/optimization
# (see LICENSE file for more details)
"""
QwtAbstractScaleDraw
--------------------
.. autoclass:: QwtAbstractScaleDraw
:members:
QwtScaleDraw
------------
.. autoclass:: QwtScaleDraw
:members:
"""
from qwt.scale_div import QwtScaleDiv
from qwt.scale_map import QwtScaleMap
from qwt.text import QwtText
from qwt._math import qwtRadians
from qtpy.QtGui import QPalette, QFontMetrics, QTransform
from qtpy.QtCore import Qt, qFuzzyCompare, QLocale, QRectF, QPointF, QRect, QPoint
from math import ceil
import numpy as np
class QwtAbstractScaleDraw_PrivateData(object):
def __init__(self):
self.spacing = 4
self.penWidth = 0
self.minExtent = 0.0
self.components = (
QwtAbstractScaleDraw.Backbone
| QwtAbstractScaleDraw.Ticks
| QwtAbstractScaleDraw.Labels
)
self.tick_length = {
QwtScaleDiv.MinorTick: 4.0,
QwtScaleDiv.MediumTick: 6.0,
QwtScaleDiv.MajorTick: 8.0,
}
self.tick_lighter_factor = {
QwtScaleDiv.MinorTick: 100,
QwtScaleDiv.MediumTick: 100,
QwtScaleDiv.MajorTick: 100,
}
self.map = QwtScaleMap()
self.scaleDiv = QwtScaleDiv()
self.labelCache = {}
class QwtAbstractScaleDraw(object):
"""
A abstract base class for drawing scales
`QwtAbstractScaleDraw` can be used to draw linear or logarithmic scales.
After a scale division has been specified as a `QwtScaleDiv` object
using `setScaleDiv()`, the scale can be drawn with the `draw()` member.
Scale components:
* `QwtAbstractScaleDraw.Backbone`: Backbone = the line where the ticks are located
* `QwtAbstractScaleDraw.Ticks`: Ticks
* `QwtAbstractScaleDraw.Labels`: Labels
.. py:class:: QwtAbstractScaleDraw()
The range of the scale is initialized to [0, 100],
The spacing (distance between ticks and labels) is
set to 4, the tick lengths are set to 4,6 and 8 pixels
"""
# enum ScaleComponent
Backbone = 0x01
Ticks = 0x02
Labels = 0x04
def __init__(self):
self.__data = QwtAbstractScaleDraw_PrivateData()
def extent(self, font):
"""
Calculate the extent
The extent is the distance from the baseline to the outermost
pixel of the scale draw in opposite to its orientation.
It is at least minimumExtent() pixels.
:param QFont font: Font used for drawing the tick labels
:return: Number of pixels
.. seealso::
:py:meth:`setMinimumExtent()`, :py:meth:`minimumExtent()`
"""
return 0.0
def drawTick(self, painter, value, len_):
"""
Draw a tick
:param QPainter painter: Painter
:param float value: Value of the tick
:param float len: Length of the tick
.. seealso::
:py:meth:`drawBackbone()`, :py:meth:`drawLabel()`
"""
pass
def drawBackbone(self, painter):
"""
Draws the baseline of the scale
:param QPainter painter: Painter
.. seealso::
:py:meth:`drawTick()`, :py:meth:`drawLabel()`
"""
pass
def drawLabel(self, painter, value):
"""
Draws the label for a major scale tick
:param QPainter painter: Painter
:param float value: Value
.. seealso::
:py:meth:`drawTick()`, :py:meth:`drawBackbone()`
"""
pass
def enableComponent(self, component, enable):
"""
En/Disable a component of the scale
:param int component: Scale component
:param bool enable: On/Off
.. seealso::
:py:meth:`hasComponent()`
"""
if enable:
self.__data.components |= component
else:
self.__data.components &= ~component
def hasComponent(self, component):
"""
Check if a component is enabled
:param int component: Component type
:return: True, when component is enabled
.. seealso::
:py:meth:`enableComponent()`
"""
return self.__data.components & component
def setScaleDiv(self, scaleDiv):
"""
Change the scale division
:param qwt.scale_div.QwtScaleDiv scaleDiv: New scale division
"""
self.__data.scaleDiv = scaleDiv
self.__data.map.setScaleInterval(scaleDiv.lowerBound(), scaleDiv.upperBound())
self.__data.labelCache.clear()
def setTransformation(self, transformation):
"""
Change the transformation of the scale
:param qwt.transform.QwtTransform transformation: New scale transformation
"""
self.__data.map.setTransformation(transformation)
def scaleMap(self):
"""
:return: Map how to translate between scale and pixel values
"""
return self.__data.map
def scaleDiv(self):
"""
:return: scale division
"""
return self.__data.scaleDiv
def setPenWidth(self, width):
"""
Specify the width of the scale pen
:param int width: Pen width
.. seealso::
:py:meth:`penWidth()`
"""
if width < 0:
width = 0
if width != self.__data.penWidth:
self.__data.penWidth = width
def penWidth(self):
"""
:return: Scale pen width
.. seealso::
:py:meth:`setPenWidth()`
"""
return self.__data.penWidth
def draw(self, painter, palette):
"""
Draw the scale
:param QPainter painter: The painter
:param QPalette palette: Palette, text color is used for the labels, foreground color for ticks and backbone
"""
painter.save()
pen = painter.pen()
pen.setWidth(self.__data.penWidth)
pen.setCosmetic(False)
painter.setPen(pen)
if self.hasComponent(QwtAbstractScaleDraw.Labels):
painter.save()
painter.setPen(palette.color(QPalette.Text))
majorTicks = self.__data.scaleDiv.ticks(QwtScaleDiv.MajorTick)
for v in majorTicks:
if self.__data.scaleDiv.contains(v):
self.drawLabel(painter, v)
painter.restore()
if self.hasComponent(QwtAbstractScaleDraw.Ticks):
painter.save()
pen = painter.pen()
pen.setCapStyle(Qt.FlatCap)
default_color = palette.color(QPalette.WindowText)
for tickType in range(QwtScaleDiv.NTickTypes):
tickLen = self.__data.tick_length[tickType]
if tickLen <= 0.0:
continue
factor = self.__data.tick_lighter_factor[tickType]
pen.setColor(default_color.lighter(factor))
painter.setPen(pen)
ticks = self.__data.scaleDiv.ticks(tickType)
for v in ticks:
if self.__data.scaleDiv.contains(v):
self.drawTick(painter, v, tickLen)
painter.restore()
if self.hasComponent(QwtAbstractScaleDraw.Backbone):
painter.save()
pen = painter.pen()
pen.setColor(palette.color(QPalette.WindowText))
pen.setCapStyle(Qt.FlatCap)
painter.setPen(pen)
self.drawBackbone(painter)
painter.restore()
painter.restore()
def setSpacing(self, spacing):
"""
Set the spacing between tick and labels
The spacing is the distance between ticks and labels.
The default spacing is 4 pixels.
:param float spacing: Spacing
.. seealso::
:py:meth:`spacing()`
"""
if spacing < 0:
spacing = 0
self.__data.spacing = spacing
def spacing(self):
"""
Get the spacing
The spacing is the distance between ticks and labels.
The default spacing is 4 pixels.
:return: Spacing
.. seealso::
:py:meth:`setSpacing()`
"""
return self.__data.spacing
def setMinimumExtent(self, minExtent):
"""
Set a minimum for the extent
The extent is calculated from the components of the
scale draw. In situations where the labels are
changing and the layout depends on the extent (e.g. scrolling
a scale), setting an upper limit as minimum extent will
avoid jumps of the layout.
:param float minExtent: Minimum extent
.. seealso::
:py:meth:`extent()`, :py:meth:`minimumExtent()`
"""
if minExtent < 0.0:
minExtent = 0.0
self.__data.minExtent = minExtent
def minimumExtent(self):
"""
Get the minimum extent
:return: Minimum extent
.. seealso::
:py:meth:`extent()`, :py:meth:`setMinimumExtent()`
"""
return self.__data.minExtent
def setTickLength(self, tick_type, length):
"""
Set the length of the ticks
:param int tick_type: Tick type
:param float length: New length
.. warning::
the length is limited to [0..1000]
"""
if tick_type not in self.__data.tick_length:
raise ValueError("Invalid tick type: %r" % tick_type)
self.__data.tick_length[tick_type] = min([1000.0, max([0.0, length])])
def tickLength(self, tick_type):
"""
:param int tick_type: Tick type
:return: Length of the ticks
.. seealso::
:py:meth:`setTickLength()`, :py:meth:`maxTickLength()`
"""
if tick_type not in self.__data.tick_length:
raise ValueError("Invalid tick type: %r" % tick_type)
return self.__data.tick_length[tick_type]
def maxTickLength(self):
"""
:return: Length of the longest tick
Useful for layout calculations
.. seealso::
:py:meth:`tickLength()`, :py:meth:`setTickLength()`
"""
return max([0.0] + list(self.__data.tick_length.values()))
def setTickLighterFactor(self, tick_type, factor):
"""
Set the color lighter factor of the ticks
:param int tick_type: Tick type
:param int factor: New factor
"""
if tick_type not in self.__data.tick_length:
raise ValueError("Invalid tick type: %r" % tick_type)
self.__data.tick_lighter_factor[tick_type] = min([0, factor])
def tickLighterFactor(self, tick_type):
"""
:param int tick_type: Tick type
:return: Color lighter factor of the ticks
.. seealso::
:py:meth:`setTickLighterFactor()`
"""
if tick_type not in self.__data.tick_length:
raise ValueError("Invalid tick type: %r" % tick_type)
return self.__data.tick_lighter_factor[tick_type]
def label(self, value):
"""
Convert a value into its representing label
The value is converted to a plain text using
`QLocale().toString(value)`.
This method is often overloaded by applications to have individual
labels.
:param float value: Value
:return: Label string
"""
return QLocale().toString(value)
def tickLabel(self, font, value):
"""
Convert a value into its representing label and cache it.
The conversion between value and label is called very often
in the layout and painting code. Unfortunately the
calculation of the label sizes might be slow (really slow
for rich text in Qt4), so it's necessary to cache the labels.
:param QFont font: Font
:param float value: Value
:return: Tick label
"""
lbl = self.__data.labelCache.get(value)
if lbl is None:
lbl = QwtText(self.label(value))
lbl.setRenderFlags(0)
lbl.setLayoutAttribute(QwtText.MinimumLayout)
lbl.textSize(font)
self.__data.labelCache[value] = lbl
return lbl
def invalidateCache(self):
"""
Invalidate the cache used by `tickLabel()`
The cache is invalidated, when a new `QwtScaleDiv` is set. If
the labels need to be changed. while the same `QwtScaleDiv` is set,
`invalidateCache()` needs to be called manually.
"""
self.__data.labelCache.clear()
class QwtScaleDraw_PrivateData(object):
def __init__(self):
self.len = 0
self.alignment = QwtScaleDraw.BottomScale
self.labelAlignment = 0
self.labelRotation = 0.0
self.labelAutoSize = True
self.pos = QPointF()
class QwtScaleDraw(QwtAbstractScaleDraw):
"""
A class for drawing scales
QwtScaleDraw can be used to draw linear or logarithmic scales.
A scale has a position, an alignment and a length, which can be specified.
The labels can be rotated and aligned
to the ticks using `setLabelRotation()` and `setLabelAlignment()`.
After a scale division has been specified as a QwtScaleDiv object
using `QwtAbstractScaleDraw.setScaleDiv(scaleDiv)`,
the scale can be drawn with the `QwtAbstractScaleDraw.draw()` member.
Alignment of the scale draw:
* `QwtScaleDraw.BottomScale`: The scale is below
* `QwtScaleDraw.TopScale`: The scale is above
* `QwtScaleDraw.LeftScale`: The scale is left
* `QwtScaleDraw.RightScale`: The scale is right
.. py:class:: QwtScaleDraw()
The range of the scale is initialized to [0, 100],
The position is at (0, 0) with a length of 100.
The orientation is `QwtAbstractScaleDraw.Bottom`.
"""
# enum Alignment
BottomScale, TopScale, LeftScale, RightScale = list(range(4))
Flags = (
Qt.AlignHCenter | Qt.AlignBottom, # BottomScale
Qt.AlignHCenter | Qt.AlignTop, # TopScale
Qt.AlignLeft | Qt.AlignVCenter, # LeftScale
Qt.AlignRight | Qt.AlignVCenter, # RightScale
)
def __init__(self):
QwtAbstractScaleDraw.__init__(self)
self.__data = QwtScaleDraw_PrivateData()
self.setLength(100)
self._max_label_sizes = {}
def alignment(self):
"""
:return: Alignment of the scale
.. seealso::
:py:meth:`setAlignment()`
"""
return self.__data.alignment
def setAlignment(self, align):
"""
Set the alignment of the scale
:param int align: Alignment of the scale
Alignment of the scale draw:
* `QwtScaleDraw.BottomScale`: The scale is below
* `QwtScaleDraw.TopScale`: The scale is above
* `QwtScaleDraw.LeftScale`: The scale is left
* `QwtScaleDraw.RightScale`: The scale is right
The default alignment is `QwtScaleDraw.BottomScale`
.. seealso::
:py:meth:`alignment()`
"""
self.__data.alignment = align
def orientation(self):
"""
Return the orientation
TopScale, BottomScale are horizontal (`Qt.Horizontal`) scales,
LeftScale, RightScale are vertical (`Qt.Vertical`) scales.
:return: Orientation of the scale
.. seealso::
:py:meth:`alignment()`
"""
if self.__data.alignment in (self.TopScale, self.BottomScale):
return Qt.Horizontal
elif self.__data.alignment in (self.LeftScale, self.RightScale):
return Qt.Vertical
def getBorderDistHint(self, font):
"""
Determine the minimum border distance
This member function returns the minimum space
needed to draw the mark labels at the scale's endpoints.
:param QFont font: Font
:return: tuple `(start, end)`
Returned tuple:
* start: Start border distance
* end: End border distance
"""
start, end = 0, 1.0
if not self.hasComponent(QwtAbstractScaleDraw.Labels):
return start, end
ticks = self.scaleDiv().ticks(QwtScaleDiv.MajorTick)
if len(ticks) == 0:
return start, end
minTick = ticks[0]
minPos = self.scaleMap().transform(minTick)
maxTick = minTick
maxPos = minPos
for tick in ticks:
tickPos = self.scaleMap().transform(tick)
if tickPos < minPos:
minTick = tick
minPos = tickPos
if tickPos > self.scaleMap().transform(maxTick):
maxTick = tick
maxPos = tickPos
s = 0.0
e = 0.0
if self.orientation() == Qt.Vertical:
s = -self.labelRect(font, minTick).top()
s -= abs(minPos - round(self.scaleMap().p2()))
e = self.labelRect(font, maxTick).bottom()
e -= abs(maxPos - self.scaleMap().p1())
else:
s = -self.labelRect(font, minTick).left()
s -= abs(minPos - self.scaleMap().p1())
e = self.labelRect(font, maxTick).right()
e -= abs(maxPos - self.scaleMap().p2())
return max(ceil(s), 0), max(ceil(e), 0)
def minLabelDist(self, font):
"""
Determine the minimum distance between two labels that is necessary
so that the texts don't overlap.
:param QFont font: Font
:return: The maximum width of a label
.. seealso::
:py:meth:`getBorderDistHint()`
"""
if not self.hasComponent(QwtAbstractScaleDraw.Labels):
return 0
ticks = self.scaleDiv().ticks(QwtScaleDiv.MajorTick)
if not ticks:
return 0
fm = QFontMetrics(font)
vertical = self.orientation() == Qt.Vertical
bRect1 = QRectF()
bRect2 = self.labelRect(font, ticks[0])
if vertical:
bRect2.setRect(-bRect2.bottom(), 0.0, bRect2.height(), bRect2.width())
maxDist = 0.0
for tick in ticks:
bRect1 = bRect2
bRect2 = self.labelRect(font, tick)
if vertical:
bRect2.setRect(-bRect2.bottom(), 0.0, bRect2.height(), bRect2.width())
dist = fm.leading()
if bRect1.right() > 0:
dist += bRect1.right()
if bRect2.left() < 0:
dist += -bRect2.left()
if dist > maxDist:
maxDist = dist
angle = qwtRadians(self.labelRotation())
if vertical:
angle += np.pi / 2
sinA = np.sin(angle)
if qFuzzyCompare(sinA + 1.0, 1.0):
return np.ceil(maxDist)
fmHeight = fm.ascent() - 2
labelDist = fmHeight / np.sin(angle) * np.cos(angle)
if labelDist < 0:
labelDist = -labelDist
if labelDist > maxDist:
labelDist = maxDist
if labelDist < fmHeight:
labelDist = fmHeight
return np.ceil(labelDist)
def extent(self, font):
"""
Calculate the width/height that is needed for a
vertical/horizontal scale.
The extent is calculated from the pen width of the backbone,
the major tick length, the spacing and the maximum width/height
of the labels.
:param QFont font: Font used for painting the labels
:return: Extent
.. seealso::
:py:meth:`minLength()`
"""
d = 0.0
if self.hasComponent(QwtAbstractScaleDraw.Labels):
if self.orientation() == Qt.Vertical:
d = self.maxLabelWidth(font)
else:
d = self.maxLabelHeight(font)
if d > 0:
d += self.spacing()
if self.hasComponent(QwtAbstractScaleDraw.Ticks):
d += self.maxTickLength()
if self.hasComponent(QwtAbstractScaleDraw.Backbone):
pw = max([1, self.penWidth()])
d += pw
return max([d, self.minimumExtent()])
def minLength(self, font):
"""
Calculate the minimum length that is needed to draw the scale
:param QFont font: Font used for painting the labels
:return: Minimum length that is needed to draw the scale
.. seealso::
:py:meth:`extent()`
"""
startDist, endDist = self.getBorderDistHint(font)
sd = self.scaleDiv()
minorCount = len(sd.ticks(QwtScaleDiv.MinorTick)) + len(
sd.ticks(QwtScaleDiv.MediumTick)
)
majorCount = len(sd.ticks(QwtScaleDiv.MajorTick))
lengthForLabels = 0
if self.hasComponent(QwtAbstractScaleDraw.Labels):
lengthForLabels = self.minLabelDist(font) * majorCount
lengthForTicks = 0
if self.hasComponent(QwtAbstractScaleDraw.Ticks):
pw = max([1, self.penWidth()])
lengthForTicks = np.ceil((majorCount + minorCount) * (pw + 1.0))
return startDist + endDist + max([lengthForLabels, lengthForTicks])
def labelPosition(self, value):
"""
Find the position, where to paint a label
The position has a distance that depends on the length of the ticks
in direction of the `alignment()`.
:param float value: Value
:return: Position, where to paint a label
"""
tval = self.scaleMap().transform(value)
dist = self.spacing()
if self.hasComponent(QwtAbstractScaleDraw.Backbone):
dist += max([1, self.penWidth()])
if self.hasComponent(QwtAbstractScaleDraw.Ticks):
dist += self.tickLength(QwtScaleDiv.MajorTick)
px = 0
py = 0
if self.alignment() == self.RightScale:
px = self.__data.pos.x() + dist
py = tval
elif self.alignment() == self.LeftScale:
px = self.__data.pos.x() - dist
py = tval
elif self.alignment() == self.BottomScale:
px = tval
py = self.__data.pos.y() + dist
elif self.alignment() == self.TopScale:
px = tval
py = self.__data.pos.y() - dist
return QPointF(px, py)
def drawTick(self, painter, value, len_):
"""
Draw a tick
:param QPainter painter: Painter
:param float value: Value of the tick
:param float len: Length of the tick
.. seealso::
:py:meth:`drawBackbone()`, :py:meth:`drawLabel()`
"""
if len_ <= 0:
return
pos = self.__data.pos
tval = self.scaleMap().transform(value)
pw = self.penWidth()
a = 0
if self.alignment() == self.LeftScale:
x1 = pos.x() + a
x2 = pos.x() + a - pw - len_
painter.drawLine(x1, tval, x2, tval)
elif self.alignment() == self.RightScale:
x1 = pos.x()
x2 = pos.x() + pw + len_
painter.drawLine(x1, tval, x2, tval)
elif self.alignment() == self.BottomScale:
y1 = pos.y()
y2 = pos.y() + pw + len_
painter.drawLine(tval, y1, tval, y2)
elif self.alignment() == self.TopScale:
y1 = pos.y() + a
y2 = pos.y() - pw - len_ + a
painter.drawLine(tval, y1, tval, y2)
def drawBackbone(self, painter):
"""
Draws the baseline of the scale
:param QPainter painter: Painter
.. seealso::
:py:meth:`drawTick()`, :py:meth:`drawLabel()`
"""
pos = self.__data.pos
len_ = self.__data.len
off = 0.5 * self.penWidth()
if self.alignment() == self.LeftScale:
x = pos.x() - off
painter.drawLine(x, pos.y(), x, pos.y() + len_)
elif self.alignment() == self.RightScale:
x = pos.x() + off
painter.drawLine(x, pos.y(), x, pos.y() + len_)
elif self.alignment() == self.TopScale:
y = pos.y() - off
painter.drawLine(pos.x(), y, pos.x() + len_, y)
elif self.alignment() == self.BottomScale:
y = pos.y() + off
painter.drawLine(pos.x(), y, pos.x() + len_, y)
def move(self, *args):
"""
Move the position of the scale
The meaning of the parameter pos depends on the alignment:
* `QwtScaleDraw.LeftScale`:
The origin is the topmost point of the backbone. The backbone is a
vertical line. Scale marks and labels are drawn at the left of the
backbone.
* `QwtScaleDraw.RightScale`:
The origin is the topmost point of the backbone. The backbone is a
vertical line. Scale marks and labels are drawn at the right of
the backbone.
* `QwtScaleDraw.TopScale`:
The origin is the leftmost point of the backbone. The backbone is
a horizontal line. Scale marks and labels are drawn above the
backbone.
* `QwtScaleDraw.BottomScale`:
The origin is the leftmost point of the backbone. The backbone is
a horizontal line Scale marks and labels are drawn below the
backbone.
.. py:method:: move(x, y)
:noindex:
:param float x: X coordinate
:param float y: Y coordinate
.. py:method:: move(pos)
:noindex:
:param QPointF pos: position
.. seealso::
:py:meth:`pos()`, :py:meth:`setLength()`
"""
if len(args) == 2:
x, y = args
self.move(QPointF(x, y))
elif len(args) == 1:
(pos,) = args
self.__data.pos = pos
self.updateMap()
else:
raise TypeError(
"%s().move() takes 1 or 2 argument(s) (%s given)"
% (self.__class__.__name__, len(args))
)
def pos(self):
"""
:return: Origin of the scale
.. seealso::
:py:meth:`pos()`, :py:meth:`setLength()`
"""
return self.__data.pos
def setLength(self, length):
"""
Set the length of the backbone.
The length doesn't include the space needed for overlapping labels.
:param float length: Length of the backbone
.. seealso::
:py:meth:`move()`, :py:meth:`minLabelDist()`
"""
if length >= 0 and length < 10:
length = 10
if length < 0 and length > -10:
length = -10
self.__data.len = length
self.updateMap()
def length(self):
"""
:return: the length of the backbone
.. seealso::
:py:meth:`setLength()`, :py:meth:`pos()`
"""
return self.__data.len
def drawLabel(self, painter, value):
"""
Draws the label for a major scale tick
:param QPainter painter: Painter
:param float value: Value
.. seealso::
:py:meth:`drawTick()`, :py:meth:`drawBackbone()`,
:py:meth:`boundingLabelRect()`
"""
lbl = self.tickLabel(painter.font(), value)
if lbl is None or lbl.isEmpty():
return
pos = self.labelPosition(value)
labelSize = lbl.textSize(painter.font())
transform = self.labelTransformation(pos, labelSize)
painter.save()
painter.setWorldTransform(transform, True)
lbl.draw(painter, QRect(QPoint(0, 0), labelSize.toSize()))
painter.restore()
def boundingLabelRect(self, font, value):
"""
Find the bounding rectangle for the label.
The coordinates of the rectangle are absolute (calculated from
`pos()`) in direction of the tick.
:param QFont font: Font used for painting
:param float value: Value
:return: Bounding rectangle
.. seealso::
:py:meth:`labelRect()`
"""
lbl = self.tickLabel(font, value)
if lbl.isEmpty():
return QRect()
pos = self.labelPosition(value)
labelSize = lbl.textSize(font)
transform = self.labelTransformation(pos, labelSize)
return transform.mapRect(QRect(QPoint(0, 0), labelSize.toSize()))
def labelTransformation(self, pos, size):
"""
Calculate the transformation that is needed to paint a label
depending on its alignment and rotation.
:param QPointF pos: Position where to paint the label
:param QSizeF size: Size of the label
:return: Transformation matrix
.. seealso::
:py:meth:`setLabelAlignment()`, :py:meth:`setLabelRotation()`
"""
transform = QTransform()
transform.translate(pos.x(), pos.y())
transform.rotate(self.labelRotation())
flags = self.labelAlignment()
if flags == 0:
flags = self.Flags[self.alignment()]
if flags & Qt.AlignLeft:
x = -size.width()
elif flags & Qt.AlignRight:
x = 0.0
else:
x = -(0.5 * size.width())
if flags & Qt.AlignTop:
y = -size.height()
elif flags & Qt.AlignBottom:
y = 0
else:
y = -(0.5 * size.height())
transform.translate(x, y)
return transform
def labelRect(self, font, value):
"""
Find the bounding rectangle for the label. The coordinates of
the rectangle are relative to spacing + tick length from the backbone
in direction of the tick.
:param QFont font: Font used for painting
:param float value: Value
:return: Bounding rectangle that is needed to draw a label
"""
lbl = self.tickLabel(font, value)
if not lbl or lbl.isEmpty():
return QRectF(0.0, 0.0, 0.0, 0.0)
pos = self.labelPosition(value)
labelSize = lbl.textSize(font)
transform = self.labelTransformation(pos, labelSize)
br = transform.mapRect(QRectF(QPointF(0, 0), labelSize))
br.translate(-pos.x(), -pos.y())
return br
def labelSize(self, font, value):
"""
Calculate the size that is needed to draw a label
:param QFont font: Label font
:param float value: Value
:return: Size that is needed to draw a label
"""
return self.labelRect(font, value).size()
def setLabelRotation(self, rotation):
"""
Rotate all labels.
When changing the rotation, it might be necessary to
adjust the label flags too. Finding a useful combination is
often the result of trial and error.
:param float rotation: Angle in degrees. When changing the label rotation, the label flags often need to be adjusted too.
.. seealso::
:py:meth:`setLabelAlignment()`, :py:meth:`labelRotation()`,
:py:meth:`labelAlignment()`
"""
self.__data.labelRotation = rotation
def labelRotation(self):
"""
:return: the label rotation
.. seealso::
:py:meth:`setLabelRotation()`, :py:meth:`labelAlignment()`
"""
return self.__data.labelRotation
def setLabelAlignment(self, alignment):
"""
Change the label flags
Labels are aligned to the point tick length + spacing away from the
backbone.
The alignment is relative to the orientation of the label text.
If the flags are 0, the label will be aligned
depending on the orientation of the scale:
* `QwtScaleDraw.TopScale`: `Qt.AlignHCenter | Qt.AlignTop`
* `QwtScaleDraw.BottomScale`: `Qt.AlignHCenter | Qt.AlignBottom`
* `QwtScaleDraw.LeftScale`: `Qt.AlignLeft | Qt.AlignVCenter`
* `QwtScaleDraw.RightScale`: `Qt.AlignRight | Qt.AlignVCenter`
Changing the alignment is often necessary for rotated labels.
:param Qt.Alignment alignment: Or'd `Qt.AlignmentFlags`
.. seealso::
:py:meth:`setLabelRotation()`, :py:meth:`labelRotation()`,
:py:meth:`labelAlignment()`
.. warning::
The various alignments might be confusing. The alignment of the
label is not the alignment of the scale and is not the alignment
of the flags (`QwtText.flags()`) returned from
`QwtAbstractScaleDraw.label()`.
"""
self.__data.labelAlignment = alignment
def labelAlignment(self):
"""
:return: the label flags
.. seealso::
:py:meth:`setLabelAlignment()`, :py:meth:`labelRotation()`
"""
return self.__data.labelAlignment
def setLabelAutoSize(self, state):
"""
Set label automatic size option state
When drawing text labels, if automatic size mode is enabled (default
behavior), the axes are drawn in order to optimize layout space and
depends on text label individual sizes. Otherwise, width and height
won't change when axis range is changing.
This option is not implemented in Qwt C++ library: this may be used
either as an optimization (updating plot layout is faster when this
option is enabled) or as an appearance preference (with Qwt default
behavior, the size of axes may change when zooming and/or panning
plot canvas which in some cases may not be desired).
:param bool state: On/off
.. seealso::
:py:meth:`labelAutoSize()`
"""
self.__data.labelAutoSize = state
def labelAutoSize(self):
"""
:return: True if automatic size option is enabled for labels
.. seealso::
:py:meth:`setLabelAutoSize()`
"""
return self.__data.labelAutoSize
def _get_max_label_size(self, font):
key = (font.toString(), self.labelRotation())
size = self._max_label_sizes.get(key)
if size is None:
size = self.labelSize(font, -999999) # -999999 is the biggest label
size.setWidth(np.ceil(size.width()))
size.setHeight(np.ceil(size.height()))
return self._max_label_sizes.setdefault(key, size)
else:
return size
def maxLabelWidth(self, font):
"""
:param QFont font: Font
:return: the maximum width of a label
"""
ticks = self.scaleDiv().ticks(QwtScaleDiv.MajorTick)
if not ticks:
return 0
if self.labelAutoSize():
vmax = sorted(
[v for v in ticks if self.scaleDiv().contains(v)],
key=lambda obj: len(QLocale().toString(obj)),
)[-1]
return np.ceil(self.labelSize(font, vmax).width())
## Original implementation (closer to Qwt's C++ code, but slower):
# return np.ceil(max([self.labelSize(font, v).width()
# for v in ticks if self.scaleDiv().contains(v)]))
else:
return self._get_max_label_size(font).width()
def maxLabelHeight(self, font):
"""
:param QFont font: Font
:return: the maximum height of a label
"""
ticks = self.scaleDiv().ticks(QwtScaleDiv.MajorTick)
if not ticks:
return 0
if self.labelAutoSize():
vmax = sorted(
[v for v in ticks if self.scaleDiv().contains(v)],
key=lambda obj: len(QLocale().toString(obj)),
)[-1]
return np.ceil(self.labelSize(font, vmax).height())
## Original implementation (closer to Qwt's C++ code, but slower):
# return np.ceil(max([self.labelSize(font, v).height()
# for v in ticks if self.scaleDiv().contains(v)]))
else:
return self._get_max_label_size(font).height()
def updateMap(self):
pos = self.__data.pos
len_ = self.__data.len
sm = self.scaleMap()
if self.orientation() == Qt.Vertical:
sm.setPaintInterval(pos.y() + len_, pos.y())
else:
sm.setPaintInterval(pos.x(), pos.x() + len_)
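# --- Illustrative usage sketch (not part of the original file) --------------
# A minimal, hypothetical example of how a QwtScaleDraw is typically set up
# before painting. It only uses methods defined above; the QwtScaleDiv
# instance `scale_div` and the QPainter `painter` are assumed to come from
# the surrounding plot code.
#
#     sd = QwtScaleDraw()
#     sd.setAlignment(QwtScaleDraw.LeftScale)  # vertical scale, labels on the left
#     sd.move(0.0, 10.0)                       # origin of the backbone
#     sd.setLength(200)                        # backbone length in pixels
#     sd.setScaleDiv(scale_div)                # range and tick positions
#     sd.setLabelRotation(-45.0)               # rotated labels usually need
#     sd.setLabelAlignment(Qt.AlignLeft | Qt.AlignVCenter)  # adjusted flags
#     sd.draw(painter, QPalette())             # paint backbone, ticks and labels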
| 30.967742 | 130 | 0.559161 | 36,686 | 0.979861 | 0 | 0 | 0 | 0 | 0 | 0 | 18,416 | 0.49188 |
297beccf46f124350d5429aa7d77baa3ba0a7320 | 3,524 | py | Python | fuzzers/LIFCL/090-sysconfig/fuzzer.py | mfkiwl/prjoxide | 318331f8b30c2e2a31cc41d51f104b671e180a8a | ["0BSD"] | 80 | 2019-12-10T21:06:12.000Z | 2021-02-06T09:12:37.000Z | fuzzers/LIFCL/090-sysconfig/fuzzer.py | mfkiwl/prjoxide | 318331f8b30c2e2a31cc41d51f104b671e180a8a | ["0BSD"] | 13 | 2021-03-18T12:59:25.000Z | 2022-03-30T11:35:51.000Z | fuzzers/LIFCL/090-sysconfig/fuzzer.py | mfkiwl/prjoxide | 318331f8b30c2e2a31cc41d51f104b671e180a8a | ["0BSD"] | 4 | 2020-10-04T22:23:15.000Z | 2021-01-29T21:51:25.000Z |
from fuzzconfig import FuzzConfig
import nonrouting
import fuzzloops
import re
cfgs = [
FuzzConfig(job="SYSCONFIG40", device="LIFCL-40", sv="../shared/empty_40.v",
tiles=["CIB_R0C75:EFB_0", "CIB_R0C72:BANKREF0", "CIB_R0C77:EFB_1_OSC", "CIB_R0C79:EFB_2",
"CIB_R0C81:I2C_EFB_3", "CIB_R0C85:PMU", "CIB_R0C87:MIB_CNR_32_FAFD", "CIB_R1C87:IREF_P33", "CIB_R2C87:POR"]),
FuzzConfig(job="SYSCONFIG17", device="LIFCL-17", sv="../shared/empty_17.v",
tiles=["CIB_R1C75:IREF_15K", "CIB_R0C75:PPT_QOUT_15K", "CIB_R0C74:PVTCAL33_15K", "CIB_R0C73:POR_15K",
"CIB_R0C72:I2C_15K", "CIB_R0C71:OSC_15K", "CIB_R0C70:PMU_15K", "CIB_R0C66:EFB_15K"])
]
def main():
for cfg in cfgs:
cfg.setup()
empty = cfg.build_design(cfg.sv, {})
cfg.sv = "../shared/empty_presyn_40.v"
cfg.struct_mode = False
def get_substs(k, v):
return dict(sysconfig="{}={}".format(k, v))
nonrouting.fuzz_enum_setting(cfg, empty, "SYSCONFIG.MASTER_SPI_PORT", ["DISABLE", "SERIAL", "DUAL", "QUAD"],
lambda x: get_substs("MASTER_SPI_PORT", x), False,
assume_zero_base=True,
desc="status of master SPI port after configuration")
nonrouting.fuzz_enum_setting(cfg, empty, "SYSCONFIG.SLAVE_SPI_PORT", ["DISABLE", "SERIAL", "DUAL", "QUAD"],
lambda x: get_substs("SLAVE_SPI_PORT", x), False,
assume_zero_base=True,
desc="status of slave SPI port after configuration")
nonrouting.fuzz_enum_setting(cfg, empty, "SYSCONFIG.SLAVE_I2C_PORT", ["DISABLE", "ENABLE"],
lambda x: get_substs("SLAVE_I2C_PORT", x), False,
assume_zero_base=True,
desc="status of slave I2C port after configuration")
nonrouting.fuzz_enum_setting(cfg, empty, "SYSCONFIG.SLAVE_I3C_PORT", ["DISABLE", "ENABLE"],
lambda x: get_substs("SLAVE_I3C_PORT", x), False,
assume_zero_base=True,
desc="status of slave I3C port after configuration")
nonrouting.fuzz_enum_setting(cfg, empty, "SYSCONFIG.JTAG_PORT", ["DISABLE", "ENABLE"],
lambda x: get_substs("JTAG_PORT", x), False,
assume_zero_base=True,
desc="status of JTAG port after configuration")
nonrouting.fuzz_enum_setting(cfg, empty, "SYSCONFIG.DONE_PORT", ["DISABLE", "ENABLE"],
lambda x: get_substs("DONE_PORT", x), False,
assume_zero_base=True,
desc="use DONE output after configuration")
nonrouting.fuzz_enum_setting(cfg, empty, "SYSCONFIG.INITN_PORT", ["DISABLE", "ENABLE"],
lambda x: get_substs("INITN_PORT", x), False,
assume_zero_base=True,
desc="use INITN input after configuration")
nonrouting.fuzz_enum_setting(cfg, empty, "SYSCONFIG.PROGRAMN_PORT", ["DISABLE", "ENABLE"],
lambda x: get_substs("PROGRAMN_PORT", x), False,
assume_zero_base=True,
desc="use PROGRAMN input after configuration")
if __name__ == "__main__":
main()
| 61.824561 | 117 | 0.557605 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,280 | 0.363224 |
297cba1ed48f76e8524cfa4c59799c094d9928a0 | 368 | py | Python | test.py | jdonovanCS/CS-352-Evolutionary-Computation | dca64068c1824dac8ec3983d7215effa426bc3bc | ["BSD-3-Clause"] | null | null | null | test.py | jdonovanCS/CS-352-Evolutionary-Computation | dca64068c1824dac8ec3983d7215effa426bc3bc | ["BSD-3-Clause"] | null | null | null | test.py | jdonovanCS/CS-352-Evolutionary-Computation | dca64068c1824dac8ec3983d7215effa426bc3bc | ["BSD-3-Clause"] | null | null | null |
import argparse
import collections
import os
import random
import json
from copy import deepcopy
import ConfigSpace
import numpy as np
# from tabular_benchmarks import FCNetProteinStructureBenchmark, FCNetSliceLocalizationBenchmark,\
# FCNetNavalPropulsionBenchmark, FCNetParkinsonsTelemonitoringBenchmark
from tabular_benchmarks import NASCifar10A, NASCifar10B
| 26.285714 | 98 | 0.872283 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 173 | 0.470109 |
297cf3d7631c7db955fd11e56c685c2b4f84aac0 | 1,254 | py | Python | tests/test_base64_uuid.py | cds-snc/notifier-utils | c3a205ac4381312fe1884a39ffafa7ffb862736f | ["MIT"] | 3 | 2020-04-29T17:13:43.000Z | 2020-12-04T21:08:33.000Z | tests/test_base64_uuid.py | cds-snc/notifier-utils | c3a205ac4381312fe1884a39ffafa7ffb862736f | ["MIT"] | 21 | 2020-04-16T12:29:46.000Z | 2022-02-28T17:17:15.000Z | tests/test_base64_uuid.py | cds-snc/notifier-utils | c3a205ac4381312fe1884a39ffafa7ffb862736f | ["MIT"] | 4 | 2020-02-21T20:20:00.000Z | 2021-02-11T19:00:59.000Z |
from uuid import UUID
import os
import pytest
from notifications_utils.base64_uuid import base64_to_uuid, uuid_to_base64, base64_to_bytes, bytes_to_base64
def test_bytes_to_base64_to_bytes():
b = os.urandom(32)
b64 = bytes_to_base64(b)
assert base64_to_bytes(b64) == b
@pytest.mark.parametrize(
"url_val",
[
"AAAAAAAAAAAAAAAAAAAAAQ",
"AAAAAAAAAAAAAAAAAAAAAQ=", # even though this has invalid padding we put extra =s on the end so this is okay
"AAAAAAAAAAAAAAAAAAAAAQ==",
],
)
def test_base64_converter_to_python(url_val):
assert base64_to_uuid(url_val) == UUID(int=1)
@pytest.mark.parametrize("python_val", [UUID(int=1), "00000000-0000-0000-0000-000000000001"])
def test_base64_converter_to_url(python_val):
assert uuid_to_base64(python_val) == "AAAAAAAAAAAAAAAAAAAAAQ"
@pytest.mark.parametrize(
"url_val",
[
"this_is_valid_base64_but_is_too_long_to_be_a_uuid",
"this_one_has_emoji_➕➕➕",
],
)
def test_base64_converter_to_python_raises_validation_error(url_val):
with pytest.raises(Exception):
base64_to_uuid(url_val)
def test_base64_converter_to_url_raises_validation_error():
with pytest.raises(Exception):
uuid_to_base64(object())
| 27.26087 | 117 | 0.739234 | 0 | 0 | 0 | 0 | 837 | 0.664286 | 0 | 0 | 329 | 0.261111 |
297d3ac09c9100be856d7c8b2496765760397058 | 3,571 | py | Python | homeassistant/components/devolo_home_control/__init__.py | dummys/home-assistant | dd908caebade15adf061fade686355b94ed2f43a | ["Apache-2.0"] | 11 | 2018-02-16T15:35:47.000Z | 2020-01-14T15:20:00.000Z | homeassistant/components/devolo_home_control/__init__.py | dummys/home-assistant | dd908caebade15adf061fade686355b94ed2f43a | ["Apache-2.0"] | 64 | 2020-10-01T06:39:48.000Z | 2022-03-31T06:02:17.000Z | homeassistant/components/devolo_home_control/__init__.py | dummys/home-assistant | dd908caebade15adf061fade686355b94ed2f43a | ["Apache-2.0"] | 6 | 2018-02-04T03:48:55.000Z | 2022-01-24T20:37:04.000Z |
"""The devolo_home_control integration."""
from __future__ import annotations
import asyncio
from functools import partial
from types import MappingProxyType
from typing import Any
from devolo_home_control_api.exceptions.gateway import GatewayOfflineError
from devolo_home_control_api.homecontrol import HomeControl
from devolo_home_control_api.mydevolo import Mydevolo
from homeassistant.components import zeroconf
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME, EVENT_HOMEASSISTANT_STOP
from homeassistant.core import Event, HomeAssistant
from homeassistant.exceptions import ConfigEntryAuthFailed, ConfigEntryNotReady
from .const import (
CONF_MYDEVOLO,
DEFAULT_MYDEVOLO,
DOMAIN,
GATEWAY_SERIAL_PATTERN,
PLATFORMS,
)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up the devolo account from a config entry."""
hass.data.setdefault(DOMAIN, {})
mydevolo = configure_mydevolo(entry.data)
credentials_valid = await hass.async_add_executor_job(mydevolo.credentials_valid)
if not credentials_valid:
raise ConfigEntryAuthFailed
if await hass.async_add_executor_job(mydevolo.maintenance):
raise ConfigEntryNotReady
gateway_ids = await hass.async_add_executor_job(mydevolo.get_gateway_ids)
if entry.unique_id and GATEWAY_SERIAL_PATTERN.match(entry.unique_id):
uuid = await hass.async_add_executor_job(mydevolo.uuid)
hass.config_entries.async_update_entry(entry, unique_id=uuid)
try:
zeroconf_instance = await zeroconf.async_get_instance(hass)
hass.data[DOMAIN][entry.entry_id] = {"gateways": [], "listener": None}
for gateway_id in gateway_ids:
hass.data[DOMAIN][entry.entry_id]["gateways"].append(
await hass.async_add_executor_job(
partial(
HomeControl,
gateway_id=gateway_id,
mydevolo_instance=mydevolo,
zeroconf_instance=zeroconf_instance,
)
)
)
except GatewayOfflineError as err:
raise ConfigEntryNotReady from err
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
def shutdown(event: Event) -> None:
for gateway in hass.data[DOMAIN][entry.entry_id]["gateways"]:
gateway.websocket_disconnect(
f"websocket disconnect requested by {EVENT_HOMEASSISTANT_STOP}"
)
# Listen when EVENT_HOMEASSISTANT_STOP is fired
hass.data[DOMAIN][entry.entry_id]["listener"] = hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STOP, shutdown
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
await asyncio.gather(
*[
hass.async_add_executor_job(gateway.websocket_disconnect)
for gateway in hass.data[DOMAIN][entry.entry_id]["gateways"]
]
)
hass.data[DOMAIN][entry.entry_id]["listener"]()
hass.data[DOMAIN].pop(entry.entry_id)
return unload
def configure_mydevolo(conf: dict[str, Any] | MappingProxyType[str, Any]) -> Mydevolo:
"""Configure mydevolo."""
mydevolo = Mydevolo()
mydevolo.user = conf[CONF_USERNAME]
mydevolo.password = conf[CONF_PASSWORD]
mydevolo.url = conf.get(CONF_MYDEVOLO, DEFAULT_MYDEVOLO)
return mydevolo
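# --- Illustrative usage sketch (not part of the original file) --------------
# configure_mydevolo() only needs a mapping containing the credential keys
# used above; the values below are placeholders, not real credentials.
#
#     mydevolo = configure_mydevolo(
#         {CONF_USERNAME: "user@example.com", CONF_PASSWORD: "placeholder-password"}
#     )
#     # mydevolo.url falls back to DEFAULT_MYDEVOLO when CONF_MYDEVOLO is absent.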
| 35.009804 | 86 | 0.711845 | 0 | 0 | 0 | 0 | 0 | 0 | 2,444 | 0.684402 | 327 | 0.091571 |
297d9c0aeb9cbc7ee91fb0bd8b0450dda847eca4 | 1,199 | py | Python | trab1/hillClimbing.py | RafaelPedruzzi/IA-2019-2 | 7d99a8f02ec826403bd48c6eba574d802e558c36 | ["MIT"] | null | null | null | trab1/hillClimbing.py | RafaelPedruzzi/IA-2019-2 | 7d99a8f02ec826403bd48c6eba574d802e558c36 | ["MIT"] | null | null | null | trab1/hillClimbing.py | RafaelPedruzzi/IA-2019-2 | 7d99a8f02ec826403bd48c6eba574d802e558c36 | ["MIT"] | null | null | null |
## -------------------------------------------------------- ##
# Trab 1 IA 2019-2
#
# Rafael Belmock Pedruzzi
#
# hillClimbing.py: implements the hill climbing metaheuristic for the bag problem
#
# Python version: 3.7.4
## -------------------------------------------------------- ##
import bagProblem as bp
from time import time
# Returns True and the valid state with the biggest value, or False if no state is valid:
def select_Best(si, T, OBJs):
sn = -1 # best state position
sv = 0 # state value
for i in range(len(si)):
v = bp.state_Value(si[i], OBJs) # current value
if bp.state_Verify(si[i], T, OBJs) and v > sv:
sv = v
sn = i
if sn == -1:
return False, []
return True, si[sn]
# Hill Climbing:
def hill_Climbing(T, OBJs, execTime, *args):
sn = [0]*len(OBJs) # initial state
c = True # continue flag
start = time()
while c:
if time() - start > execTime:
break
cs = sn # storing current state
c, sn = select_Best(bp.state_Expansion(cs), T, OBJs)
return cs
# T = 19 # bag size
# OBJs = [(1,3), (4,6), (5,7)] # object list (v,t)
# print(hill_Climbing(T,OBJs))
| 28.547619 | 89 | 0.533778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 588 | 0.490409 |
2980dcb521571569813c10466d1c0e11144f30c7 | 151 | py | Python | ssb_pseudonymization/__init__.py | statisticsnorway/ssb-pseudonymization-py | cc8e1abd1cf00b403bfaf52fcb335f09ee71b591 | [
"MIT"
]
| 1 | 2021-02-17T09:51:12.000Z | 2021-02-17T09:51:12.000Z | ssb_pseudonymization/__init__.py | statisticsnorway/ssb-pseudonymization-py | cc8e1abd1cf00b403bfaf52fcb335f09ee71b591 | [
"MIT"
]
| null | null | null | ssb_pseudonymization/__init__.py | statisticsnorway/ssb-pseudonymization-py | cc8e1abd1cf00b403bfaf52fcb335f09ee71b591 | [
"MIT"
]
| 3 | 2020-08-25T07:12:17.000Z | 2021-04-20T12:26:42.000Z | """ssb-pseudonymization - Data pseudonymization functions used by SSB"""
__version__ = '0.0.2'
__author__ = 'Statistics Norway (ssb.no)'
__all__ = []
| 25.166667 | 72 | 0.721854 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 107 | 0.708609 |
2980f0b218fed38559f7aa3fa0718ba902a95fb9 | 7,657 | py | Python | hwilib/devices/keepkey.py | cjackie/HWI | 8c1b50aaaac37714b5d61f720b4b06f8aa24c73a | [
"MIT"
]
| 285 | 2019-01-31T03:10:19.000Z | 2022-03-31T10:38:37.000Z | hwilib/devices/keepkey.py | cjackie/HWI | 8c1b50aaaac37714b5d61f720b4b06f8aa24c73a | [
"MIT"
]
| 426 | 2019-01-31T10:38:02.000Z | 2022-03-28T15:58:13.000Z | hwilib/devices/keepkey.py | cjackie/HWI | 8c1b50aaaac37714b5d61f720b4b06f8aa24c73a | [
"MIT"
]
| 128 | 2019-01-30T22:32:32.000Z | 2022-03-28T19:23:46.000Z | """
Keepkey
*******
"""
from ..errors import (
DEVICE_NOT_INITIALIZED,
DeviceNotReadyError,
common_err_msgs,
handle_errors,
)
from .trezorlib import protobuf as p
from .trezorlib.transport import (
hid,
udp,
webusb,
)
from .trezor import TrezorClient, HID_IDS, WEBUSB_IDS
from .trezorlib.messages import (
DebugLinkState,
Features,
HDNodeType,
ResetDevice,
)
from typing import (
Any,
Dict,
List,
Optional,
)
py_enumerate = enumerate # Need to use the enumerate built-in but there's another function already named that
KEEPKEY_HID_IDS = {(0x2B24, 0x0001)}
KEEPKEY_WEBUSB_IDS = {(0x2B24, 0x0002)}
KEEPKEY_SIMULATOR_PATH = '127.0.0.1:11044'
HID_IDS.update(KEEPKEY_HID_IDS)
WEBUSB_IDS.update(KEEPKEY_WEBUSB_IDS)
class KeepkeyFeatures(Features): # type: ignore
def __init__(
self,
*,
firmware_variant: Optional[str] = None,
firmware_hash: Optional[bytes] = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.firmware_variant = firmware_variant
self.firmware_hash = firmware_hash
@classmethod
def get_fields(cls) -> Dict[int, p.FieldInfo]:
return {
1: ('vendor', p.UnicodeType, None),
2: ('major_version', p.UVarintType, None),
3: ('minor_version', p.UVarintType, None),
4: ('patch_version', p.UVarintType, None),
5: ('bootloader_mode', p.BoolType, None),
6: ('device_id', p.UnicodeType, None),
7: ('pin_protection', p.BoolType, None),
8: ('passphrase_protection', p.BoolType, None),
9: ('language', p.UnicodeType, None),
10: ('label', p.UnicodeType, None),
12: ('initialized', p.BoolType, None),
13: ('revision', p.BytesType, None),
14: ('bootloader_hash', p.BytesType, None),
15: ('imported', p.BoolType, None),
16: ('unlocked', p.BoolType, None),
21: ('model', p.UnicodeType, None),
22: ('firmware_variant', p.UnicodeType, None),
23: ('firmware_hash', p.BytesType, None),
24: ('no_backup', p.BoolType, None),
25: ('wipe_code_protection', p.BoolType, None),
}
class KeepkeyResetDevice(ResetDevice): # type: ignore
def __init__(
self,
*,
auto_lock_delay_ms: Optional[int] = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.auto_lock_delay_ms = auto_lock_delay_ms
@classmethod
def get_fields(cls) -> Dict[int, p.FieldInfo]:
return {
1: ('display_random', p.BoolType, None),
2: ('strength', p.UVarintType, 256), # default=256
3: ('passphrase_protection', p.BoolType, None),
4: ('pin_protection', p.BoolType, None),
5: ('language', p.UnicodeType, "en-US"), # default=en-US
6: ('label', p.UnicodeType, None),
7: ('no_backup', p.BoolType, None),
8: ('auto_lock_delay_ms', p.UVarintType, None),
9: ('u2f_counter', p.UVarintType, None),
}
class KeepkeyDebugLinkState(DebugLinkState): # type: ignore
def __init__(
self,
*,
recovery_cipher: Optional[str] = None,
recovery_auto_completed_word: Optional[str] = None,
firmware_hash: Optional[bytes] = None,
storage_hash: Optional[bytes] = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.recovery_cipher = recovery_cipher
self.recovery_auto_completed_word = recovery_auto_completed_word
self.firmware_hash = firmware_hash
self.storage_hash = storage_hash
@classmethod
def get_fields(cls) -> Dict[int, p.FieldType]:
return {
1: ('layout', p.BytesType, None),
2: ('pin', p.UnicodeType, None),
3: ('matrix', p.UnicodeType, None),
4: ('mnemonic_secret', p.BytesType, None),
5: ('node', HDNodeType, None),
6: ('passphrase_protection', p.BoolType, None),
7: ('reset_word', p.UnicodeType, None),
8: ('reset_entropy', p.BytesType, None),
9: ('recovery_fake_word', p.UnicodeType, None),
10: ('recovery_word_pos', p.UVarintType, None),
11: ('recovery_cipher', p.UnicodeType, None),
12: ('recovery_auto_completed_word', p.UnicodeType, None),
13: ('firmware_hash', p.BytesType, None),
14: ('storage_hash', p.BytesType, None),
}
class KeepkeyClient(TrezorClient):
def __init__(self, path: str, password: str = "", expert: bool = False) -> None:
"""
The `KeepkeyClient` is a `HardwareWalletClient` for interacting with the Keepkey.
As Keepkeys are clones of the Trezor 1, please refer to `TrezorClient` for documentation.
"""
super(KeepkeyClient, self).__init__(path, password, expert, KEEPKEY_HID_IDS, KEEPKEY_WEBUSB_IDS, KEEPKEY_SIMULATOR_PATH)
self.type = 'Keepkey'
self.client.vendors = ("keepkey.com")
self.client.minimum_versions = {"K1-14AM": (0, 0, 0)}
self.client.map_type_to_class_override[KeepkeyFeatures.MESSAGE_WIRE_TYPE] = KeepkeyFeatures
self.client.map_type_to_class_override[KeepkeyResetDevice.MESSAGE_WIRE_TYPE] = KeepkeyResetDevice
if self.simulator:
self.client.debug.map_type_to_class_override[KeepkeyDebugLinkState.MESSAGE_WIRE_TYPE] = KeepkeyDebugLinkState
def enumerate(password: str = "") -> List[Dict[str, Any]]:
results = []
devs = hid.HidTransport.enumerate(usb_ids=KEEPKEY_HID_IDS)
devs.extend(webusb.WebUsbTransport.enumerate(usb_ids=KEEPKEY_WEBUSB_IDS))
devs.extend(udp.UdpTransport.enumerate(KEEPKEY_SIMULATOR_PATH))
for dev in devs:
d_data: Dict[str, Any] = {}
d_data['type'] = 'keepkey'
d_data['model'] = 'keepkey'
d_data['path'] = dev.get_path()
client = None
with handle_errors(common_err_msgs["enumerate"], d_data):
client = KeepkeyClient(d_data['path'], password)
try:
client.client.refresh_features()
except TypeError:
continue
if 'keepkey' not in client.client.features.vendor:
continue
d_data['label'] = client.client.features.label
if d_data['path'].startswith('udp:'):
d_data['model'] += '_simulator'
d_data['needs_pin_sent'] = client.client.features.pin_protection and not client.client.features.unlocked
d_data['needs_passphrase_sent'] = client.client.features.passphrase_protection # always need the passphrase sent for Keepkey if it has passphrase protection enabled
if d_data['needs_pin_sent']:
raise DeviceNotReadyError('Keepkey is locked. Unlock by using \'promptpin\' and then \'sendpin\'.')
if d_data['needs_passphrase_sent'] and not password:
raise DeviceNotReadyError("Passphrase needs to be specified before the fingerprint information can be retrieved")
if client.client.features.initialized:
d_data['fingerprint'] = client.get_master_fingerprint().hex()
d_data['needs_passphrase_sent'] = False # Passphrase is always needed for the above to have worked, so it's already sent
else:
d_data['error'] = 'Not initialized'
d_data['code'] = DEVICE_NOT_INITIALIZED
if client:
client.close()
results.append(d_data)
return results
| 37.534314 | 176 | 0.614601 | 4,760 | 0.621653 | 0 | 0 | 2,587 | 0.337861 | 0 | 0 | 1,613 | 0.210657 |
298195d9b4f163d34048ccfa931e4a45670c98bc | 15,195 | py | Python | sklearn_extra/cluster/_k_medoids.py | chkoar/scikit-learn-extra | 9e62f1ac18b40f444009db9f6557196ddc6c5e86 | [
"BSD-3-Clause"
]
| 1 | 2019-08-27T01:10:48.000Z | 2019-08-27T01:10:48.000Z | sklearn_extra/cluster/_k_medoids.py | KonstantinKlepikov/scikit-learn-extra | 12fb111acdfa7aada0ed29f53b9b92c44d8de7c3 | [
"BSD-3-Clause"
]
| null | null | null | sklearn_extra/cluster/_k_medoids.py | KonstantinKlepikov/scikit-learn-extra | 12fb111acdfa7aada0ed29f53b9b92c44d8de7c3 | [
"BSD-3-Clause"
]
| null | null | null | # -*- coding: utf-8 -*-
"""K-medoids clustering"""
# Authors: Timo Erkkilä <[email protected]>
# Antti Lehmussola <[email protected]>
# Kornel Kiełczewski <[email protected]>
# Zane Dufour <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
from sklearn.base import BaseEstimator, ClusterMixin, TransformerMixin
from sklearn.metrics.pairwise import (
pairwise_distances,
pairwise_distances_argmin,
)
from sklearn.utils import check_array, check_random_state
from sklearn.utils.extmath import stable_cumsum
from sklearn.utils.validation import check_is_fitted
from sklearn.exceptions import ConvergenceWarning
class KMedoids(BaseEstimator, ClusterMixin, TransformerMixin):
"""k-medoids clustering.
Read more in the :ref:`User Guide <k_medoids>`.
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of medoids to
generate.
metric : string, or callable, optional, default: 'euclidean'
What distance metric to use. See :func:metrics.pairwise_distances
init : {'random', 'heuristic', 'k-medoids++'}, optional, default: 'heuristic'
Specify medoid initialization method. 'random' selects n_clusters
elements from the dataset. 'heuristic' picks the n_clusters points
with the smallest sum distance to every other point. 'k-medoids++'
follows an approach based on k-means++_, and in general, gives initial
medoids which are more separated than those generated by the other methods.
.. _k-means++: https://theory.stanford.edu/~sergei/papers/kMeansPP-soda.pdf
max_iter : int, optional, default : 300
Specify the maximum number of iterations when fitting.
random_state : int, RandomState instance or None, optional
Specify random state for the random number generator. Used to
initialise medoids when init='random'.
Attributes
----------
cluster_centers_ : array, shape = (n_clusters, n_features)
or None if metric == 'precomputed'
Cluster centers, i.e. medoids (elements from the original dataset)
medoid_indices_ : array, shape = (n_clusters,)
The indices of the medoid rows in X
labels_ : array, shape = (n_samples,)
Labels of each point
inertia_ : float
Sum of distances of samples to their closest cluster center.
Examples
--------
>>> from sklearn_extra.cluster import KMedoids
>>> import numpy as np
>>> X = np.asarray([[1, 2], [1, 4], [1, 0],
... [4, 2], [4, 4], [4, 0]])
>>> kmedoids = KMedoids(n_clusters=2, random_state=0).fit(X)
>>> kmedoids.labels_
array([0, 0, 0, 1, 1, 1])
>>> kmedoids.predict([[0,0], [4,4]])
array([0, 1])
>>> kmedoids.cluster_centers_
array([[1, 2],
[4, 2]])
>>> kmedoids.inertia_
8.0
See scikit-learn-extra/examples/plot_kmedoids_digits.py for examples
of KMedoids with various distance metrics.
References
----------
Kaufman, L. and Rousseeuw, P.J., Statistical Data Analysis Based on
the L1–Norm and Related Methods, edited by Y. Dodge, North-Holland,
405–416. 1987
See also
--------
KMeans
The KMeans algorithm minimizes the within-cluster sum-of-squares
criterion. It scales well to large number of samples.
Notes
-----
Since all pairwise distances are calculated and stored in memory for
the duration of fit, the space complexity is O(n_samples ** 2).
"""
def __init__(
self,
n_clusters=8,
metric="euclidean",
init="heuristic",
max_iter=300,
random_state=None,
):
self.n_clusters = n_clusters
self.metric = metric
self.init = init
self.max_iter = max_iter
self.random_state = random_state
def _check_nonnegative_int(self, value, desc):
"""Validates if value is a valid integer > 0"""
if (
value is None
or value <= 0
or not isinstance(value, (int, np.integer))
):
raise ValueError(
"%s should be a nonnegative integer. "
"%s was given" % (desc, value)
)
def _check_init_args(self):
"""Validates the input arguments. """
# Check n_clusters and max_iter
self._check_nonnegative_int(self.n_clusters, "n_clusters")
self._check_nonnegative_int(self.max_iter, "max_iter")
# Check init
init_methods = ["random", "heuristic", "k-medoids++"]
if self.init not in init_methods:
raise ValueError(
"init needs to be one of "
+ "the following: "
+ "%s" % init_methods
)
def fit(self, X, y=None):
"""Fit K-Medoids to the provided data.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features), \
or (n_samples, n_samples) if metric == 'precomputed'
Dataset to cluster.
y : Ignored
Returns
-------
self
"""
random_state_ = check_random_state(self.random_state)
self._check_init_args()
X = check_array(X, accept_sparse=["csr", "csc"])
if self.n_clusters > X.shape[0]:
raise ValueError(
"The number of medoids (%d) must be less "
"than the number of samples %d."
% (self.n_clusters, X.shape[0])
)
D = pairwise_distances(X, metric=self.metric)
medoid_idxs = self._initialize_medoids(
D, self.n_clusters, random_state_
)
labels = None
# Continue the algorithm as long as
# the medoids keep changing and the maximum number
# of iterations is not exceeded
for self.n_iter_ in range(0, self.max_iter):
old_medoid_idxs = np.copy(medoid_idxs)
labels = np.argmin(D[medoid_idxs, :], axis=0)
# Update medoids with the new cluster indices
self._update_medoid_idxs_in_place(D, labels, medoid_idxs)
if np.all(old_medoid_idxs == medoid_idxs):
break
elif self.n_iter_ == self.max_iter - 1:
warnings.warn(
"Maximum number of iteration reached before "
"convergence. Consider increasing max_iter to "
"improve the fit.",
ConvergenceWarning,
)
# Set the resulting instance variables.
if self.metric == "precomputed":
self.cluster_centers_ = None
else:
self.cluster_centers_ = X[medoid_idxs]
# Expose labels_ which are the assignments of
# the training data to clusters
self.labels_ = labels
self.medoid_indices_ = medoid_idxs
self.inertia_ = self._compute_inertia(self.transform(X))
# Return self to enable method chaining
return self
def _update_medoid_idxs_in_place(self, D, labels, medoid_idxs):
"""In-place update of the medoid indices"""
# Update the medoids for each cluster
for k in range(self.n_clusters):
# Extract the distance matrix between the data points
# inside the cluster k
cluster_k_idxs = np.where(labels == k)[0]
if len(cluster_k_idxs) == 0:
warnings.warn(
"Cluster {k} is empty! "
"self.labels_[self.medoid_indices_[{k}]] "
"may not be labeled with "
"its corresponding cluster ({k}).".format(k=k)
)
continue
in_cluster_distances = D[
cluster_k_idxs, cluster_k_idxs[:, np.newaxis]
]
# Calculate all costs from each point to all others in the cluster
in_cluster_all_costs = np.sum(in_cluster_distances, axis=1)
min_cost_idx = np.argmin(in_cluster_all_costs)
min_cost = in_cluster_all_costs[min_cost_idx]
curr_cost = in_cluster_all_costs[
np.argmax(cluster_k_idxs == medoid_idxs[k])
]
# Adopt a new medoid if its distance is smaller then the current
if min_cost < curr_cost:
medoid_idxs[k] = cluster_k_idxs[min_cost_idx]
def transform(self, X):
"""Transforms X to cluster-distance space.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Data to transform.
Returns
-------
X_new : {array-like, sparse matrix}, shape=(n_query, n_clusters)
X transformed in the new space of distances to cluster centers.
"""
X = check_array(X, accept_sparse=["csr", "csc"])
if self.metric == "precomputed":
check_is_fitted(self, "medoid_indices_")
return X[:, self.medoid_indices_]
else:
check_is_fitted(self, "cluster_centers_")
Y = self.cluster_centers_
return pairwise_distances(X, Y=Y, metric=self.metric)
def predict(self, X):
"""Predict the closest cluster for each sample in X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
New data to predict.
Returns
-------
labels : array, shape = (n_query,)
Index of the cluster each sample belongs to.
"""
X = check_array(X, accept_sparse=["csr", "csc"])
if self.metric == "precomputed":
check_is_fitted(self, "medoid_indices_")
return np.argmin(X[:, self.medoid_indices_], axis=1)
else:
check_is_fitted(self, "cluster_centers_")
# Return data points to clusters based on which cluster assignment
# yields the smallest distance
return pairwise_distances_argmin(
X, Y=self.cluster_centers_, metric=self.metric
)
def _compute_inertia(self, distances):
"""Compute inertia of new samples. Inertia is defined as the sum of the
sample distances to closest cluster centers.
Parameters
----------
distances : {array-like, sparse matrix}, shape=(n_samples, n_clusters)
Distances to cluster centers.
Returns
-------
Sum of sample distances to closest cluster centers.
"""
# Define inertia as the sum of the sample-distances
# to closest cluster centers
inertia = np.sum(np.min(distances, axis=1))
return inertia
def _initialize_medoids(self, D, n_clusters, random_state_):
"""Select initial mediods when beginning clustering."""
if self.init == "random": # Random initialization
# Pick random k medoids as the initial ones.
medoids = random_state_.choice(len(D), n_clusters)
elif self.init == "k-medoids++":
medoids = self._kpp_init(D, n_clusters, random_state_)
elif self.init == "heuristic": # Initialization by heuristic
# Pick K first data points that have the smallest sum distance
# to every other point. These are the initial medoids.
medoids = np.argpartition(np.sum(D, axis=1), n_clusters - 1)[
:n_clusters
]
else:
raise ValueError(
"init value '{init}' not recognized".format(init=self.init)
)
return medoids
# Copied from sklearn.cluster.k_means_._k_init
def _kpp_init(self, D, n_clusters, random_state_, n_local_trials=None):
"""Init n_clusters seeds with a method similar to k-means++
Parameters
-----------
D : array, shape (n_samples, n_samples)
The distance matrix we will use to select medoid indices.
n_clusters : integer
The number of seeds to choose
random_state : RandomState
The generator used to initialize the centers.
n_local_trials : integer, optional
The number of seeding trials for each center (except the first),
of which the one reducing inertia the most is greedily chosen.
Set to None to make the number of trials depend logarithmically
on the number of seeds (2+log(k)); this is the default.
Notes
-----
Selects initial cluster centers for k-medoid clustering in a smart way
to speed up convergence. see: Arthur, D. and Vassilvitskii, S.
"k-means++: the advantages of careful seeding". ACM-SIAM symposium
on Discrete algorithms. 2007
Version ported from http://www.stanford.edu/~darthur/kMeansppTest.zip,
which is the implementation used in the aforementioned paper.
"""
n_samples, _ = D.shape
centers = np.empty(n_clusters, dtype=int)
# Set the number of local seeding trials if none is given
if n_local_trials is None:
# This is what Arthur/Vassilvitskii tried, but did not report
# specific results for other than mentioning in the conclusion
# that it helped.
n_local_trials = 2 + int(np.log(n_clusters))
center_id = random_state_.randint(n_samples)
centers[0] = center_id
# Initialize list of closest distances and calculate current potential
closest_dist_sq = D[centers[0], :] ** 2
current_pot = closest_dist_sq.sum()
# pick the remaining n_clusters-1 points
for cluster_index in range(1, n_clusters):
rand_vals = (
random_state_.random_sample(n_local_trials) * current_pot
)
candidate_ids = np.searchsorted(
stable_cumsum(closest_dist_sq), rand_vals
)
# Compute distances to center candidates
distance_to_candidates = D[candidate_ids, :] ** 2
# Decide which candidate is the best
best_candidate = None
best_pot = None
best_dist_sq = None
for trial in range(n_local_trials):
# Compute potential when including center candidate
new_dist_sq = np.minimum(
closest_dist_sq, distance_to_candidates[trial]
)
new_pot = new_dist_sq.sum()
# Store result if it is the best local trial so far
if (best_candidate is None) or (new_pot < best_pot):
best_candidate = candidate_ids[trial]
best_pot = new_pot
best_dist_sq = new_dist_sq
centers[cluster_index] = best_candidate
current_pot = best_pot
closest_dist_sq = best_dist_sq
return centers
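# --- Illustrative usage sketch (not part of the original file) --------------
# With metric="precomputed", fit() receives a square distance matrix instead
# of raw features; cluster_centers_ is then None and medoid_indices_ indexes
# rows of that matrix, as handled in fit()/predict() above. Both np and
# pairwise_distances are already imported at the top of this module.
#
#     X = np.asarray([[1, 2], [1, 4], [1, 0], [4, 2], [4, 4], [4, 0]])
#     D = pairwise_distances(X, metric="manhattan")
#     kmedoids = KMedoids(n_clusters=2, metric="precomputed", random_state=0).fit(D)
#     print(kmedoids.medoid_indices_, kmedoids.labels_)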
| 35.173611 | 83 | 0.596249 | 14,499 | 0.953819 | 0 | 0 | 0 | 0 | 0 | 0 | 8,287 | 0.545162 |