hexsha (stringlengths 40..40) | size (int64 5..2.06M) | ext (stringclasses, 10 values) | lang (stringclasses, 1 value) | max_stars_repo_path (stringlengths 3..248) | max_stars_repo_name (stringlengths 5..125) | max_stars_repo_head_hexsha (stringlengths 40..78) | max_stars_repo_licenses (listlengths 1..10) | max_stars_count (int64 1..191k, ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24..24, ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24..24, ⌀) | max_issues_repo_path (stringlengths 3..248) | max_issues_repo_name (stringlengths 5..125) | max_issues_repo_head_hexsha (stringlengths 40..78) | max_issues_repo_licenses (listlengths 1..10) | max_issues_count (int64 1..67k, ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24..24, ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24..24, ⌀) | max_forks_repo_path (stringlengths 3..248) | max_forks_repo_name (stringlengths 5..125) | max_forks_repo_head_hexsha (stringlengths 40..78) | max_forks_repo_licenses (listlengths 1..10) | max_forks_count (int64 1..105k, ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24..24, ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24..24, ⌀) | content (stringlengths 5..2.06M) | avg_line_length (float64 1..1.02M) | max_line_length (int64 3..1.03M) | alphanum_fraction (float64 0..1) | count_classes (int64 0..1.6M) | score_classes (float64 0..1) | count_generators (int64 0..651k) | score_generators (float64 0..1) | count_decorators (int64 0..990k) | score_decorators (float64 0..1) | count_async_functions (int64 0..235k) | score_async_functions (float64 0..1) | count_documentation (int64 0..1.04M) | score_documentation (float64 0..1) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9d19f0ff06adc850dcf2436e1f6a4aeadf9e7144 | 1,130 | py | Python | example/undistort_ir_images.py | greeknerd1/stereo-rectify | 98a23c3ff96dd4344ecad13d4ff145060c8fb992 | [
"MIT"
] | null | null | null | example/undistort_ir_images.py | greeknerd1/stereo-rectify | 98a23c3ff96dd4344ecad13d4ff145060c8fb992 | [
"MIT"
] | null | null | null | example/undistort_ir_images.py | greeknerd1/stereo-rectify | 98a23c3ff96dd4344ecad13d4ff145060c8fb992 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import cv2
import numpy as np
import os
import glob
import itertools
import json
from numpy.core.fromnumeric import argmax
#SECTION 1: UNDISTORT FISHEYE
#Read in OpenCV compatible intrinsics & distortion coeffs
COLOR_INTRINSIC = np.load('./savedCoeff/colorIntr.npy')
COLOR_DIST = np.load('./savedCoeff/colorDist.npy')
IR_INTRINSIC = np.load('./savedCoeff/irIntr.npy')
IR_DIST = np.load('./savedCoeff/irDist.npy')
print('Undistorting images-----------------')
imageDir = 'december_callibration_images'
ir_images = glob.glob('./' + imageDir + '/ir-*.png')
DIMS = (1024, 1024)
IDENTITY = np.eye(3)
for i in range(len(ir_images)):
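    # For each IR frame: compute a new camera matrix (alpha=1 keeps all source pixels),
    # build the undistortion remap LUTs, then remap the image.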
ir_img = cv2.imread(ir_images[i], cv2.IMREAD_UNCHANGED)
new_K, roi = cv2.getOptimalNewCameraMatrix(IR_INTRINSIC, IR_DIST, DIMS, 1)
map1, map2 = cv2.initUndistortRectifyMap(IR_INTRINSIC, IR_DIST, IDENTITY, new_K, DIMS, cv2.CV_32FC1)
undistorted_ir_img = cv2.remap(ir_img, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
#save the undistorted image
cv2.imwrite('./undistorted_december_ir_images/' + 'ir-' + str(i) + '.png', undistorted_ir_img)
| 36.451613 | 115 | 0.752212 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 370 | 0.327434 |
9d1ab6609be43e89cc309b21cfc303cd71c0ffae | 5,617 | py | Python | tests/tensor/test_tensor_data.py | aspfohl/tinytorch | 99ac1847b798f755d12876667ec7c3a6c7149857 | [
"MIT"
] | null | null | null | tests/tensor/test_tensor_data.py | aspfohl/tinytorch | 99ac1847b798f755d12876667ec7c3a6c7149857 | [
"MIT"
] | null | null | null | tests/tensor/test_tensor_data.py | aspfohl/tinytorch | 99ac1847b798f755d12876667ec7c3a6c7149857 | [
"MIT"
] | null | null | null | import pytest
from hypothesis import given
from hypothesis.strategies import data
from numpy import array, array_equal
from tests.strategies import indices, tensor_data
from tinytorch.tensor.data import (
IndexingError,
TensorData,
broadcast_index,
shape_broadcast,
)
# Check basic properties of layout and strides.
def test_layout():
    "Test basic properties of layout and strides"
data = [0] * 3 * 5
tensor_data = TensorData(data, (3, 5), (5, 1))
assert tensor_data.is_contiguous()
assert tensor_data.shape == (3, 5)
assert tensor_data.index((1, 0)) == 5
assert tensor_data.index((1, 2)) == 7
tensor_data = TensorData(data, (5, 3), (1, 5))
assert tensor_data.shape == (5, 3)
assert not tensor_data.is_contiguous()
data = [0] * 4 * 2 * 2
tensor_data = TensorData(data, (4, 2, 2))
assert tensor_data.strides == (4, 2, 1)
@pytest.mark.xfail
def test_layout_bad():
    "Test basic properties of layout and strides"
data = [0] * 3 * 5
TensorData(data, (3, 5), (6,))
@given(tensor_data())
def test_enumeration(tensor_data):
"Test enumeration of tensor_datas."
indices = list(tensor_data.indices())
# Check that enough positions are enumerated.
assert len(indices) == tensor_data.size
# Check that all positions are enumerated only once.
assert len(set(tensor_data.indices())) == len(indices)
# Check that all indices are within the shape.
for ind in tensor_data.indices():
for i, p in enumerate(ind):
assert p >= 0
assert p < tensor_data.shape[i]
@given(tensor_data())
def test_index(tensor_data):
    "Test indexing of tensor_data."
# Check that all indices are within the size.
for ind in tensor_data.indices():
pos = tensor_data.index(ind)
assert pos >= 0 and pos < tensor_data.size
base = [0] * tensor_data.dims
with pytest.raises(IndexingError):
base[0] = -1
tensor_data.index(tuple(base))
if tensor_data.dims > 1:
with pytest.raises(IndexingError):
base = [0] * (tensor_data.dims - 1)
tensor_data.index(tuple(base))
@given(data())
def test_permute(data):
td = data.draw(tensor_data())
ind = data.draw(indices(td))
td_rev = td.permute(*list(reversed(range(td.dims))))
assert td.index(ind) == td_rev.index(tuple(reversed(ind)))
td2 = td_rev.permute(*list(reversed(range(td_rev.dims))))
assert td.index(ind) == td2.index(ind)
# Check basic properties of broadcasting.
def test_broadcast_index_smaller():
"Tests broadcast mapping between higher and lower dim tensors"
out_index = array([0, 0])
def _broadcast_index(big_index):
return broadcast_index(
big_index=big_index,
big_shape=array([2, 2, 3]),
shape=array([2, 1]),
out_index=out_index,
)
for big_index, expected_out_index in (
([0, 0, 0], [0, 0]),
([0, 0, 1], [0, 0]),
([0, 0, 2], [0, 0]),
([0, 1, 0], [1, 0]),
([0, 1, 1], [1, 0]),
([0, 1, 2], [1, 0]),
([1, 0, 0], [0, 0]),
([1, 0, 1], [0, 0]),
([1, 0, 2], [0, 0]),
([1, 1, 0], [1, 0]),
([1, 1, 1], [1, 0]),
([1, 1, 2], [1, 0]),
):
print(big_index, expected_out_index)
_broadcast_index(big_index=array(big_index))
assert array_equal(out_index, expected_out_index)
def test_broadcast_index():
out_index = array([0, 0])
def _broadcast_index(big_index):
return broadcast_index(
big_index=big_index,
big_shape=array([3, 2]),
shape=array([3, 1]),
out_index=out_index,
)
for big_index, expected_out_index in (
([0, 0], [0, 0]),
([0, 1], [0, 0]),
([1, 0], [1, 0]),
([1, 1], [1, 0]),
([2, 0], [2, 0]),
([2, 1], [2, 0]),
):
_broadcast_index(big_index=array(big_index))
assert array_equal(out_index, array(expected_out_index))
def test_broadcast_index_constant():
out_index = array([0])
def _broadcast_index(big_index):
return broadcast_index(
big_index=big_index,
big_shape=array([3, 2]),
shape=array([1]),
out_index=out_index,
)
expected_out_index = array([0])
for big_index in ([0, 0, 0], [0, 0, 1], [0, 0, 2], [1, 0, 0], [1, 0, 1], [1, 0, 2]):
_broadcast_index(big_index=array(big_index))
assert array_equal(out_index, expected_out_index)
@pytest.mark.parametrize(
"shape1, shape2, expected_return",
(
((1,), (5, 5), (5, 5)),
((5, 5), (1,), (5, 5)),
((1, 5, 5), (5, 5), (1, 5, 5)),
((5, 1, 5, 1), (1, 5, 1, 5), (5, 5, 5, 5)),
((2, 5), (5,), (2, 5)),
),
)
def test_shape_broadcast(shape1, shape2, expected_return):
c = shape_broadcast(shape1, shape2)
assert c == expected_return
@pytest.mark.parametrize(
"shape1, shape2",
(
# 2nd-indexed dimension (7 and 5) can't be broadcasted
((5, 7, 5, 1), (1, 5, 1, 5)),
# 2nd-indexed dimension (2 and 5) can't be broadcasted
((5, 2), (5,)),
# shape1 can't be empty
(tuple(), (1,)),
# shape2 can't be empty
((1,), tuple()),
# multiples don't work
((4,), (2,)),
),
)
def test_shape_broadcast_errors(shape1, shape2):
with pytest.raises(IndexingError):
c = shape_broadcast(shape1, shape2)
print(c)
@given(tensor_data())
def test_string(tensor_data):
tensor_data.to_string()
| 27.534314 | 88 | 0.574862 | 0 | 0 | 0 | 0 | 2,632 | 0.468578 | 0 | 0 | 722 | 0.128538 |
9d1aff1bfb4da29713d9d7f9b89454bc608165f8 | 359 | py | Python | terra_layer/apps.py | Terralego/terra-layer | 6564a63d389503d3ae1f63ce46e674b228d6764b | [
"MIT"
] | 1 | 2019-08-08T15:17:32.000Z | 2019-08-08T15:17:32.000Z | terra_layer/apps.py | Terralego/terra-layer | 6564a63d389503d3ae1f63ce46e674b228d6764b | [
"MIT"
] | 65 | 2019-10-21T10:05:00.000Z | 2022-03-08T14:08:27.000Z | terra_layer/apps.py | Terralego/terra-layer | 6564a63d389503d3ae1f63ce46e674b228d6764b | [
"MIT"
] | null | null | null | from django.apps import AppConfig
from terra_accounts.permissions_mixins import PermissionRegistrationMixin
class TerraLayerConfig(PermissionRegistrationMixin, AppConfig):
name = "terra_layer"
permissions = (
("DataLayer", "can_manage_layers", "Can manage layers"),
("DataSource", "can_manage_sources", "Can manage sources"),
)
| 29.916667 | 73 | 0.740947 | 248 | 0.690808 | 0 | 0 | 0 | 0 | 0 | 0 | 114 | 0.317549 |
9d1d92e0aac0102261fb87134d9195f41601abbb | 2,813 | py | Python | aps/tokenizer/word.py | ishine/aps | c814dc5a8b0bff5efa7e1ecc23c6180e76b8e26c | [
"Apache-2.0"
] | 117 | 2021-02-02T13:38:16.000Z | 2022-03-16T05:40:25.000Z | aps/tokenizer/word.py | ishine/aps | c814dc5a8b0bff5efa7e1ecc23c6180e76b8e26c | [
"Apache-2.0"
] | 3 | 2021-11-11T07:07:31.000Z | 2021-11-20T15:25:42.000Z | aps/tokenizer/word.py | ishine/aps | c814dc5a8b0bff5efa7e1ecc23c6180e76b8e26c | [
"Apache-2.0"
] | 19 | 2021-02-04T10:04:25.000Z | 2022-02-16T05:24:44.000Z | #!/usr/bin/env python
# Copyright 2021 Jian Wu
# License: Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
from typing import List, Union
from aps.tokenizer.base import TokenizerAbc, ApsTokenizer
class WordBasedTokenizer(TokenizerAbc):
"""
Word based (word, character) tokenizer
Args:
filter_words (list): filter those words
char (bool): use character unit or word unit
space (str): insert space symbol between words
"""
def __init__(self,
filter_words: List[str] = [],
char: bool = False,
space: str = ""):
super(WordBasedTokenizer, self).__init__()
self.char = char
self.space = space
self.filter_words = filter_words
def encode(self, utt: Union[str, List[str]]) -> List[str]:
if isinstance(utt, str):
raw_tokens = utt.split()
else:
raw_tokens = utt
kept_tokens = []
for tok in raw_tokens:
# remove tokens
is_filter_tok = tok in self.filter_words
if is_filter_tok:
continue
# word => char
if self.char and not is_filter_tok:
toks = [t for t in tok]
else:
toks = [tok]
kept_tokens += toks
if self.space:
kept_tokens += [self.space]
if self.space:
# remove last one
kept_tokens = kept_tokens[:-1]
return kept_tokens
def decode(self, utt: Union[str, List[str]]) -> List[str]:
if isinstance(utt, str):
enc_tokens = utt.split()
else:
enc_tokens = utt
if not self.char:
return enc_tokens
if self.space:
strs = "".join(enc_tokens).replace(self.space, " ")
else:
strs = " ".join(enc_tokens)
return strs.split(" ")
@ApsTokenizer.register("word")
class WordTokenizer(WordBasedTokenizer):
"""
Word tokenizer
Args:
filter_words (list): filter those words
"""
def __init__(self, filter_words: List[str] = []):
super(WordTokenizer, self).__init__(filter_words=filter_words,
char=False,
space="")
@ApsTokenizer.register("char")
class CharTokenizer(WordBasedTokenizer):
"""
Character tokenizer
Args:
filter_words (list): filter those words
space (str): insert space symbol between words
"""
def __init__(self, filter_words: List[str] = [], space: str = "<space>"):
super(CharTokenizer, self).__init__(filter_words=filter_words,
char=True,
space=space)
| 30.247312 | 77 | 0.539637 | 2,537 | 0.901884 | 0 | 0 | 886 | 0.314966 | 0 | 0 | 649 | 0.230715 |
9d1d953211acad0e8c4ba6634015c410a59e3522 | 1,736 | py | Python | tests/test_session.py | StenSipma/astrometry-client | 11d5b0cd0ae41a18b5bbd7f5570af60dbfbd9cc6 | [
"MIT"
] | 1 | 2020-08-06T17:55:52.000Z | 2020-08-06T17:55:52.000Z | tests/test_session.py | StenSipma/astrometry-client | 11d5b0cd0ae41a18b5bbd7f5570af60dbfbd9cc6 | [
"MIT"
] | 1 | 2021-12-18T17:03:21.000Z | 2021-12-19T12:33:16.000Z | tests/test_session.py | StenSipma/astrometry-client | 11d5b0cd0ae41a18b5bbd7f5570af60dbfbd9cc6 | [
"MIT"
] | null | null | null | import os
from unittest import mock
import pytest
import requests
from constants import VALID_KEY
from utils import FunctionCalledException, function_called_raiser
from astrometry_net_client import Session
from astrometry_net_client.exceptions import APIKeyError, LoginFailedException
some_key = "somekey"
# Start of tests
def test_session_key_input_invalid():
with pytest.raises(APIKeyError):
Session()
def test_session_key_input_string():
s = Session(some_key)
assert not s.logged_in
assert s.api_key == some_key
def test_session_key_input_file():
s = Session(key_location="./tests/data/testkey")
assert not s.logged_in
assert s.api_key == some_key
@mock.patch.dict(os.environ, {"ASTROMETRY_API_KEY": some_key})
def test_session_key_input_environment():
s = Session()
assert not s.logged_in
assert s.api_key == some_key
def test_valid_session_login(mock_server, monkeypatch):
session = Session(api_key=VALID_KEY)
session.login() # login for the first time
assert session.logged_in
assert getattr(session, "key", None) # token exists
original_key = session.key
# We patch the post call to send an error if it is called.
monkeypatch.setattr(requests, "post", function_called_raiser)
session.login() # login should not be done now, as it is already done
assert session.logged_in
assert session.key == original_key
# Here we force the login which should raise the patched exception
with pytest.raises(FunctionCalledException):
session.login(force=True)
def test_invalid_session_login(mock_server):
session = Session(api_key="invalid_key")
with pytest.raises(LoginFailedException):
session.login()
| 27.555556 | 78 | 0.75 | 0 | 0 | 0 | 0 | 182 | 0.104839 | 0 | 0 | 308 | 0.177419 |
9d1e173ec4f6da5495185d4e64e6ce6be159c672 | 2,184 | py | Python | all_repos_depends/lang/python.py | mxr/all-repos-depends | dcf715dbfb7182899e2412dbfaaf1ef4cc50865c | [
"MIT"
] | 11 | 2018-04-23T06:41:55.000Z | 2022-01-27T13:37:59.000Z | all_repos_depends/lang/python.py | mxr/all-repos-depends | dcf715dbfb7182899e2412dbfaaf1ef4cc50865c | [
"MIT"
] | 2 | 2018-04-23T06:03:18.000Z | 2018-04-23T06:03:51.000Z | all_repos_depends/lang/python.py | mxr/all-repos-depends | dcf715dbfb7182899e2412dbfaaf1ef4cc50865c | [
"MIT"
] | 2 | 2021-02-01T15:02:14.000Z | 2021-09-25T15:49:44.000Z | import ast
import os.path
from typing import Iterable
from packaging.requirements import InvalidRequirement
from packaging.requirements import Requirement
from packaging.utils import canonicalize_name
from all_repos_depends.errors import DependsError
from all_repos_depends.types import Depends
NAME = 'python'
def to_name(s: str) -> str:
return s.lower().replace('_', '-')
def load_setup_py_ast() -> ast.AST:
with open('setup.py', 'rb') as f:
try:
return ast.parse(f.read(), filename='setup.py')
except SyntaxError:
raise DependsError('Had setup.py but could not be parsed')
def node_is_setup_call(node: ast.Call) -> bool:
return (
# setup(
(isinstance(node.func, ast.Name) and node.func.id == 'setup') or
# setuptools.setup(
(
isinstance(node.func, ast.Attribute) and
isinstance(node.func.value, ast.Name) and
node.func.value.id == 'setuptools' and
node.func.attr == 'setup'
)
)
def to_depends(relationship: str, requirement_s: str) -> Depends:
try:
req = Requirement(requirement_s)
except InvalidRequirement:
return Depends(relationship, NAME, requirement_s, ' (unable to parse)')
spec_parts = []
if req.extras:
spec_parts.append('[{}]'.format(','.join(sorted(req.extras))))
if req.specifier:
spec_parts.append(str(req.specifier))
if req.marker:
spec_parts.append(f';{req.marker}')
spec = ''.join(spec_parts)
return Depends(relationship, NAME, canonicalize_name(req.name), spec)
def from_reqs_file(relationship: str, filename: str) -> Iterable[Depends]:
with open(filename) as f:
for line in f:
line, _, _ = line.partition('#')
line = line.strip()
# local editable paths aren't all that interesting
if line.startswith('-e '):
_, _, path = line.partition(' ')
path = os.path.join(os.path.dirname(filename), path)
if os.path.exists(path):
continue
if line:
yield to_depends(relationship, line)
| 29.513514 | 79 | 0.617674 | 0 | 0 | 570 | 0.260989 | 0 | 0 | 0 | 0 | 237 | 0.108516 |
9d1fd039657947bcd1efbe3cb094639c4aa0c630 | 2,829 | py | Python | mac/macos_app_audit.py | airdata/scripts | b24d62d70bbc70f02b3758ea14e47cc2b34646a9 | [
"Apache-2.0"
] | null | null | null | mac/macos_app_audit.py | airdata/scripts | b24d62d70bbc70f02b3758ea14e47cc2b34646a9 | [
"Apache-2.0"
] | null | null | null | mac/macos_app_audit.py | airdata/scripts | b24d62d70bbc70f02b3758ea14e47cc2b34646a9 | [
"Apache-2.0"
] | null | null | null | from os import listdir
from os.path import isfile, join
class Command(object):
"""
    Run a command and capture its output string, error string and exit status
Source: http://stackoverflow.com/a/13848259/354247
"""
def __init__(self, command):
self.command = command
def run(self, shell=True):
import subprocess as sp
process = sp.Popen(self.command, shell = shell, stdout = sp.PIPE, stderr = sp.PIPE)
self.pid = process.pid
self.output, self.error = process.communicate()
self.failed = process.returncode
return self
@property
def returncode(self):
return self.failed
default_applications = ['Utilities','App Store.app','Automator.app','Calculator.app','Calendar.app','Chess.app','Contacts.app','Dashboard.app','Dictionary.app','DVD Player.app','FaceTime.app','Font Book.app','iBooks.app','Image Capture.app','iTunes.app','Launchpad.app','Mail.app','Maps.app','Messages.app','Mission Control.app','Notes.app','Paste.app','Photo Booth.app','Photos.app','Preview.app','QuickTime Player.app','Reminders.app','Safari.app','Siri.app','Stickies.app','System Preferences.app','TextEdit.app','Time Machine.app','Utilities.app']
remaps = {
"iTerm.app": "iTerm2", # brew cask install iterm2 gives iTerm.app
"Alfred 3.app": "Alfred" # brew cask install alfred gives Alfred 3.app
}
mypath = "/Applications"
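# Anything in /Applications that is a directory is treated as an installed .app bundle.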
installed_applications = [f for f in listdir(mypath) if not isfile(join(mypath, f))]
cask_packages = Command('brew cask list').run().output.split()
mac_app_store_apps = Command('mas list').run().output.splitlines()
# collect applications that are not default ones.
user_applications = []
for x in installed_applications:
#first remap the names
if(x in remaps):
name = remaps[x]
else:
name = x
#then check if they are defaults
if name not in default_applications:
user_applications.append(name)
# determine which applications weren't installed via brew cask
unmanaged_applications = []
for x in user_applications:
    strip_dotapp = x[:-4] if (".app" in x) else x
    trimmed = strip_dotapp.replace(" ", "-").lower()
    is_casked = trimmed in cask_packages
    is_mas = any(strip_dotapp in s for s in mac_app_store_apps)
    # print('{} -> {}: {}|{}'.format(x, trimmed, is_casked, is_mas))
    if(not is_casked and not is_mas):
        unmanaged_applications.append(x)
# print("-------------------")
print("You have {} default applications.".format(len(default_applications)))
print("You have {} brew cask applications.".format(len(cask_packages)))
print("You have {} app store applications.".format(len(mac_app_store_apps)))
print("You have {} user applications not managed by brew cask or app store...\n------".format(len(unmanaged_applications)))
for x in unmanaged_applications:
print(x)
# print(mac_app_store_apps) | 41.602941 | 551 | 0.70555 | 606 | 0.21421 | 0 | 0 | 62 | 0.021916 | 0 | 0 | 1,308 | 0.462354 |
9d208e0e14d75f5e83f5d7ca01135d1ab258d6e8 | 317 | py | Python | src/hark_lang/machine/stdout_item.py | krrome/teal-lang | 594ac0f0baae047fdb19ac9126d174408d487905 | [
"Apache-2.0"
] | 85 | 2020-04-29T13:51:33.000Z | 2020-08-28T04:40:11.000Z | src/hark_lang/machine/stdout_item.py | krrome/teal-lang | 594ac0f0baae047fdb19ac9126d174408d487905 | [
"Apache-2.0"
] | 15 | 2020-05-06T07:58:18.000Z | 2020-08-28T10:29:28.000Z | src/hark_lang/machine/stdout_item.py | krrome/teal-lang | 594ac0f0baae047fdb19ac9126d174408d487905 | [
"Apache-2.0"
] | 4 | 2020-05-31T09:42:08.000Z | 2020-08-27T17:04:26.000Z | """StdoutItem class"""
from dataclasses import asdict, dataclass
from .hark_serialisable import HarkSerialisable, now_str
@dataclass
class StdoutItem(HarkSerialisable):
thread: int
text: str
time: str = None
def __post_init__(self):
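        # Stamp the creation time if one was not provided.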
if self.time is None:
self.time = now_str()
| 19.8125 | 56 | 0.690852 | 180 | 0.567823 | 0 | 0 | 191 | 0.602524 | 0 | 0 | 22 | 0.069401 |
9d20e8c21375abfa3aefb4fb09790b9ecbec1d58 | 6,911 | py | Python | compress/algorithms/lzw.py | ShellCode33/CompressionAlgorithms | 3b2e7b497ef0af4ba7ac8bc6f4d6e77ea4c4aedc | [
"MIT"
] | null | null | null | compress/algorithms/lzw.py | ShellCode33/CompressionAlgorithms | 3b2e7b497ef0af4ba7ac8bc6f4d6e77ea4c4aedc | [
"MIT"
] | null | null | null | compress/algorithms/lzw.py | ShellCode33/CompressionAlgorithms | 3b2e7b497ef0af4ba7ac8bc6f4d6e77ea4c4aedc | [
"MIT"
] | null | null | null | # coding: utf-8
class LZW(object):
""" Implementation of the LZW algorithm.
Attributes
----------
translation_dict : dict
Association between repeated bytes sequences and integers.
Examples
--------
An array of bytes like ['\x41', '\x42', '\x43', '\x0A', '\x00'] can be represented by an integer like 256.
It means that one integer is able to represent multiple bytes at once.
Notes
-----
    Implementations found online usually encode the integers on a fixed 12 bits. I think that wastes space: it can be
    optimized by sending, along with the encoded content, the bit-size of the integers. Instead of always sending
    12-bit integers, we can then send smaller (or bigger) ones. That size is determined by the biggest integer in the
    dictionary and is itself stored on 5 bits, so the other integers can be coded on at most 2^5 = 32 bits. This means
    the biggest supported dictionary holds 2^32 = 4294967296 entries, which is more than enough.
"""
def __init__(self, verbose=False):
self.verbose = verbose
self.translation_dict = None
self.max_size_integer_size = 5 # The integers size is encoded on 5 bits by default
self.integers_size_bits = 0 # Max value must be 2**max_size_integer_size (= 32 by default)
def __build_bytes_dictionary(self, decompression=False):
if decompression:
self.translation_dict = {byte: bytes([byte]) for byte in range(256)}
else:
self.translation_dict = {bytes([byte]): byte for byte in range(256)}
def __compress(self, bytes_list):
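        # Standard LZW: grow the current pattern while it still exists in the dictionary;
        # when it does not, emit the code of the known prefix and register the new pattern.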
self.__build_bytes_dictionary()
biggest_integer = 0
compressed = []
pattern = bytes([])
for byte in bytes_list:
byte_as_array = bytes([byte])
current = pattern + byte_as_array
if current in self.translation_dict:
pattern = current
else:
self.translation_dict[current] = len(self.translation_dict)
compressed.append(self.translation_dict[pattern])
if biggest_integer < self.translation_dict[pattern]:
biggest_integer = self.translation_dict[pattern]
pattern = byte_as_array
compressed.append(self.translation_dict[pattern])
if biggest_integer < self.translation_dict[pattern]:
biggest_integer = self.translation_dict[pattern]
if biggest_integer > 2 ** (2 ** self.max_size_integer_size):
# Shouldn't happen
raise ValueError("Can't encode such value... Maybe you should increase the size of max_size_integer_size.")
self.integers_size_bits = biggest_integer.bit_length()
if self.verbose:
print("The biggest integer is {} so integers will be coded on {} bits.".format(biggest_integer,
self.integers_size_bits))
return compressed
def compress_file(self, input_filename, output_filename):
with open(input_filename, "rb") as input_file:
bytes_list = input_file.read()
if not bytes_list:
raise IOError("File is empty !")
if self.verbose:
print("Input size : {} bytes.".format(len(bytes_list)))
compressed = self.__compress(bytes_list)
if self.verbose:
print("Assembling integers together...")
        # Originally, each integer was added to a big one using bit shifting, but this method was way too slow.
# Strings are better for this purpose.
binary_string_compressed = "1" # Padding with a 1 to keep the first zeros when converting to integer
# Add binary representation of the integers bit-length
binary_string_compressed += format(self.integers_size_bits, "0{}b".format(self.max_size_integer_size))
# https://waymoot.org/home/python_string/
# According to this, the fastest way to concatenate strings is to use join() on a list
bin_format = "0{}b".format(self.integers_size_bits)
binary_string_compressed += ''.join([format(byte, bin_format) for byte in compressed])
if self.verbose:
print("Done.")
big_int_compress = int(binary_string_compressed, 2)
to_store_in_file = big_int_compress.to_bytes((big_int_compress.bit_length() + 7) // 8, 'big')
total_file_size = len(to_store_in_file)
if self.verbose:
print("Output : {} bytes".format(total_file_size))
if len(bytes_list) <= total_file_size:
raise Exception("Aborted. No gain, you shouldn't compress that file. (+{} bytes)".format(
total_file_size - len(bytes_list)))
compression_rate = 100 - total_file_size * 100 / len(bytes_list)
# Print anyway, even when not in verbose mode
print("Compression gain : {0:.2f}%".format(compression_rate))
with open(output_filename, "wb") as output_file:
output_file.write(to_store_in_file)
return compression_rate
def __decompress(self, compressed_bytes_list):
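        # Rebuild the dictionary on the fly; the KeyError branch handles the classic LZW
        # corner case where a code is referenced before the decoder has created its entry.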
self.__build_bytes_dictionary(decompression=True)
previous_code = compressed_bytes_list[0]
decompressed = self.translation_dict[previous_code]
first_byte = None
for new_code in compressed_bytes_list[1:]:
try:
translation = self.translation_dict[new_code]
except KeyError:
translation = first_byte + self.translation_dict[previous_code]
decompressed += translation
first_byte = bytes([translation[0]])
self.translation_dict[len(self.translation_dict)] = self.translation_dict[previous_code] + first_byte
previous_code = new_code
return decompressed
def decompress_file(self, input_filename, output_filename):
with open(input_filename, "rb") as input_file:
bytes_list = input_file.read()
if not bytes_list:
raise IOError("File is empty !")
big_int_compressed = int.from_bytes(bytes_list, 'big')
bits_string_compressed = format(big_int_compressed, "0b")
self.integers_size_bits = int(bits_string_compressed[1:self.max_size_integer_size + 1], 2) # Skip first pad bit
if self.verbose:
print("Integers are {} bits long.".format(self.integers_size_bits))
compressed = []
for i in range(self.max_size_integer_size + 1, len(bits_string_compressed), self.integers_size_bits):
compressed.append(int(bits_string_compressed[i:i + self.integers_size_bits], 2))
decompressed = self.__decompress(compressed)
with open(output_filename, "wb") as output_file:
output_file.write(decompressed)
| 38.825843 | 120 | 0.64911 | 6,892 | 0.997251 | 0 | 0 | 0 | 0 | 0 | 0 | 2,061 | 0.29822 |
9d20f94306c2d2e2215af2edce02e11edf2054d9 | 1,322 | py | Python | app/models.py | ariqfadlan/donorojo-db-api | dd1a3241ead5738c94eb77ed0bbb23b26582618f | [
"MIT"
] | null | null | null | app/models.py | ariqfadlan/donorojo-db-api | dd1a3241ead5738c94eb77ed0bbb23b26582618f | [
"MIT"
] | null | null | null | app/models.py | ariqfadlan/donorojo-db-api | dd1a3241ead5738c94eb77ed0bbb23b26582618f | [
"MIT"
] | null | null | null | """
Contains database models
"""
from sqlalchemy import Column, ForeignKey, Integer, String, Float
from sqlalchemy.orm import relationship
from .database import Base
class TouristAttraction(Base):
__tablename__ = "tourist_attraction"
id = Column(Integer, primary_key=True, index=True)
name = Column(String(50), nullable=False)
category = Column(String(255), nullable=False)
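    # One-to-one links to the related address and location rows (uselist=False).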
address = relationship("Address", back_populates="tourist_attraction", uselist=False)
location = relationship("Location", back_populates="tourist_attraction", uselist=False)
class Address(Base):
__tablename__ = "address"
tourist_attraction_id = Column(Integer, ForeignKey("tourist_attraction.id"), primary_key=True)
subvillage = Column(String(255))
village = Column(String(255))
district = Column(String(255))
regency = Column(String(255))
province = Column(String(255))
tourist_attraction = relationship("TouristAttraction", back_populates="address")
class Location(Base):
__tablename__ = "location"
tourist_attraction_id = Column(Integer, ForeignKey("tourist_attraction.id"), primary_key=True)
latitude = Column(Float, nullable=False)
longitude = Column(Float, nullable=False)
tourist_attraction = relationship("TouristAttraction", back_populates="location")
| 33.05 | 98 | 0.746596 | 1,148 | 0.868381 | 0 | 0 | 0 | 0 | 0 | 0 | 233 | 0.176248 |
9d2612bdf9b9d5fe13c734ed2826b9452f048d19 | 1,096 | py | Python | hackerrank_contests/101Hack44/prime.py | rishabhiitbhu/hackerrank | acc300851c81a29472177f15fd8b56ebebe853ea | [
"MIT"
] | null | null | null | hackerrank_contests/101Hack44/prime.py | rishabhiitbhu/hackerrank | acc300851c81a29472177f15fd8b56ebebe853ea | [
"MIT"
] | null | null | null | hackerrank_contests/101Hack44/prime.py | rishabhiitbhu/hackerrank | acc300851c81a29472177f15fd8b56ebebe853ea | [
"MIT"
] | 1 | 2020-01-30T06:47:09.000Z | 2020-01-30T06:47:09.000Z | def rwh_primes2(n):
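    # Sieve of Eratosthenes over a mod-6 wheel (skips multiples of 2 and 3); returns all primes < n.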
correction = (n%6>1)
n = {0:n,1:n-1,2:n+4,3:n+3,4:n+2,5:n+1}[n%6]
sieve = [True] * (n//3)
sieve[0] = False
for i in range(int(n**0.5)//3+1):
if sieve[i]:
k=3*i+1|1
sieve[ ((k*k)//3) ::2*k]=[False]*((n//6-(k*k)//6-1)//k+1)
sieve[(k*k+4*k-2*k*(i&1))//3::2*k]=[False]*((n//6-(k*k+4*k-2*k*(i&1))//6-1)//k+1)
return [2,3] + [3*i+1|1 for i in range(1,n//3-correction) if sieve[i]]
# a = rwh_primes2(100)
# print(a)
# http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188
""" Input n>=6, Returns a list of primes, 2 <= p < n """
def sieve_for_primes_to(n):
size = n//2
sieve = [1]*size
limit = int(n**0.5)
for i in range(1,limit):
if sieve[i]:
val = 2*i+1
tmp = ((size-1) - i)//val
sieve[i+val::val] = [0]*tmp
return [2] + [i*2+1 for i, v in enumerate(sieve) if v and i>0]
print(sieve_for_primes_to(3))
print(sieve_for_primes_to(1))
print(sieve_for_primes_to(100))
| 33.212121 | 110 | 0.519161 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 200 | 0.182482 |
9d26ca6234d4434fd99a9aa1e9b161d86a72613c | 2,649 | py | Python | competitive_k_means.py | QLightman/competitive_k_means | 264a3da409177e40f150da1107d00e149ff1e125 | [
"MIT"
] | 1 | 2019-09-03T09:56:43.000Z | 2019-09-03T09:56:43.000Z | competitive_k_means.py | QLightman/competitive_k_means | 264a3da409177e40f150da1107d00e149ff1e125 | [
"MIT"
] | null | null | null | competitive_k_means.py | QLightman/competitive_k_means | 264a3da409177e40f150da1107d00e149ff1e125 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import copy
k = 4
ratio=0.95
def competitive_k_means(save_plot=False):
plt.figure(figsize=(12, 12))
X, y =generate_dataset()
plt.scatter(X[:,0],X[:,1],c=y,marker='+')
plt.title("results from the data")
if save_plot:
plt.savefig("data.png")
pre_center=np.empty((k,2))
for i in range (k):
pre_center[i]=X[i]
y_pred=distance(X,pre_center)
his_y_pred=np.empty(len(X))
iteration_time=1
while np.sum(his_y_pred!=y_pred)!=0:
iteration_time+=1
his_y_pred=copy.copy(y_pred)
y_pred=distance(X,pre_center)
plt.figure()
plt.scatter(X[:,0],X[:,1],c=y_pred,marker='+')
plt.scatter(pre_center[:,0],pre_center[:,1],c='r')
if save_plot:
plt.savefig("%dinterations.jpg"%iteration_time)
plt.show()
def distance(X,pre_center,competitive=True):
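    # One update step: assign every sample to its nearest center, recompute centers as cluster
    # means and, if competitive, push the smaller of the two closest centers away (RPCL-style).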
result=np.empty(len(X))
pre_center_number=np.empty(k)
for i in range(len(X)):
tmp=[]
for j in range (k):
tmp.append(((X[i]-pre_center[j])**2).sum())
result[i]=tmp.index(min(tmp))
for i in range(k):
pre_center[i]=X[result==i].mean(0)
pre_center_number[i]=np.sum(result==i)
    print(pre_center_number)
# implement the RPCL to k-mean so that the number
# of clusters is automatically determined
if competitive:
minimum_distance=float('inf')
minimun_index=np.array([0,0])
for i in range(k):
for j in range(k):
if(j<=i):
continue
if(((pre_center[i]-pre_center[j])**2).sum()<minimum_distance):
minimum_distance=((pre_center[i]-pre_center[j])**2).sum()
minimun_index[0]=i
minimun_index[1]=j
if(pre_center_number[minimun_index[0]]>pre_center_number[minimun_index[1]]):
pre_center[minimun_index[1]]=push(pre_center[minimun_index[1]],pre_center[minimun_index[0]])
else:
pre_center[minimun_index[0]]=push(pre_center[minimun_index[0]],pre_center[minimun_index[1]])
return result
# push the competitive center
def push(push_center,center):
return (push_center-center)*(1+ratio)+center
def generate_dataset(location=np.array([[0,0],[6,0],[3,5]]),n_samples=300,centers=3):
y=np.empty(n_samples)
X=np.empty([n_samples,2])
for i in range(n_samples):
for j in range(centers):
tmp=np.random.randint(0,centers)
X[i]=location[tmp]+np.random.rand(2)*2
y[i]=tmp
return X,y
if __name__ == '__main__':
competitive_k_means()
| 33.1125 | 104 | 0.609287 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 195 | 0.073613 |
9d280cecbd0d584acd8037cf6b0f18c473484417 | 3,031 | py | Python | shiftmanager/redshift.py | whitmo/shiftmanager | 49cd461854a9e8bc270b5cc6f9a2303cf87c2fb3 | [
"BSD-2-Clause"
] | null | null | null | shiftmanager/redshift.py | whitmo/shiftmanager | 49cd461854a9e8bc270b5cc6f9a2303cf87c2fb3 | [
"BSD-2-Clause"
] | null | null | null | shiftmanager/redshift.py | whitmo/shiftmanager | 49cd461854a9e8bc270b5cc6f9a2303cf87c2fb3 | [
"BSD-2-Clause"
] | 1 | 2020-09-02T04:37:37.000Z | 2020-09-02T04:37:37.000Z | """
Defines a Redshift class which encapsulates a database connection
and utility functions for managing that database.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import psycopg2
from shiftmanager.mixins import AdminMixin, ReflectionMixin, S3Mixin
from shiftmanager.memoized_property import memoized_property
class Redshift(AdminMixin, ReflectionMixin, S3Mixin):
"""Interface to Redshift.
This class will default to environment params for all arguments.
For methods requiring S3, aws keys are not required if you have
environmental params set for boto to pick up:
http://boto.readthedocs.org/en/latest/s3_tut.html#creating-a-connection
Parameters
----------
database : str
envvar equivalent: PGDATABASE
user : str
envvar equivalent: PGUSER
password : str
envvar equivalent: PGPASSWORD
host : str
envvar equivalent: PGHOST
port : int
envvar equivalent: PGPORT
aws_access_key_id : str
aws_secret_access_key : str
"""
@memoized_property
def connection(self):
"""A `psycopg2.connect` connection to Redshift.
Instantiation is delayed until the object is first used.
"""
print("Connecting to %s..." % self.host)
return psycopg2.connect(user=self.user,
host=self.host,
port=self.port,
database=self.database,
password=self.password)
def __init__(self, database=None, user=None, password=None, host=None,
port=5439, aws_access_key_id=None,
aws_secret_access_key=None):
self.set_aws_credentials(aws_access_key_id, aws_secret_access_key)
self.s3_conn = None
self.user = user or os.environ.get('PGUSER')
self.host = host or os.environ.get('PGHOST')
self.port = port or os.environ.get('PGPORT')
self.database = database or os.environ.get('PGDATABASE')
self.password = password or os.environ.get('PGPASSWORD')
self._all_privileges = None
def execute(self, batch, parameters=None):
"""
Execute a batch of SQL statements using this instance's connection.
Statements are executed within a transaction.
Parameters
----------
batch : str
The batch of SQL statements to execute.
parameters : list or dict
Values to bind to the batch, passed to `cursor.execute`
"""
with self.connection as conn:
with conn.cursor() as curs:
curs.execute(batch, parameters)
def mogrify(self, batch, parameters=None, execute=False):
if execute:
self.execute(batch, parameters)
with self.connection as conn:
with conn.cursor() as curs:
mogrified = curs.mogrify(batch, parameters)
return mogrified
| 32.244681 | 75 | 0.629165 | 2,635 | 0.86935 | 0 | 0 | 483 | 0.159353 | 0 | 0 | 1,327 | 0.437809 |
9d2bc7d987bd63f2af30edb8519069c52527c5c7 | 387 | py | Python | General Data Preprocessing/copyFile.py | yuxiawang1992/Python-Code | d457a1fd61742dfac08a82a26b66703e5ff6f780 | [
"Apache-2.0"
] | null | null | null | General Data Preprocessing/copyFile.py | yuxiawang1992/Python-Code | d457a1fd61742dfac08a82a26b66703e5ff6f780 | [
"Apache-2.0"
] | null | null | null | General Data Preprocessing/copyFile.py | yuxiawang1992/Python-Code | d457a1fd61742dfac08a82a26b66703e5ff6f780 | [
"Apache-2.0"
] | null | null | null | #Python 3.4.3
#coding=gbk
# copy file wangyuxia 20160920
import sys, shutil, os, string
path = "E:\\test for qgis\\"
target_path = "E:\\test for qgis\\HourScale\\"
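# Clone the template header 'd_02.hdr' once per day (02-30) and hour (00-23), named N<DD><HH>.hdr.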
for i in range(2,31):
for j in range(0,24):
filename = 'N'+str(i).zfill(2)+str(j).zfill(2)
shutil.copyfile(path+'d_02.hdr',target_path+filename+'.hdr')
print("------------finished---------")
| 25.8 | 68 | 0.596899 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 158 | 0.408269 |
9d2c26cb802d2c6da46e391e982eacb22cc6b08d | 3,581 | py | Python | convert_to_onnx.py | bhahn2004/FaceBoxes.PyTorch | be01c2449c6efa2a976a701dd8a052aa903a32b4 | [
"MIT"
] | null | null | null | convert_to_onnx.py | bhahn2004/FaceBoxes.PyTorch | be01c2449c6efa2a976a701dd8a052aa903a32b4 | [
"MIT"
] | null | null | null | convert_to_onnx.py | bhahn2004/FaceBoxes.PyTorch | be01c2449c6efa2a976a701dd8a052aa903a32b4 | [
"MIT"
] | null | null | null | import sys
from scipy.special import softmax
import torch.onnx
import onnxruntime as ort
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
from pytorch2keras.converter import pytorch_to_keras
from models.faceboxes import FaceBoxes
input_dim = 1024
num_classes = 2
model_path = "weights/FaceBoxesProd.pth"
net = FaceBoxes('train', input_dim, num_classes)
def check_keys(model, pretrained_state_dict):
ckpt_keys = set(pretrained_state_dict.keys())
model_keys = set(model.state_dict().keys())
used_pretrained_keys = model_keys & ckpt_keys
unused_pretrained_keys = ckpt_keys - model_keys
missing_keys = model_keys - ckpt_keys
print('Missing keys:{}'.format(len(missing_keys)))
print('Unused checkpoint keys:{}'.format(len(unused_pretrained_keys)))
print('Used keys:{}'.format(len(used_pretrained_keys)))
assert len(used_pretrained_keys) > 0, 'load NONE from pretrained checkpoint'
return True
def remove_prefix(state_dict, prefix):
''' Old style model is stored with all names of parameters sharing common prefix 'module.' '''
print('remove prefix \'{}\''.format(prefix))
f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x
return {f(key): value for key, value in state_dict.items()}
def load_model(model, pretrained_path, load_to_cpu):
print('Loading pretrained model from {}'.format(pretrained_path))
if load_to_cpu:
pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage)
else:
device = torch.cuda.current_device()
pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage.cuda(device))
if "state_dict" in pretrained_dict.keys():
pretrained_dict = remove_prefix(pretrained_dict['state_dict'], 'module.')
else:
pretrained_dict = remove_prefix(pretrained_dict, 'module.')
check_keys(model, pretrained_dict)
model.load_state_dict(pretrained_dict, strict=False)
return model
net = load_model(net, model_path, False)
net.eval()
net.to("cuda")
model_name = model_path.split("/")[-1].split(".")[0]
onnx_model_path = f"models/onnx/base-model.onnx"
# export ONNX model
dummy_input = torch.randn(1, 3, input_dim, input_dim).to("cuda")
torch.onnx.export(net, dummy_input, onnx_model_path, verbose=False, input_names=['input'], output_names=['output'])
"""
# try using pytorch2keras
keras_model = pytorch_to_keras(net, dummy_input, [(3, input_dim, input_dim)])
keras_model_path = f"models/onnx/base-model"
#keras_model.save(model_path)
# 0. print PyTorch outputs
out = net(dummy_input)
dummy_input = dummy_input.cpu().detach().numpy()
out = out.cpu().detach().numpy()
loc = out[:, :, 2:]
conf = out[:, :, :2]
scores = softmax(conf, axis=-1)
print(scores)
# 1. check if ONNX outputs are the same
ort_session = ort.InferenceSession(onnx_model_path)
input_name = ort_session.get_inputs()[0].name
out = ort_session.run(None, {input_name: dummy_input})[0]
loc = out[:, :, 2:]
conf = out[:, :, :2]
scores = softmax(conf, axis=-1)
print(scores)
# 2. check if Keras outputs are the same
keras_model_path = f"models/onnx/base-model"
keras_model = tf.keras.models.load_model(keras_model_path)
out = keras_model.predict(dummy_input)
loc = out[:, :, 2:]
conf = out[:, :, :2]
scores = softmax(conf, axis=-1)
print(scores)
# 3. check if intermediate results of Keras are the same
test_fn = K.function([keras_model.input], [keras_model.get_layer('334').output[0]])
test_out = test_fn(dummy_input)
print(np.round(np.array(test_out), 4)[:30])
"""
| 33.46729 | 115 | 0.729405 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,589 | 0.443731 |
9d2c9923a0dda16187c578d67868231654968587 | 358 | py | Python | setup.py | kckaiwei/pysteamcmd | 273f114352975268b01cb8007cc2336115aea4fc | [
"MIT"
] | null | null | null | setup.py | kckaiwei/pysteamcmd | 273f114352975268b01cb8007cc2336115aea4fc | [
"MIT"
] | null | null | null | setup.py | kckaiwei/pysteamcmd | 273f114352975268b01cb8007cc2336115aea4fc | [
"MIT"
] | null | null | null | from setuptools import setup
setup(name='pysteamcmd',
version='0.1.2',
description='Python package to install and utilize steamcmd',
url='http://github.com/f0rkz/pysteamcmd',
author='f0rkz',
author_email='[email protected]',
license='MIT',
packages=['pysteamcmd'],
install_requires=[],
zip_safe=False)
| 27.538462 | 67 | 0.648045 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 147 | 0.410615 |
9d2f4723ec751e23b2b4a9d81dfaceee08d127d9 | 3,292 | py | Python | x2py/links/strategies/buffer_transform_strategy.py | jaykang920/x2py | b8bd473f94ff4b9576e984cc384f4159ab71278d | [
"MIT"
] | null | null | null | x2py/links/strategies/buffer_transform_strategy.py | jaykang920/x2py | b8bd473f94ff4b9576e984cc384f4159ab71278d | [
"MIT"
] | 1 | 2019-06-05T09:35:09.000Z | 2020-07-02T09:46:46.000Z | x2py/links/strategies/buffer_transform_strategy.py | jaykang920/x2py | b8bd473f94ff4b9576e984cc384f4159ab71278d | [
"MIT"
] | null | null | null | # Copyright (c) 2017, 2018 Jae-jun Kang
# See the file LICENSE for details.
from x2py.event_factory import EventFactory
from x2py.links.link_events import *
from x2py.links.strategy import ChannelStrategy
from x2py.util.trace import Trace
class BufferTransformStrategy(ChannelStrategy):
EventFactory.register_type(HandshakeReq)
EventFactory.register_type(HandshakeResp)
EventFactory.register_type(HandshakeAck)
def __init__(self, buffer_transform=None):
self.buffer_transform = buffer_transform
def before_session_setup(self, session):
session_strategy = BufferTransformSessionStrategy()
session_strategy.session = session
session.channel_strategy = session_strategy
def init_handshake(self, session):
if self.buffer_transform is None:
return
session_strategy = session.channel_strategy
buffer_transform = self.buffer_transform.clone()
session_strategy.buffer_transform = buffer_transform
session.send(HandshakeReq().setattrs(
_transform = False,
data = buffer_transform.init_handshake()
))
def cleanup(self):
if self.buffer_transform is None:
return
self.buffer_transform.cleanup()
self.buffer_transform = None
class BufferTransformSessionStrategy(ChannelStrategy.SubStrategy):
def __init__(self):
self.buffer_transform = None
self.rx_transform_ready = False
self.tx_transform_ready = False
def process(self, event):
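        # Drive the three-step buffer-transform handshake: answer HANDSHAKE_REQ, finish and
        # acknowledge on HANDSHAKE_RESP, and enable the outgoing transform on HANDSHAKE_ACK.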
type_id = event.type_id()
if type_id == LinkEventType.HANDSHAKE_REQ:
response = None
try:
response = self.buffer_transform.handshake(event.data)
except Exception as ex:
Trace.error("{} error handshaking {}", self.link.name, ex)
self.session.send(HandshakeResp().setattrs(
_transform = False,
data = response
))
elif type_id == LinkEventType.HANDSHAKE_RESP:
result = False
try:
result = self.buffer_transform.fini_handshake(event.data)
except Exception as ex:
Trace.error("{} error finishing handshake {}", self.link.name, ex)
if result:
self.rx_transform_ready = True
self.session.send(HandshakeAck().setattrs(
_transform = False,
result = result
))
elif type_id == LinkEventType.HANDSHAKE_ACK:
result = event.result
if result:
self.tx_transform_ready = True
self.session.link.on_connect(result, self.session)
else:
return False
return True
def cleanup(self):
if self.buffer_transform is None:
return
self.buffer_transform.cleanup()
self.buffer_transform = None
def before_send(self, buffer):
if self.tx_transform_ready:
buffer = self.buffer_transform.transform(buffer)
return True, buffer
return False, buffer
def after_receive(self, buffer):
if self.rx_transform_ready:
buffer = self.buffer_transform.inverse_transform(buffer)
return buffer
| 33.591837 | 82 | 0.637303 | 3,047 | 0.925577 | 0 | 0 | 0 | 0 | 0 | 0 | 132 | 0.040097 |
9d2ffa602fd2739373ede0b55f827179feb8572a | 5,632 | py | Python | ignite_trainer/_visdom.py | jinczing/AudioCLIP | b080fc946599290c91f9d3b203295e5968af1bf6 | [
"MIT"
] | 304 | 2021-06-28T09:59:13.000Z | 2022-03-30T17:33:52.000Z | ignite_trainer/_visdom.py | AK391/AudioCLIP | 45327aa203839bfeb58681dd36c04fd493ee72f4 | [
"MIT"
] | 176 | 2021-07-23T08:30:21.000Z | 2022-03-14T12:29:06.000Z | ignite_trainer/_visdom.py | AK391/AudioCLIP | 45327aa203839bfeb58681dd36c04fd493ee72f4 | [
"MIT"
] | 34 | 2021-06-29T11:50:19.000Z | 2022-03-02T12:01:36.000Z | import os
import sys
import json
import time
import tqdm
import socket
import subprocess
import numpy as np
import visdom
from typing import Tuple
from typing import Optional
def calc_ytick_range(vis: visdom.Visdom, window_name: str, env: Optional[str] = None) -> Tuple[float, float]:
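    # Robust y-axis range: drop per-trace outliers outside 1.5*IQR, then pad the surviving
    # quartile extremes by 1.5 times the summed IQR of the sanitized traces.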
lower_bound, upper_bound = -1.0, 1.0
stats = vis.get_window_data(win=window_name, env=env)
if stats:
stats = json.loads(stats)
stats = [np.array(item['y']) for item in stats['content']['data']]
stats = [item[item != np.array([None])].astype(np.float16) for item in stats]
if stats:
q25s = np.array([np.quantile(item, 0.25) for item in stats if len(item) > 0])
q75s = np.array([np.quantile(item, 0.75) for item in stats if len(item) > 0])
if q25s.shape == q75s.shape and len(q25s) > 0:
iqrs = q75s - q25s
lower_bounds = q25s - 1.5 * iqrs
upper_bounds = q75s + 1.5 * iqrs
stats_sanitized = list()
idx = 0
for item in stats:
if len(item) > 0:
item_sanitized = item[(item >= lower_bounds[idx]) & (item <= upper_bounds[idx])]
stats_sanitized.append(item_sanitized)
idx += 1
stats_sanitized = np.array(stats_sanitized)
q25_sanitized = np.array([np.quantile(item, 0.25) for item in stats_sanitized])
q75_sanitized = np.array([np.quantile(item, 0.75) for item in stats_sanitized])
iqr_sanitized = np.sum(q75_sanitized - q25_sanitized)
lower_bound = np.min(q25_sanitized) - 1.5 * iqr_sanitized
upper_bound = np.max(q75_sanitized) + 1.5 * iqr_sanitized
return lower_bound, upper_bound
def plot_line(vis: visdom.Visdom,
window_name: str,
env: Optional[str] = None,
line_label: Optional[str] = None,
x: Optional[np.ndarray] = None,
y: Optional[np.ndarray] = None,
x_label: Optional[str] = None,
y_label: Optional[str] = None,
width: int = 576,
height: int = 416,
draw_marker: bool = False) -> str:
empty_call = not vis.win_exists(window_name)
if empty_call and (x is not None or y is not None):
return window_name
if x is None:
x = np.ones(1)
empty_call = empty_call & True
if y is None:
y = np.full(1, np.nan)
empty_call = empty_call & True
if x.shape != y.shape:
x = np.ones_like(y)
opts = {
'showlegend': True,
'markers': draw_marker,
'markersize': 5,
}
if empty_call:
opts['title'] = window_name
opts['width'] = width
opts['height'] = height
window_name = vis.line(
X=x,
Y=y,
win=window_name,
env=env,
update='append',
name=line_label,
opts=opts
)
xtickmin, xtickmax = 0.0, np.max(x) * 1.05
ytickmin, ytickmax = calc_ytick_range(vis, window_name, env)
opts = {
'showlegend': True,
'xtickmin': xtickmin,
'xtickmax': xtickmax,
'ytickmin': ytickmin,
'ytickmax': ytickmax,
'xlabel': x_label,
'ylabel': y_label
}
window_name = vis.update_window_opts(win=window_name, opts=opts, env=env)
return window_name
def create_summary_window(vis: visdom.Visdom,
visdom_env_name: str,
experiment_name: str,
summary: str) -> str:
return vis.text(
text=summary,
win=experiment_name,
env=visdom_env_name,
opts={'title': 'Summary', 'width': 576, 'height': 416},
append=vis.win_exists(experiment_name, visdom_env_name)
)
def connection_is_alive(host: str, port: int) -> bool:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
try:
sock.connect((host, port))
sock.shutdown(socket.SHUT_RDWR)
return True
except socket.error:
return False
def get_visdom_instance(host: str = 'localhost',
port: int = 8097,
env_name: str = 'main',
env_path: str = 'visdom_env') -> Tuple[visdom.Visdom, Optional[int]]:
vis_pid = None
if not connection_is_alive(host, port):
if any(host.strip('/').endswith(lh) for lh in ['127.0.0.1', 'localhost']):
os.makedirs(env_path, exist_ok=True)
tqdm.tqdm.write('Starting visdom on port {}'.format(port), end='')
vis_args = [
sys.executable,
'-m', 'visdom.server',
'-port', str(port),
'-env_path', os.path.join(os.getcwd(), env_path)
]
vis_proc = subprocess.Popen(vis_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
time.sleep(2.0)
vis_pid = vis_proc.pid
tqdm.tqdm.write('PID -> {}'.format(vis_pid))
trials_left = 5
while not connection_is_alive(host, port):
time.sleep(1.0)
tqdm.tqdm.write('Trying to connect ({} left)...'.format(trials_left))
trials_left -= 1
if trials_left < 1:
raise RuntimeError('Visdom server is not running. Please run "python -m visdom.server".')
vis = visdom.Visdom(
server='http://{}'.format(host),
port=port,
env=env_name
)
return vis, vis_pid
| 29.333333 | 109 | 0.552734 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 424 | 0.075284 |
9d3007ae1a0b21a2c5b82a4a63774e81f6aa5a00 | 4,960 | py | Python | anonybot.py | sp0oks/anonybot | 864688f04231e3088737b12caed76f61a5128993 | [
"MIT"
] | 5 | 2019-12-17T17:53:51.000Z | 2020-09-06T07:51:23.000Z | anonybot.py | CptSpookz/anonybot | 864688f04231e3088737b12caed76f61a5128993 | [
"MIT"
] | null | null | null | anonybot.py | CptSpookz/anonybot | 864688f04231e3088737b12caed76f61a5128993 | [
"MIT"
] | 2 | 2020-01-20T01:01:20.000Z | 2020-09-06T07:51:25.000Z | import os
import time
from sqlalchemy import create_engine, BigInteger, UnicodeText, Column, Integer
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, scoped_session
from sqlalchemy.exc import SQLAlchemyError
from aiogram import Bot, Dispatcher, executor, types
from aiogram.utils.exceptions import ChatNotFound
from dotenv import load_dotenv
load_dotenv()
# Database configuration
DB = os.getenv('DB_ADDR')
ENGINE = create_engine(DB)
Base = declarative_base()
Session = scoped_session(sessionmaker(bind=ENGINE))
class Msg(Base):
__tablename__ = 'messages'
id = Column(Integer, primary_key=True)
user_id = Column(BigInteger)
text = Column(UnicodeText(4096))
# Bot configuration
USAGE = """\
/status -- show how many messages are pending
/receive -- receive pending messages
/send [user_id] -- reply to message to send it to given user
/drop -- drop all pending messages
/help -- shows this message
"""
TOKEN = os.getenv('BOT_TOKEN')
bot = Bot(token=TOKEN)
dp = Dispatcher(bot)
@dp.message_handler(commands=['send'])
async def send_msg(message: types.Message):
if message.chat.type == 'private':
session = Session()
args = message.get_args().split()
if len(args) >= 1:
try:
receiver = int(args[0])
except ValueError:
await message.reply('You need to specify a Telegram id as the receiver.')
return
if message.reply_to_message is not None:
msg = Msg(user_id=receiver, text=message.reply_to_message.text)
try:
session.add(msg)
session.commit()
try:
await bot.send_message(receiver, 'You have a new message!')
await message.reply('Message was sent.')
except ChatNotFound:
session.flush()
await message.reply('This user id does not exist.')
except SQLAlchemyError as err:
session.rollback()
print(f'[{time.asctime()}]: {err}')
await message.reply('Something happened, message could not be sent.\nTry sending the message again.')
else:
await message.reply('You must reply to the message you want to send.')
else:
await message.reply('You must provide a receiver to the message.')
@dp.message_handler(commands=['receive'])
async def receive_msg(message: types.Message):
if message.chat.type == 'private':
session = Session()
msgs = session.query(Msg).filter_by(user_id=message.from_user.id).all()
if len(msgs) > 0:
for i, msg in enumerate(msgs, 1):
text = f'#{i}: {msg.text}'
await message.reply(text, parse_mode=types.message.ParseMode.MARKDOWN, reply=False)
try:
session.query(Msg).filter_by(user_id=message.from_user.id).delete()
session.commit()
except SQLAlchemyError as err:
session.rollback()
print(f'[{time.asctime()}]: {err}')
await message.reply('Something happened, could not drop messages.')
else:
await message.reply('Your inbox is currently empty.')
@dp.message_handler(commands=['drop'])
async def drop_msg(message: types.Message):
if message.chat.type == 'private':
session = Session()
msgs = session.query(Msg).filter_by(user_id=message.from_user.id).count()
try:
session.query(Msg).filter_by(user_id=message.from_user.id).delete()
session.commit()
await message.reply(f'Dropped {msgs} messages.')
except SQLAlchemyError as err:
session.rollback()
print(f'[{time.asctime()}]: {err}')
await message.reply(f'Something happened, could not drop messages.')
@dp.message_handler(commands=['status'])
async def status(message: types.Message):
if message.chat.type == 'private':
session = Session()
msgs = session.query(Msg).filter_by(user_id=message.from_user.id).count()
text = f'You have {msgs} pending messages.'
await message.reply(text)
@dp.message_handler(commands=['help'])
async def help_command(message: types.Message):
if message.chat.type == 'private':
await message.reply(text=USAGE)
@dp.message_handler(commands=['start'])
async def start(message: types.Message):
if message.chat.type == 'private':
text = f'Hello, this is Anonybot.\n'+USAGE
session = Session()
msgs = session.query(Msg).filter_by(user_id=message.from_user.id).count()
text += f'\nYou have {msgs} pending messages.'
await message.reply(text=text, reply=False)
if __name__ == '__main__':
Base.metadata.create_all(ENGINE)
executor.start_polling(dp)
| 36.20438 | 121 | 0.626008 | 161 | 0.03246 | 0 | 0 | 3,793 | 0.764718 | 3,553 | 0.716331 | 1,052 | 0.212097 |
9d303166d818d8f8f693a98022e31dfc5961d444 | 2,912 | py | Python | tests/test_doc_cvnn_example.py | saugatkandel/cvnn | f6d7b5c17fd064a7eaa60e7af922914a974eb69a | [
"MIT"
] | 38 | 2020-09-16T14:47:36.000Z | 2022-03-30T13:35:05.000Z | tests/test_doc_cvnn_example.py | saugatkandel/cvnn | f6d7b5c17fd064a7eaa60e7af922914a974eb69a | [
"MIT"
] | 25 | 2020-10-03T19:30:16.000Z | 2022-03-29T15:24:44.000Z | tests/test_doc_cvnn_example.py | saugatkandel/cvnn | f6d7b5c17fd064a7eaa60e7af922914a974eb69a | [
"MIT"
] | 9 | 2021-01-18T10:48:57.000Z | 2022-02-11T10:34:52.000Z | import numpy as np
import cvnn.layers as complex_layers
import tensorflow as tf
from pdb import set_trace
def get_dataset():
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.cifar10.load_data()
train_images = train_images.astype(dtype=np.complex64) / 255.0
test_images = test_images.astype(dtype=np.complex64) / 255.0
return (train_images, train_labels), (test_images, test_labels)
def test_cifar():
(train_images, train_labels), (test_images, test_labels) = get_dataset()
# Create your model
model = tf.keras.models.Sequential()
model.add(complex_layers.ComplexInput(input_shape=(32, 32, 3))) # Always use ComplexInput at the start
model.add(complex_layers.ComplexConv2D(32, (3, 3), activation='cart_relu'))
model.add(complex_layers.ComplexAvgPooling2D((2, 2)))
model.add(complex_layers.ComplexConv2D(64, (3, 3), activation='cart_relu'))
model.add(complex_layers.ComplexFlatten())
model.add(complex_layers.ComplexDense(64, activation='cart_relu'))
model.add(complex_layers.ComplexDense(10, activation='convert_to_real_with_abs'))
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
# model.summary()
history = model.fit(train_images, train_labels, epochs=1, validation_data=(test_images, test_labels))
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
def test_regression():
input_shape = (4, 28, 28, 3)
x = tf.cast(tf.random.normal(input_shape), tf.complex64)
model = tf.keras.models.Sequential()
model.add(complex_layers.ComplexInput(input_shape=input_shape[1:]))
model.add(complex_layers.ComplexFlatten())
model.add(complex_layers.ComplexDense(units=64, activation='cart_relu'))
model.add(complex_layers.ComplexDense(units=10, activation='linear'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
y = model(x)
assert y.dtype == np.complex64
def test_functional_api():
inputs = complex_layers.complex_input(shape=(128, 128, 3))
c0 = complex_layers.ComplexConv2D(32, activation='cart_relu', kernel_size=3)(inputs)
c1 = complex_layers.ComplexConv2D(32, activation='cart_relu', kernel_size=3)(c0)
c2 = complex_layers.ComplexMaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(c1)
t01 = complex_layers.ComplexConv2DTranspose(5, kernel_size=2, strides=(2, 2), activation='cart_relu')(c2)
concat01 = tf.keras.layers.concatenate([t01, c1], axis=-1)
c3 = complex_layers.ComplexConv2D(4, activation='cart_relu', kernel_size=3)(concat01)
out = complex_layers.ComplexConv2D(4, activation='cart_relu', kernel_size=3)(c3)
model = tf.keras.Model(inputs, out)
if __name__ == '__main__':
test_functional_api()
test_regression()
test_cifar()
| 45.5 | 109 | 0.730426 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 282 | 0.096841 |
9d31c3b53c5a416e56a025e297cf9e335432c27b | 2,580 | py | Python | gkutils/commonutils/getCSVColumnSubset.py | genghisken/gkutils | 0c8aa06d813de72b1cd9cba11219a78952799420 | [
"MIT"
] | null | null | null | gkutils/commonutils/getCSVColumnSubset.py | genghisken/gkutils | 0c8aa06d813de72b1cd9cba11219a78952799420 | [
"MIT"
] | 1 | 2021-11-19T19:28:52.000Z | 2021-11-19T19:29:57.000Z | gkutils/commonutils/getCSVColumnSubset.py | genghisken/gkutils | 0c8aa06d813de72b1cd9cba11219a78952799420 | [
"MIT"
] | null | null | null | """Write a subset of keys from one CSV to another. Don't use lots of memory.
Usage:
%s <filename> <outputfile> [--columns=<columns>] [--htm] [--racol=<racol>] [--deccol=<deccol>] [--filtercol=<filtercol>]
%s (-h | --help)
%s --version
Options:
-h --help Show this screen.
--version Show version.
--columns=<columns> Comma separated (no spaces) columns.
--htm Generate HTM IDs and add to the column subset.
--racol=<racol> RA column, ignored if htm not specified [default: ra]
--deccol=<deccol> Declination column, ignored if htm not specified [default: dec]
--filtercol=<filtercol> Only write the row when this column is not blank.
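
Example (hypothetical CSV and column names):
    getCSVColumnSubset.py input.csv subset.csv --columns=id,ra,dec --htm --filtercol=id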
"""
import sys
__doc__ = __doc__ % (sys.argv[0], sys.argv[0], sys.argv[0])
from docopt import docopt
from gkutils.commonutils import Struct, readGenericDataFile, cleanOptions
import csv
from gkhtm._gkhtm import htmName
def getColumnSubset(options):
# DictReader doesn't burden the memory - so let's use it to select our column subset.
data = csv.DictReader(open(options.filename), delimiter=',')
columns = options.columns.split(',')
if options.htm:
columns.append('htm10')
columns.append('htm13')
columns.append('htm16')
with open(options.outputfile, 'w') as f:
w = csv.DictWriter(f, columns, delimiter = ',')
w.writeheader()
for row in data:
# TO FIX - code is very inefficient. HTMs generated regardless of filtercol. Silly!
trimmedRow = {key: row[key] for key in options.columns.split(',')}
if options.htm:
htm16Name = htmName(16, float(row[options.racol]), float(row[options.deccol]))
trimmedRow['htm10'] = htm16Name[0:12]
trimmedRow['htm13'] = htm16Name[12:15]
trimmedRow['htm16'] = htm16Name[15:18]
try:
if options.filtercol:
if trimmedRow[options.filtercol] and trimmedRow[options.filtercol] != 'null':
w.writerow(trimmedRow)
else:
w.writerow(trimmedRow)
except KeyError as e:
w.writerow(trimmedRow)
return
def main(argv = None):
opts = docopt(__doc__, version='0.1')
opts = cleanOptions(opts)
# Use utils.Struct to convert the dict into an object for compatibility with old optparse code.
options = Struct(**opts)
getColumnSubset(options)
if __name__ == '__main__':
main()
| 35.342466 | 122 | 0.605039 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,098 | 0.425581 |
9d3448187e277186c37746a8eee21eed655db199 | 1,030 | py | Python | questions/univalued-binary-tree/Solution.py | marcus-aurelianus/leetcode-solutions | 8b43e72fe1f51c84abc3e89b181ca51f09dc7ca6 | [
"MIT"
] | 141 | 2017-12-12T21:45:53.000Z | 2022-03-25T07:03:39.000Z | questions/univalued-binary-tree/Solution.py | marcus-aurelianus/leetcode-solutions | 8b43e72fe1f51c84abc3e89b181ca51f09dc7ca6 | [
"MIT"
] | 32 | 2015-10-05T14:09:52.000Z | 2021-05-30T10:28:41.000Z | questions/univalued-binary-tree/Solution.py | marcus-aurelianus/leetcode-solutions | 8b43e72fe1f51c84abc3e89b181ca51f09dc7ca6 | [
"MIT"
] | 56 | 2015-09-30T05:23:28.000Z | 2022-03-08T07:57:11.000Z | """
A binary tree is univalued if every node in the tree has the same value.
Return true if and only if the given tree is univalued.
Example 1:
Input: [1,1,1,1,1,null,1]
Output: true
Example 2:
Input: [2,2,2,5,2]
Output: false
Note:
The number of nodes in the given tree will be in the range [1, 100].
Each node's value will be an integer in the range [0, 99].
"""
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def isUnivalTree(self, root):
"""
:type root: TreeNode
:rtype: bool
"""
if root is None:
return True
if root.left is not None:
if root.val != root.left.val:
return False
if root.right is not None:
if root.val != root.right.val:
return False
return self.isUnivalTree(root.left) and self.isUnivalTree(root.right) | 20.196078 | 77 | 0.586408 | 470 | 0.454985 | 0 | 0 | 0 | 0 | 0 | 0 | 618 | 0.598258 |
9d35413c71268ad28c881bbed6ac0a459290c12c | 495 | py | Python | chargingMethod.py | IllllIIlI/TeamProjPrac | da0a3e5ea9f6204c35c4a969eb522da05f5fc05c | [
"MIT"
] | null | null | null | chargingMethod.py | IllllIIlI/TeamProjPrac | da0a3e5ea9f6204c35c4a969eb522da05f5fc05c | [
"MIT"
] | null | null | null | chargingMethod.py | IllllIIlI/TeamProjPrac | da0a3e5ea9f6204c35c4a969eb522da05f5fc05c | [
"MIT"
] | null | null | null | def guide():
print("If you want to be informed about how to charge the electric vehicle, please enter Y.")
answer = input()
if answer == "Y":
print("1.Stop in front of the charger\n" "2.Check the charging connector for my car\n"
"3.Connect to the vehicle\n" "4.Set charge amount\n"
"5.Pay the charging fee with the previously issued charging card or credit card\n"
"6.Clean up connectors and instruments after charging is completed") | 61.875 | 97 | 0.656566 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 368 | 0.743434 |
9d35852cc4326c58c6eb53f1d5a84c6b35a5e6fb | 1,006 | py | Python | src/python/WMComponent/DBS3Buffer/MySQL/DBSBufferFiles/GetParentStatus.py | khurtado/WMCore | f74e252412e49189a92962945a94f93bec81cd1e | [
"Apache-2.0"
] | 21 | 2015-11-19T16:18:45.000Z | 2021-12-02T18:20:39.000Z | src/python/WMComponent/DBS3Buffer/MySQL/DBSBufferFiles/GetParentStatus.py | khurtado/WMCore | f74e252412e49189a92962945a94f93bec81cd1e | [
"Apache-2.0"
] | 5,671 | 2015-01-06T14:38:52.000Z | 2022-03-31T22:11:14.000Z | src/python/WMComponent/DBS3Buffer/MySQL/DBSBufferFiles/GetParentStatus.py | khurtado/WMCore | f74e252412e49189a92962945a94f93bec81cd1e | [
"Apache-2.0"
] | 67 | 2015-01-21T15:55:38.000Z | 2022-02-03T19:53:13.000Z | #!/usr/bin/env python
"""
_GetParentStatus_
MySQL implementation of DBSBufferFile.GetParentStatus
"""
from WMCore.Database.DBFormatter import DBFormatter
class GetParentStatus(DBFormatter):
sql = """SELECT status FROM dbsbuffer_file
INNER JOIN dbsbuffer_file_parent ON
dbsbuffer_file.id = dbsbuffer_file_parent.parent
WHERE dbsbuffer_file_parent.child =
(SELECT id FROM dbsbuffer_file WHERE lfn = :lfn)"""
def format(self, results):
"""
_format_
Format the query results into a list of LFNs.
"""
results = DBFormatter.format(self, results)
status = []
for result in results:
status.append(result[0])
return status
def execute(self, lfn, conn = None, transaction = False):
result = self.dbi.processData(self.sql, {"lfn": lfn}, conn = conn,
transaction = transaction)
return self.format(result)
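# Illustrative usage (not executed); in WMCore this DAO is normally obtained through a
# DAOFactory, and `childLFN` below is a hypothetical child-file LFN:
#   parentStatus = GetParentStatus(logger, dbi).execute(lfn=childLFN)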
| 27.189189 | 74 | 0.614314 | 845 | 0.83996 | 0 | 0 | 0 | 0 | 0 | 0 | 463 | 0.460239 |
9d364f1bcfe20f4a4dd1ba1db0e841b93b086bab | 1,672 | py | Python | yagoMoviesDown.py | afshinsadeghi/DBPediaDownloder | f9c98cab6fc7ce1d4f1c707ce1491c5dacbaf2cc | [
"Apache-2.0"
] | 1 | 2019-04-02T11:12:52.000Z | 2019-04-02T11:12:52.000Z | yagoMoviesDown.py | afshinsadeghi/DBPediaDownloder | f9c98cab6fc7ce1d4f1c707ce1491c5dacbaf2cc | [
"Apache-2.0"
] | null | null | null | yagoMoviesDown.py | afshinsadeghi/DBPediaDownloder | f9c98cab6fc7ce1d4f1c707ce1491c5dacbaf2cc | [
"Apache-2.0"
] | null | null | null | import os
from time import sleep
import requests
querysub0 = 'https://linkeddata1.calcul.u-psud.fr/sparql?default-graph-uri=&query=construct%7B+%3Fs+%3Fp+%3Fo%7D+where+%7B+%0D%0Aselect+distinct+%3Fs+%3Fp+%3Fo+where+%7B%0D%0A%7B%0D%0A%3Fs1+++%3Chttp%3A%2F%2Fyago-knowledge.org%2Fresource%2FactedIn%3E+++%3Fs+.%0D%0A%3Fs2+++%3Chttp%3A%2F%2Fyago-knowledge.org%2Fresource%2Fdirected%3E+++%3Fs+.%0D%0A%3Fs+++%3Fp+++%3Fo.%0D%0A%7D+Union%7B%0D%0A%3Fs+++%3Chttp%3A%2F%2Fyago-knowledge.org%2Fresource%2FactedIn%3E+++%3Fs3+.%0D%0A%3Fs4+++%3Chttp%3A%2F%2Fyago-knowledge.org%2Fresource%2Fdirected%3E+++%3Fs3+.%0D%0A%3Fs+++%3Fp+++%3Fo.%0D%0A%7D+Union%7B%0D%0A%3Fs7+++%3Chttp%3A%2F%2Fyago-knowledge.org%2Fresource%2FactedIn%3E+++%3Fs5+.%0D%0A%3Fs+++%3Chttp%3A%2F%2Fyago-knowledge.org%2Fresource%2Fdirected%3E+++%3Fs5+.%0D%0A%3Fs+++%3Fp+++%3Fo.%7D+%0D%0A%7D%0D%0ALimit+10000+offset+'
querysub1 = '+%7D%0D%0A&format=text%2Fplain&timeout=0'
def download_big_file(counter):
link = querysub0 + str(counter * 10000) + querysub1
local_filename = "YagoMovie" + str(counter) + ".nt"
r = requests.get(link, stream=True)
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
print local_filename, " is downloaded."
for counter in range(1, 700, 1):
    sleep(10)  # sleep so as to let the server breathe between requests
download_big_file(counter)
print "making yagoMovies.nt ..."
os.system('find . -name "*.nt" -size -15 -delete')
os.system("cat *.nt > a.ntt")
os.system("rm *.nt")
os.system("mv a.ntt yagoMovies.nt")
print "yagoMovies.nt is created. have fun!"
| 59.714286 | 819 | 0.692584 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,109 | 0.663278 |
9d3825f401efd886ac45bc856e7732ecfff783b3 | 1,418 | py | Python | src/TestValues/TestProtractor3D.py | SvenKratz/Protractor3D | 39b6c877cc88cae028ca938e994034b83fcccb68 | [
"MIT"
] | 4 | 2018-02-06T14:41:26.000Z | 2020-03-19T14:16:05.000Z | src/TestValues/TestProtractor3D.py | SvenKratz/Protractor3D | 39b6c877cc88cae028ca938e994034b83fcccb68 | [
"MIT"
] | null | null | null | src/TestValues/TestProtractor3D.py | SvenKratz/Protractor3D | 39b6c877cc88cae028ca938e994034b83fcccb68 | [
"MIT"
] | null | null | null | '''
Created on Apr 12, 2011
@author: svenkratz
'''
import TestGestures
import Protractor3D.Protractor3D
from Protractor3D.Protractor3D import *
def triplify(g):
out = []
if len(g) % 3 != 0:
print "Warning: Data not divisible by 3"
for k in xrange(len(g)/3):
out = out + [[g[3*k], g[3*k+1], g[3*k+2]]]
return out
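# e.g. triplify([x0, y0, z0, x1, y1, z1]) -> [[x0, y0, z0], [x1, y1, z1]]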
triangle = triplify(TestGestures.Triangle)
print triangle
circle1 = triplify(TestGestures.Circle_1)
circle2 = triplify(TestGestures.Circle_2)
rectangle = triplify(TestGestures.Rectangle)
p_triangle = Protractor3D(triangle)
p_circle1 = Protractor3D(circle1)
p_circle2 = Protractor3D(circle2)
p_rectangle = Protractor3D(rectangle)
#print p_circle1.trace
#
#print "Trace", p_triangle.trace
#print "Resampled", p_triangle.resampled
#print "Scaled", p_triangle.scaled
#print "Centered", p_triangle.centered
#print "Template", p_triangle.template
print "========== Evaluations =============="
Protractor3D.DEBUG = 5
gesturesAndNames = [(p_triangle,"Triangle"), (p_circle1,"Circle1"), ( p_circle2, "Circle2") , (p_rectangle, "Rectangle")]
while gesturesAndNames != []:
gesture = gesturesAndNames[0]
templates = gesturesAndNames[1:]
gesturesAndNames = templates
if len(templates) != 0:
for t in templates:
print "======================================="
print "Results for", gesture[1]," <---> ", t[1]
gesture[0].protractor3D_classify(gesture[0].template, t[0].template)
| 22.507937 | 121 | 0.693935 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 431 | 0.303949 |
9d3874299d6c36b60cba6fdb324222e4353364ea | 481 | py | Python | tests/test_actor.py | sdss/HAL | c7a2111f8737a498a124f5571d6f0e6b46e5c371 | [
"BSD-3-Clause"
] | null | null | null | tests/test_actor.py | sdss/HAL | c7a2111f8737a498a124f5571d6f0e6b46e5c371 | [
"BSD-3-Clause"
] | 2 | 2022-01-14T04:50:58.000Z | 2022-02-28T22:31:06.000Z | tests/test_actor.py | sdss/HAL | c7a2111f8737a498a124f5571d6f0e6b46e5c371 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: José Sánchez-Gallego ([email protected])
# @Date: 2021-03-24
# @Filename: test_hal.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
import pytest
from hal import __version__
pytestmark = [pytest.mark.asyncio]
async def test_version(actor):
await actor.invoke_mock_command("version")
assert len(actor.mock_replies) == 2
assert actor.mock_replies[-1]["version"] == __version__
| 20.913043 | 74 | 0.706861 | 0 | 0 | 0 | 0 | 0 | 0 | 179 | 0.3706 | 231 | 0.478261 |
9d3a4036188d6088bc1ce4cfe8dfff01c0a9fdb1 | 490 | py | Python | day_07/puzzles.py | electronsandstuff/Advent-of-Code-2021 | 9c23872640e8d092088dcb6d5cb845cd11d98994 | [
"BSD-3-Clause"
] | null | null | null | day_07/puzzles.py | electronsandstuff/Advent-of-Code-2021 | 9c23872640e8d092088dcb6d5cb845cd11d98994 | [
"BSD-3-Clause"
] | null | null | null | day_07/puzzles.py | electronsandstuff/Advent-of-Code-2021 | 9c23872640e8d092088dcb6d5cb845cd11d98994 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
def crab_fuel(n):
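    # Part-2 fuel model: moving n steps costs 1 + 2 + ... + n = n * (n + 1) / 2 (triangular number).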
return (n**2 + n) // 2
if __name__ == '__main__':
with open('input.txt') as f:
pin = np.array([int(x) for x in f.read().split(',')])
distances = np.abs(pin[None, :] - np.arange(pin.max() + 1)[:, None])
total_fuel = np.sum(distances, axis=1)
print(f'Solution 1: {total_fuel.min()}')
distances_v2 = crab_fuel(distances)
total_fuel_v2 = np.sum(distances_v2, axis=1)
print(f'Solution 2: {total_fuel_v2.min()}')
| 25.789474 | 72 | 0.608163 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 93 | 0.189796 |
9d3ac14e7019eef027448de09382a14cd8c888c7 | 352 | py | Python | radical_translations/core/migrations/0033_delete_work.py | kingsdigitallab/radical_translations | c18ca1ccc0ab2d88ae472dc2eda58e2ff9dcc76a | [
"MIT"
] | 3 | 2022-02-08T18:03:44.000Z | 2022-03-18T18:10:43.000Z | radical_translations/core/migrations/0033_delete_work.py | kingsdigitallab/radical_translations | c18ca1ccc0ab2d88ae472dc2eda58e2ff9dcc76a | [
"MIT"
] | 19 | 2020-05-11T15:36:35.000Z | 2022-02-08T11:26:40.000Z | radical_translations/core/migrations/0033_delete_work.py | kingsdigitallab/radical_translations | c18ca1ccc0ab2d88ae472dc2eda58e2ff9dcc76a | [
"MIT"
] | null | null | null | # Generated by Django 2.2.10 on 2020-05-18 10:49
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('events', '0008_alter_field_classification_on_event'),
('core', '0032_delete_instance'),
]
operations = [
migrations.DeleteModel(
name='Work',
),
]
| 19.555556 | 63 | 0.616477 | 266 | 0.755682 | 0 | 0 | 0 | 0 | 0 | 0 | 132 | 0.375 |
9d3b2ee3ee8d1f5868d497f89b1766382405982d | 16,114 | py | Python | sampling.py | bigdata-inha/FedDC | c90c48fc7e35b6cb80890194c8cdfb0d412a0819 | [
"MIT"
] | null | null | null | sampling.py | bigdata-inha/FedDC | c90c48fc7e35b6cb80890194c8cdfb0d412a0819 | [
"MIT"
] | null | null | null | sampling.py | bigdata-inha/FedDC | c90c48fc7e35b6cb80890194c8cdfb0d412a0819 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.6
import numpy as np
from torchvision import datasets, transforms
import logging
import random
import torch
# Settings for a multiplicative linear congruential generator (aka Lehmer
# generator) suggested in 'Random Number Generators: Good
# Ones are Hard to Find' by Park and Miller.
MLCG_MODULUS = 2**(31) - 1
MLCG_MULTIPLIER = 16807
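# i.e. the Park-Miller "minimal standard" generator: x_{k+1} = 16807 * x_k mod (2**31 - 1).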
# Default quantiles for federated evaluations.
DEFAULT_QUANTILES = (0.0, 0.25, 0.5, 0.75, 1.0)
def mnist_iid(dataset, num_users):
"""
Sample I.I.D. client data from MNIST dataset
:param dataset:
:param num_users:
:return: dict of image index
"""
num_items = int(len(dataset) / num_users)
dict_users, all_idxs = {}, [i for i in range(len(dataset))]
for i in range(num_users):
dict_users[i] = set(np.random.choice(all_idxs, num_items,
replace=False))
all_idxs = list(set(all_idxs) - dict_users[i])
return dict_users
def mnist_noniid(dataset, num_users):
"""
Sample non-I.I.D client data from MNIST dataset
:param dataset:
:param num_users:
:return:
"""
# 60,000 training imgs --> 200 imgs/shard X 300 shards
num_shards, num_imgs = 200, 300
idx_shard = [i for i in range(num_shards)]
dict_users = {i: np.array([]) for i in range(num_users)}
idxs = np.arange(num_shards * num_imgs)
labels = dataset.train_labels.numpy()
# sort labels
idxs_labels = np.vstack((idxs, labels))
idxs_labels = idxs_labels[:, idxs_labels[1, :].argsort()]
idxs = idxs_labels[0, :]
# divide and assign 2 shards/client
for i in range(num_users):
rand_set = set(np.random.choice(idx_shard, 2, replace=False))
idx_shard = list(set(idx_shard) - rand_set)
for rand in rand_set:
dict_users[i] = np.concatenate(
(dict_users[i], idxs[rand * num_imgs:(rand + 1) * num_imgs]), axis=0)
return dict_users
def mnist_noniid_unequal(dataset, num_users):
"""
Sample non-I.I.D client data from MNIST dataset s.t clients
have unequal amount of data
:param dataset:
:param num_users:
:returns a dict of clients with each clients assigned certain
number of training imgs
"""
# 60,000 training imgs --> 50 imgs/shard X 1200 shards
num_shards, num_imgs = 1200, 50
idx_shard = [i for i in range(num_shards)]
dict_users = {i: np.array([]) for i in range(num_users)}
idxs = np.arange(num_shards * num_imgs)
labels = dataset.train_labels.numpy()
# sort labels
idxs_labels = np.vstack((idxs, labels))
idxs_labels = idxs_labels[:, idxs_labels[1, :].argsort()]
idxs = idxs_labels[0, :]
# Minimum and maximum shards assigned per client:
min_shard = 1
max_shard = 30
# Divide the shards into random chunks for every client
# s.t the sum of these chunks = num_shards
random_shard_size = np.random.randint(min_shard, max_shard + 1,
size=num_users)
random_shard_size = np.around(random_shard_size /
sum(random_shard_size) * num_shards)
random_shard_size = random_shard_size.astype(int)
# Assign the shards randomly to each client
if sum(random_shard_size) > num_shards:
for i in range(num_users):
# First assign each client 1 shard to ensure every client has
# atleast one shard of data
rand_set = set(np.random.choice(idx_shard, 1, replace=False))
idx_shard = list(set(idx_shard) - rand_set)
for rand in rand_set:
dict_users[i] = np.concatenate(
(dict_users[i], idxs[rand * num_imgs:(rand + 1) * num_imgs]),
axis=0)
random_shard_size = random_shard_size - 1
# Next, randomly assign the remaining shards
for i in range(num_users):
if len(idx_shard) == 0:
continue
shard_size = random_shard_size[i]
if shard_size > len(idx_shard):
shard_size = len(idx_shard)
rand_set = set(np.random.choice(idx_shard, shard_size,
replace=False))
idx_shard = list(set(idx_shard) - rand_set)
for rand in rand_set:
dict_users[i] = np.concatenate(
(dict_users[i], idxs[rand * num_imgs:(rand + 1) * num_imgs]),
axis=0)
else:
for i in range(num_users):
shard_size = random_shard_size[i]
rand_set = set(np.random.choice(idx_shard, shard_size,
replace=False))
idx_shard = list(set(idx_shard) - rand_set)
for rand in rand_set:
dict_users[i] = np.concatenate(
(dict_users[i], idxs[rand * num_imgs:(rand + 1) * num_imgs]),
axis=0)
if len(idx_shard) > 0:
# Add the leftover shards to the client with minimum images:
shard_size = len(idx_shard)
# Add the remaining shard to the client with lowest data
k = min(dict_users, key=lambda x: len(dict_users.get(x)))
rand_set = set(np.random.choice(idx_shard, shard_size,
replace=False))
idx_shard = list(set(idx_shard) - rand_set)
for rand in rand_set:
dict_users[k] = np.concatenate(
(dict_users[k], idxs[rand * num_imgs:(rand + 1) * num_imgs]),
axis=0)
return dict_users
def cifar_iid(dataset, num_users, args):
"""
Sample I.I.D. client data from CIFAR10 dataset
:param dataset:
:param num_users:
:return: dict of image index
"""
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
random.seed(args.seed)
np.random.seed(args.seed)
num_items = int(len(dataset) / num_users)
    # dict_users: allocate the dataset indices evenly across the num_users clients; each user id maps to a set of sample indices
dict_users, all_idxs = {}, [i for i in range(len(dataset))]
for i in range(num_users):
dict_users[i] = set(np.random.choice(all_idxs, num_items,
replace=False))
all_idxs = list(set(all_idxs) - dict_users[i])
return dict_users
def imagenet_noniid(dataset, num_users, args, class_num=2):
"""
Sample non-I.I.D client data from CIFAR10 dataset
:param dataset:
:param num_users:
:return:
"""
    # num_shards -> total number of classes; num_imgs -> samples per user (ImageNet class sizes differ, so they must be counted); idxs -> total number of samples
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
random.seed(args.seed)
np.random.seed(args.seed)
    # Note: a user can still end up with samples from only a single class.
    # idx_shard -> shard ids to draw from, one per (user, class) slot: 2 classes x 100 users = 200
    # num_imgs -> samples of one class held by a single user: len(dataset) / num_users / class_num (e.g. 50,000 / 100 / 2 = 250)
num_shards, num_imgs = num_users*class_num, int(len(dataset)/num_users/class_num)
idx_shard = [i for i in range(num_shards)]
dict_users = {i: np.array([]) for i in range(num_users)}
idxs = np.arange(num_shards * num_imgs)
# labels = dataset.train_labels.numpy()
labels = np.array(dataset.targets)
# sort labels
idxs = np.argsort(labels)
class_count = [0 for i in range(num_shards)]
for i in labels:
class_count[i] += 1
accumulate_class_count = [0 for i in range(num_shards)]
for c in range(num_shards):
if c==0:
accumulate_class_count[c] = class_count[0]
else:
accumulate_class_count[c] = accumulate_class_count[c-1] + class_count[c]
idx_shuffle = np.random.permutation(idx_shard)
client_class_set = []
for i in range(num_users):
user_class_set = idx_shuffle[i*class_num:(i+1)*class_num]
client_class_set.append(user_class_set)
for class_seed in user_class_set:
dict_users[i] = np.concatenate(
(dict_users[i], idxs[accumulate_class_count[class_seed] -class_count[class_seed] :accumulate_class_count[class_seed]]), axis=0)
return dict_users,client_class_set
def cifar10_iid(train_dataset, num_users, args):
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
random.seed(args.seed)
np.random.seed(args.seed)
n_dataset = len(train_dataset)
idxs = np.random.permutation(n_dataset)
batch_idxs = np.array_split(idxs, num_users)
net_dataidx_map = {i: batch_idxs[i] for i in range(num_users)}
return net_dataidx_map
def record_net_data_stats(y_train, net_dataidx_map):
net_cls_counts = {}
for net_i, dataidx in net_dataidx_map.items():
unq, unq_cnt = np.unique(y_train[dataidx], return_counts=True)
tmp = {unq[i]: unq_cnt[i] for i in range(len(unq))}
net_cls_counts[net_i] = tmp
logging.debug('Data statistics: %s' % str(net_cls_counts))
return net_cls_counts
def partition_data(train_dataset, partition, num_uers, alpha, args):
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
random.seed(args.seed)
np.random.seed(args.seed)
train_labels = np.array(train_dataset.targets)
num_train = len(train_dataset)
if partition == "homo":
idxs = np.random.permutation(num_train)
batch_idxs = np.array_split(idxs, num_uers)
net_dataidx_map = {i: batch_idxs[i] for i in range(num_uers)}
elif partition == "dirichlet":
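        # Dirichlet label partition: for each class, draw per-client proportions from Dir(alpha)
        # and split that class's indices accordingly (clients already holding >= N/num_uers samples
        # get a zero share). Small alpha -> highly skewed, non-IID clients; large alpha approaches IID.
        # The loop below resamples until every client holds at least 10 samples.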
min_size = 0
K = args.num_classes
        N = len(train_labels)  # number of training samples, e.g. 50,000 for CIFAR
net_dataidx_map = {}
while min_size < 10:
idx_batch = [[] for _ in range(num_uers)]
# for each class in the dataset
for k in range(K):
idx_k = np.where(train_labels == k)[0]
np.random.shuffle(idx_k)
proportions = np.random.dirichlet(np.repeat(alpha, num_uers))
## Balance
proportions = np.array([p * (len(idx_j) < N / num_uers) for p, idx_j in zip(proportions, idx_batch)])
proportions = proportions / proportions.sum()
proportions = (np.cumsum(proportions) * len(idx_k)).astype(int)[:-1]
idx_batch = [idx_j + idx.tolist() for idx_j, idx in zip(idx_batch, np.split(idx_k, proportions))]
min_size = min([len(idx_j) for idx_j in idx_batch])
for j in range(num_uers):
np.random.shuffle(idx_batch[j])
net_dataidx_map[j] = idx_batch[j]
elif partition > "noniid-#label0" and partition <= "noniid-#label9":
num = eval(partition[13:])
K = 10
if num == 10:
net_dataidx_map ={i:np.ndarray(0,dtype=np.int64) for i in range(num_uers)}
for i in range(10):
idx_k = np.where(train_labels==i)[0]
np.random.shuffle(idx_k)
split = np.array_split(idx_k,num_uers)
for j in range(num_uers):
net_dataidx_map[j]=np.append(net_dataidx_map[j],split[j])
else:
times=[0 for i in range(10)]
contain=[]
for i in range(num_uers):
current=[i%K]
times[i%K]+=1
j=1
while (j<num):
ind=random.randint(0,K-1)
if (ind not in current):
j=j+1
current.append(ind)
times[ind]+=1
contain.append(current)
net_dataidx_map ={i:np.ndarray(0,dtype=np.int64) for i in range(num_uers)}
for i in range(K):
idx_k = np.where(train_labels==i)[0]
np.random.shuffle(idx_k)
split = np.array_split(idx_k,times[i])
ids=0
for j in range(num_uers):
if i in contain[j]:
net_dataidx_map[j]=np.append(net_dataidx_map[j],split[ids])
ids+=1
traindata_cls_counts = record_net_data_stats(train_labels, net_dataidx_map)
#print(traindata_cls_counts)
# return (X_train, y_train, X_test, y_test, net_dataidx_map, traindata_cls_counts)
    # (an earlier version also returned y_train and traindata_cls_counts)
return net_dataidx_map
def cifar_noniid(dataset, num_users, args, class_num=2):
"""
Sample non-I.I.D client data from CIFAR10 dataset
:param dataset:
:param num_users:
:return:
"""
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
random.seed(args.seed)
np.random.seed(args.seed)
    # Note: a user can still end up with samples from only a single class.
    # idx_shard -> shard ids to draw from, one per (user, class) slot: 2 classes x 100 users = 200
    # num_imgs -> samples of one class held by a single user: len(dataset) / num_users / class_num (e.g. 50,000 / 100 / 2 = 250)
num_shards, num_imgs = num_users*class_num, int(len(dataset)/num_users/class_num)
idx_shard = [i for i in range(num_shards)]
dict_users = {i: np.array([]) for i in range(num_users)}
idxs = np.arange(num_shards * num_imgs)
# labels = dataset.train_labels.numpy()
labels = np.array(dataset.targets)
#sort_index = np.argsort(labels)
# sort labels
idxs_labels = np.vstack((idxs, labels))
idxs_labels = idxs_labels[:, idxs_labels[1, :].argsort()]
idxs = idxs_labels[0, :]
user_classs_dict = []
# divide and assign
for i in range(num_users):
        # randomly pick class_num of the num_shards shards (e.g. 2 out of 200)
rand_set = set(np.random.choice(idx_shard, class_num, replace=False))
if class_num > 1 and i != num_users-1:
while dataset.targets[idxs[list(rand_set)[1] * num_imgs]] == dataset.targets[idxs[list(rand_set)[0] *num_imgs]]:
rand_set = set(np.random.choice(idx_shard, class_num, replace=False))
#print(dataset.targets[idxs[list(rand_set)[1] * num_imgs]])
#print(dataset.targets[idxs[list(rand_set)[0] * num_imgs]])
#print('\t')
user_classs_dict.append(rand_set)
idx_shard = list(set(idx_shard) - rand_set)
for rand in rand_set:
dict_users[i] = np.concatenate(
(dict_users[i], idxs[rand * num_imgs:(rand + 1) * num_imgs]), axis=0)
# for data_idx, j in enumerate(dict_users[i]):
# print(i, data_idx, dataset.targets[int(j)])
return dict_users, user_classs_dict
class client_choice(object):
def __init__(self, args, num_users):
self.args =args
self.num_users = num_users
self.mlcg_start = np.random.RandomState(args.seed).randint(1, MLCG_MODULUS - 1)
def client_sampling(self, num_users, m, random_seed, round_num):
# Settings for a multiplicative linear congruential generator (aka Lehmer
# generator) suggested in 'Random Number Generators: Good
# Ones are Hard to Find' by Park and Miller.
pseudo_random_int = pow(MLCG_MULTIPLIER, round_num, MLCG_MODULUS) * self.mlcg_start % MLCG_MODULUS
random_state = np.random.RandomState(pseudo_random_int)
return random_state.choice(num_users, m, replace=False)
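# Illustrative use of client_choice (not executed): the sampled subset is a pure function of the
# seed and the round number, so restarting a run reproduces the same per-round client selection.
# `args` is assumed to be the parsed argument namespace with a `seed` field.
#
#   selector = client_choice(args, num_users=100)
#   round_clients = selector.client_sampling(num_users=100, m=10,
#                                            random_seed=args.seed, round_num=3)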
if __name__ == '__main__':
dataset_train = datasets.MNIST('./data/mnist/', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,),
(0.3081,))
]))
num = 100
d = mnist_noniid(dataset_train, num)
| 37.561772 | 151 | 0.608353 | 744 | 0.044944 | 0 | 0 | 0 | 0 | 0 | 0 | 3,615 | 0.218376 |
9d3ca477c6b29581c9b909f6a0a67fb1fa79ccca | 2,502 | py | Python | codeforcesRating/codeforcesRating.py | gaurav512/Python-Scripts | 46483ab09cccef380c8425d6924507e029745479 | [
"MIT"
] | 3 | 2020-05-23T14:31:35.000Z | 2020-11-12T12:56:08.000Z | codeforcesRating/codeforcesRating.py | gaurav512/Python-Scripts | 46483ab09cccef380c8425d6924507e029745479 | [
"MIT"
] | null | null | null | codeforcesRating/codeforcesRating.py | gaurav512/Python-Scripts | 46483ab09cccef380c8425d6924507e029745479 | [
"MIT"
] | null | null | null | #! /usr/bin/python3
# Author: gaurav512
''' Script written to scrape basic information about a
Codeforces profile given the user id
Usage: Enter the userid as command line argument OR as the input
after running the following in the terminal: python3 codeforcesRating.py [userid]
'''
import requests, bs4, sys
def getDetails(userid):
url = 'http://www.codeforces.com/profile/'+userid
headers = {'User-Agent' : 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:73.0) Gecko/20100101 Firefox/73.0'}
res = requests.get(url, headers=headers)
try:
res.raise_for_status()
except:
print('Cannot access codeforces')
return
soup = bs4.BeautifulSoup(res.text, 'html.parser')
# Getting the rating of the user
title = soup.select('.user-rank > span:nth-child(1)')
if not title:
print(f'User \'{userid}\' not found')
return None
title = title[0].text
print('Title:\t\t',title)
# Getting the name and place of the user (if updated on profile)
elem = soup.select('.main-info > div:nth-child(3) > div:nth-child(1)')
if elem:
content = elem[0].text.split(',')
name = content[0]
print('Name:\t\t',name)
if len(content) > 1:
place = ','.join(content[1:]).lstrip()
print('Place: \t', place)
# Getting organization of the user (if updated on profile)
elem2 = soup.select('.main-info > div:nth-child(3) > div:nth-child(2)')
if elem2:
organization = elem2[0].text
pos = organization.find(' ')
print('Organization:\t', organization[pos+1:])
# If the user is unrated then return back
if title.strip() == 'Unrated':
return None
# Following code snippet takes care of the inconsistent css selectors on the Codeforces site due to display of badges in some profiles
rating_selector = '.info > ul:nth-child(2) > li:nth-child(1) > span:nth-child(2)'
if soup.select('div.badge:nth-child(1) > img:nth-child(1)'):
rating_selector = rating_selector[:21]+'3'+rating_selector[22:]
# Fetch the rating of the user
rating = soup.select(rating_selector)[0].text
print('Rating:\t\t', rating)
# Fetch the highest title achieved by the user
highestTitle = soup.select('span.smaller > span:nth-child(1)')[0].text
print('Highest Title:\t', highestTitle[:-2].title())
# Fetch the highest rating achieved by the user
highestRating = soup.select('span.smaller > span:nth-child(2)')[0].text
print('Highest Rating:\t', highestRating)
def main():
if len(sys.argv) > 1:
userid = sys.argv[1]
else:
userid = input()
getDetails(userid)
if __name__ == '__main__':
main()
| 30.144578 | 136 | 0.691847 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,359 | 0.543165 |
9d3e62b9c9792273ad0f8b50076e62ff7aa9fb5b | 566 | py | Python | tests/test_combinator.py | BrunoSanchez/capsule_N1 | a5ee3b74afc27de1a954ae2f9f96c278a4723226 | [
"BSD-3-Clause"
] | 12 | 2017-04-13T06:49:42.000Z | 2019-11-19T09:27:43.000Z | tests/test_combinator.py | BrunoSanchez/capsule_N1 | a5ee3b74afc27de1a954ae2f9f96c278a4723226 | [
"BSD-3-Clause"
] | 56 | 2017-09-05T16:00:57.000Z | 2020-11-20T18:02:58.000Z | tests/test_combinator.py | BrunoSanchez/capsule_N1 | a5ee3b74afc27de1a954ae2f9f96c278a4723226 | [
"BSD-3-Clause"
] | 5 | 2017-10-08T16:55:40.000Z | 2020-09-22T14:04:53.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# test_combinator.py
#
# Copyright 2020 QuatroPe
#
# This file is part of ProperImage (https://github.com/quatrope/ProperImage)
# License: BSD-3-Clause
# Full Text: https://github.com/quatrope/ProperImage/blob/master/LICENSE.txt
#
"""
test_combinator module from ProperImage
for analysis of astronomical images
Written by Bruno SANCHEZ
PhD of Astronomy - UNC
[email protected]
Instituto de Astronomia Teorica y Experimental (IATE) UNC
Cordoba - Argentina
Of 301
"""
class CombinatorBase(object):
pass
| 17.151515 | 76 | 0.740283 | 39 | 0.068905 | 0 | 0 | 0 | 0 | 0 | 0 | 511 | 0.902827 |
9d3f7a7d27e1b7136efc12dc236457c627b3164e | 1,025 | py | Python | ch09-linear_model/src/score_card.py | ahitboyZBW/book-ml-sem | 73208e7e492c9cbe82c4aaa6459a41e3ac1317be | [
"MIT"
] | 137 | 2020-10-26T11:11:46.000Z | 2022-03-29T01:21:22.000Z | ch09-linear_model/src/score_card.py | zengzhongjie/book-ml-sem | 5d452a427db5ee65538d968ba5b938af013bb87c | [
"MIT"
] | 4 | 2021-01-18T08:57:04.000Z | 2021-07-29T02:39:00.000Z | ch09-linear_model/src/score_card.py | zengzhongjie/book-ml-sem | 5d452a427db5ee65538d968ba5b938af013bb87c | [
"MIT"
] | 46 | 2020-10-26T11:11:57.000Z | 2022-03-08T00:15:32.000Z |
def cal_A_B(pdo=20, base_score=500, odds=1 / 50):
B = pdo / np.log(2)
A = base_score + B * np.log(odds)
return A, B
'''
parameter
---------
df:变量的woe,要求与模型训练logit时的列顺序一样
logit:sklearn中的逻辑回归模型,带截距
return
------
新增每行数据的评分列:Score
example:
df= cal_score(df,logit)
'''
def cal_score_byadd(df, logit, A=387.123, B=28.854):
def _cal_woe_score(x, beta, n, B, beta0, A):
''' 只计算总分'''
score = 0.0
for cc in x.index.tolist():
score += x[cc] * beta[cc]
score = A - B * (beta0 + score)
return score
beta = dict(zip(df.columns.tolist(), logit.coef_[0]))
n = df.shape[1]
beta0 = logit.intercept_[0]
df['Score'] = df.apply(lambda x: _cal_woe_score(x, beta, n, B, beta0, A),
axis=1)
return df
def cal_score_byodds(df, logit, A=387.123, B=28.854):
beta0 = logit.intercept_[0]
prob_01 = logit.predict_proba(df)
df['Score'] = A - B * np.log(prob_01[:, 1] / prob_01[:, 0])
return df
| 21.808511 | 77 | 0.559024 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 273 | 0.244843 |
9d41431a104dca3b80f9642ad172c2f1314cf033 | 3,790 | py | Python | Tools/ecl_ekf/batch_process_logdata_ekf.py | lgarciaos/Firmware | 26dba1407bd1fbc65c23870a22fed904afba6347 | [
"BSD-3-Clause"
] | 4,224 | 2015-01-02T11:51:02.000Z | 2020-10-27T23:42:28.000Z | Tools/ecl_ekf/batch_process_logdata_ekf.py | choudhary0parivesh/Firmware | 02f4ad61ec8eb4f7906dd06b4eb1fd6abb994244 | [
"BSD-3-Clause"
] | 11,736 | 2015-01-01T11:59:16.000Z | 2020-10-28T17:13:38.000Z | Tools/ecl_ekf/batch_process_logdata_ekf.py | choudhary0parivesh/Firmware | 02f4ad61ec8eb4f7906dd06b4eb1fd6abb994244 | [
"BSD-3-Clause"
] | 11,850 | 2015-01-02T14:54:47.000Z | 2020-10-28T16:42:47.000Z | #! /usr/bin/env python3
"""
Runs process_logdata_ekf.py on the .ulg files in the supplied directory. A ULog file is skipped
from the analysis if a corresponding .pdf file already exists (unless the overwrite flag is set).
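
Example (hypothetical log directory):
    python batch_process_logdata_ekf.py ~/flight_logs -o --no-plots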
"""
# -*- coding: utf-8 -*-
import argparse
import os, glob
from process_logdata_ekf import process_logdata_ekf
def get_arguments():
parser = argparse.ArgumentParser(description='Analyse the estimator_status and ekf2_innovation message data for the'
' .ulg files in the specified directory')
parser.add_argument("directory_path")
parser.add_argument('-o', '--overwrite', action='store_true',
help='Whether to overwrite an already analysed file. If a file with .pdf extension exists for a .ulg'
'file, the log file will be skipped from analysis unless this flag has been set.')
parser.add_argument('--no-plots', action='store_true',
help='Whether to only analyse and not plot the summaries for developers.')
parser.add_argument('--check-level-thresholds', type=str, default=None,
help='The csv file of fail and warning test thresholds for analysis.')
parser.add_argument('--check-table', type=str, default=None,
help='The csv file with descriptions of the checks.')
parser.add_argument('--no-sensor-safety-margin', action='store_true',
help='Whether to not cut-off 5s after take-off and 5s before landing '
'(for certain sensors that might be influence by proximity to ground).')
return parser.parse_args()
def main() -> None:
args = get_arguments()
if args.check_level_thresholds is not None:
check_level_dict_filename = args.check_level_thresholds
else:
file_dir = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
check_level_dict_filename = os.path.join(file_dir, "check_level_dict.csv")
if args.check_table is not None:
check_table_filename = args.check_table
else:
file_dir = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
check_table_filename = os.path.join(file_dir, "check_table.csv")
ulog_directory = args.directory_path
# get all the ulog files found in the specified directory and in subdirectories
ulog_files = glob.glob(os.path.join(ulog_directory, '**/*.ulg'), recursive=True)
print("found {:d} .ulg files in {:s}".format(len(ulog_files), ulog_directory))
    # Remove the files already analysed unless the overwrite flag was specified. A ULog file is
    # considered analysed if a corresponding .pdf file exists.
if not args.overwrite:
print("skipping already analysed ulg files.")
ulog_files = [ulog_file for ulog_file in ulog_files if
not os.path.exists('{}.pdf'.format(ulog_file))]
n_files = len(ulog_files)
print("analysing the {:d} .ulg files".format(n_files))
i = 1
n_skipped = 0
# analyse all ulog files
for ulog_file in ulog_files:
print('analysing file {:d}/{:d}: {:s}'.format(i, n_files, ulog_file))
try:
_ = process_logdata_ekf(
ulog_file, check_level_dict_filename, check_table_filename,
plot=not args.no_plots, sensor_safety_margins=not args.no_sensor_safety_margin)
except Exception as e:
print(str(e))
print('an exception occurred, skipping file {:s}'.format(ulog_file))
n_skipped = n_skipped + 1
i = i + 1
print('{:d}/{:d} files analysed, {:d} skipped.'.format(n_files-n_skipped, n_files, n_skipped))
if __name__ == '__main__':
main() | 43.563218 | 125 | 0.656201 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,542 | 0.40686 |
9d429d9ff49854612f73350299d50ebaeb16c00a | 1,468 | py | Python | goodok_mlu/trackers/neptune.py | roma-goodok/ml_utils | c1d6630021a519102b5c4e029cecccdd8a0da946 | [
"MIT"
] | null | null | null | goodok_mlu/trackers/neptune.py | roma-goodok/ml_utils | c1d6630021a519102b5c4e029cecccdd8a0da946 | [
"MIT"
] | null | null | null | goodok_mlu/trackers/neptune.py | roma-goodok/ml_utils | c1d6630021a519102b5c4e029cecccdd8a0da946 | [
"MIT"
] | 1 | 2021-03-29T13:15:02.000Z | 2021-03-29T13:15:02.000Z | import inspect
import warnings
from pathlib import Path
def send_model_code(model, model_config, logdir, NEPTUNE_ON=False, exp=None):
model_init = None
model_forward = None
model_config_s = None
try:
model_init = inspect.getsource(model.__init__)
except Exception as e:
warnings.warn(f"Can't save model_init: {e}", UserWarning)
try:
model_forward = inspect.getsource(model.forward)
except Exception as e:
warnings.warn(f"Can't save model_forward: {e}", UserWarning)
try:
model_config_s = str(model_config)
except Exception as e:
warnings.warn(f"Can't save model_config: {e}", UserWarning)
def save_and_send(src, fnbase):
if src is not None:
fn = Path(logdir) / fnbase
with open(fn, 'w') as f:
f.write(src)
if NEPTUNE_ON and exp is not None:
exp.send_artifact(fn)
save_and_send(model_init, 'model_init.py')
save_and_send(model_forward, 'model_forward.py')
save_and_send(model_config_s, 'model_config.txt')
def log_and_send_string(value, name='example.txt', logdir=None, NEPTUNE_ON=False, exp=None):
def save_and_send(src, fnbase):
if src is not None:
fn = Path(logdir) / fnbase
with open(fn, 'w') as f:
f.write(src)
if NEPTUNE_ON and exp is not None:
exp.send_artifact(fn)
save_and_send(value, name)
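# Illustrative usage (not executed). `exp` is assumed to be a Neptune experiment object
# exposing `send_artifact`, as used above; NEPTUNE_ON simply gates the upload:
#   log_and_send_string("lr=0.001", name='hparams.txt', logdir='./logs',
#                       NEPTUNE_ON=True, exp=exp)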
| 30.583333 | 92 | 0.632834 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 162 | 0.110354 |
9d433078a8277e748b8b40142b8dbbea8d970588 | 63 | py | Python | python/testData/intentions/returnTypeInPy3AnnotationNoColon_after.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/intentions/returnTypeInPy3AnnotationNoColon_after.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 173 | 2018-07-05T13:59:39.000Z | 2018-08-09T01:12:03.000Z | python/testData/intentions/returnTypeInPy3AnnotationNoColon_after.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z | def foo() -> object:
@decorator
def bar():
pass | 15.75 | 20 | 0.492063 | 0 | 0 | 0 | 0 | 38 | 0.603175 | 0 | 0 | 0 | 0 |
9d438aadf58244488ff98e5078d8104573590578 | 3,099 | py | Python | pkgs/sdk-pkg/src/genie/libs/sdk/libs/abstracted_libs/iosxr/subsection.py | jbronikowski/genielibs | 200a34e5fe4838a27b5a80d5973651b2e34ccafb | [
"Apache-2.0"
] | 94 | 2018-04-30T20:29:15.000Z | 2022-03-29T13:40:31.000Z | pkgs/sdk-pkg/src/genie/libs/sdk/libs/abstracted_libs/iosxr/subsection.py | jbronikowski/genielibs | 200a34e5fe4838a27b5a80d5973651b2e34ccafb | [
"Apache-2.0"
] | 67 | 2018-12-06T21:08:09.000Z | 2022-03-29T18:00:46.000Z | pkgs/sdk-pkg/src/genie/libs/sdk/libs/abstracted_libs/iosxr/subsection.py | jbronikowski/genielibs | 200a34e5fe4838a27b5a80d5973651b2e34ccafb | [
"Apache-2.0"
] | 49 | 2018-06-29T18:59:03.000Z | 2022-03-10T02:07:59.000Z | # Python
import logging
from os import path
# Abstract
from genie.abstract import Lookup
# Parser
from genie.libs import parser
from genie.metaparser.util.exceptions import SchemaEmptyParserError
# unicon
from unicon.eal.dialogs import Statement, Dialog
log = logging.getLogger(__name__)
def save_device_information(device, **kwargs):
"""Install the commit packages. This is for IOSXR devices.
Args:
Mandatory:
device (`obj`) : Device object.
Returns:
True: Result is PASSED
False: Result is PASSX
Raises:
None
Example:
>>> save_device_information(device=Device())
"""
    # Check that the config-register is set to 0x2;
    # if not, configure it, e.g.:
# RP/0/RSP1/CPU0:PE1#admin config-register 0x2
if device.is_ha:
conn = device.active
else:
conn = device
    # Install commit (when there are packages to bring up features)
# from admin prompt
conn.admin_execute('install commit')
def get_default_dir(device):
""" Get the default directory of this device
Args:
Mandatory:
device (`obj`) : Device object.
Returns:
default_dir (`str`): Default directory of the system
Raises:
Exception
Example:
>>> get_default_dir(device=device)
"""
try:
lookup = Lookup.from_device(device)
parsed_dict = lookup.parser.show_platform.Dir(device=device).parse()
if ":" in parsed_dict['dir']['dir_name']:
default_dir = parsed_dict['dir']['dir_name']
else:
default_dir = ''
except SchemaEmptyParserError as e:
raise Exception("No output when executing 'dir' command") from e
except Exception as e:
raise Exception("Unable to execute 'dir' command") from e
# Return default_dir to caller
log.info("Default directory on '{d}' is '{dir}'".format(d=device.name,
dir=default_dir))
return default_dir
def configure_replace(device, file_location, timeout=60, file_name=None):
"""Configure replace on device
Args:
device (`obj`): Device object
file_location (`str`): File location
timeout (`int`): Timeout value in seconds
file_name (`str`): File name
Returns:
None
Raises:
pyATS Results
"""
if file_name:
file_location = '{}{}'.format(
file_location,
file_name)
try:
# check if file exist
device.execute.error_pattern.append('.*Path does not exist.*')
device.execute("dir {}".format(file_location))
except Exception:
raise Exception("File {} does not exist".format(file_location))
dialog = Dialog([
Statement(pattern=r'\[no\]',
action='sendline(y)',
loop_continue=True,
continue_timer=False)])
device.configure("load {}\ncommit replace".format(file_location),
timeout=timeout, reply=dialog)
| 26.042017 | 77 | 0.601162 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,476 | 0.476283 |
9d4487b1ae1496a3f2089388dee11fd461798de0 | 2,933 | py | Python | whisper_scalability/plot.py | Evalir/research | 0128cdc7c3cecaad4cc057886fd84e79b78f6b9c | [
"MIT"
] | 42 | 2019-08-03T18:04:47.000Z | 2022-02-28T14:24:56.000Z | whisper_scalability/plot.py | Evalir/research | 0128cdc7c3cecaad4cc057886fd84e79b78f6b9c | [
"MIT"
] | 88 | 2019-10-03T23:11:12.000Z | 2022-03-30T05:28:44.000Z | whisper_scalability/plot.py | Evalir/research | 0128cdc7c3cecaad4cc057886fd84e79b78f6b9c | [
"MIT"
] | 3 | 2019-09-03T17:19:39.000Z | 2021-12-27T16:53:44.000Z | import matplotlib.pyplot as plt
import numpy as np
from labellines import labelLines
# # Trying to get interpolation to work but getting error:
# # ValueError: The number of derivatives at boundaries does not match: expected 1, got 0+0
# from scipy.interpolate import make_interp_spline, BSpline
# n_users = np.array([100, 10000, 1000000])
# bw_case8 = np.array([1, 1.5, 98.1])
# # 300 represents number of points to make between T.min and T.max
# n_users_new = np.linspace(n_users.min(), n_users.max(), 300)
# spl8 = make_interp_spline(n_users, bw_case8, k=3) # type: BSpline
# bw_case8_smooth = spl8(n_users_new)
# plt.plot(n_users_new, bw_case8_smooth, label='case 8', linewidth=2)
n_users = [100, 10000, 1000000]
bw_case1 = [1, 1, 1]
bw_case2 = [97.7, 9.5*1000, 935.7*1000]
bw_case3 = [49.3, 4.*10008, 476.8*1000]
bw_case4 = [1, 1.5, 98.1]
bw_case5 = [10.7, 978, 95.5*1000]
bw_case6 = [21.5, 1.9*1000, 190.9*1000]
bw_case7 = [3.9, 284.8, 27.8*1000]
bw_case8 = [1, 1.5, 98.1]
plt.xlim(100, 10**6)
plt.ylim(1, 10**6)
plt.plot(n_users, bw_case1, label='case 1', linewidth=4, linestyle='dashed')
plt.plot(n_users, bw_case2, label='case 2', linewidth=4, linestyle='dashed')
plt.plot(n_users, bw_case3, label='case 3', linewidth=4, linestyle='dashed')
plt.plot(n_users, bw_case4, label='case 4', linewidth=4, linestyle='dashed')
plt.plot(n_users, bw_case5, label='case 5', linewidth=4)
plt.plot(n_users, bw_case6, label='case 6', linewidth=4)
plt.plot(n_users, bw_case7, label='case 7', linewidth=4)
plt.plot(n_users, bw_case8, label='case 8', linewidth=4)
#labelLines(plt.gca().get_lines(),zorder=0)
case1 = "Case 1. Only receiving messages meant for you [naive case]"
case2 = "Case 2. Receiving messages for everyone [naive case]"
case3 = "Case 3. All private messages go over one discovery topic [naive case]"
case4 = "Case 4. All private messages partitioned into shards [naive case]"
case5 = "Case 5. Case 4 + All messages passed through bloom filter"
case6 = "Case 6. Case 5 + Benign duplicate receives"
case7 = "Case 7. Case 6 + Mailserver case under good conditions with small bloom fp and mostly offline"
case8 = "Case 8. Waku - No metadata protection with bloom filter and one node connected; static shard"
plt.xlabel('number of users (log)')
plt.ylabel('mb/day (log)')
plt.legend([case1, case2, case3, case4, case5, case6, case7, case8], loc='upper left')
plt.xscale('log')
plt.yscale('log')
plt.axhspan(0, 10, facecolor='0.2', alpha=0.2, color='blue')
plt.axhspan(10, 30, facecolor='0.2', alpha=0.2, color='green')
plt.axhspan(30, 100, facecolor='0.2', alpha=0.2, color='orange')
plt.axhspan(100, 10**6, facecolor='0.2', alpha=0.2, color='red')
#plt.axvspan(0, 10**2+3, facecolor='0.2', alpha=0.5)
#plt.axvspan(10**4, 10**4+10**2, facecolor='0.2', alpha=0.5)
#plt.axvspan(10**6, 10**6+10**4, facecolor='0.2', alpha=0.5)
#for i in range(0, 5):
# plt.axhspan(i, i+.2, facecolor='0.2', alpha=0.5)
plt.show()
| 41.309859 | 103 | 0.703034 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,626 | 0.554381 |
9d44910e8c82debe9ba07f0a00ed736a65d972a9 | 2,000 | py | Python | polydomino/search.py | PsiACE/polydomino | ade7cdb303cb4073d8c075659a5494392d31f8b4 | [
"MIT"
] | null | null | null | polydomino/search.py | PsiACE/polydomino | ade7cdb303cb4073d8c075659a5494392d31f8b4 | [
"MIT"
] | null | null | null | polydomino/search.py | PsiACE/polydomino | ade7cdb303cb4073d8c075659a5494392d31f8b4 | [
"MIT"
] | null | null | null | # import the necessary packages
import argparse
import cv2
import numpy as np
from polydomino.colordescriptor import ColorDescriptor
from polydomino.searcher import Searcher
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument(
"-i",
"--index",
required=True,
help="Path to where the computed index will be stored",
)
ap.add_argument("-q", "--query", required=True, help="Path to the query image")
ap.add_argument(
"-fm", "--features", required=True, help="Method to get features of pics",
)
ap.add_argument(
"-sm", "--searcher", required=True, help="Method to search pics",
)
# ap.add_argument("-r", "--result-path", required=True, help="Path to the result path")
args = vars(ap.parse_args())
# initialize the image descriptor
cd = ColorDescriptor((8, 12, 3))
# load the query image and describe it
query = cv2.imread(args["query"])
if args["features"] == "color-moments":
features = cd.color_moments(query)
elif args["features"] == "hsv-describe":
features = cd.hsv_describe(query)
elif args["features"] == "gray-matrix":
features = cd.gray_matrix(query)
elif args["features"] == "humoments":
features = cd.humoments(query)
elif args["features"] == "ahash":
features = cd.ahash(query)
elif args["features"] == "phash":
features = cd.phash(query)
elif args["features"] == "dhash":
features = cd.dhash(query)
elif args["features"] == "mse":
features = cd.mse(query)
elif args["features"] == "hog":
features = cd.hog(query)
else:
print("Sorry, we don't support this method.")
exit(1)
# perform the search
method = args["searcher"]
searcher = Searcher(args["index"])
results = searcher.search(features, method)
print(results)
# display the query
cv2.namedWindow("Query", 0)
cv2.resizeWindow("Query", 640, 480)
cv2.imshow("Query", query)
# loop over the results
for (score, resultID) in results:
result = cv2.imread(resultID)
cv2.imshow("Result", result)
cv2.waitKey(0)
| 30.30303 | 87 | 0.6935 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 760 | 0.38 |
9d44ee135064ae4d96e5b84d0ccf61816f50cfb1 | 1,253 | py | Python | irekua_dev_tools/cli.py | CONABIO-audio/irekua-dev-tools | 87485592b7d4793c5e1f6eda2e00247810bfc99c | [
"BSD-4-Clause"
] | null | null | null | irekua_dev_tools/cli.py | CONABIO-audio/irekua-dev-tools | 87485592b7d4793c5e1f6eda2e00247810bfc99c | [
"BSD-4-Clause"
] | null | null | null | irekua_dev_tools/cli.py | CONABIO-audio/irekua-dev-tools | 87485592b7d4793c5e1f6eda2e00247810bfc99c | [
"BSD-4-Clause"
] | null | null | null | import click
from irekua_dev_tools.utils import load_config
from irekua_dev_tools.utils import get_working_directory
from irekua_dev_tools.utils import load_environment_variables
from irekua_dev_tools.utils import load_repository_info
from . import git
from . import dev
from . import config
from . import db
from .extra import clean
@click.group()
@click.pass_context
@click.option('--config-file', '-c', 'config_file', type=click.Path())
@click.option('--target', '-t', type=click.Path(exists=True))
@click.option('--default-config', '-dc', 'default_config', is_flag=True)
def cli(ctx, config_file, target, default_config):
config = load_config(path=config_file, aux_config=not default_config)
repository_info = load_repository_info(
method=config['repositories']['method'],
repository_file=config['repositories']['repository_file'])
load_environment_variables(config)
ctx.ensure_object(dict)
ctx.obj['config'] = config
ctx.obj['repository_info'] = repository_info
if target is None:
target = get_working_directory(ctx.obj['config'])
ctx.obj['target'] = target
cli.add_command(dev.cli)
cli.add_command(git.cli)
cli.add_command(config.cli)
cli.add_command(db.cli)
cli.add_command(clean)
| 29.833333 | 73 | 0.751796 | 0 | 0 | 0 | 0 | 786 | 0.627294 | 0 | 0 | 179 | 0.142857 |
9d451d7664d2140e40043248faa30a6b327e59ee | 2,880 | py | Python | optimism/test/testMinimizeScalar.py | btalamini/optimism | 023e1b2a0b137900a7517e4c7ac5056255cf7bbe | [
"MIT"
] | null | null | null | optimism/test/testMinimizeScalar.py | btalamini/optimism | 023e1b2a0b137900a7517e4c7ac5056255cf7bbe | [
"MIT"
] | 1 | 2022-03-12T00:01:12.000Z | 2022-03-12T00:01:12.000Z | optimism/test/testMinimizeScalar.py | btalamini/optimism | 023e1b2a0b137900a7517e4c7ac5056255cf7bbe | [
"MIT"
] | 3 | 2021-12-23T19:53:31.000Z | 2022-03-27T23:12:03.000Z | from optimism.JaxConfig import *
from optimism import MinimizeScalar
from optimism.test import TestFixture
from optimism.material import J2Plastic
def f(x): return 0.25*x**4 - 50.0*x**2 + 2.0
df = jacfwd(f)
class TestMinimizeScalarFixture(TestFixture.TestFixture):
def setUp(self):
self.minimize_scalar_jitted = jit(MinimizeScalar.minimize_scalar, static_argnums=(0,4))
def test_solves_quadratic_problem_in_one_iteration(self):
f = lambda x: x*x
x0 = 3.5
settings = MinimizeScalar.get_settings(tol=1e-8, max_iters=1)
x = MinimizeScalar.minimize_scalar(f, x0,
diffArgs=tuple(), nondiffArgs=tuple(),
settings=settings)
self.assertNear(x, 0.0, 12)
def test_does_not_converge_to_saddle_point(self):
x0 = -0.001
settings = MinimizeScalar.get_settings(tol=1e-10, max_iters=30)
x = MinimizeScalar.minimize_scalar(f, x0,
diffArgs=tuple(), nondiffArgs=tuple(),
settings=settings)
r = np.abs(df(x))
self.assertLess(r, settings.tol)
self.assertNear(x, -10.0, 9)
def notest_jit(self):
x0 = -0.001
settings = MinimizeScalar.get_settings(tol=1e-10, max_iters=30)
x = self.minimize_scalar_jitted(f, x0,
diffArgs=tuple(), nondiffArgs=tuple(),
settings=settings)
print("x={:1.13e}".format(x))
self.assertNear(x, -1.0, 9)
def notest_grad(self):
def g(x,c): return 0.25*x**4 - 0.5*(c*x)**2 + 2.0
c = -2.0
x0 = -3.0
settings = MinimizeScalar.get_settings(tol=1e-10, max_iters=30)
x = MinimizeScalar.minimize_scalar(g, x0,
diffArgs=(c,), nondiffArgs=tuple(),
settings=settings)
print("x={:1.13e}".format(x))
self.assertNear(x, c, 10)
def notest_stiff_problem(self):
E = 69.0
Y0 = 350.0
n = 3.0
eps0 = 1.0
e = 1.01*Y0/E
def Wp(ep):
w = np.where(ep > 0.0,
Y0*ep + Y0*eps0*n/(n + 1.0)*(ep/eps0)**(1+1/n),
Y0*ep)
return w
W = lambda ep: 0.5*E*(e - ep)**2 + Wp(ep)
settings = MinimizeScalar.get_settings(tol=1e-8*Y0, max_iters=30)
ep = MinimizeScalar.minimize_scalar(W, 1e-15, diffArgs=tuple(), nondiffArgs=tuple(),
settings=settings)
print("ep = ", ep)
yield_func = grad(W)
print("r=", -yield_func(ep))
if __name__ == '__main__':
TestFixture.unittest.main()
| 34.698795 | 95 | 0.515278 | 2,600 | 0.902778 | 0 | 0 | 0 | 0 | 0 | 0 | 45 | 0.015625 |
9d46c2badf319d174f35513f77f2237bac4308e9 | 2,709 | py | Python | anima/ui/review_dialog.py | MehmetErer/anima | f92ae599b5a4c181fc8e131a9ccdde537e635303 | [
"MIT"
] | 101 | 2015-02-08T22:20:11.000Z | 2022-03-21T18:56:42.000Z | anima/ui/review_dialog.py | MehmetErer/anima | f92ae599b5a4c181fc8e131a9ccdde537e635303 | [
"MIT"
] | 23 | 2016-11-30T08:33:21.000Z | 2021-01-26T12:11:12.000Z | anima/ui/review_dialog.py | MehmetErer/anima | f92ae599b5a4c181fc8e131a9ccdde537e635303 | [
"MIT"
] | 27 | 2015-01-03T06:49:45.000Z | 2021-12-28T03:30:54.000Z | # -*- coding: utf-8 -*-
"""
import datetime
from anima import defaults
defaults.timing_resolution = datetime.timedelta(minutes=10)
from anima.ui import SET_PYSIDE2
SET_PYSIDE2()
from anima.ui.widgets.review import APPROVE, REQUEST_REVISION
from anima.ui import review_dialog
review_dialog.UI(review_type=REQUEST_REVISION)
"""
from anima.ui.lib import QtCore, QtWidgets
from anima.ui.base import ui_caller, AnimaDialogBase
def UI(app_in=None, executor=None, **kwargs):
"""
:param app_in: A Qt Application instance, which you can pass to let the UI
be attached to the given applications event process.
:param executor: Instead of calling app.exec_ the UI will call this given
function. It also passes the created app instance to this executor.
"""
return ui_caller(app_in, executor, ReviewDialog, **kwargs)
class ReviewDialog(QtWidgets.QDialog, AnimaDialogBase):
"""review dialog
"""
def __init__(self, task=None, reviewer=None, review_type=None, parent=None):
super(ReviewDialog, self).__init__(parent=parent)
self.task = task
self.reviewer = reviewer
self.review_type = review_type
self.main_layout = None
self.button_box = None
self._setup_ui()
def _setup_ui(self):
"""set up the ui elements
"""
self.setWindowTitle("Review Dialog")
self.resize(550, 350)
self.main_layout = QtWidgets.QVBoxLayout(self)
# Review
from anima.ui.widgets.review import ReviewWidget
self.review_widget = ReviewWidget(
parent=self,
task=self.task,
reviewer=self.reviewer,
review_type=self.review_type,
)
self.main_layout.addWidget(self.review_widget)
# Button Box
self.button_box = QtWidgets.QDialogButtonBox(self)
self.button_box.setOrientation(QtCore.Qt.Horizontal)
self.button_box.setStandardButtons(
QtWidgets.QDialogButtonBox.Cancel |
QtWidgets.QDialogButtonBox.Ok
)
self.main_layout.addWidget(self.button_box)
# setup signals
from functools import partial
self.button_box.accepted.connect(partial(self.accept))
self.button_box.rejected.connect(partial(self.reject))
def accept(self):
"""runs when the dialog is accepted
"""
# finalize the review
review = self.review_widget.finalize_review()
if review:
QtWidgets.QMessageBox.information(
self,
"Success",
"Review is created!"
)
# do the default behaviour
super(ReviewDialog, self).accept()
| 29.769231 | 80 | 0.655592 | 1,861 | 0.686969 | 0 | 0 | 0 | 0 | 0 | 0 | 864 | 0.318937 |
9d47cbe33f2156eddf7fcd553e506425ed8d1607 | 12,737 | py | Python | squares/dsl/interpreter.py | Vivokas20/SKEL | d8766ceaa8aa766ea3580bbb61b747572ebfe77c | [
"Apache-2.0"
] | 1 | 2022-01-20T14:57:30.000Z | 2022-01-20T14:57:30.000Z | squares/dsl/interpreter.py | Vivokas20/SKEL | d8766ceaa8aa766ea3580bbb61b747572ebfe77c | [
"Apache-2.0"
] | null | null | null | squares/dsl/interpreter.py | Vivokas20/SKEL | d8766ceaa8aa766ea3580bbb61b747572ebfe77c | [
"Apache-2.0"
] | null | null | null | import math
import re
from itertools import permutations
from logging import getLogger
from typing import Tuple, Union
from rpy2 import robjects
from rpy2.rinterface_lib.embedded import RRuntimeError
from z3 import BitVecVal
from .. import util, results
from ..decider import RowNumberInfo
from ..program import LineInterpreter
from ..tyrell.interpreter import InterpreterError
logger = getLogger('squares.interpreter')
def get_type(df, index):
_script = f'sapply({df}, class)[{index}]'
ret_val = robjects.r(_script)
return ret_val[0]
class RedudantError(InterpreterError):
def __init__(self, *args):
pass
def add_quotes(string: str) -> str:
new_string = ""
if string != '':
string = string.replace(" ", "").replace("\"", "").replace("'","").split(",")
for s in string:
if "=" in s:
new = s.split("=")
new_string += "'" + new[0] + "'" + " = " + "'" + new[1] + "'" + ","
else:
new_string += "'" + s + "'" + ","
new_string = new_string[:-1]
return new_string
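# Illustrative behaviour of add_quotes (examples only):
#   add_quotes("key1 = col1, key2")  ->  "'key1' = 'col1','key2'"
#   add_quotes("")                   ->  ""
# i.e. every bare token is wrapped in single quotes and '=' pairs are preserved.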
def eval_decorator(func):
def wrapper(self, args, key):
if key and not self.final_interpretation and util.get_config().cache_ops:
if not key in self.cache:
name = util.get_fresh_name()
self.try_execute(func(self, name, args))
# if robjects.r(f'all_equal({name}, {args[0]}, convert=T, ignore_row_order=T)')[0] is True:
# results.redundant_lines += 1
# raise RedudantError()
self.cache[key] = name
return self.cache[key]
name = util.get_fresh_name()
script = func(self, name, args)
if self.final_interpretation:
self.program += script
self.try_execute(script)
return name
return wrapper
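# Worked example of the decorator above: a decorated method such as eval_filter only
# builds an R script string; the wrapper allocates a fresh table name, executes the
# script via try_execute and, outside the final interpretation, caches the resulting
# name under `key`. For instance (hypothetical names):
#   eval_filter(args=['tbl0', 'n > 3'], key=...)  runs  "tbl1 <- tbl0 %>% filter(n > 3)"
# and returns 'tbl1'.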
class SquaresInterpreter(LineInterpreter):
def __init__(self, problem, final_interpretation=False):
self.problem = problem
self.program = ''
self.final_interpretation = final_interpretation
self.cache = {}
def try_execute(self, script):
try:
# print("SCRIPT_EXEC")
# print(script, end='')
robjects.r(script)
except (Exception, RRuntimeError) as e:
# logger.error("Error while evaluating program")
# logger.error("%s", str(e))
raise InterpreterError(e)
@eval_decorator
def eval_filter(self, name, args):
return f'{name} <- {args[0]} %>% filter({args[1]})\n'
@eval_decorator
def eval_filters(self, name, args):
return f'{name} <- {args[0]} %>% filter({args[1]} {args[3]} {args[2]})\n'
@eval_decorator
def eval_summarise(self, name, args):
if args[2]:
args2 = args[2].replace("'", "")
else:
args2 = args[2]
re_object = re.fullmatch(r'([A-Za-z_]+)\$([A-Za-z_]+)', args[1])
if re_object:
return f'{name} <- {args[0]} %>% group_by({args2}) %>% summarise_{re_object.groups()[0]}({re_object.groups()[1]}) %>% ungroup()\n'
else:
return f'{name} <- {args[0]} %>% group_by({args2}) %>% summarise({args[1]}) %>% ungroup()\n'
@eval_decorator
def eval_mutate(self, name, args):
re_object = re.fullmatch(r'([A-Za-z_]+)\$([A-Za-z_]+)', args[1])
if re_object:
return f'{name} <- {args[0]} %>% mutate_{re_object.groups()[0]}({re_object.groups()[1]})\n'
else:
return f'{name} <- {args[0]} %>% mutate({args[1]})\n'
@eval_decorator
def eval_inner_join(self, name, args):
if args[2] and "'" not in args[2]:
args2 = add_quotes(args[2])
else:
args2 = args[2]
_script = f"{name} <- inner_join({args[0]}, {args[1]}, by=c({args2}), suffix = c('', '.other'), na_matches='{util.get_config().na_matches}')"
for pair in args2.split(','):
if '=' in pair:
A, B = pair.split('=')
A = A.strip()[1:-1]
B = B.strip()[1:-1]
if A.strip() != B.strip():
_script += f' %>% mutate({B} = {A})'
return _script + '\n'
@eval_decorator
def eval_natural_join(self, name, args):
if robjects.r(f'length(intersect(colnames({args[0]}), colnames({args[1]})))')[0] > 0:
return f'{name} <- inner_join({args[0]}, {args[1]}, na_matches="{util.get_config().na_matches}")\n'
else:
return f'{name} <- full_join({args[0]}, {args[1]}, by=character(), na_matches="{util.get_config().na_matches}")\n'
@eval_decorator
def eval_natural_join3(self, name, args):
_script = f'{name} <- '
if robjects.r(f'length(intersect(colnames({args[0]}), colnames({args[1]})))')[0] > 0:
_script += f'inner_join({args[0]}, {args[1]}, na_matches="{util.get_config().na_matches}") '
else:
_script += f'full_join({args[0]}, {args[1]}, by=character(), na_matches="{util.get_config().na_matches}") '
if robjects.r(f'length(intersect(union(colnames({args[0]}), colnames({args[1]})), colnames({args[2]})))')[0] > 0:
_script += f'%>% inner_join({args[2]}, na_matches="{util.get_config().na_matches}")\n'
else:
_script += f'%>% full_join({args[2]}, by=character(), na_matches="{util.get_config().na_matches}")\n'
return _script
@eval_decorator
def eval_natural_join4(self, name, args):
_script = f'{name} <- '
if robjects.r(f'length(intersect(colnames({args[0]}), colnames({args[1]})))')[0] > 0:
_script += f'inner_join({args[0]}, {args[1]}, na_matches="{util.get_config().na_matches}") '
else:
_script += f'full_join({args[0]}, {args[1]}, by=character(), na_matches="{util.get_config().na_matches}") '
if robjects.r(f'length(intersect(union(colnames({args[0]}), colnames({args[1]})), colnames({args[2]})))')[0] > 0:
_script += f'%>% inner_join({args[2]}, na_matches="{util.get_config().na_matches}") '
else:
_script += f'%>% full_join({args[2]}, by=character(), na_matches="{util.get_config().na_matches}") '
if robjects.r(f'length(intersect(union(union(colnames({args[0]}), colnames({args[1]})), colnames({args[2]})), colnames({args[3]})))')[0] > 0:
_script += f'%>% inner_join({args[3]}, na_matches="{util.get_config().na_matches}")\n'
else:
_script += f'%>% full_join({args[3]}, by=character(), na_matches="{util.get_config().na_matches}")\n'
return _script
@eval_decorator
def eval_anti_join(self, name, args):
if args[2] and "'" not in args[2]:
args2 = add_quotes(args[2])
else:
args2 = args[2]
return f'{name} <- anti_join({args[0]}, {args[1]}, by=c({args2}), na_matches="{util.get_config().na_matches}")\n'
@eval_decorator
def eval_left_join(self, name, args):
return f'{name} <- left_join({args[0]}, {args[1]}, na_matches="{util.get_config().na_matches}")\n'
@eval_decorator
def eval_union(self, name, args):
return f'{name} <- bind_rows({args[0]}, {args[1]})\n'
@eval_decorator
def eval_intersect(self, name, args):
return f'{name} <- intersect(select({args[0]},{args[2]}), select({args[1]}, {args[2]}))\n'
@eval_decorator
def eval_semi_join(self, name, args):
return f'{name} <- semi_join({args[0]}, {args[1]}, na_matches="{util.get_config().na_matches}")\n'
@eval_decorator
def eval_cross_join(self, name, args):
_script = f'{name} <- full_join({args[0]}, {args[1]}, by=character(), suffix = c("", ".other"), na_matches="{util.get_config().na_matches}")'
if args[2] != '':
_script += f' %>% filter({args[2]})'
return _script + '\n'
@eval_decorator
def eval_unite(self, name, args):
return f'{name} <- unite({args[0]}, {args[1]}, {args[1]}, {args[2]}, sep=":", remove=F)\n'
def apply_row(self, val):
df = robjects.r(val)
return df.nrow
def apply_col(self, val):
df = robjects.r(val)
return df.ncol
def apply_columns(self, val):
a = list(robjects.r(f'colnames({val})'))
bools = list(map(lambda c: c in a, self.problem.all_columns))
raise NotImplementedError()
def equals(self, actual: str, expect: str, *args) -> Tuple[bool, float, Union[RowNumberInfo, None]]:
if robjects.r(f'nrow({actual})')[0] == 0:
results.empty_output += 1
# with rpy2.robjects.conversion.localconverter(robjects.default_converter + pandas2ri.converter):
# print(robjects.conversion.rpy2py(robjects.r(actual)))
score = robjects.r(f'ue <- {expect} %>% unlist %>% unique;length(intersect({actual} %>% unlist %>% unique, ue)) / length(ue)')[0]
if math.isnan(score):
score = 0
if not util.get_config().subsume_conditions and score < 1:
return False, score, None
sketch_cols = None
sketch_distinct = None
sketch_order = None
if self.problem.sketch and self.problem.sketch.select:
if "cols" in self.problem.sketch.select:
sketch_cols = tuple(self.problem.sketch.select["cols"])
if "distinct" in self.problem.sketch.select:
sketch_distinct = self.problem.sketch.select["distinct"]
if "arrange" in self.problem.sketch.select:
sketch_order = self.problem.sketch.select["arrange"]
# The columns are already described in the output so we don't need to use them
a_cols = list(robjects.r(f'colnames({actual})'))
e_cols = list(robjects.r(f'colnames({expect})'))
expected_n = int(robjects.r(f'nrow({expect})')[0])
result = None
if sketch_cols:
selected_columns = [sketch_cols]
else:
selected_columns = permutations(a_cols, len(e_cols))
for combination in selected_columns:
for d in sketch_distinct if sketch_distinct is not None else ['', ' %>% distinct()']:
_script = f'out <- {actual} %>% select({", ".join(map(lambda pair: f"{pair[0]} = {pair[1]}" if pair[0] != pair[1] else pair[0], zip(e_cols, combination)))}){d}'
try:
robjects.r(_script)
if self.test_equality('out', expect, False):
if self.final_interpretation:
if sketch_order != []: # None implies that there is no sketch so it must be [] to ensure there is no order by
if sketch_order:
perms = sketch_order
else:
perms = util.get_permutations(e_cols, len(e_cols))
for perm in perms:
name = util.get_fresh_name()
new_script = f'{name} <- out %>% arrange({perm})'
robjects.r(new_script)
if self.test_equality(name, expect, True):
_script += f' %>% arrange({perm})'
break
self.program += _script + '\n'
return True, score, None
except:
continue
finally:
if util.get_config().subsume_conditions and result != RowNumberInfo.UNKNOWN:
actual_n = int(robjects.r(f'nrow(out)')[0])
if actual_n > expected_n:
if result is None or result == RowNumberInfo.LESS_ROWS:
result = RowNumberInfo.LESS_ROWS
else:
result = RowNumberInfo.UNKNOWN
if actual_n < expected_n:
if result is None or result == RowNumberInfo.MORE_ROWS:
result = RowNumberInfo.MORE_ROWS
else:
result = RowNumberInfo.UNKNOWN
return False, score, result
def test_equality(self, actual: str, expect: str, keep_order: bool = False) -> bool:
if not keep_order:
_script = f'all_equal({actual}, {expect}, convert=T)'
else:
_script = f'all_equal({actual}, {expect}, convert=T, ignore_row_order=T)'
try:
return robjects.r(_script)[0] is True
except:
return False
| 42.885522 | 176 | 0.544241 | 10,933 | 0.858365 | 0 | 0 | 5,494 | 0.431342 | 0 | 0 | 4,216 | 0.331004 |
9d4857e094a5401228d6f2b6484e13982abb69b9 | 7,869 | py | Python | src/data_preparation/process_airbnb_data.py | ejgenc/Data-Analysis_Istanbul-Health-Tourism | 34b9838690ca640c6a7a60f63eb2f51983ec46ef | [
"MIT"
] | 1 | 2020-11-18T15:27:53.000Z | 2020-11-18T15:27:53.000Z | src/data_preparation/process_airbnb_data.py | ejgenc/Data-Analysis_Istanbul-Health-Tourism | 34b9838690ca640c6a7a60f63eb2f51983ec46ef | [
"MIT"
] | null | null | null | src/data_preparation/process_airbnb_data.py | ejgenc/Data-Analysis_Istanbul-Health-Tourism | 34b9838690ca640c6a7a60f63eb2f51983ec46ef | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
------ What is this file? ------
This script targets the istanbul_airbnb_raw.csv file. It cleans the .csv
file in order to prepare it for further analysis
"""
#%% --- Import Required Packages ---
import os
import pathlib
from pathlib import Path # To wrap around filepaths
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from scipy.stats import iqr
from src.helper_functions.data_preparation_helper_functions import sample_and_read_from_df
from src.helper_functions.data_preparation_helper_functions import report_null_values
#%% --- Set proper directory to assure integration with doit ---
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
#%% --- Import Data ---
import_fp = Path("../../data/raw/istanbul_airbnb_raw.csv")
airbnb = pd.read_csv(import_fp, encoding='utf-8-sig')
#%% --- Get a general sense of the datasets ---
# Shape of the data
print(airbnb.shape) # 16251 rows, 16 cols
# First few lines
print(airbnb.head())
#Not much info, let's print the columns
airbnb_columns = airbnb.columns
#%% --- Clean the dataset: Relevant - Irrelevant Columns ---
airbnb_unwanted_columns = ["neighbourhood_group", "last_review", "number_of_reviews",
"minimum_nights",
"reviews_per_month",
"calculated_host_listings_count",
"availability_365"]
#Drop unwanted columns
airbnb.drop(columns = airbnb_unwanted_columns,
axis = 1,
inplace = True)
# Check shape now
print(airbnb.shape) # 16251 rows, 9 cols
#%% --- Clean the dataset: Further Troubleshooting ---
#I want to be able to randomly take n samples from each dataset and then print them
#in a clean format to see the potential problems.
#If I had something to test for, I'd strive for a representative sample size
#while sampling. However, the best I can do here is to print what I can read,
#because I don't have any computational measure to test against:
sample_and_read_from_df(airbnb, 20)
#SPOTTED PROBLEMS:
# dataframe airbnb column "neighbourhood" is not properly formatted:
#   - it needs formatting fixes
#   - it should actually be called "district_tr"
#   - there should be an accompanying "district_eng" column.
#%% --- Fix column naming ---
#I can use either dataframe.columns attribute to assign new columns
#or i can pass a dictionary with old names/new names into dataframe.rename()
airbnb_columns_in_english = ["listing_id", "name", "host_id", "host_name", "district_eng",
"latitude", "longitude", "room_type", "price"]
airbnb.columns = airbnb_columns_in_english
#%% --- One-off fix for districts named "Eyup" ---
eyup_mask = airbnb.loc[:,"district_eng"] == "Eyup"
airbnb.loc[eyup_mask, "district_eng"] = "Eyupsultan"
#%% --- Add a new "district_tr" column
airbnb.loc[:,"district_tr"] = airbnb.loc[:,"district_eng"].str.lower().str.capitalize()
#I will be using the df.map() method, so I'll need two lists: one for the tr values
#and one for the existing eng values
unique_districts_tr_corrected = ["Kadıköy", "Fatih", "Tuzla", "Gaziosmanpaşa",
"Üsküdar", "Adalar", "Sarıyer", "Arnavutköy",
"Silivri", "Çatalca", "Küçükçekmece", "Beyoğlu",
"Şile", "Kartal", "Şişli", "Beşiktaş", "Kağıthane",
"Esenyurt", "Bahçelievler", "Avcılar", "Başakşehir",
"Sultangazi", "Maltepe", "Sancaktepe", "Beykoz",
"Büyükçekmece", "Bakırköy", "Pendik", "Bağcılar",
"Esenler", "Beylikdüzü", "Ümraniye", "Eyüpsultan",
"Çekmeköy", "Ataşehir", "Sultanbeyli", "Zeytinburnu",
"Güngören", "Bayrampaşa"]
unique_districts_eng_corrected = ["Kadikoy", "Fatih", "Tuzla", "Gaziosmanpasa",
"Uskudar", "Adalar", "Sariyer", "Arnavutkoy",
"Silivri", "Catalca", "Kucukcekmece", "Beyoglu",
"Sile", "Kartal", "Sisli", "Besiktas", "Kagithane",
"Esenyurt", "Bahcelievler", "Avcilar", "Basaksehir",
"Sultangazi", "Maltepe", "Sancaktepe", "Beykoz",
"Buyukcekmece", "Bakirkoy", "Pendik", "Bagcilar",
"Esenler", "Beylikduzu", "Umraniye", "Eyupsultan",
"Cekmekoy", "Atasehir", "Sultanbeyli", "Zeytinburnu",
"Gungoren", "Bayrampasa"]
airbnb_unique_districts_dict_tr = dict(zip(unique_districts_eng_corrected, unique_districts_tr_corrected))
airbnb.loc[:,"district_tr"] = airbnb.loc[:,"district_tr"].map(airbnb_unique_districts_dict_tr)
#%% --- EDA: Explore Missing Values ---
#Let's check null values first
null_report = report_null_values(airbnb)
#We have so few missing values, dropping them won't affect our quality at all.
# Let's do exactly that.
airbnb.dropna(axis = 0,
inplace = True)
#%% --- EDA: Explore Datatype agreement ---
#Now, let's check data type agreement for each column.
data_types = airbnb.dtypes
# The data types with "object" warrant further investigation
#They could just be strings, but mixed data types also show as "object"
# Let's select "object" data types and query once again.
airbnb_dtype_object_only = airbnb.select_dtypes(include = ["object"])
print(airbnb_dtype_object_only.columns)
#As all of these columns seem to accommodate only strings, we can be
#pretty sure that showing up as "object" is correct behavior.
#%% --- EDA - Explore Outliers in price ---
fig = plt.figure(figsize = (19.20, 10.80))
ax = fig.add_subplot(1,1,1)
ax.hist(x = airbnb.loc[:,"price"],
bins = 20)
#Our histogram is very wonky. It's obvious that there are some issues. Let's see:
# It doesn't make sense for a airbnb room to cost 0 liras. That's for sure.
print(airbnb.loc[:,"price"].sort_values().head(20))
#What about maxes?
print(airbnb.loc[:,"price"].sort_values(ascending = False).head(30))
#There are some very high maxes, that's for sure. Let's try to make heads and tails of
#what these houses are:
possible_outliers = airbnb.sort_values(by = "price",
axis = 0,
ascending = False).head(30)
# A qualitative analysis of such houses shows that there really appears to be a problem
#with pricing. Let's calculate the IQR to drop the outliers:
#Calculate the iqr
price_iqr = iqr(airbnb.loc[:,"price"], axis = 0)
#Calculate q3 and q1
q1 = airbnb["price"].quantile(0.25)
q3 = airbnb["price"].quantile(0.75)
#Create min and max mask
min_mask = airbnb.loc[:,"price"] >= q1 - (1.5 * price_iqr)
max_mask = airbnb.loc[:,"price"] <= q3 + (1.5 * price_iqr)
#Combine masks
combined_mask = min_mask & max_mask
#Create subset
airbnb_within_iqr = airbnb.loc[combined_mask]
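#Note: the two masks above implement Tukey's fences, i.e. only prices inside
#[q1 - 1.5 * IQR, q3 + 1.5 * IQR] survive into airbnb_within_iqr.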
fig = plt.figure(figsize = (19.20, 10.80))
ax = fig.add_subplot(1,1,1)
ax.hist(x = airbnb_within_iqr.loc[:,"price"],
bins = 20)
#Alright, limiting our data to an IQR appears to omit a whole lot of data.
#I am sure that some of the outliers we have are errors of entry.
#However, the only ones we can conclusively prove to be errors are the entries priced at 0.
#We'll drop these.
#Create a mask for zeros
zero_mask = (airbnb.loc[:,"price"] > 0)
#Filter using the mask
airbnb = airbnb.loc[zero_mask,:]
# #%% --- Export Data ---
export_fp = Path("../../data/processed/istanbul_airbnb_processed.csv")
airbnb.to_csv(export_fp,
encoding='utf-8-sig',
index = False) | 38.199029 | 106 | 0.641632 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,484 | 0.566877 |
9d4ac45e3a86ef95dc9b84f578aa4f83f679c9b6 | 3,695 | py | Python | py/shure.py | dman776/micboard | 166987dfad529dc35654f402fdbbde7f16b60f77 | [
"MIT"
] | 44 | 2019-08-30T02:51:59.000Z | 2022-03-15T13:47:18.000Z | py/shure.py | dman776/micboard | 166987dfad529dc35654f402fdbbde7f16b60f77 | [
"MIT"
] | 21 | 2019-09-01T16:17:22.000Z | 2022-02-01T15:47:55.000Z | py/shure.py | dman776/micboard | 166987dfad529dc35654f402fdbbde7f16b60f77 | [
"MIT"
] | 16 | 2019-09-01T01:40:09.000Z | 2022-03-15T17:12:28.000Z | import time
import select
import queue
import atexit
import sys
import logging
from networkdevice import ShureNetworkDevice
from channel import chart_update_list, data_update_list
# from mic import WirelessMic
# from iem import IEM
NetworkDevices = []
DeviceMessageQueue = queue.Queue()
def get_network_device_by_ip(ip):
return next((x for x in NetworkDevices if x.ip == ip), None)
def get_network_device_by_slot(slot):
for networkdevice in NetworkDevices:
for channel in networkdevice.channels:
if channel.slot == slot:
return channel
def check_add_network_device(ip, type):
net = get_network_device_by_ip(ip)
if net:
return net
net = ShureNetworkDevice(ip, type)
NetworkDevices.append(net)
return net
def watchdog_monitor():
for rx in (rx for rx in NetworkDevices if rx.rx_com_status == 'CONNECTED'):
if (int(time.perf_counter()) - rx.socket_watchdog) > 5:
logging.debug('disconnected from: %s', rx.ip)
rx.socket_disconnect()
for rx in (rx for rx in NetworkDevices if rx.rx_com_status == 'CONNECTING'):
if (int(time.perf_counter()) - rx.socket_watchdog) > 2:
rx.socket_disconnect()
for rx in (rx for rx in NetworkDevices if rx.rx_com_status == 'DISCONNECTED'):
if (int(time.perf_counter()) - rx.socket_watchdog) > 20:
rx.socket_connect()
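# Summary of the watchdog above: CONNECTED sockets are dropped after 5s of silence,
# CONNECTING attempts time out after 2s, and DISCONNECTED receivers retry every 20s.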
def WirelessQueryQueue():
while True:
for rx in (rx for rx in NetworkDevices if rx.rx_com_status == 'CONNECTED'):
strings = rx.get_query_strings()
for string in strings:
rx.writeQueue.put(string)
time.sleep(10)
def ProcessRXMessageQueue():
while True:
rx, msg = DeviceMessageQueue.get()
rx.parse_raw_rx(msg)
def SocketService():
for rx in NetworkDevices:
rx.socket_connect()
while True:
watchdog_monitor()
readrx = [rx for rx in NetworkDevices if rx.rx_com_status in ['CONNECTING', 'CONNECTED']]
writerx = [rx for rx in readrx if not rx.writeQueue.empty()]
read_socks, write_socks, error_socks = select.select(readrx, writerx, readrx, .2)
for rx in read_socks:
try:
data = rx.f.recv(1024).decode('UTF-8')
except:
rx.socket_disconnect()
break
# print("read: {} data: {}".format(rx.ip,data))
d = '>'
if rx.type == 'uhfr':
d = '*'
data = [e+d for e in data.split(d) if e]
for line in data:
# rx.parse_raw_rx(line)
DeviceMessageQueue.put((rx, line))
rx.socket_watchdog = int(time.perf_counter())
rx.set_rx_com_status('CONNECTED')
for rx in write_socks:
string = rx.writeQueue.get()
logging.debug("write: %s data: %s", rx.ip, string)
try:
if rx.type in ['qlxd', 'ulxd', 'axtd', 'p10t']:
rx.f.sendall(bytearray(string, 'UTF-8'))
elif rx.type == 'uhfr':
rx.f.sendto(bytearray(string, 'UTF-8'), (rx.ip, 2202))
except:
logging.warning("TX ERROR IP: %s String: %s", rx.ip, string)
for rx in error_socks:
rx.set_rx_com_status('DISCONNECTED')
# @atexit.register
def on_exit():
connected = [rx for rx in NetworkDevices if rx.rx_com_status == 'CONNECTED']
for rx in connected:
rx.disable_metering()
time.sleep(50)
print("IT DONE!")
sys.exit(0)
# atexit.register(on_exit)
# signal.signal(signal.SIGTERM, on_exit)
# signal.signal(signal.SIGINT, on_exit)
| 29.56 | 97 | 0.603518 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 494 | 0.133694 |
9d4cf41e0ad2b23397f4ee9bbfa792895ce345d0 | 277 | py | Python | alviscorpus/status.py | Bibliome/alviscorpus | ec7bf45efbc6a3cd864fda48e0066090cfb93313 | [
"MIT"
] | null | null | null | alviscorpus/status.py | Bibliome/alviscorpus | ec7bf45efbc6a3cd864fda48e0066090cfb93313 | [
"MIT"
] | null | null | null | alviscorpus/status.py | Bibliome/alviscorpus | ec7bf45efbc6a3cd864fda48e0066090cfb93313 | [
"MIT"
] | null | null | null | import enum
class Status(enum.Enum):
QUEUED = 'queued'
STARTED = 'started'
FINISHED = 'finished'
ERROR = 'error'
def __str__(self):
return self.value
QUEUED = Status.QUEUED
STARTED = Status.STARTED
FINISHED = Status.FINISHED
ERROR = Status.ERROR
| 17.3125 | 26 | 0.67148 | 166 | 0.599278 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.122744 |
9d4d51c8583e8a9e583bab2100d07d40d0fad696 | 977 | py | Python | 01_simulate_competition_experiment.py | stevenshave/competition-label-affinity | 2383309e852954a1bd88c6364087c3d57e7acec0 | [
"MIT"
] | null | null | null | 01_simulate_competition_experiment.py | stevenshave/competition-label-affinity | 2383309e852954a1bd88c6364087c3d57e7acec0 | [
"MIT"
] | null | null | null | 01_simulate_competition_experiment.py | stevenshave/competition-label-affinity | 2383309e852954a1bd88c6364087c3d57e7acec0 | [
"MIT"
] | null | null | null | """Simulation of 1:1:1 competition binding"""
import numpy as np
from high_accuracy_binding_equations import *
# We can choose to work in a common unit, typically nM, or uM, as long as all
# numbers are in the same unit, the result is valid. We assume uM for all
# concentrations below.
# First, let's simulate a few single points from three different systems.
# p, l and i are protein, ligand and inhibitor concentrations respectively
# kdpl is the dissociation constant (KD) of the protein-ligand interaction
# kdpi is the dissociation constant (KD) of the protein-inhibitor interaction
# We can either expand the dictionary with ** as shown in the example with
# system1, or we can pass arguments to competition_pl with the following
# signature: competition_pl(p, l, i, kdpl, kdpi)
system1={"p":1, "l":2, "i":10, "kdpl":0.1, "kdpi":10}
pl_conc=competition_pl(**system1)
print(f"pl_conc = {round(pl_conc,4)}, fraction ligand bound = {round(pl_conc/system1['l'],4)}")
| 36.185185 | 95 | 0.745138 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 816 | 0.83521 |
9d50b18aa63e6f3b4b6406ced31f91d878b8ae26 | 773 | py | Python | e_vae_proj/qualitative/mnist/btcvae/gen_train.py | kuangdai/disentangling-vae | 9a5f9da44a82a2c643b7289c4945320621b86247 | [
"MIT"
] | 1 | 2021-06-30T08:58:49.000Z | 2021-06-30T08:58:49.000Z | e_vae_proj/qualitative/mnist/btcvae/gen_train.py | kuangdai/disentangling-vae | 9a5f9da44a82a2c643b7289c4945320621b86247 | [
"MIT"
] | null | null | null | e_vae_proj/qualitative/mnist/btcvae/gen_train.py | kuangdai/disentangling-vae | 9a5f9da44a82a2c643b7289c4945320621b86247 | [
"MIT"
] | null | null | null | import numpy as np
from pathlib import Path
import sys
if __name__ == '__main__':
# absolute path
my_path = Path(__file__).parent.resolve().expanduser()
main_path = my_path.parent.parent
seed = 0
nlat = 10
alpha = 1.0
beta = 6.0
gamma = 1.0
epochs = 100
# cmd template
cmd = f'python main.py btcvae_mnist_{epochs}ep/z{nlat}_a{alpha}_b{beta}_g{gamma}_s{seed} -s {seed} ' \
f'--checkpoint-every 25 -d mnist -e {epochs} -b 64 --lr 0.0005 ' \
f'-z {nlat} -l btcvae --btcvae-A {alpha} --btcvae-B {beta} --btcvae-G {gamma} ' \
f'--no-test\n'
with open(my_path / f'train_beta{beta}.sh', 'w') as f:
unnormalized_beta = beta * nlat
f.write(cmd)
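        # For the parameter values above, train_beta6.0.sh ends up containing the single line:
        # python main.py btcvae_mnist_100ep/z10_a1.0_b6.0_g1.0_s0 -s 0 --checkpoint-every 25 -d mnist -e 100 -b 64 --lr 0.0005 -z 10 -l btcvae --btcvae-A 1.0 --btcvae-B 6.0 --btcvae-G 1.0 --no-test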
| 28.62963 | 107 | 0.564036 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 317 | 0.410091 |
9d5197f8d1796538860fe2f3fb98a1af46c8ef38 | 3,331 | py | Python | tests/test_load.py | tom3131/simfin | 8ef5a2b0dd67ddcd3f8b92b5cd45c1a483eeada1 | [
"MIT"
] | 231 | 2019-09-25T13:30:00.000Z | 2022-03-26T08:00:47.000Z | tests/test_load.py | tom3131/simfin | 8ef5a2b0dd67ddcd3f8b92b5cd45c1a483eeada1 | [
"MIT"
] | 11 | 2019-10-01T14:50:15.000Z | 2022-02-23T10:35:47.000Z | tests/test_load.py | tom3131/simfin | 8ef5a2b0dd67ddcd3f8b92b5cd45c1a483eeada1 | [
"MIT"
] | 36 | 2019-09-30T16:14:48.000Z | 2022-03-19T19:59:30.000Z | ##########################################################################
#
# Unit tests (pytest) for load.py
#
##########################################################################
# SimFin - Simple financial data for Python.
# www.simfin.com - www.github.com/simfin/simfin
# See README.md for instructions and LICENSE.txt for license details.
##########################################################################
import simfin as sf
from simfin.datasets import iter_all_datasets
##########################################################################
# Test configuration.
# Set data directory.
sf.set_data_dir(data_dir='~/simfin_data/')
# Load API key or use default 'free' if key-file doesn't exist.
sf.load_api_key(path='~/simfin_api_key.txt', default_key='free')
# Set number of days before refreshing data from SimFin server.
refresh_days = 30
##########################################################################
# Helper functions.
def _create_kwargs(variant, market):
"""
Create a dict with keyword args for sf.load() functions that take
variant, market and refresh_days as kwargs.
"""
kwargs = \
{
'variant': variant,
'market': market,
'refresh_days': refresh_days,
}
return kwargs
##########################################################################
# Test functions.
def test_load():
"""Test simfin.bulk.load()"""
for dataset, variant, market in iter_all_datasets():
sf.load(dataset=dataset,
variant=variant,
market=market,
refresh_days=refresh_days)
def test_load_income():
"""Test simfin.bulk.load_income()"""
for dataset, variant, market in iter_all_datasets(datasets='income'):
kwargs = _create_kwargs(variant=variant, market=market)
sf.load_income(**kwargs)
sf.load_income_banks(**kwargs)
sf.load_income_insurance(**kwargs)
def test_load_balance():
"""Test simfin.bulk.load_balance()"""
for dataset, variant, market in iter_all_datasets(datasets='balance'):
kwargs = _create_kwargs(variant=variant, market=market)
sf.load_balance(**kwargs)
sf.load_balance_banks(**kwargs)
sf.load_balance_insurance(**kwargs)
def test_load_cashflow():
"""Test simfin.bulk.load_cashflow()"""
for dataset, variant, market in iter_all_datasets(datasets='cashflow'):
kwargs = _create_kwargs(variant=variant, market=market)
sf.load_cashflow(**kwargs)
sf.load_cashflow_banks(**kwargs)
sf.load_cashflow_insurance(**kwargs)
def test_load_shareprices():
"""Test simfin.bulk.load_shareprices()"""
for dataset, variant, market in iter_all_datasets(datasets='shareprices'):
kwargs = _create_kwargs(variant=variant, market=market)
sf.load_shareprices(**kwargs)
def test_load_companies():
"""Test simfin.bulk.load_companies()"""
for dataset, variant, market in iter_all_datasets(datasets='companies'):
kwargs = _create_kwargs(variant=variant, market=market)
sf.load_companies(**kwargs)
def test_load_industries():
"""Test simfin.bulk.load_industries()"""
sf.load_industries(refresh_days=refresh_days)
##########################################################################
| 31.424528 | 78 | 0.576403 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,432 | 0.429901 |
9d5477ef2956d3615e64662b0ab23440b2cbff69 | 447 | py | Python | lab/__init__.py | patel-zeel/lab | cc0df2c03196863041e78fa4179445341e86958c | [
"MIT"
] | 36 | 2018-05-08T20:54:21.000Z | 2022-02-24T09:15:58.000Z | lab/__init__.py | patel-zeel/lab | cc0df2c03196863041e78fa4179445341e86958c | [
"MIT"
] | 4 | 2021-06-24T11:59:29.000Z | 2022-02-01T15:51:30.000Z | lab/__init__.py | patel-zeel/lab | cc0df2c03196863041e78fa4179445341e86958c | [
"MIT"
] | 3 | 2021-02-14T13:00:26.000Z | 2021-12-10T08:55:17.000Z | import sys
from plum import Dispatcher
B = sys.modules[__name__] # Allow both import styles.
dispatch = Dispatcher() # This dispatch namespace will be used everywhere.
from .generic import *
from .shaping import *
from .linear_algebra import *
from .random import *
from .numpy import *
from .types import *
from .control_flow import *
# Fix namespace issues with `B.bvn_cdf` simply by setting it explicitly.
B.bvn_cdf = B.generic.bvn_cdf
| 22.35 | 75 | 0.756152 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 149 | 0.333333 |
9d556827bb836c6e6f6530ec156f0777935a5dea | 1,514 | py | Python | async_nbgrader/apps/exportapp.py | IllumiDesk/async_nbgrader | 427e1b634277c043a1ed9f00bf7e417e0f611aca | [
"Apache-2.0"
] | 2 | 2021-06-23T17:58:22.000Z | 2021-09-27T10:00:01.000Z | async_nbgrader/apps/exportapp.py | IllumiDesk/async-nbgrader | 427e1b634277c043a1ed9f00bf7e417e0f611aca | [
"Apache-2.0"
] | 6 | 2021-06-17T21:40:24.000Z | 2021-11-11T17:48:15.000Z | async_nbgrader/apps/exportapp.py | IllumiDesk/async-nbgrader | 427e1b634277c043a1ed9f00bf7e417e0f611aca | [
"Apache-2.0"
] | 2 | 2021-06-10T18:16:22.000Z | 2021-06-17T02:52:45.000Z | # coding: utf-8
from nbgrader.api import Gradebook
from nbgrader.apps import ExportApp as BaseExportApp
from traitlets import Instance
from traitlets import Type
from traitlets import default
from ..plugins import CanvasCsvExportPlugin
from ..plugins import CustomExportPlugin
aliases = {
"log-level": "Application.log_level",
"db": "CourseDirectory.db_url",
"to": "CanvasCsvExportPlugin.to",
"canvas_import": "CanvasCsvExportPlugin.canvas_import",
"exporter": "ExportApp.plugin_class",
"assignment": "CanvasCsvExportPlugin.assignment",
"student": "CanvasCsvExportPlugin.student",
"course": "CourseDirectory.course_id",
}
flags = {}
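# Assuming traitlets' standard alias handling, each key in `aliases` becomes a
# command-line flag of the same name, so a hypothetical invocation might look like:
#   async_nbgrader-export --course=course101 --assignment=ps1 --to=grades.csv
# (flag values here are illustrative only).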
class ExportApp(BaseExportApp):
"""Custom nbgrader export app to export grades from a Canvas LMS
course.
"""
name = "async_nbgrader-export"
aliases = aliases
plugin_class = Type(
CanvasCsvExportPlugin,
klass=CustomExportPlugin,
help="The plugin class for exporting the grades.",
).tag(config=True)
plugin_inst = Instance(CustomExportPlugin).tag(config=False)
@default("classes")
def _classes_default(self):
classes = super(ExportApp, self)._classes_default()
classes.append(ExportApp)
classes.append(CustomExportPlugin)
return classes
def start(self):
super(ExportApp, self).start()
self.init_plugin()
with Gradebook(self.coursedir.db_url, self.coursedir.course_id) as gb:
self.plugin_inst.export(gb)
| 27.527273 | 78 | 0.703435 | 842 | 0.556143 | 0 | 0 | 211 | 0.139366 | 0 | 0 | 474 | 0.313078 |
9d55833e8ac84841e916071829ab4546156cae04 | 2,810 | py | Python | django_ocr_server/conf.py | shmakovpn/django_ocr_server | 4d694629c39c18a6c13bcdfafdb8258b78e5a859 | [
"Apache-2.0"
] | 17 | 2019-12-04T03:14:56.000Z | 2022-03-27T07:05:19.000Z | django_ocr_server/conf.py | shmakovpn/django_ocr_server | 4d694629c39c18a6c13bcdfafdb8258b78e5a859 | [
"Apache-2.0"
] | 1 | 2020-04-17T07:32:30.000Z | 2020-04-17T07:32:30.000Z | django_ocr_server/conf.py | shmakovpn/django_ocr_server | 4d694629c39c18a6c13bcdfafdb8258b78e5a859 | [
"Apache-2.0"
] | 5 | 2020-03-16T10:43:03.000Z | 2021-07-14T14:43:49.000Z | """
django_ocr_server/conf.py
+++++++++++++++++++++++++
The settings manager for **django_ocr_server**.
Usage:
.. code-block:: python
from django_ocr_server.conf import ocr_settings
# Next line will print a value of **OCR_TESSERACT_LANG**
# using the variable from the Django's *settings.py* file
# if the variable is set there.
# Or the default value of **OCR_TESSERACT_LANG** from
# *django_ocr_server/default_settings.py* otherwise.
print(ocr_settings.OCR_TESSERACT_LANG)
| Author: shmakovpn <[email protected]>
| Date: 2021-01-20
"""
from typing import List
from datetime import timedelta
from django.conf import settings as _s
import django_ocr_server.default_settings as _ds
class DjangoOcrSettings:
"""The settings manager of **django_ocr_server**"""
@property
def OCR_STORE_FILES(_) -> bool:
return bool(getattr(_s, 'OCR_STORE_FILES', _ds.OCR_STORE_FILES))
@property
def OCR_FILE_PREVIEW(_) -> bool:
return bool(getattr(_s, 'OCR_FILE_PREVIEW', _ds.OCR_FILE_PREVIEW))
@property
def OCR_TESSERACT_LANG(_) -> str:
return str(getattr(_s, 'OCR_TESSERACT_LANG', _ds.OCR_TESSERACT_LANG))
@property
def OCR_STORE_PDF(_) -> bool:
return bool(getattr(_s, 'OCR_STORE_PDF', _ds.OCR_STORE_PDF))
@property
def OCR_STORE_FILES_DISABLED_LABEL(_) -> str:
return str(
            getattr(_s, 'OCR_STORE_FILES_DISABLED_LABEL',
_ds.OCR_STORE_FILES_DISABLED_LABEL))
@property
def OCR_STORE_PDF_DISABLED_LABEL(_) -> str:
return str(
getattr(_s, 'OCR_FILE_REMOVED_LABEL', _ds.OCR_FILE_REMOVED_LABEL))
@property
def OCR_FILE_REMOVED_LABEL(_) -> str:
return str(
getattr(_s, 'OCR_FILE_REMOVED_LABEL', _ds.OCR_FILE_REMOVED_LABEL))
@property
def OCR_PDF_REMOVED_LABEL(_) -> str:
return str(
getattr(_s, 'OCR_PDF_REMOVED_LABEL', _ds.OCR_PDF_REMOVED_LABEL))
@property
def OCR_ALLOWED_FILE_TYPES(_) -> List[str]:
return list(
getattr(_s, 'OCR_ALLOWED_FILE_TYPES', _ds.OCR_ALLOWED_FILE_TYPES))
@property
def OCR_FILES_UPLOAD_TO(_) -> str:
return str(getattr(_s, 'OCR_FILES_UPLOAD_TO', _ds.OCR_FILES_UPLOAD_TO))
@property
def OCR_PDF_UPLOAD_TO(_) -> str:
return str(getattr(_s, 'OCR_PDF_UPLOAD_TO', _ds.OCR_PDF_UPLOAD_TO))
@property
def OCR_FILES_TTL(_) -> timedelta:
return getattr(_s, 'OCR_FILES_TTL', _ds.OCR_FILES_TTL)
@property
def OCR_PDF_TTL(_) -> timedelta:
        return getattr(_s, 'OCR_PDF_TTL', _ds.OCR_PDF_TTL)
@property
def OCR_TTL(_) -> timedelta:
return getattr(_s, 'OCR_TTL', _ds.OCR_TTL)
ocr_settings: DjangoOcrSettings = DjangoOcrSettings()
"""The instance of settings manager of **django_ocr_server**""" | 29.893617 | 79 | 0.687544 | 1,993 | 0.709253 | 0 | 0 | 1,830 | 0.651246 | 0 | 0 | 930 | 0.330961 |
9d56f0959997626e16345a92ca50c1b01d2ed5e6 | 105 | py | Python | ibms_project/sfm/apps.py | mohdbakhrayba/ibms | 029e1f3bf108586289c65bb1d547f86851f9494f | [
"Apache-2.0"
] | null | null | null | ibms_project/sfm/apps.py | mohdbakhrayba/ibms | 029e1f3bf108586289c65bb1d547f86851f9494f | [
"Apache-2.0"
] | null | null | null | ibms_project/sfm/apps.py | mohdbakhrayba/ibms | 029e1f3bf108586289c65bb1d547f86851f9494f | [
"Apache-2.0"
] | null | null | null | from django.apps import AppConfig
class SFMConfig(AppConfig):
name = 'sfm'
verbose_name = 'SFM'
| 17.5 | 33 | 0.704762 | 69 | 0.657143 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.095238 |
9d5735cba5c6faf4bc0915b6d346541d85cbb4ac | 15,960 | py | Python | torsion/model/symmetry_function.py | hnlab/TorsionNet | e81ab624f1340765345b34240a049a8cc5f4d581 | [
"MIT"
] | 15 | 2021-01-15T01:54:26.000Z | 2022-03-31T16:00:52.000Z | torsion/model/symmetry_function.py | hnlab/TorsionNet | e81ab624f1340765345b34240a049a8cc5f4d581 | [
"MIT"
] | 2 | 2021-07-21T22:42:09.000Z | 2021-11-22T06:39:20.000Z | torsion/model/symmetry_function.py | hnlab/TorsionNet | e81ab624f1340765345b34240a049a8cc5f4d581 | [
"MIT"
] | 6 | 2021-01-16T04:07:17.000Z | 2022-02-23T02:11:49.000Z | import math
import numpy as np
from openeye import oechem
from torsion.inchi_keys import get_torsion_oeatom_list, get_torsion_oebond
def GetPairwiseDistanceMatrix(icoords, jcoords):
'''
    input: two sets of coordinates, icoords, jcoords; each of which is a list
           of OEDoubleArray(3) containing the x, y, and z components
output:
xij - the x component of the distance matrix
yij - the y component of the distance matrix
zij - the z component of the distance matrix
rij - the distance matrix
rij2 - square of the distance matrix
'''
nullRet = [None, None, None, None, None]
ni = len(icoords)
nj = len(jcoords)
try:
iArrayX = np.array([c[0] for c in icoords])
iArrayY = np.array([c[1] for c in icoords])
iArrayZ = np.array([c[2] for c in icoords])
iArrayX = np.repeat(iArrayX, nj)
iArrayY = np.repeat(iArrayY, nj)
iArrayZ = np.repeat(iArrayZ, nj)
iArrayX = iArrayX.reshape(ni, nj)
iArrayY = iArrayY.reshape(ni, nj)
iArrayZ = iArrayZ.reshape(ni, nj)
jArrayX = np.array([c[0] for c in jcoords])
jArrayY = np.array([c[1] for c in jcoords])
jArrayZ = np.array([c[2] for c in jcoords])
jArrayX = np.repeat(jArrayX, ni)
jArrayY = np.repeat(jArrayY, ni)
jArrayZ = np.repeat(jArrayZ, ni)
jArrayX = jArrayX.reshape(nj, ni)
jArrayY = jArrayY.reshape(nj, ni)
jArrayZ = jArrayZ.reshape(nj, ni)
jArrayX = np.transpose(jArrayX)
jArrayY = np.transpose(jArrayY)
jArrayZ = np.transpose(jArrayZ)
ijArrayX = jArrayX - iArrayX
ijArrayY = jArrayY - iArrayY
ijArrayZ = jArrayZ - iArrayZ
rijArraySq = (ijArrayX * ijArrayX) + (ijArrayY * ijArrayY) + (ijArrayZ * ijArrayZ)
rijArray = np.sqrt(rijArraySq)
return ijArrayX, ijArrayY, ijArrayZ, rijArray, rijArraySq
except:
return nullRet
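# Quick shape/value illustration for GetPairwiseDistanceMatrix: any 3-element
# sequences work, since only c[0], c[1] and c[2] are read. For example,
#   icoords = [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0)], jcoords = [(0.0, 3.0, 4.0)]
# gives rij of shape (2, 1) with rij[0, 0] == 5.0.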
def GetThetaIJKMatrix(iCoords, jCoords, kCoords):
'''
Using the given input, calculates a matrix of angles ijk
iCoords -> OEDoubleArray containing x, y, and z component of the reference coordinate
jCoordsList -> list of N OEDoubleArrays, each OEDoubleArray is of size 3
kCoordsList -> list of M OEDoubleArrays, each OEDoubleArray is of size 3
return a N-by-M matrix of angle theta_ijk
'''
jiArrayX, jiArrayY, jiArrayZ, rjiArray, rjiArraySq \
= GetPairwiseDistanceMatrix(jCoords, iCoords)
jkArrayX, jkArrayY, jkArrayZ, rjkArray, rjkArraySq \
= GetPairwiseDistanceMatrix(jCoords, kCoords)
if jCoords == kCoords:
rjkArray = np.eye(len(jCoords)) + np.sqrt(rjkArraySq)
else:
rjkArray = np.sqrt(rjkArraySq)
if jCoords == iCoords:
rjiArray = np.eye(len(jCoords)) + np.sqrt(rjiArraySq)
else:
rjiArray = np.sqrt(rjiArraySq)
jiArrayX = jiArrayX / rjiArray
jiArrayY = jiArrayY / rjiArray
jiArrayZ = jiArrayZ / rjiArray
jkArrayX = jkArrayX / rjkArray
jkArrayY = jkArrayY / rjkArray
jkArrayZ = jkArrayZ / rjkArray
dotProduct = (jiArrayX * jkArrayX) + (jiArrayY * jkArrayY) + (jiArrayZ * jkArrayZ)
dotProduct = np.select([dotProduct <= -1.0, dotProduct >= 1.0, np.abs(dotProduct) < 1.0],
[-0.999, 0.999, dotProduct])
theta_ijk = np.arccos(dotProduct)
return theta_ijk
def GetThetaIJKLMatrix(mol, iAtoms, jAtom, kAtom, lAtoms, transform=True):
'''
Using the given input, calculates a matrix of torsion angles around jk
jAtom, kAtom -> OEAtombase, middle two atoms of the torsion
iAtoms -> list of N OEAtombase
lAtoms -> list of M OEAtombase
return a N-by-M matrix of angle theta_ijkl
'''
torsions = []
for iAtom in iAtoms:
for lAtom in lAtoms:
tor_angle = oechem.OEGetTorsion(mol, iAtom, jAtom, kAtom, lAtom)
if not transform:
torsions.append(tor_angle)
else:
torsions.append((math.pi + tor_angle) / 4.0)
theta_ijkl = np.array(torsions)
theta_ijkl = theta_ijkl.reshape(len(iAtoms), len(lAtoms))
return theta_ijkl
class SymmetryFunction:
def __init__(self):
self.rcMax = 8.0 # distance cutoff for symmetry functions
self.ita = 0.0001
self.rcMin = 1.0
self.rcIncr = 0.5
self.rsVec = [0.0]
self.theta_s_Vec = [0.0]
self.rsVec_tor = [2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 6.0]
self.theta_s_Vec_tor = [0.0]
self.rcRadVec = [1.5, 2.0, 2.5, 3.0, 4.0, 6.0, 10.0]
self.rcAngVec = [4.5]
self.rcTorVec = [2.5, 3.5, 5.0, 10.0]
self.rs = 0.0 # parameter determining shape of the function
self.itaVec = [0.0001] # parameter determining shape of the function
self.lambda1 = 0.5 # parameter for angular symmetry function
self.chi = 0.5 # parameter for angular symmetry function
self.elemList = [oechem.OEElemNo_H, oechem.OEElemNo_C, oechem.OEElemNo_N, oechem.OEElemNo_O,
oechem.OEElemNo_F, oechem.OEElemNo_S, oechem.OEElemNo_Cl, "pc", "nc"]
def GetEnvAtomCoords(self, elem, refAtom, envMol, envAtoms):
elemEnvList = []
for envAtom in envAtoms:
if envAtom == refAtom:
continue
if elem == 'pc' and envAtom.GetFormalCharge() >= 1:
elemEnvList.append(envAtom)
elif elem == 'nc' and envAtom.GetFormalCharge() <= -1:
elemEnvList.append(envAtom)
elif envAtom.GetAtomicNum() == elem:
elemEnvList.append(envAtom)
coordsList = []
for elemEnvAtom in elemEnvList:
coords = oechem.OEDoubleArray(3)
if envMol.GetCoords(elemEnvAtom, coords):
coordsList.append(coords)
return coordsList
def GetTorsionEnvAtoms(self, elem, bgnAtom, endAtom, envMol):
elemEnvList = []
for envAtom in oechem.OEGetSubtree(bgnAtom, endAtom):
if elem == 'pc' and envAtom.GetFormalCharge() >= 1:
elemEnvList.append(envAtom)
elif elem == 'nc' and envAtom.GetFormalCharge() <= -1:
elemEnvList.append(envAtom)
elif envAtom.GetAtomicNum() == elem:
elemEnvList.append(envAtom)
coordsList = []
for elemEnvAtom in elemEnvList:
coords = oechem.OEDoubleArray(3)
if envMol.GetCoords(elemEnvAtom, coords):
coordsList.append(coords)
return elemEnvList, coordsList
def CalculateTorsionSymmetryFunction(self, envMol, num_iter):
'''
Takes refAtom coordinates from refMol as reference and calculates the angular symmetry
function using envMol atoms
Functional form is described in the DFT-NN review article by Behler, page 30, equations 25 and 26
'''
tsf = []
elemList = self.elemList
nullRet = []
bond = get_torsion_oebond(envMol)
if bond is None:
return nullRet
jAtom = bond.GetBgn()
jcoords = oechem.OEDoubleArray(3)
if not envMol.GetCoords(bond.GetBgn(), jcoords):
return nullRet
kAtom = bond.GetEnd()
kcoords = oechem.OEDoubleArray(3)
if not envMol.GetCoords(bond.GetEnd(), kcoords):
return nullRet
# tsf.append(bond.GetBgn().GetAtomicNum() * bond.GetEnd().GetAtomicNum());
for inum, iElem in enumerate(elemList):
if num_iter == 1:
iAtoms, icoords = self.GetTorsionEnvAtoms(iElem, bond.GetBgn(), bond.GetEnd(), envMol)
else:
iAtoms, icoords = self.GetTorsionEnvAtoms(iElem, bond.GetEnd(), bond.GetBgn(), envMol)
if len(icoords) == 0:
for ita in self.itaVec:
for rc in self.rcTorVec:
for num1, _ in enumerate(elemList):
if num1 < inum:
continue
tsf.append(0.0)
continue
_, _, _, rij, _ = GetPairwiseDistanceMatrix(icoords, [jcoords])
for lnum, lElem in enumerate(elemList):
if lnum < inum:
continue
if num_iter == 1:
lAtoms, lcoords = self.GetTorsionEnvAtoms(lElem, bond.GetEnd(), bond.GetBgn(), envMol)
else:
lAtoms, lcoords = self.GetTorsionEnvAtoms(lElem, bond.GetBgn(), bond.GetEnd(), envMol)
if len(lcoords) == 0:
for ita in self.itaVec:
for rc in self.rcTorVec:
tsf.append(0.0)
continue
_, _, _, rkl, _ = GetPairwiseDistanceMatrix([kcoords], lcoords)
_, _, _, ril, _ = GetPairwiseDistanceMatrix(icoords, lcoords)
theta_ijkl = GetThetaIJKLMatrix(envMol, iAtoms, jAtom, kAtom, lAtoms)
# angular symmetry function
for ita in self.itaVec:
for rc in self.rcTorVec:
rijMat = np.repeat(rij, rkl.size)
rijMat = rijMat.reshape(rij.size, rkl.size)
rklMat = np.repeat(rkl, rij.size)
rklMat = rklMat.reshape(rkl.size, rij.size)
rklMat = np.transpose(rklMat)
fcRij = np.select([rijMat <= rc, rijMat > rc],
[0.5 * (np.cos(np.pi * rijMat / rc) + 1.0), 0.0])
fcRkl = np.select([rklMat <= rc, rklMat > rc],
[0.5 * (np.cos(np.pi * rklMat / rc) + 1.0), 0.0])
fcRil = np.select([ril <= rc, ril > rc], [0.5 * (np.cos(np.pi * ril / rc) + 1.0), 0.0])
exponent = ita * (np.square(rijMat) + np.square(rklMat) + np.square(ril))
term1 = np.power((1 + self.lambda1 * np.cos(theta_ijkl)), self.chi)
term2 = np.exp(-exponent)
term3 = (fcRij * fcRkl) * fcRil
sumIL = np.sum(term1 * term2 * term3)
coeff = np.power(2, 1 - self.chi) * sumIL
tsf.append(coeff * jAtom.GetAtomicNum() * kAtom.GetAtomicNum())
a, b, c, d = get_torsion_oeatom_list(envMol)
tsf.append(oechem.OEGetDistance2(envMol, a, d))
tsf.append(oechem.OEGetDistance2(envMol, b, c))
tsf.append(oechem.OEGetTorsion(envMol, a, b, c, d))
tsf.append(a.GetAtomicNum() * d.GetAtomicNum())
tsf.append(b.GetAtomicNum() * c.GetAtomicNum())
return tsf
def GetTorsionCenterAsOEMol(self, mol):
refCoords = oechem.OEDoubleArray(3)
try:
torsion_atoms = get_torsion_oeatom_list(mol)
bgnCoords = mol.GetCoords(torsion_atoms[1])
endCoords = mol.GetCoords(torsion_atoms[2])
refCoords[0] = (bgnCoords[0] + endCoords[0]) / 2.0
refCoords[1] = (bgnCoords[1] + endCoords[1]) / 2.0
refCoords[2] = (bgnCoords[2] + endCoords[2]) / 2.0
except Exception as e:
print(e)
return None
refMol = oechem.OEMol()
refAtom = refMol.NewAtom(oechem.OEElemNo_C)
refMol.SetCoords(refAtom, refCoords)
refMol.Sweep()
return refMol
def CalculateSymmetryFunction(self, envMol):
'''
Takes refAtom coordinates from refMol as reference and calculates the angular symmetry
function using envMol atoms
Functional form is described in the DFT-NN review article by Behler, page 30, equations 25 and 26
'''
refMol = self.GetTorsionCenterAsOEMol(envMol)
_, b, c, _ = get_torsion_oeatom_list(envMol)
refAtom = refMol.GetAtom(oechem.OEHasAtomIdx(0))
rsf = []
asf = []
elemList = self.elemList
nullRet = [[], []]
icoords = oechem.OEDoubleArray(3)
if not refMol.GetCoords(refAtom, icoords):
return nullRet
for jnum, jElem in enumerate(elemList):
jcoords = self.GetEnvAtomCoords(jElem, refAtom, envMol, envMol.GetAtoms())
if len(jcoords) == 0:
for ita in self.itaVec:
for rc in self.rcRadVec:
rsf.append(0.0) # radial
for rc in self.rcAngVec:
for num1, _ in enumerate(elemList):
if num1 < jnum:
continue
asf.append(0.0) # angular
continue
#ijX, ijY, ijZ, rij, rij2 = GetPairwiseDistanceMatrix([icoords], jcoords)
_, _, _, rij, _ = GetPairwiseDistanceMatrix([icoords], jcoords)
for ita in self.itaVec:
expArg = ita * ((rij - self.rs) * (rij - self.rs))
expTerm = np.exp(-expArg)
# radial symmetry function
for rc in self.rcRadVec:
fc = np.select([rij <= rc, rij > rc], [0.5 * (np.cos(np.pi * rij / rc) + 1.0), 0.0])
prod = expTerm * fc
coeff = np.sum(prod)
rsf.append(coeff * b.GetAtomicNum() * c.GetAtomicNum())
for knum, kElem in enumerate(elemList):
if knum < jnum:
continue
kcoords = self.GetEnvAtomCoords(kElem, refAtom, envMol, envMol.GetAtoms())
if len(kcoords) == 0:
for ita in self.itaVec:
for rc in self.rcAngVec:
asf.append(0.0) # angular
continue
_, _, _, rik, _ = GetPairwiseDistanceMatrix([icoords], kcoords)
_, _, _, rjk, _ = GetPairwiseDistanceMatrix(jcoords, kcoords)
theta_ijk = GetThetaIJKMatrix([icoords], jcoords, kcoords)
# angular symmetry function
for ita in self.itaVec:
for rc in self.rcAngVec:
rijMat = np.repeat(rij, rik.size)
rijMat = rijMat.reshape(rij.size, rik.size)
rikMat = np.repeat(rik, rij.size)
rikMat = rikMat.reshape(rik.size, rij.size)
rikMat = np.transpose(rikMat)
fcRij = np.select([rijMat <= rc, rijMat > rc],
[0.5 * (np.cos(np.pi * rijMat / rc) + 1.0), 0.0])
fcRik = np.select([rikMat <= rc, rikMat > rc],
[0.5 * (np.cos(np.pi * rikMat / rc) + 1.0), 0.0])
fcRjk = np.select([rjk <= rc, rjk > rc], [0.5 * (np.cos(np.pi * rjk / rc) + 1.0), 0.0])
exponent = ita * (np.square(rijMat) + np.square(rikMat) + np.square(rjk))
term1 = np.power((1 + self.lambda1 * np.cos(theta_ijk)), self.chi)
term2 = np.exp(-exponent)
term3 = (fcRij * fcRjk) * fcRik
sumJK = np.sum(term1 * term2 * term3)
coeff = np.power(2, 1 - self.chi) * sumJK
asf.append(coeff * b.GetAtomicNum() * c.GetAtomicNum())
return rsf, asf
def get_sf_elements(mol):
sfObj = SymmetryFunction()
oechem.OEAssignFormalCharges(mol)
oechem.OEAssignHybridization(mol)
rsf, asf = sfObj.CalculateSymmetryFunction(mol)
tsf1 = sfObj.CalculateTorsionSymmetryFunction(mol, 1)
tsf2 = sfObj.CalculateTorsionSymmetryFunction(mol, 2)
tsf = []
for elem1, elem2 in zip(tsf1, tsf2):
tsf.append(elem1 + elem2)
sf_elements = rsf
sf_elements.extend(asf)
sf_elements.extend(tsf)
return sf_elements
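# Note: the returned feature vector is the concatenation of the radial (rsf) and
# angular (asf) symmetry functions computed around the torsion-bond midpoint,
# followed by the torsion symmetry functions (tsf) summed over both directions
# of the rotatable bond.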
| 40.507614 | 111 | 0.553446 | 11,286 | 0.707143 | 0 | 0 | 0 | 0 | 0 | 0 | 2,033 | 0.127381 |
9d5757c4a8bf60547e9dd883852158e386888c4b | 6,785 | py | Python | recommendation/recommendation.py | Jackson-Y/Machine-Learning | ea0a8c65ce93501d51fad2d73300dc0a37e2c1d8 | [
"MIT"
] | 4 | 2017-08-17T02:11:45.000Z | 2017-09-25T00:46:13.000Z | recommendation/recommendation.py | Jackson-Y/Machine-Learning | ea0a8c65ce93501d51fad2d73300dc0a37e2c1d8 | [
"MIT"
] | null | null | null | recommendation/recommendation.py | Jackson-Y/Machine-Learning | ea0a8c65ce93501d51fad2d73300dc0a37e2c1d8 | [
"MIT"
] | null | null | null | """ Candidate generation & ranking (LTR, Learning to Rank) """
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import argparse
from operator import itemgetter
from math import sqrt
import pandas as pd
import pymysql
from sklearn.model_selection import train_test_split
# from sklearn.metrics.pairwise import pairwise_distances
# from sklearn.metrics import mean_squared_error
class UserBasedCF(object):
""" 基于用户的协同过滤 """
def __init__(self, n_similarity_users=20, n_recommendation_articles=10):
self.n_similarity_users = n_similarity_users
self.n_recomendation_articles = n_recommendation_articles
self.train_data = {}
self.test_data = {}
self.user_similarity_matrix = {}
self.article_count = 0
print("Number of similarity users = {}".format(self.n_similarity_users))
print("Number of recommended articles = {}".format(self.n_recomendation_articles))
def store_data_mysql2csv(self):
"""Store data from mysql to csv."""
sql = 'select uid,lid,ImportantDegree,LocalModifyTime from 20171020_rating'
conn = pymysql.connect(host='192.168.106.231', \
user='root', password='cnkidras', \
db='recomm', charset='utf8', use_unicode=True)
df = pd.read_sql(sql, con=conn)
print(df.head())
df.to_csv("data.csv", index=False)
conn.close()
def load_data(self):
"""Load data from csv."""
if os.path.isfile('data.csv'):
if os.path.getsize('data.csv') > 0:
return
self.store_data_mysql2csv()
header = ['uid', 'lid', 'ImportantDegree', 'LocalModifyTime']
df = pd.read_csv('data.csv', sep=',', names=header, low_memory=False)
train_data, test_data = train_test_split(df, test_size=0.2)
train_data_len = 0
test_data_len = 0
for line in train_data.itertuples():
if line[1] not in self.train_data:
self.train_data.setdefault(line[1], {})
self.train_data[line[1]][line[2]] = line[3]
train_data_len += 1
for line in test_data.itertuples():
if line[1] not in self.test_data:
self.test_data.setdefault(line[1], {})
self.test_data[line[1]][line[2]] = line[3]
test_data_len += 1
print('Train data length = %s' % train_data_len)
print('Test data length = %s' % test_data_len)
def calc_user_similarity(self):
""" 计算用户相似度 """
article_user = {}
for uid, lids in self.train_data.items():
for lid in lids:
if lid not in article_user:
article_user[lid] = set()
article_user[lid].add(uid)
self.article_count = len(article_user)
print("Total article numbers = %d" % self.article_count)
for lid, uids in article_user.items():
for uid1 in uids:
for uid2 in uids:
if uid1 == uid2:
continue
self.user_similarity_matrix.setdefault(uid1, {})
self.user_similarity_matrix[uid1].setdefault(uid2, 0)
self.user_similarity_matrix[uid1][uid2] += 1
for u, related_users in self.user_similarity_matrix.items():
for v, count in related_users.items():
self.user_similarity_matrix[u][v] = count / sqrt(len(self.train_data[u]) * len(self.train_data[v]))
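    # Worked example of the similarity used above: the co-occurrence counts are
    # normalised to w(u, v) = |N(u) ∩ N(v)| / sqrt(|N(u)| * |N(v)|), so 3 shared
    # articles between reading lists of length 4 and 9 give 3 / sqrt(36) = 0.5.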
def recommendation(self, user):
""" 为用户user推荐文献,返回推荐列表及评分。 """
K = self.n_similarity_users
N = self.n_recomendation_articles
rank = {}
print("user: ", user)
# watched_articles = self.train_data[user]
watched_articles = self.train_data.get(user, {})
        if not watched_articles:
print(" [x] New User. ")
return []
for v, wuv in sorted(self.user_similarity_matrix[user].items(), key=itemgetter(1), reverse=True)[0:K]:
for article in self.train_data[v]:
if article in watched_articles:
continue
rank.setdefault(article, 0)
rank[article] += wuv
return sorted(rank.items(), key=itemgetter(1), reverse=True)
def evaluate(self):
""" 计算准确率、召回率、覆盖率 """
N = self.n_recomendation_articles
hit = 0
recommend_count = 0
test_count = 0
all_rec_article = set()
for i, user, in enumerate(self.train_data):
test_articles = self.test_data.get(user, {})
recommend_articles = self.recommendation(user)
for article, w in recommend_articles:
if article in test_articles:
hit += 1
all_rec_article.add(article)
recommend_count += N
            test_count += len(test_articles)
precision = hit / (1.0 * recommend_count)
recall = hit / (1.0 * test_count)
coverage = len(all_rec_article) / (1.0 * self.article_count)
print('precision= %.4f\t recall=%.4f\t coverage=%.4f' % (precision, recall, coverage))
class PrintArticles(object):
""" print class """
def __init__(self, lid_list):
self.lid_list = lid_list
def output(self):
""" 在数据库中查找lid对应的文献标题,并打印。 """
conn = pymysql.connect(host='192.168.106.231', \
user='root', password='cnkidras', \
db='recomm', charset='utf8', use_unicode=True)
for score_tuple in self.lid_list:
sql = 'select lid,UserID,title from test where lid = %s;' % score_tuple[0]
df = pd.read_sql(sql, con=conn)
print(df)
conn.close()
FLAGS = None
def main(_):
"""main function"""
user_cf = UserBasedCF(20, 10)
user_cf.load_data()
user_cf.calc_user_similarity()
recommended_articled = user_cf.recommendation(FLAGS.uid)
print(recommended_articled[0:10])
out = PrintArticles(recommended_articled[0:10])
out.output()
# user_cf.evaluate()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--uid",
type=int,
default=80871,
help="The user who is going to be recommended articles."
)
parser.add_argument(
"--n",
type=int,
default=10,
help="Number of recommended articles."
)
FLAGS, unparsed = parser.parse_known_args()
print("{} {}".format(sys.argv[0], unparsed))
print(FLAGS)
main(FLAGS)
| 36.875 | 115 | 0.592336 | 5,535 | 0.797665 | 0 | 0 | 0 | 0 | 0 | 0 | 1,268 | 0.182735 |
9d59344dd6f980db538f0cd26f71a979f4b914e4 | 1,592 | py | Python | orchestration/dags/twitter_streaming.py | amommendes/tweetstream | ef09928a4f3344210c597388332d18a53149bb41 | [
"Apache-2.0"
] | null | null | null | orchestration/dags/twitter_streaming.py | amommendes/tweetstream | ef09928a4f3344210c597388332d18a53149bb41 | [
"Apache-2.0"
] | null | null | null | orchestration/dags/twitter_streaming.py | amommendes/tweetstream | ef09928a4f3344210c597388332d18a53149bb41 | [
"Apache-2.0"
] | null | null | null | from datetime import timedelta
from airflow import DAG
from airflow.utils.dates import days_ago
from airflow.operators.python_operator import PythonOperator
from tweetstream.consumers.twitter_streaming import TwitterStreamingConsumer
from tweetstream.clients.spark import SparkClient
default_args = {
"owner": "tweeetstream",
"depends_on_past": False,
"start_date": days_ago(1),
"email": ["[email protected]"],
"email_on_failure": False,
"email_on_retry": False,
"retries": 1,
"retry_delay": timedelta(minutes=5),
}
def main():
spark_client = SparkClient(
session_config={
"spark.jars": "/usr/local/airflow/dags/tweetstream/libs/spark-sql-kafka-0-10_2.12-3.0.1.jar,"
"/usr/local/airflow/dags/tweetstream/libs/kafka-clients-2.5.0.jar,"
"/usr/local/airflow/dags/tweetstream/libs/spark-token-provider-kafka-0-10_2.12-3.0.1.jar,"
"/usr/local/airflow/dags/tweetstream/libs/commons-pool2-2.8.0.jar",
"failOnDataLoss": "false",
}
)
spark = spark_client.get_session()
consumer = TwitterStreamingConsumer(
spark=spark,
output_path="hdfs://hadoop:9000/twitter/consumer",
checkpoint="hdfs://hadoop:9000/twitter/checkpoint",
)
consumer.start()
dag = DAG(
dag_id="twitter_streaming",
default_args=default_args,
description="Tweets Streaming Consumer",
schedule_interval=timedelta(days=1),
)
start_job_task = PythonOperator(
dag=dag,
task_id="start_streaming",
python_callable=main,
execution_timeout=None,
)
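# Illustrative note (not part of the original DAG): with schedule_interval=timedelta(days=1)
# the single "start_streaming" task runs main() once per day. It can also be triggered
# manually from the CLI, e.g. `airflow trigger_dag twitter_streaming` (Airflow 1.10.x,
# matching the python_operator import above) or `airflow dags trigger twitter_streaming`
# (Airflow 2.x).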
9d59ba6b91ae4d068be41f7bfb3634b177f8ade2 | 217 | py | Python | tests/expr/expr09.py | repos: ktok07b6/polyphony, jesseclin/polyphony | license: MIT | stars: 83 | issues: 4 | forks: 11
from polyphony import testbench
def expr09(a, b):
return a ^ b
@testbench
def test():
assert 1 == expr09(0b1000, 0b1001)
assert 3 == expr09(0b1000, 0b1011)
assert 1 == expr09(0b1010, 0b1011)
test()
9d5d5a4039dbeb89722961536cacebbce65b4ec3 | 1,059 | py | Python | setup.py | repo: fg1/ipynb_format | license: BSD-3-Clause | stars: null | issues: null | forks: null
#!/usr/bin/python
from setuptools import setup, find_packages
from codecs import open
with open('README.rst', 'r', 'utf-8') as fd:
long_description = fd.read()
setup(name='ipynb_format',
version='0.1.1',
description='A code formatter for python code in ipython notebooks',
long_description=long_description,
url='https://github.com/fg1/ipynb_format',
author='fg1',
license='BSD',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
],
keywords='ipython notebook',
packages=find_packages(),
install_requires=['yapf'],
entry_points={
'console_scripts': [
'ipynb_format=ipynb_format:cli',
],
}, )
9d5e11c9180f5fb664452c5f269722fdf9e6a4db | 140 | py | Python | Homework/Homework2/2_3.py | repo: 404nofound/CS521-Info-Str-Python | license: Apache-2.0 | stars: null | issues: null | forks: null
# -*- coding: utf-8 -*-
feet = eval(input("Enter a value for feet: "))
meter = feet * 0.305
print (feet, "feet is %.4f meters" %(meter))
9d5e75f8cb60c04470b0412ae613020592c5aad7 | 16,072 | py | Python | nn/units/ceecnet.py | repos: feevos/ceecnet, xautdestiny/ceecnet | licenses: BSD-3-Clause, MIT | stars: 45 | issues: 10 | forks: 14
from mxnet import gluon
from mxnet.gluon import HybridBlock
from ceecnet.nn.layers.conv2Dnormed import *
from ceecnet.utils.get_norm import *
from ceecnet.nn.layers.attention import *
class ResizeLayer(HybridBlock):
"""
Applies bilinear up/down sampling in spatial dims and changes number of filters as well
"""
def __init__(self, nfilters, height, width, _norm_type = 'BatchNorm', norm_groups=None, **kwards):
super().__init__(**kwards)
self.height=height
self.width = width
with self.name_scope():
self.conv2d = Conv2DNormed(channels=nfilters,kernel_size=3,padding=1, _norm_type=_norm_type, norm_groups = norm_groups, **kwards)
def hybrid_forward(self, F, input):
out = F.contrib.BilinearResize2D(input,height=self.height,width=self.width)
out = self.conv2d(out)
return out
class ExpandLayer(HybridBlock):
def __init__(self,nfilters, _norm_type = 'BatchNorm', norm_groups=None, ngroups=1,**kwards):
super().__init__(**kwards)
with self.name_scope():
self.conv1 = Conv2DNormed(channels=nfilters,kernel_size=3,padding=1,groups=ngroups, _norm_type=_norm_type, norm_groups = norm_groups, **kwards)
self.conv2 = Conv2DNormed(channels=nfilters,kernel_size=3,padding=1,groups=ngroups,_norm_type=_norm_type, norm_groups = norm_groups,**kwards)
def hybrid_forward(self, F, input):
out = F.contrib.BilinearResize2D(input,scale_height=2.,scale_width=2.)
out = self.conv1(out)
out = F.relu(out)
out = self.conv2(out)
out = F.relu(out)
return out
class ExpandNCombine(HybridBlock):
def __init__(self,nfilters, _norm_type = 'BatchNorm', norm_groups=None,ngroups=1,**kwards):
super().__init__(**kwards)
with self.name_scope():
self.conv1 = Conv2DNormed(channels=nfilters,kernel_size=3,padding=1,groups=ngroups,_norm_type=_norm_type, norm_groups = norm_groups,**kwards)
self.conv2 = Conv2DNormed(channels=nfilters,kernel_size=3,padding=1,groups=ngroups,_norm_type=_norm_type, norm_groups = norm_groups,**kwards)
def hybrid_forward(self, F, input1, input2):
out = F.contrib.BilinearResize2D(input1,scale_height=2.,scale_width=2.)
out = self.conv1(out)
out = F.relu(out)
out2 = self.conv2(F.concat(out,input2,dim=1))
out2 = F.relu(out2)
return out2
class CEEC_unit_v1(HybridBlock):
def __init__(self, nfilters, nheads= 1, ngroups=1, norm_type='BatchNorm', norm_groups=None, ftdepth=5, **kwards):
super().__init__(**kwards)
with self.name_scope():
nfilters_init = nfilters//2
self.conv_init_1 = Conv2DNormed(channels=nfilters_init, kernel_size=3,padding=1,strides=1, groups=ngroups, _norm_type=norm_type, norm_groups=norm_groups, **kwards)
self.compr11 = Conv2DNormed(channels=nfilters_init*2, kernel_size=3,padding=1,strides=2, groups=ngroups, _norm_type=norm_type, norm_groups=norm_groups, **kwards)
self.compr12 = Conv2DNormed(channels=nfilters_init*2, kernel_size=3,padding=1,strides=1, groups=ngroups, _norm_type=norm_type, norm_groups=norm_groups,**kwards)
self.expand1 = ExpandNCombine(nfilters_init,_norm_type = norm_type, norm_groups=norm_groups,ngroups=ngroups)
# ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------
self.conv_init_2 = Conv2DNormed(channels=nfilters_init, kernel_size=3,padding=1,strides=1, groups=ngroups, _norm_type=norm_type, norm_groups=norm_groups, **kwards)#half size
self.expand2 = ExpandLayer(nfilters_init//2 ,_norm_type = norm_type, norm_groups=norm_groups,ngroups=ngroups )
self.compr21 = Conv2DNormed(channels=nfilters_init, kernel_size=3,padding=1,strides=2, groups=ngroups, _norm_type=norm_type, norm_groups=norm_groups,**kwards)
self.compr22 = Conv2DNormed(channels=nfilters_init, kernel_size=3,padding=1,strides=1, groups=ngroups, _norm_type=norm_type, norm_groups=norm_groups,**kwards)
# Will join with master input with concatenation -- IMPORTANT: ngroups = 1 !!!!
self.collect = Conv2DNormed(channels=nfilters, kernel_size=3,padding=1,strides=1, groups=1, _norm_type=norm_type, norm_groups=norm_groups,**kwards)
self.att = FTAttention2D(nkeys=nfilters,nheads=nheads,norm=norm_type, norm_groups = norm_groups,ftdepth=ftdepth)
self.ratt122 = RelFTAttention2D(nkeys=nfilters_init, nheads=nheads,norm=norm_type, norm_groups = norm_groups,ftdepth=ftdepth)
self.ratt211 = RelFTAttention2D(nkeys=nfilters_init, nheads=nheads,norm=norm_type, norm_groups = norm_groups,ftdepth=ftdepth)
self.gamma1 = self.params.get('gamma1', shape=(1,), init=mx.init.Zero())
self.gamma2 = self.params.get('gamma2', shape=(1,), init=mx.init.Zero())
self.gamma3 = self.params.get('gamma3', shape=(1,), init=mx.init.Zero())
def hybrid_forward(self, F, input, gamma1, gamma2, gamma3):
# =========== UNet branch ===========
out10 = self.conv_init_1(input)
out1 = self.compr11(out10)
out1 = F.relu(out1)
out1 = self.compr12(out1)
out1 = F.relu(out1)
out1 = self.expand1(out1,out10)
out1 = F.relu(out1)
# =========== \capNet branch ===========
        input = F.identity(input)  # Works around an MXNet bug
out20 = self.conv_init_2(input)
out2 = self.expand2(out20)
out2 = F.relu(out2)
out2 = self.compr21(out2)
out2 = F.relu(out2)
out2 = self.compr22(F.concat(out2,out20,dim=1))
out2 = F.relu(out2)
att = F.broadcast_mul(gamma1,self.att(input))
ratt122 = F.broadcast_mul(gamma2,self.ratt122(out1,out2,out2))
ratt211 = F.broadcast_mul(gamma3,self.ratt211(out2,out1,out1))
ones1 = F.ones_like(out10)
ones2 = F.ones_like(input)
# Enhanced output of 1, based on memory of 2
out122 = F.broadcast_mul(out1,ones1 + ratt122)
# Enhanced output of 2, based on memory of 1
out211 = F.broadcast_mul(out2,ones1 + ratt211)
out12 = F.relu(self.collect(F.concat(out122,out211,dim=1)))
# Emphasize residual output from memory on input
out_res = F.broadcast_mul(input + out12, ones2 + att)
return out_res
# ======= Definitions for CEEC unit v2 (replace concatenations with Fusion =========================
# -------------------------------------- helper functions -------------------------------------------
class Fusion(HybridBlock):
def __init__(self,nfilters, kernel_size=3, padding=1,nheads=1, norm = 'BatchNorm', norm_groups=None, ftdepth=5,**kwards):
super().__init__(**kwards)
with self.name_scope():
self.fuse = Conv2DNormed(nfilters,kernel_size= kernel_size, padding = padding, _norm_type= norm, norm_groups=norm_groups, groups=nheads,**kwards)
# Or shall I use the same?
self.relatt12 = RelFTAttention2D(nkeys=nfilters, kernel_size=kernel_size, padding=padding, nheads=nheads, norm =norm, norm_groups=norm_groups,ftdepth=ftdepth,**kwards)
self.relatt21 = RelFTAttention2D(nkeys=nfilters, kernel_size=kernel_size, padding=padding, nheads=nheads, norm =norm, norm_groups=norm_groups,ftdepth=ftdepth,**kwards)
self.gamma1 = self.params.get('gamma1', shape=(1,), init=mx.init.Zero())
self.gamma2 = self.params.get('gamma2', shape=(1,), init=mx.init.Zero())
def hybrid_forward(self, F, input_t1, input_t2, gamma1, gamma2):
# These inputs must have the same dimensionality , t1, t2
relatt12 = F.broadcast_mul(gamma1,self.relatt12(input_t1,input_t2,input_t2))
relatt21 = F.broadcast_mul(gamma2,self.relatt21(input_t2,input_t1,input_t1))
ones = F.ones_like(input_t1)
# Enhanced output of 1, based on memory of 2
out12 = F.broadcast_mul(input_t1,ones + relatt12)
# Enhanced output of 2, based on memory of 1
out21 = F.broadcast_mul(input_t2,ones + relatt21)
fuse = self.fuse(F.concat(out12, out21,dim=1))
fuse = F.relu(fuse)
return fuse
class CATFusion(HybridBlock):
"""
Alternative to concatenation followed by normed convolution: improves performance.
"""
def __init__(self,nfilters_out, nfilters_in, kernel_size=3, padding=1,nheads=1, norm = 'BatchNorm', norm_groups=None, ftdepth=5,**kwards):
super().__init__(**kwards)
with self.name_scope():
self.fuse = Conv2DNormed(nfilters_out,kernel_size= kernel_size, padding = padding, _norm_type= norm, norm_groups=norm_groups, groups=nheads,**kwards)
# Or shall I use the same?
self.relatt12 = RelFTAttention2D(nkeys=nfilters_in, kernel_size=kernel_size, padding=padding, nheads=nheads, norm =norm, norm_groups=norm_groups,ftdepth=ftdepth,**kwards)
self.relatt21 = RelFTAttention2D(nkeys=nfilters_in, kernel_size=kernel_size, padding=padding, nheads=nheads, norm =norm, norm_groups=norm_groups,ftdepth=ftdepth,**kwards)
self.gamma1 = self.params.get('gamma1', shape=(1,), init=mx.init.Zero())
self.gamma2 = self.params.get('gamma2', shape=(1,), init=mx.init.Zero())
def hybrid_forward(self, F, input_t1, input_t2, gamma1, gamma2):
# These inputs must have the same dimensionality , t1, t2
relatt12 = F.broadcast_mul(gamma1,self.relatt12(input_t1,input_t2,input_t2))
relatt21 = F.broadcast_mul(gamma2,self.relatt21(input_t2,input_t1,input_t1))
ones = F.ones_like(input_t1)
# Enhanced output of 1, based on memory of 2
out12 = F.broadcast_mul(input_t1,ones + relatt12)
# Enhanced output of 2, based on memory of 1
out21 = F.broadcast_mul(input_t2,ones + relatt21)
fuse = self.fuse(F.concat(out12, out21,dim=1))
fuse = F.relu(fuse)
return fuse
class combine_layers_wthFusion(HybridBlock):
def __init__(self,nfilters, nheads=1, _norm_type = 'BatchNorm', norm_groups=None,ftdepth=5, **kwards):
HybridBlock.__init__(self,**kwards)
with self.name_scope():
self.conv1 = Conv2DNormed(channels=nfilters,kernel_size=3,padding=1, groups=nheads, _norm_type=_norm_type, norm_groups = norm_groups, **kwards)# restore help
self.conv3 = Fusion(nfilters=nfilters, kernel_size=3, padding=1, nheads=nheads, norm=_norm_type, norm_groups = norm_groups, ftdepth=ftdepth,**kwards) # process
def hybrid_forward(self,F,_layer_lo, _layer_hi):
up = F.contrib.BilinearResize2D(_layer_lo,scale_height=2.,scale_width=2.)
up = self.conv1(up)
up = F.relu(up)
x = self.conv3(up,_layer_hi)
return x
class ExpandNCombine_V3(HybridBlock):
def __init__(self,nfilters, _norm_type = 'BatchNorm', norm_groups=None,ngroups=1,ftdepth=5,**kwards):
super().__init__(**kwards)
with self.name_scope():
self.conv1 = Conv2DNormed(channels=nfilters,kernel_size=3,padding=1,groups=ngroups,_norm_type=_norm_type, norm_groups = norm_groups,**kwards)# restore help
self.conv2 = Conv2DNormed(channels=nfilters,kernel_size=3,padding=1,groups=ngroups,_norm_type=_norm_type, norm_groups = norm_groups,**kwards)# restore help
self.conv3 = Fusion(nfilters=nfilters,kernel_size=3,padding=1,nheads=ngroups,norm=_norm_type, norm_groups = norm_groups,ftdepth=ftdepth,**kwards) # process
def hybrid_forward(self, F, input1, input2):
out = F.contrib.BilinearResize2D(input1,scale_height=2.,scale_width=2.)
out = self.conv1(out)
out1 = F.relu(out)
out2 = self.conv2(input2)
out2 = F.relu(out2)
outf = self.conv3(out1,out2)
outf = F.relu(outf)
return outf
# -------------------------------------------------------------------------------------------------------------------
class CEEC_unit_v2(HybridBlock):
def __init__(self, nfilters, nheads= 1, ngroups=1, norm_type='BatchNorm', norm_groups=None, ftdepth=5, **kwards):
super().__init__(**kwards)
with self.name_scope():
nfilters_init = nfilters//2
self.conv_init_1 = Conv2DNormed(channels=nfilters_init, kernel_size=3,padding=1,strides=1, groups=ngroups, _norm_type=norm_type, norm_groups=norm_groups, **kwards)#half size
self.compr11 = Conv2DNormed(channels=nfilters_init*2, kernel_size=3,padding=1,strides=2, groups=ngroups, _norm_type=norm_type, norm_groups=norm_groups, **kwards)#half size
self.compr12 = Conv2DNormed(channels=nfilters_init*2, kernel_size=3,padding=1,strides=1, groups=ngroups, _norm_type=norm_type, norm_groups=norm_groups,**kwards)# process
self.expand1 = ExpandNCombine_V3(nfilters_init,_norm_type = norm_type, norm_groups=norm_groups,ngroups=ngroups,ftdepth=ftdepth) # restore original size + process
self.conv_init_2 = Conv2DNormed(channels=nfilters_init, kernel_size=3,padding=1,strides=1, groups=ngroups, _norm_type=norm_type, norm_groups=norm_groups, **kwards)#half size
self.expand2 = ExpandLayer(nfilters_init//2 ,_norm_type = norm_type, norm_groups=norm_groups,ngroups=ngroups )
self.compr21 = Conv2DNormed(channels=nfilters_init, kernel_size=3,padding=1,strides=2, groups=ngroups, _norm_type=norm_type, norm_groups=norm_groups,**kwards)
self.compr22 = Fusion(nfilters=nfilters_init, kernel_size=3,padding=1, nheads=ngroups, norm=norm_type, norm_groups=norm_groups,ftdepth=ftdepth,**kwards)
self.collect = CATFusion(nfilters_out=nfilters, nfilters_in=nfilters_init, kernel_size=3,padding=1,nheads=1, norm=norm_type, norm_groups=norm_groups,ftdepth=ftdepth,**kwards)
self.att = FTAttention2D(nkeys=nfilters,nheads=nheads,norm=norm_type, norm_groups = norm_groups, ftdepth=ftdepth)
self.ratt122 = RelFTAttention2D(nkeys=nfilters_init, nheads=nheads,norm=norm_type, norm_groups = norm_groups, ftdepth=ftdepth)
self.ratt211 = RelFTAttention2D(nkeys=nfilters_init, nheads=nheads,norm=norm_type, norm_groups = norm_groups, ftdepth=ftdepth)
self.gamma1 = self.params.get('gamma1', shape=(1,), init=mx.init.Zero())
self.gamma2 = self.params.get('gamma2', shape=(1,), init=mx.init.Zero())
self.gamma3 = self.params.get('gamma3', shape=(1,), init=mx.init.Zero())
def hybrid_forward(self, F, input, gamma1, gamma2, gamma3):
# =========== UNet branch ===========
out10 = self.conv_init_1(input)
out1 = self.compr11(out10)
out1 = F.relu(out1)
#print (out1.shape)
out1 = self.compr12(out1)
out1 = F.relu(out1)
#print (out1.shape)
out1 = self.expand1(out1,out10)
out1 = F.relu(out1)
# =========== \capNet branch ===========
        input = F.identity(input)  # Works around an MXNet bug
out20 = self.conv_init_2(input)
out2 = self.expand2(out20)
out2 = F.relu(out2)
out2 = self.compr21(out2)
out2 = F.relu(out2)
out2 = self.compr22(out2,out20)
        input = F.identity(input)  # Works around an MXNet bug
att = F.broadcast_mul(gamma1,self.att(input))
ratt122 = F.broadcast_mul(gamma2,self.ratt122(out1,out2,out2))
ratt211 = F.broadcast_mul(gamma3,self.ratt211(out2,out1,out1))
ones1 = F.ones_like(out10)
ones2 = F.ones_like(input)
# Enhanced output of 1, based on memory of 2
out122 = F.broadcast_mul(out1,ones1 + ratt122)
# Enhanced output of 2, based on memory of 1
out211 = F.broadcast_mul(out2,ones1 + ratt211)
out12 = self.collect(out122,out211) # includes relu, it's for fusion
out_res = F.broadcast_mul(input + out12, ones2 + att)
return out_res
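# -----------------------------------------------------------------------------------------------------------------------
# Illustrative usage (not part of the original module): a minimal shape smoke test for the CEEC units.
# Assumptions: MXNet/Gluon is installed, the `ceecnet` package imports above resolve, and the input
# shape / nfilters values below are arbitrary examples. CEEC_unit_v2 is used the same way.
if __name__ == "__main__":
    import mxnet as mx

    unit = CEEC_unit_v1(nfilters=32, nheads=1, ngroups=1)
    unit.initialize()
    x = mx.nd.random.uniform(shape=(2, 32, 64, 64))
    y = unit(x)
    # The unit is residual: channel count and spatial size of the output match the input.
    print(y.shape)  # expected: (2, 32, 64, 64)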
9d6069e2ba0263497aa9f814cd08018989e4473f | 1,928 | py | Python | reviewboard/reviews/search_indexes.py | repos: znick/reviewboard, klpyang/reviewboard | license: MIT | stars: 1 | issues: null | forks: 1
from django.db.models import Q
from haystack import indexes
from reviewboard.reviews.models import ReviewRequest
class ReviewRequestIndex(indexes.SearchIndex, indexes.Indexable):
"""A Haystack search index for Review Requests."""
# By Haystack convention, the full-text template is automatically
# referenced at
# reviewboard/templates/search/indexes/reviews/reviewrequest_text.txt
text = indexes.CharField(document=True, use_template=True)
# We shouldn't use 'id' as a field name because it's by default reserved
# for Haystack. Hiding it will cause duplicates when updating the index.
review_request_id = indexes.IntegerField(model_attr='id')
summary = indexes.CharField(model_attr='summary')
description = indexes.CharField(model_attr='description')
testing_done = indexes.CharField(model_attr='testing_done')
bug = indexes.CharField(model_attr='bugs_closed')
username = indexes.CharField(model_attr='submitter__username')
author = indexes.CharField(model_attr='submitter__get_full_name')
file = indexes.CharField()
def get_model(self):
"""Returns the Django model for this index."""
return ReviewRequest
def get_updated_field(self):
return 'last_updated'
def index_queryset(self, using=None):
"""Index only public pending and submitted review requests."""
queryset = self.get_model().objects.public(
status=None,
extra_query=Q(status='P') | Q(status='S'))
queryset = queryset.select_related('submitter', 'diffset_history')
queryset = queryset.prefetch_related(
'diffset_history__diffsets__files')
return queryset
def prepare_file(self, obj):
return set([
(filediff.source_file, filediff.dest_file)
for diffset in obj.diffset_history.diffsets.all()
for filediff in diffset.files.all()
])
19b2caec75b18b0aa3e0597b5caa0b0c55ce8cad | 7,365 | py | Python | gpss/transaction.py | repo: martendo/gpss.py | license: MIT | stars: 2 | issues: null | forks: null
from .statement import Statement, StatementType
from .event import Event
from ._helpers import debugmsg, simulation_error
class TransactionGenerator:
def __init__(self, simulation, block_num, operands):
self.simulation = simulation
self.block = self.simulation.program[block_num]
self.start_block = block_num + 1
self.operands = operands
self.generated = 0
def __str__(self):
return f"TransactionGenerator({','.join(map(str, self.operands))})"
def prime(self):
# Add initial Transaction generation event using the Offset
# Interval
self.add_next_event(self.operands[2])
def add_next_event(self, time=None):
# If reached generation Limit Count, stop
if (self.operands[3] is not None
and self.generated >= self.operands[3]):
return
# Add event to event list to generate next Transaction
if time is None:
time = self.simulation.time + self.operands[0]
if self.operands[1] != 0:
time += self.simulation.rngs[1].randint(
-self.operands[1], +self.operands[1],
)
if time < self.simulation.time:
simulation_error(self.simulation.parser.infile,
self.block.linenum,
"Cannot GENERATE a Transaction in a negative amount "
f"of time ({time - self.simulation.time})")
elif time == self.simulation.time and time is None:
# Generate immediately, no need to add to event list
self.generate()
else:
self.simulation.add_event(Event(time, self.generate))
def generate(self):
# Generate a new Transaction
debugmsg("generate:", self.simulation.time, self.operands)
transaction = Transaction(self.simulation, self.start_block,
self.operands[4])
self.simulation.transactions.add(transaction)
self.generated += 1
# Add next Transaction generation event
self.add_next_event()
transaction.update()
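# Illustrative note (not part of the original module): for a program line such as
#     GENERATE    10,2,5,100,1
# the parsed operands are (interval=10, spread=2, offset=5, limit=100, priority=1):
# the first Transaction is created at time 5, later ones every 10±2 time units,
# until 100 Transactions have been generated.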
class Transaction:
def __init__(self, simulation, start_block, priority):
self.simulation = simulation
self.current_block = start_block
self.priority = priority
def __str__(self):
return f"Transaction({self.priority})"
def update(self):
while True:
# Execute next block
block = self.simulation.program[self.current_block]
self.current_block += 1
self.current_linenum = block.linenum
if block.type is StatementType.TERMINATE:
self.simulation.terminate(self, block.operands[0])
return
elif block.type is StatementType.QUEUE:
self.simulation.queues[block.operands[0]].join(self,
block.operands[1])
elif block.type is StatementType.DEPART:
self.simulation.queues[block.operands[0]].depart(self,
block.operands[1])
elif block.type is StatementType.ADVANCE:
interval, spread = block.operands[0:2]
# Add event for end of delay
time = self.simulation.time + interval
if spread != 0:
time += self.simulation.rngs[1].randint(
-spread, +spread,
)
if time < self.simulation.time:
simulation_error(self.simulation.parser.infile,
block.linenum,
"Cannot ADVANCE a negative amount of time "
f"({time - self.simulation.time})")
elif time == self.simulation.time:
# ADVANCE 0 -> no-op
continue
self.simulation.add_event(Event(time, self.update))
return
elif block.type is StatementType.SEIZE:
# Use Facility or enter Delay Chain if busy
if not self.simulation.facilities[block.operands[0]].seize(self):
# Facility is busy -> wait
return
elif block.type is StatementType.RELEASE:
self.simulation.facilities[block.operands[0]].release(self)
elif block.type is StatementType.ENTER:
# Enter Storage or enter Delay Chain if cannot satisfy
# demand
try:
if not(self.simulation.storages[block.operands[0]]
.enter(self, block.operands[1])):
# Not enough Storage available
return
except KeyError:
simulation_error(self.simulation.parser.infile,
block.linenum,
f"No Storage named \"{block.operands[0]}\"")
elif block.type is StatementType.LEAVE:
try:
self.simulation.storages[block.operands[0]].leave(
self, block.operands[1])
except KeyError:
simulation_error(self.simulation.parser.infile,
block.linenum,
f"No Storage named \"{block.operands[0]}\"")
elif block.type is StatementType.TRANSFER:
if block.operands[0] is None:
# Unconditional transfer mode
self.current_block = (
self.simulation.labels[block.operands[1]].number)
elif block.operands[0] == "BOTH":
# BOTH mode
if block.operands[1] != "":
b_block = (
self.simulation.labels[block.operands[1]])
else:
# Use sequential Block
b_block = (
self.simulation.program[self.current_block])
c_block = self.simulation.labels[block.operands[2]]
if not b_block.refuse(self.simulation):
self.current_block = b_block.number
elif not c_block.refuse(self.simulation):
self.current_block = c_block.number
else:
# Refused entry to both Blocks, stay on this one
self.current_block -= 1
self.simulation.current_events.append(self.update)
return
else:
# Statistical transfer mode
if self.simulation.rngs[1].random() < block.operands[0]:
new_block = block.operands[2]
else:
new_block = block.operands[1]
if new_block == "":
# Continue to sequential Block
continue
self.current_block = (
self.simulation.labels[new_block].number)
19b32c34ea299311dabdf3d678344f668cb1f1a4 | 234 | py | Python | stone/config.py | repo: ichengplus/mpmatrix | license: Apache-2.0 | stars: null | issues: 3 | forks: null
REDIS_URL = "redis://redis:6379/0"
DEBUG = True
TESTING = False
JOBS = [
{
'id': 'actoken_refresh',
'func': 'actoken:refresh',
'args': None,
'trigger': 'interval',
'seconds': 7000
}
]
19b3b12f916bfa71763e1f5555965f2dffc3a223 | 727 | py | Python | utils/update.py | repo: adavila0703/warehouse-hub | license: MIT | stars: null | issues: null | forks: null
from shutil import copy, copytree, rmtree
import pathlib
import os
import time
def update():
"""Update is a script to auto update all the files that the user is using"""
print('Warehouse Hub is updating, do not close this window...')
time.sleep(3)
print('Applying patch...')
time.sleep(1)
copy('C:/warehousehub/warehousehub.exe', pathlib.Path().absolute())
rmtree(f'{pathlib.Path().absolute()}/templates')
copytree('C:/warehousehub/templates', f'{pathlib.Path().absolute()}/templates')
print('Patch Completed!')
print('Warehouse Hub is restarting, please wait...')
os.system(f'cmd /c "{pathlib.Path().absolute()}/warehousehub.exe"')
if __name__ == '__main__':
update()
19b3f6aeb28dd07d2770e4ea600d2a99c0c06e65 | 3,134 | py | Python | train_video.py | repo: jacke121/MBMD | license: MIT | stars: 220 | issues: 12 | forks: 60
import functools
import tensorflow as tf
from core import trainer_video, input_reader
from core.model_builder import build_man_model
from google.protobuf import text_format
from object_detection.builders import input_reader_builder
from object_detection.protos import input_reader_pb2
from object_detection.protos import model_pb2
from object_detection.protos import pipeline_pb2
from object_detection.protos import train_pb2
import os
'''
Based on Lijun's code.
Modifications: the backbone is changed to conv1*2 and conv3*2, and L2 normalization is applied for matching.
'''
os.environ["CUDA_VISIBLE_DEVICES"]="0"
#os.environ["CUDA_VISIBLE_DEVICES"]="0"
tf.logging.set_verbosity(tf.logging.INFO)
flags = tf.app.flags
flags.DEFINE_string('train_dir', 'model/dump',
'Directory to save the checkpoints and training summaries.')
flags.DEFINE_string('pipeline_config_path', 'model/ssd_mobilenet_video.config',
'Path to a pipeline_pb2.TrainEvalPipelineConfig config '
'file. If provided, other configs are ignored')
flags.DEFINE_string('train_config_path', '',
'Path to a train_pb2.TrainConfig config file.')
flags.DEFINE_string('input_config_path', '',
'Path to an input_reader_pb2.InputReader config file.')
flags.DEFINE_string('model_config_path', '',
'Path to a model_pb2.DetectionModel config file.')
flags.DEFINE_string('image_root', '/home/xiaobai/Documents/ILSVRC2014_DET_train/image/ILSVRC2014_DET_train',
'Root path to input images')
flags.DEFINE_string('video_root', '/home/xiaobai/Documents/ILSVRC2015/',
'Root path to input videos')
flags.DEFINE_string('image_tfrecord', './train_seq.record',
'Path to image tfrecord.')
flags.DEFINE_string('video_tfrecord', './train_vid.record',
'Path to video tfrecord')
FLAGS = flags.FLAGS
def get_configs_from_pipeline_file():
"""Reads training configuration from a pipeline_pb2.TrainEvalPipelineConfig.
Reads training config from file specified by pipeline_config_path flag.
Returns:
model_config: model_pb2.DetectionModel
train_config: train_pb2.TrainConfig
input_config: input_reader_pb2.InputReader
"""
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
with tf.gfile.GFile(FLAGS.pipeline_config_path, 'r') as f:
text_format.Merge(f.read(), pipeline_config)
model_config = pipeline_config.model.ssd
train_config = pipeline_config.train_config
input_config = pipeline_config.train_input_reader
return model_config, train_config, input_config
def main(_):
model_config, train_config, input_config = get_configs_from_pipeline_file()
model_fn = functools.partial(
build_man_model,
model_config=model_config,
is_training=True)
create_input_image_dict_fn = functools.partial(
input_reader.read_video_image, FLAGS.video_tfrecord, FLAGS.image_tfrecord)
trainer_video.train(model_fn, create_input_image_dict_fn, train_config, FLAGS.train_dir, FLAGS.image_root, FLAGS.video_root)
if __name__ == '__main__':
# update moving average
tf.app.run()
19b4fbf622ea3b5c2b94266b63984fdd1ea1e133 | 460 | py | Python | config.py | repo: icewing1996/biaffine-parser | license: MIT | stars: 1 | issues: null | forks: null
# -*- coding: utf-8 -*-
class Config(object):
# [Network]
n_embed = 100
n_tag_embed = 100
embed_dropout = 0.33
n_lstm_hidden = 400
n_lstm_layers = 3
lstm_dropout = 0.33
n_mlp_arc = 500
n_mlp_rel = 100
mlp_dropout = 0.33
# [Optimizer]
lr = 2e-3
beta_1 = 0.9
beta_2 = 0.9
epsilon = 1e-12
decay = .75
decay_steps = 5000
# [Run]
batch_size = 200
epochs = 1000
patience = 100
19b7ef31e8ac32e464e2b7f9641c6ad98cd6de46 | 3,301 | py | Python | conf_dblp.py | repo: AmiraKetfi/ScientificProductScraper | license: MIT | stars: 4 | issues: null | forks: null
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 17 23:01:40 2018
@author: pc
"""
import scholarly,re,urllib.request,nltk
import bs4 as bs
# =============================================================================
# # Problem: the last conferences do not get added
# =============================================================================
def find_ComputerScienceConferences_Workshops_names_DBLP(url_deb):
page=urllib.request.urlopen(url_deb).read()
c,soup=0,bs.BeautifulSoup(page,'lxml')
for p in soup.find_all('a'):
if c==1 and p.text!="[previous 100 entries]":
print(p.text)
# s1=p.get("href")
# if re.search(r"http://dblp.uni-trier.de/db/conf/.",s1):
# publication_conf_dblp(s1)
if p.text=="[next 100 entries]":
c,s=1,p.get("href")
url_a="http://dblp.uni-trier.de/db/conf/"+s
if (p.text=="[previous 100 entries]")and(c==1): find_ComputerScienceConferences_Workshops_names_DBLP(url_a)
def Timeline_of_conferences(url_deb):
page=urllib.request.urlopen(url_deb).read()
soup=bs.BeautifulSoup(page,'lxml')
last_s=""
for q in soup.find_all('a'):
s=q.get("href")
if re.search(r"http://dblp.uni-trier.de/db/conf/.*/.*\.html",s):
if last_s!=s:
fichier = open("Lien_de_toutes_les_conf.txt", "a")
fichier.write("\n"+s)
fichier.close()
last_s=s
def publication_conf_dblp(url):
fichier = open("conf.txt", "w")
fichier.close()
fichier = open("publisher.txt", "w")
fichier.close()
fichier = open("Date.txt", "w")
fichier.close()
fichier = open("isbn.txt", "w")
fichier.close()
page=urllib.request.urlopen(url).read()
soup=bs.BeautifulSoup(page,'lxml')
c=0
for p in soup.find_all('span'):
s1=p.get("class")
try:
if s1[0]=='title':
fichier = open("conf.txt", "a")
fichier.write("\n"+p.text)
fichier.close()
except TypeError:
print("\t")
s2=p.get("itemprop")
try:
if s2=="publisher":
fichier = open("publisher.txt", "a")
fichier.write("\n"+p.text)
fichier.close()
if s2=="datePublished":
fichier = open("Date.txt", "a")
fichier.write("\n"+p.text)
fichier.close()
if s2=="isbn":
fichier = open("isbn.txt", "a")
fichier.write("\n"+p.text)
fichier.close()
if s2=="pagination":
fichier = open("pages.txt", "a")
fichier.write("\n"+p.text)
fichier.close()
except TypeError:
print("\t")
# pass
url_deb='https://dblp.uni-trier.de/db/conf/'
url_deb2='http://dblp.uni-trier.de/db/conf/3dim/3dimpvt2012.html'
url_deb3='http://dblp.uni-trier.de/db/conf/3dpvt/'
#Timeline_of_conferences(url_deb2)
publication_conf_dblp(url_deb3)
#find_ComputerScienceConferences_Workshops_names_DBLP(url_deb)
19b8ce0aa97bf71df30c5a8e086263306534c4c7 | 4,540 | py | Python | src/robot.py | repo: FROG3160/FRC2018-ARWING | license: MIT | stars: 1 | issues: 18 | forks: 4
#!/usr/bin/env python3
"""
Main code for Robot
"""
import wpilib
import robotmap
from wpilib import Joystick
from subsystems.drivetrain import DriveTrain as Drive
from subsystems.grabber import cubeGrabber
from subsystems.elevator import Elevator
from subsystems.climber import Climber
from subsystems.autonomous import Autonomous
from wpilib.sendablechooser import SendableChooser
# from robotpy_ext.common_drivers.navx import AHRS
class Robot(wpilib.IterativeRobot):
def robotInit(self):
"""
This function is called upon program startup and
should be used for any initialization code.
"""
self.pneumaticControlModuleCANID = robotmap.PCM
self.kDriveTrain = robotmap.DriveTrain
self.kCubeGrabber = robotmap.CubeGrabber
self.kElevator = robotmap.Elevator
self.kSticks = robotmap.Sticks
self.kClimber = robotmap.Climber
self.dStick = Joystick(self.kSticks['drive'])
self.cStick = Joystick(self.kSticks['control'])
self.drive = Drive(self)
self.cubeGrabber = cubeGrabber(self)
self.elevator = Elevator(self)
self.climber = Climber(self)
self.sendableChooser()
def robotPeriodic(self):
pass
def disabledInit(self):
pass
def disabledPeriodic(self):
self.drive.stop()
def autonomousInit(self):
"""This function is run once each time the robot enters autonomous mode."""
self.autonomous = Autonomous(self)
self.autonomous.reset()
self.drive.autoInit()
def autonomousPeriodic(self):
"""This function is called periodically during autonomous."""
#self.autonomous.testMove(self.autonomous.WALL_TO_SCALE, -1, False)
#self.autonomous.testAngle(-90, -1)
#self.elevator.setElevatorPosition(self.elevator.kScale)
#self.autonomous.start()
self.autonomous.run()
#self.elevator.setElevatorPosition(-20000)
#self.autonomous.telemetry()
def teleopInit(self):
self.drive.teleInit()
def teleopPeriodic(self):
"""This function is called periodically during operator control."""
speed = (self.dStick.getY() * -1)**3
rotation = self.dStick.getTwist()/(1.1+self.dStick.getRawAxis(3))
# self.drive.moveSpeed(speed, speed)
self.drive.arcadeWithRPM(speed, rotation, 2800)
self.cubeGrabber.grabberFunction()
#
self.elevator.elevatorFunction()
#self.elevator.telemetry()
self.climber.climberFunction()
def testInit(self):
pass
def testPeriodic(self):
wpilib.LiveWindow.setEnabled(True)
pass
def sendableChooser(self):
self.startingChooser = SendableChooser()
self.startingChooser.addDefault('Move Forward Only', '!')
self.startingChooser.addObject('Starting Left', 'L')
self.startingChooser.addObject('Starting Middle', 'M')
self.startingChooser.addObject('Starting Right', 'R')
wpilib.SmartDashboard.putData('Starting Side', self.startingChooser)
self.startingDelayChooser = SendableChooser()
self.startingDelayChooser.addDefault('0', 0)
self.startingDelayChooser.addObject('1', 1)
self.startingDelayChooser.addObject('2', 2)
self.startingDelayChooser.addObject('3', 3)
self.startingDelayChooser.addObject('4', 4)
self.startingDelayChooser.addObject('5', 5)
self.startingDelayChooser.addObject('6', 6)
self.startingDelayChooser.addObject('7', 7)
self.startingDelayChooser.addObject('8', 8)
self.startingDelayChooser.addObject('9', 9)
self.startingDelayChooser.addObject('10', 10)
self.startingDelayChooser.addObject('11', 11)
self.startingDelayChooser.addObject('12', 12)
self.startingDelayChooser.addObject('13', 13)
self.startingDelayChooser.addObject('14', 14)
self.startingDelayChooser.addObject('15', 15)
wpilib.SmartDashboard.putData('Delay Time(sec)', self.startingDelayChooser)
self.switchOrScale = SendableChooser()
self.switchOrScale.addDefault('Switch', 'Switch')
self.switchOrScale.addObject('Scale', 'Scale')
wpilib.SmartDashboard.putData('Switch or Scale', self.switchOrScale)
if __name__ == "__main__":
wpilib.run(Robot)
19b94d7c9d394f09ecf7228b67004f998dd55522 | 1,764 | py | Python | api/attomized_avm.py | repo: johncoleman83/attom_python_client | license: MIT | stars: null | issues: null | forks: 1
#!/usr/bin/env python3
"""
ATTOM API
https://api.developer.attomdata.com
"""
import requests
from urllib.parse import quote, urlencode
from api import api
PATH = "attomavm/detail"
def get_avm_by_address(number_street, city_state):
"""
API request to get attomavm/detail
"""
params = urlencode(
{
"address1": number_street,
"address2": city_state,
}
)
url = "{}/{}?{}".format(api.ATTOM_URL, PATH, params)
r = requests.get(url, headers=api.headers)
return r.json()
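# Illustrative usage (not part of the original module), assuming a valid ATTOM API
# key is configured in api.headers; the address below is an arbitrary example:
#
#     avm = get_avm_by_address("4529 Winona Court", "Denver, CO")
#     print(avm)  # full JSON payload; the helpers below extract individual fields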
def get_building_from(p, all_beds, all_baths, all_building_sizes):
b = {
'size': p.get('building', {}).get('size', {}).get('livingsize', None),
'baths': p.get('building', {}).get('rooms', {}).get('bathstotal', None),
'beds': p.get('building', {}).get('rooms', {}).get('beds', None),
'bsmt': p.get('building', {}).get('interior', {}).get('bsmtsize', None),
}
if b.get('beds'):
all_beds.append(b.get('beds'))
if b.get('baths'):
all_baths.append(b.get('baths'))
if b.get('size'):
all_building_sizes.append(b.get('size'))
return b
def get_sale_from(p, all_sale_values):
sale = {
'saleamt': p.get('sale', {}).get('amount', {}).get('saleamt', None),
'saledate': p.get('sale', {}).get('amount', {}).get('salerecdate', None),
}
if sale.get('saleamt') == 0:
sale['saleamt'] = None
if sale.get('saleamt'):
all_sale_values.append(sale.get('saleamt'))
return sale
def get_address_from(p):
return p.get('address', {}).get('line1', "NULL")
def get_lot_from(p):
return p.get('lot', {}).get('lotsize2', "NULL")
def get_market_value_from(p):
return p.get('assessment', {}).get('market', {}).get('mktttlvalue', None)
def get_avm_from(p):
return p.get('avm', {}).get('amount', {}).get('value', None)
| 27.138462 | 77 | 0.620181 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 542 | 0.307256 |
19b9c7cf12ec5b8b173b1bc2764d7bfc2577385f | 7,064 | py | Python | idmap/models.py | tkhyn/django-idmap | 383124fc4bd537d053f9d4c0d02a498f66831baa | [
"BSD-2-Clause"
] | 1 | 2021-04-24T16:35:15.000Z | 2021-04-24T16:35:15.000Z | idmap/models.py | tkhyn/django-idmap | 383124fc4bd537d053f9d4c0d02a498f66831baa | [
"BSD-2-Clause"
] | null | null | null | idmap/models.py | tkhyn/django-idmap | 383124fc4bd537d053f9d4c0d02a498f66831baa | [
"BSD-2-Clause"
] | 1 | 2021-02-27T14:45:48.000Z | 2021-02-27T14:45:48.000Z | import django
from django.db import models
from django.db.models.base import ModelBase
from django.utils import six
from .manager import IdMapManager
from . import tls # thread local storage
META_VALUES = {
'use_strong_refs': False,
'multi_db': False
}
class IdMapModelBase(ModelBase):
def __new__(mcs, name, bases, attrs):
meta = attrs.get('Meta', type('Meta', (object,), {}))
meta_values = {}
for attr, default in six.iteritems(META_VALUES):
try:
meta_values[attr] = getattr(meta, attr)
delattr(meta, attr)
except AttributeError:
pass
if django.VERSION < (1, 10):
# these attributes are only supported from 1.10 onwards
# if they are still defined when calling super.__new__ this raises
# an exception
for attr in ['base_manager_name', 'default_manager_name']:
try:
delattr(meta, attr)
except AttributeError:
pass
cls = super(IdMapModelBase, mcs).__new__(mcs, name, bases, attrs)
for attr in six.iterkeys(META_VALUES):
try:
# value defined in the class' own Meta
setattr(cls._meta, attr, meta_values[attr])
except KeyError:
# value not defined, look into bases' Meta
for base in cls.mro()[1:]:
try:
setattr(cls._meta, attr, getattr(base._meta, attr))
break
except AttributeError:
pass
else:
setattr(cls._meta, attr, META_VALUES[attr])
return cls
class IdMapModel(six.with_metaclass(IdMapModelBase, models.Model)):
"""
Abstract class to derive any idmap-enabled model from
Meta can set ``use_strong_refs`` to True if one should use strong references
(= kept in cache until explicitly flushed) for stored instances, and
``multi_db`` to True if the model is used in several databases
"""
objects = IdMapManager()
class Meta:
# does not inherit from base_class.Meta but that's not an issue
abstract = True
base_manager_name = 'objects'
default_manager_name = 'objects'
# OVERRIDES
@classmethod
def from_db(cls, db, field_names, values):
"""
This method will either create an instance (by calling the default
implementation) or try to retrieve one from the class-wide cache by
        inferring the pk value from args and kwargs. The cache is then populated
        whenever possible (i.e. when it is possible to infer the pk value).
"""
try:
is_deferred = cls is models.DEFERRED
except AttributeError:
# django < 1.10
is_deferred = cls._deferred
if is_deferred:
args = ()
kwargs = dict(zip(field_names, values))
else:
args = values
kwargs = {}
instance_key = cls._get_cache_key(args, kwargs)
def create_instance():
inst = cls(*args, **kwargs)
inst._state.adding = False
inst._state.db = db
cls.cache_instance(inst)
return inst
# depending on the arguments, we might not be able to infer the PK
# in that case, we create a new instance
if instance_key is None:
return create_instance()
else:
instance = cls.get_cached_instance(instance_key, db)
if instance is None:
return create_instance()
else:
return instance
def refresh_from_db(self, using=None, fields=None):
self.flush_cached_instance(self)
super(IdMapModel, self).refresh_from_db(using, fields)
self.cache_instance(self)
# DJANGO-IDMAP METHODS
@classmethod
def _get_cache_key(cls, args, kwargs):
"""
This method is used by the caching subsystem to infer the PK value
from the constructor arguments. It is used to decide if an instance
has to be built or is already in the cache.
"""
result = None
# Quick hack for my composites work for now.
if hasattr(cls._meta, 'pks'):
pk = cls._meta.pks[0]
else:
pk = cls._meta.pk
pk_position = getattr(cls._meta, 'pk_pos', None)
if pk_position is None:
# the pk position could not be extracted from _meta
# calculate it ...
pk_position = cls._meta.fields.index(pk)
# ... and store it
setattr(cls._meta, 'pk_pos', pk_position)
if len(args) > pk_position:
# if it's in the args, we can get it easily by index
result = args[pk_position]
elif pk.attname in kwargs:
# retrieve the pk value. Note that we use attname instead of name,
# to handle the case where the pk is a ForeignKey.
result = kwargs[pk.attname]
elif pk.name != pk.attname and pk.name in kwargs:
# ok we couldn't find the value, but maybe it's a FK and we can
# find the corresponding object instead
result = kwargs[pk.name]
if result is not None and isinstance(result, models.Model):
# if the pk value happens to be a model instance (which can
# happen with a FK), we'd rather use its own pk as the key
result = result._get_pk_val()
return result
@classmethod
def get_cached_instance(cls, pk, db=None):
"""
Method to retrieve a cached instance by pk value and db. Returns None
when not found (which will always be the case when caching is disabled
for this class). Please note that the lookup will be done even when
instance caching is disabled.
"""
return tls.get_cached_instance(cls, pk, db)
@classmethod
def cache_instance(cls, instance):
"""
Method to store an instance in the cache.
"""
pk = instance._get_pk_val()
if pk is not None:
tls.cache_instance(cls, instance)
@classmethod
def flush_cached_instance(cls, instance):
"""
Method to flush an instance from the cache. The instance will always
be flushed from the cache, since this is most likely called from
delete(), and we want to make sure we don't cache dead objects.
"""
tls.flush_cached_instance(cls, instance)
@classmethod
def flush_instance_cache(cls, db=None, flush_sub=False):
tls.get_cache(cls, flush=db)
if flush_sub:
for s in cls.__subclasses__():
s.flush_instance_cache(db, flush_sub)
def save(self, *args, **kwargs):
"""
Caches the instance on save
"""
super(IdMapModel, self).save(*args, **kwargs)
self.__class__.cache_instance(self)
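# -----------------------------------------------------------------------------
# Illustrative usage (not part of the original module): a sketch of the
# identity-map behaviour, assuming a configured Django project with an app that
# defines the hypothetical concrete model below.
#
#     class Article(IdMapModel):
#         title = models.CharField(max_length=100)
#
#     a1 = Article.objects.get(pk=1)
#     a2 = Article.objects.get(pk=1)
#     assert a1 is a2          # same Python object, served from the cache
#     Article.flush_instance_cache()
#     a3 = Article.objects.get(pk=1)
#     assert a3 is not a1      # cache was flushed, a fresh instance was built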
19bbd9ee5d1a69e647b6029452a9fd29e645da59 | 1,345 | py | Python | test_search_in_rotated_sorted_array.py | repo: jaebradley/leetcode.py | license: MIT | stars: null | issues: 2 | forks: null
from unittest import TestCase
from search_in_rotated_sorted_array import Solution
class TestSearchInRotatedSortedArray(TestCase):
def test_0_when_first_element_is_target(self):
self.assertEqual(Solution().search([1, 2, 3, 4, 5, 6, 7], 1), 0)
def test_end_index_last_element_is_target(self):
self.assertEqual(Solution().search([1, 2, 3, 4, 5, 6, 7], 7), 6)
def test_middle_index_is_target(self):
self.assertEqual(Solution().search([1, 2, 3, 4, 5, 6, 7], 4), 3)
def test_0_when_first_element_is_target_when_rotated(self):
self.assertEqual(Solution().search([5, 6, 7, 1, 2, 3, 4], 5), 0)
def test_two_element_descending_second_element(self):
self.assertEqual(Solution().search([2, 1], 1), 1)
def test_two_element_descending_first_element(self):
self.assertEqual(Solution().search([2, 1], 2), 0)
def test_two_element_ascending_first_element(self):
self.assertEqual(Solution().search([1, 2], 1), 0)
def test_two_element_ascending_second_element(self):
self.assertEqual(Solution().search([1, 2], 2), 1)
def test_unable_to_find_element(self):
self.assertEqual(Solution().search([1, 2, 3, 4, 5, 6], 7), -1)
def test_target_element_is_right_of_pivot(self):
self.assertEqual(Solution().search([4, 5, 6, 7, 0, 1, 2], 0), 4)
19bd0b651a92c3989a6dcd3e14655ea86b1f4a83 | 2,501 | py | Python | pyrfu/pyrf/ts_skymap.py | repo: ablotekar/irfu-python | license: MIT | stars: 2 | issues: 1 | forks: 2
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# 3rd party imports
import numpy as np
import xarray as xr
__author__ = "Louis Richard"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020-2021"
__license__ = "MIT"
__version__ = "2.3.7"
__status__ = "Prototype"
def ts_skymap(time, data, energy, phi, theta, **kwargs):
r"""Creates a skymap of the distribution function.
Parameters
----------
time : ndarray
List of times.
data : ndarray
Values of the distribution function.
energy : ndarray
Energy levels.
phi : ndarray
Azimuthal angles.
theta : ndarray
Elevation angles.
Other Parameters
    ----------------
    **kwargs
        Keyword arguments:
* energy0 : ndarray
Energy table 0 (odd time indices).
* energy1 : ndarray
Energy table 1 (even time indices).
* esteptable : ndarray
Time series of the stepping table between energies (burst).
Returns
-------
out : xarray.Dataset
Skymap of the distribution function.
"""
energy0, energy1, esteptable = [None] * 3
energy0_ok, energy1_ok, esteptable_ok = [False] * 3
if energy is None:
if "energy0" in kwargs:
energy0, energy0_ok = [kwargs["energy0"], True]
if "energy1" in kwargs:
energy1, energy1_ok = [kwargs["energy1"], True]
if "esteptable" in kwargs:
esteptable, esteptable_ok = [kwargs["esteptable"], True]
if not energy0_ok and not energy1_ok and not esteptable_ok:
raise ValueError("Energy input required")
energy = np.tile(energy0, (len(esteptable), 1))
energy[esteptable == 1] = np.tile(energy1,
(int(np.sum(esteptable)), 1))
if phi.ndim == 1:
phi = np.tile(phi, (len(time), 1))
out_dict = {"data": (["time", "idx0", "idx1", "idx2"], data),
"phi": (["time", "idx1"], phi), "theta": (["idx2"], theta),
"energy": (["time", "idx0"], energy), "time": time,
"idx0": np.arange(energy.shape[1]),
"idx1": np.arange(phi.shape[1]), "idx2": np.arange(len(theta))}
out = xr.Dataset(out_dict)
if energy0_ok:
out.attrs["energy0"] = energy0
if energy1_ok:
out.attrs["energy1"] = energy1
if energy0_ok:
out.attrs["esteptable"] = esteptable
return out
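if __name__ == "__main__":
    # Illustrative usage (not part of the original module): build a small dummy
    # skymap; the sizes (4 times, 32 energies, 16 azimuth and 8 elevation bins)
    # are arbitrary example values.
    times = np.datetime64("2020-01-01T00:00:00") + np.arange(4) * np.timedelta64(1, "s")
    data = np.random.rand(4, 32, 16, 8)
    energy = np.tile(np.logspace(1, 4, 32), (4, 1))
    phi = np.tile(np.linspace(0.0, 360.0, 16, endpoint=False), (4, 1))
    theta = np.linspace(0.0, 180.0, 8)
    print(ts_skymap(times, data, energy, phi, theta))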
19be0f2de874f8b441c89b5d8fd8cac69393789a | 2,037 | py | Python | src/log_utils.py | repo: alexklwong/calibrated-backprojection-network | license: Intel | stars: 38 | issues: 14 | forks: 9
'''
Author: Alex Wong <[email protected]>
If you use this code, please cite the following paper:
A. Wong, and S. Soatto. Unsupervised Depth Completion with Calibrated Backprojection Layers.
https://arxiv.org/pdf/2108.10531.pdf
@inproceedings{wong2021unsupervised,
title={Unsupervised Depth Completion with Calibrated Backprojection Layers},
author={Wong, Alex and Soatto, Stefano},
booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision},
pages={12747--12756},
year={2021}
}
'''
import os
import torch
import numpy as np
from matplotlib import pyplot as plt
def log(s, filepath=None, to_console=True):
'''
Logs a string to either file or console
Arg(s):
s : str
string to log
        filepath : str
            output filepath for logging
to_console : bool
log to console
'''
if to_console:
print(s)
if filepath is not None:
if not os.path.isdir(os.path.dirname(filepath)):
os.makedirs(os.path.dirname(filepath))
with open(filepath, 'w+') as o:
o.write(s + '\n')
else:
with open(filepath, 'a+') as o:
o.write(s + '\n')
def colorize(T, colormap='magma'):
'''
Colorizes a 1-channel tensor with matplotlib colormaps
Arg(s):
T : torch.Tensor[float32]
1-channel tensor
colormap : str
matplotlib colormap
'''
cm = plt.cm.get_cmap(colormap)
shape = T.shape
# Convert to numpy array and transpose
if shape[0] > 1:
T = np.squeeze(np.transpose(T.cpu().numpy(), (0, 2, 3, 1)))
else:
T = np.squeeze(np.transpose(T.cpu().numpy(), (0, 2, 3, 1)), axis=-1)
# Colorize using colormap and transpose back
color = np.concatenate([
np.expand_dims(cm(T[n, ...])[..., 0:3], 0) for n in range(T.shape[0])],
axis=0)
color = np.transpose(color, (0, 3, 1, 2))
# Convert back to tensor
return torch.from_numpy(color.astype(np.float32))
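if __name__ == "__main__":
    # Illustrative usage (not part of the original module): colorize a random
    # 1-channel, NCHW depth-like tensor; sizes are arbitrary example values.
    depth = torch.rand(2, 1, 4, 5)
    color = colorize(depth, colormap='magma')
    log('colorized tensor has shape {}'.format(tuple(color.shape)))  # (2, 3, 4, 5)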
19c214d222aa500c556609e883b1ff02ba286869 | 788 | py | Python | add-two-numbers/add-two-numbers.py | repo: shaurya-src/code-leet | license: MIT | stars: null | issues: null | forks: null
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def addTwoNumbers(self, l1: Optional[ListNode], l2: Optional[ListNode]) -> Optional[ListNode]:
a = self.get_num(l1)
b = self.get_num(l2)
total = str(a+b)[::-1]
res = ListNode(total[0])
itr = res
for i in range(1, len(total)):
curr = ListNode(total[i])
itr.next = curr
itr = itr.next
return res
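    # Illustrative example (not part of the original solution): for
    # l1 = 2 -> 4 -> 3 (342) and l2 = 5 -> 6 -> 4 (465), the sum is 807,
    # returned as the list 7 -> 0 -> 8.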
def get_num(self, ll):
if not ll:
return 0
num = ""
curr = ll
while curr:
num += str(curr.val)
curr = curr.next
        return int(num[::-1])
19c251bd8c7eb79b25c470c6951dca0f932a8918 | 2,834 | py | Python | likedtweets.py | repo: PoliTwit1984/Politwitverse | license: MIT | stars: 3 | issues: 25 | forks: 1
import time
import re
import tweepy
import preprocessor as p
import config
import string
consumer_key = config.consumer_key
consumer_secret = config.consumer_secret
access_token = config.access_token
access_token_secret = config.access_token_secret
bearer_token = config.bearer_token
username = config.username
password = config.password
def clean_text(text):
"""
Function to clean the text.
Parameters:
text: the raw text as a string value that needs to be cleaned
Returns:
cleaned_text: the cleaned text as string
"""
# convert to lower case
cleaned_text = text.lower()
# remove HTML tags
html_pattern = re.compile('<.*?>')
cleaned_text = re.sub(html_pattern, '', cleaned_text)
# remove punctuations
cleaned_text = cleaned_text.translate(
str.maketrans('', '', string.punctuation))
return cleaned_text.strip()
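# Illustrative example (not part of the original script):
#     clean_text("Great <b>WIN</b> tonight!!")  ->  "great win tonight"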
def remove_whitespace(text):
return " ".join(text.split())
def clean_tweets(tweet_text):
# URL p.OPT.URL
# Mention p.OPT.MENTION
# Hashtag p.OPT.HASHTAG
# Reserved Words p.OPT.RESERVED
# Emoji p.OPT.EMOJI
# Smiley p.OPT.SMILEY
# Number p.OPT.NUMBER
p.set_options(p.OPT.URL, p.OPT.MENTION, p.OPT.EMOJI, p.OPT.SMILEY)
clean_tweet_text = p.clean(tweet_text)
clean_tweet_text = remove_whitespace(clean_tweet_text)
clean_tweet_text = clean_tweet_text.replace('&', "")
return(clean_tweet_text)
def makeitastring(wannabestring):
convertedstring = ','.join(map(str, wannabestring))
return(convertedstring)
client = tweepy.Client(bearer_token=bearer_token)
list_id = "1467207384011526144" # all missouri legislators
response = client.get_list_members(list_id, max_results = 100)
users = response.data
metadata = response.meta
next_token = metadata.get("next_token")
print(next_token)
while users:
    for user in users:
        string = str(user.name)+","+str(user.id)+","+str(user.username)+"\n"
        with open('moleglistmembership.txt', 'a') as f:
            f.write(string)
    # The final page has no next_token; stop after writing it instead of dropping it.
    if next_token is None:
        break
    response = client.get_list_members(list_id, pagination_token=next_token, max_results=100)
    users = response.data
    metadata = response.meta
    next_token = metadata.get("next_token")
    print(next_token)
# tweet_text = tweet.text
# tweet_clean_text = clean_tweets(tweet.text)
# tweet_created_at = tweet.created_at
# tweet_clean_text = clean_text(tweet_clean_text)
# print(tweet_clean_text)
# print('\n')
# print(tweet_created_at)
# print('\n')
# print('-----------------------------------------------------------------')
# with open('molegmembership.txt', 'a') as f:
# f.write(tweet_clean_text)
# f.write('\n')
# response = client.get_list_tweets(list_id, max_results=100)
| 26.485981 | 97 | 0.677135 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,056 | 0.372618 |
19c32bbd1169664ffd8d06d663183110a2d5e53c | 391 | py | Python | src/app/migrations/0004_history_missed.py | deadlock-delegate/arkdelegates | 8a5262f51b519ba3bc10094756c8866fc550df65 | [
"MIT"
] | 2 | 2018-05-22T13:47:09.000Z | 2018-05-23T12:45:05.000Z | src/app/migrations/0004_history_missed.py | deadlock-delegate/arkdelegates | 8a5262f51b519ba3bc10094756c8866fc550df65 | [
"MIT"
] | 21 | 2018-05-08T12:56:46.000Z | 2020-06-05T18:59:38.000Z | src/app/migrations/0004_history_missed.py | deadlock-delegate/arkdelegates | 8a5262f51b519ba3bc10094756c8866fc550df65 | [
"MIT"
] | 4 | 2018-05-04T15:00:59.000Z | 2019-02-13T02:39:07.000Z | # Generated by Django 2.0.3 on 2018-03-14 09:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0003_delegate_public_key'),
]
operations = [
migrations.AddField(
model_name='history',
name='missed',
field=models.FloatField(blank=True, null=True),
),
]
| 20.578947 | 59 | 0.595908 | 298 | 0.762148 | 0 | 0 | 0 | 0 | 0 | 0 | 95 | 0.242967 |
19c43d42b7108f348940b9fd8fc9fb33a8830e2c | 2,112 | py | Python | audclass.py | theunafraid/audiofeedback-prevention | 0dd3e8ab7b5a65aff214e74b7bd7869366b1b7b5 | [
"Apache-2.0"
] | 1 | 2022-01-20T08:30:20.000Z | 2022-01-20T08:30:20.000Z | audclass.py | theunafraid/audiofeedback-prevention | 0dd3e8ab7b5a65aff214e74b7bd7869366b1b7b5 | [
"Apache-2.0"
] | null | null | null | audclass.py | theunafraid/audiofeedback-prevention | 0dd3e8ab7b5a65aff214e74b7bd7869366b1b7b5 | [
"Apache-2.0"
] | null | null | null | import tensorflow as tf
import numpy as np
from tensorflow.python.ops.gen_batch_ops import batch
from model import AudioClass
from qrnn import QRNN
from numpy.random import seed
from numpy.random import randn
from random import randint
from lstmfcn import LSTM_FCN
import librosa
import os
def getData():
outX = []
outY = []
for i in range(10):
values = randn(16000)
outX.append(np.array(values))
pos = randint(0, 2)
outY1=np.zeros(3)
outY1[pos] = 1.0
outY.append(outY1)
outX = np.array(outX)
return outX, np.array(outY)
def readFileData(dir, filename):
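    # File names encode the label as "<name>-<classid>.<ext>"; audio is loaded mono at 16 kHz.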
class_id = (filename.split('-')[1]).split('.')[0]
# print("found class : ", class_id, flush=True)
filepath = dir + '/'+filename
data, sample_rate = librosa.load(filepath,sr=16000)
# a = np.vstack(data)
# print(a.shape)
return np.vstack(data), int(class_id)
def getDataFromFolder(folder):
outX = []
outY = []
files = os.listdir(folder)
print("files : ", files)
for file in files:
if os.path.isfile(folder + "/" +file):
data, classid = readFileData(folder, file)
# print("data ", data)
# print("classid ", classid)
outX.append(np.asarray(data).astype(np.float32))#np.array(data))
# pos = randint(0, 2)
outY1=np.zeros(3)
outY1[classid] = 1.0
outY.append(outY1)
#print(outX, flush=True)
outX = np.asarray(outX).astype(np.float32) #np.array(outX, dtype="object")
return outX, np.array(outY)
def main():
try:
model = QRNN(16000, 5120) #16000)#AudioClass(3)
model.printmodel()
# return
X, Y = getDataFromFolder("./audio/ds_0.3s/300ms_additional/")
#print(Y.shape)
#print(X.shape)
#print(Y)
#print(X)
# return
epochs = 350
batch = 8
model.train(X, Y, epochs, batch)
print("save model...", flush=True)
model.save("./qrnn.h5")
except Exception as ex:
print(ex)
if __name__ == "__main__":
main()
| 26.734177 | 78 | 0.588542 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 404 | 0.191288 |
19c4cd4bbbc8cea75d64211787db92e8b023d09a | 3,135 | py | Python | pytanga/components/OpenConfig/routing/ospfv2/ospfv2Interface.py | renatoalmeidaoliveira/Pytanga | aa02f1c0f2573da1330d1d246ab780fa3be336a5 | [
"MIT"
] | null | null | null | pytanga/components/OpenConfig/routing/ospfv2/ospfv2Interface.py | renatoalmeidaoliveira/Pytanga | aa02f1c0f2573da1330d1d246ab780fa3be336a5 | [
"MIT"
] | null | null | null | pytanga/components/OpenConfig/routing/ospfv2/ospfv2Interface.py | renatoalmeidaoliveira/Pytanga | aa02f1c0f2573da1330d1d246ab780fa3be336a5 | [
"MIT"
] | null | null | null | from pytanga.components import AbstractComponent
class ospfv2InterfaceComponent(AbstractComponent):
def __init__(
self,
if_id,
network_type=None,
priority=None,
multi_area_adjacency_primary=None,
authentication_type=None,
metric=None,
passive=None,
hide_network=None):
self._xmlns = {}
self.attributes = self.setAttributes(if_id,
network_type,
priority,
multi_area_adjacency_primary,
authentication_type,
metric,
passive,
hide_network)
self.parent_xmlns = {}
self._children: List[AbstractComponent] = []
self.childrenData = []
self.tag = 'interface'
@property
def xmlns(self):
return self._xmlns
@xmlns.setter
def xmlns(self, xmlns):
self._xmlns = xmlns
def setAttributes(self,
if_id,
network_type,
priority,
multi_area_adjacency_primary,
authentication_type,
metric,
passive,
hide_network):
        attributes = {
            'id': if_id
        }
        attributes['config'] = {}
        if(network_type):
            attributes['config']['network-type'] = {
                'keys': {
                    'xmlns:oc-ospf-types': 'http://openconfig.net/yang/ospf-types'
                },
                'value': f"oc-ospf-types:{network_type}"
            }
        if(priority):
            attributes['config']['priority'] = priority
        if(multi_area_adjacency_primary):
            attributes['config']['multi-area-adjacency-primary'] = multi_area_adjacency_primary
        if(authentication_type):
            attributes['config']['authentication-type'] = authentication_type
        if(metric):
            attributes['config']['metric'] = metric
        if(passive):
            attributes['config']['passive'] = passive
        if(hide_network):
            attributes['config']['hide-network'] = hide_network
        if(attributes['config'] == {}):
            del attributes['config']
        return attributes
def add(self, component) -> None:
self._children.append(component)
def remove(self, component) -> None:
self._children.remove(component)
def is_composite(self) -> bool:
return False
def getXMLNS(self):
childrenData = []
for child in self._children:
self.parent_xmlns.update(child.getXMLNS())
return self.parent_xmlns
def parse(self, serializer):
self.childrenData = []
self.getXMLNS()
for child in self._children:
self.childrenData.append(child.parse(serializer))
return serializer.parse(self)
| 32.319588 | 94 | 0.500797 | 3,082 | 0.983094 | 0 | 0 | 126 | 0.040191 | 0 | 0 | 305 | 0.097289 |
19c79aebe6cccec71cf534b0497f44d1a8496883 | 4,127 | py | Python | python_implementation/matriz/quadrada.py | SousaPedro11/algoritmos | 86a3601912778d120b9ec8094267c26a7eb6d153 | [
"MIT"
] | null | null | null | python_implementation/matriz/quadrada.py | SousaPedro11/algoritmos | 86a3601912778d120b9ec8094267c26a7eb6d153 | [
"MIT"
] | null | null | null | python_implementation/matriz/quadrada.py | SousaPedro11/algoritmos | 86a3601912778d120b9ec8094267c26a7eb6d153 | [
"MIT"
] | null | null | null | import math
from typing import List, Tuple
def __cria_matriz_quadrada(tamanho: int = 20) -> List[List[str]]:
matriz = []
for _ in range(tamanho):
linha = ['0' for _ in range(tamanho)]
matriz.append(linha)
return matriz
def __diagonais(matriz: List[List[str]]) -> Tuple[list, list]:
tamanho = len(matriz)
diagonal_principal = []
diagonal_secundaria = []
top, bottom, right, left = 'B', 'A', 'Y', 'X'
if tamanho >= 20:
ponto_medio = math.ceil(tamanho / 2)
diagonal_principal = [j for j in range(tamanho)]
diagonal_secundaria = [j for j in range(tamanho)[::-1]]
for i, j in enumerate(diagonal_secundaria):
matriz[i][j] = right if (i < ponto_medio) else left
for i, j in enumerate(diagonal_principal):
matriz[i][j] = top if (j < ponto_medio) else bottom
return diagonal_principal, diagonal_secundaria
def __quadrantes(matriz: List[List[str]], diagonal_p: list, diagonal_s: list) -> None:
tamanho = len(matriz)
if tamanho >= 20:
for i in range(tamanho):
elemento_dp = diagonal_p[i]
elemento_ds = diagonal_s[i]
for j in range(tamanho):
if elemento_dp < j < elemento_ds:
matriz[i][j] = 'B'
elif elemento_ds < j < elemento_dp:
matriz[i][j] = 'A'
elif j < elemento_dp and j < elemento_ds:
matriz[i][j] = 'X'
elif j > elemento_dp and j > elemento_ds:
matriz[i][j] = 'Y'
def __imprime_matriz(matriz: List[List[str]]) -> None:
try:
print(f'Matriz de tamanho: {len(matriz)}')
for linha in matriz:
print(' '.join(linha))
print('\n')
except ValueError as e:
print(e)
def __define_tamanho(msg: str) -> int:
while True:
try:
tamanho = int(input(f'{msg}: '))
break
except ValueError:
print('O valor informado não é um inteiro!')
return tamanho
def __define_matriz_maior() -> List[List[str]]:
print('MATRIZ MAIOR')
tamanho = __define_tamanho(
msg='Defina a ordem de uma matriz quadrada (inteiro maior ou igual a 20)',
)
while tamanho < 20:
print('Valor informado menor que 20!')
tamanho = __define_tamanho(
msg='Defina a ordem de uma matriz quadrada (inteiro maior ou igual a 20)',
)
matriz = __cria_matriz_quadrada(tamanho)
diagonal_principal, diagonal_secundaria = __diagonais(matriz)
__quadrantes(matriz, diagonal_principal, diagonal_secundaria)
__imprime_matriz(matriz)
return matriz
def __define_matriz_menor(len_matriz_maior: int) -> List[List[str]]:
print('MATRIZ MENOR')
tamanho = __define_tamanho(
msg=f'Defina a ordem de uma matriz quadrada (inteiro menor que {len_matriz_maior})',
)
while tamanho >= len_matriz_maior:
print(f'Valor informado maior que {len_matriz_maior}!')
tamanho = __define_tamanho(
msg=f'Defina a ordem de uma matriz quadrada (inteiro menor que {len_matriz_maior})',
)
matriz = __cria_matriz_quadrada(tamanho)
__imprime_matriz(matriz)
return matriz
def __gera_matriz_concentrica(matriz_maior: List[List[str]], matriz_menor: List[List[str]]) -> None:
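    # Overlay the smaller matrix centred inside the larger one ("concentric" matrices).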
if len(matriz_menor) > len(matriz_maior):
raise ValueError('Matriz menor declarada no local errado!')
print('MATRIZ CONCENTRICA')
maior = matriz_maior.copy()
menor = matriz_menor.copy()
ponto_medio_maior = math.ceil(len(maior) / 2)
ponto_medio_menor = math.ceil(len(menor) / 2)
diferenca = ponto_medio_maior - ponto_medio_menor
for i, linha in enumerate(menor):
for j, coluna in enumerate(linha):
maior[i + diferenca][j + diferenca] = coluna
__imprime_matriz(maior)
def solucao_problema():
matriz_maior = __define_matriz_maior()
matriz_menor = __define_matriz_menor(len(matriz_maior))
__gera_matriz_concentrica(matriz_maior, matriz_menor)
if __name__ == '__main__':
solucao_problema()
| 33.282258 | 100 | 0.628544 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 593 | 0.143618 |
19c7ee6d99159a70af01f16f7e183bb9ec3972a5 | 7,017 | py | Python | app_mongo.py | emmpets/MongoProject | cbef19b590503825158909703125b34c1bf536ec | [
"Apache-2.0"
] | null | null | null | app_mongo.py | emmpets/MongoProject | cbef19b590503825158909703125b34c1bf536ec | [
"Apache-2.0"
] | null | null | null | app_mongo.py | emmpets/MongoProject | cbef19b590503825158909703125b34c1bf536ec | [
"Apache-2.0"
] | null | null | null | from pymongo import MongoClient
import pymongo
from datetime import datetime
import time
from bson.code import Code
mongo_client=MongoClient('mongodb://localhost:27017/')
db=mongo_client.mydb
db_col=db.things
dbc = mongo_client.mydb.things
print mongo_client
print(db)
print("connected")
def first_querry():
all_count = db.things.find().count()
return all_count
def second_querry():
allusers = first_querry()
pipeline = [
{"$group": {"_id": "$id_member", "count": {"$sum": 1}}},
{"$sort": {"count": -1}},
{"$limit": 10}
]
result = list(db.things.aggregate(pipeline))
sum1 = 0
for plithos in result:
sum1 = sum1 + plithos['count']
percentage = 100.0 * sum1 / allusers
return percentage
def third_querry():
result3a = db.things.find({}, {"timestamp": 1}).sort("timestamp", pymongo.DESCENDING).limit(1)
for row in result3a:
# print("The last message published on:"),
str(row["timestamp"])
tmax = row["timestamp"]
result3b = db.things.find({"timestamp": {'$ne': None}}, {"timestamp": 1}).sort("timestamp",
pymongo.ASCENDING).limit(1)
for rb in result3b:
# print("The earliest message published on:"),
str(rb["timestamp"])
tmin = rb["timestamp"]
return (tmax,tmin)
def fourth_querry():
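    # Mean gap between messages = (latest - earliest) / (message count - 1), in seconds.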
tmax, tmin = third_querry()
dmax = datetime.strptime(tmax, "%Y-%m-%d %H:%M:%S")
secondmax = time.mktime(dmax.timetuple())
dmin = datetime.strptime(tmin, "%Y-%m-%d %H:%M:%S")
secondmin = time.mktime(dmin.timetuple())
all_plithos_msg = db.things.find().count()
deltatimemean = ((secondmax - secondmin) / (all_plithos_msg - 1))
return deltatimemean
data = dbc.find()
def fifth_querry(data):
sum_of_texts = 0
for row in data:
if 'text' in row:
sum_of_texts += len(str(row["text"]).encode('utf-8'))
average_tweet_size = sum_of_texts / db.things.count()
return average_tweet_size
def sixth_querry():
mapperUni = Code("""
function() {
var thisText = this.text;
var splitStr = thisText.toString().split(" ");
for(i=0 ; i< splitStr.length ;i++){
var clean1 = splitStr[i].replace(/[.,-\/#!$%\^&\*;:{}=\-_`~()]/g,"");
var clean2 = clean1.replace(/\s{2,}/g," ");
var cleanStr = clean2.trim();
if (cleanStr.length>0)
emit(cleanStr,1);
}
}
""")
reducerUni = Code("""
function(key, value) {
return Array.sum(value);
}
""")
unigram_counter = dbc.map_reduce(mapperUni, reducerUni, 'uniCounter')
unigram_list = list(db.uniCounter.find().sort('value', -1).limit(10))
for uni in unigram_list:
print ('Unigram' + uni['_id'] + 'has' + str(uni['value']) + 'appearances')
def seventh_querry():
mapperBi = Code("""
function() {
var tempText = this.text;
var splitText = tempText.toString().split(" ");
for(i=0 ; i<splitText.length-1 ;i++){
punctText = splitText[i].trim();
punctText2 = splitText[i+1].trim();
var punctRem = punctText.replace(/[.,-\/#!$%\^&\*;:{}=\-_`~()]/g,"");
var punctRem2 = punctText2.replace(/[.,-\/#!$%\^&\*;:{}=\-_`~()]/g,"");
var firstStr = punctRem.replace(/\s{2,}/g," ");
var secStr = punctRem2.replace(/\s{2,}/g," ");
finalStr = (firstStr + ' ' + secStr).trim();
if (finalStr !== '')
emit(finalStr,1);
}
}
""")
reducerBi = Code("""
function(key, value) {
return Array.sum(value);
}
""")
bigram_counter = dbc.map_reduce(mapperBi, reducerBi, 'bigramCounter')
bigram_list = list(db.bigramCounter.find().sort('value', -1).limit(10))
for bigrams in bigram_list:
print ('Bigram' + bigrams['_id'] + 'has' + str(bigrams['value']) + 'appearances')
def eight_querry(data):
sum_of_hashes_per_text = 0
for row in data:
if 'text' in row:
sum_of_hashes_per_text += str(row['text']).count('#')
average_hashes_size = sum_of_hashes_per_text / db.things.count()
return average_hashes_size
def ninth_querry():
mapperMap = Code("""
function() {
var ukCenterLat = '54.749991';
var ukCenterLng = '-3.867188';
var currentLng = this.geo_lng;
var currentLat = this.geo_lat;
var loc = "";
if (currentLng < ukCenterLng && currentLat >= ukCenterLat) {
loc = "North-West";
}else if(currentLng < ukCenterLng && currentLat < ukCenterLat){
loc = "South-West";
}else if (currentLng >= ukCenterLng && currentLat >= ukCenterLat) {
loc = "North-East";
}else if (currentLng >= ukCenterLng && currentLat < ukCenterLat){
loc = "South-East";
}
emit(loc, 1);
}
""")
reducerMap = Code("""
function(key, value) {
return Array.sum(value);
}
""")
LocationCounter = dbc.map_reduce(mapperMap, reducerMap, 'geoLocDistr')
topLocation = db.geoLocDistr.find().sort('value', -1).limit(1)
print('Most of the messages were published in' + topLocation[0]['_id'] + ' with ' + str(
topLocation[0]['value']) + ' tweets')
ans=True
while ans:
print("""
1.How many unique users are there?
2.How many tweets (%) did the top 10 users (measured by the number of messages) publish?
3.What was the earliest and latest date (YYYY-MM-DD HH:MM:SS) that a message was published?
4.What is the mean time delta between all messages?
5.What is the mean length of a message?
6.What are the 10 most common unigram within the messages?
7.What are the 10 most common bigram within the messages?
8.What is the average number of hashtags (#) used within a message?
            9.In which part of the UK were most of the messages published?
            10.Exit/Quit
""")
ans = raw_input("What would you like to do? ")
if ans == "1":
print "The summary of all unique users is: ", first_querry()
elif ans == "2":
print("The percentage of the ALL messages of top ten user"), second_querry(), "%",
elif ans == "3":
print"The last message published on:", third_querry()[0]
print"The earliest message published on:", third_querry()[1]
elif ans == "4":
print"The mean time delta between all messages is :", fourth_querry()
elif ans == "5":
print"The mean length of the messages is :", fifth_querry(data)
elif ans == "6":
print"The 10 most common unigrams within the messages are:", sixth_querry()
elif ans == "7":
print"The 10 most common bigrams within the messages are:", seventh_querry()
elif ans == "8":
print"The average number of hashtags (#) used within a message is:", eight_querry(data)
elif ans == "9":
ninth_querry()
elif ans == "10":
print("\n Goodbye")
ans = None
else:
print("\n Not Valid Choice Try again")
| 30.11588 | 110 | 0.584865 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,612 | 0.51475 |
19c9e0f683fb12bcf45633873b78ecba612bb09f | 7,399 | py | Python | theseus/util/serialize.py | shiplift/theseus | 9324d67e6e0c6b93a7734a5531838c5a909a1424 | [
"0BSD"
] | null | null | null | theseus/util/serialize.py | shiplift/theseus | 9324d67e6e0c6b93a7734a5531838c5a909a1424 | [
"0BSD"
] | null | null | null | theseus/util/serialize.py | shiplift/theseus | 9324d67e6e0c6b93a7734a5531838c5a909a1424 | [
"0BSD"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
serialize
provide means to persist and recreate the currently known
set of W_Tags and all shapes and transformations reachable
from there.
The rmarshal modules is used for serialization; the format is
marshal_proto = (
int, # number of shapes
[ # shape list
( # a shape
int, # id
(str, int), # tag
[int], # structure: list of id's
{ # _hist
(int, int) : # index, id
int # count
},
{ # transformation_rules
(int, int) : # index, id
int # id
}
)
],
{
(str, int) : # name arity
int #id
}
)
The serialized tree is written to a '.docked' files
"""
import os.path
from rpython.rlib.streamio import open_file_as_stream
from rpython.rlib.rmarshal import get_marshaller, get_unmarshaller
from rpython.rlib.debug import debug_start, debug_stop, debug_print
from theseus.model import W_Tag
from theseus.shape import in_storage_shape, CompoundShape
marshal_proto = (
int, # number of shapes
[ # shape list
( # a shape
int, # id
(str, int), # tag
[int], # structure: list of id's
{ # _hist
(int, int) : # index, id
int # count
},
{ # transformation_rules
(int, int) : # index, id
int # id
}
)
],
{
(str, int) : # name arity
int #id
}
)
marshaller = get_marshaller(marshal_proto)
unmarshaller = get_unmarshaller(marshal_proto)
def punch_shape(s, registry):
"""
Punch a shape to a tuple for marshalling.
See slurp_shapes, configure_shapes for inverse.
Format is
( # a shape
int, # id
(str, int), # tag
[int], # structure: list of id's
{ # _hist
(int, int) : # index, id
int # count
},
{ # transformation_rules
(int, int) : # index, id
int # id
}
)
"""
if s == in_storage_shape:
return (0, ('', 0), [], {}, {})
else:
assert isinstance(s, CompoundShape)
my_index = registry.index(s)
hist = {}
for (index, shape), count in s._hist.items():
shape_id = registry.index(shape)
hist[(index, shape_id)] = count
trans = {}
for (index, shape), to_shape in s.transformation_rules.items():
shape_id = registry.index(shape)
to_shape_id = registry.index(to_shape)
trans[(index, registry.index(shape))] = registry.index(to_shape)
punchee = (
registry.index(s),
(s._tag.name, s._tag.arity()),
[registry.index(subshape) for subshape in s._structure],
hist,
trans
)
return punchee
def recreate_shape(shape_desc, tags, registry):
"""
Recreate a shape from its punched format; see punch_shape.
Does not handle history and transformations.
See configure_shape(s).
"""
id, tag, structure_ids = shape_desc
structure = [None] * len(structure_ids)
for structure_index, sub_id in enumerate(structure_ids):
assert sub_id < id
subshape = registry[sub_id]
assert subshape is not None
structure[structure_index] = subshape
return CompoundShape(tags[tag], structure)
def configure_shape(shape, hist, trans, registry):
"""
Reconfigure a shape from its punched format; see punch_shape.
Does _only_ handle history and transformations.
See configure_shapes.
"""
assert isinstance(shape, CompoundShape)
shape._hist = {}
for (index, s_id), count in hist.items():
k = (index, registry[s_id])
shape._hist[k] = count
shape.transformation_rules = {}
for (index, s_id), to_s_id in trans.items():
k = (index, registry[s_id])
shape.transformation_rules[k] = registry[to_s_id]
def configure_shapes(shapes, registry):
"""
Reconfigure all shapes.
Does _only_ handle history and transformations.
See configure_shapes.
"""
for id, _tag, _structure_ids, hist, trans in shapes:
if id == 0: continue # in_storage_shape, no configure
configure_shape(registry[id], hist, trans, registry)
def slurp_registry(shapes, registry, tags_slurp, tags):
"""
Slurp all shapes from their punched format (see punch_shape)
not including history or transformation
"""
known_ids = [0]
for default_id in tags_slurp.values():
known_ids.append(default_id)
for id, tag, structure_ids, _hist, _trans in shapes:
if id in known_ids: continue
assert registry[id] is None
registry[id] = recreate_shape((id, tag, structure_ids), tags, registry)
def punch_tags(tags):
"""
Punch all tags into marshallable format:
(
int, # number of shapes
[ # shape list
],
{
(str, int) : # name arity
int #id
}
)
"""
reg = [in_storage_shape] + CompoundShape._shapes
punch_reg = [punch_shape(s, reg) for s in reg]
res = {}
for key, value in tags.items():
res[key] = reg.index(value.default_shape)
return (len(punch_reg), punch_reg, res)
def slurp_tags(un_tags):
"""
Slurp all tags from their punched format (see punch_tag).
Recursively slurps shapes and then configures them.
"""
num_shapes, shapes_slurp, tags_slurp = un_tags
registry = [None] * num_shapes
registry[0] = in_storage_shape
tags = {}
for (name, arity), default_id in tags_slurp.items():
tag = W_Tag(name, arity)
tags[(name, arity)] = tag
registry[default_id] = tag.default_shape
slurp_registry(shapes_slurp, registry, tags_slurp, tags)
configure_shapes(shapes_slurp, registry)
return tags
def come_up(basename):
"""
Bring up previously marshalled Tags, shapes and transformations
from '.docked' file un-marshalling, slurping and replacement of
current Tags.
"""
from theseus.shape import CompoundShape
# later
# from os import stat
# statres = stat(path)
debug_start("theseus-come-up")
path = basename + '.docked'
if not os.path.exists(path):
return
try:
f = open_file_as_stream(path, buffering=0)
except OSError as e:
os.write(2, "Error(come_up)%s -- %s\n" % (os.strerror(e.errno), path))
return
try:
res = unmarshaller(f.readall())
finally:
f.close()
del CompoundShape._shapes[:]
W_Tag.tags.clear()
new_tags = slurp_tags(res)
for key, value in new_tags.items():
W_Tag.tags[key] = value
debug_stop("theseus-come-up")
def settle(basename):
"""
Settle Tags, shapes and transformations to a '.docked' file
punching and marshalling all current Tags.
"""
debug_start("theseus-settle")
path = basename + '.docked'
buf = []
marshaller(buf, punch_tags(W_Tag.tags))
try:
f = open_file_as_stream(path, mode="w", buffering=0)
except OSError as e:
os.write(2, "Error(settle)%s -- %s\n" % (os.strerror(e.errno), path))
return
try:
f.write(''.join(buf))
finally:
f.close()
debug_stop("theseus-settle")
| 27.403704 | 79 | 0.592783 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,814 | 0.380322 |
19cc7f391c49230cd25af4f7949e261ca27ffe2b | 1,359 | py | Python | external_scripts/run2.py | AAS97/tokenizRE | 0186a2b533edaa0045b16b0b111b9637248e5046 | [
"MIT"
] | null | null | null | external_scripts/run2.py | AAS97/tokenizRE | 0186a2b533edaa0045b16b0b111b9637248e5046 | [
"MIT"
] | null | null | null | external_scripts/run2.py | AAS97/tokenizRE | 0186a2b533edaa0045b16b0b111b9637248e5046 | [
"MIT"
] | null | null | null | from web3 import Web3, HTTPProvider
import json
import os
w3 = Web3(HTTPProvider("http://127.0.0.1:7545",
request_kwargs={'timeout': 60}))
print(f"Web3 is connected : {w3.isConnected()}")
accounts = w3.eth.accounts
# ------------------------------- get contract ------------------------------- #
abi_path = "./vapp/src/contracts/"
with open(os.path.join(abi_path, 'TokenHolderPayer.json'), "r") as file:
property_contract_compiled = json.load(file)
property_contract_abi = property_contract_compiled['abi']
contract_address = "0xE5972821D1218120C4E98986A3eEc997931690b4"
property_contract = w3.eth.contract(address=contract_address, abi=property_contract_abi)
# ------------------- buy some token from realestate agent ------------------- #
amount = 500
# Allow token to be sent
property_contract.functions.increaseAllowance(accounts[1], amount).transact({'from':accounts[0], 'gas': 420000, 'gasPrice': 21000})
balance = property_contract.functions.balanceOf(accounts[1]).call()
print(f"initial balance {balance}")
tx_hash = property_contract.functions.transferFrom(accounts[0], accounts[1], 500).transact({'from':accounts[1], 'gas': 420000, 'gasPrice': 21000})
receipt = w3.eth.waitForTransactionReceipt(tx_hash)
balance = property_contract.functions.balanceOf(accounts[1]).call()
print(f"final balance {balance}")
| 37.75 | 146 | 0.693893 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 451 | 0.331862 |
19cc949ad53b4cdcbc1b975b94608f7737a43f64 | 825 | py | Python | main.py | cltl-students/hamersma-agression-causes | 11cbfd94031a0a3c84a27afa20d8a539acdab609 | [
"MIT"
] | null | null | null | main.py | cltl-students/hamersma-agression-causes | 11cbfd94031a0a3c84a27afa20d8a539acdab609 | [
"MIT"
] | null | null | null | main.py | cltl-students/hamersma-agression-causes | 11cbfd94031a0a3c84a27afa20d8a539acdab609 | [
"MIT"
] | null | null | null | from preprocessing import preprocess
from approach1_rulebased import get_predictions_rulebased
from approach2_machine_learning import get_predictions_ml
from bertopic_clustering import cluster_precursors
def main():
'''Main function to use from commandline, preprocess input to generate embeddings, detect agression clauses using
provided approach, extract features and labels from training and features from input data, trains a model and
classifies test data using the trained model, evaluates predictions and goldlabels from input'''
inputfile = 'sample_input.xls'
preprocess(inputfile)
get_predictions_rulebased()
get_predictions_ml()
### only clusters with enough data, else everything in outlier cluster
cluster_precursors()
if __name__ == '__main__':
main() | 43.421053 | 118 | 0.778182 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 429 | 0.52 |
19cd9765a8c1a72e36854304f427fda7349e31d0 | 8,985 | pyw | Python | 12th project cs/Gui pages.pyw | Jatin-Ya/ChatApp-12th-project- | 77ced9b18205728334f4370fbce8d74687bc5373 | [
"Apache-2.0"
] | null | null | null | 12th project cs/Gui pages.pyw | Jatin-Ya/ChatApp-12th-project- | 77ced9b18205728334f4370fbce8d74687bc5373 | [
"Apache-2.0"
] | null | null | null | 12th project cs/Gui pages.pyw | Jatin-Ya/ChatApp-12th-project- | 77ced9b18205728334f4370fbce8d74687bc5373 | [
"Apache-2.0"
] | null | null | null | from tkinter import *
import threading
import sql_manager as ss
def temp(er,s):
er.destroy()
s.sperson()
def temp2(er,s):
er.destroy()
s.w.destroy()
def close_w(self):
self.w.destroy()
class GUI:
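    """Tkinter chat client: splash screen -> login -> receiver selection -> chat window.

    Messages are stored and fetched through sql_manager (ss); the chat window
    polls for new incoming messages once per second on a background thread.
    """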
def __init__(self):
self.w=Tk()
p1 = PhotoImage(file = 'logo.png')
self.w.iconphoto(True, p1)
self.w.withdraw()
self.welcome=Toplevel()
self.welcome.title("WELCOME")
self.welcome.configure(width=400,height=400)
photo= PhotoImage(file = "logo.png")
l=Label(self.welcome,image=photo)
l.place(relheight=0.8,relwidth=1)
lb=Label(self.welcome,text = "Welcome to CHATROOM \n -by JATIN YADAV",font="ALGERIAN 20 bold")
lb.place(relheight=0.2,relwidth=1,rely=0.8)
thread1=threading.Thread(target=self.y)
thread1.start()
self.login=Toplevel()
self.login.title("Login")
self.login.resizable(width=False,height=False)
self.login.configure(width=400,height=400)
self.t=Label(self.login,text = "Login to continue",font="Arial 20 bold")
self.t.place(relheight=0.15,relx=0.2,rely=0.001)
self.uidlabel=Label(self.login,text="UID : ",font="Helvetica 12")
self.uidlabel.place(relheight=0.2,relx=0.1,rely=0.1)
self.uidentry=Entry(self.login,font="Helvetica 14")
self.uidentry.place(relheight=0.1,relwidth=0.5,relx=0.4,rely=0.15)
self.passwordlabel=Label(self.login,text="PASSWORD : ",font="Helvetica 12")
self.passwordlabel.place(relheight=0.2,relx=0.1,rely=0.26)
self.passwordentry=Entry(self.login,font="Helvetica 14")
self.passwordentry.place(relheight=0.1,relwidth=0.5,relx=0.4,rely=0.3)
self.next = Button(self.login,text = "LOGIN",font = "Helvetica 14 bold",bg="red", \
command = lambda: self.Next(self.uidentry.get(),self.passwordentry.get()))
self.next.place(relheight=0.2,relwidth=0.6,relx=0.25,rely=0.6)
self.login.protocol("WM_DELETE_WINDOW",lambda : close_w(self))
self.login.withdraw()
self.w.mainloop()
def y(self):
def fo():
self.welcome.destroy()
self.login.deiconify()
WAIT_TIME_SECONDS = 3
ticker = threading.Event()
if not ticker.wait(WAIT_TIME_SECONDS):
fo()
def Next(self,uid,password):
self.login.destroy()
self.uid=uid
self.password=password
c=ss.entrycheck(uid,password)
if c:
self.sperson()
else:
err2=Toplevel()
err2.title("ERROR")
err2.resizable(width=False,height=False)
err2.configure(width=800,height=200)
e=Label(err2,text = "ERROR \n YOUR DATA DOESN'T EXIST",font="Arial 30 bold")
e.place(relwidth=1,relheight=0.8)
ext2 = Button(err2,text ="EXIT",font = "Helvetica 14 bold",bg="red", \
command=lambda : temp2(err2,self))
ext2.place(relheight=0.2,relwidth=1,rely=0.8)
err2.protocol("WM_DELETE_WINDOW",lambda : close_w(self))
def sperson(self):
self.sp=Toplevel()
self.sp.title("RECEIVER DETAILS")
self.sp.resizable(width=False,height=False)
self.sp.configure(width=400,height=400)
self.st=Label(self.sp,text = "Enter receivers details",font="Arial 20 bold")
self.st.place(relheight=0.15,relx=0.2,rely=0.001)
self.uidlabel2=Label(self.sp,text="UID : ",font="Helvetica 12")
self.uidlabel2.place(relheight=0.2,relx=0.1,rely=0.1)
self.uidentry2=Entry(self.sp,font="Helvetica 14")
self.uidentry2.place(relheight=0.1,relwidth=0.5,relx=0.4,rely=0.15)
self.next2 = Button(self.sp,text = "NEXT",font = "Helvetica 14 bold",bg="red", \
command = lambda: self.Next2(self.uidentry2.get()))
self.next2.place(relheight=0.2,relwidth=0.6,relx=0.25,rely=0.6)
self.sp.protocol("WM_DELETE_WINDOW",lambda : close_w(self))
def Next2(self,uid):
self.sp.destroy()
self.uid2=uid
c=ss.check2(uid)
if c:
self.layout()
else:
err2=Toplevel()
err2.title("ERROR")
err2.resizable(width=False,height=False)
err2.configure(width=800,height=200)
e=Label(err2,text = "ERROR \n THIS USER DOESN'T EXIST",font="Arial 30 bold")
e.place(relwidth=1,relheight=0.8)
ext2 = Button(err2,text ="BACK",font = "Helvetica 14 bold",bg="red", \
command=lambda : temp(err2,self))
ext2.place(relheight=0.2,relwidth=1,rely=0.8)
err2.protocol("WM_DELETE_WINDOW",lambda : close_w(self))
def layout(self):
self.sname=ss.fetch_name(self.uid)
self.rname=ss.fetch_name(self.uid2)
self.w.deiconify()
self.w.title("CHATROOM | JATIN YADAV")
self.w.configure(width=800,height=750,bg="gray27")
self.Headframe=Frame(self.w, width=800,height=37.5,bg="gray27")
self.Headframe.place(relwidth=1,relx=0,rely=0)
sender_label=Label(self.Headframe,text="SENDER : "+self.sname,width=150, \
font="arial 16 bold",height=37,bg="SteelBlue3")
sender_label.place(relx=0.01,rely=0.01,relheight=0.9,relwidth=0.3)
        receiver_label=Label(self.Headframe,text="RECEIVER : "+self.rname,width=150, \
                             font="arial 16 bold",height=37,bg="SteelBlue3")
        receiver_label.place(relx=0.65,rely=0.01,relheight=1,relwidth=0.3)
self.historybutton=Button(self.w,text="CHAT HISTORY",font="arial 16 bold", \
bg="lime green",command=lambda : self.history())
self.historybutton.place(relheight=0.04,relwidth=0.4,relx=0.25,rely=0.06)
self.textconsole=Text(self.w,width=30,height=5,bg="black",fg="white", \
font="Courier 14")
self.textconsole.place(relwidth=0.95,relheight=0.7,rely=0.1)
scrollbar=Scrollbar(self.textconsole)
scrollbar.place(relheight=1,relx=0.97)
scrollbar.config(command=self.textconsole.yview)
self.bottomframe=Frame(self.w,width=800,height=140,bg="gray27")
self.bottomframe.place(relwidth=1,rely=0.81)
self.msgtxt=Text(self.bottomframe,width=30,height=5,bg="aquamarine",font="arial 14")
self.msgtxt.place(relwidth=0.7,relheight=0.98,rely=0.01,relx=0.01)
self.sendbutton=Button(self.bottomframe,text="SEND",font="arial 16 bold", \
bg="lime green",command=lambda : self.sendmsg(self.msgtxt.get("0.1","end-1c")))
self.sendbutton.place(relheight=0.9,relwidth=0.2,relx=0.75,rely=0.01)
self.textconsole.config(cursor = "arrow")
self.textconsole.config(state='disable')
thread=threading.Thread(target=self.x)
thread.start()
def sndbutton(self,msg):
snd=threading.Thread(target=self.sendmsg,args=(msg,))
snd.start()
def x(self):
def foo():
self.recmsg()
WAIT_TIME_SECONDS = 1
ticker = threading.Event()
while not ticker.wait(WAIT_TIME_SECONDS):
foo()
def sendmsg(self,msg):
self.textconsole.config(state='normal')
self.textconsole.insert(END,self.sname+"->"+msg+"\n")
self.textconsole.config(state='disable')
self.msgtxt.delete('1.0',END)
ss.postmsg(msg)
def recmsg(self):
msglist=ss.getmsg()
if len(msglist)!=0:
self.textconsole.config(state='normal')
for i in msglist:
self.textconsole.insert(END,self.rname+"->"+i+"\n")
self.textconsole.config(state='disable')
else :
pass
def history(self):
self.chathist=Toplevel()
self.chathist.title("CHAT HISTORY")
self.chathist.configure(width=400,height=400)
messages_list=ss.getmessages()
textconsole=Text(self.chathist,width=30,height=5,bg="black",fg="white")
textconsole.place(relwidth=0.95,relheight=0.7,rely=0.1)
for i in messages_list:
textconsole.insert(END,ss.fetch_name(i[0])+"->"+i[1]+"\n")
textconsole.config(state='disable')
textconsole.config(cursor = "arrow")
scrollbar=Scrollbar(textconsole)
scrollbar.place(relheight=1,relx=0.97)
scrollbar.config(command=textconsole.yview)
g=GUI()
| 35.654762 | 111 | 0.575292 | 8,720 | 0.970506 | 0 | 0 | 0 | 0 | 0 | 0 | 987 | 0.10985 |
19cea24c1060f2d6ff7113c23c57266d177697db | 1,528 | py | Python | 19/19a.py | jamOne-/adventofcode2018 | d51c01578ae7e4f30824c4f6ace66958491c1ed4 | [
"MIT"
] | null | null | null | 19/19a.py | jamOne-/adventofcode2018 | d51c01578ae7e4f30824c4f6ace66958491c1ed4 | [
"MIT"
] | null | null | null | 19/19a.py | jamOne-/adventofcode2018 | d51c01578ae7e4f30824c4f6ace66958491c1ed4 | [
"MIT"
] | null | null | null | import sys
OPERATIONS = {
'addr': lambda a, b, c, registers: registers[a] + registers[b],
'addi': lambda a, b, c, registers: registers[a] + b,
'mulr': lambda a, b, c, registers: registers[a] * registers[b],
'muli': lambda a, b, c, registers: registers[a] * b,
'banr': lambda a, b, c, registers: registers[a] & registers[b],
'bani': lambda a, b, c, registers: registers[a] & b,
'borr': lambda a, b, c, registers: registers[a] | registers[b],
'bori': lambda a, b, c, registers: registers[a] | b,
'setr': lambda a, b, c, registers: registers[a],
'seti': lambda a, b, c, registers: a,
    'gtir': lambda a, b, c, registers: 1 if a > registers[b] else 0,
'gtri': lambda a, b, c, registers: 1 if registers[a] > b else 0,
'gtrr': lambda a, b, c, registers: 1 if registers[a] > registers[b] else 0,
'eqir': lambda a, b, c, registers: 1 if a == registers[b] else 0,
'eqri': lambda a, b, c, registers: 1 if registers[a] == b else 0,
'eqrr': lambda a, b, c, registers: 1 if registers[a] == registers[b] else 0
}
def solve(puzzle_input):
lines = [line.strip() for line in puzzle_input]
ip_register = int(lines[0].split(' ')[1])
instructions = lines[1:]
registers = [0] * 6
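    # registers[ip_register] doubles as the instruction pointer; run until it
    # points outside the program, then register 0 holds the answer.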
while registers[ip_register] < len(instructions):
instruction = instructions[registers[ip_register]]
op_code, *abc = instruction.split(' ')
a, b, c = tuple(map(int, abc))
registers[c] = OPERATIONS[op_code](a, b, c, registers)
registers[ip_register] += 1
return registers[0]
print(solve(sys.stdin))
| 36.380952 | 77 | 0.633508 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 102 | 0.066754 |
19d1e57d2a97ef66002ffa2d6966b97f5f533bee | 1,878 | py | Python | examples/websocket_echo_server.py | HMXHIU/VeryPowerfulAgents | 06abd52776aeaf701637533f760176459c9c361c | [
"MIT"
] | 2 | 2021-11-30T16:14:01.000Z | 2022-03-04T09:20:51.000Z | examples/websocket_echo_server.py | HMXHIU/VeryPowerfulAgents | 06abd52776aeaf701637533f760176459c9c361c | [
"MIT"
] | 8 | 2021-02-10T15:43:49.000Z | 2021-02-10T16:00:16.000Z | examples/websocket_echo_server.py | HMXHIU/VeryPowerfulAgents | 06abd52776aeaf701637533f760176459c9c361c | [
"MIT"
] | 1 | 2021-01-01T12:31:48.000Z | 2021-01-01T12:31:48.000Z | from aiohttp import web
from agents import Agent
class WebServer(Agent):
html = """
<!DOCTYPE html>
<html>
<head>
<title>WebSocket Echo</title>
</head>
<body>
<h1>WebSocket Echo</h1>
<form action="" onsubmit="sendMessage(event)">
<input type="text" id="messageText" autocomplete="off"/>
<button>Send</button>
</form>
<ul id='messages'>
</ul>
<script>
var ws = new WebSocket("ws://{}:{}{}");
ws.onmessage = function(event) {{
var messages = document.getElementById('messages')
var message = document.createElement('li')
var content = document.createTextNode(event.data)
message.appendChild(content)
messages.appendChild(message)
}};
function sendMessage(event) {{
var input = document.getElementById("messageText")
ws.send(input.value)
input.value = ''
event.preventDefault()
}}
</script>
</body>
</html>
"""
def setup(self, host, port, route):
self.host = host
self.port = port
self.route = route
self.create_webserver(host, port)
self.create_route("GET", "/", self.echo)
self.rtx, self.connections = self.create_websocket(route)
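        # Echo behaviour: every message received on the websocket is pushed straight back out.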
self.disposables.append(self.rtx.subscribe(lambda msg: self.rtx.on_next(msg)))
async def echo(self, request):
return web.Response(
text=self.html.format(self.host, self.port, self.route),
content_type="text/html",
)
if __name__ == "__main__":
webserver = WebServer("127.0.0.1", 8080, "/ws")
| 30.786885 | 86 | 0.510117 | 1,744 | 0.928647 | 0 | 0 | 0 | 0 | 176 | 0.093717 | 1,208 | 0.643237 |
19d3c2532fdc242dd0fdaf80342fa01cfdf2a61d | 5,401 | py | Python | janaganana/tables.py | deshetti/janaganana | f29ced95fc9f8b98f77560d9afdbd999510dd497 | [
"MIT"
] | 11 | 2017-02-16T20:45:54.000Z | 2021-12-31T01:08:40.000Z | janaganana/tables.py | deshetti/janaganana | f29ced95fc9f8b98f77560d9afdbd999510dd497 | [
"MIT"
] | 18 | 2017-02-15T20:24:29.000Z | 2022-03-29T21:54:36.000Z | janaganana/tables.py | deshetti/janaganana | f29ced95fc9f8b98f77560d9afdbd999510dd497 | [
"MIT"
] | 13 | 2017-02-16T20:45:25.000Z | 2020-09-23T21:40:57.000Z | from wazimap.data.tables import FieldTable
# Define our tables so the data API can discover them.
# Household tables
FieldTable(['rural population'], universe='Population', table_per_level=False)
FieldTable(['area', 'sex'], universe='Population', table_per_level=False)
FieldTable(['census_year', 'measure'], universe='A2-Decadal Variation', table_per_level=False)
FieldTable(['census_year', 'sex_vis'], universe='VISUAL', table_per_level=False)
FieldTable(['area', 'sex', 'literacy'], universe='Population', table_per_level=False)
FieldTable(['area','village_town_comparison'], universe='A3APPENDIX', table_per_level=False)
FieldTable(['religion', 'area', 'sex'], universe='Religion', table_per_level=False)
FieldTable(['age', 'area', 'sex'], universe='Age', table_per_level=False)
FieldTable(['village_town_measures','area'], universe='A1-', table_per_level=False)
FieldTable(['education', 'area', 'sex'], universe='Education', table_per_level=False)
FieldTable(['houseless_population','area', 'sex'], universe='A7-Houseless', table_per_level=False)
FieldTable(['sc_houseless_population','area', 'sex'], universe='A8-SC_Houseless', table_per_level=False)
FieldTable(['st_houseless_population','area', 'sex'], universe='A9-ST_Houseless', table_per_level=False)
FieldTable(['village_measures','population_range'], universe='A3-Inhabited Villages', table_per_level=False)
FieldTable(['maritalstatus', 'area', 'sex'], universe='Relation', table_per_level=False)
FieldTable(['workertype','age_group','area','sex'], universe='B1-Workerstype', table_per_level=False)
FieldTable(['sc_workertype','age_group','area','sex'], universe='B1SC-Workerstype', table_per_level=False)
FieldTable(['st_workertype','age_group','area','sex'], universe='B1ST-Workerstype', table_per_level=False)
FieldTable(['workers', 'area', 'workerssex'], universe='Workers', table_per_level=False)
FieldTable(['workertype','education_level', 'area', 'sex'], universe='B3', table_per_level=False)
FieldTable(['education_level', 'area', 'sex_vis'], universe='VISUAL', table_per_level=False)
FieldTable(['sc_workertype','education_level', 'area', 'sex'], universe='B3SC', table_per_level=False)
FieldTable(['st_workertype','education_level', 'area', 'sex'], universe='B3ST', table_per_level=False)
FieldTable(['nonworkertype', 'age_group','area','sex'], universe='B13', table_per_level=False)
FieldTable(['nonworkertype_vis', 'age_group','area','sex'], universe='VISUAL', table_per_level=False)
FieldTable(['sc_nonworkertype', 'age_group','area','sex'], universe='B13SC', table_per_level=False)
FieldTable(['st_nonworkertype', 'age_group','area','sex'], universe='B13ST', table_per_level=False)
FieldTable(['religion','nonworkertype','age_group','area', 'sex'], universe='B14', table_per_level=False)
FieldTable(['religion','area', 'sex'], universe='C1', table_per_level=False)
FieldTable(['religious_community','area', 'sex'], universe='C1APPENDIX', table_per_level=False)
FieldTable(['age_group','marital_status','area', 'sex'], universe='C2', table_per_level=False)
FieldTable(['religion','marital_status','area', 'sex'], universe='C3', table_per_level=False)
FieldTable(['mother_tongue_vis','area', 'sex'], universe='VISUAL', table_per_level=False)
FieldTable(['disability','age_group','area', 'sex'], universe='c20', table_per_level=False)
FieldTable(['mother_tongue','area', 'sex'], universe='c16', table_per_level=False)
FieldTable(['educational_institution','age','area', 'sex'], universe='c10', table_per_level=False)
FieldTable(['sc_educational_institution','age','area', 'sex'], universe='c10sc', table_per_level=False)
FieldTable(['st_educational_institution','age','area', 'sex'], universe='c10st', table_per_level=False)
FieldTable(['economic_activity','age','area', 'sex'], universe='c12', table_per_level=False)
FieldTable(['marriage_duration','age','area', 'sex'], universe='c4', table_per_level=False)
FieldTable(['parity','age','area'], universe='F1', table_per_level=False)
FieldTable(['sc_parity','age','area'], universe='F1sc', table_per_level=False)
FieldTable(['st_parity','age','area'], universe='F1st', table_per_level=False)
FieldTable(['parity_vis','age','area'], universe='VISUAL', table_per_level=False)
FieldTable(['surviving_children','age','area'], universe='F5', table_per_level=False)
FieldTable(['sc_surviving_children','age','area'], universe='F5SC', table_per_level=False)
FieldTable(['st_surviving_children','age','area'], universe='F5ST', table_per_level=False)
FieldTable(['household_size','area'], universe='HH1', table_per_level=False)
FieldTable(['household_size_vis','area'], universe='VISUAL', table_per_level=False)
FieldTable(['sc_household_size','area'], universe='HH1SC', table_per_level=False)
FieldTable(['st_household_size','area'], universe='HH1ST', table_per_level=False)
FieldTable(['household_workers','workers_in_household','area'], universe='HH11', table_per_level=False)
FieldTable(['household_size','available_for_work','area'], universe='HH12', table_per_level=False)
FieldTable(['sevenyearsandabove','literates_in_household','area'], universe='HH08', table_per_level=False)
FieldTable(['age','area', 'head','household_marital_status'], universe='HH06', table_per_level=False)
FieldTable(['houseless_households','area'], universe='HH02', table_per_level=False)
FieldTable(['households_size','aged_persons','area'], universe='HH05', table_per_level=False)
| 66.679012 | 108 | 0.755601 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,377 | 0.440104 |
19d4df790639614b567c8829dbce219210c26642 | 585 | py | Python | src/weekly-reset.py | SlimeeGameS/VirginityBot | a1745893f21a16112bbf775fb2aff199c14dbbbb | [
"CC0-1.0"
] | null | null | null | src/weekly-reset.py | SlimeeGameS/VirginityBot | a1745893f21a16112bbf775fb2aff199c14dbbbb | [
"CC0-1.0"
] | 14 | 2020-03-26T01:02:31.000Z | 2021-03-24T23:48:44.000Z | src/weekly-reset.py | SlimeeGameS/VirginityBot | a1745893f21a16112bbf775fb2aff199c14dbbbb | [
"CC0-1.0"
] | 2 | 2020-08-09T19:08:41.000Z | 2021-05-12T17:44:28.000Z | import os
import asyncio
import logging
from pony.orm import *
import logger
from database import start_orm, get_biggest_virgin, Guild, Virgin
logger = logging.getLogger('virginity-bot')
async def reset_weekly_virginity():
with db_session:
virgins = Virgin.select()
for virgin in virgins:
virgin.total_vc_time = 0
virgin.virginity_score = 0
commit()
async def main():
logger.info('Running weekly reset')
start_orm()
await reset_weekly_virginity()
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
| 18.870968 | 65 | 0.729915 | 0 | 0 | 0 | 0 | 0 | 0 | 292 | 0.499145 | 47 | 0.080342 |
19d525875da360fb20fb2929a08fff78176398d0 | 1,165 | py | Python | hardhat/recipes/racket.py | stangelandcl/hardhat | 1ad0c5dec16728c0243023acb9594f435ef18f9c | [
"MIT"
] | null | null | null | hardhat/recipes/racket.py | stangelandcl/hardhat | 1ad0c5dec16728c0243023acb9594f435ef18f9c | [
"MIT"
] | null | null | null | hardhat/recipes/racket.py | stangelandcl/hardhat | 1ad0c5dec16728c0243023acb9594f435ef18f9c | [
"MIT"
] | null | null | null | import os
import shutil
from .base import GnuRecipe
class RacketRecipe(GnuRecipe):
def __init__(self, *args, **kwargs):
super(RacketRecipe, self).__init__(*args, **kwargs)
self.sha256 = 'bf2bce50b02c626666a8d2093638893e' \
'8beb8b2a19cdd43efa151a686c88edcf'
self.depends = ['libffi']
self.name = 'racket'
self.version = '6.6'
self.url = 'http://mirror.racket-lang.org/installers/$version/' \
'racket-$version-src.tgz'
self.configure_args = self.shell_args + [
'../src/configure',
'--prefix=%s' % self.prefix_dir]
# -O3 generates SIGSEGVs
self.environment['CFLAGS'] = '-O2'
self.environment['CXXFLAGS'] = '-O2'
def patch(self):
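        # Configure and build out-of-tree from a dedicated build/ directory
        # (configure_args points back at ../src/configure).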
self.directory = os.path.join(self.directory, 'build')
os.makedirs(self.directory)
def clean(self):
super(RacketRecipe, self).clean()
dirs = ['include', 'etc', 'share/doc', 'share', 'lib']
for dir in dirs:
d = os.path.join(self.prefix_dir, dir, 'racket')
if os.path.exists(d):
shutil.rmtree(d)
| 31.486486 | 73 | 0.572532 | 1,110 | 0.95279 | 0 | 0 | 0 | 0 | 0 | 0 | 301 | 0.258369 |
19d5619a8ce652fe7933c1843f9585227eb325de | 3,257 | py | Python | lichess-gist.py | swimmy4days/lichess-gist | b70e605345f789e032291253df506384ccbaa270 | [
"MIT"
] | null | null | null | lichess-gist.py | swimmy4days/lichess-gist | b70e605345f789e032291253df506384ccbaa270 | [
"MIT"
] | null | null | null | lichess-gist.py | swimmy4days/lichess-gist | b70e605345f789e032291253df506384ccbaa270 | [
"MIT"
] | null | null | null | import os
import sys
import berserk
from github import Github, InputFileContent, Gist
SEPARATOR = "."
PADDING = {"puzzle": 0, "crazyhouse": 0, "chess960": 0,
"kingOfTheHill": 0, "threeCheck": 2, "antichess": 0, "atomic": 0, "horde": 0, "racingKings": 0,
"ultraBullet": 0, "blitz": 1, "classical": 1, "rapid": 0, "bullet": 0, "correspondence": 3}
emojis = {"puzzle": "🧩", "crazyhouse": "🤪", "chess960": "9️⃣6️⃣0️⃣",
"kingOfTheHill": "👑", "threeCheck": "3️⃣", "antichess": "", "atomic": "⚛", "horde": "🐎", "racingKings": "🏁",
"ultraBullet": "🚅", "blitz": "⚡", "classical": "🏛", "rapid": "⏰", "bullet": "🚂", "correspondence": "🤼♂️"}
ENV_VAR_GIST_ID = "GIST_ID"
ENV_VAR_GITHUB_TOKEN = "GH_TOKEN"
ENV_VAR_LICHESS_USERNAME = "LICHESS_USERNAME"
REQUIRED_ENVS = [
ENV_VAR_GIST_ID,
ENV_VAR_GITHUB_TOKEN,
ENV_VAR_LICHESS_USERNAME
]
def check_vars() -> bool:
env_vars_absent = [
env
for env in REQUIRED_ENVS
if env not in os.environ or len(os.environ[env]) == 0
]
if env_vars_absent:
print(
f"Please define {env_vars_absent} in your github secrets. Aborting...")
return False
return True
def init() -> tuple:
gh_gist = Github(ENV_VAR_GITHUB_TOKEN).get_gist(ENV_VAR_GIST_ID)
lichess_acc = berserk.Client().users.get_public_data(ENV_VAR_LICHESS_USERNAME)
return (gh_gist, lichess_acc)
def get_rating(acc: dict) -> list:
ratings = []
for key in acc['perfs'].keys():
prov = '?'
try:
acc['perfs'][key]['prov']
except KeyError:
prov = ""
ratings.append((key, acc['perfs'][key]['rating'],
prov, acc['perfs'][key]['games']))
ratings.sort(key=lambda k: k[1], reverse=True)
return ratings
def formatted_line(variant: str, games: str, rating_prov: str, max_line_length: int) -> str:
separation = max_line_length - (
len(variant) + len(games) + len(rating_prov) + 4 # emojis and brackets
)
separator = f" {SEPARATOR * separation} "
return variant + f"({games})" + separator + rating_prov
def update_gist(gist: Gist, text: str) -> bool:
gist.edit(description="", files={list(gist.files.keys())[0]:
InputFileContent(content=text)})
def main():
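    # Resolve the required env vars, fetch Lichess ratings, format one line per
    # variant (sorted by rating) and write the result to the Gist.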
if not check_vars():
return
global ENV_VAR_GIST_ID, ENV_VAR_GITHUB_TOKEN, ENV_VAR_LICHESS_USERNAME
ENV_VAR_GIST_ID = os.environ[ENV_VAR_GIST_ID]
ENV_VAR_GITHUB_TOKEN = os.environ[ENV_VAR_GITHUB_TOKEN]
ENV_VAR_LICHESS_USERNAME = os.environ[ENV_VAR_LICHESS_USERNAME]
gist, lichess_acc = init()
rating = get_rating(lichess_acc)
    content = [formatted_line((emojis[line[0]] + line[0]), str(line[3]),
                               str(line[1]) + line[2] + " 📈", 52 + PADDING[line[0]]) for line in rating]
print("\n".join(content))
update_gist(gist, "\n".join(content))
if __name__ == "__main__":
# test with python lichess-gist.py test <gist> <github-token> <user>
if len(sys.argv) > 1:
os.environ[ENV_VAR_GIST_ID] = sys.argv[2]
os.environ[ENV_VAR_GITHUB_TOKEN] = sys.argv[3]
os.environ[ENV_VAR_LICHESS_USERNAME] = sys.argv[4]
main()
# %%
| 31.317308 | 118 | 0.612834 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 753 | 0.227149 |
19d5e02630a84a1866bbfe9f9deb571cc98a96cc | 951 | py | Python | alembic/versions/60c735df8d2f_.py | brouberol/grand-cedre | 05f18d1f8b7253ffa7fb5b33b30ceadcc93c4e93 | [
"BSD-3-Clause"
] | null | null | null | alembic/versions/60c735df8d2f_.py | brouberol/grand-cedre | 05f18d1f8b7253ffa7fb5b33b30ceadcc93c4e93 | [
"BSD-3-Clause"
] | 22 | 2019-09-03T20:08:42.000Z | 2022-03-11T23:58:02.000Z | alembic/versions/60c735df8d2f_.py | brouberol/grand-cedre | 05f18d1f8b7253ffa7fb5b33b30ceadcc93c4e93 | [
"BSD-3-Clause"
] | null | null | null | """empty message
Revision ID: 60c735df8d2f
Revises: 88bb7e12da60
Create Date: 2019-09-06 08:27:03.082097
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "60c735df8d2f"
down_revision = "88bb7e12da60"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column("invoices", sa.Column("payed_at", sa.Date(), nullable=True))
op.add_column("invoices", sa.Column("check_number", sa.String(), nullable=True))
op.add_column(
"invoices", sa.Column("wire_transfer_number", sa.String(), nullable=True)
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column("invoices", "wire_transfer_number")
op.drop_column("invoices", "check_number")
op.drop_column("invoices", "payed_at")
# ### end Alembic commands ###
| 27.171429 | 84 | 0.690852 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 512 | 0.538381 |
19d5e29e652c7abc55afdd0fed0c5112571018a1 | 3,640 | py | Python | python/genre_classifier.py | nscharrenberg/Aliran | 628de0476b8f8b413a6fdddf5392c590e8b27654 | [
"MIT"
] | null | null | null | python/genre_classifier.py | nscharrenberg/Aliran | 628de0476b8f8b413a6fdddf5392c590e8b27654 | [
"MIT"
] | null | null | null | python/genre_classifier.py | nscharrenberg/Aliran | 628de0476b8f8b413a6fdddf5392c590e8b27654 | [
"MIT"
] | null | null | null | import scipy.io.wavfile as wav
import numpy as np
import os
import pickle
import random
import operator
from python_speech_features import mfcc
dataset = []
training_set = []
test_set = []
# Get the distance between feature vectors
def distance(instance1, instance2, k):
mm1 = instance1[0]
cm1 = instance1[1]
mm2 = instance2[0]
cm2 = instance2[1]
dist = np.trace(np.dot(np.linalg.inv(cm2), cm1))
dist += (np.dot(np.dot((mm2 - mm1).transpose(), np.linalg.inv(cm2)), mm2 - mm1))
dist += np.log(np.linalg.det(cm2)) - np.log(np.linalg.det(cm1))
dist -= k
return dist
# Find Neighbors
def get_neighbors(training_dataset, instance, k):
distances = []
for i in range(len(training_dataset)):
dist = distance(training_dataset[i], instance, k) + distance(instance, training_dataset[i], k)
distances.append((training_dataset[i][2], dist))
distances.sort(key=operator.itemgetter(1))
neighbors = []
for i in range(k):
neighbors.append(distances[i][0])
return neighbors
# Identify the Nearest Neighbor (Genres)
def nearest_genre(neighbors):
class_vote = {}
for i in range(len(neighbors)):
res = neighbors[i]
if res in class_vote:
class_vote[res] += 1
else:
class_vote[res] = 1
sorted_vote = sorted(class_vote.items(), key=operator.itemgetter(1), reverse=True)
return sorted_vote[0][0]
# Model Evaluation to get the accuracy
def get_accuracy(temp_test_set, temp_predictions):
correct = 0
for i in range(len(temp_test_set)):
if temp_test_set[i][-1] == temp_predictions[i]:
correct += 1
return 1.0 * correct / len(temp_test_set)
# Extract features from the audio files and store them in a model file
def extract_features(filename):
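    # Each pickled record is (mean MFCC vector, MFCC covariance matrix, genre index).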
directory = "Data/genres_original/"
f = open(filename, "wb")
it = 0
for tempDir in os.listdir(directory):
it += 1
if it == 11:
break
for file in os.listdir(directory + tempDir):
try:
print(file)
(rate, sig) = wav.read(directory + tempDir + "/" + file)
mfcc_feat = mfcc(sig, rate, winlen=0.020, appendEnergy=False)
covariance = np.cov(np.matrix.transpose(mfcc_feat))
mean_matrix = mfcc_feat.mean(0)
feature = (mean_matrix, covariance, it)
pickle.dump(feature, f)
except EOFError:
f.close()
f.close()
# Load in the Dataset
def load_dataset(filename, split, tr_set, te_set):
with open(filename, "rb") as f:
while True:
try:
dataset.append(pickle.load(f))
except EOFError:
f.close()
break
for i in range(len(dataset)):
if random.random() < split:
tr_set.append(dataset[i])
else:
te_set.append(dataset[i])
if __name__ == '__main__':
print('Starting....')
local_filename = "dataset.aliran"
extracting = False
if extracting:
print('Extracting Features...')
print('Building Model...')
extract_features(local_filename)
print('Loading Dataset...')
load_dataset(local_filename, 0.66, training_set, test_set)
print('Making a prediction...')
print('(This may take a few minutes)')
predictions = []
for x in range(len(test_set)):
predictions.append(nearest_genre(get_neighbors(training_set, test_set[x], 5)))
accuracy = get_accuracy(test_set, predictions)
print('Prediction Accuracy is:')
print(accuracy)
| 26.376812 | 102 | 0.613462 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 444 | 0.121978 |
19d94ed3daa7c3c452d53a4b890d6a26c3139991 | 1,653 | py | Python | run.py | dkosilov/reconciler_anchor_salesforce | 5cf6a8ccaedce84e7dab6c32955c644ede0c6e07 | [
"Xnet",
"X11"
] | 1 | 2020-09-22T11:49:07.000Z | 2020-09-22T11:49:07.000Z | run.py | dkosilov/reconciler_anchor_salesforce | 5cf6a8ccaedce84e7dab6c32955c644ede0c6e07 | [
"Xnet",
"X11"
] | null | null | null | run.py | dkosilov/reconciler_anchor_salesforce | 5cf6a8ccaedce84e7dab6c32955c644ede0c6e07 | [
"Xnet",
"X11"
] | null | null | null | import argparse
from libs.data_model import AnchorNorthstarDataframe, SalesForceDataframe, \
AnchorSalesforceAccountsDataframe, AnchorSalesforceContactsDataframe
from libs.utils import save_dataframes_to_excel
parser = argparse.ArgumentParser(description='Reconcile accounts and contacts between Anchor and Salesforce')
parser.add_argument('-a', '--anchor-file', help='Path to Anchor Excel workbook', required=True)
parser.add_argument('-n', '--northstar-file', help='Path to Northstar Excel workbook', required=True)
parser.add_argument('-s', '--salesforce-file', help='Path to Salesforce Excel workbook', required=True)
parser.add_argument('-t', '--account-name-match-ratio-threshold', type=int,
help='Account names with specified (or above) similarity ratio will be used for joining Anchor and '
'Salesforce account data. Number between 0 and 100.', default=75)
parser.add_argument('-r', '--result-file',
help='Path to result Excel workbook. The file will have 2 spreadsheets for accounts and '
'contacts reconciliation', required=True)
args = parser.parse_args()
anchor_ns = AnchorNorthstarDataframe(args.anchor_file, args.northstar_file)
salesforce = SalesForceDataframe(args.salesforce_file)
anchor_sf_accounts = AnchorSalesforceAccountsDataframe(anchor_ns, salesforce, args.account_name_match_ratio_threshold)
anchor_sf_contacts = AnchorSalesforceContactsDataframe(anchor_ns, salesforce)
save_dataframes_to_excel(args.result_file, {'Accounts': anchor_sf_accounts.df, 'Contacts': anchor_sf_contacts.df},
wrap_text=False)
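# Example invocation (hypothetical file names, shown for illustration only):
#   python run.py -a anchor.xlsx -n northstar.xlsx -s salesforce.xlsx -t 80 -r reconciled.xlsx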
| 57 | 120 | 0.754991 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 564 | 0.341198 |
19db3143b0967735343ec7fb40012d028a989ea5 | 1,650 | py | Python | billrelease.py | arby36/BillAi | e5c10c35279a1669d218439671e03bc17acb7fdc | [
"MIT"
] | null | null | null | billrelease.py | arby36/BillAi | e5c10c35279a1669d218439671e03bc17acb7fdc | [
"MIT"
] | null | null | null | billrelease.py | arby36/BillAi | e5c10c35279a1669d218439671e03bc17acb7fdc | [
"MIT"
] | null | null | null |
def bill():
print("I am bill, please input your name")
name = str(raw_input())
print("Hi %s" % name)
print("Now input a command")
a = raw_input("Command line:")
a = a.lower()
if a == "":
print("You inputed nothing")
bill()
if a == "help":
print("The commands in my database are help, hello, do this * math problem, do this division math problem")
bill()
if a == "hello":
print("Hello %s!" % name)
bill()
if a == "do this * math problem":
print("Type no. 1")
b = int(raw_input("Please type an integer"))
print("Type no. 2")
c = int(raw_input("Please type an integer"))
print("Computing...")
d = b * c
print("The answer is %d" % d)
bill()
if a == "do this division math problem":
print("Type no. 1")
e = int(raw_input("Please type an integer"))
print("Type no. 2")
f = int(raw_input("Please type an integer"))
print("Computing...")
        g = e / f
print("The answer is %d" % g)
bill()
if a == "multiply my name":
        print(name * 100)
bill()
if a == "open database":
print("Openining database")
bill_database()
else:
print("That command is not in my database")
def bill_database():
print("Welcome to the bill Profile database, input your first name (Sorry, this command has been discontinued in the release version.")
a = str(raw_input("Enter Here:"))
a = a.lower()
print("Information for %s" % a)
a = a.lower()
bill()
bill() | 27.966102 | 139 | 0.527273 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 738 | 0.447273 |
19ddf831a5a3b46c86717f74ec094bf9d7bcc0cd | 757 | py | Python | homeworks/hw05/tests/q3b1.py | cwakamiya/ieor135 | 084490380f265225927d11b43d948c1206b0aab8 | [
"Apache-2.0"
] | 28 | 2020-06-15T23:53:36.000Z | 2022-03-19T09:27:02.000Z | homeworks/hw05/tests/q3b1.py | cwakamiya/ieor135 | 084490380f265225927d11b43d948c1206b0aab8 | [
"Apache-2.0"
] | 4 | 2020-06-24T22:20:31.000Z | 2022-02-28T01:37:36.000Z | homeworks/hw05/tests/q3b1.py | cwakamiya/ieor135 | 084490380f265225927d11b43d948c1206b0aab8 | [
"Apache-2.0"
] | 78 | 2020-06-19T09:41:01.000Z | 2022-02-05T00:13:29.000Z | test = { 'name': 'q3b1',
'points': 2,
'suites': [ { 'cases': [ { 'code': '>>> 4 <= '
"sum(list(X1.describe().loc['mean'])) "
'<= 9\n'
'True',
'hidden': False,
'locked': False},
{ 'code': '>>> len(X1) == 768\nTrue',
'hidden': False,
'locked': False}],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'}]}
| 47.3125 | 86 | 0.211361 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 213 | 0.281374 |
19e32a5576ac8d30a109ed4090fee43e0912beb9 | 3,050 | py | Python | scanpy/api/__init__.py | gioelelm/scanpy | 97391a0e7908b9644b2d6640c8e26d37bdc7811e | [
"BSD-3-Clause"
] | null | null | null | scanpy/api/__init__.py | gioelelm/scanpy | 97391a0e7908b9644b2d6640c8e26d37bdc7811e | [
"BSD-3-Clause"
] | null | null | null | scanpy/api/__init__.py | gioelelm/scanpy | 97391a0e7908b9644b2d6640c8e26d37bdc7811e | [
"BSD-3-Clause"
] | 1 | 2019-02-18T07:39:59.000Z | 2019-02-18T07:39:59.000Z | """Scanpy's high-level API provides an overview of all features relevant to pratical use::
import scanpy.api as sc
.. raw:: html
<h3>Preprocessing tools</h3>
Filtering of highly-variable genes, batch-effect correction, per-cell (UMI) normalization, preprocessing recipes.
.. raw:: html
<h4>Basic Preprocessing</h4>
.. autosummary::
:toctree: .
pp.filter_cells
pp.filter_genes
pp.filter_genes_dispersion
pp.log1p
pp.pca
pp.normalize_per_cell
pp.regress_out
pp.scale
pp.subsample
.. raw:: html
<h4>Recipes</h4>
.. autosummary::
:toctree: .
pp.recipe_zheng17
pp.recipe_weinreb16
.. raw:: html
    <h3>Machine Learning and Statistics tools</h3>
.. raw:: html
<h4>Visualization</h4>
.. autosummary::
:toctree: .
tl.pca
tl.tsne
tl.diffmap
tl.draw_graph
.. raw:: html
<h4>Branching trajectories and pseudotime, clustering, differential expression</h4>
.. autosummary::
:toctree: .
tl.aga
tl.louvain
tl.dpt
tl.rank_genes_groups
.. raw:: html
<h4>Simulations</h4>
.. autosummary::
:toctree: .
tl.sim
.. raw:: html
<h3>Generic methods</h3>
.. raw:: html
<h4>Reading and Writing</h4>
.. autosummary::
:toctree: .
read
write
read_10x_h5
.. raw:: html
<h4>Data Structures</h4>
.. autosummary::
:toctree: .
AnnData
DataGraph
.. raw:: html
<h3>Plotting</h3>
.. raw:: html
<h4>Generic plotting with AnnData</h4>
.. autosummary::
:toctree: .
pl.scatter
pl.violin
pl.ranking
.. raw:: html
<h4>Plotting tool results</h4>
Methods that extract and visualize tool-specific annotation in an AnnData object.
.. raw:: html
<h5>Visualization</h5>
.. autosummary::
:toctree: .
pl.pca
pl.pca_loadings
pl.pca_scatter
pl.pca_variance_ratio
pl.tsne
pl.diffmap
pl.draw_graph
.. raw:: html
<h5>Branching trajectories and pseudotime, clustering, differential expression</h5>
.. autosummary::
:toctree: .
pl.aga
pl.aga_graph
pl.aga_path
pl.louvain
pl.dpt
pl.dpt_scatter
pl.dpt_groups_pseudotime
pl.dpt_timeseries
pl.rank_genes_groups
pl.rank_genes_groups_violin
.. raw:: html
<h5>Simulations</h5>
.. autosummary::
:toctree: .
pl.sim
.. raw:: html
<h4>Builtin datasets</h4>
Simple functions that provide annotated datasets for benchmarking. See
`here <https://scanpy.readthedocs.io/en/latest/examples.html>`_ for extensive
documented tutorials and use cases.
All of these functions return an Annotated Data object.
.. autosummary::
:toctree: .
datasets.paul15
datasets.toggleswitch
datasets.krumsiek11
datasets.blobs
datasets.moignard15
"""
from .. import __version__
from .. import settings
from .. import logging
from . import tl
tools = tl
from . import pl
plotting = pl
from . import pp
preprocessing = pp
from ..readwrite import read, read_10x_h5, write, read_params, write_params
from . import datasets
from ..data_structs import AnnData, DataGraph
from .. import utils
| 14.95098 | 113 | 0.679016 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,712 | 0.88918 |
19e36b29ee592d089dc07f0b81f9a1312e103cce | 34,894 | py | Python | sw/EdgeBERT/transformers/src/transformers/modeling_highway_albert.py | yihuajack/EdgeBERT | a51ae7557187e3251f4b11bc13ef9cbd336019ff | [
"Apache-2.0"
] | 8 | 2021-11-01T01:38:04.000Z | 2022-03-20T16:03:39.000Z | sw/EdgeBERT/transformers/src/transformers/modeling_highway_albert.py | yihuajack/EdgeBERT | a51ae7557187e3251f4b11bc13ef9cbd336019ff | [
"Apache-2.0"
] | 1 | 2021-11-19T08:04:02.000Z | 2021-12-19T07:21:48.000Z | sw/EdgeBERT/transformers/src/transformers/modeling_highway_albert.py | yihuajack/EdgeBERT | a51ae7557187e3251f4b11bc13ef9cbd336019ff | [
"Apache-2.0"
] | 5 | 2021-11-19T07:52:44.000Z | 2022-02-10T08:23:19.000Z | import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from .modeling_albert import AlbertPreTrainedModel, AlbertLayerNorm, AlbertLayerGroup
from .modeling_bert import BertEmbeddings
from .modeling_highway_bert import BertPooler
import numpy as np
def entropy(x):
# x: torch.Tensor, logits BEFORE softmax
exp_x = torch.exp(x)
A = torch.sum(exp_x, dim=1) # sum of exp(x_i)
B = torch.sum(x*exp_x, dim=1) # sum of x_i * exp(x_i)
return torch.log(A) - B/A
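# Derivation of the line above: with softmax probabilities p_i = exp(x_i) / A, the Shannon
# entropy is H = -sum_i p_i * log(p_i) = -sum_i (exp(x_i)/A) * (x_i - log A) = log(A) - B/A,
# which is exactly the value returned.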
class AlbertEmbeddings(BertEmbeddings):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
#super(AlbertEmbeddings, self).__init__()
super().__init__(config)
#self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
#self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
#self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=0)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
#self.LayerNorm = AlbertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.LayerNorm = AlbertLayerNorm(config.embedding_size, eps=config.layer_norm_eps)
#self.dropout = nn.Dropout(config.hidden_dropout_prob)
#def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
# if input_ids is not None:
# input_shape = input_ids.size()
# else:
# input_shape = inputs_embeds.size()[:-1]
#
# seq_length = input_shape[1]
# device = input_ids.device if input_ids is not None else inputs_embeds.device
# if position_ids is None:
# position_ids = torch.arange(seq_length, dtype=torch.long, device=device)
# position_ids = position_ids.unsqueeze(0).expand(input_shape)
# if token_type_ids is None:
# token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
#
# if inputs_embeds is None:
# inputs_embeds = self.word_embeddings(input_ids)
# position_embeddings = self.position_embeddings(position_ids)
# token_type_embeddings = self.token_type_embeddings(token_type_ids)
#
# embeddings = inputs_embeds + position_embeddings + token_type_embeddings
# embeddings = self.LayerNorm(embeddings)
# #embeddings = self.dropout(embeddings)
# return embeddings
class AlbertTransformer(nn.Module):
def __init__(self, config, params):
super().__init__()
self.config = config
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.embedding_hidden_mapping_in = nn.Linear(config.embedding_size, config.hidden_size)
self.albert_layer_groups = nn.ModuleList([AlbertLayerGroup(config, params) for _ in range(config.num_hidden_groups)])
self.entropy_predictor = config.entropy_predictor
if config.entropy_predictor:
self.lookup_table = np.loadtxt(config.lookup_table_file, delimiter=",")
self.predict_layer = config.predict_layer
self.predict_average_layers = config.predict_average_layers
self.extra_layer=config.extra_layer
self.get_predict_acc=config.get_predict_acc
self.no_ee_before=config.no_ee_before
#self.layer = nn.ModuleList([AlbertLayer(config) for _ in range(config.num_hidden_layers)])
### try grouping for efficiency
if config.one_class:
self.highway = nn.ModuleList([AlbertHighway(config) for _ in range(config.num_hidden_groups)])
self.early_exit_entropy = [-1 for _ in range(config.num_hidden_groups)]
else:
self.highway = nn.ModuleList([AlbertHighway(config) for _ in range(config.num_hidden_layers)])
self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]
def set_early_exit_entropy(self, x):
print(x)
if (type(x) is float) or (type(x) is int):
for i in range(len(self.early_exit_entropy)):
self.early_exit_entropy[i] = x
else:
self.early_exit_entropy = x
def init_highway_pooler(self, pooler):
loaded_model = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name])
def forward(self, hidden_states, attention_mask=None, head_mask=None):
hidden_states = self.embedding_hidden_mapping_in(hidden_states)
all_attentions = ()
all_highway_exits = ()
        if self.output_hidden_states:
            all_hidden_states = (hidden_states,)
#for i,layer_module in enumerate(self.albert_layer_groups):
#for i, layer_module in enumerate(self.layer):
for i in range(self.config.num_hidden_layers):
# Number of layers in a hidden group
layers_per_group = int(self.config.num_hidden_layers / self.config.num_hidden_groups)
# Index of the hidden group
group_idx = int(i / (self.config.num_hidden_layers / self.config.num_hidden_groups))
layer_group_output = self.albert_layer_groups[group_idx](
hidden_states,
attention_mask,
head_mask[group_idx * layers_per_group : (group_idx + 1) * layers_per_group],
)
hidden_states = layer_group_output[0]
#stopped here
if self.output_attentions:
all_attentions = all_attentions + layer_group_output[-1]
#added this section
current_outputs = (hidden_states,)
if self.output_hidden_states:
current_outputs = current_outputs + (all_hidden_states,)
if self.output_attentions:
current_outputs = current_outputs + (all_attentions,)
if self.config.one_class:
highway_exit = self.highway[group_idx](current_outputs)
else:
highway_exit = self.highway[i](current_outputs)
#added this section
if not self.training:
highway_logits = highway_exit[0]
highway_entropy = entropy(highway_logits)
highway_exit = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
all_highway_exits = all_highway_exits + (highway_exit,)
if self.config.one_class:
ent_ = self.early_exit_entropy[group_idx]
else:
ent_ = self.early_exit_entropy[i]
if not self.entropy_predictor:
if highway_entropy < ent_:
new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(new_output, i+1)
elif (self.get_predict_acc):
if i==0:
count = 0
check_ee = 0
if self.predict_layer-1 == i:
if self.predict_average_layers:
if i == 0:
hw_ent_temp = highway_entropy.cpu().numpy()[0]
else:
hw_ent_temp = hw_ent_temp + highway_entropy.cpu().numpy()[0]
hw_ent = hw_ent_temp / float((i+1))
else:
hw_ent = highway_entropy.cpu().numpy()[0]
#hash into lookup table w/ highway_entropy
idx = (np.abs(self.lookup_table[:,0] - hw_ent)).argmin()
entropy_layers = np.transpose(self.lookup_table[idx,1:])
below_thresh = entropy_layers < ent_
k = np.argmax(below_thresh) # k is number of remaining layers
if (np.sum(below_thresh) == 0): #never hit threshold
k = entropy_layers.shape[0] - 1
k = k + self.predict_layer
count = count + 1
#print(idx)
#print(self.lookup_table[idx,:])
#print(k)
if ((highway_entropy < ent_) or (i == self.config.num_hidden_layers-1)) and not check_ee:
j = i # j is hw exit layer
count = count + 1
check_ee = 1
if count == 2:
new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
#return abs value of diff between j and k
if j>k:
raise HighwayException(new_output, (j-k) + 1)
else:
raise HighwayException(new_output, (k-j) + 1)
else:
if (i < self.predict_layer - 1): # before predict layer
#exit here????
if highway_entropy < ent_:
new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(new_output, i+1)
if self.predict_average_layers: # predict layer
if i == 0:
hw_ent_temp = highway_entropy.cpu().numpy()[0]
else:
hw_ent_temp = hw_ent_temp + highway_entropy.cpu().numpy()[0]
if (i == self.predict_layer - 1): # predict layer
if highway_entropy < ent_:
new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(new_output, i+1)
if self.predict_average_layers:
if i == 0:
hw_ent_temp = highway_entropy.cpu().numpy()[0]
else:
hw_ent_temp = hw_ent_temp + highway_entropy.cpu().numpy()[0]
hw_ent = hw_ent_temp / float((i+1))
else:
hw_ent = highway_entropy.cpu().numpy()[0]
#hash into lookup table w/ highway_entropy
idx = (np.abs(self.lookup_table[:,0] - hw_ent)).argmin()
entropy_layers = np.transpose(self.lookup_table[idx,1:])
below_thresh = entropy_layers < ent_
k = np.argmax(below_thresh) # k is number of remaining layers
if (np.sum(below_thresh) == 0): #never hit threshold
k = entropy_layers.shape[0] - 1
# other layers (count down and then trigger highway exit if layer < self.num_hidden_layers)
elif ((i >= self.predict_layer) and (i < self.config.num_hidden_layers - 2)):
if (self.extra_layer):
if k == 0:
if highway_entropy < ent_:
new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(new_output, i+1)
elif k==-1:
new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(new_output, i+1)
else:
if (not self.no_ee_before):
if highway_entropy < ent_:
new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(new_output, i+1)
if k == 0: #exit after counting down layers (CHECK CORRECT # OF LAYERS)
new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(new_output, i+1)
k = k - 1
else:
all_highway_exits = all_highway_exits + (highway_exit,)
#use this????
# Add last layer
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
outputs = outputs + (all_attentions,)
outputs = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions)
class AlbertModel(AlbertPreTrainedModel):
def __init__(self, config, params):
super().__init__(config, params)
self.config = config
self.embeddings = AlbertEmbeddings(config)
self.embeddings.requires_grad_(requires_grad=False)
self.encoder = AlbertTransformer(config, params)
self.pooler = nn.Linear(config.hidden_size, config.hidden_size)
self.pooler_activation = nn.Tanh()
self.init_weights()
def init_highway_pooler(self):
self.encoder.init_highway_pooler(self.pooler)
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
ALBERT has a different architecture in that its layers are shared across groups, which then has inner groups.
If an ALBERT model has 12 hidden layers and 2 hidden groups, with two inner groups, there
is a total of 4 different layers.
These layers are flattened: the indices [0,1] correspond to the two inner groups of the first hidden layer,
while [2,3] correspond to the two inner groups of the second hidden layer.
Any layer with in index other than [0,1,2,3] will result in an error.
See base class PreTrainedModel for more information about head pruning
"""
for layer, heads in heads_to_prune.items():
group_idx = int(layer / self.config.inner_group_num)
inner_group_idx = int(layer - group_idx * self.config.inner_group_num)
self.encoder.albert_layer_groups[group_idx].albert_layers[inner_group_idx].attention.prune_heads(heads)
#@add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
):
r"""
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.AlbertConfig`) and inputs:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
pooler_output (:obj:`torch.FloatTensor`: of shape :obj:`(batch_size, hidden_size)`):
Last layer hidden-state of the first token of the sequence (classification token)
further processed by a Linear layer and a Tanh activation function. The Linear
layer weights are trained from the next sentence prediction (classification)
objective during pre-training.
This output is usually *not* a good summary
of the semantic content of the input, you're often better with averaging or pooling
the sequence of hidden-states for the whole input sequence.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Example::
from transformers import AlbertModel, AlbertTokenizer
import torch
tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
model = AlbertModel.from_pretrained('albert-base-v2')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
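        # Padding positions (mask == 0) become -10000 and real tokens become 0; adding this
        # to the raw attention scores before the softmax effectively removes attention to
        # padded positions.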
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = (
head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
) # We can specify head_mask for each layer
head_mask = head_mask.to(
dtype=next(self.parameters()).dtype
            )  # switch to float if needed + fp16 compatibility
else:
head_mask = [None] * self.config.num_hidden_layers
embedding_output = self.embeddings(
input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
)
#CHECK THIS
encoder_outputs = self.encoder(embedding_output, extended_attention_mask, head_mask=head_mask)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler_activation(self.pooler(sequence_output[:, 0]))
outputs = (sequence_output, pooled_output) + encoder_outputs[1:]
# add hidden_states and attentions if they are here
return outputs
class HighwayException(Exception):
def __init__(self, message, exit_layer):
self.message = message
self.exit_layer = exit_layer # start from 1!
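# Raising this exception inside the encoder loop is how an early ("highway") exit is implemented:
# `message` carries the outputs assembled so far and `exit_layer` records the 1-based layer at
# which inference stopped; the classification/QA heads below catch it and treat the payload as
# the final model output.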
class AlbertHighway(nn.Module):
r"""A module to provide a shortcut
from
the output of one non-final BertLayer in BertEncoder
to
cross-entropy computation in BertForSequenceClassification
"""
def __init__(self, config):
#super().__init__(config) ###
super(AlbertHighway, self).__init__()
self.pooler = nn.Linear(config.hidden_size, config.hidden_size)
self.pooler_activation = nn.Tanh()
##
# self.pooler = BertPooler(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, encoder_outputs):
# Pooler
pooler_input = encoder_outputs[0]
# pooler_output = self.pooler(pooler_input)
# "return" pooler_output
#adding here:
pooler_input = self.pooler(pooler_input[:,0])
pooler_output = self.pooler_activation(pooler_input)
# BertModel
bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bodel_output
# Dropout and classification
pooled_output = bmodel_output[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
return logits, pooled_output
class AlbertForSequenceClassification(AlbertPreTrainedModel):
def __init__(self, config, params):
super().__init__(config, params)
self.num_labels = config.num_labels
self.num_layers = config.num_hidden_layers
self.albert = AlbertModel(config, params)
self.dropout = nn.Dropout(config.classifier_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
self.init_weights()
#@add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_layer=-1,
train_highway=False
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for computing the sequence classification/regression loss.
Indices should be in ``[0, ..., config.num_labels - 1]``.
If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),
If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.AlbertConfig`) and inputs:
loss: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Classification (or regression if config.num_labels==1) loss.
logits ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)``
Classification (or regression if config.num_labels==1) scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import AlbertTokenizer, AlbertForSequenceClassification
import torch
tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
model = AlbertForSequenceClassification.from_pretrained('albert-base-v2')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
labels = torch.tensor([1]).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=labels)
loss, logits = outputs[:2]
"""
exit_layer = self.num_layers
try:
outputs = self.albert(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
outputs = e.message
exit_layer = e.exit_layer
logits = outputs[0]
if not self.training:
original_entropy = entropy(logits)
highway_entropy = []
highway_logits_all = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
# work with highway exits
highway_losses = []
for highway_exit in outputs[-1]:
highway_logits = highway_exit[0]
if not self.training:
highway_logits_all.append(highway_logits)
highway_entropy.append(highway_exit[2])
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
highway_losses.append(highway_loss)
if train_highway:
outputs = (sum(highway_losses[:-1]),) + outputs
# exclude the final highway, of course
else:
outputs = (loss,) + outputs
if not self.training:
outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
outputs = (outputs[0],) +\
(highway_logits_all[output_layer],) +\
outputs[2:] ## use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions)
class AlbertForQuestionAnswering(AlbertPreTrainedModel):
def __init__(self, config, params):
super().__init__(config)
self.num_labels = config.num_labels
self.num_layers = config.num_hidden_layers
#self.albert = AlbertModel(config)
self.albert = AlbertModel(config, params)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
# @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
output_layer=-1,
train_highway=False
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
Position outside of the sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
Position outside of the sequence are not taken into account for computing the loss.
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.AlbertConfig`) and inputs:
loss: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
start_scores ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``
Span-start scores (before SoftMax).
end_scores: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``
Span-end scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
# The checkpoint albert-base-v2 is not fine-tuned for question answering. Please see the
# examples/run_squad.py example to see how to fine-tune a model to a question answering task.
from transformers import AlbertTokenizer, AlbertForQuestionAnswering
import torch
tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
model = AlbertForQuestionAnswering.from_pretrained('albert-base-v2')
question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
input_dict = tokenizer.encode_plus(question, text, return_tensors='pt')
start_scores, end_scores = model(**input_dict)
"""
exit_layer = self.num_layers
try:
outputs = self.albert(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
outputs = (start_logits, end_logits,) + outputs[2:]
except HighwayException as e:
outputs = e.message
exit_layer = e.exit_layer
start_logits = outputs[0]
end_logits = outputs[1]
if not self.training:
# original_start_entropy = entropy(start_logits)
# original_end_entropy = entropy(end_logits)
original_entropy = entropy(logits)
highway_entropy = []
# highway_start_logits_all = []
# highway_end_logits_all = []
highway_logits_all = []
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
# outputs = (total_loss,) + outputs
# work with highway exits
highway_losses = []
for highway_exit in outputs[-1]:
highway_logits = highway_exit[0]
highway_start_logits, highway_end_logits = highway_logits.split(1, dim=-1)
highway_start_logits = highway_start_logits.squeeze(-1)
highway_end_logits = highway_end_logits.squeeze(-1)
if not self.training:
highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(highway_start_logits, start_positions)
end_loss = loss_fct(highway_end_logits, end_positions)
highway_loss = (start_loss + end_loss) / 2
highway_losses.append(highway_loss)
if train_highway:
outputs = (sum(highway_losses[:-1]),) + outputs
# exclude the final highway, of course
else:
outputs = (total_loss,) + outputs
if not self.training:
outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
outputs = (outputs[0],) +\
(highway_logits_all[output_layer],) +\
outputs[2:] ## use the highway of the last layer
return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions)
| 46.963661 | 148 | 0.611366 | 34,370 | 0.984983 | 0 | 0 | 0 | 0 | 0 | 0 | 12,966 | 0.371583 |
19e3c7e8cb0d8e13048dc4a21c8f8d2b1867724a | 1,809 | py | Python | tests/test_sar.py | chris-angeli-rft/cloud-custodian | 5ff331b114a591dbaf6d672e30ceefb7ae64a5dd | [
"Apache-2.0"
] | 8 | 2021-05-18T02:22:03.000Z | 2021-09-11T02:49:04.000Z | tests/test_sar.py | chris-angeli-rft/cloud-custodian | 5ff331b114a591dbaf6d672e30ceefb7ae64a5dd | [
"Apache-2.0"
] | 1 | 2021-04-26T04:38:35.000Z | 2021-04-26T04:38:35.000Z | tests/test_sar.py | chris-angeli-rft/cloud-custodian | 5ff331b114a591dbaf6d672e30ceefb7ae64a5dd | [
"Apache-2.0"
] | 1 | 2021-11-10T02:28:47.000Z | 2021-11-10T02:28:47.000Z | # Copyright 2020 Kapil Thangavelu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .common import BaseTest
class SARTest(BaseTest):
def test_query(self):
factory = self.replay_flight_data('test_sar_query_app')
p = self.load_policy({
'name': 'test-sar',
'resource': 'aws.serverless-app'},
session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['Name'], 'GitterArchive')
def test_cross_account(self):
factory = self.replay_flight_data('test_sar_cross_account')
p = self.load_policy({
'name': 'test-sar',
'resource': 'aws.serverless-app',
'filters': [{
'type': 'cross-account',
'whitelist_orgids': ['o-4adkskbcff']
}]},
session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.maxDiff = None
self.assertEqual(
resources[0]['CrossAccountViolations'], [
{'Actions': ['serverlessrepo:Deploy'],
'Effect': 'Allow',
'Principal': {'AWS': ['112233445566']},
'StatementId': 'b364d84f-62d2-411c-9787-3636b2b1975c'}
])
| 35.470588 | 74 | 0.616363 | 1,199 | 0.662797 | 0 | 0 | 0 | 0 | 0 | 0 | 934 | 0.516307 |
19e3cc99b66e2939b99c81e570efb9afd33fa23d | 5,773 | py | Python | rovina.py | Pandoro/tools | 631c6036cb74dc845668fd912588fd31aae46f8b | [
"MIT"
] | 1 | 2019-04-22T16:38:03.000Z | 2019-04-22T16:38:03.000Z | rovina.py | afcarl/tools-Pandoro | 631c6036cb74dc845668fd912588fd31aae46f8b | [
"MIT"
] | 2 | 2018-03-13T10:49:48.000Z | 2018-03-13T10:54:01.000Z | rovina.py | afcarl/tools-Pandoro | 631c6036cb74dc845668fd912588fd31aae46f8b | [
"MIT"
] | 2 | 2018-03-08T19:40:10.000Z | 2018-06-11T14:43:49.000Z | import json
import os
import sys
sys.path.append('/usr/lib/python2.7/dist-packages')
import cv2
import numpy as np
from tqdm import *
import dataset_utils
class Rovina(object):
def __init__(self, config_filename):
self.config_filename = config_filename
with open(config_filename) as config_file:
self.config = json.load(config_file)
if self.config['use_relative_paths']:
self.root_folder = os.path.dirname(config_filename)
else:
self.root_folder = ''
#Used if we want to use the "flipped" version of the camera 0.
self.folder_postfix = self.config['flipped_post_fix']
image_f = self.config['image_folder']
if image_f is not None:
image_f += self.folder_postfix
self.image_folder = os.path.join(self.root_folder, image_f)
self.image_extension = self.config['image_extension']
else:
self.image_folder = None
obj_label_f = self.config['object_label_folder']
if obj_label_f is not None:
obj_label_f += self.folder_postfix
self.obj_label_folder = os.path.join(self.root_folder, obj_label_f)
self.obj_label_extension = self.config['object_label_extension']
else:
self.obj_label_folder = None
mat_label_f = self.config['material_label_folder']
if mat_label_f is not None:
mat_label_f += self.folder_postfix
self.mat_label_folder = os.path.join(self.root_folder, mat_label_f)
self.mat_label_extension = self.config['material_label_extension']
else:
self.mat_label_folder = None
calib_f = self.config.get('calibration_folder')
if calib_f is not None:
calib_f += self.folder_postfix
self.calibration_folder = os.path.join(self.root_folder, calib_f)
self.calibration_extension = self.config.get('calibration_extension')
else:
self.calibration_folder = None
depth_f = self.config.get('depth_folder')
if depth_f is not None:
depth_f += self.folder_postfix
self.depth_folder = os.path.join(self.root_folder, depth_f)
self.depth_extension = self.config.get('depth_extension')
else:
self.depth_folder = None
self.train_filenames = self.config['train_images']
self.test_filenames = self.config['test_images']
self.dataset = self.config['dataset_name']
self.color_coding = { 'mat' : dataset_utils.LabelConversion(self.config['material_color_coding']),
'obj' : dataset_utils.LabelConversion(self.config['object_color_coding'])}
self.class_count = {k : self.color_coding[k].class_count for k in self.color_coding.keys()}
self.class_names = {k : self.color_coding[k].class_names for k in self.color_coding.keys()}
def label_to_rgb(self, image, type):
return self.color_coding[type].label_to_rgb(image)
def rgb_to_label(self, image, type):
return self.color_coding[type].rgb_to_label(image)
def get_data(self, data_type, color_images=True, mat_label_images=True, obj_label_images=True, calibrations=False, depth=False):
file_list = []
for t in data_type:
list_type = t + '_images'
if list_type in self.config:
file_list += self.config[list_type]
else:
raise Exception('The config does not contain a list for the entry: \'{0}_images\' \nConfig file located at: {1}'.format(t, self.config_filename))
return_list = []
if color_images:
images = []
for fn in tqdm(file_list):
i_n = os.path.join(self.image_folder, fn+self.image_extension)
images.append(self.load_color(i_n))
return_list.append(images)
if mat_label_images:
mat_labels = []
for fn in tqdm(file_list):
mat_l_n = os.path.join(self.mat_label_folder, fn+self.mat_label_extension)
mat_labels.append(self.load_labels(mat_l_n, 'mat'))
return_list.append(mat_labels)
if obj_label_images:
obj_labels = []
for fn in tqdm(file_list):
obj_l_n = os.path.join(self.obj_label_folder, fn+self.obj_label_extension)
obj_labels.append(self.load_labels(obj_l_n, 'obj'))
return_list.append(obj_labels)
if calibrations:
calibration_data = []
for fn in tqdm(file_list):
c_n = os.path.join(self.calibration_folder, fn+self.calibration_extension)
calibration_data.append(self.load_calibration(c_n))
return_list.append(calibration_data)
if depth:
depth_data = []
for fn in tqdm(file_list):
d_n = os.path.join(self.depth_folder, fn+self.depth_extension)
depth_data.append(self.load_depth(d_n))
return_list.append(depth_data)
if len(return_list) == 1:
return return_list[0]
else:
return return_list
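    # Example usage (hypothetical config path, assuming 'train_images' is listed in the config):
    #   images, mat_labels, obj_labels = Rovina('rovina.json').get_data(['train'])
    # returns parallel lists ordered like the file list assembled above.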
def load_color(self, file_name):
return cv2.imread(file_name)[:,:,::-1] # flip bgr to rgb
def load_labels(self, file_name, type):
rgb = cv2.imread(file_name)[:,:,::-1]
return self.rgb_to_label(rgb, type)
def load_calibration(self, file_name):
with open(file_name) as calib_file:
return json.load(calib_file)
def load_depth(self, file_name):
d = cv2.imread(file_name, cv2.CV_LOAD_IMAGE_UNCHANGED)
if d.dtype == np.uint16:
d = d.astype(np.float32)/256.
return d | 37.245161 | 161 | 0.625498 | 5,611 | 0.971938 | 0 | 0 | 0 | 0 | 0 | 0 | 562 | 0.09735 |
19e85b96640382129fd31d8131a6692e41afddf9 | 4,952 | py | Python | gpgLabs/GPR/GPRlab1.py | victortocantins/gpgLabs | 310b69c681dd1ebf91ba8be2b5ac27adf5fc0f12 | [
"MIT"
] | null | null | null | gpgLabs/GPR/GPRlab1.py | victortocantins/gpgLabs | 310b69c681dd1ebf91ba8be2b5ac27adf5fc0f12 | [
"MIT"
] | null | null | null | gpgLabs/GPR/GPRlab1.py | victortocantins/gpgLabs | 310b69c681dd1ebf91ba8be2b5ac27adf5fc0f12 | [
"MIT"
] | null | null | null | import numpy as np
from scipy.constants import mu_0, epsilon_0
import matplotlib.pyplot as plt
from PIL import Image
import warnings
warnings.filterwarnings('ignore')
from ipywidgets import interact, interactive, IntSlider, widget, FloatText, FloatSlider, fixed
from .Wiggle import wiggle, PrimaryWave, ReflectedWave
import requests
from io import BytesIO
########################################
# DOWNLOAD FUNCTIONS
########################################
def downloadRadargramImage(URL):
urlObj = requests.get(URL)
imgcmp = Image.open(BytesIO(urlObj.content))
return imgcmp
########################################
# WIDGETS
########################################
def PrimaryWidget(dataFile,timeFile):
i = interact(PrimaryWidgetFcn,
epsrL = (1, 10, 1),
epsrH = (1, 20, 1),
tinterpL = (0, 150, 2),
tinterpH = (0, 150, 2),
dFile = fixed(dataFile),
tFile = fixed(timeFile))
return i
def PrimaryFieldWidget(radargramImage):
i = interact(PrimaryFieldWidgetFcn,
tinterp = (0, 80, 2),
epsr = (1, 40, 1),
radgramImg = fixed(radargramImage))
return i
def PipeWidget(radargramImage):
i = interact(PipeWidgetFcn,
epsr = (0, 100, 1),
h=(0.1, 2.0, 0.1),
xc=(0., 40., 0.2),
r=(0.1, 3, 0.1),
imgcmp=fixed(radargramImage))
return i
def WallWidget(radargramImagePath):
i = interact(WallWidgetFcn,
epsr = (0, 100, 1),
h=(0.1, 2.0, 0.1),
x1=(1, 35, 1),
x2=(20, 40, 1),
imgcmp=fixed(radargramImagePath))
return i
########################################
# FUNCTIONS
########################################
def PrimaryWidgetFcn(tinterpL, epsrL, tinterpH, epsrH, dFile, tFile):
data = np.load(dFile)
time = np.load(tFile)
dt = time[1]-time[0]
v1 = 1./np.sqrt(epsilon_0*epsrL*mu_0)
v2 = 1./np.sqrt(epsilon_0*epsrH*mu_0)
dx = 0.3
nano = 1e9
xorig = np.arange(data.shape[0])*dx
out1 = PrimaryWave(xorig, v1, tinterpL/nano)
out2 = ReflectedWave(xorig, v2, tinterpH/nano)
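    # v1 and v2 are the EM wave speeds c/sqrt(epsr) for the two picked permittivities; PrimaryWave
    # and ReflectedWave (imported from .Wiggle) presumably return the corresponding direct-wave and
    # reflected-wave arrival-time curves that are overlaid on the shot gather below.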
kwargs = {
'skipt':1,
'scale': 0.5,
'lwidth': 0.1,
'dx': dx,
'sampr': dt*nano,
}
extent = [0., 30, 300, 0]
fig, ax1 = plt.subplots(1,1, figsize = (8,5))
ax1.invert_yaxis()
ax1.axis(extent)
ax1.set_xlabel('Offset (m)')
ax1.set_ylabel('Time (ns)')
ax1.set_title('Shot Gather')
wiggle(data, ax = ax1, **kwargs)
ax1.plot(xorig, out1*nano, 'b', lw = 2)
ax1.plot(xorig, out2*nano, 'r', lw = 2)
plt.show()
def PrimaryFieldWidgetFcn(tinterp, epsr, radgramImg):
imgcmp = Image.open(radgramImg)
fig = plt.figure(figsize = (6,7))
ax = plt.subplot(111)
plt.imshow(imgcmp, extent = [0, 150, 150, 0])
x = np.arange(81)*0.1
xconvert = x*150./8.
v = 1./np.sqrt(mu_0*epsilon_0*epsr)
nano = 1e9
# tinterp = 30
y = (1./v*x)*nano + tinterp
plt.plot(xconvert, y, lw = 2)
plt.xticks(np.arange(11)*15, np.arange(11)*0.8+2.4) #+2.4 for offset correction
plt.xlim(0., 150.)
plt.ylim(146.,0.)
plt.ylabel('Time (ns)')
plt.xlabel('Offset (m)')
plt.show()
def PipeWidgetFcn(epsr, h, xc, r, imgcmp):
# imgcmp = Image.open(dataImage)
imgcmp = imgcmp.resize((600, 800))
fig = plt.figure(figsize = (9,11))
ax = plt.subplot(111)
plt.imshow(imgcmp, extent = [0, 400, 250, 0])
x = np.arange(41)*1.
xconvert = x*10.
v = 1./np.sqrt(mu_0*epsilon_0*epsr)
nano = 1e9
time = (np.sqrt(((x-xc)**2+4*h**2)) - r)/v
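    # Hyperbolic arrival-time curve used to fit the buried pipe: t(x) = (sqrt((x-xc)^2 + (2h)^2) - r)/v,
    # with apex (2h - r)/v directly above the pipe at x = xc, pipe depth h, pipe radius r, and
    # wave speed v = c/sqrt(epsr).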
plt.plot(xconvert, time*nano, 'r--',lw = 2)
plt.xticks(np.arange(11)*40, np.arange(11)*4.0 )
plt.xlim(0., 400)
plt.ylim(240., 0.)
plt.ylabel('Time (ns)')
plt.xlabel('Survey line location (m)')
plt.show()
def WallWidgetFcn(epsr, h, x1, x2, imgcmp):
# imgcmp = Image.open(dataImage)
imgcmp = imgcmp.resize((600, 800))
fig = plt.figure(figsize = (9,11))
ax = plt.subplot(111)
plt.imshow(imgcmp, extent = [0, 400, 250, 0])
x = np.arange(41)*1.
ind1 = x <= x1
ind2 = x >= x2
ind3 = np.logical_not(np.logical_or(ind1, ind2))
scale = 10.
xconvert = x*scale
v = 1./np.sqrt(mu_0*epsilon_0*epsr)
nano = 1e9
def arrival(x, xc, h, v):
return (np.sqrt(((x-xc)**2+4*h**2)))/v
plt.plot(xconvert[ind1], arrival(x[ind1], x1, h, v)*nano, 'b--',lw = 2)
plt.plot(xconvert[ind2], arrival(x[ind2], x2, h, v)*nano, 'b--',lw = 2)
plt.plot(np.r_[x1*scale, x2*scale], np.r_[2.*h/v, 2.*h/v]*nano, 'b--',lw = 2)
# plt.plot(xconvert[ind3], arrival(x[ind3], xc?, h, v)*nano, 'r--',lw = 2)
plt.xticks(np.arange(11)*40, np.arange(11)*4.0 )
plt.xlim(0., 400)
plt.ylim(240., 0.)
plt.ylabel('Time (ns)')
plt.xlabel('Survey line location (m)')
plt.show()
| 24.636816 | 94 | 0.544628 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 693 | 0.139943 |