Dataset schema (one row per source file; ranges are min to max over the dataset):
hexsha: string, length 40
size: int64, 5 to 2.06M
ext: string, 11 classes
lang: string, 1 class
max_stars_repo_path: string, length 3 to 251
max_stars_repo_name: string, length 4 to 130
max_stars_repo_head_hexsha: string, length 40 to 78
max_stars_repo_licenses: list, length 1 to 10
max_stars_count: int64, 1 to 191k, nullable
max_stars_repo_stars_event_min_datetime: string, length 24, nullable
max_stars_repo_stars_event_max_datetime: string, length 24, nullable
max_issues_repo_path: string, length 3 to 251
max_issues_repo_name: string, length 4 to 130
max_issues_repo_head_hexsha: string, length 40 to 78
max_issues_repo_licenses: list, length 1 to 10
max_issues_count: int64, 1 to 116k, nullable
max_issues_repo_issues_event_min_datetime: string, length 24, nullable
max_issues_repo_issues_event_max_datetime: string, length 24, nullable
max_forks_repo_path: string, length 3 to 251
max_forks_repo_name: string, length 4 to 130
max_forks_repo_head_hexsha: string, length 40 to 78
max_forks_repo_licenses: list, length 1 to 10
max_forks_count: int64, 1 to 105k, nullable
max_forks_repo_forks_event_min_datetime: string, length 24, nullable
max_forks_repo_forks_event_max_datetime: string, length 24, nullable
content: string, length 1 to 1.05M
avg_line_length: float64, 1 to 1.02M
max_line_length: int64, 3 to 1.04M
alphanum_fraction: float64, 0 to 1
hexsha: b931a37de7e1f1ed0fc213effed503351b163f01 | size: 9,946 | ext: py | lang: Python
max_stars: goopylib/objects/_BBox.py | BhavyeMathur/goopylib | f9eb1458e9218a8dd4add6693ce70b804624bf91 | ["MIT"] | count: 25 | 2020-07-09T10:57:16.000Z to 2022-02-06T10:31:34.000Z
max_issues: goopylib/objects/_BBox.py | BhavyeMathur/goopy | f9eb1458e9218a8dd4add6693ce70b804624bf91 | ["MIT"] | count: 48 | 2020-07-02T20:08:40.000Z to 2020-07-06T16:09:25.000Z
max_forks: goopylib/objects/_BBox.py | BhavyeMathur/goopy | f9eb1458e9218a8dd4add6693ce70b804624bf91 | ["MIT"] | count: 1 | 2020-12-01T13:45:53.000Z to 2020-12-01T13:45:53.000Z
content:
from goopylib.objects.GraphicsObject import GraphicsObject
from goopylib.styles import *
avg_line_length: 37.81749 | max_line_length: 122 | alphanum_fraction: 0.594008

hexsha: b931c0b51c15ef9d8f1fe028562964e4cc16bd70 | size: 670 | ext: py | lang: Python
max_stars: Graph/DFS&BFS.py | Mayner0220/Programmers | 42e4783a526506fb7d8208841a76201909ed5c5c | ["Apache-2.0"] | count: 1 | 2021-04-01T06:19:02.000Z to 2021-04-01T06:19:02.000Z
max_issues: Graph/DFS&BFS.py | Mayner0220/Programmers | 42e4783a526506fb7d8208841a76201909ed5c5c | ["Apache-2.0"] | count: null | dates: null
max_forks: Graph/DFS&BFS.py | Mayner0220/Programmers | 42e4783a526506fb7d8208841a76201909ed5c5c | ["Apache-2.0"] | count: null | dates: null
content:
# https://www.acmicpc.net/problem/1260
n, m, v = map(int, input().split())
graph = [[0] * (n+1) for _ in range(n+1)]
visit = [False] * (n+1)
for _ in range(m):
    R, C = map(int, input().split())
    graph[R][C] = 1
    graph[C][R] = 1
dfs(v)
print()
bfs(v)
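# NOTE: this row is truncated: dfs() and bfs() are called above but never
# defined. A minimal sketch of the missing functions, assuming the adjacency
# matrix `graph` and the `visit` list built above (names and traversal order
# are assumptions, not the original code):
from collections import deque

def dfs(node):
    # Mark, print, then recurse into the lowest-numbered unvisited neighbour.
    visit[node] = True
    print(node, end=" ")
    for nxt in range(1, n + 1):
        if graph[node][nxt] and not visit[nxt]:
            dfs(nxt)

def bfs(start):
    # Queue-based traversal with its own visited list.
    seen = [False] * (n + 1)
    seen[start] = True
    queue = deque([start])
    while queue:
        node = queue.popleft()
        print(node, end=" ")
        for nxt in range(1, n + 1):
            if graph[node][nxt] and not seen[nxt]:
                seen[nxt] = True
                queue.append(nxt)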
avg_line_length: 19.142857 | max_line_length: 43 | alphanum_fraction: 0.470149

hexsha: b9341a63382a080379eb1fbad26490deed5a76c6 | size: 2,404 | ext: py | lang: Python
max_stars: pysteps/tests/helpers.py | Fangyh09/pysteps | 9eb7f4ead0a946d98b7504d1bd66b18dc405ed51 | ["BSD-3-Clause"] | count: 6 | 2019-01-06T07:42:55.000Z to 2021-02-03T13:59:50.000Z
max_issues: pysteps/tests/helpers.py | Fangyh09/pysteps | 9eb7f4ead0a946d98b7504d1bd66b18dc405ed51 | ["BSD-3-Clause"] | count: 5 | 2018-12-23T15:10:27.000Z to 2021-01-06T15:03:03.000Z
max_forks: pysteps/tests/helpers.py | Fangyh09/pysteps | 9eb7f4ead0a946d98b7504d1bd66b18dc405ed51 | ["BSD-3-Clause"] | count: 2 | 2019-08-06T14:16:43.000Z to 2019-08-13T00:36:31.000Z
content:
"""
Testing helper functions
=======================
Collection of helper functions for the testing suite.
"""
from datetime import datetime
import numpy as np
import pytest
import pysteps as stp
from pysteps import io, rcparams
def get_precipitation_fields(num_prev_files=0):
"""Get a precipitation field from the archive to be used as reference."""
# Selected case
date = datetime.strptime("201505151630", "%Y%m%d%H%M")
data_source = rcparams.data_sources["mch"]
root_path = data_source["root_path"]
path_fmt = data_source["path_fmt"]
fn_pattern = data_source["fn_pattern"]
fn_ext = data_source["fn_ext"]
importer_name = data_source["importer"]
importer_kwargs = data_source["importer_kwargs"]
# Find the input files from the archive
fns = io.archive.find_by_date(date, root_path, path_fmt, fn_pattern, fn_ext,
timestep=5, num_prev_files=num_prev_files)
# Read the radar composites
importer = io.get_method(importer_name, "importer")
reference_field, quality, metadata = io.read_timeseries(fns, importer,
**importer_kwargs)
del quality # Not used
if num_prev_files == 0:
reference_field = np.squeeze(reference_field) # Remove time dimension
# Convert to mm/h
reference_field, metadata = stp.utils.to_rainrate(reference_field, metadata)
# Mask invalid values
reference_field = np.ma.masked_invalid(reference_field)
# Log-transform the data [dBR]
reference_field, metadata = stp.utils.dB_transform(reference_field,
metadata,
threshold=0.1,
zerovalue=-15.0)
return reference_field
def smart_assert(actual_value, expected, tolerance=None):
"""
Assert by equality for non-numeric values, or by approximation otherwise.
If the precision keyword is None, assert by equality.
When the precision is not None, assert that two numeric values
(or two sets of numbers) are equal to each other within the tolerance.
"""
if tolerance is None:
assert actual_value == expected
else:
# Compare numbers up to a certain precision
assert actual_value == pytest.approx(expected, 1e-6)
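# Usage sketch of smart_assert (illustrative values, not from the original
# test suite):
if __name__ == "__main__":
    smart_assert("radar", "radar")                # non-numeric: exact equality
    smart_assert(1.0000005, 1.0, tolerance=1e-3)  # numeric: approximate match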
avg_line_length: 33.388889 | max_line_length: 80 | alphanum_fraction: 0.640599

hexsha: b934cd0c4d4115b02def19c6bd570d1877b158cd | size: 3,598 | ext: py | lang: Python
max_stars: modules/courses/courses.py | ehiller/mobilecsp-v18 | a59801c44c616d30f5e916d6771e479c8a9e88f7 | ["Apache-2.0"] | count: null | dates: null
max_issues: modules/courses/courses.py | ehiller/mobilecsp-v18 | a59801c44c616d30f5e916d6771e479c8a9e88f7 | ["Apache-2.0"] | count: null | dates: null
max_forks: modules/courses/courses.py | ehiller/mobilecsp-v18 | a59801c44c616d30f5e916d6771e479c8a9e88f7 | ["Apache-2.0"] | count: null | dates: null
content:
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Courses module."""
__author__ = 'Pavel Simakov ([email protected])'
from common import resource
from controllers import assessments
from controllers import lessons
from controllers import utils
from models import content
from models import resources_display
from models import custom_modules
from models import roles
from tools import verify
All_LOCALES_PERMISSION = 'can_pick_all_locales'
All_LOCALES_DESCRIPTION = 'Can pick all locales, including unavailable ones.'
SEE_DRAFTS_PERMISSION = 'can_see_draft_content'
SEE_DRAFTS_DESCRIPTION = 'Can see lessons and assessments with draft status.'
custom_module = None
def register_module():
    """Registers this module in the registry."""

    # provide parser to verify
    verify.parse_content = content.parse_string_in_scope

    # setup routes
    courses_routes = [
        ('/', lessons.CourseHandler),
        ('/activity', lessons.UnitHandler),
        ('/answer', assessments.AnswerHandler),
        ('/assessment', lessons.AssessmentHandler),
        ('/course', lessons.CourseHandler),
        ('/forum', utils.ForumHandler),
        ('/preview', utils.PreviewHandler),
        ('/register', utils.RegisterHandler),
        ('/resources', utils.ResourcesHandler),
        ('/rest/locale', utils.StudentLocaleRESTHandler),
        ('/review', lessons.ReviewHandler),
        ('/reviewdashboard', lessons.ReviewDashboardHandler),
        ('/student/editstudent', utils.StudentEditStudentHandler),
        ('/student/settracks', utils.StudentSetTracksHandler),
        ('/student/home', utils.StudentProfileHandler),
        ('/student/unenroll', utils.StudentUnenrollHandler),
        ('/unit', lessons.UnitHandler)]

    global custom_module  # pylint: disable=global-statement
    custom_module = custom_modules.Module(
        'Course',
        'A set of pages for delivering an online course.',
        [], courses_routes,
        notify_module_enabled=on_module_enabled)
    return custom_module
avg_line_length: 36.714286 | max_line_length: 78 | alphanum_fraction: 0.735686

hexsha: b934ce47dae53d305023f829683b8ba6f625367b | size: 1,362 | ext: py | lang: Python
max_stars: packages/merlin/protocols/PrefixLayout.py | pyre/pyre | 0f903836f52450bf81216c5dfdfdfebb16090177 | ["BSD-3-Clause"] | count: 25 | 2018-04-23T01:45:39.000Z to 2021-12-10T06:01:23.000Z
max_issues: packages/merlin/protocols/PrefixLayout.py | pyre/pyre | 0f903836f52450bf81216c5dfdfdfebb16090177 | ["BSD-3-Clause"] | count: 53 | 2018-05-31T04:55:00.000Z to 2021-10-07T21:41:32.000Z
max_forks: packages/merlin/protocols/PrefixLayout.py | pyre/pyre | 0f903836f52450bf81216c5dfdfdfebb16090177 | ["BSD-3-Clause"] | count: 12 | 2018-04-23T22:50:40.000Z to 2022-02-20T17:27:23.000Z
content:
# -*- coding: utf-8 -*-
#
# michael a.g. avzis <[email protected]>
# (c) 1998-2021 all rights reserved
# support
import merlin
# the manager of intermediate and final build products
# end of file
avg_line_length: 23.482759 | max_line_length: 82 | alphanum_fraction: 0.668135

hexsha: b9355080468a287acd9198671ea28f44a47c9a46 | size: 2,389 | ext: py | lang: Python
max_stars: test/IECoreMaya/ImageConverterTest.py | bradleyhenke/cortex | f8245cc6c9464b1de9e6c6e57068248198e63de0 | ["BSD-3-Clause"] | count: 386 | 2015-01-02T11:10:43.000Z to 2022-03-10T15:12:20.000Z
max_issues: test/IECoreMaya/ImageConverterTest.py | bradleyhenke/cortex | f8245cc6c9464b1de9e6c6e57068248198e63de0 | ["BSD-3-Clause"] | count: 484 | 2015-01-09T18:28:06.000Z to 2022-03-31T16:02:04.000Z
max_forks: test/IECoreMaya/ImageConverterTest.py | bradleyhenke/cortex | f8245cc6c9464b1de9e6c6e57068248198e63de0 | ["BSD-3-Clause"] | count: 99 | 2015-01-28T23:18:04.000Z to 2022-03-27T00:59:39.000Z
content:
##########################################################################
#
# Copyright (c) 2011, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import maya.cmds
import IECore
import IECoreImage
import IECoreMaya
if __name__ == "__main__":
    IECoreMaya.TestProgram()
avg_line_length: 37.920635 | max_line_length: 92 | alphanum_fraction: 0.706153

hexsha: b936e2da1dfb0c50e0a4123e54c302664e300cf0 | size: 4,454 | ext: py | lang: Python
max_stars: tests/core_ptl/check_for_ranks.py | PatrykNeubauer/NeMo | 3ada744b884dba5f233f22c6991fc6092c6ca8d0 | ["Apache-2.0"] | count: 2 | 2021-09-21T07:36:20.000Z to 2022-02-05T15:29:04.000Z
max_issues: tests/core_ptl/check_for_ranks.py | PatrykNeubauer/NeMo | 3ada744b884dba5f233f22c6991fc6092c6ca8d0 | ["Apache-2.0"] | count: null | dates: null
max_forks: tests/core_ptl/check_for_ranks.py | PatrykNeubauer/NeMo | 3ada744b884dba5f233f22c6991fc6092c6ca8d0 | ["Apache-2.0"] | count: 12 | 2021-06-20T08:56:10.000Z to 2022-03-16T19:07:10.000Z
content:
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import torch
from omegaconf import OmegaConf
from pytorch_lightning import Trainer
from pytorch_lightning.utilities.distributed import rank_zero_only
from nemo.core import ModelPT
from nemo.utils import logging
from nemo.utils.exp_manager import ExpManagerConfig, exp_manager
def instantiate_multinode_ddp_if_possible():
    num_gpus = torch.cuda.device_count()
    trainer = Trainer(gpus=num_gpus, accelerator='ddp', logger=None, checkpoint_callback=None)
    exp_manager_cfg = ExpManagerConfig(exp_dir='./ddp_check/', use_datetime_version=False, version="")
    exp_manager(trainer, cfg=OmegaConf.structured(exp_manager_cfg))
    return trainer


def setup_model(trainer: Trainer):
    model = ExampleModel(trainer=trainer)
    logging.info(f"M.Global Rank:{model.global_rank}")
    logging.info(f"M.Local Rank:{model.local_rank}")
    logging.info(f"M.World Size:{model.trainer.world_size}")

    trainer.predict(model)
    return model


def get_rank_info(texts: list, rank_key: str) -> int:
    for line in texts:
        if rank_key in line:
            rank_value = line.split(":")[-1]
            rank_value = int(rank_value)
            return rank_value

    print("Could not find the correct rank key !")
    exit(1)


def run_checks():
    cleanup()

    trainer = instantiate_multinode_ddp_if_possible()
    model = setup_model(trainer)
    check_model_ranks(model)

    print("DDP checks passed !")
    cleanup()


if __name__ == '__main__':
    run_checks()
avg_line_length: 28.551282 | max_line_length: 102 | alphanum_fraction: 0.687023

hexsha: b93839299c30aa23ab066b85969c7c27e043c202 | size: 1,143 | ext: py | lang: Python
max_stars: helpers/json_manager.py | Lofi-Lemonade/Python-Discord-Bot-Template | 4cb79197c751c88100ad396adb38e88bf2a4d1ed | ["Apache-2.0"] | count: null | dates: null
max_issues: helpers/json_manager.py | Lofi-Lemonade/Python-Discord-Bot-Template | 4cb79197c751c88100ad396adb38e88bf2a4d1ed | ["Apache-2.0"] | count: null | dates: null
max_forks: helpers/json_manager.py | Lofi-Lemonade/Python-Discord-Bot-Template | 4cb79197c751c88100ad396adb38e88bf2a4d1ed | ["Apache-2.0"] | count: null | dates: null
content:
""""
Copyright Krypton 2022 - https://github.com/kkrypt0nn (https://krypton.ninja)
Description:
This is a template to create your own discord bot in python.
Version: 4.1
"""
import json
def add_user_to_blacklist(user_id: int) -> None:
"""
This function will add a user based on its ID in the blacklist.json file.
:param user_id: The ID of the user that should be added into the blacklist.json file.
"""
with open("blacklist.json", "r+") as file:
file_data = json.load(file)
file_data["ids"].append(user_id)
with open("blacklist.json", "w") as file:
file.seek(0)
json.dump(file_data, file, indent=4)
def remove_user_from_blacklist(user_id: int) -> None:
"""
This function will remove a user based on its ID from the blacklist.json file.
:param user_id: The ID of the user that should be removed from the blacklist.json file.
"""
with open("blacklist.json", "r") as file:
file_data = json.load(file)
file_data["ids"].remove(user_id)
with open("blacklist.json", "w") as file:
file.seek(0)
json.dump(file_data, file, indent=4)
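# Usage sketch (assumes a blacklist.json file such as {"ids": []} exists next
# to the script; the user ID below is made up):
if __name__ == "__main__":
    add_user_to_blacklist(123456789)
    remove_user_from_blacklist(123456789)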
avg_line_length: 31.75 | max_line_length: 91 | alphanum_fraction: 0.659668

hexsha: b93889b31eb8ffef50e08b669fe2f20c16f4d959 | size: 1,628 | ext: py | lang: Python
max_stars: tests/test_common.py | ColinKennedy/ways | 1eb44e4aa5e35fb839212cd8cb1c59c714ba10d3 | ["MIT"] | count: 2 | 2019-11-10T18:35:38.000Z to 2020-05-12T10:37:42.000Z
max_issues: tests/test_common.py | ColinKennedy/ways | 1eb44e4aa5e35fb839212cd8cb1c59c714ba10d3 | ["MIT"] | count: 5 | 2017-11-27T18:05:25.000Z to 2021-06-01T21:57:48.000Z
max_forks: tests/test_common.py | ColinKennedy/ways | 1eb44e4aa5e35fb839212cd8cb1c59c714ba10d3 | ["MIT"] | count: 1 | 2017-11-27T17:54:53.000Z to 2017-11-27T17:54:53.000Z
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''Make sure that generic functions work exactly as we expect.'''
# IMPORT STANDARD LIBRARIES
import unittest
# IMPORT WAYS LIBRARIES
from ways import common
avg_line_length: 35.391304 | max_line_length: 84 | alphanum_fraction: 0.686732

hexsha: b938dd2d4297c0de33a03a4e075f88143c4fb4d8 | size: 942 | ext: py | lang: Python
max_stars: setup.py | glibin/natasha | 4f5c153f754759c189779f9879decd8d218356af | ["MIT"] | count: 1 | 2020-01-16T14:02:01.000Z to 2020-01-16T14:02:01.000Z
max_issues: setup.py | glibin/natasha | 4f5c153f754759c189779f9879decd8d218356af | ["MIT"] | count: null | dates: null
max_forks: setup.py | glibin/natasha | 4f5c153f754759c189779f9879decd8d218356af | ["MIT"] | count: null | dates: null
content:
from setuptools import setup, find_packages

setup(
    name='natasha',
    version='0.2.0',
    description='Named-entity recognition for russian language',
    url='https://github.com/bureaucratic-labs/natasha',
    author='Dmitry Veselov',
    author_email='[email protected]',
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    keywords='natural language processing, russian morphology, named entity recognition, tomita',
    packages=find_packages(),
    install_requires=[
        'yargy==0.3.0'
    ],
    extras_require={
        'web': [
            'ujson',
            'aiohttp',
        ],
    },
)
avg_line_length: 29.4375 | max_line_length: 97 | alphanum_fraction: 0.59448

hexsha: b93a3daf85b033d7039d8c3747eadb457802db6b | size: 2,814 | ext: py | lang: Python
max_stars: GeneratePassword/generate_password_v2.py | OneScreenfulOfPython/screenfuls | ea4e378c8d9e530edadd4a3315fe9e8acc98460b | ["Apache-2.0"] | count: 2 | 2015-01-19T14:50:55.000Z to 2015-01-28T12:45:59.000Z
max_issues: GeneratePassword/generate_password_v2.py | OneScreenfulOfPython/screenfuls | ea4e378c8d9e530edadd4a3315fe9e8acc98460b | ["Apache-2.0"] | count: null | dates: null
max_forks: GeneratePassword/generate_password_v2.py | OneScreenfulOfPython/screenfuls | ea4e378c8d9e530edadd4a3315fe9e8acc98460b | ["Apache-2.0"] | count: null | dates: null
content:
import os, sys
import random
import string

try:
    # Make Python2 work like Python3
    input = raw_input
except NameError:
    # On Python3; already using input
    pass

letters = string.ascii_letters
numbers = string.digits
punctuation = string.punctuation

def generate(password_length, at_least_one_letter, at_least_one_number, at_least_one_punctuation):
    """Generate a password by including enough random
    characters to meet the password length restriction.
    In addition, the user can specify that at least one
    of each of the classes of character be used.
    """
    #
    # Any combination of characters is valid
    #
    valid_characters = ""
    if at_least_one_letter:
        valid_characters += letters
    if at_least_one_number:
        valid_characters += numbers
    if at_least_one_punctuation:
        valid_characters += punctuation

    #
    # Start with a blank password and then go round enough
    # times to make a password of the required length.
    #
    password = ""
    for i in range(password_length):
        #
        # Each time around, ensure that one of each of the selected
        # groups is chosen, and then just choose randomly from all
        # groups.
        #
        if at_least_one_letter:
            character = random.choice(letters)
            at_least_one_letter = False
        elif at_least_one_number:
            character = random.choice(numbers)
            at_least_one_number = False
        elif at_least_one_punctuation:
            character = random.choice(punctuation)
            at_least_one_punctuation = False
        else:
            character = random.choice(valid_characters)
        password += character

    #
    # Finally, shuffle the password so we don't always get a
    # letter at the beginning, with a number after and some
    # punctuation.
    #
    characters = list(password)
    #
    # random.shuffle shuffles a list *in place*
    #
    random.shuffle(characters)
    #
    # X.join(...) means: return all the strings in (...) joined by X
    # ", ".join(['Eggs', 'Bacon', 'Beans']) => "Eggs, Bacon, Beans"
    # But if you want to generate *real* .csv files, use the csv module
    # because there are lots of corner-cases.
    #
    password = "".join(characters)
    return password

if __name__ == '__main__':
    password_length = int(input("How many letters? "))
    at_least_one_letter = "Y" == (input("At least one letter [Y/n]? ").upper() or "Y")
    at_least_one_number = "Y" == (input("At least one number [Y/n]? ").upper() or "Y")
    at_least_one_punctuation = "Y" == (input("At least one punctuation [Y/n]? ").upper() or "Y")
    password = generate(password_length, at_least_one_letter, at_least_one_number, at_least_one_punctuation)
    print("Your password is: {}".format(password))
avg_line_length: 33.5 | max_line_length: 108 | alphanum_fraction: 0.658138

hexsha: b93a4101b4ff85c90fbde08405fbe7515b2816bd | size: 17,093 | ext: py | lang: Python
max_stars: bot/jobs/thorchain_node_jobs.py | block42-blockchain-company/thornode-telegram-bot | 6478b1eb41e36c5fdd327b963b55343de1ce5337 | ["MIT"] | count: 15 | 2020-04-21T07:51:26.000Z to 2021-11-02T05:45:48.000Z
max_issues: bot/jobs/thorchain_node_jobs.py | block42-blockchain-company/thornode-telegram-bot | 6478b1eb41e36c5fdd327b963b55343de1ce5337 | ["MIT"] | count: 78 | 2020-04-13T23:01:16.000Z to 2021-05-09T11:46:25.000Z
max_forks: bot/jobs/thorchain_node_jobs.py | block42-blockchain-company/thornode-telegram-bot | 6478b1eb41e36c5fdd327b963b55343de1ce5337 | ["MIT"] | count: 5 | 2020-09-03T21:19:16.000Z to 2021-11-20T00:17:56.000Z
content:
from constants.messages import get_node_health_warning_message, get_node_healthy_again_message
from handlers.chat_helpers import try_message_with_home_menu, try_message_to_all_users
from packaging import version
from service.utils import *
def check_thorchain_catch_up_status(context, node_address):
    """
    Check if node is some blocks behind with catch up status
    """
    chat_id = context.job.context['chat_id']
    node_data = context.job.context['chat_data']['nodes'][node_address]

    if 'is_catching_up' not in node_data:
        node_data['is_catching_up'] = False

    try:
        is_currently_catching_up = is_thorchain_catching_up(
            node_data['ip_address'])
    except (Timeout, ConnectionError):
        logger.warning(f"Timeout or Connection error with {node_data['ip_address']}")
        return

    if node_data['is_catching_up'] != is_currently_catching_up:
        try:
            block_height = get_latest_block_height(node_data['ip_address'])
        except (Timeout, ConnectionError):
            logger.warning(f"Timeout or Connection error with {node_data['ip_address']}")
            block_height = "currently unavailable"
        if is_currently_catching_up:
            node_data['is_catching_up'] = True
            text = 'The Node is behind the latest block height and catching up! ' + '\n' + \
                   'IP: ' + node_data['ip_address'] + '\n' + \
                   'THORNode: ' + node_data['alias'] + '\n' + \
                   'Node address: ' + node_address + '\n' + \
                   'Current block height: ' + block_height + '\n\n' + \
                   'Please check your Thornode immediately!'
        else:
            node_data['is_catching_up'] = False
            text = 'The node caught up to the latest block height again! ' + '\n' + \
                   'IP: ' + node_data['ip_address'] + '\n' + \
                   'THORNode: ' + node_data['alias'] + '\n' + \
                   'Node address: ' + node_address + '\n' + \
                   'Current block height: ' + block_height
        try_message_with_home_menu(context=context, chat_id=chat_id, text=text)


def check_thorchain_midgard_api(context, node_address):
    """
    Check that Midgard API is ok
    """
    chat_id = context.job.context['chat_id']
    node_data = context.job.context['chat_data']['nodes'][node_address]

    was_healthy = node_data.setdefault('is_midgard_healthy', True)
    is_midgard_healthy = is_midgard_api_healthy(node_data['ip_address'])

    if was_healthy != is_midgard_healthy:
        if is_midgard_healthy:
            text = 'Midgard API is healthy again! ' + '\n' + \
                   'IP: ' + node_data['ip_address'] + '\n' + \
                   'THORNode: ' + node_data['alias'] + '\n' + \
                   'Node address: ' + node_address
            try_message_with_home_menu(context, chat_id=chat_id, text=text)
        else:
            text = 'Midgard API is not healthy anymore! ' + '\n' + \
                   'IP: ' + node_data['ip_address'] + '\n' + \
                   'THORNode: ' + node_data['alias'] + '\n' + \
                   'Node address: ' + node_address + '\n\n' + \
                   'Please check your Thornode immediately!'
            try_message_with_home_menu(context, chat_id=chat_id, text=text)

    node_data['is_midgard_healthy'] = is_midgard_healthy
avg_line_length: 43.164141 | max_line_length: 126 | alphanum_fraction: 0.600889

hexsha: b93aaafe8012e07a3a1b7cd6bfac2b4027e51ebd | size: 3,760 | ext: py | lang: Python
max_stars: hard-gists/7578539/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | ["Apache-2.0"] | count: 21 | 2019-07-08T08:26:45.000Z to 2022-01-24T23:53:25.000Z
max_issues: hard-gists/7578539/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | ["Apache-2.0"] | count: 5 | 2019-06-15T14:47:47.000Z to 2022-02-26T05:02:56.000Z
max_forks: hard-gists/7578539/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | ["Apache-2.0"] | count: 17 | 2019-05-16T03:50:34.000Z to 2021-01-14T14:35:12.000Z
content:
from pylab import *
from numpy import *
from numpy.linalg import solve
from scipy.integrate import odeint
from scipy.stats import norm, uniform, beta
from scipy.special import jacobi

a = 0.0
b = 3.0
theta = 1.0
sigma = sqrt(theta/(2*(a+b+2)))
tscale = 0.05

invariant_distribution = poly1d([-1 for x in range(int(a))], True)*poly1d([1 for x in range(int(b))], True)

gaussian_var = norm()

def poly_to_jacobi(x):
    """x is a poly1d object"""
    xc = x.coeffs
    N = x.order+1
    matrix = zeros(shape=(N, N), dtype=float)
    for i in range(N):
        matrix[N-i-1:N, i] = jacobi(i, a, b).coeffs
    return solve(matrix, xc)

def propagate_jacobi(pc, t):
    """Takes jacobi coefficients and propagates them"""
    n = arange(pc.shape[0], dtype=float)
    l = theta*n*(n+a+b+1.0)/(a+b+2.0)*tscale
    return exp(-l*t)*pc

tmax = 4

# NOTE: beta_prior, random_walk, truncate_unnecessary_jacobi,
# jacobi_to_poly_no_invariant, pde_solve and transform_to_x are used below but
# not defined in this truncated row; they come from elsewhere in the original gist.
prior = beta_prior(40, 20)
prior_in_jacobi = poly_to_jacobi(prior)

dt = 0.1
times = arange(0, tmax, dt)
x = arange(-1, 1, 0.01)

rw_dt = 0.01
t, y = random_walk(0.35*2-1, tmax, rw_dt)

solution_as_x = zeros(shape=(times.size, x.size), dtype=float)
solution_as_jacobi = None

empirical_ctr = zeros(shape=(4,), dtype=float)

for i in range(0, 4):
    nt = int(1.0/dt)
    prior = prior_in_jacobi
    rnd = uniform(0, 1)

    if (i > 0):
        nsamples = 40
        r = rnd.rvs(nsamples)
        ctr = (y[i/rw_dt]+1)/2.0
        print "CTR: " + str(ctr)

        success = (r < ctr).sum()
        print "Empirical: " + str(success / float(nsamples))

        evidence = beta_prior(nsamples - success, success)

        prior = None
        j = truncate_unnecessary_jacobi(solution_as_jacobi[int(1/dt)-1])
        prior = poly_to_jacobi(evidence * jacobi_to_poly_no_invariant(j))

        empirical_ctr[i] = success / float(nsamples)

    solution_as_jacobi = pde_solve(prior, times[i*nt:(i+1)*nt])
    solution_as_x[i*nt:(i+1)*nt] = transform_to_x(solution_as_jacobi, x)

plot(arange(0, 4), empirical_ctr, 'go')
plot(t, (y+1)/2.0, 'k')

imshow(solution_as_x.transpose(), origin='lower', extent=[0, tmax, 0, 1])
xlabel("time")
ylabel("CTR")
title("Bayesian Estimate of CTR")
colorbar()
show()
avg_line_length: 27.246377 | max_line_length: 109 | alphanum_fraction: 0.611702

hexsha: b93b21d31a5eecb527d2b3ad7f00cf5d4683d661 | size: 1,535 | ext: py | lang: Python
max_stars: forms.py | lennykioko/Flask-social-network | 15bfe1f7dca90074c0cbef62c5da9d5a25b5ce65 | ["MIT"] | count: 1 | 2018-04-15T19:35:54.000Z to 2018-04-15T19:35:54.000Z
max_issues: forms.py | lennykioko/Flask-social-network | 15bfe1f7dca90074c0cbef62c5da9d5a25b5ce65 | ["MIT"] | count: null | dates: null
max_forks: forms.py | lennykioko/Flask-social-network | 15bfe1f7dca90074c0cbef62c5da9d5a25b5ce65 | ["MIT"] | count: null | dates: null
content:
# forms are not just about display, instead they are more of validation
# wtf forms protect our site against csrf attacks
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, TextAreaField
from wtforms.validators import (DataRequired, Regexp, ValidationError, Email,
                                Length, EqualTo)
from models import User
avg_line_length: 25.583333 | max_line_length: 85 | alphanum_fraction: 0.712704

hexsha: b93b8add4495a7de42fb7a036f7ba8c5ddea0d87 | size: 1,508 | ext: py | lang: Python
max_stars: pantam_cli/utils/messages.py | flmnt/pantam | da47d977e69ec410d0642b5ade1f2323c1b6b350 | ["MIT"] | count: 2 | 2020-10-04T10:29:43.000Z to 2021-03-30T13:45:09.000Z
max_issues: pantam_cli/utils/messages.py | flmnt/pantam | da47d977e69ec410d0642b5ade1f2323c1b6b350 | ["MIT"] | count: null | dates: null
max_forks: pantam_cli/utils/messages.py | flmnt/pantam | da47d977e69ec410d0642b5ade1f2323c1b6b350 | ["MIT"] | count: null | dates: null
content:
from sys import stderr, stdout
from enum import Enum

from colored import fg, attr

PANTAM: str = fg("yellow") + attr("bold") + "PANTAM" + attr("reset")

colour_msg = lambda msg, colour: fg(colour) + attr("bold") + msg + attr("reset")

info_msg = lambda msg: colour_msg(msg, "blue")
success_msg = lambda msg: colour_msg(msg, "green")
error_msg = lambda msg: colour_msg(msg, "red")


class NewLine(Enum):
    # Reconstructed: the original definition is missing from this truncated
    # row, but write_msg() below compares spacing against these three members.
    before = 1
    after = 2
    both = 3


def write_msg(msg: str, spacing: NewLine = None) -> None:
    """Write message to stdout"""
    prefix: str = "\n" if spacing in (NewLine.before, NewLine.both) else ""
    suffix: str = "\n" if spacing in (NewLine.after, NewLine.both) else ""
    stdout.write("%s%s%s" % (prefix, msg, suffix))


def write_error(msg: str) -> None:
    """Write message to stderr"""
    stderr.write("\n%s\n" % msg)


welcome_msg = (
    lambda: PANTAM
    + """
The microframework for microservices.
Let's build your app...
"""
)

name_index_file_msg = lambda: "What is the name of your main script?"

name_actions_folder_msg = lambda: "What is the name of your actions folder?"


def create_actions_file_msg(second_run: bool):
    """Actions File Message"""
    article = "another" if second_run else "an"
    return "Do you want to create %s action file?" % article


name_actions_file_msg = lambda: "What is the name of your actions file?"

confirm_structure_msg = (
    lambda structure: """Your application will look like this:
%s
Happy to proceed?"""
    % structure
)
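# Usage sketch (illustrative only):
if __name__ == "__main__":
    write_msg(info_msg("Building your app..."), NewLine.both)
    write_error(error_msg("Something went wrong"))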
avg_line_length: 24.322581 | max_line_length: 80 | alphanum_fraction: 0.671088

hexsha: b93da1b1bbce8a3e5fafae55f093b2f5323fb641 | size: 2,510 | ext: py | lang: Python
max_stars: tests/manage/test_remove_mon_from_cluster.py | zmc/ocs-ci | fcf51f3637f657689ba5a8ac869f2b14ac04b0cf | ["MIT"] | count: null | dates: null
max_issues: tests/manage/test_remove_mon_from_cluster.py | zmc/ocs-ci | fcf51f3637f657689ba5a8ac869f2b14ac04b0cf | ["MIT"] | count: null | dates: null
max_forks: tests/manage/test_remove_mon_from_cluster.py | zmc/ocs-ci | fcf51f3637f657689ba5a8ac869f2b14ac04b0cf | ["MIT"] | count: null | dates: null
content:
"""
A Testcase to remove mon from
when I/O's are happening.
Polarion-ID- OCS-355
"""
import logging
import pytest
from ocs_ci.ocs import ocp, constants
from ocs_ci.framework.testlib import tier4, ManageTest
from ocs_ci.framework import config
from ocs_ci.ocs.resources import pod
from tests.helpers import run_io_with_rados_bench, delete_cephblockpool
from ocs_ci.ocs.cluster import CephCluster
from ocs_ci.utility.retry import retry
from ocs_ci.ocs.exceptions import CephHealthException
log = logging.getLogger(__name__)
def run_io_on_pool():
    """
    Runs the I/O on the pool and delete the pool

    Returns: A thread of I/O
    """
    tools_pod = pod.get_ceph_tools_pod()
    tools_pod.add_role(role='client')

    return run_io_with_rados_bench(
        ceph_pods=[tools_pod],
        config={'time': 45, 'cleanup': False,
                'pool': 'test-pool'
                }
    )
avg_line_length: 29.529412 | max_line_length: 94 | alphanum_fraction: 0.688446

hexsha: b93f9ebd7406695d9627c10b5f85877c35692320 | size: 2,690 | ext: py | lang: Python
max_stars: smartystreets_python_sdk/us_autocomplete_pro/client.py | Caaz/smartystreets-python-sdk | f56cd00d29861bde297143c128f79a4b1d89541c | ["Apache-2.0"] | count: null | dates: null
max_issues: smartystreets_python_sdk/us_autocomplete_pro/client.py | Caaz/smartystreets-python-sdk | f56cd00d29861bde297143c128f79a4b1d89541c | ["Apache-2.0"] | count: null | dates: null
max_forks: smartystreets_python_sdk/us_autocomplete_pro/client.py | Caaz/smartystreets-python-sdk | f56cd00d29861bde297143c128f79a4b1d89541c | ["Apache-2.0"] | count: null | dates: null
content:
from smartystreets_python_sdk import Request
from smartystreets_python_sdk.exceptions import SmartyException
from smartystreets_python_sdk.us_autocomplete_pro import Suggestion, geolocation_type
avg_line_length: 42.03125 | max_line_length: 112 | alphanum_fraction: 0.717472

hexsha: b94044f865f05e0aee9b401bba3907e01e40ff6c | size: 11,578 | ext: py | lang: Python
max_stars: mssqlvc.py | Saritasa/mssqlvc | 836caeea59cc0ed23234687b94062e007707c603 | ["BSD-2-Clause"] | count: 2 | 2016-09-22T04:36:46.000Z to 2018-07-31T21:36:42.000Z
max_issues: mssqlvc.py | Saritasa/mssqlvc | 836caeea59cc0ed23234687b94062e007707c603 | ["BSD-2-Clause"] | count: 1 | 2016-02-02T07:58:29.000Z to 2016-02-02T14:19:18.000Z
max_forks: mssqlvc.py | krasninja/mssqlvc | 836caeea59cc0ed23234687b94062e007707c603 | ["BSD-2-Clause"] | count: 2 | 2016-09-21T09:48:44.000Z to 2020-03-24T15:59:54.000Z
content:
# -*- coding: utf-8 -*-
"""
mssqlvc
~~~~~~~
Database version control utility for Microsoft SQL Server. See README.md for more information.
Licensed under the BSD license. See LICENSE file in the project root for full license information.
"""
import argparse
import datetime
import io
import logging
import os
import re
import sys
import urlparse
try:
    import clr
except ImportError:
    print('Cannot import clr module, make sure you run this script using IronPython')
    exit(2)
import System
clr.AddReference('Microsoft.SqlServer.Smo')
clr.AddReference('Microsoft.SqlServer.SqlEnum')
clr.AddReference('Microsoft.SqlServer.ConnectionInfo')
import Microsoft.SqlServer.Management.Smo as Smo
import Microsoft.SqlServer.Management.Common as Common
__author__ = 'Ivan Kozhin'
__copyright__ = 'Copyright (c) 2015-2016, Saritasa'
__license__ = 'BSD'
__version__ = '1.4.5'
__all__ = ['MsSqlVersion']
def get_cmd_line_parser():
    """Get initialized argparse.ArgumentParser object"""
    parser = argparse.ArgumentParser(
        description='MSSQL database patch history tool',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog='''Example: %(prog)s -c "mssql://sa:123@host\instance/database" -d "D:/1/project/patch"''')
    parser.add_argument('--connection', '-c',
                        required=True,
                        dest='connection',
                        action='store',
                        help='connection string in rfc1738 url format, required')
    parser.add_argument('--directory', '-d',
                        dest='directory',
                        action='store',
                        default='.',
                        help='directory with patch files')
    parser.add_argument('--log', '-l',
                        dest='log',
                        action='store',
                        help='log file')
    parser.add_argument('--noexecute', '-n',
                        action='store_true',
                        dest='noexecute',
                        default=False,
                        help='displays pending script files with no execution')
    parser.add_argument('--noexecute-fill', '-nf',
                        action='store_true',
                        dest='noexecute_fill',
                        default=False,
                        help='displays pending script files with no execution and fills patch table')
    parser.add_argument('--stop-on-error', '-soe',
                        action='store_true',
                        dest='stop_on_error',
                        default=False,
                        help='stops execution if any script fails')
    parser.add_argument('--exclude-pattern', '-ep',
                        dest='exclude_pattern',
                        help='skips files match to regular expression')
    parser.add_argument('--record-files-only', '-rfo',
                        action='store_true',
                        dest='record_files_only',
                        default=False,
                        help='only file names will be stored to patch table without folder paths')
    parser.add_argument('--case-insensitive', '-ci',
                        action='store_true',
                        dest='case_insensitive',
                        default=False,
                        help='use case insensitive to compare patch files so "PatchName.sql" and "patchname.sql" is the same')
    parser.add_argument('--debug',
                        action='store_true',
                        dest='debug',
                        default=False,
                        help='enables debug output')
    parser.add_argument('--version', '-v',
                        action='version',
                        version='%(prog)s ' + __version__)
    return parser
if __name__ == '__main__':
    # parser
    parser = get_cmd_line_parser()
    parser_args = parser.parse_args()
    if parser_args.connection is None or parser_args.directory is None:
        parser.print_help()
        exit(1)

    # logging
    logger = logging.getLogger('mssql')
    if parser_args.log:
        fh = logging.FileHandler(parser_args.log)
        fh.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s] %(message)s'))
        logger.addHandler(fh)
    ch = logging.StreamHandler()
    ch.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s] %(message)s'))
    logger.setLevel(logging.DEBUG if parser_args.debug else logging.INFO)
    logger.addHandler(ch)

    # database handle
    sqlvc = MsSqlVersion(parser_args.connection, parser_args.directory, exclude_pattern=parser_args.exclude_pattern,
                         stop_on_error=parser_args.stop_on_error, case_insensitive=parser_args.case_insensitive,
                         record_files_only=parser_args.record_files_only, logger=logger)
    if parser_args.noexecute:
        for patch in sqlvc.get_pending_patches():
            logger.info('  ' + patch)
    elif parser_args.noexecute_fill:
        sqlvc.fill()
    else:
        sqlvc.update()
avg_line_length: 39.515358 | max_line_length: 121 | alphanum_fraction: 0.640266

hexsha: b9408aacd4d750c790ebb27107e026e183ea1d35 | size: 4,296 | ext: py | lang: Python
max_stars: lib/python3.6/site-packages/statsmodels/iolib/tests/test_table_econpy.py | KshitizSharmaV/Quant_Platform_Python | d784aa0604d8de5ba5ca0c3a171e3556c0cd6b39 | ["BSD-3-Clause"] | count: 1 | 2020-05-09T08:42:52.000Z to 2020-05-09T08:42:52.000Z
max_issues: statsmodels/iolib/tests/test_table_econpy.py | yanzhenxiong/statsmodels | e56c4046ff8807c3c16d6a9293b5cb5dfe6f0cd0 | ["BSD-3-Clause"] | count: null | dates: null
max_forks: statsmodels/iolib/tests/test_table_econpy.py | yanzhenxiong/statsmodels | e56c4046ff8807c3c16d6a9293b5cb5dfe6f0cd0 | ["BSD-3-Clause"] | count: 1 | 2020-05-09T08:42:58.000Z to 2020-05-09T08:42:58.000Z
content:
'''
Unit tests table.py.
:see: http://docs.python.org/lib/minimal-example.html for an intro to unittest
:see: http://agiletesting.blogspot.com/2005/01/python-unit-testing-part-1-unittest.html
:see: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/305292
'''
from __future__ import absolute_import
from statsmodels.compat.python import zip
import numpy as np
from numpy.testing import assert_equal
__docformat__ = "restructuredtext en"
from statsmodels.iolib.table import Cell, SimpleTable
from statsmodels.iolib.table import default_latex_fmt
from statsmodels.iolib.table import default_html_fmt
ltx_fmt1 = default_latex_fmt.copy()
html_fmt1 = default_html_fmt.copy()
txt_fmt1 = dict(
    data_fmts = ['%0.2f', '%d'],
    empty_cell = ' ',
    colwidths = 1,
    colsep=' * ',
    row_pre = '* ',
    row_post = ' *',
    table_dec_above='*',
    table_dec_below='*',
    header_dec_below='*',
    header_fmt = '%s',
    stub_fmt = '%s',
    title_align='r',
    header_align = 'r',
    data_aligns = "r",
    stubs_align = "l",
    fmt = 'txt'
)
cell0data = 0.0000
cell1data = 1
row0data = [cell0data, cell1data]
row1data = [2, 3.333]
table1data = [row0data, row1data]
test1stubs = ('stub1', 'stub2')
test1header = ('header1', 'header2')
#test1header = ('header1\nheader1a', 'header2\nheader2a')
tbl = SimpleTable(table1data, test1header, test1stubs,
                  txt_fmt=txt_fmt1, ltx_fmt=ltx_fmt1, html_fmt=html_fmt1)
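# Rendering sketch: a SimpleTable can be emitted in each registered format
# (these methods exist on statsmodels' SimpleTable; output depends on the
# fmt dicts above):
#   print(tbl.as_text())
#   print(tbl.as_latex_tabular())
#   print(tbl.as_html())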
avg_line_length: 30.041958 | max_line_length: 261 | alphanum_fraction: 0.573091

hexsha: b9409e44daa0d7a262748b347f053c849e397b73 | size: 291 | ext: py | lang: Python
max_stars: homeassistant/components/todoist/types.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | ["Apache-2.0"] | count: 30,023 | 2016-04-13T10:17:53.000Z to 2020-03-02T12:56:31.000Z
max_issues: homeassistant/components/todoist/types.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | ["Apache-2.0"] | count: 24,710 | 2016-04-13T08:27:26.000Z to 2020-03-02T12:59:13.000Z
max_forks: homeassistant/components/todoist/types.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | ["Apache-2.0"] | count: 11,956 | 2016-04-13T18:42:31.000Z to 2020-03-02T09:32:12.000Z
content:
"""Types for the Todoist component."""
from __future__ import annotations
from typing import TypedDict
avg_line_length: 19.4 | max_line_length: 65 | alphanum_fraction: 0.697595

hexsha: b9417eb816defb8a05e4de472fa5d06b0845774d | size: 4,237 | ext: py | lang: Python
max_stars: src/c/c_pyzstd.py | corneliusroemer/pyzstd | 06f14ad29735d9ae85c188703dcb64c24686c4f2 | ["BSD-3-Clause"] | count: 29 | 2020-10-13T03:35:37.000Z to 2022-03-14T11:09:47.000Z
max_issues: src/c/c_pyzstd.py | corneliusroemer/pyzstd | 06f14ad29735d9ae85c188703dcb64c24686c4f2 | ["BSD-3-Clause"] | count: 12 | 2020-12-22T02:27:47.000Z to 2022-03-18T14:54:33.000Z
max_forks: src/c/c_pyzstd.py | corneliusroemer/pyzstd | 06f14ad29735d9ae85c188703dcb64c24686c4f2 | ["BSD-3-Clause"] | count: 3 | 2020-11-21T20:57:10.000Z to 2021-09-26T01:14:44.000Z
content:
from collections import namedtuple
from enum import IntEnum

from ._zstd import *
from . import _zstd

__all__ = (# From this file
           'compressionLevel_values', 'get_frame_info',
           'CParameter', 'DParameter', 'Strategy',
           # From _zstd
           'ZstdCompressor', 'RichMemZstdCompressor',
           'ZstdDecompressor', 'EndlessZstdDecompressor',
           'ZstdDict', 'ZstdError', 'decompress', 'get_frame_size',
           'compress_stream', 'decompress_stream',
           'zstd_version', 'zstd_version_info', 'zstd_support_multithread')

# Used in __init__.py
_ZSTD_DStreamInSize = _zstd._ZSTD_DStreamInSize
_train_dict = _zstd._train_dict
_finalize_dict = _zstd._finalize_dict

# compressionLevel_values
_nt_values = namedtuple('values', ['default', 'min', 'max'])
compressionLevel_values = _nt_values(_zstd._ZSTD_defaultCLevel,
                                     _zstd._ZSTD_minCLevel,
                                     _zstd._ZSTD_maxCLevel)

_nt_frame_info = namedtuple('frame_info',
                            ['decompressed_size', 'dictionary_id'])

def get_frame_info(frame_buffer):
    """Get zstd frame information from a frame header.

    Argument
    frame_buffer: A bytes-like object. It should start from the beginning of
                  a frame, and needs to include at least the frame header (6 to
                  18 bytes).

    Return a two-item namedtuple: (decompressed_size, dictionary_id)

    If decompressed_size is None, decompressed size is unknown.

    dictionary_id is a 32-bit unsigned integer value. 0 means dictionary ID was
    not recorded in the frame header, the frame may or may not need a dictionary
    to be decoded, and the ID of such a dictionary is not specified.

    It's possible to append more items to the namedtuple in the future."""
    ret_tuple = _zstd._get_frame_info(frame_buffer)
    return _nt_frame_info(*ret_tuple)

# Set CParameter/DParameter types for validity check
_zstd._set_parameter_types(CParameter, DParameter)
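# Usage sketch, assuming the installed package is imported as `pyzstd`
# (values illustrative):
#   >>> import pyzstd
#   >>> frame = pyzstd.compress(b"hello zstd")
#   >>> pyzstd.get_frame_info(frame)
#   frame_info(decompressed_size=10, dictionary_id=0)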
avg_line_length: 36.213675 | max_line_length: 80 | alphanum_fraction: 0.663441

hexsha: b941e493bd72a0cc29b7f5487a4bd483b40a8fe3 | size: 4,414 | ext: py | lang: Python
max_stars: test/unit/data/model/mapping/common.py | quacksawbones/galaxy-1 | 65f7259b29d3886e526d9be670c60d9da9fbe038 | ["CC-BY-3.0"] | count: 1,085 | 2015-02-18T16:14:38.000Z to 2022-03-30T23:52:07.000Z
max_issues: test/unit/data/model/mapping/common.py | quacksawbones/galaxy-1 | 65f7259b29d3886e526d9be670c60d9da9fbe038 | ["CC-BY-3.0"] | count: 11,253 | 2015-02-18T17:47:32.000Z to 2022-03-31T21:47:03.000Z
max_forks: test/unit/data/model/mapping/common.py | quacksawbones/galaxy-1 | 65f7259b29d3886e526d9be670c60d9da9fbe038 | ["CC-BY-3.0"] | count: 1,000 | 2015-02-18T16:18:10.000Z to 2022-03-29T08:22:56.000Z
content:
from abc import ABC, abstractmethod
from contextlib import contextmanager
from uuid import uuid4

import pytest
from sqlalchemy import (
    delete,
    select,
    UniqueConstraint,
)


def persist(session, obj, return_id=True):
    """
    Use the session to store obj in database, then remove obj from session,
    so that on a subsequent load from the database we get a clean instance.
    """
    session.add(obj)
    session.flush()
    obj_id = obj.id if return_id else None  # save this before obj is expunged
    session.expunge(obj)
    return obj_id


def delete_from_database(session, objects):
    """
    Delete each object in objects from database.
    May be called at the end of a test if use of a context manager is impractical.
    (Assume all objects have the id field as their primary key.)
    """
    # Ensure we have a list of objects (check for list explicitly: a model can be iterable)
    if not isinstance(objects, list):
        objects = [objects]

    for obj in objects:
        table = obj.__table__
        stmt = delete(table).where(table.c.id == obj.id)
        session.execute(stmt)


def collection_consists_of_objects(collection, *objects):
    """
    Returns True iff list(collection) == list(objects), where object equality is determined
    by primary key equality: object1.id == object2.id.
    """
    if len(collection) != len(objects):  # False if lengths are different
        return False
    if not collection:  # True if both are empty
        return True

    # Sort, then compare each member by its 'id' attribute, which must be its primary key.
    collection.sort(key=lambda item: item.id)
    objects_l = list(objects)
    objects_l.sort(key=lambda item: item.id)

    for item1, item2 in zip(collection, objects_l):
        if item1.id is None or item2.id is None or item1.id != item2.id:
            return False
    return True


def get_unique_value():
    """Generate unique values to accommodate unique constraints."""
    return uuid4().hex
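# Behaviour sketch for collection_consists_of_objects(); SimpleNamespace stands
# in for mapped model instances, since only an `id` attribute is required:
if __name__ == "__main__":
    from types import SimpleNamespace
    a, b = SimpleNamespace(id=1), SimpleNamespace(id=2)
    assert collection_consists_of_objects([b, a], a, b)   # order-insensitive
    assert not collection_consists_of_objects([a, b], a)  # length mismatch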
avg_line_length: 31.084507 | max_line_length: 98 | alphanum_fraction: 0.677843

hexsha: b9421dbb7e263a5a3de9a9e29e270b09ceba630c | size: 1,004 | ext: py | lang: Python
max_stars: django_events/users/management/commands/create_default_su.py | chrisBrookes93/django-events-management | 93886448a7bb85c8758324977ff67bcacc80bbec | ["MIT"] | count: null | dates: null
max_issues: django_events/users/management/commands/create_default_su.py | chrisBrookes93/django-events-management | 93886448a7bb85c8758324977ff67bcacc80bbec | ["MIT"] | count: null | dates: null
max_forks: django_events/users/management/commands/create_default_su.py | chrisBrookes93/django-events-management | 93886448a7bb85c8758324977ff67bcacc80bbec | ["MIT"] | count: null | dates: null
content:
from django.core.management.base import BaseCommand
from django.contrib.auth import get_user_model
avg_line_length: 41.833333 | max_line_length: 114 | alphanum_fraction: 0.661355

hexsha: b942ff3dafb5c886434a478e8bfb0592e83afd1c | size: 6,215 | ext: bzl | lang: Python
max_stars: antlir/bzl/image_layer.bzl | zeroxoneb/antlir | 811d88965610d16a5c85d831d317f087797ca732 | ["MIT"] | count: 28 | 2020-08-11T16:22:46.000Z to 2022-03-04T15:41:52.000Z
max_issues: antlir/bzl/image_layer.bzl | zeroxoneb/antlir | 811d88965610d16a5c85d831d317f087797ca732 | ["MIT"] | count: 137 | 2020-08-11T16:07:49.000Z to 2022-02-27T10:59:05.000Z
max_forks: antlir/bzl/image_layer.bzl | zeroxoneb/antlir | 811d88965610d16a5c85d831d317f087797ca732 | ["MIT"] | count: 10 | 2020-09-10T00:01:28.000Z to 2022-03-08T18:00:28.000Z
content:
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
An `image.layer` is a set of `feature`s with some additional parameters. Its
purpose is to materialize those `feature`s as a btrfs subvolume in the
per-repo `buck-image/out/volume/targets`.
We call the subvolume a "layer" because it can be built on top of a snapshot
of its `parent_layer`, and thus can be represented as a btrfs send-stream for
more efficient storage & distribution.
The Buck output of an `image.layer` target is a JSON file with information
on how to find the resulting layer in the per-repo
`buck-image/out/volume/targets`. See `SubvolumeOnDisk.to_json_file`.
## Implementation notes
The implementation of this converter deliberately minimizes the amount of
business logic in its command. The converter must include **only** our
interactions with the buck target graph. Everything else should be
delegated to subcommands.
### Command
In composing the `bash` command, our core maxim is: make it a hermetic
function of the converter's inputs -- do not read data from disk, do not
insert disk paths into the command, do not do anything that might cause the
bytes of the command to vary between machines or between runs. To achieve
this, we use Buck macros to resolve all paths, including those to helper
scripts. We rely on environment variables or pipes to pass data between the
helper scripts.
Another reason to keep this converter minimal is that `buck test` cannot
make assertions about targets that fail to build. Since we only have the
ability to test the "good" targets, it behooves us to put most logic in
external scripts, so that we can unit-test its successes **and** failures
thoroughly.
### Output
We mark `image.layer` uncacheable, because there's no easy way to teach Buck
to serialize a btrfs subvolume (for that, we have `package.new`).
That said, we should still follow best practices to avoid problems if e.g.
the user renames their repo, or similar. These practices include:
- The output JSON must store no absolute paths.
- Store Buck target paths instead of paths into the output directory.
### Dependency resolution
An `image.layer` consumes a set of `feature` outputs to decide what to put into
the btrfs subvolume. These outputs are actually just JSON files that
reference other targets, and do not contain the data to be written into the
image.
Therefore, `image.layer` has to explicitly tell buck that it needs all
direct dependencies of its `feature`s to be present on disk -- see our
`attrfilter` queries below. Without this, Buck would merrily fetch just
the `feature` JSONs from its cache, and not provide us with any of the
build artifacts that comprise the image.
We do NOT need the direct dependencies of the parent layer's features,
because we treat the parent layer as a black box -- whatever it has laid
down in the image, that's what it provides (and we don't care about how).
The consequences of this information hiding are:
- Better Buck cache efficiency -- we don't have to download
the dependencies of the ancestor layers' features. Doing that would be
wasteful, since those bits are redundant with what's in the parent.
- Ability to use genrule image layers / apply non-pure post-processing to
a layer. In terms of engineering, both of these non-pure approaches are
a terrible idea and a maintainability headache, but they do provide a
useful bridge for transitioning to Buck image builds from legacy
imperative systems.
- The image compiler needs a little extra code to walk the parent layer and
determine what it provides.
- We cannot have "unobservable" dependencies between features. Since
feature dependencies are expected to routinely cross layer boundaries,
feature implementations are forced only to depend on data that can be
inferred from the filesystem -- since this is all that the parent layer
implementation can do. NB: This is easy to relax in the future by
writing a manifest with additional metadata into each layer, and using
that metadata during compilation.
"""
load(":compile_image_features.bzl", "compile_image_features")
load(":image_layer_utils.bzl", "image_layer_utils")
load(":image_utils.bzl", "image_utils")
def image_layer(
        name,
        parent_layer = None,
        features = None,
        flavor = None,
        flavor_config_override = None,
        antlir_rule = "user-internal",
        **image_layer_kwargs):
    """
    Arguments
    - `parent_layer`: The name of another `image_layer` target, on
      top of which the current layer will install its features.
    - `features`: List of `feature` target paths and/or
      nameless structs from `feature.new`.
    - `flavor`: Picks default build options for the layer, including
      `build_appliance`, RPM installer, and others. See `flavor_helpers.bzl`
      for details.
    - `flavor_config_override`: A struct that can override the default
      values fetched from `REPO_CFG[flavor].flavor_to_config`.
    - `mount_config`: Specifies how this layer is mounted in the
      `mounts` field of a `feature` of a parent layer. See
      the field in `_image_layer_impl` in `image_layer_utils.bzl`
    - `runtime`: A list of desired helper buck targets to be emitted.
      `container` is always included in the list by default.
      See the field in `_image_layer_impl` in `image_layer_utils.bzl` and the
      [docs](/docs/tutorials/helper-buck-targets#imagelayer) for the list of
      possible helpers, their respective behaviours, and how to invoke them.
    """
    image_layer_utils.image_layer_impl(
        _rule_type = "image_layer",
        _layer_name = name,
        # Build a new layer. It may be empty.
        _make_subvol_cmd = compile_image_features(
            name = name,
            current_target = image_utils.current_target(name),
            parent_layer = parent_layer,
            features = features,
            flavor = flavor,
            flavor_config_override = flavor_config_override,
        ),
        antlir_rule = antlir_rule,
        **image_layer_kwargs
    )
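# Usage sketch for a BUCK file (target and feature names are made up, not
# from the antlir repo):
#
#     load("//antlir/bzl:image_layer.bzl", "image_layer")
#
#     image_layer(
#         name = "my-layer",
#         parent_layer = "//images:base",
#         features = ["//images:my-feature"],
#     )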
avg_line_length: 44.078014 | max_line_length: 79 | alphanum_fraction: 0.740628

hexsha: b943636ba1006005819134b02620af2faa23d559 | size: 84 | ext: py | lang: Python
max_stars: python/testData/debug/test_ignore_lib.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | ["Apache-2.0"] | count: 2 | 2019-04-28T07:48:50.000Z to 2020-12-11T14:18:08.000Z
max_issues: python/testData/debug/test_ignore_lib.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | ["Apache-2.0"] | count: 173 | 2018-07-05T13:59:39.000Z to 2018-08-09T01:12:03.000Z
max_forks: python/testData/debug/test_ignore_lib.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | ["Apache-2.0"] | count: 2 | 2020-03-15T08:57:37.000Z to 2020-04-07T04:48:14.000Z
content:
from calendar import setfirstweekday
stopped_in_user_file = True
setfirstweekday(15)
avg_line_length: 28 | max_line_length: 36 | alphanum_fraction: 0.880952

hexsha: b9443b673da6e4fd8c252e11eba4606e69192845 | size: 1,036 | ext: py | lang: Python
max_stars: promt_tr/__main__.py | ffreemt/promt-tr-free | ff20b0f176f9611fa5a834af5aeaa9ef6ca3a3ee | ["MIT"] | count: null | dates: null
max_issues: promt_tr/__main__.py | ffreemt/promt-tr-free | ff20b0f176f9611fa5a834af5aeaa9ef6ca3a3ee | ["MIT"] | count: null | dates: null
max_forks: promt_tr/__main__.py | ffreemt/promt-tr-free | ff20b0f176f9611fa5a834af5aeaa9ef6ca3a3ee | ["MIT"] | count: null | dates: null
content:
''' __main__, to run:
python -m promt_tr
'''
import sys
from random import randint

from promt_tr import promt_tr, LANG_CODES


# pragma: no cover
def main():
    '''main'''
    from_lang = 'auto'
    to_lang = 'zh'
    text = 'test ' + str(randint(0, 10000))

    if not sys.argv[1:]:
        print('Provide some English text, with an optional to_lang')
        print('E.g., python -m promt_tr test this and that de')
        print('Testing with some random text\n')
    else:
        argv = sys.argv[1:]
        len_ = len(argv)
        if len_ == 1:
            if argv[0] in LANG_CODES:
                to_lang = argv[0]
            else:
                text = argv[0]
        elif argv[-1] in LANG_CODES:
            to_lang = argv[-1]
            text = ' '.join(argv[:-1])
        else:
            text = ' '.join(argv)

    for to_lang in ['zh', 'de', 'fr', 'it', 'es']:
        resu = promt_tr(text, from_lang, to_lang)
        print(f'[{text}] translated to [{to_lang}]: [{resu}]')


if __name__ == '__main__':
    main()
avg_line_length: 23.545455 | max_line_length: 68 | alphanum_fraction: 0.527027

hexsha: b9458ab72f55b4db845f6d76e44dba3b00e000ed | size: 6,265 | ext: py | lang: Python
max_stars: src/features/v3/proc_v3_n1_calc_distance.py | askoki/nfl_dpi_prediction | dc3256f24ddc0b6725eace2081d1fb1a7e5ce805 | ["MIT"] | count: null | dates: null
max_issues: src/features/v3/proc_v3_n1_calc_distance.py | askoki/nfl_dpi_prediction | dc3256f24ddc0b6725eace2081d1fb1a7e5ce805 | ["MIT"] | count: null | dates: null
max_forks: src/features/v3/proc_v3_n1_calc_distance.py | askoki/nfl_dpi_prediction | dc3256f24ddc0b6725eace2081d1fb1a7e5ce805 | ["MIT"] | count: null | dates: null
content:
import os
import sys
import pandas as pd
from datetime import datetime
from settings import RAW_DATA_DIR, DataV3, DATA_V3_SUBVERSION
from src.features.helpers.processing import add_missing_timestamp_values
from src.features.helpers.processing_v3 import get_closest_players, get_players_and_ball_indices, calculate_distance, \
normalize_according_to_play_direction, check_group_event
from src.features.helpers.processing_v4 import home_has_possession, calculate_team_sitation
week_num = int(sys.argv[1])
data_v3 = DataV3(DATA_V3_SUBVERSION)
save_file_path = data_v3.get_step1_checkpoint_path(week_num)
try:
    clean_df = pd.read_csv(save_file_path)
    save_file_exists = True
except FileNotFoundError:
    save_file_exists = False

if not save_file_exists:
    print("Started loading data")
    play_df = pd.read_csv(os.path.join(RAW_DATA_DIR, 'plays.csv'))
    games_df = pd.read_csv(os.path.join(RAW_DATA_DIR, 'games.csv'))
    week_and_games = games_df[games_df.week == week_num]
    tracking_df = pd.read_csv(os.path.join(RAW_DATA_DIR, f'week{week_num}.csv'))

    print("Data loaded. Start processing timestamps")
    tracking_df = add_missing_timestamp_values(tracking_df)
    games_n_plays_df = play_df.merge(week_and_games, how='inner', on='gameId')
    m_grouped = games_n_plays_df.groupby(['gameId', 'playId'])
    df_t = tracking_df.merge(games_n_plays_df, how='left', on=['gameId', 'playId'])

    # Remove all events without 'pass_forward'
    df_t_grouped = df_t.groupby(['gameId', 'playId'])
    df_t_v3 = df_t.copy().sort_index()
    for name, group in df_t_grouped:
        game_id, play_id = name
        # if group does not contain pass forward, drop it
        if all(group.event != 'pass_forward'):
            df_t_v3 = df_t_v3[(df_t_v3.gameId != game_id) | (df_t_v3.playId != play_id)]

    df_t_v3_s = df_t_v3.sort_values(by=['gameId', 'playId', 'time', 'event'])
    df_t_v3_s = df_t_v3_s.reset_index(drop=True)
    df_t_grouped = df_t_v3_s.groupby(['gameId', 'playId'])

    # remove all values before 'pass_forward'
    print("Removing all values before pass forward event...")
    for name, group in df_t_grouped:
        game_id, play_id = name
        pass_forward_frame_id = group[group.event == 'pass_forward'].index.min() - 1
        remove_start = group.index.min()
        df_t_v3_s = df_t_v3_s.drop(df_t_v3_s.loc[remove_start:pass_forward_frame_id].index)

    pd.options.mode.chained_assignment = None
    gb = df_t_v3_s.groupby(['gameId', 'playId'])

    print('Getting closest players...')
    keep_indices = []
    for name, group in gb:
        game_id, play_id = name
        try:
            event_3rd = group.event.unique()[2]
        except IndexError:
            print('Number of events is < 3, skipping...')
            continue
        situation_df = group[group.event == event_3rd]

        # convert dataframe into series
        ball_row = situation_df[situation_df.team == 'football'].head(1)

        # remove ball
        player_situation_df = situation_df[situation_df.team != 'football']
        try:
            p1, p2 = get_closest_players(player_situation_df, ball_row.x.item(), ball_row.y.item())
        except ValueError:
            print('Value Error raised. This group will be skipped.')
            continue
        p_n_b_indices = get_players_and_ball_indices(group, p1, p2)
        if p_n_b_indices:
            keep_indices.extend(p_n_b_indices)

    clean_df = df_t_v3_s[df_t_v3_s.index.isin(keep_indices)]
    clean_df.to_csv(
        save_file_path,
        index=False
    )

print('Normalize...')
clean_df = normalize_according_to_play_direction(clean_df)

clean_df['homeHasPossession'] = clean_df.apply(
    lambda row: home_has_possession(row), axis=1
)
clean_df['teamSituation'] = clean_df.apply(
    lambda row: calculate_team_sitation(row), axis=1
)

print('Creating features...')
min_df = clean_df[[
    'time', 'x', 'y', 's', 'o', 'dir', 'event', 'team',
    'gameId', 'playId', 'frameId', 'isDefensivePI'
]]

gb_2 = clean_df.groupby(['gameId', 'playId', 'frameId'])

# ball direction and orientation are NaN
calc_df = pd.DataFrame(
    columns=[
        'time',
        'att_def_d', 'att_ball_d', 'def_ball_d',
        'att_s', 'def_s', 'ball_s',
        'att_o', 'def_o',
        'att_dir', 'def_dir',
        'event', 'gameId', 'playId', 'frameId', 'isDefensivePI'
    ]
)

GROUP_SIZE_MINIMUM = 3
for name, group in gb_2:
    game_id, play_id, frameId = name
    if len(group) < GROUP_SIZE_MINIMUM:
        continue

    ball = group[group.teamSituation == 'football'].head(1).squeeze()
    p_att = group[group.teamSituation == 'attacking'].head(1).squeeze()
    p_def = group[group.teamSituation == 'defending'].head(1).squeeze()
    group_row = group.head(1).squeeze()
    group_events = group.event.unique().tolist()

    dict_to_append = {
        'time': group_row.time,
        'att_def_d': calculate_distance(p_att.x, p_att.y, p_def.x, p_def.y),
        'att_ball_d': calculate_distance(p_att.x, p_att.y, ball.x, ball.y),
        'def_ball_d': calculate_distance(p_def.x, p_def.y, ball.x, ball.y),
        'att_s': p_att.s, 'def_s': p_def.s, 'ball_s': ball.s,
        'att_a': p_att.a, 'def_a': p_def.a, 'ball_a': ball.a,
        'att_o': p_att.o, 'def_o': p_def.o,
        'att_dir': p_att.dir, 'def_dir': p_def.dir,
        'event': group_row.event,
        'pass_arrived': check_group_event(group_events, 'pass_arrived'),
        'pass_outcome_caught': check_group_event(group_events, 'pass_outcome_caught'),
        'tackle': check_group_event(group_events, 'tackle'),
        'first_contact': check_group_event(group_events, 'first_contact'),
        'pass_outcome_incomplete': check_group_event(group_events, 'pass_outcome_incomplete'),
        'out_of_bounds': check_group_event(group_events, 'out_of_bounds'),
        'week': week_num,
        'gameId': group_row.gameId,
        'playId': group_row.playId,
        'frameId': group_row.frameId,
        'isDefensivePI': group_row.isDefensivePI
    }

    calc_df = calc_df.append(
        dict_to_append,
        ignore_index=True
    )

print("Saving data...")
calc_df.to_csv(
    data_v3.get_step1_end_path(week_num),
    index=False
)
print(f'End time: {datetime.now().strftime("%H:%M:%S")}')
| 35.596591 | 119 | 0.675499 |
b945e094a775936b9b256c03b9ad1404cebcb291
| 1,312 |
py
|
Python
|
annotate-preprocessed.py
|
Rajpratik71/devel-scripts
|
068285719a13b02889b1314361cc5bdb764d9a3a
|
[
"Apache-2.0"
] | null | null | null |
annotate-preprocessed.py
|
Rajpratik71/devel-scripts
|
068285719a13b02889b1314361cc5bdb764d9a3a
|
[
"Apache-2.0"
] | null | null | null |
annotate-preprocessed.py
|
Rajpratik71/devel-scripts
|
068285719a13b02889b1314361cc5bdb764d9a3a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
"""Annotates -E preprocessed source input with line numbers.
Read standard input, then annotate each line with a line number based on the
preceding expanded line directives from -E output. Useful in the context of
compiler debugging.
"""
import getopt
import os
import re
import sys
import script_utils as u
flag_reverse = True
def usage(msgarg):
"""Print usage and exit."""
if msgarg:
sys.stderr.write("error: %s\n" % msgarg)
print """\
usage: %s [options] < input > output
options:
-d increase debug msg verbosity level
""" % os.path.basename(sys.argv[0])
sys.exit(1)
def parse_args():
"""Command line argument parsing."""
global flag_reverse
try:
optlist, _ = getopt.getopt(sys.argv[1:], "dr")
except getopt.GetoptError as err:
# unrecognized option
usage(str(err))
for opt, _ in optlist:
if opt == "-d":
u.increment_verbosity()
elif opt == "-r":
flag_reverse = False
# Setup
u.setdeflanglocale()
parse_args()
# Read
lines = sys.stdin.readlines()
lnum = -1
matcher = re.compile(r"^\#\s+(\d+)\s+\"(\S+)\".*$")
for line in lines:
m = matcher.match(line)
if m:
lnum = int(m.group(1))
afile = m.group(2)
print "<%s:%d>" % (afile, lnum)
continue
print "%d:%s" % (lnum, line.strip())
lnum += 1
| 19.014493 | 74 | 0.636433 |
b94613d2fb24bf9487b3045eae02b837543d3647
| 2,547 |
py
|
Python
|
pages/lstm.py
|
tekeburak/dam-occupancy-model
|
f39d436bf27088068177245f0180cafaa56ad123
|
[
"MIT"
] | 8 |
2021-01-24T14:56:23.000Z
|
2021-03-26T18:10:33.000Z
|
pages/lstm.py
|
tekeburak/dam-occupancy-model
|
f39d436bf27088068177245f0180cafaa56ad123
|
[
"MIT"
] | null | null | null |
pages/lstm.py
|
tekeburak/dam-occupancy-model
|
f39d436bf27088068177245f0180cafaa56ad123
|
[
"MIT"
] | 6 |
2021-01-24T14:44:49.000Z
|
2021-03-21T17:50:30.000Z
|
import streamlit as st
import tensorflow as tf
import numpy
from utils.get_owm_data import get_open_weather_map_data
from utils.get_date import get_date_list_for_gmt
import plotly.graph_objects as go
from plotly import tools
import plotly.offline as py
import plotly.express as px
| 50.94 | 476 | 0.773852 |
b9475ee1123a7f8c87eb161ddf2246d4b5a64a79
| 1,847 |
py
|
Python
|
fst_web/demo_settings.py
|
kamidev/autobuild_fst
|
6baffa955075ffe3c5f197789e9fd065fa74058e
|
[
"BSD-3-Clause"
] | null | null | null |
fst_web/demo_settings.py
|
kamidev/autobuild_fst
|
6baffa955075ffe3c5f197789e9fd065fa74058e
|
[
"BSD-3-Clause"
] | null | null | null |
fst_web/demo_settings.py
|
kamidev/autobuild_fst
|
6baffa955075ffe3c5f197789e9fd065fa74058e
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
ROOT = os.path.abspath(os.path.dirname(__file__))
path = lambda *args: os.path.join(ROOT, *args)
""" Template for local settings of the FST webservice (fst_web)
Please edit this file and replace all generic values with values suitable to
your particular installation.
"""
# NOTE! Always set this to False before deploying
DEBUG = True
# NOTE! Before deploying on a public, uncomment ALLOWED_HOSTS
# and add IP address and/or domain of your site
ALLOWED_HOSTS = ['localhost', '127.0.0.1', 'fst.magokoro.nu']
# Look for instance-specific settings
try:
from .instance_settings import *
except ImportError:
from .default_instance_settings import *
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': path('database/fst_demo.db')
}
}
LOG_LEVEL = "DEBUG"
# Enable this to override global DB Debug setting
# DB_DEBUG_LEVEL = "DEBUG"
# Setup mail server for sending email notifications.
# You can use any mail server you want.
# But a very simple way to get started is to use a gmail account.
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
# EMAIL_HOST_USER = 'your email'
# EMAIL_HOST_PASSWORD = 'your password'
# Admins specified here receive email notifications on critical errors.
ADMINS = ()
MANAGERS = ADMINS
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = os.path.join("/dokument/")
# Site and port for hosting FST service (do not add ending '/').
FST_SITE_URL = "http://127.0.0.1:8000"
# TODO - Check if FST_INSTANCE_PREFIX can be removed
# Site and port of specific FST instance (do not add ending '/').
FST_INSTANCE_URL = os.path.join(
"http://127.0.0.1:8000",
FST_INSTANCE_PREFIX)
| 28.415385 | 76 | 0.721711 |
b947d963b017c12ec37d222b3722de432bf97da6
| 8,891 |
py
|
Python
|
BookingScraper-joao_v2/BookingScraper/airbnb.py
|
joaocamargo/estudos-python
|
c5fbf59a1f06131d9789dca7dbdfdcf2200d0227
|
[
"MIT"
] | 1 |
2019-10-09T12:56:13.000Z
|
2019-10-09T12:56:13.000Z
|
BookingScraper-joao_v2/BookingScraper/airbnb.py
|
joaocamargo/estudos-python
|
c5fbf59a1f06131d9789dca7dbdfdcf2200d0227
|
[
"MIT"
] | null | null | null |
BookingScraper-joao_v2/BookingScraper/airbnb.py
|
joaocamargo/estudos-python
|
c5fbf59a1f06131d9789dca7dbdfdcf2200d0227
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3.6
import argparse
import argcomplete
from argcomplete.completers import ChoicesCompleter
from argcomplete.completers import EnvironCompleter
import requests
from bthread import BookingThread
from bs4 import BeautifulSoup
from file_writer import FileWriter
hotels = []
def prep_data(rooms=1, country='Macedonia', dest_id='-1', DayIni='01/01/2019', DayFim='02/01/2019', out_format=None):
'''
Prepare data for saving
    :return: hotels: list
'''
offset = 1
session = requests.Session()
parsed_html = get_booking_page(session, offset, rooms, country, dest_id, DayIni,DayFim)
all_offset = parsed_html.find_all('li', {'class':
'sr_pagination_item'})[-1].get_text().splitlines()[-1]
threads = []
for i in range(int(all_offset)):
offset += 1
t = BookingThread(session, offset, rooms, country,dest_id,DayIni, DayFim, process_hotels)
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
    return hotels
def get_data(rooms=1, country='Macedonia', dest_id='-1',DayIni='01/01/2019',DayFim='02/01/2019', out_format=None):
'''
    Get all accommodations in the given country and save them to a file
    :return: hotels-in-<country>.{txt/csv/xlsx} file
'''
    print('Searching for', country)
hotels_list = prep_data(rooms, country,dest_id, DayIni, DayFim, out_format)
save_data(hotels_list , out_format=out_format, country=country)
def save_data(data, out_format, country):
'''
Saves hotels list in file
:param data: hotels list
:param out_format: json, csv or excel
:return:
'''
writer = FileWriter(data, out_format, country)
file = writer.output_file()
print('All accommodations are saved.')
print('You can find them in', file, 'file')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
countries = get_countries()
parser.add_argument("--rooms",
help='Add the number of rooms to the booking request.',
default=1,
type=int,
nargs='?')
parser.add_argument("--country",
help='Add the country to the booking request.',
default='Macedonia',
nargs='?').completer = ChoicesCompleter(countries)
parser.add_argument("--dest_id",
                        help='Add the destination id to the booking request.',
default='0',
nargs='?')
parser.add_argument("--DayIni",
                        help='Start date',
default='01/01/2019',
nargs='?')
parser.add_argument("--DayFim",
                        help='End date',
default='02/01/2019',
nargs='?')
parser.add_argument("--out_format",
help='Add the format for the output file. Add excel, json or csv.',
default='json',
choices=['json', 'excel', 'csv'],
nargs='?').completer = EnvironCompleter
argcomplete.autocomplete(parser)
args = parser.parse_args()
localidades = [{
'Pais': 'London',
'dest_id': '-2601889'
}, {
'Pais': 'Utrecht',
'dest_id': '-2154382'
}, {
'Pais': 'Buzios',
'dest_id': '-626254'
}, {
'Pais': '',
'dest_id': ''
}]
countryAux = [d['Pais'] for d in localidades if args.dest_id in d['dest_id']]
    if countryAux:
        country = countryAux[0]
        print('Parameters')
        print(args.rooms, country, args.dest_id, args.DayIni, args.DayFim, args.out_format)
        get_data(args.rooms, country, args.dest_id, args.DayIni, args.DayFim, args.out_format)
    else:
        country = 'Not identified'
        locais = [d['Pais'] + ':' + d['dest_id'] for d in localidades if d['Pais'] != '']
        print('----------')
        print('Use one of the following locations:')
        for i in locais:
            print(i)
        print('----------')
| 37.995726 | 250 | 0.576313 |
b94890b4860019fd993040c0790c0701fc24a0c5
| 2,919 |
py
|
Python
|
main.py
|
valurhrafn/chromium-sync
|
df5e3299d179fc47ff34d1a95409383f46aac4d4
|
[
"MIT"
] | 4 |
2017-03-27T02:25:07.000Z
|
2021-03-07T21:40:58.000Z
|
main.py
|
valurhrafn/chromium-sync
|
df5e3299d179fc47ff34d1a95409383f46aac4d4
|
[
"MIT"
] | null | null | null |
main.py
|
valurhrafn/chromium-sync
|
df5e3299d179fc47ff34d1a95409383f46aac4d4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.appengine.api import users
import webapp2
# For datastore
import cgi
import urllib
from google.appengine.ext import ndb
# ************** MainHandler ************* #
# ************** GetUser ************* #
# ************** HasData ************* #
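# The handler class bodies are missing from this record. Minimal hypothetical
# stubs are sketched below so the routing table underneath stays importable;
# the original request-handling logic is unknown and these responses are
# placeholders only:
class MainHandler(webapp2.RequestHandler):
    def get(self):
        self.response.write('chromium-sync')

class GetUser(webapp2.RequestHandler):
    def get(self):
        user = users.get_current_user()
        self.response.write(user.nickname() if user else 'anonymous')

class HasData(webapp2.RequestHandler):
    def get(self):
        self.response.write('false')

class PostData(webapp2.RequestHandler):
    def post(self):
        self.response.write('ok')

class GetSyncData(webapp2.RequestHandler):
    def get(self):
        self.response.write('[]')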
app = webapp2.WSGIApplication([
('/', MainHandler),
('/GetUser/', GetUser),
('/HasData/', HasData),
('/chrome-sync/command/', PostData),
('/GetSyncData/', GetSyncData)
], debug=True)
| 30.40625 | 74 | 0.647825 |
b94a534d42db78fa886439d7fdfdf20e0f8b2504
| 1,434 |
py
|
Python
|
comet/service/subscriber.py
|
dneise/Comet
|
abaa0da65d69f90a5262d81416477b4e71deb2ad
|
[
"BSD-2-Clause"
] | 15 |
2015-11-29T18:53:58.000Z
|
2022-03-09T15:47:30.000Z
|
comet/service/subscriber.py
|
dneise/Comet
|
abaa0da65d69f90a5262d81416477b4e71deb2ad
|
[
"BSD-2-Clause"
] | 29 |
2016-01-21T18:10:45.000Z
|
2021-10-01T16:41:12.000Z
|
comet/service/subscriber.py
|
dneise/Comet
|
abaa0da65d69f90a5262d81416477b4e71deb2ad
|
[
"BSD-2-Clause"
] | 11 |
2016-01-22T14:05:51.000Z
|
2022-03-09T17:49:56.000Z
|
# Comet VOEvent Broker.
from twisted.application.internet import ClientService
from comet.protocol.subscriber import VOEventSubscriberFactory
__all__ = ["makeSubscriberService"]
def makeSubscriberService(endpoint, local_ivo, validators, handlers, filters):
"""Create a reconnecting VOEvent subscriber service.
Parameters
----------
endpoint : implements `twisted.internet.interfaces.IStreamClientEndpoint`
The endpoint to which the service will connect.
local_ivo : `str` or `None`
IVOA identifier for the subscriber.
validators : `list` of implementers of `~comet.icomet.IValidator`.
Validators which will be applied to incoming events. Events which fail
validation will be rejected.
handlers : `list` of implementers of `~comet.icomet.IHandler`.
Handlers to which events which pass validation will be passed.
filters : `list` of `str`
XPath filters. Will be passed to upstream as a request to filter the
alerts being sent.
Notes
-----
    Upstream brokers may not provide support for XPath filtering; in this
    case, the filters supplied will be ignored.
Reconnection is handled according to the default policies of
`twisted.application.internet.ClientService`.
"""
factory = VOEventSubscriberFactory(local_ivo, validators, handlers, filters)
service = ClientService(endpoint, factory)
return service
| 35.85 | 80 | 0.727336 |
b94c3a86b197fdae8da6f36cf6af0eeecde07155
| 13,008 |
py
|
Python
|
scripts/master/cros_try_job_git.py
|
bopopescu/build
|
4e95fd33456e552bfaf7d94f7d04b19273d1c534
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/master/cros_try_job_git.py
|
bopopescu/build
|
4e95fd33456e552bfaf7d94f7d04b19273d1c534
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/master/cros_try_job_git.py
|
bopopescu/build
|
4e95fd33456e552bfaf7d94f7d04b19273d1c534
|
[
"BSD-3-Clause"
] | 1 |
2020-07-23T11:05:06.000Z
|
2020-07-23T11:05:06.000Z
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import base64
import json
import os
import re
import shutil
import zlib
from StringIO import StringIO
try:
# Create a block to work around evil sys.modules manipulation in
# email/__init__.py that triggers pylint false positives.
# pylint: disable=E0611,F0401
from email.Message import Message
from email.Utils import formatdate
except ImportError:
raise
from buildbot.process.properties import Properties
from buildbot.schedulers.trysched import TryBase
from twisted.internet import defer, reactor, utils
from twisted.mail.smtp import SMTPSenderFactory
from twisted.python import log
from common.twisted_util.response import StringResponse
from master import gitiles_poller
from master.try_job_base import BadJobfile
def translate_v1_to_v2(parsed_job):
"""Translate tryjob desc from V1 to V2."""
parsed_job.setdefault('extra_args', []).append('--remote-trybot')
parsed_job['version'] = 2
def translate_v2_to_v3(parsed_job):
"""Translate tryjob desc from V2 to V3."""
# V3 --remote-patches format is not backwards compatible.
if any(a.startswith('--remote-patches')
for a in parsed_job.get('extra_args', ())):
raise BadJobfile('Cannot translate --remote-patches from tryjob v.2 to '
'v.3. Please run repo sync.')
parsed_job['version'] = 3
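# Example: a v.1 job description {'version': 1, 'extra_args': []} becomes
# {'version': 2, 'extra_args': ['--remote-trybot']} after translate_v1_to_v2,
# and translate_v2_to_v3 then bumps 'version' to 3 (values illustrative).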
| 37.165714 | 80 | 0.676661 |
b94d43136b5079271270c2099bbeca811ff9b1ce
| 1,412 |
py
|
Python
|
Medium/515.py
|
Hellofafar/Leetcode
|
7a459e9742958e63be8886874904e5ab2489411a
|
[
"CNRI-Python"
] | 6 |
2017-09-25T18:05:50.000Z
|
2019-03-27T00:23:15.000Z
|
Medium/515.py
|
Hellofafar/Leetcode
|
7a459e9742958e63be8886874904e5ab2489411a
|
[
"CNRI-Python"
] | 1 |
2017-10-29T12:04:41.000Z
|
2018-08-16T18:00:37.000Z
|
Medium/515.py
|
Hellofafar/Leetcode
|
7a459e9742958e63be8886874904e5ab2489411a
|
[
"CNRI-Python"
] | null | null | null |
# ------------------------------
# 515. Find Largest Value in Each Tree Row
#
# Description:
# You need to find the largest value in each row of a binary tree.
# Example:
# Input:
# 1
# / \
# 3 2
# / \ \
# 5 3 9
# Output: [1, 3, 9]
#
# Version: 1.0
# 12/22/18 by Jianfa
# ------------------------------
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
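# The Solution class body is missing from this record; below is a standard
# level-order BFS reconstruction matching the "BFS solution" note in the
# summary (an assumption, not necessarily the author's original code):
class Solution:
    def largestValues(self, root):
        if not root:
            return []
        res, level = [], [root]
        while level:
            res.append(max(node.val for node in level))
            level = [kid for node in level
                     for kid in (node.left, node.right) if kid]
        return res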
# Used for testing
if __name__ == "__main__":
test = Solution()
# ------------------------------
# Summary:
# BFS solution.
| 23.147541 | 66 | 0.434136 |
b94d5a11e77235531376a017f673e8c5a0fdf637
| 9,578 |
py
|
Python
|
opsmop/meta/docs/exparser.py
|
lachmanfrantisek/opsmop
|
562ae2d753ff84b3d794a6815d0436753e82d2a0
|
[
"Apache-2.0"
] | null | null | null |
opsmop/meta/docs/exparser.py
|
lachmanfrantisek/opsmop
|
562ae2d753ff84b3d794a6815d0436753e82d2a0
|
[
"Apache-2.0"
] | null | null | null |
opsmop/meta/docs/exparser.py
|
lachmanfrantisek/opsmop
|
562ae2d753ff84b3d794a6815d0436753e82d2a0
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Michael DeHaan LLC, <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
| 37.708661 | 90 | 0.516914 |
b94dd4c5db15c696e937d22b21b3d1a6fd038ef8
| 737 |
py
|
Python
|
pylox/TokenType.py
|
sheunl/Compiler_Tests
|
18c5e0568bc39a60094f3e44943ac252c279ffb9
|
[
"CC0-1.0"
] | null | null | null |
pylox/TokenType.py
|
sheunl/Compiler_Tests
|
18c5e0568bc39a60094f3e44943ac252c279ffb9
|
[
"CC0-1.0"
] | null | null | null |
pylox/TokenType.py
|
sheunl/Compiler_Tests
|
18c5e0568bc39a60094f3e44943ac252c279ffb9
|
[
"CC0-1.0"
] | null | null | null |
from enum import Enum
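# The enum body is missing from this record. A minimal sketch of the
# conventional Lox token-type enum (an assumed subset; the author's full
# member list is not shown here):
class TokenType(Enum):
    LEFT_PAREN = 1
    RIGHT_PAREN = 2
    PLUS = 3
    MINUS = 4
    IDENTIFIER = 5
    STRING = 6
    NUMBER = 7
    EOF = 8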
| 14.45098 | 32 | 0.522388 |
b94e05939494c3c75adce95bb694899b36d0a091
| 919 |
py
|
Python
|
src/oslibs/cocos/cocos-src/tools/cocos2d-console/plugins/framework/framework_add.py
|
dios-game/dios-cocos
|
b7fbcbafe02f516ef18fdb64b4519dbf806303fc
|
[
"MIT"
] | 1 |
2021-07-22T15:53:26.000Z
|
2021-07-22T15:53:26.000Z
|
src/oslibs/cocos/cocos-src/tools/cocos2d-console/plugins/framework/framework_add.py
|
dios-game/dios-cocos
|
b7fbcbafe02f516ef18fdb64b4519dbf806303fc
|
[
"MIT"
] | null | null | null |
src/oslibs/cocos/cocos-src/tools/cocos2d-console/plugins/framework/framework_add.py
|
dios-game/dios-cocos
|
b7fbcbafe02f516ef18fdb64b4519dbf806303fc
|
[
"MIT"
] | null | null | null |
import cocos
from MultiLanguage import MultiLanguage
from package.helper import ProjectHelper
| 28.71875 | 108 | 0.686616 |
b9514946d8170f94e426e1cbf736a481d8427c11
| 761 |
py
|
Python
|
src/utils.py
|
f-grimaldi/explain_ML
|
00892675be32bebd023b274270ccb05b798fb388
|
[
"MIT"
] | 1 |
2020-08-03T08:23:31.000Z
|
2020-08-03T08:23:31.000Z
|
src/utils.py
|
f-grimaldi/explain_ML
|
00892675be32bebd023b274270ccb05b798fb388
|
[
"MIT"
] | null | null | null |
src/utils.py
|
f-grimaldi/explain_ML
|
00892675be32bebd023b274270ccb05b798fb388
|
[
"MIT"
] | null | null | null |
from matplotlib import colors
import numpy as np
| 31.708333 | 89 | 0.659658 |
b9516c7b124e87fce1712aca1aa49ef2cd923f11
| 3,056 |
py
|
Python
|
lib/two/mongomgr.py
|
erkyrath/tworld
|
9f5237771196b03753d027277ffc296e25fd7425
|
[
"MIT"
] | 38 |
2015-01-03T16:59:20.000Z
|
2021-10-13T09:15:53.000Z
|
lib/two/mongomgr.py
|
Oreolek/tworld
|
9f5237771196b03753d027277ffc296e25fd7425
|
[
"MIT"
] | 32 |
2015-01-04T01:59:34.000Z
|
2016-05-20T16:29:26.000Z
|
lib/two/mongomgr.py
|
Oreolek/tworld
|
9f5237771196b03753d027277ffc296e25fd7425
|
[
"MIT"
] | 7 |
2015-10-08T21:01:20.000Z
|
2020-05-21T17:42:54.000Z
|
"""
Manage the connection to the MongoDB server.
"""
import tornado.gen
import tornado.ioloop
import motor
| 35.534884 | 97 | 0.576571 |
b9530c0fbf29c36506820a41f0b32bd37796d3e0
| 1,298 |
py
|
Python
|
code/examples/example_binomial_and_log_normal_abtest.py
|
hugopibernat/BayesianABTestAnalysis
|
026960524f5313f4a734f30fd447a5731be802e0
|
[
"Apache-2.0"
] | null | null | null |
code/examples/example_binomial_and_log_normal_abtest.py
|
hugopibernat/BayesianABTestAnalysis
|
026960524f5313f4a734f30fd447a5731be802e0
|
[
"Apache-2.0"
] | null | null | null |
code/examples/example_binomial_and_log_normal_abtest.py
|
hugopibernat/BayesianABTestAnalysis
|
026960524f5313f4a734f30fd447a5731be802e0
|
[
"Apache-2.0"
] | null | null | null |
#################################################
####### Author: Hugo Pibernat #######
####### Contact: [email protected] #######
####### Date: April 2014 #######
#################################################
from bayesianABTest import sampleSuccessRateForBinomial, sampleMeanForLogNormal, probabilityOfABetterThanB
from numpy.random import lognormal
from numpy import mean, concatenate, zeros
# Generate Log-Normal data
A_actuals = lognormal(mean=4.10, sigma=1.0, size=100)
B_actuals = lognormal(mean=4.00, sigma=1.0, size=100)
# Plus some zeros
A_data = concatenate([A_actuals,zeros(10000)])
B_data = concatenate([B_actuals,zeros(10000)])
# Modeling conversions with a binomial variable
A_purchases = sum(A_data > 0)
A_sessions = len(A_data)
B_purchases = sum(B_data > 0)
B_sessions = len(B_data)
A_CR = sampleSuccessRateForBinomial(A_sessions,A_purchases)
B_CR = sampleSuccessRateForBinomial(B_sessions,B_purchases)
# Modeling the spend with a log-normal
A_non_zero_data = A_data[A_data > 0]
B_non_zero_data = B_data[B_data > 0]
A_spend = sampleMeanForLogNormal(A_non_zero_data)
B_spend = sampleMeanForLogNormal(B_non_zero_data)
# Combining the two: revenue per session = conversion rate * mean spend per converter
A_rps = A_CR*A_spend
B_rps = B_CR*B_spend
# Result:
print(probabilityOfABetterThanB(A_rps, B_rps))
| 32.45 | 106 | 0.692604 |
b95332c99e63e536863282307e578d423edf7664
| 644 |
py
|
Python
|
tests/models/test_documents.py
|
airslate-oss/python-airslate
|
0f7fe6321b1c2e6875a02dfecb5ffa07a361bb1d
|
[
"Apache-2.0"
] | 3 |
2021-02-07T20:04:26.000Z
|
2021-09-22T08:32:26.000Z
|
tests/models/test_documents.py
|
airslate-oss/python-airslate
|
0f7fe6321b1c2e6875a02dfecb5ffa07a361bb1d
|
[
"Apache-2.0"
] | 15 |
2021-01-21T15:38:37.000Z
|
2021-02-16T07:52:20.000Z
|
tests/models/test_documents.py
|
airslate-oss/python-airslate
|
0f7fe6321b1c2e6875a02dfecb5ffa07a361bb1d
|
[
"Apache-2.0"
] | null | null | null |
# This file is part of the airslate.
#
# Copyright (c) 2021 airSlate, Inc.
#
# For the full copyright and license information, please view
# the LICENSE file that was distributed with this source code.
from airslate.models.documents import UpdateFields
from airslate.entities.fields import Field
| 28 | 62 | 0.677019 |
b95403252db42b0394653a122fd73b2b596e194d
| 400 |
py
|
Python
|
app/main.py
|
meysam81/sheypoor
|
aa67e20646ebc4143b83968f60c0b28c2ad340a1
|
[
"MIT"
] | null | null | null |
app/main.py
|
meysam81/sheypoor
|
aa67e20646ebc4143b83968f60c0b28c2ad340a1
|
[
"MIT"
] | null | null | null |
app/main.py
|
meysam81/sheypoor
|
aa67e20646ebc4143b83968f60c0b28c2ad340a1
|
[
"MIT"
] | null | null | null |
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from app import api
from app.core.config import config
app = FastAPI(title="Sheypoor")
# Set all CORS enabled origins
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.include_router(api.router, prefix=config.API_URI)
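# Typical local invocation (assuming this module is importable as app.main):
#   uvicorn app.main:app --reload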
| 21.052632 | 53 | 0.7425 |
b9556579b31dd7d2370d8083a431ada02beb471d
| 2,205 |
py
|
Python
|
cdnu/ccds.py
|
Indy2222/mbg-codon-usage
|
d415076a8150cd712010c0389c71ef22ba9ad850
|
[
"MIT"
] | null | null | null |
cdnu/ccds.py
|
Indy2222/mbg-codon-usage
|
d415076a8150cd712010c0389c71ef22ba9ad850
|
[
"MIT"
] | null | null | null |
cdnu/ccds.py
|
Indy2222/mbg-codon-usage
|
d415076a8150cd712010c0389c71ef22ba9ad850
|
[
"MIT"
] | null | null | null |
from typing import List, NamedTuple
CCDS_FILE = 'CCDS.current.txt'
CHROMOSOMES = ('1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12',
'13', '14', '15', '16', '17', '18', '19', '20', '21', '22',
'X', 'Y')
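# The CdsPos definition is missing from this record; it is reconstructed here
# from its usage in load_ccds() below (field names match the constructor call;
# the exact type annotations are assumptions):
class CdsPos(NamedTuple):
    ccds_id: str
    molecule: str
    indexes: List[tuple]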
def load_ccds() -> List[CdsPos]:
"""Load file with CDS locations within GRCh38 genome as a list of
:class:`CdsPos`."""
cds = []
with open(CCDS_FILE, encoding='utf-8', newline='\n') as fp:
for line in fp:
if not line:
# Skip empty lines
continue
if line.startswith('#'):
# Skip comments
continue
parts = line.split('\t')
ccds_id = parts[4]
status = parts[5]
if 'Public' not in status:
# CDS is not yet public
continue
if parts[6] == '-':
# CDS strand negative order = reverse-complement
continue
locations_str = parts[9]
if locations_str == '-':
# CDS location unknown
continue
chromosome = parts[0]
assert chromosome in CHROMOSOMES, chromosome
locations = []
assert locations_str.startswith('[')
assert locations_str.endswith(']')
for location_str in locations_str[1:-1].split(','):
start_str, stop_str = location_str.split('-')
start, stop = int(start_str), int(stop_str) + 1
locations.append((start, stop))
if sum(b - a for a, b in locations) % 3 != 0:
                # Skip CDS whose length is not a multiple of three.
continue
cds.append(CdsPos(
ccds_id=ccds_id,
molecule='chr' + chromosome,
indexes=locations
))
return cds
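# Illustrative usage sketch (assumes CCDS.current.txt is present locally):
#   for cds in load_ccds():
#       n_codons = sum(stop - start for start, stop in cds.indexes) // 3
#       print(cds.ccds_id, cds.molecule, n_codons)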
| 30.205479 | 77 | 0.502494 |
b9576be4fad430a84f92a2e3dc9d1b34f113118c
| 2,732 |
py
|
Python
|
test/test_resolve_errors.py
|
ITMO-NSS-team/GEFEST
|
72bb61cf3fbb9f87fe3dcd48b71f3e84dd23b669
|
[
"BSD-3-Clause"
] | 12 |
2022-01-19T11:06:32.000Z
|
2022-02-21T14:59:23.000Z
|
test/test_resolve_errors.py
|
ITMO-NSS-team/GEFEST
|
72bb61cf3fbb9f87fe3dcd48b71f3e84dd23b669
|
[
"BSD-3-Clause"
] | 9 |
2022-01-19T11:09:11.000Z
|
2022-03-29T13:36:41.000Z
|
test/test_resolve_errors.py
|
ITMO-NSS-team/GEFEST
|
72bb61cf3fbb9f87fe3dcd48b71f3e84dd23b669
|
[
"BSD-3-Clause"
] | 2 |
2022-01-19T11:37:24.000Z
|
2022-03-24T19:35:33.000Z
|
import pytest
from copy import deepcopy
from gefest.core.structure.point import Point
from gefest.core.structure.polygon import Polygon
from gefest.core.structure.structure import Structure
from gefest.core.algs.postproc.resolve_errors import *
from gefest.core.algs.geom.validation import *
# length and width used for the test polygons
poly_width = 10
poly_length = 20
# create test polygons from corner points
rectangle_points = [(-1, 40), (-1, poly_length+40), (-poly_width-10, poly_length+40), (-poly_width-10, 40)]
out_bounds_rectangle_poly = Polygon('rectangle', points=[Point(*coords) for coords in rectangle_points])
triangle_points = [(1, 1), (poly_width, poly_length), (1, poly_length)]
unclosed_triangle_poly = Polygon('triangle', points=[Point(*coords) for coords in triangle_points])
incorrect_points = [(5, 5), (5, poly_length), (8, poly_length), (5, 5), (5, 30)]
incorrect_poly = Polygon('incorrect_poly', points=[Point(*coords) for coords in incorrect_points])
domain = Domain()
| 37.424658 | 107 | 0.739019 |
b959064c37513b8eabaf61132941fe714e3a8dbc
| 1,833 |
py
|
Python
|
tests/mocks.py
|
davla/i3-live-tree
|
8dc3917afdd09f53f7cf39653c2bf12cb0200983
|
[
"MIT"
] | 1 |
2021-07-22T09:04:46.000Z
|
2021-07-22T09:04:46.000Z
|
tests/mocks.py
|
davla/i3-live-tree
|
8dc3917afdd09f53f7cf39653c2bf12cb0200983
|
[
"MIT"
] | null | null | null |
tests/mocks.py
|
davla/i3-live-tree
|
8dc3917afdd09f53f7cf39653c2bf12cb0200983
|
[
"MIT"
] | null | null | null |
from unittest.mock import MagicMock, Mock
from i3ipc.aio import Con
import i3_live_tree.tree_serializer # noqa: F401
| 30.55 | 78 | 0.67485 |
b95a54ae27c88b1a727a1742ed1880093d3693e0
| 971 |
py
|
Python
|
hvac/api/secrets_engines/gcp.py
|
nested-tech/hvac
|
2a58ac9850b882e43c1617ae6b0ea93104c99794
|
[
"Apache-2.0"
] | null | null | null |
hvac/api/secrets_engines/gcp.py
|
nested-tech/hvac
|
2a58ac9850b882e43c1617ae6b0ea93104c99794
|
[
"Apache-2.0"
] | null | null | null |
hvac/api/secrets_engines/gcp.py
|
nested-tech/hvac
|
2a58ac9850b882e43c1617ae6b0ea93104c99794
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Gcp methods module."""
from hvac import exceptions
from hvac.api.vault_api_base import VaultApiBase
from hvac.constants.gcp import DEFAULT_MOUNT_POINT, ALLOWED_CREDS_ENDPOINTS
| 34.678571 | 108 | 0.652935 |
b95b84a26deaf7cd8b371b13b34ee9e7005ee7c0
| 9,155 |
py
|
Python
|
ypricemagic/uniswap.py
|
poolpitako/ypricemagic
|
882aa2071a918937e77e0b85e5f52191a4714d28
|
[
"MIT"
] | null | null | null |
ypricemagic/uniswap.py
|
poolpitako/ypricemagic
|
882aa2071a918937e77e0b85e5f52191a4714d28
|
[
"MIT"
] | null | null | null |
ypricemagic/uniswap.py
|
poolpitako/ypricemagic
|
882aa2071a918937e77e0b85e5f52191a4714d28
|
[
"MIT"
] | null | null | null |
import token
from tokenize import tokenize
from brownie import Contract, chain
from brownie.exceptions import ContractNotFound
from cachetools.func import ttl_cache
from .utils.cache import memory
from .utils.multicall2 import fetch_multicall
from .interfaces.ERC20 import ERC20ABI
import ypricemagic.magic
import ypricemagic.utils.utils
from .constants import STABLECOINS, dai, usdc, usdt, wbtc, weth, sushi
# NOTE: If this is failing to pull a price for a token you need, it's likely because that token requires a special swap path.
# Please add a viable swap path below to fetch price data successfully.
#project.load()
if chain.id == 1:
FACTORIES = {
"uniswap": "0x5C69bEe701ef814a2B6a3EDD4B1652CB9cc5aA6f",
"sushiswap": "0xC0AEe478e3658e2610c5F7A4A2E1777cE9e4f2Ac",
}
ROUTERS = {
"uniswap": Contract("0x7a250d5630B4cF539739dF2C5dAcb4c659F2488D"),
"sushiswap": Contract("0xD9E1CE17F2641F24AE83637AB66A2CCA9C378B9F"),
}
SPECIAL_PATHS = {
"sushiswap": {
"0xEF69B5697f2Fb0345cC680210fD39b593a2f9684": ["0xEF69B5697f2Fb0345cC680210fD39b593a2f9684","0x6B3595068778DD592e39A122f4f5a5cF09C90fE2",weth,usdc]
,"0xbf2179859fc6D5BEE9Bf9158632Dc51678a4100e": ["0xbf2179859fc6D5BEE9Bf9158632Dc51678a4100e","0xC28E27870558cF22ADD83540d2126da2e4b464c2",weth,usdc]
,"0x3166C570935a7D8554c8f4eA792ff965D2EFe1f2": ["0x3166C570935a7D8554c8f4eA792ff965D2EFe1f2","0x4954Db6391F4feB5468b6B943D4935353596aEC9",usdc]
,"0xE6279E1c65DD41b30bA3760DCaC3CD8bbb4420D6": ["0xE6279E1c65DD41b30bA3760DCaC3CD8bbb4420D6","0x87F5F9eBE40786D49D35E1B5997b07cCAA8ADbFF",weth,usdc]
,"0x4954Db6391F4feB5468b6B943D4935353596aEC9": ["0x4954Db6391F4feB5468b6B943D4935353596aEC9",usdc]
,"0x1E18821E69B9FAA8e6e75DFFe54E7E25754beDa0": ["0x1E18821E69B9FAA8e6e75DFFe54E7E25754beDa0","0xEF69B5697f2Fb0345cC680210fD39b593a2f9684","0x6B3595068778DD592e39A122f4f5a5cF09C90fE2",weth,usdc]
,"0xfC1E690f61EFd961294b3e1Ce3313fBD8aa4f85d": ["0xfC1E690f61EFd961294b3e1Ce3313fBD8aa4f85d","0xba100000625a3754423978a60c9317c58a424e3D",weth,usdc]
,"0xBA50933C268F567BDC86E1aC131BE072C6B0b71a": ["0xBA50933C268F567BDC86E1aC131BE072C6B0b71a",weth,usdc]
,"0x6102407f07029892eB5Ff02164ADFaFb85f4d222": ["0x6102407f07029892eB5Ff02164ADFaFb85f4d222",usdt]
,"0x85034b3b2e292493D029443455Cc62ab669573B3": ["0x85034b3b2e292493D029443455Cc62ab669573B3","0x1f9840a85d5aF5bf1D1762F925BDADdC4201F984",weth,usdc]
,"0xb220D53F7D0f52897Bcf25E47c4c3DC0bac344F8": ["0xb220D53F7D0f52897Bcf25E47c4c3DC0bac344F8", usdc]
,"0x383518188C0C6d7730D91b2c03a03C837814a899": ["0x383518188C0C6d7730D91b2c03a03C837814a899",dai]
,"0xafcE9B78D409bF74980CACF610AFB851BF02F257": ["0xafcE9B78D409bF74980CACF610AFB851BF02F257",wbtc,weth,usdc]
},
"uniswap": {
}
}
elif chain.id == 56:
ROUTERS = {
"pancakeswapv2": Contract("0x10ED43C718714eb63d5aA57B78B54704E256024E"),
"pancakeswapv1": Contract("0x05fF2B0DB69458A0750badebc4f9e13aDd608C7F")
}
FACTORIES = {
"pancakeswapv2": "0xcA143Ce32Fe78f1f7019d7d551a6402fC5350c73",
"pancakeswapv1": "0xBCfCcbde45cE874adCB698cC183deBcF17952812"
}
SPECIAL_PATHS = {
"pancakeswapv2": {
},
"pancakeswapv1": {
}
}
elif chain.id == 137:
ROUTERS = {
"quickswap": Contract("0xa5E0829CaCEd8fFDD4De3c43696c57F7D7A678ff")
}
FACTORIES = {
"quickswap": "0x5757371414417b8C6CAad45bAeF941aBc7d3Ab32",
}
SPECIAL_PATHS = {
"quickswap": {
}
}
FACTORY_TO_ROUTER = {FACTORIES[name]: ROUTERS[name] for name in FACTORIES}
FACTORY_TO_PROTOCOL = {FACTORIES[name]: name for name in FACTORIES}
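# FACTORY_TO_ROUTER / FACTORY_TO_PROTOCOL invert FACTORIES so a pair's factory
# address can be mapped back to the router contract and protocol name it
# belongs to.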
| 43.388626 | 205 | 0.68935 |
b95bf173c71497f893fb19ff1c8e2576967d5c36
| 611 |
py
|
Python
|
configs/configuration_textrnn.py
|
haodingkui/semeval2020-task5-subtask1
|
bfd0c808c6b1de910d6f58ea040a13442b4bcdca
|
[
"MIT"
] | 2 |
2020-08-19T12:32:21.000Z
|
2021-11-08T15:50:08.000Z
|
configs/configuration_textrnn.py
|
haodingkui/semeval2020-task5-subtask1
|
bfd0c808c6b1de910d6f58ea040a13442b4bcdca
|
[
"MIT"
] | null | null | null |
configs/configuration_textrnn.py
|
haodingkui/semeval2020-task5-subtask1
|
bfd0c808c6b1de910d6f58ea040a13442b4bcdca
|
[
"MIT"
] | 1 |
2020-08-19T12:32:48.000Z
|
2020-08-19T12:32:48.000Z
|
""" TextRNN model configuration """
| 27.772727 | 56 | 0.657938 |
b95cfef2234f9a61adbaa0afe2564f0d012dea38
| 38 |
py
|
Python
|
settings/debug_members.py
|
akorzunin/telegram_auction_bot
|
d4d5042614ea11f8085815d8f9fb8b6fbebcfab0
|
[
"Apache-2.0"
] | null | null | null |
settings/debug_members.py
|
akorzunin/telegram_auction_bot
|
d4d5042614ea11f8085815d8f9fb8b6fbebcfab0
|
[
"Apache-2.0"
] | null | null | null |
settings/debug_members.py
|
akorzunin/telegram_auction_bot
|
d4d5042614ea11f8085815d8f9fb8b6fbebcfab0
|
[
"Apache-2.0"
] | null | null | null |
DEBUG_MEMBER_LIST = [
503131177,
]
| 12.666667 | 21 | 0.684211 |
b95d4a692bcb2991f35a3f589cde3570c4033e09
| 29,218 |
py
|
Python
|
metrics/pointops/pointops_util.py
|
JiazeWang/SP-GAN
|
455003f78b1160ebe0a2056005b069808c0df35b
|
[
"MIT"
] | 73 |
2021-05-11T12:00:29.000Z
|
2022-03-31T09:40:12.000Z
|
metrics/pointops/pointops_util.py
|
JiazeWang/SP-GAN
|
455003f78b1160ebe0a2056005b069808c0df35b
|
[
"MIT"
] | 6 |
2021-08-18T13:03:43.000Z
|
2022-03-30T04:48:29.000Z
|
metrics/pointops/pointops_util.py
|
JiazeWang/SP-GAN
|
455003f78b1160ebe0a2056005b069808c0df35b
|
[
"MIT"
] | 13 |
2021-08-28T20:09:13.000Z
|
2022-03-20T12:42:51.000Z
|
from typing import Tuple
import torch
from torch.autograd import Function
import torch.nn as nn
from metrics.pointops import pointops_cuda
import numpy as np
furthestsampling = FurthestSampling.apply
gathering = Gathering.apply
nearestneighbor = NearestNeighbor.apply
interpolation = Interpolation.apply
grouping = Grouping.apply
grouping_int = GroupingInt.apply
ballquery = BallQuery.apply
featuredistribute = FeatureDistribute.apply
featuregather = FeatureGather.apply
labelstat_ballrange = LabelStatBallRange.apply
labelstat_idx = LabelStatIdx.apply
labelstat_and_ballquery = LabelStatAndBallQuery.apply
def pairwise_distances(x, y=None):
'''
Input: x is a Nxd matrix
           y is an optional Mxd matrix
Output: dist is a NxM matrix where dist[i,j] is the square norm between x[i,:] and y[j,:]
if y is not given then use 'y=x'.
i.e. dist[i,j] = ||x[i,:]-y[j,:]||^2
'''
x_norm = (x ** 2).sum(1).view(-1, 1)
if y is not None:
y_t = torch.transpose(y, 0, 1)
y_norm = (y ** 2).sum(1).view(1, -1)
else:
y_t = torch.transpose(x, 0, 1)
y_norm = x_norm.view(1, -1)
    dist = x_norm + y_norm - 2.0 * torch.mm(x, y_t)
    return torch.clamp(dist, 0.0, np.inf)
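# Quick sanity-check sketch (shapes illustrative; torch.cdist returns the
# plain L2 distance, hence the ** 2 when comparing):
#   x = torch.randn(4, 3); y = torch.randn(5, 3)
#   d = pairwise_distances(x, y)  # (4, 5) matrix of squared distances
#   assert torch.allclose(d, torch.cdist(x, y) ** 2, atol=1e-4)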
knnquery_naive = KNNQueryNaive.apply
knnquery = KNNQuery.apply
knnquery_exclude = KNNQueryExclude.apply
| 37.458974 | 145 | 0.585906 |
b95d5c160689db0e0a64a0a455645d72081698d5
| 2,992 |
py
|
Python
|
core/src/zeit/cms/content/caching.py
|
rickdg/vivi
|
16134ac954bf8425646d4ad47bdd1f372e089355
|
[
"BSD-3-Clause"
] | 5 |
2019-05-16T09:51:29.000Z
|
2021-05-31T09:30:03.000Z
|
core/src/zeit/cms/content/caching.py
|
rickdg/vivi
|
16134ac954bf8425646d4ad47bdd1f372e089355
|
[
"BSD-3-Clause"
] | 107 |
2019-05-24T12:19:02.000Z
|
2022-03-23T15:05:56.000Z
|
core/src/zeit/cms/content/caching.py
|
rickdg/vivi
|
16134ac954bf8425646d4ad47bdd1f372e089355
|
[
"BSD-3-Clause"
] | 3 |
2020-08-14T11:01:17.000Z
|
2022-01-08T17:32:19.000Z
|
from collections import defaultdict
from logging import getLogger
from operator import itemgetter
from os import environ
from time import time
from zope.cachedescriptors.property import Lazy as cachedproperty
from zeit.cms.content.sources import FEATURE_TOGGLES
from zope.component import getUtility
from zeit.connector.interfaces import IConnector
from zeit.connector.filesystem import Connector
log = getLogger(__name__)
__cache = ContentCache()
get = __cache.get
info = __cache.info
| 32.521739 | 78 | 0.57988 |
b95e87663683cd1ca4cf5da88872ac29da6e83c7
| 1,177 |
py
|
Python
|
genesis/project.py
|
genialis/genesis-genapi
|
dfe9bcc8b332a8b9873db4ab9994b0cc10eb209a
|
[
"Apache-2.0"
] | 3 |
2020-01-24T17:03:23.000Z
|
2021-03-16T03:20:31.000Z
|
genesis/project.py
|
genialis/genesis-genapi
|
dfe9bcc8b332a8b9873db4ab9994b0cc10eb209a
|
[
"Apache-2.0"
] | 1 |
2018-02-15T19:33:00.000Z
|
2018-02-15T19:33:00.000Z
|
genesis/project.py
|
genialis/genesis-genapi
|
dfe9bcc8b332a8b9873db4ab9994b0cc10eb209a
|
[
"Apache-2.0"
] | 6 |
2015-05-27T10:14:46.000Z
|
2021-01-01T06:35:40.000Z
|
"""Project"""
from __future__ import absolute_import, division, print_function, unicode_literals
| 30.973684 | 84 | 0.619371 |
b95f2f6c2258ef8998ac2a053019013dbf870640
| 2,351 |
py
|
Python
|
account/views.py
|
KimSoungRyoul/drf_unitteset_study_project
|
9a0d824bdc6343eeba6209299c077a6e9d280516
|
[
"MIT"
] | null | null | null |
account/views.py
|
KimSoungRyoul/drf_unitteset_study_project
|
9a0d824bdc6343eeba6209299c077a6e9d280516
|
[
"MIT"
] | null | null | null |
account/views.py
|
KimSoungRyoul/drf_unitteset_study_project
|
9a0d824bdc6343eeba6209299c077a6e9d280516
|
[
"MIT"
] | null | null | null |
# Create your views here.
from django.db.models import QuerySet
from django.utils.decorators import method_decorator
from drf_yasg.utils import swagger_auto_schema
from rest_framework import viewsets, status
from rest_framework.permissions import IsAuthenticated, AllowAny
from rest_framework.response import Response
from rest_framework.viewsets import mixins
from account.documents import DjangoFilterDescriptionInspector
from account.models import Customer
from account.serializers import CustomerInfoSerializer, SignUpFormSerializer
| 40.534483 | 103 | 0.722671 |
b95fe9aa9fab4f285d9028f8b01c9820d83254e4
| 3,831 |
py
|
Python
|
src/front-door/azext_front_door/_validators.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 207 |
2017-11-29T06:59:41.000Z
|
2022-03-31T10:00:53.000Z
|
src/front-door/azext_front_door/_validators.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 4,061 |
2017-10-27T23:19:56.000Z
|
2022-03-31T23:18:30.000Z
|
src/front-door/azext_front_door/_validators.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 802 |
2017-10-11T17:36:26.000Z
|
2022-03-31T22:24:32.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import argparse
# pylint: disable=protected-access
| 35.472222 | 119 | 0.645262 |
b960f3f5be88ef82754359823e7c6a9b7ed78089
| 7,763 |
py
|
Python
|
mimesis/data/int/development.py
|
DevAerial/mimesis
|
33c58ae43e2f6ebc11e5ea7ebe8ac8917b2e1c0b
|
[
"MIT"
] | null | null | null |
mimesis/data/int/development.py
|
DevAerial/mimesis
|
33c58ae43e2f6ebc11e5ea7ebe8ac8917b2e1c0b
|
[
"MIT"
] | 1 |
2022-03-26T07:46:59.000Z
|
2022-03-26T07:47:20.000Z
|
mimesis/data/int/development.py
|
DevAerial/mimesis
|
33c58ae43e2f6ebc11e5ea7ebe8ac8917b2e1c0b
|
[
"MIT"
] | null | null | null |
"""Provides all the data related to the development."""
LICENSES = [
"Apache License, 2.0 (Apache-2.0)",
"The BSD 3-Clause License",
"The BSD 2-Clause License",
"GNU General Public License (GPL)",
"General Public License (LGPL)",
"MIT License (MIT)",
"Mozilla Public License 2.0 (MPL-2.0)",
"Common Development and Distribution License (CDDL-1.0)",
"Eclipse Public License (EPL-1.0)",
]
PROGRAMMING_LANGS = [
"ASP",
"Assembly",
"AutoIt",
"Awk",
"Bash",
"C",
"C Shell",
"C#",
"C++",
"Caml",
"Ceylon",
"Clojure",
"CoffeeScript",
"Common Lisp",
"D",
"Dart",
"Delphi",
"Dylan",
"ECMAScript",
"Elixir",
"Emacs Lisp",
"Erlang",
"F#",
"Falcon",
"Fortran",
"GNU Octave",
"Go",
"Groovy",
"Haskell",
"haXe",
"Io",
"J#",
"Java",
"JavaScript",
"Julia",
"Kotlin",
"Lisp",
"Lua",
"Mathematica",
"Objective-C",
"OCaml",
"Perl",
"PHP",
"PL-I",
"PL-SQL",
"PowerShell",
"Prolog",
"Python",
"R",
"Racket",
"Ruby",
"Rust",
"Scala",
"Scheme",
"Smalltalk",
"Tcl",
"Tex",
"Transact-SQL",
"TypeScript",
"Z shell",
]
OS = [
"Arch",
"CentOS",
"Debian",
"Fedora",
"FreeBSD",
"Gentoo",
"Kali",
"Lubuntu",
"Manjaro",
"Mint",
"OS X",
"macOS",
"OpenBSD",
"PCLinuxOS",
"Slackware",
"Ubuntu",
"Windows 10",
"Windows 7",
"Windows 8",
"Windows 8.1",
"Zorin",
"elementaryOS",
"macOS",
"openSUSE",
]
FOLDERS = [
"Development",
"Downloads",
"Documents",
"Music",
"Video",
"Work",
"Pictures",
"Desktop",
"Study",
]
PROJECT_NAMES = [
"aardonyx",
"abelisaurus",
"achelousaurus",
"achillobator",
"acrocanthosaurus",
"aegyptosaurus",
"afrovenator",
"agilisaurus",
"alamosaurus",
"albertaceratops",
"albertosaurus",
"alectrosaurus",
"alioramus",
"allosaurus",
"alvarezsaurus",
"amargasaurus",
"ammosaurus",
"ampelosaurus",
"amygdalodon",
"anatotitan",
"anchiceratops",
"anchisaurus",
"ankylosaurus",
"anserimimus",
"antarctopelta",
"antarctosaurus",
"apatosaurus",
"aragosaurus",
"aralosaurus",
"archaeoceratops",
"archaeopteryx",
"archaeornithomimus",
"argentinosaurus",
"arrhinoceratops",
"atlascopcosaurus",
"aucasaurus",
"austrosaurus",
"avaceratops",
"avalonia",
"avimimus",
"azendohsaurus",
"bactrosaurus",
"bagaceratops",
"bambiraptor",
"barapasaurus",
"barosaurus",
"baryonyx",
"becklespinax",
"beipiaosaurus",
"bellusaurus",
"borogovia",
"brachiosaurus",
"brachyceratops",
"bugenasaura",
"buitreraptor",
"camarasaurus",
"camptosaurus",
"carnotaurus",
"caudipteryx",
"cedarpelta",
"centrosaurus",
"ceratosaurus",
"cetiosauriscus",
"cetiosaurus",
"chaoyangsaurus",
"chasmosaurus",
"chialingosaurus",
"chindesaurus",
"chinshakiangosaurus",
"chirostenotes",
"chubutisaurus",
"chungkingosaurus",
"citipati",
"coelophysis",
"coelurus",
"coloradisaurus",
"compsognathus",
"conchoraptor",
"confuciusornis",
"corythosaurus",
"cryolophosaurus",
"dacentrurus",
"daspletosaurus",
"datousaurus",
"deinocheirus",
"deinonychus",
"deltadromeus",
"diceratops",
"dicraeosaurus",
"dilophosaurus",
"diplodocus",
"dracorex",
"dravidosaurus",
"dromaeosaurus",
"dromiceiomimus",
"dryosaurus",
"dryptosaurus",
"dubreuillosaurus",
"edmontonia",
"edmontosaurus",
"einiosaurus",
"elaphrosaurus",
"emausaurus",
"eolambia",
"eoraptor",
"eotyrannus",
"equijubus",
"erketu",
"erlikosaurus",
"euhelopus",
"euoplocephalus",
"europasaurus",
"euskelosaurus",
"eustreptospondylus",
"fukuiraptor",
"fukuisaurus",
"gallimimus",
"gargoyleosaurus",
"garudimimus",
"gasosaurus",
"gasparinisaura",
"gastonia",
"giganotosaurus",
"gilmoreosaurus",
"giraffatitan",
"gobisaurus",
"gorgosaurus",
"goyocephale",
"graciliceratops",
"gryposaurus",
"guaibasaurus",
"guanlong",
"hadrosaurus",
"hagryphus",
"haplocanthosaurus",
"harpymimus",
"herrerasaurus",
"hesperosaurus",
"heterodontosaurus",
"homalocephale",
"huayangosaurus",
"hylaeosaurus",
"hypacrosaurus",
"hypselosaurus",
"hypsilophodon",
"iguanodon",
"indosuchus",
"ingenia",
"irritator",
"isisaurus",
"janenschia",
"jaxartosaurus",
"jingshanosaurus",
"jinzhousaurus",
"jobaria",
"juravenator",
"kentrosaurus",
"khaan",
"kotasaurus",
"kritosaurus",
"lamaceratops",
"lambeosaurus",
"lapparentosaurus",
"leaellynasaura",
"leptoceratops",
"lesothosaurus",
"lexovisaurus",
"liaoceratops",
"liaoxiornis",
"ligabuesaurus",
"liliensternus",
"lophorhothon",
"lophostropheus",
"lufengosaurus",
"lurdusaurus",
"lycorhinus",
"magyarosaurus",
"maiasaura",
"majungatholus",
"malawisaurus",
"mamenchisaurus",
"mapusaurus",
"marshosaurus",
"masiakasaurus",
"massospondylus",
"maxakalisaurus",
"megalosaurus",
"melanorosaurus",
"metriacanthosaurus",
"microceratops",
"micropachycephalosaurus",
"microraptor",
"minmi",
"monolophosaurus",
"mononykus",
"mussaurus",
"muttaburrasaurus",
"nanotyrannus",
"nanshiungosaurus",
"nemegtosaurus",
"neovenator",
"neuquenosaurus",
"nigersaurus",
"nipponosaurus",
"noasaurus",
"nodosaurus",
"nomingia",
"nothronychus",
"nqwebasaurus",
"omeisaurus",
"ornitholestes",
"ornithomimus",
"orodromeus",
"oryctodromeus",
"othnielia",
"ouranosaurus",
"oviraptor",
"rebbachisaurus",
"rhabdodon",
"rhoetosaurus",
"rinchenia",
"riojasaurus",
"rugops",
"saichania",
"saltasaurus",
"saltopus",
"sarcosaurus",
"saurolophus",
"sauropelta",
"saurophaganax",
"saurornithoides",
"scelidosaurus",
"scutellosaurus",
"secernosaurus",
"segisaurus",
"segnosaurus",
"seismosaurus",
"shamosaurus",
"shanag",
"shantungosaurus",
"shunosaurus",
"shuvuuia",
"silvisaurus",
"sinocalliopteryx",
"sinornithosaurus",
"sinosauropteryx",
"sinraptor",
"sinvenator",
"zalmoxes",
"zephyrosaurus",
"zuniceratops",
"byzantine",
"svengali",
"accolade",
"acrimony",
"angst",
"anomaly",
"antidote",
"baroque",
"bona_fide",
"bourgeois",
"bravado",
"brogue",
"brusque",
"cacophony",
"caustic",
"charisma",
"cloying",
"deja-vu",
"dichotomy",
"elan",
"ennui",
"epitome",
"esoteric",
"euphemism",
"faux pas",
"fiasco",
"finagle",
"glib",
"harbinger",
"hedonist",
"heresy",
"idyllic",
"insidious",
"junket",
"kitsch",
"litany",
"lurid",
"malaise",
"malinger",
"mantra",
"maudlin",
"mercenary",
"misnomer",
"nirvana",
"oblivion",
"ogle",
"ostracize",
"panacea",
"paradox",
"peevish",
"propriety",
"revel",
"rhetoric",
"spartan",
"stigma",
"stoic",
"suave",
"sycophant",
"tirade",
"tryst",
"untenable",
"vicarious",
"vile",
"waft",
"zealous",
]
| 17.845977 | 61 | 0.551977 |
b962302fa813576c8cf57a4deea0db5f25dfb918
| 620 |
py
|
Python
|
docs/mathparse.py
|
pcmoritz/flow
|
bc97132e9e2d05262bb6bbad5bda173fd9f4ae92
|
[
"MIT"
] | 16 |
2018-05-25T06:30:28.000Z
|
2020-08-08T00:03:47.000Z
|
docs/mathparse.py
|
pcmoritz/flow
|
bc97132e9e2d05262bb6bbad5bda173fd9f4ae92
|
[
"MIT"
] | 46 |
2018-05-22T21:32:55.000Z
|
2019-06-12T13:10:02.000Z
|
docs/mathparse.py
|
pcmoritz/flow
|
bc97132e9e2d05262bb6bbad5bda173fd9f4ae92
|
[
"MIT"
] | 6 |
2018-06-22T14:59:14.000Z
|
2019-08-29T06:00:34.000Z
|
"""
A preliminary attempt at parsing an RST file's math syntax
in order to make math render as inline rather than display
mode. This doesn't work yet but might be useful. It may be
unnecessary, however, if pandoc has an option for converting
.md to .rst that renders math inline rather than in display
mode. Keeping it around, though.
"""
import re
s = """Define
.. math:: v_{des}
as the desired velocity,
.. math:: 1^k
a vector of ones of length"""
with open('/Users/nishant/Downloads/tutorialtest.rst', 'r') as myfile:
s = myfile.read()
print([elem[11:-2] for elem in re.findall(r'\n.. math:: *\S*\n\n', s)])
| 22.962963 | 70 | 0.693548 |
b96253f9f9bc87e42d80842aebed3aa7dacb859b
| 1,994 |
py
|
Python
|
lib/layout/primitives.py
|
tailhook/pyzza
|
610be6ee4bea9b64f8226faf7338523fdafdf2cf
|
[
"MIT"
] | 2 |
2015-08-07T15:39:25.000Z
|
2019-03-31T12:45:37.000Z
|
lib/layout/primitives.py
|
tailhook/pyzza
|
610be6ee4bea9b64f8226faf7338523fdafdf2cf
|
[
"MIT"
] | null | null | null |
lib/layout/primitives.py
|
tailhook/pyzza
|
610be6ee4bea9b64f8226faf7338523fdafdf2cf
|
[
"MIT"
] | null | null | null |
from layout import Shape, Widget
from flash.text.engine import TextBlock, TextElement
| 32.688525 | 70 | 0.609829 |
b963a238595dc05d6bc40e6f5888099b52a8fc14
| 20,515 |
py
|
Python
|
tests/testing_server.py
|
ImportTaste/WebRequest
|
0cc385622624de16ec980e0c12d9080d593cab74
|
[
"WTFPL"
] | null | null | null |
tests/testing_server.py
|
ImportTaste/WebRequest
|
0cc385622624de16ec980e0c12d9080d593cab74
|
[
"WTFPL"
] | null | null | null |
tests/testing_server.py
|
ImportTaste/WebRequest
|
0cc385622624de16ec980e0c12d9080d593cab74
|
[
"WTFPL"
] | null | null | null |
import traceback
import uuid
import socket
import logging
import os
import base64
import zlib
import gzip
import time
import datetime
from http import cookies
from http.server import BaseHTTPRequestHandler
from http.server import HTTPServer
from threading import Thread
import WebRequest
if __name__ == '__main__':
wg = WebRequest.WebGetRobust()
srv = start_server(
assertion_class = None,
from_wg = wg,
skip_header_checks = True)
print("running server on port: ", srv)
while 1:
time.sleep(1)
| 32.929374 | 165 | 0.640653 |
b963e6196b8baa521ce89adb40142bf81a9183a6
| 3,770 |
py
|
Python
|
calcgrades.py
|
qrowsxi/calcgrades
|
93c71c1afef8dde5174726ae1702b71ccba633de
|
[
"MIT"
] | null | null | null |
calcgrades.py
|
qrowsxi/calcgrades
|
93c71c1afef8dde5174726ae1702b71ccba633de
|
[
"MIT"
] | null | null | null |
calcgrades.py
|
qrowsxi/calcgrades
|
93c71c1afef8dde5174726ae1702b71ccba633de
|
[
"MIT"
] | null | null | null |
import csv
import math
import numpy as np
import pandas
import scipy.optimize
import sys
import argparse
if __name__ == '__main__':
main()
| 35.566038 | 116 | 0.609284 |
b9652ceb78b45d3bef98c61d48e3cd4630133615
| 19,317 |
py
|
Python
|
sdk/python/pulumi_google_native/testing/v1/test_matrix.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 44 |
2021-04-18T23:00:48.000Z
|
2022-02-14T17:43:15.000Z
|
sdk/python/pulumi_google_native/testing/v1/test_matrix.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 354 |
2021-04-16T16:48:39.000Z
|
2022-03-31T17:16:39.000Z
|
sdk/python/pulumi_google_native/testing/v1/test_matrix.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 8 |
2021-04-24T17:46:51.000Z
|
2022-01-05T10:40:21.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['TestMatrixArgs', 'TestMatrix']
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
client_info: Optional[pulumi.Input[pulumi.InputType['ClientInfoArgs']]] = None,
environment_matrix: Optional[pulumi.Input[pulumi.InputType['EnvironmentMatrixArgs']]] = None,
fail_fast: Optional[pulumi.Input[bool]] = None,
flaky_test_attempts: Optional[pulumi.Input[int]] = None,
project: Optional[pulumi.Input[str]] = None,
request_id: Optional[pulumi.Input[str]] = None,
result_storage: Optional[pulumi.Input[pulumi.InputType['ResultStorageArgs']]] = None,
test_specification: Optional[pulumi.Input[pulumi.InputType['TestSpecificationArgs']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = TestMatrixArgs.__new__(TestMatrixArgs)
__props__.__dict__["client_info"] = client_info
if environment_matrix is None and not opts.urn:
raise TypeError("Missing required property 'environment_matrix'")
__props__.__dict__["environment_matrix"] = environment_matrix
__props__.__dict__["fail_fast"] = fail_fast
__props__.__dict__["flaky_test_attempts"] = flaky_test_attempts
__props__.__dict__["project"] = project
__props__.__dict__["request_id"] = request_id
if result_storage is None and not opts.urn:
raise TypeError("Missing required property 'result_storage'")
__props__.__dict__["result_storage"] = result_storage
if test_specification is None and not opts.urn:
raise TypeError("Missing required property 'test_specification'")
__props__.__dict__["test_specification"] = test_specification
__props__.__dict__["invalid_matrix_details"] = None
__props__.__dict__["outcome_summary"] = None
__props__.__dict__["state"] = None
__props__.__dict__["test_executions"] = None
__props__.__dict__["test_matrix_id"] = None
__props__.__dict__["timestamp"] = None
super(TestMatrix, __self__).__init__(
'google-native:testing/v1:TestMatrix',
resource_name,
__props__,
opts)
| 50.436031 | 458 | 0.67671 |
b965c021bcb2dac479172708e85ad9ed89f09ef2
| 5,427 |
py
|
Python
|
View/View.py
|
MoriokaReimen/ConfigHeaderGenerator
|
73ba5d3bd5269d7e6881ec79b6fc0121ff2fb03e
|
[
"MIT"
] | null | null | null |
View/View.py
|
MoriokaReimen/ConfigHeaderGenerator
|
73ba5d3bd5269d7e6881ec79b6fc0121ff2fb03e
|
[
"MIT"
] | null | null | null |
View/View.py
|
MoriokaReimen/ConfigHeaderGenerator
|
73ba5d3bd5269d7e6881ec79b6fc0121ff2fb03e
|
[
"MIT"
] | null | null | null |
import tkinter as tk
import tkinter.messagebox
from Control import Control
| 43.071429 | 112 | 0.629445 |
b9669e29ffa745ca4256305d7461bcbe497cc930
| 1,428 |
py
|
Python
|
tests/bugs/core_3355_test.py
|
FirebirdSQL/firebird-qa
|
96af2def7f905a06f178e2a80a2c8be4a4b44782
|
[
"MIT"
] | 1 |
2022-02-05T11:37:13.000Z
|
2022-02-05T11:37:13.000Z
|
tests/bugs/core_3355_test.py
|
FirebirdSQL/firebird-qa
|
96af2def7f905a06f178e2a80a2c8be4a4b44782
|
[
"MIT"
] | 1 |
2021-09-03T11:47:00.000Z
|
2021-09-03T12:42:10.000Z
|
tests/bugs/core_3355_test.py
|
FirebirdSQL/firebird-qa
|
96af2def7f905a06f178e2a80a2c8be4a4b44782
|
[
"MIT"
] | 1 |
2021-06-30T14:14:16.000Z
|
2021-06-30T14:14:16.000Z
|
#coding:utf-8
#
# id: bugs.core_3355
# title: Wrong comparison of DATE and TIMESTAMP if index is used
# description:
# tracker_id: CORE-3355
# min_versions: ['2.1.5']
# versions: 3.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0
# resources: None
substitutions_1 = []
init_script_1 = """create table tdate (id integer not null primary key, val date);
create index tdateix1 on tdate (val);
commit;
insert into tdate values (0, '1997-12-31');
insert into tdate values (1, '1998-01-01');
insert into tdate values (2, '1998-01-02');
insert into tdate values (3, '1998-01-03');
insert into tdate values (4, '1998-01-04');
insert into tdate values (5, '1998-01-05');
commit;
"""
db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
test_script_1 = """select count(*) from tdate where val >= timestamp'1998-01-04 12:00:00.0000';
select count(*) from tdate where val < timestamp'1998-01-04 12:00:00.0000';
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
COUNT
=====================
1
COUNT
=====================
5
"""
| 25.052632 | 95 | 0.641457 |
b967ba0197b144171458b230c2dfe31844ba0b72
| 5,231 |
py
|
Python
|
dags/download_decrypt_transfer_files.py
|
hms-dbmi/bch-pic-sure-airflow-dags
|
0c1e6f07da4e270581942e551ac30284474921d4
|
[
"Apache-2.0"
] | null | null | null |
dags/download_decrypt_transfer_files.py
|
hms-dbmi/bch-pic-sure-airflow-dags
|
0c1e6f07da4e270581942e551ac30284474921d4
|
[
"Apache-2.0"
] | null | null | null |
dags/download_decrypt_transfer_files.py
|
hms-dbmi/bch-pic-sure-airflow-dags
|
0c1e6f07da4e270581942e551ac30284474921d4
|
[
"Apache-2.0"
] | null | null | null |
"""
@author: anilkdegala
"""
import os
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator, BranchPythonOperator
from datetime import date, timedelta, datetime
from collections import OrderedDict
from scripts.dag_pebbles import DagPebbles
from airflow.configuration import conf
from scripts.configurations import *
from airflow.operators.dummy_operator import DummyOperator
default_args = {
"owner": "anilkdegala",
"depends_on_past": True,
"max_active_runs": 1,
"start_date": datetime(2015, 6, 1),
"is_active": True,
"is_paused_upon_creation": False,
}
with DAG( "DOWNLOAD_DECRYPT_TRANSFER",
description="Download, Decrypt, Transfer files (Source: S3, Staging: EC2: Target: RDS Oracle)",
default_args=default_args,
schedule_interval=None,
catchup=False,
orientation="TB",
tags=['Utils'],
dagrun_timeout=timedelta(hours=240)
) as dag:
t_pipeline_begin = PythonOperator(
task_id="begin_pipeline",
python_callable=begin_pipeline,
provide_context=True,
dag=dag,
)
t_check_pipeline = BranchPythonOperator(
task_id="check_pipeline",
python_callable=pipeline_enable_check,
provide_context=True,
dag=dag,
)
t_pipeline_check_passed = PythonOperator(
task_id="pipeline_check_passed",
python_callable=pipeline_check_passed,
provide_context=True,
dag=dag,
)
t_pipeline_check_skipped = PythonOperator(
task_id="pipeline_check_skipped",
python_callable=pipeline_check_skipped,
provide_context=True,
dag=dag,
)
download_files_cmd = "/opt/bitnami/airflow/airflow-data/scripts/download_files.sh "+"{{ ti.xcom_pull(key='download_decrypt_arguments')}}"
t_download_files = BashOperator(
task_id='download_files',
bash_command=download_files_cmd,
dag=dag)
decrypt_files_cmd = "/opt/bitnami/airflow/airflow-data/scripts/decrypt_files.sh "+"{{ ti.xcom_pull(key='download_decrypt_arguments')}} "
t_decrypt_files = BashOperator(
task_id='decrypt_files',
bash_command=decrypt_files_cmd,
dag=dag)
transfer_files_cmd = "/opt/bitnami/airflow/airflow-data/scripts/transfer_files_rds.pl "+"{{ ti.xcom_pull(key='transfer_arguments')}} "
t_transfer_files = BashOperator(
task_id='transfer_files',
bash_command=transfer_files_cmd,
dag=dag)
t_end_pipeline = PythonOperator(
task_id="end_pipeline",
python_callable=end_pipeline,
provide_context=True,
trigger_rule="none_failed",
dag=dag,
)
t_notify = PythonOperator(
task_id="send_notifications",
python_callable=notify,
provide_context=True,
trigger_rule="none_failed",
dag=dag,
)
t_cleanup = PythonOperator(
task_id="cleanup",
python_callable=cleanup,
provide_context=True,
trigger_rule="none_failed",
dag=dag,
)
t_end = PythonOperator(
task_id="end",
python_callable=end,
provide_context=True,
trigger_rule="none_failed",
dag=dag,
)
t_pipeline_begin >> t_check_pipeline
t_check_pipeline >> t_pipeline_check_skipped >> t_end_pipeline
t_check_pipeline >> t_pipeline_check_passed >> t_download_files >> t_decrypt_files >> t_transfer_files >> t_end_pipeline
t_end_pipeline >> t_cleanup >> t_notify >> t_end
| 30.770588 | 171 | 0.664118 |
b96834dcae4311b040352e86ae4bdc019619193a
| 7,518 |
py
|
Python
|
keystone-moon/keystone/endpoint_policy/controllers.py
|
hashnfv/hashnfv-moon
|
daaba34fa2ed4426bc0fde359e54a5e1b872208c
|
[
"Apache-2.0"
] | null | null | null |
keystone-moon/keystone/endpoint_policy/controllers.py
|
hashnfv/hashnfv-moon
|
daaba34fa2ed4426bc0fde359e54a5e1b872208c
|
[
"Apache-2.0"
] | null | null | null |
keystone-moon/keystone/endpoint_policy/controllers.py
|
hashnfv/hashnfv-moon
|
daaba34fa2ed4426bc0fde359e54a5e1b872208c
|
[
"Apache-2.0"
] | 1 |
2021-03-21T11:38:30.000Z
|
2021-03-21T11:38:30.000Z
|
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystone.common import controller
from keystone.common import dependency
from keystone import notifications
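

# The controller definitions were truncated in this snippet. A minimal sketch
# of the usual keystone pattern is shown below; the dependency names and class
# body are assumptions, not the verified originals.
@dependency.requires('policy_api', 'catalog_api', 'endpoint_policy_api')
class EndpointPolicyV3Controller(controller.V3Controller):
    member_name = 'endpoint'
    # ... endpoint/policy association methods live here in the real module ...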
| 45.017964 | 79 | 0.699654 |
b96893ff0c22487256e91c812d37a56c2c479eb3
| 11,886 |
py
|
Python
|
src/nibetaseries/cli/run.py
|
ipacheco-uy/NiBetaSeries
|
3d8716552f22f925524d80af9aace09469c22d4d
|
[
"MIT"
] | 1 |
2019-10-03T21:20:48.000Z
|
2019-10-03T21:20:48.000Z
|
src/nibetaseries/cli/run.py
|
ipacheco-uy/NiBetaSeries
|
3d8716552f22f925524d80af9aace09469c22d4d
|
[
"MIT"
] | null | null | null |
src/nibetaseries/cli/run.py
|
ipacheco-uy/NiBetaSeries
|
3d8716552f22f925524d80af9aace09469c22d4d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains the command line app.
Why does this file exist, and why not put this in __main__?
You might be tempted to import things from __main__ later, but that will cause
problems: the code will get executed twice:
- When you run `python -m nibetaseries` python will execute
``__main__.py`` as a script. That means there won't be any
``nibetaseries.__main__`` in ``sys.modules``.
- When you import __main__ it will get executed again (as a module) because
there's no ``nibetaseries.__main__`` in ``sys.modules``.
Also see (1) from http://click.pocoo.org/5/setuptools/#setuptools-integration
"""
from __future__ import absolute_import
import os
import argparse
from argparse import RawTextHelpFormatter
from glob import glob
from multiprocessing import cpu_count
from nipype import config as ncfg
def get_parser():
"""Build parser object"""
from ..__init__ import __version__
import sys
verstr = 'nibs v{}'.format(__version__)
parser = argparse.ArgumentParser(description='NiBetaSeries BIDS arguments',
formatter_class=RawTextHelpFormatter)
parser.add_argument('bids_dir', help='The directory with the input dataset '
'formatted according to the BIDS standard.')
parser.add_argument('derivatives_pipeline', help='The pipeline that contains '
'minimally preprocessed img, brainmask, and confounds.tsv')
parser.add_argument('output_dir', help='The directory where the output directory '
'and files should be stored. If you are running group level analysis '
'this folder should be prepopulated with the results of the'
'participant level analysis.')
parser.add_argument('analysis_level', choices=['participant', 'group'],
help='Level of the analysis that will be performed '
'Multiple participant level analyses can be run independently '
'(in parallel) using the same output_dir')
parser.add_argument('-v', '--version', action='version',
version=verstr)
# Atlas Arguments (Required Options)
atlas_args = parser.add_argument_group('Required Atlas Arguments')
atlas_args.add_argument('-a', '--atlas-img', action='store',
required=('-l' in sys.argv or '--atlas-lut' in sys.argv),
help='input atlas nifti where each voxel within a "region" '
'is labeled with the same integer and there is a unique '
'integer associated with each region of interest.')
atlas_args.add_argument('-l', '--atlas-lut', action='store',
required=('-a' in sys.argv or '--atlas-img' in sys.argv),
help='atlas look up table (tsv) formatted with the columns: '
'index, regions which correspond to the regions in the '
'nifti file specified by --atlas-img.')
# preprocessing options
proc_opts = parser.add_argument_group('Options for processing')
proc_opts.add_argument('--estimator', default='lss',
choices=['lss', 'lsa'],
help='beta series modeling method')
proc_opts.add_argument('-sm', '--smoothing-kernel', action='store', type=float, default=6.0,
help='select a smoothing kernel (mm)')
proc_opts.add_argument('-hp', '--high-pass', action='store', type=float,
default=0.0078125, help='high pass filter (Hz)')
proc_opts.add_argument('-c', '--confounds', help='The confound column names '
'that are to be included in nuisance regression. '
'write the confounds you wish to include separated by a space',
nargs="+")
proc_opts.add_argument('--hrf-model', default='glover',
choices=['glover', 'spm', 'fir',
'glover + derivative',
'glover + derivative + dispersion',
'spm + derivative',
'spm + derivative + dispersion'],
help='convolve your regressors '
'with one of the following hemodynamic response functions')
proc_opts.add_argument('--fir-delays', default=None,
nargs='+', type=int, help='FIR delays in volumes',
metavar='VOL')
proc_opts.add_argument('-w', '--work-dir', help='directory where temporary files '
'are stored (i.e. non-essential files). '
'This directory can be deleted once you are reasonably '
'certain nibs finished as expected.')
# Image Selection options
image_opts = parser.add_argument_group('Options for selecting images')
parser.add_argument('--participant-label', nargs="+",
help='The label(s) of the participant(s) '
'that should be analyzed. The label '
'corresponds to sub-<participant_label> from the BIDS spec '
'(so it does not include "sub-"). If this parameter is not '
'provided all subjects should be analyzed. Multiple '
'participants can be specified with a space separated list.')
image_opts.add_argument('--session-label', action='store',
default=None, help='select a session to analyze')
image_opts.add_argument('-t', '--task-label', action='store',
default=None, help='select a specific task to be processed')
image_opts.add_argument('--run-label', action='store',
default=None, help='select a run to analyze')
image_opts.add_argument('-sp', '--space-label', action='store', default='MNI152NLin2009cAsym',
choices=['MNI152NLin2009cAsym'],
help='select a bold derivative in a specific space to be used')
image_opts.add_argument('--description-label', action='store',
default=None, help='select a bold file with particular '
'`desc` label to process')
image_opts.add_argument('--exclude-description-label', action='store_true',
default=False, help='exclude this `desc` label from nibetaseries')
# performance options
g_perfm = parser.add_argument_group('Options to handle performance')
g_perfm.add_argument('--nthreads', '-n-cpus', action='store', type=int,
help='maximum number of threads across all processes')
g_perfm.add_argument('--use-plugin', action='store', default=None,
help='nipype plugin configuration file')
# misc options
misc = parser.add_argument_group('misc options')
misc.add_argument('--graph', action='store_true', default=False,
help='generates a graph png of the workflow')
return parser
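

# main() and init() were truncated in this snippet; the sketch below is an
# assumption based on the project's packaging pattern, given so the trailing
# init() call resolves.
def main(argv=None):
    """Parse arguments and build/run the workflow (body elided in this snippet)."""
    opts = get_parser().parse_args(argv)
    # ... workflow construction and execution happens here in the real module ...


def init():
    if __name__ == "__main__":
        raise RuntimeError("NiBetaSeries/cli/run.py should not be run directly;\n"
                           "Please `pip install` NiBetaSeries and use the `nibs` command")
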
init()
| 46.611765 | 98 | 0.595406 |
b9693ae1ef191dd2735a2abba99bb1bc689af26f
| 2,727 |
py
|
Python
|
custom_components/senz/config_flow.py
|
astrandb/senz_hass
|
6725d37fd9c6d250ac10a16e68c56908bf1c8404
|
[
"MIT"
] | 2 |
2022-01-15T09:55:58.000Z
|
2022-02-10T10:13:35.000Z
|
custom_components/senz/config_flow.py
|
astrandb/senz_hass
|
6725d37fd9c6d250ac10a16e68c56908bf1c8404
|
[
"MIT"
] | 4 |
2022-01-15T19:41:28.000Z
|
2022-02-14T16:01:47.000Z
|
custom_components/senz/config_flow.py
|
astrandb/senz_hass
|
6725d37fd9c6d250ac10a16e68c56908bf1c8404
|
[
"MIT"
] | null | null | null |
"""Config flow for SENZ WiFi."""
from __future__ import annotations
import logging
from typing import Any
import voluptuous as vol
from homeassistant.components import persistent_notification
from homeassistant.data_entry_flow import FlowResult
from homeassistant.helpers import config_entry_oauth2_flow
from .const import DOMAIN
from .pysenz import PreAPI
| 34.518987 | 184 | 0.671067 |
b9697b05a9b44247d80463465fa92118d707fb98
| 6,465 |
py
|
Python
|
astropy_helpers/git_helpers.py
|
bsipocz/astropy-helpers
|
4999df1cfb6a5022347b0cef9caf8a556517c625
|
[
"PSF-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 9 |
2019-12-06T13:12:33.000Z
|
2021-10-05T12:47:15.000Z
|
astropy_helpers/git_helpers.py
|
bsipocz/astropy-helpers
|
4999df1cfb6a5022347b0cef9caf8a556517c625
|
[
"PSF-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 2 |
2019-11-28T17:20:27.000Z
|
2019-12-09T18:44:35.000Z
|
astropy_helpers/git_helpers.py
|
bsipocz/astropy-helpers
|
4999df1cfb6a5022347b0cef9caf8a556517c625
|
[
"PSF-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 3 |
2019-11-28T17:04:22.000Z
|
2021-10-19T13:12:34.000Z
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Utilities for retrieving revision information from a project's git repository.
"""
# Do not remove the following comment; it is used by
# astropy_helpers.version_helpers to determine the beginning of the code in
# this module
# BEGIN
import locale
import os
import subprocess
import warnings
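

# _decode_stdio (used below) was truncated from this snippet; this is a
# minimal reconstruction sketch, not the verified original.
def _decode_stdio(stream):
    try:
        stdio_encoding = locale.getdefaultlocale()[1] or 'utf-8'
    except ValueError:
        stdio_encoding = 'utf-8'

    try:
        text = stream.decode(stdio_encoding)
    except UnicodeDecodeError:
        text = stream.decode('latin1')

    return text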
def update_git_devstr(version, path=None):
"""
Updates the git revision string if and only if the path is being imported
directly from a git working copy. This ensures that the revision number in
the version string is accurate.
"""
try:
# Quick way to determine if we're in git or not - returns '' if not
devstr = get_git_devstr(sha=True, show_warning=False, path=path)
except OSError:
return version
if not devstr:
# Probably not in git so just pass silently
return version
if 'dev' in version: # update to the current git revision
version_base = version.split('.dev', 1)[0]
devstr = get_git_devstr(sha=False, show_warning=False, path=path)
return version_base + '.dev' + devstr
else:
# otherwise it's already the true/release version
return version
def get_git_devstr(sha=False, show_warning=True, path=None):
"""
Determines the number of revisions in this repository.
Parameters
----------
sha : bool
If True, the full SHA1 hash will be returned. Otherwise, the total
count of commits in the repository will be used as a "revision
number".
show_warning : bool
If True, issue a warning if git returns an error code, otherwise errors
pass silently.
path : str or None
If a string, specifies the directory to look in to find the git
repository. If `None`, the current working directory is used, and must
be the root of the git repository.
If given a filename it uses the directory containing that file.
Returns
-------
devversion : str
Either a string with the revision number (if `sha` is False), the
SHA1 hash of the current commit (if `sha` is True), or an empty string
if git version info could not be identified.
"""
if path is None:
path = os.getcwd()
if not os.path.isdir(path):
path = os.path.abspath(os.path.dirname(path))
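
    # run_git's definition was truncated from this snippet; the nested helper
    # below is a minimal reconstruction sketch (not the verified original) so
    # the calls that follow resolve.
    def run_git(cmd):
        try:
            p = subprocess.Popen(['git'] + cmd, cwd=path,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 stdin=subprocess.PIPE)
            stdout, stderr = p.communicate()
        except OSError as e:
            if show_warning:
                warnings.warn('Error running git: ' + str(e))
            return (None, b'', b'')

        if p.returncode != 0 and show_warning:
            warnings.warn('git returned error code {0}'.format(p.returncode))

        return p.returncode, stdout, stderr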
if sha:
# Faster for getting just the hash of HEAD
cmd = ['rev-parse', 'HEAD']
else:
cmd = ['rev-list', '--count', 'HEAD']
returncode, stdout, stderr = run_git(cmd)
if not sha and returncode == 128:
# git returns 128 if the command is not run from within a git
# repository tree. In this case, a warning is produced above but we
# return the default dev version of '0'.
return '0'
elif not sha and returncode == 129:
# git returns 129 if a command option failed to parse; in
# particular this could happen in git versions older than 1.7.2
# where the --count option is not supported
# Also use --abbrev-commit and --abbrev=0 to display the minimum
# number of characters needed per-commit (rather than the full hash)
cmd = ['rev-list', '--abbrev-commit', '--abbrev=0', 'HEAD']
returncode, stdout, stderr = run_git(cmd)
# Fall back on the old method of getting all revisions and counting
# the lines
if returncode == 0:
return str(stdout.count(b'\n'))
else:
return ''
elif sha:
return _decode_stdio(stdout)[:40]
else:
return _decode_stdio(stdout).strip()
# This function is tested but it is only ever executed within a subprocess when
# creating a fake package, so it doesn't get picked up by coverage metrics.
def _get_repo_path(pathname, levels=None): # pragma: no cover
"""
Given a file or directory name, determine the root of the git repository
this path is under. If given, this won't look any higher than ``levels``
(that is, if ``levels=0`` then the given path must be the root of the git
repository and is returned if so.
Returns `None` if the given path could not be determined to belong to a git
repo.
"""
if os.path.isfile(pathname):
current_dir = os.path.abspath(os.path.dirname(pathname))
elif os.path.isdir(pathname):
current_dir = os.path.abspath(pathname)
else:
return None
current_level = 0
while levels is None or current_level <= levels:
if os.path.exists(os.path.join(current_dir, '.git')):
return current_dir
current_level += 1
if current_dir == os.path.dirname(current_dir):
break
current_dir = os.path.dirname(current_dir)
return None
| 33.324742 | 79 | 0.612065 |
b96b280416f0d557826ffa670a7914f2d45e5fc5
| 526 |
py
|
Python
|
src/sot_talos_balance/test/test_feet_admittance.py
|
imaroger/sot-talos-balance
|
5e56700b4e105273ecf6feb3474789beac469a77
|
[
"BSD-2-Clause"
] | null | null | null |
src/sot_talos_balance/test/test_feet_admittance.py
|
imaroger/sot-talos-balance
|
5e56700b4e105273ecf6feb3474789beac469a77
|
[
"BSD-2-Clause"
] | null | null | null |
src/sot_talos_balance/test/test_feet_admittance.py
|
imaroger/sot-talos-balance
|
5e56700b4e105273ecf6feb3474789beac469a77
|
[
"BSD-2-Clause"
] | null | null | null |
'''Test feet admittance control'''
from sot_talos_balance.utils.run_test_utils import run_ft_calibration, run_test, runCommandClient
try:
# Python 2
input = raw_input # noqa
except NameError:
pass
run_test('appli_feet_admittance.py')
run_ft_calibration('robot.ftc')
input("Wait before running the test")
print('Set saturation value')
runCommandClient('robot.admBF_dqSaturation.sin.value = [0.0, 0.0, 0.01, 0.0, 0.0, 0.0]')
input("Wait before dumping the data")
runCommandClient('dump_tracer(robot.tracer)')
| 25.047619 | 97 | 0.752852 |
b96bb8e94e8bbfe556cc0ad3a314b6991573aa47
| 544 |
py
|
Python
|
tests/test_db.py
|
davebryson/py-tendermint
|
ec6a38a54950d9841759b0f2ed93659b58948a03
|
[
"Apache-2.0"
] | 24 |
2017-08-18T20:36:27.000Z
|
2020-03-27T08:55:39.000Z
|
tests/test_db.py
|
davebryson/py-tendermint
|
ec6a38a54950d9841759b0f2ed93659b58948a03
|
[
"Apache-2.0"
] | 6 |
2017-10-14T05:50:34.000Z
|
2019-06-03T08:39:49.000Z
|
tests/test_db.py
|
davebryson/py-tendermint
|
ec6a38a54950d9841759b0f2ed93659b58948a03
|
[
"Apache-2.0"
] | 5 |
2018-01-09T11:07:06.000Z
|
2019-06-02T14:34:34.000Z
|
import os
from tendermint.db import VanillaDB
from tendermint.utils import home_dir
| 20.923077 | 42 | 0.621324 |
b96d766a7c5eab27eb3785b1277b6beccda7c9ed
| 1,446 |
py
|
Python
|
auth/tests/test_views.py
|
asb29/Redundant
|
ee816fd41f9217610bd11f757cf9175288723c70
|
[
"MIT"
] | null | null | null |
auth/tests/test_views.py
|
asb29/Redundant
|
ee816fd41f9217610bd11f757cf9175288723c70
|
[
"MIT"
] | null | null | null |
auth/tests/test_views.py
|
asb29/Redundant
|
ee816fd41f9217610bd11f757cf9175288723c70
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from django.test import Client
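

# The test-case bodies were truncated in this snippet. The sketch below is a
# hedged assumption: the route '/register/' is illustrative, not taken from
# the project.
class RegistrationViewTests(TestCase):
    def test_register_page_responds(self):
        client = Client()
        response = client.get('/register/')  # assumed URL
        self.assertIn(response.status_code, (200, 301, 302))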
| 30.125 | 52 | 0.53527 |
b96f6c5854c1e905c9ad5d8f08d016972c710a1f
| 4,134 |
py
|
Python
|
projects/OneNet/onenet/head.py
|
iFighting/OneNet
|
6e33b46d2aa13131262833c75f0fd1c3d224ef03
|
[
"MIT"
] | 2 |
2021-06-16T01:31:17.000Z
|
2021-11-25T15:27:28.000Z
|
projects/OneNet/onenet/head.py
|
xieenze/OneNet
|
3b06ad6832727cef4c0262389de4cdbb2a666197
|
[
"MIT"
] | null | null | null |
projects/OneNet/onenet/head.py
|
xieenze/OneNet
|
3b06ad6832727cef4c0262389de4cdbb2a666197
|
[
"MIT"
] | 1 |
2021-02-04T06:38:42.000Z
|
2021-02-04T06:38:42.000Z
|
#
# Modified by Peize Sun
# Contact: [email protected]
#
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
OneNet Transformer class.
Copy-paste from torch.nn.Transformer with modifications:
* positional encodings are passed in MHattention
* extra LN at the end of encoder is removed
* decoder returns a stack of activations from all decoding layers
"""
import copy
import math
from typing import Optional, List
import torch
from torch import nn, Tensor
import torch.nn.functional as F
from detectron2.modeling.poolers import ROIPooler, cat
from detectron2.structures import Boxes
from .deconv import CenternetDeconv
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
| 32.296875 | 94 | 0.600871 |
b96fae5c29fd446ea7199733a629bbe0f6190046
| 49,876 |
py
|
Python
|
mermaid/utils.py
|
HastingsGreer/mermaid
|
bd13c5fc427eb8cd9054973a8eaaeb302078182d
|
[
"Apache-2.0"
] | 120 |
2019-10-29T23:53:02.000Z
|
2022-03-30T02:59:58.000Z
|
mermaid/utils.py
|
AlexanderChristgau/mermaid
|
ba07883cc3cb5982e4655048a434b4495cb49c6d
|
[
"Apache-2.0"
] | 10 |
2019-11-05T09:28:35.000Z
|
2022-01-09T19:12:51.000Z
|
mermaid/utils.py
|
AlexanderChristgau/mermaid
|
ba07883cc3cb5982e4655048a434b4495cb49c6d
|
[
"Apache-2.0"
] | 19 |
2019-11-10T13:34:39.000Z
|
2022-03-13T20:30:10.000Z
|
"""Various utility functions.
.. todo::
Reorganize this package in a more meaningful way.
"""
from __future__ import print_function
from __future__ import absolute_import
# from builtins import str
# from builtins import range
import torch
from torch.nn.parameter import Parameter
from torch.autograd import Variable
from .libraries.modules.stn_nd import STN_ND_BCXYZ
from .data_wrapper import AdaptVal
from .data_wrapper import MyTensor
from . import smoother_factory as sf
from .data_wrapper import USE_CUDA
import numpy as np
from . import finite_differences as fd
import torch.nn as nn
import torch.nn.init as init
from . import module_parameters as pars
from .spline_interpolation import SplineInterpolation_ND_BCXYZ
import os
try:
from .libraries.functions.nn_interpolation import get_nn_interpolation
except ImportError:
print('WARNING: nn_interpolation could not be imported (only supported in CUDA at the moment). '
'Some functionality may not be available.')
def my_hasnan(x):
"""Check if any input elements are NaNs.
:param x: numpy array
:return: True if NaNs are present, False else
"""
return (x != x).any()
def combine_dict(d1,d2):
"""Creates a dictionary which has entries from both of them.
:param d1: dictionary 1
:param d2: dictionary 2
:return: resulting dictionary
"""
d = d1.copy()
d.update(d2)
return d
def get_parameter_list_from_parameter_dict(pd):
"""Takes a dictionary which contains key value pairs for model parameters and converts it into a list of
parameters that can be used as an input to an optimizer.
:param pd: parameter dictionary
:return: list of parameters
"""
pl = []
for key in pd:
pl.append(pd[key])
return pl
def get_parameter_list_and_par_to_name_dict_from_parameter_dict(pd):
"""Same as get_parameter_list_from_parameter_dict; but also returns a dictionary which keeps track of the keys
based on memory id.
:param pd: parameter dictionary
:return: tuple of (parameter_list, name_dictionary)
"""
par_to_name_dict = dict()
pl = []
for key in pd:
pl.append(pd[key])
par_to_name_dict[pd[key]] = key
return pl, par_to_name_dict
def lift_to_dimension(A, dim):
"""Creates a view of A of dimension dim (by adding dummy dimensions if necessary).
:param A: numpy array
:param dim: desired dimension of view
:return: returns view of A of appropriate dimension
"""
current_dim = len(A.shape)
if current_dim > dim:
raise ValueError('Can only add dimensions, but not remove them')
if current_dim == dim:
return A
else:
return A.reshape([1]*(dim-current_dim)+list(A.shape))
def get_dim_of_affine_transform(Ab):
"""Returns the number of dimensions corresponding to an affine transformation of the
form y=Ax+b stored in a column vector. For A =[a1,a2,a3], the parameter vector is simply
[a1;a2;a3;b], i.e., all columns stacked on top of each other.
:param Ab: parameter vector
:return: dimensionality of transform (1,2,or 3)
"""
nr = len(Ab)
if nr==2:
return 1
elif nr==6:
return 2
elif nr==12:
return 3
else:
raise ValueError('Only supports dimensions 1, 2, and 3.')
def set_affine_transform_to_identity(Ab):
"""Sets the affine transformation as given by the column vector Ab to the identity transform.
:param Ab: Affine parameter vector (will be overwritten with the identity transform)
:return:
"""
dim = get_dim_of_affine_transform(Ab)
if dim==1:
Ab.zero_()
Ab[0]=1.
elif dim==2:
Ab.zero_()
Ab[0]=1.
Ab[3]=1.
elif dim==3:
Ab.zero_()
Ab[0]=1.
Ab[4]=1.
Ab[8]=1.
else:
raise ValueError('Only supports dimensions 1, 2, and 3.')
def set_affine_transform_to_identity_multiN(Ab):
"""Set the affine transforms to the identity (in the case of arbitrary batch size).
:param Ab: Parameter vectors B x pars (batch size x param. vector); will be overwritten with identity trans.
:return:
"""
sz = Ab.size()
nr_of_images = sz[0]
for nrI in range(nr_of_images):
set_affine_transform_to_identity(Ab[nrI, :])
def get_inverse_affine_param(Ab):
"""Computes inverse of affine transformation.
Formally: C(Ax+b)+d = CAx+Cb+d = x; C = inv(A), d = -Cb
:param Ab: B x pars (batch size x param. vector)
:return: Inverse of affine parameters
"""
    dim = 0
if Ab.shape[1] == 2:
dim = 1
elif Ab.shape[1] == 6:
dim = 2
elif Ab.shape[1] == 12:
dim = 3
if dim not in [1, 2, 3]:
raise ValueError('Only supports dimensions 1, 2, and 3.')
Ab = Ab.view(Ab.shape[0], dim+1, dim).transpose(1,2)
Ab_inv = torch.zeros_like(Ab)
for n in range(Ab.shape[0]):
tm_inv = torch.inverse(Ab[n, :, :dim])
Ab_inv[n, :, :dim] = tm_inv
Ab_inv[n, :, dim] = - torch.matmul(tm_inv, Ab[n,:,dim])
inv_affine_param = Ab_inv.transpose(1, 2).contiguous().view(Ab.shape[0], -1)
return inv_affine_param
def update_affine_param(Ab, Cd):
"""Update affine parameters.
Formally: C(Ax+b)+d = CAx+Cb+d
:param Ab: B x pars (batch size x param. vector)
:return: Updated affine parameters
"""
dim = 0
if Ab.shape[1]==2:
dim = 1
elif Ab.shape[1]==6:
dim = 2
elif Ab.shape[1]==12:
dim = 3
if dim not in [1, 2, 3]:
raise ValueError('Only supports dimensions 1, 2, and 3.')
Ab = Ab.view(Ab.shape[0], dim+1, dim).transpose(1, 2)
Cd = Cd.view(Cd.shape[0], dim+1, dim).transpose(1, 2)
updated_param = torch.zeros_like(Ab)
for n in range(Ab.shape[0]):
tm_param = torch.matmul(Cd[n,:,:dim],Ab[n,:,:dim])
updated_param[n,:,:dim] = tm_param
updated_param[n,:,dim] = torch.matmul(Cd[n,:,:dim], Ab[n,:,dim]) +Cd[n,:,dim]
updated_param = updated_param.transpose(1,2).contiguous().view(Ab.shape[0],-1)
return updated_param
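
# Illustrative check (not part of the original file): composing a transform
# with its inverse recovers the identity, e.g. for a single 2D transform
#   Ab = torch.tensor([[1., 0., 0., 1., 5., -3.]])
#   update_affine_param(Ab, get_inverse_affine_param(Ab))
# returns [1, 0, 0, 1, 0, 0] up to numerical precision.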
def apply_affine_transform_to_map(Ab,phi):
"""Applies an affine transform to a map.
:param Ab: affine transform parameter column vector
:param phi: map; format nrCxXxYxZ (nrC corresponds to dimension)
:return: returns transformed map
"""
sz = phi.size()
dim = len(sz) - 1
if dim not in [1,2,3]:
raise ValueError('Only supports dimensions 1, 2, and 3.')
phiR = MyTensor(sz).zero_().type_as(phi)
if dim == 1:
phiR = phi * Ab[0] + Ab[1]
elif dim == 2:
phiR[0, ...] = Ab[0] * phi[0, ...] + Ab[2] * phi[1, ...] + Ab[4] # a_11x+a_21y+b1
phiR[1, ...] = Ab[1] * phi[0, ...] + Ab[3] * phi[1, ...] + Ab[5] # a_12x+a_22y+b2
elif dim == 3:
phiR[0, ...] = Ab[0] * phi[0, ...] + Ab[3] * phi[1, ...] + Ab[6] * phi[2, ...] + Ab[9]
phiR[1, ...] = Ab[1] * phi[0, ...] + Ab[4] * phi[1, ...] + Ab[7] * phi[2, ...] + Ab[10]
phiR[2, ...] = Ab[2] * phi[0, ...] + Ab[5] * phi[1, ...] + Ab[8] * phi[2, ...] + Ab[11]
else:
raise ValueError('Only supports dimensions 1, 2, and 3.')
return phiR
def apply_affine_transform_to_map_multiNC(Ab,phi):
"""Applies an affine transform to maps (for arbitrary batch size).
:param Ab: affine transform parameter column vectors (batch size x param. vector)
:param phi: maps; format batchxnrCxXxYxZ (nrC corresponds to dimension)
:return: returns transformed maps
"""
sz = phi.size()
dim = get_dim_of_affine_transform(Ab[0,:])
nr_of_images = Ab.size()[0]
if nr_of_images != sz[0]:
raise ValueError('Incompatible number of affine transforms')
if dim != len(sz)-2:
raise ValueError('Incompatible number of affine transforms')
phiR = MyTensor(sz).zero_().type_as(phi)
for nrI in range(nr_of_images):
phiR[nrI, ...] = apply_affine_transform_to_map(Ab[nrI, :], phi[nrI, ...])
return phiR
def compute_normalized_gaussian(X, mu, sig):
"""Computes a normalized Gaussian.
:param X: map with coordinates at which to evaluate
:param mu: array indicating the mean
:param sig: array indicating the standard deviations for the different dimensions
:return: Normalized Gaussian evaluated at coordinates in X
Example::
>>> mu, sig = [1,1], [1,1]
>>> X = [0,0]
    >>> print(compute_normalized_gaussian(X, mu, sig))
"""
dim = len(mu)
if dim == 1:
g = np.exp(-np.power(X[0, :] - mu[0], 2.)/(2*np.power(sig[0], 2.)))
g = g/g.sum()
return g
elif dim == 2:
g = np.exp(-np.power(X[0,:,:]-mu[0],2.)/(2*np.power(sig[0],2.))
- np.power(X[1,:, :] - mu[1], 2.) / (2 * np.power(sig[1], 2.)))
g = g/g.sum()
return g
elif dim == 3:
g = np.exp(-np.power(X[0,:, :, :] - mu[0], 2.) / (2 * np.power(sig[0], 2.))
-np.power(X[1,:, :, :] - mu[1], 2.) / (2 * np.power(sig[1], 2.))
-np.power(X[2,:, :, :] - mu[2], 2.) / (2 * np.power(sig[2], 2.)))
g = g / g.sum()
return g
else:
raise ValueError('Can only compute Gaussians in dimensions 1-3')
def compute_warped_image(I0, phi, spacing, spline_order, zero_boundary=False, use_01_input=True):
"""Warps image.
:param I0: image to warp, image size XxYxZ
:param phi: map for the warping, size dimxXxYxZ
:param spacing: image spacing [dx,dy,dz]
:return: returns the warped image of size XxYxZ
"""
# implements this by creating a different view (effectively adding dimensions)
Iw = compute_warped_image_multiNC(I0.view(torch.Size([1, 1] + list(I0.size()))),
phi.view(torch.Size([1] + list(phi.size()))),
spacing,
spline_order,
zero_boundary,
use_01_input)
return Iw.view(I0.size())
def compute_warped_image_multiNC(I0, phi, spacing, spline_order, zero_boundary=False, use_01_input=True):
"""Warps image.
:param I0: image to warp, image size BxCxXxYxZ
:param phi: map for the warping, size BxdimxXxYxZ
:param spacing: image spacing [dx,dy,dz]
:return: returns the warped image of size BxCxXxYxZ
"""
dim = I0.dim()-2
if dim == 1:
return _compute_warped_image_multiNC_1d(I0, phi, spacing, spline_order,zero_boundary,use_01_input=use_01_input)
elif dim == 2:
return _compute_warped_image_multiNC_2d(I0, phi, spacing, spline_order,zero_boundary,use_01_input=use_01_input)
elif dim == 3:
return _compute_warped_image_multiNC_3d(I0, phi, spacing, spline_order,zero_boundary,use_01_input=use_01_input)
else:
raise ValueError('Images can only be warped in dimensions 1 to 3')
def _get_low_res_spacing_from_spacing(spacing, sz, lowResSize):
"""Computes spacing for the low-res parametrization from image spacing.
:param spacing: image spacing
:param sz: size of image
:param lowResSize: size of low re parameterization
:return: returns spacing of low res parameterization
"""
#todo: check that this is the correct way of doing it
return spacing * (np.array(sz[2::])-1) / (np.array(lowResSize[2::])-1)
def _get_low_res_size_from_size(sz, factor):
"""Returns the corresponding low-res size from a (high-res) sz.
:param sz: size (high-res)
:param factor: low-res factor (needs to be <1)
:return: low res size
"""
if (factor is None) or (factor >= 1):
print('WARNING: Could not compute low_res_size as factor was ' + str(factor))
return np.array(sz)
else:
low_res_sz = np.array(sz)
low_res_sz[2::] = (np.ceil((np.array(sz[2::]) * factor))).astype('int16')
return low_res_sz
def compute_vector_momentum_from_scalar_momentum_multiNC(lam, I, sz, spacing):
"""Computes the vector momentum from the scalar momentum: :math:`m=\\lambda\\nabla I`.
:param lam: scalar momentum, BxCxXxYxZ
:param I: image, BxCxXxYxZ
:param sz: size of image
:param spacing: spacing of image
:return: returns the vector momentum
"""
nrOfI = sz[0] # number of images
    m = create_ND_vector_field_variable_multiN(sz[2::], nrOfI)  # note: the second dimension here is the image dimension, not nrOfC
nrOfC = sz[1]
for c in range(nrOfC): # loop over all the channels and add the results
m = m + compute_vector_momentum_from_scalar_momentum_multiN(lam[:, c, ...],
I[:, c, ...],
nrOfI,
sz[2::],
spacing)
return m
def compute_vector_momentum_from_scalar_momentum_multiN(lam, I, nrOfI, sz, spacing):
"""Computes the vector momentum from the scalar momentum: :math:`m=\\lambda\\nabla I`.
:param lam: scalar momentum, batchxXxYxZ
:param I: image, batchXxYxZ
:param sz: size of image
:param spacing: spacing of image
:return: returns the vector momentum
"""
fdt = fd.FD_torch(spacing)
dim = len(sz)
m = create_ND_vector_field_variable_multiN(sz, nrOfI)
if dim == 1:
m[:, 0, :] = fdt.dXc(I)*lam
elif dim == 2:
m[:, 0, :, :] = fdt.dXc(I)*lam
m[:, 1, :, :] = fdt.dYc(I)*lam
elif dim == 3:
m[:, 0, :, :, :] = fdt.dXc(I)*lam
m[:, 1, :, :, :] = fdt.dYc(I)*lam
m[:, 2, :, :, :] = fdt.dZc(I)*lam
else:
raise ValueError('Can only convert scalar to vector momentum in dimensions 1-3')
return m
def create_ND_vector_field_variable_multiN(sz, nr_of_images=1):
"""
Create vector field torch Variable of given size
:param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D)
    :param nr_of_images: number of images
:return: returns vector field of size nrOfIxdimxXxYxZ
"""
dim = len(sz)
csz = np.array(sz) # just to make sure it is a numpy array
csz = np.array([nr_of_images, dim]+list(csz))
return MyTensor(*(csz.tolist())).normal_(0., 1e-7)
def create_ND_vector_field_variable(sz):
"""Create vector field torch Variable of given size.
:param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D)
:return: returns vector field of size dimxXxYxZ
"""
dim = len(sz)
csz = np.array(sz) # just to make sure it is a numpy array
csz = np.array([dim]+list(csz))
return MyTensor(*(csz.tolist())).normal_(0.,1e-7)
def create_vector_parameter(nr_of_elements):
"""Creates a vector parameters with a specified number of elements.
:param nr_of_elements: number of vector elements
:return: returns the parameter vector
"""
return Parameter(MyTensor(nr_of_elements).normal_(0., 1e-7))
def create_ND_vector_field_parameter_multiN(sz, nrOfI=1,get_field_from_external_network=False):
"""Create vector field torch Parameter of given size.
:param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D)
:param nrOfI: number of images
:return: returns vector field of size nrOfIxdimxXxYxZ
"""
dim = len(sz)
csz = np.array(sz) # just to make sure it is a numpy array
csz = np.array([nrOfI, dim]+list(csz))
if get_field_from_external_network:
tmp = MyTensor(*(csz.tolist())).normal_(0.,1e-7)
tmp.requires_grad = True
else:
tmp = Parameter(MyTensor(*(csz.tolist())).normal_(0.,1e-7))
return tmp
def create_local_filter_weights_parameter_multiN(sz,gaussian_std_weights, nrOfI=1,sched='w_K_w',get_preweight_from_network=False):
"""
Create vector field torch Parameter of given size
:param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D)
:param nrOfI: number of images
:return: returns vector field of size nrOfIxdimxXxYxZ
"""
nr_of_mg_weights = len(gaussian_std_weights)
csz = np.array(sz) # just to make sure it is a numpy array
csz = np.array([nrOfI,nr_of_mg_weights]+list(csz))
weights = torch.empty(*csz)
# set the default
if sched =='w_K_w':
gaussian_std_weights = [torch.sqrt(std_w) for std_w in gaussian_std_weights]
for g in range(nr_of_mg_weights):
weights[:, g, ...] = gaussian_std_weights[g]
tmp = AdaptVal(weights)
if get_preweight_from_network:
tmp.requires_grad = True
else:
tmp = Parameter(tmp)
return tmp
def create_ND_scalar_field_parameter_multiNC(sz, nrOfI=1, nrOfC=1):
"""
Create vector field torch Parameter of given size
:param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D)
:param nrOfI: number of images
:param nrOfC: number of channels
:return: returns vector field of size nrOfIxnrOfCxXxYxZ
"""
csz = np.array(sz) # just to make sure it is a numpy array
csz = np.array([nrOfI,nrOfC]+list(csz))
return Parameter(MyTensor(*(csz.tolist())).normal_(0.,1e-7))
def centered_identity_map_multiN(sz, spacing, dtype='float32'):
"""
Create a centered identity map (shifted so it is centered around 0)
:param sz: size of an image in BxCxXxYxZ format
:param spacing: list with spacing information [sx,sy,sz]
:param dtype: numpy data-type ('float32', 'float64', ...)
:return: returns the identity map
"""
dim = len(sz) - 2
nrOfI = sz[0]
if dim == 1:
id = np.zeros([nrOfI, 1, sz[2]], dtype=dtype)
elif dim == 2:
id = np.zeros([nrOfI, 2, sz[2], sz[3]], dtype=dtype)
elif dim == 3:
id = np.zeros([nrOfI, 3, sz[2], sz[3], sz[4]], dtype=dtype)
else:
raise ValueError('Only dimensions 1-3 are currently supported for the identity map')
for n in range(nrOfI):
id[n, ...] = centered_identity_map(sz[2::], spacing,dtype=dtype)
return id
def identity_map_multiN(sz,spacing,dtype='float32'):
"""
Create an identity map
:param sz: size of an image in BxCxXxYxZ format
:param spacing: list with spacing information [sx,sy,sz]
:param dtype: numpy data-type ('float32', 'float64', ...)
:return: returns the identity map
"""
dim = len(sz)-2
nrOfI = int(sz[0])
if dim == 1:
id = np.zeros([nrOfI,1,sz[2]],dtype=dtype)
elif dim == 2:
id = np.zeros([nrOfI,2,sz[2],sz[3]],dtype=dtype)
elif dim == 3:
id = np.zeros([nrOfI,3,sz[2],sz[3],sz[4]],dtype=dtype)
else:
raise ValueError('Only dimensions 1-3 are currently supported for the identity map')
for n in range(nrOfI):
id[n,...] = identity_map(sz[2::],spacing,dtype=dtype)
return id
def centered_identity_map(sz, spacing, dtype='float32'):
"""
Returns a centered identity map (with 0 in the middle) if the sz is odd
Otherwise shifts everything by 0.5*spacing
:param sz: just the spatial dimensions, i.e., XxYxZ
:param spacing: list with spacing information [sx,sy,sz]
:param dtype: numpy data-type ('float32', 'float64', ...)
:return: returns the identity map of dimension dimxXxYxZ
"""
dim = len(sz)
if dim == 1:
id = np.mgrid[0:sz[0]]
elif dim == 2:
id = np.mgrid[0:sz[0], 0:sz[1]]
elif dim == 3:
id = np.mgrid[0:sz[0], 0:sz[1], 0:sz[2]]
else:
raise ValueError('Only dimensions 1-3 are currently supported for the identity map')
# now get it into range [0,(sz-1)*spacing]^d
id = np.array(id.astype(dtype))
if dim == 1:
id = id.reshape(1, sz[0]) # add a dummy first index
for d in range(dim):
id[d] *= spacing[d]
if sz[d]%2==0:
#even
id[d] -= spacing[d]*(sz[d]//2)
else:
#odd
id[d] -= spacing[d]*((sz[d]+1)//2)
# and now store it in a dim+1 array
if dim == 1:
idnp = np.zeros([1, sz[0]], dtype=dtype)
idnp[0, :] = id[0]
elif dim == 2:
idnp = np.zeros([2, sz[0], sz[1]], dtype=dtype)
idnp[0, :, :] = id[0]
idnp[1, :, :] = id[1]
elif dim == 3:
idnp = np.zeros([3, sz[0], sz[1], sz[2]], dtype=dtype)
idnp[0, :, :, :] = id[0]
idnp[1, :, :, :] = id[1]
idnp[2, :, :, :] = id[2]
else:
raise ValueError('Only dimensions 1-3 are currently supported for the centered identity map')
return idnp
#
# def centered_min_normalized_identity_map(sz, spacing, dtype='float32'):
# """
# Returns a centered identity map (with 0 in the middle) if the sz is odd
# Otherwise shifts everything by 0.5*spacing
#
# :param sz: just the spatial dimensions, i.e., XxYxZ
# :param spacing: list with spacing information [sx,sy,sz]
# :param dtype: numpy data-type ('float32', 'float64', ...)
# :return: returns the identity map of dimension dimxXxYxZ
# """
# dim = len(sz)
# if dim == 1:
# id = np.mgrid[0:sz[0]]
# elif dim == 2:
# id = np.mgrid[0:sz[0], 0:sz[1]]
# elif dim == 3:
# id = np.mgrid[0:sz[0], 0:sz[1], 0:sz[2]]
# else:
# raise ValueError('Only dimensions 1-3 are currently supported for the identity map')
#
# min_spacing = np.min(spacing)
# spacing_ratio = spacing/min_spacing
#
#
# # now get it into range [0,(sz-1)*spacing]^d
# id = np.array(id.astype(dtype))
# if dim == 1:
# id = id.reshape(1, sz[0]) # add a dummy first index
#
# for d in range(dim):
# id[d] *= spacing[d]
# if sz[d]%2==0:
# #even
# id[d] -= spacing[d]*(sz[d]//2)
# else:
# #odd
# id[d] -= spacing[d]*((sz[d]+1)//2)
#
# # and now store it in a dim+1 array and rescale by the ratio
# if dim == 1:
# idnp = np.zeros([1, sz[0]], dtype=dtype)
# idnp[0, :] = id[0] * spacing_ratio[0]
# elif dim == 2:
# idnp = np.zeros([2, sz[0], sz[1]], dtype=dtype)
# idnp[0, :, :] = id[0] * spacing_ratio[0]
# idnp[1, :, :] = id[1] * spacing_ratio[1]
# elif dim == 3:
# idnp = np.zeros([3, sz[0], sz[1], sz[2]], dtype=dtype)
# idnp[0, :, :, :] = id[0] * spacing_ratio[0]
# idnp[1, :, :, :] = id[1] * spacing_ratio[1]
# idnp[2, :, :, :] = id[2] * spacing_ratio[2]
# else:
# raise ValueError('Only dimensions 1-3 are currently supported for the centered identity map')
#
# return idnp
#
# def tranfrom_var_list_into_min_normalized_space(var_list,spacing,do_transform=True):
# if do_transform:
# min_spacing = np.min(spacing)
# spacing_ratio =min_spacing/spacing
# dim = spacing.size
# spacing_ratio_t = AdaptVal(torch.Tensor(spacing_ratio))
# sp_sz = [1]+[dim] +[1]*dim
# spacing_ratio_t = spacing_ratio_t.view(*sp_sz)
# new_var_list = [var*spacing_ratio_t if var is not None else None for var in var_list]
# else:
# new_var_list = var_list
# return new_var_list
# def recover_var_list_from_min_normalized_space(var_list,spacing,do_transform=True):
# if do_transform:
# min_spacing = np.min(spacing)
# spacing_ratio =spacing/min_spacing
# dim = spacing.size
# spacing_ratio_t = AdaptVal(torch.Tensor(spacing_ratio))
# sp_sz = [1]+[dim] +[1]*dim
# spacing_ratio_t = spacing_ratio_t.view(*sp_sz)
# new_var_list = [var*spacing_ratio_t if var is not None else None for var in var_list]
# else:
# new_var_list = var_list
# return new_var_list
#
def identity_map(sz,spacing,dtype='float32'):
"""
Returns an identity map.
:param sz: just the spatial dimensions, i.e., XxYxZ
:param spacing: list with spacing information [sx,sy,sz]
:param dtype: numpy data-type ('float32', 'float64', ...)
:return: returns the identity map of dimension dimxXxYxZ
"""
dim = len(sz)
if dim==1:
id = np.mgrid[0:sz[0]]
elif dim==2:
id = np.mgrid[0:sz[0],0:sz[1]]
elif dim==3:
id = np.mgrid[0:sz[0],0:sz[1],0:sz[2]]
else:
raise ValueError('Only dimensions 1-3 are currently supported for the identity map')
# now get it into range [0,(sz-1)*spacing]^d
id = np.array( id.astype(dtype) )
if dim==1:
id = id.reshape(1,sz[0]) # add a dummy first index
for d in range(dim):
id[d]*=spacing[d]
#id[d]*=2./(sz[d]-1)
#id[d]-=1.
# and now store it in a dim+1 array
if dim==1:
idnp = np.zeros([1, sz[0]], dtype=dtype)
idnp[0,:] = id[0]
elif dim==2:
idnp = np.zeros([2, sz[0], sz[1]], dtype=dtype)
idnp[0,:, :] = id[0]
idnp[1,:, :] = id[1]
elif dim==3:
idnp = np.zeros([3,sz[0], sz[1], sz[2]], dtype=dtype)
idnp[0,:, :, :] = id[0]
idnp[1,:, :, :] = id[1]
idnp[2,:, :, :] = id[2]
else:
raise ValueError('Only dimensions 1-3 are currently supported for the identity map')
return idnp
def omt_boundary_weight_mask(img_sz,spacing,mask_range=5,mask_value=5,smoother_std =0.05):
"""generate a smooth weight mask for the omt """
dim = len(img_sz)
mask_sz = [1,1]+ list(img_sz)
mask = AdaptVal(torch.ones(*mask_sz))*mask_value
if dim ==2:
mask[:,:,mask_range:-mask_range,mask_range:-mask_range]=1
elif dim==3:
mask[:,:,mask_range:-mask_range,mask_range:-mask_range,mask_range:-mask_range ]=1
sm = get_single_gaussian_smoother(smoother_std,img_sz,spacing)
mask = sm.smooth(mask)
return mask.detach()
def momentum_boundary_weight_mask(img_sz,spacing,mask_range=5,smoother_std =0.05,pow=2):
"""generate a smooth weight mask for the omt """
dim = len(img_sz)
mask_sz = [1,1]+ list(img_sz)
mask = AdaptVal(torch.zeros(*mask_sz))
if dim ==2:
mask[:,:,mask_range:-mask_range,mask_range:-mask_range]=1
elif dim==3:
mask[:,:,mask_range:-mask_range,mask_range:-mask_range,mask_range:-mask_range ]=1
sm = get_single_gaussian_smoother(smoother_std,img_sz,spacing)
mask = sm.smooth(mask)
if pow ==2:
mask = mask**2
if pow ==3:
mask = mask*mask*mask
return mask
# def compute_omt_const(stds,param,dim):
# omt_power = param['forward_model']['smoother']['omt_power']
# omt_weight_penalty = param['forward_model']['smoother']['omt_weight_penalty']
# min_std = torch.min(stds)
# max_std = torch.max(stds)
# omt_const = torch.abs(torch.log(max_std/stds))**omt_power
# omt_const = omt_const/(torch.abs(torch.log(max_std / min_std)) ** omt_power)
# omt_const = omt_const*omt_weight_penalty/(EV.reg_factor_in_mermaid*2)
# sz = [1]+ [len(stds)] +[1]*(dim+1)
# return omt_const.view(*sz)
def t2np(v):
"""
Takes a torch array and returns it as a numpy array on the cpu
:param v: torch array
:return: numpy array
"""
return (v.detach()).cpu().numpy()
def cxyz_to_xyzc( v ):
"""
    Permutes a channel-first torch array (BxCxXxYxZ) into channel-last layout
    :param v: torch array in channel-first format
    :return: torch array in channel-last format
"""
dim = len(v.shape)-2
if dim ==2:
v = v.permute(0,2,3,1)
if dim ==3:
v = v.permute(0,2,3,4,1)
return v
def checkNan(x):
""""
input should be list of Variable
"""
return [len(np.argwhere(np.isnan(elem.detach().cpu().numpy()))) for elem in x]
def get_resampled_image(I, spacing, desiredSize, spline_order=1, zero_boundary=False, identity_map=None):
"""
:param I: B C X Y Z
:param spacing: spx spy spz
:param desiredSize: B C X Y Z
:param spline_order:
:param zero_boundary:
:param identity_map:
:return:
"""
if spacing is None:
img_sz = I.shape[2:]
spacing = 1. / (np.array(img_sz) - 1)
if identity_map is not None: # todo will remove, currently fix for symmetric training
if I.shape[0] != identity_map.shape[0]:
n_batch = I.shape[0]
desiredSize = desiredSize.copy()
desiredSize[0] = n_batch
identity_map = identity_map[:n_batch]
resampled, new_spacing = resample_image(I, spacing, desiredSize, spline_order=spline_order,
zero_boundary=zero_boundary, identity_map=identity_map)
return resampled
def resample_image(I, spacing, desiredSize, spline_order=1, zero_boundary=False, identity_map=None):
"""
Resample an image to a given desired size
:param I: Input image (expected to be of BxCxXxYxZ format)
:param spacing: array describing the spatial spacing
:param desiredSize: array for the desired size (excluding B and C, i.e, 1 entry for 1D, 2 for 2D, and 3 for 3D)
:return: returns a tuple: the downsampled image, the new spacing after downsampling
"""
desiredSize = desiredSize[2:]
is_numpy = False
if not isinstance(I, torch.Tensor):
I = torch.Tensor(I)
is_numpy = True
sz = np.array(list(I.size()))
# check that the batch size and the number of channels is the same
nrOfI = sz[0]
nrOfC = sz[1]
desiredSizeNC = np.array([nrOfI, nrOfC] + list(desiredSize))
    newspacing = spacing * ((sz[2::].astype('float') - 1.) /
                            (desiredSizeNC[2::].astype('float') - 1.))
if identity_map is not None:
idDes = identity_map
else:
idDes = AdaptVal(torch.from_numpy(identity_map_multiN(desiredSizeNC, newspacing)))
# now use this map for resampling
ID = compute_warped_image_multiNC(I, idDes, newspacing, spline_order, zero_boundary)
return ID if not is_numpy else ID.numpy(), newspacing
def get_res_size_from_size(sz, factor):
"""
Returns the corresponding low-res size from a (high-res) sz
:param sz: size (high-res)
:param factor: low-res factor (needs to be <1)
:return: low res size
"""
if (factor is None):
print('WARNING: Could not compute low_res_size as factor was ' + str(factor))
return sz
else:
lowResSize = np.array(sz)
if not isinstance(factor, list):
lowResSize[2::] = (np.ceil((np.array(sz[2:]) * factor))).astype('int16')
else:
lowResSize[2::] = (np.ceil((np.array(sz[2:]) * np.array(factor)))).astype('int16')
if lowResSize[-1] % 2 != 0:
lowResSize[-1] -= 1
print(
'\n\nWARNING: forcing last dimension to be even: fix properly in the Fourier transform later!\n\n')
return lowResSize
def get_res_spacing_from_spacing(spacing, sz, lowResSize):
"""
Computes spacing for the low-res parameterization from image spacing
:param spacing: image spacing
:param sz: size of image
:param lowResSize: size of low re parameterization
:return: returns spacing of low res parameterization
"""
# todo: check that this is the correct way of doing it
return spacing * (np.array(sz[2::]) - 1) / (np.array(lowResSize[2::]) - 1)
########################################## Adaptive Net ###################################################3
def space_normal(tensors, std=0.1):
"""
space normalize for the net kernel
    :param tensors: tensors to initialize
    :param std: standard deviation of the spatial Gaussian
:return:
"""
if isinstance(tensors, Variable):
space_normal(tensors.data, std=std)
return tensors
for n in range(tensors.size()[0]):
for c in range(tensors.size()[1]):
dim = tensors[n][c].dim()
sz = tensors[n][c].size()
mus = np.zeros(dim)
stds = std * np.ones(dim)
print('WARNING: What should the spacing be here? Needed for new identity map code')
raise ValueError('Double check the spacing here before running this code')
spacing = np.ones(dim)
centered_id = centered_identity_map(sz,spacing)
g = compute_normalized_gaussian(centered_id, mus, stds)
tensors[n,c] = torch.from_numpy(g)
| 34.805304 | 130 | 0.602675 |
b96fca03cef0164231c4fa09bc83db6c5b2aa7db
| 1,093 |
py
|
Python
|
examples/io/plot_read_evoked.py
|
fmamashli/mne-python
|
52f064415e7c9fa8fe243d22108dcdf3d86505b9
|
[
"BSD-3-Clause"
] | 3 |
2021-01-04T08:45:56.000Z
|
2021-05-19T12:25:59.000Z
|
examples/io/plot_read_evoked.py
|
fmamashli/mne-python
|
52f064415e7c9fa8fe243d22108dcdf3d86505b9
|
[
"BSD-3-Clause"
] | 28 |
2020-05-07T00:58:34.000Z
|
2020-08-29T23:02:17.000Z
|
examples/io/plot_read_evoked.py
|
fmamashli/mne-python
|
52f064415e7c9fa8fe243d22108dcdf3d86505b9
|
[
"BSD-3-Clause"
] | 3 |
2019-01-28T13:48:00.000Z
|
2019-07-10T16:02:11.000Z
|
"""
==================================
Reading and writing an evoked file
==================================
This script shows how to read and write evoked datasets.
"""
# Author: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
from mne import read_evokeds
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
# Reading
condition = 'Left Auditory'
evoked = read_evokeds(fname, condition=condition, baseline=(None, 0),
proj=True)
###############################################################################
# Show result as a butterfly plot:
# By using exclude=[] bad channels are not excluded and are shown in red
evoked.plot(exclude=[], time_unit='s')
# Show result as a 2D image (x: time, y: channels, color: amplitude)
evoked.plot_image(exclude=[], time_unit='s')
###############################################################################
# Use :func:`mne.Evoked.save` or :func:`mne.write_evokeds` to write the evoked
# responses to a file.
| 29.540541 | 79 | 0.569076 |
b970d836b7397be4bc4d63762c0eec8adfb90a91
| 611 |
py
|
Python
|
source/monkeyPatches/__init__.py
|
lukaszgo1/nvda
|
38a2efd1e1bff7db4471cb7afa03ab1590b7adef
|
[
"bzip2-1.0.6"
] | 19 |
2016-05-11T05:15:31.000Z
|
2022-03-17T12:40:10.000Z
|
source/monkeyPatches/__init__.py
|
lukaszgo1/nvda
|
38a2efd1e1bff7db4471cb7afa03ab1590b7adef
|
[
"bzip2-1.0.6"
] | 307 |
2015-08-27T11:22:33.000Z
|
2022-03-29T10:43:34.000Z
|
source/monkeyPatches/__init__.py
|
lukaszgo1/nvda
|
38a2efd1e1bff7db4471cb7afa03ab1590b7adef
|
[
"bzip2-1.0.6"
] | 14 |
2016-03-28T07:31:49.000Z
|
2022-03-30T04:56:35.000Z
|
# A part of NonVisual Desktop Access (NVDA)
# Copyright (C) 2021 NV Access Limited
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
from . import wxMonkeyPatches
applyWxMonkeyPatches = wxMonkeyPatches.apply
| 30.55 | 86 | 0.761047 |
b970f8ccb56e24dd8d65fd92869bbf7790f6e611
| 5,298 |
py
|
Python
|
yt_dlp/extractor/ninenow.py
|
nxtreaming/yt-dlp
|
385ffb467b2285e85a2a5495b90314ba1f8e0700
|
[
"Unlicense"
] | 11 |
2022-01-06T22:09:50.000Z
|
2022-03-12T22:26:22.000Z
|
yt_dlp/extractor/ninenow.py
|
nxtreaming/yt-dlp
|
385ffb467b2285e85a2a5495b90314ba1f8e0700
|
[
"Unlicense"
] | 4 |
2022-02-25T08:20:18.000Z
|
2022-03-17T16:16:20.000Z
|
yt_dlp/extractor/ninenow.py
|
nxtreaming/yt-dlp
|
385ffb467b2285e85a2a5495b90314ba1f8e0700
|
[
"Unlicense"
] | 3 |
2022-02-19T08:59:13.000Z
|
2022-03-06T16:11:21.000Z
|
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
ExtractorError,
int_or_none,
float_or_none,
smuggle_url,
str_or_none,
try_get,
unified_strdate,
unified_timestamp,
)
| 43.073171 | 146 | 0.575123 |
b97242dec299cf214174fe1ceb1c2d4c7e16b595
| 4,783 |
py
|
Python
|
apex/fp16_utils/fused_weight_norm.py
|
mcarilli/apex
|
766e36c9e10fe4efd847c3f77c3b38974c89eab1
|
[
"BSD-3-Clause"
] | 1 |
2020-05-05T01:37:42.000Z
|
2020-05-05T01:37:42.000Z
|
apex/fp16_utils/fused_weight_norm.py
|
mcarilli/apex
|
766e36c9e10fe4efd847c3f77c3b38974c89eab1
|
[
"BSD-3-Clause"
] | 1 |
2018-06-24T18:56:56.000Z
|
2018-06-24T18:56:56.000Z
|
apex/fp16_utils/fused_weight_norm.py
|
mcarilli/apex
|
766e36c9e10fe4efd847c3f77c3b38974c89eab1
|
[
"BSD-3-Clause"
] | 1 |
2020-07-03T00:37:20.000Z
|
2020-07-03T00:37:20.000Z
|
import torch
from torch.autograd import Variable
from torch.autograd.function import Function, once_differentiable
import apex_C
| 41.95614 | 175 | 0.604223 |
b9724b70833f729e47c38eb018294247250b7282
| 23,312 |
py
|
Python
|
bzt/modules/grinder.py
|
gerardorf/taurus
|
610872b4cf70af31d79a346db1aebd3466310d77
|
[
"Apache-2.0"
] | 1 |
2019-01-15T17:23:58.000Z
|
2019-01-15T17:23:58.000Z
|
bzt/modules/grinder.py
|
gerardorf/taurus
|
610872b4cf70af31d79a346db1aebd3466310d77
|
[
"Apache-2.0"
] | null | null | null |
bzt/modules/grinder.py
|
gerardorf/taurus
|
610872b4cf70af31d79a346db1aebd3466310d77
|
[
"Apache-2.0"
] | null | null | null |
"""
Module holds all stuff regarding Grinder tool usage
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import re
import time
from bzt import TaurusConfigError, ToolError
from bzt.engine import ScenarioExecutor, FileLister, HavingInstallableTools, SelfDiagnosable
from bzt.modules.aggregator import ConsolidatingAggregator, ResultsReader
from bzt.modules.console import WidgetProvider, ExecutorWidget
from bzt.modules.java import TaurusJavaHelper
from bzt.requests_model import HTTPRequest
from bzt.six import iteritems
from bzt.utils import MirrorsManager, dehumanize_time, get_full_path, PythonGenerator, CALL_PROBLEMS
from bzt.utils import unzip, RequiredTool, JavaVM, shutdown_process, TclLibrary, FileReader, RESOURCES_DIR
| 40.82662 | 119 | 0.618823 |
b972e358701b6b26d8d3c931dfecc57580620c15
| 467 |
py
|
Python
|
test/Fortran/fixture/myfortran_flags.py
|
moroten/scons
|
20927b42ed4f0cb87f51287fa3b4b6cf915afcf8
|
[
"MIT"
] | 1,403 |
2017-11-23T14:24:01.000Z
|
2022-03-30T20:59:39.000Z
|
test/Fortran/fixture/myfortran_flags.py
|
moroten/scons
|
20927b42ed4f0cb87f51287fa3b4b6cf915afcf8
|
[
"MIT"
] | 3,708 |
2017-11-27T13:47:12.000Z
|
2022-03-29T17:21:17.000Z
|
test/Fortran/fixture/myfortran_flags.py
|
moroten/scons
|
20927b42ed4f0cb87f51287fa3b4b6cf915afcf8
|
[
"MIT"
] | 281 |
2017-12-01T23:48:38.000Z
|
2022-03-31T15:25:44.000Z
|
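# Fake Fortran "compiler" used as a SCons test fixture: it writes the flags it
# was invoked with (except -f/-K and -o) as the first output line, then copies
# the input file through while dropping lines that begin with '#' followed by
# the marker given as argv[1].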
import getopt
import sys
comment = ('#' + sys.argv[1]).encode()
opts, args = getopt.getopt(sys.argv[2:], 'cf:o:xy')
optstring = ''
length = len(comment)
for opt, arg in opts:
if opt == '-o': out = arg
elif opt not in ('-f', '-K'): optstring = optstring + ' ' + opt
infile = open(args[0], 'rb')
outfile = open(out, 'wb')
outfile.write((optstring + "\n").encode())
for l in infile.readlines():
if l[:length] != comment:
outfile.write(l)
sys.exit(0)
| 27.470588 | 67 | 0.601713 |
b9736fc25869ac44481082e255dc93e0f52aa441
| 9,015 |
py
|
Python
|
zen_knit/organizer/__init__.py
|
Zen-Reportz/zen_knit
|
104c2693d2cc61520657131da769f5d59d2df8e9
|
[
"MIT"
] | 30 |
2021-12-25T15:39:42.000Z
|
2022-02-25T04:53:44.000Z
|
zen_knit/organizer/__init__.py
|
Zen-Reportz/zen_knit
|
104c2693d2cc61520657131da769f5d59d2df8e9
|
[
"MIT"
] | 11 |
2022-01-02T22:10:07.000Z
|
2022-02-02T00:56:33.000Z
|
zen_knit/organizer/__init__.py
|
Zen-Reportz/zen_knit
|
104c2693d2cc61520657131da769f5d59d2df8e9
|
[
"MIT"
] | 2 |
2022-01-27T13:22:46.000Z
|
2022-01-30T05:01:59.000Z
|
import io
import os
import base64
from pathlib import Path
from nbconvert import filters
from pygments.formatters.latex import LatexFormatter
from zen_knit import formattor
from zen_knit.data_types import ChunkOption, ExecutedData, OrganizedChunk, OrganizedData
from zen_knit.formattor.html_formatter import HTMLFormatter
mime_extensions = {"image/png" : "png",
"image/jpg" : "jpg"}
| 37.5625 | 139 | 0.533888 |
b974558759b358f82c2d72d79bab9c7dc3e35a76
| 12,467 |
py
|
Python
|
qibullet/robot_virtual.py
|
mcaniot/qibullet
|
9c5e1b319a18dd289263eb82f9d7303429bcbe21
|
[
"Apache-2.0"
] | null | null | null |
qibullet/robot_virtual.py
|
mcaniot/qibullet
|
9c5e1b319a18dd289263eb82f9d7303429bcbe21
|
[
"Apache-2.0"
] | null | null | null |
qibullet/robot_virtual.py
|
mcaniot/qibullet
|
9c5e1b319a18dd289263eb82f9d7303429bcbe21
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
import sys
import pybullet
from qibullet.camera import *
from qibullet.link import Link
from qibullet.joint import Joint
IS_VERSION_PYTHON_3 = sys.version_info[0] >= 3
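

class RobotVirtual:
    """Minimal reconstruction sketch: the original class body was truncated in
    this snippet. Names follow the usual qiBullet conventions but are
    assumptions, not the verified implementation."""

    def __init__(self, description_file):
        self.description_file = description_file
        self.joint_dict = dict()
        self.link_dict = dict()
        self.camera_dict = dict()

    def loadRobot(self, translation, quaternion, physicsClientId=0):
        # pybullet.loadURDF takes the URDF path, a base position, a base
        # orientation quaternion and the physics client id.
        self.physics_client = physicsClientId
        self.robot_model = pybullet.loadURDF(
            self.description_file,
            translation,
            quaternion,
            physicsClientId=self.physics_client)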
| 35.31728 | 79 | 0.593006 |
b974d5d1bd35654f50415a8f7c66f3fb9a0316ab
| 704 |
py
|
Python
|
tests/test_formatter.py
|
hbraux/kafkacli
|
5f7ed23150932b66b484fb43dd6210b6c0968776
|
[
"MIT"
] | null | null | null |
tests/test_formatter.py
|
hbraux/kafkacli
|
5f7ed23150932b66b484fb43dd6210b6c0968776
|
[
"MIT"
] | null | null | null |
tests/test_formatter.py
|
hbraux/kafkacli
|
5f7ed23150932b66b484fb43dd6210b6c0968776
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pytest
import json
from kafkacli.formatter import Formatter
sampleJson = json.loads('{"a":"s", "b":1}')
avg_line_length: 24.275862 | max_line_length: 62 | alphanum_fraction: 0.640625

hexsha: b9750e636d7a3d49a65558af431533fc2e745edb | size: 187 | ext: py | lang: Python
path: src/jobs/forms.py | licenses: ["MIT"]
stars: 20 via arc198/DJANGO-JOB-SITE @ d9547c4ee85751677ba6458380b609973c3b4a8d (2018-05-04T18:42:35.000Z to 2021-03-18T07:15:12.000Z)
issues: 5 via fleepgeek/django-jobsite @ d9547c4ee85751677ba6458380b609973c3b4a8d (2020-02-11T22:22:33.000Z to 2021-06-10T20:18:05.000Z)
forks: 8 via arc198/DJANGO-JOB-SITE @ d9547c4ee85751677ba6458380b609973c3b4a8d (2018-05-04T19:03:23.000Z to 2020-09-23T00:24:46.000Z)
from django import forms
from .models import Application
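# The form definitions were elided from this copy. Given the imports, the
# file almost certainly declares a ModelForm over Application; a minimal
# sketch of that shape (the field list is an assumption):
class ApplicationForm(forms.ModelForm):
    class Meta:
        model = Application
        fields = '__all__'  # assumed; the original may enumerate fields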
avg_line_length: 23.375 | max_line_length: 44 | alphanum_fraction: 0.700535

hexsha: b975e6fb7fb3fa8849afb4e4ce41618c2ce94c1b | size: 451 | ext: py | lang: Python
path: src/test/tests/unit/protocol.py | repo: ylee88/visit @ 8e0920996d84fef70a7014b0d770360918d849d5 | licenses: ["BSD-3-Clause"]
stars: 1 (2022-01-27T23:52:04.000Z to 2022-01-27T23:52:04.000Z) | issues: null | forks: null
# ----------------------------------------------------------------------------
# CLASSES: nightly
#
#  Test Case:  protocol.py
#
#  Tests:      visitprotocol unit test
#
# Mark C. Miller, Tue Jan 11 10:19:23 PST 2011
# ----------------------------------------------------------------------------
tapp = visit_bin_path("visitprotocol")
res = sexe(tapp,ret_output=True)
if res["return_code"] == 0:
    excode = 111
else:
    excode = 113
Exit(excode)
avg_line_length: 26.529412 | max_line_length: 78 | alphanum_fraction: 0.432373

hexsha: b97645cb1bc48b7d30c6b37e139952912087b791 | size: 3348 | ext: py | lang: Python
path: pyMazeBacktrack.py | repo: Dozed12/pyMazeBacktrack @ aaa2a902fdca17dca6e2ee00e672b6bb38da5639 | licenses: ["MIT"]
stars: 2 (2019-02-22T10:35:25.000Z to 2020-08-11T01:25:12.000Z) | issues: null | forks: null
import libtcodpy as libtcod
from random import randint
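# The original file defines the helper functions Possible() and CheckDir()
# used by the main loop below; they were elided from this copy. These are
# hypothetical reconstructions inferred from the call sites: the maze is a
# grid of cells on odd tile coordinates separated by wall tiles, and moves
# jump two tiles (1 = up, 2 = right, 3 = down, 4 = left).
def CheckDir(x, y, size, Dir, table):
    # Return 1 if the neighbouring cell in direction Dir is inside the
    # grid and still unvisited (black), 0 otherwise.
    if Dir == 1 and y - 2 > 0 and table[x][y - 2] == black:
        return 1
    if Dir == 2 and x + 2 < size and table[x + 2][y] == black:
        return 1
    if Dir == 3 and y + 2 < size and table[x][y + 2] == black:
        return 1
    if Dir == 4 and x - 2 > 0 and table[x - 2][y] == black:
        return 1
    return 0

def Possible(x, y, table, size):
    # Return 1 if any of the four directions leads to an unvisited cell.
    for Dir in range(1, 5):
        if CheckDir(x, y, size, Dir, table):
            return 1
    return 0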
nSquares = 30
nTiles = nSquares * 2 + 1

SCREEN_WIDTH = nTiles
SCREEN_HEIGHT = nTiles

libtcod.console_set_custom_font("cp437_12x12.png", libtcod.FONT_LAYOUT_ASCII_INROW)
libtcod.console_init_root(SCREEN_WIDTH, SCREEN_HEIGHT, 'pyMazeBacktrack', False, libtcod.RENDERER_OPENGL)

black = libtcod.black
white = libtcod.white

Table = [[0 for i in range(nTiles)]for i in range(nTiles)]

for x in range(nTiles):
    for y in range(nTiles):
        Table[x][y] = black
        libtcod.console_put_char_ex(None,x,y,219,Table[x][y],libtcod.white)
libtcod.console_flush()

Memory = []

CurrX = 1
CurrY = 1
Table[CurrX][CurrY] = white

end = 0

while end == 0:
    while Possible(CurrX,CurrY,Table,nTiles):
        Dir = randint(1,4)
        while CheckDir(CurrX,CurrY,nTiles,Dir,Table) == 0:
            Dir = randint(1,4)
        if Dir == 1:
            Table[CurrX][CurrY - 1] = white
            CurrY -= 2
            Table[CurrX][CurrY] = white
        elif Dir == 2:
            Table[CurrX + 1][CurrY] = white
            CurrX += 2
            Table[CurrX][CurrY] = white
        elif Dir == 3:
            Table[CurrX][CurrY + 1] = white
            CurrY += 2
            Table[CurrX][CurrY] = white
        elif Dir == 4:
            Table[CurrX - 1][CurrY] = white
            CurrX -= 2
            Table[CurrX][CurrY] = white
        Memory.append(Dir)
        #print
        for x in range(nTiles):
            for y in range(nTiles):
                libtcod.console_put_char_ex(None,x,y,219,Table[x][y],libtcod.white)
        libtcod.console_flush()
    while Possible(CurrX,CurrY,Table,nTiles) == 0:
        MemorySize = len(Memory)
        Dir = Memory[MemorySize-1]
        if Dir == 1:
            CurrY += 2
        elif Dir == 2:
            CurrX -= 2
        elif Dir == 3:
            CurrY -= 2
        elif Dir == 4:
            CurrX += 2
        del Memory[MemorySize-1]
        if CurrX == 1 and CurrY == 1:
            end = 1
            break
        #print
        for x in range(nTiles):
            for y in range(nTiles):
                libtcod.console_put_char_ex(None,x,y,219,Table[x][y],libtcod.white)
        libtcod.console_flush()

libtcod.console_wait_for_keypress(True)
avg_line_length: 20.168675 | max_line_length: 106 | alphanum_fraction: 0.496416

hexsha: b978586a0e39802db346feaf3a0aa1c91c336f05 | size: 3011 | ext: py | lang: Python
path: source/tests/test_resources.py | repo: aws-solutions/maintaining-personalized-experiences-with-machine-learning @ 3f6f1b0069df4828eae9b0835b717500189e4f71 | licenses: ["Apache-2.0"]
stars: 6 (2021-09-23T16:33:24.000Z to 2022-03-31T11:45:13.000Z) | issues: 4 (2021-09-24T21:34:14.000Z to 2022-01-27T22:11:08.000Z) | forks: 9 (2021-09-23T23:24:46.000Z to 2022-02-12T04:53:16.000Z)
# ######################################################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed #
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for #
# the specific language governing permissions and limitations under the License. #
# ######################################################################################################################
import pytest
from shared.resource import (
DatasetGroup,
Schema,
Dataset,
DatasetImportJob,
Solution,
SolutionVersion,
Campaign,
EventTracker,
BatchSegmentJob,
BatchInferenceJob,
)
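# The actual test functions were elided from this copy. A trivial placeholder
# in the same pytest style, exercising only the imported names; the real
# suite's assertions are unknown and not reproduced here.
@pytest.mark.parametrize(
    "resource",
    [DatasetGroup, Schema, Dataset, DatasetImportJob, Solution,
     SolutionVersion, Campaign, EventTracker, BatchSegmentJob,
     BatchInferenceJob],
)
def test_resource_importable(resource):
    # Smoke check only: each symbol is a class exported by shared.resource.
    assert isinstance(resource, type)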
avg_line_length: 42.408451 | max_line_length: 120 | alphanum_fraction: 0.454334

hexsha: b9787b11fbcd5779df09a2f0f27e44e75ad576ac | size: 1870 | ext: py | lang: Python
path: app_venv/Lib/site-packages/phonenumbers/data/region_AG.py | repo: orlandofv/sianna @ f07dd6dbc62a9604f31ab800e482e62f14fba766 | licenses: ["MIT"]
stars: null | issues: null | forks: null
"""Auto-generated file, do not edit by hand. AG metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_AG = PhoneMetadata(id='AG', country_code=1, international_prefix='011',
general_desc=PhoneNumberDesc(national_number_pattern='(?:268|[58]\\d\\d|900)\\d{7}', possible_length=(10,), possible_length_local_only=(7,)),
fixed_line=PhoneNumberDesc(national_number_pattern='268(?:4(?:6[0-38]|84)|56[0-2])\\d{4}', example_number='2684601234', possible_length=(10,), possible_length_local_only=(7,)),
mobile=PhoneNumberDesc(national_number_pattern='268(?:464|7(?:1[3-9]|[28]\\d|3[0246]|64|7[0-689]))\\d{4}', example_number='2684641234', possible_length=(10,), possible_length_local_only=(7,)),
toll_free=PhoneNumberDesc(national_number_pattern='8(?:00|33|44|55|66|77|88)[2-9]\\d{6}', example_number='8002123456', possible_length=(10,)),
premium_rate=PhoneNumberDesc(national_number_pattern='900[2-9]\\d{6}', example_number='9002123456', possible_length=(10,)),
personal_number=PhoneNumberDesc(national_number_pattern='52(?:355[0-46-9]|4(?:5(?:2[024-9]|5[0-46-9])|60[1-9]|9(?:2[0-5]|49)))\\d{4}|52(?:3(?:[2-46-9][02-9]|5[02-46-9])|4(?:[2-478][02-9]|5[034]|6[2-9]|9[05-9])|7[2-4]\\d)\\d{5}|52[34][2-9]1[02-9]\\d{4}|5(?:00|2[1256]|33|44|66|77|88)[2-9]\\d{6}', example_number='5002345678', possible_length=(10,)),
voip=PhoneNumberDesc(national_number_pattern='26848[01]\\d{4}', example_number='2684801234', possible_length=(10,), possible_length_local_only=(7,)),
pager=PhoneNumberDesc(national_number_pattern='26840[69]\\d{4}', example_number='2684061234', possible_length=(10,), possible_length_local_only=(7,)),
national_prefix='1',
national_prefix_for_parsing='1|([457]\\d{6})$',
national_prefix_transform_rule='268\\1',
leading_digits='268',
mobile_number_portable_region=True)
avg_line_length: 103.888889 | max_line_length: 352 | alphanum_fraction: 0.711765

hexsha: b97884a1b2bbd76cce01bb9efe2744d31832af25 | size: 2182 | ext: py | lang: Python
path: gradefiles-send.py | repo: lapets/bu-gsubmit-grading @ 69c40a763908be1c954dce3e5e5aab854ac379ff | licenses: ["MIT"]
stars: 3 (2016-10-03T15:29:20.000Z to 2019-06-28T17:33:06.000Z) | issues: null | forks: null
#####################################################################
##
## gradefiles-send.py
##
## Script to send grade files by email to enrolled students; the
## input grade file names should correspond to the user names of
## the students.
##
##
from email.mime.text import MIMEText # For creating a message string.
from subprocess import Popen, PIPE # For sending email on linux.
import sys # For command line arguments.
import os # For commands and file manipulation (walk, path, system).
#####################################################################
## Sending a simple email message.
##
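## The send() helper invoked below was elided from this copy of the script;
## this is a hypothetical reconstruction based on its call site and on the
## MIMEText/Popen imports above (the address domain and subject format are
## assumptions).
def send(txt, courseNumber, task, sender, targets):
    msg = MIMEText(txt)
    msg['Subject'] = 'CS ' + courseNumber + ' ' + task + ' grade file'
    msg['From'] = sender + '@bu.edu'
    msg['To'] = ', '.join([target + '@bu.edu' for target in targets])
    p = Popen(['/usr/sbin/sendmail', '-t'], stdin=PIPE)
    p.communicate(msg.as_string().encode())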
#####################################################################
## Process the command line parameters.
##
if len(sys.argv) == 6\
   and (int(sys.argv[1][0:3]) in range(100,1000))\
   and sys.argv[2] in ['Fall', 'Spring']\
   and int(sys.argv[3]) in range(2000,2100):
    courseNumber = sys.argv[1] # Accepts course names like "591 X1."
    season = sys.argv[2]
    year = sys.argv[3]
    task = sys.argv[4]
    sender = sys.argv[5]
else:
    print('\n Usage:\n\n % python gradefiles-send.py <###> <Fall|Spring> <YYYY> <task> <sender-username>\n')
    exit()

#####################################################################
## Check for list of files.
##
if not os.path.exists('./data'):
    print('No folder "data" containing grade files found. Exiting.')
    exit()

#####################################################################
## Send the grade files.
##
for curdir, dirs, files in os.walk('./data/'):
    for file in files:
        txt = open('./data/'+file, 'r').read()
        targets = file.split('.')[0].split("_")
        send(txt, courseNumber, task, sender, targets)
        print('Sent grade file to ' + str(targets) + '.')

#eof
avg_line_length: 33.569231 | max_line_length: 112 | alphanum_fraction: 0.519707

hexsha: b9789c0f2981942a54633089abdf3245b58a73a3 | size: 1227 | ext: py | lang: Python
path: Publisher/PGGAN-1024 trained on CelebaHQ/2-exporter.py | repo: GalAster/16 @ 47560a2132fbe4dda35a35dedfd7d8e6a8acc35a | licenses: ["Unlicense"]
stars: 3 (2019-10-03T01:51:38.000Z to 2019-10-04T16:15:43.000Z) | issues: null | forks: 1 (2020-03-17T12:58:52.000Z to 2020-03-17T12:58:52.000Z)
import os
import pickle
import tensorflow as tf
import wolframclient.serializers as wxf
name = 'karras2018iclr-celebahq-1024x1024'
file = open(name + '.pkl', 'rb')
sess = tf.InteractiveSession()
G, D, Gs = pickle.load(file)
saver = tf.train.Saver()
save_path = "./target/" + name + "/"
model_name = 'model'
if not os.path.exists(save_path):
    os.makedirs(save_path)
save_path_full = os.path.join(save_path, model_name)
saver.save(sess, save_path_full)
ckpt = tf.train.get_checkpoint_state(save_path)
reader = tf.train.NewCheckpointReader(ckpt.model_checkpoint_path)
all_variables = list(reader.get_variable_to_shape_map().keys())
npy = dict(zip(all_variables, map(reader.get_tensor, all_variables)))
wxf.export(npy, name + '.wxf', target_format='wxf')
# Save as protobuf
with tf.Session() as sess:
    tf.global_variables_initializer().run()  # TF1 replacement for the deprecated initialize_all_variables()
    output_graph_def = tf.graph_util.convert_variables_to_constants(
        sess=sess,
        input_graph_def=sess.graph_def,
        # output_node_names=['G_paper_1/images_out']
        output_node_names=['G_paper_1/ToRGB_lod0/add']
    )
    with tf.gfile.GFile("./target/" + name + ".pb", "wb") as file:
        file.write(output_graph_def.SerializeToString())
avg_line_length: 34.083333 | max_line_length: 74 | alphanum_fraction: 0.726976

hexsha: b978dfcb152bc099b2de54896ed9a54dfbc29639 | size: 6890 | ext: py | lang: Python
path: src/moveGoogle.py | licenses: ["MIT"]
stars: 25 via Quanta-Robotics/Robot-Blueberry @ 7b7e77e09ac5e9ec5afd947e0db1ecc8773e56da (2021-06-08T07:09:30.000Z to 2021-12-30T06:28:35.000Z)
issues: 2 via ICT-CoU/Robot-Blueberry @ d19fd1be037df9d67de64df57a87006d74cd6c43 (2021-05-23T12:54:51.000Z to 2021-06-07T17:47:56.000Z)
forks: 14 via ICT-CoU/Robot-Blueberry @ d19fd1be037df9d67de64df57a87006d74cd6c43 (2021-06-08T13:02:28.000Z to 2021-12-30T20:07:18.000Z)
#!/usr/bin/env python
import os
import os.path
import yaml
import time
import random
import multiprocessing
import RPi.GPIO as GPIO
from talk import say
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
from adafruit_servokit import ServoKit
Motor1 = {'EN': 27, 'input1': 19, 'input2': 16}
Motor2 = {'EN': 22, 'input1': 26, 'input2': 20}
for x in Motor1:
    GPIO.setup(Motor1[x], GPIO.OUT)
    GPIO.setup(Motor2[x], GPIO.OUT)
EN1 = GPIO.PWM(Motor1['EN'], 100)
EN2 = GPIO.PWM(Motor2['EN'], 100)
EN1.start(0)
EN2.start(0)
hand = ServoKit(channels=16)
ROOT_PATH = os.path.realpath(os.path.join(__file__, '..', '..'))
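# readYaml() and writeYaml() are defined in the original file but were elided
# here; these are hypothetical reconstructions. The primary configuration
# file name is an assumption inferred from the configurationBackUp.yaml
# fallback used below.
def readYaml():
    try:
        with open('{}/src/configuration.yaml'.format(ROOT_PATH), 'r', encoding='utf8') as conf:
            return yaml.load(conf, Loader=yaml.FullLoader)
    except (IOError, yaml.YAMLError):
        return None

def writeYaml(data):
    with open('{}/src/configuration.yaml'.format(ROOT_PATH), 'w', encoding='utf8') as conf:
        yaml.dump(data, conf)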
servo = readYaml()
if servo == None:
    with open('{}/src/configurationBackUp.yaml'.format(ROOT_PATH),'r+', encoding='utf8') as conf:
        servoBackUp = yaml.load(conf, Loader=yaml.FullLoader)
    writeYaml(servoBackUp)
    servo = readYaml()
if servo == None:
    print('close')
    exit()
Initial = servo['Initial_Position']['I2C']
Current = servo['Current_Position']['I2C']
InitialGpio = servo['Initial_Position']['Gpio']
CurrentGpio = servo['Current_Position']['Gpio']
GpioPin = servo['Pin']['Gpio']
for i in range(0,6):
    GPIO.setup(GpioPin[i], GPIO.OUT)

Servo = []
for i in range(0,6):
    Servo.append(GPIO.PWM(GpioPin[i],50))
    Servo[i].start(0)
avg_line_length: 25.330882 | max_line_length: 154 | alphanum_fraction: 0.560377

hexsha: b978fbbcd4002601ca1e2723cae4385002e671d8 | size: 2063 | ext: py | lang: Python
path: src/onegov/translator_directory/models/language.py | repo: politbuero-kampagnen/onegov-cloud @ 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | licenses: ["MIT"]
stars: null | issues: null | forks: null
from uuid import uuid4
from sqlalchemy import Index, Column, Text, Table, ForeignKey
from sqlalchemy.orm import object_session
from onegov.core.orm import Base
from onegov.core.orm.types import UUID
spoken_association_table = Table(
'spoken_lang_association',
Base.metadata,
Column(
'translator_id',
UUID,
ForeignKey('translators.id'),
nullable=False),
Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False)
)
written_association_table = Table(
'written_lang_association',
Base.metadata,
Column(
'translator_id',
UUID,
ForeignKey('translators.id'),
nullable=False),
Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False)
)
mother_tongue_association_table = Table(
'mother_tongue_association',
Base.metadata,
Column(
'translator_id',
UUID,
ForeignKey('translators.id'),
nullable=False),
Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False)
)
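# The Language model itself was elided from this extract. A minimal sketch of
# the shape implied by the association tables above and the imports at the
# top (uuid4, Text, Index, object_session are otherwise unused here);
# everything beyond the id/name columns is an assumption.
class Language(Base):
    __tablename__ = 'languages'

    id = Column(UUID, primary_key=True, default=uuid4)
    name = Column(Text, nullable=False)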
avg_line_length: 25.469136 | max_line_length: 79 | alphanum_fraction: 0.650994

hexsha: b97a0b2a9f0b601569ce8973596517ed7d8790ec | size: 3588 | ext: py | lang: Python
path: tfjs-converter/python/tensorflowjs/converters/graph_rewrite_util.py | repo: djemeljanovs/tfjs @ ee4430cd7a04283ec09184a3fe9d3fb27496f1dc | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import re
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import tensor_util
# Custom op name for fused depthwise conv2d
FUSED_DEPTHWISE_CONV2D = 'FusedDepthwiseConv2dNative'
# The grappler op name for fused MatMul which starts with '_'
FUSED_MATMUL = '_FusedMatMul'
def node_from_map(node_map, name):
  """Pulls a node def from a dictionary for a given name.

  Args:
    node_map: Dictionary containing an entry indexed by name for every node.
    name: Identifies the node we want to find.

  Returns:
    NodeDef of the node with the given name.

  Raises:
    ValueError: If the node isn't present in the dictionary.
  """
  stripped_name = node_name_from_input(name)
  if stripped_name not in node_map:
    raise ValueError("No node named '%s' found in map." % name)
  return node_map[stripped_name]

def values_from_const(node_def):
  """Extracts the values from a const NodeDef as a numpy ndarray.

  Args:
    node_def: Const NodeDef that has the values we want to access.

  Returns:
    Numpy ndarray containing the values.

  Raises:
    ValueError: If the node isn't a Const.
  """
  if node_def.op != "Const":
    raise ValueError(
        "Node named '%s' should be a Const op for values_from_const." %
        node_def.name)
  input_tensor = node_def.attr["value"].tensor
  tensor_value = tensor_util.MakeNdarray(input_tensor)
  return tensor_value

def node_name_from_input(node_name):
  """Strips off ports and other decorations to get the underlying node name."""
  if node_name.startswith("^"):
    node_name = node_name[1:]
  m = re.search(r"(.*):\d+$", node_name)
  if m:
    node_name = m.group(1)
  return node_name

def cleanup_graph_def(input_graph_def, nodes_to_skip, inputs_to_remove):
  """Clean up the graph def by removing the skipped nodes and clean up the
  nodes with inputs that have been removed.

  Args:
    input_graph_def: GraphDef object to be cleaned.
    nodes_to_skip: Dict with node names to be skipped.
    inputs_to_remove: List of nodes to be removed from inputs of all nodes.

  Returns:
    GraphDef that has been cleaned.
  """
  result_graph_def = graph_pb2.GraphDef()
  for node in input_graph_def.node:
    if node.name in nodes_to_skip:
      continue
    new_node = node_def_pb2.NodeDef()
    new_node.CopyFrom(node)
    for value in inputs_to_remove:
      for i, input_node in enumerate(new_node.input):
        if input_node == value.name:
          new_node.input[i] = value.input[0]
    result_graph_def.node.extend([new_node])
  result_graph_def.library.CopyFrom(input_graph_def.library)
  result_graph_def.versions.CopyFrom(input_graph_def.versions)
  return result_graph_def
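# Illustrative examples (not part of the original module):
#   node_name_from_input("^batchnorm/add_1:0")  ->  "batchnorm/add_1"
#   node_name_from_input("weights")             ->  "weights"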
avg_line_length: 33.849057 | max_line_length: 80 | alphanum_fraction: 0.726031

hexsha: b97af59ee4283114481f3e83dc8e3cf6244bb61c | size: 1014 | ext: py | lang: Python
path: loss_fn/classification_loss_fns/binary_cross_entropy.py | repo: apple/ml-cvnets @ 84d992f413e52c0468f86d23196efd9dad885e6f | licenses: ["AML"]
stars: 209 (2021-10-30T08:32:10.000Z to 2022-03-31T16:18:03.000Z) | issues: 12 (2021-12-04T10:47:11.000Z to 2022-03-31T15:39:40.000Z) | forks: 50 (2021-11-01T08:15:02.000Z to 2022-03-29T08:17:34.000Z)
#
# For licensing see accompanying LICENSE file.
# Copyright (C) 2022 Apple Inc. All Rights Reserved.
#
from torch.nn import functional as F
from torch import Tensor
import argparse
from . import register_classification_loss_fn
from .. import BaseCriteria
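# The registered loss class was elided from this copy. A hedged sketch of the
# usual registry pattern, assuming the decorator accepts a name argument and
# that predictions are logits (both assumptions, not confirmed here):
@register_classification_loss_fn(name="binary_cross_entropy")
class ClsBinaryCrossEntropy(BaseCriteria):
    """Binary cross-entropy with logits for classification."""

    def forward(self, input_sample, prediction, target, *args, **kwargs) -> Tensor:
        if target.dim() == 1:
            # Assumed handling: expand class indices to one-hot targets.
            target = F.one_hot(target, num_classes=prediction.shape[-1])
        return F.binary_cross_entropy_with_logits(
            input=prediction,
            target=target.to(prediction.dtype),
            reduction="sum",
        )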
avg_line_length: 28.166667 | max_line_length: 87 | alphanum_fraction: 0.667653

hexsha: b97c7f15dd61f4851cffcb3982337f852b3b8da5 | size: 576 | ext: py | lang: Python
path: Sorting/insertion_sort.py | repo: lakshyarawal/pythonPractice @ 4b400342198a8270c5ac0c6306afb555f927c6c1 | licenses: ["MIT"]
stars: null | issues: null | forks: null
""" Insertion Sort Algorithm:"""
"""Implementation"""
# Using the special variable
# __name__
if __name__ == "__main__":
main()
avg_line_length: 19.2 | max_line_length: 65 | alphanum_fraction: 0.522569

hexsha: b97c828450c34038ee92e089e3f2b951d2113017 | size: 903 | ext: py | lang: Python
path: nipype/interfaces/spm/__init__.py | repo: felixsc1/nipype @ e722d6170593583f16ddfcb95473e5d30b5f1d7c | licenses: ["Apache-2.0"]
stars: 8 (2019-05-29T09:38:30.000Z to 2021-01-20T03:36:59.000Z) | issues: 12 (2021-03-09T03:01:16.000Z to 2022-03-11T23:59:36.000Z) | forks: 1 (2020-07-17T12:49:49.000Z to 2020-07-17T12:49:49.000Z)
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Top-level namespace for spm."""
from .base import (Info, SPMCommand, logger, no_spm, scans_for_fname,
scans_for_fnames)
from .preprocess import (FieldMap, SliceTiming, Realign, RealignUnwarp,
Coregister, Normalize, Normalize12, Segment,
Smooth, NewSegment, DARTEL, DARTELNorm2MNI,
CreateWarped, VBMSegment)
from .model import (Level1Design, EstimateModel, EstimateContrast, Threshold,
OneSampleTTestDesign, TwoSampleTTestDesign,
PairedTTestDesign, MultipleRegressionDesign)
from .utils import (Analyze2nii, CalcCoregAffine, ApplyTransform, Reslice,
ApplyInverseDeformation, ResliceToReference, DicomImport)
avg_line_length: 53.117647 | max_line_length: 77 | alphanum_fraction: 0.653378

hexsha: b97cd7905f5c596cb6d79b67c2c80e83907421d9 | size: 8257 | ext: py | lang: Python
path: network.py | repo: tobloef/neural-network @ bd05a8b9eccc0f5a973782247d39f9b5aa33156c | licenses: ["MIT"]
stars: 3 (2018-01-06T22:27:58.000Z to 2018-08-12T20:29:51.000Z) | issues: 1 (2018-03-31T18:49:56.000Z to 2018-04-19T04:52:33.000Z) | forks: null
import numpy as np
from mathUtils import *
avg_line_length: 53.270968 | max_line_length: 286 | alphanum_fraction: 0.657987

hexsha: b97d4675d330154e0b12b91fbd601affd888ea29 | size: 1901 | ext: py | lang: Python
path: examples/airflow/dags/etl_orders_7_days.py | repo: phixMe/marquez @ 06d71635369893b371a8a9c9e7023f11d7cbb1f8 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
from datetime import datetime
from marquez_airflow import DAG
from airflow.operators.postgres_operator import PostgresOperator
from airflow.utils.dates import days_ago
default_args = {
'owner': 'datascience',
'depends_on_past': False,
'start_date': days_ago(1),
'email_on_failure': False,
'email_on_retry': False,
'email': ['[email protected]']
}
dag = DAG(
'etl_orders_7_days',
schedule_interval='@hourly',
catchup=False,
default_args=default_args,
description='Loads newly placed orders weekly.'
)
t1 = PostgresOperator(
task_id='if_not_exists',
postgres_conn_id='food_delivery_db',
sql='''
CREATE TABLE IF NOT EXISTS orders_7_days (
order_id INTEGER REFERENCES orders(id),
placed_on TIMESTAMP NOT NULL,
discount_id INTEGER REFERENCES discounts(id),
menu_id INTEGER REFERENCES menus(id),
restaurant_id INTEGER REFERENCES restaurants(id),
menu_item_id INTEGER REFERENCES menu_items(id),
category_id INTEGER REFERENCES categories(id)
);''',
dag=dag
)
t2 = PostgresOperator(
task_id='tuncate',
postgres_conn_id='food_delivery_db',
sql='TRUNCATE TABLE orders_7_days;',
dag=dag
)
t3 = PostgresOperator(
task_id='insert',
postgres_conn_id='food_delivery_db',
sql='''
INSERT INTO orders_7_days (order_id, placed_on, discount_id, menu_id, restaurant_id, menu_item_id, category_id)
SELECT o.id AS order_id, o.placed_on, o.discount_id, m.id AS menu_id, m.restaurant_id, mi.id AS menu_item_id, c.id AS category_id
FROM orders AS o
INNER JOIN menu_items AS mi
ON mi.id = o.menu_item_id
INNER JOIN categories AS c
ON c.id = mi.category_id
INNER JOIN menus AS m
ON m.id = c.menu_id
WHERE o.placed_on >= NOW() - interval '7 days'
''',
dag=dag
)
t1 >> t2 >> t3
avg_line_length: 29.246154 | max_line_length: 135 | alphanum_fraction: 0.681746

hexsha: b97deb7d2bd255cd9a3d9f169d969333b63452ec | size: 313 | ext: py | lang: Python
path: sample/pizza.py | repo: marianarmorgado/python-starter @ 8bf3d7a16fd462cf99898c9a82c6e1cf4fc0e7f2 | licenses: ["MIT"]
stars: null | issues: null | forks: null
# store information about a pizza being ordered
pizza = {
'crust': 'thick',
'toppings': ['mushrooms', 'extra vegan cheese']
}
# summarize the order
print("You ordered a " + pizza['crust'] + "-crust pizza" +
"with the following toppings:")
for topping in pizza['toppings']:
print("\t" + topping)
avg_line_length: 26.083333 | max_line_length: 59 | alphanum_fraction: 0.645367

hexsha: b97e1419e0e45b84ecc462227c812c10beb92718 | size: 181 | ext: py | lang: Python
path: YouTube/CursoEmVideo/python/ex012.py | repo: Fh-Shadow/Progamando @ f496d83c36e9a079ed06b4e7c34396c57f539de9 | licenses: ["MIT"]
stars: null | issues: null | forks: null
a = float(input('Qual o preço do produto? R$'))  # "What is the product's price?"
d = a - (a * 23 / 100)
print('O produto que custava R${:.2f}, na promoção de 23% de desconto vai custar: R${:.2f}'.format(a, d))  # "The product that cost R$X will cost R$Y with a 23% discount"
avg_line_length: 45.25 | max_line_length: 106 | alphanum_fraction: 0.607735

hexsha: b97e5feb1052b87d359d8e3d9f63ba930bff8e66 | size: 15038 | ext: py | lang: Python
path: dnnlib/submission/submit.py | repo: gperdrizet/gansformer @ c68ba623aa498c83d8df4c4f0a3b5e3f63c773a5 | licenses: ["MIT"]
stars: 1172 (2021-03-02T02:00:44.000Z to 2022-03-31T02:46:45.000Z) | issues: 37 (2021-03-03T14:11:11.000Z to 2022-03-12T15:40:15.000Z) | forks: 138 (2021-03-02T06:37:10.000Z to 2022-03-30T14:59:09.000Z)
# Submit a function to be run either locally or in a computing cluster.
# Compared to original StyleGAN implementation, we extend the support for automatic training resumption,
# and network recompilation.
import copy
import inspect
import os
import pathlib
import pickle
import platform
import pprint
import re
import shutil
import sys
import time
import traceback
from enum import Enum
from .. import util
from ..util import EasyDict
from . import internal
_user_name_override = None
avg_line_length: 43.337176 | max_line_length: 238 | alphanum_fraction: 0.691847