| column | dtype | range | nullable |
| --- | --- | --- | --- |
| hexsha | string | length 40-40 | |
| size | int64 | 5-2.06M | |
| ext | string | 11 classes | |
| lang | string | 1 class | |
| max_stars_repo_path | string | length 3-251 | |
| max_stars_repo_name | string | length 4-130 | |
| max_stars_repo_head_hexsha | string | length 40-78 | |
| max_stars_repo_licenses | list | length 1-10 | |
| max_stars_count | int64 | 1-191k | ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24-24 | ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24-24 | ⌀ |
| max_issues_repo_path | string | length 3-251 | |
| max_issues_repo_name | string | length 4-130 | |
| max_issues_repo_head_hexsha | string | length 40-78 | |
| max_issues_repo_licenses | list | length 1-10 | |
| max_issues_count | int64 | 1-116k | ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24-24 | ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24-24 | ⌀ |
| max_forks_repo_path | string | length 3-251 | |
| max_forks_repo_name | string | length 4-130 | |
| max_forks_repo_head_hexsha | string | length 40-78 | |
| max_forks_repo_licenses | list | length 1-10 | |
| max_forks_count | int64 | 1-105k | ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24-24 | ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24-24 | ⌀ |
| content | string | length 1-1.05M | |
| avg_line_length | float64 | 1-1.02M | |
| max_line_length | int64 | 3-1.04M | |
| alphanum_fraction | float64 | 0-1 | |
hexsha 0eed507c0a7d908570e5345420f87553a7bbdb5d | size 788 | ext py | lang Python
stars:  main.py | poltavski/social-network-frontend | ccc3410e23e42cfc65efd811aba262ec88163481 | ["MIT"] | count null | events null → null
issues: main.py | poltavski/social-network-frontend | ccc3410e23e42cfc65efd811aba262ec88163481 | ["MIT"] | count null | events null → null
forks:  main.py | poltavski/social-network-frontend | ccc3410e23e42cfc65efd811aba262ec88163481 | ["MIT"] | count null | events null → null
from fastapi import FastAPI, Request, Response
from fastapi.responses import HTMLResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from utils import get_page_data, process_initial
import uvicorn
app = FastAPI()
templates = Jinja2Templates(directory="templates")
app.mount("/static", StaticFiles(directory="static"), name="static")
if __name__ == "__main__":
uvicorn.run("main:app", host="127.0.0.1", port=8050, log_level="info")
| 29.185185 | 74 | 0.769036 |
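The content cell of this record is truncated before any route is defined, even though it imports `get_page_data` and `process_initial` from `utils`. A minimal sketch of how such a frontend route might continue; the route path, template name, and helper usage are assumptions, not the original code:

```python
# Hypothetical continuation of the truncated main.py above; route path,
# template name, and helper signatures are assumptions.
from fastapi import FastAPI, Request
from fastapi.responses import HTMLResponse
from fastapi.templating import Jinja2Templates

app = FastAPI()
templates = Jinja2Templates(directory="templates")

@app.get("/", response_class=HTMLResponse)
async def index(request: Request):
    # Jinja2Templates requires the request object in the template context
    return templates.TemplateResponse("index.html", {"request": request})
```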
hexsha 0eed571d3bbd262c5cff9905eccfdacc18b2c6bf | size 4,515 | ext py | lang Python
stars:  Core/Python/create_static_group.py | Ku-Al/OpenManage-Enterprise | 5cc67435d7cedb091edb07311ed9dceeda43277f | ["Apache-2.0"] | count null | events null → null
issues: Core/Python/create_static_group.py | Ku-Al/OpenManage-Enterprise | 5cc67435d7cedb091edb07311ed9dceeda43277f | ["Apache-2.0"] | count null | events null → null
forks:  Core/Python/create_static_group.py | Ku-Al/OpenManage-Enterprise | 5cc67435d7cedb091edb07311ed9dceeda43277f | ["Apache-2.0"] | count null | events null → null
#
# Python script using OME API to create a new static group
#
# _author_ = Raajeev Kalyanaraman <[email protected]>
# _version_ = 0.1
#
# Copyright (c) 2020 Dell EMC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
SYNOPSIS:
Script to create a new static group
DESCRIPTION:
This script exercises the OME REST API to create a new static
group. The user is responsible for adding devices to the
group once the group has been successfully created.
For authentication X-Auth is used over Basic Authentication
Note that the credentials entered are not stored to disk.
EXAMPLE:
python create_static_group.py --ip <xx> --user <username>
--password <pwd> --groupname "Random Test Group"
"""
import json
import argparse
from argparse import RawTextHelpFormatter
import urllib3
import requests
def create_static_group(ip_address, user_name, password, group_name):
""" Authenticate with OME and enumerate groups """
try:
session_url = 'https://%s/api/SessionService/Sessions' % ip_address
group_url = "https://%s/api/GroupService/Groups?$filter=Name eq 'Static Groups'" % ip_address
headers = {'content-type': 'application/json'}
user_details = {'UserName': user_name,
'Password': password,
'SessionType': 'API'}
session_info = requests.post(session_url, verify=False,
data=json.dumps(user_details),
headers=headers)
if session_info.status_code == 201:
headers['X-Auth-Token'] = session_info.headers['X-Auth-Token']
response = requests.get(group_url, headers=headers, verify=False)
if response.status_code == 200:
json_data = response.json()
if json_data['@odata.count'] > 0:
# Technically there should be only one result in the filter
group_id = json_data['value'][0]['Id']
group_payload = {"GroupModel": {
"Name": group_name,
"Description": "",
"MembershipTypeId": 12,
"ParentId": int(group_id)}
}
create_url = 'https://%s/api/GroupService/Actions/GroupService.CreateGroup' % ip_address
create_resp = requests.post(create_url, headers=headers,
verify=False,
data=json.dumps(group_payload))
if create_resp.status_code == 200:
print("New group created : ID =", create_resp.text)
elif create_resp.status_code == 400:
print("Failed group creation ...See error info below")
print(json.dumps(create_resp.json(), indent=4,
sort_keys=False))
else:
print("Unable to retrieve group list from %s" % ip_address)
else:
print("Unable to create a session with appliance %s" % ip_address)
except Exception as error:
print("Unexpected error:", str(error))
if __name__ == '__main__':
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
parser = argparse.ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
parser.add_argument("--ip", "-i", required=True, help="OME Appliance IP")
parser.add_argument("--user", "-u", required=False,
help="Username for OME Appliance", default="admin")
parser.add_argument("--password", "-p", required=True,
help="Password for OME Appliance")
parser.add_argument("--groupname", "-g", required=True,
help="A valid name for the group")
args = parser.parse_args()
create_static_group(args.ip, args.user, args.password, args.groupname)
| 44.70297 | 108 | 0.61041 |
hexsha 0eed82297822ff3d19f2f5807a4ad2a8d7e8d1d9 | size 4,531 | ext py | lang Python
stars:  examples/references/segmentation/pascal_voc2012/code/dataflow/dataloaders.py | kagrze/ignite | 18708a76f86623545311d35bc48673eac9e55591 | ["BSD-3-Clause"] | count null | events null → null
issues: examples/references/segmentation/pascal_voc2012/code/dataflow/dataloaders.py | kagrze/ignite | 18708a76f86623545311d35bc48673eac9e55591 | ["BSD-3-Clause"] | count null | events null → null
forks:  examples/references/segmentation/pascal_voc2012/code/dataflow/dataloaders.py | kagrze/ignite | 18708a76f86623545311d35bc48673eac9e55591 | ["BSD-3-Clause"] | count null | events null → null
from typing import Callable, Optional, Tuple, Union
import numpy as np
from torch.utils.data import DataLoader, Sampler
from torch.utils.data.dataset import Subset, ConcatDataset
import torch.utils.data.distributed as data_dist
from dataflow.datasets import get_train_dataset, get_val_dataset, TransformedDataset, get_train_noval_sbdataset
| 43.990291 | 116 | 0.637387 |
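Only the import block of this dataloaders module survived the content truncation. A sketch of the kind of loader factory those imports suggest; the function name, defaults, and absence of distributed-sampler handling are assumptions:

```python
# Hypothetical sketch; not the original ignite example code.
from typing import Optional
from torch.utils.data import DataLoader, Dataset, Sampler

def get_dataloader(dataset: Dataset, sampler: Optional[Sampler] = None,
                   batch_size: int = 16, num_workers: int = 8) -> DataLoader:
    # shuffle only when no explicit sampler is supplied, mirroring the usual
    # train/val split handling in segmentation reference examples
    return DataLoader(dataset, batch_size=batch_size, sampler=sampler,
                      shuffle=(sampler is None), num_workers=num_workers,
                      pin_memory=True)
```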
hexsha 0eef441f20577a797d6570e849cc35b3e4804f14 | size 6,309 | ext py | lang Python
stars:  saleor/core/jwt.py | autobotasia/saleor | e03e9f6ab1bddac308a6609d6b576a87e90ae655 | ["CC-BY-4.0"] | count 1 | events 2022-02-19T13:27:40.000Z → 2022-02-19T13:27:40.000Z
issues: saleor/core/jwt.py | autobotasia/saleor | e03e9f6ab1bddac308a6609d6b576a87e90ae655 | ["CC-BY-4.0"] | count null | events null → null
forks:  saleor/core/jwt.py | autobotasia/saleor | e03e9f6ab1bddac308a6609d6b576a87e90ae655 | ["CC-BY-4.0"] | count 2 | events 2021-12-03T16:59:37.000Z → 2022-02-19T13:05:42.000Z
from datetime import datetime, timedelta
from typing import Any, Dict, Optional
import graphene
import jwt
from django.conf import settings
from django.core.handlers.wsgi import WSGIRequest
from ..account.models import User
from ..app.models import App
from .permissions import (
get_permission_names,
get_permissions_from_codenames,
get_permissions_from_names,
)
JWT_ALGORITHM = "HS256"
SALEOR_AUTH_HEADER = "HTTP_AUTHORIZATION_BEARER"
DEFAULT_AUTH_HEADER = "HTTP_AUTHORIZATION"
AUTH_HEADER_PREFIXES = ["JWT", "BEARER"]
JWT_ACCESS_TYPE = "access"
JWT_REFRESH_TYPE = "refresh"
JWT_THIRDPARTY_ACCESS_TYPE = "thirdparty"
JWT_REFRESH_TOKEN_COOKIE_NAME = "refreshToken"
PERMISSIONS_FIELD = "permissions"
JWT_SALEOR_OWNER_NAME = "saleor"
JWT_OWNER_FIELD = "owner"
def is_saleor_token(token: str) -> bool:
"""Confirm that token was generated by Saleor not by plugin."""
try:
payload = jwt.decode(token, options={"verify_signature": False})
except jwt.PyJWTError:
return False
owner = payload.get(JWT_OWNER_FIELD)
if not owner or owner != JWT_SALEOR_OWNER_NAME:
return False
return True
def create_access_token_for_app(app: "App", user: "User"):
"""Create access token for app.
    App can use the user JWT token to perform a given operation on the Saleor side.
The token which can be used by App has additional field defining the permissions
assigned to it. The permissions set is the intersection of user permissions and
app permissions.
"""
app_permissions = app.permissions.all()
app_permission_enums = get_permission_names(app_permissions)
permissions = user.effective_permissions
user_permission_enums = get_permission_names(permissions)
app_id = graphene.Node.to_global_id("App", app.id)
additional_payload = {
"app": app_id,
PERMISSIONS_FIELD: list(app_permission_enums & user_permission_enums),
}
payload = jwt_user_payload(
user,
JWT_THIRDPARTY_ACCESS_TYPE,
exp_delta=settings.JWT_TTL_APP_ACCESS,
additional_payload=additional_payload,
)
return jwt_encode(payload)
| 30.926471 | 84 | 0.703598 |
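The record calls `jwt_user_payload` and `jwt_encode`, but both definitions fall outside the truncated content cell. A sketch of what they plausibly look like; the claim names, secret-key source, and defaults are assumptions:

```python
# Hypothetical reconstruction of the two helpers used above; claim names,
# the secret key source, and defaults are assumptions.
from datetime import datetime, timedelta
from typing import Any, Dict, Optional

import jwt  # PyJWT

JWT_ALGORITHM = "HS256"
SECRET_KEY = "change-me"  # stand-in for settings.SECRET_KEY

def jwt_user_payload(user, token_type: str, exp_delta: Optional[timedelta],
                     additional_payload: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
    utc_now = datetime.utcnow()
    payload = {
        "iat": utc_now,      # PyJWT serializes datetimes for registered claims
        "type": token_type,
        "user_id": user.pk,
        "owner": "saleor",   # checked by is_saleor_token() above
    }
    if exp_delta is not None:
        payload["exp"] = utc_now + exp_delta
    if additional_payload:
        payload.update(additional_payload)
    return payload

def jwt_encode(payload: Dict[str, Any]) -> str:
    return jwt.encode(payload, SECRET_KEY, algorithm=JWT_ALGORITHM)
```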
hexsha 0eef6d139660d7b5753e9bf6938554e0499dccc1 | size 3,513 | ext py | lang Python
stars:  locust/configuration.py | pancaprima/locust | dba803fcdd13ff2fada4e8b8ee37a163aa519a48 | ["MIT"] | count 1 | events 2018-09-03T10:05:55.000Z → 2018-09-03T10:05:55.000Z
issues: locust/configuration.py | pancaprima/locust | dba803fcdd13ff2fada4e8b8ee37a163aa519a48 | ["MIT"] | count 14 | events 2017-09-20T11:01:44.000Z → 2020-02-21T18:37:58.000Z
forks:  locust/configuration.py | erlanggakrisnamukti/locust | dba803fcdd13ff2fada4e8b8ee37a163aa519a48 | ["MIT"] | count 3 | events 2018-01-24T09:39:56.000Z → 2018-08-24T06:30:23.000Z
import os, json, logging, jsonpath_rw_ext, jsonpath_rw
from jsonpath_rw import jsonpath, parse
from . import events
from ast import literal_eval
from flask import make_response
logger = logging.getLogger(__name__)
CONFIG_PATH = '/tests/settings/config.json'
| 34.782178 | 114 | 0.562767 |
hexsha 0eefcd2d4671b89d12e5ea2a56457ebf60bf3929 | size 458 | ext py | lang Python
stars:  data/migrations/0023_discardaction_answers.py | SIXMON/peps | 48c09a951a0193ada7b91c8bb6efc4b1232c3520 | ["MIT"] | count 5 | events 2019-08-29T13:55:47.000Z → 2021-11-15T08:30:33.000Z
issues: data/migrations/0023_discardaction_answers.py | SIXMON/peps | 48c09a951a0193ada7b91c8bb6efc4b1232c3520 | ["MIT"] | count 295 | events 2019-08-19T12:40:29.000Z → 2022-01-24T14:03:20.000Z
forks:  data/migrations/0023_discardaction_answers.py | SIXMON/peps | 48c09a951a0193ada7b91c8bb6efc4b1232c3520 | ["MIT"] | count 7 | events 2020-05-27T06:28:48.000Z → 2021-11-17T10:00:54.000Z
# Generated by Django 2.2.4 on 2019-11-14 16:48
import django.contrib.postgres.fields.jsonb
from django.db import migrations
| 22.9 | 88 | 0.637555 |
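The `Migration` class itself was truncated out of this record. Given the filename `0023_discardaction_answers.py` and the JSONB import, a plausible reconstruction follows; the dependency name and field options are assumptions:

```python
# Hypothetical reconstruction of the truncated migration body.
import django.contrib.postgres.fields.jsonb
from django.db import migrations

class Migration(migrations.Migration):

    dependencies = [
        ('data', '0022_auto_20191113_1200'),  # assumed predecessor migration
    ]

    operations = [
        migrations.AddField(
            model_name='discardaction',
            name='answers',
            field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True),
        ),
    ]
```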
hexsha 0eefd82c5da7e16c24688892ee11d187a2303e65 | size 1,297 | ext py | lang Python
stars:  app/models.py | juanitodread/pitaya-falcon | f4b889f9fa39072aeb9f1c71fe5f3bb259082e93 | ["Apache-2.0"] | count null | events null → null
issues: app/models.py | juanitodread/pitaya-falcon | f4b889f9fa39072aeb9f1c71fe5f3bb259082e93 | ["Apache-2.0"] | count null | events null → null
forks:  app/models.py | juanitodread/pitaya-falcon | f4b889f9fa39072aeb9f1c71fe5f3bb259082e93 | ["Apache-2.0"] | count null | events null → null
from json import JSONEncoder
from time import time
| 27.595745 | 109 | 0.621434 |
hexsha 0ef004434fa16f22e39f1c30a252704c35a2362e | size 2,034 | ext py | lang Python
stars:  compute_pi.py | jakobkogler/pi_memorize | c82c24f26407f1728ad1e73851b72dea9bf779f6 | ["MIT"] | count null | events null → null
issues: compute_pi.py | jakobkogler/pi_memorize | c82c24f26407f1728ad1e73851b72dea9bf779f6 | ["MIT"] | count null | events null → null
forks:  compute_pi.py | jakobkogler/pi_memorize | c82c24f26407f1728ad1e73851b72dea9bf779f6 | ["MIT"] | count null | events null → null
"""Compute pi."""
from decimal import Decimal, getcontext
import argparse
import itertools
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Calculates pi.')
parser.add_argument('--precision', type=int, default=100,
help='The desired precision of pi (default: 100 digits)')
args = parser.parse_args()
pi_computer = ComputePi()
print(pi_computer.machin_euler(args.precision))
| 29.478261 | 102 | 0.564897 |
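The `__main__` block instantiates a `ComputePi` class whose definition was truncated away. A sketch of a `machin_euler(precision)` implementation consistent with the `Decimal` import and the method name; the series details are assumptions, based on Machin's formula pi/4 = 4*arctan(1/5) - arctan(1/239):

```python
# Hypothetical reconstruction of the truncated ComputePi class.
from decimal import Decimal, getcontext

class ComputePi:
    """Compute pi to a requested number of decimal digits."""

    @staticmethod
    def _arctan_inverse(x: int, digits: int) -> Decimal:
        # arctan(1/x) = 1/x - 1/(3*x^3) + 1/(5*x^5) - ...
        eps = Decimal(10) ** -(digits + 5)
        power = Decimal(1) / x        # x^-(2k+1), starting at k = 0
        total = power
        x_squared = x * x
        n, sign = 1, 1
        while power > eps:
            power /= x_squared
            n += 2
            sign = -sign
            total += sign * power / n
        return total

    def machin_euler(self, precision: int) -> Decimal:
        getcontext().prec = precision + 10   # guard digits while summing
        pi = 4 * (4 * self._arctan_inverse(5, precision)
                  - self._arctan_inverse(239, precision))
        getcontext().prec = precision
        return +pi                           # unary plus rounds to final precision
```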
hexsha 0ef0299af0be6f4403ddbf6bc9801b26ba188122 | size 1,657 | ext py | lang Python
stars:  scripts/01_deploy_data_types.py | LaMemeBete/nodys-smart-contract | f67b88d98ebf7063b72f46cb2b014d5de96eb56d | ["MIT", "Unlicense"] | count null | events null → null
issues: scripts/01_deploy_data_types.py | LaMemeBete/nodys-smart-contract | f67b88d98ebf7063b72f46cb2b014d5de96eb56d | ["MIT", "Unlicense"] | count null | events null → null
forks:  scripts/01_deploy_data_types.py | LaMemeBete/nodys-smart-contract | f67b88d98ebf7063b72f46cb2b014d5de96eb56d | ["MIT", "Unlicense"] | count null | events null → null
#!/usr/bin/python3
import time
from brownie import (
DataTypes,
TransparentUpgradeableProxy,
ProxyAdmin,
config,
network,
Contract,
)
from scripts.helpful_scripts import get_account, encode_function_data
| 35.255319 | 93 | 0.692818 |
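The deployment function this brownie script leads up to was truncated. A sketch of a transparent-proxy deployment consistent with the imports; the initializer arguments and gas settings are assumptions:

```python
# Hypothetical sketch of the truncated deployment flow.
from brownie import DataTypes, TransparentUpgradeableProxy, ProxyAdmin, network
from scripts.helpful_scripts import get_account, encode_function_data

def main():
    account = get_account()
    print(f"Deploying DataTypes to {network.show_active()}")
    data_types = DataTypes.deploy({"from": account})

    proxy_admin = ProxyAdmin.deploy({"from": account})
    initializer = encode_function_data()  # assumed: no initializer arguments
    proxy = TransparentUpgradeableProxy.deploy(
        data_types.address, proxy_admin.address, initializer,
        {"from": account, "gas_limit": 1_000_000},
    )
    print(f"Proxy deployed to {proxy.address}")
```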
hexsha 0ef0869b952bf4b7857333b5caa682157e430b0a | size 659 | ext py | lang Python
stars:  modules/BidirectionalLSTM.py | omni-us/pytorch-retinanet | 8d3ee38d50df0afec2ab4dfa0eabb8219eb399f5 | ["Apache-2.0"] | count 12 | events 2019-08-14T13:32:30.000Z → 2022-03-09T15:25:33.000Z
issues: modules/BidirectionalLSTM.py | omni-us/pytorch-retinanet | 8d3ee38d50df0afec2ab4dfa0eabb8219eb399f5 | ["Apache-2.0"] | count 2 | events 2019-12-29T21:15:00.000Z → 2020-01-14T13:51:54.000Z
forks:  modules/BidirectionalLSTM.py | omni-us/pytorch-retinanet | 8d3ee38d50df0afec2ab4dfa0eabb8219eb399f5 | ["Apache-2.0"] | count 6 | events 2019-08-03T16:22:41.000Z → 2020-09-27T16:55:40.000Z
import torch.nn as nn
| 28.652174 | 69 | 0.614568 |
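Only the `torch.nn` import of this module made it into the content cell. The file name matches the BidirectionalLSTM block used in CRNN-style recognizers, so a sketch along those lines; the exact layer sizes are assumptions:

```python
# Hypothetical reconstruction of the truncated module.
import torch
import torch.nn as nn

class BidirectionalLSTM(nn.Module):
    def __init__(self, input_size: int, hidden_size: int, output_size: int):
        super().__init__()
        self.rnn = nn.LSTM(input_size, hidden_size, bidirectional=True)
        self.embedding = nn.Linear(hidden_size * 2, output_size)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: (seq_len, batch, input_size)
        recurrent, _ = self.rnn(x)
        seq_len, batch, hidden = recurrent.size()
        out = self.embedding(recurrent.reshape(seq_len * batch, hidden))
        return out.view(seq_len, batch, -1)
```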
hexsha 0ef1f7b69f59398c929a14885bdad0d62cb19dca | size 5,173 | ext py | lang Python
stars:  main.py | JaekwangCha/my_pytorch_templet | 7b6b67116e9d69abd64631d90b38fedc79be6c8c | ["MIT"] | count null | events null → null
issues: main.py | JaekwangCha/my_pytorch_templet | 7b6b67116e9d69abd64631d90b38fedc79be6c8c | ["MIT"] | count null | events null → null
forks:  main.py | JaekwangCha/my_pytorch_templet | 7b6b67116e9d69abd64631d90b38fedc79be6c8c | ["MIT"] | count null | events null → null
# written by Jaekwang Cha
# version 0.1
# ================== IMPORT CUSTOM LEARNING LIBRARIES ===================== #
from customs.train import train, test
from customs.dataset import load_dataset
from customs.model import load_model
# ================== TRAINING SETTINGS ================== #
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument('--train_method', default='supervised', type=str, help='type of training: supervised(default), unsupervised, reinforce')
parser.add_argument('--task', default='classification', type=str, help='task of training: classification(default), regression')
parser.add_argument('--dataset', default='mnist', type=str, help='dataset to use')
parser.add_argument('--model', default='CNN', type=str, help='model to use')
parser.add_argument('--seed', default=42, type=int, help='random seed (default: 42)')
parser.add_argument('--num_worker', default=1, type=int, help='number of dataloader worker')
parser.add_argument('--no_cuda', action='store_true', default=False, help='disables CUDA training')
parser.add_argument('--gpu', default='0', type=str, help='GPU-id for GPU to use')  # default quoted to match type=str
parser.add_argument('--multi_gpu', default='0', type=str, help='GPU-ids for multi-GPU usage')
parser.add_argument('--pin_memory', default=True, type=bool, help='pin memory option selector')
parser.add_argument('--save_model', action='store_true', default=False, help='For Saving the current Model')
parser.add_argument('--save_path', default=os.getcwd()+'/weights', type=str, help='Where to save weights')
parser.add_argument('--log_path', default=os.getcwd()+'/Logs', type=str, help='Where to save Logs')
# data setting
parser.add_argument('--val_rate', default=0.2, type=float, help='split rate for the validation data')
parser.add_argument('--transform', default='default', type=str, help='choose the data transform type')
# training parameter setting
parser.add_argument('--n_epoch', default=10, type=int, help='number of total training iteration')
parser.add_argument('--batch_size', default=32, type=int, help='size of minibatch')
parser.add_argument('--test_batch_size', default=32, type=int, help='size of test-minibatch')
# optimizer & scheduler setting
parser.add_argument('--lr', default=0.03, type=float, help='training learning rate')
parser.add_argument('--optimizer', default='adam', type=str, help='optimizer select')
parser.add_argument('--scheduler', default='steplr', type=str, help='scheduler select')
opt = parser.parse_args()
# ===================== IMPORT PYTORCH LIBRARIES ================== #
import torch
from torch.utils.data import DataLoader
torch.manual_seed(opt.seed)
# ================== GPU SETTINGS ================== #
# ======================= MAIN SCRIPT ============================= #
if __name__ == '__main__':
main(opt)
| 52.785714 | 159 | 0.605838 |
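The script ends by calling a `main(opt)` that the truncated content never defines. A sketch of a plausible body wiring the `customs.*` helpers together; every helper signature here is an assumption:

```python
# Hypothetical sketch of the missing main(); helper signatures are assumed.
import torch
from torch.utils.data import DataLoader

from customs.train import train, test
from customs.dataset import load_dataset
from customs.model import load_model

def main(opt):
    use_cuda = not opt.no_cuda and torch.cuda.is_available()
    device = torch.device(f'cuda:{opt.gpu}' if use_cuda else 'cpu')

    train_set, val_set, test_set = load_dataset(opt)   # assumed return shape
    train_loader = DataLoader(train_set, batch_size=opt.batch_size,
                              shuffle=True, num_workers=opt.num_worker,
                              pin_memory=opt.pin_memory)
    test_loader = DataLoader(test_set, batch_size=opt.test_batch_size,
                             shuffle=False, num_workers=opt.num_worker)
    # val_set handling (early stopping, scheduler steps) omitted in this sketch

    model = load_model(opt).to(device)                 # assumed signature
    train(opt, model, train_loader, device)            # assumed signature
    test(opt, model, test_loader, device)              # assumed signature
```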
hexsha 0ef37debe8fbb6d99817c5ad659e3ff1f210c644 | size 4,812 | ext py | lang Python
stars:  test/core/024-sc4-gridftp-http/Rosetta.py | ahnitz/pegasus | e269b460f4d87eb3f3a7e91cd82e2c28fdb55573 | ["Apache-2.0"] | count 127 | events 2015-01-28T19:19:13.000Z → 2022-03-31T05:57:40.000Z
issues: test/core/024-sc4-gridftp-http/Rosetta.py | ahnitz/pegasus | e269b460f4d87eb3f3a7e91cd82e2c28fdb55573 | ["Apache-2.0"] | count 14 | events 2015-04-15T17:44:20.000Z → 2022-02-22T22:48:49.000Z
forks:  test/core/024-sc4-gridftp-http/Rosetta.py | ahnitz/pegasus | e269b460f4d87eb3f3a7e91cd82e2c28fdb55573 | ["Apache-2.0"] | count 70 | events 2015-01-22T15:20:32.000Z → 2022-02-21T22:50:23.000Z
#!/usr/bin/env python3
import logging
import sys
import subprocess
from pathlib import Path
from datetime import datetime
from Pegasus.api import *
logging.basicConfig(level=logging.DEBUG)
# --- Work Dir Setup -----------------------------------------------------------
RUN_ID = "024-sc4-gridftp-http-" + datetime.now().strftime("%s")
TOP_DIR = Path.cwd()
WORK_DIR = TOP_DIR / "work"
try:
Path.mkdir(WORK_DIR)
except FileExistsError:
pass
# --- Configuration ------------------------------------------------------------
print("Generating pegasus.properties at: {}".format(TOP_DIR / "pegasus.properties"))
props = Properties()
props["pegasus.dir.useTimestamp"] = "true"
props["pegasus.dir.storage.deep"] = "false"
props["pegasus.data.configuration"] = "nonsharedfs"
with (TOP_DIR / "pegasus.properties").open(mode="w") as f:
props.write(f)
# --- Sites --------------------------------------------------------------------
print("Generating site catalog at: sites.yml")
LOCAL = "local"
CONDOR_POOL = "condorpool"
STAGING_SITE = "staging_site"
try:
pegasus_config = subprocess.run(
["pegasus-config", "--bin"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
except FileNotFoundError:
    print("Unable to find pegasus-config")
    sys.exit(1)  # pegasus_config is unbound here, so exit before the assert below
assert pegasus_config.returncode == 0
PEGASUS_BIN_DIR = pegasus_config.stdout.decode().strip()
sites = """
pegasus: "5.0"
sites:
-
name: "condor_pool"
arch: "x86_64"
os.type: "linux"
profiles:
condor:
universe: "vanilla"
pegasus:
style: "condor"
-
name: "staging_site"
arch: "x86_64"
os.type: "linux"
directories:
-
type: "sharedScratch"
path: "/lizard/scratch-90-days/http-scratch/ptesting"
fileServers:
-
operation: "get"
url: "http://workflow.isi.edu/shared-scratch/ptesting"
-
operation: "put"
url: "gsiftp://workflow.isi.edu/lizard/scratch-90-days/http-scratch/ptesting"
-
name: "local"
arch: "x86_64"
os.type: "linux"
os.release: "rhel"
os.version: "7"
directories:
-
type: "sharedScratch"
path: "{work_dir}/scratch"
fileServers:
-
operation: "all"
url: "file://{work_dir}/scratch"
-
type: "localStorage"
path: "{work_dir}/outputs"
fileServers:
-
operation: "all"
url: "file://{work_dir}/outputs"
profiles:
env:
PEGASUS_BIN_DIR: "{pegasus_bin_dir}"
""".format(
work_dir=str(WORK_DIR), pegasus_bin_dir=PEGASUS_BIN_DIR
)
with (TOP_DIR / "sites.yml").open(mode="w") as f:
f.write(sites)
# --- Transformations ----------------------------------------------------------
rosetta_exe = Transformation(
"rosetta.exe",
arch=Arch.X86_64,
os_type=OS.LINUX,
site="local",
pfn="file://" + str(TOP_DIR / "rosetta.exe"),
is_stageable=True,
).add_pegasus_profile(clusters_size=3)
tc = TransformationCatalog().add_transformations(rosetta_exe)
# --- Replicas & Workflow ------------------------------------------------------
rc = ReplicaCatalog()
# add all files in minirosetta_database
inputs = list()
get_files(Path("minirosetta_database"))
f1 = File("design.resfile")
inputs.append(f1)
rc.add_replica(LOCAL, f1, str(Path("design.resfile").resolve()))
f2 = File("repack.resfile")
inputs.append(f2)
rc.add_replica(LOCAL, f2, str(Path("repack.resfile").resolve()))
wf = Workflow("rosetta")
pdb_files = list(Path("pdbs").iterdir())
for i in range(10):
current_file = pdb_files[i]
if current_file.is_file():
job = (
Job(rosetta_exe, _id=current_file.name.replace(".pdb", ""))
.add_inputs(File(current_file.name), *inputs)
.add_outputs(File(current_file.name + ".score.sc"), register_replica=True)
.add_args(
"-in:file:s",
current_file.name,
"-out:prefix " + current_file.name + ".",
"-database ./minirosetta_database",
"-linmem_ig 10",
"-nstruct 1",
"-pert_num 2",
"-inner_num 1",
"-jd2::ntrials 1",
)
)
rc.add_replica("local", current_file.name, str(current_file.resolve()))
wf.add_jobs(job)
# write rc to separate file for registration jobs
with (TOP_DIR / "replicas.yml").open("w") as f:
rc.write(f)
wf.add_transformation_catalog(tc)
try:
wf.plan(
dir=str(WORK_DIR),
verbose=5,
sites=[CONDOR_POOL],
staging_sites={CONDOR_POOL: STAGING_SITE},
)
except PegasusClientError as e:
print(e.output)
| 24.932642 | 86 | 0.588113 |
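The workflow above calls `get_files(Path("minirosetta_database"))`, but the helper's definition was truncated. A sketch of what it plausibly does, following the replica registrations visible elsewhere in the record; the recursive walk is an assumption:

```python
# Hypothetical reconstruction of the truncated get_files helper.
from pathlib import Path
from Pegasus.api import File, ReplicaCatalog

rc = ReplicaCatalog()  # stands in for the rc defined in the record
inputs = []            # job-input accumulator, as in the record

def get_files(d: Path) -> None:
    """Register every file under d as a local replica and track it as an input."""
    for p in d.rglob("*"):
        if p.is_file():
            f = File(p.name)
            inputs.append(f)
            rc.add_replica("local", f, str(p.resolve()))
```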
hexsha 0ef391d627e7c29662611237b93dc0cbb0bb55b3 | size 1,600 | ext py | lang Python
stars:  tests/nls_smoother_test.py | sisl/CEEM | 6154587fe3cdb92e8b7f70eedb1262caa1553cc8 | ["MIT"] | count 5 | events 2020-06-21T16:50:42.000Z → 2021-03-14T04:02:01.000Z
issues: tests/nls_smoother_test.py | sisl/CEEM | 6154587fe3cdb92e8b7f70eedb1262caa1553cc8 | ["MIT"] | count 1 | events 2021-03-13T07:46:36.000Z → 2021-03-16T05:14:47.000Z
forks:  tests/nls_smoother_test.py | sisl/CEEM | 6154587fe3cdb92e8b7f70eedb1262caa1553cc8 | ["MIT"] | count 1 | events 2021-03-30T12:08:20.000Z → 2021-03-30T12:08:20.000Z
import torch
from ceem.opt_criteria import *
from ceem.systems import LorenzAttractor
from ceem.dynamics import *
from ceem.smoother import *
from ceem import utils
if __name__ == '__main__':
test_smoother()
| 23.880597 | 89 | 0.6325 |
hexsha 0ef3a6ff8273269894257cdbba761bebf9bbfde6 | size 5,787 | ext py | lang Python
stars:  qiskit/visualization/pulse_v2/device_info.py | godspeed5/qiskit-terra | a5d87c3e4a663ab962704585fba0caef15061246 | ["Apache-2.0"] | count 15 | events 2020-06-29T08:33:39.000Z → 2022-02-12T00:28:51.000Z
issues: qiskit/visualization/pulse_v2/device_info.py | godspeed5/qiskit-terra | a5d87c3e4a663ab962704585fba0caef15061246 | ["Apache-2.0"] | count 4 | events 2020-11-27T09:34:13.000Z → 2021-04-30T21:13:41.000Z
forks:  qiskit/visualization/pulse_v2/device_info.py | godspeed5/qiskit-terra | a5d87c3e4a663ab962704585fba0caef15061246 | ["Apache-2.0"] | count 11 | events 2020-06-29T08:40:24.000Z → 2022-02-24T17:39:16.000Z
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""A collection of backend information formatted to generate drawing data.
This instance will be provided to generator functions. The module provides an abstract
class :py:class:``DrawerBackendInfo`` with necessary methods to generate drawing objects.
Because the data structure of the backend class may depend on the provider, this abstract class
has an abstract factory method `create_from_backend`. Each subclass should provide
the factory method which conforms to the associated provider. By default we provide
:py:class:``OpenPulseBackendInfo`` class that has the factory method taking backends
satisfying OpenPulse specification [1].
This class can be also initialized without the factory method by manually specifying
required information. This may be convenient for visualizing a pulse program for a simulator
backend that only has device Hamiltonian information. This requires two mapping objects
for channel/qubit and channel/frequency along with the system cycle time.
If that information is not provided, this class will be initialized with a set of
empty data and the drawer illustrates a pulse program without any specific information.
Reference:
- [1] Qiskit Backend Specifications for OpenQASM and OpenPulse Experiments,
https://arxiv.org/abs/1809.03452
"""
from abc import ABC, abstractmethod
from collections import defaultdict
from typing import Dict, List, Union, Optional
from qiskit import pulse
from qiskit.providers import BaseBackend, BackendConfigurationError
def get_channel_frequency(self, chan: pulse.channels.Channel) -> Union[float, None]:
"""Get frequency of given channel object."""
return self._chan_freq_map.get(chan, None)
class OpenPulseBackendInfo(DrawerBackendInfo):
"""Drawing information of backend that conforms to OpenPulse specification."""
| 39.910345 | 96 | 0.681355 |
hexsha 0ef3c67e54e013586a797d3526f9d748c2da9ba4 | size 8,401 | ext py | lang Python
stars:  django_gotolong/mfund/views.py | ParikhKadam/gotolong | 839beb8aa37055a2078eaa289b8ae05b62e8905e | ["BSD-2-Clause", "BSD-3-Clause"] | count 15 | events 2019-12-06T16:19:45.000Z → 2021-08-20T13:22:22.000Z
issues: django_gotolong/mfund/views.py | ParikhKadam/gotolong | 839beb8aa37055a2078eaa289b8ae05b62e8905e | ["BSD-2-Clause", "BSD-3-Clause"] | count 14 | events 2020-12-08T10:45:05.000Z → 2021-09-21T17:23:45.000Z
forks:  django_gotolong/mfund/views.py | ParikhKadam/gotolong | 839beb8aa37055a2078eaa289b8ae05b62e8905e | ["BSD-2-Clause", "BSD-3-Clause"] | count 9 | events 2020-01-01T03:04:29.000Z → 2021-04-18T08:42:30.000Z
# Create your views here.
from .models import Mfund
import plotly.graph_objects as go
from plotly.offline import plot
from plotly.tools import make_subplots
from django.db.models import Q
from django.conf import settings
from django.shortcuts import redirect
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.views.generic.list import ListView
from django.views import View
from django.db.models import OuterRef, Subquery, Count, Sum, Max, Min
from django.db.models.functions import Trim, Lower, Round
import pandas as pd
import csv, io
import openpyxl
from django.contrib import messages
from django.urls import reverse
from django.http import HttpResponseRedirect
from django_gotolong.lastrefd.models import Lastrefd, lastrefd_update
from django_gotolong.broker.icidir.imf.models import BrokerIcidirMf
| 31.464419 | 104 | 0.644209 |
hexsha 0ef63c39ffdfa491eb48d1233a4ab5b8fb12a49a | size 5,444 | ext py | lang Python
stars:  m3u8.py | akria00/m3u8-Downloader-master | 37bf4683b0390998a819d0bb5b8af18ffb2166f6 | ["Apache-2.0"] | count 2 | events 2020-01-10T20:31:12.000Z → 2020-03-04T19:34:15.000Z
issues: m3u8.py | akria00/m3u8-Downloader-master | 37bf4683b0390998a819d0bb5b8af18ffb2166f6 | ["Apache-2.0"] | count null | events null → null
forks:  m3u8.py | akria00/m3u8-Downloader-master | 37bf4683b0390998a819d0bb5b8af18ffb2166f6 | ["Apache-2.0"] | count 1 | events 2019-04-19T08:04:05.000Z → 2019-04-19T08:04:05.000Z
#coding: utf-8
from gevent import monkey
monkey.patch_all()
from gevent.pool import Pool
import gevent
import requests
import urllib
import os
import time
import re
import ssl
if __name__ == '__main__':
downloader = Downloader(5)
downloader.run('https://www.xiaodianying.com/filets/2069/dp.m3u8', './video',True)
| 38.609929 | 171 | 0.517083 |
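`Downloader` is referenced in the `__main__` block but its definition fell outside the content cell. A sketch consistent with `Downloader(5)` and `run(url, path, merge_flag)`; the playlist parsing and the omitted merge step are assumptions:

```python
# Hypothetical sketch of the truncated Downloader class.
import os
import requests
from gevent.pool import Pool

class Downloader:
    def __init__(self, pool_size: int):
        self.pool = Pool(pool_size)

    def run(self, m3u8_url: str, output_dir: str, merge: bool = False) -> None:
        os.makedirs(output_dir, exist_ok=True)
        playlist = requests.get(m3u8_url, timeout=30).text
        base = m3u8_url.rsplit('/', 1)[0]
        # non-comment playlist lines are the media segment URIs
        segments = [line if line.startswith('http') else f'{base}/{line}'
                    for line in playlist.splitlines()
                    if line and not line.startswith('#')]
        self.pool.map(lambda url: self._fetch(url, output_dir), segments)
        # merging the downloaded .ts segments into one file is omitted here

    @staticmethod
    def _fetch(url: str, output_dir: str) -> None:
        name = url.rsplit('/', 1)[-1].split('?')[0]
        with open(os.path.join(output_dir, name), 'wb') as f:
            f.write(requests.get(url, timeout=30).content)
```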
hexsha 0ef760e6a3a5620b5876eba10c68bc7b0bb1b6c8 | size 474 | ext py | lang Python
stars:  buzzbox/restaurants/migrations/0002_restaurant_description.py | Danielvalev/kutiika | 661b850163de942a137157a97d98d90553861044 | ["MIT"] | count null | events null → null
issues: buzzbox/restaurants/migrations/0002_restaurant_description.py | Danielvalev/kutiika | 661b850163de942a137157a97d98d90553861044 | ["MIT"] | count null | events null → null
forks:  buzzbox/restaurants/migrations/0002_restaurant_description.py | Danielvalev/kutiika | 661b850163de942a137157a97d98d90553861044 | ["MIT"] | count null | events null → null
# Generated by Django 3.2.9 on 2021-12-06 10:02
from django.db import migrations, models
| 23.7 | 102 | 0.620253 |
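As with the other migration records, the `Migration` class was truncated. From the filename `0002_restaurant_description.py`, a likely reconstruction follows; the field options are assumptions:

```python
# Hypothetical reconstruction of the truncated migration body.
from django.db import migrations, models

class Migration(migrations.Migration):

    dependencies = [
        ('restaurants', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='restaurant',
            name='description',
            field=models.TextField(blank=True, default=''),
        ),
    ]
```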
hexsha 0ef7742a3f6f5d085c7065159824fcf2edcb86c7 | size 5,910 | ext py | lang Python
stars:  src/dsrlib/ui/utils.py | fraca7/dsremap | fb8f4fb13e74b512ed0cac05387fbe9694faebcf | ["MIT"] | count 8 | events 2020-09-06T02:15:10.000Z → 2022-01-12T22:49:20.000Z
issues: src/dsrlib/ui/utils.py | fraca7/dsremap | fb8f4fb13e74b512ed0cac05387fbe9694faebcf | ["MIT"] | count 5 | events 2021-03-29T20:37:46.000Z → 2021-09-19T13:20:24.000Z
forks:  src/dsrlib/ui/utils.py | fraca7/dsremap | fb8f4fb13e74b512ed0cac05387fbe9694faebcf | ["MIT"] | count 2 | events 2020-09-16T01:45:49.000Z → 2021-06-12T12:38:15.000Z
#!/usr/bin/env python3
import os
import contextlib
from PyQt5 import QtCore, QtWidgets
from dsrlib.settings import Settings
def getSaveFilename(parent, domain, extension):
with Settings().grouped('Paths') as settings:
path = QtCore.QStandardPaths.writableLocation(QtCore.QStandardPaths.DocumentsLocation)
sname = 'save_%s' % domain
if settings.contains(sname):
path = settings.value(sname)
while True:
name, dummy = QtWidgets.QFileDialog.getSaveFileName(parent, _('Save'), path, '*.%s' % extension, options=QtWidgets.QFileDialog.DontConfirmOverwrite)
if not name:
return None
if not name.endswith('.%s' % extension):
name = '%s.%s' % (name, extension)
if os.path.exists(name):
resp = QtWidgets.QMessageBox.question(parent,
_('Overwrite file?'),
_('This file already exists. Overwrite?'),
QtWidgets.QMessageBox.Yes|QtWidgets.QMessageBox.No|QtWidgets.QMessageBox.Cancel)
if resp == QtWidgets.QMessageBox.Yes:
settings.setValue(sname, os.path.dirname(name))
return name
if resp == QtWidgets.QMessageBox.No:
continue
return None
settings.setValue(sname, os.path.dirname(name))
return name
def getOpenFilename(parent, domain, extension):
with Settings().grouped('Paths') as settings:
path = QtCore.QStandardPaths.writableLocation(QtCore.QStandardPaths.DocumentsLocation)
sname = 'open_%s' % domain
if settings.contains(sname):
path = settings.value(sname)
name, dummy = QtWidgets.QFileDialog.getOpenFileName(parent, _('Open file'), path, '*.%s' % extension if extension else '')
if name:
settings.setValue(sname, os.path.dirname(name))
return name
return None
class EnumComboBox(QtWidgets.QComboBox):
valueChanged = QtCore.pyqtSignal(object)
| 37.884615 | 160 | 0.57445 |
hexsha 0ef7cab0d5cd63afd5bc70bd0539a8ffbacf39c0 | size 37,201 | ext py | lang Python
stars:  src/tiden/tidenrunner.py | mshonichev/example_pkg | 556a703fe8ea4a7737b8cae9c5d4d19c1397a70b | ["Apache-2.0"] | count null | events null → null
issues: src/tiden/tidenrunner.py | mshonichev/example_pkg | 556a703fe8ea4a7737b8cae9c5d4d19c1397a70b | ["Apache-2.0"] | count null | events null → null
forks:  src/tiden/tidenrunner.py | mshonichev/example_pkg | 556a703fe8ea4a7737b8cae9c5d4d19c1397a70b | ["Apache-2.0"] | count null | events null → null
#!/usr/bin/env python3
#
# Copyright 2017-2020 GridGain Systems.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .tidenpluginmanager import PluginManager
from .report.steps import step, InnerReportConfig, Step, add_attachment, AttachmentType
from .util import log_print, unix_path, call_method, create_case, kill_stalled_java, exec_time
from .result import Result
from .util import write_yaml_file, should_be_skipped
from .logger import *
from .runner import get_test_modules, get_long_path_len, get_class_from_module, known_issue_str
from .priority_decorator import get_priority_key
from .sshpool import SshPool
from uuid import uuid4
from traceback import format_exc
from .runner import set_configuration_options, get_configuration_representation, get_actual_configuration
from importlib import import_module
from os import path, mkdir
from time import time
from shutil import copyfile
from os.path import join, basename
from glob import glob
import traceback
| 42.858295 | 121 | 0.597403 |
hexsha 0ef87bb853368bafa20ca953ac321175f6e8c5af | size 5,425 | ext py | lang Python
stars:  ludwig/data/cache/manager.py | ludwig-ai/ludw | b9d95bbdb474bc22260269de1bc094bc5455f37c | ["Apache-2.0"] | count 970 | events 2020-12-17T15:09:20.000Z → 2022-03-31T22:58:03.000Z
issues: ludwig/data/cache/manager.py | ludwig-ai/ludw | b9d95bbdb474bc22260269de1bc094bc5455f37c | ["Apache-2.0"] | count 503 | events 2020-12-16T21:44:40.000Z → 2022-03-31T18:21:52.000Z
forks:  ludwig/data/cache/manager.py | ludwig-ai/ludw | b9d95bbdb474bc22260269de1bc094bc5455f37c | ["Apache-2.0"] | count 145 | events 2020-12-18T07:38:30.000Z → 2022-03-29T19:05:08.000Z
import logging
import os
import re
import uuid
from pathlib import Path
from ludwig.constants import CHECKSUM, META, TEST, TRAINING, VALIDATION
from ludwig.data.cache.util import calculate_checksum
from ludwig.utils import data_utils
from ludwig.utils.fs_utils import delete, path_exists
logger = logging.getLogger(__name__)
def alphanum(v):
"""Filters a string to only its alphanumeric characters."""
return re.sub(r"\W+", "", v)
| 37.413793 | 111 | 0.650323 |
hexsha 0ef896d76fe90ca7521ad1e92767789c5b227b40 | size 2,629 | ext py | lang Python
stars:  test_calc_base.py | kshshkim/factorioCalcPy | 2a7c6ca567a3bf0d2b19f3cf0bc05274f83d4205 | ["MIT"] | count 1 | events 2021-09-21T01:42:05.000Z → 2021-09-21T01:42:05.000Z
issues: test_calc_base.py | kshshkim/factorioCalcPy | 2a7c6ca567a3bf0d2b19f3cf0bc05274f83d4205 | ["MIT"] | count null | events null → null
forks:  test_calc_base.py | kshshkim/factorioCalcPy | 2a7c6ca567a3bf0d2b19f3cf0bc05274f83d4205 | ["MIT"] | count null | events null → null
import pprint
from FactorioCalcBase.data.binary import sorted_recipe_list, production_machine_category_list_dict
from FactorioCalcBase.recipe import Recipe
from FactorioCalcBase.calculator_base import CalculatorBase
from FactorioCalcBase.dependency_dict_common_function import dict_add_number
import time
| 40.446154 | 104 | 0.637885 |
hexsha 0ef9b198b443266a7fb573c35726d29675e45f68 | size 2,561 | ext py | lang Python
stars:  lib/py/src/Thrift.py | ahfeel/thrift | 3ac3fa6fede4b2446209cfeb6fcae5900da543cc | ["BSL-1.0"] | count 3 | events 2016-02-03T07:28:51.000Z → 2017-02-28T06:20:21.000Z
issues: lib/py/src/Thrift.py | shigin/thrift | 4ca9547ffa73082fc4c3ff349dc23a1fda8dcc48 | ["BSL-1.0"] | count null | events null → null
forks:  lib/py/src/Thrift.py | shigin/thrift | 4ca9547ffa73082fc4c3ff349dc23a1fda8dcc48 | ["BSL-1.0"] | count 8 | events 2020-03-12T13:42:59.000Z → 2021-05-27T06:34:33.000Z
# Copyright (c) 2006- Facebook
# Distributed under the Thrift Software License
#
# See accompanying file LICENSE or visit the Thrift site at:
# http://developers.facebook.com/thrift/
| 23.281818 | 62 | 0.635689 |
hexsha 0ef9be0b4faecf741290076154fb3c5bae164853 | size 6,546 | ext py | lang Python
stars:  engine.py | nyumaya/wake-word-benchmark | d2f7ac091d31403f3398bc3ef2e2de4876a4629e | ["Apache-2.0"] | count null | events null → null
issues: engine.py | nyumaya/wake-word-benchmark | d2f7ac091d31403f3398bc3ef2e2de4876a4629e | ["Apache-2.0"] | count null | events null → null
forks:  engine.py | nyumaya/wake-word-benchmark | d2f7ac091d31403f3398bc3ef2e2de4876a4629e | ["Apache-2.0"] | count null | events null → null
#
# Copyright 2018 Picovoice Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from collections import namedtuple
from enum import Enum
import numpy as np
from pocketsphinx import get_model_path
from pocketsphinx.pocketsphinx import Decoder
from engines import Porcupine
from engines import snowboydetect
from engines import AudioRecognition, FeatureExtractor
SensitivityInfo = namedtuple('SensitivityInfo', 'min, max, step')
| 33.397959 | 122 | 0.669111 |
hexsha 0efb8a4758e96798acb51aad7950963bd5e398c7 | size 1,549 | ext py | lang Python
stars:  objO_and_ctxMgr/harakiri.py | thirschbuechler/didactic-barnacles | 88d0a2b572aacb2cb45e68bb4f05fa5273224439 | ["MIT"] | count null | events null → null
issues: objO_and_ctxMgr/harakiri.py | thirschbuechler/didactic-barnacles | 88d0a2b572aacb2cb45e68bb4f05fa5273224439 | ["MIT"] | count null | events null → null
forks:  objO_and_ctxMgr/harakiri.py | thirschbuechler/didactic-barnacles | 88d0a2b572aacb2cb45e68bb4f05fa5273224439 | ["MIT"] | count null | events null → null
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 20 22:18:58 2020
@author: https://stackoverflow.com/questions/293431/python-object-deleting-itself
@editor: thirschbuechler
This is probably overkill as a way to exit a with-context deliberately rather than
via an exception, but it may yet be needed, e.g. for getting rid of the visa-handle
within thvisa.
# for some reason, __enter__ does not work in the with-context
"""
# NOTE: This is Python 3 code; it should work with Python 2, but I haven't tested it.
import weakref #https://docs.python.org/3/library/weakref.html
if __name__ == '__main__': # test if called as executable, not as library
instance = InsaneClass()
instance.__enter__()
instance.commit_suicide()
#print(instance)
print(InsaneClass) # pointer
print(InsaneClass().__enter__()) # an object
print("now, something completely different!")
with InsaneClass() as i:
i.commit_suicide()
print(i)
| 29.226415 | 92 | 0.666882 |
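The `InsaneClass` the test block exercises was truncated out; the StackOverflow question credited in the docstring describes a self-deleting object along these lines. A sketch, with the context-manager methods assumed:

```python
# Hypothetical reconstruction of the truncated InsaneClass.
import weakref

class InsaneClass:
    _alive = []  # class-level strong references keep instances alive

    def __new__(cls):
        self = super().__new__(cls)
        cls._alive.append(self)
        return weakref.proxy(self)  # callers only ever hold a weak proxy

    def commit_suicide(self):
        # dropping the only strong reference lets the instance be collected
        self._alive.remove(self)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        return False
```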
hexsha 0efbf67a5c5c854b7696ec4d515b55094ea51fb7 | size 6,593 | ext py | lang Python
stars:  chapter2/gestures.py | srimani-programmer/Opencv-with-Python-Blueprints-second-Edition | 8762022a58a379229f02d7250d8344087d98516d | ["MIT"] | count 39 | events 2019-11-25T21:30:14.000Z → 2022-03-29T05:12:43.000Z
issues: chapter2/gestures.py | srimani-programmer/Opencv-with-Python-Blueprints-second-Edition | 8762022a58a379229f02d7250d8344087d98516d | ["MIT"] | count 2 | events 2020-04-19T20:38:15.000Z → 2021-09-29T05:02:48.000Z
forks:  chapter2/gestures.py | srimani-programmer/Opencv-with-Python-Blueprints-second-Edition | 8762022a58a379229f02d7250d8344087d98516d | ["MIT"] | count 29 | events 2019-12-22T15:18:18.000Z → 2021-12-25T13:52:44.000Z
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""A module containing an algorithm for hand gesture recognition"""
import numpy as np
import cv2
from typing import Tuple
__author__ = "Michael Beyeler"
__license__ = "GNU GPL 3.0 or later"
def recognize(img_gray):
"""Recognizes hand gesture in a single-channel depth image
This method estimates the number of extended fingers based on
a single-channel depth image showing a hand and arm region.
:param img_gray: single-channel depth image
:returns: (num_fingers, img_draw) The estimated number of
extended fingers and an annotated RGB image
"""
# segment arm region
segment = segment_arm(img_gray)
# find the hull of the segmented area, and based on that find the
# convexity defects
(contour, defects) = find_hull_defects(segment)
# detect the number of fingers depending on the contours and convexity
# defects, then draw defects that belong to fingers green, others red
img_draw = cv2.cvtColor(segment, cv2.COLOR_GRAY2RGB)
(num_fingers, img_draw) = detect_num_fingers(contour,
defects, img_draw)
return (num_fingers, img_draw)
def segment_arm(frame: np.ndarray, abs_depth_dev: int = 14) -> np.ndarray:
"""Segments arm region
This method accepts a single-channel depth image of an arm and
hand region and extracts the segmented arm region.
It is assumed that the hand is placed in the center of the image.
:param frame: single-channel depth image
:returns: binary image (mask) of segmented arm region, where
arm=255, else=0
"""
height, width = frame.shape
    # find the center (21x21 pixel) region of the image frame
center_half = 10 # half-width of 21 is 21/2-1
center = frame[height // 2 - center_half:height // 2 + center_half,
width // 2 - center_half:width // 2 + center_half]
# find median depth value of center region
med_val = np.median(center)
    # keep pixels whose depth lies within abs_depth_dev of the median (marked 128)
frame = np.where(abs(frame - med_val) <= abs_depth_dev,
128, 0).astype(np.uint8)
# morphological
kernel = np.ones((3, 3), np.uint8)
frame = cv2.morphologyEx(frame, cv2.MORPH_CLOSE, kernel)
# connected component
small_kernel = 3
frame[height // 2 - small_kernel:height // 2 + small_kernel,
width // 2 - small_kernel:width // 2 + small_kernel] = 128
mask = np.zeros((height + 2, width + 2), np.uint8)
flood = frame.copy()
cv2.floodFill(flood, mask, (width // 2, height // 2), 255,
flags=4 | (255 << 8))
ret, flooded = cv2.threshold(flood, 129, 255, cv2.THRESH_BINARY)
return flooded
def find_hull_defects(segment: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Find hull defects
This method finds all defects in the hull of a segmented arm
region.
:param segment: a binary image (mask) of a segmented arm region,
where arm=255, else=0
:returns: (max_contour, defects) the largest contour in the image
and all corresponding defects
"""
contours, hierarchy = cv2.findContours(segment, cv2.RETR_TREE,
cv2.CHAIN_APPROX_SIMPLE)
# find largest area contour
max_contour = max(contours, key=cv2.contourArea)
epsilon = 0.01 * cv2.arcLength(max_contour, True)
max_contour = cv2.approxPolyDP(max_contour, epsilon, True)
# find convexity hull and defects
hull = cv2.convexHull(max_contour, returnPoints=False)
defects = cv2.convexityDefects(max_contour, hull)
return max_contour, defects
def detect_num_fingers(contour: np.ndarray, defects: np.ndarray,
img_draw: np.ndarray, thresh_deg: float = 80.0) -> Tuple[int, np.ndarray]:
"""Detects the number of extended fingers
This method determines the number of extended fingers based on a
contour and convexity defects.
It will annotate an RGB color image of the segmented arm region
with all relevant defect points and the hull.
:param contours: a list of contours
:param defects: a list of convexity defects
:param img_draw: an RGB color image to be annotated
:returns: (num_fingers, img_draw) the estimated number of extended
fingers and an annotated RGB color image
"""
# if there are no convexity defects, possibly no hull found or no
# fingers extended
if defects is None:
return [0, img_draw]
# we assume the wrist will generate two convexity defects (one on each
# side), so if there are no additional defect points, there are no
# fingers extended
if len(defects) <= 2:
return [0, img_draw]
# if there is a sufficient amount of convexity defects, we will find a
# defect point between two fingers so to get the number of fingers,
# start counting at 1
num_fingers = 1
# Defects are of shape (num_defects,1,4)
for defect in defects[:, 0, :]:
# Each defect is an array of four integers.
# First three indexes of start, end and the furthest
# points respectively
# contour is of shape (num_points,1,2) - 2 for point coordinates
start, end, far = [contour[i][0] for i in defect[:3]]
# draw the hull
cv2.line(img_draw, tuple(start), tuple(end), (0, 255, 0), 2)
# if angle is below a threshold, defect point belongs to two
# extended fingers
if angle_rad(start - far, end - far) < deg2rad(thresh_deg):
# increment number of fingers
num_fingers += 1
# draw point as green
cv2.circle(img_draw, tuple(far), 5, (0, 255, 0), -1)
else:
# draw point as red
cv2.circle(img_draw, tuple(far), 5, (0, 0, 255), -1)
# make sure we cap the number of fingers
return min(5, num_fingers), img_draw
def angle_rad(v1, v2):
"""Angle in radians between two vectors
This method returns the angle (in radians) between two array-like
vectors using the cross-product method, which is more accurate for
small angles than the dot-product-acos method.
"""
return np.arctan2(np.linalg.norm(np.cross(v1, v2)), np.dot(v1, v2))
def deg2rad(angle_deg):
"""Convert degrees to radians
This method converts an angle in radians e[0,2*np.pi) into degrees
e[0,360)
"""
return angle_deg / 180.0 * np.pi
| 36.425414 | 97 | 0.64341 |
hexsha 0efc0f40ba7d7a4e242df39e71061af9c4be7d55 | size 4,224 | ext py | lang Python
stars:  satt/trace/logger/panic.py | jnippula/satt | aff4562b7e94f095d2e13eb10b9ac872484bb5cd | ["Apache-2.0"] | count 54 | events 2016-11-09T13:26:40.000Z → 2019-04-30T16:29:45.000Z
issues: satt/trace/logger/panic.py | jnippula/satt | aff4562b7e94f095d2e13eb10b9ac872484bb5cd | ["Apache-2.0"] | count 2 | events 2016-11-09T13:25:19.000Z → 2017-03-27T04:09:35.000Z
forks:  satt/trace/logger/panic.py | jnippula/satt | aff4562b7e94f095d2e13eb10b9ac872484bb5cd | ["Apache-2.0"] | count 10 | events 2016-11-28T07:55:40.000Z → 2019-03-23T12:40:36.000Z
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
// Copyright (c) 2015 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
'''
""" PanicLogger RAM-tracing
"""
import sys
import time
from logger import Logger
| 38.054054 | 125 | 0.615057 |
hexsha 0efc1162d67d89e44bbe7d9f3dc36378c583e84a | size 688 | ext py | lang Python
stars:  xlab/cli.py | csalcedo001/xlab | 8c51f035a870dd57339ff0208a3ab27ef6b8b41f | ["Apache-2.0"] | count 1 | events 2022-03-23T23:44:14.000Z → 2022-03-23T23:44:14.000Z
issues: xlab/cli.py | csalcedo001/xlab | 8c51f035a870dd57339ff0208a3ab27ef6b8b41f | ["Apache-2.0"] | count null | events null → null
forks:  xlab/cli.py | csalcedo001/xlab | 8c51f035a870dd57339ff0208a3ab27ef6b8b41f | ["Apache-2.0"] | count null | events null → null
import sys
import os
from . import filesys
MAIN_USAGE_MESSAGE = """
usage: xlab command ...
Options:
positional arguments:
command
project
"""
| 16.380952 | 61 | 0.543605 |
hexsha 0efc40d3300b3d6d0a1fa06e980fe71072140597 | size 16,294 | ext py | lang Python
stars:  python/paddle/optimizer/adamw.py | jzhang533/Paddle | 3227b2c401a80104e0c01dedcef2061ffa1ebbed | ["Apache-2.0"] | count null | events null → null
issues: python/paddle/optimizer/adamw.py | jzhang533/Paddle | 3227b2c401a80104e0c01dedcef2061ffa1ebbed | ["Apache-2.0"] | count 1 | events 2021-09-07T10:31:38.000Z → 2021-09-08T09:18:20.000Z
forks:  python/paddle/optimizer/adamw.py | jzhang533/Paddle | 3227b2c401a80104e0c01dedcef2061ffa1ebbed | ["Apache-2.0"] | count null | events null → null
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .optimizer import Optimizer
from .adam import Adam
from ..fluid import core
from ..fluid import framework
from ..fluid.framework import Variable
from ..fluid.dygraph import base as imperative_base
from collections.abc import Callable  # Callable moved to collections.abc; removed from collections in Python 3.10
import paddle
_C_ops = core.ops
__all__ = []
| 42.543081 | 130 | 0.603044 |
hexsha 0efde6b5a9c1239ffa852e70caccc25e5c41c1dd | size 1,880 | ext py | lang Python
stars:  tests/resources/test_interactions.py | VinLau/BAR_API | 0719a5fbc08872f667590b27347af9bfed669bca | ["MIT"] | count 1 | events 2020-07-06T20:12:25.000Z → 2020-07-06T20:12:25.000Z
issues: tests/resources/test_interactions.py | VinLau/BAR_API | 0719a5fbc08872f667590b27347af9bfed669bca | ["MIT"] | count 37 | events 2020-06-27T02:58:23.000Z → 2022-03-29T00:35:28.000Z
forks:  tests/resources/test_interactions.py | VinLau/BAR_API | 0719a5fbc08872f667590b27347af9bfed669bca | ["MIT"] | count 9 | events 2020-06-26T23:09:16.000Z → 2022-01-26T21:20:46.000Z
from api import app
from unittest import TestCase
| 31.864407 | 86 | 0.519149 |
hexsha 0efdfc79a9eea6c3e7cf614d63469062b5917d5a | size 2,261 | ext py | lang Python
stars:  src/dialogflow-java-client-master/samples/clients/VirtualTradingAssistant/src/main/java/ai/examples/scraper/historicalScrape.py | 16kozlowskim/Group-20-SE | ceb8c319643964a3f478772d8f10090962df567c | ["MIT"] | count null | events null → null
issues: src/dialogflow-java-client-master/samples/clients/VirtualTradingAssistant/src/main/java/ai/examples/scraper/historicalScrape.py | 16kozlowskim/Group-20-SE | ceb8c319643964a3f478772d8f10090962df567c | ["MIT"] | count null | events null → null
forks:  src/dialogflow-java-client-master/samples/clients/VirtualTradingAssistant/src/main/java/ai/examples/scraper/historicalScrape.py | 16kozlowskim/Group-20-SE | ceb8c319643964a3f478772d8f10090962df567c | ["MIT"] | count null | events null → null
# install BeautifulSoup4 before running
#
# prints out historical data in csv format:
#
# [date, open, high, low, close, volume]
#
import re, csv, sys, urllib2
from bs4 import BeautifulSoup
# If the start date and end date are the same, only one value is returned;
# otherwise, multiple values are returned, which can be used for calculations.
#
# ticker (company symbol)
# interval (d (daily), m (monthly), q (quarterly), y (yearly))
# start_date (YYYYMMDD)
# end_date (YYYYMMDD)
if __name__ == '__main__':
main()
| 29.75 | 205 | 0.658116 |
hexsha 0eff08358676f71813cab0fd67b31eed87ddaad4 | size 5,460 | ext py | lang Python
stars:  client/client.py | odontomachus/hotbox | d42c48d7f056f2b1f7bd707ad674e737a3c2fe08 | ["MIT"] | count null | events null → null
issues: client/client.py | odontomachus/hotbox | d42c48d7f056f2b1f7bd707ad674e737a3c2fe08 | ["MIT"] | count null | events null → null
forks:  client/client.py | odontomachus/hotbox | d42c48d7f056f2b1f7bd707ad674e737a3c2fe08 | ["MIT"] | count null | events null → null
import sys
import io
from collections import defaultdict
import struct
from time import sleep
import queue
import threading
import serial
from serial import SerialException
RUN_LABELS = ('Time left', 'Temp 1', 'Temp 2', 'Off Goal', 'Temp Change', 'Duty cycle (/30)', 'Heating', 'Cycle', 'Total time', 'Goal temp')
MSG_RUN_STATUS = 1
MSG_CONFIG = 2
MSG_STATUS = 3
MSG_LENGTHS = {MSG_RUN_STATUS: 20, MSG_CONFIG: 9, MSG_STATUS: 5}
STATE_START = 1
STATE_ACTIVE = 2
STATE_READY = 3
STATE_BOOT = 4
STATE_INIT = 5
STATE_DISCONNECTED = 127 # can't connect to serial
HB_CYCLE = 30
def check_connection(fun):
    # the wrapper body was truncated in this record; a minimal pass-through
    # wrapper is assumed here so the decorator is at least well-defined
    def inner(*args, **kwargs):
        return fun(*args, **kwargs)
    return inner
| 26.25 | 140 | 0.512637 |
hexsha 0eff0ae716a4c5a7fc1773362d577d2a440094dc | size 2,549 | ext py | lang Python
stars:  test/functional/abc-sync-chain.py | ComputerCraftr/devault | 546b54df85e3392f85e7ea5fcd4ea9b395ba8f4c | ["MIT"] | count 35 | events 2019-02-23T06:21:13.000Z → 2021-11-15T11:35:13.000Z
issues: test/functional/abc-sync-chain.py | ComputerCraftr/devault | 546b54df85e3392f85e7ea5fcd4ea9b395ba8f4c | ["MIT"] | count 60 | events 2019-02-25T18:17:03.000Z → 2021-07-13T00:14:00.000Z
forks:  test/functional/abc-sync-chain.py | ComputerCraftr/devault | 546b54df85e3392f85e7ea5fcd4ea9b395ba8f4c | ["MIT"] | count 24 | events 2019-02-20T05:37:02.000Z → 2021-10-29T18:42:10.000Z
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Test that a node receiving many (potentially out of order) blocks exits
initial block download (IBD; this occurs once it has passed minimumchainwork)
and continues to sync without seizing.
"""
import random
from test_framework.blocktools import create_block, create_coinbase
from test_framework.mininode import (CBlockHeader,
network_thread_start,
P2PInterface,
msg_block,
msg_headers)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import wait_until, p2p_port
NUM_IBD_BLOCKS = 50
if __name__ == '__main__':
SyncChainTest().main()
| 31.085366 | 77 | 0.629266 |
hexsha 16004b3ebbf7944e6af5eebfe55aa2baa0c582bb | size 1,325 | ext py | lang Python
stars:  djangostagram/posts/models.py | hongsemy/InstagramWithDjango | 18cb273668809fb48d829e1ac11438c51505623a | ["MIT"] | count null | events null → null
issues: djangostagram/posts/models.py | hongsemy/InstagramWithDjango | 18cb273668809fb48d829e1ac11438c51505623a | ["MIT"] | count null | events null → null
forks:  djangostagram/posts/models.py | hongsemy/InstagramWithDjango | 18cb273668809fb48d829e1ac11438c51505623a | ["MIT"] | count null | events null → null
from django.db import models
from djangostagram.users import models as user_model
# Create your models here.
# This class is inherited by other models.
# An often-used pattern.
| 30.813953 | 90 | 0.627925 |
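The trailing comments announce an abstract base class that the truncation removed. The "often-used pattern" they refer to is very likely a timestamped base model; a sketch, with the class and field names as assumptions:

```python
# Hypothetical reconstruction of the truncated abstract base model.
from django.db import models

class TimeStampedModel(models.Model):
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    class Meta:
        abstract = True  # inherited by other models; never gets its own table
```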
hexsha 160140a1d069dde69b115daae82f3d8b2a6cf9c6 | size 497 | ext py | lang Python
stars:  guillotina/contrib/workflows/events.py | rboixaderg/guillotina | fcae65c2185222272f3b8fee4bc2754e81e0e983 | ["BSD-2-Clause"] | count 173 | events 2017-03-10T18:26:12.000Z → 2022-03-03T06:48:56.000Z
issues: guillotina/contrib/workflows/events.py | rboixaderg/guillotina | fcae65c2185222272f3b8fee4bc2754e81e0e983 | ["BSD-2-Clause"] | count 921 | events 2017-03-08T14:04:43.000Z → 2022-03-30T10:28:56.000Z
forks:  guillotina/contrib/workflows/events.py | rboixaderg/guillotina | fcae65c2185222272f3b8fee4bc2754e81e0e983 | ["BSD-2-Clause"] | count 60 | events 2017-03-16T19:59:44.000Z → 2022-03-03T06:48:59.000Z
from guillotina.contrib.workflows.interfaces import IWorkflowChangedEvent
from guillotina.events import ObjectEvent
from zope.interface import implementer
| 31.0625 | 73 | 0.744467 |
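This record keeps only the imports of the events module. A sketch of the event class those imports point toward, modelled on guillotina's `ObjectEvent` subclasses; the constructor attributes are assumptions:

```python
# Hypothetical sketch of the truncated event class.
from guillotina.contrib.workflows.interfaces import IWorkflowChangedEvent
from guillotina.events import ObjectEvent
from zope.interface import implementer

@implementer(IWorkflowChangedEvent)
class WorkflowChangedEvent(ObjectEvent):
    """Fired after an object transitions to a new workflow state."""

    def __init__(self, object, workflow, action, comments=None):
        super().__init__(object)
        self.workflow = workflow
        self.action = action
        self.comments = comments or {}
```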
hexsha 1601ac11a20c04fcd9a8cadea05debe08ac71228 | size 6,340 | ext py | lang Python
stars:  data_steward/cdr_cleaner/cleaning_rules/covid_ehr_vaccine_concept_suppression.py | lrwb-aou/curation | e80447e56d269dc2c9c8bc79e78218d4b0dc504c | ["MIT"] | count 16 | events 2017-06-30T20:05:05.000Z → 2022-03-08T21:03:19.000Z
issues: data_steward/cdr_cleaner/cleaning_rules/covid_ehr_vaccine_concept_suppression.py | lrwb-aou/curation | e80447e56d269dc2c9c8bc79e78218d4b0dc504c | ["MIT"] | count 342 | events 2017-06-23T21:37:40.000Z → 2022-03-30T16:44:16.000Z
forks:  data_steward/cdr_cleaner/cleaning_rules/covid_ehr_vaccine_concept_suppression.py | lrwb-aou/curation | e80447e56d269dc2c9c8bc79e78218d4b0dc504c | ["MIT"] | count 33 | events 2017-07-01T00:12:20.000Z → 2022-01-26T18:06:53.000Z
"""
Suppress COVID EHR vaccine concepts.
Original Issues: DC-1692
"""
# Python imports
import logging
# Project imports
from cdr_cleaner.cleaning_rules.deid.concept_suppression import AbstractBqLookupTableConceptSuppression
from constants.cdr_cleaner import clean_cdr as cdr_consts
from common import JINJA_ENV, CDM_TABLES
from utils import pipeline_logging
# Third party imports
from google.cloud.exceptions import GoogleCloudError
LOGGER = logging.getLogger(__name__)
SUPPRESSION_RULE_CONCEPT_TABLE = 'covid_vaccine_concepts'
COVID_VACCINE_CONCEPT_QUERY = JINJA_ENV.from_string("""
CREATE OR REPLACE TABLE `{{project_id}}.{{sandbox_id}}.{{concept_suppression_lookup_table}}` AS
with covid_vacc as (
SELECT *
FROM `{{project_id}}.{{dataset_id}}.concept`
WHERE (
-- done by name and vocab --
REGEXP_CONTAINS(concept_name, r'(?i)(COVID)') AND
REGEXP_CONTAINS(concept_name, r'(?i)(VAC)') AND
vocabulary_id not in ('PPI')
) OR (
-- done by code and vocab --
REGEXP_CONTAINS(concept_code, r'(207)|(208)|(210)|(211)|(212)')
and vocabulary_id = 'CVX'
) OR (
-- done by code and vocab --
REGEXP_CONTAINS(concept_code, r'(91300)|(91301)|(91302)|(91303)|(91304)')
and vocabulary_id = 'CPT4'
)
),
concepts_via_cr as (
select distinct c.*
from `{{project_id}}.{{dataset_id}}.concept`as c
left join `{{project_id}}.{{dataset_id}}.concept_relationship`
on c.concept_id = concept_id_1
where concept_id_2 in (select concept_id from covid_vacc)
# and concept_id_1 not in (select concept_id from covid_vacc)
and (
relationship_id not in ('Subsumes', 'RxNorm dose form of', 'Dose form group of', 'RxNorm - SPL') OR
(relationship_id = 'RxNorm - SPL' and REGEXP_CONTAINS(concept_name, r'(?i)(COVID)'))
)
),
concepts_via_ca as (
select c.*
from `{{project_id}}.{{dataset_id}}.concept`as c
left join `{{project_id}}.{{dataset_id}}.concept_ancestor` as ca
on c.concept_id = ca.descendant_concept_id
where ca.ancestor_concept_id in (select concept_id from covid_vacc)
)
select distinct * from covid_vacc
union distinct
select distinct * from concepts_via_ca
union distinct
select distinct * from concepts_via_cr
""")
if __name__ == '__main__':
import cdr_cleaner.args_parser as parser
import cdr_cleaner.clean_cdr_engine as clean_engine
ARGS = parser.parse_args()
pipeline_logging.configure(level=logging.DEBUG, add_console_handler=True)
if ARGS.list_queries:
clean_engine.add_console_logging()
query_list = clean_engine.get_query_list(
ARGS.project_id, ARGS.dataset_id, ARGS.sandbox_dataset_id,
[(CovidEHRVaccineConceptSuppression,)])
for query in query_list:
LOGGER.info(query)
else:
clean_engine.add_console_logging(ARGS.console_log)
clean_engine.clean_dataset(ARGS.project_id, ARGS.dataset_id,
ARGS.sandbox_dataset_id,
[(CovidEHRVaccineConceptSuppression,)])
| 37.964072 | 108 | 0.681388 |
160368ea260cbc50567d2f17656bb9f30dc2af47
| 3,494 |
py
|
Python
|
pydbhub/httphub.py
|
sum3105/pydbhub
|
501ea2c0ec7785bc06a38961a1366c3c04d7fabd
|
[
"MIT"
] | 18 |
2021-06-03T14:27:55.000Z
|
2022-02-25T17:55:33.000Z
|
pydbhub/httphub.py
|
sum3105/pydbhub
|
501ea2c0ec7785bc06a38961a1366c3c04d7fabd
|
[
"MIT"
] | 3 |
2021-06-20T07:17:51.000Z
|
2021-12-10T15:24:19.000Z
|
pydbhub/httphub.py
|
sum3105/pydbhub
|
501ea2c0ec7785bc06a38961a1366c3c04d7fabd
|
[
"MIT"
] | 5 |
2021-06-29T09:50:40.000Z
|
2021-12-31T12:10:57.000Z
|
import pydbhub
from typing import Any, Dict, List, Tuple
from json.decoder import JSONDecodeError
import requests
import io
def send_request_json(query_url: str, data: Dict[str, Any]) -> Tuple[List[Any], str]:
"""
send_request_json sends a request to DBHub.io, formatting the returned result as JSON
Parameters
----------
query_url : str
url of the API endpoint
data : Dict[str, Any]
data to be processed to the server.
Returns
-------
Tuple[List[Any], str]
The returned data is
- a list of JSON object.
- a string describe error if occurs
"""
try:
headers = {'User-Agent': f'pydbhub v{pydbhub.__version__}'}
response = requests.post(query_url, data=data, headers=headers)
response.raise_for_status()
return response.json(), None
except JSONDecodeError as e:
return None, e.args[0]
except TypeError as e:
return None, e.args[0]
except requests.exceptions.HTTPError as e:
try:
return response.json(), e.args[0]
except JSONDecodeError:
return None, e.args[0]
except requests.exceptions.RequestException as e:
        cause = e.args[0]
        return None, str(cause)
def send_request(query_url: str, data: Dict[str, Any]) -> Tuple[List[bytes], str]:
"""
send_request sends a request to DBHub.io.
Parameters
    ----------
    query_url : str
        url of the API endpoint
    data : Dict[str, Any]
        data to be processed to the server.
Returns
-------
List[bytes]
database file is returned as a list of bytes
"""
try:
headers = {'User-Agent': f'pydbhub v{pydbhub.__version__}'}
response = requests.post(query_url, data=data, headers=headers)
response.raise_for_status()
return response.content, None
except requests.exceptions.HTTPError as e:
return None, e.args[0]
except requests.exceptions.RequestException as e:
        cause = e.args[0]
        return None, str(cause)
def send_upload(query_url: str, data: Dict[str, Any], db_bytes: io.BufferedReader) -> Tuple[List[Any], str]:
"""
send_upload uploads a database to DBHub.io.
Parameters
----------
query_url : str
url of the API endpoint.
data : Dict[str, Any]
data to be processed to the server.
db_bytes : io.BufferedReader
A buffered binary stream of the database file.
Returns
-------
Tuple[List[Any], str]
The returned data is
- a list of JSON object.
- a string describe error if occurs
"""
try:
headers = {'User-Agent': f'pydbhub v{pydbhub.__version__}'}
files = {"file": db_bytes}
response = requests.post(query_url, data=data, headers=headers, files=files)
response.raise_for_status()
if response.status_code != 201:
# The returned status code indicates something went wrong
try:
return response.json(), str(response.status_code)
except JSONDecodeError:
return None, str(response.status_code)
return response.json(), None
except requests.exceptions.HTTPError as e:
try:
return response.json(), e.args[0]
except JSONDecodeError:
return None, e.args[0]
except requests.exceptions.RequestException as e:
        cause = e.args[0]
        return None, str(cause)
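# Example usage (hypothetical endpoint and values, shown for illustration only;
# the DBHub.io query endpoint expects the SQL statement base64-encoded):
#
#   rows, err = send_request_json(
#       "https://api.dbhub.io/v1/query",
#       {"apikey": "YOUR_API_KEY", "dbowner": "someone",
#        "dbname": "example.sqlite", "sql": "U0VMRUNUIDE7"})
#   if err:
#       print(f"query failed: {err}")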
| 30.649123 | 108 | 0.61763 |
1603becbcb60a137e24357b35d07d2dd6b8de743
| 809 |
py
|
Python
|
test_calcscore.py
|
BrandonLeiran/bracket-scoring
|
a099e9a56ee3083c3a9db7d085b11b1dc7fe77f8
|
[
"MIT"
] | null | null | null |
test_calcscore.py
|
BrandonLeiran/bracket-scoring
|
a099e9a56ee3083c3a9db7d085b11b1dc7fe77f8
|
[
"MIT"
] | null | null | null |
test_calcscore.py
|
BrandonLeiran/bracket-scoring
|
a099e9a56ee3083c3a9db7d085b11b1dc7fe77f8
|
[
"MIT"
] | null | null | null |
import pytest
from calcscore import round_score
# you'll be picking what teams make it to the next round
# - so picking 32, then 16, then 8, 4, 2, 1...i.e. round 1-6 winners
# teams will have a name & a seed
# seed doesn't change, so maybe make that not passed around w/ results
| 31.115385 | 74 | 0.68974 |
16045e96f3ff12b08a6e4885879fa2b0a083c578
| 4,803 |
py
|
Python
|
tests/test_get.py
|
bgyori/pyobo
|
f199f62f65fc7faff307b56f979a369202c8ad33
|
[
"MIT"
] | null | null | null |
tests/test_get.py
|
bgyori/pyobo
|
f199f62f65fc7faff307b56f979a369202c8ad33
|
[
"MIT"
] | null | null | null |
tests/test_get.py
|
bgyori/pyobo
|
f199f62f65fc7faff307b56f979a369202c8ad33
|
[
"MIT"
] | null | null | null |
import unittest
from operator import attrgetter
import obonet
from pyobo import SynonymTypeDef, get
from pyobo.struct import Reference
from pyobo.struct.struct import (
iterate_graph_synonym_typedefs, iterate_graph_typedefs, iterate_node_parents, iterate_node_properties,
iterate_node_relationships, iterate_node_synonyms, iterate_node_xrefs,
)
from tests.constants import TEST_CHEBI_OBO_PATH
| 39.694215 | 111 | 0.636269 |
16054aa866f43fe130ae74a4adb86263728710d3
| 2,676 |
py
|
Python
|
src/commons.py
|
ymontilla/WebScrapingCatastro
|
a184b5c92199305e28ca7346c01d1e78e0a92c13
|
[
"MIT"
] | null | null | null |
src/commons.py
|
ymontilla/WebScrapingCatastro
|
a184b5c92199305e28ca7346c01d1e78e0a92c13
|
[
"MIT"
] | null | null | null |
src/commons.py
|
ymontilla/WebScrapingCatastro
|
a184b5c92199305e28ca7346c01d1e78e0a92c13
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# +
## Common utilities shared between Places and OSM.
# +
import csv
import ast
import codecs
from math import cos, asin, sqrt
# +
# -
import pandas as pd
def distance(lat1, lon1, lat2, lon2):
"""
    The result of the distance measurement is in kilometers.
"""
p = 0.017453292519943295 #Pi/180
a = 0.5 - cos((lat2 - lat1) * p)/2 + cos(lat1 * p) * cos(lat2 * p) * (1 - cos((lon2 - lon1) * p)) / 2
return 12742 * asin(sqrt(a))
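# Example (hypothetical coordinates, result in kilometers):
#   distance(4.60971, -74.08175, 4.65854, -74.09353)  # ~5.6 km within Bogotá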
"""
The process is very heavy and it is not possible to run the analysis over all of Bogotá's data; the
number of records is too large to fit in memory. The correct usage is to filter the data before doing
the cross join.
"""
| 36.162162 | 114 | 0.612855 |
160586a7f083f1efa16456b4bf747dcafc4be695
| 7,851 |
py
|
Python
|
GamesGetter.py
|
JamescMcE/BasketBet
|
f87719ac793ea50822e8c52fc23191dba9ad6418
|
[
"CC0-1.0"
] | null | null | null |
GamesGetter.py
|
JamescMcE/BasketBet
|
f87719ac793ea50822e8c52fc23191dba9ad6418
|
[
"CC0-1.0"
] | null | null | null |
GamesGetter.py
|
JamescMcE/BasketBet
|
f87719ac793ea50822e8c52fc23191dba9ad6418
|
[
"CC0-1.0"
] | null | null | null |
#This script imports game data from ESPN and odds from the ODDS-API, then inserts them into a MySQL table, example in workbench here https://puu.sh/HOKCj/ce199eec8e.png
import mysql.connector
import requests
import json
import datetime
import time
#Connection to the MYSQL Server.
mydb = mysql.connector.connect(
host="",
user="",
password="",
database="basketbet_data"
)
mycursor = mydb.cursor()
#Games List.
allGames=[]
#Gets the game Data from ESPN API given the link.
#Gets the Odds from the ODDS-API.
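#NOTE: the two helper functions referenced below were elided from this listing.
#Minimal sketches are reconstructed here so the script reads end to end; the
#JSON field names follow the public ESPN scoreboard shape and are assumptions,
#not the verbatim code.
def newGetter(url):
    """Fetch one day's games from the ESPN API and append rows to allGames."""
    scoreboard = requests.get(url).json()
    for event in scoreboard.get('events', []):
        competition = event['competitions'][0]
        home = competition['competitors'][0]
        away = competition['competitors'][1]
        allGames.append([
            event['id'],                                   # Game_ID
            event['name'],                                 # Game_Name
            home['team']['displayName'],                   # Home_Team
            home.get('score', '0'),                        # Home_Score
            away['team']['displayName'],                   # Away_Team
            away.get('score', '0'),                        # Away_Score
            event['date'][:10],                            # Game_Date
            event['date'][11:16],                          # Game_Time
            competition['status']['period'],               # Game_Period
            competition['status']['type']['description'],  # Game_Status
        ])
def oddsGetter():
    """Fetch h2h odds from the ODDS-API and update Home_Odds/Away_Odds."""
    response = requests.get('https://api.the-odds-api.com/v3/odds',
                            params={'api_key': '<ODDS_API_KEY>',
                                    'sport': 'basketball_nba',
                                    'region': 'us', 'mkt': 'h2h'})
    for game in response.json().get('data', []):
        # Matching each odds row back to its game and updating the Home_Odds
        # and Away_Odds columns would go here; omitted in this sketch.
        pass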
#Block to keep the script running then sleep for time 300 with counter set at 72 for Games every 5min | Odds every 6hr.
counter=72
startTime = time.time()
while True:
#Today, Yesterday and Tomorrow.
today = datetime.date.today()
yesterday = today + datetime.timedelta(days=-1)
tomorrow = today + datetime.timedelta(days=1)
#Removing the - from the dates for the URLs, then making the URLs.
todayShort = str(today).replace('-', '')
yesterdayShort = str(yesterday).replace('-', '')
tomorrowShort = str(tomorrow).replace('-', '')
yesterdayUrl = "http://site.api.espn.com/apis/site/v2/sports/basketball/nba/scoreboard?dates=" + yesterdayShort + '-' + yesterdayShort
todayUrl = "http://site.api.espn.com/apis/site/v2/sports/basketball/nba/scoreboard?dates=" + todayShort + '-' + todayShort
tomorrowUrl = "http://site.api.espn.com/apis/site/v2/sports/basketball/nba/scoreboard?dates=" + tomorrowShort + '-' + tomorrowShort
newGetter(yesterdayUrl)
newGetter(todayUrl)
newGetter(tomorrowUrl)
#Inserting or updating the table in MYSQL with the games.
c=0
updateCount=0
newGameCount=0
while c < len(allGames):
query_string = 'SELECT * FROM basketbet_data.all_games WHERE Game_ID = %s'
gameID = (str(allGames[c][0]),)
mycursor.execute(query_string, gameID)
if mycursor.fetchone():
updateCount+=1
query_list = [allGames[c][1], allGames[c][2], allGames[c][4], allGames[c][5], allGames[c][3], allGames[c][6], allGames[c][7], allGames[c][8], allGames[c][9], allGames[c][0]]
query_string = 'UPDATE all_games SET Game_Name = %s, Home_Team = %s, Away_Team = %s, Away_Score = %s, Home_Score = %s, Game_Date = %s, Game_Time = %s, Game_Period = %s, Game_Status = %s WHERE (Game_ID = %s)'
mycursor.execute(query_string, query_list)
mydb.commit()
else:
newGameCount+=1
query_string = "INSERT INTO basketbet_data.all_games (Game_ID, Game_Name, Home_Team, Home_Odds, Home_Score, Away_Team, Away_Odds, Away_Score, Game_Date, Game_Time, Game_Period, Game_Status) VALUES (%s, %s, %s, 0, %s, %s, 0, %s, %s, %s, %s, %s)"
mycursor.execute(query_string, allGames[c])
mydb.commit()
c+=1
#Prints to console what games were updated and what new games were inserted.
print('----------------------------------------')
print(str(updateCount) + ' GAMES UPDATED, and ' + str(newGameCount) + ' NEW GAMES inserted.')
print('----------------------------------------')
allGames=[]
#Counter for the Odds script.
if counter==72:
oddsGetter()
counter=0
else:
counter+=1
print('\n')
time.sleep(300 - ((time.time() - startTime) % 300))
| 42.668478 | 257 | 0.584639 |
16071d9e180a990b1f3b40b4034a6c704c0e2258
| 4,302 |
py
|
Python
|
neurodocker/tests/test_neurodocker.py
|
effigies/neurodocker
|
4b0f32d2915b8b0308e3e391d534e05eb29b8d09
|
[
"Apache-2.0"
] | 1 |
2021-01-27T06:00:35.000Z
|
2021-01-27T06:00:35.000Z
|
neurodocker/tests/test_neurodocker.py
|
giovtorres/neurodocker
|
65575f5e44f2c5ef96a5da51d0df54b1af80bb79
|
[
"Apache-2.0"
] | null | null | null |
neurodocker/tests/test_neurodocker.py
|
giovtorres/neurodocker
|
65575f5e44f2c5ef96a5da51d0df54b1af80bb79
|
[
"Apache-2.0"
] | null | null | null |
"""Tests for neurodocker.main"""
# Author: Jakub Kaczmarzyk <[email protected]>
from __future__ import absolute_import, unicode_literals
import sys
import pytest
from neurodocker.neurodocker import create_parser, parse_args, main
| 31.173913 | 79 | 0.58066 |
1607f8c0c3d6768327bf886d9e6092523f205171
| 2,778 |
py
|
Python
|
fuzzers/011-cle-ffconfig/generate.py
|
tmichalak/prjuray
|
53f3c94b58ffc6d405ac20a3b340ae726717ed47
|
[
"0BSD"
] | 39 |
2020-07-17T19:43:40.000Z
|
2022-01-07T02:05:48.000Z
|
fuzzers/011-cle-ffconfig/generate.py
|
tmichalak/prjuray
|
53f3c94b58ffc6d405ac20a3b340ae726717ed47
|
[
"0BSD"
] | 24 |
2020-07-17T20:15:54.000Z
|
2022-01-21T08:29:51.000Z
|
fuzzers/011-cle-ffconfig/generate.py
|
tmichalak/prjuray
|
53f3c94b58ffc6d405ac20a3b340ae726717ed47
|
[
"0BSD"
] | 11 |
2020-07-17T19:43:45.000Z
|
2022-02-09T08:43:23.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 The Project U-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
'''
FDCE Primitive: D Flip-Flop with Clock Enable and Asynchronous Clear
FDPE Primitive: D Flip-Flop with Clock Enable and Asynchronous Preset
FDRE Primitive: D Flip-Flop with Clock Enable and Synchronous Reset
FDSE Primitive: D Flip-Flop with Clock Enable and Synchronous Set
LDCE Primitive: Transparent Data Latch with Asynchronous Clear and Gate Enable
LDPE Primitive: Transparent Data Latch with Asynchronous Preset and Gate Enable
'''
from prims import isff, isl
from utils.segmaker import Segmaker
segmk = Segmaker("design.bits", bits_per_word=16)
def loadtop():
'''
i,prim,loc,bel
0,FDPE,SLICE_X12Y100,C5FF
1,FDPE,SLICE_X15Y100,A5FF
2,FDPE_1,SLICE_X16Y100,B5FF
3,LDCE_1,SLICE_X17Y100,BFF
'''
f = open('top.txt', 'r')
f.readline()
ret = {}
for l in f:
i, prim, loc, bel, init = l.split(",")
i = int(i)
init = int(init)
ret[loc] = (i, prim, loc, bel, init)
return ret
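def vs2i(s):
    # Assumed helper (elided from this listing): convert a Verilog-style value
    # string such as "1'b0" or "32'h0000_0000" into a Python int.
    radix2base = {'b': 2, 'o': 8, 'd': 10, 'h': 16}
    _width, rest = s.split("'")
    return int(rest[1:].replace('_', ''), radix2base[rest[0].lower()])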
top = loadtop()
print("Loading tags from design.txt")
with open("design.txt", "r") as f:
for line in f:
'''
puts $fp "$type $tile $grid_x $grid_y $ff $bel_type $used $usedstr"
CLEM CLEM_X10Y137 30 13 SLICE_X13Y137/AFF REG_INIT 1 FDRE
CLEM CLEM_X10Y137 30 13 SLICE_X12Y137/D2FF FF_INIT 0
'''
line = line.split()
tile_type = line[0]
tile_name = line[1]
grid_x = line[2]
grid_y = line[3]
# Other code uses BEL name
# SLICE_X12Y137/D2FF
site_ff_name = line[4]
site, ff_name = site_ff_name.split('/')
ff_type = line[5]
used = int(line[6])
cel_prim = None
cel_name = None
if used:
cel_name = line[7]
cel_prim = line[8]
cinv = int(line[9])
init = vs2i(line[10])
# A B C D E F G H
which = ff_name[0]
# LUT6 vs LUT5 FF
is2 = '2' in ff_name
if used:
segmk.add_site_tag(site, "%s.ZINI" % ff_name, 1 ^ init)
'''
On name:
The primitives you listed have a control input to set the FF value to zero (clear/reset),
the other three primitives have a control input that sets the FF value to one.
Z => inversion
'''
segmk.add_site_tag(site, "%s.ZRST" % ff_name,
cel_prim in ('FDRE', 'FDCE', 'LDCE'))
segmk.compile()
segmk.write()
| 28.060606 | 101 | 0.596472 |
1608246c353096fff06ae6f3c3c9e80955bceb92
| 2,697 |
py
|
Python
|
hmc/integrators/states/riemannian_leapfrog_state.py
|
JamesBrofos/Thresholds-in-Hamiltonian-Monte-Carlo
|
7ee1b530db0eb536666dbc872fbf8200e53dd49b
|
[
"MIT"
] | 1 |
2021-11-23T15:40:07.000Z
|
2021-11-23T15:40:07.000Z
|
hmc/integrators/states/riemannian_leapfrog_state.py
|
JamesBrofos/Thresholds-in-Hamiltonian-Monte-Carlo
|
7ee1b530db0eb536666dbc872fbf8200e53dd49b
|
[
"MIT"
] | null | null | null |
hmc/integrators/states/riemannian_leapfrog_state.py
|
JamesBrofos/Thresholds-in-Hamiltonian-Monte-Carlo
|
7ee1b530db0eb536666dbc872fbf8200e53dd49b
|
[
"MIT"
] | null | null | null |
from typing import Callable
import numpy as np
from hmc.integrators.states.leapfrog_state import LeapfrogState
from hmc.integrators.fields import riemannian
from hmc.linalg import solve_psd
| 32.890244 | 104 | 0.68743 |
1608a15c941a14be0a253388b661310efd0d4787
| 2,834 |
py
|
Python
|
MultirangerTest.py
|
StuartLiam/DroneNavigationOnboard
|
11ac6a301dfc72b15e337ddf09f5ddc79265a03f
|
[
"MIT"
] | null | null | null |
MultirangerTest.py
|
StuartLiam/DroneNavigationOnboard
|
11ac6a301dfc72b15e337ddf09f5ddc79265a03f
|
[
"MIT"
] | null | null | null |
MultirangerTest.py
|
StuartLiam/DroneNavigationOnboard
|
11ac6a301dfc72b15e337ddf09f5ddc79265a03f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2017 Bitcraze AB
#
# Crazyflie Python Library
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""
Example script that allows a user to "push" the Crazyflie 2.0 around
using your hands while it's hovering.
This example uses the Flow and Multi-ranger decks to measure distances
in all directions and tries to keep away from anything that comes closer
than 0.2m by setting a velocity in the opposite direction.
The demo is ended by either pressing Ctrl-C or by holding your hand above the
Crazyflie.
For the example to run the following hardware is needed:
* Crazyflie 2.0
* Crazyradio PA
* Flow deck
* Multiranger deck
"""
import logging
import sys
import time
import cflib.crtp
from cflib.crazyflie import Crazyflie
from cflib.crazyflie.syncCrazyflie import SyncCrazyflie
from cflib.positioning.motion_commander import MotionCommander
from cflib.utils.multiranger import Multiranger
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import matplotlib.patches as patches
URI = 'radio://0/80/2M'
if len(sys.argv) > 1:
URI = sys.argv[1]
# Only output errors from the logging framework
logging.basicConfig(level=logging.ERROR)
if __name__ == '__main__':
# Initialize the low-level drivers (don't list the debug drivers)
cflib.crtp.init_drivers(enable_debug_driver=False)
rangeArray = []
cf = Crazyflie(rw_cache='./cache')
with SyncCrazyflie(URI, cf=cf) as scf:
with MotionCommander(scf) as motion_commander:
with Multiranger(scf) as multiranger:
motion_commander.start_turn_left(90)
rangeArray.append(multiranger.front)
time.sleep(0.05)
plt.plot(rangeArray)
| 31.488889 | 77 | 0.693013 |
160b1c97ac3f8a38cfc9b68c4f0651550e3df491
| 266 |
py
|
Python
|
employees/choices.py
|
sauli6692/barbershop
|
862357bd78235e720b2e3b868d2423a57bb4e328
|
[
"MIT"
] | null | null | null |
employees/choices.py
|
sauli6692/barbershop
|
862357bd78235e720b2e3b868d2423a57bb4e328
|
[
"MIT"
] | null | null | null |
employees/choices.py
|
sauli6692/barbershop
|
862357bd78235e720b2e3b868d2423a57bb4e328
|
[
"MIT"
] | null | null | null |
from django.utils.translation import ugettext_lazy as _
USER_TYPE_STAFF = 'STAFF'
USER_TYPE_ADMIN = 'ADMIN'
USER_TYPE_BARBER = 'BARBER'
USER_TYPE_CHOICES = (
(USER_TYPE_STAFF, _('Dev')),
(USER_TYPE_ADMIN, _('Admin')),
(USER_TYPE_BARBER, _('Barber')),
)
| 24.181818 | 55 | 0.714286 |
160b335422855d4c69636103d3682d2f66956533
| 821 |
py
|
Python
|
tools/chrome_proxy/integration_tests/chrome_proxy_pagesets/html5test.py
|
google-ar/chromium
|
2441c86a5fd975f09a6c30cddb57dfb7fc239699
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2,151 |
2020-04-18T07:31:17.000Z
|
2022-03-31T08:39:18.000Z
|
tools/chrome_proxy/integration_tests/chrome_proxy_pagesets/html5test.py
|
harrymarkovskiy/WebARonARCore
|
2441c86a5fd975f09a6c30cddb57dfb7fc239699
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 395 |
2020-04-18T08:22:18.000Z
|
2021-12-08T13:04:49.000Z
|
tools/chrome_proxy/integration_tests/chrome_proxy_pagesets/html5test.py
|
harrymarkovskiy/WebARonARCore
|
2441c86a5fd975f09a6c30cddb57dfb7fc239699
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 338 |
2020-04-18T08:03:10.000Z
|
2022-03-29T12:33:22.000Z
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from common.chrome_proxy_shared_page_state import ChromeProxySharedPageState
from telemetry.page import page as page_module
from telemetry import story
| 27.366667 | 76 | 0.751523 |
160c8a87b1d001ed3cb1d85873c9a8a8f238d3b2
| 6,537 |
py
|
Python
|
lessons/sqlite_example/database.py
|
eliranM98/python_course
|
d9431dd6c0f27fca8ca052cc2a821ed0b883136c
|
[
"MIT"
] | 6 |
2019-03-29T06:14:53.000Z
|
2021-10-15T23:42:36.000Z
|
lessons/sqlite_example/database.py
|
eliranM98/python_course
|
d9431dd6c0f27fca8ca052cc2a821ed0b883136c
|
[
"MIT"
] | 4 |
2019-09-06T10:03:40.000Z
|
2022-03-11T23:30:55.000Z
|
lessons/sqlite_example/database.py
|
eliranM98/python_course
|
d9431dd6c0f27fca8ca052cc2a821ed0b883136c
|
[
"MIT"
] | 12 |
2019-06-20T19:34:52.000Z
|
2021-10-15T23:42:39.000Z
|
"""
in this example we want to create a user credentials database with:
user_id & password
logger showing connection logs, DB version, errors during fetching & executing
"""
import sqlite3
from lessons.sqlite_example.log import create as create_logger
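# NOTE: the DataBaseExtention class was elided from this listing. A minimal
# sketch is reconstructed below so the __main__ block resolves; the method
# names follow their call sites and the bodies are assumptions, not the
# verbatim lesson code.
class _Commands:
    """Hypothetical container for the SQL strings referenced below."""
    drop_table = 'DROP TABLE IF EXISTS {};'
    create_users_table = ('CREATE TABLE IF NOT EXISTS users ('
                          'id INTEGER PRIMARY KEY AUTOINCREMENT, '
                          'user_id TEXT, password TEXT);')
    add_user = "INSERT INTO users (user_id, password) VALUES ('{}', '{}');"
class DataBaseExtention:
    def __init__(self, db_file, log):
        self.log = log
        self.command = _Commands()
        self.connection = sqlite3.connect(db_file)
        self.log.info('connected, sqlite version %s', sqlite3.sqlite_version)
    def execute(self, command):
        try:
            self.connection.execute(command)
        except sqlite3.Error as err:
            self.log.error('execute failed: %s', err)
    def fetch(self, command):
        try:
            return self.connection.execute(command).fetchall()
        except sqlite3.Error as err:
            self.log.error('fetch failed: %s', err)
            return []
    def get_user_credentials(self, id):
        return self.fetch("SELECT user_id, password FROM users WHERE id = '{}';".format(id))
    def export_from_table_to_file(self, table, file_name, titles):
        rows = self.fetch('SELECT {} FROM {};'.format(', '.join(titles), table))
        with open(file_name, 'w') as f:
            f.write(','.join(titles) + '\n')
            for row in rows:
                f.write(','.join(str(col) for col in row) + '\n')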
if "__main__" == __name__:
import os
log_file = os.path.dirname(os.path.abspath(__file__)) + '\\log.txt'
db_file = os.path.dirname(os.path.abspath(__file__)) + '\\db.db'
log = create_logger(log_file=log_file)
database = DataBaseExtention(db_file, log)
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# database.execute(database.command.drop_table.format('users'))
# database.execute(database.command.create_users_table)
# database.execute(database.command.add_user.format('cs0008', '123123a'))
# database.execute(database.command.add_user.format('af0006', '123123a'))
# database.execute(database.command.add_user.format('jh0003', '123123a'))
# database.execute(database.command.add_user.format('kb0004', '123123a'))
# database.execute(database.command.add_user.format('op0001', '123123a'))
# database.execute(database.command.add_user.format('gv0001', '123123a'))
# database.execute(database.command.add_user.format('pm0001', '123123a'))
# database.execute(database.command.add_user.format('ps0001', '123123a'))
# database.execute(database.command.add_user.format('qa0000', '123123a'))
# user_credentials = database.get_user_credentials(id='14')
# database.connection.commit()
# database.connection.close()
# print(user_credentials)
# create a simple database with websites table that includes (
# url: varchar(1024),
# popularity_score: integer,
# monthly_visitations: integer
# )
# database.command.create_websites_table = '''
# CREATE TABLE IF NOT EXISTS websites (
# id INTEGER PRIMARY KEY AUTOINCREMENT,
# url TEXT,
# popularity_score INTEGER,
# monthly_visitations INTEGER
# )
# '''
# database.command.add_website = 'INSERT INTO websites (url, popularity_score, monthly_visitations) VALUES (\'{}\', \'{}\', \'{}\');'
# database.execute(database.command.create_websites_table)
# database.execute(database.command.add_website.format('https://www.google.com', 5, 4000000000))
# database.execute(database.command.add_website.format('https://www.ynet.com', 3, 5000000))
# database.execute(database.command.add_website.format('https://www.youtube.com', 6, 1300000000))
# database.execute(database.command.add_website.format('https://www.python.org', 5, 1000000))
# database.command.get_site = 'SELECT url, popularity_score, monthly_visitations FROM websites WHERE url = \'{}\';'
# url, popularity, visitations = database.fetch(database.command.get_site.format('https://www.python.org'))[0]
#
# print(url, popularity, visitations)
database.export_from_table_to_file(
table='websites',
file_name='exported.csv',
titles=('id', 'url', 'popularity_score', 'monthly_visitations')
)
# database.connection.commit()
database.connection.close()
| 39.379518 | 137 | 0.621539 |
160d55ef119cabea32b158df2e672f5773e80b28
| 217 |
py
|
Python
|
backend/app/projectx/routing.py
|
emmawoollett/projectx
|
c061df01d581456884f46c2b8e3b478626501dec
|
[
"MIT"
] | null | null | null |
backend/app/projectx/routing.py
|
emmawoollett/projectx
|
c061df01d581456884f46c2b8e3b478626501dec
|
[
"MIT"
] | null | null | null |
backend/app/projectx/routing.py
|
emmawoollett/projectx
|
c061df01d581456884f46c2b8e3b478626501dec
|
[
"MIT"
] | null | null | null |
from django.urls import re_path
from .consumers import UserWebSocketConsumer
websocket_urlpatterns = [
re_path(r"^ws/$", UserWebSocketConsumer.as_asgi()),
]
| 21.7 | 55 | 0.801843 |
160fd3286e288456d5bdd6bcd283afcbe0cfc945
| 399 |
py
|
Python
|
aldryn_search/cms_apps.py
|
lab360-ch/aldryn-search
|
15a319edac126aa1e44f22d34a7bcb5aec3e3dde
|
[
"BSD-3-Clause"
] | 11 |
2019-03-29T10:32:13.000Z
|
2021-02-26T11:44:44.000Z
|
aldryn_search/cms_apps.py
|
lab360-ch/aldryn-search
|
15a319edac126aa1e44f22d34a7bcb5aec3e3dde
|
[
"BSD-3-Clause"
] | 23 |
2019-01-31T16:20:57.000Z
|
2021-11-10T19:57:58.000Z
|
aldryn_search/cms_apps.py
|
lab360-ch/aldryn-search
|
15a319edac126aa1e44f22d34a7bcb5aec3e3dde
|
[
"BSD-3-Clause"
] | 23 |
2019-02-14T09:59:40.000Z
|
2022-03-10T12:38:48.000Z
|
from django.utils.translation import ugettext_lazy as _
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from .conf import settings
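# NOTE: the apphook class was elided from this listing; a minimal sketch in the
# standard django CMS pattern is reconstructed here as an assumption.
class AldrynSearchApphook(CMSApp):
    name = _('Search')
    def get_urls(self, page=None, language=None, **kwargs):
        return ['aldryn_search.urls']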
if settings.ALDRYN_SEARCH_REGISTER_APPHOOK:
apphook_pool.register(AldrynSearchApphook)
| 22.166667 | 55 | 0.77193 |
161021c6a14b006c767d40fee4f27d3f18827442
| 744 |
py
|
Python
|
BizPy/openpyxl/20200513/horizontal_chart.py
|
t2y/python-study
|
52a132ea600d4696164e540d8a8f8f5fc58e097a
|
[
"Apache-2.0"
] | 18 |
2016-08-15T00:24:44.000Z
|
2020-11-30T15:11:52.000Z
|
BizPy/openpyxl/20200513/horizontal_chart.py
|
t2y/python-study
|
52a132ea600d4696164e540d8a8f8f5fc58e097a
|
[
"Apache-2.0"
] | null | null | null |
BizPy/openpyxl/20200513/horizontal_chart.py
|
t2y/python-study
|
52a132ea600d4696164e540d8a8f8f5fc58e097a
|
[
"Apache-2.0"
] | 6 |
2016-09-28T10:47:03.000Z
|
2020-10-14T10:20:06.000Z
|
import pandas as pd
from openpyxl import Workbook
from openpyxl.chart import BarChart, Reference
wb = Workbook()
ws = wb.active
df = pd.read_csv('population.csv')
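# Assumption: population.csv has a label column first (e.g. region) and a
# numeric column second (population); column B below holds the values.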
ws.append(df.columns.tolist())
for row in df.values:
ws.append(list(row))
row_length = 1 + len(df.values)
values = Reference(ws, min_col=2, max_col=2, min_row=1, max_row=row_length)
categories = Reference(ws, min_col=1, min_row=2, max_row=row_length)
chart = BarChart()
chart.type = 'bar'
chart.style = 11
chart.shape = 4
chart.title = ''
chart.x_axis.title = ''
chart.y_axis.title = ''
chart.add_data(values, titles_from_data=True)
chart.set_categories(categories)
ws.add_chart(chart, 'A9')
wb.save('population_horizontal.xlsx')
| 25.655172 | 76 | 0.72043 |
161068852c112b7ab6b2bbf31d699217b497ca00
| 462 |
py
|
Python
|
changes/api/serializer/models/logsource.py
|
alex/changes
|
69a17b4c639e7082a75d037384ccb68ead3a0b4b
|
[
"Apache-2.0"
] | 1 |
2015-11-08T13:00:44.000Z
|
2015-11-08T13:00:44.000Z
|
changes/api/serializer/models/logsource.py
|
alex/changes
|
69a17b4c639e7082a75d037384ccb68ead3a0b4b
|
[
"Apache-2.0"
] | null | null | null |
changes/api/serializer/models/logsource.py
|
alex/changes
|
69a17b4c639e7082a75d037384ccb68ead3a0b4b
|
[
"Apache-2.0"
] | null | null | null |
from changes.api.serializer import Serializer, register
from changes.models.log import LogSource
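# NOTE: the serializer class was elided from this listing. A minimal sketch in
# the register/serialize pattern used throughout changes is reconstructed here
# as an assumption about its shape.
@register(LogSource)
class LogSourceSerializer(Serializer):
    def serialize(self, instance, attrs):
        return {
            'id': instance.id.hex,
            'name': instance.name,
            'dateCreated': instance.date_created,
        }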
| 27.176471 | 55 | 0.582251 |
161139c53368ea4186cb4cad223d2c35a3e06750
| 1,246 |
py
|
Python
|
examples/prostate/data_preparation/utils/nrrd_to_nifti.py
|
IsaacYangSLA/NVFlare
|
8c6582894c9a8431f64479bc9f472fefcd71e5a7
|
[
"Apache-2.0"
] | null | null | null |
examples/prostate/data_preparation/utils/nrrd_to_nifti.py
|
IsaacYangSLA/NVFlare
|
8c6582894c9a8431f64479bc9f472fefcd71e5a7
|
[
"Apache-2.0"
] | null | null | null |
examples/prostate/data_preparation/utils/nrrd_to_nifti.py
|
IsaacYangSLA/NVFlare
|
8c6582894c9a8431f64479bc9f472fefcd71e5a7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import nibabel as nib
import nrrd
import numpy as np
parser = argparse.ArgumentParser("Convert nrrd label to nifti with reference image file for affine")
parser.add_argument("--input_path", help="Input nrrd path", type=str)
parser.add_argument("--reference_path", help="Reference image path", type=str)
parser.add_argument("--output_path", help="Output nifti path", type=str)
args = parser.parse_args()
img = nib.load(args.reference_path)
img_affine = img.affine
nrrd_data, nrrd_header = nrrd.read(args.input_path)
data = np.flip(nrrd_data, axis=1)
nft_img = nib.Nifti1Image(data, img_affine)
nib.save(nft_img, args.output_path)
| 35.6 | 100 | 0.764045 |
16117ea75b817e23fa127a364786f0a599ad09cc
| 1,570 |
py
|
Python
|
setup.py
|
jszakmeister/rst2ctags
|
22f4035d9ea1e43a07b91f806014d318b3dc5097
|
[
"BSD-3-Clause"
] | 23 |
2015-03-05T14:12:08.000Z
|
2022-01-08T00:21:39.000Z
|
setup.py
|
jszakmeister/rst2ctags
|
22f4035d9ea1e43a07b91f806014d318b3dc5097
|
[
"BSD-3-Clause"
] | 8 |
2015-03-05T14:15:44.000Z
|
2020-10-02T00:16:55.000Z
|
setup.py
|
jszakmeister/rst2ctags
|
22f4035d9ea1e43a07b91f806014d318b3dc5097
|
[
"BSD-3-Clause"
] | 12 |
2015-03-05T15:12:22.000Z
|
2021-11-09T21:29:55.000Z
|
from setuptools import setup
import io
import os
import re
version_re = re.compile(r'^__version__ = "([^"]*)"$')
# Find the version number.
with open('rst2ctags.py', 'r') as f:
for line in f:
line = line.rstrip()
m = version_re.match(line)
if m:
version = m.group(1)
break
else:
raise RuntimeError("Couldn't find version string in rst2ctags.py")
# Load the description.
readme_path = os.path.join(os.path.dirname(__file__), 'README.rst')
with io.open(readme_path, encoding='utf-8') as f:
long_description = f.read()
setup(
name='rst2ctags',
description='Generates ctags-compatible output for the sections of a '
'reStructuredText document.',
long_description=long_description,
license='BSD',
author='John Szakmeister',
author_email='[email protected]',
url='https://github.com/jszakmeister/rst2ctags',
version=version,
py_modules=['rst2ctags'],
zip_safe=True,
entry_points={
'console_scripts': [
'rst2ctags = rst2ctags:cli_main',
],
},
classifiers=[
'License :: OSI Approved :: BSD License',
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Software Development',
'Topic :: Text Processing',
'Topic :: Text Processing :: Indexing',
'Topic :: Utilities',
]
)
| 26.610169 | 74 | 0.610191 |
161220d89127fbd24716ad1fd95c0f68eb787901
| 50,986 |
py
|
Python
|
py-ws/hardshare/cli.py
|
rerobots/hardshare
|
456e7d1d1eb21d03efc3cd1f7960a1729b62527b
|
[
"Apache-2.0"
] | 8 |
2020-04-14T17:19:57.000Z
|
2022-03-03T08:55:34.000Z
|
py-ws/hardshare/cli.py
|
rerobots/hardshare
|
456e7d1d1eb21d03efc3cd1f7960a1729b62527b
|
[
"Apache-2.0"
] | 11 |
2020-04-01T15:13:37.000Z
|
2021-06-15T22:10:31.000Z
|
py-ws/hardshare/cli.py
|
rerobots/hardshare
|
456e7d1d1eb21d03efc3cd1f7960a1729b62527b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright (C) 2018 rerobots, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command-line interface
"""
import argparse
import json
import logging
import logging.handlers
import os
import os.path
import subprocess
import sys
import uuid
import yaml
from aiohttp.client_exceptions import ClientConnectorError as ConnectionError
from .core import WorkspaceInstance
from .mgmt import get_local_config, add_key, add_ssh_path, list_local_keys
from .mgmt import find_wd, modify_local, rm_wd
from .api import HSAPIClient
from .err import Error as HSError
from .addons import camera_main, stop_cameras
from .addons import add_cmdsh, rm_cmdsh, add_vnc, rm_vnc, add_mistyproxy, rm_mistyproxy
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| 47.784442 | 109 | 0.530459 |
1612e716ac963ff1c93e60be69cd7a089a9ba5ac
| 3,870 |
py
|
Python
|
app/realty.py
|
JenBanks8585/Labs_CitySpireDS
|
4755bd5ce718ee2f65f6a53a5918bd0cf18b2ddf
|
[
"MIT"
] | null | null | null |
app/realty.py
|
JenBanks8585/Labs_CitySpireDS
|
4755bd5ce718ee2f65f6a53a5918bd0cf18b2ddf
|
[
"MIT"
] | null | null | null |
app/realty.py
|
JenBanks8585/Labs_CitySpireDS
|
4755bd5ce718ee2f65f6a53a5918bd0cf18b2ddf
|
[
"MIT"
] | null | null | null |
"""Realty Info"""
import os
import requests
from dotenv import load_dotenv
from fastapi import APIRouter, Depends
import sqlalchemy
from pydantic import BaseModel, SecretStr
from app import config
from app.walk_score import *
load_dotenv()
router = APIRouter()
headers = {'x-rapidapi-key': os.getenv('api_key'),
'x-rapidapi-host': os.getenv('host') }
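# NOTE: the route functions were elided from this listing. A minimal sketch of
# one endpoint is reconstructed below; the RapidAPI path and query parameters
# are assumptions, not the verbatim integration.
@router.get('/for_rent_list')
async def for_rent_list(city: str, state_code: str, limit: int = 4):
    """Fetch rental listings for a city from the realty RapidAPI (sketch)."""
    url = f"https://{os.getenv('host')}/properties/v2/list-for-rent"
    querystring = {"city": city, "state_code": state_code,
                   "limit": limit, "offset": 0, "sort": "relevance"}
    response = requests.get(url, headers=headers, params=querystring)
    return response.json()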
| 28.880597 | 100 | 0.575969 |
1614bfb3f4849c9afe583c49f1da9a5698654285
| 2,648 |
py
|
Python
|
dist/weewx-4.0.0b3/bin/weewx/junk2.py
|
v0rts/docker-weewx
|
70b2f252051dfead4fcb74e74662b297831e6342
|
[
"Apache-2.0"
] | 10 |
2017-01-05T17:30:48.000Z
|
2021-09-18T15:04:20.000Z
|
dist/weewx-4.0.0b3/bin/weewx/junk2.py
|
v0rts/docker-weewx
|
70b2f252051dfead4fcb74e74662b297831e6342
|
[
"Apache-2.0"
] | 2 |
2019-07-21T10:48:42.000Z
|
2022-02-16T20:36:45.000Z
|
dist/weewx-4.0.0b3/bin/weewx/junk2.py
|
v0rts/docker-weewx
|
70b2f252051dfead4fcb74e74662b297831e6342
|
[
"Apache-2.0"
] | 12 |
2017-01-05T18:50:30.000Z
|
2021-10-05T07:35:45.000Z
|
from __future__ import print_function
import time
import weeutil.weeutil
import weewx.manager
import weewx.xtypes
archive_sqlite = {'database_name': '/home/weewx/archive/weepwr.sdb', 'driver': 'weedb.sqlite'}
archive_mysql = {'database_name': 'weewx', 'user': 'weewx', 'password': 'weewx', 'driver': 'weedb.mysql'}
sql_str = "SELECT %s(%s), MIN(usUnits), MAX(usUnits) FROM %s " \
"WHERE dateTime > ? AND dateTime <= ?" % ('avg', 'outTemp', 'archive')
timespan = weeutil.weeutil.TimeSpan(1573245000, 1573246800)
timespan = weeutil.weeutil.TimeSpan(1573245000, 1573245000 + 600)
print('timespan=', timespan)
with weewx.manager.Manager.open(archive_sqlite) as db_manager:
interpolate_dict = {
'aggregate_type': 'diff',
'obs_type': 'ch8_a_energy2',
'table_name': db_manager.table_name,
'start': timespan.start,
'stop': timespan.stop,
}
SQL_TEMPLATE = "SELECT (ch8_a_energy2 - (SELECT ch8_a_energy2 FROM archive WHERE dateTime=%(start)s)) / (%(stop)s - %(start)s) FROM archive WHERE dateTime=%(stop)s;"
SQL_TEMPLATE = """Select a.dateTime as StartTime
, b.dateTime as EndTime
, b.dateTime-a.dateTime as TimeChange
, b.ch8_a_energy2-a.ch8_a_energy2 as ValueChange
FROM archive a
Inner Join archive b ON b.dateTime>=1573245000 AND b.dateTime<=(1573245000 + 600)"""
SQL_TEMPLATE = """Select a.dateTime as StartTime, b.datetime as EndTime, b.dateTime-a.dateTime as TimeChange, b.ch8_a_energy2-a.ch8_a_energy2 as ValueChange
FROM archive a, archive b WHERE b.dateTime = (Select MAX(c.dateTime) FROM archive c WHERE c.dateTime<=(1573245000+600)) AND a.dateTime = (SELECT MIN(dateTime) FROM archive WHERE dateTime>=1573245000);"""
SQL_TEMPLATE = """Select a.dateTime as StartTime, b.datetime as EndTime, b.dateTime-a.dateTime as TimeChange, b.ch8_a_energy2-a.ch8_a_energy2 as ValueChange
FROM archive a, archive b WHERE b.dateTime = (Select MAX(dateTime) FROM archive WHERE dateTime<=(1573245000+600)) AND a.dateTime = (SELECT MIN(dateTime) FROM archive WHERE dateTime>=1573245000);"""
SQL_TEMPLATE = "SELECT (b.%(obs_type)s - a.%(obs_type)s) / (b.dateTime-a.dateTime) "\
"FROM archive a, archive b "\
"WHERE b.dateTime = (SELECT MAX(dateTime) FROM archive WHERE dateTime <= %(stop)s) "\
"AND a.dateTime = (SELECT MIN(dateTime) FROM archive WHERE dateTime >= %(start)s);"
sql_stmt = SQL_TEMPLATE % interpolate_dict
print(sql_stmt)
# Get the number of records
with db_manager.connection.cursor() as cursor:
for row in cursor.execute(sql_stmt):
print(row)
| 50.923077 | 203 | 0.692976 |
16156ec4833837e6239f5128828011fb974363b0
| 5,868 |
py
|
Python
|
fast_lemon_api_test.py
|
a6502/fast_lemon_api
|
09a5b6eec3e84d1d006f927e502a7071a28739cc
|
[
"Unlicense"
] | null | null | null |
fast_lemon_api_test.py
|
a6502/fast_lemon_api
|
09a5b6eec3e84d1d006f927e502a7071a28739cc
|
[
"Unlicense"
] | null | null | null |
fast_lemon_api_test.py
|
a6502/fast_lemon_api
|
09a5b6eec3e84d1d006f927e502a7071a28739cc
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env pytest-3
from fastapi.testclient import TestClient
from fast_lemon_api import app
client = TestClient(app)
neworder = {
"isin": "blablablabla",
"limit_price": 0.2,
"side": "buy",
"quantity": 1,
"valid_until": 1996943663,
"status": "open"
}
order_id = None
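# NOTE: the test functions were elided from this listing. A minimal sketch of
# the first round-trip test is reconstructed below; the /orders/ route and the
# response shape are assumptions about the API under test.
def test_create_order():
    global order_id
    response = client.post('/orders/', json=neworder)
    assert response.status_code == 201
    body = response.json()
    order_id = body.get('uuid')
    assert order_id is not None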
| 30.5625 | 80 | 0.387014 |
1616161b4c2c7495b51d0bf323d5ee79ad27b64f
| 4,999 |
py
|
Python
|
tests/regenerate_credentials.py
|
andrewkozlik/pam-u2f
|
5b504783c9af972c790bdcb506867bad7df5e333
|
[
"BSD-2-Clause"
] | null | null | null |
tests/regenerate_credentials.py
|
andrewkozlik/pam-u2f
|
5b504783c9af972c790bdcb506867bad7df5e333
|
[
"BSD-2-Clause"
] | null | null | null |
tests/regenerate_credentials.py
|
andrewkozlik/pam-u2f
|
5b504783c9af972c790bdcb506867bad7df5e333
|
[
"BSD-2-Clause"
] | null | null | null |
#!/bin/python2
import collections
import re
import subprocess
import sys
PUC = "../pamu2fcfg/pamu2fcfg"
resident = ["", "-r"]
presence = ["", "-P"]
pin = ["", "-N"]
verification = ["", "-V"]
Credential = collections.namedtuple("Credential", "keyhandle pubkey attributes oldformat")
sshformat = 0
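def print_test_case(filename, sshformat, credentials):
    # Assumed helper (elided from this listing): persist the generated
    # credentials for the test suite. The real output format is not reproduced
    # here; this sketch just records the fields captured above.
    with open(filename, "w") as f:
        for cred in credentials:
            f.write("%s,%s,%s,%d,%d\n" % (cred.keyhandle, cred.pubkey,
                                          cred.attributes, cred.oldformat,
                                          sshformat))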
# Single credentials
print >> sys.stderr, "Generating single credentials"
for r in resident:
for p in presence:
for n in pin:
for v in verification:
filename = "credentials/new_" + r + p + v + n
print >> sys.stderr, "Generating " + filename + ".templ"
line = subprocess.check_output([PUC, "-u@USERNAME@", r, p, v, n])
matches = re.match(r'^.*?:(.*?),(.*?),es256,(.*)', line, re.M)
with open(filename + ".templ", "w") as outfile:
outfile.write(line)
credentials = [Credential(keyhandle = matches.group(1),
pubkey = matches.group(2),
attributes = matches.group(3),
oldformat = 0)]
print_test_case(filename + ".cred", sshformat, credentials)
# Double credentials
print >> sys.stderr, "Generating double credentials"
for r in resident:
for p in presence:
for n in pin:
for v in verification:
filename = "credentials/new_double_" + r + p + v + n
print >> sys.stderr, "Generating " + filename + ".templ"
line = subprocess.check_output([PUC, "-u@USERNAME@", r, p, v, n])
matches = re.match(r'^.*?:(.*?),(.*?),es256,(.*)', line, re.M)
with open(filename + ".templ", "w") as outfile:
outfile.write(line)
credentials = [Credential(keyhandle = matches.group(1),
pubkey = matches.group(2),
attributes = matches.group(3),
oldformat = 0)]
line = subprocess.check_output([PUC, "-n", r, p, v, n])
matches = re.match(r'^.*?:(.*?),(.*?),es256,(.*)', line, re.M)
with open(filename + ".templ", "a") as outfile:
outfile.write(line)
credentials += [Credential(keyhandle = matches.group(1),
pubkey = matches.group(2),
attributes = matches.group(3),
oldformat = 0)]
print_test_case(filename + ".cred", sshformat, credentials)
# Mixed credentials
print >> sys.stderr, "Mixed double credentials"
options = [("", ""), ("", "-P"), ("-P", ""), ("-P", "-P")]
for p1, p2 in options:
filename = "credentials/new_mixed_" + p1 +"1" + p2 + "2"
print >> sys.stderr, "Generating " + filename + ".templ"
line = subprocess.check_output([PUC, "-u@USERNAME@", p1])
matches = re.match(r'^.*?:(.*?),(.*?),es256,(.*)', line, re.M)
with open(filename + ".templ", "w") as outfile:
outfile.write(line)
credentials = [Credential(keyhandle = matches.group(1),
pubkey = matches.group(2),
attributes = matches.group(3),
oldformat = 0)]
line = subprocess.check_output([PUC, "-n", p2])
matches = re.match(r'^.*?:(.*?),(.*?),es256,(.*)', line, re.M)
with open(filename + ".templ", "a") as outfile:
outfile.write(line)
credentials += [Credential(keyhandle = matches.group(1),
pubkey = matches.group(2),
attributes = matches.group(3),
oldformat = 0)]
print_test_case(filename + ".cred", sshformat, credentials)
| 34.475862 | 109 | 0.509302 |
16173a166fd943413345036df12245c2a4ab8343
| 5,807 |
py
|
Python
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/affine_scalar_test.py
|
zhangyujing/tensorflow
|
c7a04561fb8972fb64907acc5f10f3c6d4cef9f2
|
[
"Apache-2.0"
] | 13 |
2018-07-23T18:53:35.000Z
|
2021-11-18T19:56:45.000Z
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/affine_scalar_test.py
|
zhangyujing/tensorflow
|
c7a04561fb8972fb64907acc5f10f3c6d4cef9f2
|
[
"Apache-2.0"
] | 6 |
2020-04-21T20:38:18.000Z
|
2020-06-16T01:00:15.000Z
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/affine_scalar_test.py
|
zhangyujing/tensorflow
|
c7a04561fb8972fb64907acc5f10f3c6d4cef9f2
|
[
"Apache-2.0"
] | 13 |
2018-09-07T13:28:38.000Z
|
2020-07-17T15:06:24.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Affine Scalar Tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.affine_scalar import AffineScalar
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency
from tensorflow.python.platform import test
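# NOTE: the test class was elided from this listing. A minimal sketch of one
# case (Y = X + shift with a scalar shift) is reconstructed here as an
# assumption about the suite's coverage.
class AffineScalarBijectorTest(test.TestCase):
  def testNoBatchScalarShift(self):
    with self.test_session():
      bijector = AffineScalar(shift=-1.)
      x = [1., 2., 3.]
      self.assertAllClose([0., 1., 2.], bijector.forward(x).eval())
      self.assertAllClose([2., 3., 4.], bijector.inverse(x).eval())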
if __name__ == "__main__":
test.main()
| 37.707792 | 92 | 0.646633 |
161805dd743777711d517821e54c4fec5cc46ec8
| 7,634 |
py
|
Python
|
mule/util/algorand_util.py
|
bricerisingalgorand/mule
|
721b73f691076e5c3e2ebb8a79313da486fb0f96
|
[
"MIT"
] | null | null | null |
mule/util/algorand_util.py
|
bricerisingalgorand/mule
|
721b73f691076e5c3e2ebb8a79313da486fb0f96
|
[
"MIT"
] | null | null | null |
mule/util/algorand_util.py
|
bricerisingalgorand/mule
|
721b73f691076e5c3e2ebb8a79313da486fb0f96
|
[
"MIT"
] | null | null | null |
import os
import subprocess
import json
import urllib.request
from mule.util import os_util
from mule.util import file_util
from mule.util import time_util
from mule.util import s3_util
from mule.util import semver_util
import platform
def install_node(data_dir, bin_dir, channel, node_package_version='latest'):
"""
Download and install algod.
"""
node_package_dir = file_util.ensure_folder(f"/tmp/algod-pkg-{time_util.get_timestamp()}")
data_dir = file_util.ensure_folder(data_dir)
bin_dir = file_util.ensure_folder(bin_dir)
os_type = os_util.get_os_type()
cpu_arch_type = os_util.get_cpu_arch_type()
if node_package_version == 'latest':
if channel == 'test':
node_package_version = get_latest_package_version('node', 'stable', os_type, cpu_arch_type)
else:
node_package_version = get_latest_package_version('node', channel, os_type, cpu_arch_type)
print(f"Installing {channel} node package version {node_package_version} to:\n\tbin_dir: {bin_dir}\n\tdata_dir: {data_dir}")
node_package_url = build_algo_release_url('node', channel, os_type, cpu_arch_type, node_package_version)
if channel == 'test':
node_package_url = build_algo_release_url('node', 'stable', os_type, cpu_arch_type, node_package_version)
node_package_tar_path = f"{node_package_dir}/node_package.tar.gz"
_ = urllib.request.urlretrieve(node_package_url, node_package_tar_path)
file_util.decompressTarfile(node_package_tar_path, f"{node_package_dir}")
file_util.mv_folder_contents(f"{node_package_dir}/data", data_dir)
file_util.mv_folder_contents(f"{node_package_dir}/bin", bin_dir)
if channel == 'stable':
file_util.copy_file(
os.path.join(node_package_dir, "genesis/mainnet/genesis.json"),
os.path.join(data_dir, 'genesis.json')
)
else:
file_util.copy_file(
os.path.join(node_package_dir, f"genesis/{channel}net/genesis.json"),
os.path.join(data_dir, 'genesis.json')
)
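# NOTE: the URL helpers used above were elided from this listing. A minimal
# sketch of one is reconstructed here; the release-bucket layout is an
# assumption based on the shape of algorand release URLs, and
# get_latest_package_version (list the bucket, pick the highest semver) is
# not reproduced.
def build_algo_release_url(package_type, channel, os_type, cpu_arch_type, version):
    return (
        f"https://algorand-releases.s3.amazonaws.com/channel/{channel}/"
        f"{package_type}_{channel}_{os_type}-{cpu_arch_type}_{version}.tar.gz"
    )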
| 38.361809 | 150 | 0.716793 |
161931efe310b9554c601df989d24d47e0bdfff9
| 2,490 |
py
|
Python
|
examples/showcase/src/demos_panels/scrollPanel.py
|
allbuttonspressed/pyjs
|
c726fdead530eb63ee4763ae15daaa58d84cd58f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
examples/showcase/src/demos_panels/scrollPanel.py
|
allbuttonspressed/pyjs
|
c726fdead530eb63ee4763ae15daaa58d84cd58f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
examples/showcase/src/demos_panels/scrollPanel.py
|
allbuttonspressed/pyjs
|
c726fdead530eb63ee4763ae15daaa58d84cd58f
|
[
"ECL-2.0",
"Apache-2.0"
] | 1 |
2019-11-18T14:17:59.000Z
|
2019-11-18T14:17:59.000Z
|
"""
The ``ui.ScrollPanel`` class implements a panel that scrolls its contents.
If you want the scroll bars to be always visible, call
``setAlwaysShowScrollBars(True)``. You can also change the current scrolling
position programmatically by calling ``setScrollPosition(vPos)`` and
``setScrollHorizontalPosition(hPos)`` to change the horizontal and vertical
scrolling position, respectively.
It is in the nature of a scrollpanel that if you give it a relative size, it will not work.
This makes it tricky to use it where you want it to fill out a parent widget of unknown size.
To avoid this problem you will have to wrap its content in a SimplePanel and
then use css/oveflow to control its behaviour as shown in the second example:
"container" represents the parent widget that could be any absolute or relative size and
the superscrollpanel will fill it out and apply vertical scrollbars if needed.
"""
from pyjamas.ui.SimplePanel import SimplePanel
from pyjamas.ui.ScrollPanel import ScrollPanel
from pyjamas.ui.HTML import HTML
from pyjamas.ui.VerticalPanel import VerticalPanel
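# NOTE: the demo class was elided from this listing. A minimal sketch of the
# first example described above is reconstructed here as an assumption.
class ScrollPanelDemo(SimplePanel):
    def __init__(self):
        SimplePanel.__init__(self)
        contents = HTML("The quick brown fox jumps over the lazy dog. " * 20)
        panel = ScrollPanel()
        panel.setSize("300px", "100px")
        panel.add(contents)
        self.add(panel)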
| 42.20339 | 97 | 0.677912 |
1619ba2c67e7c086f7e9ae9363f2ebb460f2febc
| 772 |
py
|
Python
|
psdn.py
|
xiongchiamiov/phone-suitable-domain-name
|
da8d28c5783415f406e19b8ef2cde4c790a4c95d
|
[
"WTFPL"
] | 3 |
2017-10-23T18:31:24.000Z
|
2021-02-01T21:22:24.000Z
|
psdn.py
|
xiongchiamiov/phone-suitable-domain-name
|
da8d28c5783415f406e19b8ef2cde4c790a4c95d
|
[
"WTFPL"
] | null | null | null |
psdn.py
|
xiongchiamiov/phone-suitable-domain-name
|
da8d28c5783415f406e19b8ef2cde4c790a4c95d
|
[
"WTFPL"
] | 1 |
2016-10-14T10:47:41.000Z
|
2016-10-14T10:47:41.000Z
|
#!/usr/bin/env python3
# May you recognize your weaknesses and share your strengths.
# May you share freely, never taking more than you give.
# May you find love and love everyone you find.
import re
import time
import whois
phone_spellable = re.compile(r'^[filoqrsuwxy]+$')
candidate_words = []
with open('/usr/share/dict/words') as f:
for word in f:
word = word.strip()
if phone_spellable.match(word):
candidate_words.append((len(word), word))
candidate_words.sort()
for word in candidate_words:
query = False
while query is False:
try:
query = whois.query('%s.com' % word[1])
except:
print("Sleeping five seconds...")
time.sleep(5)
if not query:
print(word)
| 23.393939 | 61 | 0.634715 |
161a0260062e641dc32fc774ac4b854148c5381e
| 3,310 |
py
|
Python
|
src/requester/py/ElevatorTestCaseList.py
|
akzare/Elevator_Sys_Design
|
2f7d7381d68699515a43ec4cf7a8a8afade726f3
|
[
"MIT"
] | 1 |
2020-09-03T06:36:22.000Z
|
2020-09-03T06:36:22.000Z
|
src/requester/py/ElevatorTestCaseList.py
|
akzare/Elevator_Sys_Design
|
2f7d7381d68699515a43ec4cf7a8a8afade726f3
|
[
"MIT"
] | null | null | null |
src/requester/py/ElevatorTestCaseList.py
|
akzare/Elevator_Sys_Design
|
2f7d7381d68699515a43ec4cf7a8a8afade726f3
|
[
"MIT"
] | null | null | null |
'''
* @file ElevatorTestCaseList.py
* @author Armin Zare Zadeh
* @date 30 July 2020
* @version 0.1
* @brief Implements a class to hold all the test cases during the program life cycle.
'''
#!/usr/bin/env python3
import sys
import ctypes
import ElevatorConfig as cfg
import ElevatorMsgProtocol as msgProto
| 50.151515 | 105 | 0.459517 |
161a66975b57933d5f14b6a51378ceceb0ae3ebd
| 1,725 |
py
|
Python
|
cart/views.py
|
pmaigutyak/mp-cart
|
53adbbdeea7f8f8b2d432b103f7347d89adf3e30
|
[
"0BSD"
] | 1 |
2021-09-25T14:31:48.000Z
|
2021-09-25T14:31:48.000Z
|
cart/views.py
|
pmaigutyak/mp-cart
|
53adbbdeea7f8f8b2d432b103f7347d89adf3e30
|
[
"0BSD"
] | null | null | null |
cart/views.py
|
pmaigutyak/mp-cart
|
53adbbdeea7f8f8b2d432b103f7347d89adf3e30
|
[
"0BSD"
] | 1 |
2021-04-10T18:50:47.000Z
|
2021-04-10T18:50:47.000Z
|
from django.utils.translation import ugettext
from django.views.decorators.http import require_POST
from django.http import JsonResponse
from django.shortcuts import render
from django.core.exceptions import ValidationError
from django.views.decorators.csrf import csrf_exempt
from cart.lib import get_cart
from cart.forms import SelectProductForm, SetQtyForm
| 26.136364 | 75 | 0.697391 |
161a6fecb9358040e2c0bfdcfac12240bdf3bc16
| 2,089 |
py
|
Python
|
ChessAI/src/const.py
|
darius-luca-tech/AI_Projects
|
3cff26878807121e077375e5dbef39390fea0189
|
[
"MIT"
] | 2 |
2020-07-11T14:48:27.000Z
|
2020-08-04T11:24:58.000Z
|
ChessAI/src/const.py
|
darius-luca-tech/AI_Projects
|
3cff26878807121e077375e5dbef39390fea0189
|
[
"MIT"
] | null | null | null |
ChessAI/src/const.py
|
darius-luca-tech/AI_Projects
|
3cff26878807121e077375e5dbef39390fea0189
|
[
"MIT"
] | null | null | null |
#------ game constants -----#
#players
WHITE = 0
BLACK = 1
BOTH = 2
#color for onTurnLabel
PLAYER_COLOR = ["white", "black"]
#figures
PAWN = 1
KNIGHT = 2
BISHOP = 3
ROOK = 4
QUEEN = 5
KING = 6
FIGURE_NAME = [ "", "pawn", "knight", "bishop", "rook", "queen", "king" ]
#used in move 32bit for promotion figure prom_figure = figure-2
PROM_KNIGHT = 0
PROM_BISHOP = 1
PROM_ROOK = 2
PROM_QUEEN = 3
#all lines
A, B, C, D, E, F, G, H = range(8)
#all squares
A1, B1, C1, D1, E1, F1, G1, H1, \
A2, B2, C2, D2, E2, F2, G2, H2, \
A3, B3, C3, D3, E3, F3, G3, H3, \
A4, B4, C4, D4, E4, F4, G4, H4, \
A5, B5, C5, D5, E5, F5, G5, H5, \
A6, B6, C6, D6, E6, F6, G6, H6, \
A7, B7, C7, D7, E7, F7, G7, H7, \
A8, B8, C8, D8, E8, F8, G8, H8 = range(64)
#----- game display constants -----#
DEFAULTBORDERWIDTH = 20
DEFAULTTILEWIDTH = 45
DEFAULTFONTSIZE = (7, 15)
COLORS = { "bg":"#EDC08C",
"border":"#B55602",
"tiles":("#FC9235", "#FFB87A") }
#----- move types -----#
NORMAL_MOVE, CAPTURE, PROMOTION, DOUBLE_STEP, ENPASSANT_CAPTURE, CASTLING, KING_CAPTURE = range(7)
#----- move 32bit reservation -----#
# a single move is stored in 32 bit as follows
# xxxxxxxx xx x xxx xxx xxxxxx xxxxxx xxx
# G F E D C B A
#
# A: move type (0-6)
# B: start sq (0-63)
# C: destination sq (0-63)
# D: start figure (1-6)
# E: captured figure (1-6)
# F: color of moved piece (0-1)
# G: promotion figure (0-3)
#NAME = (start_bit, length)
MOVE_TYPE = (0, 3)
MOVE_START = (3, 6)
MOVE_DEST = (9, 6)
MOVE_FIG_START = (15, 3)
MOVE_FIG_CAPTURE = (18, 3)
MOVE_COLOR = (21, 1)
MOVE_PROM = (22, 2)
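#Illustration (not from the original file): helpers that pack/unpack a move
#using the (start_bit, length) pairs above; the names are hypothetical.
def pack_field(value, field):
    start, length = field
    return (value & ((1 << length) - 1)) << start
def unpack_field(move, field):
    start, length = field
    return (move >> start) & ((1 << length) - 1)
#e.g. a white pawn double step E2 -> E4:
#  move = (pack_field(DOUBLE_STEP, MOVE_TYPE) | pack_field(E2, MOVE_START) |
#          pack_field(E4, MOVE_DEST) | pack_field(PAWN, MOVE_FIG_START) |
#          pack_field(WHITE, MOVE_COLOR))
#  unpack_field(move, MOVE_DEST) == E4  # -> True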
#----- castling -----#
CASTLING_LEFT = 0
CASTLING_RIGHT = 1
#----- player status -----#
IDELING = 0
PICKING = 1
INF = 1000000
ASCII_FIG = [[],[]]
ASCII_FIG[WHITE] = [ 'x', chr(9817), chr(9816), chr(9815), chr(9814), chr(9813), chr(9812)]
ASCII_FIG[BLACK] = [ 'x', chr(9823), chr(9822), chr(9821), chr(9820), chr(9819), chr(9818)]
#AI constants
CASTLING_RIGHT_LOSS_PENALTY = -40
| 22.706522 | 99 | 0.567736 |
161b1a291b36fd8f7983e45a6a229f8f666d35f1
| 392 |
py
|
Python
|
agent.py
|
kapzlok2408/Pokemon-Showdown-Node-Bot
|
c759eb9106fd2a3da3ebe4692a6730c37b2e5ee3
|
[
"MIT"
] | null | null | null |
agent.py
|
kapzlok2408/Pokemon-Showdown-Node-Bot
|
c759eb9106fd2a3da3ebe4692a6730c37b2e5ee3
|
[
"MIT"
] | null | null | null |
agent.py
|
kapzlok2408/Pokemon-Showdown-Node-Bot
|
c759eb9106fd2a3da3ebe4692a6730c37b2e5ee3
|
[
"MIT"
] | null | null | null |
import gym
import gym_pokemon
import random
if __name__ == "__main__":
env = gym.make("Pokemon-v0")
total_reward = 0.0
total_steps = 0
obs = env.reset()
while True:
action = random.randint(-1,8)
obs, reward, done, _ = env.step(action)
total_reward += reward
total_steps += 1
print("Currently %d steps, total reward of %.2f" % (total_steps, total_reward))
if done:
break
| 20.631579 | 81 | 0.683673 |
161b1ad3ceff80971c5c3ea0ba2b51d497a4a215
| 264 |
py
|
Python
|
Curso-Em-Video-Python/Mundo-2/EXs/EX038.py
|
victor-da-costa/Aprendendo-Python
|
8fd19b93a13953cda30de02de7dac22b4e62fb5b
|
[
"MIT"
] | null | null | null |
Curso-Em-Video-Python/Mundo-2/EXs/EX038.py
|
victor-da-costa/Aprendendo-Python
|
8fd19b93a13953cda30de02de7dac22b4e62fb5b
|
[
"MIT"
] | null | null | null |
Curso-Em-Video-Python/Mundo-2/EXs/EX038.py
|
victor-da-costa/Aprendendo-Python
|
8fd19b93a13953cda30de02de7dac22b4e62fb5b
|
[
"MIT"
] | null | null | null |
num1 = int(input('Digite o 1 nmero: '))
num2 = int(input('Digite o 2 nmero: '))
if num1 > num2:
print('O {} maior que {}'.format(num1, num2))
elif num1 < num2:
print('O {} maior que4 {}'.format(num2, num1))
else:
print('Os nmeros so iguais')
| 29.333333 | 52 | 0.613636 |
161b52cb8725f9e857d4d9abd90c6be8f1cb0dec
| 964 |
py
|
Python
|
setup.py
|
danjjl/ipyfilechooser
|
19d2e906207b2c3426675eda7889267f5956b182
|
[
"MIT"
] | null | null | null |
setup.py
|
danjjl/ipyfilechooser
|
19d2e906207b2c3426675eda7889267f5956b182
|
[
"MIT"
] | null | null | null |
setup.py
|
danjjl/ipyfilechooser
|
19d2e906207b2c3426675eda7889267f5956b182
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
from setuptools import setup, find_packages
def read(fname):
"""Open files relative to package."""
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='ipyfilechooser',
version='0.3.1',
author='Thomas Bouve (@crahan)',
author_email='[email protected]',
description=(
'Python file chooser widget for use in '
'Jupyter/IPython in conjunction with ipywidgets'
),
long_description=read('README.md'),
long_description_content_type='text/markdown',
url='https://github.com/crahan/ipyfilechooser',
license='MIT',
packages=find_packages(),
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
],
install_requires=[
'ipywidgets'
]
)
| 26.777778 | 70 | 0.637967 |
161dd7d6b32c517702822fdd2b972e9c34a403fe
| 8,759 |
py
|
Python
|
appengine/chromium_build_logs/handler.py
|
mithro/chromium-infra
|
d27ac0b230bedae4bc968515b02927cf9e17c2b7
|
[
"BSD-3-Clause"
] | 1 |
2018-01-02T05:47:07.000Z
|
2018-01-02T05:47:07.000Z
|
appengine/chromium_build_logs/handler.py
|
mithro/chromium-infra
|
d27ac0b230bedae4bc968515b02927cf9e17c2b7
|
[
"BSD-3-Clause"
] | null | null | null |
appengine/chromium_build_logs/handler.py
|
mithro/chromium-infra
|
d27ac0b230bedae4bc968515b02927cf9e17c2b7
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import appengine_config
import datetime
import json
import logging
import os.path
import pickle
import sys
import urllib
sys.path.append(
os.path.join(os.path.abspath(os.path.dirname(__file__)), 'third_party'))
from google.appengine.ext import blobstore
from google.appengine.ext import db
from google.appengine.ext import deferred
from google.appengine.ext import webapp
from google.appengine.ext.webapp import blobstore_handlers
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app
import cloudstorage
import app
import gtest_parser
# pylint: disable=pointless-string-statement
"""When displaying a list of results, how many to display on one page."""
PAGE_SIZE = 100
def _clean_int(value, default):
"""Convert a value to an int, or the default value if conversion fails."""
try:
return int(value)
except (TypeError, ValueError), _:
return default
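# NOTE: the handler classes routed below were elided from this listing. As an
# illustration of their shape, a minimal sketch of one follows; the template
# path and context are assumptions, and the remaining handlers are analogous.
class MainAction(webapp.RequestHandler):
  def get(self):
    self.response.out.write(template.render('templates/main.html', {}))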
application = webapp.WSGIApplication(
[('/', MainAction),
('/gtest_query', GTestQueryAction),
('/suppression_query', SuppressionQueryAction),
('/suppression_summary', SuppressionSummaryAction),
('/unused_suppressions', UnusedSuppressionsAction),
('/list', ListAction),
('/build_step_json', BuildStepJSONAction),
('/status_receiver', StatusReceiverAction),
('/tasks/fetch_builders', FetchBuildersAction),
('/tasks/fetch_steps', FetchStepsAction),
('/tasks/update_parsed_data', UpdateParsedDataAction),
('/viewlog/raw/(.*)', ViewRawLogAction)])
if __name__ == '__main__':
main()
| 31.170819 | 79 | 0.685352 |
161f5fc0724b14420397243336670a4b9fb7062e
| 20,580 |
py
|
Python
|
aws_lambda/pytorch/source/caffe2/python/operator_test/elementwise_op_broadcast_test.py
|
YevhenVieskov/ML-DL-in-production
|
03839abcb93a49d4f05c43aa4e446a040027cdb0
|
[
"MIT"
] | 4 |
2020-09-17T11:50:17.000Z
|
2021-08-25T06:14:10.000Z
|
aws_lambda/pytorch/source/caffe2/python/operator_test/elementwise_op_broadcast_test.py
|
YevhenVieskov/ML-DL-in-production
|
03839abcb93a49d4f05c43aa4e446a040027cdb0
|
[
"MIT"
] | null | null | null |
aws_lambda/pytorch/source/caffe2/python/operator_test/elementwise_op_broadcast_test.py
|
YevhenVieskov/ML-DL-in-production
|
03839abcb93a49d4f05c43aa4e446a040027cdb0
|
[
"MIT"
] | 6 |
2020-10-16T13:28:31.000Z
|
2021-08-25T12:08:34.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from hypothesis import given
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
# TODO(jiayq): make them hypothesis tests for better coverage.
if __name__ == "__main__":
unittest.main()
| 42 | 79 | 0.56035 |
161fe3f007696be8bbc024b9cad0f629ab8008f8
| 28,143 |
py
|
Python
|
kayobe/tests/unit/cli/test_commands.py
|
jovial/kayobe
|
49e61fef4a221ee9fcfcee2b7bac02b6acc5bd0c
|
[
"Apache-2.0"
] | null | null | null |
kayobe/tests/unit/cli/test_commands.py
|
jovial/kayobe
|
49e61fef4a221ee9fcfcee2b7bac02b6acc5bd0c
|
[
"Apache-2.0"
] | null | null | null |
kayobe/tests/unit/cli/test_commands.py
|
jovial/kayobe
|
49e61fef4a221ee9fcfcee2b7bac02b6acc5bd0c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2017 StackHPC Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
import cliff.app
import cliff.commandmanager
import mock
from kayobe.cli import commands
from kayobe import utils
| 37.324934 | 93 | 0.538855 |
1620270422616b41ca7180a5b9004dcde020933a
| 1,590 |
py
|
Python
|
keras2onnx/proto/tfcompat.py
|
CNugteren/keras-onnx
|
b3d6b6486fe56640c48c62dd098e9405e35b4e9f
|
[
"MIT"
] | 1 |
2021-04-15T16:35:54.000Z
|
2021-04-15T16:35:54.000Z
|
keras2onnx/proto/tfcompat.py
|
CNugteren/keras-onnx
|
b3d6b6486fe56640c48c62dd098e9405e35b4e9f
|
[
"MIT"
] | null | null | null |
keras2onnx/proto/tfcompat.py
|
CNugteren/keras-onnx
|
b3d6b6486fe56640c48c62dd098e9405e35b4e9f
|
[
"MIT"
] | null | null | null |
###############################################################################
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
###############################################################################
import os
import tensorflow as _tf
from distutils.version import StrictVersion
is_tf2 = StrictVersion(_tf.__version__.split('-')[0]) >= StrictVersion('2.0.0')
if is_tf2:
tensorflow = _tf.compat.v1
def is_subclassed(layer):
"""Returns True if the object is a subclassed layer or subclassed model."""
return (layer.__module__.find('keras.engine') == -1 and
layer.__module__.find('keras.layers') == -1)
else:
tensorflow = _tf
| 31.8 | 83 | 0.610692 |
16205a78e576c7488204d92806cb7a59f5ca5566
| 11,588 |
py
|
Python
|
back2back/httpmulticlient.py
|
excentis/ByteBlower_python_examples
|
0e082e17413abf5e25f6d14b85e50e7f73e7f965
|
[
"BSD-3-Clause"
] | 2 |
2018-10-04T10:55:55.000Z
|
2018-11-29T08:51:38.000Z
|
back2back/httpmulticlient.py
|
excentis/ByteBlower_python_examples
|
0e082e17413abf5e25f6d14b85e50e7f73e7f965
|
[
"BSD-3-Clause"
] | null | null | null |
back2back/httpmulticlient.py
|
excentis/ByteBlower_python_examples
|
0e082e17413abf5e25f6d14b85e50e7f73e7f965
|
[
"BSD-3-Clause"
] | 3 |
2018-10-04T10:56:29.000Z
|
2019-10-28T10:19:40.000Z
|
"""
HTTP MultiServer/MultiClient for the ByteBlower Python API.
All examples are guaranteed to work with Python 2.7 and above
Copyright 2018, Excentis N.V.
"""
# Needed for python2 / python3 print function compatibility
from __future__ import print_function
# import the ByteBlower module
import byteblowerll.byteblower as byteblower
import time
configuration = {
# Address (IP or FQDN) of the ByteBlower server to use
'server_address': 'byteblower-tp-1300.lab.byteblower.excentis.com',
# Configuration for the first ByteBlower port.
# Will be used as HTTP server.
'port_1_config': {
'interface': 'trunk-1-13',
'mac': '00:bb:01:00:00:01',
# IP configuration for the ByteBlower Port.
# Options are 'DHCPv4', 'DHCPv6', 'SLAAC', 'static'
# if DHCPv4, use "dhcpv4"
'ip': 'dhcpv4',
# if DHCPv6, use "dhcpv6"
# 'ip': 'dhcpv6',
# if SLAAC, use "slaac"
# 'ip': 'slaac',
# if staticv4, use ["ipaddress", netmask, gateway]
# 'ip': ['192.168.0.2', "255.255.255.0", "192.168.0.1"],
# if staticv6, use ["ipaddress", prefixlength]
# 'ip': ['3000:3128::24', '64'],
# TCP port number to be used by the HTTP connection.
# On the HTTP server, this will be the port on which the server
# listens.
'tcp_port': 4096
},
# Configuration for the second ByteBlower port.
# Will be used as HTTP client.
'port_2_config': {
'interface': 'trunk-1-25',
'mac': '00:bb:01:00:00:02',
# IP configuration for the ByteBlower Port.
# Options are 'DHCPv4', 'DHCPv6', 'SLAAC', 'static'
# if DHCPv4, use "dhcpv4"
'ip': 'dhcpv4',
# if DHCPv6, use "dhcpv6"
# ip': 'dhcpv6',
# if SLAAC, use "slaac"
# 'ip': 'slaac',
# if staticv4, use ["ipaddress", netmask, gateway]
# 'ip': ['192.168.0.2', "255.255.255.0", "192.168.0.1"],
# if staticv6, use ["ipaddress", prefixlength]
# 'ip': ['3000:3128::24', '64'],
# TCP port range the HTTP Clients will use to connect with
# the HTTP server
'tcp_port_min': 32000,
'tcp_port_max': 50000
},
# HTTP Method
# HTTP Method can be GET or PUT
# - GET: Standard HTTP download, we retrieve data from the web server
# - PUT: Standard HTTP upload, the wireless endpoint will push data to the
# webserver
'http_method': 'GET',
# 'http_method': 'PUT',
# total duration, in nanoseconds.
# This is the duration of the flow. When this duration expires,
# all sessions will be stopped.
'duration': 10000000000,
# session duration, in nanoseconds
# Duration of the individual sessions
# 'session_duration': 1500000000,
'session_duration': None,
# session size, in bytes
# The number of bytes transmitted by a session
'session_size': 1 * 1000 * 1000,
# 'session_size': None,
# max concurrent sessions
# Maximum number of sessions that will be running simultaneously
'max_concurrent_sessions': 100,
# maximum number of sessions
# No more than this number of sessions will be created
# 0 means no limit
'max_total_sessions': 0,
# TOS value to use on the HTTP client (and server)
'tos': 0
}
# When this python module is called stand-alone, the run-function must be
# called. This approach makes it possible to include it in a series of
# examples.
if __name__ == "__main__":
example = Example(**configuration)
try:
example.run()
finally:
example.cleanup()
| 36.440252 | 101 | 0.621764 |
16214a743fb88fbf7d2c7ed97c9778c2fbeb46d1
| 4,764 |
py
|
Python
|
tools/pod-xml-to-geojson.py
|
24-timmarsseglingarna/app
|
0c028bd2eb284c6893cb16dd91bd093b2222338f
|
[
"Apache-2.0"
] | null | null | null |
tools/pod-xml-to-geojson.py
|
24-timmarsseglingarna/app
|
0c028bd2eb284c6893cb16dd91bd093b2222338f
|
[
"Apache-2.0"
] | 14 |
2017-08-24T12:46:58.000Z
|
2021-04-21T07:56:58.000Z
|
tools/pod-xml-to-geojson.py
|
24-timmarsseglingarna/app
|
0c028bd2eb284c6893cb16dd91bd093b2222338f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Converts a PoD XML file to a GeoJSON file.
#
# With the --javascript parameter, the generated file is a javascript
# file defining a variable 'basePodSpec'.
#
# Get the PoD XML file from http://dev.24-timmars.nu/PoD/xmlapi_app.php.
import xml.etree.ElementTree as etree
import argparse
import re
import json
import io
import sys
import os.path
import datetime
if sys.version < '3':
import codecs
# points number 9000 and above are not real points; they are used to mark
# area borders
MAXPOINT=8999
if __name__ == '__main__':
run()
| 29.407407 | 79 | 0.553736 |
1621aa767e78100c7f16f615ddf74780115c4b1d
| 9,106 |
py
|
Python
|
rastervision/plugin.py
|
carderne/raster-vision
|
915fbcd3263d8f2193e65c2cd0eb53e050a47a01
|
[
"Apache-2.0"
] | 3 |
2020-07-05T04:04:18.000Z
|
2021-02-05T16:19:55.000Z
|
rastervision/plugin.py
|
carderne/raster-vision
|
915fbcd3263d8f2193e65c2cd0eb53e050a47a01
|
[
"Apache-2.0"
] | null | null | null |
rastervision/plugin.py
|
carderne/raster-vision
|
915fbcd3263d8f2193e65c2cd0eb53e050a47a01
|
[
"Apache-2.0"
] | 1 |
2020-04-27T15:21:53.000Z
|
2020-04-27T15:21:53.000Z
|
import os
import json
import importlib
from pluginbase import PluginBase
import rastervision as rv
from rastervision.protos.plugin_pb2 import PluginConfig as PluginConfigMsg
from rastervision.utils.files import download_if_needed
def load_conf_list(s):
"""Loads a list of items from the config.
Lists should be comma separated.
This takes into account that previous versions of Raster Vision
allowed for a `[ "module" ]` like syntax, even though that didn't
work for multi-value lists.
"""
try:
# A comma separated list of values will be transformed to
# having a list-like string, with ' instead of ". Replacing
# single quotes with double quotes lets us parse it as a JSON list.
return json.loads(s.replace("'", '"'))
except json.JSONDecodeError:
return list(map(lambda x: x.strip(), s.split(',')))
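# Illustrative examples (not part of the original module; the input strings are
# assumptions for demonstration) of the two syntaxes load_conf_list accepts:
#   load_conf_list("module_a, module_b")   # -> ['module_a', 'module_b']
#   load_conf_list("[ 'module_a' ]")       # -> ['module_a'] (quotes swapped, parsed as JSON)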
| 40.471111 | 80 | 0.648913 |
1621ccd669a0abec2dea3abc64d60feca57f3bfe
| 2,134 |
py
|
Python
|
acsm/nnutils/resunet.py
|
eldar/acsm
|
04069e8bb4c12185473dc10c3355e5367fa98968
|
[
"Apache-2.0"
] | 52 |
2020-04-02T12:35:55.000Z
|
2022-03-11T07:47:30.000Z
|
acsm/nnutils/resunet.py
|
eldar/acsm
|
04069e8bb4c12185473dc10c3355e5367fa98968
|
[
"Apache-2.0"
] | 8 |
2020-06-04T07:34:34.000Z
|
2021-09-18T21:17:26.000Z
|
acsm/nnutils/resunet.py
|
eldar/acsm
|
04069e8bb4c12185473dc10c3355e5367fa98968
|
[
"Apache-2.0"
] | 6 |
2020-07-12T02:12:18.000Z
|
2021-03-06T05:03:33.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import os
import os.path as osp
import numpy as np
import torch
import torchvision
import torch.nn as nn
from torch.autograd import Variable
import functools
from . import net_blocks as nb
import pdb
| 27.358974 | 79 | 0.612933 |
162335a5b07a8e07ba6397644e3e4ed7a9f459e2
| 8,442 |
py
|
Python
|
uproot_methods/common/TVector.py
|
marinang/uproot-methods
|
1d16d51ab7da19b4f31070d24e8fbfed3ae3ec8f
|
[
"BSD-3-Clause"
] | null | null | null |
uproot_methods/common/TVector.py
|
marinang/uproot-methods
|
1d16d51ab7da19b4f31070d24e8fbfed3ae3ec8f
|
[
"BSD-3-Clause"
] | null | null | null |
uproot_methods/common/TVector.py
|
marinang/uproot-methods
|
1d16d51ab7da19b4f31070d24e8fbfed3ae3ec8f
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2018, DIANA-HEP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numbers
import operator
import awkward
import awkward.util
class ArrayMethods(Common):
def cosdelta(self, other):
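        # a.b / (|a||b|), evaluated only where both magnitudes are nonzero;
        # zero-magnitude entries default to 1, and the result is clipped to [-1, 1]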
denom = self.mag2 * other.mag2
mask = (denom > 0)
denom = denom[mask]
denom[:] = awkward.util.numpy.sqrt(denom)
out = self.dot(other)
out[mask] /= denom
mask = awkward.util.numpy.logical_not(mask)
out[mask] = 1
return awkward.util.numpy.clip(out, -1, 1)
def angle(self, other, normal=None, degrees=False):
out = awkward.util.numpy.arccos(self.cosdelta(other))
if normal is not None:
a = self.unit
b = other.unit
out = out * awkward.util.numpy.sign(normal.dot(a.cross(b)))
if degrees:
out = awkward.util.numpy.multiply(out, 180.0/awkward.util.numpy.pi)
return out
def isopposite(self, other, tolerance=1e-10):
tmp = self + other
tmp.x = awkward.util.numpy.absolute(tmp.x)
tmp.y = awkward.util.numpy.absolute(tmp.y)
tmp.z = awkward.util.numpy.absolute(tmp.z)
out = (tmp.x < tolerance)
out = awkward.util.numpy.bitwise_and(out, tmp.y < tolerance)
out = awkward.util.numpy.bitwise_and(out, tmp.z < tolerance)
return out
    def isperpendicular(self, other, tolerance=1e-10):
        # dot() yields an array of scalars; vectors are perpendicular where
        # the magnitude of the dot product is within tolerance of zero
        return awkward.util.numpy.absolute(self.dot(other)) < tolerance
class Methods(Common):
def cosdelta(self, other):
m1 = self.mag2
m2 = other.mag2
if m1 == 0 or m2 == 0:
return 1.0
r = self.dot(other) / math.sqrt(m1 * m2)
return max(-1.0, min(1.0, r))
def angle(self, other, degrees=False):
out = math.acos(self.cosdelta(other))
if degrees:
out = out * 180.0/math.pi
return out
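    # Illustrative sketch (assumes the concrete vector class provides x/y/z
    # components): for perpendicular unit vectors, cosdelta() returns 0.0 and
    # angle(other, degrees=True) returns 90.0.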
def isopposite(self, other, tolerance=1e-10):
tmp = self + other
return abs(tmp.x) < tolerance and abs(tmp.y) < tolerance and abs(tmp.z) < tolerance
    def isperpendicular(self, other, tolerance=1e-10):
        # dot() yields a scalar; perpendicular when it is within tolerance of zero
        return abs(self.dot(other)) < tolerance
def __add__(self, other):
return self._vector(operator.add, other)
def __radd__(self, other):
return self._vector(operator.add, other, True)
def __sub__(self, other):
return self._vector(operator.sub, other)
def __rsub__(self, other):
return self._vector(operator.sub, other, True)
def __mul__(self, other):
return self._scalar(operator.mul, other)
def __rmul__(self, other):
return self._scalar(operator.mul, other, True)
def __div__(self, other):
return self._scalar(operator.div, other)
def __rdiv__(self, other):
return self._scalar(operator.div, other, True)
def __truediv__(self, other):
return self._scalar(operator.truediv, other)
def __rtruediv__(self, other):
return self._scalar(operator.truediv, other, True)
def __floordiv__(self, other):
return self._scalar(operator.floordiv, other)
def __rfloordiv__(self, other):
return self._scalar(operator.floordiv, other, True)
def __mod__(self, other):
return self._scalar(operator.mod, other)
def __rmod__(self, other):
return self._scalar(operator.mod, other, True)
def __divmod__(self, other):
return self._scalar(operator.divmod, other)
def __rdivmod__(self, other):
return self._scalar(operator.divmod, other, True)
def __pow__(self, other):
if isinstance(other, (numbers.Number, awkward.util.numpy.number)):
if other == 2:
return self.mag2
else:
return self.mag2**(0.5*other)
else:
            return self._scalar(operator.pow, other)
# no __rpow__
| 31.036765 | 91 | 0.650912 |
16262857a0ab051d70328d47ffe56eedbe48f8d3
| 1,259 |
py
|
Python
|
tpp/controller/ConversionController.py
|
pennyarcade/TPPP
|
9bb6db774d77f74c54ed2fa004e97c1aa114fff9
|
[
"MIT"
] | null | null | null |
tpp/controller/ConversionController.py
|
pennyarcade/TPPP
|
9bb6db774d77f74c54ed2fa004e97c1aa114fff9
|
[
"MIT"
] | null | null | null |
tpp/controller/ConversionController.py
|
pennyarcade/TPPP
|
9bb6db774d77f74c54ed2fa004e97c1aa114fff9
|
[
"MIT"
] | null | null | null |
"""
Implements a non interactive controller to controt non-interactive visualizers.
(i.e. those that are used for converting TPP souce code into another format)
"""
from tpp.FileParser import FileParser
from tpp.controller.TPPController import TPPController
| 24.686275 | 81 | 0.590151 |
1626ca15f81c599021a7770317db1230752e7b3f
| 4,282 |
py
|
Python
|
scrapers/covid_scraper.py
|
ZachGeo/covidGR_API
|
2f316337dda65bd33ac895df336481c3c2abe2c6
|
[
"MIT"
] | null | null | null |
scrapers/covid_scraper.py
|
ZachGeo/covidGR_API
|
2f316337dda65bd33ac895df336481c3c2abe2c6
|
[
"MIT"
] | null | null | null |
scrapers/covid_scraper.py
|
ZachGeo/covidGR_API
|
2f316337dda65bd33ac895df336481c3c2abe2c6
|
[
"MIT"
] | null | null | null |
from bs4 import BeautifulSoup
from datetime import date
from lxml import html
import requests
import re
import json
if __name__ == '__main__':
cs = CovidScraper()
results = cs.scrape_data()
print(results)
| 32.439394 | 148 | 0.615834 |
16278cfaea317b80559af8d9f8ed6e412d50c446
| 776 |
py
|
Python
|
img/autoeditimg.py
|
schorsche/css3-imageslider
|
6d15b2e77f141b8e871bdce2049ee7b2567981fe
|
[
"MIT"
] | null | null | null |
img/autoeditimg.py
|
schorsche/css3-imageslider
|
6d15b2e77f141b8e871bdce2049ee7b2567981fe
|
[
"MIT"
] | null | null | null |
img/autoeditimg.py
|
schorsche/css3-imageslider
|
6d15b2e77f141b8e871bdce2049ee7b2567981fe
|
[
"MIT"
] | 1 |
2019-02-23T22:54:22.000Z
|
2019-02-23T22:54:22.000Z
|
#!/usr/bin/python2.7
import os
from PIL import Image
DATEI_WEB_GROSSE = 700
if __name__ == '__main__':
main()
| 22.171429 | 68 | 0.68299 |
1627fcf089cd43ce83004fbce276962343e2f2c7
| 785 |
py
|
Python
|
wow/wow.py
|
brisberg/Kiri-Cogs
|
9a5307ff8fbaa5e0560ec518cf26df52347da98d
|
[
"MIT"
] | null | null | null |
wow/wow.py
|
brisberg/Kiri-Cogs
|
9a5307ff8fbaa5e0560ec518cf26df52347da98d
|
[
"MIT"
] | null | null | null |
wow/wow.py
|
brisberg/Kiri-Cogs
|
9a5307ff8fbaa5e0560ec518cf26df52347da98d
|
[
"MIT"
] | null | null | null |
import discord
from discord.ext import commands
| 29.074074 | 99 | 0.64586 |
16286e428c3bbec3fb9fbe61340a4121c6311a63
| 274 |
py
|
Python
|
tests/attacks/class_test.py
|
henrik997/privacy-evaluator
|
f1d0e6c10ff58e582a44243788ab66c1d453bfa0
|
[
"MIT"
] | null | null | null |
tests/attacks/class_test.py
|
henrik997/privacy-evaluator
|
f1d0e6c10ff58e582a44243788ab66c1d453bfa0
|
[
"MIT"
] | null | null | null |
tests/attacks/class_test.py
|
henrik997/privacy-evaluator
|
f1d0e6c10ff58e582a44243788ab66c1d453bfa0
|
[
"MIT"
] | null | null | null |
import pytest
from privacy_evaluator.attacks.sample_attack import Sample_Attack
"""
This test only test if no error is thrown when calling the function, can be removed in the future
"""
| 24.909091 | 97 | 0.762774 |
162894b73abedfff0ad797772b95e5e53cb507ab
| 2,412 |
py
|
Python
|
setup.py
|
Oli2/presto-python-client
|
11a89c2528a35d5af6916e9c9175cb3e1f84160b
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
Oli2/presto-python-client
|
11a89c2528a35d5af6916e9c9175cb3e1f84160b
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
Oli2/presto-python-client
|
11a89c2528a35d5af6916e9c9175cb3e1f84160b
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import re
from setuptools import setup
import textwrap
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('prestodb/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
setup(
name='presto-python-client',
author='Presto Team',
author_email='[email protected]',
version=version,
url='https://github.com/prestodb/presto-python-client',
packages=['prestodb'],
package_data={'': ['LICENSE', 'README.md']},
description='Client for the Presto distributed SQL Engine',
long_description=textwrap.dedent("""
Client for Presto (https://prestodb.io), a distributed SQL engine for
interactive and batch big data processing. Provides a low-level client and
a DBAPI 2.0 implementation.
"""),
license='Apache 2.0',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Database :: Front-Ends',
],
install_requires=[
'click',
'future',
'ipaddress',
'requests',
'requests_kerberos',
'six',
'typing',
],
extras_require={'tests':[
'httpretty',
'pytest',
'pytest-runner',
]}
)
| 33.041096 | 78 | 0.641376 |
162b50aea1cc09a5257abec74537cee83cae39dc
| 368 |
py
|
Python
|
Graphs/Pie Chart.py
|
TausifAnsari/PyHub
|
f6c949dc6a3974f57d7d146708443d0ceeb4418f
|
[
"MIT"
] | 1 |
2020-09-30T19:31:20.000Z
|
2020-09-30T19:31:20.000Z
|
Graphs/Pie Chart.py
|
TanviSutar/PyHub
|
6281e9f515674fb51f0d0862c26ec18020fa7d83
|
[
"MIT"
] | null | null | null |
Graphs/Pie Chart.py
|
TanviSutar/PyHub
|
6281e9f515674fb51f0d0862c26ec18020fa7d83
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as graph
subject = ["Probability", "Calculas", "Discrete Mathematics", "Adv Engineering Mathematics",
"Linear Algebra", "Cryptography"]
weightage = [250,900,850,1200,290,345]
seperator = [0.05,0,0,0,0.05,0.05]
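# passed as matplotlib's 'explode': offsets the 1st, 5th and 6th wedges from the centre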
graph.title("Mathematics Topic Weightage")
graph.pie(weightage,labels=subject,autopct="%0.1f%%", explode=seperator)
graph.show()
| 30.666667 | 93 | 0.741848 |
162b6c04231d6cc1d5159da7ca51127039c4295e
| 6,252 |
py
|
Python
|
exercises/perform_model_selection.py
|
noavilk/IML.HUJI
|
35aa4e6fbe489239e4fe72bf38c0dba3e6c81f37
|
[
"MIT"
] | null | null | null |
exercises/perform_model_selection.py
|
noavilk/IML.HUJI
|
35aa4e6fbe489239e4fe72bf38c0dba3e6c81f37
|
[
"MIT"
] | null | null | null |
exercises/perform_model_selection.py
|
noavilk/IML.HUJI
|
35aa4e6fbe489239e4fe72bf38c0dba3e6c81f37
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import numpy as np
import pandas as pd
from sklearn import datasets
from IMLearn.metrics import mean_square_error
from IMLearn.utils import split_train_test
from IMLearn.model_selection import cross_validate
from IMLearn.learners.regressors import PolynomialFitting, LinearRegression, RidgeRegression
from sklearn.linear_model import Lasso
from utils import *
import plotnine as gg
def select_polynomial_degree(n_samples: int = 100, noise: float = 5):
"""
Simulate data from a polynomial model and use cross-validation to select the best fitting degree
Parameters
----------
n_samples: int, default=100
Number of samples to generate
noise: float, default = 5
Noise level to simulate in responses
"""
# Question 1 - Generate dataset for model f(x)=(x+3)(x+2)(x+1)(x-1)(x-2) + eps for eps Gaussian noise
# and split into training- and testing portions
X = np.linspace(-1.2, 2, n_samples)
y = f(X) + np.random.normal(0, noise, n_samples)
train_X, train_y, test_X, test_y = split_train_test(pd.DataFrame(X), pd.Series(y), train_proportion=(2 / 3))
df_train = pd.DataFrame({"x": train_X.squeeze(), "y": train_y, "type": "Train"})
df_test = pd.DataFrame({"x": test_X.squeeze(), "y": test_y, "type": "test"})
x_stat = np.linspace(-1.4, 2, 100)
df_stat = pd.DataFrame({"x": x_stat, "y": f(x_stat), "type": "Model"})
df = pd.concat([df_test, df_train])
title = f"f(x) = (x+3)(x+2)(x+1)(x-1)(x-2) + Gaussian noise ~ N(0,{noise})"
p = gg.ggplot() + \
gg.geom_point(df, gg.aes("x", "y", color="type")) + \
gg.geom_line(df_stat, gg.aes("x", "y")) + \
gg.theme_bw() + \
gg.ggtitle(title)
# print(p)
gg.ggsave(filename=f'../../IML/ex5/plots/{title}.png', plot=p, verbose=False)
# Question 2 - Perform CV for polynomial fitting with degrees 0,1,...,10
train_err = []
validation_err = []
for k in range(11):
pf = PolynomialFitting(k)
train_score, validation_score = cross_validate(pf, train_X.to_numpy(), train_y.to_numpy(), mean_square_error)
train_err.append(train_score)
validation_err.append(validation_score)
df1 = pd.DataFrame({"k": range(11), "avg error": train_err, "type": "train error"})
df2 = pd.DataFrame({"k": range(11), "avg error": validation_err, "type": "validation error"})
df = pd.concat([df1, df2])
title = f" Cross Validation for Polynomial Fitting Over Different Degrees k"
p = gg.ggplot(df, gg.aes("k", "avg error", color="type")) + \
gg.geom_point() + \
gg.theme_bw() + gg.scale_x_continuous(breaks=range(11)) + \
gg.labs(y="Average training and validation errors",
title=f"{title} \nWith Noise: {noise}, Num of samples: {n_samples}")
gg.ggsave(filename=f'../../IML/ex5/plots/{title} {noise} {n_samples}.png', plot=p, verbose=False)
# Question 3 - Using best value of k, fit a k-degree polynomial model and report test error
best_k = np.argmin(np.array(validation_err))
pf = PolynomialFitting(int(best_k))
pf.fit(train_X.to_numpy(), train_y.to_numpy())
y_pred = pf.predict(test_X.to_numpy())
print("best k =", best_k)
print("Test = ", round(mean_square_error(test_y.to_numpy(), y_pred), 2))
print("Validation = ", round(validation_err[best_k], 2))
def select_regularization_parameter(n_samples: int = 50, n_evaluations: int = 500):
"""
Using sklearn's diabetes dataset use cross-validation to select the best fitting regularization parameter
values for Ridge and Lasso regressions
Parameters
----------
n_samples: int, default=50
Number of samples to generate
n_evaluations: int, default = 500
Number of regularization parameter values to evaluate for each of the algorithms
"""
# Question 6 - Load diabetes dataset and split into training and testing portions
X, y = datasets.load_diabetes(return_X_y=True, as_frame=True)
train_X, train_y, test_X, test_y = X.iloc[:50, :], y[:50], X.iloc[50:, ], y[50:]
# Question 7 - Perform CV for different values of the regularization parameter for Ridge and Lasso regressions
for name, learner, ran in [("Ridge", RidgeRegression, np.linspace(0.001, 0.05, 500)),
("Lasso", Lasso, np.linspace(0.001, 0.5, 500))]:
train_err = []
validation_err = []
for lam in ran:
rg = learner(lam)
train_score, validation_score = cross_validate(rg, train_X.to_numpy(), train_y.to_numpy(),
mean_square_error)
train_err.append(train_score)
validation_err.append(validation_score)
df1 = pd.DataFrame({"lambda": ran, "avg error": train_err, "type": "train error"})
df2 = pd.DataFrame({"lambda": ran, "avg error": validation_err, "type": "validation error"})
df = pd.concat([df1, df2])
title = f"{name} Regularization Cross Validate Over Different Lambda"
p = gg.ggplot(df, gg.aes("lambda", "avg error", color="type")) + \
gg.geom_line() + \
gg.theme_bw() + gg.labs(y="Average training and validation errors", title=title)
gg.ggsave(filename=f'../../IML/ex5/plots/{title}.png', plot=p, verbose=False)
# Question 8 - Compare best Ridge model, best Lasso model and Least Squares model
best_lam = np.argmin(np.array(validation_err))
rg = learner(ran[best_lam])
rg.fit(train_X.to_numpy(), train_y.to_numpy())
y_pred = rg.predict(test_X.to_numpy())
print(f"best lambda {name} = {round(ran[best_lam], 3)}")
print(f"Test MSE {name} = {round(mean_square_error(test_y.to_numpy(), y_pred), 2)}")
lr = LinearRegression()
lr.fit(train_X.to_numpy(), train_y.to_numpy())
print("Linear Regression Loss = ", lr.loss(test_X.to_numpy(), test_y.to_numpy()))
if __name__ == '__main__':
np.random.seed(0)
select_polynomial_degree()
select_polynomial_degree(noise=0)
select_polynomial_degree(n_samples=1500, noise=10)
select_regularization_parameter()
| 45.304348 | 117 | 0.644274 |
162c0bbced3e06420246b7de0d2ad6e3745c54ef
| 9,001 |
py
|
Python
|
libraries/tools/media_utils.py
|
unfoldingWord-dev/d43-catalog
|
6c36f59b9b326e0ead45739c09631ef1e57c4932
|
[
"MIT"
] | 1 |
2017-05-18T22:18:31.000Z
|
2017-05-18T22:18:31.000Z
|
libraries/tools/media_utils.py
|
unfoldingWord-dev/d43-catalog
|
6c36f59b9b326e0ead45739c09631ef1e57c4932
|
[
"MIT"
] | 54 |
2016-11-07T03:07:03.000Z
|
2021-04-14T21:24:04.000Z
|
libraries/tools/media_utils.py
|
unfoldingWord-dev/d43-catalog
|
6c36f59b9b326e0ead45739c09631ef1e57c4932
|
[
"MIT"
] | 7 |
2016-10-26T18:15:14.000Z
|
2018-06-01T18:37:32.000Z
|
import re
import copy
def parse_media(media, content_version, project_chapters):
"""
Converts a media object into formats usable in the catalog
:param media: the media object
:type media: dict
:param content_version: the current version of the source content
:type content_version: string
:param project_chapters: a dictionary of project chapters
:type project_chapters: dict
:return: resource_formats, project_formats a list of resource formats and dictionary of project formats
"""
resource_formats = []
project_formats = {}
if 'resource' in media:
resource_formats = _parse_resource(media['resource'], content_version)
if 'projects' in media:
for project in media['projects']:
project_id = project['identifier']
chapters = []
if project_id == 'obs':
# TRICKY: obs projects always have 50 chapters
# This allows empty projects to still publish media.
for x in range(1, 51): # chapters 1..50
chapters.append(str(x).zfill(2))
if project_id in project_chapters:
chapters = project_chapters[project_id]
project_formats[project_id] = _parse_project(project, content_version, chapters)
return resource_formats, project_formats
def _parse_resource(resource, content_version):
"""
Converts a resource media object into formats usable in the catalog
:param resource: the media object
:type resource: dict
:param content_version: the current version of the source content
:type content_version: string
:return: a list of formats
"""
source_version = _expand_keys(resource['version'], {'latest': content_version})
formats = []
if 'media' in resource:
for media in resource['media']:
media_version = _expand_keys(media['version'], {'latest': content_version})
expansion_vars = _make_expansion_variables(media, content_version)
if 'quality' in media and len(media['quality']) > 0:
# build format for each quality
for quality in media['quality']:
expansion_vars['quality'] = quality
format = _make_format(source_version=source_version,
media_version=media_version,
quality=quality,
media=media,
expansion_vars=expansion_vars)
formats.append(format)
else:
# build a single format
format = _make_format(source_version=source_version,
media_version=media_version,
quality=None,
media=media,
expansion_vars=expansion_vars)
formats.append(format)
return formats
def _parse_project(project, content_version, chapters_ids):
"""
Converts a project media object into formats usable in the catalog
:param project: the media object
:type project: dict
:param content_version: the current version of the source content
:type content_version: string
:param chapters_ids: a list of chapter identifiers in the project
:type chapters_ids: list
:return: a list of formats
"""
source_version = _expand_keys(project['version'], {'latest': content_version})
formats = []
if 'media' in project:
for media in project['media']:
media_version = _expand_keys(media['version'], {'latest': content_version})
expansion_vars = _make_expansion_variables(media, content_version)
if 'quality' in media and len(media['quality']) > 0:
# build format for each quality
for quality in media['quality']:
expansion_vars['quality'] = quality
format = _make_format(source_version=source_version,
media_version=media_version,
quality=quality,
media=media,
expansion_vars=expansion_vars)
chapters = _prepare_chapter_formats(media, chapters_ids, expansion_vars)
if chapters:
format['chapters'] = chapters
formats.append(format)
else:
# build single format
format = _make_format(source_version=source_version,
media_version=media_version,
quality=None,
media=media,
expansion_vars=expansion_vars)
chapters = _prepare_chapter_formats(media, chapters_ids, expansion_vars)
if chapters:
format['chapters'] = chapters
formats.append(format)
return formats
def _prepare_chapter_formats(media, chapters, expansion_vars):
"""
This is a wrapper around the method `_parse_project_chapter`.
Since we routinely conditionally prepare chapters in multiple places
this handles it in one place
:param media: the media object to inspect
:param chapters: a list of chapter ids
:param expansion_vars: a dictionary of variables that may be expanded in the chapter url
:return:
"""
if 'chapter_url' in media:
chapter_url = _expand_keys(media['chapter_url'], expansion_vars)
chapters = _parse_project_chapter(chapter_url, chapters)
if chapters:
return chapters
return None
def _parse_project_chapter(chapter_url, chapters):
"""
Generates chapter formats for use in the catalog
:param chapter_url: the url template that will be used in the formats
:param chapters: a list of chapter ids
:type chapters: list
:return:
"""
# TODO: this requires that we give a well formatted list of chapter ids and check if the Rc is a book
# only book RCs can have chapter formats
formats = []
for chapter_id in chapters:
format = {
'size': 0,
'length': 0,
'modified': '',
'identifier': chapter_id,
'url': _expand_keys(chapter_url, {'chapter': chapter_id}),
'signature': '',
'build_rules': [
'signing.sign_given_url'
]
}
formats.append(format)
return formats
def _make_expansion_variables(media_block, content_version):
"""
Creates a dictionary of expansion variables for media items.
:param self:
:param media_block:
:param content_version:
:return:
"""
vars = copy.copy(media_block)
# strip black listed keys
black_list = ['url', 'chapter_url']
for key in black_list:
if key in vars:
del vars[key]
# TRICKY: using `latest` as an expansion variable in urls is not explicitly stated in the spec,
# but it's a common misunderstanding so we allow it.
vars['latest'] = '{}'.format(content_version)
return vars
def _expand_keys(target, replacements):
"""
Replaces all the dict keys found in the string with the dict values.
Keys in the string must be delimited by brackets {}
:param target:
:param replacements:
:return:
"""
if isinstance(target, basestring) or isinstance(target, str):
result = target
if not isinstance(replacements, dict):
raise Exception('Expected dictionary of replacements but received {}'.format(type(replacements)))
for key in replacements:
if not isinstance(replacements[key], list):
result = re.sub(r'{\s*' + key + '\s*}', '{}'.format(replacements[key]), result)
return result
elif isinstance(target, int):
return target
else:
raise Exception('Invalid replacement target "{}". Expected string but received {}'.format(target, type(target)))
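# Illustrative example (not part of the original module; template and values
# are assumptions): _expand_keys substitutes bracketed keys in a URL template.
#   _expand_keys('{latest}/{quality}/chapter.mp3',
#                {'latest': '4', 'quality': '64kbps'})
#   # -> '4/64kbps/chapter.mp3'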
| 39.47807 | 120 | 0.579602 |
162c1fe872f535df8473bc4c5719a90f0e1d8d91
| 4,518 |
py
|
Python
|
django_customflow/mixins.py
|
Brad19940809/django-customflow
|
502eed512d7c29e8d176c67fa62a7fce0be492d7
|
[
"MIT"
] | 1 |
2019-08-06T09:28:11.000Z
|
2019-08-06T09:28:11.000Z
|
django_customflow/mixins.py
|
Brad19940809/django-customflow
|
502eed512d7c29e8d176c67fa62a7fce0be492d7
|
[
"MIT"
] | null | null | null |
django_customflow/mixins.py
|
Brad19940809/django-customflow
|
502eed512d7c29e8d176c67fa62a7fce0be492d7
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
# create_time: 2019/8/5 16:02
# __author__ = 'brad'
from . import utils
from .tasks.base import WaitingTask, BaseTask
| 34.227273 | 112 | 0.626162 |
162c59bea2ea2599ffb8f94490a631231802e6ea
| 2,272 |
py
|
Python
|
video_encoding/fields.py
|
fossabot/django-video-encoding
|
16a88c2d61d28e6f5ec2b49956ce356f8c458c67
|
[
"BSD-3-Clause"
] | 164 |
2019-07-29T17:59:06.000Z
|
2022-03-19T21:36:01.000Z
|
video_encoding/fields.py
|
fossabot/django-video-encoding
|
16a88c2d61d28e6f5ec2b49956ce356f8c458c67
|
[
"BSD-3-Clause"
] | 188 |
2019-03-16T09:53:25.000Z
|
2019-07-25T14:57:24.000Z
|
video_encoding/fields.py
|
fossabot/django-video-encoding
|
16a88c2d61d28e6f5ec2b49956ce356f8c458c67
|
[
"BSD-3-Clause"
] | 80 |
2019-08-03T17:49:08.000Z
|
2022-02-28T16:56:33.000Z
|
from django.db.models.fields.files import (FieldFile, ImageField,
ImageFileDescriptor)
from django.utils.translation import ugettext as _
from .backends import get_backend_class
from .files import VideoFile
| 31.555556 | 78 | 0.636444 |
162cf5942b39cb55c7afb1cde65c73f78fbc4d55
| 8,182 |
py
|
Python
|
test/spec/test_spec.py
|
raghu1121/SLM-Lab
|
58e98b6521f581515d04ebacff5226105237ed9b
|
[
"MIT"
] | 1 |
2021-09-01T11:57:04.000Z
|
2021-09-01T11:57:04.000Z
|
test/spec/test_spec.py
|
ragtz/SLM-Lab
|
42c48af308dfe36401990aca3795bc481cf28c17
|
[
"MIT"
] | null | null | null |
test/spec/test_spec.py
|
ragtz/SLM-Lab
|
42c48af308dfe36401990aca3795bc481cf28c17
|
[
"MIT"
] | null | null | null |
from flaky import flaky
from slm_lab.experiment.control import Trial
from slm_lab.experiment.monitor import InfoSpace
from slm_lab.lib import util
from slm_lab.spec import spec_util
import os
import pandas as pd
import pytest
import sys
# helper method to run all tests in test_spec
| 36.855856 | 82 | 0.744072 |
162d0aa4bb77e9b34f76b8530aaf8f57b28901c9
| 647 |
py
|
Python
|
test/test_modify_group.py
|
Sfairat00/training_python
|
14562b377d19bf22fc077e02efc7e56e73785a55
|
[
"Apache-2.0"
] | null | null | null |
test/test_modify_group.py
|
Sfairat00/training_python
|
14562b377d19bf22fc077e02efc7e56e73785a55
|
[
"Apache-2.0"
] | null | null | null |
test/test_modify_group.py
|
Sfairat00/training_python
|
14562b377d19bf22fc077e02efc7e56e73785a55
|
[
"Apache-2.0"
] | null | null | null |
from model.group import Group
| 26.958333 | 60 | 0.698609 |
162ffe7bb753d133521ad38601ddfbb5cb83a226
| 4,192 |
py
|
Python
|
readme_metrics/MetricsMiddleware.py
|
readmeio/metrics-sdks-python
|
02bc6e486260641f1a62760d20370157a4928af6
|
[
"0BSD"
] | 2 |
2020-09-23T04:44:22.000Z
|
2021-07-06T18:14:11.000Z
|
readme_metrics/MetricsMiddleware.py
|
readmeio/metrics-sdks-python
|
02bc6e486260641f1a62760d20370157a4928af6
|
[
"0BSD"
] | null | null | null |
readme_metrics/MetricsMiddleware.py
|
readmeio/metrics-sdks-python
|
02bc6e486260641f1a62760d20370157a4928af6
|
[
"0BSD"
] | 1 |
2020-09-23T04:44:25.000Z
|
2020-09-23T04:44:25.000Z
|
import io
import time
import datetime
from readme_metrics.Metrics import Metrics
from readme_metrics.MetricsApiConfig import MetricsApiConfig
from readme_metrics.ResponseInfoWrapper import ResponseInfoWrapper
from werkzeug import Request
| 34.933333 | 87 | 0.569656 |
16312fcb11ab7937c366343185da9dd102a4e745
| 4,048 |
py
|
Python
|
kbrl.py
|
deekshaarya4/gymexperiments
|
2d503ba14fcfba41339de25dd78d649bd12693e6
|
[
"MIT"
] | null | null | null |
kbrl.py
|
deekshaarya4/gymexperiments
|
2d503ba14fcfba41339de25dd78d649bd12693e6
|
[
"MIT"
] | null | null | null |
kbrl.py
|
deekshaarya4/gymexperiments
|
2d503ba14fcfba41339de25dd78d649bd12693e6
|
[
"MIT"
] | null | null | null |
import numpy as np
import gym
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
import argparse
parser = argparse.ArgumentParser(description='KBRL with KNN')
parser.add_argument('--episodes', nargs='?', type=int, default=500)
parser.add_argument('--max_timesteps', nargs='?', type=int, default=200)
parser.add_argument('environment')
args = parser.parse_args()
env = gym.make(args.environment).env
action_space = env.action_space
# hyperparameters:
epsilon = 1.0
exploration_decay = 0.98
k = 500 # number of nearest neighbors
minimum_num_iters = 500 # number of iterations used for training
num_iter = 0
max_iters = 0
gamma = 0.95
max_state_size = 15000 # because we don't know the state space size in continuous environments
# learning-related variables
states = None
actions = {}
rewards = {}
values = {}
# episode-related variables
episode_beginning = 0
# Ignore sklearn warnings
import warnings
warnings.warn = warn
reward = 0
episode_reward = 0
done = False
cumulative_reward_list = []
for i in range(args.episodes):
observation = env.reset()
sum_reward = 0
for j in range(args.max_timesteps):
env.render()
action = make_move(observation, reward, done)
observation, reward, done, _ = env.step(action)
sum_reward += reward
if done:
break
episode_reward = episode_reward * 0.95 + sum_reward * 0.05
print('Reward for episode '+ str(i)+' : '+str(episode_reward))
cumulative_reward_list.append(episode_reward)
# env.render()
plt.plot(range(0,500), cumulative_reward_list, linewidth=2)
plt.xlabel("Episodes")
plt.ylabel("Cumulative Reward")
plt.title("Performance")
plt.show()
plt.close()
| 30.900763 | 100 | 0.673913 |
1631aec82f9bb8a63392680178fdfa614b25b1c9
| 10,654 |
py
|
Python
|
shardDesigner/shardTemplateDir/shardStemDir/log/elast.py
|
vinci-project/rootShard
|
2f6633c7fb1c1b690c0a38ffbb16af0b50d532bb
|
[
"MIT"
] | null | null | null |
shardDesigner/shardTemplateDir/shardStemDir/log/elast.py
|
vinci-project/rootShard
|
2f6633c7fb1c1b690c0a38ffbb16af0b50d532bb
|
[
"MIT"
] | 7 |
2020-03-02T11:23:41.000Z
|
2022-03-11T23:52:51.000Z
|
shardDesigner/shardTemplateDir/shardStemDir/log/elast.py
|
vinci-project/rootShard
|
2f6633c7fb1c1b690c0a38ffbb16af0b50d532bb
|
[
"MIT"
] | null | null | null |
import elasticsearch
from elasticsearch import Elasticsearch
from elasticsearch import helpers
import time, json, datetime, os
| 41.455253 | 186 | 0.382016 |
1631ce5936a7d3f836485152fc8ba3c55b4623c2
| 722 |
py
|
Python
|
corehq/apps/sms/tests.py
|
dslowikowski/commcare-hq
|
ad8885cf8dab69dc85cb64f37aeaf06106124797
|
[
"BSD-3-Clause"
] | 1 |
2015-02-10T23:26:39.000Z
|
2015-02-10T23:26:39.000Z
|
corehq/apps/sms/tests.py
|
SEL-Columbia/commcare-hq
|
992ee34a679c37f063f86200e6df5a197d5e3ff6
|
[
"BSD-3-Clause"
] | 1 |
2022-03-12T01:03:25.000Z
|
2022-03-12T01:03:25.000Z
|
corehq/apps/sms/tests.py
|
johan--/commcare-hq
|
86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
from util import clean_phone_number, clean_outgoing_sms_text
from django.test import TestCase
| 32.818182 | 106 | 0.685596 |
16320687d82ed5fd57ef5ebf44c1b6e925a208e1
| 12,169 |
py
|
Python
|
deepchem/models/atomic_conv.py
|
cjgalvin/deepchem
|
64993a129e7f0f78fed9500298b1828ac8a0757a
|
[
"MIT"
] | 3 |
2019-05-29T19:18:25.000Z
|
2021-01-25T05:44:05.000Z
|
deepchem/models/atomic_conv.py
|
cjgalvin/deepchem
|
64993a129e7f0f78fed9500298b1828ac8a0757a
|
[
"MIT"
] | 10 |
2017-02-23T19:39:22.000Z
|
2017-08-31T22:21:18.000Z
|
deepchem/models/atomic_conv.py
|
cjgalvin/deepchem
|
64993a129e7f0f78fed9500298b1828ac8a0757a
|
[
"MIT"
] | 1 |
2018-09-22T00:53:53.000Z
|
2018-09-22T00:53:53.000Z
|
__author__ = "Joseph Gomes"
__copyright__ = "Copyright 2017, Stanford University"
__license__ = "MIT"
import sys
from deepchem.models import KerasModel
from deepchem.models.layers import AtomicConvolution
from deepchem.models.losses import L2Loss
from tensorflow.keras.layers import Input, Layer
import numpy as np
import tensorflow as tf
import itertools
def initializeWeightsBiases(prev_layer_size,
size,
weights=None,
biases=None,
name=None):
"""Initializes weights and biases to be used in a fully-connected layer.
Parameters
----------
prev_layer_size: int
Number of features in previous layer.
size: int
Number of nodes in this layer.
weights: tf.Tensor, optional (Default None)
Weight tensor.
biases: tf.Tensor, optional (Default None)
Bias tensor.
name: str
Name for this op, optional (Defaults to 'fully_connected' if None)
Returns
-------
weights: tf.Variable
Initialized weights.
biases: tf.Variable
Initialized biases.
"""
if weights is None:
weights = tf.random.truncated_normal([prev_layer_size, size], stddev=0.01)
if biases is None:
biases = tf.zeros([size])
w = tf.Variable(weights, name='w')
b = tf.Variable(biases, name='b')
return w, b
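# Minimal usage sketch (an assumption, not from the original file): create the
# variables for a 128 -> 64 fully-connected layer.
#   w, b = initializeWeightsBiases(prev_layer_size=128, size=64)
#   # w: tf.Variable of shape [128, 64], b: tf.Variable of shape [64]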
| 36.109792 | 143 | 0.639494 |
163248c24fc9b2b48d8f714d22251c83d3496af1
| 2,694 |
py
|
Python
|
dialogue-engine/test/programytest/config/brain/test_oob.py
|
cotobadesign/cotoba-agent-oss
|
3833d56e79dcd7529c3e8b3a3a8a782d513d9b12
|
[
"MIT"
] | 104 |
2020-03-30T09:40:00.000Z
|
2022-03-06T22:34:25.000Z
|
dialogue-engine/test/programytest/config/brain/test_oob.py
|
cotobadesign/cotoba-agent-oss
|
3833d56e79dcd7529c3e8b3a3a8a782d513d9b12
|
[
"MIT"
] | 25 |
2020-06-12T01:36:35.000Z
|
2022-02-19T07:30:44.000Z
|
dialogue-engine/test/programytest/config/brain/test_oob.py
|
cotobadesign/cotoba-agent-oss
|
3833d56e79dcd7529c3e8b3a3a8a782d513d9b12
|
[
"MIT"
] | 10 |
2020-04-02T23:43:56.000Z
|
2021-05-14T13:47:01.000Z
|
"""
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import unittest
from programy.config.file.yaml_file import YamlConfigurationFile
from programy.config.brain.oob import BrainOOBConfiguration
from programy.clients.events.console.config import ConsoleConfiguration
| 42.761905 | 126 | 0.72977 |
16329b70c55c3c7cf597931457db274fe5d63821
| 327 |
py
|
Python
|
pypad/active_skill/interfaces/orb_generator_asi.py
|
candyninja001/pypad
|
82bfc104c2524ca54cc415d37d2c21fec471838f
|
[
"MIT"
] | null | null | null |
pypad/active_skill/interfaces/orb_generator_asi.py
|
candyninja001/pypad
|
82bfc104c2524ca54cc415d37d2c21fec471838f
|
[
"MIT"
] | null | null | null |
pypad/active_skill/interfaces/orb_generator_asi.py
|
candyninja001/pypad
|
82bfc104c2524ca54cc415d37d2c21fec471838f
|
[
"MIT"
] | null | null | null |
import abc
from ...orb_attribute import OrbAttribute
# Interface for active skills that create specific orb types (whether board change, orb change, orb spawn, etc)
| 36.333333 | 111 | 0.776758 |
1632af4d460f191002d145c0aa53f5434243e662
| 5,717 |
py
|
Python
|
setup.py
|
DivoK/mystery
|
b656eebe678c64864b2a5762765f36bddd540933
|
[
"MIT"
] | 8 |
2019-05-31T19:46:49.000Z
|
2020-05-14T22:21:35.000Z
|
setup.py
|
DivoK/mystery
|
b656eebe678c64864b2a5762765f36bddd540933
|
[
"MIT"
] | 4 |
2019-06-04T15:24:22.000Z
|
2021-06-01T23:53:37.000Z
|
setup.py
|
DivoK/mystery
|
b656eebe678c64864b2a5762765f36bddd540933
|
[
"MIT"
] | 4 |
2019-06-04T15:08:46.000Z
|
2020-04-25T15:52:00.000Z
|
"""
Core business logic for `mystery`.
This code will run when the package is being built and installed.
"""
import json
import pathlib
import random
import tempfile
import urllib.request
import typing
import setuptools
from setuptools.command.sdist import sdist
# Load the configuration file.
CONFIG_PATH = pathlib.Path('config.json')
CONFIG = json.load(CONFIG_PATH.open('r'))
def _get_lockfile_path() -> pathlib.Path:
"""
Assemble the lockfile's path.
:return: lockfile path.
:rtype: pathlib.Path
"""
return pathlib.Path(tempfile.gettempdir()).joinpath(CONFIG['lockfile_name'])
def _get_package_list() -> typing.List[str]:
"""
Get a list of possible packages.
:return: list of package names.
:rtype: typing.List[str]
"""
try:
# Get the top PyPI packages and use one of them.
response = urllib.request.urlopen(CONFIG['top_pypi_packages_link'])
possible_packages_raw = response.read()
except urllib.request.URLError:
# Use the offline backup file.
with open(CONFIG['top_pypi_packages_offline_backup'], 'r') as backup_file:
possible_packages_raw = backup_file.read()
return json.loads(possible_packages_raw)['rows'][: CONFIG['top_x_packages']]
def _choose_mystery_package() -> str:
"""
Choose the underlying mysterious package and handle the lockfile's state.
:return: mystery package name.
:rtype: str
"""
# To keep the chosen dependency consistent in between setup.py runs, 'mystery' uses a temporary lockfile.
dep_lock_path = _get_lockfile_path()
if dep_lock_path.exists():
# Use the locked package and unlink the lockfile.
chosen_package = dep_lock_path.read_text().strip()
dep_lock_path.unlink()
else:
# Choose a package and create the lockfile.
possible_packages = _get_package_list()
chosen_package = random.choice(
[package['project'] for package in possible_packages]
)
dep_lock_path.write_text(chosen_package) # Lock the chosen package of course.
return chosen_package
def _fix_package_name(package_name: str) -> str:
"""
Fix the package name so it could be placed in the __init__.py file.
:param package_name: mystery package name.
:type package_name: str
:return: fixed mystery package name.
:rtype: str
"""
# Transform to eligible package name.
fixed_package_name = package_name.replace('-', '_')
# Special case for the 'backports' modules.
if fixed_package_name.startswith('backports_'):
fixed_package_name.replace('_', '.', 1)
return fixed_package_name
def _write_init_py(package_name: str) -> None:
"""
Dynamically write the __init__.py for the package using the chosen package.
:param chosen_package: mystery package name.
:type chosen_package: str
:rtype: None
"""
package_name = _fix_package_name(package_name)
init_py_path = pathlib.Path('mystery')
init_py_path.mkdir(exist_ok=True)
init_py_path = init_py_path / '__init__.py'
init_py_path.write_text(
f'''
# Here we're trying to import the mystery package (it's "{package_name}" this time).
# If it exists, overwrite 'mystery' in 'sys.modules'. Else, print there was an error.
import sys
try:
import {package_name}
except ImportError as error:
print('Internal error:', error)
print("The mystery package wasn't playing nice. Sorry!")
print('Hint: you can always try to reinstall mystery and get a different package!')
sorry = 'try reinstalling mystery and get a different package!'
else:
sys.modules['mystery'] = {package_name}
sys.modules['mystery'].__mystery_init_py__ = __file__
sys.modules['mystery'].__mystery_package_name__ = '{package_name}'
del sys # We care about this only when mystery fails (and even that's inconsequential).
'''
)
def _get_long_description_data() -> typing.Tuple[str, str]:
"""
Get data regarding the long description of the package.
:return: tuple of the README.md text and the long_description type.
:rtype: typing.Tuple[str, str]
"""
with open('README.md', 'r') as readme:
return (readme.read(), 'text/markdown')
CHOSEN_PACKAGE = _choose_mystery_package()
_write_init_py(CHOSEN_PACKAGE)
LONG_DESCRIPTION, LONG_DESCRIPTION_CONTENT_TYPE = _get_long_description_data()
setuptools.setup(
name='mystery',
version='1.0.2',
description='It is a riddle, wrapped in a mystery, inside an enigma.',
url='https://github.com/DivoK/mystery',
author='Divo Kaplan',
author_email='[email protected]',
packages=setuptools.find_packages(),
install_requires=[CHOSEN_PACKAGE],
cmdclass={'sdist': SDistCommand},
python_requires='>=3.6',
include_package_data=True,
long_description=LONG_DESCRIPTION,
long_description_content_type=LONG_DESCRIPTION_CONTENT_TYPE,
keywords='mystery setuptools fun python-packages random',
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Intended Audience :: Other Audience',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| 32.117978 | 109 | 0.688473 |
1632cc5107307be666384111255532a74d2d121a
| 1,665 |
py
|
Python
|
ADMM_primal.py
|
CrazyIvanPro/Optimal_Transport
|
aa782820a5ca5a01909ed3c32acbada43f6cfa0f
|
[
"MIT"
] | 2 |
2020-11-09T10:37:19.000Z
|
2021-07-06T09:24:30.000Z
|
ADMM_primal.py
|
CrazyIvanPro/Optimal_Transport
|
aa782820a5ca5a01909ed3c32acbada43f6cfa0f
|
[
"MIT"
] | null | null | null |
ADMM_primal.py
|
CrazyIvanPro/Optimal_Transport
|
aa782820a5ca5a01909ed3c32acbada43f6cfa0f
|
[
"MIT"
] | 1 |
2021-06-03T17:07:01.000Z
|
2021-06-03T17:07:01.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =======================================
# File Name: ADMM_primal.py
# Purpose : implementation for ADMM method
# for solving primal problem
# =======================================
from utils import get_params
import numpy as np
import sys
def ADMM_primal(mu, nu, c, iters=10000, rho=1024, alpha=1.618):
"""ADMM_primal
"""
# initialize
m, n = c.shape
pi = np.zeros((m, n))
pi_dag = np.zeros((m, n))
w = np.zeros((m, n))
u = np.zeros(m)
v = np.zeros(n)
rho_tilde = rho * 32
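    # rho_tilde only controls the number of outer sweeps: it starts at 32*rho
    # and is halved after each full inner pass of ADMM updates (pi solve,
    # nonnegative projection pi_dag, dual ascent on u, v and w).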
while rho_tilde >= rho:
for _ in range(iters):
r = ((-w + u.reshape((m, 1)) + v.reshape((1, n)) - c) / rho +
mu.reshape((m, 1)) + nu.reshape((1, n)) + pi_dag)
pi = (r - ((r.sum(axis=1) - r.sum() / (m + n + 1)) / (n + 1)).reshape((m, 1))
- ((r.sum(axis=0) - r.sum() / (m + n + 1)) / (m + 1)).reshape((1, n)))
pi_dag = np.maximum(pi + w / rho, 0.0)
u = u + alpha * rho * (mu - pi.sum(axis=1))
v = v + alpha * rho * (nu - pi.sum(axis=0))
w = w + alpha * rho * (pi - pi_dag)
rho_tilde = rho_tilde / 2
print('error_mu = %.5e' % np.linalg.norm(pi_dag.sum(axis = 1) - mu, 1))
print('error_nu = %.5e' % np.linalg.norm(pi_dag.sum(axis = 0) - nu, 1))
    print('fval = %.5e' % (c * pi_dag).sum())
if __name__ == '__main__':
try:
print("Test...")
_mu, _nu, _c = get_params(64, 'random')
ADMM_primal(_mu, _nu, _c)
except KeyboardInterrupt:
print (" Ctrl+C pressed...")
sys.exit(1)
| 29.732143 | 89 | 0.465465 |
163306f757b2b46fb97912f794d0169c24de2f36
| 1,117 |
py
|
Python
|
misc_scripts/CleanVCFparams.py
|
pombase/legacy-eg-loader
|
1a324121325ffc3b9a4c15922f7a12756a9c3206
|
[
"Apache-2.0"
] | null | null | null |
misc_scripts/CleanVCFparams.py
|
pombase/legacy-eg-loader
|
1a324121325ffc3b9a4c15922f7a12756a9c3206
|
[
"Apache-2.0"
] | null | null | null |
misc_scripts/CleanVCFparams.py
|
pombase/legacy-eg-loader
|
1a324121325ffc3b9a4c15922f7a12756a9c3206
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
import os
import sys
import pprint
import argparse
parser = argparse.ArgumentParser(description='Clean up the data for a given parameter')
parser.add_argument('--infile', help="Path to the VCF file", default='test.vcf')
parser.add_argument('--outfile', help="Path to the new VCF file", default='test.out.vcf')
parser.add_argument('--param', help="Parameter to clean", default='PL')
args = parser.parse_args()
fi = open(args.infile, 'r')
#fo = open('Spombe.2013-01-02.filt3c.nr57-final.snps.anno-snpeff3.cleaned3.AB325691.vcf', 'w')
fo = open(args.outfile, 'w')
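# Header lines (starting with '#') are copied through unchanged; for each data
# line, the target parameter is located in the FORMAT column (field 9) and
# removed both from FORMAT and from every per-sample column that follows.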
for line in fi:
if len(line) == 0:
continue
if line[0] == '#':
fo.write(line)
continue
line = line.rstrip()
v = line.split('\t');
params = v[8].split(':')
out = v[0:8]
try:
paramIndex = params.index(args.param)
del params[paramIndex]
out.append(':'.join(params))
for d in v[9:]:
dv = d.split(':')
del dv[paramIndex]
out.append(':'.join(dv))
except ValueError:
out.append(':'.join(params))
out += v[9:]
fo.write("\t".join(out) + "\n")
fi.close()
fo.close()
| 25.386364 | 94 | 0.637422 |