hexsha (string, 40 chars) | size (int64, 5 to 2.06M) | ext (string, 11 classes) | lang (string, 1 class) | max_stars_repo_path (string, 3 to 251 chars) | max_stars_repo_name (string, 4 to 130 chars) | max_stars_repo_head_hexsha (string, 40 to 78 chars) | max_stars_repo_licenses (list, 1 to 10 items) | max_stars_count (int64, 1 to 191k, nullable) | max_stars_repo_stars_event_min_datetime (string, 24 chars, nullable) | max_stars_repo_stars_event_max_datetime (string, 24 chars, nullable) | max_issues_repo_path (string, 3 to 251 chars) | max_issues_repo_name (string, 4 to 130 chars) | max_issues_repo_head_hexsha (string, 40 to 78 chars) | max_issues_repo_licenses (list, 1 to 10 items) | max_issues_count (int64, 1 to 116k, nullable) | max_issues_repo_issues_event_min_datetime (string, 24 chars, nullable) | max_issues_repo_issues_event_max_datetime (string, 24 chars, nullable) | max_forks_repo_path (string, 3 to 251 chars) | max_forks_repo_name (string, 4 to 130 chars) | max_forks_repo_head_hexsha (string, 40 to 78 chars) | max_forks_repo_licenses (list, 1 to 10 items) | max_forks_count (int64, 1 to 105k, nullable) | max_forks_repo_forks_event_min_datetime (string, 24 chars, nullable) | max_forks_repo_forks_event_max_datetime (string, 24 chars, nullable) | content (string, 1 to 1.05M chars) | avg_line_length (float64, 1 to 1.02M) | max_line_length (int64, 3 to 1.04M) | alphanum_fraction (float64, 0 to 1) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5d1d8a76e626c2ee6f2e02dabc04e268863c54e7 | 723 | py | Python | algorithm/dynamic_programming/coin_change/solution.py | delaanthonio/hackerrank | b1f2e1e93b3260be90eb3b8cb8e86e9a700acf27 | ["MIT"] | 1 | 2017-07-02T01:35:39.000Z | 2017-07-02T01:35:39.000Z | algorithm/dynamic_programming/coin_change/solution.py | delaanthonio/hackerrank | b1f2e1e93b3260be90eb3b8cb8e86e9a700acf27 | ["MIT"] | null | null | null | algorithm/dynamic_programming/coin_change/solution.py | delaanthonio/hackerrank | b1f2e1e93b3260be90eb3b8cb8e86e9a700acf27 | ["MIT"] | 1 | 2018-04-03T15:11:56.000Z | 2018-04-03T15:11:56.000Z |
#!/usr/bin/env python3
"""
The Coin Change Problem
:author: Dela Anthonio
:hackerrank: https://hackerrank.com/delaanthonio
:problem: https://www.hackerrank.com/challenges/coin-change/problem
"""
from typing import List
def count_ways(amount: int, coins: List[int]) -> int:
"""Return the number of ways we can count to ``amount`` with values ``coins``."""
ways = [1] + [0] * amount
for coin in coins:
for val in range(coin, amount + 1):
ways[val] += ways[val - coin]
return ways[-1]
if __name__ == '__main__':
main()
| 24.1 | 85 | 0.625173 |
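The coin-change row above ends with `if __name__ == '__main__': main()`, but `main()` itself is not part of the excerpt (the viewer truncates the content cell). A minimal sketch of a driver plus a sanity check for `count_ways`; the two-line input format is an assumption about the HackerRank problem, not code recovered from the file:

```python
# Hypothetical driver; the input format is assumed, not taken from the original file.
def main() -> None:
    amount, _num_coins = map(int, input().split())   # e.g. "4 3"
    coins = list(map(int, input().split()))          # e.g. "1 2 3"
    print(count_ways(amount, coins))

# 4 can be formed from coins {1, 2, 3} in 4 ways: 1+1+1+1, 1+1+2, 2+2, 1+3.
assert count_ways(4, [1, 2, 3]) == 4
```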
5d1e0c02d27663acdb4392c5b988ee86f8972b53 | 147 | py | Python | climbproject/climbapp/admin.py | javawolfpack/ClimbProject | 508cf822a1eb0b78f7120a3d469ceb65e3b423f7 | ["MIT"] | null | null | null | climbproject/climbapp/admin.py | javawolfpack/ClimbProject | 508cf822a1eb0b78f7120a3d469ceb65e3b423f7 | ["MIT"] | 5 | 2018-11-24T16:15:24.000Z | 2022-02-11T03:40:48.000Z | climbproject/climbapp/admin.py | javawolfpack/ClimbProject | 508cf822a1eb0b78f7120a3d469ceb65e3b423f7 | ["MIT"] | 1 | 2018-11-24T16:13:49.000Z | 2018-11-24T16:13:49.000Z |
from django.contrib import admin
#from .models import *
from . import models
# Register your models here.
admin.site.register(models.ClimbModel)
| 18.375 | 38 | 0.782313 |
5d1e73e0421ce77dfe2003f2cfbf66fd1ffd338e | 1,264 | py | Python | setup.py | TheMagicNacho/artemis-nozzle | 5c02672feb7b437a4ff0ccc45394de3010bcd5ab | ["BSD-2-Clause"] | null | null | null | setup.py | TheMagicNacho/artemis-nozzle | 5c02672feb7b437a4ff0ccc45394de3010bcd5ab | ["BSD-2-Clause"] | null | null | null | setup.py | TheMagicNacho/artemis-nozzle | 5c02672feb7b437a4ff0ccc45394de3010bcd5ab | ["BSD-2-Clause"] | null | null | null |
# coding: utf-8
from runpy import run_path
from setuptools import setup
# Get the version from the relevant file
d = run_path('skaero/version.py')
__version__ = d['__version__']
setup(
name="scikit-aero",
version=__version__,
description="Aeronautical engineering calculations in Python.",
author="Juan Luis Cano",
author_email="[email protected]",
url="https://github.com/Juanlu001/scikit-aero",
license="BSD",
keywords=[
"aero", "aeronautical", "aerospace",
"engineering", "atmosphere", "gas"
],
requires=["numpy", "scipy"],
packages=[
"skaero",
"skaero.atmosphere", "skaero.gasdynamics",
"skaero.util"
],
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Physics"
],
long_description=open('README.rst').read()
)
| 30.829268 | 70 | 0.625791 |
5d1e977ff682cc24e27dda8c4298d920050e0d35 | 1,226 | py | Python | appendix/AI.by.Search/backtracking.search/3-1.eight.queens.py | royqh1979/programming_with_python | 7e1e8f88381151b803b6ae6ebda9809d9cc6664a | ["MIT"] | 5 | 2019-03-06T12:28:47.000Z | 2022-01-06T14:06:02.000Z | appendix/AI.by.Search/backtracking.search/3-1.eight.queens.py | royqh1979/programming_with_python | 7e1e8f88381151b803b6ae6ebda9809d9cc6664a | ["MIT"] | 6 | 2021-02-02T22:40:49.000Z | 2022-03-12T00:27:54.000Z | appendix/AI.by.Search/backtracking.search/3-1.eight.queens.py | royqh1979/programming_with_python | 7e1e8f88381151b803b6ae6ebda9809d9cc6664a | ["MIT"] | 4 | 2019-03-06T14:29:25.000Z | 2020-06-02T15:16:40.000Z |
"""
8
"""
n=int(input("n:"))
queens = [0]*(n+1)
#
col_flags=[0]*(n+1)
#
diag_flags = [0]*(2*n)
#
diag2_flags = [0] * (2*n)
count = 0
queen(n)
print(f"{count}\n")
| 15.518987 | 67 | 0.58646 |
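The n-queens row above sets up `queens`, `col_flags`, `diag_flags` and `diag2_flags` and then calls `queen(n)`, but the function itself (and the stripped Chinese comments) is missing from the excerpt. A sketch of what the backtracking routine plausibly looks like given those arrays; it is a reconstruction under those assumptions, not the original code:

```python
# Hypothetical reconstruction of the missing backtracking search.
def queen(n, row=1):
    global count
    if row > n:                      # all n queens placed: one solution found
        count += 1
        return
    for col in range(1, n + 1):
        # a square is free if its column and both diagonals are unused
        if not (col_flags[col] or diag_flags[row + col - 1] or diag2_flags[row - col + n]):
            queens[row] = col
            col_flags[col] = diag_flags[row + col - 1] = diag2_flags[row - col + n] = 1
            queen(n, row + 1)
            col_flags[col] = diag_flags[row + col - 1] = diag2_flags[row - col + n] = 0
```

With a definition like this placed before the `queen(n)` call, the script prints the number of solutions (92 for n = 8).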
5d1ef3d88231b985a99b51b59e99bc1d40f0567f | 24,820 | py | Python | multimodal_affinities/evaluation/analysis/plots_producer.py | amzn/multimodal-affinities | 23045eb6a9387ce0c9c6f5a15227cf1cc4282626 | ["CC-BY-4.0"] | 6 | 2021-07-06T12:48:18.000Z | 2021-12-06T01:52:24.000Z | multimodal_affinities/evaluation/analysis/plots_producer.py | amzn/multimodal-affinities | 23045eb6a9387ce0c9c6f5a15227cf1cc4282626 | ["CC-BY-4.0"] | null | null | null | multimodal_affinities/evaluation/analysis/plots_producer.py | amzn/multimodal-affinities | 23045eb6a9387ce0c9c6f5a15227cf1cc4282626 | ["CC-BY-4.0"] | 5 | 2021-07-10T08:09:17.000Z | 2022-03-24T16:27:15.000Z |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: CC-BY-4.0
import os
import cv2
from collections import namedtuple
import imageio
from PIL import Image
from random import randrange
import numpy as np
from sklearn.decomposition import PCA
from scipy.spatial.distance import pdist, squareform
import torch
import matplotlib
matplotlib.use('Agg') # Required for gif animations
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.image as image
import matplotlib.patches as patches
from multimodal_affinities.visualization.vis_handler import VisHandler
from multimodal_affinities.visualization.image_utils import resize_image
from multimodal_affinities.visualization.colors_util import rgb_hex_to_tuple
| 50.040323 | 148 | 0.621112 |
5d208ff94339d61c9f91237707d44d87ad7cd192 | 6,294 | py | Python | openstates/openstates-master/openstates/de/legislators.py | Jgorsick/Advocacy_Angular | 8906af3ba729b2303880f319d52bce0d6595764c | ["CC-BY-4.0"] | null | null | null | openstates/openstates-master/openstates/de/legislators.py | Jgorsick/Advocacy_Angular | 8906af3ba729b2303880f319d52bce0d6595764c | ["CC-BY-4.0"] | null | null | null | openstates/openstates-master/openstates/de/legislators.py | Jgorsick/Advocacy_Angular | 8906af3ba729b2303880f319d52bce0d6595764c | ["CC-BY-4.0"] | null | null | null |
import re
import lxml.html
from openstates.utils import LXMLMixin
from billy.scrape.legislators import LegislatorScraper, Legislator
| 35.359551 | 82 | 0.522402 |
5d20ce5f76b15dfb9e999e6d113dbf7e789ecd49 | 10,386 | py | Python | simpleredial/dataloader/fine_grained_test_dataloader.py | gmftbyGMFTBY/SimpleReDial-v1 | f45b8eb23d1499ec617b4cc4f417d83d8f2b6bde | ["MIT"] | 36 | 2021-10-13T10:32:08.000Z | 2022-03-20T07:50:05.000Z | simpleredial/dataloader/fine_grained_test_dataloader.py | gmftbyGMFTBY/SimpleReDial-v1 | f45b8eb23d1499ec617b4cc4f417d83d8f2b6bde | ["MIT"] | 3 | 2021-11-24T10:57:59.000Z | 2022-03-27T15:37:40.000Z | simpleredial/dataloader/fine_grained_test_dataloader.py | gmftbyGMFTBY/SimpleReDial-v1 | f45b8eb23d1499ec617b4cc4f417d83d8f2b6bde | ["MIT"] | 1 | 2022-03-15T07:13:22.000Z | 2022-03-15T07:13:22.000Z |
from header import *
from .utils import *
from .util_func import *
'''Only for Testing'''
| 41.378486 | 106 | 0.501637 |
5d21c5c96591c41db957d015a344d7b68da97b7a | 4,396 | py | Python | dabing/DABING-MIB.py | SvajkaJ/dabing | 8ddd8c1056b182b52f76028e23cd2ba8418a0dec | ["MIT"] | null | null | null | dabing/DABING-MIB.py | SvajkaJ/dabing | 8ddd8c1056b182b52f76028e23cd2ba8418a0dec | ["MIT"] | null | null | null | dabing/DABING-MIB.py | SvajkaJ/dabing | 8ddd8c1056b182b52f76028e23cd2ba8418a0dec | ["MIT"] | null | null | null |
#
# PySNMP MIB module DABING-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file://..\DABING-MIB.mib
# Produced by pysmi-0.3.4 at Tue Mar 22 12:53:47 2022
# On host ? platform ? version ? by user ?
# Using Python version 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 22:45:29) [MSC v.1916 32 bit (Intel)]
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ConstraintsUnion, ValueRangeConstraint, SingleValueConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "ValueRangeConstraint", "SingleValueConstraint", "ValueSizeConstraint")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, ModuleIdentity, IpAddress, ObjectIdentity, iso, Counter32, Unsigned32, Bits, NotificationType, TimeTicks, Counter64, enterprises, MibIdentifier, Integer32 = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "ModuleIdentity", "IpAddress", "ObjectIdentity", "iso", "Counter32", "Unsigned32", "Bits", "NotificationType", "TimeTicks", "Counter64", "enterprises", "MibIdentifier", "Integer32")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
dabing = ModuleIdentity((1, 3, 6, 1, 4, 1, 55532))
dabing.setRevisions(('2022-03-17 00:00',))
if mibBuilder.loadTexts: dabing.setLastUpdated('202203170000Z')
if mibBuilder.loadTexts: dabing.setOrganization('www.stuba.sk')
Parameters = MibIdentifier((1, 3, 6, 1, 4, 1, 55532, 1))
Agent = MibIdentifier((1, 3, 6, 1, 4, 1, 55532, 2))
Manager = MibIdentifier((1, 3, 6, 1, 4, 1, 55532, 3))
Notifications = MibIdentifier((1, 3, 6, 1, 4, 1, 55532, 4))
NotificationPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 55532, 4, 1))
NotificationObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 55532, 4, 2))
channel = MibScalar((1, 3, 6, 1, 4, 1, 55532, 1, 1), OctetString().clone('12C')).setMaxAccess("readonly")
if mibBuilder.loadTexts: channel.setStatus('current')
interval = MibScalar((1, 3, 6, 1, 4, 1, 55532, 1, 2), Integer32().clone(960)).setMaxAccess("readonly")
if mibBuilder.loadTexts: interval.setStatus('current')
trapEnabled = MibScalar((1, 3, 6, 1, 4, 1, 55532, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trapEnabled.setStatus('current')
agentIdentifier = MibScalar((1, 3, 6, 1, 4, 1, 55532, 2, 1), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentIdentifier.setStatus('current')
agentLabel = MibScalar((1, 3, 6, 1, 4, 1, 55532, 2, 2), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentLabel.setStatus('current')
agentStatus = MibScalar((1, 3, 6, 1, 4, 1, 55532, 2, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentStatus.setStatus('current')
managerHostname = MibScalar((1, 3, 6, 1, 4, 1, 55532, 3, 1), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: managerHostname.setStatus('current')
managerPort = MibScalar((1, 3, 6, 1, 4, 1, 55532, 3, 2), Integer32().clone(162)).setMaxAccess("readonly")
if mibBuilder.loadTexts: managerPort.setStatus('current')
genericPayload = MibScalar((1, 3, 6, 1, 4, 1, 55532, 4, 2, 1), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: genericPayload.setStatus('current')
malfunctionTrap = NotificationType((1, 3, 6, 1, 4, 1, 55532, 4, 1, 1)).setObjects(("DABING-MIB", "genericPayload"))
if mibBuilder.loadTexts: malfunctionTrap.setStatus('current')
testTrap = NotificationType((1, 3, 6, 1, 4, 1, 55532, 4, 1, 2)).setObjects(("DABING-MIB", "genericPayload"))
if mibBuilder.loadTexts: testTrap.setStatus('current')
mibBuilder.exportSymbols("DABING-MIB", Notifications=Notifications, channel=channel, PYSNMP_MODULE_ID=dabing, testTrap=testTrap, malfunctionTrap=malfunctionTrap, Parameters=Parameters, agentLabel=agentLabel, managerPort=managerPort, trapEnabled=trapEnabled, managerHostname=managerHostname, Manager=Manager, NotificationPrefix=NotificationPrefix, Agent=Agent, genericPayload=genericPayload, NotificationObjects=NotificationObjects, agentIdentifier=agentIdentifier, dabing=dabing, agentStatus=agentStatus, interval=interval)
| 93.531915 | 523 | 0.75364 |
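The generated MIB module above references `mibBuilder` without defining it because pysmi output is meant to be executed by a pysnmp MIB builder, which injects that name at load time. A minimal sketch of loading it, assuming the generated DABING-MIB.py sits in the current directory (the path is illustrative):

```python
from pysnmp.smi import builder

mib_builder = builder.MibBuilder()
# Assumed location of the generated DABING-MIB.py shown above.
mib_builder.addMibSources(builder.DirMibSource('.'))
mib_builder.loadModules('DABING-MIB')

# Pull one of the exported scalars back out of the loaded module.
(channel,) = mib_builder.importSymbols('DABING-MIB', 'channel')
print(channel)
```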
5d228bf6a4bad705b90b99a4ee75f695228944a7 | 956 | py | Python | parameter_setup/run_setup_extra_vis.py | kharris/allen-voxel-network | 3c39cf7e7400c09f78ebe9d1d9f8a6d7b9ef6d7b | ["BSD-2-Clause"] | 7 | 2018-04-02T01:30:31.000Z | 2020-02-23T03:31:57.000Z | parameter_setup/run_setup_extra_vis.py | yijizhao/allen-voxel-network | 3c39cf7e7400c09f78ebe9d1d9f8a6d7b9ef6d7b | ["BSD-2-Clause"] | null | null | null | parameter_setup/run_setup_extra_vis.py | yijizhao/allen-voxel-network | 3c39cf7e7400c09f78ebe9d1d9f8a6d7b9ef6d7b | ["BSD-2-Clause"] | 2 | 2016-11-10T03:27:29.000Z | 2018-08-06T17:32:05.000Z |
import os
import numpy as np
save_stem='extra_vis_friday_harbor'
data_dir='../../data/sdk_new_100'
resolution=100
cre=False
source_acronyms=['VISal','VISam','VISl','VISp','VISpl','VISpm',
'VISli','VISpor','VISrl','VISa']
lambda_list = np.logspace(3,12,10)
scale_lambda=True
min_vox=0
# save_file_name='visual_output.hdf5'
#source_coverage=0.90
source_coverage=0.95
#source_shell = 1
source_shell=None
save_dir=os.path.join('../../data/connectivities',save_stem)
experiments_fn=None
target_acronyms=source_acronyms
solver=os.path.abspath('../smoothness_c/solve')
cmdfile=os.path.join(save_dir,'model_fitting_cmds')
selected_fit_cmds=os.path.join(save_dir,'model_fitting_after_selection_cmds')
save_mtx=True
cross_val_matrices=True
cross_val=5
fit_gaussian=False
select_one_lambda=False
if select_one_lambda:
lambda_fn='lambda_opt'
else:
lambda_fn='lambda_ipsi_contra_opt'
laplacian='free'
shuffle_seed=666
max_injection_volume=0.7
| 26.555556 | 77 | 0.789749 |
5d246013ebe48dbfeb0d5a33d4837a599aed75ec | 396 | py | Python | examples/runall.py | GNiklas/MOSSEPy | fbae1294beefe48f321bc5dbbc70e6c72d3ffe1f | ["MIT"] | null | null | null | examples/runall.py | GNiklas/MOSSEPy | fbae1294beefe48f321bc5dbbc70e6c72d3ffe1f | ["MIT"] | null | null | null | examples/runall.py | GNiklas/MOSSEPy | fbae1294beefe48f321bc5dbbc70e6c72d3ffe1f | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 20 09:42:39 2020
@author: niklas
"""
from mossepy.mosse_tracker import MOSSE
# choose position of object in first frame
# that should be done by mouse click
objPos = [256, 256]
# choose tracker type
tracker = MOSSE()
# initialize object position in first frame
tracker.setObjPos(objPos)
# start tracking
tracker.trackImg()
| 19.8 | 43 | 0.729798 |
5d252f7a220679f8642989c387a00db59609427b | 3,194 | py | Python | core/formulas.py | mike006322/PolynomialCalculator | bf56b0e773a3461ab2aa958d0d90e08f80a4d201 | ["MIT"] | null | null | null | core/formulas.py | mike006322/PolynomialCalculator | bf56b0e773a3461ab2aa958d0d90e08f80a4d201 | ["MIT"] | null | null | null | core/formulas.py | mike006322/PolynomialCalculator | bf56b0e773a3461ab2aa958d0d90e08f80a4d201 | ["MIT"] | null | null | null |
def solve(polynomial):
"""
input is polynomial
if more than one variable, returns 'too many variables'
looks for formula to apply to coefficients
returns solution or 'I cannot solve yet...'
"""
if len(polynomial.term_matrix[0]) > 2:
return 'too many variables'
elif len(polynomial.term_matrix[0]) == 1:
return polynomial.term_matrix[1][0]
elif len(polynomial.term_matrix[0]) == 2:
degree = polynomial.term_matrix[1][1]
if degree == 1:
if len(polynomial.term_matrix) == 2:
return 0
else:
return -polynomial.term_matrix[2][0]/polynomial.term_matrix[1][0]
if degree == 2:
ans = quadratic_formula(polynomial)
return ans
if degree > 2:
return Durand_Kerner(polynomial)
def quadratic_formula(polynomial):
"""
input is single-variable polynomial of degree 2
returns zeros
"""
if len(polynomial.term_matrix) == 3:
if polynomial.term_matrix[2][1] == 1:
a, b = polynomial.term_matrix[1][0], polynomial.term_matrix[2][0]
return 0, -b/a
a, c = polynomial.term_matrix[1][0], polynomial.term_matrix[2][0]
return (-c/a)**.5, -(-c/a)**.5
if len(polynomial.term_matrix) == 2:
a, b, c, = polynomial.term_matrix[1][0], 0, 0
elif len(polynomial.term_matrix) == 3:
a, b, c = polynomial.term_matrix[1][0], polynomial.term_matrix[2][0], 0
else:
a, b, c = polynomial.term_matrix[1][0], polynomial.term_matrix[2][0], polynomial.term_matrix[3][0]
ans1 = (-b + (b**2 - 4*a*c)**.5)/2*a
ans2 = (-b - (b**2 - 4*a*c)**.5)/2*a
if ans1 == ans2:
return ans1
return ans1, ans2
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0001):
"""
returns boolean whether abs(a-b) is less than abs_total or rel_total*max(a, b)
"""
return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
def Durand_Kerner(f):
"""
input polynomial
returns numerical approximation of all complex roots
"""
roots = []
for i in range(f.degree()):
roots.append((0.4 + 0.9j)**i)
diff = 1
diff_temp = 0
while diff > .00000001 and not isclose(diff_temp, diff):
iterate()
for i in range(len(roots)):
if isclose(roots[i].real, round(roots[i].real)):
temp = round(roots[i].real)
roots[i] -= roots[i].real
roots[i] += temp
if isclose(roots[i].imag, round(roots[i].imag)):
temp = round(roots[i].imag)
roots[i] -= roots[i].imag*1j
roots[i] += temp*1j
return roots
if __name__ == '__main__':
pass
| 31.623762 | 106 | 0.556669 |
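`Durand_Kerner` above loops on `iterate()`, which never appears in the excerpt, so `diff` and `diff_temp` are never updated either. A sketch of the missing step, written as the closure it would have to be inside `Durand_Kerner`, assuming the polynomial object `f` can be evaluated at a point; names and structure are guesses, not the original code:

```python
# Hypothetical reconstruction of the iterate() closure used by Durand_Kerner.
def iterate():
    nonlocal diff, diff_temp                      # valid only when nested inside Durand_Kerner
    new_roots = []
    for i, r in enumerate(roots):
        denominator = 1
        for j, s in enumerate(roots):
            if j != i:
                denominator *= r - s
        new_roots.append(r - f(r) / denominator)  # z_i -> z_i - f(z_i) / prod_{j != i}(z_i - z_j)
    diff_temp = diff
    diff = max(abs(new - old) for new, old in zip(new_roots, roots))
    roots[:] = new_roots
```

Separately, `quadratic_formula` computes `(-b + (b**2 - 4*a*c)**.5)/2*a`, which multiplies by `a`; the conventional formula divides by `2*a`, i.e. `/(2*a)`.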
5d25df408d94ab31e94a89b3213ae144d0477893 | 5,761 | py | Python | manim/mobject/svg/style_utils.py | 5Points7Edges/manim | 1c2a5099133dbf0abdd5517b2ac93cfc8275b842 | ["MIT"] | null | null | null | manim/mobject/svg/style_utils.py | 5Points7Edges/manim | 1c2a5099133dbf0abdd5517b2ac93cfc8275b842 | ["MIT"] | null | null | null | manim/mobject/svg/style_utils.py | 5Points7Edges/manim | 1c2a5099133dbf0abdd5517b2ac93cfc8275b842 | ["MIT"] | null | null | null |
"""Utility functions for parsing SVG styles."""
__all__ = ["cascade_element_style", "parse_style", "parse_color_string"]
from xml.dom.minidom import Element as MinidomElement
from colour import web2hex
from ...utils.color import rgb_to_hex
from typing import Dict, List
CASCADING_STYLING_ATTRIBUTES: List[str] = [
"fill",
"stroke",
"fill-opacity",
"stroke-opacity",
]
# The default styling specifications for SVG images,
# according to https://www.w3.org/TR/SVG/painting.html
# (ctrl-F for "initial")
SVG_DEFAULT_ATTRIBUTES: Dict[str, str] = {
"fill": "black",
"fill-opacity": "1",
"stroke": "none",
"stroke-opacity": "1",
}
def cascade_element_style(
element: MinidomElement, inherited: Dict[str, str]
) -> Dict[str, str]:
"""Collect the element's style attributes based upon both its inheritance and its own attributes.
SVG uses cascading element styles. A closer ancestor's style takes precedence over a more distant ancestor's
style. In order to correctly calculate the styles, the attributes are passed down through the inheritance tree,
updating where necessary.
Note that this method only copies the values and does not parse them. See :meth:`parse_color_string` for converting
from SVG attributes to manim keyword arguments.
Parameters
----------
element : :class:`MinidomElement`
Element of the SVG parse tree
inherited : :class:`dict`
Dictionary of SVG attributes inherited from the parent element.
Returns
-------
:class:`dict`
Dictionary mapping svg attributes to values with `element`'s values overriding inherited values.
"""
style = inherited.copy()
# cascade the regular elements.
for attr in CASCADING_STYLING_ATTRIBUTES:
entry = element.getAttribute(attr)
if entry:
style[attr] = entry
# the style attribute should be handled separately in order to
# break it up nicely. furthermore, style takes priority over other
# attributes in the same element.
style_specs = element.getAttribute("style")
if style_specs:
for style_spec in style_specs.split(";"):
try:
key, value = style_spec.split(":")
except ValueError as e:
if not style_spec.strip():
# there was just a stray semicolon at the end, producing an emptystring
pass
else:
raise e
else:
style[key.strip()] = value.strip()
return style
def parse_color_string(color_spec: str) -> str:
"""Handle the SVG-specific color strings and convert them to HTML #rrggbb format.
Parameters
----------
color_spec : :class:`str`
String in any web-compatible format
Returns
-------
:class:`str`
Hexadecimal color string in the format `#rrggbb`
"""
if color_spec[0:3] == "rgb":
# these are only in integer form, but the Colour module wants them in floats.
splits = color_spec[4:-1].split(",")
if splits[0][-1] == "%":
# if the last character of the first number is a percentage,
# then interpret the number as a percentage
parsed_rgbs = [float(i[:-1]) / 100.0 for i in splits]
else:
parsed_rgbs = [int(i) / 255.0 for i in splits]
hex_color = rgb_to_hex(parsed_rgbs)
elif color_spec[0] == "#":
# its OK, parse as hex color standard.
hex_color = color_spec
else:
# attempt to convert color names like "red" to hex color
hex_color = web2hex(color_spec, force_long=True)
return hex_color
def fill_default_values(svg_style: Dict) -> None:
"""
Fill in the default values for properties of SVG elements,
if they are not currently set in the style dictionary.
Parameters
----------
svg_style : :class:`dict`
Style dictionary with SVG property names. Some may be missing.
Returns
-------
:class:`dict`
Style attributes; none are missing.
"""
for key in SVG_DEFAULT_ATTRIBUTES:
if key not in svg_style:
svg_style[key] = SVG_DEFAULT_ATTRIBUTES[key]
def parse_style(svg_style: Dict[str, str]) -> Dict:
"""Convert a dictionary of SVG attributes to Manim VMobject keyword arguments.
Parameters
----------
svg_style : :class:`dict`
Style attributes as a string-to-string dictionary. Keys are valid SVG element attributes (fill, stroke, etc)
Returns
-------
:class:`dict`
Style attributes, but in manim kwargs form, e.g., keys are fill_color, stroke_color
"""
manim_style = {}
fill_default_values(svg_style)
if "fill-opacity" in svg_style:
manim_style["fill_opacity"] = float(svg_style["fill-opacity"])
if "stroke-opacity" in svg_style:
manim_style["stroke_opacity"] = float(svg_style["stroke-opacity"])
# nones need to be handled specially
if "fill" in svg_style:
if svg_style["fill"] == "none":
manim_style["fill_opacity"] = 0
else:
manim_style["fill_color"] = parse_color_string(svg_style["fill"])
if "stroke" in svg_style:
if svg_style["stroke"] == "none":
# In order to not break animations.creation.Write,
# we interpret no stroke as stroke-width of zero and
# color the same as the fill color, if it exists.
manim_style["stroke_width"] = 0
if "fill_color" in manim_style:
manim_style["stroke_color"] = manim_style["fill_color"]
else:
manim_style["stroke_color"] = parse_color_string(svg_style["stroke"])
return manim_style
| 31.140541 | 119 | 0.635306 |
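A short usage sketch of the helpers in this file, run on a hand-written SVG fragment (the markup and values are illustrative only):

```python
from xml.dom.minidom import parseString

rect = parseString('<rect style="fill:#ff0000;stroke:none" fill-opacity="0.5"/>').documentElement

# An empty dict stands in for styles inherited from ancestor elements.
svg_style = cascade_element_style(rect, {})
# -> {'fill-opacity': '0.5', 'fill': '#ff0000', 'stroke': 'none'}

manim_kwargs = parse_style(svg_style)
# -> {'fill_opacity': 0.5, 'stroke_opacity': 1.0, 'fill_color': '#ff0000',
#     'stroke_width': 0, 'stroke_color': '#ff0000'}
```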
5d2760eb1387adc84de54a02742f29d1beeb4ef2 | 1,361 | py | Python | iotrigger.py | mm011106/iotrigger | 643ced0440a8c4fb95ade56399f813c88ac8ddd6 | ["Apache-2.0"] | null | null | null | iotrigger.py | mm011106/iotrigger | 643ced0440a8c4fb95ade56399f813c88ac8ddd6 | ["Apache-2.0"] | 1 | 2017-01-08T14:22:32.000Z | 2019-01-08T23:51:53.000Z | iotrigger.py | mm011106/iotrigger | 643ced0440a8c4fb95ade56399f813c88ac8ddd6 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
#coding:utf-8
import os
import RPi.GPIO as GPIO #
import json
from time import sleep #
from twython import Twython
f=open("tw_config.json",'r')
config=json.load(f)
f.close()
CONSUMER_KEY =config['consumer_key']
CONSUMER_SECRET =config['consumer_secret']
ACCESS_TOKEN =config['access_token']
ACCESS_SECRET =config['access_secret']
dist=config['dist']
api = Twython(CONSUMER_KEY,CONSUMER_SECRET,ACCESS_TOKEN,ACCESS_SECRET)
trigger_input=21
GPIO.setmode(GPIO.BCM)
GPIO.setup(25, GPIO.OUT)
GPIO.setup(trigger_input, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.add_event_detect(trigger_input, GPIO.RISING, callback=on_positive_edge, bouncetime=1000)
ledstate = GPIO.LOW
try:
while True:
sleep(0.01)
except KeyboardInterrupt: #
pass
GPIO.cleanup() #
| 21.951613 | 93 | 0.714916 |
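The script above registers `on_positive_edge` as the GPIO edge callback, but the excerpt is cut off before that function appears (its comments are also stripped to bare `#`). A plausible sketch, assuming the callback toggles the LED on pin 25 and posts a status through the `api` object created above; the message text and toggle behaviour are assumptions:

```python
# Hypothetical reconstruction of the missing edge-detection callback.
def on_positive_edge(channel):
    global ledstate
    if GPIO.input(trigger_input):                    # confirm the pin is still high
        ledstate = GPIO.HIGH if ledstate == GPIO.LOW else GPIO.LOW
        GPIO.output(25, ledstate)
        api.update_status(status="trigger on pin {} ({})".format(channel, dist))
```

In the original file this definition would have to precede the `GPIO.add_event_detect` call that references it.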
5d27ee1b746e920a8fbbde21e0ae74440138e1ce | 2,293 | py | Python | src/wheezy/template/tests/test_utils.py | nxsofsys/wheezy.template | b65b70b2927974790ff2413843ec752dd9c6c609 | ["MIT"] | 2 | 2017-02-08T11:48:41.000Z | 2017-12-18T08:04:13.000Z | src/wheezy/template/tests/test_utils.py | ezotrank/wheezy.template | d54bc667303fd4de7314f659e17dd317ac3e3a82 | ["MIT"] | null | null | null | src/wheezy/template/tests/test_utils.py | ezotrank/wheezy.template | d54bc667303fd4de7314f659e17dd317ac3e3a82 | ["MIT"] | 1 | 2022-03-04T20:26:20.000Z | 2022-03-04T20:26:20.000Z |
""" Unit tests for ``wheezy.templates.utils``.
"""
import unittest
| 31.847222 | 62 | 0.602704 |
5d28c827c798225b6b2063067a58a432acbd8766 | 34,967 | py | Python | akshare/economic/macro_constitute.py | peterrosetu/akshare | 9eac9ccb531b6e07d39140830d65349ea9441dc3 | ["MIT"] | 1 | 2020-12-26T23:39:05.000Z | 2020-12-26T23:39:05.000Z | akshare/economic/macro_constitute.py | Hecate2/akshare | 2d8904f5cf242ab784f748ab2f886329ebf69742 | ["MIT"] | null | null | null | akshare/economic/macro_constitute.py | Hecate2/akshare | 2d8904f5cf242ab784f748ab2f886329ebf69742 | ["MIT"] | 2 | 2020-09-23T08:50:14.000Z | 2020-09-28T09:57:07.000Z |
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2019/10/21 12:08
Desc: ---
"""
import json
import time
import pandas as pd
import requests
from tqdm import tqdm
from akshare.economic.cons import (
JS_CONS_GOLD_ETF_URL,
JS_CONS_SLIVER_ETF_URL,
JS_CONS_OPEC_URL,
)
def macro_cons_gold_volume():
"""
ETFSPDR Gold Trust, 20041118-
:return: pandas.Series
2004-11-18 8.09
2004-11-19 57.85
2004-11-22 87.09
2004-11-23 87.09
2004-11-24 96.42
...
2019-10-20 924.64
2019-10-21 924.64
2019-10-22 919.66
2019-10-23 918.48
2019-10-24 918.48
"""
t = time.time()
res = requests.get(
JS_CONS_GOLD_ETF_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"][""] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["()"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "etf",
"attr_id": "1",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "gold_volume"
temp_df = temp_df.astype(float)
return temp_df
def macro_cons_gold_change():
"""
ETFSPDR Gold Trust, 20041118-
:return: pandas.Series
2004-11-18 0
2004-11-19 49.76
2004-11-22 29.24
2004-11-23 0.00
2004-11-24 9.33
...
2019-10-20 0.00
2019-10-21 0.00
2019-10-22 -4.98
2019-10-23 -1.18
2019-10-24 0.00
"""
t = time.time()
res = requests.get(
JS_CONS_GOLD_ETF_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"][""] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["/()"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "etf",
"attr_id": "1",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, [0, 2]]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "gold_change"
temp_df = temp_df.astype(float)
return temp_df
def macro_cons_gold_amount():
"""
ETFSPDR Gold Trust, 20041118-
:return: pandas.Series
2004-11-18 114920000.00
2004-11-19 828806907.20
2004-11-22 1253785205.50
2004-11-23 1254751438.19
2004-11-24 1390568824.08
...
2019-10-20 44286078486.23
2019-10-21 44333677232.68
2019-10-22 43907962483.56
2019-10-23 44120217405.82
2019-10-24 44120217405.82
"""
t = time.time()
res = requests.get(
JS_CONS_GOLD_ETF_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"][""] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["()"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "etf",
"attr_id": "1",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, [0, 3]]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "gold_amount"
temp_df = temp_df.astype(float)
return temp_df
def macro_cons_silver_volume():
"""
ETF--iShares Silver Trust, 20060429-
:return: pandas.Series
2006-04-29 653.17
2006-05-02 653.17
2006-05-03 995.28
2006-05-04 1197.43
2006-05-05 1306.29
...
2019-10-17 11847.91
2019-10-18 11847.91
2019-10-21 11813.02
2019-10-22 11751.96
2019-10-23 11751.96
"""
t = time.time()
res = requests.get(
JS_CONS_SLIVER_ETF_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"][""] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["()"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "etf",
"attr_id": "2",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, [0, 1]]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "silver_volume"
url = "https://cdn.jin10.com/data_center/reports/etf_2.json"
r = requests.get(url)
data_json = r.json()
append_temp_df = pd.DataFrame(data_json["values"]).T
append_temp_df.columns = [item["name"] for item in data_json["keys"]]
temp_append_df = append_temp_df[""]
temp_append_df.name = "silver_volume"
temp_df = temp_df.reset_index()
temp_df["index"] = temp_df["index"].astype(str)
temp_df = temp_df.append(temp_append_df.reset_index())
temp_df.drop_duplicates(subset=["index"], keep="last", inplace=True)
temp_df.index = pd.to_datetime(temp_df["index"])
del temp_df["index"]
temp_df = temp_df[temp_df != 'Show All']
temp_df.sort_index(inplace=True)
temp_df = temp_df.astype(float)
return temp_df
def macro_cons_silver_change():
"""
ETF--iShares Silver Trust, 20060429-
:return: pandas.Series
2006-04-29 0
2006-05-02 0.00
2006-05-03 342.11
2006-05-04 202.15
2006-05-05 108.86
...
2019-10-17 -58.16
2019-10-18 0.00
2019-10-21 -34.89
2019-10-22 -61.06
2019-10-23 0.00
"""
t = time.time()
res = requests.get(
JS_CONS_SLIVER_ETF_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"][""] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["/()"]
temp_df.name = "silver_change"
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "etf",
"attr_id": "2",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, [0, 2]]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "silver_change"
url = "https://cdn.jin10.com/data_center/reports/etf_2.json"
r = requests.get(url)
data_json = r.json()
append_temp_df = pd.DataFrame(data_json["values"]).T
append_temp_df.columns = [item["name"] for item in data_json["keys"]]
temp_append_df = append_temp_df["/"]
temp_append_df.name = "silver_change"
temp_df = temp_df.reset_index()
temp_df["index"] = temp_df["index"].astype(str)
temp_df = temp_df.append(temp_append_df.reset_index())
temp_df.drop_duplicates(subset=["index"], keep="last", inplace=True)
temp_df.index = pd.to_datetime(temp_df["index"])
del temp_df["index"]
temp_df = temp_df[temp_df != 'Show All']
temp_df.sort_index(inplace=True)
temp_df = temp_df.astype(float)
return temp_df
def macro_cons_silver_amount():
"""
ETF--iShares Silver Trust, 20060429-
:return: pandas.Series
2006-04-29 263651152
2006-05-02 263651152
2006-05-03 445408550
2006-05-04 555123947
2006-05-05 574713264
...
2019-10-17 Show All
2019-10-18 Show All
2019-10-21 Show All
2019-10-22 Show All
2019-10-23 Show All
"""
t = time.time()
res = requests.get(
JS_CONS_SLIVER_ETF_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"][""] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["()"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "etf",
"attr_id": "2",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, [0, 3]]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "silver_amount"
url = "https://cdn.jin10.com/data_center/reports/etf_2.json"
r = requests.get(url)
data_json = r.json()
append_temp_df = pd.DataFrame(data_json["values"]).T
append_temp_df.columns = [item["name"] for item in data_json["keys"]]
temp_append_df = append_temp_df[""]
temp_append_df.name = "silver_amount"
temp_df = temp_df.reset_index()
temp_df["index"] = temp_df["index"].astype(str)
temp_df = temp_df.append(temp_append_df.reset_index())
temp_df.drop_duplicates(subset=["index"], keep="last", inplace=True)
temp_df.index = pd.to_datetime(temp_df["index"])
del temp_df["index"]
temp_df = temp_df[temp_df != 'Show All']
temp_df.sort_index(inplace=True)
temp_df = temp_df.astype(float)
return temp_df
def macro_cons_opec_near_change():
"""
-, 20170118-
:return: pandas.Series
\
2017-01-18 -0.87 3.56 -0.25 -0.87 0.95 4.26 0.20 3.13 -11.35
2017-02-13 -4.17 -2.32 -1.67 -1.00 5.02 -16.57 -14.12 6.47 10.18
2017-03-14 -0.02 -1.82 -0.44 -0.69 3.61 -6.20 -0.93 -1.11 5.80
2017-04-12 0.45 -1.87 -0.28 0.19 -2.87 -0.85 -0.95 -6.08 -2.98
2017-05-11 -0.75 9.71 -0.06 0.88 -3.47 -3.91 0.03 -6.16 5.08
2017-06-13 0.96 -5.42 0.22 -0.13 0.45 4.44 0.00 17.82 17.42
2017-07-12 -0.09 6.60 -0.21 -0.77 1.67 6.06 -0.02 12.70 9.67
2017-08-10 -0.10 -1.93 0.85 0.71 0.69 -3.31 -0.74 15.43 3.43
2017-09-12 0.41 0.83 -0.03 -3.23 -0.23 -2.31 0.01 -11.23 13.83
2017-10-11 -0.85 -0.29 -0.05 1.44 0.09 3.16 -0.17 5.39 5.08
2017-11-13 -3.84 6.98 0.71 0.18 -1.13 -13.10 -0.37 4.23 -5.44
2017-12-13 1.41 -10.87 -0.51 -0.47 -0.22 0.10 -0.53 0.61 9.58
2018-01-18 3.03 4.48 -0.72 -0.01 1.32 0.79 -0.25 -0.70 7.57
2018-04-12 -4.95 -8.17 0.26 -0.91 0.33 -1.31 0.23 -3.72 1.82
2018-05-14 1.77 -0.78 0.31 -0.93 1.00 -0.07 0.08 0.69 -0.83
2018-06-12 3.90 1.40 0.06 0.18 0.56 2.77 -0.57 -2.43 -5.35
2018-07-11 0.46 -8.83 -0.09 0.35 -2.27 7.15 2.73 -25.43 2.78
2018-08-13 1.38 1.17 0.42 -0.34 -5.63 2.41 7.85 -5.67 7.05
2018-09-12 -1.40 -0.80 0.40 18.80 -15.00 9.00 0.80 25.60 7.40
2018-10-11 -0.80 5.70 53.10 -0.10 -15.00 0.80 0.60 10.30 2.60
2018-11-13 -0.40 2.20 -0.30 0.30 -15.60 465.30 -3.30 6.00 -1.70
2018-12-12 -0.50 0.30 0.10 -1.10 -38.00 -2.30 4.50 -1.10 -3.00
2019-03-14 0.20 2.20 0.50 0.70 1.20 -7.00 -1.40 2.30 1.00
2019-04-10 -0.70 0.70 52.40 0.90 -2.80 -12.60 -0.10 19.60 1.10
2019-06-13 0.60 7.40 -0.10 2.30 -22.70 9.40 1.30 -0.30 -9.20
2017-01-18 -14.93 -0.63 -4.52 -22.09
2017-02-13 -49.62 -15.93 -3.05 -89.02
2017-03-14 -6.81 -3.69 -1.60 -13.95
2017-04-12 4.16 -3.27 -2.59 -15.27
2017-05-11 4.92 -6.23 -2.60 -1.82
2017-06-13 0.23 -1.80 -0.77 33.61
2017-07-12 5.13 -0.07 -1.36 39.35
2017-08-10 3.18 -0.67 -1.58 17.26
2017-09-12 -1.03 -2.02 -3.19 -7.91
2017-10-11 -0.07 -0.84 -5.19 8.85
2017-11-13 1.69 -0.60 -4.36 -15.09
2017-12-13 -4.54 -3.55 -4.16 -13.35
2018-01-18 -1.09 -0.70 -8.22 4.24
2018-04-12 -4.69 4.49 -5.53 -20.14
2018-05-14 4.65 0.61 -4.17 1.21
2018-06-12 8.55 -0.63 -4.25 3.54
2018-07-11 40.54 3.51 -4.75 17.34
2018-08-13 -5.28 6.92 -4.77 4.07
2018-09-12 3.80 1.20 -3.60 27.80
2018-10-11 10.80 3.00 -4.20 13.20
2018-11-13 12.70 14.20 -4.00 12.70
2018-12-12 37.70 7.10 -5.20 -1.10
2019-03-14 -8.60 -0.40 -14.20 -22.10
2019-04-10 -32.40 -0.90 -28.90 -53.40
2019-06-13 -7.60 0.30 -3.50 -23.60
"""
t = time.time()
big_df = pd.DataFrame()
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_opec_report",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
res = requests.get(f"https://datacenter-api.jin10.com/reports/dates?category=opec&_={str(int(round(t * 1000)))}",
headers=headers) #
all_date_list = res.json()["data"]
bar = tqdm(reversed(all_date_list[:-1]))
for item in bar:
bar.set_description(f"Please wait for a moment, now downing {item}'s data")
res = requests.get(
f"https://datacenter-api.jin10.com/reports/list?category=opec&date={item}&_={str(int(round(t * 1000)))}",
headers=headers)
temp_df = pd.DataFrame(res.json()["data"]["values"],
columns=pd.DataFrame(res.json()["data"]["keys"])["name"].tolist()).T
temp_df.columns = temp_df.iloc[0, :]
temp_df = temp_df.iloc[1:, :]
try:
temp_df = temp_df[['', '', '', '', '', '', '', '', '',
'', '', '']].iloc[-1, :]
except:
temp_df = temp_df[['', '', '', '', '', '', '', '', '',
'', '', '']].iloc[-1, :]
big_df[temp_df.name] = temp_df
big_df = big_df.T
big_df.columns.name = ""
big_df = big_df.astype(float)
return big_df
def _macro_cons_opec_month():
"""
-, 20170118-
, ,
:return: pandas.Series
\
2017-01-18 108.0 172.4 54.5 21.3 372.0 463.2 281.2 60.8 154.2
2017-02-13 104.5 165.1 52.7 19.9 377.5 447.6 271.8 67.5 157.6
2017-03-14 105.3 164.1 52.6 19.4 381.4 441.4 270.9 66.9 160.8
2017-04-12 105.6 161.4 52.6 19.8 379.0 440.2 270.2 62.2 154.5
2017-05-11 104.7 169.2 52.4 20.6 375.9 437.3 270.2 55.0 150.8
2017-06-13 105.9 161.3 52.8 20.4 379.5 442.4 270.5 73.0 168.0
2017-07-12 106.0 166.8 52.7 19.7 379.0 450.2 270.9 85.2 173.3
2017-08-10 105.9 164.6 53.6 20.5 382.4 446.8 270.3 100.1 174.8
2017-09-12 106.5 164.6 53.7 17.3 382.8 444.8 270.2 89.0 186.1
2017-10-11 104.6 164.1 53.6 20.1 382.7 449.4 270.0 92.3 185.5
2017-11-13 101.2 171.1 54.1 20.3 382.3 438.3 270.8 96.2 173.8
2017-12-13 101.3 158.1 53.3 19.7 381.8 439.6 270.3 97.3 179.0
2018-01-18 103.7 163.3 52.6 19.7 382.9 440.5 270.0 96.2 186.1
2018-04-12 98.4 152.4 51.8 18.3 381.4 442.6 270.4 96.8 181.0
2018-05-14 99.7 151.5 52.0 18.3 382.3 442.9 270.5 98.2 179.1
2018-06-12 103.1 152.5 51.9 18.9 382.9 445.5 270.1 95.5 171.1
2018-07-11 103.9 143.1 51.9 19.0 379.9 453.3 273.1 70.8 166.0
2018-08-13 106.2 145.6 52.5 18.8 373.7 455.6 279.1 66.4 166.7
2018-09-12 104.5 144.8 52.9 18.7 358.4 464.9 280.2 92.6 172.5
2018-10-11 104.9 151.9 53.1 18.7 344.7 465.0 281.2 105.3 174.8
2018-11-13 105.4 153.3 52.5 18.6 329.6 465.4 276.4 111.4 175.1
2018-12-12 105.2 152.1 52.5 17.6 295.4 463.1 280.9 110.4 173.6
2019-03-14 102.6 145.7 52.2 20.3 274.3 463.3 270.9 90.6 174.1
2019-04-10 101.8 145.4 52.4 21.4 269.8 452.2 270.9 109.8 173.3
2019-06-13 102.9 147.1 52.9 21.1 237.0 472.4 271.0 117.4 173.3
2017-01-18 1047.4 307.1 202.1 3308.5
2017-02-13 994.6 293.1 200.4 3213.9
2017-03-14 979.7 292.5 198.7 3195.8
2017-04-12 999.4 289.5 197.2 3192.8
2017-05-11 995.4 284.2 195.6 3173.2
2017-06-13 994.0 288.5 196.3 3213.9
2017-07-12 995.0 289.8 193.8 3261.1
2017-08-10 1006.7 290.5 193.2 3286.9
2017-09-12 1002.2 290.1 191.8 3275.5
2017-10-11 997.5 290.5 189.0 3274.8
2017-11-13 1000.0 291.1 186.3 3258.9
2017-12-13 999.6 288.3 183.4 3244.8
2018-01-18 991.8 287.8 174.5 3241.6
2018-04-12 993.4 286.4 148.8 3195.8
2018-05-14 995.9 287.2 143.6 3193.0
2018-06-12 998.7 286.5 139.2 3186.9
2018-07-11 1042.0 289.7 134.0 3232.7
2018-08-13 1038.7 295.9 127.8 3232.3
2018-09-12 1040.1 297.2 123.5 3256.5
2018-10-11 1051.2 300.4 119.7 3276.1
2018-11-13 1063.0 316.0 117.1 3290.0
2018-12-12 1101.6 324.6 113.7 3296.5
2019-03-14 1008.7 307.2 100.8 3054.9
2019-04-10 979.4 305.9 73.2 3002.2
2019-06-13 969.0 306.1 74.1 2987.6
"""
t = time.time()
res = requests.get(
JS_CONS_OPEC_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
big_df = pd.DataFrame()
for country in [item["datas"] for item in json_data["list"]][0].keys():
try:
value_list = [item["datas"][country] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df[""]
temp_df.name = country
big_df = big_df.append(temp_df)
except:
continue
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_opec_report",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
res = requests.get(f"https://datacenter-api.jin10.com/reports/dates?category=opec&_={str(int(round(t * 1000)))}",
headers=headers) #
all_date_list = res.json()["data"]
need_date_list = [item for item in all_date_list if
item.split("-")[0] + item.split("-")[1] + item.split("-")[2] not in date_list]
for item in reversed(need_date_list):
res = requests.get(
f"https://datacenter-api.jin10.com/reports/list?category=opec&date={item}&_={str(int(round(t * 1000)))}",
headers=headers)
temp_df = pd.DataFrame(res.json()["data"]["values"],
columns=pd.DataFrame(res.json()["data"]["keys"])["name"].tolist()).T
temp_df.columns = temp_df.iloc[0, :]
temp_df = temp_df[['', '', '', '', '', '', '', '', '', '',
'', '', '']].iloc[-2, :]
big_df[item] = temp_df
return big_df.T
def macro_cons_opec_month():
"""
-, 20170118-
,
20200312:fix:
https://datacenter.jin10.com/reportType/dc_opec_report
:return: pandas.Series
\
2017-01-18 108.0 172.4 54.5 21.3 372.0 463.2 281.2 60.8 154.2
2017-02-13 104.5 165.1 52.7 19.9 377.5 447.6 271.8 67.5 157.6
2017-03-14 105.3 164.1 52.6 19.4 381.4 441.4 270.9 66.9 160.8
2017-04-12 105.6 161.4 52.6 19.8 379.0 440.2 270.2 62.2 154.5
2017-05-11 104.7 169.2 52.4 20.6 375.9 437.3 270.2 55.0 150.8
2017-06-13 105.9 161.3 52.8 20.4 379.5 442.4 270.5 73.0 168.0
2017-07-12 106.0 166.8 52.7 19.7 379.0 450.2 270.9 85.2 173.3
2017-08-10 105.9 164.6 53.6 20.5 382.4 446.8 270.3 100.1 174.8
2017-09-12 106.5 164.6 53.7 17.3 382.8 444.8 270.2 89.0 186.1
2017-10-11 104.6 164.1 53.6 20.1 382.7 449.4 270.0 92.3 185.5
2017-11-13 101.2 171.1 54.1 20.3 382.3 438.3 270.8 96.2 173.8
2017-12-13 101.3 158.1 53.3 19.7 381.8 439.6 270.3 97.3 179.0
2018-01-18 103.7 163.3 52.6 19.7 382.9 440.5 270.0 96.2 186.1
2018-04-12 98.4 152.4 51.8 18.3 381.4 442.6 270.4 96.8 181.0
2018-05-14 99.7 151.5 52.0 18.3 382.3 442.9 270.5 98.2 179.1
2018-06-12 103.1 152.5 51.9 18.9 382.9 445.5 270.1 95.5 171.1
2018-07-11 103.9 143.1 51.9 19.0 379.9 453.3 273.1 70.8 166.0
2018-08-13 106.2 145.6 52.5 18.8 373.7 455.6 279.1 66.4 166.7
2018-09-12 104.5 144.8 52.9 18.7 358.4 464.9 280.2 92.6 172.5
2018-10-11 104.9 151.9 53.1 18.7 344.7 465.0 281.2 105.3 174.8
2018-11-13 105.4 153.3 52.5 18.6 329.6 465.4 276.4 111.4 175.1
2018-12-12 105.2 152.1 52.5 17.6 295.4 463.1 280.9 110.4 173.6
2019-03-14 102.6 145.7 52.2 20.3 274.3 463.3 270.9 90.6 174.1
2019-04-10 101.8 145.4 52.4 21.4 269.8 452.2 270.9 109.8 173.3
2019-06-13 102.9 147.1 52.9 21.1 237.0 472.4 271.0 117.4 173.3
2017-01-18 1047.4 307.1 202.1 3308.5
2017-02-13 994.6 293.1 200.4 3213.9
2017-03-14 979.7 292.5 198.7 3195.8
2017-04-12 999.4 289.5 197.2 3192.8
2017-05-11 995.4 284.2 195.6 3173.2
2017-06-13 994.0 288.5 196.3 3213.9
2017-07-12 995.0 289.8 193.8 3261.1
2017-08-10 1006.7 290.5 193.2 3286.9
2017-09-12 1002.2 290.1 191.8 3275.5
2017-10-11 997.5 290.5 189.0 3274.8
2017-11-13 1000.0 291.1 186.3 3258.9
2017-12-13 999.6 288.3 183.4 3244.8
2018-01-18 991.8 287.8 174.5 3241.6
2018-04-12 993.4 286.4 148.8 3195.8
2018-05-14 995.9 287.2 143.6 3193.0
2018-06-12 998.7 286.5 139.2 3186.9
2018-07-11 1042.0 289.7 134.0 3232.7
2018-08-13 1038.7 295.9 127.8 3232.3
2018-09-12 1040.1 297.2 123.5 3256.5
2018-10-11 1051.2 300.4 119.7 3276.1
2018-11-13 1063.0 316.0 117.1 3290.0
2018-12-12 1101.6 324.6 113.7 3296.5
2019-03-14 1008.7 307.2 100.8 3054.9
2019-04-10 979.4 305.9 73.2 3002.2
2019-06-13 969.0 306.1 74.1 2987.6
"""
t = time.time()
big_df = pd.DataFrame()
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_opec_report",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
res = requests.get(f"https://datacenter-api.jin10.com/reports/dates?category=opec&_={str(int(round(t * 1000)))}",
headers=headers) #
all_date_list = res.json()["data"]
bar = tqdm(reversed(all_date_list))
for item in bar:
bar.set_description(f"Please wait for a moment, now downing {item}'s data")
res = requests.get(
f"https://datacenter-api.jin10.com/reports/list?category=opec&date={item}&_={str(int(round(t * 1000)))}",
headers=headers)
temp_df = pd.DataFrame(res.json()["data"]["values"],
columns=pd.DataFrame(res.json()["data"]["keys"])["name"].tolist()).T
temp_df.columns = temp_df.iloc[0, :]
temp_df = temp_df.iloc[1:, :]
try:
temp_df = temp_df[['', '', '', '', '', '', '', '', '',
'', '', '']].iloc[-2, :]
except:
temp_df = temp_df[['', '', '', '', '', '', '', '', '',
'', '', '']].iloc[-1, :]
big_df[temp_df.name] = temp_df
big_df = big_df.T
big_df.columns.name = ""
big_df = big_df.astype(float)
return big_df
if __name__ == "__main__":
macro_cons_gold_volume_df = macro_cons_gold_volume()
print(macro_cons_gold_volume_df)
macro_cons_gold_change_df = macro_cons_gold_change()
print(macro_cons_gold_change_df)
macro_cons_gold_amount_df = macro_cons_gold_amount()
print(macro_cons_gold_amount_df)
print(pd.concat([macro_cons_gold_volume_df, macro_cons_gold_change_df, macro_cons_gold_amount_df], axis=1))
macro_cons_silver_volume_df = macro_cons_silver_volume()
print(macro_cons_silver_volume_df)
macro_cons_silver_change_df = macro_cons_silver_change()
print(macro_cons_silver_change_df)
macro_cons_silver_amount_df = macro_cons_silver_amount()
print(macro_cons_silver_amount_df)
print(pd.concat([macro_cons_silver_volume_df, macro_cons_silver_change_df, macro_cons_silver_amount_df], axis=1))
macro_cons_opec_near_change_df = macro_cons_opec_near_change()
print(macro_cons_opec_near_change_df)
macro_cons_opec_month_df = macro_cons_opec_month()
print(macro_cons_opec_month_df)
| 42.128916 | 140 | 0.574885 |
5d29262ef748030566b97eaf9b5c7c914c6c44fd | 1,811 | py | Python | test/testers/winforms/scrollbar/__init__.py | ABEMBARKA/monoUI | 5fda266ad2db8f89580a40b525973d86cd8de939 | ["MIT"] | 1 | 2019-08-13T15:22:12.000Z | 2019-08-13T15:22:12.000Z | test/testers/winforms/scrollbar/__init__.py | ABEMBARKA/monoUI | 5fda266ad2db8f89580a40b525973d86cd8de939 | ["MIT"] | null | null | null | test/testers/winforms/scrollbar/__init__.py | ABEMBARKA/monoUI | 5fda266ad2db8f89580a40b525973d86cd8de939 | ["MIT"] | 1 | 2019-08-13T15:22:17.000Z | 2019-08-13T15:22:17.000Z |
##############################################################################
# Written by: Cachen Chen <[email protected]>
# Date: 08/06/2008
# Description: Application wrapper for scrollbar.py
# Used by the scrollbar-*.py tests
##############################################################################$
'Application wrapper for scrollbar'
from strongwind import *
from os.path import exists
from sys import path
def launchScrollBar(exe=None):
'Launch ScrollBar with accessibility enabled and return a scrollbar object. Log an error and return None if something goes wrong'
if exe is None:
# make sure we can find the sample application
harness_dir = path[0]
i = harness_dir.rfind("/")
j = harness_dir[:i].rfind("/")
uiaqa_path = harness_dir[:j]
if uiaqa_path is None:
raise IOError, "When launching an application you must provide the "\
"full path or set the\nUIAQA_HOME environment "\
"variable."
exe = '%s/samples/winforms/scrollbar.py' % uiaqa_path
if not os.path.exists(exe):
raise IOError, "%s does not exist" % exe
args = [exe]
(app, subproc) = cache.launchApplication(args=args, name='ipy', wait=config.LONG_DELAY)
scrollbar = ScrollBar(app, subproc)
cache.addApplication(scrollbar)
scrollbar.scrollBarFrame.app = scrollbar
return scrollbar
# class to represent the application
| 32.339286 | 134 | 0.602982 |
5d2a93367402f8cedeb2edebdd44e28110111fbf | 4,209 | py | Python | save_tweets.py | iglesiasmanu/data_analysis | 61127c91ad0eb11ecdc7258e186e430e9dddb0b6 | ["BSD-3-Clause"] | null | null | null | save_tweets.py | iglesiasmanu/data_analysis | 61127c91ad0eb11ecdc7258e186e430e9dddb0b6 | ["BSD-3-Clause"] | null | null | null | save_tweets.py | iglesiasmanu/data_analysis | 61127c91ad0eb11ecdc7258e186e430e9dddb0b6 | ["BSD-3-Clause"] | null | null | null |
import json
from os import path
from tweepy import OAuthHandler, Stream
from tweepy.streaming import StreamListener
from sqlalchemy.orm.exc import NoResultFound
from database import session, Tweet, Hashtag, User
consumer_key = "0qFf4T2xPWVIycLmAwk3rDQ55"
consumer_secret = "LcHpujASn4fIIrQ8sikbCTQ3oyU6T6opchFVWBBqwICahzSE64"
access_token = "4271002872-XLo7TNnE3qvYevqLmT1RBuiJ5CJ3o0DCr3WReAT"
access_token_secret = "ulZ3dA25zuC6BGJgaFowCSTIm6gKVtOa4x9y7tO0IUDIx"
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, acces_token_secret)
| 32.882813 | 80 | 0.623188 |
5d2b1bb602059e1df9b567f6022d9d62a73d9127
| 907 |
py
|
Python
|
app/views/main.py
|
chrisjws-harness/flaskSaaS
|
f42558c523de23f03a098044df164ead3539a4dd
|
[
"MIT"
] | null | null | null |
app/views/main.py
|
chrisjws-harness/flaskSaaS
|
f42558c523de23f03a098044df164ead3539a4dd
|
[
"MIT"
] | null | null | null |
app/views/main.py
|
chrisjws-harness/flaskSaaS
|
f42558c523de23f03a098044df164ead3539a4dd
|
[
"MIT"
] | null | null | null |
from flask import render_template, jsonify
from app import app
import random
| 19.717391 | 59 | 0.61301 |
5d2e7fe7c422e728c2698140a25e0895a9bb3030
| 254 |
py
|
Python
|
base_sample/numpy_mat.py
|
keepangry/ai_algorithm
|
21d8024296a2f2d2797448ed34eb383359259684
|
[
"Apache-2.0"
] | 2 |
2018-08-29T11:09:36.000Z
|
2018-10-22T11:46:36.000Z
|
base_sample/numpy_mat.py
|
keepangry/ai_algorithm
|
21d8024296a2f2d2797448ed34eb383359259684
|
[
"Apache-2.0"
] | null | null | null |
base_sample/numpy_mat.py
|
keepangry/ai_algorithm
|
21d8024296a2f2d2797448ed34eb383359259684
|
[
"Apache-2.0"
] | null | null | null |
# encoding: utf-8
'''
@author: yangsen
@license:
@contact:
@software:
@file: numpy_mat.py
@time: 18-8-25 9:56
@desc:
'''
import numpy as np
a = np.arange(9).reshape(3,3)
# select rows (single index, list of indices, array of indices)
a[1]
a[[1,2]]
a[np.array([1,2])]
# select columns (single index, list of indices, array of indices)
a[:,1]
a[:,[1,2]]
a[:,np.array([1,2])]
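# Expected results for the selections above (added for illustration):
# a[1] -> [3 4 5], a[[1,2]] -> rows 1 and 2, a[:,1] -> [1 4 7], a[:,[1,2]] -> columns 1 and 2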
| 11.043478 | 29 | 0.574803 |
5d2ea06d1190699ab0cefe6b4179c68d747feca4
| 17,002 |
py
|
Python
|
ai_traineree/agents/rainbow.py
|
laszukdawid/ai-traineree
|
af32940eba8e11012de87b60d78f10f5a3b96c79
|
[
"Apache-2.0"
] | 22 |
2020-08-21T03:03:05.000Z
|
2022-02-22T10:15:36.000Z
|
ai_traineree/agents/rainbow.py
|
laszukdawid/ai-traineree
|
af32940eba8e11012de87b60d78f10f5a3b96c79
|
[
"Apache-2.0"
] | 23 |
2021-02-13T20:35:45.000Z
|
2022-02-06T20:15:37.000Z
|
ai_traineree/agents/rainbow.py
|
laszukdawid/ai-traineree
|
af32940eba8e11012de87b60d78f10f5a3b96c79
|
[
"Apache-2.0"
] | 6 |
2020-10-11T08:36:09.000Z
|
2021-11-20T18:31:03.000Z
|
import copy
from typing import Callable, Dict, List, Optional
import torch
import torch.nn as nn
import torch.optim as optim
from ai_traineree import DEVICE
from ai_traineree.agents import AgentBase
from ai_traineree.agents.agent_utils import soft_update
from ai_traineree.buffers import NStepBuffer, PERBuffer
from ai_traineree.buffers.buffer_factory import BufferFactory
from ai_traineree.loggers import DataLogger
from ai_traineree.networks.heads import RainbowNet
from ai_traineree.types import ActionType, AgentState, BufferState, DoneType, NetworkState, ObsType, RewardType
from ai_traineree.types.dataspace import DataSpace
from ai_traineree.utils import to_numbers_seq, to_tensor
def act(self, obs: ObsType, eps: float = 0.) -> int:
"""
        Returns an action for the given observation following the current policy.
        Parameters:
            obs: Current observation from the environment.
            eps: Epsilon value for the epsilon-greedy policy.
        """
# Epsilon-greedy action selection
if self._rng.random() < eps:
# TODO: Update with action_space.sample() once implemented
assert len(self.action_space.shape) == 1, "Only 1D is supported right now"
return self._rng.randint(self.action_space.low, self.action_space.high)
t_obs = to_tensor(self.state_transform(obs)).float().unsqueeze(0).to(self.device)
self.dist_probs = self.net.act(t_obs)
q_values = (self.dist_probs * self.z_atoms).sum(-1)
return int(q_values.argmax(-1)) # Action maximizes state-action value Q(s, a)
def learn(self, experiences: Dict[str, List]) -> None:
"""
Parameters:
experiences: Contains all experiences for the agent. Typically sampled from the memory buffer.
Five keys are expected, i.e. `state`, `action`, `reward`, `next_state`, `done`.
                Each key maps to an array, and all arrays must have the same length.
"""
rewards = to_tensor(experiences['reward']).float().to(self.device)
dones = to_tensor(experiences['done']).type(torch.int).to(self.device)
states = to_tensor(experiences['state']).float().to(self.device)
next_states = to_tensor(experiences['next_state']).float().to(self.device)
actions = to_tensor(experiences['action']).type(torch.long).to(self.device)
assert rewards.shape == dones.shape == (self.batch_size, 1)
assert states.shape == next_states.shape == (self.batch_size,) + self.obs_space.shape
assert actions.shape == (self.batch_size, 1) # Discrete domain
with torch.no_grad():
prob_next = self.target_net.act(next_states)
q_next = (prob_next * self.z_atoms).sum(-1) * self.z_delta
if self.using_double_q:
duel_prob_next = self.net.act(next_states)
a_next = torch.argmax((duel_prob_next * self.z_atoms).sum(-1), dim=-1)
else:
a_next = torch.argmax(q_next, dim=-1)
prob_next = prob_next[self.__batch_indices, a_next, :]
m = self.net.dist_projection(rewards, 1 - dones, self.gamma ** self.n_steps, prob_next)
assert m.shape == (self.batch_size, self.num_atoms)
log_prob = self.net(states, log_prob=True)
assert log_prob.shape == (self.batch_size,) + self.action_size + (self.num_atoms,)
log_prob = log_prob[self.__batch_indices, actions.squeeze(), :]
assert log_prob.shape == m.shape == (self.batch_size, self.num_atoms)
# Cross-entropy loss error and the loss is batch mean
error = -torch.sum(m * log_prob, 1)
assert error.shape == (self.batch_size,)
loss = error.mean()
assert loss >= 0
self.optimizer.zero_grad()
loss.backward()
nn.utils.clip_grad_norm_(self.net.parameters(), self.max_grad_norm)
self.optimizer.step()
self._loss = float(loss.item())
if hasattr(self.buffer, 'priority_update'):
assert (~torch.isnan(error)).any()
self.buffer.priority_update(experiences['index'], error.detach().cpu().numpy())
# Update networks - sync local & target
soft_update(self.target_net, self.net, self.tau)
def state_dict(self) -> Dict[str, dict]:
"""Returns agent's state dictionary.
Returns:
            State dictionary for internal networks.
"""
return {"net": self.net.state_dict(), "target_net": self.target_net.state_dict()}
def get_state(self) -> AgentState:
"""Provides agent's internal state."""
return AgentState(
model=self.model,
obs_space=self.obs_space,
action_space=self.action_space,
config=self._config,
buffer=copy.deepcopy(self.buffer.get_state()),
network=copy.deepcopy(self.get_network_state()),
)
def save_state(self, path: str) -> None:
"""Saves agent's state into a file.
Parameters:
path: String path where to write the state.
"""
agent_state = self.get_state()
torch.save(agent_state, path)
def load_state(self, path: str) -> None:
"""Loads state from a file under provided path.
Parameters:
path: String path indicating where the state is stored.
"""
agent_state = torch.load(path)
self._config = agent_state.get('config', {})
self.__dict__.update(**self._config)
self.net.load_state_dict(agent_state['net'])
self.target_net.load_state_dict(agent_state['target_net'])
def save_buffer(self, path: str) -> None:
"""Saves data from the buffer into a file under provided path.
Parameters:
path: String path where to write the buffer.
"""
import json
dump = self.buffer.dump_buffer(serialize=True)
with open(path, 'w') as f:
json.dump(dump, f)
def load_buffer(self, path: str) -> None:
"""Loads data into the buffer from provided file path.
Parameters:
path: String path indicating where the buffer is stored.
"""
import json
with open(path, 'r') as f:
buffer_dump = json.load(f)
self.buffer.load_buffer(buffer_dump)
| 45.218085 | 118 | 0.648688 |
5d2fb89dd83715259f4676be3c051b02287f606c
| 4,169 |
py
|
Python
|
nvvm/core/nvvm.py
|
uchytilc/PyCu
|
9ba25281611bf4dbd70d37f4eba0574f817d6928
|
[
"MIT"
] | null | null | null |
nvvm/core/nvvm.py
|
uchytilc/PyCu
|
9ba25281611bf4dbd70d37f4eba0574f817d6928
|
[
"MIT"
] | null | null | null |
nvvm/core/nvvm.py
|
uchytilc/PyCu
|
9ba25281611bf4dbd70d37f4eba0574f817d6928
|
[
"MIT"
] | null | null | null |
from pycu.nvvm import (get_libdevice, ir_version, version, add_module_to_program, compile_program,
create_program, destroy_program, get_compiled_result, get_compiled_result_size,
get_program_log, get_program_log_size, lazy_add_module_to_program, verify_program)
import os
import sys
from ctypes import c_char_p
import weakref
| 28.951389 | 181 | 0.708803 |
5d314e81984e8fdd23c8fa9711722c873d27574a
| 160 |
py
|
Python
|
fieldservice/fieldservice/doctype/fieldservice_settings/test_fieldservice_settings.py
|
itsdaveit/fieldservice
|
90bd813fb01f23a18df3b24fc67ec86c4d8be5a5
|
[
"MIT"
] | null | null | null |
fieldservice/fieldservice/doctype/fieldservice_settings/test_fieldservice_settings.py
|
itsdaveit/fieldservice
|
90bd813fb01f23a18df3b24fc67ec86c4d8be5a5
|
[
"MIT"
] | null | null | null |
fieldservice/fieldservice/doctype/fieldservice_settings/test_fieldservice_settings.py
|
itsdaveit/fieldservice
|
90bd813fb01f23a18df3b24fc67ec86c4d8be5a5
|
[
"MIT"
] | 1 |
2021-11-09T10:26:06.000Z
|
2021-11-09T10:26:06.000Z
|
# Copyright (c) 2022, itsdve GmbH and Contributors
# See license.txt
# import frappe
import unittest
| 17.777778 | 50 | 0.79375 |
5d332a4a020a6faa75a8b1522601b2bced79121d
| 2,621 |
py
|
Python
|
Codes/Data Processing.py
|
BrickerP/Investment-
|
8b57c0d157a7eaa38d693c8d42ce1bc7dc7bdde9
|
[
"Apache-2.0"
] | null | null | null |
Codes/Data Processing.py
|
BrickerP/Investment-
|
8b57c0d157a7eaa38d693c8d42ce1bc7dc7bdde9
|
[
"Apache-2.0"
] | null | null | null |
Codes/Data Processing.py
|
BrickerP/Investment-
|
8b57c0d157a7eaa38d693c8d42ce1bc7dc7bdde9
|
[
"Apache-2.0"
] | 1 |
2022-01-07T06:25:54.000Z
|
2022-01-07T06:25:54.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 21 14:51:01 2021
@author: 75638
"""
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 10000)
def process_data(path1,path2):
'''
    1. path1: file path of the factor data
    2. path2: file path of the SP500 members file
    3. remove anomalies
    4. normalize data
    5. fill NaN with the median
'''
#read factor.xlsx
factor=pd.read_excel(path1,index_col=0)
#remove anomalies which is greater than median+5*std or less than median-s*std
for date in factor:
median=factor[date].quantile(0.5)
std=factor[date].std()
min=median-5*std
max=median+5*std
factor[date]=factor[date].clip(min,max)
#normalize data
for date in factor:
mean=factor[date].mean()
std=factor[date].std()
factor[date]=(factor[date]-mean)/std
# fill NAN
for date in factor:
median=factor[date].quantile(0.5)
factor.fillna(median,inplace=True)
#read SP500 member datas
member=pd.read_excel(path2,index_col=0)
#merge industry data
factor=pd.merge(member,factor,left_index=True,right_index=True)
# save processed data
factor.to_csv('C:\\Users\\75638\\OneDrive - UW\\Desktop\\703project\\data\\volatility.csv')
return factor
if __name__ == '__main__':
path1='C:\\Users\\75638\\OneDrive - UW\\Desktop\\703project\\original_data\\volatility.xlsx'
path2='C:\\Users\\75638\\OneDrive - UW\\Desktop\\703project\\SP500\\SP500members.xlsx'
data=process_data(path1,path2)
| 33.177215 | 97 | 0.591759 |
5d33f0625c53288d64064188bcbc357613405301
| 1,448 |
py
|
Python
|
tests/test_env.py
|
Majanao/pytorch-blender
|
eb5effb033094d037e7bdc2238c00806be7012ae
|
[
"MIT"
] | 381 |
2019-07-03T14:15:16.000Z
|
2022-03-30T08:58:26.000Z
|
tests/test_env.py
|
ANABUR920/pytorch-blender
|
eb5effb033094d037e7bdc2238c00806be7012ae
|
[
"MIT"
] | 18 |
2020-01-15T17:36:08.000Z
|
2021-12-31T08:37:54.000Z
|
tests/test_env.py
|
ANABUR920/pytorch-blender
|
eb5effb033094d037e7bdc2238c00806be7012ae
|
[
"MIT"
] | 34 |
2019-07-09T03:15:02.000Z
|
2022-01-13T17:36:20.000Z
|
import pytest
from pathlib import Path
from blendtorch import btt
BLENDDIR = Path(__file__).parent/'blender'
| 26.814815 | 81 | 0.641575 |
5d356088cb332b6a1cde85497c82874f1681387b
| 20 |
py
|
Python
|
sitetree/__init__.py
|
sitkatech/django-sitetree
|
5d7e9d503f97ff021c5c04855e04e098b3d2488c
|
[
"BSD-3-Clause"
] | 3 |
2019-02-12T01:58:42.000Z
|
2019-06-08T10:50:33.000Z
|
sitetree/__init__.py
|
sitkatech/django-sitetree
|
5d7e9d503f97ff021c5c04855e04e098b3d2488c
|
[
"BSD-3-Clause"
] | null | null | null |
sitetree/__init__.py
|
sitkatech/django-sitetree
|
5d7e9d503f97ff021c5c04855e04e098b3d2488c
|
[
"BSD-3-Clause"
] | null | null | null |
VERSION = (0, 9, 5)
| 10 | 19 | 0.5 |
5d36081f930dd6c0a745b46f1b5a299e738d247f
| 20,670 |
py
|
Python
|
deepvariant/runtime_by_region_vis.py
|
tahashmi/deepvariant
|
441c1809d3290f4a20b29a0a0bbf8ecfb929a6e3
|
[
"BSD-3-Clause"
] | 4 |
2019-03-30T13:25:25.000Z
|
2020-10-14T18:47:21.000Z
|
deepvariant/runtime_by_region_vis.py
|
FrogEnthusiast7/deepvariant
|
84516dfacd1ed856a34507becb21848aa12e77a8
|
[
"BSD-3-Clause"
] | 1 |
2021-06-18T15:04:47.000Z
|
2021-06-18T15:04:47.000Z
|
deepvariant/runtime_by_region_vis.py
|
FrogEnthusiast7/deepvariant
|
84516dfacd1ed856a34507becb21848aa12e77a8
|
[
"BSD-3-Clause"
] | 1 |
2019-09-04T16:59:18.000Z
|
2019-09-04T16:59:18.000Z
|
# Copyright 2020 Google LLC.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
r"""Create a visual report of make_examples runtime by region.
Use this script to visualize the runtime-by-region data generated by running
make_examples with --runtime_by_region.
"""
from typing import Dict, Sequence, List, Tuple, Text, Any, Union
from absl import app
from absl import flags
import altair as alt
import pandas as pd
import tensorflow as tf
from third_party.nucleus.io import sharded_file_utils
# Altair uses a lot of method chaining, such as
# chart.mark_bar().encode(...).properties(...), so using backslash
# continuation to break this into separate lines makes the code more readable.
# pylint: disable=g-backslash-continuation
VEGA_URL = 'https://storage.googleapis.com/deepvariant/lib/vega'
FLAGS = flags.FLAGS
flags.DEFINE_string(
'input', None, 'TSV file that was produced when running make_examples '
'with --runtime_by_region. Can be sharded, e.g. /path/[email protected].')
flags.DEFINE_string(
'title', None, 'Title will be shown at the top of the report and will '
'be used as a prefix for downloaded image files.')
flags.DEFINE_string('output', 'runtime_by_region_report.html',
'Path for the output report, which will be an html file.')
RUNTIME_COLUMNS = [
'get reads', 'find candidates', 'make pileup images', 'write outputs'
]
COUNT_COLUMNS = ['num reads', 'num candidates', 'num examples']
CSS_STYLES = """
<style>
body {
font-family: sans-serif;
}
.chart-container {
padding: 30px;
}
</style>
"""
def read_sharded_runtime_tsvs(path_string: str) -> pd.DataFrame:
"""Imports data from a single or sharded path into a pandas dataframe.
Args:
path_string: The path to the input file, which may be sharded.
Returns:
A dataframe matching the TSV file(s) but with added Task column.
"""
if sharded_file_utils.is_sharded_file_spec(path_string):
paths = sharded_file_utils.generate_sharded_filenames(path_string)
else:
paths = [path_string]
list_of_dataframes = []
for i, path in enumerate(paths):
if path.startswith('gs://'):
# Once pandas is updated to 0.24+, pd.read_csv will work for gs://
# without this workaround.
with tf.io.gfile.GFile(path) as f:
d = pd.read_csv(f, sep='\t')
else:
d = pd.read_csv(path, sep='\t')
d['Task'] = i
list_of_dataframes.append(d)
return pd.concat(list_of_dataframes, axis=0, ignore_index=True)
def format_runtime_string(raw_seconds: float) -> str:
"""Creates a nice format string from a potentially large number of seconds.
Args:
raw_seconds: A number of seconds.
Returns:
The seconds divided into hours, minutes, and remaining seconds, formatted
nicely. For example, 2h3m5.012s.
"""
minutes, seconds = divmod(raw_seconds, 60)
hours, minutes = divmod(minutes, 60)
seconds = round(seconds, 3)
output = ''
if hours > 0:
output += f'{int(hours)}h'
if minutes > 0:
output += f'{int(minutes)}m'
if seconds > 0 or not output:
output += f'{seconds}s'
return output
def calculate_totals(df: pd.DataFrame) -> pd.DataFrame:
"""Calculates total runtime, formats it nicely, and sorts by it.
Args:
df: A dataframe of runtime profiling numbers.
Returns:
The same dataframe with some additional summary columns.
"""
# 'total runtime' is a simple sum of the runtime columns.
df['total runtime'] = df[RUNTIME_COLUMNS].sum(axis=1)
# Create a formatted runtime string for tooltips.
df['Runtime'] = df['total runtime'].apply(format_runtime_string)
# Sort by descending total region runtime.
df.sort_values(by='total runtime', inplace=True, ascending=False)
return df
def summarize_by_task(df: pd.DataFrame) -> pd.DataFrame:
"""Groups regions to get the total runtime for each task.
Args:
df: A dataframe of runtime profiling numbers.
Returns:
The dataframe grouped by task.
"""
by_task = df.groupby(by=['Task']).sum()
return by_task.reset_index()
def stage_histogram(d: pd.DataFrame, title: str = '') -> alt.Chart:
"""Plots a histogram of runtimes stacked by stage.
Args:
d: A dataframe of runtimes, either by region or by task.
title: A title for the plot.
Returns:
An altair chart.
"""
columns_used = RUNTIME_COLUMNS
d = d[columns_used]
return alt.Chart(d).transform_fold(
RUNTIME_COLUMNS, as_=['Stage', 'runtime_by_stage']) \
.mark_bar(opacity=0.3) \
.encode(
x=alt.X('runtime_by_stage:Q', bin=alt.Bin(maxbins=100),
title='Runtime (seconds)'),
y=alt.Y('count()', title='Count of regions', stack=None),
color=alt.Color('Stage:N', sort=None)
).properties(title=title)
def correlation_scatter_charts(d: pd.DataFrame, title: str = '') -> alt.Chart:
"""Produces a grid of scatter plots of runtimes of stages versus covariates.
Args:
d: A pandas dataframe of runtime by regions.
title: A title for the plot.
Returns:
An altair chart
"""
columns_used = ['region', 'total runtime'] + RUNTIME_COLUMNS + COUNT_COLUMNS
d = d[columns_used]
return alt.Chart(d).mark_circle(opacity=0.1).encode(
x=alt.X(alt.repeat('column'), type='quantitative',
axis=alt.Axis(labelExpr="datum.value + 's'")),
y=alt.Y(alt.repeat('row'), type='quantitative'),
tooltip='region'
).properties(width=100, height=100) \
.repeat(
column=['total runtime'] + RUNTIME_COLUMNS,
row=COUNT_COLUMNS,
).properties(title=title)
def totals_by_stage(d: pd.DataFrame) -> alt.Chart:
"""Plots total runtimes for each stage.
Args:
d: A dataframe of runtimes.
Returns:
An altair chart.
"""
stage_totals_series = d.sum()[RUNTIME_COLUMNS]
stage_totals = pd.DataFrame(
stage_totals_series, columns=['Runtime (seconds)'])
stage_totals.reset_index(inplace=True)
stage_totals = stage_totals.rename(columns={'index': 'Stage'})
stage_totals['Runtime'] = stage_totals['Runtime (seconds)'].apply(
format_runtime_string)
return alt.Chart(stage_totals).mark_bar().encode(
x='Runtime (seconds)',
y=alt.Y('Stage', sort=None),
tooltip=['Runtime'],
fill=alt.Fill('Stage',
sort=None)).properties(title='Overall runtime by stage')
def pareto_by_task_tooltip(row: pd.Series) -> str:
"""For one row of a dataframe, computes a tooltip description.
Args:
row: A Pandas Series, one row of a dataframe containing some specific
cumulative sum columns.
Returns:
A string to show as the tooltip for a pareto curve.
"""
return (f"{row['task cumsum order'] * 100:.2f}% of regions "
f"account for {row['task cumsum fraction'] * 100:.2f}% of "
f"the runtime in task {row['Task']}")
def calculate_pareto_metrics(df_subset: pd.DataFrame) -> pd.DataFrame:
"""Calculates cumulative sums for a subset of a dataframe.
Args:
df_subset: A dataframe subset of one task.
Returns:
The same dataframe subset with some additional columns.
"""
# These are the same for all regions in the same task, for the scatter plot:
df_subset['task total runtime'] = df_subset['total runtime'].sum()
df_subset['Runtime for task'] = df_subset['task total runtime'].apply(
format_runtime_string)
df_subset['task num examples'] = df_subset['num examples'].sum()
# These are cumulative sums for the pareto curves:
df_subset['task cumsum fraction'] = df_subset['total runtime'].cumsum(
) / df_subset['total runtime'].sum()
n = len(df_subset)
df_subset['task cumsum order'] = list(map(lambda x: x / n, range(0, n)))
df_subset['tooltip'] = df_subset.apply(pareto_by_task_tooltip, axis=1)
return df_subset
def pareto_and_runtimes_by_task(df: pd.DataFrame) -> alt.Chart:
"""Creates an interactive Pareto curve and scatter plot of task runtimes.
Tracing each curve shows to what extent a small proportion of long-running
regions contribute disproportionately to the overall runtime. That is,
"The longest-running X% of regions account for Y% of the total runtime."
There is a curve for each task.
Args:
df: A dataframe of all regions.
Returns:
An altair chart.
"""
grouped = df.groupby(df['Task'], sort=False)
df = grouped.apply(calculate_pareto_metrics)
# Sample along the Pareto curve, ensuring the longest regions are shown.
if len(df) > 5000:
x = 1000
df = pd.concat([df.nlargest(x, 'total runtime'), df.sample(5000 - x)])
# Limit columns to greatly reduce the size of the html report.
columns_used = [
'task cumsum order', 'task cumsum fraction', 'tooltip', 'Task',
'task total runtime', 'task num examples', 'Runtime for task'
]
df = df[columns_used]
# Brushing on the task_scatter plot highlights the same tasks in the Pareto
# curve.
brush = alt.selection_interval()
pareto_by_task = alt.Chart(df).mark_line(size=2).encode(
x=alt.X(
'task cumsum order',
title='The longest-runtime X% of regions',
axis=alt.Axis(format='%')),
y=alt.Y(
'task cumsum fraction',
title='Account for Y% of the total runtime',
axis=alt.Axis(format='%')),
tooltip='tooltip',
color=alt.condition(brush, 'Task:N', alt.value('lightgray'))).properties(
title='Pareto curve for each task').interactive()
# This chart needs to use the same dataframe as the first chart to enable the
# brushing on one to affect the other. Using max(task) for 'text' is a
# trick that causes bundling by task to avoid showing multiple overlapping
# points which otherwise make the text look funky.
task_scatter = alt.Chart(df).mark_point(size=10).encode(
x=alt.X('max(task total runtime)', title='Runtime (seconds)'),
y=alt.Y('task num examples:Q', title='Number of examples'),
color=alt.condition(brush, 'Task:N', alt.value('lightgray')),
tooltip=['Task', 'Runtime for task']
) \
.properties(title='Total runtime for each task (drag to highlight)') \
.add_selection(brush)
return pareto_by_task | task_scatter
def individual_region_bars(small_df: pd.DataFrame,
title: Union[str, Dict[str, str]] = '') -> alt.Chart:
"""Makes a stacked bar chart with runtime of each stage for individual regions.
Args:
small_df: A dataframe of regions, each of which will be shown as a bar.
title: A title for the plot. If a dict, it should contain 'title' and/or
'subtitle'.
Returns:
An altair chart.
"""
columns_used = ['region', 'Runtime'] + RUNTIME_COLUMNS
d = small_df[columns_used]
return alt.Chart(d).transform_fold(
RUNTIME_COLUMNS, as_=['Stage', 'runtime_by_stage']) \
.mark_bar().encode(
x=alt.X('region:N', sort=None),
y=alt.Y('runtime_by_stage:Q', scale=alt.Scale(type='linear'), title='Runtime (seconds)'),
fill=alt.Fill('Stage:N', sort=None),
tooltip='Runtime:N'
).properties(title=title)
def selected_longest_and_median_regions(df: pd.DataFrame) -> alt.Chart:
"""Creates a stacked bar charts of the top 20 and median 20 regions.
Args:
df: A dataframe of all regions.
Returns:
An altair chart.
"""
num_rows = len(df)
mid = round(num_rows / 2)
return individual_region_bars(df.iloc[0:20], 'Top runtime regions') \
| individual_region_bars(df.iloc[mid-10:mid+11], 'Median runtime regions')
def top_regions_producing_zero_examples(df: pd.DataFrame) -> alt.Chart:
"""Creates a chart of the top regions that produced zero examples.
Args:
df: A dataframe of all regions.
Returns:
An altair chart.
"""
regions_with_zero_examples = df[df['num examples'] == 0]
runtime_of_zeros = regions_with_zero_examples['total runtime'].sum() / 3600
total_runtime = df['total runtime'].sum() / 3600
subtitle = (
f'Spent {runtime_of_zeros:.2f} hours processing the '
f'{len(regions_with_zero_examples)} regions that produced no examples, '
f'which is {runtime_of_zeros / total_runtime * 100:.2f}% of the total '
f'runtime of {total_runtime:.2f} hours.')
return individual_region_bars(
regions_with_zero_examples.nlargest(50, 'total runtime'),
title={
'text': 'The longest-running regions that produced no examples',
'subtitle': subtitle
})
def write_to_html_report(charts: List[Dict[Text, alt.Chart]], title: str,
subtitle: str, html_output: Any) -> None:
"""Makes the html report with all the charts inserted.
Args:
charts: A list of altair chart objects.
title: The title to show at the top of the report.
subtitle: The subtitle to show just below the title on the report.
html_output: a writable file object.
Returns:
None. Writes into the html_output file object.
"""
# Start the HTML document.
html_output.write('<!DOCTYPE html>\n<html>\n<head>')
# Add dependencies vega and vega-lite, which render the altair charts.
html_output.write('<script type="text/javascript" src="{}/vega@5"></script>'
'\n'.format(VEGA_URL))
html_output.write(
'<script type="text/javascript" src="{}/[email protected]"></script>'
'\n'.format(VEGA_URL))
html_output.write(
'<script type="text/javascript" src="{}/vega-embed@6"></script>'
'\n'.format(VEGA_URL))
# Add styles (CSS).
html_output.write(CSS_STYLES)
html_output.write('</head>\n<body>')
html_output.write('<h1>{}</h1>\n'.format(title))
html_output.write('<h2>{}</h2>\n'.format(subtitle))
# Make a div containing all the charts.
html_output.write('<div>')
for chart in charts:
html_output.write(
'<div class="chart-container" id="vis_{}"></div>\n'.format(chart['id']))
html_output.write('</div>')
# Add JSON vega specs and hook them up to the divs with VegaEmbed.
html_output.write('<script>\n')
for chart in charts:
html_output.write('var spec_{} = {};\n'.format(chart['id'],
chart['chart'].to_json()))
download_filename = '{}_{}'.format(title.replace(' ', '_'), chart['id'])
embed_options = {'mode': 'vega-lite', 'downloadFileName': download_filename}
html_output.write('vegaEmbed("#vis_{}", spec_{}, {})\n'.format(
chart['id'], chart['id'], embed_options))
html_output.write('</script>\n')
# Close HTML document.
html_output.write('</body></html>')
def read_data_and_make_dataframes(
input_path: str) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""Loads data from a file into one dataframe as-is and one by task.
Args:
input_path: str, path of the input TSV file (may be sharded).
Returns:
df: A dataframe with one row per region.
by_task: A dataframe with one row per task.
"""
df = read_sharded_runtime_tsvs(input_path)
df = calculate_totals(df)
by_task = summarize_by_task(df)
return df, by_task
def make_all_charts(
df: pd.DataFrame,
by_task: pd.DataFrame) -> List[Dict[Text, Union[str, alt.Chart]]]:
"""Creates charts and puts them in a list with their ID names.
Args:
df: A dataframe with one row per region.
by_task: A dataframe with one row per task.
Returns:
list of dicts, each containing a chart and a descriptive ID.
"""
charts = [{
'id': 'total_by_stage',
'chart': totals_by_stage(by_task)
}, {
'id': 'pareto_and_runtimes_by_task',
'chart': pareto_and_runtimes_by_task(df)
}, {
'id': 'histogram_by_task',
'chart': stage_histogram(by_task, title='Stage runtimes for each task')
}, {
'id': 'selected_longest_and_median_regions',
'chart': selected_longest_and_median_regions(df)
}, {
'id': 'zero_examples',
'chart': top_regions_producing_zero_examples(df)
}]
# Altair shows a max of 5000 data points.
if len(df) <= 5000:
# With up to 5000 points, just show them all.
charts.extend([{
'id': 'histogram',
'chart': stage_histogram(df, title='Runtime by stage for all regions')
}, {
'id': 'scatter_grid',
'chart': correlation_scatter_charts(df, title='Trends for all regions')
}])
else:
# With too many points, make different subsets to show trends better.
top_100 = df.nlargest(100, 'total runtime')
top_5000 = df.nlargest(5000, 'total runtime')
# Sample the bottom 99% to avoid outliers that obscure general trends.
bottom_99_percent = df.nsmallest(int(len(df) * .99), 'total runtime')
if len(bottom_99_percent) > 5000:
bottom_99_percent = bottom_99_percent.sample(5000)
charts.extend([{
'id':
'histogram_bottom_99_percent',
'chart':
stage_histogram(
bottom_99_percent,
title='Runtime by stage for regions in the bottom 99%')
}, {
'id':
'histogram_top_100',
'chart':
stage_histogram(
top_100, title='Runtime by stage for regions in the top 100')
}, {
'id':
'scatter_grid_top_5000',
'chart':
correlation_scatter_charts(
top_5000, title='Trends for regions in the top 5000')
}, {
'id':
'scatter_grid_bottom_99_percent',
'chart':
correlation_scatter_charts(
bottom_99_percent, title='Trends for regions in the bottom 99%')
}])
return charts
def make_report(input_path: str, title: str,
html_output: tf.io.gfile.GFile) -> None:
"""Reads data, creates charts, and composes the charts into an HTML report.
Args:
input_path: Path of the input TSV file (or sharded files).
title: Title to put at the top of the report.
html_output: Writable file object where output will be written.
"""
# Load data into pandas dataframes and add summary columns.
df, by_task = read_data_and_make_dataframes(input_path)
# Build all the charts.
charts = make_all_charts(df, by_task)
# Write a subtitle with some top-level stats.
subtitle = (f'Runtime profiling for make_examples on {len(df)} regions '
f'across {len(by_task)} task{"(s)" if len(by_task) > 1 else ""}')
# Write the HTML report with all the charts.
write_to_html_report(
charts=charts, title=title, subtitle=subtitle, html_output=html_output)
if __name__ == '__main__':
flags.mark_flags_as_required(['input', 'title'])
app.run(main)
| 34.335548 | 97 | 0.677504 |
5d36d6dbf217342990cb49eda55af38f42824619
| 4,238 |
py
|
Python
|
pkgs/dynd-python-0.7.2-py27_0/lib/python2.7/site-packages/dynd/tests/test_nd_fields.py
|
wangyum/anaconda
|
6e5a0dbead3327661d73a61e85414cf92aa52be6
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
pkgs/dynd-python-0.7.2-py27_0/lib/python2.7/site-packages/dynd/tests/test_nd_fields.py
|
wangyum/anaconda
|
6e5a0dbead3327661d73a61e85414cf92aa52be6
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
pkgs/dynd-python-0.7.2-py27_0/lib/python2.7/site-packages/dynd/tests/test_nd_fields.py
|
wangyum/anaconda
|
6e5a0dbead3327661d73a61e85414cf92aa52be6
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
import sys
import unittest
from dynd import nd, ndt
"""
class TestFields(unittest.TestCase):
def test_simple(self):
a = nd.array([
(1, 2, 'a', 'b'),
(3, 4, 'ab', 'cd'),
(5, 6, 'def', 'ghi')],
type='3 * {x: int32, y: int32, z: string, w: string}')
# Selecting a single field
b = nd.fields(a, 'x')
self.assertEqual(nd.dtype_of(b), ndt.make_struct(
[ndt.int32],
['x']))
self.assertEqual(nd.as_py(b.x), nd.as_py(a.x))
# Selecting two fields
b = nd.fields(a, 'z', 'y')
self.assertEqual(nd.dtype_of(b), ndt.make_struct(
[ndt.string, ndt.int32],
['z', 'y']))
self.assertEqual(nd.as_py(b.z), nd.as_py(a.z))
self.assertEqual(nd.as_py(b.y), nd.as_py(a.y))
# Selecting three fields
b = nd.fields(a, 'w', 'y', 'z')
self.assertEqual(nd.dtype_of(b), ndt.make_struct(
[ndt.string, ndt.int32, ndt.string],
['w', 'y', 'z']))
self.assertEqual(nd.as_py(b.w), nd.as_py(a.w))
self.assertEqual(nd.as_py(b.y), nd.as_py(a.y))
self.assertEqual(nd.as_py(b.z), nd.as_py(a.z))
# Reordering all four fields
b = nd.fields(a, 'w', 'y', 'x', 'z')
self.assertEqual(nd.dtype_of(b), ndt.make_struct(
[ndt.string, ndt.int32, ndt.int32, ndt.string],
['w', 'y', 'x', 'z']))
self.assertEqual(nd.as_py(b.w), nd.as_py(a.w))
self.assertEqual(nd.as_py(b.y), nd.as_py(a.y))
self.assertEqual(nd.as_py(b.x), nd.as_py(a.x))
self.assertEqual(nd.as_py(b.z), nd.as_py(a.z))
def test_fixed_var(self):
a = nd.array([
[(1, 2, 'a', 'b'),
(3, 4, 'ab', 'cd')],
[(5, 6, 'def', 'ghi')],
[(7, 8, 'alpha', 'beta'),
(9, 10, 'X', 'Y'),
(11, 12, 'the', 'end')]],
type='3 * var * {x: int32, y: int32, z: string, w: string}')
# Selecting a single field
b = nd.fields(a, 'x')
self.assertEqual(nd.type_of(b), ndt.make_fixed_dim(3,
ndt.make_var_dim(ndt.make_struct(
[ndt.int32],
['x']))))
self.assertEqual(nd.as_py(b.x), nd.as_py(a.x))
# Selecting two fields
b = nd.fields(a, 'z', 'y')
self.assertEqual(nd.type_of(b), ndt.make_fixed_dim(3,
ndt.make_var_dim(ndt.make_struct(
[ndt.string, ndt.int32],
['z', 'y']))))
self.assertEqual(nd.as_py(b.z), nd.as_py(a.z))
self.assertEqual(nd.as_py(b.y), nd.as_py(a.y))
# Selecting three fields
b = nd.fields(a, 'w', 'y', 'z')
self.assertEqual(nd.type_of(b), ndt.make_fixed_dim(3,
ndt.make_var_dim(ndt.make_struct(
[ndt.string, ndt.int32, ndt.string],
['w', 'y', 'z']))))
self.assertEqual(nd.as_py(b.w), nd.as_py(a.w))
self.assertEqual(nd.as_py(b.y), nd.as_py(a.y))
self.assertEqual(nd.as_py(b.z), nd.as_py(a.z))
# Reordering all four fields
b = nd.fields(a, 'w', 'y', 'x', 'z')
self.assertEqual(nd.type_of(b), ndt.make_fixed_dim(3,
ndt.make_var_dim(ndt.make_struct(
[ndt.string, ndt.int32, ndt.int32, ndt.string],
['w', 'y', 'x', 'z']))))
self.assertEqual(nd.as_py(b.w), nd.as_py(a.w))
self.assertEqual(nd.as_py(b.y), nd.as_py(a.y))
self.assertEqual(nd.as_py(b.x), nd.as_py(a.x))
self.assertEqual(nd.as_py(b.z), nd.as_py(a.z))
def test_bad_field_name(self):
a = nd.array([
(1, 2, 'a', 'b'),
(3, 4, 'ab', 'cd'),
(5, 6, 'def', 'ghi')],
type='3 * {x: int32, y: int32, z: string, w: string}')
self.assertRaises(RuntimeError, nd.fields, a, 'y', 'v')
"""
if __name__ == '__main__':
unittest.main()
| 42.808081 | 76 | 0.464606 |
5d373f0e4790917fc2d0b3ea420a4ad7a8c76024
| 4,096 |
py
|
Python
|
xeofs/pandas/_transformer.py
|
nicrie/xeofs
|
4c0ed49b45794ce0abb641c98b82638b2faa4828
|
[
"MIT"
] | 3 |
2022-02-22T07:56:09.000Z
|
2022-03-30T10:47:20.000Z
|
xeofs/pandas/_transformer.py
|
nicrie/xeofs
|
4c0ed49b45794ce0abb641c98b82638b2faa4828
|
[
"MIT"
] | 13 |
2022-02-15T13:44:34.000Z
|
2022-03-15T22:51:01.000Z
|
xeofs/pandas/_transformer.py
|
nicrie/xeofs
|
4c0ed49b45794ce0abb641c98b82638b2faa4828
|
[
"MIT"
] | 2 |
2022-02-17T19:02:59.000Z
|
2022-02-22T07:56:15.000Z
|
from typing import Union, Iterable, List
import numpy as np
import pandas as pd
from ..models._transformer import _ArrayTransformer, _MultiArrayTransformer
| 35.310345 | 101 | 0.616211 |
5d38d81bebcb78fdcd3ec6d9d6e334cd582c79d5
| 1,004 |
py
|
Python
|
tests/bogus_python_model.py
|
FossilizedContainers/fossilized-controller
|
5aa14112b3728a619a37233906366c1cda2a0a77
|
[
"MIT"
] | 1 |
2022-01-24T21:54:46.000Z
|
2022-01-24T21:54:46.000Z
|
tests/bogus_python_model.py
|
FossilizedContainers/fossilized-controller
|
5aa14112b3728a619a37233906366c1cda2a0a77
|
[
"MIT"
] | null | null | null |
tests/bogus_python_model.py
|
FossilizedContainers/fossilized-controller
|
5aa14112b3728a619a37233906366c1cda2a0a77
|
[
"MIT"
] | null | null | null |
import os
import sys
import lipd
# import pythonAdapter, assumes in ../python-adapter/
tests_dir = os.path.dirname(os.path.realpath(__file__))
fc_dir = os.path.dirname(tests_dir)
python_adapter_dir = os.path.join(fc_dir, "python-adapter")
sys.path.append(python_adapter_dir)
import adapter
# have to call adapter in the adapter.py file as adapter.adapter
adapter = adapter.global_adapter
adapter.register(fake_model)
adapter.start_server()
| 26.421053 | 85 | 0.739044 |
5d38e4a873930da8bc4504369cb7f1bca6894323
| 13,421 |
py
|
Python
|
tello_control_ui.py
|
banne2266/UAV-autopilot-NCTU-2021
|
1a25d4add2de9659516d045054935e3b6e04d06d
|
[
"MIT"
] | null | null | null |
tello_control_ui.py
|
banne2266/UAV-autopilot-NCTU-2021
|
1a25d4add2de9659516d045054935e3b6e04d06d
|
[
"MIT"
] | null | null | null |
tello_control_ui.py
|
banne2266/UAV-autopilot-NCTU-2021
|
1a25d4add2de9659516d045054935e3b6e04d06d
|
[
"MIT"
] | null | null | null |
from PIL import Image
from PIL import ImageTk
import tkinter as tki
from tkinter import Toplevel, Scale
import threading
import datetime
import cv2
import os
import time
import platform
| 37.177285 | 113 | 0.580583 |
5d3a6779a16e847e6ab8367c806b8cd0393b9b7c
| 160 |
py
|
Python
|
__temp/examples/rhino/mesh-stanford-dragon.py
|
robin-gdwl/examples_topop-desc
|
3a10dfc891c3e6998029c7baf8a5a7a501870fe2
|
[
"MIT"
] | null | null | null |
__temp/examples/rhino/mesh-stanford-dragon.py
|
robin-gdwl/examples_topop-desc
|
3a10dfc891c3e6998029c7baf8a5a7a501870fe2
|
[
"MIT"
] | null | null | null |
__temp/examples/rhino/mesh-stanford-dragon.py
|
robin-gdwl/examples_topop-desc
|
3a10dfc891c3e6998029c7baf8a5a7a501870fe2
|
[
"MIT"
] | 1 |
2022-01-16T02:32:43.000Z
|
2022-01-16T02:32:43.000Z
|
import compas
import compas_rhino
from compas.datastructures import Mesh
mesh = Mesh.from_ply(compas.get('stanford_dragon.ply'))
compas_rhino.mesh_draw(mesh)
| 20 | 55 | 0.825 |
5d3b019f7105ea70804aca52b749a325dbd4f20c
| 416 |
py
|
Python
|
Python/ML_DL/DL/Neural-Networks-Demystified-master/partOne.py
|
vbsteja/code
|
0c8f4dc579f5de21b6c55fe6e65c3c8eb5473687
|
[
"Apache-2.0"
] | 3 |
2018-08-06T15:34:58.000Z
|
2022-02-11T14:19:05.000Z
|
Python/ML_DL/DL/Neural-Networks-Demystified-master/partOne.py
|
vbsteja/code
|
0c8f4dc579f5de21b6c55fe6e65c3c8eb5473687
|
[
"Apache-2.0"
] | null | null | null |
Python/ML_DL/DL/Neural-Networks-Demystified-master/partOne.py
|
vbsteja/code
|
0c8f4dc579f5de21b6c55fe6e65c3c8eb5473687
|
[
"Apache-2.0"
] | 3 |
2018-08-06T15:35:01.000Z
|
2020-08-08T07:53:07.000Z
|
# Neural Networks Demystified
# Part 1: Data + Architecture
#
# Supporting code for short YouTube series on artificial neural networks.
#
# Stephen Welch
# @stephencwelch
import numpy as np
# X = (hours sleeping, hours studying), y = Score on test
X = np.array(([3,5], [5,1], [10,2]), dtype=float)
y = np.array(([75], [82], [93]), dtype=float)
# Normalize
X = X/np.amax(X, axis=0)
y = y/100 #Max test score is 100
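# With these inputs the normalization gives (worked out by hand for illustration):
# X -> [[0.3, 1.0], [0.5, 0.2], [1.0, 0.4]] (column-wise maxima are [10, 5])
# y -> [[0.75], [0.82], [0.93]]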
| 24.470588 | 73 | 0.673077 |
5d3c91e42dac2041a621585dba8f1dfdc1e88107
| 19,048 |
py
|
Python
|
manubot/process/util.py
|
benstear/manubot
|
df184a5c7e5eb98894a3edb43d9772d1ac3e01ab
|
[
"BSD-3-Clause"
] | null | null | null |
manubot/process/util.py
|
benstear/manubot
|
df184a5c7e5eb98894a3edb43d9772d1ac3e01ab
|
[
"BSD-3-Clause"
] | null | null | null |
manubot/process/util.py
|
benstear/manubot
|
df184a5c7e5eb98894a3edb43d9772d1ac3e01ab
|
[
"BSD-3-Clause"
] | null | null | null |
import json
import logging
import os
import pathlib
import re
import textwrap
import warnings
from typing import List, Optional
import jinja2
import pandas
import requests
import requests_cache
import yaml
from manubot.util import read_serialized_data, read_serialized_dict
from manubot.process.bibliography import load_manual_references
from manubot.process.ci import get_continuous_integration_parameters
from manubot.process.metadata import (
get_header_includes,
get_thumbnail_url,
get_manuscript_urls,
get_software_versions,
)
from manubot.process.manuscript import (
datetime_now,
get_manuscript_stats,
get_text,
)
from manubot.cite.citekey import (
citekey_to_csl_item,
shorten_citekey,
is_valid_citekey,
standardize_citekey,
)
def check_collisions(citekeys_df):
"""
Check for short_citekey hash collisions
"""
collision_df = citekeys_df[["standard_citekey", "short_citekey"]].drop_duplicates()
collision_df = collision_df[collision_df.short_citekey.duplicated(keep=False)]
if not collision_df.empty:
logging.error(f"OMF! Hash collision. Congratulations.\n{collision_df}")
return collision_df
def check_multiple_citation_strings(citekeys_df):
"""
Identify different citation strings referring the the same reference.
"""
message = textwrap.dedent(
f"""\
{len(citekeys_df)} unique citations strings extracted from text
{citekeys_df.standard_citekey.nunique()} unique standard citations\
"""
)
logging.info(message)
multi_df = citekeys_df[citekeys_df.standard_citekey.duplicated(keep=False)]
if not multi_df.empty:
table = multi_df.to_string(
index=False, columns=["standard_citekey", "manuscript_citekey"]
)
logging.warning(f"Multiple citekeys detected for the same reference:\n{table}")
return multi_df
def read_variable_files(paths: List[str], variables: Optional[dict] = None) -> dict:
"""
Read multiple serialized data files into a user_variables dictionary.
Provide `paths` (a list of URLs or local file paths).
Paths can optionally have a namespace prepended.
For example:
```python
paths = [
'https://git.io/vbkqm', # update the dictionary's top-level
'namespace_1=https://git.io/vbkqm', # store under 'namespace_1' key
'namespace_2=some_local_path.json', # store under 'namespace_2' key
]
```
If a namespace is not provided, the JSON must contain a dictionary as its
top level. Namespaces should consist only of ASCII alphanumeric characters
(includes underscores, first character cannot be numeric).
Pass a dictionary to `variables` to update an existing dictionary rather
than create a new dictionary.
"""
if variables is None:
variables = {}
for path in paths:
logging.info(f"Reading user-provided templating variables at {path!r}")
# Match only namespaces that are valid jinja2 variable names
# http://jinja.pocoo.org/docs/2.10/api/#identifier-naming
match = re.match(r"([a-zA-Z_][a-zA-Z0-9_]*)=(.+)", path)
if match:
namespace, path = match.groups()
logging.info(
f"Using the {namespace!r} namespace for template variables from {path!r}"
)
try:
if match:
obj = {namespace: read_serialized_data(path)}
else:
obj = read_serialized_dict(path)
except Exception:
logging.exception(f"Error reading template variables from {path!r}")
continue
assert isinstance(obj, dict)
conflicts = variables.keys() & obj.keys()
if conflicts:
logging.warning(
f"Template variables in {path!r} overwrite existing "
"values for the following keys:\n" + "\n".join(conflicts)
)
variables.update(obj)
logging.debug(
f"Reading user-provided templating variables complete:\n"
f"{json.dumps(variables, indent=2, ensure_ascii=False)}"
)
return variables
def add_author_affiliations(variables: dict) -> dict:
"""
Edit variables to contain numbered author affiliations. Specifically,
add a list of affiliation_numbers for each author and add a list of
affiliations to the top-level of variables. If no authors have any
affiliations, variables is left unmodified.
"""
rows = list()
for author in variables["authors"]:
if "affiliations" not in author:
continue
if not isinstance(author["affiliations"], list):
warnings.warn(
f"Expected list for {author['name']}'s affiliations. "
f"Assuming multiple affiliations are `; ` separated. "
f"Please switch affiliations to a list.",
category=DeprecationWarning,
)
author["affiliations"] = author["affiliations"].split("; ")
for affiliation in author["affiliations"]:
rows.append((author["name"], affiliation))
if not rows:
return variables
affil_map_df = pandas.DataFrame(rows, columns=["name", "affiliation"])
affiliation_df = affil_map_df[["affiliation"]].drop_duplicates()
affiliation_df["affiliation_number"] = range(1, 1 + len(affiliation_df))
affil_map_df = affil_map_df.merge(affiliation_df)
name_to_numbers = {
name: sorted(df.affiliation_number) for name, df in affil_map_df.groupby("name")
}
for author in variables["authors"]:
author["affiliation_numbers"] = name_to_numbers.get(author["name"], [])
variables["affiliations"] = affiliation_df.to_dict(orient="records")
return variables
def load_variables(args) -> dict:
"""
Read `metadata.yaml` and files specified by `--template-variables-path` to generate
manuscript variables available for jinja2 templating.
    Returns a dictionary, referred to as `variables`, with the following keys:
- `pandoc`: a dictionary for passing options to Pandoc via the `yaml_metadata_block`.
Fields in `pandoc` are either generated by Manubot or hard-coded by the user if `metadata.yaml`
includes a `pandoc` dictionary.
- `manubot`: a dictionary for manubot-related information and metadata.
Fields in `manubot` are either generated by Manubot or hard-coded by the user if `metadata.yaml`
includes a `manubot` dictionary.
- All fields from a manuscript's `metadata.yaml` that are not interpreted by Manubot are
copied to `variables`. Interpreted fields include `pandoc`, `manubot`, `title`,
`keywords`, `authors` (formerly `author_info`, now deprecated), `lang`, and `thumbnail`.
- User-specified fields inserted according to the `--template-variables-path` option.
User-specified variables take highest precedence and can overwrite values for existing
keys like `pandoc` or `manubot` (dangerous).
"""
# Generated manuscript variables
variables = {"pandoc": {}, "manubot": {}}
# Read metadata which contains pandoc_yaml_metadata
# as well as authors information.
if args.meta_yaml_path.is_file():
metadata = read_serialized_dict(args.meta_yaml_path)
else:
metadata = {}
logging.warning(
f"missing {args.meta_yaml_path} file with yaml_metadata_block for pandoc"
)
# Interpreted keys that are intended for pandoc
move_to_pandoc = "title", "keywords", "lang"
for key in move_to_pandoc:
if key in metadata:
variables["pandoc"][key] = metadata.pop(key)
# Add date to metadata
now = datetime_now()
logging.info(
f"Using {now:%Z} timezone.\n"
f"Dating manuscript with the current datetime: {now.isoformat()}"
)
variables["pandoc"]["date-meta"] = now.date().isoformat()
variables["manubot"]["date"] = f"{now:%B} {now.day}, {now.year}"
# Process authors metadata
if "author_info" in metadata:
authors = metadata.pop("author_info", [])
warnings.warn(
"metadata.yaml: 'author_info' is deprecated. Use 'authors' instead.",
category=DeprecationWarning,
)
else:
authors = metadata.pop("authors", [])
if authors is None:
authors = []
variables["pandoc"]["author-meta"] = [author["name"] for author in authors]
variables["manubot"]["authors"] = authors
add_author_affiliations(variables["manubot"])
# Set repository version metadata for CI builds
ci_params = get_continuous_integration_parameters()
if ci_params:
variables["manubot"]["ci_source"] = ci_params
# Add manuscript URLs
variables["manubot"].update(get_manuscript_urls(metadata.pop("html_url", None)))
# Add software versions
variables["manubot"].update(get_software_versions())
# Add thumbnail URL if present
thumbnail_url = get_thumbnail_url(metadata.pop("thumbnail", None))
if thumbnail_url:
variables["manubot"]["thumbnail_url"] = thumbnail_url
# Update variables with metadata.yaml pandoc/manubot dicts
for key in "pandoc", "manubot":
dict_ = metadata.pop(key, {})
if not isinstance(dict_, dict):
logging.warning(
f"load_variables expected metadata.yaml field {key!r} to be a dict."
f"Received a {dict_.__class__.__name__!r} instead."
)
continue
variables[key].update(dict_)
# Update variables with uninterpreted metadata.yaml fields
variables.update(metadata)
# Update variables with user-provided variables here
variables = read_variable_files(args.template_variables_path, variables)
# Add header-includes metadata with <meta> information for the HTML output's <head>
variables["pandoc"]["header-includes"] = get_header_includes(variables)
assert args.skip_citations
# Extend Pandoc's metadata.bibliography field with manual references paths
bibliographies = variables["pandoc"].get("bibliography", [])
if isinstance(bibliographies, str):
bibliographies = [bibliographies]
assert isinstance(bibliographies, list)
bibliographies.extend(args.manual_references_paths)
bibliographies = list(map(os.fspath, bibliographies))
variables["pandoc"]["bibliography"] = bibliographies
# enable pandoc-manubot-cite option to write bibliography to a file
variables["pandoc"]["manubot-output-bibliography"] = os.fspath(args.references_path)
variables["pandoc"]["manubot-output-citekeys"] = os.fspath(args.citations_path)
variables["pandoc"]["manubot-requests-cache-path"] = os.fspath(
args.requests_cache_path
)
variables["pandoc"]["manubot-clear-requests-cache"] = args.clear_requests_cache
return variables
def get_citekeys_df(citekeys: list, citekey_aliases: dict = {}):
"""
Generate and return citekeys_df.
citekeys_df is a pandas.DataFrame with the following columns:
- manuscript_citekey: citation keys extracted from the manuscript content files.
- detagged_citekey: manuscript_citekey but with tag citekeys dereferenced
- standard_citekey: detagged_citekey standardized
- short_citekey: standard_citekey hashed to create a shortened citekey
"""
citekeys_df = pandas.DataFrame(
{"manuscript_citekey": list(citekeys)}
).drop_duplicates()
citekeys_df["detagged_citekey"] = citekeys_df.manuscript_citekey.map(
lambda citekey: citekey_aliases.get(citekey, citekey)
)
for citation in citekeys_df.detagged_citekey:
is_valid_citekey(citation, allow_raw=True)
citekeys_df["standard_citekey"] = citekeys_df.detagged_citekey.map(
standardize_citekey
)
citekeys_df["short_citekey"] = citekeys_df.standard_citekey.map(shorten_citekey)
citekeys_df = citekeys_df.sort_values(["standard_citekey", "detagged_citekey"])
check_collisions(citekeys_df)
check_multiple_citation_strings(citekeys_df)
return citekeys_df
def read_citations_tsv(path) -> dict:
"""
Read citekey aliases from a citation-tags.tsv file.
"""
if not path.is_file():
logging.info(
f"no citation tags file at {path} "
"Not reading citekey_aliases from citation-tags.tsv."
)
return {}
tag_df = pandas.read_csv(path, sep="\t")
na_rows_df = tag_df[tag_df.isnull().any(axis="columns")]
if not na_rows_df.empty:
logging.error(
f"{path} contains rows with missing values:\n"
f"{na_rows_df}\n"
"This error can be caused by using spaces rather than tabs to delimit fields.\n"
"Proceeding to reread TSV with delim_whitespace=True."
)
tag_df = pandas.read_csv(path, delim_whitespace=True)
tag_df["manuscript_citekey"] = "tag:" + tag_df.tag
tag_df = tag_df.rename(columns={"citation": "detagged_citekey"})
citekey_aliases = dict(
zip(tag_df["manuscript_citekey"], tag_df["detagged_citekey"])
)
return citekey_aliases
def _citation_tags_to_reference_links(args) -> str:
"""
Convert citation-tags.tsv to markdown reference link syntax
"""
citekey_aliases = read_citations_tsv(args.citation_tags_path)
if not citekey_aliases:
return ""
text = "\n\n"
for key, value in citekey_aliases.items():
text += f"[@{key}]: {value}\n"
logging.warning(
"citation-tags.tsv is deprecated. "
f"Consider deleting citation-tags.tsv and inserting the following paragraph into your Markdown content:{text}"
)
return text
def generate_csl_items(
citekeys: list,
manual_refs: dict = {},
requests_cache_path: Optional[str] = None,
clear_requests_cache: Optional[bool] = False,
) -> list:
"""
    Generate CSL (citeproc) items for standard_citekeys in citekeys_df.
Parameters:
- citekeys: list of standard_citekeys
- manual_refs: mapping from standard_citekey to csl_item for manual references
- requests_cache_path: path for the requests cache database.
Passed as cache_name to `requests_cache.install_cache`.
requests_cache may append an extension to this path, so it is not always the exact
path to the cache. If None, do not use requests_cache.
- clear_requests_cache: If True, clear the requests cache before generating citekey metadata.
"""
# Deduplicate citations
citekeys = list(dict.fromkeys(citekeys))
# Install cache
if requests_cache_path is not None:
requests # require `import requests` in case this is essential for monkey patching by requests_cache.
requests_cache.install_cache(requests_cache_path, include_get_headers=True)
cache = requests_cache.get_cache()
if clear_requests_cache:
logging.info("Clearing requests-cache")
requests_cache.clear()
logging.info(
f"requests-cache starting with {len(cache.responses)} cached responses"
)
csl_items = list()
failures = list()
for standard_citekey in citekeys:
if standard_citekey in manual_refs:
csl_items.append(manual_refs[standard_citekey])
continue
elif standard_citekey.startswith("raw:"):
logging.error(
f"CSL JSON Data with a standard_citekey of {standard_citekey!r} not found in manual-references.json. "
"Metadata must be provided for raw citekeys."
)
failures.append(standard_citekey)
try:
csl_item = citekey_to_csl_item(standard_citekey)
csl_items.append(csl_item)
except Exception:
logging.exception(f"Citeproc retrieval failure for {standard_citekey!r}")
failures.append(standard_citekey)
# Uninstall cache
if requests_cache_path is not None:
logging.info(
f"requests-cache finished with {len(cache.responses)} cached responses"
)
requests_cache.uninstall_cache()
if failures:
message = "CSL JSON Data retrieval failed for the following standardized citation keys:\n{}".format(
"\n".join(failures)
)
logging.error(message)
return csl_items
def _generate_csl_items(args, citekeys_df):
"""
    Generate CSL (citeproc) items for standard_citekeys in citekeys_df.
Writes references.json to disk and logs warnings for potential problems.
"""
# Read manual references (overrides) in JSON CSL
manual_refs = load_manual_references(args.manual_references_paths)
# Retrieve CSL Items
csl_items = generate_csl_items(
citekeys=citekeys_df.standard_citekey.unique(),
manual_refs=manual_refs,
requests_cache_path=args.requests_cache_path,
clear_requests_cache=args.clear_requests_cache,
)
# Write CSL JSON bibliography for Pandoc.
write_csl_json(csl_items, args.references_path)
return csl_items
def write_csl_json(csl_items, path):
"""
Write CSL Items to a JSON file at `path`.
If `path` evaluates as False, do nothing.
"""
if not path:
return
path = pathlib.Path(path)
with path.open("w", encoding="utf-8") as write_file:
json.dump(csl_items, write_file, indent=2, ensure_ascii=False)
write_file.write("\n")
def template_with_jinja2(text, variables):
"""
Template using jinja2 with the variables dictionary unpacked as keyword
arguments.
"""
jinja_environment = jinja2.Environment(
loader=jinja2.BaseLoader(),
undefined=jinja2.make_logging_undefined(logging.getLogger()),
autoescape=False,
comment_start_string="{##",
comment_end_string="##}",
extensions=["jinja2.ext.do", "jinja2.ext.loopcontrols"],
)
template = jinja_environment.from_string(text)
return template.render(**variables)
def prepare_manuscript(args):
"""
Compile manuscript, creating manuscript.md and references.json as inputs
for pandoc.
"""
text = get_text(args.content_directory)
assert args.skip_citations
text += _citation_tags_to_reference_links(args)
variables = load_variables(args)
variables["manubot"]["manuscript_stats"] = get_manuscript_stats(text)
with args.variables_path.open("w", encoding="utf-8") as write_file:
json.dump(variables, write_file, ensure_ascii=False, indent=2)
write_file.write("\n")
text = template_with_jinja2(text, variables)
# Write manuscript for pandoc
with args.manuscript_path.open("w", encoding="utf-8") as write_file:
yaml.dump(
variables["pandoc"],
write_file,
default_flow_style=False,
explicit_start=True,
explicit_end=True,
width=float("inf"),
)
write_file.write("\n")
write_file.write(text)
| 37.496063 | 118 | 0.679546 |
5d3efa01c738d69c4c33cd7d548df41216a056d7
| 3,738 |
py
|
Python
|
iba_scrape.py
|
wmwilcox/mix-mind
|
02da016f314bb5f30f267f1f46c67c6d4a4c370c
|
[
"Apache-2.0"
] | 1 |
2021-05-02T19:50:44.000Z
|
2021-05-02T19:50:44.000Z
|
iba_scrape.py
|
wmwilcox/mix-mind
|
02da016f314bb5f30f267f1f46c67c6d4a4c370c
|
[
"Apache-2.0"
] | 34 |
2018-08-07T13:09:29.000Z
|
2021-05-13T17:25:18.000Z
|
iba_scrape.py
|
wmwilcox/mix-mind
|
02da016f314bb5f30f267f1f46c67c6d4a4c370c
|
[
"Apache-2.0"
] | 4 |
2019-02-14T04:17:24.000Z
|
2021-05-14T15:33:39.000Z
|
#! /usr/bin/env python
# scrape the IBA pages for cocktail lists
import sys
import xml.etree.ElementTree as ET
from lxml import html
import requests
from pprint import pprint
from collections import OrderedDict
import json
url = 'http://iba-world.com/new-era-drinks/'
jsonfile = 'IBA_new_era_drinks.json'
url = 'http://iba-world.com/iba-cocktails/'
jsonfile = 'IBA_unforgettables.json'
url = 'http://iba-world.com/contemporary-classics/'
jsonfile = 'IBA_contemporary_classics.json'
jsonfile = 'IBA_.json'
recipes = OrderedDict()
page = requests.get(url)
tree = html.fromstring(page.content)
items = tree.findall(".//div[@class='blog_list_item_lists']")
for item in items:
name = item.find(".//h3").text
name = ' '.join([word.capitalize() for word in name.split()])
body = item.find(".//div[@class='blog_text']")
recipes[name] = {'unit': 'cL'}
print name
children = [c for c in body.iterchildren()]
n = 0
if children[1].tag == 'ul':
n = -1
style = children[n+1].text
if style is None:
try:
style = children[n+1].find('span').text
except:
pass
recipes[name]['style'] = style
recipes[name]['ingredients'] = OrderedDict()
if not children[n+2].tag == 'ul':
print "adapting <p> ingredients:", children[n+2].text
ing_list = ET.tostring(children[n+2]).lstrip('<p>').rstrip('</p>\n').split('<br />\n')
else:
ing_list = [i.text for i in children[n+2].iterchildren()]
for ingredient in ing_list:
if len(ingredient.split()) == 1:
recipes[name]['ingredients'][ingredient.lower()] = ''
continue
unit = ingredient.split()[1].lower()
if unit == 'cl':
recipes[name]['ingredients'][' '.join([w.lower() for w in ingredient.split()[2:]])] = float(ingredient.split()[0])
elif unit == 'bar' or unit == 'to': # bar spoon
recipes[name]['ingredients'][' '.join([w.lower() for w in ingredient.split()[3:]])] = ' '.join(ingredient.split()[:3])
elif unit == 'dashes' or unit == 'drops' or unit == 'with':
recipes[name]['ingredients'][' '.join([w.lower() for w in ingredient.split()[2:]])] = ' '.join(ingredient.split()[:2])
elif unit == 'dash':
recipes[name]['ingredients'][' '.join([w.lower() for w in ingredient.split()[2:]])] = 'dash'
else:
print "using literal: ", ingredient
literal = {'1': 'one', '2': 'two', 'A': 'one'}
try:
recipes[name]['ingredients'][' '.join([w.lower() for w in ingredient.split()[1:]])] = literal[ingredient.split()[0]]
except:
recipes[name]['ingredients'][ingredient.lower()] = ''
# Get full description from the link
ref_url = item.find(".//a[@class='top_hover_image']").attrib.get('href')
detail_page = requests.get(ref_url)
detail_tree = html.fromstring(detail_page.content)
use_next = False
for child in detail_tree.find(".//div[@class='col-sm-9']").iterchildren():
if use_next and child.tag == 'p':
recipes[name]['IBA_description'] = child.text
break
if child.tag =='ul':
use_next = True
with open(jsonfile, 'w') as fp:
json.dump(recipes, fp, indent=4, separators=(',', ': '))
print "Wrote out as {}".format(jsonfile)
sys.exit(0)
raw = sys.argv[1]
with open(raw) as fp:
for line in fp.readlines():
if line.lstrip().startswith(r'<h3>'):
print line.lstrip()
# super hax
if line.startswith(r'<p>'):
print line
if line.startswith(r'<li>'):
print line
if not line.lstrip().startswith('<'):
print line
| 33.981818 | 132 | 0.584805 |
5d3f6e4da89be36858bff2a63bb4de2ff240849a
| 244 |
py
|
Python
|
config.py
|
ggiaquin16/GroupProject19
|
f491abc4e8f127552dc7384f3378e14029da8008
|
[
"CC-BY-3.0"
] | null | null | null |
config.py
|
ggiaquin16/GroupProject19
|
f491abc4e8f127552dc7384f3378e14029da8008
|
[
"CC-BY-3.0"
] | null | null | null |
config.py
|
ggiaquin16/GroupProject19
|
f491abc4e8f127552dc7384f3378e14029da8008
|
[
"CC-BY-3.0"
] | null | null | null |
api_key = "9N7hvPP9yFrjBnELpBdthluBjiOWzJZw"
mongo_url = 'mongodb://localhost:27017'
mongo_db = 'CarPopularity'
mongo_collections = ['CarSalesByYear', 'PopularCarsByRegion']
years_data = ['2019', '2018', '2017', '2016', '2015']
test_mode = True
| 40.666667 | 61 | 0.75 |
5d4008c47be6196efe901a8e83cca011533d0bf1
| 2,648 |
py
|
Python
|
pytorch_ares/pytorch_ares/attack_torch/mim.py
|
thu-ml/realsafe
|
474d549aa402b4cdd5e3629d23d035c31b60a360
|
[
"MIT"
] | 107 |
2020-06-15T09:55:11.000Z
|
2020-12-20T11:27:11.000Z
|
pytorch_ares/pytorch_ares/attack_torch/mim.py
|
haichen-ber/ares
|
474d549aa402b4cdd5e3629d23d035c31b60a360
|
[
"MIT"
] | 7 |
2020-06-14T03:00:18.000Z
|
2020-12-07T07:10:10.000Z
|
pytorch_ares/pytorch_ares/attack_torch/mim.py
|
haichen-ber/ares
|
474d549aa402b4cdd5e3629d23d035c31b60a360
|
[
"MIT"
] | 19 |
2020-06-14T08:35:33.000Z
|
2020-12-19T13:43:41.000Z
|
import imp
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from pytorch_ares.attack_torch.utils import loss_adv
| 39.522388 | 118 | 0.583837 |
5d4029e498cad9d638e5fe5f4c3a65f28490da96
| 303 |
py
|
Python
|
src/utils/templatetags/menubutton.py
|
pwelzel/bornhack-website
|
af794e6a2fba06e09626259c7768feb30ff394be
|
[
"BSD-3-Clause"
] | null | null | null |
src/utils/templatetags/menubutton.py
|
pwelzel/bornhack-website
|
af794e6a2fba06e09626259c7768feb30ff394be
|
[
"BSD-3-Clause"
] | null | null | null |
src/utils/templatetags/menubutton.py
|
pwelzel/bornhack-website
|
af794e6a2fba06e09626259c7768feb30ff394be
|
[
"BSD-3-Clause"
] | null | null | null |
from django import template
register = template.Library()
| 25.25 | 93 | 0.716172 |
5d41c3b8ea2fc0ea3e45c5b6768c95bfbb166b0c
| 1,965 |
py
|
Python
|
wiki/tests.py
|
Prones94/Make_Wiki
|
f8816eb31bb370f48affff8568a6b0d0ffaf7cd4
|
[
"MIT"
] | null | null | null |
wiki/tests.py
|
Prones94/Make_Wiki
|
f8816eb31bb370f48affff8568a6b0d0ffaf7cd4
|
[
"MIT"
] | 5 |
2020-06-06T01:41:16.000Z
|
2021-06-10T20:09:01.000Z
|
wiki/tests.py
|
Prones94/Make_Wiki
|
f8816eb31bb370f48affff8568a6b0d0ffaf7cd4
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from django.contrib.auth.models import User
from wiki.models import Page
from django.utils.text import slugify
# Create your tests here.
'''
Steps to writing a test
1. Set up your test data
2. Make a request (GET, POST)
3a. Check if response matches what we expect
3b. Check if database matches what we expect
'''
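# A sketch of one such test following the three steps above. The Page field
# names (title, content) and the URL are illustrative assumptions, not taken
# from this project's models or routes.
class PageListTest(TestCase):
    def setUp(self):
        # 1. Set up your test data
        self.user = User.objects.create_user(username='tester', password='secret')
        Page.objects.create(title='Home', content='Welcome')
    def test_page_list_returns_ok(self):
        # 2. Make a GET request
        response = self.client.get('/')
        # 3a. Check if response matches what we expect
        self.assertEqual(response.status_code, 200)
        # 3b. Check if database matches what we expect
        self.assertEqual(Page.objects.count(), 1)
        self.assertEqual(slugify(Page.objects.first().title), 'home')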
| 33.87931 | 85 | 0.641221 |
5d42291558faa9e742ab82a57f0f93c0ba5ed168
| 66 |
py
|
Python
|
birdy/__init__.py
|
tkiapril/birdy
|
cf6a8f8d31c9363dbf7398ae3d78fe3069a5a936
|
[
"MIT"
] | 1 |
2015-01-07T17:47:54.000Z
|
2015-01-07T17:47:54.000Z
|
birdy/__init__.py
|
tkiapril/birdy
|
cf6a8f8d31c9363dbf7398ae3d78fe3069a5a936
|
[
"MIT"
] | null | null | null |
birdy/__init__.py
|
tkiapril/birdy
|
cf6a8f8d31c9363dbf7398ae3d78fe3069a5a936
|
[
"MIT"
] | null | null | null |
__author__ = 'Mitja Pagon <[email protected]>'
__version__ = '0.2'
| 22 | 45 | 0.712121 |
5d424aaa1fdb6fb518af8c5169d61b82bae9ef0f
| 1,928 |
py
|
Python
|
ares/defense/randomization.py
|
KuanKuanQAQ/ares
|
40dbefc18f6438e1812021fe6d6c3195f22ca295
|
[
"MIT"
] | 206 |
2020-12-31T09:43:11.000Z
|
2022-03-30T07:02:41.000Z
|
ares/defense/randomization.py
|
afoolboy/ares
|
89610d41fdde194e4ad916d29961aaed73383692
|
[
"MIT"
] | 7 |
2021-01-26T06:45:44.000Z
|
2022-02-26T05:25:48.000Z
|
ares/defense/randomization.py
|
afoolboy/ares
|
89610d41fdde194e4ad916d29961aaed73383692
|
[
"MIT"
] | 61 |
2020-12-29T14:02:41.000Z
|
2022-03-26T14:21:10.000Z
|
''' The randomization defense method, which applies random rescaling and padding to the input. '''
import tensorflow as tf
from ares.defense.input_transformation import input_transformation
def randomize(xs, scale_min=0.875, pad_value=0.0):
''' Apply random rescaling and padding to xs.
:param xs: A batch of inputs for some classifier.
:param scale_min: The random rescaling rate would be chosen between ``scale_min`` and 1.0.
:param pad_value: ``constant_values`` parameter for the ``tf.pad`` method.
:return: A new tensor with same shape and dtype as xs.
'''
ratio = tf.random.uniform((), minval=scale_min, maxval=1.0)
height, width = tf.cast(xs.shape[1].value * ratio, tf.int32), tf.cast(xs.shape[2].value * ratio, tf.int32)
xs_rescaled = tf.image.resize(xs, (height, width), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=True, preserve_aspect_ratio=False)
height_rem, width_rem = xs.shape[1].value - height, xs.shape[2].value - width
pad_left = tf.random_uniform((), 0, width_rem, dtype=tf.int32)
pad_right = width_rem - pad_left
pad_top = tf.random_uniform((), 0, height_rem, dtype=tf.int32)
pad_bottom = height_rem - pad_top
xs_padded = tf.pad(xs_rescaled, [[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]],
constant_values=pad_value)
xs_padded.set_shape(xs.shape)
return xs_padded
def randomization(scale_min=0.875, pad_value=0.0):
''' A decorator to apply randomize rescaling and padding to input of the classifier.
:param scale_min: The random rescaling rate would be chosen between ``scale_min`` and 1.0.
:param pad_value: ``constant_values`` parameter for the ``tf.pad`` method.
'''
return lambda rs_class: input_transformation(rs_class, randomize, args_fn, kwargs_fn)
| 43.818182 | 110 | 0.688797 |
5d43ba93812ece31b158196b6ad2d32a374bd0f8
| 147 |
py
|
Python
|
annotate/backend/admin.py
|
hopeogbons/image-annotation
|
2d8b1799bc791428fd3ab29d8052195996923130
|
[
"Apache-2.0"
] | null | null | null |
annotate/backend/admin.py
|
hopeogbons/image-annotation
|
2d8b1799bc791428fd3ab29d8052195996923130
|
[
"Apache-2.0"
] | 11 |
2021-03-09T10:15:39.000Z
|
2022-02-26T13:53:51.000Z
|
annotate/backend/admin.py
|
hopeogbons/image-annotation
|
2d8b1799bc791428fd3ab29d8052195996923130
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from annotate.backend.models import Image, Annotation
admin.site.register(Image)
admin.site.register(Annotation)
| 24.5 | 53 | 0.836735 |
5d4867285dd6af6ea7e2fbae337fc27c75376241
| 1,893 |
py
|
Python
|
c/create.py
|
LMS57/domato
|
005739f55b49ead0ac47ea14b324decee05a7625
|
[
"Apache-2.0"
] | null | null | null |
c/create.py
|
LMS57/domato
|
005739f55b49ead0ac47ea14b324decee05a7625
|
[
"Apache-2.0"
] | null | null | null |
c/create.py
|
LMS57/domato
|
005739f55b49ead0ac47ea14b324decee05a7625
|
[
"Apache-2.0"
] | null | null | null |
data = open('./original').readlines()
alphabet = {
"<":"lt",
">":"gt",
"=":"=",
"-":'-',
"+":"+",
"-":"-",
"~":"~",
"!":"ex",
"%":"%",
"^":"^",
"&":"&",
"*":"*",
"(":"(",
")":"right_paran",
"[":"[",
"]":"]",
"{":"{",
"}":"}",
"[":"[",
"]":"]",
"|":"|",
";":";",
":":":",
",":",",
".":".",
"?":"?",
"/":"/",
}
start = 0
current = ""
space = "<space>"
declared = []
referenced = []
for x in data:
x = x.strip()
if x == "":
continue
if '%%' == x:
start = 1
continue
elif start != 1:
continue
if x == "test":
break;
x = x.split(' ')
if len(x) == 1:#item declaration or end
if x[0] == ';':
current = ""
else:
current = x[0]
declared.append(item(x[0]))
print ""
else:
x = x[1:]
tmp = item(current)+'\t=\t'
for y in range(len(x)):
referenced.append(item(x[y]))
tmp += item(x[y])
if y != len(x)-1 and "'" not in x[y+1] and "'" not in x[y]:
tmp+=space
print tmp
referenced = set(referenced)
final = []
for x in referenced:
if x not in declared:
final.append(x)
print ""
for x in final:
tmp = x+'\t=\t'
x = x[1:-1]
print tmp + x.lower()
| 18.742574 | 71 | 0.320655 |
5d48a2b09ec3e91f3ac7c94a610ddffec5774abc
| 10,500 |
py
|
Python
|
AppServer/google/appengine/api/memcache/memcache_distributed.py
|
isabella232/scale-safe
|
8b887726768106b6b67d7be6ea257bee5cd83f9a
|
[
"Apache-2.0"
] | 3 |
2016-06-12T01:18:49.000Z
|
2018-07-16T18:20:23.000Z
|
AppServer/google/appengine/api/memcache/memcache_distributed.py
|
davgit/appscale
|
17d35a14fa5a56975de1e3517bec9e7f9047d82a
|
[
"Apache-2.0"
] | 1 |
2021-06-08T10:04:35.000Z
|
2021-06-08T10:04:35.000Z
|
AppServer/google/appengine/api/memcache/memcache_distributed.py
|
davgit/appscale
|
17d35a14fa5a56975de1e3517bec9e7f9047d82a
|
[
"Apache-2.0"
] | 1 |
2020-05-25T02:59:15.000Z
|
2020-05-25T02:59:15.000Z
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Non-stub version of the memcache API, keeping all data in memcached.
Uses the python-memcached library to interface with memcached.
"""
import base64
import cPickle
import logging
import memcache
import os
import time
from google.appengine.api import apiproxy_stub
from google.appengine.api.memcache import memcache_service_pb
from google.appengine.runtime import apiproxy_errors
MemcacheSetResponse = memcache_service_pb.MemcacheSetResponse
MemcacheSetRequest = memcache_service_pb.MemcacheSetRequest
MemcacheIncrementRequest = memcache_service_pb.MemcacheIncrementRequest
MemcacheIncrementResponse = memcache_service_pb.MemcacheIncrementResponse
MemcacheDeleteResponse = memcache_service_pb.MemcacheDeleteResponse
from google.appengine.api.memcache import TYPE_INT
from google.appengine.api.memcache import TYPE_LONG
| 33.980583 | 77 | 0.701619 |
5d493476e5ae3fc5c2137c7a547ce012434fae4f
| 4,927 |
py
|
Python
|
inflateutils/exportmesh.py
|
arpruss/inflatemesh
|
ab4abfc7794fd4cf96f41bb797e1b2a61f687a46
|
[
"MIT"
] | 8 |
2017-11-30T14:03:25.000Z
|
2021-03-02T03:16:01.000Z
|
inflateutils/exportmesh.py
|
arpruss/inflatemesh
|
ab4abfc7794fd4cf96f41bb797e1b2a61f687a46
|
[
"MIT"
] | 2 |
2019-03-15T04:10:04.000Z
|
2021-01-11T17:44:31.000Z
|
inflateutils/exportmesh.py
|
arpruss/inflatemesh
|
ab4abfc7794fd4cf96f41bb797e1b2a61f687a46
|
[
"MIT"
] | 2 |
2018-04-08T10:59:39.000Z
|
2021-01-28T03:37:57.000Z
|
from struct import pack
from .vector import *
from .formatdecimal import decimal
from numbers import Number
import os
import sys
try:
basestring
except:
basestring = str
def toSCADModule(polys, moduleName, digitsAfterDecimal=9, colorOverride=None):
"""
INPUT:
polys: list of (color,polyhedra) pairs (counterclockwise triangles), or a list of (color,triangle) pairs (TODO: currently uses first color for all in latter case)
moduleName: OpenSCAD module name
OUTPUT: string with OpenSCAD code implementing the polys
"""
polys = toPolyhedra(polys)
scad = []
scad.append("module " +moduleName+ "() {")
for rgb,poly in polys:
if colorOverride != "" and (colorOverride or rgb):
line = " color(%s) " % describeColor(colorOverride if colorOverride else tuple(min(max(c,0.),1.0) for c in rgb))
else:
line = " "
pointsDict = {}
i = 0
line += "polyhedron(points=["
points = []
for face in poly:
for v in reversed(face):
if tuple(v) not in pointsDict:
pointsDict[tuple(v)] = i
points.append( ("[%s,%s,%s]") % tuple(decimal(x,digitsAfterDecimal) for x in v) )
i += 1
line += ",".join(points)
line += "], faces=["
line += ",".join( "[" + ",".join(str(pointsDict[tuple(v)]) for v in reversed(face)) + "]" for face in poly ) + "]"
line += ");"
scad.append(line)
scad.append("}\n")
return "\n".join(scad)
def saveSCAD(filename, polys, moduleName="object1", quiet=False):
"""
filename: filename to write OpenSCAD file
polys: list of (color,polyhedra) pairs (counterclockwise triangles)
moduleName: OpenSCAD module name
quiet: give no status message if set
"""
if not quiet: sys.stderr.write("Saving %s\n" % filename)
if filename:
with open(filename, "w") as f:
f.write(toSCADModule(polys, moduleName))
f.write("\n" + moduleName + "();\n")
else:
sys.stdout.write(toSCADModule(polys, moduleName))
sys.stdout.write("\n" + moduleName + "();\n")
def saveSTL(filename, mesh, swapYZ=False, quiet=False):
"""
filename: filename to save STL file
mesh: list of (color,triangle) pairs (counterclockwise)
swapYZ: should Y/Z axes be swapped?
quiet: give no status message if set
"""
mesh = toMesh(mesh)
if not quiet: sys.stderr.write("Saving %s\n" % filename)
minY = float("inf")
minVector = Vector(float("inf"),float("inf"),float("inf"))
numTriangles = 0
if swapYZ:
matrix = Matrix( (1,0,0), (0,0,-1), (0,1,0) )
else:
matrix = Matrix.identity(3)
mono = True
for rgb,triangle in mesh:
if rgb is not None:
mono = False
numTriangles += 1
for vertex in triangle:
vertex = matrix*vertex
minVector = Vector(min(minVector[i], vertex[i]) for i in range(3))
minVector -= Vector(0.001,0.001,0.001) # make sure all STL coordinates are strictly positive as per Wikipedia
if filename:
with open(filename, "wb") as f:
writeSTL(f.write)
else:
if sys.platform == "win32":
import msvcrt
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
writeSTL(lambda data : os.write(sys.stdout.fileno(), data))
| 33.517007 | 166 | 0.558149 |
5d4b0bc52d1482cd0028c140868e692cfb38b3c0
| 3,982 |
py
|
Python
|
Assignment1/Identification/match_module.py
|
arywatt/FDS_2020_2021
|
392f360b219c6ef5e2c685da1f3c8aab7415ce32
|
[
"MIT"
] | null | null | null |
Assignment1/Identification/match_module.py
|
arywatt/FDS_2020_2021
|
392f360b219c6ef5e2c685da1f3c8aab7415ce32
|
[
"MIT"
] | null | null | null |
Assignment1/Identification/match_module.py
|
arywatt/FDS_2020_2021
|
392f360b219c6ef5e2c685da1f3c8aab7415ce32
|
[
"MIT"
] | 1 |
2020-10-29T08:38:42.000Z
|
2020-10-29T08:38:42.000Z
|
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import histogram_module
import dist_module
# model_images - list of file names of model images
# query_images - list of file names of query images
#
# dist_type - string which specifies distance type: 'chi2', 'l2', 'intersect'
# hist_type - string which specifies histogram type: 'grayvalue', 'dxdy', 'rgb', 'rg'
#
# note: use functions 'get_dist_by_name', 'get_hist_by_name' and 'is_grayvalue_hist' to obtain
# handles to distance and histogram functions, and to find out whether histogram function
# expects grayvalue or color image
# For each image file from 'query_images' find and visualize the 5 nearest images from 'model_image'.
#
# Note: use the previously implemented function 'find_best_match'
# Note: use subplot command to show all the images in the same Python figure, one row per query image
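# A sketch of the neighbor-visualization routine described above. The function
# name and the find_best_match signature used here (returning (best_match, D)
# with D[m, q] the distance between model m and query q) are assumptions, not
# confirmed by this file.
def show_neighbors(model_images, query_images, dist_type, hist_type, num_bins):
    num_nearest = 5  # show the 5 nearest model images per query image
    _, D = find_best_match(model_images, query_images, dist_type, hist_type, num_bins)
    rows = len(query_images)
    plt.figure()
    for q, query_file in enumerate(query_images):
        nearest = np.argsort(D[:, q])[:num_nearest]  # indices of the closest models
        plt.subplot(rows, num_nearest + 1, q * (num_nearest + 1) + 1)
        plt.imshow(np.array(Image.open(query_file)))
        plt.axis('off')
        for k, idx in enumerate(nearest):
            plt.subplot(rows, num_nearest + 1, q * (num_nearest + 1) + k + 2)
            plt.imshow(np.array(Image.open(model_images[idx])))
            plt.axis('off')
    plt.show()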
| 33.745763 | 101 | 0.633852 |
5d4c8cdbc546fb237f365ef954c77cb12a3738d8
| 1,566 |
py
|
Python
|
dycco/__main__.py
|
rojalator/dycco
|
84ace8727aef84bb3d886cdaa3d3aef1089f1935
|
[
"MIT"
] | null | null | null |
dycco/__main__.py
|
rojalator/dycco
|
84ace8727aef84bb3d886cdaa3d3aef1089f1935
|
[
"MIT"
] | 1 |
2022-03-22T07:35:15.000Z
|
2022-03-22T09:15:44.000Z
|
dycco/__main__.py
|
rojalator/dycco
|
84ace8727aef84bb3d886cdaa3d3aef1089f1935
|
[
"MIT"
] | null | null | null |
import argparse
import logging
import sys
from .dycco import document
if __name__ == '__main__':
arg_parser = argparse.ArgumentParser(prog='dycco', description='Literate-style documentation generator.')
arg_parser.add_argument('source_file', nargs='+', default=sys.stdin, help='Source files to document')
arg_parser.add_argument('-o', '--output-dir', default='docs', help='Output directory (will be created if necessary)')
arg_parser.add_argument('-a', '--asciidoc3', action='store_true', default=False, dest='use_ascii',
help='Process with asciidoc3 instead of markdown (you will have to install asciidoc3, of course)')
arg_parser.add_argument('-e', '--escape-html', action='store_true', default=False, dest='escape_html',
help='Run the documentation through html.escape() before markdown or asciidoc3')
arg_parser.add_argument('-f', '--single-file', action='store_true', default=False, dest='single_file',
help='Just produce a .md or .adoc file in single-column to be processed externally')
args = arg_parser.parse_args()
sys.exit(main(args.source_file, args.output_dir, args.use_ascii, args.escape_html, args.single_file))
| 46.058824 | 121 | 0.707535 |
5d4c9607e3defd3816cf4fbd7853e01e09dcb111
| 14,354 |
py
|
Python
|
jumpy/jumpy/ndarray.py
|
rghwer/testdocs
|
8fafa40407411ed7a3f8216e691e42e0c7d32083
|
[
"Apache-2.0"
] | 13,006 |
2015-02-13T18:35:31.000Z
|
2022-03-18T12:11:44.000Z
|
jumpy/jumpy/ndarray.py
|
pxiuqin/deeplearning4j
|
e11ddf3c24d355b43d36431687b807c8561aaae4
|
[
"Apache-2.0"
] | 5,319 |
2015-02-13T08:21:46.000Z
|
2019-06-12T14:56:50.000Z
|
jumpy/jumpy/ndarray.py
|
pxiuqin/deeplearning4j
|
e11ddf3c24d355b43d36431687b807c8561aaae4
|
[
"Apache-2.0"
] | 4,719 |
2015-02-13T22:48:55.000Z
|
2022-03-22T07:25:36.000Z
|
################################################################################
# Copyright (c) 2015-2018 Skymind, Inc.
#
# This program and the accompanying materials are made available under the
# terms of the Apache License, Version 2.0 which is available at
# https://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# SPDX-License-Identifier: Apache-2.0
################################################################################
from .java_classes import *
import numpy as np
import ctypes
import warnings
native_ops = NativeOpsHolder.getInstance().getDeviceNativeOps()
# DATA TYPE MANAGEMENT
DOUBLE = DataType.DOUBLE
FLOAT = DataType.FLOAT
HALF = DataType.HALF
LONG = DataType.LONG
INT = DataType.INT
SHORT = DataType.SHORT
UBYTE = DataType.UBYTE
BYTE = DataType.BYTE
BOOL = DataType.BOOL
UTF8 = DataType.UTF8
COMPRESSED = DataType.COMPRESSED
UNKNOWN = DataType.UNKNOWN
SUPPORTED_JAVA_DTYPES = [
DOUBLE,
FLOAT,
HALF,
LONG,
INT,
SHORT,
BOOL
#UTF8
]
SUPPORTED_PYTHON_DTYPES = [
np.float64,
np.float32,
np.float16,
np.int64,
np.int32,
np.int16,
np.bool_
#np.str_
]
_PY2J = {SUPPORTED_PYTHON_DTYPES[i] : SUPPORTED_JAVA_DTYPES[i] for i in range(len(SUPPORTED_JAVA_DTYPES))}
_J2PY = {SUPPORTED_JAVA_DTYPES[i] : SUPPORTED_PYTHON_DTYPES[i] for i in range(len(SUPPORTED_JAVA_DTYPES))}
def set_context_dtype(dtype):
'''
Sets the dtype for nd4j
# Arguments
dtype: 'float' or 'double'
'''
dtype_map = {
'float32': 'float',
'float64': 'double'
}
dtype = dtype_map.get(dtype, dtype)
if dtype not in ['float', 'double']:
raise ValueError("Invalid dtype '{}'. Available dtypes are 'float' and 'double'.".format(dtype))
dtype_ = DataTypeUtil.getDtypeFromContext(dtype)
DataTypeUtil.setDTypeForContext(dtype_)
if get_context_dtype() != dtype:
warnings.warn("Can not set context dtype now. Set it at the beginning of your program.")
def get_context_dtype():
'''
Returns the nd4j dtype
'''
dtype = DataTypeUtil.getDtypeFromContext()
return DataTypeUtil.getDTypeForName(dtype)
_refs = []
def _from_numpy(np_array):
'''
Convert numpy array to nd4j array
'''
pointer_address, _ = np_array.__array_interface__['data']
_refs.append(np_array)
pointer = native_ops.pointerForAddress(pointer_address)
size = np_array.size
pointer.limit(size)
jdtype = _dtype_py2j(np_array.dtype)
'''
mapping = {
DOUBLE: DoublePointer,
FLOAT: FloatPointer,
HALF: HalfPointer,
LONG: LongPointer,
INT: IntPointer,
SHORT: ShortPointer,
BOOL: BoolPointer
}
pc = mapping[jdtype]
#pointer = pc(pointer)
'''
buff = Nd4j.createBuffer(pointer, size, jdtype)
assert buff.address() == pointer_address
_refs.append(buff)
elem_size = buff.getElementSize()
assert elem_size == np_array.dtype.itemsize
strides = np_array.strides
strides = [dim / elem_size for dim in strides]
shape = np_array.shape
nd4j_array = Nd4j.create(buff, shape, strides, 0)
assert buff.address() == nd4j_array.data().address()
return nd4j_array
def _to_numpy(nd4j_array):
'''
Convert nd4j array to numpy array
'''
buff = nd4j_array.data()
address = buff.pointer().address()
dtype = nd4j_array.dataType().toString()
mapping = {
'DOUBLE': ctypes.c_double,
'FLOAT': ctypes.c_float,
'HALF': ctypes.c_short,
'LONG': ctypes.c_long,
'INT': ctypes.c_int,
'SHORT': ctypes.c_short,
'BOOL': ctypes.c_bool
}
Pointer = ctypes.POINTER(mapping[dtype])
pointer = ctypes.cast(address, Pointer)
np_array = np.ctypeslib.as_array(pointer, tuple(nd4j_array.shape()))
return np_array
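# A minimal round-trip sketch for the two converters above (illustrative only;
# it assumes a working nd4j/JVM backend has already been initialised).
def _example_numpy_roundtrip():
    arr = np.linspace(0, 1, 6, dtype='float32').reshape(2, 3)
    nd4j_arr = _from_numpy(arr)   # wrap the numpy buffer as an nd4j INDArray
    back = _to_numpy(nd4j_arr)    # view the same memory as a numpy array again
    assert back.shape == (2, 3)
    return back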
| 27.980507 | 106 | 0.544448 |
5d4ce281f4ac42992169e4a43a604e5e249ccc55
| 592 |
py
|
Python
|
Python/usec_mode.py
|
hanayik/StimSync
|
f08ec01a36c47b00bfe4937b5a6eb2a60af0713d
|
[
"BSD-2-Clause"
] | 6 |
2017-12-04T18:33:45.000Z
|
2021-08-04T02:07:21.000Z
|
source/Python/usec_mode.py
|
neurolabusc/StimSync
|
749908572bda3073b0911566d50fe92d74d3cdb7
|
[
"BSD-2-Clause"
] | null | null | null |
source/Python/usec_mode.py
|
neurolabusc/StimSync
|
749908572bda3073b0911566d50fe92d74d3cdb7
|
[
"BSD-2-Clause"
] | 3 |
2018-01-13T12:17:18.000Z
|
2021-08-01T06:43:10.000Z
|
import serial
ser = serial.Serial('/dev/tty.usbmodem7071', 115200, timeout=10)
ser.write("\xb1\xa3\xb5\xb5") #set usec mode 177,163,181,181
ser.flush()
ser.flushInput()
obs = ser.read(8)
if len(obs) != 8:
print('Error: no buttons presses detected')
print 'Observed data (as hex): '+ obs.encode('hex')
obsBin = [ord(c) for c in obs]
usec = (obsBin[3] << 24)+ (obsBin[4] << 16)+ (obsBin[5] << 8)+obsBin[6]
keys = (obsBin[1] << 8)+obsBin[2]
print 'keys pressed %d at %d usec' % (keys, usec)
ser.write("\xb1\xa3\xa9\xa9") #turn off oscilloscope: set keyboard mode 177,163,169,169
ser.close()
| 37 | 87 | 0.663851 |
5d4e94cca5bcc101399e2e8aec4db86507599854
| 4,839 |
py
|
Python
|
torchaudio/datasets/libritts.py
|
hahaxun/audio
|
87a1886ecfa83b398a2a9a09d9a94bd9dabc5cf5
|
[
"BSD-2-Clause"
] | 1 |
2021-04-20T09:04:24.000Z
|
2021-04-20T09:04:24.000Z
|
torchaudio/datasets/libritts.py
|
hahaxun/audio
|
87a1886ecfa83b398a2a9a09d9a94bd9dabc5cf5
|
[
"BSD-2-Clause"
] | null | null | null |
torchaudio/datasets/libritts.py
|
hahaxun/audio
|
87a1886ecfa83b398a2a9a09d9a94bd9dabc5cf5
|
[
"BSD-2-Clause"
] | null | null | null |
import os
from typing import Tuple
import torchaudio
from torch import Tensor
from torch.utils.data import Dataset
from torchaudio.datasets.utils import (
download_url,
extract_archive,
walk_files,
)
URL = "train-clean-100"
FOLDER_IN_ARCHIVE = "LibriTTS"
_CHECKSUMS = {
"http://www.openslr.org/60/dev-clean.tar.gz": "0c3076c1e5245bb3f0af7d82087ee207",
"http://www.openslr.org/60/dev-other.tar.gz": "815555d8d75995782ac3ccd7f047213d",
"http://www.openslr.org/60/test-clean.tar.gz": "7bed3bdb047c4c197f1ad3bc412db59f",
"http://www.openslr.org/60/test-other.tar.gz": "ae3258249472a13b5abef2a816f733e4",
"http://www.openslr.org/60/train-clean-100.tar.gz": "4a8c202b78fe1bc0c47916a98f3a2ea8",
"http://www.openslr.org/60/train-clean-360.tar.gz": "a84ef10ddade5fd25df69596a2767b2d",
"http://www.openslr.org/60/train-other-500.tar.gz": "7b181dd5ace343a5f38427999684aa6f",
}
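# Hypothetical usage sketch built on the constants above; the
# torchaudio.datasets.LIBRITTS call and the 7-tuple it yields follow
# torchaudio's documented interface, but are assumptions with respect to the
# code shown in this file.
def _example_first_utterance(root="./data"):
    dataset = torchaudio.datasets.LIBRITTS(root, url=URL, download=True)
    waveform, sample_rate, text, norm_text, speaker, chapter, utterance = dataset[0]
    return waveform.shape, sample_rate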
| 32.046358 | 98 | 0.619963 |
5d4ed462393daeadb0c9bc3293879acfa5af3ab3
| 2,164 |
py
|
Python
|
Others/Source/19/19.2/barh_test.py
|
silence0201/Learn-Python
|
662da7c0e74221cedb445ba17d5cb1cd3af41c86
|
[
"MIT"
] | 1 |
2018-05-30T01:38:23.000Z
|
2018-05-30T01:38:23.000Z
|
Others/Source/19/19.2/barh_test.py
|
silence0201/Learn-Python
|
662da7c0e74221cedb445ba17d5cb1cd3af41c86
|
[
"MIT"
] | null | null | null |
Others/Source/19/19.2/barh_test.py
|
silence0201/Learn-Python
|
662da7c0e74221cedb445ba17d5cb1cd3af41c86
|
[
"MIT"
] | null | null | null |
# coding: utf-8
#########################################################################
# : <a href="http://www.crazyit.org">Java</a> #
# author yeeku.H.lee [email protected] #
# #
# version 1.0 #
# #
# Copyright (C), 2001-2018, yeeku.H.Lee #
# #
# This program is protected by copyright laws. #
# #
# Program Name: #
# #
# <br>Date: #
#########################################################################
import matplotlib.pyplot as plt
import numpy as np
# Yearly data for the two series being compared
x_data = ['2011', '2012', '2013', '2014', '2015', '2016', '2017']
y_data = [58000, 60200, 63000, 71000, 84000, 90500, 107000]
y_data2 = [52000, 54200, 51500,58300, 56800, 59500, 62700]
bar_width=0.3
# Y positions for the first series are range(len(x_data)), i.e. 0, 1, 2, ...
plt.barh(y=range(len(x_data)), width=y_data, label='Java',
color='steelblue', alpha=0.8, height=bar_width)
# The second series is drawn at Y positions np.arange(len(x_data))+bar_width,
# i.e. at bar_width, 1+bar_width, 2+bar_width, ..., so the two series sit side by side
plt.barh(y=np.arange(len(x_data))+bar_width, width=y_data2,
label='Android', color='indianred', alpha=0.8, height=bar_width)
# Label each bar with its value; ha/va set the horizontal/vertical alignment of the text
for y, x in enumerate(y_data):
plt.text(x+5000, y-bar_width/2, '%s' % x, ha='center', va='bottom')
for y, x in enumerate(y_data2):
plt.text(x+5000, y+bar_width/2, '%s' % x, ha='center', va='bottom')
# Y tick labels sit midway between each pair of bars
plt.yticks(np.arange(len(x_data))+bar_width/2, x_data)
# Chart title
plt.title("JavaAndroid")
# Axis labels
plt.xlabel("")
plt.ylabel("")
# Legend
plt.legend()
plt.show()
| 46.042553 | 74 | 0.420055 |
5d4ed8a99839b3110a2db17a408cf4dde65b3291
| 2,336 |
py
|
Python
|
03_docker_compose/03_c_simple_case_with_mongodb_orm/app/app.py
|
kendny/study_docker
|
edb376fb69319a78e05f60faa5dcc88d527602c4
|
[
"BSD-2-Clause"
] | 2 |
2019-05-09T01:41:16.000Z
|
2022-01-06T01:06:07.000Z
|
03_docker_compose/03_c_simple_case_with_mongodb_orm/app/app.py
|
walkacross/docker_in_practice
|
da24da76b4fa3eabca5004abd59d7eef7a48988b
|
[
"BSD-2-Clause"
] | null | null | null |
03_docker_compose/03_c_simple_case_with_mongodb_orm/app/app.py
|
walkacross/docker_in_practice
|
da24da76b4fa3eabca5004abd59d7eef7a48988b
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 18 20:13:57 2018
@author: allen
"""
import random, os, json, datetime, time
from flask import Flask, Response
from pymongo import MongoClient
from bson import json_util
app = Flask(__name__)
MONGO_URI = "mongodb://mongodb:27017" # "mongodb:<container_name>:27017"
mongdb_client= MongoClient(MONGO_URI)
random_numbers = mongdb_client.demo.random_numbers
time.sleep(5) # hack for the mongoDb database to get running
######################
##
##########################
from pymodm.connection import connect
from pymongo.write_concern import WriteConcern
from pymodm import MongoModel, fields
# Connect to MongoDB and call the connection "my-app".
connect("mongodb://mongodb:27017/myDatabase", alias="my-app")
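# Illustrative pymodm document model using the imports above; the model and
# field names are assumptions, not part of this app.
class RandomNumber(MongoModel):
    value = fields.IntegerField()
    created_at = fields.DateTimeField()
    class Meta:
        write_concern = WriteConcern(j=True)
        connection_alias = 'my-app'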
if __name__ == "__main__":
port = int(os.environ.get('PORT', 5000))
app.config['DEBUG'] = True
app.run(host='0.0.0.0', port=port)
| 26.545455 | 110 | 0.630137 |
5d4f428d1c149bf1e2a1658ede1f6e9adcddbdd2
| 1,523 |
py
|
Python
|
goethe/eval/analogy_space.py
|
HPI-DeepLearning/wort2vek
|
bc91c2752a8516665d270c7a7a793ec484c970c4
|
[
"MIT"
] | 4 |
2017-05-01T01:02:40.000Z
|
2022-02-03T16:14:19.000Z
|
goethe/eval/analogy_space.py
|
HPI-DeepLearning/wort2vek
|
bc91c2752a8516665d270c7a7a793ec484c970c4
|
[
"MIT"
] | 6 |
2017-04-06T22:10:09.000Z
|
2017-04-06T22:10:57.000Z
|
goethe/eval/analogy_space.py
|
HPI-DeepLearning/wort2vek
|
bc91c2752a8516665d270c7a7a793ec484c970c4
|
[
"MIT"
] | null | null | null |
#! /usr/bin/Python
from gensim.models.keyedvectors import KeyedVectors
from scipy import spatial
from numpy import linalg
import argparse
import sys
vector_file = sys.argv[1]
if len(sys.argv) != 6:
print('arguments wrong!')
print(len(sys.argv))
exit()
else:
words = [sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5]]
print(words)
wvs = KeyedVectors.load_word2vec_format(vector_file, binary=True)
print('WVs loaded.')
for w in words:
if w not in wvs.vocab:
print('out of vocab!')
exit()
#print(wvs.most_similar(positive=[words[1], words[2]], negative=[words[0]], topn=3))
w1 = wvs[words[0]]
w2 = wvs[words[1]]
w3 = wvs[words[2]]
w4 = wvs[words[3]]
m1 = w1 / linalg.norm(w1)
m2 = w2 / linalg.norm(w2)
m3 = w3 / linalg.norm(w3)
m4 = w4 / linalg.norm(w4)
diff1 = w1 - w2
diff2 = w3 - w4
miff1 = m1 - m2
miff2 = m3 - m4
print('-------Word Space---------')
print('to word-4: ', 1-spatial.distance.cosine(m2+m3-m1, m4))
print('to word-3: ', 1-spatial.distance.cosine(m1+m4-m2, m3))
print('to word-2: ', 1-spatial.distance.cosine(m4+m1-m3, m2))
print('to word-1: ', 1-spatial.distance.cosine(m2+m3-m4, m1))
print('------Analogy Space-------')
print(' cosine: ', 1-spatial.distance.cosine(diff1, diff2))
print(' Euclidean: ', 1-linalg.norm(diff1-diff2)/(linalg.norm(diff1)+linalg.norm(diff2)))
print(' M-cosine: ', 1-spatial.distance.cosine(miff1, miff2))
print('M-Euclidean: ', 1-linalg.norm(miff1-miff2)/(linalg.norm(miff1)+linalg.norm(miff2)))
| 27.690909 | 91 | 0.644123 |
5d5195b07f67a7785033de940e7003695bbf2ec4
| 2,497 |
py
|
Python
|
localgraphclustering/algorithms/eig2_nL.py
|
vishalbelsare/LocalGraphClustering
|
a6325350997932d548a876deb259c2387fc2c809
|
[
"MIT"
] | 106 |
2017-09-06T04:47:02.000Z
|
2022-03-30T07:43:27.000Z
|
localgraphclustering/algorithms/eig2_nL.py
|
vishalbelsare/LocalGraphClustering
|
a6325350997932d548a876deb259c2387fc2c809
|
[
"MIT"
] | 51 |
2017-09-06T02:22:09.000Z
|
2021-12-15T11:39:28.000Z
|
localgraphclustering/algorithms/eig2_nL.py
|
vishalbelsare/LocalGraphClustering
|
a6325350997932d548a876deb259c2387fc2c809
|
[
"MIT"
] | 38 |
2017-09-04T21:45:13.000Z
|
2022-01-19T09:48:25.000Z
|
import numpy as np
import scipy as sp
import scipy.sparse.linalg as splinalg
def eig2_nL(g, tol_eigs = 1.0e-6, normalize:bool = True, dim:int=1):
"""
DESCRIPTION
-----------
Computes the eigenvector that corresponds to the second smallest eigenvalue
of the normalized Laplacian matrix then it uses sweep cut to round the solution.
PARAMETERS (mandatory)
----------------------
g: graph object
PARAMETERS (optional)
---------------------
dim: positive, int
default == 1
The number of eigenvectors or dimensions to compute.
tol_eigs: positive float, double
default == 1.0e-6
Tolerance for computation of the eigenvector that corresponds to
the second smallest eigenvalue of the normalized Laplacian matrix.
normalize: bool,
default == True
True if we should return the eigenvectors of the generalized
eigenvalue problem associated with the normalized Laplacian.
This should be on unless you know what you are doing.
RETURNS
------
       p: Eigenvector or eigenvector matrix that
corresponds to the second smallest eigenvalue of the
normalized Laplacian matrix and larger eigenvectors if dim >= 0.
"""
n = g.adjacency_matrix.shape[0]
D_sqrt_neg = sp.sparse.spdiags(g.dn_sqrt.transpose(), 0, n, n)
L = sp.sparse.identity(n) - D_sqrt_neg.dot((g.adjacency_matrix.dot(D_sqrt_neg)))
emb_eig_val, p = splinalg.eigsh(L, which='SM', k=1+dim, tol = tol_eigs)
F = np.real(p[:,1:])
if normalize:
F *= g.dn_sqrt[:,np.newaxis]
return F, emb_eig_val
"""
Random walks and local cuts in graphs, Chung, LAA 2007
We just form the sub-matrix of the Laplacian and use the eigenvector there.
"""
| 33.293333 | 87 | 0.621946 |
5d527097e73751e96803cabcd187b0fd2d52470c
| 1,737 |
py
|
Python
|
build/common/hex2carray.py
|
isabella232/nanos-nonsecure-firmware
|
d1ce2e0e01a8ed6d8840a24308e16f6560a626aa
|
[
"Apache-2.0"
] | 16 |
2018-03-20T11:52:29.000Z
|
2021-02-12T07:39:54.000Z
|
build/common/hex2carray.py
|
LedgerHQ/nanos-nonsecure-firmware
|
d1ce2e0e01a8ed6d8840a24308e16f6560a626aa
|
[
"Apache-2.0"
] | 1 |
2022-03-06T09:56:16.000Z
|
2022-03-06T09:56:16.000Z
|
build/common/hex2carray.py
|
isabella232/nanos-nonsecure-firmware
|
d1ce2e0e01a8ed6d8840a24308e16f6560a626aa
|
[
"Apache-2.0"
] | 7 |
2017-08-24T00:42:09.000Z
|
2022-03-06T09:51:51.000Z
|
"""
*******************************************************************************
* Ledger Blue
* (c) 2016 Ledger
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
********************************************************************************
"""
from ledgerblue.hexParser import IntelHexParser
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--hex", help="Hex file to be converted as a C array")
args = parser.parse_args()
if args.hex == None:
raise Exception("Missing hex filename to sign")
parser = IntelHexParser(args.hex)
for a in parser.getAreas():
if (len(a.data) > 0x10000):
raise BaseException("data must be splitted in chunks of 64k")
print "0x" + hexU8(a.start >> 24) + ", 0x" + hexU8(a.start >> 16) + ", 0x" + hexU8(a.start >> 8) + ", 0x" + hexU8(a.start) + ", "
print "0x" + hexU8(len(a.data) >> 24) + ", 0x" + hexU8(len(a.data) >> 16) + ", 0x" + hexU8(len(a.data) >> 8) + ", 0x" + hexU8(len(a.data)) + ", "
# low @ to high @
offset = 0
while offset < len(a.data):
string = ""
for i in range(8):
if offset+i < len(a.data):
string += " 0x" + hexU8(a.data[offset+i]) + ","
print string
offset+=8
| 31.581818 | 146 | 0.599885 |
5d52775ef423ec088ebd9b5618d6a0b7639f157e
| 2,418 |
py
|
Python
|
setup.py
|
xames3/vdoxa
|
8fa945449bb34447ded0c421214c0252ff523d4a
|
[
"Apache-2.0"
] | 1 |
2020-02-04T08:18:54.000Z
|
2020-02-04T08:18:54.000Z
|
setup.py
|
xames3/vdoxa
|
8fa945449bb34447ded0c421214c0252ff523d4a
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
xames3/vdoxa
|
8fa945449bb34447ded0c421214c0252ff523d4a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 XAMES3. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ======================================================================
"""
vdoXA is an open-source python package for trimming the videos.
It is built as a subsystem for < XXXXX Not to be named XXXXX > project.
Originally inspired by my colleague's work, I thought of improving the
concept and build a tool to simplify the process. I hope it comes with
strong support for continuous updates, reliable functions and overall
ease of use.
Read complete documentation at: <https://github.com/xames3/vdoxa>.
"""
from setuptools import find_packages, setup
from vdoxa.vars import dev
doclines = __doc__.split('\n')
def use_readme() -> str:
"""Use `README.md` for parsing long description."""
with open('README.md') as file:
return file.read()
with open('requirements.txt', 'r') as requirements:
required_packages = [package.rstrip() for package in requirements]
setup(
name=dev.PROJECT_NAME,
version=dev.PROJECT_VERSION,
url=dev.PROJECT_LINK,
download_url=dev.PROJECT_LINK,
author=dev.AUTHOR,
author_email=dev.AUTHOR_EMAIL,
maintainer=dev.AUTHOR,
maintainer_email=dev.AUTHOR_EMAIL,
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
],
license=dev.PROJECT_LICENSE,
description=f'{doclines[1]}',
long_description=use_readme(),
long_description_content_type='text/markdown',
keywords='opencv2 cv2 moviepy',
zip_safe=False,
install_requires=required_packages,
python_requires='~=3.6',
include_package_data=True,
packages=find_packages(),
entry_points={
'console_scripts': [
'vdoxa = vdoxa.parser:main',
],
}
)
| 31 | 72 | 0.715054 |
5d52a5f4ab272695a4c951a2d0a2e0909bf0ed0b
| 1,413 |
py
|
Python
|
application/modules/login.py
|
BaggerFast/Simple_votings
|
843769fa6fd2c04feb542e6b301b7b4810260d4e
|
[
"MIT"
] | null | null | null |
application/modules/login.py
|
BaggerFast/Simple_votings
|
843769fa6fd2c04feb542e6b301b7b4810260d4e
|
[
"MIT"
] | null | null | null |
application/modules/login.py
|
BaggerFast/Simple_votings
|
843769fa6fd2c04feb542e6b301b7b4810260d4e
|
[
"MIT"
] | null | null | null |
from django.contrib import messages
from django.contrib.auth import login, authenticate
from django.shortcuts import render, redirect
from django.urls import reverse
from django.views import View
from application.forms import AuthenticateForm
from application.views import get_navbar, Page
| 36.230769 | 95 | 0.640481 |
5d52ada1ae418220d17ef038d3cc8e85cc6253d2
| 2,938 |
py
|
Python
|
little_questions/utils/log.py
|
HelloChatterbox/little_questions
|
04bee86244b42fdaed9f8d010c2f83037ad753f6
|
[
"MIT"
] | null | null | null |
little_questions/utils/log.py
|
HelloChatterbox/little_questions
|
04bee86244b42fdaed9f8d010c2f83037ad753f6
|
[
"MIT"
] | null | null | null |
little_questions/utils/log.py
|
HelloChatterbox/little_questions
|
04bee86244b42fdaed9f8d010c2f83037ad753f6
|
[
"MIT"
] | null | null | null |
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import inspect
import logging
import sys
| 28.803922 | 74 | 0.596664 |
5d53556c82d1a27255c1497656b5efc347cde76d
| 1,035 |
py
|
Python
|
alipay/aop/api/response/AlipayOpenMiniVersionAuditApplyResponse.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/response/AlipayOpenMiniVersionAuditApplyResponse.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/response/AlipayOpenMiniVersionAuditApplyResponse.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
| 28.75 | 112 | 0.699517 |
5d53e848dc1be11f4d81bb7ffe655fc1c2f327c3
| 1,923 |
py
|
Python
|
cvstudio/view/widgets/loading_dialog/loading_dialog.py
|
haruiz/PytorchCvStudio
|
ccf79dd0cc0d61f3fd01b1b5d96f7cda7b681eef
|
[
"MIT"
] | 32 |
2019-10-31T03:10:52.000Z
|
2020-12-23T11:50:53.000Z
|
cvstudio/view/widgets/loading_dialog/loading_dialog.py
|
haruiz/CvStudio
|
ccf79dd0cc0d61f3fd01b1b5d96f7cda7b681eef
|
[
"MIT"
] | 19 |
2019-10-31T15:06:05.000Z
|
2020-06-15T02:21:55.000Z
|
cvstudio/view/widgets/loading_dialog/loading_dialog.py
|
haruiz/PytorchCvStudio
|
ccf79dd0cc0d61f3fd01b1b5d96f7cda7b681eef
|
[
"MIT"
] | 8 |
2019-10-31T03:32:50.000Z
|
2020-07-17T20:47:37.000Z
|
import os
from PyQt5 import QtCore
from PyQt5.QtCore import QRect, QPoint
from PyQt5.QtGui import QMovie, QCloseEvent, QShowEvent
from PyQt5.QtWidgets import QDialog, QLabel, QVBoxLayout, QApplication, QWidget
| 36.283019 | 95 | 0.637546 |
5d546fd247cbdfbb018dec6e3f4e3273ffdefdb8
| 3,115 |
py
|
Python
|
pysnmp-with-texts/MWORKS-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 8 |
2019-05-09T17:04:00.000Z
|
2021-06-09T06:50:51.000Z
|
pysnmp-with-texts/MWORKS-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 4 |
2019-05-31T16:42:59.000Z
|
2020-01-31T21:57:17.000Z
|
pysnmp-with-texts/MWORKS-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10 |
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module MWORKS-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/MWORKS-MIB
# Produced by pysmi-0.3.4 at Wed May 1 14:16:04 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ConstraintsUnion, ValueSizeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ValueSizeConstraint", "ConstraintsIntersection")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Gauge32, Unsigned32, ObjectIdentity, IpAddress, Bits, MibIdentifier, Integer32, enterprises, ModuleIdentity, TimeTicks, Counter32, NotificationType, iso, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "Unsigned32", "ObjectIdentity", "IpAddress", "Bits", "MibIdentifier", "Integer32", "enterprises", "ModuleIdentity", "TimeTicks", "Counter32", "NotificationType", "iso", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
tecElite = MibIdentifier((1, 3, 6, 1, 4, 1, 217))
meterWorks = MibIdentifier((1, 3, 6, 1, 4, 1, 217, 16))
mw501 = MibIdentifier((1, 3, 6, 1, 4, 1, 217, 16, 1))
mwMem = MibIdentifier((1, 3, 6, 1, 4, 1, 217, 16, 1, 1))
mwHeap = MibIdentifier((1, 3, 6, 1, 4, 1, 217, 16, 1, 2))
mwMemCeiling = MibScalar((1, 3, 6, 1, 4, 1, 217, 16, 1, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mwMemCeiling.setStatus('mandatory')
if mibBuilder.loadTexts: mwMemCeiling.setDescription('bytes of memory the agent memory manager will allow the agent to use.')
mwMemUsed = MibScalar((1, 3, 6, 1, 4, 1, 217, 16, 1, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mwMemUsed.setStatus('mandatory')
if mibBuilder.loadTexts: mwMemUsed.setDescription("bytes of memory that meterworks has malloc'ed. some of this may be in free pools.")
mwHeapTotal = MibScalar((1, 3, 6, 1, 4, 1, 217, 16, 1, 2, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mwHeapTotal.setStatus('mandatory')
if mibBuilder.loadTexts: mwHeapTotal.setDescription('bytes of memory given to the heap manager.')
mwHeapUsed = MibScalar((1, 3, 6, 1, 4, 1, 217, 16, 1, 2, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mwHeapUsed.setStatus('mandatory')
if mibBuilder.loadTexts: mwHeapUsed.setDescription('bytes of available memory in the heap.')
mibBuilder.exportSymbols("MWORKS-MIB", mwHeap=mwHeap, mwHeapUsed=mwHeapUsed, mwMemCeiling=mwMemCeiling, meterWorks=meterWorks, tecElite=tecElite, mwMem=mwMem, mw501=mw501, mwHeapTotal=mwHeapTotal, mwMemUsed=mwMemUsed)
| 97.34375 | 505 | 0.759551 |
5d54ea522a32fa91aca889c9606f036f2de763c3
| 3,935 |
py
|
Python
|
Assets/Editor/PostprocessBuildPlayer_MpireNxusMeasurementPostBuildiOS.py
|
mpire-nxus/nxus_unity_sdk
|
34a1ebfc588c47c1c71fae11f29e82c1172c6dc2
|
[
"MIT"
] | 1 |
2018-03-13T02:44:15.000Z
|
2018-03-13T02:44:15.000Z
|
Assets/Editor/PostprocessBuildPlayer_MpireNxusMeasurementPostBuildiOS.py
|
mpire-nxus/nxus_unity_sdk
|
34a1ebfc588c47c1c71fae11f29e82c1172c6dc2
|
[
"MIT"
] | null | null | null |
Assets/Editor/PostprocessBuildPlayer_MpireNxusMeasurementPostBuildiOS.py
|
mpire-nxus/nxus_unity_sdk
|
34a1ebfc588c47c1c71fae11f29e82c1172c6dc2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import sys
import re
from subprocess import Popen, PIPE
import argparse
from pbxproj import XcodeProject, TreeType
from pbxproj import FileOptions
if __name__ == "__main__":
main()
| 38.578431 | 164 | 0.706226 |
5d553e6733970b4280761ad4ec3ddb284ae1146d
| 1,382 |
py
|
Python
|
vars_in_python.py
|
klyusba/python-quiz
|
9f469417458f8ba6b21f9507cc860ca4547ea67b
|
[
"MIT"
] | null | null | null |
vars_in_python.py
|
klyusba/python-quiz
|
9f469417458f8ba6b21f9507cc860ca4547ea67b
|
[
"MIT"
] | null | null | null |
vars_in_python.py
|
klyusba/python-quiz
|
9f469417458f8ba6b21f9507cc860ca4547ea67b
|
[
"MIT"
] | null | null | null |
# == 1 ==
bar = [1, 2]
print(foo(bar))
# == 2 ==
bar = [1, 2]
print(foo(bar))
# == 3 ==
bar = [1, 2]
print(foo())
# == 4 ==
bar = [1, 2]
print(foo(bar), bar)
# == 5 ==
bar = [1, 2]
print(foo(bar), bar)
# == 6 ==
try:
bar = 1 / 0
print(bar)
except ZeroDivisionError as bar:
print(bar)
print(bar)
# == 7 ==
bar = [1, 2]
print(list(bar for bar in bar))
print(bar)
# == 8 ==
bar = [1, 2]
f = lambda: sum(bar)
print(f())
bar = [1, 2, 3, ]
print(f())
# == 9 ==
bar = [1, 2]
f = foo(bar)
print(f())
bar = [1, 2, 3, ]
print(f())
# == 10 ==
bar = [1, 2]
foo = []
for i in bar:
foo.append(lambda: i)
print([f() for f in foo])
# == 11 ==
bar = [1, 2]
foo = [
lambda: i
for i in bar
]
print(list(f() for f in foo))
# == 12 ==
bar = [1, 2]
foo = [
lambda: i
for i in bar
]
print(list(f() for f in foo))
bar = [1, 2, 3, ]
print(list(f() for f in foo))
bar[:] = [1, 2, 3, ]
print(list(f() for f in foo))
# == 13 ==
bar = [1, 2]
foo = [
lambda i=i: i
for i in bar
]
print(list(f() for f in foo))
| 11.145161 | 32 | 0.469609 |
5d55a06354d86f35af5fb38858161328b7581a23
| 10,786 |
py
|
Python
|
hack/dev/gh-replay-events.py
|
sm43/pipelines-as-code
|
bd21e48c96ab128d533701ecd1a2df7a0d136d65
|
[
"Apache-2.0"
] | null | null | null |
hack/dev/gh-replay-events.py
|
sm43/pipelines-as-code
|
bd21e48c96ab128d533701ecd1a2df7a0d136d65
|
[
"Apache-2.0"
] | null | null | null |
hack/dev/gh-replay-events.py
|
sm43/pipelines-as-code
|
bd21e48c96ab128d533701ecd1a2df7a0d136d65
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: Chmouel Boudjnah <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# See README.md for documentation
import typing
import argparse
import base64
import hashlib
import hmac
import json
import os
import subprocess
import sys
import time
import requests
import ghapp_token
NAMESPACE = "pipelines-as-code"
SECRET_NAME = "pipelines-as-code-secret"
ELNAME = "pipelines-as-code"
EXPIRE_MINUTES_AS_SECONDS = (
int(os.environ.get("GITHUBAPP_TOKEN_EXPIRATION_MINUTES", 10)) * 60
)
if __name__ == "__main__":
main(parse_args())
| 30.555241 | 113 | 0.623215 |
5d579c372853402ecfd7e953a09a9d04c6d7c725
| 114 |
py
|
Python
|
nintendeals/noa/api/__init__.py
|
Pooroomoo/nintendeals
|
993f4d159ff405ed82cd2bb023c7b75d921d0acb
|
[
"MIT"
] | 37 |
2020-04-30T13:48:02.000Z
|
2022-03-09T04:55:54.000Z
|
nintendeals/noa/api/__init__.py
|
Pooroomoo/nintendeals
|
993f4d159ff405ed82cd2bb023c7b75d921d0acb
|
[
"MIT"
] | 4 |
2020-05-09T03:17:44.000Z
|
2021-04-28T00:53:55.000Z
|
nintendeals/noa/api/__init__.py
|
Pooroomoo/nintendeals
|
993f4d159ff405ed82cd2bb023c7b75d921d0acb
|
[
"MIT"
] | 5 |
2020-07-22T06:42:27.000Z
|
2022-02-07T22:35:57.000Z
|
from .algolia import search_by_nsuid
from .algolia import search_by_platform
from .algolia import search_by_query
| 28.5 | 39 | 0.868421 |
5d58e721508a643ec9487a7f661ca1a66cd5a971
| 3,659 |
py
|
Python
|
076_Minimum_Window_Substring.py
|
joshlyman/Josh-LeetCode
|
cc9e2cc406d2cbd5a90ee579efbcaeffb842c5ed
|
[
"MIT"
] | null | null | null |
076_Minimum_Window_Substring.py
|
joshlyman/Josh-LeetCode
|
cc9e2cc406d2cbd5a90ee579efbcaeffb842c5ed
|
[
"MIT"
] | null | null | null |
076_Minimum_Window_Substring.py
|
joshlyman/Josh-LeetCode
|
cc9e2cc406d2cbd5a90ee579efbcaeffb842c5ed
|
[
"MIT"
] | null | null | null |
# Other solution
# V2
# Time: O(|S|+|T|)
# Space:O(|S|+|T|)
# Refer from:
# https://leetcode.com/problems/minimum-window-substring/solution/
# Sliding Window
# We start with two pointers, left and right, initially pointing to the first element of the string S.
# We use the right pointer to expand the window until we get a desirable window i.e. a window that contains all of the characters of T.
# Once we have a window with all the characters, we can move the left pointer ahead one by one. If the window is still a desirable one we keep on updating the minimum window size.
# If the window is not desirable any more, we repeat step 2 onwards.
# The current window is s[i:j] and the result window is s[I:J]. In need[c] I store how many times I
# need character c (can be negative) and missing tells how many characters are still missing.
# In the loop, first add the new character to the window. Then, if nothing is missing,
# remove as much as possible from the window start and then update the result.
# Time: O(|S|+|T|)
# Space:O(|S|+|T|)
# Optimized Sliding Window
# A small improvement to the above approach can reduce the time complexity of the algorithm to O(2*|filtered_S| + |S| + |T|),
# where filtered(S) is the string formed from S by removing all the elements not present in T
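# A compact sketch of the sliding-window idea described above (an illustration
# of the standard approach; not this file's original solution code).
import collections
def min_window(s, t):
    need = collections.Counter(t)        # chars still needed (counts can go negative)
    missing = len(t)                     # how many required characters are still missing
    i = best_i = best_j = 0
    for j, c in enumerate(s, 1):         # current window is s[i:j]
        if need[c] > 0:
            missing -= 1
        need[c] -= 1
        if missing == 0:                 # window contains all of t
            while i < j and need[s[i]] < 0:   # drop surplus characters from the left
                need[s[i]] += 1
                i += 1
            if best_j == 0 or j - i < best_j - best_i:
                best_i, best_j = i, j    # smaller desirable window found
    return s[best_i:best_j]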
| 36.959596 | 179 | 0.52774 |
5d5c6de0926f1a98ed21db39c4944a17b7f61725
| 823 |
py
|
Python
|
home/migrations/0002_auto_20171017_0412.py
|
Taywee/amberherbert.com
|
6bf384d7cdf18dc613252fe4dde38545150eabbc
|
[
"MIT"
] | null | null | null |
home/migrations/0002_auto_20171017_0412.py
|
Taywee/amberherbert.com
|
6bf384d7cdf18dc613252fe4dde38545150eabbc
|
[
"MIT"
] | 2 |
2017-10-15T20:36:59.000Z
|
2017-10-17T05:27:49.000Z
|
home/migrations/0002_auto_20171017_0412.py
|
Taywee/amberherbert.com
|
6bf384d7cdf18dc613252fe4dde38545150eabbc
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-17 04:12
from __future__ import unicode_literals
from django.db import migrations
import wagtail.core.blocks
import wagtail.core.fields
| 35.782609 | 384 | 0.684083 |
5d5caf5c5d1415de34379e45359c322cac37e6ff
| 2,766 |
py
|
Python
|
lib/adv_model.py
|
chawins/entangle-rep
|
3e9e0d6e7536b0de0e35d7f8717f2ccc8e887759
|
[
"MIT"
] | 15 |
2019-06-30T12:30:17.000Z
|
2021-12-07T20:20:36.000Z
|
lib/adv_model.py
|
chawins/entangle-rep
|
3e9e0d6e7536b0de0e35d7f8717f2ccc8e887759
|
[
"MIT"
] | 2 |
2020-06-11T10:10:52.000Z
|
2021-12-21T08:50:33.000Z
|
lib/adv_model.py
|
chawins/entangle-rep
|
3e9e0d6e7536b0de0e35d7f8717f2ccc8e887759
|
[
"MIT"
] | 9 |
2019-07-09T14:52:30.000Z
|
2020-10-27T19:18:34.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
| 35.012658 | 78 | 0.578453 |
5d5d633b271390741583d9b310e4391f6dfe899f
| 4,673 |
py
|
Python
|
fs_image/rpm/storage/tests/storage_base_test.py
|
singhaditya28/fs_image
|
3d122da48eab8b26e5add6754cc1f91296139c58
|
[
"MIT"
] | null | null | null |
fs_image/rpm/storage/tests/storage_base_test.py
|
singhaditya28/fs_image
|
3d122da48eab8b26e5add6754cc1f91296139c58
|
[
"MIT"
] | null | null | null |
fs_image/rpm/storage/tests/storage_base_test.py
|
singhaditya28/fs_image
|
3d122da48eab8b26e5add6754cc1f91296139c58
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from unittest.mock import patch, MagicMock
from typing import List, Tuple
from .. import Storage # Module import to ensure we get plugins
| 41.723214 | 79 | 0.558742 |
5d5d8bde571d6e8d8f2723242cd35348a71ff40f
| 8,457 |
py
|
Python
|
sdk/python/pulumi_gcp/kms/get_kms_crypto_key_version.py
|
sisisin/pulumi-gcp
|
af6681d70ea457843409110c1324817fe55f68ad
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_gcp/kms/get_kms_crypto_key_version.py
|
sisisin/pulumi-gcp
|
af6681d70ea457843409110c1324817fe55f68ad
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_gcp/kms/get_kms_crypto_key_version.py
|
sisisin/pulumi-gcp
|
af6681d70ea457843409110c1324817fe55f68ad
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetKMSCryptoKeyVersionResult',
'AwaitableGetKMSCryptoKeyVersionResult',
'get_kms_crypto_key_version',
'get_kms_crypto_key_version_output',
]
def get_kms_crypto_key_version(crypto_key: Optional[str] = None,
version: Optional[int] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetKMSCryptoKeyVersionResult:
"""
Provides access to a Google Cloud Platform KMS CryptoKeyVersion. For more information see
[the official documentation](https://cloud.google.com/kms/docs/object-hierarchy#key_version)
and
[API](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions).
A CryptoKeyVersion represents an individual cryptographic key, and the associated key material.
## Example Usage
```python
import pulumi
import pulumi_gcp as gcp
my_key_ring = gcp.kms.get_kms_key_ring(name="my-key-ring",
location="us-central1")
my_crypto_key = gcp.kms.get_kms_crypto_key(name="my-crypto-key",
key_ring=my_key_ring.id)
my_crypto_key_version = gcp.kms.get_kms_crypto_key_version(crypto_key=data["google_kms_key"]["my_key"]["id"])
```
:param str crypto_key: The `self_link` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the
`kms.CryptoKey` resource/datasource.
:param int version: The version number for this CryptoKeyVersion. Defaults to `1`.
"""
__args__ = dict()
__args__['cryptoKey'] = crypto_key
__args__['version'] = version
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('gcp:kms/getKMSCryptoKeyVersion:getKMSCryptoKeyVersion', __args__, opts=opts, typ=GetKMSCryptoKeyVersionResult).value
return AwaitableGetKMSCryptoKeyVersionResult(
algorithm=__ret__.algorithm,
crypto_key=__ret__.crypto_key,
id=__ret__.id,
name=__ret__.name,
protection_level=__ret__.protection_level,
public_keys=__ret__.public_keys,
state=__ret__.state,
version=__ret__.version)
| 41.253659 | 247 | 0.684522 |
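A minimal follow-on sketch for the data source above: the lookup calls and the `.id` fields come from the docstring example in the excerpt, and the result fields mirror the `GetKMSCryptoKeyVersionResult` constructor call; exporting them is just one plausible way to consume the values, not part of the original module.
import pulumi
import pulumi_gcp as gcp

my_key_ring = gcp.kms.get_kms_key_ring(name="my-key-ring", location="us-central1")
my_crypto_key = gcp.kms.get_kms_crypto_key(name="my-crypto-key", key_ring=my_key_ring.id)
key_version = gcp.kms.get_kms_crypto_key_version(crypto_key=my_crypto_key.id, version=1)

# Fields mirrored from the GetKMSCryptoKeyVersionResult construction in the excerpt.
pulumi.export("kms_algorithm", key_version.algorithm)
pulumi.export("kms_state", key_version.state)
pulumi.export("kms_public_keys", key_version.public_keys)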
5d5e98fd28e904d5e1f509a5b35e66ec3047cc56
| 353 |
py
|
Python
|
lecture11/subsets.py
|
nd-cse-30872-fa20/cse-30872-fa20-examples
|
7a991a0499e03bf91ac8ba40c99245d5d926e20c
|
[
"MIT"
] | null | null | null |
lecture11/subsets.py
|
nd-cse-30872-fa20/cse-30872-fa20-examples
|
7a991a0499e03bf91ac8ba40c99245d5d926e20c
|
[
"MIT"
] | null | null | null |
lecture11/subsets.py
|
nd-cse-30872-fa20/cse-30872-fa20-examples
|
7a991a0499e03bf91ac8ba40c99245d5d926e20c
|
[
"MIT"
] | 2 |
2020-08-10T15:05:39.000Z
|
2020-08-12T15:16:01.000Z
|
#!/usr/bin/env python3
import itertools
# Constants
NUMBERS = range(0, 10)
# Main Execution
if __name__ == '__main__':
main()
| 16.045455 | 62 | 0.580737 |
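The content above ends before `main()` is defined; a plausible completion, consistent with the `itertools` import and the `NUMBERS` constant, simply enumerates every subset (a sketch, not the original course solution):
def main():
    # Print every subset of NUMBERS, smallest first, using itertools.combinations.
    for size in range(len(NUMBERS) + 1):
        for subset in itertools.combinations(NUMBERS, size):
            print(subset)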
5d5ec924b9a968b7509ed5badaef99e8de842bde
| 21,520 |
py
|
Python
|
src/toil/batchSystems/abstractBatchSystem.py
|
Hexotical/toil
|
312b6e1f221ee7f7f187dd6dbfce1aecffd00e09
|
[
"Apache-2.0"
] | 348 |
2018-07-08T03:38:28.000Z
|
2022-03-11T18:57:44.000Z
|
src/toil/batchSystems/abstractBatchSystem.py
|
Hexotical/toil
|
312b6e1f221ee7f7f187dd6dbfce1aecffd00e09
|
[
"Apache-2.0"
] | 1,700 |
2018-07-05T18:28:49.000Z
|
2022-03-31T14:09:04.000Z
|
src/toil/batchSystems/abstractBatchSystem.py
|
Hexotical/toil
|
312b6e1f221ee7f7f187dd6dbfce1aecffd00e09
|
[
"Apache-2.0"
] | 126 |
2018-07-11T18:59:29.000Z
|
2022-01-24T03:14:02.000Z
|
# Copyright (C) 2015-2021 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
import logging
import os
import shutil
from abc import ABC, abstractmethod
from argparse import ArgumentParser, _ArgumentGroup
from contextlib import contextmanager
from typing import (Any,
Callable,
ContextManager,
Dict,
Iterator,
List,
Optional,
Tuple,
Type,
TypeVar,
Union,
NamedTuple)
from toil.common import Toil, cacheDirName, Config
from toil.deferred import DeferredFunctionManager
from toil.fileStores.abstractFileStore import AbstractFileStore
from toil.job import JobDescription
from toil.resource import Resource
logger = logging.getLogger(__name__)
# Value to use as exitStatus in UpdatedBatchJobInfo.exitStatus when status is not available.
EXIT_STATUS_UNAVAILABLE_VALUE = 255
# Information required for worker cleanup on shutdown of the batch system.
# FIXME: Return value should be a set (then also fix the tests)
def getSchedulingStatusMessage(self) -> Optional[str]:
"""
Get a log message fragment for the user about anything that might be
going wrong in the batch system, if available.
If no useful message is available, return None.
This can be used to report what resource is the limiting factor when
scheduling jobs, for example. If the leader thinks the workflow is
stuck, the message can be displayed to the user to help them diagnose
why it might be stuck.
:return: User-directed message about scheduling state.
"""
# Default implementation returns None.
# Override to provide scheduling status information.
return None
def setEnv(self, name: str, value: Optional[str] = None) -> None:
"""
Set an environment variable for the worker process before it is launched. The worker
process will typically inherit the environment of the machine it is running on but this
method makes it possible to override specific variables in that inherited environment
before the worker is launched. Note that this mechanism is different to the one used by
the worker internally to set up the environment of a job. A call to this method affects
all jobs issued after this method returns. Note to implementors: This means that you
would typically need to copy the variables before enqueuing a job.
If no value is provided it will be looked up from the current environment.
"""
raise NotImplementedError()
OptionType = TypeVar('OptionType')
def getWorkerContexts(self) -> List[ContextManager[Any]]:
"""
Get a list of picklable context manager objects to wrap worker work in,
in order.
Can be used to ask the Toil worker to do things in-process (such as
configuring environment variables, hot-deploying user scripts, or
cleaning up a node) that would otherwise require a wrapping "executor"
process.
"""
return []
class BatchSystemSupport(AbstractBatchSystem):
"""
Partial implementation of AbstractBatchSystem, support methods.
"""
def __init__(self, config: Config, maxCores: float, maxMemory: int, maxDisk: int) -> None:
"""
Initializes initial state of the object
:param toil.common.Config config: object is setup by the toilSetup script and
has configuration parameters for the jobtree. You can add code
to that script to get parameters for your batch system.
:param float maxCores: the maximum number of cores the batch system can
request for any one job
:param int maxMemory: the maximum amount of memory the batch system can
request for any one job, in bytes
:param int maxDisk: the maximum amount of disk space the batch system can
request for any one job, in bytes
"""
super().__init__()
self.config = config
self.maxCores = maxCores
self.maxMemory = maxMemory
self.maxDisk = maxDisk
self.environment: Dict[str, str] = {}
self.workerCleanupInfo = WorkerCleanupInfo(workDir=self.config.workDir,
workflowID=self.config.workflowID,
cleanWorkDir=self.config.cleanWorkDir)
def checkResourceRequest(self, memory: int, cores: float, disk: int, job_name: str = '', detail: str = '') -> None:
"""
Check resource request is not greater than that available or allowed.
:param int memory: amount of memory being requested, in bytes
:param float cores: number of cores being requested
:param int disk: amount of disk space being requested, in bytes
:param str job_name: Name of the job being checked, for generating a useful error report.
:param str detail: Batch-system-specific message to include in the error.
:raise InsufficientSystemResources: raised when a resource is requested in an amount
greater than allowed
"""
batch_system = self.__class__.__name__ or 'this batch system'
for resource, requested, available in [('cores', cores, self.maxCores),
('memory', memory, self.maxMemory),
('disk', disk, self.maxDisk)]:
assert requested is not None
if requested > available:
unit = 'bytes of ' if resource in ('disk', 'memory') else ''
R = f'The job {job_name} is r' if job_name else 'R'
if resource == 'disk':
msg = (f'{R}equesting {requested} {unit}{resource} for temporary space, '
f'more than the maximum of {available} {unit}{resource} of free space on '
f'{self.config.workDir} that {batch_system} was configured with, or enforced '
f'by --max{resource.capitalize()}. Try setting/changing the toil option '
f'"--workDir" or changing the base temporary directory by setting TMPDIR.')
else:
msg = (f'{R}equesting {requested} {unit}{resource}, more than the maximum of '
f'{available} {unit}{resource} that {batch_system} was configured with, '
f'or enforced by --max{resource.capitalize()}.')
if detail:
msg += detail
raise InsufficientSystemResources(msg)
def setEnv(self, name: str, value: Optional[str] = None) -> None:
"""
Set an environment variable for the worker process before it is launched. The worker
process will typically inherit the environment of the machine it is running on but this
method makes it possible to override specific variables in that inherited environment
before the worker is launched. Note that this mechanism is different to the one used by
the worker internally to set up the environment of a job. A call to this method affects
all jobs issued after this method returns. Note to implementors: This means that you
would typically need to copy the variables before enqueuing a job.
If no value is provided it will be looked up from the current environment.
:param str name: the environment variable to be set on the worker.
:param str value: if given, the environment variable given by name will be set to this value.
if None, the variable's current value will be used as the value on the worker
:raise RuntimeError: if value is None and the name cannot be found in the environment
"""
if value is None:
try:
value = os.environ[name]
except KeyError:
raise RuntimeError(f"{name} does not exist in current environment")
self.environment[name] = value
def formatStdOutErrPath(self, toil_job_id: int, cluster_job_id: str, std: str) -> str:
"""
Format path for batch system standard output/error and other files
generated by the batch system itself.
Files will be written to the Toil work directory (which may
be on a shared file system) with names containing both the Toil and
batch system job IDs, for ease of debugging job failures.
:param: int toil_job_id : The unique id that Toil gives a job.
:param: cluster_job_id : What the cluster, for example, GridEngine, uses as its internal job id.
:param: string std : The provenance of the stream (for example: 'err' for 'stderr' or 'out' for 'stdout')
:rtype: string : Formatted filename; however if self.config.noStdOutErr is true,
returns '/dev/null' or equivalent.
"""
if self.config.noStdOutErr:
return os.devnull
fileName: str = f'toil_{self.config.workflowID}.{toil_job_id}.{cluster_job_id}.{std}.log'
workDir: str = Toil.getToilWorkDir(self.config.workDir)
return os.path.join(workDir, fileName)
| 42.613861 | 188 | 0.660037 |
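A tiny illustration of the log-file naming that `formatStdOutErrPath` builds in the excerpt; only the `toil_<workflow>.<toil job>.<cluster job>.<stream>.log` pattern comes from the code, the IDs below are invented.
workflow_id = "8a41f2"       # hypothetical workflow ID
toil_job_id = 42             # hypothetical Toil job ID
cluster_job_id = "sge-7781"  # hypothetical batch-system job ID
file_name = f"toil_{workflow_id}.{toil_job_id}.{cluster_job_id}.err.log"
print(file_name)  # toil_8a41f2.42.sge-7781.err.log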
5d6066cbcb225097d2a844447d53060bd3350b74
| 1,969 |
py
|
Python
|
demo/other_demo.py
|
Heartfilia/lite_tools
|
b3432ba7cb60502ac64d45e23022e20555fb1588
|
[
"MIT"
] | 5 |
2021-05-10T07:35:47.000Z
|
2022-03-07T01:31:12.000Z
|
demo/other_demo.py
|
Heartfilia/lite_tools
|
b3432ba7cb60502ac64d45e23022e20555fb1588
|
[
"MIT"
] | null | null | null |
demo/other_demo.py
|
Heartfilia/lite_tools
|
b3432ba7cb60502ac64d45e23022e20555fb1588
|
[
"MIT"
] | 1 |
2022-03-03T03:23:26.000Z
|
2022-03-03T03:23:26.000Z
|
# -*- coding: utf-8 -*-
from lite_tools import get_md5, get_sha, get_sha3, get_b64e, get_b64d
# about hashlib ==> get_md5, get_sha, get_sha3 || default mode=256
s = "test_information" #
print(get_md5(s)) # 5414ffd88fcb58417e64ecec51bb3a6b
print(get_md5(s, upper=True)) # 5414FFD88FCB58417E64ECEC51BB3A6B
print(get_md5(s, to_bin=True)) # b'T\x14\xff\xd8\x8f\xcbXA~d\xec\xecQ\xbb:k' #
print(get_sha(s)) # d09869fdf901465c8566f0e2debfa3f6a3d878a8157e199c7c4c6dd755617f33
print(get_sha(s, to_bin=True)) # b'\xd0\x98i\xfd\xf9\x01F\\\x85f\xf0\xe2\xde\xbf\xa3\xf6\xa3\xd8x\xa8\x15~\x19\x9c|Lm\xd7Ua\x7f3'
print(get_sha(s, mode=1)) # ada5dfdf0c9a76a84958310b838a70b6fd6d01f6 # default mode=256 // mode: 1 224 256 384 512
print(get_sha3(s)) # 9c539ca35c6719f546e67837ff37fe7791e53fe40715cd4da0167c78c9adc2e8
print(get_sha3(s, to_bin=True)) # b'\x9cS\x9c\xa3\\g\x19\xf5F\xe6x7\xff7\xfew\x91\xe5?\xe4\x07\x15\xcdM\xa0\x16|x\xc9\xad\xc2\xe8'
print(get_sha3(s, mode=1)) # return "" // SUPPORT: sha3_224 sha3_256 sha3_384 sha3_512// only need inputting: 224 256 384 512 # default mode=256 // mode: 224 256 384 512
print(get_sha3(s, mode=384)) # 95c09e20a139843eae877a64cd95d6a629b3c9ff383b5460557aab2612682d4228d05fe41606a79acf5ae1c4de35160c
# about base64 ==> get_b64e, get_b64d
res_b64_encode = get_b64e(s)
print(res_b64_encode) # dGVzdF9pbmZvcm1hdGlvbg==
res_b64_bin = get_b64e(s, to_bin=True)
print(res_b64_bin) # b'dGVzdF9pbmZvcm1hdGlvbg=='
res_b32_encode = get_b64e(s, mode=32) # default mode=64 // mode: 16 32 64 85
print(res_b32_encode) # ORSXG5C7NFXGM33SNVQXI2LPNY======
res_b64_decode = get_b64d(res_b64_encode)
print(res_b64_decode) # test_information
res_b32_decode = get_b64d(res_b32_encode, mode=32) # default mode=64 // mode: 16 32 64 85
print(res_b32_decode) # test_information
| 57.911765 | 176 | 0.718639 |
5d61828a9a51cb5ce1865c213ffd2c5903a688a4
| 47 |
py
|
Python
|
prereise/gather/solardata/tests/__init__.py
|
terrywqf/PreREISE
|
f8052dd37091eaa15024725d5c92a3ef0ee311ee
|
[
"MIT"
] | null | null | null |
prereise/gather/solardata/tests/__init__.py
|
terrywqf/PreREISE
|
f8052dd37091eaa15024725d5c92a3ef0ee311ee
|
[
"MIT"
] | null | null | null |
prereise/gather/solardata/tests/__init__.py
|
terrywqf/PreREISE
|
f8052dd37091eaa15024725d5c92a3ef0ee311ee
|
[
"MIT"
] | null | null | null |
__all__ = ["mock_pv_info", "test_pv_tracking"]
| 23.5 | 46 | 0.744681 |
5d61d078db118ba78d2af9ae995c1fa84aa2f450
| 2,306 |
py
|
Python
|
arfit/cp_utils.py
|
farr/arfit
|
7ff6def331ef98f43f623da2d9867d1ac967448b
|
[
"MIT"
] | 5 |
2015-04-29T21:46:52.000Z
|
2021-05-13T04:59:23.000Z
|
arfit/cp_utils.py
|
afcarl/arfit
|
7ff6def331ef98f43f623da2d9867d1ac967448b
|
[
"MIT"
] | null | null | null |
arfit/cp_utils.py
|
afcarl/arfit
|
7ff6def331ef98f43f623da2d9867d1ac967448b
|
[
"MIT"
] | 2 |
2015-12-03T12:08:32.000Z
|
2018-05-26T16:20:31.000Z
|
import carmcmc as cm
from gatspy.periodic import LombScargleFast
import matplotlib.pyplot as plt
import numpy as np
| 37.193548 | 131 | 0.691674 |
5d65143322eb65bf3b9638c414999e21eb0323db
| 1,319 |
py
|
Python
|
pdiffcopy/hashing.py
|
xolox/python-pdiffcopy
|
ed765af92c0c0823818d545e61384753912a5725
|
[
"MIT"
] | 5 |
2020-03-07T00:01:24.000Z
|
2020-12-03T03:44:26.000Z
|
pdiffcopy/hashing.py
|
xolox/python-pdiffcopy
|
ed765af92c0c0823818d545e61384753912a5725
|
[
"MIT"
] | null | null | null |
pdiffcopy/hashing.py
|
xolox/python-pdiffcopy
|
ed765af92c0c0823818d545e61384753912a5725
|
[
"MIT"
] | null | null | null |
# Fast large file synchronization inspired by rsync.
#
# Author: Peter Odding <[email protected]>
# Last Change: March 6, 2020
# URL: https://pdiffcopy.readthedocs.io
"""Parallel hashing of files using :mod:`multiprocessing` and :mod:`pdiffcopy.mp`."""
# Standard library modules.
import functools
import hashlib
import os
# External dependencies.
from six.moves import range
# Modules included in our package.
from pdiffcopy.mp import WorkerPool
# Public identifiers that require documentation.
__all__ = ("compute_hashes", "hash_worker")
def hash_worker(offset, block_size, filename, method):
"""Worker function to be run in child processes."""
with open(filename, "rb") as handle:
handle.seek(offset)
context = hashlib.new(method)
context.update(handle.read(block_size))
return offset, context.hexdigest()
| 31.404762 | 106 | 0.720243 |
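`compute_hashes` is exported in `__all__` but its body falls outside this excerpt; the sketch below shows one way the `hash_worker` defined above could be fanned out, using the standard library's multiprocessing pool as a stand-in for the package's own `WorkerPool`, whose interface is not shown here.
import functools
import multiprocessing
import os

def compute_hashes_sketch(filename, block_size=1024 * 1024, method="sha1"):
    """Hash `filename` in `block_size` chunks in parallel (illustrative only)."""
    offsets = range(0, os.path.getsize(filename), block_size)
    # Reuses the module-level hash_worker shown above, which returns (offset, hexdigest).
    worker = functools.partial(hash_worker, block_size=block_size,
                               filename=filename, method=method)
    with multiprocessing.Pool() as pool:
        return dict(sorted(pool.map(worker, offsets)))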
5d65c01cd0ad11126b7931d199f6927d742a24e8
| 2,170 |
py
|
Python
|
pyscf/nao/test/test_0003_na2_nao.py
|
robert-anderson/pyscf
|
cdc56e168cb15f47e8cdc791a92d689fa9b655af
|
[
"Apache-2.0"
] | 3 |
2021-02-28T00:52:53.000Z
|
2021-03-01T06:23:33.000Z
|
pyscf/nao/test/test_0003_na2_nao.py
|
robert-anderson/pyscf
|
cdc56e168cb15f47e8cdc791a92d689fa9b655af
|
[
"Apache-2.0"
] | 36 |
2018-08-22T19:44:03.000Z
|
2020-05-09T10:02:36.000Z
|
pyscf/nao/test/test_0003_na2_nao.py
|
robert-anderson/pyscf
|
cdc56e168cb15f47e8cdc791a92d689fa9b655af
|
[
"Apache-2.0"
] | 4 |
2018-02-14T16:28:28.000Z
|
2019-08-12T16:40:30.000Z
|
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
from pyscf.nao.m_siesta_utils import get_siesta_command, get_pseudo
if __name__ == "__main__": unittest.main()
| 31.449275 | 87 | 0.657604 |
5d65f544314984a2fee60579b78ec312b1835ccc
| 491 |
py
|
Python
|
tests/moz_library/rental_books_test.py
|
mozkzki/moz-library
|
fb925414405a9fcba8bb7194cf983ba18c920e2f
|
[
"MIT"
] | null | null | null |
tests/moz_library/rental_books_test.py
|
mozkzki/moz-library
|
fb925414405a9fcba8bb7194cf983ba18c920e2f
|
[
"MIT"
] | 35 |
2021-10-09T13:08:33.000Z
|
2022-03-29T14:26:59.000Z
|
tests/moz_library/rental_books_test.py
|
mozkzki/moz-library
|
fb925414405a9fcba8bb7194cf983ba18c920e2f
|
[
"MIT"
] | null | null | null |
import pytest
from moz_library.rental_books import RentalBooks
| 27.277778 | 64 | 0.737271 |
5d66ab6b71d371d38fa50d90c8734a50bf50ee30
| 2,625 |
py
|
Python
|
examples/src/Charts/MultiCategoryChart.py
|
aspose-slides/Aspose.Slides-for-Python-via-.NET
|
c55ad5c71f942598f1e67e22a52cbcd1cb286467
|
[
"MIT"
] | null | null | null |
examples/src/Charts/MultiCategoryChart.py
|
aspose-slides/Aspose.Slides-for-Python-via-.NET
|
c55ad5c71f942598f1e67e22a52cbcd1cb286467
|
[
"MIT"
] | null | null | null |
examples/src/Charts/MultiCategoryChart.py
|
aspose-slides/Aspose.Slides-for-Python-via-.NET
|
c55ad5c71f942598f1e67e22a52cbcd1cb286467
|
[
"MIT"
] | null | null | null |
import aspose.pydrawing as drawing
import aspose.slides as slides
| 51.470588 | 106 | 0.708952 |
5d66c8be8ed85e591a27c5733a7d2e134250bc39
| 9,393 |
py
|
Python
|
netbox/extras/forms/filtersets.py
|
cybarox/netbox
|
ea197eff5f4fe925bb354d1375912decd81752bd
|
[
"Apache-2.0"
] | null | null | null |
netbox/extras/forms/filtersets.py
|
cybarox/netbox
|
ea197eff5f4fe925bb354d1375912decd81752bd
|
[
"Apache-2.0"
] | null | null | null |
netbox/extras/forms/filtersets.py
|
cybarox/netbox
|
ea197eff5f4fe925bb354d1375912decd81752bd
|
[
"Apache-2.0"
] | null | null | null |
from django import forms
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import gettext as _
from dcim.models import DeviceRole, DeviceType, Platform, Region, Site, SiteGroup
from extras.choices import *
from extras.models import *
from extras.utils import FeatureQuery
from netbox.forms.base import NetBoxModelFilterSetForm
from tenancy.models import Tenant, TenantGroup
from utilities.forms import (
add_blank_choice, APISelectMultiple, BOOLEAN_WITH_BLANK_CHOICES, ContentTypeChoiceField,
ContentTypeMultipleChoiceField, DateTimePicker, DynamicModelMultipleChoiceField, FilterForm, MultipleChoiceField,
StaticSelect, TagFilterField,
)
from virtualization.models import Cluster, ClusterGroup, ClusterType
__all__ = (
'ConfigContextFilterForm',
'CustomFieldFilterForm',
'CustomLinkFilterForm',
'ExportTemplateFilterForm',
'JournalEntryFilterForm',
'LocalConfigContextFilterForm',
'ObjectChangeFilterForm',
'TagFilterForm',
'WebhookFilterForm',
)
| 29.261682 | 117 | 0.639519 |
5d66ef032fbd2dcf091b5ffde482a5d596613146
| 1,940 |
py
|
Python
|
bin/write2cly.py
|
docdiesel/smartmetertools
|
3b7449c7a9069696af078631aa5440f53d0f57bc
|
[
"MIT"
] | 1 |
2019-05-30T08:28:31.000Z
|
2019-05-30T08:28:31.000Z
|
bin/write2cly.py
|
docdiesel/smartmetertools
|
3b7449c7a9069696af078631aa5440f53d0f57bc
|
[
"MIT"
] | null | null | null |
bin/write2cly.py
|
docdiesel/smartmetertools
|
3b7449c7a9069696af078631aa5440f53d0f57bc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
## write2cly.py - reads json (generated by sml_reader.py) from stdin
## - writes values to Corlysis time series InfluxDB
##
## Writes data from smart meter to time series database (InfluxDB)
## at Corlysis.com [1]. You need to configure your database and token
## in the config section.
##
## [1] https://corlysis.com/
##==== license section ========
## This code is under MIT License: Copyright (C) 2019 Bernd Knnen
## License details see https://choosealicense.com/licenses/mit/
##==== config section ========
# define corlysis settings here - set db and token at least
cly_base_url = 'https://corlysis.com:8086/write'
cly_parameters = {
"db": "energy",
"u" : "token",
"p" : "placeyourtokenhere",
"precision": "ms"}
# assign readable field names
config = {
"1.8.0": "Bezug",
"2.8.0": "Einspeisung",
"16.7.0": "Wirkleistung"
}
##==== code section ==== no need to change lines below ====
##-- import libraries
import json, sys, requests
import time
# load json from stdin
try:
myjson = json.load(sys.stdin)
except:
sys.stderr.write('!! error loading json')
exit(1)
# decode json
try:
line = "meter_data "
# add each meter value to line
for obis in myjson['data']:
key = config[obis] # set human readable field name
value = myjson['data'][obis] # get value from smart meter
line += key + '=' + str(value) + ',' # add key=value to insert line
# cut off last comma
line = line[:-1]
# add timestamp as unix timestamp in ms
line += ' ' + str(int(time.time()*1000)) #+ '\n'
# post data into time series database; http response should be 204
r = requests.post(cly_base_url, params=cly_parameters, data=line)
    if r.status_code != 204:
        sys.stderr.write(str(r.status_code) + '\n')
        sys.stderr.write(r.content.decode(errors='replace'))
# catch a missing or malformed 'data' block in the decoded json
except:
    sys.stderr.write('!! error: no valid data block in json')
exit(2)
| 25.526316 | 71 | 0.652062 |
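For reference, with the configured field names the script expects stdin shaped like the first line below and posts an InfluxDB line-protocol string like the second (the readings and the timestamp are invented):
{"data": {"1.8.0": 1234.5, "2.8.0": 678.9, "16.7.0": 350}}
meter_data Bezug=1234.5,Einspeisung=678.9,Wirkleistung=350 1559300000000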
5d672137217c3f190c65f38cc034a58e8ab7815b
| 1,440 |
py
|
Python
|
dns/rdtypes/ANY/__init__.py
|
Ashiq5/dnspython
|
5449af5318d88bada34f661247f3bcb16f58f057
|
[
"ISC"
] | null | null | null |
dns/rdtypes/ANY/__init__.py
|
Ashiq5/dnspython
|
5449af5318d88bada34f661247f3bcb16f58f057
|
[
"ISC"
] | null | null | null |
dns/rdtypes/ANY/__init__.py
|
Ashiq5/dnspython
|
5449af5318d88bada34f661247f3bcb16f58f057
|
[
"ISC"
] | null | null | null |
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Class ANY (generic) rdata type classes."""
__all__ = [
'AFSDB',
'AMTRELAY',
'AVC',
'CAA',
'CDNSKEY',
'CDS',
'CERT',
'CNAME',
'CSYNC',
'DLV',
'DNAME',
'DNSKEY',
'DS',
'EUI48',
'EUI64',
'GPOS',
'HINFO',
'HIP',
'ISDN',
'LOC',
'MX',
'NINFO',
'NS',
'NSEC',
'NSEC3',
'NSEC3PARAM',
'OPENPGPKEY',
'OPT',
'PTR',
'RP',
'RRSIG',
'RT',
'SMIMEA',
'SOA',
'SPF',
'SSHFP',
'TKEY',
'TLSA',
'TSIG',
'TXT',
'URI',
'X25',
]
| 22.5 | 75 | 0.620833 |
5d67812696614c7eac2050cda2d994e16e9201d7
| 10,519 |
py
|
Python
|
01_test.py
|
KhubbatulinMark/DCase2020-Task-2-on-Wigner-Ville-transform
|
6653d7abbaafe09fb17768d9902bb77db24945d4
|
[
"MIT"
] | 3 |
2020-09-18T10:33:37.000Z
|
2020-11-04T12:53:50.000Z
|
01_test.py
|
KhubbatulinMark/DCase2020-Task-2-on-Wigner-Ville-transform
|
6653d7abbaafe09fb17768d9902bb77db24945d4
|
[
"MIT"
] | 4 |
2020-09-26T01:07:55.000Z
|
2022-02-10T01:30:27.000Z
|
01_test.py
|
KhubbatulinMark/DCase2020-Task-2-on-Wigner-Ville-transform
|
6653d7abbaafe09fb17768d9902bb77db24945d4
|
[
"MIT"
] | null | null | null |
"""
@file 01_test.py
@brief Script for test
@author Toshiki Nakamura, Yuki Nikaido, and Yohei Kawaguchi (Hitachi Ltd.)
Copyright (C) 2020 Hitachi, Ltd. All right reserved.
"""
########################################################################
# import default python-library
########################################################################
import os
import glob
import csv
import re
import itertools
import sys
########################################################################
########################################################################
# import additional python-library
########################################################################
import numpy
# from import
from tqdm import tqdm
from sklearn import metrics
# original lib
import common as com
import keras_model
########################################################################
########################################################################
# load parameter.yaml
########################################################################
param = com.yaml_load()
#######################################################################
########################################################################
# def
########################################################################
def get_machine_id_list_for_test(target_dir,
dir_name="test",
ext="json"):
"""
target_dir : str
base directory path of "dev_data" or "eval_data"
    dir_name : str (default="test")
        directory containing test data
    ext : str (default="json")
        file extension of the data files
return :
machine_id_list : list [ str ]
list of machine IDs extracted from the names of test files
"""
# create test files
dir_path = os.path.abspath("{dir}/{dir_name}/*.{ext}".format(dir=target_dir, dir_name=dir_name, ext=ext))
file_paths = sorted(glob.glob(dir_path))
# extract id
machine_id_list = sorted(list(set(itertools.chain.from_iterable(
[re.findall('id_[0-9][0-9]', ext_id) for ext_id in file_paths]))))
return machine_id_list
def test_file_list_generator(target_dir,
id_name,
dir_name="test",
prefix_normal="normal",
prefix_anomaly="anomaly",
ext="json"):
"""
target_dir : str
base directory path of the dev_data or eval_data
id_name : str
id of wav file in <<test_dir_name>> directory
dir_name : str (default="test")
directory containing test data
prefix_normal : str (default="normal")
normal directory name
prefix_anomaly : str (default="anomaly")
anomaly directory name
    ext : str (default="json")
        file extension of the data files
return :
if the mode is "development":
test_files : list [ str ]
file list for test
test_labels : list [ boolean ]
label info. list for test
* normal/anomaly = 0/1
if the mode is "evaluation":
test_files : list [ str ]
file list for test
"""
com.logger.info("target_dir : {}".format(target_dir+"_"+id_name))
# development
if mode:
normal_files = sorted(
glob.glob("{dir}/{dir_name}/{prefix_normal}_{id_name}*.{ext}".format(dir=target_dir,
dir_name=dir_name,
prefix_normal=prefix_normal,
id_name=id_name,
ext=ext)))
normal_labels = numpy.zeros(len(normal_files))
anomaly_files = sorted(
glob.glob("{dir}/{dir_name}/{prefix_anomaly}_{id_name}*.{ext}".format(dir=target_dir,
dir_name=dir_name,
prefix_anomaly=prefix_anomaly,
id_name=id_name,
ext=ext)))
anomaly_labels = numpy.ones(len(anomaly_files))
files = numpy.concatenate((normal_files, anomaly_files), axis=0)
labels = numpy.concatenate((normal_labels, anomaly_labels), axis=0)
com.logger.info("test_file num : {num}".format(num=len(files)))
if len(files) == 0:
com.logger.exception("no_wav_file!!")
print("\n========================================")
# evaluation
else:
files = sorted(
glob.glob("{dir}/{dir_name}/*{id_name}*.{ext}".format(dir=target_dir,
dir_name=dir_name,
id_name=id_name,
ext=ext)))
labels = None
com.logger.info("test_file num : {num}".format(num=len(files)))
if len(files) == 0:
com.logger.exception("no_wav_file!!")
print("\n=========================================")
return files, labels
########################################################################
########################################################################
# main 01_test.py
########################################################################
if __name__ == "__main__":
# check mode
# "development": mode == True
# "evaluation": mode == False
mode = com.command_line_chk()
if mode is None:
sys.exit(-1)
# make output result directory
os.makedirs(param["result_directory"], exist_ok=True)
# load base directory
dirs = com.select_dirs(param=param, mode=mode)
# initialize lines in csv for AUC and pAUC
csv_lines = []
# loop of the base directory
for idx, target_dir in enumerate(dirs):
print("\n===========================")
print("[{idx}/{total}] {dirname}".format(dirname=target_dir, idx=idx+1, total=len(dirs)))
machine_type = os.path.split(target_dir)[1]
print("============== MODEL LOAD ==============")
# set model path
model_file = "{model}/model_{machine_type}.hdf5".format(model=param["model_directory"],
machine_type=machine_type)
# load model file
if not os.path.exists(model_file):
com.logger.error("{} model not found ".format(machine_type))
sys.exit(-1)
model = keras_model.load_model(model_file)
model.summary()
if mode:
# results by type
csv_lines.append([machine_type])
csv_lines.append(["id", "AUC", "pAUC"])
performance = []
machine_id_list = get_machine_id_list_for_test(target_dir)
print(machine_id_list)
for id_str in machine_id_list:
# load test file
test_files, y_true = test_file_list_generator(target_dir, id_str)
# setup anomaly score file path
anomaly_score_csv = "{result}/anomaly_score_{machine_type}_{id_str}.csv".format(
result=param["result_directory"],
machine_type=machine_type,
id_str=id_str)
anomaly_score_list = []
print("\n============== BEGIN TEST FOR A MACHINE ID ==============")
y_pred = [0. for k in test_files]
for file_idx, file_path in tqdm(enumerate(test_files), total=len(test_files)):
try:
data = com.file_to_vector_array(file_path,
n_mels=param["feature"]["n_mels"],
frames=param["feature"]["frames"],
n_fft=param["feature"]["n_fft"],
hop_length=param["feature"]["hop_length"],
power=param["feature"]["power"])
errors = numpy.mean(numpy.square(data - model.predict(data)), axis=1)
y_pred[file_idx] = numpy.mean(errors)
anomaly_score_list.append([os.path.basename(file_path), y_pred[file_idx]])
except:
com.logger.error("file broken!!: {}".format(file_path))
# save anomaly score
save_csv(save_file_path=anomaly_score_csv, save_data=anomaly_score_list)
com.logger.info("anomaly score result -> {}".format(anomaly_score_csv))
if mode:
# append AUC and pAUC to lists
auc = metrics.roc_auc_score(y_true, y_pred)
p_auc = metrics.roc_auc_score(y_true, y_pred, max_fpr=param["max_fpr"])
csv_lines.append([id_str.split("_", 1)[1], auc, p_auc])
performance.append([auc, p_auc])
com.logger.info("AUC : {}".format(auc))
com.logger.info("pAUC : {}".format(p_auc))
print("\n============ END OF TEST FOR A MACHINE ID ============")
if mode:
# calculate averages for AUCs and pAUCs
averaged_performance = numpy.mean(numpy.array(performance, dtype=float), axis=0)
csv_lines.append(["Average"] + list(averaged_performance))
csv_lines.append([])
if mode:
# output results
result_path = "{result}/{file_name}".format(result=param["result_directory"], file_name=param["result_file"])
com.logger.info("AUC and pAUC results -> {}".format(result_path))
save_csv(save_file_path=result_path, save_data=csv_lines)
| 42.587045 | 118 | 0.456412 |
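`save_csv` is called above but defined outside this excerpt; given the `csv` import at the top of the script and how the call sites use it, the helper presumably looks something like this (a sketch, not the original):
def save_csv(save_file_path, save_data):
    # Write the collected rows (anomaly scores or AUC/pAUC lines) to disk.
    with open(save_file_path, "w", newline="") as f:
        csv.writer(f, lineterminator="\n").writerows(save_data)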
5d68067044bd41e0c94f3b4e115e6a6243c834c1
| 1,247 |
py
|
Python
|
src/text_split/split.py
|
i1123581321/word_split
|
6401cdc37f58aa8718793dd7cb9bf4d3a4b690a4
|
[
"MIT"
] | null | null | null |
src/text_split/split.py
|
i1123581321/word_split
|
6401cdc37f58aa8718793dd7cb9bf4d3a4b690a4
|
[
"MIT"
] | null | null | null |
src/text_split/split.py
|
i1123581321/word_split
|
6401cdc37f58aa8718793dd7cb9bf4d3a4b690a4
|
[
"MIT"
] | null | null | null |
import argparse
import os
parser = argparse.ArgumentParser(description="a simple parser")
parser.add_argument("filename", type=str)
parser.add_argument("lineno", nargs="+", type=int)
parser.add_argument("--same_length", action=argparse.BooleanOptionalAction)
| 30.414634 | 77 | 0.585405 |
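The excerpt ends before the arguments are parsed; invoking the parser defined above would behave as follows (note that `argparse.BooleanOptionalAction` needs Python 3.9+):
args = parser.parse_args(["notes.txt", "10", "25", "--same_length"])
print(args.filename)     # 'notes.txt'
print(args.lineno)       # [10, 25]
print(args.same_length)  # True; --no-same_length would give False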
5d68e4a866db0948470d395c5ba6d5ad5676d177
| 23,037 |
py
|
Python
|
src/sentry/models/event.py
|
Ali-Tahir/sentry
|
aa7b306c5ea671ac002a3524982563679557cb31
|
[
"BSD-3-Clause"
] | null | null | null |
src/sentry/models/event.py
|
Ali-Tahir/sentry
|
aa7b306c5ea671ac002a3524982563679557cb31
|
[
"BSD-3-Clause"
] | null | null | null |
src/sentry/models/event.py
|
Ali-Tahir/sentry
|
aa7b306c5ea671ac002a3524982563679557cb31
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
import six
import string
import warnings
import pytz
from collections import OrderedDict
from dateutil.parser import parse as parse_date
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from hashlib import md5
from semaphore.processing import StoreNormalizer
from sentry import eventtypes
from sentry.db.models import (
BoundedBigIntegerField,
BoundedIntegerField,
Model,
NodeData,
NodeField,
sane_repr,
)
from sentry.db.models.manager import EventManager
from sentry.interfaces.base import get_interfaces
from sentry.utils import json
from sentry.utils.cache import memoize
from sentry.utils.canonical import CanonicalKeyDict, CanonicalKeyView
from sentry.utils.safe import get_path
from sentry.utils.strings import truncatechars
def get_interface(self, name):
return self.interfaces.get(name)
def get_legacy_message(self):
# TODO(mitsuhiko): remove this code once it's unused. It's still
# being used by plugin code and once the message rename is through
# plugins should instead swithc to the actual message attribute or
# this method could return what currently is real_message.
return (
get_path(self.data, "logentry", "formatted")
or get_path(self.data, "logentry", "message")
or self.message
)
def get_event_type(self):
"""
Return the type of this event.
See ``sentry.eventtypes``.
"""
return self.data.get("type", "default")
def get_event_metadata(self):
"""
Return the metadata of this event.
See ``sentry.eventtypes``.
"""
# For some inexplicable reason we have some cases where the data
# is completely empty. In that case we want to hobble along
# further.
return self.data.get("metadata") or {}
def get_grouping_config(self):
"""Returns the event grouping config."""
from sentry.grouping.api import get_grouping_config_dict_for_event_data
return get_grouping_config_dict_for_event_data(self.data, self.project)
def get_hashes(self, force_config=None):
"""
Returns the calculated hashes for the event. This uses the stored
information if available. Grouping hashes will take into account
fingerprinting and checksums.
"""
# If we have hashes stored in the data we use them, otherwise we
# fall back to generating new ones from the data. We can only use
# this if we do not force a different config.
if force_config is None:
hashes = self.data.get("hashes")
if hashes is not None:
return hashes
return filter(
None, [x.get_hash() for x in self.get_grouping_variants(force_config).values()]
)
def get_grouping_variants(self, force_config=None, normalize_stacktraces=False):
"""
This is similar to `get_hashes` but will instead return the
grouping components for each variant in a dictionary.
If `normalize_stacktraces` is set to `True` then the event data will be
modified for `in_app` in addition to event variants being created. This
means that after calling that function the event data has been modified
in place.
"""
from sentry.grouping.api import get_grouping_variants_for_event, load_grouping_config
from sentry.stacktraces.processing import normalize_stacktraces_for_grouping
# Forcing configs has two separate modes. One is where just the
# config ID is given in which case it's merged with the stored or
# default config dictionary
if force_config is not None:
if isinstance(force_config, six.string_types):
stored_config = self.get_grouping_config()
config = dict(stored_config)
config["id"] = force_config
else:
config = force_config
# Otherwise we just use the same grouping config as stored. if
# this is None the `get_grouping_variants_for_event` will fill in
# the default.
else:
config = self.data.get("grouping_config")
config = load_grouping_config(config)
if normalize_stacktraces:
normalize_stacktraces_for_grouping(self.data, config)
return get_grouping_variants_for_event(self, config)
def get_primary_hash(self):
# TODO: This *might* need to be protected from an IndexError?
return self.get_hashes()[0]
# For compatibility, still used by plugins.
def get_tags(self):
return self.tags
def get_tag(self, key):
for t, v in self.get_tags():
if t == key:
return v
return None
def get_raw_data(self):
"""Returns the internal raw event data dict."""
return dict(self.data.items())
def get_email_subject(self):
template = self.project.get_option("mail:subject_template")
if template:
template = EventSubjectTemplate(template)
else:
template = DEFAULT_SUBJECT_TEMPLATE
return truncatechars(template.safe_substitute(EventSubjectTemplateData(self)), 128).encode(
"utf-8"
)
def get_environment(self):
from sentry.models import Environment
if not hasattr(self, "_environment_cache"):
self._environment_cache = Environment.objects.get(
organization_id=self.project.organization_id,
name=Environment.get_name_or_default(self.get_tag("environment")),
)
return self._environment_cache
def get_minimal_user(self):
"""
A minimal 'User' interface object that gives us enough information
to render a user badge.
"""
return self.get_interface("user")
def as_dict(self):
"""Returns the data in normalized form for external consumers."""
# We use a OrderedDict to keep elements ordered for a potential JSON serializer
data = OrderedDict()
data["event_id"] = self.event_id
data["project"] = self.project_id
data["release"] = self.release
data["dist"] = self.dist
data["platform"] = self.platform
data["message"] = self.real_message
data["datetime"] = self.datetime
data["time_spent"] = self.time_spent
data["tags"] = [(k.split("sentry:", 1)[-1], v) for (k, v) in self.tags]
for k, v in sorted(six.iteritems(self.data)):
if k in data:
continue
if k == "sdk":
v = {v_k: v_v for v_k, v_v in six.iteritems(v) if v_k != "client_ip"}
data[k] = v
# for a long time culprit was not persisted. In those cases put
# the culprit in from the group.
if data.get("culprit") is None and self.group_id:
data["culprit"] = self.group.culprit
# Override title and location with dynamically generated data
data["title"] = self.title
data["location"] = self.location
return data
# ============================================
# DEPRECATED
# ============================================
# deprecated accessors
def error(self): # TODO why is this not a property?
warnings.warn("Event.error is deprecated, use Event.title", DeprecationWarning)
return self.title
error.short_description = _("error")
class Event(EventCommon, Model):
"""
An event backed by data stored in postgres.
"""
__core__ = False
group_id = BoundedBigIntegerField(blank=True, null=True)
event_id = models.CharField(max_length=32, null=True, db_column="message_id")
project_id = BoundedBigIntegerField(blank=True, null=True)
message = models.TextField()
platform = models.CharField(max_length=64, null=True)
datetime = models.DateTimeField(default=timezone.now, db_index=True)
time_spent = BoundedIntegerField(null=True)
data = NodeField(
blank=True,
null=True,
ref_func=lambda x: x.project_id or x.project.id,
ref_version=2,
wrapper=EventDict,
)
objects = EventManager()
__repr__ = sane_repr("project_id", "group_id")
class EventSubjectTemplate(string.Template):
idpattern = r"(tag:)?[_a-z][_a-z0-9]*"
class EventSubjectTemplateData(object):
tag_aliases = {"release": "sentry:release", "dist": "sentry:dist", "user": "sentry:user"}
DEFAULT_SUBJECT_TEMPLATE = EventSubjectTemplate("$shortID - $title")
| 33.778592 | 99 | 0.623996 |
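A small illustration of the subject templates at the end of the excerpt: `EventSubjectTemplateData` (not shown here) is what normally resolves `$shortID`, `$title` and `$tag:*` names, so a plain dict stands in for it below.
data = {"shortID": "PROJ-12", "title": "TypeError in checkout", "tag:environment": "prod"}
print(DEFAULT_SUBJECT_TEMPLATE.safe_substitute(data))
# PROJ-12 - TypeError in checkout
print(EventSubjectTemplate("[$tag:environment] $title").safe_substitute(data))
# [prod] TypeError in checkout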
5d6b89b1e8e521a2e81232c6e63ef4c5529270e8
| 2,920 |
py
|
Python
|
Assignment3/src/data/make_nowcast_dataset.py
|
shikashyam/BigDataSystemsCoursework
|
d7f9cabbfb18b0e3303292b65af1ffd530e24ccc
|
[
"MIT"
] | null | null | null |
Assignment3/src/data/make_nowcast_dataset.py
|
shikashyam/BigDataSystemsCoursework
|
d7f9cabbfb18b0e3303292b65af1ffd530e24ccc
|
[
"MIT"
] | null | null | null |
Assignment3/src/data/make_nowcast_dataset.py
|
shikashyam/BigDataSystemsCoursework
|
d7f9cabbfb18b0e3303292b65af1ffd530e24ccc
|
[
"MIT"
] | 4 |
2022-02-12T23:59:54.000Z
|
2022-02-16T22:53:32.000Z
|
"""
Makes training and test dataset for nowcasting model using SEVIR
"""
# -*- coding: utf-8 -*-
import argparse
import logging
import os
import h5py
os.environ["HDF5_USE_FILE_LOCKING"]='FALSE'
import sys
import numpy as np
import tensorflow as tf
from nowcast_generator import get_nowcast_test_generator
# parser = argparse.ArgumentParser(description='Make nowcast training & test datasets using SEVIR')
# parser.add_argument('--sevir_data', type=str, help='location of SEVIR dataset',default='../../data/sevir')
# parser.add_argument('--sevir_catalog', type=str, help='location of SEVIR dataset',default='../../data/CATALOG.csv')
# parser.add_argument('--output_location', type=str, help='location of SEVIR dataset',default='../../data/interim')
# parser.add_argument('--n_chunks', type=int, help='Number of chucks to use (increase if memory limited)',default=10)
#args = parser.parse_args()
def generate_data(sevir_location,sevir_catalog,output_location,n_chunks=10):
"""
Runs data processing scripts to extract training set from SEVIR
"""
logger = logging.getLogger(__name__)
logger.info('making final data set from raw data')
#trn_generator = get_nowcast_train_generator(sevir_catalog=args.sevir_catalog,sevir_location=args.sevir_data)
tst_generator = get_nowcast_test_generator(sevir_catalog,sevir_location)
    #logger.info('Reading/writing training data to %s' % ('%s/nowcast_training.h5' % args.output_location))
#read_write_chunks('%s/nowcast_training.h5' % args.output_location,trn_generator,args.n_chunks)
logger.info('Reading/writing testing data to ' + output_location+'/nowcast_testing.h5')
read_write_chunks(output_location+'/nowcast_testing.h5',tst_generator,n_chunks)
| 42.941176 | 117 | 0.7 |
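`read_write_chunks` is used above but defined elsewhere; a rough sketch of what it has to do (drain the SEVIR generator and append batches to an HDF5 file) is given below. The `IN`/`OUT` dataset names and the assumption that the generator yields `(x, y)` numpy batches are guesses, and the real helper additionally splits the work into `n_chunks` pieces to bound memory.
import h5py

def read_write_chunks_sketch(filename, generator, n_chunks):
    with h5py.File(filename, "w") as hf:
        for i, (x, y) in enumerate(generator):
            if i == 0:
                # First batch creates resizable datasets.
                hf.create_dataset("IN", data=x, maxshape=(None,) + x.shape[1:])
                hf.create_dataset("OUT", data=y, maxshape=(None,) + y.shape[1:])
            else:
                # Later batches are appended along the first axis.
                for name, arr in (("IN", x), ("OUT", y)):
                    hf[name].resize(hf[name].shape[0] + arr.shape[0], axis=0)
                    hf[name][-arr.shape[0]:] = arr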
5d6e1f190b9f10fc581499ca4a914cfa2670ffb2
| 9,576 |
py
|
Python
|
blender-plugin/import_cast.py
|
rtasan/ApexCastImporter
|
17f833ab8ff9757e295ca8eadb0cb210bfdd6476
|
[
"MIT"
] | null | null | null |
blender-plugin/import_cast.py
|
rtasan/ApexCastImporter
|
17f833ab8ff9757e295ca8eadb0cb210bfdd6476
|
[
"MIT"
] | 3 |
2021-05-24T12:29:43.000Z
|
2021-05-28T13:07:39.000Z
|
blender-plugin/import_cast.py
|
rtasan/ApexCastImporter
|
17f833ab8ff9757e295ca8eadb0cb210bfdd6476
|
[
"MIT"
] | null | null | null |
# The Original importer was created by Nick
# Copyright (c) 2020 Nick
import bpy
import bmesh
import os
import array
import math
from mathutils import *
from bpy_extras.image_utils import load_image
from .cast import Cast, Model, Animation, Curve, NotificationTrack, Mesh, Skeleton, Bone, Material, File
| 33.957447 | 130 | 0.631788 |
5d6e47beb4576bf2e083ccdcb792c2e2830c83c4
| 50,279 |
py
|
Python
|
user_program/usb4vc_ui.py
|
dekuNukem/USB4VC
|
66c4f0b4a4acd7cec6654ea0dd4da026edf5d24c
|
[
"MIT"
] | 78 |
2022-02-07T16:48:11.000Z
|
2022-03-31T12:25:35.000Z
|
user_program/usb4vc_ui.py
|
dekuNukem/USB4VC
|
66c4f0b4a4acd7cec6654ea0dd4da026edf5d24c
|
[
"MIT"
] | 1 |
2022-02-26T20:16:08.000Z
|
2022-02-26T20:24:04.000Z
|
user_program/usb4vc_ui.py
|
dekuNukem/USB4VC
|
66c4f0b4a4acd7cec6654ea0dd4da026edf5d24c
|
[
"MIT"
] | 1 |
2022-02-24T03:34:15.000Z
|
2022-02-24T03:34:15.000Z
|
# https://luma-oled.readthedocs.io/en/latest/software.html
import os
import sys
import time
import threading
import usb4vc_oled
from luma.core.render import canvas
import RPi.GPIO as GPIO
import usb4vc_usb_scan
import usb4vc_shared
import usb4vc_show_ev
import usb4vc_check_update
import json
import subprocess
from subprocess import Popen, PIPE
from usb4vc_shared import this_app_dir_path
from usb4vc_shared import config_dir_path
from usb4vc_shared import firmware_dir_path
from usb4vc_shared import temp_dir_path
from usb4vc_shared import ensure_dir
from usb4vc_shared import i2c_bootloader_pbid
from usb4vc_shared import usb_bootloader_pbid
config_file_path = os.path.join(config_dir_path, 'config.json')
ensure_dir(this_app_dir_path)
ensure_dir(config_dir_path)
ensure_dir(firmware_dir_path)
ensure_dir(temp_dir_path)
PLUS_BUTTON_PIN = 27
MINUS_BUTTON_PIN = 19
ENTER_BUTTON_PIN = 22
SHUTDOWN_BUTTON_PIN = 21
PBOARD_RESET_PIN = 25
PBOARD_BOOT0_PIN = 12
SLEEP_LED_PIN = 26
GPIO.setmode(GPIO.BCM)
GPIO.setup(PBOARD_RESET_PIN, GPIO.IN)
GPIO.setup(PBOARD_BOOT0_PIN, GPIO.IN)
GPIO.setup(SLEEP_LED_PIN, GPIO.OUT)
GPIO.output(SLEEP_LED_PIN, GPIO.LOW)
SPI_MOSI_MAGIC = 0xde
SPI_MOSI_MSG_TYPE_SET_PROTOCOL = 2
set_protocl_spi_msg_template = [SPI_MOSI_MAGIC, 0, SPI_MOSI_MSG_TYPE_SET_PROTOCOL] + [0]*29
PBOARD_ID_UNKNOWN = 0
PBOARD_ID_IBMPC = 1
PBOARD_ID_ADB = 2
pboard_info_spi_msg = [0] * 32
this_pboard_id = PBOARD_ID_UNKNOWN
USBGP_BTN_SOUTH = 0x130
USBGP_BTN_EAST = 0x131
USBGP_BTN_C = 0x132
USBGP_BTN_NORTH = 0x133
USBGP_BTN_WEST = 0x134
USBGP_BTN_Z = 0x135
USBGP_BTN_TL = 0x136
USBGP_BTN_TR = 0x137
USBGP_BTN_TL2 = 0x138
USBGP_BTN_TR2 = 0x139
USBGP_BTN_SELECT = 0x13a
USBGP_BTN_START = 0x13b
USBGP_BTN_MODE = 0x13c
USBGP_BTN_THUMBL = 0x13d
USBGP_BTN_THUMBR = 0x13e
USBGP_BTN_A = USBGP_BTN_SOUTH
USBGP_BTN_B = USBGP_BTN_EAST
USBGP_BTN_X = USBGP_BTN_NORTH
USBGP_BTN_Y = USBGP_BTN_WEST
USBGP_ABS_X = 0x00 # left stick X
USBGP_ABS_Y = 0x01 # left stick Y
USBGP_ABS_Z = 0x02 # left analog trigger
USBGP_ABS_RX = 0x03 # right stick X
USBGP_ABS_RY = 0x04 # right stick Y
USBGP_ABS_RZ = 0x05 # right analog trigger
USBGP_ABS_HAT0X = 0x10 # D-pad X
USBGP_ABS_HAT0Y = 0x11 # D-pad Y
GENERIC_USB_GAMEPAD_TO_MOUSE_KB_DEAULT_MAPPING = {
"MAPPING_TYPE": "DEFAULT_MOUSE_KB",
'BTN_TL': {'code': 'BTN_LEFT'},
'BTN_TR': {'code': 'BTN_RIGHT'},
'BTN_TL2': {'code': 'BTN_LEFT'},
'BTN_TR2': {'code': 'BTN_RIGHT'},
'ABS_X': {'code': 'REL_X'},
'ABS_Y': {'code': 'REL_Y'},
'ABS_HAT0X': {'code': 'KEY_RIGHT', 'code_neg': 'KEY_LEFT'},
'ABS_HAT0Y': {'code': 'KEY_DOWN', 'code_neg': 'KEY_UP'}
}
IBM_GENERIC_USB_GAMEPAD_TO_15PIN_GAMEPORT_GAMEPAD_DEAULT_MAPPING = {
"MAPPING_TYPE": "DEFAULT_15PIN",
# buttons to buttons
'BTN_SOUTH': {'code':'IBM_GGP_BTN_1'},
'BTN_NORTH': {'code':'IBM_GGP_BTN_2'},
'BTN_EAST': {'code':'IBM_GGP_BTN_3'},
'BTN_WEST': {'code':'IBM_GGP_BTN_4'},
'BTN_TL': {'code':'IBM_GGP_BTN_1'},
'BTN_TR': {'code':'IBM_GGP_BTN_2'},
'BTN_Z': {'code':'IBM_GGP_BTN_3'},
'BTN_C': {'code':'IBM_GGP_BTN_4'},
'BTN_TL2': {'code':'IBM_GGP_BTN_1'},
'BTN_TR2': {'code':'IBM_GGP_BTN_2'},
# analog axes to analog axes
'ABS_X': {'code':'IBM_GGP_JS1_X'},
'ABS_Y': {'code':'IBM_GGP_JS1_Y'},
'ABS_HAT0X': {'code':'IBM_GGP_JS1_X'},
'ABS_HAT0Y': {'code':'IBM_GGP_JS1_Y'},
'ABS_RX': {'code':'IBM_GGP_JS2_X'},
'ABS_RY': {'code':'IBM_GGP_JS2_Y'},
}
PROTOCOL_OFF = {'pid':0, 'display_name':"OFF"}
PROTOCOL_AT_PS2_KB = {'pid':1, 'display_name':"AT/PS2"}
PROTOCOL_XT_KB = {'pid':2, 'display_name':"PC XT"}
PROTOCOL_ADB_KB = {'pid':3, 'display_name':"ADB"}
PROTOCOL_PS2_MOUSE_NORMAL = {'pid':4, 'display_name':"PS/2"}
PROTOCOL_MICROSOFT_SERIAL_MOUSE = {'pid':5, 'display_name':"Microsft Serial"}
PROTOCOL_ADB_MOUSE = {'pid':6, 'display_name':"ADB"}
PROTOCOL_15PIN_GAMEPORT_GAMEPAD = {'pid':7, 'display_name':"Generic 15-Pin", 'mapping':IBM_GENERIC_USB_GAMEPAD_TO_15PIN_GAMEPORT_GAMEPAD_DEAULT_MAPPING}
PROTOCOL_MOUSESYSTEMS_SERIAL_MOUSE = {'pid':8, 'display_name':"MouseSys Serial"}
PROTOCOL_USB_GP_TO_MOUSE_KB = {'pid':0, 'display_name':'Mouse & KB', 'mapping':GENERIC_USB_GAMEPAD_TO_MOUSE_KB_DEAULT_MAPPING}
PROTOCOL_RAW_KEYBOARD = {'pid':125, 'display_name':"Raw data"}
PROTOCOL_RAW_MOUSE = {'pid':126, 'display_name':"Raw data"}
PROTOCOL_RAW_GAMEPAD = {'pid':127, 'display_name':"Raw data"}
custom_profile_list = []
try:
onlyfiles = [f for f in os.listdir(config_dir_path) if os.path.isfile(os.path.join(config_dir_path, f))]
json_map_files = [os.path.join(config_dir_path, x) for x in onlyfiles if x.lower().startswith('usb4vc_map') and x.lower().endswith(".json")]
for item in json_map_files:
print('loading json file:', item)
with open(item) as json_file:
custom_profile_list.append(json.load(json_file))
except Exception as e:
print('exception json load:', e)
ibmpc_keyboard_protocols = [PROTOCOL_OFF, PROTOCOL_AT_PS2_KB, PROTOCOL_XT_KB]
ibmpc_mouse_protocols = [PROTOCOL_OFF, PROTOCOL_PS2_MOUSE_NORMAL, PROTOCOL_MICROSOFT_SERIAL_MOUSE, PROTOCOL_MOUSESYSTEMS_SERIAL_MOUSE]
ibmpc_gamepad_protocols = [PROTOCOL_OFF, PROTOCOL_15PIN_GAMEPORT_GAMEPAD, PROTOCOL_USB_GP_TO_MOUSE_KB]
adb_keyboard_protocols = [PROTOCOL_OFF, PROTOCOL_ADB_KB]
adb_mouse_protocols = [PROTOCOL_OFF, PROTOCOL_ADB_MOUSE]
adb_gamepad_protocols = [PROTOCOL_OFF, PROTOCOL_USB_GP_TO_MOUSE_KB]
raw_keyboard_protocols = [PROTOCOL_OFF, PROTOCOL_RAW_KEYBOARD]
raw_mouse_protocols = [PROTOCOL_OFF, PROTOCOL_RAW_MOUSE]
raw_gamepad_protocols = [PROTOCOL_OFF, PROTOCOL_RAW_GAMEPAD]
mouse_sensitivity_list = [1, 1.25, 1.5, 1.75, 0.25, 0.5, 0.75]
"""
key is protocol card ID
conf_dict[pbid]:
hw revision
current keyboard protocol
current mouse protocol
current gamepad protocol
mouse sensitivity
"""
configuration_dict = {}
LINUX_EXIT_CODE_TIMEOUT = 124
curve_vertial_axis_x_pos = 80
curve_horizontal_axis_width = 32
curve_linear = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, 11: 11, 12: 12, 13: 13, 14: 14, 15: 15, 16: 16, 17: 17, 18: 18, 19: 19, 20: 20, 21: 21, 22: 22, 23: 23, 24: 24, 25: 25, 26: 26, 27: 27, 28: 28, 29: 29, 30: 30, 31: 31, 32: 32, 33: 33, 34: 34, 35: 35, 36: 36, 37: 37, 38: 38, 39: 39, 40: 40, 41: 41, 42: 42, 43: 43, 44: 44, 45: 45, 46: 46, 47: 47, 48: 48, 49: 49, 50: 50, 51: 51, 52: 52, 53: 53, 54: 54, 55: 55, 56: 56, 57: 57, 58: 58, 59: 59, 60: 60, 61: 61, 62: 62, 63: 63, 64: 64, 65: 65, 66: 66, 67: 67, 68: 68, 69: 69, 70: 70, 71: 71, 72: 72, 73: 73, 74: 74, 75: 75, 76: 76, 77: 77, 78: 78, 79: 79, 80: 80, 81: 81, 82: 82, 83: 83, 84: 84, 85: 85, 86: 86, 87: 87, 88: 88, 89: 89, 90: 90, 91: 91, 92: 92, 93: 93, 94: 94, 95: 95, 96: 96, 97: 97, 98: 98, 99: 99, 100: 100, 101: 101, 102: 102, 103: 103, 104: 104, 105: 105, 106: 106, 107: 107, 108: 108, 109: 109, 110: 110, 111: 111, 112: 112, 113: 113, 114: 114, 115: 115, 116: 116, 117: 117, 118: 118, 119: 119, 120: 120, 121: 121, 122: 122, 123: 123, 124: 124, 125: 125, 126: 126, 127: 127}
curve1 = {0: 1, 1: 1, 2: 2, 3: 2, 4: 3, 5: 4, 6: 4, 7: 5, 8: 5, 9: 6, 10: 6, 11: 7, 12: 7, 13: 8, 14: 8, 15: 9, 16: 9, 17: 10, 18: 11, 19: 11, 20: 12, 21: 12, 22: 13, 23: 13, 24: 14, 25: 15, 26: 15, 27: 16, 28: 16, 29: 17, 30: 18, 31: 18, 32: 19, 33: 19, 34: 20, 35: 21, 36: 21, 37: 22, 38: 22, 39: 23, 40: 24, 41: 24, 42: 25, 43: 26, 44: 26, 45: 27, 46: 28, 47: 28, 48: 29, 49: 30, 50: 30, 51: 31, 52: 32, 53: 33, 54: 33, 55: 34, 56: 35, 57: 36, 58: 36, 59: 37, 60: 38, 61: 39, 62: 39, 63: 40, 64: 41, 65: 42, 66: 43, 67: 44, 68: 45, 69: 46, 70: 46, 71: 47, 72: 48, 73: 49, 74: 50, 75: 51, 76: 52, 77: 53, 78: 55, 79: 56, 80: 57, 81: 58, 82: 59, 83: 60, 84: 61, 85: 62, 86: 63, 87: 65, 88: 66, 89: 67, 90: 68, 91: 70, 92: 71, 93: 72, 94: 73, 95: 75, 96: 76, 97: 77, 98: 79, 99: 80, 100: 81, 101: 83, 102: 84, 103: 86, 104: 87, 105: 89, 106: 90, 107: 92, 108: 93, 109: 95, 110: 96, 111: 98, 112: 100, 113: 101, 114: 103, 115: 105, 116: 106, 117: 108, 118: 110, 119: 112, 120: 113, 121: 115, 122: 117, 123: 119, 124: 121, 125: 123, 126: 125, 127: 127}
curve2 = {0: 1, 1: 1, 2: 1, 3: 1, 4: 2, 5: 2, 6: 2, 7: 2, 8: 2, 9: 3, 10: 3, 11: 3, 12: 3, 13: 4, 14: 4, 15: 4, 16: 4, 17: 5, 18: 5, 19: 5, 20: 5, 21: 6, 22: 6, 23: 6, 24: 7, 25: 7, 26: 7, 27: 8, 28: 8, 29: 8, 30: 8, 31: 9, 32: 9, 33: 9, 34: 10, 35: 10, 36: 10, 37: 11, 38: 11, 39: 12, 40: 12, 41: 12, 42: 13, 43: 13, 44: 13, 45: 14, 46: 14, 47: 15, 48: 15, 49: 15, 50: 16, 51: 16, 52: 17, 53: 17, 54: 18, 55: 18, 56: 19, 57: 19, 58: 20, 59: 20, 60: 21, 61: 21, 62: 22, 63: 22, 64: 23, 65: 23, 66: 24, 67: 24, 68: 25, 69: 26, 70: 26, 71: 27, 72: 28, 73: 28, 74: 29, 75: 30, 76: 30, 77: 31, 78: 32, 79: 33, 80: 34, 81: 35, 82: 36, 83: 37, 84: 38, 85: 39, 86: 40, 87: 41, 88: 42, 89: 43, 90: 44, 91: 45, 92: 47, 93: 48, 94: 49, 95: 51, 96: 52, 97: 53, 98: 55, 99: 56, 100: 58, 101: 59, 102: 61, 103: 63, 104: 64, 105: 66, 106: 68, 107: 70, 108: 71, 109: 73, 110: 75, 111: 78, 112: 80, 113: 82, 114: 84, 115: 86, 116: 89, 117: 92, 118: 94, 119: 96, 120: 100, 121: 102, 122: 106, 123: 110, 124: 112, 125: 116, 126: 120, 127: 125}
curve3 = {0: 1, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1, 6: 1, 7: 1, 8: 1, 9: 1, 10: 1, 11: 1, 12: 1, 13: 1, 14: 1, 15: 1, 16: 1, 17: 1, 18: 1, 19: 1, 20: 1, 21: 2, 22: 2, 23: 2, 24: 2, 25: 2, 26: 2, 27: 2, 28: 2, 29: 2, 30: 2, 31: 3, 32: 3, 33: 3, 34: 3, 35: 3, 36: 3, 37: 3, 38: 4, 39: 4, 40: 4, 41: 4, 42: 4, 43: 4, 44: 5, 45: 5, 46: 5, 47: 5, 48: 5, 49: 6, 50: 6, 51: 6, 52: 6, 53: 7, 54: 7, 55: 7, 56: 7, 57: 8, 58: 8, 59: 8, 60: 8, 61: 9, 62: 9, 63: 9, 64: 10, 65: 10, 66: 10, 67: 11, 68: 11, 69: 11, 70: 12, 71: 12, 72: 12, 73: 13, 74: 13, 75: 14, 76: 14, 77: 15, 78: 15, 79: 16, 80: 16, 81: 17, 82: 17, 83: 18, 84: 19, 85: 19, 86: 20, 87: 21, 88: 21, 89: 22, 90: 23, 91: 24, 92: 25, 93: 26, 94: 27, 95: 28, 96: 29, 97: 30, 98: 32, 99: 33, 100: 34, 101: 35, 102: 37, 103: 38, 104: 40, 105: 41, 106: 43, 107: 45, 108: 46, 109: 48, 110: 50, 111: 52, 112: 54, 113: 56, 114: 59, 115: 61, 116: 64, 117: 66, 118: 69, 119: 72, 120: 76, 121: 79, 122: 83, 123: 87, 124: 92, 125: 99, 126: 104, 127: 118}
joystick_curve_list = [curve_linear, curve1, curve2, curve3]
pboard_database = {
PBOARD_ID_UNKNOWN:{'author':'Unknown', 'fw_ver':(0,0,0), 'full_name':'Unknown', 'hw_rev':0, 'protocol_list_keyboard':raw_keyboard_protocols, 'protocol_list_mouse':raw_mouse_protocols, 'protocol_list_gamepad':raw_gamepad_protocols},
PBOARD_ID_IBMPC:{'author':'dekuNukem', 'fw_ver':(0,0,0), 'full_name':'IBM PC Compatible', 'hw_rev':0, 'protocol_list_keyboard':ibmpc_keyboard_protocols, 'protocol_list_mouse':ibmpc_mouse_protocols, 'protocol_list_gamepad':ibmpc_gamepad_protocols},
PBOARD_ID_ADB:{'author':'dekuNukem', 'fw_ver':(0,0,0), 'full_name':'Apple Desktop Bus', 'hw_rev':0, 'protocol_list_keyboard':adb_keyboard_protocols, 'protocol_list_mouse':adb_mouse_protocols, 'protocol_list_gamepad':adb_gamepad_protocols},
}
plus_button = my_button(PLUS_BUTTON_PIN)
minus_button = my_button(MINUS_BUTTON_PIN)
enter_button = my_button(ENTER_BUTTON_PIN)
shutdown_button = my_button(SHUTDOWN_BUTTON_PIN)
my_oled = oled_sleep_control()
my_menu = None
ui_thread = threading.Thread(target=ui_worker, daemon=True)
| 51.462641 | 1,075 | 0.63269 |
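The four response curves above all map a 0-127 joystick deflection to an output value; sampling the midpoint of each table (values taken directly from the dicts) shows how each preset progressively softens the centre of the stick:
for name, curve in zip(("linear", "curve1", "curve2", "curve3"), joystick_curve_list):
    print(name, curve[64])   # linear 64, curve1 41, curve2 23, curve3 10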
5d6f6e6ed3bbf01cb5af3d5c038344399c98f74f
| 384 |
py
|
Python
|
study/migrations/0003_auto_20200224_2316.py
|
hpathipati/Quick-Tutor
|
17476d79b87f51b12a6c8fc435d1a6506bff1e04
|
[
"PostgreSQL",
"Unlicense",
"MIT"
] | null | null | null |
study/migrations/0003_auto_20200224_2316.py
|
hpathipati/Quick-Tutor
|
17476d79b87f51b12a6c8fc435d1a6506bff1e04
|
[
"PostgreSQL",
"Unlicense",
"MIT"
] | null | null | null |
study/migrations/0003_auto_20200224_2316.py
|
hpathipati/Quick-Tutor
|
17476d79b87f51b12a6c8fc435d1a6506bff1e04
|
[
"PostgreSQL",
"Unlicense",
"MIT"
] | null | null | null |
# Generated by Django 3.0.2 on 2020-02-24 23:16
from django.db import migrations, models
| 20.210526 | 63 | 0.588542 |
5d6fd80c1e9176894348ae0d83e6981dbb3ecb3a
| 103,544 |
py
|
Python
|
tests/unit/resources/test_resource.py
|
gzecchi/oneview-python
|
949bc67ca3eaed324a6dc058620145d9e067e25b
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/resources/test_resource.py
|
gzecchi/oneview-python
|
949bc67ca3eaed324a6dc058620145d9e067e25b
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/resources/test_resource.py
|
gzecchi/oneview-python
|
949bc67ca3eaed324a6dc058620145d9e067e25b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
###
# (C) Copyright [2019] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
import io
import unittest
import mock
from mock import call
from tests.test_utils import mock_builtin
from hpOneView.connection import connection
from hpOneView import exceptions
from hpOneView.resources.resource import (ResourceClient, ResourceHelper, ResourceFileHandlerMixin,
ResourceZeroBodyMixin, ResourcePatchMixin, ResourceUtilizationMixin,
ResourceSchemaMixin, Resource,
RESOURCE_CLIENT_INVALID_ID, UNRECOGNIZED_URI, TaskMonitor,
RESOURCE_CLIENT_TASK_EXPECTED, RESOURCE_ID_OR_URI_REQUIRED,
transform_list_to_dict, extract_id_from_uri, merge_resources,
merge_default_values, unavailable_method)
def test_build_uri_with_uri_should_work(self):
input = "/rest/testuri/09USE7335NW3"
expected_output = "/rest/testuri/09USE7335NW3"
result = self.resource_client._helper.build_uri(input)
self.assertEqual(expected_output, result)
def test_build_uri_with_none_should_raise_exception(self):
try:
self.resource_client._helper.build_uri(None)
except ValueError as exception:
self.assertEqual(RESOURCE_CLIENT_INVALID_ID, exception.args[0])
else:
self.fail("Expected Exception was not raised")
def test_build_uri_with_empty_str_should_raise_exception(self):
try:
self.resource_client._helper.build_uri('')
except ValueError as exception:
self.assertEqual(RESOURCE_CLIENT_INVALID_ID, exception.args[0])
else:
self.fail("Expected Exception was not raised")
class FakeResource(object):
class ResourceClientTest(unittest.TestCase):
URI = "/rest/testuri"
TYPE_V200 = 'typeV200'
TYPE_V300 = 'typeV300'
DEFAULT_VALUES = {
'200': {'type': TYPE_V200},
'300': {'type': TYPE_V300}
}
def test_delete_dict_invalid_uri(self):
dict_to_delete = {"task": "task",
"uri": ""}
try:
self.resource_client.delete(dict_to_delete, False, -1)
except exceptions.HPOneViewUnknownType as e:
self.assertEqual("Unknown object type", e.args[0])
else:
self.fail()
def test_delete_with_none(self):
try:
self.resource_client.delete(None)
except ValueError as e:
self.assertTrue("Resource" in e.args[0])
else:
self.fail()
def test_get_utilization_with_empty(self):
try:
self.resource_client.get_utilization('')
except ValueError as exception:
self.assertEqual(RESOURCE_CLIENT_INVALID_ID, exception.args[0])
else:
self.fail("Expected Exception was not raised")
def test_merge_api_default_values(self):
resource = {'name': 'resource1'}
default_values = {
'200': {"type": "EnclosureGroupV200"},
'300': {"type": "EnclosureGroupV300"}
}
expected = {'name': 'resource1', "type": "EnclosureGroupV300"}
resource_client = ResourceClient(self.connection, self.URI)
result = resource_client.merge_default_values(resource, default_values)
self.assertEqual(result, expected)
def test_transform_list_to_dict(self):
list = ['one', 'two', {'tree': 3}, 'four', 5]
dict_transformed = transform_list_to_dict(list=list)
self.assertEqual(dict_transformed,
{'5': True,
'four': True,
'one': True,
'tree': 3,
'two': True})
avg_line_length: 42.21117 | max_line_length: 127 | alphanum_fraction: 0.665727

hexsha: 5d72ba13d02bd291e6fb1bfd1e3d024e5c0779f3 | size: 316 | ext: py | lang: Python
max_stars_repo: vanadium23/workalendar @ 4c67b5a7900fa56d7a93b767c6cbd8f1cc6b70a7 | path: workalendar/usa/colorado.py | licenses: [MIT] | stars: null
max_issues_repo: vanadium23/workalendar @ 4c67b5a7900fa56d7a93b767c6cbd8f1cc6b70a7 | path: workalendar/usa/colorado.py | licenses: [MIT] | issues: null
max_forks_repo: vanadium23/workalendar @ 4c67b5a7900fa56d7a93b767c6cbd8f1cc6b70a7 | path: workalendar/usa/colorado.py | licenses: [MIT] | forks: null
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .core import UnitedStates
avg_line_length: 26.333333 | max_line_length: 66 | alphanum_fraction: 0.677215

hexsha: 5d72e502922a05a806cd65843fb08ea2947b5e7b | size: 3358 | ext: py | lang: Python
max_stars_repo: thierrypin/gei-pool @ 0a9e79b01148735f0e975c50d2476e41ba20af4f | path: dataloaders/augmentation.py | licenses: [MIT] | stars: null
max_issues_repo: thierrypin/gei-pool @ 0a9e79b01148735f0e975c50d2476e41ba20af4f | path: dataloaders/augmentation.py | licenses: [MIT] | issues: null
max_forks_repo: thierrypin/gei-pool @ 0a9e79b01148735f0e975c50d2476e41ba20af4f | path: dataloaders/augmentation.py | licenses: [MIT] | forks: null
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
import numpy as np
# Generic data augmentation
##########
# Images #
##########
def horizontal_flip(p=0.5):
return fc
def vertical_flip(p=0.5):
return fc
def gaussian_noise(p=0.5, mean=0, sigma=0.02):
return fc
def black_vstripe(p=0.5, size=10):
return fc
def black_hstripe(p=0.5, size=10):
return fc
def default_augmenter(p=0.5, strip_size=3, mean=0, sigma=0.02):
"""Default data augmentation with horizontal flip, vertical flip, gaussian noise, black hstripe, and black vstripe.
Returns:
Augmenter object. Use as: aug.augment(img)
"""
print("Using default image augmenter")
return Augmenter([ horizontal_flip(p), gaussian_noise(p, mean, sigma), black_hstripe(p, size=strip_size), black_vstripe(p, size=strip_size) ])
##########
# Videos #
##########
def default_augmenter_vid(p=0.5, strip_size=3, mean=0, sigma=0.02):
"""Default data augmentation with horizontal flip, gaussian noise, black hstripe, and black vstripe.
Returns:
Augmenter object. Use as: aug.augment(img)
"""
return Augmenter([ horizontal_flip_vid(p), gaussian_noise(p, mean, sigma), black_hstripe_vid(p, size=strip_size), black_vstripe_vid(p, size=strip_size) ])
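# Illustrative usage sketch (assumed, not from the original file): the inner `fc`
# closures and the Augmenter class referenced above are truncated in this extract.
# Going only by the interface documented in the docstrings ("Use as: aug.augment(img)"),
# a typical call site would look like:
#
#     aug = default_augmenter(p=0.5, strip_size=3)      # build the image augmenter
#     augmented = aug.augment(img)                       # img: a NumPy image array
#
#     vid_aug = default_augmenter_vid(p=0.5)             # build the video augmenter
#     augmented_clip = vid_aug.augment(clip)             # clip: a NumPy video array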
avg_line_length: 25.24812 | max_line_length: 158 | alphanum_fraction: 0.549136

hexsha: 5d72ea525d5fca207b00f29574de0ed2864d8b1b | size: 7229 | ext: py | lang: Python
max_stars_repo: CzechInvest/ciis @ c6102598f564a717472e5e31e7eb894bba2c8104 | path: cigeo/admin.py | licenses: [MIT] | stars: 1 (2019-05-26T22:24:01.000Z to 2019-05-26T22:24:01.000Z)
max_issues_repo: CzechInvest/ciis @ c6102598f564a717472e5e31e7eb894bba2c8104 | path: cigeo/admin.py | licenses: [MIT] | issues: 6 (2019-01-22T14:53:43.000Z to 2020-09-22T16:20:28.000Z)
max_forks_repo: CzechInvest/ciis @ c6102598f564a717472e5e31e7eb894bba2c8104 | path: cigeo/admin.py | licenses: [MIT] | forks: null
from django.contrib import admin
from django.contrib.gis import geos
from leaflet.admin import LeafletGeoAdmin, LeafletGeoAdminMixin
from .models import Lau1
from .models import Nuts3
from .models import Airport
from .models import Road
from .models import PublicTransportStop
from .models import RailwayStation
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
import nested_admin
import uuid
import json
# Register your models here.
admin.site.register(Lau1, LAU1Admin)
admin.site.register(Nuts3, NUTS3Admin)
admin.site.register(Road, RoadAdmin)
admin.site.register(PublicTransportStop, PublicTransportStopAdmin)
admin.site.register(RailwayStation, RailwayStationAdmin)
admin.site.register(Airport, AirportAdmin)
avg_line_length: 30.761702 | max_line_length: 80 | alphanum_fraction: 0.580025

hexsha: 5d730d1afb5f1402b6e9a016eacea8ab0f918612 | size: 858 | ext: py | lang: Python
max_stars_repo: RafaelAPB/umbra @ cf075bbe73e46540e9edee25f9ec3d0828620d5f | path: umbra/monitor/main.py | licenses: [Apache-2.0] | stars: null
max_issues_repo: RafaelAPB/umbra @ cf075bbe73e46540e9edee25f9ec3d0828620d5f | path: umbra/monitor/main.py | licenses: [Apache-2.0] | issues: null
max_forks_repo: RafaelAPB/umbra @ cf075bbe73e46540e9edee25f9ec3d0828620d5f | path: umbra/monitor/main.py | licenses: [Apache-2.0] | forks: null
import logging
import json
import asyncio
from google.protobuf import json_format
from umbra.common.protobuf.umbra_grpc import MonitorBase
from umbra.common.protobuf.umbra_pb2 import Instruction, Snapshot
from umbra.monitor.tools import Tools
logger = logging.getLogger(__name__)
logging.getLogger("hpack").setLevel(logging.WARNING)
avg_line_length: 31.777778 | max_line_length: 99 | alphanum_fraction: 0.757576

hexsha: 5d733960d2eb830da7ca11bb10495536367425c3 | size: 6207 | ext: py | lang: Python
max_stars_repo: dwagon/pycs @ 4d02acbf380526d3bf0380f6bb8b757a827024b8 | path: pycs/spells/hunters_mark.py | licenses: [MIT] | stars: null
max_issues_repo: dwagon/pycs @ 4d02acbf380526d3bf0380f6bb8b757a827024b8 | path: pycs/spells/hunters_mark.py | licenses: [MIT] | issues: null
max_forks_repo: dwagon/pycs @ 4d02acbf380526d3bf0380f6bb8b757a827024b8 | path: pycs/spells/hunters_mark.py | licenses: [MIT] | forks: null
"""https://www.dndbeyond.com/spells/hunters-mark"""
from unittest.mock import patch
import dice
from pycs.constant import ActionCategory
from pycs.constant import SpellType
from pycs.creature import Creature
from pycs.effect import Effect
from pycs.gear import Shortbow
from pycs.spell import SpellAction
from pycs.spells.spelltest import SpellTest
##############################################################################
##############################################################################
##############################################################################
##############################################################################
##############################################################################
##############################################################################
##############################################################################
##############################################################################
##############################################################################
# EOF
avg_line_length: 41.10596 | max_line_length: 85 | alphanum_fraction: 0.475914

hexsha: 5d7378673807f7e0283f1553a575bc82a4166826 | size: 390 | ext: py | lang: Python
max_stars_repo: armandok/pySLAM-D @ ef7398806e021885b29702adf55acbedaf544ce6 | path: utilities.py | licenses: [MIT] | stars: 10 (2020-12-24T16:40:46.000Z to 2022-02-01T18:09:13.000Z)
max_issues_repo: armandok/pySLAM-D @ ef7398806e021885b29702adf55acbedaf544ce6 | path: utilities.py | licenses: [MIT] | issues: null
max_forks_repo: armandok/pySLAM-D @ ef7398806e021885b29702adf55acbedaf544ce6 | path: utilities.py | licenses: [MIT] | forks: null
import numpy as np
avg_line_length: 30 | max_line_length: 92 | alphanum_fraction: 0.694872

hexsha: 5d7393f1d5071cf9d02bab3da993f038421d4d57 | size: 1175 | ext: py | lang: Python
max_stars_repo: kyungjaelee/robosuite @ 0d73fcca9ed8e638632f4bd7b0f1b8ebf4640fb1 | path: robosuite/models/grippers/__init__.py | licenses: [MIT] | stars: 397 (2020-09-28T02:49:58.000Z to 2022-03-30T18:08:19.000Z)
max_issues_repo: kyungjaelee/robosuite @ 0d73fcca9ed8e638632f4bd7b0f1b8ebf4640fb1 | path: robosuite/models/grippers/__init__.py | licenses: [MIT] | issues: 169 (2020-09-28T02:17:59.000Z to 2022-03-29T13:32:43.000Z)
max_forks_repo: kyungjaelee/robosuite @ 0d73fcca9ed8e638632f4bd7b0f1b8ebf4640fb1 | path: robosuite/models/grippers/__init__.py | licenses: [MIT] | forks: 131 (2020-09-28T14:50:35.000Z to 2022-03-31T02:27:33.000Z)
from .gripper_model import GripperModel
from .gripper_factory import gripper_factory
from .gripper_tester import GripperTester
from .panda_gripper import PandaGripper
from .rethink_gripper import RethinkGripper
from .robotiq_85_gripper import Robotiq85Gripper
from .robotiq_three_finger_gripper import RobotiqThreeFingerGripper, RobotiqThreeFingerDexterousGripper
from .panda_gripper import PandaGripper
from .jaco_three_finger_gripper import JacoThreeFingerGripper, JacoThreeFingerDexterousGripper
from .robotiq_140_gripper import Robotiq140Gripper
from .wiping_gripper import WipingGripper
from .null_gripper import NullGripper
GRIPPER_MAPPING = {
"RethinkGripper": RethinkGripper,
"PandaGripper": PandaGripper,
"JacoThreeFingerGripper": JacoThreeFingerGripper,
"JacoThreeFingerDexterousGripper": JacoThreeFingerDexterousGripper,
"WipingGripper": WipingGripper,
"Robotiq85Gripper": Robotiq85Gripper,
"Robotiq140Gripper": Robotiq140Gripper,
"RobotiqThreeFingerGripper": RobotiqThreeFingerGripper,
"RobotiqThreeFingerDexterousGripper": RobotiqThreeFingerDexterousGripper,
None: NullGripper,
}
ALL_GRIPPERS = GRIPPER_MAPPING.keys()
avg_line_length: 39.166667 | max_line_length: 103 | alphanum_fraction: 0.846809

hexsha: 5d7596fcdc1125f69dea760f3f07ca8ccf07185d | size: 7509 | ext: py | lang: Python
max_stars_repo: Idein/chainer-hand-pose @ 45c7b629a74bf13da8cc9b47d0ded7099c139e9b | path: src/pose/visualizations/visualizations.py | licenses: [Apache-2.0] | stars: 11 (2019-12-14T07:55:52.000Z to 2021-06-22T06:38:34.000Z)
max_issues_repo: terasakisatoshi/chainer-hand-pose @ a47e0c61c4fea3369db566eea3d539d1c9398bf7 | path: src/pose/visualizations/visualizations.py | licenses: [Apache-2.0] | issues: 1 (2020-06-17T21:39:48.000Z to 2020-06-26T13:16:43.000Z)
max_forks_repo: terasakisatoshi/chainer-hand-pose @ a47e0c61c4fea3369db566eea3d539d1c9398bf7 | path: src/pose/visualizations/visualizations.py | licenses: [Apache-2.0] | forks: 3 (2019-12-11T13:47:54.000Z to 2020-10-23T07:10:15.000Z)
import logging
logger = logging.getLogger(__name__)
import random
import chainercv
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # NOQA
from pose.hand_dataset.geometry_utils import normalize_joint_zyx
from pose.hand_dataset.image_utils import normalize_depth
# Decimal Code (R,G,B)
BASE_COLOR = {
"RED": (255, 0, 0),
"GREEN": (0, 255, 0),
"BLUE": (0, 0, 255),
"YELLOW": (255, 255, 0),
"CYAN": (0, 255, 255),
"MAGENTA": (255, 0, 255),
}
def vis_image(img, ax=None):
"""
extend chainercv.visualizations.vis_image
"""
C, H, W = img.shape
if C == 1:
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
        # remove channel dimension
ax.imshow(img.squeeze())
else:
ax = chainercv.visualizations.vis_image(img, ax)
return ax
def vis_point(point, img=None, color=None, ax=None):
"""
Visualize points in an image, customized to our purpose.
Base implementation is taken from chainercv.visualizations.vis_image
"""
point, ax, H, W = preprocess(point, ax, img)
n_inst = len(point)
c = np.asarray(color) / 255. if color is not None else None
for i in range(n_inst):
        # note that the shape of `point[i]` is (K, N) and each point is in (y, x) or (z, y, x) order.
# (K, N) -> (N, K)
pts = point[i].transpose() # (K,N) -> (N,K)
# resort coordinate order : yx -> xy or zyx -> xyz
pts = pts[::-1]
ax.scatter(*pts, c=c)
if W is not None:
ax.set_xlim(left=0, right=W)
if H is not None:
ax.set_ylim(bottom=H - 1, top=0)
return ax
def vis_edge(point, indices, img=None, color=None, ax=None):
"""
Visualize edges in an image
"""
point, ax, H, W = preprocess(point, ax, img)
n_inst = len(point)
if color is not None:
color = np.asarray(color) / 255.
else:
color = [None] * len(indices)
for i in range(n_inst):
# note that the shape of `point[i]` is (K,N) and the format of one is (y, x) or (z,y,x).
pts = point[i]
for ((s, t), c) in zip(indices, color):
            # Select the pair of points (start, target) that forms this edge.
# Note that [::-1] does resort coordinate order: yx -> xy or zyx -> xyz
edge = pts[[s, t]].transpose()
edge = edge[::-1]
ax.plot(*edge, c=c)
if W is not None:
ax.set_xlim(left=0, right=W)
if H is not None:
ax.set_ylim(bottom=H - 1, top=0)
return ax
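# Illustrative usage sketch (assumed, not from the original file): `preprocess` is
# truncated in this extract; assuming it returns (point, ax, H, W) as the calls above
# imply, the visualizers compose on a single matplotlib axis:
#
#     ax = vis_image(img)                                         # img: (C, H, W) array
#     ax = vis_point([joints], color=BASE_COLOR["RED"], ax=ax)    # joints: (K, 2) in (y, x) order
#     ax = vis_edge([joints], indices=kinematic_edges, ax=ax)     # kinematic_edges: hypothetical edge list
#     plt.show()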
avg_line_length: 31.953191 | max_line_length: 96 | alphanum_fraction: 0.632574

hexsha: 5d75bb550217d28f2cb95a0798b2a193f98c5dc4 | size: 190 | ext: py | lang: Python
max_stars_repo: ticapix/automated-tasks @ a0c73ad2939c6f1a2d91aea6fd309b5005455191 | path: publication-erdf/flask_service.py | licenses: [Unlicense] | stars: null
max_issues_repo: ticapix/automated-tasks @ a0c73ad2939c6f1a2d91aea6fd309b5005455191 | path: publication-erdf/flask_service.py | licenses: [Unlicense] | issues: null
max_forks_repo: ticapix/automated-tasks @ a0c73ad2939c6f1a2d91aea6fd309b5005455191 | path: publication-erdf/flask_service.py | licenses: [Unlicense] | forks: null
#!/usr/bin/env python3
from flask import Flask
app = Flask(__name__)
if __name__ == "__main__":
app.run()
avg_line_length: 14.615385 | max_line_length: 28 | alphanum_fraction: 0.673684

hexsha: 536f43c7085300c239b6e7cb90378b2df33381ad | size: 1134 | ext: py | lang: Python
max_stars_repo: mikiec84/speaking_detection @ ed680138627c156e1f7b0af20d6517e2bea754cc | path: tools/nn/speaker.py | licenses: [MIT] | stars: null
max_issues_repo: mikiec84/speaking_detection @ ed680138627c156e1f7b0af20d6517e2bea754cc | path: tools/nn/speaker.py | licenses: [MIT] | issues: null
max_forks_repo: mikiec84/speaking_detection @ ed680138627c156e1f7b0af20d6517e2bea754cc | path: tools/nn/speaker.py | licenses: [MIT] | forks: null
import os
import skimage.io
from torch.nn import Module
import torch.nn
from torchvision.models import resnet18
from nn.speaker_dataset import Dataset # @UnusedImport
os.environ['TORCH_MODEL_ZOO'] = '../data/'
VIDTIMIT_PATH = '../data/vidtimit/'
skimage.io.use_plugin('pil')
avg_line_length: 23.625 | max_line_length: 74 | alphanum_fraction: 0.613757

hexsha: 536ff8da70c0647265f2448d9db35e0d757a366c | size: 1551 | ext: py | lang: Python
max_stars_repo: mdreves/model-analysis @ 73760b27b763e322a92ea80ff0a768ad9ef74526 | path: tensorflow_model_analysis/util_test.py | licenses: [Apache-2.0] | stars: null
max_issues_repo: mdreves/model-analysis @ 73760b27b763e322a92ea80ff0a768ad9ef74526 | path: tensorflow_model_analysis/util_test.py | licenses: [Apache-2.0] | issues: null
max_forks_repo: mdreves/model-analysis @ 73760b27b763e322a92ea80ff0a768ad9ef74526 | path: tensorflow_model_analysis/util_test.py | licenses: [Apache-2.0] | forks: null
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple tests for util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_model_analysis import util
if __name__ == '__main__':
tf.test.main()
avg_line_length: 32.3125 | max_line_length: 74 | alphanum_fraction: 0.691812