Dataset schema (column: type, range):

hexsha: stringlengths, 40 to 40
size: int64, 5 to 2.06M
ext: stringclasses, 11 values
lang: stringclasses, 1 value
max_stars_repo_path: stringlengths, 3 to 251
max_stars_repo_name: stringlengths, 4 to 130
max_stars_repo_head_hexsha: stringlengths, 40 to 78
max_stars_repo_licenses: listlengths, 1 to 10
max_stars_count: int64, 1 to 191k
max_stars_repo_stars_event_min_datetime: stringlengths, 24 to 24
max_stars_repo_stars_event_max_datetime: stringlengths, 24 to 24
max_issues_repo_path: stringlengths, 3 to 251
max_issues_repo_name: stringlengths, 4 to 130
max_issues_repo_head_hexsha: stringlengths, 40 to 78
max_issues_repo_licenses: listlengths, 1 to 10
max_issues_count: int64, 1 to 116k
max_issues_repo_issues_event_min_datetime: stringlengths, 24 to 24
max_issues_repo_issues_event_max_datetime: stringlengths, 24 to 24
max_forks_repo_path: stringlengths, 3 to 251
max_forks_repo_name: stringlengths, 4 to 130
max_forks_repo_head_hexsha: stringlengths, 40 to 78
max_forks_repo_licenses: listlengths, 1 to 10
max_forks_count: int64, 1 to 105k
max_forks_repo_forks_event_min_datetime: stringlengths, 24 to 24
max_forks_repo_forks_event_max_datetime: stringlengths, 24 to 24
content: stringlengths, 1 to 1.05M
avg_line_length: float64, 1 to 1.02M
max_line_length: int64, 3 to 1.04M
alphanum_fraction: float64, 0 to 1
5d83ad6cff3db1b6e4ae4b1be6ce413b57641a09
9,537
py
Python
ics/mergeGatingSets.py
victorfica/utils
b61935a860838a0e70afde7c9ecf2c68f51a2c4b
[ "MIT" ]
5
2015-12-16T01:23:07.000Z
2020-04-27T11:41:43.000Z
ics/mergeGatingSets.py
victorfica/utils
b61935a860838a0e70afde7c9ecf2c68f51a2c4b
[ "MIT" ]
1
2021-05-06T23:47:20.000Z
2021-05-06T23:48:33.000Z
ics/mergeGatingSets.py
victorfica/utils
b61935a860838a0e70afde7c9ecf2c68f51a2c4b
[ "MIT" ]
6
2016-04-29T14:04:22.000Z
2021-05-06T23:49:34.000Z
#!/usr/bin/env python """ Usage examples: python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function functions --ncpus 4 --out functions_extract.csv sbatch -n 1 -t 3-0 -c 4 -o functions_slurm.txt --wrap="python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function functions --ncpus 4 --out functions_extract.csv" sbatch -n 1 -t 3-0 -c 4 -o functions_markers_slurm.txt --wrap="python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function functions_markers --ncpus 4 --out functions_markers_extract.csv" sbatch -n 1 -t 3-0 -c 4 -o functions_markers_sparse_slurm_gby.txt --wrap="python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function sparse_functions --ncpus 4 --subsets /home/agartlan/gitrepo/utils/ics/allcombs_subsets.csv --out functions_markers_sparse_24Jul2018_gby.csv" sbatch -n 1 -t 3-0 -c 4 -o cell_functions_slurm.txt --wrap="python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function cell_functions --ncpus 4 --out cell_functions_22Aug2018.feather --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv" python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function cell_functions --ncpus 3 --out cell_functions_extract.csv --testbatch --testsamples --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function sparse_functions --ncpus 3 --out sparse_functions_extract_23Aug2018.csv --testbatch --testsamples --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function bool_functions --ncpus 6 --out bool_functions_extract_05May2020.csv --testbatch --testsamples --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv To delete all tmp files use: find . 
-name \merged_tmp*.feather -type f -delete """ if __name__ == '__main__': import argparse parser = argparse.ArgumentParser(description='Extract features and merge batches into one CSV.') parser.add_argument('--folder', type=str, help='Data folder containing all batch folders.', default='/fh/fast/gilbert_p/grp/hvtn602_compass/tmpdata') parser.add_argument('--function', type=str, help='Name of extraction to apply ("functions")', default='functions') parser.add_argument('--subsets', type=str, help='Filename listing subsets for analysis.', default='/home/agartlan/gitrepo/utils/ics/sample_subsets2.csv') parser.add_argument('--out', type=str, help='Output filename for CSV.', default='merged_out.csv') parser.add_argument('--ncpus', type=int, help='Number of CPUs/cores to use for parallelization.', default=1) parser.add_argument('--testsamples', action='store_true', help='Only process two samples from each batch.') parser.add_argument('--testbatch', action='store_true', help='Only process twp samples from one batch.') parser.add_argument('--matchingonly', action='store_true', help='Only perform sample matching, to validate metadata.') parser.add_argument('--feather', action='store_true', help='Store as feather as oposed to CSV') parser.add_argument('--utils', default='/home/agartlan/gitrepo/utils', help='Location of agartland/utils repo from public github.com') args = parser.parse_args() try: import parmap from multiprocessing import Pool _PARMAP = True except: _PARMAP = False print('Could not find package "parmap", parallelization not enabled.') import itertools import pandas as pd import numpy as np from os.path import join as opj import os from functools import partial import time import sys import feather """Make sure the utils are on path before importing""" sys.path.append(args.utils) # from ics import extractFunctionsGBY, extractFunctionsMarkersGBY, parseSubsets, mergeSamples, matchSamples from ics import * if args.matchingonly: metaDf = testMatching(args.folder) metaDf.to_csv(opj(args.folder, 'metamatch_' + args.out)) print('Wrote matching metadata to %s.' 
% opj(args.folder, 'metamatch_' + args.out)) else: subsets, markers, functions, exclude = parseSubsets(args.subsets) features = {'sparse_functions':(extractFunctionsGBY, dict(subsets=subsets, functions=functions, mincells=5)), 'bool_functions':(extractFunctionsGBY, dict(subsets=subsets, functions=functions, mincells=0)), 'functions_markers':(extractFunctionsMarkersGBY, dict(subsets=subsets, functions=functions, markers=markers, compressions=[('ALL', 2), (['IFNg','IL2', 'TNFa'], 2)])), 'functions':(extractFunctionsGBY, dict(subsets=subsets, functions=functions, compressions=[('ALL', 1), ('ALL', 2), (['IFNg','IL2', 'TNFa'], 1), (['IFNg','IL2', 'TNFa'], 2), (['IFNg','IL2'], 1)])), 'cell_functions':(extractRawFunctions, dict(subsets=subsets, functions=functions, downsample=1))} extractionFunc, extractionKwargs = features[args.function] if args.testbatch: print('Test: processing samples from one batch') if args.testsamples: print('Test: processing two samples per batch') outFile = opj(args.folder, args.out) if args.feather: outFile = outFile.replace('.csv', '.feather') wrote = mergeBatches(args.folder, extractionFunc=extractionFunc, extractionKwargs=extractionKwargs, testsamples=args.testsamples, testbatch=args.testbatch, outFile=outFile, metaCols=['PTID', 'VISITNO', 'Global.Spec.Id', 'TESTDT', 'STIM'], filters={'STIM':['negctrl', 'TB WCL', 'BCG-Pasteur', 'Ag85B', 'TB 10.4'], 'VISITNO':[2, 6, 7, 10, 11, 12]}, useFeather=int(args.feather), ncpus=args.ncpus) if wrote == outFile: print('Wrote extracted data to %s.' % outFile) else: print('Error writing file to disk: %s' % wrote)
52.401099
284
0.554996
5d84d543ecd3e19e8c3f8af82dd1e6cc5ea16443
88
py
Python
meeting.py
zoni/ulauncher-meet
1b76627c69dfc539645acd27e30c9b8fd8fe08ae
[ "MIT" ]
4
2021-04-30T21:39:03.000Z
2021-08-30T11:38:21.000Z
meeting.py
zoni/ulauncher-meet
1b76627c69dfc539645acd27e30c9b8fd8fe08ae
[ "MIT" ]
1
2021-05-31T11:05:37.000Z
2021-05-31T11:05:37.000Z
meeting.py
zoni/ulauncher-meet
1b76627c69dfc539645acd27e30c9b8fd8fe08ae
[ "MIT" ]
2
2021-05-07T09:47:20.000Z
2021-08-25T16:27:24.000Z
from dataclasses import dataclass
11
33
0.715909
5d85c596f37801463f956fbf7ef5af170636decb
1,000
py
Python
setup.py
uuosio/uuosio.gscdk
a2e364d4499c1372567aa5933e2d8e02340a8385
[ "BSD-3-Clause" ]
6
2021-09-03T09:02:39.000Z
2022-01-12T06:31:09.000Z
setup.py
uuosio/uuosio.gscdk
a2e364d4499c1372567aa5933e2d8e02340a8385
[ "BSD-3-Clause" ]
1
2021-11-01T16:46:09.000Z
2021-11-04T12:51:45.000Z
setup.py
uuosio/uuosio.gscdk
a2e364d4499c1372567aa5933e2d8e02340a8385
[ "BSD-3-Clause" ]
2
2021-11-10T01:56:15.000Z
2022-01-13T14:27:31.000Z
import os
import shutil
import setuptools
# from skbuild import setup
from distutils.core import setup
from distutils.sysconfig import get_python_lib
import glob

# if os.path.exists('pysrc/tinygo'):
#     shutil.rmtree('pysrc/tinygo')
# shutil.copytree('tinygo/build/release/tinygo', 'pysrc/tinygo')

release_files = []
for root, dirs, files in os.walk("pysrc/tinygo"):
    for f in files:
        release_files.append(os.path.join(root.replace('pysrc/', ''), f))

# print(release_files)

setup(
    name="gscdk",
    version="0.3.5",
    description="Go Smart Contract Development Kit",
    author='The UUOSIO Team',
    license="BSD-3-Clause",
    url="https://github.com/uuosio/uuosio.gscdk",
    packages=['gscdk'],
    package_dir={'gscdk': 'pysrc'},
    package_data={
        # "": ["*"],
        'gscdk': release_files,
    },
    setup_requires=['wheel']
    # scripts=['compiler/build/release/tinygo/bin/eosio-go'],
    # install_requires=[
    # ],
    # include_package_data=True
)
24.390244
77
0.658
5d861ae24ab41a343997586ea4f68f7cd661d4d3
301
py
Python
tests/data_creator_action.py
michalurbanski/bkgames
69b1d16ae27d3118dd78449ce7deecbd6e1b95e7
[ "MIT" ]
null
null
null
tests/data_creator_action.py
michalurbanski/bkgames
69b1d16ae27d3118dd78449ce7deecbd6e1b95e7
[ "MIT" ]
null
null
null
tests/data_creator_action.py
michalurbanski/bkgames
69b1d16ae27d3118dd78449ce7deecbd6e1b95e7
[ "MIT" ]
null
null
null
from typing import Callable
33.444444
104
0.750831
5d8772d2443bc37d077b4e1088b8652b560de433
387
py
Python
Python/Numpy/Min and Max/min_and_max.py
brianchiang-tw/HackerRank
02a30a0033b881206fa15b8d6b4ef99b2dc420c8
[ "MIT" ]
2
2020-05-28T07:15:00.000Z
2020-07-21T08:34:06.000Z
Python/Numpy/Min and Max/min_and_max.py
brianchiang-tw/HackerRank
02a30a0033b881206fa15b8d6b4ef99b2dc420c8
[ "MIT" ]
null
null
null
Python/Numpy/Min and Max/min_and_max.py
brianchiang-tw/HackerRank
02a30a0033b881206fa15b8d6b4ef99b2dc420c8
[ "MIT" ]
null
null
null
import numpy as np

if __name__ == '__main__':

    h, w = map( int, input().split() )

    row_list = []
    for i in range(h):
        single_row = list( map(int, input().split() ) )
        np_row = np.array( single_row )
        row_list.append( np_row )

    min_of_each_row = np.min( row_list, axis = 1)
    max_of_min = np.max( min_of_each_row )

    print( max_of_min )
15.48
56
0.573643
5d87b775f0d8dfc2c8f2bb9538693bb8aa0d1ec6
22,757
py
Python
allure/pytest_plugin.py
allure-framework/allure-pytest
d55180aaeb21233e7ca577ffc6f67a07837c63f2
[ "Apache-2.0" ]
112
2017-01-24T21:37:49.000Z
2022-03-25T22:32:12.000Z
venv/Lib/site-packages/allure/pytest_plugin.py
Arthii01052/conduit
3427d76d0fa364cb5d19bdd6da4aeb0a22fe9660
[ "MIT" ]
56
2017-01-21T20:01:41.000Z
2019-01-14T13:35:53.000Z
venv/Lib/site-packages/allure/pytest_plugin.py
Arthii01052/conduit
3427d76d0fa364cb5d19bdd6da4aeb0a22fe9660
[ "MIT" ]
52
2017-01-23T13:40:40.000Z
2022-03-30T00:02:31.000Z
import uuid
import pickle
import pytest
import argparse

from collections import namedtuple
from six import text_type

from allure.common import AllureImpl, StepContext
from allure.constants import Status, AttachmentType, Severity, \
    FAILED_STATUSES, Label, SKIPPED_STATUSES
from allure.utils import parent_module, parent_down_from_module, labels_of, \
    all_of, get_exception_message, now, mangle_testnames
from allure.structure import TestCase, TestStep, Attach, TestSuite, Failure, TestLabel

MASTER_HELPER = AllureHelper()

CollectFail = namedtuple('CollectFail', 'name status message trace')
39.168675
183
0.572483
5d88a4a57aa7fe412e25b74cc37254832f74121b
1,113
py
Python
treenode/debug.py
domlysi/django-treenode
86e7c76e2b2d60c071cfce6ad1493b2b51f2d304
[ "MIT" ]
null
null
null
treenode/debug.py
domlysi/django-treenode
86e7c76e2b2d60c071cfce6ad1493b2b51f2d304
[ "MIT" ]
null
null
null
treenode/debug.py
domlysi/django-treenode
86e7c76e2b2d60c071cfce6ad1493b2b51f2d304
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-

from django.conf import settings
from django.db import connection

import logging
import timeit

logger = logging.getLogger(__name__)
26.5
74
0.637017
5d8a580c6383e0fa95c751c549eb6cc5184e491f
3,705
py
Python
String_tool.py
vibhorvk/BlendString
3bf62083716b3b1f4976abeb3528771eeb79e2cf
[ "MIT" ]
null
null
null
String_tool.py
vibhorvk/BlendString
3bf62083716b3b1f4976abeb3528771eeb79e2cf
[ "MIT" ]
null
null
null
String_tool.py
vibhorvk/BlendString
3bf62083716b3b1f4976abeb3528771eeb79e2cf
[ "MIT" ]
null
null
null
bl_info = {
    "name": "STRING",
    "blender": (2, 80, 0),
    "category": "Object",
    'Author' : 'Vibhor Gupta'
}

import bpy
import bmesh


def register():
    bpy.utils.register_class(STRING)


def unregister():
    bpy.utils.unregister_class(STRING)


# This allows you to run the script directly from Blender's Text editor
# to test the add-on without having to install it.
if __name__ == "__main__":
    register()
29.879032
107
0.515789
5d8b272fb8d2699d2cf3ea1fd7de71c67f398d16
1,130
py
Python
Code/DataHandlers/__init__.py
aricsanders/pyMez3
13e2b9900af2287db0cc42a0190d31da165ce174
[ "Unlicense" ]
2
2017-01-29T00:46:01.000Z
2017-07-25T17:23:04.000Z
Code/DataHandlers/__init__.py
aricsanders/pyMez3
13e2b9900af2287db0cc42a0190d31da165ce174
[ "Unlicense" ]
1
2019-08-02T03:59:41.000Z
2019-08-02T03:59:41.000Z
Code/DataHandlers/__init__.py
aricsanders/pyMez3
13e2b9900af2287db0cc42a0190d31da165ce174
[ "Unlicense" ]
null
null
null
""" The DataHandlers subpackage is designed to manipulate data, by allowing different data types to be opened, created, saved and updated. The subpackage is further divided into modules grouped by a common theme. Classes for data that are already on disk normally follows the following pattern: `instance=ClassName(file_path,**options)` For Example to open a XML file that you don't know the model, use `xml=pyMez.Code.DataHandlers.XMLModels.XMLBase('MyXML.xml')' or `xml=XMLBase('MyXML.xml')` All data models normally have save(), str() and if appropriate show() methods. Examples -------- <a href="../../../Examples/How_To_Open_S2p.html"> How to open a s2p file </a> Import Structure ---------------- DataHandlers typically import from Utils but __NOT__ from Analysis, InstrumentControl or FrontEnds Help ----- <a href="../index.html">`pyMez.Code`</a> <div> <a href="../../../pyMez_Documentation.html">Documentation Home</a> | <a href="../../index.html">API Documentation Home</a> | <a href="../../../Examples/html/Examples_Home.html">Examples</a> | <a href="../../../Reference_Index.html">Index </a> </div> """
25.681818
118
0.70531
5d8ca67bf97053442519e2fbb8cf359ecbb33654
228
py
Python
djangomail/backends/dummy.py
somenzz/djangomail
7d4f833cd71289a51eb935757d8b628e9c9f8aa1
[ "MIT" ]
1
2021-06-01T07:51:18.000Z
2021-06-01T07:51:18.000Z
djangomail/backends/dummy.py
somenzz/djangomail
7d4f833cd71289a51eb935757d8b628e9c9f8aa1
[ "MIT" ]
null
null
null
djangomail/backends/dummy.py
somenzz/djangomail
7d4f833cd71289a51eb935757d8b628e9c9f8aa1
[ "MIT" ]
1
2022-01-24T13:38:14.000Z
2022-01-24T13:38:14.000Z
""" Dummy email backend that does nothing. """ from djangomail.backends.base import BaseEmailBackend
20.727273
53
0.763158
5d8ccdbb74a4a8ff8a56e579b885b0bbd0743a4f
7,666
py
Python
awx/plugins/library/scan_services.py
Avinesh/awx
6310a2edd890d6062a9f6bcdeb2b46c4b876c2bf
[ "Apache-2.0" ]
17
2021-04-03T01:40:17.000Z
2022-03-03T11:45:20.000Z
awx/plugins/library/scan_services.py
Avinesh/awx
6310a2edd890d6062a9f6bcdeb2b46c4b876c2bf
[ "Apache-2.0" ]
24
2021-05-18T21:13:35.000Z
2022-03-29T10:23:52.000Z
awx/plugins/library/scan_services.py
Avinesh/awx
6310a2edd890d6062a9f6bcdeb2b46c4b876c2bf
[ "Apache-2.0" ]
14
2021-04-06T20:05:41.000Z
2022-03-24T14:16:03.000Z
#!/usr/bin/env python

import re
from ansible.module_utils.basic import *  # noqa

DOCUMENTATION = '''
---
module: scan_services
short_description: Return service state information as fact data
description:
    - Return service state information as fact data for various service management utilities
version_added: "1.9"
options:
requirements: [ ]
author: Matthew Jones
'''

EXAMPLES = '''
- monit: scan_services

# Example fact output:
# host | success >> {
#    "ansible_facts": {
#        "services": {
#            "network": {
#                "source": "sysv",
#                "state": "running",
#                "name": "network"
#            },
#            "arp-ethers.service": {
#                "source": "systemd",
#                "state": "stopped",
#                "name": "arp-ethers.service"
#            }
#        }
#    }
'''


def main():
    module = AnsibleModule(argument_spec = dict())  # noqa
    service_modules = (ServiceScanService, SystemctlScanService)
    all_services = {}
    incomplete_warning = False
    for svc_module in service_modules:
        svcmod = svc_module(module)
        svc = svcmod.gather_services()
        if svc is not None:
            all_services.update(svc)
            if svcmod.incomplete_warning:
                incomplete_warning = True
    if len(all_services) == 0:
        results = dict(skipped=True, msg="Failed to find any services. Sometimes this is due to insufficient privileges.")
    else:
        results = dict(ansible_facts=dict(services=all_services))
        if incomplete_warning:
            results['msg'] = "WARNING: Could not find status for all services. Sometimes this is due to insufficient privileges."
    module.exit_json(**results)


main()
40.136126
155
0.530655
5d8cdce66649554dda1ee6deb1afd812b2f3ebbf
2,146
py
Python
app.py
duckm8795/runscope-circleci
2fd42e64bddb4b8f34c437c2d834b92369c9a2bf
[ "Apache-2.0" ]
null
null
null
app.py
duckm8795/runscope-circleci
2fd42e64bddb4b8f34c437c2d834b92369c9a2bf
[ "Apache-2.0" ]
null
null
null
app.py
duckm8795/runscope-circleci
2fd42e64bddb4b8f34c437c2d834b92369c9a2bf
[ "Apache-2.0" ]
null
null
null
import requests
import sys
import time
import os


if __name__ == '__main__':
    main()
31.101449
154
0.605312
5d8dbbff6df38e6773044260538db7a759525964
16,585
py
Python
spyse/client.py
fabaff/spyse-python
f286514ac052ebe6fa98f877d251d8f3cd4db1c4
[ "MIT" ]
9
2021-07-28T11:59:07.000Z
2022-02-17T02:25:06.000Z
spyse/client.py
fabaff/spyse-python
f286514ac052ebe6fa98f877d251d8f3cd4db1c4
[ "MIT" ]
2
2021-11-27T02:03:03.000Z
2022-02-02T11:33:34.000Z
spyse/client.py
fabaff/spyse-python
f286514ac052ebe6fa98f877d251d8f3cd4db1c4
[ "MIT" ]
7
2021-08-05T04:02:09.000Z
2022-03-04T14:11:04.000Z
import requests
from typing import List, Optional

from .models import AS, Domain, IP, CVE, Account, Certificate, Email, DNSHistoricalRecord, WHOISHistoricalRecord
from .response import Response
from .search_query import SearchQuery
from limiter import get_limiter, limit
42.308673
117
0.662466
5d8e72c2a2b92c4afc6d55b1c762592baf4c02a2
147
py
Python
talleres_inov_docente/figures/plot_helpers.py
jfcaballero/Tutorial-sobre-scikit-learn-abreviado
1e2aa1f9132c277162135a5463068801edab8d15
[ "CC0-1.0" ]
576
2016-03-20T10:05:58.000Z
2022-03-20T05:58:32.000Z
talleres_inov_docente/figures/plot_helpers.py
jfcaballero/Tutorial-sobre-scikit-learn-abreviado
1e2aa1f9132c277162135a5463068801edab8d15
[ "CC0-1.0" ]
64
2016-03-20T08:56:49.000Z
2019-03-13T15:37:55.000Z
talleres_inov_docente/figures/plot_helpers.py
jfcaballero/Tutorial-sobre-scikit-learn-abreviado
1e2aa1f9132c277162135a5463068801edab8d15
[ "CC0-1.0" ]
570
2016-03-20T19:23:07.000Z
2021-12-12T12:22:14.000Z
from matplotlib.colors import ListedColormap

cm3 = ListedColormap(['#0000aa', '#ff2020', '#50ff50'])
cm2 = ListedColormap(['#0000aa', '#ff2020'])
29.4
55
0.707483
5d8e73966f8c10521ed6c383e9307e6cc3d33a3d
612
py
Python
Applications/FlaskApp/errorpages.py
cemac-ccs/FlaskMWE
e8ce3cbca0d402bd9fdb1feb10290f2e7b11907b
[ "MIT" ]
null
null
null
Applications/FlaskApp/errorpages.py
cemac-ccs/FlaskMWE
e8ce3cbca0d402bd9fdb1feb10290f2e7b11907b
[ "MIT" ]
null
null
null
Applications/FlaskApp/errorpages.py
cemac-ccs/FlaskMWE
e8ce3cbca0d402bd9fdb1feb10290f2e7b11907b
[ "MIT" ]
null
null
null
from flask import render_template

# Error Pages ----------------------------------------------------------------
27.818182
78
0.637255
5d8e9f525045331a16efdc9df5a7b8042480b89c
1,118
py
Python
cppgym/ToyText/BlackJack.py
anhydrous99/cppgym
0b1009a74faebfe5a31bcfd6a86c74cf13464d56
[ "MIT" ]
null
null
null
cppgym/ToyText/BlackJack.py
anhydrous99/cppgym
0b1009a74faebfe5a31bcfd6a86c74cf13464d56
[ "MIT" ]
1
2021-01-03T10:21:36.000Z
2021-01-26T03:59:07.000Z
cppgym/ToyText/BlackJack.py
anhydrous99/cppgym
0b1009a74faebfe5a31bcfd6a86c74cf13464d56
[ "MIT" ]
null
null
null
from .._BlackJack import BlackJackCPP

import gym
import ctypes
import numpy as np
from gym import spaces
26.619048
53
0.593918
5d8f21c23f88e0ce2aa150c385f666597b203749
5,827
py
Python
sdks/python/apache_beam/runners/direct/consumer_tracking_pipeline_visitor_test.py
ajothomas/beam
4774c1caf3dac3b6a7dd161f82559a26fa380920
[ "Apache-2.0", "BSD-3-Clause" ]
5
2019-07-27T11:54:33.000Z
2021-06-06T11:53:36.000Z
sdks/python/apache_beam/runners/direct/consumer_tracking_pipeline_visitor_test.py
ajothomas/beam
4774c1caf3dac3b6a7dd161f82559a26fa380920
[ "Apache-2.0", "BSD-3-Clause" ]
12
2019-04-15T15:27:23.000Z
2019-07-01T18:13:10.000Z
sdks/python/apache_beam/runners/direct/consumer_tracking_pipeline_visitor_test.py
ajothomas/beam
4774c1caf3dac3b6a7dd161f82559a26fa380920
[ "Apache-2.0", "BSD-3-Clause" ]
1
2021-06-03T19:54:48.000Z
2021-06-03T19:54:48.000Z
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""Tests for consumer_tracking_pipeline_visitor."""

# pytype: skip-file

import logging
import unittest

import apache_beam as beam
from apache_beam import pvalue
from apache_beam.pipeline import Pipeline
from apache_beam.pvalue import AsList
from apache_beam.runners.direct import DirectRunner
from apache_beam.runners.direct.consumer_tracking_pipeline_visitor import ConsumerTrackingPipelineVisitor
from apache_beam.transforms import CoGroupByKey
from apache_beam.transforms import Create
from apache_beam.transforms import DoFn
from apache_beam.transforms import FlatMap
from apache_beam.transforms import Flatten
from apache_beam.transforms import ParDo

# Disable frequent lint warning due to pipe operator for chaining transforms.
# pylint: disable=expression-not-assigned
# pylint: disable=pointless-statement


if __name__ == '__main__':
  logging.getLogger().setLevel(logging.DEBUG)
  unittest.main()
35.530488
105
0.727476
5d8fd5fa2bcd3f5669762aabbd18717b761f3d16
30,184
py
Python
gluon/main.py
scudette/rekall-agent-server
e553f1ae5279f75a8f5b0c0c4847766b60ed86eb
[ "BSD-3-Clause" ]
21
2018-02-16T17:43:59.000Z
2021-12-29T12:08:28.000Z
gluon/main.py
scudette/rekall-agent-server
e553f1ae5279f75a8f5b0c0c4847766b60ed86eb
[ "BSD-3-Clause" ]
12
2017-11-01T14:54:29.000Z
2018-02-01T22:02:12.000Z
gluon/main.py
scudette/rekall-agent-server
e553f1ae5279f75a8f5b0c0c4847766b60ed86eb
[ "BSD-3-Clause" ]
8
2018-10-08T03:48:00.000Z
2022-03-31T12:13:01.000Z
#!/bin/env python # -*- coding: utf-8 -*- """ | This file is part of the web2py Web Framework | Copyrighted by Massimo Di Pierro <[email protected]> | License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) The gluon wsgi application --------------------------- """ from __future__ import print_function if False: import import_all # DO NOT REMOVE PART OF FREEZE PROCESS import gc import os import re import copy import sys import time import datetime import signal import socket import random import string from gluon._compat import Cookie, urllib2 #from thread import allocate_lock from gluon.fileutils import abspath, write_file from gluon.settings import global_settings from gluon.utils import web2py_uuid from gluon.admin import add_path_first, create_missing_folders, create_missing_app_folders from gluon.globals import current # Remarks: # calling script has inserted path to script directory into sys.path # applications_parent (path to applications/, site-packages/ etc) # defaults to that directory set sys.path to # ("", gluon_parent/site-packages, gluon_parent, ...) # # this is wrong: # web2py_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # because we do not want the path to this file which may be Library.zip # gluon_parent is the directory containing gluon, web2py.py, logging.conf # and the handlers. # applications_parent (web2py_path) is the directory containing applications/ # and routes.py # The two are identical unless web2py_path is changed via the web2py.py -f folder option # main.web2py_path is the same as applications_parent (for backward compatibility) web2py_path = global_settings.applications_parent # backward compatibility create_missing_folders() # set up logging for subsequent imports import logging import logging.config # This needed to prevent exception on Python 2.5: # NameError: name 'gluon' is not defined # See http://bugs.python.org/issue1436 # attention!, the import Tkinter in messageboxhandler, changes locale ... import gluon.messageboxhandler logging.gluon = gluon # so we must restore it! 
Thanks ozancag import locale locale.setlocale(locale.LC_CTYPE, "C") # IMPORTANT, web2py requires locale "C" exists = os.path.exists pjoin = os.path.join try: logging.config.fileConfig(abspath("logging.conf")) except: # fails on GAE or when logfile is missing logging.basicConfig() logger = logging.getLogger("web2py") from gluon.restricted import RestrictedError from gluon.http import HTTP, redirect from gluon.globals import Request, Response, Session from gluon.compileapp import build_environment, run_models_in, \ run_controller_in, run_view_in from gluon.contenttype import contenttype from pydal.base import BaseAdapter from gluon.validators import CRYPT from gluon.html import URL, xmlescape from gluon.utils import is_valid_ip_address, getipaddrinfo from gluon.rewrite import load as load_routes, url_in, THREAD_LOCAL as rwthread, \ try_rewrite_on_error, fixup_missing_path_info from gluon import newcron __all__ = ['wsgibase', 'save_password', 'appfactory', 'HttpServer'] requests = 0 # gc timer # Security Checks: validate URL and session_id here, # accept_language is validated in languages # pattern used to validate client address regex_client = re.compile('[\w\-:]+(\.[\w\-]+)*\.?') # ## to account for IPV6 try: version_info = open(pjoin(global_settings.gluon_parent, 'VERSION'), 'r') raw_version_string = version_info.read().split()[-1].strip() version_info.close() global_settings.web2py_version = raw_version_string web2py_version = global_settings.web2py_version except: raise RuntimeError("Cannot determine web2py version") try: from gluon import rocket except: if not global_settings.web2py_runtime_gae: logger.warn('unable to import Rocket') load_routes() HTTPS_SCHEMES = set(('https', 'HTTPS')) def get_client(env): """ Guesses the client address from the environment variables First tries 'http_x_forwarded_for', secondly 'remote_addr' if all fails, assume '127.0.0.1' or '::1' (running locally) """ eget = env.get g = regex_client.search(eget('http_x_forwarded_for', '')) client = (g.group() or '').split(',')[0] if g else None if client in (None, '', 'unknown'): g = regex_client.search(eget('remote_addr', '')) if g: client = g.group() elif env.http_host.startswith('['): # IPv6 client = '::1' else: client = '127.0.0.1' # IPv4 if not is_valid_ip_address(client): raise HTTP(400, "Bad Request (request.client=%s)" % client) return client def serve_controller(request, response, session): """ This function is used to generate a dynamic page. It first runs all models, then runs the function in the controller, and then tries to render the output using a view/template. this function must run from the [application] folder. 
A typical example would be the call to the url /[application]/[controller]/[function] that would result in a call to [function]() in applications/[application]/[controller].py rendered by applications/[application]/views/[controller]/[function].html """ # ################################################## # build environment for controller and view # ################################################## environment = build_environment(request, response, session) # set default view, controller can override it response.view = '%s/%s.%s' % (request.controller, request.function, request.extension) # also, make sure the flash is passed through # ################################################## # process models, controller and view (if required) # ################################################## run_models_in(environment) response._view_environment = copy.copy(environment) page = run_controller_in(request.controller, request.function, environment) if isinstance(page, dict): response._vars = page response._view_environment.update(page) page = run_view_in(response._view_environment) # logic to garbage collect after exec, not always, once every 100 requests global requests requests = ('requests' in globals()) and (requests + 1) % 100 or 0 if not requests: gc.collect() # end garbage collection logic # ################################################## # set default headers it not set # ################################################## default_headers = [ ('Content-Type', contenttype('.' + request.extension)), ('Cache-Control', 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0'), ('Expires', time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime())), ('Pragma', 'no-cache')] for key, value in default_headers: response.headers.setdefault(key, value) raise HTTP(response.status, page, **response.headers) def wsgibase(environ, responder): """ The gluon wsgi application. The first function called when a page is requested (static or dynamic). It can be called by paste.httpserver or by apache mod_wsgi (or any WSGI-compatible server). - fills request with info - the environment variables, replacing '.' with '_' - adds web2py path and version info - compensates for fcgi missing path_info and query_string - validates the path in url The url path must be either: 1. for static pages: - /<application>/static/<file> 2. for dynamic pages: - /<application>[/<controller>[/<function>[/<sub>]]][.<extension>] The naming conventions are: - application, controller, function and extension may only contain `[a-zA-Z0-9_]` - file and sub may also contain '-', '=', '.' 
and '/' """ eget = environ.get current.__dict__.clear() request = Request(environ) response = Response() session = Session() env = request.env #env.web2py_path = global_settings.applications_parent env.web2py_version = web2py_version #env.update(global_settings) static_file = False http_response = None try: try: try: # ################################################## # handle fcgi missing path_info and query_string # select rewrite parameters # rewrite incoming URL # parse rewritten header variables # parse rewritten URL # serve file if static # ################################################## fixup_missing_path_info(environ) (static_file, version, environ) = url_in(request, environ) response.status = env.web2py_status_code or response.status if static_file: if eget('QUERY_STRING', '').startswith('attachment'): response.headers['Content-Disposition'] \ = 'attachment' if version: response.headers['Cache-Control'] = 'max-age=315360000' response.headers[ 'Expires'] = 'Thu, 31 Dec 2037 23:59:59 GMT' response.stream(static_file, request=request) # ################################################## # fill in request items # ################################################## app = request.application # must go after url_in! if not global_settings.local_hosts: local_hosts = set(['127.0.0.1', '::ffff:127.0.0.1', '::1']) if not global_settings.web2py_runtime_gae: try: fqdn = socket.getfqdn() local_hosts.add(socket.gethostname()) local_hosts.add(fqdn) local_hosts.update([ addrinfo[4][0] for addrinfo in getipaddrinfo(fqdn)]) if env.server_name: local_hosts.add(env.server_name) local_hosts.update([ addrinfo[4][0] for addrinfo in getipaddrinfo(env.server_name)]) except (socket.gaierror, TypeError): pass global_settings.local_hosts = list(local_hosts) else: local_hosts = global_settings.local_hosts client = get_client(env) x_req_with = str(env.http_x_requested_with).lower() cmd_opts = global_settings.cmd_options request.update( client = client, folder = abspath('applications', app) + os.sep, ajax = x_req_with == 'xmlhttprequest', cid = env.http_web2py_component_element, is_local = (env.remote_addr in local_hosts and client == env.remote_addr), is_shell = False, is_scheduler = False, is_https = env.wsgi_url_scheme in HTTPS_SCHEMES or \ request.env.http_x_forwarded_proto in HTTPS_SCHEMES \ or env.https == 'on' ) request.url = environ['PATH_INFO'] # ################################################## # access the requested application # ################################################## disabled = pjoin(request.folder, 'DISABLED') if not exists(request.folder): if app == rwthread.routes.default_application \ and app != 'welcome': redirect(URL('welcome', 'default', 'index')) elif rwthread.routes.error_handler: _handler = rwthread.routes.error_handler redirect(URL(_handler['application'], _handler['controller'], _handler['function'], args=app)) else: raise HTTP(404, rwthread.routes.error_message % 'invalid request', web2py_error='invalid application') elif not request.is_local and exists(disabled): five0three = os.path.join(request.folder,'static','503.html') if os.path.exists(five0three): raise HTTP(503, file(five0three, 'r').read()) else: raise HTTP(503, "<html><body><h1>Temporarily down for maintenance</h1></body></html>") # ################################################## # build missing folders # ################################################## create_missing_app_folders(request) # ################################################## # get the GET and POST data # 
################################################## #parse_get_post_vars(request, environ) # ################################################## # expose wsgi hooks for convenience # ################################################## request.wsgi = LazyWSGI(environ, request, response) # ################################################## # load cookies # ################################################## if env.http_cookie: for single_cookie in env.http_cookie.split(';'): single_cookie = single_cookie.strip() if single_cookie: try: request.cookies.load(single_cookie) except Cookie.CookieError: pass # single invalid cookie ignore # ################################################## # try load session or create new session file # ################################################## if not env.web2py_disable_session: session.connect(request, response) # ################################################## # run controller # ################################################## if global_settings.debugging and app != "admin": import gluon.debug # activate the debugger gluon.debug.dbg.do_debug(mainpyfile=request.folder) serve_controller(request, response, session) except HTTP as hr: http_response = hr if static_file: return http_response.to(responder, env=env) if request.body: request.body.close() if hasattr(current, 'request'): # ################################################## # on success, try store session in database # ################################################## if not env.web2py_disable_session: session._try_store_in_db(request, response) # ################################################## # on success, commit database # ################################################## if response.do_not_commit is True: BaseAdapter.close_all_instances(None) elif response.custom_commit: BaseAdapter.close_all_instances(response.custom_commit) else: BaseAdapter.close_all_instances('commit') # ################################################## # if session not in db try store session on filesystem # this must be done after trying to commit database! # ################################################## if not env.web2py_disable_session: session._try_store_in_cookie_or_file(request, response) # Set header so client can distinguish component requests. 
if request.cid: http_response.headers.setdefault( 'web2py-component-content', 'replace') if request.ajax: if response.flash: http_response.headers['web2py-component-flash'] = \ urllib2.quote(xmlescape(response.flash).replace(b'\n', b'')) if response.js: http_response.headers['web2py-component-command'] = \ urllib2.quote(response.js.replace('\n', '')) # ################################################## # store cookies in headers # ################################################## session._fixup_before_save() http_response.cookies2headers(response.cookies) ticket = None except RestrictedError as e: if request.body: request.body.close() # ################################################## # on application error, rollback database # ################################################## # log tickets before rollback if not in DB if not request.tickets_db: ticket = e.log(request) or 'unknown' # rollback if response._custom_rollback: response._custom_rollback() else: BaseAdapter.close_all_instances('rollback') # if tickets in db, reconnect and store it in db if request.tickets_db: ticket = e.log(request) or 'unknown' http_response = \ HTTP(500, rwthread.routes.error_message_ticket % dict(ticket=ticket), web2py_error='ticket %s' % ticket) except: if request.body: request.body.close() # ################################################## # on application error, rollback database # ################################################## try: if response._custom_rollback: response._custom_rollback() else: BaseAdapter.close_all_instances('rollback') except: pass e = RestrictedError('Framework', '', '', locals()) ticket = e.log(request) or 'unrecoverable' http_response = \ HTTP(500, rwthread.routes.error_message_ticket % dict(ticket=ticket), web2py_error='ticket %s' % ticket) finally: if response and hasattr(response, 'session_file') \ and response.session_file: response.session_file.close() session._unlock(response) http_response, new_environ = try_rewrite_on_error( http_response, request, environ, ticket) if not http_response: return wsgibase(new_environ, responder) if global_settings.web2py_crontype == 'soft': newcron.softcron(global_settings.applications_parent).start() return http_response.to(responder, env=env) def save_password(password, port): """ Used by main() to save the password in the parameters_port.py file. """ password_file = abspath('parameters_%i.py' % port) if password == '<random>': # make up a new password chars = string.letters + string.digits password = ''.join([random.choice(chars) for _ in range(8)]) cpassword = CRYPT()(password)[0] print('******************* IMPORTANT!!! 
************************') print('your admin password is "%s"' % password) print('*********************************************************') elif password == '<recycle>': # reuse the current password if any if exists(password_file): return else: password = '' elif password.startswith('<pam_user:'): # use the pam password for specified user cpassword = password[1:-1] else: # use provided password cpassword = CRYPT()(password)[0] fp = open(password_file, 'w') if password: fp.write('password="%s"\n' % cpassword) else: fp.write('password=None\n') fp.close() def appfactory(wsgiapp=wsgibase, logfilename='httpserver.log', profiler_dir=None, profilerfilename=None): """ generates a wsgi application that does logging and profiling and calls wsgibase Args: wsgiapp: the base application logfilename: where to store apache-compatible requests log profiler_dir: where to store profile files """ if profilerfilename is not None: raise BaseException("Deprecated API") if profiler_dir: profiler_dir = abspath(profiler_dir) logger.warn('profiler is on. will use dir %s', profiler_dir) if not os.path.isdir(profiler_dir): try: os.makedirs(profiler_dir) except: raise BaseException("Can't create dir %s" % profiler_dir) filepath = pjoin(profiler_dir, 'wtest') try: filehandle = open( filepath, 'w' ) filehandle.close() os.unlink(filepath) except IOError: raise BaseException("Unable to write to dir %s" % profiler_dir) def app_with_logging(environ, responder): """ a wsgi app that does logging and profiling and calls wsgibase """ status_headers = [] def responder2(s, h): """ wsgi responder app """ status_headers.append(s) status_headers.append(h) return responder(s, h) time_in = time.time() ret = [0] if not profiler_dir: ret[0] = wsgiapp(environ, responder2) else: import cProfile prof = cProfile.Profile() prof.enable() ret[0] = wsgiapp(environ, responder2) prof.disable() destfile = pjoin(profiler_dir, "req_%s.prof" % web2py_uuid()) prof.dump_stats(destfile) try: line = '%s, %s, %s, %s, %s, %s, %f\n' % ( environ['REMOTE_ADDR'], datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S'), environ['REQUEST_METHOD'], environ['PATH_INFO'].replace(',', '%2C'), environ['SERVER_PROTOCOL'], (status_headers[0])[:3], time.time() - time_in, ) if not logfilename: sys.stdout.write(line) elif isinstance(logfilename, str): write_file(logfilename, line, 'a') else: logfilename.write(line) except: pass return ret[0] return app_with_logging
37.919598
117
0.52286
5d90667a56cb7c978d1072b2a27a14dbab5c4dfc
3,798
py
Python
agents/EWPublisherAgent.py
marc4gov/tokenspice2
1993383674f35b20e11e54606b3dac8e4c05c0f9
[ "Apache-2.0" ]
1
2021-01-12T08:06:21.000Z
2021-01-12T08:06:21.000Z
agents/EWPublisherAgent.py
marc4gov/tokenspice2
1993383674f35b20e11e54606b3dac8e4c05c0f9
[ "Apache-2.0" ]
null
null
null
agents/EWPublisherAgent.py
marc4gov/tokenspice2
1993383674f35b20e11e54606b3dac8e4c05c0f9
[ "Apache-2.0" ]
null
null
null
import logging
log = logging.getLogger('marketagents')

from enforce_typing import enforce_types  # type: ignore[import]
import random

from agents.PublisherAgent import PublisherAgent
from agents.PoolAgent import PoolAgent
from util import constants
from util.constants import POOL_WEIGHT_DT, POOL_WEIGHT_OCEAN
from web3engine import bfactory, bpool, datatoken, dtfactory, globaltokens
from web3tools.web3util import toBase18
37.98
80
0.662191
5d922c20991f9207f7464898983a068c11cac2a8
1,625
py
Python
lcls_orbit/__init__.py
slaclab/lcls-orbit
e2b8738c4af2dfed40fce4b898bf9b2a820d5f56
[ "BSD-3-Clause-LBNL" ]
null
null
null
lcls_orbit/__init__.py
slaclab/lcls-orbit
e2b8738c4af2dfed40fce4b898bf9b2a820d5f56
[ "BSD-3-Clause-LBNL" ]
null
null
null
lcls_orbit/__init__.py
slaclab/lcls-orbit
e2b8738c4af2dfed40fce4b898bf9b2a820d5f56
[ "BSD-3-Clause-LBNL" ]
1
2021-11-16T01:03:53.000Z
2021-11-16T01:03:53.000Z
import numpy as np

from . import _version
__version__ = _version.get_versions()['version']


HXR_COLORS = ("#000000", "#02004a", "#030069", "#04008f", "#0500b3", "#0700ff")
SXR_COLORS = ("#000000", "#330000", "#520000", "#850000", "#ad0000", "#ff0000")

HXR_AREAS = {
    "GUN" : [2017.911, 2018.712],
    "L0" : [2018.712, 2024.791],
    "DL1_1": [2024.791, 2031.992],
    "DL1_2": [2031.992, 2035.035],
    "L1": [2035.035, 2044.167],
    "BC1": [2044.167, 2059.733],
    "L2": [2059.733, 2410.698],
    "BC2": [2410.698, 2438.400],
    "L3": [2438.400, 3042.005],
    "CLTH_0": [3042.005, 3050.512],
    "CLTH_1": [3050.512, 3058.457],
    "CLTH_2": [3058.457, 3110.961],
    "BSYH_1": [3110.961, 3117.409],
    "BSYH_2": [3117.409, 3224.022],
    "LTUH": [3224.022, 3562.739],
    "UNDH": [3562.739, 3718.483],
    "DMPH_1": [3718.483, 3734.407],
    "DMPH_2": [3734.407, 3765.481]
}

HXR_AREAS = {np.mean(value): key for key, value in HXR_AREAS.items()}

SXR_AREAS = {
    "GUN" : [2017.911, 2017.911],
    "L0" : [2018.712, 2024.791],
    "DL1_1": [2024.791, 2031.992],
    "DL1_2": [2031.992, 2035.035],
    "L1": [2035.035, 2044.167],
    "BC1": [2044.167, 2059.733],
    "L2": [2059.733, 2410.698],
    "BC2": [2410.698, 2438.400],
    "L3": [2438.400, 3042.005],
    "CLTH_0": [3042.005, 3050.512],
    "CLTH_1": [3050.512, 3058.457],
    "CLTS": [3177.650, 3224.022],
    "BSYS": [3224.022, 3565.656],
    "LTUS": [3565.656, 3718.483],
    "UNDS": [3718.483, 3734.407],
    "DMPS_1": [3734.407, 3734.407],
    "DMPS_2": [3734.407, 3765.481]
}

SXR_AREAS = {np.mean(value): key for key, value in SXR_AREAS.items()}
30.660377
79
0.563077
5d930df4a535163668fcaae7a75a25d2de903db1
13,123
py
Python
tests/test_optimizers_v2/test_optimizers_v2.py
OverLordGoldDragon/dummy
5192b91c57721f37b906f670ad954a46f98bf5b5
[ "MIT" ]
null
null
null
tests/test_optimizers_v2/test_optimizers_v2.py
OverLordGoldDragon/dummy
5192b91c57721f37b906f670ad954a46f98bf5b5
[ "MIT" ]
null
null
null
tests/test_optimizers_v2/test_optimizers_v2.py
OverLordGoldDragon/dummy
5192b91c57721f37b906f670ad954a46f98bf5b5
[ "MIT" ]
null
null
null
import os
import tempfile

import numpy as np
import tensorflow as tf

from time import time
from termcolor import cprint
from unittest import TestCase

from .. import K
from .. import Input, Dense, GRU, Bidirectional, Embedding
from .. import Model, load_model
from .. import l2
from .. import maxnorm
from .. import Adam, Nadam, SGD
from .. import AdamW, NadamW, SGDW
from .. import get_weight_decays, fill_dict_in_order, reset_seeds, K_eval

print("TF version: %s" % tf.__version__)
tf_eager = bool(os.environ["TF_EAGER"] == "True")

if tf_eager:
    print("TF running eagerly")
else:
    tf.compat.v1.disable_eager_execution()
    print("TF running in graph mode")
44.334459
83
0.586375
5d93683480faf496a5e564f3b162607a289a4f92
21,601
py
Python
koku/reporting/migrations/0099_ocp_performance.py
Vasyka/koku
b5aa9ec41c3b0821e74afe9ff3a5ffaedb910614
[ "Apache-2.0" ]
2
2022-01-12T03:42:39.000Z
2022-01-12T03:42:40.000Z
koku/reporting/migrations/0099_ocp_performance.py
Vasyka/koku
b5aa9ec41c3b0821e74afe9ff3a5ffaedb910614
[ "Apache-2.0" ]
null
null
null
koku/reporting/migrations/0099_ocp_performance.py
Vasyka/koku
b5aa9ec41c3b0821e74afe9ff3a5ffaedb910614
[ "Apache-2.0" ]
1
2021-07-21T09:33:59.000Z
2021-07-21T09:33:59.000Z
# Generated by Django 2.2.10 on 2020-02-18 12:51
import django.contrib.postgres.indexes
from django.db import migrations
from django.db import models
51.800959
173
0.734781
5d942ae26a14855b18361770889fe0b68867154b
1,433
py
Python
sympy/tensor/tests/test_functions.py
iamabhishek0/sympy
c461bd1ff9d178d1012b04fd0bf37ee3abb02cdd
[ "BSD-3-Clause" ]
8,323
2015-01-02T15:51:43.000Z
2022-03-31T13:13:19.000Z
sympy/tensor/tests/test_functions.py
iamabhishek0/sympy
c461bd1ff9d178d1012b04fd0bf37ee3abb02cdd
[ "BSD-3-Clause" ]
15,102
2015-01-01T01:33:17.000Z
2022-03-31T22:53:13.000Z
sympy/tensor/tests/test_functions.py
iamabhishek0/sympy
c461bd1ff9d178d1012b04fd0bf37ee3abb02cdd
[ "BSD-3-Clause" ]
4,490
2015-01-01T17:48:07.000Z
2022-03-31T17:24:05.000Z
from sympy.tensor.functions import TensorProduct
from sympy import MatrixSymbol, Matrix, Array
from sympy.abc import x, y, z
from sympy.abc import i, j, k, l

A = MatrixSymbol("A", 3, 3)
B = MatrixSymbol("B", 3, 3)
C = MatrixSymbol("C", 3, 3)
25.589286
74
0.567341
5d94582a86f1cb5da7910dcf3af0df5fef4be108
898
py
Python
app/views.py
Kgermando/sem
c76e97e1d526d4e92a925adb6bceee426f999655
[ "Apache-2.0" ]
null
null
null
app/views.py
Kgermando/sem
c76e97e1d526d4e92a925adb6bceee426f999655
[ "Apache-2.0" ]
null
null
null
app/views.py
Kgermando/sem
c76e97e1d526d4e92a925adb6bceee426f999655
[ "Apache-2.0" ]
null
null
null
from django.shortcuts import render

# Create your views here.
26.411765
59
0.604677
5d9653a594b04879fa4c04919b21be2e555546ba
1,033
py
Python
webcrawler/crawler/spiders/baselensspider.py
HansZimmer5000/LensComparison
e4d9b68211604c4569c4ca9b1e1b4fce2a8c1ea8
[ "Apache-2.0" ]
null
null
null
webcrawler/crawler/spiders/baselensspider.py
HansZimmer5000/LensComparison
e4d9b68211604c4569c4ca9b1e1b4fce2a8c1ea8
[ "Apache-2.0" ]
null
null
null
webcrawler/crawler/spiders/baselensspider.py
HansZimmer5000/LensComparison
e4d9b68211604c4569c4ca9b1e1b4fce2a8c1ea8
[ "Apache-2.0" ]
null
null
null
# This module is about my webcrawler built with scrapy.
# It is a general web crawler, but the import and use of GhAdapter makes it useful for geizhals.de sites.

from abc import ABC, abstractmethod

import scrapy
27.184211
105
0.804453
5d968294f2b19ad9cf4d5cc885fbe7be0f0e3330
15,289
py
Python
byol_train.py
fjbriones/deep-text-recognition-benchmark
c85d12aa56495fe221656bac4c8cb159a28456b1
[ "Apache-2.0" ]
null
null
null
byol_train.py
fjbriones/deep-text-recognition-benchmark
c85d12aa56495fe221656bac4c8cb159a28456b1
[ "Apache-2.0" ]
null
null
null
byol_train.py
fjbriones/deep-text-recognition-benchmark
c85d12aa56495fe221656bac4c8cb159a28456b1
[ "Apache-2.0" ]
null
null
null
import os import sys import time import random import string import argparse import torch import torch.backends.cudnn as cudnn import torch.nn.init as init import torch.optim as optim import torch.utils.data import numpy as np from utils import CTCLabelConverter, CTCLabelConverterForBaiduWarpctc, AttnLabelConverter, Averager from simclr_dataset import hierarchical_dataset, AlignCollate, Batch_Balanced_Dataset from simclr_model import FeaturesModel as Model from test import validation from byol_pytorch import BYOL from imgaug import augmenters as iaa import imgaug as ia from tqdm import tqdm import matplotlib.pyplot as plt device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') def train(opt): """ dataset preparation """ if not opt.data_filtering_off: print('Filtering the images containing characters which are not in opt.character') print('Filtering the images whose label is longer than opt.batch_max_length') # see https://github.com/clovaai/deep-text-recognition-benchmark/blob/6593928855fb7abb999a99f428b3e4477d4ae356/dataset.py#L130 opt.select_data = opt.select_data.split('-') opt.batch_ratio = opt.batch_ratio.split('-') train_dataset = Batch_Balanced_Dataset(opt) log = open(f'./saved_models/{opt.exp_name}/log_dataset.txt', 'a') ia.seed(1) image_transforms = iaa.Sequential([iaa.SomeOf((1, 5), [iaa.LinearContrast((0.5, 1.0)), iaa.GaussianBlur((0.5, 1.5)), iaa.Crop(percent=((0, 0.4),(0, 0),(0, 0.4),(0, 0.0)), keep_size=True), iaa.Crop(percent=((0, 0.0),(0, 0.02),(0, 0),(0, 0.02)), keep_size=True), iaa.Sharpen(alpha=(0.0, 0.5), lightness=(0.0, 0.5)), iaa.PiecewiseAffine(scale=(0.02, 0.03), mode='edge'), iaa.PerspectiveTransform(scale=(0.01, 0.02))], random_order=True)]) AlignCollate_valid = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD, image_transforms=image_transforms) valid_dataset, valid_dataset_log = hierarchical_dataset(root=opt.valid_data, opt=opt) valid_loader = torch.utils.data.DataLoader( valid_dataset, batch_size=opt.batch_size, shuffle=True, # 'True' to check training progress with validation function. num_workers=int(opt.workers), collate_fn=AlignCollate_valid, pin_memory=True) log.write(valid_dataset_log) print('-' * 80) log.write('-' * 80 + '\n') log.close() if opt.rgb: opt.input_channel = 3 model = Model(opt) print('model input parameters', opt.imgH, opt.imgW, opt.num_fiducial, opt.input_channel, opt.output_channel, opt.hidden_size, opt.batch_max_length, opt.Transformation, opt.FeatureExtraction, opt.SequenceModeling) # weight initialization for name, param in model.named_parameters(): if 'localization_fc2' in name: print(f'Skip {name} as it is already initialized') continue try: if 'bias' in name: init.constant_(param, 0.0) elif 'weight' in name: init.kaiming_normal_(param) except Exception as e: # for batchnorm. 
if 'weight' in name: param.data.fill_(1) continue # data parallel for multi-GPU model = torch.nn.DataParallel(model).to(device) model.train() if opt.saved_model != '': print(f'loading pretrained model from {opt.saved_model}') if opt.FT: model.load_state_dict(torch.load(opt.saved_model), strict=False) else: model.load_state_dict(torch.load(opt.saved_model)) print("Model:") print(model) image_transforms = iaa.Sequential([iaa.SomeOf((1, 5), [iaa.LinearContrast((0.5, 1.0)), iaa.GaussianBlur((0.5, 1.5)), iaa.Crop(percent=((0, 0.4),(0, 0),(0, 0.4),(0, 0.0)), keep_size=True), iaa.Crop(percent=((0, 0.0),(0, 0.02),(0, 0),(0, 0.02)), keep_size=True), iaa.Sharpen(alpha=(0.0, 0.5), lightness=(0.0, 0.5)), iaa.PiecewiseAffine(scale=(0.02, 0.03), mode='edge'), iaa.PerspectiveTransform(scale=(0.01, 0.02))], random_order=True)]) byol_learner = BYOL( model, image_size=(32,100), hidden_layer=-1, channels=1, augment_fn=image_transforms, augmented=True) print(byol_learner) # filter that only require gradient decent filtered_parameters = [] params_num = [] for p in filter(lambda p: p.requires_grad, byol_learner.parameters()): filtered_parameters.append(p) params_num.append(np.prod(p.size())) print('Trainable params num : ', sum(params_num)) # setup optimizer if opt.optimizer == 'adam': optimizer = optim.Adam(filtered_parameters, lr=opt.lr, betas=(opt.beta1, 0.999)) elif opt.optimizer == 'adadelta': optimizer = optim.Adadelta(filtered_parameters, lr=opt.lr, rho=opt.rho, eps=opt.eps, weight_decay=opt.weight_decay) elif opt.optimizer == 'sgd': optimizer = optim.SGD(filtered_parameters, lr=opt.lr, momentum=opt.momentum, weight_decay=opt.weight_decay, nesterov=opt.nesterov) else: raise Exception('Unknown optimizer') print("Optimizer:") print(optimizer) """ final options """ # print(opt) with open(f'./saved_models/{opt.exp_name}/opt.txt', 'a') as opt_file: opt_log = '------------ Options -------------\n' args = vars(opt) for k, v in args.items(): opt_log += f'{str(k)}: {str(v)}\n' opt_log += '---------------------------------------\n' print(opt_log) opt_file.write(opt_log) """ start training """ start_iter = 0 if opt.saved_model != '': try: start_iter = int(opt.saved_model.split('_')[-1].split('.')[0]) print(f'continue to train, start_iter: {start_iter}') except: pass #LR Scheduler: scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[int(0.6*opt.num_iter), int(0.8*opt.num_iter)], last_epoch=start_iter-1, gamma=0.1) best_loss = None iteration = start_iter print(device) loss_avg = Averager() valid_loss_avg = Averager() # kl_loss_avg = Averager() # kl_loss = torch.nn.KLDivLoss() epoch = 0 while(True): # train part for i in tqdm(range(opt.valInterval)): image_tensors, _ = train_dataset.get_batch() image = image_tensors.to(device) optimizer.zero_grad() loss = byol_learner(image) loss.backward() if opt.grad_clip: torch.nn.utils.clip_grad_norm_(byol_learner.parameters(), opt.grad_clip) optimizer.step() scheduler.step() byol_learner.update_moving_average() loss_avg.add(loss) if iteration==0: print("Epoch {:06d} Loss: {:.04f}".format(iteration, loss_avg.val())) iteration += 1 byol_learner.eval() model.eval() with torch.no_grad(): for image_tensors, _ in valid_loader: image = image_tensors.to(device) val_loss = byol_learner(image) valid_loss_avg.add(val_loss) # features = model(image) # features = features.view(-1, 26, features.shape[1]) # kl_div = kl_loss(features[:int(features.shape[0]/2)], features[int(features.shape[0]/2):]) # kl_loss_avg.add(kl_div) model.train() byol_learner.train() with 
open(f'./saved_models/{opt.exp_name}/log_train.txt', 'a') as log: log.write("Iteration {:06d} Loss: {:.06f} Val loss: {:06f}".format(iteration, loss_avg.val(), valid_loss_avg.val()) + '\n') print("Iteration {:06d} Loss: {:.06f} Val loss: {:06f}".format(iteration, loss_avg.val(), valid_loss_avg.val())) if best_loss is None: best_loss = valid_loss_avg.val() torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth') elif best_loss > valid_loss_avg.val(): best_loss = valid_loss_avg.val() torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth') scheduler.step() loss_avg.reset() valid_loss_avg.reset() if epoch % 5 == 0: torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth') if (iteration + 1) >= opt.num_iter: print('end the training') torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth') sys.exit() epoch +=1 if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--exp_name', help='Where to store logs and models') parser.add_argument('--train_data', required=True, help='path to training dataset') parser.add_argument('--valid_data', required=True, help='path to validation dataset') parser.add_argument('--manualSeed', type=int, default=1111, help='for random seed setting') parser.add_argument('--workers', type=int, help='number of data loading workers', default=4) parser.add_argument('--batch_size', type=int, default=192, help='input batch size') parser.add_argument('--num_iter', type=int, default=300000, help='number of iterations to train for') parser.add_argument('--valInterval', type=int, default=2000, help='Interval between each validation') parser.add_argument('--saved_model', default='', help="path to model to continue training") parser.add_argument('--FT', action='store_true', help='whether to do fine-tuning') parser.add_argument('--optimizer', type=str, choices=['adam', 'adadelta', 'sgd'], help="Optimizer") parser.add_argument('--lr', type=float, default=1, help='learning rate, default=1.0 for Adadelta') parser.add_argument('--beta1', type=float, default=0.9, help='beta1 for adam. default=0.9') parser.add_argument('--rho', type=float, default=0.95, help='decay rate rho for Adadelta. default=0.95') parser.add_argument('--eps', type=float, default=1e-8, help='eps for Adadelta. default=1e-8') parser.add_argument('--nesterov', action='store_true', help='Use Nesterov momentum for SGD') parser.add_argument('--momentum', type=float, default=0.9, help='Momentum for SGD') parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping value. 
default=5') parser.add_argument('--baiduCTC', action='store_true', help='for data_filtering_off mode') """ Data processing """ parser.add_argument('--select_data', type=str, default='MJ-ST', help='select training data (default is MJ-ST, which means MJ and ST used as training data)') parser.add_argument('--batch_ratio', type=str, default='0.5-0.5', help='assign ratio for each selected data in the batch') parser.add_argument('--total_data_usage_ratio', type=str, default='1.0', help='total data usage ratio, this ratio is multiplied to total number of data.') parser.add_argument('--batch_max_length', type=int, default=25, help='maximum-label-length') parser.add_argument('--imgH', type=int, default=32, help='the height of the input image') parser.add_argument('--imgW', type=int, default=100, help='the width of the input image') parser.add_argument('--rgb', action='store_true', help='use rgb input') parser.add_argument('--character', type=str, default='0123456789abcdefghijklmnopqrstuvwxyz', help='character label') parser.add_argument('--sensitive', action='store_true', help='for sensitive character mode') parser.add_argument('--PAD', action='store_true', help='whether to keep ratio then pad for image resize') parser.add_argument('--data_filtering_off', action='store_true', help='for data_filtering_off mode') """ Model Architecture """ parser.add_argument('--Transformation', type=str, required=True, help='Transformation stage. None|TPS') parser.add_argument('--FeatureExtraction', type=str, required=True, help='FeatureExtraction stage. VGG|RCNN|ResNet') parser.add_argument('--SequenceModeling', type=str, required=True, help='SequenceModeling stage. None|BiLSTM') parser.add_argument('--num_fiducial', type=int, default=20, help='number of fiducial points of TPS-STN') parser.add_argument('--input_channel', type=int, default=1, help='the number of input channel of Feature extractor') parser.add_argument('--output_channel', type=int, default=512, help='the number of output channel of Feature extractor') parser.add_argument('--hidden_size', type=int, default=256, help='the size of the LSTM hidden state') parser.add_argument('--weight_decay', type=float, default=10e-4, help='Weight decay') parser.add_argument('--FinalLayer', action='store_true', help='Use a nonlinear projection head during training') parser.add_argument('--final_feature', type=int, default=256, help='the size of the output of the final layer') opt = parser.parse_args() if not opt.exp_name: opt.exp_name = f'{opt.Transformation}-{opt.FeatureExtraction}-{opt.SequenceModeling}-BYOL' opt.exp_name += f'-Seed{opt.manualSeed}' # print(opt.exp_name) os.makedirs(f'./saved_models/{opt.exp_name}', exist_ok=True) """ vocab / character number configuration """ if opt.sensitive: # opt.character += 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' opt.character = string.printable[:-6] # same with ASTER setting (use 94 char). 
""" Seed and GPU setting """ # print("Random Seed: ", opt.manualSeed) random.seed(opt.manualSeed) np.random.seed(opt.manualSeed) torch.manual_seed(opt.manualSeed) torch.cuda.manual_seed(opt.manualSeed) cudnn.benchmark = True cudnn.deterministic = True opt.num_gpu = torch.cuda.device_count() # print('device count', opt.num_gpu) if opt.num_gpu > 1: print('------ Use multi-GPU setting ------') print('if you stuck too long time with multi-GPU setting, try to set --workers 0') # check multi-GPU issue https://github.com/clovaai/deep-text-recognition-benchmark/issues/1 opt.workers = opt.workers * opt.num_gpu opt.batch_size = opt.batch_size * opt.num_gpu """ previous version print('To equlize batch stats to 1-GPU setting, the batch_size is multiplied with num_gpu and multiplied batch_size is ', opt.batch_size) opt.batch_size = opt.batch_size * opt.num_gpu print('To equalize the number of epochs to 1-GPU setting, num_iter is divided with num_gpu by default.') If you dont care about it, just commnet out these line.) opt.num_iter = int(opt.num_iter / opt.num_gpu) """ train(opt)
45.502976
152
0.636013
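The training script in the record above updates the online encoder by backpropagation and then calls `byol_learner.update_moving_average()` so a target encoder tracks it. As a rough illustration of what that step does, here is a minimal exponential-moving-average update written against plain `torch.nn` modules; the names `online`, `target`, and the fixed `tau` value are illustrative assumptions, not taken from the script.

```python
import copy
import torch
import torch.nn as nn

@torch.no_grad()
def ema_update(online_net: nn.Module, target_net: nn.Module, tau: float = 0.996) -> None:
    """Blend target parameters toward the online parameters (BYOL-style EMA)."""
    for p_online, p_target in zip(online_net.parameters(), target_net.parameters()):
        p_target.mul_(tau).add_(p_online, alpha=1.0 - tau)

# Usage sketch: the target starts as a frozen copy of the online network.
online = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 4))
target = copy.deepcopy(online)
for p in target.parameters():
    p.requires_grad_(False)

ema_update(online, target)  # called once per optimizer step, like update_moving_average()
```

In BYOL the averaging factor is usually annealed toward 1.0 over training; a constant value keeps the sketch short.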
5d96fee6a1d130e8653363f3a24275073276610b
1,496
py
Python
app/__init__.py
dulin/tornado-test
8ceeb9f2b50b4cd0f18baa9149140721feec1925
[ "MIT" ]
null
null
null
app/__init__.py
dulin/tornado-test
8ceeb9f2b50b4cd0f18baa9149140721feec1925
[ "MIT" ]
null
null
null
app/__init__.py
dulin/tornado-test
8ceeb9f2b50b4cd0f18baa9149140721feec1925
[ "MIT" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- # -*- mode: python -*- import aiopg import psycopg2 import tornado.locks from tornado.options import define, options from tornado.web import Application from app.application import Application define('port', default=8080, help="listening port") define('bind_address', default="", help="bind address") define("db_host", default="127.0.0.1", help="database host") define("db_port", default=5432, help="database port") define("db_database", default="tornado", help="database name") define("db_user", default="tornado", help="database user") define("db_password", default="tornado", help="database password")
32.521739
81
0.675802
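The module in the record above only declares its settings with `tornado.options.define`; nothing reads them until the command line (or a config file) is parsed. A small sketch of how an entry point might consume those options — the handler and `main` function here are illustrative and not part of the original package.

```python
import tornado.ioloop
import tornado.web
from tornado.options import define, options, parse_command_line

define("port", default=8080, help="listening port")
define("bind_address", default="", help="bind address")

class MainHandler(tornado.web.RequestHandler):
    def get(self):
        self.write("ok")

def main():
    parse_command_line()  # e.g. python app.py --port=9000
    app = tornado.web.Application([(r"/", MainHandler)])
    app.listen(options.port, address=options.bind_address)
    tornado.ioloop.IOLoop.current().start()

if __name__ == "__main__":
    main()
```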
5d9879f28b9c13e35ac740f4cb5632764b5c35dd
2,426
py
Python
pipescaler/core/stage.py
KarlTDebiec/PipeScaler
b990ece8f3dd2c3506c226ed871871997fc57beb
[ "BSD-3-Clause" ]
1
2022-02-07T03:47:53.000Z
2022-02-07T03:47:53.000Z
pipescaler/core/stage.py
KarlTDebiec/PipeScaler
b990ece8f3dd2c3506c226ed871871997fc57beb
[ "BSD-3-Clause" ]
49
2022-01-17T15:16:22.000Z
2022-03-28T03:00:39.000Z
pipescaler/core/stage.py
KarlTDebiec/PipeScaler
b990ece8f3dd2c3506c226ed871871997fc57beb
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python # pipescaler/core/stage.py # # Copyright (C) 2020-2021 Karl T Debiec # All rights reserved. # # This software may be modified and distributed under the terms of the # BSD license. from __future__ import annotations from abc import ABC, abstractmethod from importlib.util import module_from_spec, spec_from_file_location from typing import Any, List, Optional from pipescaler.common import validate_input_path
27.568182
83
0.633965
5d99e63583440bf3da1b852644d47a0c0ec5d4a3
349
py
Python
src/project/api/rankings/urls.py
jSkrod/djangae-react-browser-games-app
28c5064f0a126021afb08b195839305aba6b35a2
[ "CC-BY-4.0", "MIT" ]
null
null
null
src/project/api/rankings/urls.py
jSkrod/djangae-react-browser-games-app
28c5064f0a126021afb08b195839305aba6b35a2
[ "CC-BY-4.0", "MIT" ]
null
null
null
src/project/api/rankings/urls.py
jSkrod/djangae-react-browser-games-app
28c5064f0a126021afb08b195839305aba6b35a2
[ "CC-BY-4.0", "MIT" ]
null
null
null
from django.conf.urls import url, include from project.api.rankings.api import AddRanking, AddScore, GetScoresUser, GetScoresGame urlpatterns = [ url(r'add_ranking$', AddRanking.as_view()), url(r'add_score$', AddScore.as_view()), url(r'get_scores_game$', GetScoresGame.as_view()), url(r'get_scores_user$', GetScoresUser.as_view()) ]
38.777778
87
0.739255
5d9a08394431a2356f36800cd3badfa0bed3c07f
7,730
py
Python
qiskit_metal/qlibrary/lumped/cap_n_interdigital.py
wdczdj/qiskit-metal
c77805f66da60021ef8d10d668715c1dc2ebcd1d
[ "Apache-2.0" ]
null
null
null
qiskit_metal/qlibrary/lumped/cap_n_interdigital.py
wdczdj/qiskit-metal
c77805f66da60021ef8d10d668715c1dc2ebcd1d
[ "Apache-2.0" ]
null
null
null
qiskit_metal/qlibrary/lumped/cap_n_interdigital.py
wdczdj/qiskit-metal
c77805f66da60021ef8d10d668715c1dc2ebcd1d
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- # This code is part of Qiskit. # # (C) Copyright IBM 2017, 2021. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. from qiskit_metal import draw, Dict from qiskit_metal.qlibrary.core import QComponent import numpy as np
41.336898
100
0.51022
5d9a69d3f7389d018e9e0d4577b31c493762c8e2
4,895
py
Python
ThirdParty/protobuf-registry/python/protobufs/services/feature/actions/get_flags_pb2.py
getcircle/luno-ios
d18260abb537496d86cf607c170dd5e91c406f0f
[ "MIT" ]
null
null
null
ThirdParty/protobuf-registry/python/protobufs/services/feature/actions/get_flags_pb2.py
getcircle/luno-ios
d18260abb537496d86cf607c170dd5e91c406f0f
[ "MIT" ]
null
null
null
ThirdParty/protobuf-registry/python/protobufs/services/feature/actions/get_flags_pb2.py
getcircle/luno-ios
d18260abb537496d86cf607c170dd5e91c406f0f
[ "MIT" ]
null
null
null
# Generated by the protocol buffer compiler. DO NOT EDIT! # source: protobufs/services/feature/actions/get_flags.proto from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='protobufs/services/feature/actions/get_flags.proto', package='services.feature.actions.get_flags', syntax='proto3', serialized_pb=b'\n2protobufs/services/feature/actions/get_flags.proto\x12\"services.feature.actions.get_flags\"\x0b\n\tRequestV1\"\x84\x01\n\nResponseV1\x12H\n\x05\x66lags\x18\x01 \x03(\x0b\x32\x39.services.feature.actions.get_flags.ResponseV1.FlagsEntry\x1a,\n\nFlagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x08:\x02\x38\x01\x62\x06proto3' ) _sym_db.RegisterFileDescriptor(DESCRIPTOR) _REQUESTV1 = _descriptor.Descriptor( name='RequestV1', full_name='services.feature.actions.get_flags.RequestV1', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=90, serialized_end=101, ) _RESPONSEV1_FLAGSENTRY = _descriptor.Descriptor( name='FlagsEntry', full_name='services.feature.actions.get_flags.ResponseV1.FlagsEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='services.feature.actions.get_flags.ResponseV1.FlagsEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='value', full_name='services.feature.actions.get_flags.ResponseV1.FlagsEntry.value', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), b'8\001'), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=192, serialized_end=236, ) _RESPONSEV1 = _descriptor.Descriptor( name='ResponseV1', full_name='services.feature.actions.get_flags.ResponseV1', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='flags', full_name='services.feature.actions.get_flags.ResponseV1.flags', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[_RESPONSEV1_FLAGSENTRY, ], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=104, serialized_end=236, ) _RESPONSEV1_FLAGSENTRY.containing_type = _RESPONSEV1 _RESPONSEV1.fields_by_name['flags'].message_type = _RESPONSEV1_FLAGSENTRY DESCRIPTOR.message_types_by_name['RequestV1'] = _REQUESTV1 DESCRIPTOR.message_types_by_name['ResponseV1'] = _RESPONSEV1 RequestV1 = _reflection.GeneratedProtocolMessageType('RequestV1', (_message.Message,), dict( DESCRIPTOR = 
_REQUESTV1, __module__ = 'protobufs.services.feature.actions.get_flags_pb2' # @@protoc_insertion_point(class_scope:services.feature.actions.get_flags.RequestV1) )) _sym_db.RegisterMessage(RequestV1) ResponseV1 = _reflection.GeneratedProtocolMessageType('ResponseV1', (_message.Message,), dict( FlagsEntry = _reflection.GeneratedProtocolMessageType('FlagsEntry', (_message.Message,), dict( DESCRIPTOR = _RESPONSEV1_FLAGSENTRY, __module__ = 'protobufs.services.feature.actions.get_flags_pb2' # @@protoc_insertion_point(class_scope:services.feature.actions.get_flags.ResponseV1.FlagsEntry) )) , DESCRIPTOR = _RESPONSEV1, __module__ = 'protobufs.services.feature.actions.get_flags_pb2' # @@protoc_insertion_point(class_scope:services.feature.actions.get_flags.ResponseV1) )) _sym_db.RegisterMessage(ResponseV1) _sym_db.RegisterMessage(ResponseV1.FlagsEntry) _RESPONSEV1_FLAGSENTRY.has_options = True _RESPONSEV1_FLAGSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), b'8\001') # @@protoc_insertion_point(module_scope)
32.852349
371
0.765475
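The generated descriptors in the record above encode a very small schema: an empty `RequestV1` and a `ResponseV1` whose `flags` field is a `map<string, bool>` (the nested `FlagsEntry` message carrying the `map_entry` option is how protobuf represents maps). Assuming the generated module imports cleanly under a protobuf version compatible with it, usage would look roughly like this; the flag names are invented for illustration.

```python
# Hypothetical usage of the generated classes; the flag names are made up.
from protobufs.services.feature.actions import get_flags_pb2

request = get_flags_pb2.RequestV1()          # no fields to set

response = get_flags_pb2.ResponseV1()
response.flags["new_dashboard"] = True       # map<string, bool> behaves like a dict
response.flags["beta_search"] = False

payload = response.SerializeToString()       # wire-format bytes
decoded = get_flags_pb2.ResponseV1.FromString(payload)
print(dict(decoded.flags))
```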
5d9cfb201fcd48e23406da7a37202a4d1d0051f3
1,758
py
Python
Medium/102_2.py
Hellofafar/Leetcode
7a459e9742958e63be8886874904e5ab2489411a
[ "CNRI-Python" ]
6
2017-09-25T18:05:50.000Z
2019-03-27T00:23:15.000Z
Medium/102_2.py
Hellofafar/Leetcode
7a459e9742958e63be8886874904e5ab2489411a
[ "CNRI-Python" ]
1
2017-10-29T12:04:41.000Z
2018-08-16T18:00:37.000Z
Medium/102_2.py
Hellofafar/Leetcode
7a459e9742958e63be8886874904e5ab2489411a
[ "CNRI-Python" ]
null
null
null
# ------------------------------ # Binary Tree Level Order Traversal # # Description: # Given a binary tree, return the level order traversal of its nodes' values. (ie, from # left to right, level by level). # # For example: # Given binary tree [3,9,20,null,null,15,7], # 3 # / \ # 9 20 # / \ # 15 7 # return its level order traversal as: # [ # [3], # [9,20], # [15,7] # ] # # Version: 2.0 # 11/11/19 by Jianfa # ------------------------------ # Definition for a binary tree node. # class TreeNode: # def __init__(self, x): # self.val = x # self.left = None # self.right = None # Used for testing if __name__ == "__main__": test = Solution() # ------------------------------ # Summary: # Similar BFS solution but use a little more spaces. # On 102.py, using list.pop(0) actually takes O(n) time because it needs to remap the index # of values. Use collections.deque instead. # # O(N) time O(N) space
26.636364
108
0.527873
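The summary in the record above prefers `collections.deque` to `list.pop(0)` because popping from the front of a list re-indexes the remaining elements and costs O(n) per pop. The original `Solution` class is not included in the excerpt, so the following is a sketch of the deque-based level-order traversal the notes describe rather than the author's exact code.

```python
from collections import deque

class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

def level_order(root):
    """Return node values level by level, e.g. [[3], [9, 20], [15, 7]]."""
    if not root:
        return []
    result, queue = [], deque([root])
    while queue:
        level = []
        for _ in range(len(queue)):      # consume exactly one level
            node = queue.popleft()       # O(1), unlike list.pop(0)
            level.append(node.val)
            if node.left:
                queue.append(node.left)
            if node.right:
                queue.append(node.right)
        result.append(level)
    return result
```

For the example tree [3,9,20,null,null,15,7] this returns [[3], [9, 20], [15, 7]], matching the expected output in the comments.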
5d9e54e4e20b43e465756409507c8caedb39d5b5
7,969
py
Python
mturk/comparison_among_different_models/sample_from_models_for_comparison.py
qiaone/GIF
2c551e844748c72395fc91fb080c7a2f9c8d5285
[ "MIT" ]
322
2020-08-28T22:23:09.000Z
2022-03-25T09:42:12.000Z
mturk/comparison_among_different_models/sample_from_models_for_comparison.py
qiaone/GIF
2c551e844748c72395fc91fb080c7a2f9c8d5285
[ "MIT" ]
25
2020-11-03T02:03:51.000Z
2022-03-18T13:06:42.000Z
mturk/comparison_among_different_models/sample_from_models_for_comparison.py
qiaone/GIF
2c551e844748c72395fc91fb080c7a2f9c8d5285
[ "MIT" ]
59
2020-08-28T23:32:08.000Z
2022-03-30T03:29:35.000Z
import sys sys.path.append('../../') import constants as cnst import os os.environ['PYTHONHASHSEED'] = '2' import tqdm from model.stg2_generator import StyledGenerator import numpy as np from my_utils.visualize_flame_overlay import OverLayViz from my_utils.flm_dynamic_fit_overlay import camera_ringnetpp from my_utils.generate_gif import generate_from_flame_sequence from my_utils.generic_utils import save_set_of_images from my_utils import compute_fid import constants from dataset_loaders import fast_image_reshape import torch from my_utils import generic_utils from my_utils.eye_centering import position_to_given_location # General settings save_images = True code_size = 236 use_inst_norm = True core_tensor_res = 4 resolution = 256 alpha = 1 step_max = int(np.log2(resolution) - 2) root_out_dir = f'{cnst.output_root}sample/' num_smpl_to_eval_on = 1000 use_styled_conv_stylegan2 = True flength = 5000 cam_t = np.array([0., 0., 0]) camera_params = camera_ringnetpp((512, 512), trans=cam_t, focal=flength) run_ids_1 = [29, ] # with sqrt(2) # run_ids_1 = [7, 24, 8, 3] # run_ids_1 = [7, 8, 3] settings_for_runs = \ {24: {'name': 'vector_cond', 'model_idx': '216000_1', 'normal_maps_as_cond': False, 'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': False}, 29: {'name': 'full_model', 'model_idx': '294000_1', 'normal_maps_as_cond': True, 'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': True}, 7: {'name': 'flm_rndr_tex_interp', 'model_idx': '051000_1', 'normal_maps_as_cond': False, 'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': False}, 3: {'name': 'norm_mp_tex_interp', 'model_idx': '203000_1', 'normal_maps_as_cond': True, 'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': False}, 8: {'name': 'norm_map_rend_flm_no_tex_interp', 'model_idx': '009000_1', 'normal_maps_as_cond': True, 'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': False},} overlay_visualizer = OverLayViz() # overlay_visualizer.setup_renderer(mesh_file=None) flm_params = np.zeros((num_smpl_to_eval_on, code_size)).astype('float32') fl_param_dict = np.load(cnst.all_flame_params_file, allow_pickle=True).item() for i, key in enumerate(fl_param_dict): flame_param = fl_param_dict[key] flame_param = np.hstack((flame_param['shape'], flame_param['exp'], flame_param['pose'], flame_param['cam'], flame_param['tex'], flame_param['lit'].flatten())) # tz = camera_params['f'][0] / (camera_params['c'][0] * flame_param[:, 156:157]) # flame_param[:, 156:159] = np.concatenate((flame_param[:, 157:], tz), axis=1) # import ipdb; ipdb.set_trace() flm_params[i, :] = flame_param.astype('float32') if i == num_smpl_to_eval_on - 1: break batch_size = 64 flame_decoder = overlay_visualizer.deca.flame.eval() for run_idx in run_ids_1: # import ipdb; ipdb.set_trace() generator_1 = torch.nn.DataParallel( StyledGenerator(embedding_vocab_size=69158, rendered_flame_ascondition=settings_for_runs[run_idx]['rendered_flame_as_condition'], normal_maps_as_cond=settings_for_runs[run_idx]['normal_maps_as_cond'], core_tensor_res=core_tensor_res, w_truncation_factor=1.0, apply_sqrt2_fac_in_eq_lin=settings_for_runs[run_idx]['apply_sqrt2_fac_in_eq_lin'], n_mlp=8)).cuda() model_idx = settings_for_runs[run_idx]['model_idx'] ckpt1 = torch.load(f'{cnst.output_root}checkpoint/{run_idx}/{model_idx}.model') generator_1.load_state_dict(ckpt1['generator_running']) generator_1 = generator_1.eval() # images = np.zeros((num_smpl_to_eval_on, 3, resolution, resolution)).astype('float32') pbar = tqdm.tqdm(range(0, 
num_smpl_to_eval_on, batch_size)) pbar.set_description('Generating_images') flame_mesh_imgs = None mdl_id = 'mdl2_' if settings_for_runs[run_idx]['name'] == 'full_model': mdl_id = 'mdl1_' for batch_idx in pbar: flm_batch = flm_params[batch_idx:batch_idx+batch_size, :] flm_batch = torch.from_numpy(flm_batch).cuda() flm_batch = position_to_given_location(flame_decoder, flm_batch) batch_size_true = flm_batch.shape[0] if settings_for_runs[run_idx]['normal_maps_as_cond'] or \ settings_for_runs[run_idx]['rendered_flame_as_condition']: cam = flm_batch[:, constants.DECA_IDX['cam'][0]:constants.DECA_IDX['cam'][1]:] shape = flm_batch[:, constants.INDICES['SHAPE'][0]:constants.INDICES['SHAPE'][1]] exp = flm_batch[:, constants.INDICES['EXP'][0]:constants.INDICES['EXP'][1]] pose = flm_batch[:, constants.INDICES['POSE'][0]:constants.INDICES['POSE'][1]] # import ipdb; ipdb.set_trace() light_code = \ flm_batch[:, constants.DECA_IDX['lit'][0]:constants.DECA_IDX['lit'][1]:].view((batch_size_true, 9, 3)) texture_code = flm_batch[:, constants.DECA_IDX['tex'][0]:constants.DECA_IDX['tex'][1]:] norma_map_img, _, _, _, rend_flm = \ overlay_visualizer.get_rendered_mesh(flame_params=(shape, exp, pose, light_code, texture_code), camera_params=cam) rend_flm = torch.clamp(rend_flm, 0, 1) * 2 - 1 norma_map_img = torch.clamp(norma_map_img, 0, 1) * 2 - 1 rend_flm = fast_image_reshape(rend_flm, height_out=256, width_out=256, mode='bilinear') norma_map_img = fast_image_reshape(norma_map_img, height_out=256, width_out=256, mode='bilinear') else: rend_flm = None norma_map_img = None gen_1_in = ge_gen_in(flm_batch, rend_flm, norma_map_img, settings_for_runs[run_idx]['normal_maps_as_cond'], settings_for_runs[run_idx]['rendered_flame_as_condition']) # torch.manual_seed(2) identity_embeddings = torch.randint(low=0, high=69158, size=(gen_1_in.shape[0], ), dtype=torch.long, device='cuda') mdl_1_gen_images = generic_utils.get_images_from_flame_params( flame_params=gen_1_in.cpu().numpy(), pose=None, model=generator_1, step=step_max, alpha=alpha, input_indices=identity_embeddings.cpu().numpy()) # import ipdb; ipdb.set_trace() images = torch.clamp(mdl_1_gen_images, -1, 1).cpu().numpy() flame_mesh_imgs = torch.clamp(rend_flm, -1, 1).cpu().numpy() save_path_current_id = os.path.join(root_out_dir, 'inter_model_comparison', settings_for_runs[run_idx]['name']) save_set_of_images(path=save_path_current_id, prefix=f'{mdl_id}_{batch_idx}', images=(images + 1) / 2, show_prog_bar=True) #save flam rndr save_path_current_id_flm_rndr = os.path.join(root_out_dir, 'inter_model_comparison', settings_for_runs[run_idx]['name']) save_set_of_images(path=save_path_current_id_flm_rndr, prefix=f'mesh_{batch_idx}', images=(flame_mesh_imgs + 1) / 2, show_prog_bar=True) # save_set_of_images(path=save_path_this_expt, prefix='mesh_', images=((norma_map_img + 1) / 2).cpu().numpy()) # save_set_of_images(path=save_path_this_expt, prefix='mdl1_', images=((mdl_1_gen_images + 1) / 2).cpu().numpy()) # save_set_of_images(path=save_path_this_expt, prefix='mdl2_', images=((mdl_2_gen_images + 1) / 2).cpu().numpy())
47.718563
119
0.676371
5d9ff4014ab10b4aacbc6a629a0aa9ded18d3d4a
895
py
Python
setup.py
philippWassibauer/django-activity-stream
766a372aea4803ef5fe051a5de16dde5b5efcc72
[ "BSD-3-Clause" ]
4
2015-05-21T04:28:43.000Z
2019-04-27T15:12:32.000Z
setup.py
philippWassibauer/django-activity-stream
766a372aea4803ef5fe051a5de16dde5b5efcc72
[ "BSD-3-Clause" ]
null
null
null
setup.py
philippWassibauer/django-activity-stream
766a372aea4803ef5fe051a5de16dde5b5efcc72
[ "BSD-3-Clause" ]
2
2018-02-10T22:31:07.000Z
2021-02-14T07:43:35.000Z
from distutils.core import setup

""" django-activity-stream installation script """

setup(
    name = 'activity_stream',
    description = 'generic activity feed system for users',
    author = 'Philipp Wassibauer',
    author_email = '[email protected]',
    url='http://github.com/philippWassibauer/django-activity-stream',
    download_url='http://github.com/philippWassibauer/django-activity-stream/tarball/master',
    license='MIT',
    version = __import__('activity_stream').__version__,
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
33.148148
93
0.660335
5da0ff4d3e7dbb3fe7c21095720798fb7df7ef6b
742
py
Python
02/selenium.02.py
study-machine-learning/dongheon.shin
6103ef9c73b162603bc39a27e4ecca0f1ac35e57
[ "MIT" ]
2
2017-09-24T02:29:48.000Z
2017-10-05T11:15:22.000Z
02/selenium.02.py
study-machine-learning/dongheon.shin
6103ef9c73b162603bc39a27e4ecca0f1ac35e57
[ "MIT" ]
null
null
null
02/selenium.02.py
study-machine-learning/dongheon.shin
6103ef9c73b162603bc39a27e4ecca0f1ac35e57
[ "MIT" ]
null
null
null
from selenium import webdriver username = "henlix" password = "my_password" browser = webdriver.PhantomJS() browser.implicitly_wait(5) url_login = "https://nid.naver.com/nidlogin.login" browser.get(url_login) el = browser.find_element_by_id("id") el.clear() el.send_keys(username) el = browser.find_element_by_id("pw") el.clear() el.send_keys(password) form = browser.find_element_by_css_selector("input.btn_global[type=submit]") form.submit() url_shopping_list = "https://order.pay.naver.com/home?tabMenu=SHOPPING" browser.get(url_shopping_list) products = browser.find_elements_by_css_selector(".p_info span") for product in products: print("- ", product.text) # PYTHONIOENCODING=utf-8:surrogateescape python3 selenium.02.py
22.484848
76
0.777628
5da246d54547ba7b297b610234129f3853586daf
343
py
Python
visualization/matplotlib/barwitherror.py
Licas/datascienceexamples
cbb1293dbae875cb3f166dbde00b2ab629a43ece
[ "MIT" ]
null
null
null
visualization/matplotlib/barwitherror.py
Licas/datascienceexamples
cbb1293dbae875cb3f166dbde00b2ab629a43ece
[ "MIT" ]
null
null
null
visualization/matplotlib/barwitherror.py
Licas/datascienceexamples
cbb1293dbae875cb3f166dbde00b2ab629a43ece
[ "MIT" ]
null
null
null
from matplotlib import pyplot as plt

drinks = ["cappuccino", "latte", "chai", "americano", "mocha", "espresso"]
ounces_of_milk = [6, 9, 4, 0, 9, 0]
error = [0.6, 0.9, 0.4, 0, 0.9, 0]

# yerr -> error[i] is the half-length of the symmetric (+/-) error bar drawn on bar[i]
plt.bar(range(len(drinks)), ounces_of_milk, yerr=error, capsize=15)
plt.show()
38.111111
79
0.667638
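The bar chart in the record above is drawn without tick labels, so it is hard to tell which bar belongs to which drink. A small, purely illustrative extension that attaches the drink names and a y-axis label (not part of the original file):

```python
from matplotlib import pyplot as plt

drinks = ["cappuccino", "latte", "chai", "americano", "mocha", "espresso"]
ounces_of_milk = [6, 9, 4, 0, 9, 0]
error = [0.6, 0.9, 0.4, 0, 0.9, 0]

ax = plt.subplot()
ax.bar(range(len(drinks)), ounces_of_milk, yerr=error, capsize=15)
ax.set_xticks(range(len(drinks)))
ax.set_xticklabels(drinks, rotation=30)
ax.set_ylabel("ounces of milk")
plt.show()
```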
5da2dcdc36e76038fe4e53bad3d9602bb03e2dea
38,961
py
Python
Packs/mnemonicMDR/Integrations/ArgusManagedDefence/ArgusManagedDefence.py
matan-xmcyber/content
7f02301c140b35956af3cd20cb8dfc64f34afb3e
[ "MIT" ]
null
null
null
Packs/mnemonicMDR/Integrations/ArgusManagedDefence/ArgusManagedDefence.py
matan-xmcyber/content
7f02301c140b35956af3cd20cb8dfc64f34afb3e
[ "MIT" ]
3
2019-12-13T13:27:20.000Z
2020-01-01T14:27:45.000Z
Packs/mnemonicMDR/Integrations/ArgusManagedDefence/ArgusManagedDefence.py
matan-xmcyber/content
7f02301c140b35956af3cd20cb8dfc64f34afb3e
[ "MIT" ]
null
null
null
import demistomock as demisto from CommonServerPython import * """ IMPORTS """ import json import urllib3 import dateparser import traceback from typing import Any, Dict, List, Union import logging from argus_api import session as argus_session from argus_api.api.currentuser.v1.user import get_current_user from argus_api.api.cases.v2.case import ( add_case_tag, add_comment, advanced_case_search, close_case, create_case, delete_case, delete_comment, download_attachment, edit_comment, get_attachment, get_case_metadata_by_id, list_case_attachments, list_case_tags, list_case_comments, remove_case_tag_by_id, remove_case_tag_by_key_value, update_case, ) from argus_api.api.events.v1 import get_event_by_path from argus_api.api.events.v1.case.case import get_events_for_case from argus_api.api.events.v1.aggregated import ( find_aggregated_events, list_aggregated_events, ) from argus_api.api.events.v1.payload import get_payload from argus_api.api.events.v1.pcap import get_pcap from argus_api.api.events.v1.nids import find_n_i_d_s_events, list_n_i_d_s_events from argus_api.api.pdns.v3.search import search_records from argus_api.api.reputation.v1.observation import ( fetch_observations_for_domain, fetch_observations_for_i_p, ) # Disable insecure warnings urllib3.disable_warnings() """ CONSTANTS """ DATE_FORMAT = "%Y-%m-%dT%H:%M:%SZ" PRETTY_DATE_FORMAT = "%b %d, %Y, %H:%M:%S" FETCH_TAG = demisto.params().get("fetch_tag") """ HELPER FUNCTIONS """ """ COMMAND FUNCTIONS """ """ MAIN FUNCTION """ """ ENTRY POINT """ if __name__ in ("__main__", "__builtin__", "builtins"): main()
35.711274
117
0.65273
5da38d402c0b885654d90358b5f682eddb296488
672
py
Python
03.py
SnowWolf75/aoc-2020
1745a6cf46dac097869e5af99194b710e78bed28
[ "Unlicense" ]
null
null
null
03.py
SnowWolf75/aoc-2020
1745a6cf46dac097869e5af99194b710e78bed28
[ "Unlicense" ]
null
null
null
03.py
SnowWolf75/aoc-2020
1745a6cf46dac097869e5af99194b710e78bed28
[ "Unlicense" ]
null
null
null
#!/usr/bin/env python3 import sys, os import unittest from lib.common import * filename = "inputs/2020_12_03_input.txt"
16.8
40
0.638393
5da38e943cdd95f554ae0517d32417a9a5d31b05
699
py
Python
scripts/examples/tools/capturebat.py
fortinet/ips-bph-framework
145e14cced2181f388ade07d78b4f0e9452143dd
[ "Apache-2.0" ]
21
2019-10-24T04:59:52.000Z
2021-05-11T12:47:17.000Z
scripts/examples/tools/capturebat.py
fortinet/ips-bph-framework
145e14cced2181f388ade07d78b4f0e9452143dd
[ "Apache-2.0" ]
null
null
null
scripts/examples/tools/capturebat.py
fortinet/ips-bph-framework
145e14cced2181f388ade07d78b4f0e9452143dd
[ "Apache-2.0" ]
9
2019-10-26T16:56:08.000Z
2021-03-15T14:10:21.000Z
# Tool Imports from bph.tools.windows.capturebat import BphCaptureBat as CaptureBat # Core Imports from bph.core.server.template import BphTemplateServer as TemplateServer from bph.core.sample import BphSample as Sample from bph.core.sample import BphLabFile as LabFile from bph.core.session import BphSession as Session session = Session(project_name='blackhat_arsenal_2019') session.start() templateserver = TemplateServer() templateserver.start() capturebat = CaptureBat() capturebat.cleanup() capturebat.execute() capturebat.start() capturebat.execute(delay=15) capturebat.stop() capturebat.execute() capturebat.collect() capturebat.execute() capturebat.files()
24.964286
73
0.786838
5da47e4e4410b3e8309f308ed349c9a9599c9032
2,225
py
Python
dymos/utils/test/test_hermite.py
kaushikponnapalli/dymos
3fba91d0fc2c0e8460717b1bec80774676287739
[ "Apache-2.0" ]
104
2018-09-08T16:52:27.000Z
2022-03-10T23:35:30.000Z
dymos/utils/test/test_hermite.py
kaushikponnapalli/dymos
3fba91d0fc2c0e8460717b1bec80774676287739
[ "Apache-2.0" ]
628
2018-06-27T20:32:59.000Z
2022-03-31T19:24:32.000Z
dymos/utils/test/test_hermite.py
kaushikponnapalli/dymos
3fba91d0fc2c0e8460717b1bec80774676287739
[ "Apache-2.0" ]
46
2018-06-27T20:54:07.000Z
2021-12-19T07:23:32.000Z
import unittest import numpy as np from numpy.testing import assert_almost_equal from dymos.utils.hermite import hermite_matrices if __name__ == '__main__': # pragma: no cover unittest.main()
31.785714
79
0.613483
5da53bca28edc1a3193db977a15e4f5897d0d909
2,569
py
Python
xl_auth/settings.py
libris/xl_auth
33d705c287d2ecd81920d37c3751d947cd52588c
[ "Apache-2.0" ]
7
2017-09-04T10:24:02.000Z
2019-12-02T13:12:30.000Z
xl_auth/settings.py
libris/xl_auth
33d705c287d2ecd81920d37c3751d947cd52588c
[ "Apache-2.0" ]
140
2017-09-06T07:02:18.000Z
2022-02-26T01:26:25.000Z
xl_auth/settings.py
libris/xl_auth
33d705c287d2ecd81920d37c3751d947cd52588c
[ "Apache-2.0" ]
2
2017-09-13T16:42:57.000Z
2018-02-15T15:32:40.000Z
# -*- coding: utf-8 -*- """Application configuration.""" from __future__ import absolute_import, division, print_function, unicode_literals import os from . import __author__, __name__, __version__
34.716216
82
0.696769
5da5c1523876b5ad6f15a38ad4bcfea7774fd3c9
3,083
py
Python
tests/mrp/test_mrp_auth.py
evanreichard/pyatv
d41bd749bbf8f8a9365e7fd36c1164543e334565
[ "MIT" ]
null
null
null
tests/mrp/test_mrp_auth.py
evanreichard/pyatv
d41bd749bbf8f8a9365e7fd36c1164543e334565
[ "MIT" ]
1
2020-06-13T15:14:47.000Z
2020-06-13T15:14:47.000Z
tests/mrp/test_mrp_auth.py
evanreichard/pyatv
d41bd749bbf8f8a9365e7fd36c1164543e334565
[ "MIT" ]
null
null
null
"""Functional authentication tests with fake MRP Apple TV.""" import inspect from aiohttp.test_utils import AioHTTPTestCase, unittest_run_loop import pyatv from pyatv import exceptions from pyatv.const import Protocol from pyatv.conf import MrpService, AppleTV from pyatv.mrp.server_auth import PIN_CODE, CLIENT_IDENTIFIER, CLIENT_CREDENTIALS from tests.fake_device import FakeAppleTV
32.114583
81
0.705157
5da69a858193b1623616f277374a6ced50dc8b34
352
py
Python
tests_app/tests/functional/key_constructor/bits/models.py
maryokhin/drf-extensions
8223db2bdddaf3cd99f951b2291210c5fd5b0e6f
[ "MIT" ]
1
2019-06-18T16:40:33.000Z
2019-06-18T16:40:33.000Z
tests_app/tests/functional/key_constructor/bits/models.py
maryokhin/drf-extensions
8223db2bdddaf3cd99f951b2291210c5fd5b0e6f
[ "MIT" ]
null
null
null
tests_app/tests/functional/key_constructor/bits/models.py
maryokhin/drf-extensions
8223db2bdddaf3cd99f951b2291210c5fd5b0e6f
[ "MIT" ]
1
2018-07-17T00:13:19.000Z
2018-07-17T00:13:19.000Z
# -*- coding: utf-8 -*- from django.db import models
22
60
0.713068
5da7c35c6f555424a35c54ce0dd94e20ac56d5b8
4,116
py
Python
ngraph_onnx/onnx_importer/utils/numeric_limits.py
cliveseldon/ngraph-onnx
a2d20afdc7acd5064e4717612ad372d864d03d3d
[ "Apache-2.0" ]
null
null
null
ngraph_onnx/onnx_importer/utils/numeric_limits.py
cliveseldon/ngraph-onnx
a2d20afdc7acd5064e4717612ad372d864d03d3d
[ "Apache-2.0" ]
null
null
null
ngraph_onnx/onnx_importer/utils/numeric_limits.py
cliveseldon/ngraph-onnx
a2d20afdc7acd5064e4717612ad372d864d03d3d
[ "Apache-2.0" ]
null
null
null
# ****************************************************************************** # Copyright 2018 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ****************************************************************************** from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import numpy as np import numbers from typing import Union
34.588235
94
0.640671
5da817172273224f419b42630ca0117dc06b1363
5,158
py
Python
curvpack/utils.py
AbhilashReddyM/curvpack
74351624ec9ec50ec4445c7be85a48a4eabb029a
[ "BSD-3-Clause" ]
8
2019-04-30T19:31:57.000Z
2022-02-25T14:50:56.000Z
curvpack/utils.py
AbhilashReddyM/curvpack
74351624ec9ec50ec4445c7be85a48a4eabb029a
[ "BSD-3-Clause" ]
1
2019-06-14T06:32:40.000Z
2019-06-14T18:26:01.000Z
curvpack/utils.py
AbhilashReddyM/curvpack
74351624ec9ec50ec4445c7be85a48a4eabb029a
[ "BSD-3-Clause" ]
3
2020-04-18T10:13:55.000Z
2022-02-02T03:53:04.000Z
import numpy as np # The first two functions are modified from MNE surface project. LIcense follows # This software is OSI Certified Open Source Software. OSI Certified is a certification mark of the Open Source Initiative. # # Copyright (c) 2011-2019, authors of MNE-Python. All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: # # Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # Neither the names of MNE-Python authors nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission. # # This software is provided by the copyright holders and contributors "as is" and any express or implied warranties, including, but not limited to, the implied warranties of merchantability and fitness for a particular purpose are disclaimed. In no event shall the copyright owner or contributors be liable for any direct, indirect, incidental, special, exemplary, or consequential damages (including, but not limited to, procurement of substitute goods or services; loss of use, data, or profits; or business interruption) however caused and on any theory of liability, whether in contract, strict liability, or tort (including negligence or otherwise) arising in any way out of the use of this software, even if advised of the possibility of such damage. def triangle_neighbors(tris, npts): """Efficiently compute vertex neighboring triangles. 
Returns the triangles in the 1-ring of a given vertex """ # this code replaces the following, but is faster (vectorized): # # this['neighbor_tri'] = [list() for _ in xrange(this['np'])] # for p in xrange(this['ntri']): # verts = this['tris'][p] # this['neighbor_tri'][verts[0]].append(p) # this['neighbor_tri'][verts[1]].append(p) # this['neighbor_tri'][verts[2]].append(p) # this['neighbor_tri'] = [np.array(nb, int) for nb in this['neighbor_tri']] # verts = tris.ravel() counts = np.bincount(verts, minlength=npts) reord = np.argsort(verts) tri_idx = np.unravel_index(reord, (len(tris), 3))[0] idx = np.cumsum(np.r_[0, counts]) # the sort below slows it down a bit, but is needed for equivalence neighbor_tri = np.array([np.sort(tri_idx[v1:v2]) for v1, v2 in zip(idx[:-1], idx[1:])]) return neighbor_tri def get_surf_neighbors(tris,neighbor_tri, k): """Get vertices of 1-ring """ verts = tris[neighbor_tri[k]] verts = np.setdiff1d(verts, [k], assume_unique=False) nneighbors = len(verts) return verts def GetVertexNormals(vertices,faces,FaceNormals,e0,e1,e2): """ INPUT: Vertices : vertices Faces : vertex connectivity FaceNormals : Outer Normal per face, having magnitude equal to area of face e0,e1,e2 : edge vectors OUTPUT: VertNormals : Unit normal at the vertex """ VertNormals =np.zeros(vertices.shape) #edge lengths de0=np.sqrt(e0[:,0]**2+e0[:,1]**2+e0[:,2]**2) de1=np.sqrt(e1[:,0]**2+e1[:,1]**2+e1[:,2]**2) de2=np.sqrt(e2[:,0]**2+e2[:,1]**2+e2[:,2]**2) L2=np.c_[de0**2,de1**2,de2**2] #Calculate weights according to N.Max [1999] for normals wfv1=FaceNormals/(L2[:,1]*L2[:,2])[:,np.newaxis] wfv2=FaceNormals/(L2[:,2]*L2[:,0])[:,np.newaxis] wfv3=FaceNormals/(L2[:,0]*L2[:,1])[:,np.newaxis] # #Calculate the weights according to MWA for normals # wfv1=FaceNormals*np.arcsin(2*Af/(de1*de2))[:,np.newaxis] # wfv2=FaceNormals*np.arcsin(2*Af/(de2*de0))[:,np.newaxis] # wfv3=FaceNormals*np.arcsin(2*Af/(de0*de1))[:,np.newaxis] verts=faces.T[0] for j in [0,1,2]: VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv1[:,j]) verts=faces.T[1] for j in [0,1,2]: VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv2[:,j]) verts=faces.T[2] for j in [0,1,2]: VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv3[:,j]) VertNormals=normr(VertNormals) return VertNormals def fastcross(x, y): """Compute cross product between list of 3D vectors Input x : Mx3 array y : Mx3 array Output z : Mx3 array Cross product of x and y. """ if max([x.shape[0], y.shape[0]]) >= 500: return np.c_[x[:, 1] * y[:, 2] - x[:, 2] * y[:, 1], x[:, 2] * y[:, 0] - x[:, 0] * y[:, 2], x[:, 0] * y[:, 1] - x[:, 1] * y[:, 0]] else: return np.cross(x, y) def normr(vec): """ Normalizes an array of vectors. e.g. to convert a np array of vectors to unit vectors """ return vec/np.sqrt((vec**2).sum(axis=1))[:,np.newaxis]
44.852174
757
0.65917
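`GetVertexNormals` in the record above expects, per its docstring, per-face outer normals whose magnitude equals the face area, plus the three edge vectors of each face. A small sketch of how those inputs might be assembled for a toy mesh, assuming the common convention e0 = v2 - v1, e1 = v0 - v2, e2 = v1 - v0 (the convention is not stated in the excerpt, so treat it as an assumption); the import path follows the repository layout shown in the metadata.

```python
import numpy as np
from curvpack.utils import GetVertexNormals, fastcross  # module path assumed from the repo layout

# Toy mesh: two triangles forming a unit square in the z=0 plane.
vertices = np.array([[0.0, 0.0, 0.0],
                     [1.0, 0.0, 0.0],
                     [1.0, 1.0, 0.0],
                     [0.0, 1.0, 0.0]])
faces = np.array([[0, 1, 2],
                  [0, 2, 3]])

v0, v1, v2 = (vertices[faces[:, i]] for i in range(3))
e0, e1, e2 = v2 - v1, v0 - v2, v1 - v0          # assumed edge convention

# Outer normal per face with magnitude equal to the face area.
face_normals = 0.5 * fastcross(e0, e1)

vert_normals = GetVertexNormals(vertices, faces, face_normals, e0, e1, e2)
print(vert_normals)   # for this flat mesh every vertex normal should be the unit +z vector
```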
5da84607f0ca3d7ead02486a100adef7e245823f
14,805
py
Python
tests/unit/core/streams/test_stream_zero.py
tethys-platform/tethys
c27daf5a832b05f9d771b04355001c331bc08766
[ "ECL-2.0", "Apache-2.0" ]
2
2020-05-20T19:03:14.000Z
2020-06-03T20:43:34.000Z
tests/unit/core/streams/test_stream_zero.py
tethys-platform/tethys
c27daf5a832b05f9d771b04355001c331bc08766
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
tests/unit/core/streams/test_stream_zero.py
tethys-platform/tethys
c27daf5a832b05f9d771b04355001c331bc08766
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
# Copyright 2020 Konstruktor, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import platform import time from unittest import mock from unittest.mock import patch, call from pytest import fixture from tethys.core.pipes.pipe_zero import ZeroPipe from tethys.core.sessions.sess_zero import ZeroSession from tethys.core.stations.station_zero import ZeroStation from tethys.core.streams.stream_zero import ZeroStream from tethys.core.transports.transport_zero import ZeroTransport # conn context def test_new_connection_context(self, stream): with stream.connection_context(): MockTransport.connect.assert_called_once_with(stream) MockTransport.disconnect.assert_not_called() MockTransport.disconnect.assert_called_once_with(stream) def test_old_connection_context(self, stream): MockTransport._connections[stream.id] = stream with stream.connection_context(): MockTransport.connect.assert_not_called() MockTransport.disconnect.assert_not_called() # heartbeat # open # close # read # write # ack # redirect # open/close context
30.779626
88
0.640662
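The two tests in the record above pin down a specific contract for `connection_context`: a fresh stream connects on entry and disconnects on exit, while a stream already registered in the transport's `_connections` does neither. The project's real implementation is not shown here, so the following is only a small stand-in illustrating that contract with `contextlib`.

```python
from contextlib import contextmanager

class FakeTransport:
    """Tiny stand-in with the attributes the tests above rely on."""
    def __init__(self):
        self._connections = {}
        self.calls = []

    def connect(self, stream):
        self._connections[stream.id] = stream
        self.calls.append(("connect", stream.id))

    def disconnect(self, stream):
        self._connections.pop(stream.id, None)
        self.calls.append(("disconnect", stream.id))

class Stream:
    def __init__(self, transport, stream_id="s1"):
        self.transport = transport
        self.id = stream_id

    @contextmanager
    def connection_context(self):
        already_connected = self.id in self.transport._connections
        if not already_connected:
            self.transport.connect(self)
        try:
            yield self
        finally:
            if not already_connected:
                self.transport.disconnect(self)

transport = FakeTransport()
with Stream(transport).connection_context():
    pass
assert transport.calls == [("connect", "s1"), ("disconnect", "s1")]
```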
5da8c5b2385d6f73170c02cb6de27d3641c827fa
6,653
py
Python
amd64-linux/lib/ppc64_simple_components.py
qiyancos/Simics-3.0.31
9bd52d5abad023ee87a37306382a338abf7885f1
[ "BSD-4-Clause", "FSFAP" ]
1
2020-06-15T10:41:18.000Z
2020-06-15T10:41:18.000Z
amd64-linux/lib/ppc64_simple_components.py
qiyancos/Simics-3.0.31
9bd52d5abad023ee87a37306382a338abf7885f1
[ "BSD-4-Clause", "FSFAP" ]
null
null
null
amd64-linux/lib/ppc64_simple_components.py
qiyancos/Simics-3.0.31
9bd52d5abad023ee87a37306382a338abf7885f1
[ "BSD-4-Clause", "FSFAP" ]
3
2020-08-10T10:25:02.000Z
2021-09-12T01:12:09.000Z
## Copyright 2005-2007 Virtutech AB ## ## The contents herein are Source Code which are a subset of Licensed ## Software pursuant to the terms of the Virtutech Simics Software ## License Agreement (the "Agreement"), and are being distributed under ## the Agreement. You should have received a copy of the Agreement with ## this Licensed Software; if not, please contact Virtutech for a copy ## of the Agreement prior to using this Licensed Software. ## ## By using this Source Code, you agree to be bound by all of the terms ## of the Agreement, and use of this Source Code is subject to the terms ## the Agreement. ## ## This Source Code and any derivatives thereof are provided on an "as ## is" basis. Virtutech makes no warranties with respect to the Source ## Code or any derivatives thereof and disclaims all implied warranties, ## including, without limitation, warranties of merchantability and ## fitness for a particular purpose and non-infringement. from sim_core import * from components import * import time # Generic Simple System for PPC64 Processors ppc64_simple_attributes = [ ['cpu_frequency', Sim_Attr_Required, 'f', 'Processor frequency in MHz.'], ['memory_megs', Sim_Attr_Required, 'i', 'The amount of RAM in megabytes.'], ['map_offset', Sim_Attr_Optional, 'i', 'Base address for device mappings. ' \ 'Offsets at 4 GB and above will not work'], ['time_of_day', Sim_Attr_Optional, 's', 'Date and time to initialize the OpenFirmware RTC to']]
37.587571
78
0.599579
5da9e73a716a3d83801c56b0312fd8f4d87f351c
385
py
Python
Front-end (Django)/course/migrations/0002_subject_number_of_questions.py
shadow0403bsr/AutomatedGradingSoftware
5031d22683a05f937615b3b8997152c285a2f930
[ "MIT" ]
null
null
null
Front-end (Django)/course/migrations/0002_subject_number_of_questions.py
shadow0403bsr/AutomatedGradingSoftware
5031d22683a05f937615b3b8997152c285a2f930
[ "MIT" ]
null
null
null
Front-end (Django)/course/migrations/0002_subject_number_of_questions.py
shadow0403bsr/AutomatedGradingSoftware
5031d22683a05f937615b3b8997152c285a2f930
[ "MIT" ]
null
null
null
# Generated by Django 3.0.1 on 2020-02-15 06:02 from django.db import migrations, models
20.263158
49
0.597403
5da9e91d7d69e5378260fe7c404a58e9aa312b9e
3,101
py
Python
cornflow/tests/unit/test_dags.py
pchtsp/corn
2811ad400f3f3681a159984eabf4fee1fc99b433
[ "MIT" ]
5
2021-11-24T02:43:22.000Z
2021-12-10T09:28:32.000Z
cornflow/tests/unit/test_dags.py
pchtsp/corn
2811ad400f3f3681a159984eabf4fee1fc99b433
[ "MIT" ]
125
2021-09-01T12:06:48.000Z
2022-03-30T11:32:57.000Z
cornflow/tests/unit/test_dags.py
pchtsp/corn
2811ad400f3f3681a159984eabf4fee1fc99b433
[ "MIT" ]
1
2022-03-23T17:57:59.000Z
2022-03-23T17:57:59.000Z
""" Unit test for the DAG endpoints """ # Import from libraries import json # Import from internal modules from cornflow.shared.const import EXEC_STATE_CORRECT, EXEC_STATE_MANUAL from cornflow.tests.const import ( DAG_URL, EXECUTION_URL_NORUN, CASE_PATH, INSTANCE_URL, ) from cornflow.tests.unit.test_executions import TestExecutionsDetailEndpointMock
27.936937
80
0.563689
5daa62bfbc58bf60d68725712bf46468f85577d3
10,673
py
Python
nets/resnet.py
xwshi/faster-rcnn-keras
bfd99e3d0e786ada75a212c007111364b2c86312
[ "MIT" ]
null
null
null
nets/resnet.py
xwshi/faster-rcnn-keras
bfd99e3d0e786ada75a212c007111364b2c86312
[ "MIT" ]
null
null
null
nets/resnet.py
xwshi/faster-rcnn-keras
bfd99e3d0e786ada75a212c007111364b2c86312
[ "MIT" ]
null
null
null
#-------------------------------------------------------------# # ResNet50 #-------------------------------------------------------------# import keras.backend as K from keras import backend as K from keras import initializers, layers, regularizers from keras.engine import InputSpec, Layer from keras.initializers import random_normal from keras.layers import (Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D)
45.417021
148
0.599082
5daa836b2adc23c7d1169d134b820b47732f82c0
1,309
py
Python
app/api/deps.py
congdh/fastapi-realworld
42c8630aedf594b69bc96a327b04dfe636a785fe
[ "MIT" ]
null
null
null
app/api/deps.py
congdh/fastapi-realworld
42c8630aedf594b69bc96a327b04dfe636a785fe
[ "MIT" ]
null
null
null
app/api/deps.py
congdh/fastapi-realworld
42c8630aedf594b69bc96a327b04dfe636a785fe
[ "MIT" ]
null
null
null
from typing import Generator from fastapi import Depends, HTTPException from fastapi.security import APIKeyHeader from sqlalchemy.orm import Session from starlette import status from app import crud, models from app.core import security from app.db.session import SessionLocal JWT_TOKEN_PREFIX = "Token" # noqa: S105
26.714286
81
0.694423
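The imports in the record above (`Generator`, `SessionLocal`, `Depends`) point at the usual FastAPI dependency pattern, but the module body is not included in the excerpt. The following is therefore a generic sketch of that pattern, not the project's actual code; the in-memory SQLite engine stands in for the project's `app.db.session.SessionLocal`.

```python
from typing import Generator

from fastapi import Depends, FastAPI
from sqlalchemy import create_engine
from sqlalchemy.orm import Session, sessionmaker

# Stand-in for app.db.session.SessionLocal from the excerpt (in-memory DB for the sketch).
engine = create_engine("sqlite://")
SessionLocal = sessionmaker(bind=engine)

app = FastAPI()

def get_db() -> Generator:
    """Yield a session per request and always close it afterwards."""
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()

@app.get("/health")
def health(db: Session = Depends(get_db)) -> dict:
    # The session exists only for the duration of this request.
    return {"status": "ok"}
```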
5daaea4f5cbe880e71d3bbf0f6ec12e332c717ab
2,343
py
Python
src/raiden_libs/contract_info.py
netcriptus/raiden-services
3955d91852c616f6ba0a3a979757edbd852b2c6d
[ "MIT" ]
13
2019-02-07T23:23:33.000Z
2021-07-03T16:00:53.000Z
src/raiden_libs/contract_info.py
netcriptus/raiden-services
3955d91852c616f6ba0a3a979757edbd852b2c6d
[ "MIT" ]
1,095
2019-01-21T09:30:57.000Z
2022-03-25T05:13:30.000Z
src/raiden_libs/contract_info.py
netcriptus/raiden-services
3955d91852c616f6ba0a3a979757edbd852b2c6d
[ "MIT" ]
18
2019-01-21T09:17:19.000Z
2022-02-23T15:53:17.000Z
import sys from typing import Dict, List, Tuple import structlog from eth_utils import to_canonical_address from raiden.utils.typing import Address, BlockNumber, ChainID, Optional from raiden_contracts.contract_manager import ( ContractDevEnvironment, ContractManager, contracts_precompiled_path, get_contracts_deployment_info, ) log = structlog.get_logger(__name__) CONTRACT_MANAGER = ContractManager(contracts_precompiled_path()) def get_contract_addresses_and_start_block( chain_id: ChainID, contracts: List[str], address_overwrites: Dict[str, Address], development_environment: ContractDevEnvironment = ContractDevEnvironment.DEMO, contracts_version: Optional[str] = None, ) -> Tuple[Dict[str, Address], BlockNumber]: """Returns contract addresses and start query block for a given chain and contracts version. The default contracts can be overwritten by the additional parameters. Args: chain_id: The chain id to look for deployed contracts. contracts: The list of contracts which should be considered address_overwrites: Dict of addresses which should be used instead of the ones in the requested deployment. contracts_version: The version of the contracts to use. Returns: A dictionary with the contract addresses and start block for the given information """ contract_data = get_contracts_deployment_info( chain_id=chain_id, version=contracts_version, development_environment=development_environment, ) if not contract_data: log.error( "No deployed contracts were found at the default registry", contracts_version=contracts_version, ) sys.exit(1) # Get deployed addresses for those contracts which have no overwrites addresses = { c: ( address_overwrites.get(c) or to_canonical_address(contract_data["contracts"][c]["address"]) ) for c in contracts } # Set start block to zero if any contract addresses are overwritten if any(address_overwrites.values()): start_block = BlockNumber(0) else: start_block = BlockNumber( max(0, min(contract_data["contracts"][c]["block_number"] for c in contracts)) ) return addresses, start_block
33.956522
96
0.714042
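A short, hedged usage sketch of `get_contract_addresses_and_start_block` from the record above. The chain id and the contract name are assumptions for illustration (they must match entries in the deployment data, otherwise the function exits or raises); with no address overwrites, the start block is taken from the deployment info, as the docstring describes.

```python
from raiden.utils.typing import ChainID
from raiden_libs.contract_info import get_contract_addresses_and_start_block

# "TokenNetworkRegistry" is assumed to exist in the deployment data for chain id 5.
addresses, start_block = get_contract_addresses_and_start_block(
    chain_id=ChainID(5),
    contracts=["TokenNetworkRegistry"],
    address_overwrites={},
)
print(addresses, start_block)
```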
5dab624f1bba960c93bdbcfc0dd2115a637b7aae
8,749
py
Python
meta_dataset/models/functional_classifiers.py
letyrodridc/meta-dataset
d868ea1c767cce46fa6723f6f77c29552754fcc9
[ "Apache-2.0" ]
null
null
null
meta_dataset/models/functional_classifiers.py
letyrodridc/meta-dataset
d868ea1c767cce46fa6723f6f77c29552754fcc9
[ "Apache-2.0" ]
null
null
null
meta_dataset/models/functional_classifiers.py
letyrodridc/meta-dataset
d868ea1c767cce46fa6723f6f77c29552754fcc9
[ "Apache-2.0" ]
null
null
null
# coding=utf-8 # Copyright 2022 The Meta-Dataset Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python2,python3 """Classifier-related code.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import gin.tf from meta_dataset.models import functional_backbones import tensorflow.compat.v1 as tf def linear_classifier_forward_pass(embeddings, w_fc, b_fc, cosine_classifier, cosine_logits_multiplier, use_weight_norm): """Passes embeddings through the linear layer defined by w_fc and b_fc. Args: embeddings: A Tensor of size [batch size, embedding dim]. w_fc: A Tensor of size [embedding dim, num outputs]. b_fc: Either None, or a Tensor of size [num outputs] or []. If cosine_classifier is False, it can not be None. cosine_classifier: A bool. If true, a cosine classifier is used which does not require the bias b_fc. cosine_logits_multiplier: A float. Only used if cosine_classifier is True, and multiplies the resulting logits. use_weight_norm: A bool. Whether weight norm was used. If so, then if using cosine classifier, normalize only the embeddings but not the weights. Returns: logits: A Tensor of size [batch size, num outputs]. """ if cosine_classifier: # Each column of the weight matrix may be interpreted as a class # representation (of the same dimenionality as the embedding space). The # logit for an embedding vector belonging to that class is the cosine # similarity between that embedding and that class representation. embeddings = tf.nn.l2_normalize(embeddings, axis=1, epsilon=1e-3) if not use_weight_norm: # Only normalize the weights if weight norm was not used. w_fc = tf.nn.l2_normalize(w_fc, axis=0, epsilon=1e-3) logits = tf.matmul(embeddings, w_fc) # Scale the logits as passing numbers in [-1, 1] to softmax is not very # expressive. logits *= cosine_logits_multiplier else: assert b_fc is not None logits = tf.matmul(embeddings, w_fc) + b_fc return logits
41.861244
80
0.686821
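The comments in the record above describe the cosine-classifier path: embeddings and (unless weight norm is used) the weight columns are L2-normalized, so each logit is a cosine similarity in [-1, 1], which is then scaled before the softmax. A NumPy rendering of that idea, independent of TensorFlow (the epsilon handling here differs slightly from `tf.nn.l2_normalize`):

```python
import numpy as np

rng = np.random.default_rng(0)
embeddings = rng.normal(size=(4, 8))     # [batch, embedding dim]
w_fc = rng.normal(size=(8, 5))           # [embedding dim, num classes]
scale = 10.0                             # plays the role of cosine_logits_multiplier

def l2_normalize(x, axis, eps=1e-3):
    return x / (np.linalg.norm(x, axis=axis, keepdims=True) + eps)

# Cosine-classifier logits: similarity in [-1, 1], then scaled so the softmax is expressive.
logits = scale * (l2_normalize(embeddings, axis=1) @ l2_normalize(w_fc, axis=0))
print(logits.shape)  # (4, 5)
```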
5dacac5c524e8494c9a0a1e27e5a00cc81bbbd7d
13,499
py
Python
app.py
Shrinidhi-C/Context-Based-Question-Answering
f2e0bbc03003aae65f4cabddecd5cd9fcdbfb333
[ "Apache-2.0" ]
16
2021-03-09T17:00:27.000Z
2022-01-07T15:49:46.000Z
app.py
Shrinidhi-C/Context-Based-Question-Answering
f2e0bbc03003aae65f4cabddecd5cd9fcdbfb333
[ "Apache-2.0" ]
1
2021-06-03T13:01:41.000Z
2021-06-03T13:01:41.000Z
app.py
Karthik-Bhaskar/Context-Based-Question-Answering
f2e0bbc03003aae65f4cabddecd5cd9fcdbfb333
[ "Apache-2.0" ]
7
2021-03-10T11:33:18.000Z
2022-01-07T17:48:17.000Z
import os import threading import shutil from datetime import timedelta, datetime from flask import Flask, render_template, request, session, jsonify, url_for, redirect from haystack.document_store.elasticsearch import * from haystack.preprocessor.utils import convert_files_to_dicts from haystack.preprocessor.cleaning import clean_wiki_text from haystack import Finder from haystack.retriever.sparse import ElasticsearchRetriever from haystack.reader.transformers import TransformersReader from elasticsearch import Elasticsearch es = ( Elasticsearch() ) # Replace with Elasticsearch(["http://elasticsearch:9200/"], verify_certs=True) to build docker image session_time = 60 # Session Timeout in Minutes app = Flask(__name__) app.secret_key = "cbqa_123" app.permanent_session_lifetime = timedelta(minutes=session_time) user_id = 0 # User ID to keep track w.r.t sessions and context data current_users = dict() # Used to store user id with time of login user_doc_store = dict() # Document store object of the user id user_settings = dict() # User settings for GPU and Pre-trained models choice # Handles pre-processing the context and uploads the pre-processed context to Elasticsearch # Each user is assigned with a separate Elasticsearch index starting with "user_{user_id}" # Documents & textual context are deleted from them temp folder named with user_id under users dir after uploading to Es # Handles setting up reader and retriever # Handles deletion of context data completely from the server after the session time ends and deletes user id from dict session_timer = threading.Thread(target=user_session_timer) session_timer.start() # Handles users w.r.t new session or already in session # Handles context documents uploads # Handles context added through the textbox # Provides extracted answers for the posted question # Handles GPU setting changes. # Handles pre-trained model choice setting changes. # Handles session timeout redirection # Handles removing of session identifier from session dict, This works only when app tab is open until session completes # Comment the below block in case of building a docker image or running on WSGI server like gunicorn if __name__ == "__main__": app.run(host="0.0.0.0")
38.132768
120
0.588414
5dad62563785343452980a6c164a9cfda04650c2
7,486
py
Python
timevortex/utils/filestorage.py
timevortexproject/timevortex
2bc1a50b255524af8582e6624dee280d64d3c9f3
[ "MIT" ]
null
null
null
timevortex/utils/filestorage.py
timevortexproject/timevortex
2bc1a50b255524af8582e6624dee280d64d3c9f3
[ "MIT" ]
null
null
null
timevortex/utils/filestorage.py
timevortexproject/timevortex
2bc1a50b255524af8582e6624dee280d64d3c9f3
[ "MIT" ]
null
null
null
#!/usr/bin/python3 # -*- coding: utf8 -*- # -*- Mode: Python; py-indent-offset: 4 -*- """File storage adapter for timevortex project""" import os from os import listdir, makedirs from os.path import isfile, join, exists from time import tzname from datetime import datetime import pytz import dateutil.parser from django.conf import settings from django.utils import timezone from timevortex.utils.globals import LOGGER, KEY_ERROR, KEY_SITE_ID, KEY_VARIABLE_ID, KEY_VALUE, KEY_DATE from timevortex.utils.globals import KEY_DST_TIMEZONE, KEY_NON_DST_TIMEZONE, SYSTEM_SITE_ID SETTINGS_FILE_STORAGE_FOLDER = "SETTINGS_FILE_STORAGE_FOLDER" SETTINGS_DEFAULT_FILE_STORAGE_FOLDER = "/tmp/data/" def get_lines_number(file_path): """Get lines number """ return sum(1 for line in open(file_path)) def get_series_per_file(site_folder, file_prefix): """Get series per file """ series = {} for filename in listdir(site_folder): is_file = isfile(join(site_folder, filename)) if is_file and file_prefix in filename: complete_filename = "%s/%s" % (site_folder, filename) with open(complete_filename, "r") as filed: temp_series = filed.readlines() for line in temp_series: array_line = line.split("\t") if len(array_line) >= 2: series[array_line[1]] = array_line[0] return series def get_last_file_name(site_folder, file_prefix): """Get last filename """ old_date = None last_filename = "" for new_filename in listdir(site_folder): is_file = isfile(join(site_folder, new_filename)) if is_file and file_prefix in new_filename: old_date, last_filename = update_last_file_name(file_prefix, old_date, last_filename, new_filename) return last_filename def update_last_file_name(file_prefix, old_date, last_filename, new_filename): """Update last file name """ try: new_date = new_filename.replace(file_prefix, "") new_date = datetime.strptime(new_date, "%Y-%m-%d") if old_date is None or new_date > old_date: return new_date, new_filename except ValueError: LOGGER.error("Not right file") return old_date, last_filename FILE_STORAGE_SPACE = FileStorage(getattr(settings, SETTINGS_FILE_STORAGE_FOLDER, SETTINGS_DEFAULT_FILE_STORAGE_FOLDER))
34.497696
119
0.612209
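The snippet above instantiates `FILE_STORAGE_SPACE = FileStorage(...)` but the `FileStorage` class itself was not captured. The helpers that were captured read tab-separated "value<TAB>date" lines from per-variable daily files inside a site folder, so a hypothetical minimal writer consistent with them might look like the following sketch (the file-naming scheme is an assumption, not the timevortex implementation):

# Hypothetical minimal FileStorage compatible with the captured helpers.
import os
from os import makedirs
from os.path import join, exists

class FileStorage(object):
    def __init__(self, folder_path):
        self.folder_path = folder_path
        if not exists(folder_path):
            makedirs(folder_path)

    def store_value(self, site_id, variable_id, value, date):
        """Append one "value<TAB>date" line to the site's daily series file."""
        site_folder = join(self.folder_path, site_id)
        if not exists(site_folder):
            makedirs(site_folder)
        filename = "%s.%s" % (variable_id, date[:10])  # assumed "<variable>.<YYYY-MM-DD>" naming
        with open(join(site_folder, filename), "a") as series_file:
            series_file.write("%s\t%s\n" % (value, date))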
5daed49fa4c053c06f93d18e081f06b652a982e8
4,078
py
Python
main_tg.py
olegush/quiz-bot
ae370d42f32c42b290a507924a801c63901d5148
[ "MIT" ]
null
null
null
main_tg.py
olegush/quiz-bot
ae370d42f32c42b290a507924a801c63901d5148
[ "MIT" ]
null
null
null
main_tg.py
olegush/quiz-bot
ae370d42f32c42b290a507924a801c63901d5148
[ "MIT" ]
null
null
null
import os import logging import logging.config from functools import partial from dotenv import load_dotenv from telegram import Bot, ReplyKeyboardMarkup, ReplyKeyboardRemove from telegram.ext import (Updater, CommandHandler, MessageHandler, RegexHandler, ConversationHandler, Filters) from redis import Redis from tg_logging import create_logger from quiz_tools import get_question_and_answer, format_answer, format_question QUESTION, ATTEMPT = range(2) if __name__ == '__main__': main()
31.612403
102
0.632908
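The quiz-bot record above calls `main()` without defining it. A minimal sketch of how the two-state conversation (QUESTION/ATTEMPT) could be wired with the imported python-telegram-bot classes is shown below; the callbacks are placeholders and the `TELEGRAM_TOKEN` environment variable is an assumption, not the original code:

# Sketch of the missing main() using the pre-v12 python-telegram-bot API imported above.
import os
from telegram.ext import (Updater, CommandHandler, MessageHandler,
                          ConversationHandler, Filters)

QUESTION, ATTEMPT = range(2)

def handle_start(bot, update):
    update.message.reply_text('New question? /ask')
    return QUESTION

def handle_question(bot, update):
    update.message.reply_text('Question sent (placeholder).')
    return ATTEMPT

def handle_attempt(bot, update):
    update.message.reply_text('Checking answer (placeholder).')
    return QUESTION

def main():
    updater = Updater(os.environ['TELEGRAM_TOKEN'])  # assumed token source
    dispatcher = updater.dispatcher
    quiz = ConversationHandler(
        entry_points=[CommandHandler('start', handle_start)],
        states={
            QUESTION: [CommandHandler('ask', handle_question)],
            ATTEMPT: [MessageHandler(Filters.text, handle_attempt)],
        },
        fallbacks=[CommandHandler('start', handle_start)],
    )
    dispatcher.add_handler(quiz)
    updater.start_polling()
    updater.idle()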
5daf4ad3d9f3b39d8355c443ca683a3b5708554c
3,082
py
Python
tf_fourier_features/fourier_features_mlp.py
titu1994/tf_fourier_features
3aead078ae79a278b9975e21f44560a7f51e3f31
[ "MIT" ]
37
2020-06-20T21:39:30.000Z
2021-11-08T09:31:22.000Z
tf_fourier_features/fourier_features_mlp.py
titu1994/tf_fourier_features
3aead078ae79a278b9975e21f44560a7f51e3f31
[ "MIT" ]
null
null
null
tf_fourier_features/fourier_features_mlp.py
titu1994/tf_fourier_features
3aead078ae79a278b9975e21f44560a7f51e3f31
[ "MIT" ]
5
2020-06-22T10:24:11.000Z
2021-09-10T10:40:08.000Z
import tensorflow as tf from typing import Optional from tf_fourier_features import fourier_features
48.15625
156
0.633679
5db0d4e1070be8121a1c33bca072f550967ebe82
6,969
py
Python
eek/spider.py
fusionbox/eek
8e962b7ad80c594a3498190fead016db826771e0
[ "BSD-2-Clause-FreeBSD" ]
5
2015-05-11T18:13:51.000Z
2021-07-17T04:53:27.000Z
eek/spider.py
fusionbox/eek
8e962b7ad80c594a3498190fead016db826771e0
[ "BSD-2-Clause-FreeBSD" ]
1
2015-03-06T20:32:14.000Z
2015-03-06T20:32:14.000Z
eek/spider.py
fusionbox/eek
8e962b7ad80c594a3498190fead016db826771e0
[ "BSD-2-Clause-FreeBSD" ]
2
2015-07-15T12:41:32.000Z
2015-10-12T21:40:14.000Z
import urlparse import csv import sys import re import collections import time import requests from eek import robotparser # this project's version from bs4 import BeautifulSoup try: import lxml except ImportError: HTML_PARSER = None else: HTML_PARSER = 'lxml' encoding_re = re.compile("charset\s*=\s*(\S+?)(;|$)") html_re = re.compile("text/html") headers = ['url', 'title', 'description', 'keywords', 'allow', 'disallow', 'noindex', 'meta robots', 'canonical', 'referer', 'status'] def encoding_from_content_type(content_type): """ Extracts the charset from a Content-Type header. >>> encoding_from_content_type('text/html; charset=utf-8') 'utf-8' >>> encoding_from_content_type('text/html') >>> """ if not content_type: return None match = encoding_re.search(content_type) return match and match.group(1) or None def lremove(string, prefix): """ Remove a prefix from a string, if it exists. >>> lremove('www.foo.com', 'www.') 'foo.com' >>> lremove('foo.com', 'www.') 'foo.com' """ if string.startswith(prefix): return string[len(prefix):] else: return string def beautify(response): content_type = response.headers.get('content-type') if content_type: if not html_re.search(content_type): raise NotHtmlException encoding = encoding_from_content_type(content_type) else: encoding = None try: return BeautifulSoup( response.content, features=HTML_PARSER, from_encoding=encoding, ) except UnicodeEncodeError: raise NotHtmlException def get_links(response): if 300 <= response.status_code < 400 and response.headers['location']: # redirect yield urlparse.urldefrag( urlparse.urljoin(response.url, response.headers['location'], False) )[0] try: html = beautify(response) for i in html.find_all('a', href=True): yield urlparse.urldefrag(urlparse.urljoin(response.url, i['href'], False))[0] except NotHtmlException: pass
29.529661
99
0.605252
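The spider record above raises and catches `NotHtmlException`, but that class (and the crawl loop driving `get_links`) fall outside the captured snippet. A minimal stand-in and a small breadth-first crawl skeleton could look like this sketch; it is not the eek implementation, and `get_links` is passed in rather than imported to keep the example self-contained:

# Sketch: missing exception class plus a simple BFS crawl over get_links().
import collections
import requests

class NotHtmlException(Exception):
    """Raised when a response is not an HTML document."""

def crawl(start_url, get_links, max_pages=100):
    """Breadth-first crawl that yields (url, status_code) for each fetched page."""
    seen = set([start_url])
    queue = collections.deque([start_url])
    while queue and len(seen) <= max_pages:
        url = queue.popleft()
        response = requests.get(url)
        yield url, response.status_code
        for link in get_links(response):
            if link not in seen:
                seen.add(link)
                queue.append(link)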
5db204f5af9206eceaf400a510a5e3d05316e861
2,647
py
Python
observations/r/zea_mays.py
hajime9652/observations
2c8b1ac31025938cb17762e540f2f592e302d5de
[ "Apache-2.0" ]
199
2017-07-24T01:34:27.000Z
2022-01-29T00:50:55.000Z
observations/r/zea_mays.py
hajime9652/observations
2c8b1ac31025938cb17762e540f2f592e302d5de
[ "Apache-2.0" ]
46
2017-09-05T19:27:20.000Z
2019-01-07T09:47:26.000Z
observations/r/zea_mays.py
hajime9652/observations
2c8b1ac31025938cb17762e540f2f592e302d5de
[ "Apache-2.0" ]
45
2017-07-26T00:10:44.000Z
2022-03-16T20:44:59.000Z
# -*- coding: utf-8 -*- from __future__ import absolute_import from __future__ import division from __future__ import print_function import csv import numpy as np import os import sys from observations.util import maybe_download_and_extract def zea_mays(path): """Darwin's Heights of Cross- and Self-fertilized Zea May Pairs Darwin (1876) studied the growth of pairs of zea may (aka corn) seedlings, one produced by cross-fertilization and the other produced by self-fertilization, but otherwise grown under identical conditions. His goal was to demonstrate the greater vigour of the cross-fertilized plants. The data recorded are the final height (inches, to the nearest 1/8th) of the plants in each pair. In the *Design of Experiments*, Fisher (1935) used these data to illustrate a paired t-test (well, a one-sample test on the mean difference, `cross - self`). Later in the book (section 21), he used this data to illustrate an early example of a non-parametric permutation test, treating each paired difference as having (randomly) either a positive or negative sign. A data frame with 15 observations on the following 4 variables. `pair` pair number, a numeric vector `pot` pot, a factor with levels `1` `2` `3` `4` `cross` height of cross fertilized plant, a numeric vector `self` height of self fertilized plant, a numeric vector `diff` `cross - self` for each pair Darwin, C. (1876). *The Effect of Cross- and Self-fertilization in the Vegetable Kingdom*, 2nd Ed. London: John Murray. Andrews, D. and Herzberg, A. (1985) *Data: a collection of problems from many fields for the student and research worker*. New York: Springer. Data retrieved from: `https://www.stat.cmu.edu/StatDat/` Args: path: str. Path to directory which either stores file or otherwise file will be downloaded and extracted there. Filename is `zea_mays.csv`. Returns: Tuple of np.ndarray `x_train` with 15 rows and 5 columns and dictionary `metadata` of column headers (feature names). """ import pandas as pd path = os.path.expanduser(path) filename = 'zea_mays.csv' if not os.path.exists(os.path.join(path, filename)): url = 'http://dustintran.com/data/r/HistData/ZeaMays.csv' maybe_download_and_extract(path, url, save_file_name='zea_mays.csv', resume=False) data = pd.read_csv(os.path.join(path, filename), index_col=0, parse_dates=True) x_train = data.values metadata = {'columns': data.columns} return x_train, metadata
32.679012
74
0.705327
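The docstring above describes Fisher's paired analysis of the cross- and self-fertilized heights. A short usage sketch of the loader, assuming the column order given in that docstring (pair, pot, cross, self, diff) and a local data directory, would be:

# Usage sketch: load the Zea Mays pairs and compute the mean paired difference.
import numpy as np
from observations.r.zea_mays import zea_mays

x_train, metadata = zea_mays('~/data')      # downloads on first call
cross = x_train[:, 2].astype(float)          # assumed column order from the docstring
self_ = x_train[:, 3].astype(float)
diff = cross - self_
print(metadata['columns'])
print('mean paired difference (cross - self):', diff.mean())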
5db3dae6928f712e933165c643051e536448b1fb
359
py
Python
ois_api_client/v3_0/dto/Lines.py
peterkulik/ois_api_client
51dabcc9f920f89982c4419bb058f5a88193cee0
[ "MIT" ]
7
2020-10-22T08:15:29.000Z
2022-01-27T07:59:39.000Z
ois_api_client/v3_0/dto/Lines.py
peterkulik/ois_api_client
51dabcc9f920f89982c4419bb058f5a88193cee0
[ "MIT" ]
null
null
null
ois_api_client/v3_0/dto/Lines.py
peterkulik/ois_api_client
51dabcc9f920f89982c4419bb058f5a88193cee0
[ "MIT" ]
null
null
null
from typing import List from dataclasses import dataclass from .Line import Line
22.4375
117
0.743733
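The `Lines.py` record above stops after its imports, so the DTO it presumably defines is missing. A rough sketch of what such a wrapper dataclass over `Line` items could look like follows; the field name and the stand-in `Line` definition are assumptions, not the ois_api_client source:

# Sketch of a Lines DTO consistent with the imports above.
from typing import List
from dataclasses import dataclass, field

@dataclass
class Line:                     # stand-in for the imported Line DTO
    line_number: int = 0

@dataclass
class Lines:
    line: List[Line] = field(default_factory=list)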
5db3e4eb84b3d9fc5559048f6229e0e36618f2f4
1,545
py
Python
parsing_documents.py
leylafenix/belief-network-irs
9094e4cde738bd93ed1747dc958b5acb0e0fa684
[ "MIT" ]
null
null
null
parsing_documents.py
leylafenix/belief-network-irs
9094e4cde738bd93ed1747dc958b5acb0e0fa684
[ "MIT" ]
null
null
null
parsing_documents.py
leylafenix/belief-network-irs
9094e4cde738bd93ed1747dc958b5acb0e0fa684
[ "MIT" ]
null
null
null
__author__ = 'Jose Gabriel'

import os
import pprint


if __name__ == '__main__':
    s = "adi" + os.sep + "ADI.ALL"
    out_folder = "test_index"
    try:
        # figure out how to check whether a folder or file exists in Python
        os.mkdir(out_folder)
    except FileExistsError:
        pass
    parse_all(s, out_folder)
26.186441
79
0.514563
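`parse_all` is called in the record above but not captured. Assuming `ADI.ALL` follows the classic SMART collection layout (documents separated by ".I <id>" lines), a minimal version could split the corpus into one text file per document; this is a sketch of that assumption, not the project's parser:

# Sketch: split a SMART-style .ALL file into per-document text files.
import os

def parse_all(source_path, out_folder):
    """Write <out_folder>/<doc_id>.txt for every ".I <id>" block in source_path."""
    with open(source_path) as source:
        doc_id, lines = None, []
        for line in source:
            if line.startswith('.I '):
                if doc_id is not None:
                    _write_doc(out_folder, doc_id, lines)
                doc_id, lines = line.split()[1], []
            else:
                lines.append(line)
        if doc_id is not None:
            _write_doc(out_folder, doc_id, lines)

def _write_doc(out_folder, doc_id, lines):
    with open(os.path.join(out_folder, doc_id + '.txt'), 'w') as out:
        out.writelines(lines)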
5db477aeb7079f4d5d6c834082c575659d075877
161
py
Python
groups/admin.py
caktus/rapidsms-groups
eda6f30cdc60cf57833f1d37ba08e59454da8987
[ "BSD-3-Clause" ]
1
2016-06-19T07:34:19.000Z
2016-06-19T07:34:19.000Z
groups/admin.py
caktus/rapidsms-groups
eda6f30cdc60cf57833f1d37ba08e59454da8987
[ "BSD-3-Clause" ]
null
null
null
groups/admin.py
caktus/rapidsms-groups
eda6f30cdc60cf57833f1d37ba08e59454da8987
[ "BSD-3-Clause" ]
1
2016-08-31T05:02:03.000Z
2016-08-31T05:02:03.000Z
#!/usr/bin/env python # vim: ai ts=4 sts=4 et sw=4 encoding=utf-8 from django.contrib import admin from groups.models import Group admin.site.register(Group)
17.888889
43
0.751553
5db48c51cdf7033a6dcea32b1d26408dd6d2dbc0
1,891
py
Python
avod/datasets/kitti/kitti_aug_test.py
Ascend-Huawei/AVOD
ea62372517bbfa9d4020bc5ab2739ee182c63c56
[ "BSD-2-Clause" ]
null
null
null
avod/datasets/kitti/kitti_aug_test.py
Ascend-Huawei/AVOD
ea62372517bbfa9d4020bc5ab2739ee182c63c56
[ "BSD-2-Clause" ]
null
null
null
avod/datasets/kitti/kitti_aug_test.py
Ascend-Huawei/AVOD
ea62372517bbfa9d4020bc5ab2739ee182c63c56
[ "BSD-2-Clause" ]
null
null
null
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ # Copyright 2021 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from npu_bridge.npu_init import * import unittest import numpy as np from avod.datasets.kitti import kitti_aug
33.767857
78
0.670016
5db5a1d6975c3995d47712215ed2acd01be9b8ad
208
py
Python
application/model/_base.py
keysona/blog
783e0bdbed1e4d8ec9857ee609b39c9dfb958670
[ "MIT" ]
null
null
null
application/model/_base.py
keysona/blog
783e0bdbed1e4d8ec9857ee609b39c9dfb958670
[ "MIT" ]
null
null
null
application/model/_base.py
keysona/blog
783e0bdbed1e4d8ec9857ee609b39c9dfb958670
[ "MIT" ]
null
null
null
from flask_sqlalchemy import SQLAlchemy, Model


# class BaseModel(Model):

#     def save(self):
#         db.session.add(self)
#         db.session.commit()

#     def delete(self):
#         db.session.delete(self)
#         db.session.commit()


db = SQLAlchemy()
16
46
0.673077
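The commented-out helpers in the record above hint at a custom declarative base. Flask-SQLAlchemy supports exactly that through the `model_class` argument; a sketch of how the record's idea could be enabled (the record itself just instantiates the default `SQLAlchemy()`) is:

# Sketch: wiring a custom base model with save/delete helpers into Flask-SQLAlchemy.
from flask_sqlalchemy import SQLAlchemy, Model

class BaseModel(Model):
    def save(self):
        db.session.add(self)
        db.session.commit()

    def delete(self):
        db.session.delete(self)
        db.session.commit()

db = SQLAlchemy(model_class=BaseModel)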
5db6309005811059a99432092acd5ed62236c399
23,282
py
Python
kvmagent/kvmagent/plugins/prometheus.py
qianfei11/zstack-utility
e791bc6b6ae3a74e202f6fce84bde498c715aee8
[ "Apache-2.0" ]
null
null
null
kvmagent/kvmagent/plugins/prometheus.py
qianfei11/zstack-utility
e791bc6b6ae3a74e202f6fce84bde498c715aee8
[ "Apache-2.0" ]
null
null
null
kvmagent/kvmagent/plugins/prometheus.py
qianfei11/zstack-utility
e791bc6b6ae3a74e202f6fce84bde498c715aee8
[ "Apache-2.0" ]
null
null
null
import os.path import threading import typing from prometheus_client import start_http_server from prometheus_client.core import GaugeMetricFamily, REGISTRY from kvmagent import kvmagent from zstacklib.utils import http from zstacklib.utils import jsonobject from zstacklib.utils import lock from zstacklib.utils import lvm from zstacklib.utils import misc from zstacklib.utils import thread from zstacklib.utils.bash import * from zstacklib.utils.ip import get_nic_supported_max_speed logger = log.get_logger(__name__) collector_dict = {} # type: Dict[str, threading.Thread] latest_collect_result = {} collectResultLock = threading.RLock() QEMU_CMD = kvmagent.get_qemu_path().split("/")[-1] def convert_raid_state_to_int(state): """ :type state: str """ state = state.lower() if state == "optimal": return 0 elif state == "degraded": return 5 else: return 100 def convert_disk_state_to_int(state): """ :type state: str """ state = state.lower() if "online" in state or "jobd" in state: return 0 elif "rebuild" in state: return 5 elif "failed" in state: return 10 elif "unconfigured" in state: return 15 else: return 100 collect_node_disk_wwid_last_time = None collect_node_disk_wwid_last_result = None kvmagent.register_prometheus_collector(collect_host_network_statistics) kvmagent.register_prometheus_collector(collect_host_capacity_statistics) kvmagent.register_prometheus_collector(collect_vm_statistics) kvmagent.register_prometheus_collector(collect_node_disk_wwid) if misc.isMiniHost(): kvmagent.register_prometheus_collector(collect_lvm_capacity_statistics) kvmagent.register_prometheus_collector(collect_raid_state) kvmagent.register_prometheus_collector(collect_equipment_state)
36.492163
157
0.583541
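The prometheus plugin record above registers several collector functions whose bodies were not captured. The shape implied by `register_prometheus_collector` is a callable returning a list of metric families; a generic sketch with a made-up metric (not ZStack code) is:

# Sketch: shape of one kvmagent-style Prometheus collector.
from prometheus_client.core import GaugeMetricFamily

def collect_example_statistics():
    metric = GaugeMetricFamily(
        'example_host_metric',
        'Illustrative gauge emitted by a kvmagent-style collector',
        labels=['host'],
    )
    metric.add_metric(['localhost'], 1.0)
    return [metric]

# registration would then mirror the record above:
# kvmagent.register_prometheus_collector(collect_example_statistics)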
5db634e6fdac00dd4f3ce30f7fe7fbdaae184512
6,924
py
Python
recipes/libstudxml/all/conanfile.py
rockandsalt/conan-center-index
d739adcec3e4dd4c250eff559ceb738e420673dd
[ "MIT" ]
562
2019-09-04T12:23:43.000Z
2022-03-29T16:41:43.000Z
recipes/libstudxml/all/conanfile.py
rockandsalt/conan-center-index
d739adcec3e4dd4c250eff559ceb738e420673dd
[ "MIT" ]
9,799
2019-09-04T12:02:11.000Z
2022-03-31T23:55:45.000Z
recipes/libstudxml/all/conanfile.py
rockandsalt/conan-center-index
d739adcec3e4dd4c250eff559ceb738e420673dd
[ "MIT" ]
1,126
2019-09-04T11:57:46.000Z
2022-03-31T16:43:38.000Z
from conans import ConanFile, AutoToolsBuildEnvironment, MSBuild, tools from conans.errors import ConanInvalidConfiguration import os import shutil required_conan_version = ">=1.33.0"
41.963636
124
0.633449
5db692792275a8f7aff10d7781c4cef5d88900db
6,263
py
Python
dataset/WebCariA.py
KeleiHe/DAAN
04e153c55f8d63e824adbee828e524573afe6a1c
[ "Apache-2.0" ]
9
2020-07-24T03:32:17.000Z
2022-03-25T12:01:24.000Z
dataset/WebCariA.py
KeleiHe/DAAN
04e153c55f8d63e824adbee828e524573afe6a1c
[ "Apache-2.0" ]
1
2020-10-14T17:22:43.000Z
2020-10-14T17:22:43.000Z
dataset/WebCariA.py
KeleiHe/DAAN
04e153c55f8d63e824adbee828e524573afe6a1c
[ "Apache-2.0" ]
null
null
null
# Copyright 2020 Wen Ji & Kelei He ([email protected]) # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os
38.900621
113
0.440683
5db7554e0b55f70192702d11bfb40d5a1d8f2459
4,081
py
Python
moonworm/crawler/state/json_state.py
zomglings/moonworm
930e60199629b6a04adecc7f9ff9450e51bb4640
[ "Apache-2.0" ]
10
2021-12-08T22:35:58.000Z
2022-03-30T07:38:12.000Z
moonworm/crawler/state/json_state.py
zomglings/moonworm
930e60199629b6a04adecc7f9ff9450e51bb4640
[ "Apache-2.0" ]
29
2021-11-04T12:30:31.000Z
2022-03-03T21:29:08.000Z
moonworm/crawler/state/json_state.py
zomglings/moonworm
930e60199629b6a04adecc7f9ff9450e51bb4640
[ "Apache-2.0" ]
5
2021-11-06T02:25:09.000Z
2022-02-15T03:09:26.000Z
import datetime import json import time from typing import Optional from web3.datastructures import AttributeDict from .event_scanner_state import EventScannerState
35.486957
109
0.626072
5db81c5e24b93ba19d16beaadd48634b1c9fd58a
4,934
py
Python
npbench/benchmarks/nbody/nbody_dace.py
frahlg/npbench
1bc4d9e2e22f3ca67fa2bc7f40e2e751a9c8dd26
[ "BSD-3-Clause" ]
27
2021-05-10T11:49:13.000Z
2022-03-22T18:07:19.000Z
npbench/benchmarks/nbody/nbody_dace.py
frahlg/npbench
1bc4d9e2e22f3ca67fa2bc7f40e2e751a9c8dd26
[ "BSD-3-Clause" ]
3
2021-12-01T13:03:17.000Z
2022-03-17T10:53:00.000Z
npbench/benchmarks/nbody/nbody_dace.py
frahlg/npbench
1bc4d9e2e22f3ca67fa2bc7f40e2e751a9c8dd26
[ "BSD-3-Clause" ]
7
2021-06-24T03:40:25.000Z
2022-01-26T09:04:33.000Z
# Adapted from https://github.com/pmocz/nbody-python/blob/master/nbody.py
# TODO: Add GPL-3.0 License

import numpy as np
import dace as dc

"""
Create Your Own N-body Simulation (With Python)
Philip Mocz (2020) Princeton University, @PMocz

Simulate orbits of stars interacting due to gravity
Code calculates pairwise forces according to Newton's Law of Gravity
"""

N, Nt = (dc.symbol(s, dtype=dc.int64) for s in ('N', 'Nt'))


# @dc.program
# def hstack(out: dc.float64[N, 3], a: dc.float64[N],
#            b: dc.float64[N], c: dc.float64[N]):
#     out[:, 0] = a
#     out[:, 1] = b
#     out[:, 2] = c
29.195266
85
0.561005
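The header of the record above describes pairwise Newtonian gravity. For context, the NumPy kernel from the cited pmocz reference computes all-pairs accelerations like the sketch below; the DaCe program in the full file presumably mirrors this with `dc.program` annotations:

# Sketch: pairwise gravitational acceleration (NumPy form of the cited reference).
import numpy as np

def get_acceleration(pos, mass, G, softening):
    """Return N x 3 accelerations for positions pos (N x 3) and masses mass (N x 1)."""
    x, y, z = pos[:, 0:1], pos[:, 1:2], pos[:, 2:3]
    dx, dy, dz = x.T - x, y.T - y, z.T - z                      # pairwise separations
    inv_r3 = (dx**2 + dy**2 + dz**2 + softening**2) ** (-1.5)   # softened 1/r^3
    ax = G * (dx * inv_r3) @ mass
    ay = G * (dy * inv_r3) @ mass
    az = G * (dz * inv_r3) @ mass
    return np.hstack((ax, ay, az))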
5db8b350508cfde3359da0d0ee1d9036c8e97549
817
py
Python
application/__init__.py
Healthy-Kokoro/Hiroshima
87c6c533f97f55ceb33553a2409076bcd21a36d2
[ "MIT" ]
null
null
null
application/__init__.py
Healthy-Kokoro/Hiroshima
87c6c533f97f55ceb33553a2409076bcd21a36d2
[ "MIT" ]
null
null
null
application/__init__.py
Healthy-Kokoro/Hiroshima
87c6c533f97f55ceb33553a2409076bcd21a36d2
[ "MIT" ]
null
null
null
# Third-party imports from flask import Flask from flask_sqlalchemy import SQLAlchemy configurations = { 'development': 'configurations.DevelopmentConfiguration', 'testing': 'configurations.TestingConfiguration', 'staging': 'configurations.StagingConfiguration', 'production': 'configurations.ProductionConfiguration' } database = SQLAlchemy()
30.259259
64
0.831089
5db8ca5f5d703991674fff33fa5c1ac47210c351
692
py
Python
lesson5/exceptions_except.py
drednout/letspython
9747442d63873b5f71e2c15ed5528bd98ad5ac31
[ "BSD-2-Clause" ]
1
2015-11-26T15:53:58.000Z
2015-11-26T15:53:58.000Z
lesson5/exceptions_except.py
drednout/letspython
9747442d63873b5f71e2c15ed5528bd98ad5ac31
[ "BSD-2-Clause" ]
null
null
null
lesson5/exceptions_except.py
drednout/letspython
9747442d63873b5f71e2c15ed5528bd98ad5ac31
[ "BSD-2-Clause" ]
null
null
null
if __name__ == "__main__": fridge = { "beer": 2, "milk": 1, "meat": 3, } print("I wanna drink 1 bottle of beer...") take_beer(fridge) print("Oooh, great!") print("I wanna drink 2 bottle of beer...") try: take_beer(fridge, 2) except Exception as e: print("Error: {}. Let's continue".format(e)) print("Fallback. Try to take 1 bottle of beer...") take_beer(fridge, 1) print("Oooh, awesome!")
22.322581
54
0.559249
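The exceptions lesson above uses `take_beer` without showing it. For the printed flow to make sense it only needs to raise when the fridge cannot satisfy the request; a sketch of that assumption is:

# Sketch: the helper the lesson's try/except blocks exercise.
def take_beer(fridge, count=1):
    if fridge.get("beer", 0) < count:
        raise Exception("Not enough beer in the fridge")
    fridge["beer"] -= count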
5db9f201356818f114d992f32b2d46869da4d326
23,877
py
Python
synapse/storage/data_stores/state/store.py
juhovan/synapse
57feeab364325374b14ff67ac97c288983cc5cde
[ "Apache-2.0" ]
1
2020-07-12T00:18:52.000Z
2020-07-12T00:18:52.000Z
synapse/storage/data_stores/state/store.py
juhovan/synapse
57feeab364325374b14ff67ac97c288983cc5cde
[ "Apache-2.0" ]
null
null
null
synapse/storage/data_stores/state/store.py
juhovan/synapse
57feeab364325374b14ff67ac97c288983cc5cde
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from collections import namedtuple from typing import Dict, Iterable, List, Set, Tuple from twisted.internet import defer from synapse.api.constants import EventTypes from synapse.storage._base import SQLBaseStore from synapse.storage.data_stores.state.bg_updates import StateBackgroundUpdateStore from synapse.storage.database import Database from synapse.storage.state import StateFilter from synapse.types import StateMap from synapse.util.caches.descriptors import cached from synapse.util.caches.dictionary_cache import DictionaryCache logger = logging.getLogger(__name__) MAX_STATE_DELTA_HOPS = 100 def _get_state_for_groups_using_cache( self, groups: Iterable[int], cache: DictionaryCache, state_filter: StateFilter ) -> Tuple[Dict[int, StateMap[str]], Set[int]]: """Gets the state at each of a list of state groups, optionally filtering by type/state_key, querying from a specific cache. Args: groups: list of state groups for which we want to get the state. cache: the cache of group ids to state dicts which we will pass through - either the normal state cache or the specific members state cache. state_filter: The state filter used to fetch state from the database. Returns: Tuple of dict of state_group_id to state map of entries in the cache, and the state group ids either missing from the cache or incomplete. """ results = {} incomplete_groups = set() for group in set(groups): state_dict_ids, got_all = self._get_state_for_group_using_cache( cache, group, state_filter ) results[group] = state_dict_ids if not got_all: incomplete_groups.add(group) return results, incomplete_groups def _insert_into_cache( self, group_to_state_dict, state_filter, cache_seq_num_members, cache_seq_num_non_members, ): """Inserts results from querying the database into the relevant cache. Args: group_to_state_dict (dict): The new entries pulled from database. Map from state group to state dict state_filter (StateFilter): The state filter used to fetch state from the database. cache_seq_num_members (int): Sequence number of member cache since last lookup in cache cache_seq_num_non_members (int): Sequence number of member cache since last lookup in cache """ # We need to work out which types we've fetched from the DB for the # member vs non-member caches. This should be as accurate as possible, # but can be an underestimate (e.g. when we have wild cards) member_filter, non_member_filter = state_filter.get_member_split() if member_filter.is_full(): # We fetched all member events member_types = None else: # `concrete_types()` will only return a subset when there are wild # cards in the filter, but that's fine. 
member_types = member_filter.concrete_types() if non_member_filter.is_full(): # We fetched all non member events non_member_types = None else: non_member_types = non_member_filter.concrete_types() for group, group_state_dict in group_to_state_dict.items(): state_dict_members = {} state_dict_non_members = {} for k, v in group_state_dict.items(): if k[0] == EventTypes.Member: state_dict_members[k] = v else: state_dict_non_members[k] = v self._state_group_members_cache.update( cache_seq_num_members, key=group, value=state_dict_members, fetched_keys=member_types, ) self._state_group_cache.update( cache_seq_num_non_members, key=group, value=state_dict_non_members, fetched_keys=non_member_types, ) def store_state_group( self, event_id, room_id, prev_group, delta_ids, current_state_ids ): """Store a new set of state, returning a newly assigned state group. Args: event_id (str): The event ID for which the state was calculated room_id (str) prev_group (int|None): A previous state group for the room, optional. delta_ids (dict|None): The delta between state at `prev_group` and `current_state_ids`, if `prev_group` was given. Same format as `current_state_ids`. current_state_ids (dict): The state to store. Map of (type, state_key) to event_id. Returns: Deferred[int]: The state group ID """ return self.db.runInteraction("store_state_group", _store_state_group_txn) def purge_unreferenced_state_groups( self, room_id: str, state_groups_to_delete ) -> defer.Deferred: """Deletes no longer referenced state groups and de-deltas any state groups that reference them. Args: room_id: The room the state groups belong to (must all be in the same room). state_groups_to_delete (Collection[int]): Set of all state groups to delete. """ return self.db.runInteraction( "purge_unreferenced_state_groups", self._purge_unreferenced_state_groups, room_id, state_groups_to_delete, ) def purge_room_state(self, room_id, state_groups_to_delete): """Deletes all record of a room from state tables Args: room_id (str): state_groups_to_delete (list[int]): State groups to delete """ return self.db.runInteraction( "purge_room_state", self._purge_room_state_txn, room_id, state_groups_to_delete, )
37.307813
88
0.590568
5dba0da51e77fecfd4eb4bbfdb42e2e652206d09
1,322
py
Python
core/migrations/0011_itemvariation_variation.py
manulangat1/djcommerce
2cd92631479ef949e0f05a255f2f50feca728802
[ "MIT" ]
1
2020-02-08T16:29:41.000Z
2020-02-08T16:29:41.000Z
core/migrations/0011_itemvariation_variation.py
manulangat1/djcommerce
2cd92631479ef949e0f05a255f2f50feca728802
[ "MIT" ]
15
2020-05-04T13:22:32.000Z
2022-03-12T00:27:28.000Z
core/migrations/0011_itemvariation_variation.py
manulangat1/djcommerce
2cd92631479ef949e0f05a255f2f50feca728802
[ "MIT" ]
1
2020-10-17T08:54:31.000Z
2020-10-17T08:54:31.000Z
# Generated by Django 2.2.6 on 2020-02-09 12:24 from django.db import migrations, models import django.db.models.deletion
34.789474
115
0.555219
5dba8f581c63a89cafcdb31c2be81f0648adb964
1,422
py
Python
mnist/convolutional.py
Colins-Ford/mnist-webapp
20e9b6f5520d5bda957d9501347f787450555db8
[ "Apache-2.0" ]
null
null
null
mnist/convolutional.py
Colins-Ford/mnist-webapp
20e9b6f5520d5bda957d9501347f787450555db8
[ "Apache-2.0" ]
null
null
null
mnist/convolutional.py
Colins-Ford/mnist-webapp
20e9b6f5520d5bda957d9501347f787450555db8
[ "Apache-2.0" ]
null
null
null
import os from mnist import model import tensorflow as tf from tensorflow.examples.tutorials.mnist import input_data data = input_data.read_data_sets("data/dataset/", one_hot=True) # model with tf.variable_scope("convolutional"): x = tf.placeholder(tf.float32, [None, 784]) keep_prob = tf.placeholder(tf.float32) y, variables = model.convolutional(x, keep_prob) # train y_ = tf.placeholder(tf.float32, [None, 10]) cross_entropy = -tf.reduce_sum(y_ * tf.log(y)) train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy) correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) saver = tf.train.Saver(variables) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) for i in range(20000): batch = data.train.next_batch(50) if i % 100 == 0: train_accuracy = accuracy.eval(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0}) print("step %d, training accuracy %g" % (i, train_accuracy)) sess.run(train_step, feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5}) print(sess.run(accuracy, feed_dict={x: data.test.images, y_: data.test.labels, keep_prob: 1.0})) path = saver.save( sess, os.path.join(os.path.dirname(__file__), 'data', 'convolutional.ckpt'), write_meta_graph=False, write_state=False) print("Saved:", path)
37.421053
100
0.69339
5dba9613244bd2e35eb89625f766b4f652fe90d8
2,901
py
Python
parse_scripts/import_osm.py
nokout/au_address
07138ecd8fedab9566435b609cb8124b67ad42ff
[ "MIT" ]
1
2018-11-16T15:41:38.000Z
2018-11-16T15:41:38.000Z
training/parse_scripts/import_osm.py
crccheck/us-address-parser
826fd365cba065a0588fa013cddbb23a8dac27a9
[ "MIT" ]
6
2016-10-05T11:21:36.000Z
2016-10-18T15:11:20.000Z
parse_scripts/import_osm.py
nokout/au_address
07138ecd8fedab9566435b609cb8124b67ad42ff
[ "MIT" ]
null
null
null
import requests import codecs query1 = """<union> <query type="way"> <has-kv k="addr:housenumber"/> <has-kv k="addr:street:name"/> <has-kv k="addr:street:type"/> <has-kv k="addr:state"/> <bbox-query e="%s" n="%s" s="%s" w="%s"/> </query> <query type="way"> <has-kv k="addr:housenumber"/> <has-kv k="addr:street:name"/> <has-kv k="addr:street:type"/> <has-kv k="addr:city"/> <bbox-query e="%s" n="%s" s="%s" w="%s"/> </query> <query type="way"> <has-kv k="addr:housenumber"/> <has-kv k="addr:street:name"/> <has-kv k="addr:street:type"/> <has-kv k="addr:postcode"/> <bbox-query e="%s" n="%s" s="%s" w="%s"/> </query> <query type="node"> <has-kv k="addr:housenumber"/> <has-kv k="addr:street:name"/> <has-kv k="addr:street:type"/> <has-kv k="addr:state"/> <bbox-query e="%s" n="%s" s="%s" w="%s"/> </query> <query type="node"> <has-kv k="addr:housenumber"/> <has-kv k="addr:street:name"/> <has-kv k="addr:street:type"/> <has-kv k="addr:city"/> <bbox-query e="%s" n="%s" s="%s" w="%s"/> </query> <query type="node"> <has-kv k="addr:housenumber"/> <has-kv k="addr:street:name"/> <has-kv k="addr:street:type"/> <has-kv k="addr:postcode"/> <bbox-query e="%s" n="%s" s="%s" w="%s"/> </query> </union> <print/>""" % ((-70.000000, 50.000000, 25.000000, -125.000000) * 6) r1 = requests.post('http://overpass-api.de/api/interpreter/', data=query1) r1.encoding = 'utf-8' f = codecs.open('data/osm_data.xml', encoding='utf-8' , mode='w+') f.write(r1.text) query2 = """<union> <query type="way"> <has-kv k="addr:street"/> <has-kv k="addr:street:name"/> <has-kv k="addr:street:prefix"/> <has-kv k="addr:street:type"/> <bbox-query e="%s" n="%s" s="%s" w="%s"/> </query> <query type="node"> <has-kv k="addr:street"/> <has-kv k="addr:street:name"/> <has-kv k="addr:street:prefix"/> <has-kv k="addr:street:type"/> <bbox-query e="%s" n="%s" s="%s" w="%s"/> </query> </union> <print/>""" % ((-87.61309146881104, 41.890042371392965, 41.87234107841773, -87.64235973358154) * 2) #r2 = requests.post('http://overpass-api.de/api/interpreter/', data=query2) #f = codecs.open("data/osm_data_street.xml", "wb", "utf-8") #r2.encoding = 'utf-8' #f.write(r2.text) query3 = """<union> <query type="way"> <has-kv k="addr:full" regv="^[0-9]+.*[a-z]+.*[0-9]{5}.*"/> <bbox-query e="%s" n="%s" s="%s" w="%s"/> </query> <query type="node"> <has-kv k="addr:full" regv="^[0-9]+.*[a-z]+.*[0-9]{5}.*"/> <bbox-query e="%s" n="%s" s="%s" w="%s"/> </query> </union> <print/> """ % ((-70.000000, 50.000000, 25.000000, -125.000000) * 2) if __name__ == '__main__' : r3 = requests.post('http://overpass-api.de/api/interpreter/', data=query3) f = codecs.open("data/osm_data_full_addr.xml", "wb", "utf-8") r3.encoding = 'utf-8' f.write(r3.text)
28.441176
99
0.558083
5dbd482917f27cdd677d99ffd355bb76525f3a13
4,110
py
Python
tools/test_net_batch.py
abhirevan/pedestrian-detector
f4fa4cd59315ea515ace3c529b716ff3173e2205
[ "BSD-2-Clause" ]
null
null
null
tools/test_net_batch.py
abhirevan/pedestrian-detector
f4fa4cd59315ea515ace3c529b716ff3173e2205
[ "BSD-2-Clause" ]
null
null
null
tools/test_net_batch.py
abhirevan/pedestrian-detector
f4fa4cd59315ea515ace3c529b716ff3173e2205
[ "BSD-2-Clause" ]
null
null
null
#!/usr/bin/env python # -------------------------------------------------------- # Fast R-CNN # Copyright (c) 2015 Microsoft # Licensed under The MIT License [see LICENSE for details] # Written by Ross Girshick # -------------------------------------------------------- """Test a Fast R-CNN network on an image database.""" import _init_paths from fast_rcnn.test import test_net from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list from datasets.factory import get_imdb import caffe import argparse import pprint import time, os, sys import pandas as pd def parse_args(): """ Parse input arguments """ parser = argparse.ArgumentParser(description='Test a Fast R-CNN network pipeline') parser.add_argument('--gpu', dest='gpu_id', help='GPU id to use', default=0, type=int, required=True) parser.add_argument('--dir', dest='dir', help='Directory of the model files', default="", type=str, required=True) parser.add_argument('--models', dest='model_files', help='Text file with names of models', default=None, type=str, required=True) parser.add_argument('--prototxt', dest='prototxt', help='prototxt', default=None, type=str, required=True) parser.add_argument('--imdb', dest='imdb_name', help='dataset to test', default='ped_test_small', type=str, required=True) parser.add_argument('--cfg', dest='cfg_file', help='cfg', default='experiments/cfgs/faster_rcnn_end2end.yml', type=str) parser.add_argument('--res', dest='res_file', help='result file', default='', type=str, required=True) args = parser.parse_args() return args if __name__ == '__main__': # args = parse_args() gpu_id = 0 # dir = '/home/abhijitcbim/git/pedestrian-detector/output/faster_rcnn_end2end/train/backup' # model_files = 'test.txt' args = parse_args() print('Called with args:') print(args) run_test_nets(args.gpu_id, args.dir, args.model_files, args.prototxt, args.imdb_name, args.cfg_file, args.res_file) # run_test_net(gpu_id,caffemodel, prototxt, imdb_name, cfg_file)
33.145161
119
0.615572
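The Fast R-CNN batch-test record above calls `run_test_nets`, which was not captured. Given the argument names and the imports already present, a plausible skeleton loads each listed caffemodel and evaluates it on the imdb; this is a sketch under those assumptions (result-file aggregation via `res_file` is omitted), not the original script:

# Sketch: iterate over listed caffemodels and run the standard Fast R-CNN evaluation.
import os
import caffe
from fast_rcnn.config import cfg_from_file
from fast_rcnn.test import test_net
from datasets.factory import get_imdb

def run_test_nets(gpu_id, model_dir, model_files, prototxt, imdb_name, cfg_file, res_file):
    cfg_from_file(cfg_file)
    caffe.set_mode_gpu()
    caffe.set_device(gpu_id)
    imdb = get_imdb(imdb_name)
    with open(model_files) as f:
        for name in (line.strip() for line in f if line.strip()):
            caffemodel = os.path.join(model_dir, name)
            net = caffe.Net(prototxt, caffemodel, caffe.TEST)
            net.name = os.path.splitext(os.path.basename(caffemodel))[0]
            test_net(net, imdb)  # res_file handling omitted in this sketch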
5dbe1aa985f0f74b54e5721ad988a0ced87ead89
469
py
Python
mygallary/urls.py
mangowilliam/my_gallary
4c87fe055e5c28d6ca6a27ea5bde7df380750006
[ "MIT" ]
null
null
null
mygallary/urls.py
mangowilliam/my_gallary
4c87fe055e5c28d6ca6a27ea5bde7df380750006
[ "MIT" ]
6
2021-03-19T02:06:21.000Z
2022-03-11T23:53:21.000Z
mygallary/urls.py
mangowilliam/my_gallary
4c87fe055e5c28d6ca6a27ea5bde7df380750006
[ "MIT" ]
null
null
null
from django.conf import settings from django.conf.urls.static import static from django.conf.urls import url from . import views urlpatterns = [ url('^$', views.gallary,name = 'gallary'), url(r'^search/', views.search_image, name='search_image'), url(r'^details/(\d+)',views.search_location,name ='images') ] if settings.DEBUG: urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
20.391304
81
0.656716
5dbebf189d084ec54743890289ba79eb7c5bba5c
5,831
py
Python
yolox/data/datasets/mot.py
ldelzott/ByteTrack
5f8ab49a913a551d041918607a0bd2473602ad39
[ "MIT" ]
null
null
null
yolox/data/datasets/mot.py
ldelzott/ByteTrack
5f8ab49a913a551d041918607a0bd2473602ad39
[ "MIT" ]
null
null
null
yolox/data/datasets/mot.py
ldelzott/ByteTrack
5f8ab49a913a551d041918607a0bd2473602ad39
[ "MIT" ]
null
null
null
import cv2 import numpy as np from pycocotools.coco import COCO import os from ..dataloading import get_yolox_datadir from .datasets_wrapper import Dataset
40.776224
165
0.510547
5dbf449975065338e5216b26f0b50de7db0d2cd0
4,740
py
Python
src/poetry/console/commands/remove.py
pkoch/poetry
d22c5a7187d8b5a30196a7df58111b3c90be7d22
[ "MIT" ]
null
null
null
src/poetry/console/commands/remove.py
pkoch/poetry
d22c5a7187d8b5a30196a7df58111b3c90be7d22
[ "MIT" ]
null
null
null
src/poetry/console/commands/remove.py
pkoch/poetry
d22c5a7187d8b5a30196a7df58111b3c90be7d22
[ "MIT" ]
null
null
null
from __future__ import annotations from typing import Any from cleo.helpers import argument from cleo.helpers import option from tomlkit.toml_document import TOMLDocument try: from poetry.core.packages.dependency_group import MAIN_GROUP except ImportError: MAIN_GROUP = "default" from poetry.console.commands.installer_command import InstallerCommand
34.347826
88
0.59789
5dbff166d1570c685dadc7e901e806b3102dde0f
3,316
py
Python
orrinjelo/aoc2021/day_11.py
orrinjelo/AdventOfCode2021
6fce5c48ec3dc602b393824f592a5c6db2a8b66f
[ "MIT" ]
null
null
null
orrinjelo/aoc2021/day_11.py
orrinjelo/AdventOfCode2021
6fce5c48ec3dc602b393824f592a5c6db2a8b66f
[ "MIT" ]
null
null
null
orrinjelo/aoc2021/day_11.py
orrinjelo/AdventOfCode2021
6fce5c48ec3dc602b393824f592a5c6db2a8b66f
[ "MIT" ]
null
null
null
from orrinjelo.utils.decorators import timeit import numpy as np visited = [] # = Test ================================================ inputlist = [ '5483143223', '2745854711', '5264556173', '6141336146', '6357385478', '4167524645', '2176841721', '6882881134', '4846848554', '5283751526', ] import pygame import sys
22.557823
85
0.518697
5dc01810c4c1797d877a743bdf67e61535eee657
1,914
py
Python
exercise_2/exercise_2.1.py
lukaszbinden/ethz-iacv-2020
271de804315de98b816cda3e2498958ffa87ad59
[ "MIT" ]
null
null
null
exercise_2/exercise_2.1.py
lukaszbinden/ethz-iacv-2020
271de804315de98b816cda3e2498958ffa87ad59
[ "MIT" ]
null
null
null
exercise_2/exercise_2.1.py
lukaszbinden/ethz-iacv-2020
271de804315de98b816cda3e2498958ffa87ad59
[ "MIT" ]
null
null
null
camera_width = 640 camera_height = 480 film_back_width = 1.417 film_back_height = 0.945 x_center = 320 y_center = 240 P_1 = (-0.023, -0.261, 2.376) p_11 = P_1[0] p_12 = P_1[1] p_13 = P_1[2] P_2 = (0.659, -0.071, 2.082) p_21 = P_2[0] p_22 = P_2[1] p_23 = P_2[2] p_1_prime = (52, 163) x_1 = p_1_prime[0] y_1 = p_1_prime[1] p_2_prime = (218, 216) x_2 = p_2_prime[0] y_2 = p_2_prime[1] f = 1.378 k_x = camera_width / film_back_width k_y = camera_height / film_back_height # f_k_x = f * k_x f_k_x = f # f_k_y = f * k_y f_k_y = f u_1_prime = (x_1 - x_center) / k_x v_1_prime = (y_1 - y_center) / k_y u_2_prime = (x_2 - x_center) / k_x v_2_prime = (y_2 - y_center) / k_y c_1_prime = (f_k_x * p_21 + (p_13 - p_23) * u_2_prime - u_2_prime/u_1_prime * f_k_x * p_11) / (f_k_x * (1 - u_2_prime/u_1_prime)) c_2_prime = (f_k_y * p_22 - (p_23 - (p_13*u_1_prime - f_k_x*(p_11 - c_1_prime))/u_1_prime) * v_2_prime) / f_k_y c_2_prime_alt = (f_k_y * p_12 - (p_13 - (p_13*u_1_prime - f_k_x*(p_11 - c_1_prime))/u_1_prime) * v_1_prime) / f_k_y c_3_prime = p_13 - (f_k_x / u_1_prime) * (p_11 - c_1_prime) rho_1_prime = p_13 - c_3_prime rho_2_prime = p_23 - c_3_prime print(f"C' = ({c_1_prime}, {c_2_prime}, {c_3_prime})") print(f"c_2_prime_alt = {c_2_prime_alt}") print(f"rho_1_prime = {rho_1_prime}") print(f"rho_2_prime = {rho_2_prime}") print("------------------") r_11 = f_k_x * (p_11 - c_1_prime) r_12 = f_k_y * (p_12 - c_2_prime) r_13 = 1 * (p_13 - c_3_prime) l_11 = rho_1_prime * u_1_prime l_12 = rho_1_prime * v_1_prime l_13 = rho_1_prime * 1 print(f"L: ({l_11}, {l_12}, {l_13})") print(f"R: ({r_11}, {r_12}, {r_13})") print("------------------") r_21 = f_k_x * (p_21 - c_1_prime) r_22 = f_k_y * (p_22 - c_2_prime) r_23 = 1 * (p_23 - c_3_prime) l_21 = rho_2_prime * u_2_prime l_22 = rho_2_prime * v_2_prime l_23 = rho_2_prime * 1 print(f"L: ({l_11}, {l_12}, {l_13})") print(f"R: ({r_11}, {r_12}, {r_13})")
23.060241
129
0.642633
5dc0c299dbdb6b798fc1619ba108af859bcce78e
3,329
py
Python
services/train/single.py
paper2code/torch2vec-restful-service
6c4412d84d067268bf988b1f31cef716a2ed23a5
[ "MIT" ]
2
2020-09-13T18:08:52.000Z
2020-09-19T05:26:50.000Z
services/train/single.py
paper2code/torch2vec-restful-service
6c4412d84d067268bf988b1f31cef716a2ed23a5
[ "MIT" ]
null
null
null
services/train/single.py
paper2code/torch2vec-restful-service
6c4412d84d067268bf988b1f31cef716a2ed23a5
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Wed Aug 26 19:15:34 2020 @author: deviantpadam """ import pandas as pd import numpy as np import concurrent.futures import os import tqdm from collections import Counter from torch2vec.data import DataPreparation from torch2vec.torch2vec import DM # train = pd.read_csv('/home/deviantpadam/Downloads/example.csv',delimiter='\t') # train = pd.read_csv('/home/deviantpadam/Downloads/example (1).csv') train = pd.read_csv('../data/suggest_dump.txt',delimiter='\t') train = cleaner(train) corpus = train['authors']+' '+train['title']+' '+train['summary']+' '+train['subject1']+' '+train['subject2']+' '+train['task'] corpus.name = 'text' corpus = pd.concat([train['subject1'],train['subject2'],train['task'],corpus],axis=1) data = DataPreparation(corpus.reset_index(),f_size=3) data.tokenize() bigrams = _get_bigrams(data.corpus.values,min_count=700) data.corpus = phraser(data.corpus.values) bigrams = _get_bigrams(data.corpus.values,min_count=500) data.corpus = phraser(data.corpus.values) data.vocab_builder() doc, context, target_noise_ids = data.get_data(window_size=5,num_noise_words=10) model = DM(vec_dim=100,num_docs=len(data),num_words=data.vocab_size).cuda() num_workers = os.cpu_count() model.fit(doc_ids=doc,context=context,target_noise_ids=target_noise_ids,epochs=20,batch_size=8000,num_workers=num_workers) model.save_model(data.document_ids,data.args,file_name='weights')
36.988889
151
0.681586
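The torch2vec training script above relies on `cleaner`, `_get_bigrams` and `phraser`, none of which were captured. The two phrase-building helpers could plausibly work like the sketch below (the original `phraser` is called without a bigrams argument, so it presumably closes over module state; this stand-in takes the set explicitly, and `cleaner` is left out):

# Sketch: count adjacent token pairs and merge frequent bigrams into single tokens.
from collections import Counter

def _get_bigrams(corpus, min_count):
    """Set of adjacent token pairs occurring at least min_count times."""
    counts = Counter(pair for doc in corpus for pair in zip(doc, doc[1:]))
    return {pair for pair, n in counts.items() if n >= min_count}

def phraser(corpus, bigrams=frozenset()):
    """Replace known bigrams with single underscore-joined tokens."""
    merged_corpus = []
    for doc in corpus:
        merged, i = [], 0
        while i < len(doc):
            if i + 1 < len(doc) and (doc[i], doc[i + 1]) in bigrams:
                merged.append(doc[i] + '_' + doc[i + 1])
                i += 2
            else:
                merged.append(doc[i])
                i += 1
        merged_corpus.append(merged)
    return merged_corpus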
5dc0fa811b71f512df88503ac7e13855083e0792
8,399
py
Python
tests/sources/test_document_oereblex.py
geo-bl-ch/pyramid_oereb
767375a4adda4589e12c4257377fc30258cdfcb3
[ "BSD-2-Clause" ]
null
null
null
tests/sources/test_document_oereblex.py
geo-bl-ch/pyramid_oereb
767375a4adda4589e12c4257377fc30258cdfcb3
[ "BSD-2-Clause" ]
null
null
null
tests/sources/test_document_oereblex.py
geo-bl-ch/pyramid_oereb
767375a4adda4589e12c4257377fc30258cdfcb3
[ "BSD-2-Clause" ]
null
null
null
# -*- coding: utf-8 -*- import datetime import pytest import requests_mock from geolink_formatter.entity import Document, File from requests.auth import HTTPBasicAuth from pyramid_oereb.contrib.sources.document import OEREBlexSource from pyramid_oereb.lib.records.documents import DocumentRecord, LegalProvisionRecord from pyramid_oereb.lib.records.office import OfficeRecord from tests.mockrequest import MockParameter def test_read(): with requests_mock.mock() as m: with open('./tests/resources/geolink_v1.1.1.xml', 'rb') as f: m.get('http://oereblex.example.com/api/geolinks/100.xml', content=f.read()) source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL') source.read(MockParameter(), 100) assert len(source.records) == 2 document = source.records[0] assert isinstance(document, DocumentRecord) assert isinstance(document.responsible_office, OfficeRecord) assert document.responsible_office.name == {'de': 'Landeskanzlei'} assert document.canton == 'BL' assert document.text_at_web == { 'de': 'http://oereblex.example.com/api/attachments/313' } assert len(document.references) == 5 def test_read_related_decree_as_main(): with requests_mock.mock() as m: with open('./tests/resources/geolink_v1.1.1.xml', 'rb') as f: m.get('http://oereblex.example.com/api/geolinks/100.xml', content=f.read()) source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL', related_decree_as_main=True) source.read(MockParameter(), 100) assert len(source.records) == 3 document = source.records[0] assert isinstance(document, DocumentRecord) assert isinstance(document.responsible_office, OfficeRecord) assert document.responsible_office.name == {'de': 'Landeskanzlei'} assert document.canton == 'BL' assert document.text_at_web == { 'de': 'http://oereblex.example.com/api/attachments/313' } assert len(document.references) == 4 def test_read_with_version_in_url(): with requests_mock.mock() as m: with open('./tests/resources/geolink_v1.1.1.xml', 'rb') as f: m.get('http://oereblex.example.com/api/1.1.1/geolinks/100.xml', content=f.read()) source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL', pass_version=True) source.read(MockParameter(), 100) assert len(source.records) == 2 def test_read_with_specified_version(): with requests_mock.mock() as m: with open('./tests/resources/geolink_v1.0.0.xml', 'rb') as f: m.get('http://oereblex.example.com/api/1.0.0/geolinks/100.xml', content=f.read()) source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL', pass_version=True, version='1.0.0') source.read(MockParameter(), 100) assert len(source.records) == 2 def test_read_with_specified_language(): with requests_mock.mock() as m: with open('./tests/resources/geolink_v1.1.1.xml', 'rb') as f: m.get('http://oereblex.example.com/api/geolinks/100.xml?locale=fr', content=f.read()) source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL') params = MockParameter() params.set_language('fr') source.read(params, 100) assert len(source.records) == 2 document = source.records[0] assert document.responsible_office.name == {'fr': 'Landeskanzlei'} assert document.text_at_web == { 'fr': 'http://oereblex.example.com/api/attachments/313' } def test_authentication(): auth = { 'username': 'test', 'password': 'test' } source = OEREBlexSource(host='http://oereblex.example.com', language='de', canton='BL', auth=auth) assert isinstance(source._auth, HTTPBasicAuth) def test_get_document_title(): document = Document([], 
id='1', title='Test') result = {'de': 'Test'} assert OEREBlexSource._get_document_title(document, File(), 'de') == result
37.328889
102
0.604239
5dc1607dc008e8af7451051e5d28ffb9f945411a
998
py
Python
apps/zsh/singletons.py
codecat555/codecat555-fidgetingbits_knausj_talon
62f9be0459e6631c99d58eee97054ddd970cc5f3
[ "MIT" ]
4
2021-02-04T07:36:05.000Z
2021-07-03T06:53:30.000Z
apps/zsh/singletons.py
codecat555/codecat555-fidgetingbits_knausj_talon
62f9be0459e6631c99d58eee97054ddd970cc5f3
[ "MIT" ]
null
null
null
apps/zsh/singletons.py
codecat555/codecat555-fidgetingbits_knausj_talon
62f9be0459e6631c99d58eee97054ddd970cc5f3
[ "MIT" ]
null
null
null
# A rarely-updated module to assist in writing reload-safe talon modules using # things like threads, which are not normally safe for reloading with talon. # If this file is ever updated, you'll need to restart talon. import logging _singletons = {}
28.514286
78
0.645291
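The singletons module above keeps only the `_singletons` registry; the decorator that makes threads and similar resources reload-safe is outside the snippet. One generator-based way to use that registry (setup before the `yield`, cleanup after it, old cleanup run when the module reloads) is sketched below; this is an assumption, not the original Talon helper:

# Sketch: reload-safe singleton decorator over generator functions.
import logging

_singletons = {}

def singleton(fn):
    name = "%s.%s" % (fn.__module__, fn.__name__)
    if name in _singletons:            # a previous load registered cleanup
        old = _singletons.pop(name)
        try:
            next(old)                  # run the old cleanup step
        except StopIteration:
            pass
        except Exception:
            logging.exception("singleton cleanup failed for %s", name)
    it = iter(fn())                    # run setup up to the first yield
    next(it)
    _singletons[name] = it             # keep the generator for the next reload
    return fn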
5dc1f18fa3f023890a5249859cd11435ad90ffca
1,088
py
Python
trainNN/run_bichrom.py
yztxwd/Bichrom
3939b8e52816a02b34122feef27c8e0a06e31d8e
[ "MIT" ]
3
2021-02-09T14:07:48.000Z
2021-06-21T18:31:54.000Z
trainNN/run_bichrom.py
yztxwd/Bichrom
3939b8e52816a02b34122feef27c8e0a06e31d8e
[ "MIT" ]
5
2021-02-05T03:46:37.000Z
2022-03-16T16:34:41.000Z
trainNN/run_bichrom.py
yztxwd/Bichrom
3939b8e52816a02b34122feef27c8e0a06e31d8e
[ "MIT" ]
4
2021-01-09T19:59:51.000Z
2021-11-12T21:08:40.000Z
import argparse import yaml from subprocess import call from train import train_bichrom if __name__ == '__main__': # parsing parser = argparse.ArgumentParser(description='Train and Evaluate Bichrom') parser.add_argument('-training_schema_yaml', required=True, help='YAML file with paths to train, test and val data') parser.add_argument('-len', help='Size of genomic windows', required=True, type=int) parser.add_argument('-outdir', required=True, help='Output directory') parser.add_argument('-nbins', type=int, required=True, help='Number of bins') args = parser.parse_args() # load the yaml file with input data paths: with open(args.training_schema_yaml, 'r') as f: try: data_paths = yaml.safe_load(f) except yaml.YAMLError as exc: print(exc) # create the output directory: outdir = args.outdir call(['mkdir', outdir]) train_bichrom(data_paths=data_paths, outdir=outdir, seq_len=args.len, bin_size=int(args.len/args.nbins))
38.857143
81
0.662684
5dc26a359dc3c1de1c2351ad0bab013c4dbc10a0
3,687
py
Python
setup.py
Fronius-SED/rapidyaml
20d44ff0c43085d08cb17f37fd6b0b305938a3ea
[ "MIT" ]
null
null
null
setup.py
Fronius-SED/rapidyaml
20d44ff0c43085d08cb17f37fd6b0b305938a3ea
[ "MIT" ]
null
null
null
setup.py
Fronius-SED/rapidyaml
20d44ff0c43085d08cb17f37fd6b0b305938a3ea
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # SPDX-License-Identifier: MIT import os import shutil import sys from pathlib import Path from distutils import log from setuptools import setup from setuptools.command.sdist import sdist as SdistCommand from cmake_build_extension import BuildExtension, CMakeExtension TOP_DIR = (Path(__file__).parent).resolve() # Where the Python library is actually found. PYTHON_DIR = "api/python" setup_kw = {} # Read in the package version when not in a git repository. VERSION_FILE = os.path.join(PYTHON_DIR, 'ryml', 'version.py') if not (TOP_DIR / '.git').exists() and os.path.exists(VERSION_FILE): exec(open(VERSION_FILE).read()) setup_kw['version'] = version else: setup_kw['use_scm_version']= { "version_scheme": "post-release", "local_scheme": "no-local-version", "write_to": VERSION_FILE, } # Read in the module description from the README.md file. README_FILE = TOP_DIR / "README.md" if README_FILE.exists(): with open(TOP_DIR / "README.md", "r") as fh: setup_kw['long_description'] = fh.read() setup_kw['long_description_content_type'] = "text/markdown" # define a CMake package cmake_args = dict( name='ryml.ryml', install_prefix='', source_dir='', cmake_component='python', cmake_configure_options=[ "-DRYML_BUILD_API:BOOL=ON", # Force cmake to use the Python interpreter we are currently using to # run setup.py "-DPython3_EXECUTABLE:FILEPATH="+sys.executable, ], ) try: ext = CMakeExtension(**cmake_args) except TypeError: del cmake_args['cmake_component'] ext = CMakeExtension(**cmake_args) # If the CMakeExtension doesn't support `cmake_component` then we have to # do some manual cleanup. _BuildExtension=BuildExtension setup( # Package human readable information name='rapidyaml', #author='Joao Paulo Magalhaes', description='Rapid YAML - a library to parse and emit YAML, and do it fast.', url='https://github.com/biojppm/rapidyaml', license='MIT', license_files=['LICENSE.txt'], # Package contents control cmdclass={ "build_ext": BuildExtension, }, package_dir={"": PYTHON_DIR}, packages=['ryml'], ext_modules=[ext], include_package_data=True, # Requirements python_requires=">=3.7", setup_requires=['setuptools_scm'], # Extra arguments **setup_kw, )
32.342105
81
0.641714
5dc297d8b74fa875a21b1642232b22d90653124f
5,193
py
Python
litex_boards/targets/digilent_arty_z7.py
machdyne/litex-boards
2311db18f8c92f80f03226fa984e6110caf25b88
[ "BSD-2-Clause" ]
null
null
null
litex_boards/targets/digilent_arty_z7.py
machdyne/litex-boards
2311db18f8c92f80f03226fa984e6110caf25b88
[ "BSD-2-Clause" ]
null
null
null
litex_boards/targets/digilent_arty_z7.py
machdyne/litex-boards
2311db18f8c92f80f03226fa984e6110caf25b88
[ "BSD-2-Clause" ]
null
null
null
#!/usr/bin/env python3 # # This file is part of LiteX-Boards. # # Copyright (c) 2021 Gwenhael Goavec-Merou <[email protected]> # SPDX-License-Identifier: BSD-2-Clause import argparse import subprocess from migen import * from litex_boards.platforms import digilent_arty_z7 from litex.build import tools from litex.build.xilinx import common as xil_common from litex.build.xilinx.vivado import vivado_build_args, vivado_build_argdict from litex.soc.interconnect import axi from litex.soc.interconnect import wishbone from litex.soc.cores.clock import * from litex.soc.integration.soc_core import * from litex.soc.integration.soc import SoCRegion from litex.soc.integration.builder import * from litex.soc.cores.led import LedChaser # CRG ---------------------------------------------------------------------------------------------- # BaseSoC ------------------------------------------------------------------------------------------ # Build -------------------------------------------------------------------------------------------- def main(): parser = argparse.ArgumentParser(description="LiteX SoC on Arty Z7") parser.add_argument("--toolchain", default="vivado", help="FPGA toolchain (vivado, symbiflow or yosys+nextpnr).") parser.add_argument("--build", action="store_true", help="Build bitstream.") parser.add_argument("--load", action="store_true", help="Load bitstream.") parser.add_argument("--variant", default="z7-20", help="Board variant (z7-20 or z7-10).") parser.add_argument("--sys-clk-freq", default=125e6, help="System clock frequency.") builder_args(parser) soc_core_args(parser) vivado_build_args(parser) parser.set_defaults(cpu_type="zynq7000") args = parser.parse_args() soc = BaseSoC( variant = args.variant, toolchain = args.toolchain, sys_clk_freq=int(float(args.sys_clk_freq)), **soc_core_argdict(args) ) builder = Builder(soc, **builder_argdict(args)) builder_kwargs = vivado_build_argdict(args) if args.toolchain == "vivado" else {} builder.build(**builder_kwargs, run=args.build) if args.load: prog = soc.platform.create_programmer() prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name + ".bit")) if __name__ == "__main__": main()
38.753731
123
0.561333
5dc2ee5e1dddc721c798287488da3cf41eba8ae1
6,899
py
Python
goose/parsers.py
allmalaysianews/article-extractor
8d0ff3ed01258d0fad56fc22d2c1852e603096b4
[ "Apache-2.0" ]
null
null
null
goose/parsers.py
allmalaysianews/article-extractor
8d0ff3ed01258d0fad56fc22d2c1852e603096b4
[ "Apache-2.0" ]
null
null
null
goose/parsers.py
allmalaysianews/article-extractor
8d0ff3ed01258d0fad56fc22d2c1852e603096b4
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- """\ This is a python port of "Goose" orignialy licensed to Gravity.com under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. Python port was written by Xavier Grangier for Recrutae Gravity.com licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import lxml.html as lxmlhtml from lxml.html import soupparser from lxml import etree from copy import deepcopy from goose.text import innerTrim from goose.text import encodeValue
28.159184
84
0.584433
5dc3595f9215ec36727d03f139d3d859982ac98f
2,352
py
Python
src/infrastructure/database/postgres/sqlhandler.py
SoyBeansLab/daizu-online-judge-backend
873f81fdad2f216e28b83341a6d88b0e21078d6e
[ "MIT" ]
7
2019-05-14T08:40:35.000Z
2019-08-20T08:15:21.000Z
src/infrastructure/database/postgres/sqlhandler.py
SoyBeansLab/daizu-online-judge-backend
873f81fdad2f216e28b83341a6d88b0e21078d6e
[ "MIT" ]
76
2019-05-14T08:56:40.000Z
2020-10-18T16:25:33.000Z
src/infrastructure/database/postgres/sqlhandler.py
SoyBeansLab/daizu-online-judge-backend
873f81fdad2f216e28b83341a6d88b0e21078d6e
[ "MIT" ]
3
2019-12-12T01:44:31.000Z
2020-11-22T03:24:40.000Z
from logging import getLogger import os from typing import List, Union import psycopg2 from interface.database.sqlhandler import Cursor as AbsCursor from interface.database.sqlhandler import Result as AbsResult from interface.database.sqlhandler import SqlHandler as AbsSqlHandler from exceptions.waf import SqlTransactionException logger = getLogger("daizu").getChild("infrastracture.SqlHandler")
29.4
75
0.627551
5dc369e87fe800f32ded0cd2dc49e361f6723160
1,001
py
Python
virtualisation/wrapper/parser/xmlparser.py
CityPulse/CP_Resourcemanagement
aa670fa89d5e086a98ade3ccc152518be55abf2e
[ "MIT" ]
2
2016-11-03T14:57:45.000Z
2019-05-13T13:21:08.000Z
virtualisation/wrapper/parser/xmlparser.py
CityPulse/CP_Resourcemanagement
aa670fa89d5e086a98ade3ccc152518be55abf2e
[ "MIT" ]
null
null
null
virtualisation/wrapper/parser/xmlparser.py
CityPulse/CP_Resourcemanagement
aa670fa89d5e086a98ade3ccc152518be55abf2e
[ "MIT" ]
1
2020-07-23T11:27:15.000Z
2020-07-23T11:27:15.000Z
from virtualisation.clock.abstractclock import AbstractClock __author__ = 'Marten Fischer ([email protected])' from virtualisation.wrapper.parser.abstractparser import AbstractParser from virtualisation.misc.jsonobject import JSONObject as JOb import datetime as dt
31.28125
91
0.682318
5dc48b0f27c0b76d7893695e9d44f12dbfa7a376
19,404
py
Python
plaso/formatters/interface.py
jonathan-greig/plaso
b88a6e54c06a162295d09b016bddbfbfe7ca9070
[ "Apache-2.0" ]
1,253
2015-01-02T13:58:02.000Z
2022-03-31T08:43:39.000Z
plaso/formatters/interface.py
jonathan-greig/plaso
b88a6e54c06a162295d09b016bddbfbfe7ca9070
[ "Apache-2.0" ]
3,388
2015-01-02T11:17:58.000Z
2022-03-30T10:21:45.000Z
plaso/formatters/interface.py
jonathan-greig/plaso
b88a6e54c06a162295d09b016bddbfbfe7ca9070
[ "Apache-2.0" ]
376
2015-01-20T07:04:54.000Z
2022-03-04T23:53:00.000Z
# -*- coding: utf-8 -*- """This file contains the event formatters interface classes. The l2t_csv and other formats are dependent on a message field, referred to as description_long and description_short in l2t_csv. Plaso no longer stores these field explicitly. A formatter, with a format string definition, is used to convert the event object values into a formatted string that is similar to the description_long and description_short field. """ import abc import re from plaso.formatters import logger def AddHelper(self, helper): """Adds an event formatter helper. Args: helper (EventFormatterHelper): event formatter helper to add. """ self.helpers.append(helper)
33.112628
79
0.704958
5dc4b2786f8172c270a1fc651693530424b90630
190
py
Python
python_program/condition.py
LiuKaiqiang94/PyStudyExample
b30212718b218c71e06b68677f55c33e3a1dbf46
[ "MIT" ]
5
2018-09-10T02:52:35.000Z
2018-09-20T07:50:42.000Z
python_program/condition.py
LiuKaiqiang94/PyStudyExample
b30212718b218c71e06b68677f55c33e3a1dbf46
[ "MIT" ]
null
null
null
python_program/condition.py
LiuKaiqiang94/PyStudyExample
b30212718b218c71e06b68677f55c33e3a1dbf46
[ "MIT" ]
null
null
null
main()
13.571429
33
0.442105
5dc534af6da39531ee8b4ae7b4baf8841a23115e
1,608
py
Python
Annotated_video/test/Annotatedvideo_worm.py
Rukaume/LRCN
0d1928cc72544f59a4335fea7febc561d3dfc118
[ "MIT" ]
1
2020-11-07T05:57:32.000Z
2020-11-07T05:57:32.000Z
Annotated_video/test/Annotatedvideo_worm.py
Rukaume/LRCN
0d1928cc72544f59a4335fea7febc561d3dfc118
[ "MIT" ]
1
2020-11-07T00:30:22.000Z
2021-01-26T02:22:16.000Z
Annotated_video/test/Annotatedvideo_worm.py
Rukaume/LRCN
0d1928cc72544f59a4335fea7febc561d3dfc118
[ "MIT" ]
1
2020-11-07T05:57:52.000Z
2020-11-07T05:57:52.000Z
# -*- coding: utf-8 -*- """ Created on Fri Sep 4 22:27:11 2020 @author: Miyazaki """ imdir = "C:/Users/Miyazaki/Desktop/hayashi_lab/20200527_lethargus_analysis/renamed_pillar_chamber-N2/chamber3" resultdir= "C:/Users/Miyazaki/Desktop/hayashi_lab/20200527_lethargus_analysis/renamed_pillar_chamber-N2/result0918.csv" import os, cv2, shutil from tqdm import tqdm import pandas as pd os.chdir(imdir) os.makedirs("../annotatedimages", exist_ok = True) imlist = os.listdir("./") imlist = [i for i in imlist if os.path.splitext(i)[1] == '.jpg' \ or os.path.splitext(i)[1] == '.png'] imlist.sort() result = pd.read_csv(resultdir) font = cv2.FONT_HERSHEY_SIMPLEX for i in tqdm(range(len(imlist))): if int(result.loc[i]) == 0: tempim = cv2.imread(imlist[i]) tempim = cv2.putText(tempim,'quiescent',(10,500), font, 1,(255,0,0),2,cv2.LINE_AA) cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim) elif int(result.loc[i]) == 1: tempim = cv2.imread(imlist[i]) tempim = cv2.putText(tempim,'dwell',(10,500), font, 1,(0,255,0),2,cv2.LINE_AA) cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim) elif int(result.loc[i]) == 2: tempim = cv2.imread(imlist[i]) tempim = cv2.putText(tempim,'forward',(10,500), font, 1,(0,0,255),2,cv2.LINE_AA) cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim) elif int(result.loc[i]) == 3: tempim = cv2.imread(imlist[i]) tempim = cv2.putText(tempim,'backward',(10,500), font, 1,(100,100,0),2,cv2.LINE_AA) cv2.imwrite('../annotatedimages/{}'.format(imlist[i]), tempim) else: pass
36.545455
119
0.670398
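The four if/elif branches in the script above differ only in label text and colour. As an optional, table-driven variant (not part of the original script), the mapping can be made explicit; it assumes, as a hypothetical reading of the CSV, that the first column of the results file holds the class index.

# Hypothetical table-driven rewrite of the annotation loop above.
import cv2
import pandas as pd
from tqdm import tqdm

ANNOTATIONS = {
    0: ('quiescent', (255, 0, 0)),
    1: ('dwell', (0, 255, 0)),
    2: ('forward', (0, 0, 255)),
    3: ('backward', (100, 100, 0)),
}


def annotate_images(imlist, result, outdir='../annotatedimages'):
    font = cv2.FONT_HERSHEY_SIMPLEX
    for i, name in enumerate(tqdm(imlist)):
        # Assumption: the class index is in the first column of the results CSV.
        label = int(result.iloc[i, 0])
        if label not in ANNOTATIONS:
            continue
        text, colour = ANNOTATIONS[label]
        image = cv2.imread(name)
        image = cv2.putText(image, text, (10, 500), font, 1, colour, 2, cv2.LINE_AA)
        cv2.imwrite('{}/{}'.format(outdir, name), image)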
5dc789560fb397b3832cccec69534dcbf26e36d2
5,902
py
Python
emilia/modules/math.py
masterisira/ELIZA_OF-master
02a7dbf48e4a3d4ee0981e6a074529ab1497aafe
[ "Unlicense" ]
null
null
null
emilia/modules/math.py
masterisira/ELIZA_OF-master
02a7dbf48e4a3d4ee0981e6a074529ab1497aafe
[ "Unlicense" ]
null
null
null
emilia/modules/math.py
masterisira/ELIZA_OF-master
02a7dbf48e4a3d4ee0981e6a074529ab1497aafe
[ "Unlicense" ]
null
null
null
from typing import List

import requests
from telegram import Message, Update, Bot, MessageEntity
from telegram.ext import CommandHandler, run_async

from emilia import dispatcher
from emilia.modules.disable import DisableAbleCommandHandler
from emilia.modules.helper_funcs.alternate import send_message

import pynewtonmath as newton
import math


__help__ = """
Under Development.. More features soon
 - /cos: Cosine `/cos pi`
 - /sin: Sine `/sin 0`
 - /tan: Tangent `/tan 0`
 - /arccos: Inverse Cosine `/arccos 1`
 - /arcsin: Inverse Sine `/arcsin 0`
 - /arctan: Inverse Tangent `/arctan 0`
 - /abs: Absolute Value `/abs -1`
 - /log: Logarithm `/log 2l8`

__Keep in mind__: To find the tangent line of a function at a certain x value, send the request as c|f(x) where c is the given x value and f(x) is the function expression, the separator is a vertical bar '|'. See the table above for an example request.
To find the area under a function, send the request as c:d|f(x) where c is the starting x value, d is the ending x value, and f(x) is the function under which you want the curve between the two x values.
To compute fractions, enter expressions as numerator(over)denominator. For example, to process 2/4 you must send in your expression as 2(over)4. The result expression will be in standard math notation (1/2, 3/4).
"""

# NOTE: the callback functions referenced below (simplify, factor, derive, ...)
# belong to the original module but are not included in this record.
SIMPLIFY_HANDLER = DisableAbleCommandHandler("math", simplify, pass_args=True)
FACTOR_HANDLER = DisableAbleCommandHandler("factor", factor, pass_args=True)
DERIVE_HANDLER = DisableAbleCommandHandler("derive", derive, pass_args=True)
INTEGRATE_HANDLER = DisableAbleCommandHandler("integrate", integrate, pass_args=True)
ZEROES_HANDLER = DisableAbleCommandHandler("zeroes", zeroes, pass_args=True)
TANGENT_HANDLER = DisableAbleCommandHandler("tangent", tangent, pass_args=True)
AREA_HANDLER = DisableAbleCommandHandler("area", area, pass_args=True)
COS_HANDLER = DisableAbleCommandHandler("cos", cos, pass_args=True)
SIN_HANDLER = DisableAbleCommandHandler("sin", sin, pass_args=True)
TAN_HANDLER = DisableAbleCommandHandler("tan", tan, pass_args=True)
ARCCOS_HANDLER = DisableAbleCommandHandler("arccos", arccos, pass_args=True)
ARCSIN_HANDLER = DisableAbleCommandHandler("arcsin", arcsin, pass_args=True)
ARCTAN_HANDLER = DisableAbleCommandHandler("arctan", arctan, pass_args=True)
ABS_HANDLER = DisableAbleCommandHandler("abs", abs, pass_args=True)
LOG_HANDLER = DisableAbleCommandHandler("log", log, pass_args=True)

dispatcher.add_handler(SIMPLIFY_HANDLER)
dispatcher.add_handler(FACTOR_HANDLER)
dispatcher.add_handler(DERIVE_HANDLER)
dispatcher.add_handler(INTEGRATE_HANDLER)
dispatcher.add_handler(ZEROES_HANDLER)
dispatcher.add_handler(TANGENT_HANDLER)
dispatcher.add_handler(AREA_HANDLER)
dispatcher.add_handler(COS_HANDLER)
dispatcher.add_handler(SIN_HANDLER)
dispatcher.add_handler(TAN_HANDLER)
dispatcher.add_handler(ARCCOS_HANDLER)
dispatcher.add_handler(ARCSIN_HANDLER)
dispatcher.add_handler(ARCTAN_HANDLER)
dispatcher.add_handler(ABS_HANDLER)
dispatcher.add_handler(LOG_HANDLER)

__mod_name__ = "Math"
__command_list__ = ["math", "factor", "derive", "integrate", "zeroes", "tangent", "area", "cos", "sin", "tan", "arccos", "arcsin", "arctan", "abs", "log"]
__handlers__ = [
    SIMPLIFY_HANDLER, FACTOR_HANDLER, DERIVE_HANDLER, INTEGRATE_HANDLER, TANGENT_HANDLER,
    ZEROES_HANDLER, AREA_HANDLER, COS_HANDLER, SIN_HANDLER, TAN_HANDLER, ARCCOS_HANDLER,
    ARCSIN_HANDLER, ARCTAN_HANDLER, ABS_HANDLER, LOG_HANDLER
]
35.769697
252
0.763978
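The command callbacks wired into the handlers above are not part of this record. Purely as an illustration, the following is a hypothetical example of what one such callback could look like using the pre-v12 python-telegram-bot calling convention implied by the imports (Bot/Update plus pass_args); it is not the module's actual implementation.

# Hypothetical /sin callback in the pre-v12 python-telegram-bot style.
import math

from telegram import Bot, Update
from telegram.ext import run_async


@run_async
def sin(bot: Bot, update: Update, args):
    """Replies with the sine of the first argument, interpreted as radians."""
    try:
        value = math.pi if args and args[0] == 'pi' else float(args[0])
    except (IndexError, ValueError):
        update.effective_message.reply_text("Usage: /sin <number>")
        return
    update.effective_message.reply_text(str(math.sin(value)))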
5dc7dadf50bfb05ab92b9d5e96fde0df19295e15
4,996
py
Python
services/IAm.py
matteobjornsson/serverless-rock-paper-scissors
32b6f11644c59dc3bb159ee9e1118fed26a3983d
[ "MIT" ]
null
null
null
services/IAm.py
matteobjornsson/serverless-rock-paper-scissors
32b6f11644c59dc3bb159ee9e1118fed26a3983d
[ "MIT" ]
null
null
null
services/IAm.py
matteobjornsson/serverless-rock-paper-scissors
32b6f11644c59dc3bb159ee9e1118fed26a3983d
[ "MIT" ]
1
2021-04-20T23:55:37.000Z
2021-04-20T23:55:37.000Z
#
# Created on Thu Apr 22 2021
# Matteo Bjornsson
#
import boto3
from botocore.exceptions import ClientError
import logging

logging.basicConfig(filename="rps.log", level=logging.INFO)

iam_resource = boto3.resource("iam")
sts_client = boto3.client("sts")


def create_role(
    iam_role_name: str, assume_role_policy_json: str, policy_arns: list
) -> iam_resource.Role:
    """
    Create an IAM role with a given policy.

    :param assume_role_policy_json: A json string that represents the assume
        role policy defining what resources are allowed to assume the role.
    :param policy_arns: a list of strings representing existing policy arns to
        also attach to the role
    :return: IAM role object

    This method was adapted from the create_iam_role_for_lambda() method found here:
        https://docs.aws.amazon.com/code-samples/latest/catalog/python-lambda-boto_client_examples-lambda_basics.py.html
    """
    try:
        role = iam_resource.create_role(
            RoleName=iam_role_name,
            AssumeRolePolicyDocument=assume_role_policy_json,
        )
        # wait for the creation to complete
        iam_resource.meta.client.get_waiter("role_exists").wait(RoleName=iam_role_name)
        # attach the additional supplied policies
        for arn in policy_arns:
            role.attach_policy(PolicyArn=arn)
    except ClientError as error:
        if error.response["Error"]["Code"] == "EntityAlreadyExists":
            role = iam_resource.Role(iam_role_name)
            logging.warning("The role %s already exists. Using it.", iam_role_name)
            return role
        else:
            logging.error(error.response["Error"]["Message"])
            logging.exception(
                "Couldn't create role %s or attach policy %s.",
                iam_role_name,
                str(policy_arns),
            )
            raise
    else:
        logging.info("Created IAM role %s.", role.name)
        logging.info("Attached policies %s to role %s.", policy_arns, role.name)
        return role


def create_policy(policy_name: str, policy_json: str) -> iam_resource.Policy:
    """
    Create an IAM policy of given name and json description.
    Policies define permissions in AWS and can be associated with IAM roles.

    :param policy_json: must be a valid policy json string
    :return: IAM Policy object
    """
    try:
        policy = iam_resource.create_policy(
            PolicyName=policy_name, PolicyDocument=policy_json
        )
    except ClientError as error:
        if error.response["Error"]["Code"] == "EntityAlreadyExists":
            policy = get_policy_by_name(policy_name)
            logging.warning("The policy %s already exists. Using it.", policy.arn)
            return policy
        else:
            logging.error(error.response["Error"]["Message"])
            logging.exception("Couldn't create policy %s", policy_name)
            raise
    else:
        logging.info("Created Policy '%s'", policy_name)
        return policy


def get_policy_by_name(policy_name: str) -> iam_resource.Policy:
    """
    Get an existing policy by name.

    :return: IAM Policy object
    """
    # sts provides the account number of the current credentials
    account_id = sts_client.get_caller_identity()["Account"]
    # policy arns consist of an account id and policy name
    policy_arn = f"arn:aws:iam::{account_id}:policy/{policy_name}"
    # policies are created in the Python SDK via their arn
    policy = iam_resource.Policy(policy_arn)
    return policy


def delete_role(iam_role) -> dict:
    """
    Delete a role.

    :param iam_role: this parameter is an IAM role object, such as returned by create_role()
    """
    try:
        # remove all policies before deleting role
        for policy in iam_role.attached_policies.all():
            policy.detach_role(RoleName=iam_role.name)
        response = iam_role.delete()
    except ClientError as error:
        logging.error(error.response["Error"]["Message"])
        logging.error("Couldn't delete role %s", iam_role.name)
    else:
        logging.info("Deleted role '%s'", iam_role.name)
        return response


def delete_policy(iam_policy) -> dict:
    """
    Delete a policy.

    :param iam_policy: this parameter is an IAM policy object, such as returned by create_policy()
    """
    try:
        response = iam_policy.delete()
    except ClientError as error:
        logging.error(error.response["Error"]["Message"])
        logging.error("Couldn't delete policy %s", iam_policy.arn)
    else:
        logging.info("Deleted policy '%s'", iam_policy.arn)
        return response


if __name__ == "__main__":
    # brief functionality test with delete() cleanup at end
    policy_json_file = "./policy/lambda_policy.json"
    with open(policy_json_file) as file:
        policy_json = file.read()
    policy_name = "test_policy"
    policy = create_policy(policy_name, policy_json)
    print("new policy arn: ", policy.arn)
    policy.delete()
34.937063
116
0.664532
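A short usage sketch for the helpers above. It calls only functions defined in this record; the trust policy is the standard Lambda service trust relationship and the CloudWatch Logs policy is a generic example, neither is taken from this repository, and valid AWS credentials are assumed.

# Hypothetical end-to-end use of create_policy / create_role / delete_* above.
import json

lambda_trust_policy = json.dumps({
    "Version": "2012-10-17",
    "Statement": [{
        "Effect": "Allow",
        "Principal": {"Service": "lambda.amazonaws.com"},
        "Action": "sts:AssumeRole",
    }],
})

logs_policy_json = json.dumps({
    "Version": "2012-10-17",
    "Statement": [{
        "Effect": "Allow",
        "Action": ["logs:CreateLogGroup", "logs:CreateLogStream", "logs:PutLogEvents"],
        "Resource": "*",
    }],
})

policy = create_policy("rps-example-policy", logs_policy_json)
role = create_role("rps-example-role", lambda_trust_policy, [policy.arn])

# ... deploy resources that assume the role ...

delete_role(role)
delete_policy(policy)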
5dcb3695e4bb82f323f1875e14dd30d0eb26c6e3
199
py
Python
stograde/common/run_status.py
babatana/stograde
c1c447e99c44c23cef9dd857e669861f3708ae77
[ "MIT" ]
null
null
null
stograde/common/run_status.py
babatana/stograde
c1c447e99c44c23cef9dd857e669861f3708ae77
[ "MIT" ]
null
null
null
stograde/common/run_status.py
babatana/stograde
c1c447e99c44c23cef9dd857e669861f3708ae77
[ "MIT" ]
null
null
null
from enum import auto, Enum
19.9
33
0.683417
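Only the enum import survived in this record's content field. The members below are a hypothetical illustration of how an Enum built from auto() is typically declared; they are not the actual stograde RunStatus values.

# Hypothetical RunStatus enum; member names are assumptions for illustration only.
from enum import auto, Enum


class RunStatus(Enum):
    SUCCESS = auto()
    CALLED_PROCESS_ERROR = auto()
    FILE_NOT_FOUND = auto()
    TIMEOUT_EXPIRED = auto()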
5dcb43dbc68228752388169be1e8b17fd1bf9290
112
py
Python
recsys/__init__.py
shenghuiliuu/recsys
d706d1ae2558816c1e11ca790baeb7748200b404
[ "MIT" ]
50
2016-10-27T07:28:35.000Z
2022-03-30T01:32:32.000Z
recsys/__init__.py
shenghuiliuu/recsys
d706d1ae2558816c1e11ca790baeb7748200b404
[ "MIT" ]
5
2016-11-10T16:22:37.000Z
2020-09-16T10:26:59.000Z
recsys/__init__.py
shenghuiliuu/recsys
d706d1ae2558816c1e11ca790baeb7748200b404
[ "MIT" ]
22
2016-11-19T08:56:22.000Z
2021-06-23T16:13:10.000Z
__all__ = ['cross_validation', 'metrics', 'datasets', 'recommender']
14
31
0.4375
5dcbed3a8321b9c6b63677f0f51fde0daacfda04
21,917
py
Python
audiomate/annotations/label_list.py
CostanzoPablo/audiomate
080402eadaa81f77f64c8680510a2de64bc18e74
[ "MIT" ]
133
2018-05-18T13:54:10.000Z
2022-02-15T02:14:20.000Z
audiomate/annotations/label_list.py
CostanzoPablo/audiomate
080402eadaa81f77f64c8680510a2de64bc18e74
[ "MIT" ]
68
2018-06-03T16:42:09.000Z
2021-01-29T10:58:30.000Z
audiomate/annotations/label_list.py
CostanzoPablo/audiomate
080402eadaa81f77f64c8680510a2de64bc18e74
[ "MIT" ]
37
2018-11-02T02:40:29.000Z
2021-11-30T07:44:50.000Z
import collections import copy import intervaltree from .label import Label def addl(self, value, start=0.0, end=float('inf')): """ Shortcut for ``add(Label(value, start, end))``. """ self.add(Label(value, start=start, end=end)) def update(self, labels): """ Add a list of labels to the end of the list. Args: labels (list): Labels to add. """ ivs = [] for label in labels: label.label_list = self ivs.append(intervaltree.Interval(label.start, label.end, label)) self.label_tree.update(ivs) def apply(self, fn): """ Apply the given function `fn` to every label in this label list. `fn` is a function of one argument that receives the current label which can then be edited in place. Args: fn (func): Function to apply to every label Example: >>> ll = LabelList(labels=[ ... Label('a_label', 1.0, 2.0), ... Label('another_label', 2.0, 3.0) ... ]) >>> def shift_labels(label): ... label.start += 1.0 ... label.end += 1.0 ... >>> ll.apply(shift_labels) >>> ll.labels [Label(a_label, 2.0, 3.0), Label(another_label, 3.0, 4.0)] """ for label in self.labels: fn(label) def merge_overlaps(self, threshold=0.0): """ Merge overlapping labels with the same value. Two labels are considered overlapping, if ``l2.start - l1.end < threshold``. Args: threshold (float): Maximal distance between two labels to be considered as overlapping. (default: 0.0) Example: >>> ll = LabelList(labels=[ ... Label('a_label', 1.0, 2.0), ... Label('a_label', 1.5, 2.7), ... Label('b_label', 1.0, 2.0), ... ]) >>> ll.merge_overlapping_labels() >>> ll.labels [ Label('a_label', 1.0, 2.7), Label('b_label', 1.0, 2.0), ] """ updated_labels = [] all_intervals = self.label_tree.copy() # recursivly find a group of overlapping labels with the same value # For every remaining interval # - Find overlapping intervals recursively # - Remove them # - Create a concatenated new label while not all_intervals.is_empty(): next_interval = list(all_intervals)[0] overlapping = recursive_overlaps(next_interval) ov_start = float('inf') ov_end = 0.0 ov_value = next_interval.data.value for overlap in overlapping: ov_start = min(ov_start, overlap.begin) ov_end = max(ov_end, overlap.end) all_intervals.discard(overlap) updated_labels.append(Label( ov_value, ov_start, ov_end )) # Replace the old labels with the updated ones self.label_tree.clear() self.update(updated_labels) # # Statistics # def label_total_duration(self): """ Return for each distinct label value the total duration of all occurrences. Returns: dict: A dictionary containing for every label-value (key) the total duration in seconds (value). Example: >>> ll = LabelList(labels=[ >>> Label('a', 3, 5), >>> Label('b', 5, 8), >>> Label('a', 8, 10), >>> Label('b', 10, 14), >>> Label('a', 15, 18.5) >>> ]) >>> ll.label_total_duration() {'a': 7.5 'b': 7.0} """ durations = collections.defaultdict(float) for label in self: durations[label.value] += label.duration return durations def label_values(self): """ Return a list of all occuring label values. Returns: list: Lexicographically sorted list (str) of label values. Example: >>> ll = LabelList(labels=[ >>> Label('a', 3.2, 4.5), >>> Label('b', 5.1, 8.9), >>> Label('c', 7.2, 10.5), >>> Label('d', 10.5, 14), >>> Label('d', 15, 18) >>> ]) >>> ll.label_values() ['a', 'b', 'c', 'd'] """ all_labels = {l.value for l in self} return sorted(all_labels) def label_count(self): """ Return for each label the number of occurrences within the list. Returns: dict: A dictionary containing for every label-value (key) the number of occurrences (value). 
Example: >>> ll = LabelList(labels=[ >>> Label('a', 3.2, 4.5), >>> Label('b', 5.1, 8.9), >>> Label('a', 7.2, 10.5), >>> Label('b', 10.5, 14), >>> Label('a', 15, 18) >>> ]) >>> ll.label_count() {'a': 3 'b': 2} """ occurrences = collections.defaultdict(int) for label in self: occurrences[label.value] += 1 return occurrences def all_tokens(self, delimiter=' '): """ Return a list of all tokens occurring in the label-list. Args: delimiter (str): The delimiter used to split labels into tokens. See :meth:`audiomate.annotations.Label.tokenized` Returns: :class:`set`: A set of distinct tokens. """ tokens = set() for label in self: tokens = tokens.union(set(label.tokenized(delimiter=delimiter))) return tokens # # Query Label Values # def join(self, delimiter=' ', overlap_threshold=0.1): """ Return a string with all labels concatenated together. The order of the labels is defined by the start of the label. If the overlapping between two labels is greater than ``overlap_threshold``, an Exception is thrown. Args: delimiter (str): A string to join two consecutive labels. overlap_threshold (float): Maximum overlap between two consecutive labels. Returns: str: A string with all labels concatenated together. Example: >>> ll = LabelList(idx='some', labels=[ >>> Label('a', start=0, end=4), >>> Label('b', start=3.95, end=6.0), >>> Label('c', start=7.0, end=10.2), >>> Label('d', start=10.3, end=14.0) >>> ]) >>> ll.join(' - ') 'a - b - c - d' """ sorted_by_start = sorted(self.labels) concat_values = [] last_label_end = None for label in sorted_by_start: if last_label_end is None or (last_label_end - label.start < overlap_threshold and last_label_end > 0): concat_values.append(label.value) last_label_end = label.end else: raise ValueError('Labels overlap, not able to define the correct order') return delimiter.join(concat_values) def tokenized(self, delimiter=' ', overlap_threshold=0.1): """ Return a ordered list of tokens based on all labels. Joins all token from all labels (``label.tokenized()```). If the overlapping between two labels is greater than ``overlap_threshold``, an Exception is thrown. Args: delimiter (str): The delimiter used to split labels into tokens. (default: space) overlap_threshold (float): Maximum overlap between two consecutive labels. Returns: str: A list containing tokens of all labels ordered according to the label order. Example: >>> ll = LabelList(idx='some', labels=[ >>> Label('a d q', start=0, end=4), >>> Label('b', start=3.95, end=6.0), >>> Label('c a', start=7.0, end=10.2), >>> Label('f g', start=10.3, end=14.0) >>> ]) >>> ll.tokenized(delimiter=' ', overlap_threshold=0.1) ['a', 'd', 'q', 'b', 'c', 'a', 'f', 'g'] """ sorted_by_start = sorted(self.labels) tokens = [] last_label_end = None for label in sorted_by_start: if last_label_end is None or (last_label_end - label.start < overlap_threshold and last_label_end > 0): tokens.extend(label.tokenized(delimiter=delimiter)) last_label_end = label.end else: raise ValueError('Labels overlap, not able to define the correct order') return tokens # # Restructuring # def separated(self): """ Create a separate Label-List for every distinct label-value. Returns: dict: A dictionary with distinct label-values as keys. Every value is a LabelList containing only labels with the same value. 
Example: >>> ll = LabelList(idx='some', labels=[ >>> Label('a', start=0, end=4), >>> Label('b', start=3.95, end=6.0), >>> Label('a', start=7.0, end=10.2), >>> Label('b', start=10.3, end=14.0) >>> ]) >>> s = ll.separate() >>> s['a'].labels [Label('a', start=0, end=4), Label('a', start=7.0, end=10.2)] >>> s['b'].labels [Label('b', start=3.95, end=6.0), Label('b', start=10.3, end=14.0)] """ separated_lls = collections.defaultdict(LabelList) for label in self.labels: separated_lls[label.value].add(label) for ll in separated_lls.values(): ll.idx = self.idx return separated_lls def labels_in_range(self, start, end, fully_included=False): """ Return a list of labels, that are within the given range. Also labels that only overlap are included. Args: start(float): Start-time in seconds. end(float): End-time in seconds. fully_included(bool): If ``True``, only labels fully included in the range are returned. Otherwise also overlapping ones are returned. (default ``False``) Returns: list: List of labels in the range. Example: >>> ll = LabelList(labels=[ >>> Label('a', 3.2, 4.5), >>> Label('b', 5.1, 8.9), >>> Label('c', 7.2, 10.5), >>> Label('d', 10.5, 14) >>>]) >>> ll.labels_in_range(6.2, 10.1) [Label('b', 5.1, 8.9), Label('c', 7.2, 10.5)] """ if fully_included: intervals = self.label_tree.envelop(start, end) else: intervals = self.label_tree.overlap(start, end) return [iv.data for iv in intervals] def split(self, cutting_points, shift_times=False, overlap=0.0): """ Split the label-list into x parts and return them as new label-lists. x is defined by the number of cutting-points (``x == len(cutting_points) + 1``). The result is a list of label-lists corresponding to each part. Label-list 0 contains labels between ``0`` and ``cutting_points[0]``. Label-list 1 contains labels between ``cutting_points[0]`` and ``cutting_points[1]``. And so on. Args: cutting_points(list): List of floats defining the points in seconds, where the label-list is splitted. shift_times(bool): If True, start and end-time are shifted in splitted label-lists. So the start is relative to the cutting point and not to the beginning of the original label-list. overlap(float): Amount of overlap in seconds. This amount is subtracted from a start-cutting-point, and added to a end-cutting-point. Returns: list: A list of of: class: `audiomate.annotations.LabelList`. Example: >>> ll = LabelList(labels=[ >>> Label('a', 0, 5), >>> Label('b', 5, 10), >>> Label('c', 11, 15), >>>]) >>> >>> res = ll.split([4.1, 8.9, 12.0]) >>> len(res) 4 >>> res[0].labels [Label('a', 0.0, 4.1)] >>> res[1].labels [ Label('a', 4.1, 5.0), Label('b', 5.0, 8.9) ] >>> res[2].labels [ Label('b', 8.9, 10.0), Label('c', 11.0, 12.0) ] >>> res[3].labels [Label('c', 12.0, 15.0)] If ``shift_times = True``, the times are adjusted to be relative to the cutting-points for every label-list but the first. 
>>> ll = LabelList(labels=[ >>> Label('a', 0, 5), >>> Label('b', 5, 10), >>>]) >>> >>> res = ll.split([4.6]) >>> len(res) 4 >>> res[0].labels [Label('a', 0.0, 4.6)] >>> res[1].labels [ Label('a', 0.0, 0.4), Label('b', 0.4, 5.4) ] """ if len(cutting_points) == 0: raise ValueError('At least one cutting-point is needed!') # we have to loop in sorted order cutting_points = sorted(cutting_points) splits = [] iv_start = 0.0 for i in range(len(cutting_points) + 1): if i < len(cutting_points): iv_end = cutting_points[i] else: iv_end = float('inf') # get all intervals intersecting range intervals = self.label_tree.overlap( iv_start - overlap, iv_end + overlap ) cp_splits = LabelList(idx=self.idx) # Extract labels from intervals with updated times for iv in intervals: label = copy.deepcopy(iv.data) label.start = max(0, iv_start - overlap, label.start) label.end = min(iv_end + overlap, label.end) if shift_times: orig_start = max(0, iv_start - overlap) label.start -= orig_start label.end -= orig_start cp_splits.add(label) splits.append(cp_splits) iv_start = iv_end return splits # # Convenience Constructors #
31.580692
115
0.505087
5dcd4858a80507237d1cda30d4f8de4336f40710
3,044
py
Python
src/views/age_results_widget.py
RubyMarsden/Crayfish
33bbb1248beec2fc40eee59e462711dd8cbc33da
[ "MIT" ]
null
null
null
src/views/age_results_widget.py
RubyMarsden/Crayfish
33bbb1248beec2fc40eee59e462711dd8cbc33da
[ "MIT" ]
8
2021-03-19T06:35:48.000Z
2021-03-31T14:23:24.000Z
src/views/age_results_widget.py
RubyMarsden/Crayfish
33bbb1248beec2fc40eee59e462711dd8cbc33da
[ "MIT" ]
null
null
null
import matplotlib
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QHBoxLayout, QDialog, QPushButton, QWidget, QVBoxLayout, QLabel

matplotlib.use('QT5Agg')
import matplotlib.pyplot as plt

from models.data_key import DataKey
from utils import ui_utils
30.747475
100
0.598555
5dcd80b6fc81b5df240724a020510872ace9a270
791
py
Python
examples/single_run/ocaes_single_run.py
EnergyModels/OCAES
d848d9fa621767e036824110de87450d524b7687
[ "MIT" ]
null
null
null
examples/single_run/ocaes_single_run.py
EnergyModels/OCAES
d848d9fa621767e036824110de87450d524b7687
[ "MIT" ]
null
null
null
examples/single_run/ocaes_single_run.py
EnergyModels/OCAES
d848d9fa621767e036824110de87450d524b7687
[ "MIT" ]
null
null
null
import pandas as pd
from OCAES import ocaes

# ----------------------
# create and run model
# ----------------------
data = pd.read_csv('timeseries_inputs_2019.csv')
inputs = ocaes.get_default_inputs()
# inputs['C_well'] = 5000.0
# inputs['X_well'] = 50.0
# inputs['L_well'] = 50.0
# inputs['X_cmp'] = 0
# inputs['X_exp'] = 0
model = ocaes(data, inputs)
df, s = model.get_full_results()
revenue, LCOE, COVE, avoided_emissions = model.post_process(s)
s['revenue'] = revenue
s['LCOE'] = LCOE
s['COVE'] = COVE
s['avoided_emissions'] = avoided_emissions
df.to_csv('results_timeseries.csv')
s.to_csv('results_values.csv')
print(model.calculate_LCOE(s))

# ----------------------
# create plots using built-in functions
# ----------------------
model.plot_overview()
model.plot_power_energy()
23.969697
62
0.637168
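The script above performs a single model run. As a hypothetical extension, the commented-out input overrides suggest a simple parameter sweep; the sketch below reuses only calls already present in this record (get_default_inputs, get_full_results, post_process), and the C_well values chosen are arbitrary examples.

# Hypothetical sweep over the well capital cost input shown commented out above.
import pandas as pd
from OCAES import ocaes

data = pd.read_csv('timeseries_inputs_2019.csv')
results = []
for C_well in [2500.0, 5000.0, 7500.0]:
    inputs = ocaes.get_default_inputs()
    inputs['C_well'] = C_well
    model = ocaes(data, inputs)
    df, s = model.get_full_results()
    revenue, LCOE, COVE, avoided_emissions = model.post_process(s)
    results.append({'C_well': C_well, 'LCOE': LCOE, 'revenue': revenue})

pd.DataFrame(results).to_csv('results_sweep.csv', index=False)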