Column schema of the dump (name: dtype, observed range or class count):

hexsha: stringlengths, 40 to 40
size: int64, 5 to 2.06M
ext: stringclasses, 11 values
lang: stringclasses, 1 value
max_stars_repo_path: stringlengths, 3 to 251
max_stars_repo_name: stringlengths, 4 to 130
max_stars_repo_head_hexsha: stringlengths, 40 to 78
max_stars_repo_licenses: sequencelengths, 1 to 10
max_stars_count: int64, 1 to 191k
max_stars_repo_stars_event_min_datetime: stringlengths, 24 to 24
max_stars_repo_stars_event_max_datetime: stringlengths, 24 to 24
max_issues_repo_path: stringlengths, 3 to 251
max_issues_repo_name: stringlengths, 4 to 130
max_issues_repo_head_hexsha: stringlengths, 40 to 78
max_issues_repo_licenses: sequencelengths, 1 to 10
max_issues_count: int64, 1 to 116k
max_issues_repo_issues_event_min_datetime: stringlengths, 24 to 24
max_issues_repo_issues_event_max_datetime: stringlengths, 24 to 24
max_forks_repo_path: stringlengths, 3 to 251
max_forks_repo_name: stringlengths, 4 to 130
max_forks_repo_head_hexsha: stringlengths, 40 to 78
max_forks_repo_licenses: sequencelengths, 1 to 10
max_forks_count: int64, 1 to 105k
max_forks_repo_forks_event_min_datetime: stringlengths, 24 to 24
max_forks_repo_forks_event_max_datetime: stringlengths, 24 to 24
content: stringlengths, 1 to 1.05M
avg_line_length: float64, 1 to 1.02M
max_line_length: int64, 3 to 1.04M
alphanum_fraction: float64, 0 to 1
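The listing above is the column schema for each record in this dump (name, dtype, and observed range or class count). As an illustration only, here is a minimal sketch of how records with this schema could be streamed and filtered with the Hugging Face `datasets` library; the dataset path "org/python-source-dump" and the filter thresholds are placeholder assumptions, not details taken from this dump.

# Hypothetical sketch: stream a dataset with the schema above and keep small,
# starred Python files. "org/python-source-dump" is a placeholder path.
from itertools import islice

from datasets import load_dataset

ds = load_dataset("org/python-source-dump", split="train", streaming=True)

# Keep rows using only columns defined in the schema: ext, size, max_stars_count.
# max_stars_count may be null, so default it to 0 before comparing.
kept = (
    row
    for row in ds
    if row["ext"] == "py"
    and row["size"] < 50_000
    and (row["max_stars_count"] or 0) >= 10
)

# Print a few rows' repo name, file path, and content length.
for row in islice(kept, 5):
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], len(row["content"]))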
8a92d260f5ba3c3243955569573ecad3cecaf8e9
2,079
py
Python
bcloud-snap/bcloud-3.9.1/bcloud/hasher.py
jiaxiaolei/my_snap_demo
0444077c763e029eb67af7242537cebb3c3d6aa4
[ "Apache-2.0" ]
null
null
null
bcloud-snap/bcloud-3.9.1/bcloud/hasher.py
jiaxiaolei/my_snap_demo
0444077c763e029eb67af7242537cebb3c3d6aa4
[ "Apache-2.0" ]
4
2019-11-20T02:45:19.000Z
2019-12-03T03:14:15.000Z
bcloud-snap/bcloud-3.9.1/bcloud/hasher.py
jiaxiaolei/my_snap_demo
0444077c763e029eb67af7242537cebb3c3d6aa4
[ "Apache-2.0" ]
null
null
null
# Copyright (C) 2014-2015 LiuLang <[email protected]>
# Use of this source code is governed by GPLv3 license that can be found
# in http://www.gnu.org/licenses/gpl-3.0.html

import hashlib
import os
import zlib

CHUNK = 2 ** 20
21.65625
72
0.556037
8a92dd9cacd718af3ee73590efc1c1d73a3833aa
12,093
py
Python
beansdbadmin/core/client.py
ariesdevil/beansdbadmin
3165087ef57b7511ab84fbc50cf16eb8f54d83cd
[ "BSD-3-Clause" ]
11
2018-08-28T09:16:02.000Z
2021-11-08T09:39:15.000Z
beansdbadmin/core/client.py
ariesdevil/beansdbadmin
3165087ef57b7511ab84fbc50cf16eb8f54d83cd
[ "BSD-3-Clause" ]
2
2019-08-29T03:27:24.000Z
2020-07-24T02:45:39.000Z
beansdbadmin/core/client.py
ariesdevil/beansdbadmin
3165087ef57b7511ab84fbc50cf16eb8f54d83cd
[ "BSD-3-Clause" ]
4
2019-05-10T12:10:31.000Z
2020-07-17T03:22:02.000Z
#!/usr/bin/python # encoding: utf-8 '''a rich client 1. for one server (instead of multi like in libmc.Client) 2. encapsulate @, ?, gc ... use is instead of libmc.Client ''' import telnetlib import logging import libmc import string import urllib import itertools import warnings from collections import defaultdict from beansdbadmin.core.hint import parse_new_hint_body from beansdbadmin.core.data import parse_records from beansdbadmin.core.hash import get_khash64 def get_buckets_keys_count(store): """ return dict: buckets -> count """ st = {} try: for line in (store.get('@') or '').split('\n'): if line: d, _, c = line.split(' ') if not d.endswith('/'): continue st[int(d[0], 16)] = int(c) return st except IOError: raise Exception("cannot get @ from %s" % (store)) def get_primary_buckets(store): """ return possible primary buckets, might be wrong on temporary nodes, result is list of buckets in integer """ ss = get_buckets_keys_count(store) bucket_list = ss.items() bucket_list = [x for x in bucket_list if x[1] > 0] if not bucket_list: return None bucket_list.sort(lambda a, b: cmp(a[1], b[1]), reverse=True) result = [bucket_list[0]] for i in bucket_list[1:]: if result[-1][1] / i[1] >= 2: break result.append(i) return [x[0] for x in result] def get_key_info_disk(store, key): '''return ver, vhash, flag, vsz, ts, fid, pos''' info = store.get('??' + key) if info: return [int(x) for x in info.split()] def test_new(addr, bucket): b = bucket c = DBClient(addr) print "stats:", c.stats() print 'version:', c.get_server_version() print "isold:", c.is_old() print "dir root:", c.get_dir("@") print "bucket key count:", c.get_bucket_keys_count(int(b)) print "item_count:", c.item_count() print "primary_buckets", get_primary_buckets(c) leaf = c.get_dir("@" + b + "000000") print "a dir leaf:", leaf khash_str = list(leaf)[0] print "a khash_str", khash_str r = c.get_records_by_khash(khash_str)[0] k = r[0] print "key, len(value), (flag, tstamp, ver):", k, r[1], r[3:] print "key info mem:", c.get_key_info_mem(k) print "key info disk(ver, vhash, flag, vsz, ts, fid, pos):", \ c.get_key_info_disk(k) print "key version:", c.get_version(k) print "collision_summary", c.get_collision_summary(int(b)) print "gc status:", c.get_gc_status() if __name__ == '__main__': test_new("rosa3a:7900", '3')
30.308271
103
0.561399
8a963372962a426bfe2a29c3f4ef8694684f359b
1,448
py
Python
Simulator/Geometry/RectOverlap.py
cuixiongyi/RBE595
fc5c6aa6c479eb14186a9168e47724b7b3d06cde
[ "MIT" ]
null
null
null
Simulator/Geometry/RectOverlap.py
cuixiongyi/RBE595
fc5c6aa6c479eb14186a9168e47724b7b3d06cde
[ "MIT" ]
null
null
null
Simulator/Geometry/RectOverlap.py
cuixiongyi/RBE595
fc5c6aa6c479eb14186a9168e47724b7b3d06cde
[ "MIT" ]
null
null
null
import matplotlib.pyplot

__author__ = 'xiongyi'

line1 = [(200, 100), (200, 400)]
line2 = [(190, 190), (210, 210)]

if __name__ == '__main__':
    matplotlib.pyplot.plot((line1[0][0],line1[1][0]),(line1[0][1],line1[1][1]))
    matplotlib.pyplot.hold(True)
    matplotlib.pyplot.plot((line2[0][0],line2[1][0]),(line2[0][1],line2[1][1]))
    print(overlap())
    matplotlib.pyplot.show()
26.814815
79
0.566989
8a96a020d6c369841c24ae3ddad9a09c8b54550c
4,434
py
Python
gino/loader.py
p4l1ly/gino
bbe63ed841bf989a0f47b6cae64db85b0b606794
[ "BSD-3-Clause" ]
null
null
null
gino/loader.py
p4l1ly/gino
bbe63ed841bf989a0f47b6cae64db85b0b606794
[ "BSD-3-Clause" ]
null
null
null
gino/loader.py
p4l1ly/gino
bbe63ed841bf989a0f47b6cae64db85b0b606794
[ "BSD-3-Clause" ]
null
null
null
from sqlalchemy import select
from sqlalchemy.schema import Column

from .declarative import Model


class ModelLoader(Loader):

class AliasLoader(ModelLoader):

class ColumnLoader(Loader):

class TupleLoader(Loader):

class CallableLoader(Loader):

class ValueLoader(Loader):
28.063291
79
0.570591
8a9705a2e78a0cfbf1bbd48dd0bfdf9b979f2917
3,751
py
Python
emission/clients/choice/choice.py
Andrew-Tan/e-mission-server
91d59bee86e63d803e401f10f4b6a2502effedda
[ "BSD-3-Clause" ]
null
null
null
emission/clients/choice/choice.py
Andrew-Tan/e-mission-server
91d59bee86e63d803e401f10f4b6a2502effedda
[ "BSD-3-Clause" ]
1
2017-08-31T19:54:16.000Z
2017-08-31T19:54:16.000Z
emission/clients/choice/choice.py
Andrew-Tan/e-mission-server
91d59bee86e63d803e401f10f4b6a2502effedda
[ "BSD-3-Clause" ]
null
null
null
# Standard imports
import logging
import math
import json
from uuid import UUID
from datetime import datetime, timedelta
import time

# Our imports
from emission.core.get_database import get_trip_db, get_section_db
import emission.analysis.result.carbon as carbon
import emission.core.common as common
import emission.net.api.stats as stats
from emission.core.wrapper.user import User
from emission.clients.leaderboard import leaderboard
from emission.clients.gamified import gamified
from emission.clients.recommendation import recommendation
from emission.clients.commontrips import commontrips
from emission.clients.data import data

# TODO: Consider subclassing to provide client specific user functions
# These are copy/pasted from our first client, the carshare study

# TODO: Simplify this. runBackgroundTasks are currently only invoked from the
# result precomputation code. We could change that code to pass in the day, and
# remove this interface. Extra credit: should we pass in the day, or a date
# range? Passing in the date range could make it possible for us to run the
# scripts more than once a day...
39.484211
103
0.724607
8a975211bf46410d2e2a9a98de298bed52013baa
6,589
py
Python
lib/formatter/text.py
ylafon/redbot
87f4edcc8ccda35f556331abd1e76d5e9b79cdd0
[ "Unlicense" ]
null
null
null
lib/formatter/text.py
ylafon/redbot
87f4edcc8ccda35f556331abd1e76d5e9b79cdd0
[ "Unlicense" ]
null
null
null
lib/formatter/text.py
ylafon/redbot
87f4edcc8ccda35f556331abd1e76d5e9b79cdd0
[ "Unlicense" ]
1
2021-06-01T12:08:29.000Z
2021-06-01T12:08:29.000Z
#!/usr/bin/env python """ HAR Formatter for REDbot. """ __author__ = "Jerome Renard <[email protected]>" __copyright__ = """\ Copyright (c) 2008-2010 Mark Nottingham Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import operator import nbhttp.error as nberr import redbot.speak as rs from redbot.formatter import Formatter nl = u"\n" # TODO: errors and status on stderr with CLI?
33.277778
101
0.587039
8a9862396c2189c4e0deacb6232ab6ab3fc808e2
5,999
py
Python
lib/ioe_pot.py
ifurusato/ros
77b1361e78f68f00ba2d3e3db908bb5ce0f973f5
[ "MIT" ]
9
2020-10-12T08:49:55.000Z
2021-07-23T14:20:05.000Z
lib/ioe_pot.py
fanmuzhi/ros
04534a35901341c4aaa9084bff3d46851795357d
[ "MIT" ]
12
2020-07-22T19:08:58.000Z
2022-02-03T03:17:03.000Z
lib/ioe_pot.py
fanmuzhi/ros
04534a35901341c4aaa9084bff3d46851795357d
[ "MIT" ]
3
2020-07-19T20:43:19.000Z
2022-03-02T09:15:51.000Z
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020-2021 by Murray Altheim. All rights reserved. This file is part
# of the Robot Operating System project, released under the MIT License. Please
# see the LICENSE file included as part of this package.
#
# author:   Murray Altheim
# created:  2020-09-19
# modified: 2020-09-19
#

import sys, colorsys
import ioexpander as io
from colorama import init, Fore, Style
init()

from lib.logger import Logger

# ..............................................................................
# return (( self._out_max - self._out_min ) * ( self.get_value() - self._in_min ) / ( self._in_max - self._in_min )) + self._out_min

#EOF
44.437037
138
0.543091
8a98a3e8f4fe8ffe2c483dbdada681b7ff1782e2
490
py
Python
stubs/micropython-esp32-1_12/urequests.py
RonaldHiemstra/micropython-stubs
d97f879b01f6687baaebef1c7e26a80909c3cff3
[ "MIT" ]
38
2020-10-18T21:59:44.000Z
2022-03-17T03:03:28.000Z
stubs/micropython-esp32-1_12/urequests.py
RonaldHiemstra/micropython-stubs
d97f879b01f6687baaebef1c7e26a80909c3cff3
[ "MIT" ]
176
2020-10-18T14:31:03.000Z
2022-03-30T23:22:39.000Z
stubs/micropython-esp32-1_12/urequests.py
RonaldHiemstra/micropython-stubs
d97f879b01f6687baaebef1c7e26a80909c3cff3
[ "MIT" ]
6
2020-12-28T21:11:12.000Z
2022-02-06T04:07:50.000Z
""" Module: 'urequests' on esp32 1.12.0 """ # MCU: (sysname='esp32', nodename='esp32', release='1.12.0', version='v1.12 on 2019-12-20', machine='ESP32 module (spiram) with ESP32') # Stubber: 1.3.2 usocket = None
12.564103
135
0.563265
8a995f399ed25fbe111acb3f8ad5749b538eef0a
433
py
Python
python/re_user.py
seckcoder/lang-learn
1e0d6f412bbd7f89b1af00293fd907ddb3c1b571
[ "Unlicense" ]
1
2017-10-14T04:23:45.000Z
2017-10-14T04:23:45.000Z
python/re_user.py
seckcoder/lang-learn
1e0d6f412bbd7f89b1af00293fd907ddb3c1b571
[ "Unlicense" ]
null
null
null
python/re_user.py
seckcoder/lang-learn
1e0d6f412bbd7f89b1af00293fd907ddb3c1b571
[ "Unlicense" ]
null
null
null
#!/usr/bin/env python
#-*- coding=utf-8 -*-
#
# Copyright 2012 Jike Inc. All Rights Reserved.
# Author: [email protected]
import re
from urlparse import urlparse

parse1()
22.789474
57
0.637413
8a9978555063ed5f44aba19723290d6745163dd2
2,806
py
Python
TransactionBook/gui_kivy/generic/MultiSelectPopUp.py
LukHad/AccountBook
8da3ebbd2a824efb9d50f7695ceaaa6cf2370cd8
[ "MIT" ]
null
null
null
TransactionBook/gui_kivy/generic/MultiSelectPopUp.py
LukHad/AccountBook
8da3ebbd2a824efb9d50f7695ceaaa6cf2370cd8
[ "MIT" ]
null
null
null
TransactionBook/gui_kivy/generic/MultiSelectPopUp.py
LukHad/AccountBook
8da3ebbd2a824efb9d50f7695ceaaa6cf2370cd8
[ "MIT" ]
null
null
null
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from kivy.garden.matplotlib.backend_kivyagg import FigureCanvasKivyAgg
from kivy.uix.anchorlayout import AnchorLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
import matplotlib.pyplot as plt
import matplotlib
import datetime
from TransactionBook.model.Filter import Filter
from datetime import datetime
from kivy.uix.popup import Popup
from kivy.properties import NumericProperty, ReferenceListProperty
from kivy.uix.checkbox import CheckBox
from kivy.core.window import Window

if __name__ == "__main__":
    from kivy.base import runTouchApp
    c = MultiSelectPopUp(title="Test", option_list=["Item1", "Item2", "Item3"], callback=cb, option_init=[True, False, True])
    runTouchApp(c)
35.075
125
0.679259
8a9a247a499b63acd31b3bc3a6e73d3d156a0e43
1,903
py
Python
Assignment1/Part2/Bridge2.py
MormonJesus69420/Knowledge-Based-Systems-Project
8b1e330c64dd58743513f3e48efb6569457beb94
[ "WTFPL" ]
null
null
null
Assignment1/Part2/Bridge2.py
MormonJesus69420/Knowledge-Based-Systems-Project
8b1e330c64dd58743513f3e48efb6569457beb94
[ "WTFPL" ]
null
null
null
Assignment1/Part2/Bridge2.py
MormonJesus69420/Knowledge-Based-Systems-Project
8b1e330c64dd58743513f3e48efb6569457beb94
[ "WTFPL" ]
null
null
null
from dataclasses import dataclass, field
from typing import List

from Car2 import Car
28.833333
80
0.62743
8a9ada50ee04b4224d0c5731fe46fe28317d335c
19,192
py
Python
lib/tuner_interface.py
jefflundberg/locast2plex
3ab747a13c47888507c08f17d0afacad09894019
[ "MIT" ]
null
null
null
lib/tuner_interface.py
jefflundberg/locast2plex
3ab747a13c47888507c08f17d0afacad09894019
[ "MIT" ]
null
null
null
lib/tuner_interface.py
jefflundberg/locast2plex
3ab747a13c47888507c08f17d0afacad09894019
[ "MIT" ]
null
null
null
import subprocess import threading import time import errno import socket import urllib import pathlib from io import StringIO from http.server import BaseHTTPRequestHandler, HTTPServer import lib.stations as stations import lib.epg2xml as epg2xml import lib.channels_m3u as channels_m3u from lib.templates import templates # with help from https://www.acmesystems.it/python_http # and https://stackoverflow.com/questions/21631799/how-can-i-pass-parameters-to-a-requesthandler # mostly from https://github.com/ZeWaren/python-upnp-ssdp-example # and https://stackoverflow.com/questions/46210672/python-2-7-streaming-http-server-supporting-multiple-connections-on-one-port def start(config, locast, location): serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) serverSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) serverSocket.bind((config["main"]['bind_ip'], int(config["main"]['bind_port']))) serverSocket.listen(int(config["main"]["concurrent_listeners"])) print("Now listening for requests.") for i in range(int(config["main"]["concurrent_listeners"])): PlexHttpServer(serverSocket, config, locast, location)
43.12809
169
0.546321
8a9cd2106529aad0ea2a1405ec139e1af2cab3e4
1,130
py
Python
{{ cookiecutter.project_name }}/{{ cookiecutter.project_name }}/local/pages/views.py
dcs3spp/cookiecutter-django-api
d575dda07930743c05a27eb968489867831d97de
[ "Apache-1.1" ]
null
null
null
{{ cookiecutter.project_name }}/{{ cookiecutter.project_name }}/local/pages/views.py
dcs3spp/cookiecutter-django-api
d575dda07930743c05a27eb968489867831d97de
[ "Apache-1.1" ]
null
null
null
{{ cookiecutter.project_name }}/{{ cookiecutter.project_name }}/local/pages/views.py
dcs3spp/cookiecutter-django-api
d575dda07930743c05a27eb968489867831d97de
[ "Apache-1.1" ]
null
null
null
from django import template
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.template import loader
35.3125
71
0.718584
8a9d019bec9e50c7c8d759ea60e658149d43ef2a
2,561
py
Python
audiomentations/core/utils.py
jeongyoonlee/audiomentations
7f0112ae310989430e0ef7eb32c4116114810966
[ "MIT" ]
1
2021-02-03T19:12:04.000Z
2021-02-03T19:12:04.000Z
audiomentations/core/utils.py
jeongyoonlee/audiomentations
7f0112ae310989430e0ef7eb32c4116114810966
[ "MIT" ]
null
null
null
audiomentations/core/utils.py
jeongyoonlee/audiomentations
7f0112ae310989430e0ef7eb32c4116114810966
[ "MIT" ]
1
2021-07-08T07:33:10.000Z
2021-07-08T07:33:10.000Z
import os from pathlib import Path import numpy as np AUDIO_FILENAME_ENDINGS = (".aiff", ".flac", ".m4a", ".mp3", ".ogg", ".opus", ".wav") def get_file_paths( root_path, filename_endings=AUDIO_FILENAME_ENDINGS, traverse_subdirectories=True ): """Return a list of paths to all files with the given filename extensions in a directory. Also traverses subdirectories by default. """ file_paths = [] for root, dirs, filenames in os.walk(root_path): filenames = sorted(filenames) for filename in filenames: input_path = os.path.abspath(root) file_path = os.path.join(input_path, filename) if filename.lower().endswith(filename_endings): file_paths.append(Path(file_path)) if not traverse_subdirectories: # prevent descending into subfolders break return file_paths def calculate_rms(samples): """Given a numpy array of audio samples, return its Root Mean Square (RMS).""" return np.sqrt(np.mean(np.square(samples), axis=-1)) def calculate_desired_noise_rms(clean_rms, snr): """ Given the Root Mean Square (RMS) of a clean sound and a desired signal-to-noise ratio (SNR), calculate the desired RMS of a noise sound to be mixed in. Based on https://github.com/Sato-Kunihiko/audio-SNR/blob/8d2c933b6c0afe6f1203251f4877e7a1068a6130/create_mixed_audio_file.py#L20 :param clean_rms: Root Mean Square (RMS) - a value between 0.0 and 1.0 :param snr: Signal-to-Noise (SNR) Ratio in dB - typically somewhere between -20 and 60 :return: """ a = float(snr) / 20 noise_rms = clean_rms / (10 ** a) return noise_rms def is_waveform_multichannel(samples): """ Return bool that answers the question: Is the given ndarray a multichannel waveform or not? :param samples: numpy ndarray :return: """ return len(samples.shape) > 1 def is_spectrogram_multichannel(spectrogram): """ Return bool that answers the question: Is the given ndarray a multichannel spectrogram? :param samples: numpy ndarray :return: """ return len(spectrogram.shape) > 2 and spectrogram.shape[-1] > 1 def convert_float_samples_to_int16(y): """Convert floating-point numpy array of audio samples to int16.""" if not issubclass(y.dtype.type, np.floating): raise ValueError("input samples not floating-point") return (y * np.iinfo(np.int16).max).astype(np.int16)
31.617284
132
0.689184
8a9d4177e423a6db85599cff72c82ba14d5a1522
883
py
Python
algorithm/python/LeetCode/isValid.py
HoneyS2/meaningful
78659de1ed74121db4ade211f6565ddc6d117041
[ "MIT" ]
null
null
null
algorithm/python/LeetCode/isValid.py
HoneyS2/meaningful
78659de1ed74121db4ade211f6565ddc6d117041
[ "MIT" ]
null
null
null
algorithm/python/LeetCode/isValid.py
HoneyS2/meaningful
78659de1ed74121db4ade211f6565ddc6d117041
[ "MIT" ]
null
null
null
s = "([}}])" stack = [] if len(s) % 2 == 1: print(False) exit() for i in s: if i == "(": stack.append("(") elif i == "[": stack.append("[") elif i == "{": stack.append("{") elif i == ")": if len(stack) < 1: print(False) exit() if stack[-1] == "(": stack.pop() else: print(False) exit() elif i == "]": if len(stack) < 1: print(False) exit() if stack[-1] == "[": stack.pop() else: print(False) exit() elif i == "}": if len(stack) < 1: print(False) exit() if stack[-1] == "{": stack.pop() else: print(False) exit() if len(stack) == 0: print(True) else: print(False)
18.395833
28
0.347678
8a9d8f1b16e1dbb065ddd8280ce1c889563a6417
4,831
py
Python
JupyterHTMLSlides/core.py
williamegomezo/JupyterSlides
403fe15e360eb1d79bf813b923eb569a81ab0934
[ "MIT" ]
1
2019-07-26T20:59:47.000Z
2019-07-26T20:59:47.000Z
JupyterHTMLSlides/core.py
williamegomezo/JupyterSlides
403fe15e360eb1d79bf813b923eb569a81ab0934
[ "MIT" ]
null
null
null
JupyterHTMLSlides/core.py
williamegomezo/JupyterSlides
403fe15e360eb1d79bf813b923eb569a81ab0934
[ "MIT" ]
null
null
null
import random
import string
import os

from IPython.display import display, HTML

from .utils import html_loader
from .utils import get_content
from jinja2 import Template
33.783217
100
0.580211
8a9e11dd86387cdd76e5db9dfd7ce9770e952aef
30,203
py
Python
tests/test_wallet.py
NickeZ/lightning
f376a9c24cc71d139393196dea86b5a39aee7db8
[ "MIT" ]
1
2020-05-07T22:28:20.000Z
2020-05-07T22:28:20.000Z
tests/test_wallet.py
satoshinakamoto007/lightning
ff968e773074061d6f76cb81c6c61a1047ffaef1
[ "MIT" ]
1
2020-05-03T00:56:31.000Z
2020-05-03T00:56:31.000Z
tests/test_wallet.py
satoshinakamoto007/lightning
ff968e773074061d6f76cb81c6c61a1047ffaef1
[ "MIT" ]
null
null
null
from decimal import Decimal from fixtures import * # noqa: F401,F403 from fixtures import TEST_NETWORK from flaky import flaky # noqa: F401 from pyln.client import RpcError, Millisatoshi from utils import ( only_one, wait_for, sync_blockheight, EXPERIMENTAL_FEATURES, COMPAT, VALGRIND ) import os import pytest import subprocess import time import unittest def test_minconf_withdraw(node_factory, bitcoind): """Issue 2518: ensure that ridiculous confirmation levels don't overflow The number of confirmations is used to compute a maximum height that is to be accepted. If the current height is smaller than the number of confirmations we wrap around and just select everything. The fix is to clamp the maxheight parameter to a positive small number. """ amount = 1000000 # Don't get any funds from previous runs. l1 = node_factory.get_node(random_hsm=True) addr = l1.rpc.newaddr()['bech32'] # Add some funds to withdraw later for i in range(10): l1.bitcoin.rpc.sendtoaddress(addr, amount / 10**8 + 0.01) bitcoind.generate_block(1) wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 10) with pytest.raises(RpcError): l1.rpc.withdraw(destination=addr, satoshi=10000, feerate='normal', minconf=9999999) def test_addfunds_from_block(node_factory, bitcoind): """Send funds to the daemon without telling it explicitly """ # Previous runs with same bitcoind can leave funds! l1 = node_factory.get_node(random_hsm=True) addr = l1.rpc.newaddr()['bech32'] bitcoind.rpc.sendtoaddress(addr, 0.1) bitcoind.generate_block(1) wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 1) outputs = l1.db_query('SELECT value FROM outputs WHERE status=0;') assert only_one(outputs)['value'] == 10000000 # The address we detect must match what was paid to. output = only_one(l1.rpc.listfunds()['outputs']) assert output['address'] == addr # Send all our money to a P2WPKH address this time. addr = l1.rpc.newaddr("bech32")['bech32'] l1.rpc.withdraw(addr, "all") bitcoind.generate_block(1) time.sleep(1) # The address we detect must match what was paid to. output = only_one(l1.rpc.listfunds()['outputs']) assert output['address'] == addr # this test does a 'listtransactions' on a yet unconfirmed channel def test_fundchannel_listtransaction(node_factory, bitcoind): l1, l2 = node_factory.get_nodes(2) l1.fundwallet(10**6) l1.connect(l2) txid = l1.rpc.fundchannel(l2.info['id'], 10**5)['txid'] # next call warned about SQL Accessing a null column # and crashed the daemon for accessing random memory or null txs = l1.rpc.listtransactions()['transactions'] tx = [t for t in txs if t['hash'] == txid][0] assert tx['blockheight'] == 0 def test_withdraw_nlocktime(node_factory): """ Test that we don't set the nLockTime to 0 for withdrawal transactions. """ l1 = node_factory.get_node(1) l1.fundwallet(10**4) addr = l1.rpc.newaddr()["bech32"] tx = l1.rpc.withdraw(addr, 10**3)["tx"] nlocktime = node_factory.bitcoind.rpc.decoderawtransaction(tx)["locktime"] tip = node_factory.bitcoind.rpc.getblockcount() assert nlocktime > 0 and nlocktime <= tip
40.704852
130
0.65957
8a9ed02f0755897cb2a1b2ac5fabcbb264f6bbee
18,025
py
Python
microbepy/plot/mutation_plot.py
ScienceStacks/MicrobEPy
704435e66c58677bab24f27820458870092924e2
[ "MIT" ]
1
2019-05-04T00:31:05.000Z
2019-05-04T00:31:05.000Z
microbepy/plot/mutation_plot.py
ScienceStacks/MicrobEPy
704435e66c58677bab24f27820458870092924e2
[ "MIT" ]
null
null
null
microbepy/plot/mutation_plot.py
ScienceStacks/MicrobEPy
704435e66c58677bab24f27820458870092924e2
[ "MIT" ]
null
null
null
"""Provides plots of mutations for Isolates and Lines.""" from microbepy.common import constants as cn from microbepy.common.dataframe_sorter import DataframeSorter from microbepy.common.isolate import Isolate from microbepy.common import util from microbepy.correlation import genome_correlation from microbepy.data.model_data_provider import ModelDataProvider from microbepy.data import util_data from microbepy.plot.mutation_cofraction import MutationCofraction from microbepy.plot.util_plot import PlotParms import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns COLORS = ['red', 'green', 'blue'] SPECIES = {cn.SPECIES_MIX_DVH: "DVH", cn.SPECIES_MIX_MMP: "MMP", None: "both"} FONTSIZE_TITLE = 16 FONTSIZE_LABEL = 8 MAX_LINES = 9 MIN_FRACTION = 0.25 THRESHOLD_FRAC = 0.2 MAX_SIGLVL = 0.01 COLORBAR_MIN = 1.0 COLORBAR_MAX = 4.0
35.62253
78
0.676782
8a9ed7740bcb98fbae13ca6bc7e08c9cb1a32fd1
4,384
py
Python
semantic-segmentation/deeplabv3plus/dataset_utils.py
shikisawamura/nnabla-examples
baf4e4cc620dedbf4368683325c0fb868676850d
[ "Apache-2.0" ]
1
2020-08-03T12:49:25.000Z
2020-08-03T12:49:25.000Z
semantic-segmentation/deeplabv3plus/dataset_utils.py
takuseno/nnabla-examples
070d25078ad3d5458744dbfd390cdd926e20e573
[ "Apache-2.0" ]
null
null
null
semantic-segmentation/deeplabv3plus/dataset_utils.py
takuseno/nnabla-examples
070d25078ad3d5458744dbfd390cdd926e20e573
[ "Apache-2.0" ]
1
2020-04-25T06:11:28.000Z
2020-04-25T06:11:28.000Z
# Copyright (c) 2017 Sony Corporation. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import os from scipy.misc import imread from args import get_args import matplotlib.pyplot as plt def encode_label(label): ''' Converting pixel values to corresponding class numbers. Assuming that the input label in 3-dim(h,w,c) and in BGR fromat read from cv2 ''' h, w, c = label.shape new_label = np.zeros((h, w, 1), dtype=np.int32) cls_to_clr_map = get_color() for i in range(cls_to_clr_map.shape[0]): #new_label[(label == cls_to_clr_map[i])[:,:,0]] = i #new_label[np.argwhere((label.astype(np.int32) == cls_to_clr_map[i]).all(axis=2))]=i print(np.where((label.astype(np.int32) == [120, 0, 128]).all(axis=2))) if i == 21: new_label[np.where( (label.astype(np.int32) == cls_to_clr_map[i]).all(axis=2))] = 255 else: new_label[np.where( (label.astype(np.int32) == cls_to_clr_map[i]).all(axis=2))] = i return new_label # this method should generate train-image.txt and train-label.txt def main(): ''' Arguments: train-file = txt file containing randomly selected image filenames to be taken as training set. val-file = txt file containing randomly selected image filenames to be taken as validation set. data-dir = dataset directory Usage: python dataset_utils.py --train-file="" --val-file="" --data_dir="" ''' args = get_args() data_dir = args.data_dir if not os.path.exists(data_dir+'SegmentationClass/' + 'encoded/'): os.makedirs(data_dir+'SegmentationClass/' + 'encoded/') for filename in os.listdir(data_dir+'SegmentationClass/'): if os.path.isdir(data_dir+'SegmentationClass/' + filename): continue label = imread(data_dir+'SegmentationClass/' + filename).astype('float32') label = encode_label(label) np.save(data_dir+'SegmentationClass/' + 'encoded/' + filename.split('.')[0] + '.npy', label) generate_path_files(args.data_dir, args.train_file, args.val_file) if __name__ == '__main__': main()
38.79646
334
0.619297
8a9edfbe7de3c135419c8254312b876a5177e47f
10,044
py
Python
train.py
shamilcm/fairseq-py
ceb2f1200c9e5b8bf42a1033e7638d3e8586609a
[ "BSD-3-Clause" ]
1
2021-04-20T07:33:12.000Z
2021-04-20T07:33:12.000Z
train.py
shamilcm/fairseq-py
ceb2f1200c9e5b8bf42a1033e7638d3e8586609a
[ "BSD-3-Clause" ]
null
null
null
train.py
shamilcm/fairseq-py
ceb2f1200c9e5b8bf42a1033e7638d3e8586609a
[ "BSD-3-Clause" ]
3
2018-04-20T11:00:16.000Z
2020-04-25T09:31:14.000Z
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. # import collections import os import torch import math from fairseq import bleu, data, options, utils from fairseq.meters import AverageMeter, StopwatchMeter, TimeMeter from fairseq.multiprocessing_trainer import MultiprocessingTrainer from fairseq.progress_bar import progress_bar from fairseq.sequence_generator import SequenceGenerator def train(args, epoch, batch_offset, trainer, criterion, dataset, num_gpus): """Train the model for one epoch.""" itr = dataset.dataloader(args.train_subset, batch_size=args.batch_size, test_batch_size=args.test_batch_size, valid_batch_size=args.valid_batch_size, num_workers=args.workers, max_tokens=args.max_tokens, seed=args.seed, epoch=epoch, max_positions=args.max_positions, sample_without_replacement=args.sample_without_replacement) loss_meter = AverageMeter() bsz_meter = AverageMeter() # sentences per batch wpb_meter = AverageMeter() # words per batch wps_meter = TimeMeter() # words per second clip_meter = AverageMeter() # % of updates clipped gnorm_meter = AverageMeter() # gradient norm desc = '| epoch {:03d}'.format(epoch) lr = trainer.get_lr() with progress_bar(itr, desc, leave=False) as t: for i, sample in data.skip_group_enumerator(t, num_gpus, batch_offset): loss, grad_norm = trainer.train_step(sample, criterion) ntokens = sum(s['ntokens'] for s in sample) src_size = sum(s['src_tokens'].size(0) for s in sample) loss_meter.update(loss, ntokens) bsz_meter.update(src_size) wpb_meter.update(ntokens) wps_meter.update(ntokens) clip_meter.update(1 if grad_norm > args.clip_norm else 0) gnorm_meter.update(grad_norm) t.set_postfix(collections.OrderedDict([ ('loss', '{:.2f} ({:.2f})'.format(loss, loss_meter.avg)), ('wps', '{:5d}'.format(round(wps_meter.avg))), ('wpb', '{:5d}'.format(round(wpb_meter.avg))), ('bsz', '{:5d}'.format(round(bsz_meter.avg))), ('lr', lr), ('clip', '{:3.0f}%'.format(clip_meter.avg * 100)), ('gnorm', '{:.4f}'.format(gnorm_meter.avg)), ])) if i == 0: # ignore the first mini-batch in words-per-second calculation wps_meter.reset() if args.save_interval > 0 and (i + 1) % args.save_interval == 0: trainer.save_checkpoint(args, epoch, i + 1) fmt = desc + ' | train loss {:2.2f} | train ppl {:3.2f}' fmt += ' | s/checkpoint {:7d} | words/s {:6d} | words/batch {:6d}' fmt += ' | bsz {:5d} | lr {:0.6f} | clip {:3.0f}% | gnorm {:.4f}' t.write(fmt.format(loss_meter.avg, math.pow(2, loss_meter.avg), round(wps_meter.elapsed_time), round(wps_meter.avg), round(wpb_meter.avg), round(bsz_meter.avg), lr, clip_meter.avg * 100, gnorm_meter.avg)) def validate(args, epoch, trainer, criterion, dataset, subset, ngpus): """Evaluate the model on the validation set and return the average loss.""" itr = dataset.dataloader(subset, batch_size=None, max_tokens=args.max_tokens, max_positions=args.max_positions) loss_meter = AverageMeter() desc = '| epoch {:03d} | valid on \'{}\' subset'.format(epoch, subset) with progress_bar(itr, desc, leave=False) as t: for _, sample in data.skip_group_enumerator(t, ngpus): ntokens = sum(s['ntokens'] for s in sample) loss = trainer.valid_step(sample, criterion) loss_meter.update(loss, ntokens) t.set_postfix(loss='{:.2f}'.format(loss_meter.avg)) val_loss = loss_meter.avg t.write(desc + ' | valid loss {:2.2f} | valid ppl {:3.2f}' .format(val_loss, 
math.pow(2, val_loss))) # update and return the learning rate return val_loss def score_test(args, model, dataset, subset, beam, cuda_device): """Evaluate the model on the test set and return the BLEU scorer.""" translator = SequenceGenerator([model], dataset.dst_dict, beam_size=beam) if torch.cuda.is_available(): translator.cuda() scorer = bleu.Scorer(dataset.dst_dict.pad(), dataset.dst_dict.eos(), dataset.dst_dict.unk()) itr = dataset.dataloader(subset, batch_size=4, max_positions=args.max_positions) for _, _, ref, hypos in translator.generate_batched_itr(itr, cuda_device=cuda_device): scorer.add(ref.int().cpu(), hypos[0]['tokens'].int().cpu()) return scorer if __name__ == '__main__': main()
44.052632
129
0.616587
8a9f03cac960929d8e8a292c8e92367e90e1a3eb
7,311
py
Python
storm_control/sc_library/log_timing.py
jeffmoffitt/storm-control
522add1e196e0b7964f574481fd90c20a74b575e
[ "MIT" ]
null
null
null
storm_control/sc_library/log_timing.py
jeffmoffitt/storm-control
522add1e196e0b7964f574481fd90c20a74b575e
[ "MIT" ]
null
null
null
storm_control/sc_library/log_timing.py
jeffmoffitt/storm-control
522add1e196e0b7964f574481fd90c20a74b575e
[ "MIT" ]
1
2020-11-10T06:39:18.000Z
2020-11-10T06:39:18.000Z
#!/usr/bin/env python """ This parses a log file series (i.e. log, log.1, log.2, etc..) and outputs timing and call frequency information for HAL messages. Hazen 5/18 """ from datetime import datetime import os pattern = '%Y-%m-%d %H:%M:%S,%f' def getIterable(dict_or_list): """ Returns an iterable given a dictionary of a list. """ if isinstance(dict_or_list, dict): iterable = list(dict_or_list.values()) elif isinstance(dict_or_list, list): iterable = dict_or_list else: raise Exception("Unknown type '" + str(type(dict_or_list)) + "'") return iterable def groupByMsgType(messages): """ Returns a dictionary keyed by message type, with a list of one or more message objects per message type. """ return groupByX(lambda x : x.getType(), messages) def groupBySource(messages): """ Returns a dictionary keyed by message source, with a list of one or more message objects per message source. """ return groupByX(lambda x : x.getSource(), messages) def groupByX(grp_fn, messages): """ Returns a dictionary keyed by the requested group. """ m_grp = {} for msg in getIterable(messages): # Ignore messages that we don't have all the timing for. if msg.isComplete() or not ignore_incomplete: m_type = grp_fn(msg) if m_type in m_grp: m_grp[m_type].append(msg) else: m_grp[m_type] = [msg] return m_grp def logTiming(basename, ignore_incomplete = False): """ Returns a dictionary of Message objects keyed by their ID number. """ zero_time = None messages = {} for ext in [".5", ".4", ".3", ".2", ".1", ""]: fname = basename + ".out" + ext if not os.path.exists(fname): print(fname, "not found.") continue with open(fname) as fp: for line in fp: try: [time, command] = map(lambda x: x.strip(), line.split(":hal4000:INFO:")) except ValueError: continue if zero_time is None: zero_time = time # Message queued. if (command.startswith("queued,")): [m_id, source, m_type] = command.split(",")[1:] messages[m_id] = Message(m_type = m_type, source = source, time = time, zero_time = zero_time) # Message sent. elif (command.startswith("sent,")): m_id = command.split(",")[1] messages[m_id].sent(time) # Message processed. elif (command.startswith("processed,")): m_id = command.split(",")[1] messages[m_id].processed(time) elif (command.startswith("worker done,")): m_id = command.split(",")[1] messages[m_id].incNWorkers() # Ignore messages that we don't have all the timing for. if not ignore_incomplete: temp = {} for m_id in messages: msg = messages[m_id] if msg.isComplete(): temp[m_id] = msg return temp else: return messages def processingTime(messages): """ Returns the total processing time for a collection of messages. """ accum_time = 0 for msg in getIterable(messages): if isinstance(msg, list): for elt in msg: accum_time += elt.getProcessingTime() else: accum_time += msg.getProcessingTime() return accum_time def queuedTime(messages): """ Returns the total queued time for a a collection of messages. 
""" accum_time = 0 for msg in getIterable(messages): if isinstance(msg, list): for elt in msg: accum_time += elt.getQueuedTime() else: accum_time += msg.getQueuedTime() return accum_time if (__name__ == "__main__"): import sys if (len(sys.argv) != 2): print("usage: <log file>") exit() messages = logTiming(sys.argv[1]) groups = groupByMsgType(messages) print() print("All messages:") for key in sorted(groups): grp = groups[key] print(key + ", {0:0d} counts, {1:.3f} seconds".format(len(grp), processingTime(grp))) print("Total queued time {0:.3f} seconds".format(queuedTime(groups))) print("Total processing time {0:.3f} seconds".format(processingTime(groups))) print() print("Film messages:") groups = groupByMsgType(groupBySource(messages)["film"]) for key in sorted(groups): grp = groups[key] print(key + ", {0:0d} counts, {1:.3f} seconds".format(len(grp), processingTime(grp))) print("Total processing time {0:.3f} seconds".format(processingTime(groups)))
27.90458
93
0.548078
8a9f1f85d541893b6f50e7a4580e2b294f4022fb
1,830
py
Python
django_simple_jsonschema/management/commands/check_schema.py
38elements/django-simple-jsonschema
ab08aaa3453c40a41d443869643113f23eb40db6
[ "MIT" ]
1
2017-04-27T20:15:46.000Z
2017-04-27T20:15:46.000Z
django_simple_jsonschema/management/commands/check_schema.py
38elements/django-simple-jsonschema
ab08aaa3453c40a41d443869643113f23eb40db6
[ "MIT" ]
null
null
null
django_simple_jsonschema/management/commands/check_schema.py
38elements/django-simple-jsonschema
ab08aaa3453c40a41d443869643113f23eb40db6
[ "MIT" ]
2
2016-02-20T10:53:09.000Z
2018-07-12T14:47:01.000Z
from django.core.management.base import BaseCommand
from django.utils import termcolors
from jsonschema import Draft4Validator
from jsonschema.exceptions import SchemaError
import json
35.192308
96
0.572678
8aa0f73f3e1949691f35856c47f4d0a99caef5b9
4,247
py
Python
lib/interface.py
keke185321/combine-copy-
de2eba77d8db5c9c1908aac1262590b80c2348ce
[ "Apache-2.0" ]
null
null
null
lib/interface.py
keke185321/combine-copy-
de2eba77d8db5c9c1908aac1262590b80c2348ce
[ "Apache-2.0" ]
null
null
null
lib/interface.py
keke185321/combine-copy-
de2eba77d8db5c9c1908aac1262590b80c2348ce
[ "Apache-2.0" ]
null
null
null
import cv2, time import numpy as np import Tkinter """ Wraps up some interfaces to opencv user interface methods (displaying image frames, event handling, etc). If desired, an alternative UI could be built and imported into get_pulse.py instead. Opencv is used to perform much of the data analysis, but there is no reason it has to be used to handle the UI as well. It just happens to be very effective for our purposes. """ """ The rest of this file defines some GUI plotting functionality. There are plenty of other ways to do simple x-y data plots in python, but this application uses cv2.imshow to do real-time data plotting and handle user interaction. This is entirely independent of the data calculation functions, so it can be replaced in the get_pulse.py application easily. """ def combine(left, right): """Stack images horizontally. """ h = max(left.shape[0], right.shape[0]) w = left.shape[1] + right.shape[1] hoff = left.shape[0] shape = list(left.shape) shape[0] = h shape[1] = w comb = np.zeros(tuple(shape),left.dtype) # left will be on left, aligned top, with right on right comb[:left.shape[0],:left.shape[1]] = left comb[:right.shape[0],left.shape[1]:] = right return comb
31.932331
82
0.533318
8aa1a1e63a87d2e580e76379c3a2ac6b8f3e051d
16,125
py
Python
nltk/tag/brill.py
FGDBTKD/nltk
384e46e82789c7f47a7fb521ef976f82c3c4c3fb
[ "Apache-2.0" ]
null
null
null
nltk/tag/brill.py
FGDBTKD/nltk
384e46e82789c7f47a7fb521ef976f82c3c4c3fb
[ "Apache-2.0" ]
null
null
null
nltk/tag/brill.py
FGDBTKD/nltk
384e46e82789c7f47a7fb521ef976f82c3c4c3fb
[ "Apache-2.0" ]
1
2019-10-18T08:58:45.000Z
2019-10-18T08:58:45.000Z
# -*- coding: utf-8 -*- # Natural Language Toolkit: Transformation-based learning # # Copyright (C) 2001-2018 NLTK Project # Author: Marcus Uneson <[email protected]> # based on previous (nltk2) version by # Christopher Maloof, Edward Loper, Steven Bird # URL: <http://nltk.org/> # For license information, see LICENSE.TXT from __future__ import print_function, division from collections import defaultdict, Counter from nltk.tag import TaggerI from nltk.tbl import Feature, Template from nltk import jsontags ###################################################################### # Brill Templates ###################################################################### def nltkdemo18(): """ Return 18 templates, from the original nltk demo, in multi-feature syntax """ return [ Template(Pos([-1])), Template(Pos([1])), Template(Pos([-2])), Template(Pos([2])), Template(Pos([-2, -1])), Template(Pos([1, 2])), Template(Pos([-3, -2, -1])), Template(Pos([1, 2, 3])), Template(Pos([-1]), Pos([1])), Template(Word([-1])), Template(Word([1])), Template(Word([-2])), Template(Word([2])), Template(Word([-2, -1])), Template(Word([1, 2])), Template(Word([-3, -2, -1])), Template(Word([1, 2, 3])), Template(Word([-1]), Word([1])), ] def nltkdemo18plus(): """ Return 18 templates, from the original nltk demo, and additionally a few multi-feature ones (the motivation is easy comparison with nltkdemo18) """ return nltkdemo18() + [ Template(Word([-1]), Pos([1])), Template(Pos([-1]), Word([1])), Template(Word([-1]), Word([0]), Pos([1])), Template(Pos([-1]), Word([0]), Word([1])), Template(Pos([-1]), Word([0]), Pos([1])), ] def fntbl37(): """ Return 37 templates taken from the postagging task of the fntbl distribution http://www.cs.jhu.edu/~rflorian/fntbl/ (37 is after excluding a handful which do not condition on Pos[0]; fntbl can do that but the current nltk implementation cannot.) 
""" return [ Template(Word([0]), Word([1]), Word([2])), Template(Word([-1]), Word([0]), Word([1])), Template(Word([0]), Word([-1])), Template(Word([0]), Word([1])), Template(Word([0]), Word([2])), Template(Word([0]), Word([-2])), Template(Word([1, 2])), Template(Word([-2, -1])), Template(Word([1, 2, 3])), Template(Word([-3, -2, -1])), Template(Word([0]), Pos([2])), Template(Word([0]), Pos([-2])), Template(Word([0]), Pos([1])), Template(Word([0]), Pos([-1])), Template(Word([0])), Template(Word([-2])), Template(Word([2])), Template(Word([1])), Template(Word([-1])), Template(Pos([-1]), Pos([1])), Template(Pos([1]), Pos([2])), Template(Pos([-1]), Pos([-2])), Template(Pos([1])), Template(Pos([-1])), Template(Pos([-2])), Template(Pos([2])), Template(Pos([1, 2, 3])), Template(Pos([1, 2])), Template(Pos([-3, -2, -1])), Template(Pos([-2, -1])), Template(Pos([1]), Word([0]), Word([1])), Template(Pos([1]), Word([0]), Word([-1])), Template(Pos([-1]), Word([-1]), Word([0])), Template(Pos([-1]), Word([0]), Word([1])), Template(Pos([-2]), Pos([-1])), Template(Pos([1]), Pos([2])), Template(Pos([1]), Pos([2]), Word([1])) ] def brill24(): """ Return 24 templates of the seminal TBL paper, Brill (1995) """ return [ Template(Pos([-1])), Template(Pos([1])), Template(Pos([-2])), Template(Pos([2])), Template(Pos([-2, -1])), Template(Pos([1, 2])), Template(Pos([-3, -2, -1])), Template(Pos([1, 2, 3])), Template(Pos([-1]), Pos([1])), Template(Pos([-2]), Pos([-1])), Template(Pos([1]), Pos([2])), Template(Word([-1])), Template(Word([1])), Template(Word([-2])), Template(Word([2])), Template(Word([-2, -1])), Template(Word([1, 2])), Template(Word([-1, 0])), Template(Word([0, 1])), Template(Word([0])), Template(Word([-1]), Pos([-1])), Template(Word([1]), Pos([1])), Template(Word([0]), Word([-1]), Pos([-1])), Template(Word([0]), Word([1]), Pos([1])), ] def describe_template_sets(): """ Print the available template sets in this demo, with a short description" """ import inspect import sys # a bit of magic to get all functions in this module templatesets = inspect.getmembers(sys.modules[__name__], inspect.isfunction) for (name, obj) in templatesets: if name == "describe_template_sets": continue print(name, obj.__doc__, "\n") ###################################################################### # The Brill Tagger ######################################################################
37.941176
107
0.57631
8aa1f2759e7626cdb380e9f05aa634b55bf1bbc2
7,812
py
Python
superglue_parsers/wsc.py
agentsolaris/xlnn
0ab07d1ac526cadc2964379aef0a44927e0618eb
[ "Apache-2.0" ]
null
null
null
superglue_parsers/wsc.py
agentsolaris/xlnn
0ab07d1ac526cadc2964379aef0a44927e0618eb
[ "Apache-2.0" ]
null
null
null
superglue_parsers/wsc.py
agentsolaris/xlnn
0ab07d1ac526cadc2964379aef0a44927e0618eb
[ "Apache-2.0" ]
null
null
null
import json
import logging
import sys

import numpy as np
import torch
from task_config import SuperGLUE_LABEL_MAPPING

from snorkel.mtl.data import MultitaskDataset

sys.path.append("..")  # Adds higher directory to python modules path.

logger = logging.getLogger(__name__)

TASK_NAME = "WSC"
33.101695
88
0.592422
8aa22dad95839c5aa4e52f5c6ec5b084424226d6
1,534
py
Python
simplimental/simplimental.py
TimmyCarbone/simplimental
e46a0e63ce33e36b1e4ca3a473ad15d0732614ed
[ "MIT" ]
2
2015-11-25T15:12:05.000Z
2017-06-22T16:36:58.000Z
simplimental/simplimental.py
TimmyCarbone/simplimental
e46a0e63ce33e36b1e4ca3a473ad15d0732614ed
[ "MIT" ]
null
null
null
simplimental/simplimental.py
TimmyCarbone/simplimental
e46a0e63ce33e36b1e4ca3a473ad15d0732614ed
[ "MIT" ]
null
null
null
import re
import json

__all__ = ["Simplimental"]
21.605634
72
0.612777
8aa2d7e8d015afdc94844a8b1cce4b350015d579
3,637
py
Python
Python/Examples/Macros/SettingsAxesOptimization.py
archformco/RoboDK-API
b3d0cad6a83f505811e2be273453ccb4579324f1
[ "MIT" ]
161
2018-03-23T01:27:08.000Z
2022-03-23T12:18:35.000Z
Python/Examples/Macros/SettingsAxesOptimization.py
OxideDevX/RoboDK-API
50357c38b2fcf58cf82d9b7bf61021cb900fd358
[ "MIT" ]
26
2018-11-19T10:18:58.000Z
2022-03-28T18:37:11.000Z
Python/Examples/Macros/SettingsAxesOptimization.py
OxideDevX/RoboDK-API
50357c38b2fcf58cf82d9b7bf61021cb900fd358
[ "MIT" ]
85
2018-03-22T19:25:35.000Z
2022-03-30T04:46:59.000Z
# This example shows how to read or modify the Axes Optimization settings using the RoboDK API and a JSON string. # You can select "Axes optimization" in a robot machining menu or the robot parameters to view the axes optimization settings. # It is possible to update the axes optimization settings attached to a robot or a robot machining project manually or using the API. # # More information about the RoboDK API here: # https://robodk.com/doc/en/RoboDK-API.html # For more information visit: # https://robodk.com/doc/en/PythonAPI/robolink.html from robolink import * # RoboDK API # JSON tools import json # Start the RoboDK API RDK = Robolink() # Ask the user to select a robot arm (6 axis robot wich can have external axes) robot = RDK.ItemUserPick("Select a robot arm",ITEM_TYPE_ROBOT_ARM) # Default optimization settings test template AxesOptimSettings = { # Optimization parameters: "Active": 1, # Use generic axes optimization: 0=Disabled or 1=Enabled "Algorithm": 2, # Optimization algorithm to use: 1=Nelder Mead, 2=Samples, 3=Samples+Nelder Mead "MaxIter": 650, # Max. number of iterations "Tol": 0.0016, # Tolerance to stop iterations # Absolute Reference joints (double): "AbsJnt_1": 104.17, "AbsJnt_2": 11.22, "AbsJnt_3": 15.97, "AbsJnt_4": -87.48, "AbsJnt_5": -75.36, "AbsJnt_6": 63.03, "AbsJnt_7": 174.13, "AbsJnt_8": 173.60, "AbsJnt_9": 0, # Using Absolute reference joints (0: No, 1: Yes): "AbsOn_1": 1, "AbsOn_2": 1, "AbsOn_3": 1, "AbsOn_4": 1, "AbsOn_5": 1, "AbsOn_6": 1, "AbsOn_7": 1, "AbsOn_8": 1, "AbsOn_9": 1, # Weight for absolute reference joints (double): "AbsW_1": 100, "AbsW_2": 100, "AbsW_3": 100, "AbsW_4": 89, "AbsW_5": 90, "AbsW_6": 92, "AbsW_7": 92, "AbsW_8": 96, "AbsW_9": 50, # Using for relative joint motion smoothing (0: No, 1: Yes): "RelOn_1": 1, "RelOn_2": 1, "RelOn_3": 1, "RelOn_4": 1, "RelOn_5": 1, "RelOn_6": 1, "RelOn_7": 1, "RelOn_8": 1, "RelOn_9": 1, # Weight for relative joint motion (double): "RelW_1": 5, "RelW_2": 47, "RelW_3": 44, "RelW_4": 43, "RelW_5": 36, "RelW_6": 47, "RelW_7": 53, "RelW_8": 59, "RelW_9": 0, } # Update one value, for example, make it active: ToUpdate = {} ToUpdate["Active"] = 1 json_str = json.dumps(json.dumps(ToUpdate)) status = robot.setParam("OptimAxes", json_str) print(status) # Example to make a partial or full update count = 1 while True: for i in range(7): # Partial update ToUpdate = {} ToUpdate["AbsJnt_" + str(i+1)] = (count+i)*4 ToUpdate["AbsOn_" + str(i+1)] = count % 2 ToUpdate["AbsW_" + str(i+1)] = (count+i) json_str = json.dumps(json.dumps(ToUpdate)) status = robot.setParam("OptimAxes", json_str) print(status) # Full update #OptimAxes_TEST["RefJoint_" + str(i+1)] = (count+i)*4 #OptimAxes_TEST["RefWeight_" + str(i+1)] = (count+i) #OptimAxes_TEST["RefOn_" + str(i+1)] = count % 2 # Full update #print(robot.setParam("OptimAxes", str(AxesOptimSettings))) count = count + 1 # Read settings json_data = robot.setParam("OptimAxes") json_object = json.loads(json_data) print(json.dumps(json_object, indent=4)) pause(0.2) # Example to read the current axes optimization settings: while True: json_data = robot.setParam("OptimAxes") json_object = json.loads(json_data) print(json.dumps(json_object, indent=4)) pause(0.2)
28.414063
133
0.62854
8aa372fac8202953aac93a2529989a1508f2b506
1,072
py
Python
tests/test_grammar.py
Vipul97/SLR-Parser
3de5609235d173d29ad9bd9ed7bdfe2a813ab1bd
[ "MIT" ]
5
2018-10-30T04:09:46.000Z
2020-03-17T04:47:06.000Z
tests/test_grammar.py
Vipul97/SLR-Parser
3de5609235d173d29ad9bd9ed7bdfe2a813ab1bd
[ "MIT" ]
null
null
null
tests/test_grammar.py
Vipul97/SLR-Parser
3de5609235d173d29ad9bd9ed7bdfe2a813ab1bd
[ "MIT" ]
5
2019-06-16T20:16:46.000Z
2020-04-14T06:44:32.000Z
from slr_parser.grammar import Grammar
import unittest


if __name__ == '__main__':
    unittest.main()
29.777778
114
0.488806
8aa50b5f8d204a63672c266b3319435ba3678601
2,686
py
Python
insight/migrations/0001_initial.py
leonhead/chess-insight
b893295719df21b4fee10d4e7b01639ded8b42b4
[ "MIT" ]
null
null
null
insight/migrations/0001_initial.py
leonhead/chess-insight
b893295719df21b4fee10d4e7b01639ded8b42b4
[ "MIT" ]
null
null
null
insight/migrations/0001_initial.py
leonhead/chess-insight
b893295719df21b4fee10d4e7b01639ded8b42b4
[ "MIT" ]
null
null
null
# Generated by Django 3.1 on 2020-09-08 07:43

from django.db import migrations, models
import django.db.models.deletion
41.323077
127
0.562919
8aa613f84bb4cdd381d01e4e99ee1eab1597c53c
1,732
py
Python
tests/test_merge.py
jmerizia/parallel-pytorch
d27b2fd145d25f1329a039c99b8895783bfc71e5
[ "MIT" ]
null
null
null
tests/test_merge.py
jmerizia/parallel-pytorch
d27b2fd145d25f1329a039c99b8895783bfc71e5
[ "MIT" ]
null
null
null
tests/test_merge.py
jmerizia/parallel-pytorch
d27b2fd145d25f1329a039c99b8895783bfc71e5
[ "MIT" ]
null
null
null
import torch
import numpy as np
from mpi4py import MPI

from parallel_pytorch.ops import tensor_merge
from parallel_pytorch.utils import abort_on_exception


def run_all():
    test_1()
    test_2()


if __name__ == '__main__':
    run_all()
29.355932
103
0.561778
8aa6533a09d6a4b3ba6f06626bf481622c2da357
542
py
Python
day07/main.py
tebriel/aoc2021
65ca19be3ad66dc52eee9ca31cf12306695a24e9
[ "Unlicense" ]
null
null
null
day07/main.py
tebriel/aoc2021
65ca19be3ad66dc52eee9ca31cf12306695a24e9
[ "Unlicense" ]
null
null
null
day07/main.py
tebriel/aoc2021
65ca19be3ad66dc52eee9ca31cf12306695a24e9
[ "Unlicense" ]
null
null
null
"""Day 07""" if __name__ == '__main__': process('test.txt') process('input.txt')
25.809524
74
0.573801
8aa6ff7f14bd0c2736eb3afb641dd73452250888
1,276
py
Python
src/ceres_infer/utils.py
pritchardlabatpsu/cga
0a71c672b1348cebc724560643fd908d636fc133
[ "MIT" ]
null
null
null
src/ceres_infer/utils.py
pritchardlabatpsu/cga
0a71c672b1348cebc724560643fd908d636fc133
[ "MIT" ]
null
null
null
src/ceres_infer/utils.py
pritchardlabatpsu/cga
0a71c672b1348cebc724560643fd908d636fc133
[ "MIT" ]
1
2022-02-08T01:06:20.000Z
2022-02-08T01:06:20.000Z
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
utilities
@author: boyangzhao
"""

import pandas as pd
import re
24.075472
96
0.530564
8aa76a43878c4baa56da24cd2df4e08dd1f12800
4,779
py
Python
MAIN/Screens/Settings/category_2/__init__.py
aragubas/fogoso
bd24e049ee994410320e87fb3706c95bd8c9801f
[ "Apache-2.0" ]
null
null
null
MAIN/Screens/Settings/category_2/__init__.py
aragubas/fogoso
bd24e049ee994410320e87fb3706c95bd8c9801f
[ "Apache-2.0" ]
null
null
null
MAIN/Screens/Settings/category_2/__init__.py
aragubas/fogoso
bd24e049ee994410320e87fb3706c95bd8c9801f
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python3.7
# Copyright 2020 Aragubas
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#

# -- Imports -- #
from ENGINE import APPDATA as reg
from ENGINE import UTILS as utils
import ENGINE as tge
from Fogoso.MAIN import ClassesUtils as gameObjs
from Fogoso import MAIN as gameMain
import pygame, sys
import importlib
import time
from random import randint

OptionsScreen_DebugModeEnabled = gameObjs.UpDownButton
OptionsScreen_RandomWindowTitle = gameObjs.UpDownButton
OptionsScreen_NumberFormatting = gameObjs.UpDownButton
ElementsX = 0
ElementsY = 0
42.669643
306
0.765432
8aa779160503c74402f97032140e39891a948a62
1,279
py
Python
tests/test_toggle.py
ConnectionMaster/robotpy-wpilib-utilities
b62e563c7df113e9e513a36b9039f47f34157be1
[ "BSD-3-Clause" ]
14
2015-10-20T02:56:17.000Z
2020-03-17T04:44:12.000Z
tests/test_toggle.py
robotpy/robotpy-wpilib-utilities
80f753a1d315d234d6ecdd79be544ec01ca091ae
[ "BSD-3-Clause" ]
107
2015-01-26T23:47:10.000Z
2022-03-16T13:57:36.000Z
tests/test_toggle.py
ConnectionMaster/robotpy-wpilib-utilities
b62e563c7df113e9e513a36b9039f47f34157be1
[ "BSD-3-Clause" ]
21
2016-01-01T01:44:40.000Z
2022-03-15T18:00:35.000Z
from robotpy_ext.control.toggle import Toggle
from robotpy_ext.misc.precise_delay import NotifierDelay
23.685185
56
0.689601
8aa8401fd27f8fa99c12308b325e2e4f0cfa3068
2,872
py
Python
tests/test.py
kjanik70/tflearn
db5176773299b67a2a75c5889fb2aba7fd0fea8a
[ "MIT" ]
10,882
2016-03-31T16:03:11.000Z
2022-03-26T03:00:27.000Z
tests/test.py
min0355/tflearn
db5176773299b67a2a75c5889fb2aba7fd0fea8a
[ "MIT" ]
1,079
2016-04-02T06:14:16.000Z
2022-02-27T10:04:47.000Z
tests/test.py
min0355/tflearn
db5176773299b67a2a75c5889fb2aba7fd0fea8a
[ "MIT" ]
3,014
2016-03-31T16:03:26.000Z
2022-03-30T20:36:53.000Z
'''
This file contains test cases for tflearn
'''

import tensorflow.compat.v1 as tf
import tflearn
import unittest


if __name__ == "__main__":
    unittest.main()
30.88172
82
0.547354
8aaa6ef648c6ab0a8f38e3df5ebf0a4f712b233a
2,313
py
Python
infrastructure-provisioning/src/general/api/install_libs.py
roolrd/incubator-datalab
2045207ecd1b381193f1a1ec143cc968716ad989
[ "Apache-2.0" ]
66
2020-10-03T08:36:48.000Z
2022-03-20T23:16:20.000Z
infrastructure-provisioning/src/general/api/install_libs.py
roolrd/incubator-datalab
2045207ecd1b381193f1a1ec143cc968716ad989
[ "Apache-2.0" ]
48
2019-02-28T12:11:33.000Z
2020-09-15T08:27:08.000Z
infrastructure-provisioning/src/general/api/install_libs.py
roolrd/incubator-datalab
2045207ecd1b381193f1a1ec143cc968716ad989
[ "Apache-2.0" ]
44
2019-01-14T10:31:55.000Z
2020-09-22T17:53:33.000Z
#!/usr/bin/python3 # ***************************************************************************** # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # ****************************************************************************** import json import os import sys import subprocess if __name__ == "__main__": success = True try: subprocess.run('cd /root; fab install-libs', shell=True, check=True) except: success = False reply = dict() reply['request_id'] = os.environ['request_id'] if success: reply['status'] = 'ok' else: reply['status'] = 'err' reply['response'] = dict() try: with open("/root/result.json") as f: reply['response']['result'] = json.loads(f.read()) except: reply['response']['result'] = {"error": "Failed to open result.json"} reply['response']['log'] = "/var/log/datalab/{0}/{0}_{1}_{2}.log".format(os.environ['conf_resource'], os.environ['project_name'], os.environ['request_id']) with open("/response/{}_{}_{}.json".format(os.environ['conf_resource'], os.environ['project_name'], os.environ['request_id']), 'w') as response_file: response_file.write(json.dumps(reply)) try: subprocess.run('chmod 666 /response/*', shell=True, check=True) except: success = False if not success: sys.exit(1)
35.584615
105
0.565932
8aab4acf40735c2dc3547887c3be02d0b2808eff
1,584
py
Python
model_zoo/official/nlp/bert_thor/src/evaluation_config.py
GuoSuiming/mindspore
48afc4cfa53d970c0b20eedfb46e039db2a133d5
[ "Apache-2.0" ]
55
2020-12-17T10:26:06.000Z
2022-03-28T07:18:26.000Z
model_zoo/official/nlp/bert_thor/src/evaluation_config.py
forwhat461/mindspore
59a277756eb4faad9ac9afcc7fd526e8277d4994
[ "Apache-2.0" ]
1
2020-12-29T06:46:38.000Z
2020-12-29T06:46:38.000Z
model_zoo/official/nlp/bert_thor/src/evaluation_config.py
forwhat461/mindspore
59a277756eb4faad9ac9afcc7fd526e8277d4994
[ "Apache-2.0" ]
14
2021-01-29T02:39:47.000Z
2022-03-23T05:00:26.000Z
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
config settings, will be used in finetune.py
"""

from easydict import EasyDict as edict
import mindspore.common.dtype as mstype
from .bert_model import BertConfig

cfg = edict({
    'task': 'NER',
    'num_labels': 41,
    'data_file': '',
    'schema_file': None,
    'finetune_ckpt': '',
    'use_crf': False,
    'clue_benchmark': False,
})

bert_net_cfg = BertConfig(
    batch_size=8 if not cfg.clue_benchmark else 1,
    seq_length=512,
    vocab_size=30522,
    hidden_size=1024,
    num_hidden_layers=24,
    num_attention_heads=16,
    intermediate_size=4096,
    hidden_act="gelu",
    hidden_dropout_prob=0.0,
    attention_probs_dropout_prob=0.0,
    max_position_embeddings=512,
    type_vocab_size=2,
    initializer_range=0.02,
    use_relative_positions=False,
    input_mask_from_dataset=True,
    token_type_ids_from_dataset=True,
    dtype=mstype.float32,
    compute_type=mstype.float16,
)
28.8
78
0.693813
8aad801ac3abc226337a71ef38e5ff434b1f3490
1,052
py
Python
portal/apps/core/management/commands/sync_articleviewedby.py
Artis-Physis/utopia-cms
5cb8d941d0b2df53fddc566a52e9d3baee4a007e
[ "BSD-3-Clause" ]
8
2020-12-15T17:11:08.000Z
2021-12-13T22:08:33.000Z
portal/apps/core/management/commands/sync_articleviewedby.py
Artis-Physis/utopia-cms
5cb8d941d0b2df53fddc566a52e9d3baee4a007e
[ "BSD-3-Clause" ]
28
2020-12-15T17:34:03.000Z
2022-02-01T04:09:10.000Z
portal/apps/core/management/commands/sync_articleviewedby.py
Artis-Physis/utopia-cms
5cb8d941d0b2df53fddc566a52e9d3baee4a007e
[ "BSD-3-Clause" ]
7
2020-12-15T19:59:17.000Z
2021-11-24T16:47:06.000Z
# -*- coding: utf-8 -*-
# utopia-cms 2020. Aníbal Pacheco.

from django.core.management import BaseCommand
from django.db.utils import IntegrityError

from apps import core_articleviewedby_mdb
from core.models import ArticleViewedBy
37.571429
114
0.640684
8aad8dc0d7dead55101c7087ad08700bb763b130
7,900
py
Python
examples/minkunet.py
dendisuhubdy/MinkowskiEngine
a1cdcba68ef925bfefed2fe161f62e1ec78573b9
[ "MIT" ]
1
2019-05-12T00:06:10.000Z
2019-05-12T00:06:10.000Z
examples/minkunet.py
dendisuhubdy/MinkowskiEngine
a1cdcba68ef925bfefed2fe161f62e1ec78573b9
[ "MIT" ]
null
null
null
examples/minkunet.py
dendisuhubdy/MinkowskiEngine
a1cdcba68ef925bfefed2fe161f62e1ec78573b9
[ "MIT" ]
null
null
null
import torch
import torch.nn as nn
from torch.optim import SGD

import MinkowskiEngine as ME

from MinkowskiEngine.modules.resnet_block import BasicBlock, Bottleneck

from examples.common import data_loader
from examples.resnet import ResNetBase


if __name__ == '__main__':
    # loss and network
    criterion = nn.CrossEntropyLoss()
    net = MinkUNet14A(in_channels=3, out_channels=5, D=2)
    print(net)

    # a data loader must return a tuple of coords, features, and labels.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    net = net.to(device)
    optimizer = SGD(net.parameters(), lr=1e-2)

    for i in range(10):
        optimizer.zero_grad()

        # Get new data
        coords, feat, label = data_loader(is_classification=False)
        input = ME.SparseTensor(feat, coords=coords).to(device)
        label = label.to(device)

        # Forward
        output = net(input)

        # Loss
        loss = criterion(output.F, label)
        print('Iteration: ', i, ', Loss: ', loss.item())

        # Gradient
        loss.backward()
        optimizer.step()

    # Saving and loading a network
    torch.save(net.state_dict(), 'test.pth')
    net.load_state_dict(torch.load('test.pth'))
30.501931
80
0.603291
8aad8de20813d57dc973493fe2b63ad495089392
549
py
Python
setup.py
swfrench/nginx-access-tailer
5e060396ca749935c622e8e9c50b659b39e3675b
[ "BSD-3-Clause" ]
null
null
null
setup.py
swfrench/nginx-access-tailer
5e060396ca749935c622e8e9c50b659b39e3675b
[ "BSD-3-Clause" ]
null
null
null
setup.py
swfrench/nginx-access-tailer
5e060396ca749935c622e8e9c50b659b39e3675b
[ "BSD-3-Clause" ]
null
null
null
"""TODO.""" from setuptools import setup setup( name='nginx-access-tailer', version='0.1', author='swfrench', url='https://github.com/swfrench/nginx-tailer', packages=['nginx_access_tailer',], license='BSD three-clause license', entry_points={ 'console_scripts': ['nginx-access-tailer = nginx_access_tailer.__main__:main'], }, install_requires=[ 'python-gflags >= 3.1.1', 'google-cloud-monitoring >= 0.25.0', ], test_suite='nose.collector', tests_require=['nose', 'mock'], )
24.954545
87
0.626594
8aae1314a34df4a8c2038ff3f05e19541e560962
2,489
py
Python
tests/integration/test_cmk_describe.py
oglok/CPU-Manager-for-Kubernetes
503f37dcb20452699ce789b6628fa3ebeb9ffb54
[ "Apache-2.0" ]
null
null
null
tests/integration/test_cmk_describe.py
oglok/CPU-Manager-for-Kubernetes
503f37dcb20452699ce789b6628fa3ebeb9ffb54
[ "Apache-2.0" ]
null
null
null
tests/integration/test_cmk_describe.py
oglok/CPU-Manager-for-Kubernetes
503f37dcb20452699ce789b6628fa3ebeb9ffb54
[ "Apache-2.0" ]
null
null
null
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .. import helpers
from . import integration
21.273504
74
0.451185
8aaee662db93c29bfc4e01c664b5f8c132a76382
1,331
py
Python
setup.py
richardARPANET/persistent-celery-beat-scheduler
d2cbdd12394eec282ccb97ac5ff894353c2e4ffd
[ "Apache-2.0" ]
4
2018-04-04T13:03:08.000Z
2018-04-16T18:50:45.000Z
setup.py
richardARPANET/persistent-celery-beat-scheduler
d2cbdd12394eec282ccb97ac5ff894353c2e4ffd
[ "Apache-2.0" ]
null
null
null
setup.py
richardARPANET/persistent-celery-beat-scheduler
d2cbdd12394eec282ccb97ac5ff894353c2e4ffd
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python
# -*- coding: utf-8 -*

import os

from setuptools import find_packages, setup

# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))

with open('requirements.txt') as f:
    install_requires = f.read().splitlines()

setup(
    name='persistent-celery-beat-scheduler',
    version='0.1.1.dev0',
    packages=find_packages('src', exclude=('tests',)),
    package_dir={'': 'src'},
    include_package_data=True,
    zip_safe=False,
    description=(
        'Celery Beat Scheduler that stores the scheduler data in Redis.'
    ),
    author='Richard O\'Dwyer',
    author_email='[email protected]',
    license='Apache 2',
    long_description='https://github.com/richardasaurus/persistent-celery-beat-scheduler',
    install_requires=install_requires,
    classifiers=[
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Internet :: WWW/HTTP',
    ],
)
31.690476
90
0.643877
8aafa8be4338ac950ec6be097349874901cbc17e
3,807
py
Python
tests/test_client.py
mgobec/python-memcached
8ea5fe5fca3a4f0d1201ca9aa50f9701c1baab01
[ "Apache-2.0" ]
1
2019-07-19T18:09:38.000Z
2019-07-19T18:09:38.000Z
tests/test_client.py
mgobec/python-memcached
8ea5fe5fca3a4f0d1201ca9aa50f9701c1baab01
[ "Apache-2.0" ]
null
null
null
tests/test_client.py
mgobec/python-memcached
8ea5fe5fca3a4f0d1201ca9aa50f9701c1baab01
[ "Apache-2.0" ]
null
null
null
import collections
import unittest

import driver
from driver.protocol import *

_server = ('localhost', 11211)
_dead_retry = 30
_socket_timeout = 3
_max_receive_size = 4096


def _raise_exception(message):
    raise Exception(message)
34.609091
105
0.677699
8ab02ecbf400acde29e043cc50c322067db1b570
1,654
py
Python
GREYATOM-PROJECT----DATA--WRANGLING-WITH-PANDAS/code.py
Preethinaidu14/greyatom-python-for-data-science
5b758dd6123d9fc50031c43771b30d69e366c044
[ "MIT" ]
null
null
null
GREYATOM-PROJECT----DATA--WRANGLING-WITH-PANDAS/code.py
Preethinaidu14/greyatom-python-for-data-science
5b758dd6123d9fc50031c43771b30d69e366c044
[ "MIT" ]
null
null
null
GREYATOM-PROJECT----DATA--WRANGLING-WITH-PANDAS/code.py
Preethinaidu14/greyatom-python-for-data-science
5b758dd6123d9fc50031c43771b30d69e366c044
[ "MIT" ]
null
null
null
# --------------
# Import packages
import numpy as np
import pandas as pd
from scipy.stats import mode

path

# code starts here
bank = pd.read_csv(path)
categorical_var = bank.select_dtypes(include = 'object')
print(categorical_var)
numerical_var = bank.select_dtypes(include = 'number')
print(numerical_var)
# code ends here


# --------------
# code starts here
banks = bank.drop('Loan_ID',axis = 1)
print(banks)
print(banks.isnull().sum())
bank_mode = banks.mode().iloc[0]
banks = banks.fillna(bank_mode)
#code ends here


# --------------
# Code starts here
avg_loan_amount = banks.pivot_table(index=['Gender','Married','Self_Employed'],values = 'LoanAmount')
# code ends here


# --------------
# code starts here
loan_approved_se = ((banks['Self_Employed']=='Yes') & (banks['Loan_Status']=='Y')).value_counts()
#print(loan_approved_se)
loan_approved_nse = ((banks['Self_Employed']=='No') & (banks['Loan_Status']=='Y')).value_counts()
print(loan_approved_nse)
Loan_Status = 614
percentage_se = (56/Loan_Status)*100
percentage_nse = (366/Loan_Status)*100
# code ends here


# --------------
# code starts here
loan_term = banks['Loan_Amount_Term'].apply (lambda x : int(x)/12)
print(loan_term.value_counts())
big_loan = [i for i in loan_term if i >= 25]
big_loan_term = len(big_loan)
print(big_loan_term)
#[loan_term.value_counts()[i] for i in range(len(loan_terms)) if loan_term.value_counts().index[i] >= 25]
# code ends here


# --------------
# code starts here
loan_groupby = banks.groupby('Loan_Status')
loan_groupby = loan_groupby['ApplicantIncome','Credit_History']
mean_values = loan_groupby.mean()
# code ends here
19.458824
105
0.688634
8ab2d6d56bce4e65f9e2921fdc0ec8fdc7ecb7fb
855
py
Python
venv/Lib/site-packages/patsy/test_regressions.py
EkremBayar/bayar
aad1a32044da671d0b4f11908416044753360b39
[ "MIT" ]
710
2015-01-07T20:08:59.000Z
2022-03-08T14:30:13.000Z
venv/Lib/site-packages/patsy/test_regressions.py
EkremBayar/bayar
aad1a32044da671d0b4f11908416044753360b39
[ "MIT" ]
142
2015-01-07T02:20:27.000Z
2021-11-15T04:23:02.000Z
venv/Lib/site-packages/patsy/test_regressions.py
EkremBayar/bayar
aad1a32044da671d0b4f11908416044753360b39
[ "MIT" ]
101
2015-01-15T16:35:12.000Z
2022-02-19T06:50:02.000Z
# This file is part of Patsy
# Copyright (C) 2013 Nathaniel Smith <[email protected]>
# See file LICENSE.txt for license information.

# Regression tests for fixed bugs (when not otherwise better covered somewhere
# else)

from patsy import (EvalEnvironment, dmatrix, build_design_matrices,
                   PatsyError, Origin)
34.2
78
0.645614
8ab404c67e6f07e674ae9c5b07f6e6e0e0f914ac
7,764
py
Python
skimage/io/_plugins/pil_plugin.py
smheidrich/scikit-image
e9cf8b850c4c2800cc221be6f1dfff6a2a32a4eb
[ "BSD-3-Clause" ]
3
2019-02-28T16:05:36.000Z
2020-04-03T17:29:07.000Z
Lib/site-packages/skimage/io/_plugins/pil_plugin.py
caiyongji/Anaconda-py36.5-tensorflow-built-env
f4eb40b5ca3f49dfc929ff3ad2b4bb877e9663e2
[ "PSF-2.0" ]
26
2020-03-24T18:07:06.000Z
2022-03-12T00:12:27.000Z
Lib/site-packages/skimage/io/_plugins/pil_plugin.py
caiyongji/Anaconda-py36.5-tensorflow-built-env
f4eb40b5ca3f49dfc929ff3ad2b4bb877e9663e2
[ "PSF-2.0" ]
3
2019-12-31T23:21:40.000Z
2020-04-03T17:29:08.000Z
__all__ = ['imread', 'imsave'] import numpy as np from PIL import Image from ...util import img_as_ubyte, img_as_uint def imread(fname, dtype=None, img_num=None, **kwargs): """Load an image from file. Parameters ---------- fname : str or file File name or file-like-object. dtype : numpy dtype object or string specifier Specifies data type of array elements. img_num : int, optional Specifies which image to read in a file with multiple images (zero-indexed). kwargs : keyword pairs, optional Addition keyword arguments to pass through. Notes ----- Files are read using the Python Imaging Library. See PIL docs [1]_ for a list of supported formats. References ---------- .. [1] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html """ if isinstance(fname, str): with open(fname, 'rb') as f: im = Image.open(f) return pil_to_ndarray(im, dtype=dtype, img_num=img_num) else: im = Image.open(fname) return pil_to_ndarray(im, dtype=dtype, img_num=img_num) def pil_to_ndarray(image, dtype=None, img_num=None): """Import a PIL Image object to an ndarray, in memory. Parameters ---------- Refer to ``imread``. """ try: # this will raise an IOError if the file is not readable image.getdata()[0] except IOError as e: site = "http://pillow.readthedocs.org/en/latest/installation.html#external-libraries" pillow_error_message = str(e) error_message = ('Could not load "%s" \n' 'Reason: "%s"\n' 'Please see documentation at: %s' % (image.filename, pillow_error_message, site)) raise ValueError(error_message) frames = [] grayscale = None i = 0 while 1: try: image.seek(i) except EOFError: break frame = image if img_num is not None and img_num != i: image.getdata()[0] i += 1 continue if image.format == 'PNG' and image.mode == 'I' and dtype is None: dtype = 'uint16' if image.mode == 'P': if grayscale is None: grayscale = _palette_is_grayscale(image) if grayscale: frame = image.convert('L') else: if image.format == 'PNG' and 'transparency' in image.info: frame = image.convert('RGBA') else: frame = image.convert('RGB') elif image.mode == '1': frame = image.convert('L') elif 'A' in image.mode: frame = image.convert('RGBA') elif image.mode == 'CMYK': frame = image.convert('RGB') if image.mode.startswith('I;16'): shape = image.size dtype = '>u2' if image.mode.endswith('B') else '<u2' if 'S' in image.mode: dtype = dtype.replace('u', 'i') frame = np.fromstring(frame.tobytes(), dtype) frame.shape = shape[::-1] else: frame = np.array(frame, dtype=dtype) frames.append(frame) i += 1 if img_num is not None: break if hasattr(image, 'fp') and image.fp: image.fp.close() if img_num is None and len(frames) > 1: return np.array(frames) elif frames: return frames[0] elif img_num: raise IndexError('Could not find image #%s' % img_num) def _palette_is_grayscale(pil_image): """Return True if PIL image in palette mode is grayscale. Parameters ---------- pil_image : PIL image PIL Image that is in Palette mode. Returns ------- is_grayscale : bool True if all colors in image palette are gray. """ assert pil_image.mode == 'P' # get palette as an array with R, G, B columns palette = np.asarray(pil_image.getpalette()).reshape((256, 3)) # Not all palette colors are used; unused colors have junk values. start, stop = pil_image.getextrema() valid_palette = palette[start:stop + 1] # Image is grayscale if channel differences (R - G and G - B) # are all zero. return np.allclose(np.diff(valid_palette), 0) def ndarray_to_pil(arr, format_str=None): """Export an ndarray to a PIL object. Parameters ---------- Refer to ``imsave``. 
""" if arr.ndim == 3: arr = img_as_ubyte(arr) mode = {3: 'RGB', 4: 'RGBA'}[arr.shape[2]] elif format_str in ['png', 'PNG']: mode = 'I;16' mode_base = 'I' if arr.dtype.kind == 'f': arr = img_as_uint(arr) elif arr.max() < 256 and arr.min() >= 0: arr = arr.astype(np.uint8) mode = mode_base = 'L' else: arr = img_as_uint(arr) else: arr = img_as_ubyte(arr) mode = 'L' mode_base = 'L' try: array_buffer = arr.tobytes() except AttributeError: array_buffer = arr.tostring() # Numpy < 1.9 if arr.ndim == 2: im = Image.new(mode_base, arr.T.shape) try: im.frombytes(array_buffer, 'raw', mode) except AttributeError: im.fromstring(array_buffer, 'raw', mode) # PIL 1.1.7 else: image_shape = (arr.shape[1], arr.shape[0]) try: im = Image.frombytes(mode, image_shape, array_buffer) except AttributeError: im = Image.fromstring(mode, image_shape, array_buffer) # PIL 1.1.7 return im def imsave(fname, arr, format_str=None, **kwargs): """Save an image to disk. Parameters ---------- fname : str or file-like object Name of destination file. arr : ndarray of uint8 or float Array (image) to save. Arrays of data-type uint8 should have values in [0, 255], whereas floating-point arrays must be in [0, 1]. format_str: str Format to save as, this is defaulted to PNG if using a file-like object; this will be derived from the extension if fname is a string kwargs: dict Keyword arguments to the Pillow save function (or tifffile save function, for Tiff files). These are format dependent. For example, Pillow's JPEG save function supports an integer ``quality`` argument with values in [1, 95], while TIFFFile supports a ``compress`` integer argument with values in [0, 9]. Notes ----- Use the Python Imaging Library. See PIL docs [1]_ for a list of other supported formats. All images besides single channel PNGs are converted using `img_as_uint8`. Single Channel PNGs have the following behavior: - Integer values in [0, 255] and Boolean types -> img_as_uint8 - Floating point and other integers -> img_as_uint16 References ---------- .. [1] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html """ # default to PNG if file-like object if not isinstance(fname, str) and format_str is None: format_str = "PNG" # Check for png in filename if (isinstance(fname, str) and fname.lower().endswith(".png")): format_str = "PNG" arr = np.asanyarray(arr) if arr.dtype.kind == 'b': arr = arr.astype(np.uint8) if arr.ndim not in (2, 3): raise ValueError("Invalid shape for image array: %s" % (arr.shape, )) if arr.ndim == 3: if arr.shape[2] not in (3, 4): raise ValueError("Invalid number of channels in image array.") img = ndarray_to_pil(arr, format_str=format_str) img.save(fname, format=format_str, **kwargs)
29.861538
93
0.579341
8ab47b215dd213a094ad1c94dce6a5f882e00bd7
695
py
Python
examples/tellurium-files/linearChain.py
ShaikAsifullah/distributed-tellurium
007e9b3842b614edd34908c001119c6da1d41897
[ "Apache-2.0" ]
1
2019-06-19T04:40:33.000Z
2019-06-19T04:40:33.000Z
examples/tellurium-files/linearChain.py
ShaikAsifullah/distributed-tellurium
007e9b3842b614edd34908c001119c6da1d41897
[ "Apache-2.0" ]
null
null
null
examples/tellurium-files/linearChain.py
ShaikAsifullah/distributed-tellurium
007e9b3842b614edd34908c001119c6da1d41897
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*-
""" Linear chain of reactions. """
from __future__ import print_function, division
import tellurium as te

model = '''
model feedback()
    // Reactions:
    J0: $X0 -> S1; (VM1 * (X0 - S1/Keq1))/(1 + X0 + S1 + S4^h);
    J1: S1 -> S2; (10 * S1 - 2 * S2) / (1 + S1 + S2);
    J2: S2 -> S3; (10 * S2 - 2 * S3) / (1 + S2 + S3);
    J3: S3 -> S4; (10 * S3 - 2 * S4) / (1 + S3 + S4);
    J4: S4 -> $X1; (V4 * S4) / (KS4 + S4);

    // Species initializations:
    S1 = 0; S2 = 0; S3 = 0; S4 = 0;
    X0 = 10; X1 = 0;

    // Variable initialization:
    VM1 = 10; Keq1 = 10; h = 10; V4 = 2.5; KS4 = 0.5;
end'''

r = te.loada(model)
result = r.simulate(0, 40, 500)
r.plotWithLegend(result)
24.821429
64
0.515108
8ab58aaa336c1e253b3a0048b5e6954db5635335
276
py
Python
backend/app/schemas/__init__.py
kommurisaikumar/savings-manager-server
ed699abddf3cecdd4056aaee0129fbb1ef3762f6
[ "MIT" ]
null
null
null
backend/app/schemas/__init__.py
kommurisaikumar/savings-manager-server
ed699abddf3cecdd4056aaee0129fbb1ef3762f6
[ "MIT" ]
null
null
null
backend/app/schemas/__init__.py
kommurisaikumar/savings-manager-server
ed699abddf3cecdd4056aaee0129fbb1ef3762f6
[ "MIT" ]
null
null
null
from .users import User, UserCreate, UserUpdate
from .transactions import Transaction, TransactionCreate, TransactionUpdate
from .accounts import Account, AccountList, AccountSingle, AccountCreate, AccountUpdate
from .categories import Category, CategoryCreate, CategoryUpdate
69
87
0.858696
8ab7c4d71edafc2000970ee8f5e485db6a4fa978
872
py
Python
vimfiles/bundle/vim-python/submodules/pylint/tests/functional/s/super/super_with_arguments.py
ciskoinch8/vimrc
5bf77a7e7bc70fac5173ab2e9ea05d7dda3e52b8
[ "MIT" ]
463
2015-01-15T08:17:42.000Z
2022-03-28T15:10:20.000Z
vimfiles/bundle/vim-python/submodules/pylint/tests/functional/s/super/super_with_arguments.py
ciskoinch8/vimrc
5bf77a7e7bc70fac5173ab2e9ea05d7dda3e52b8
[ "MIT" ]
52
2015-01-06T02:43:59.000Z
2022-03-14T11:15:21.000Z
vimfiles/bundle/vim-python/submodules/pylint/tests/functional/s/super/super_with_arguments.py
ciskoinch8/vimrc
5bf77a7e7bc70fac5173ab2e9ea05d7dda3e52b8
[ "MIT" ]
249
2015-01-07T22:49:49.000Z
2022-03-18T02:32:06.000Z
def method_accepting_cls(cls, self):
    # Using plain `super()` is not valid here, since there's no `__class__` cell found
    # (Exact exception would be 'RuntimeError: super(): __class__ cell not found')
    # Instead, we expect to *not* see a warning about `super-with-arguments`.
    # Explicitly passing `cls`, and `self` to `super()` is what's required.
    super(cls, self).__init__()
24.222222
86
0.65711
8ab863848d8379f82bfc5f650de33e10615f3285
8,132
py
Python
machine.py
yukti07/Dell_Hire_hack
9422b7aaa0b96292191b4b880c0a8fb772fd1864
[ "MIT" ]
null
null
null
machine.py
yukti07/Dell_Hire_hack
9422b7aaa0b96292191b4b880c0a8fb772fd1864
[ "MIT" ]
null
null
null
machine.py
yukti07/Dell_Hire_hack
9422b7aaa0b96292191b4b880c0a8fb772fd1864
[ "MIT" ]
null
null
null
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from flask import flash
import numpy as np
47.835294
405
0.684702
8ab8993b826c4cf13cc7b962623c2d00cc2adcf7
6,435
py
Python
TM-GCN-master/experiment_bitcoin_baseline_link_prediction.py
OsmanMalik/TM-GCN
31b19a538f264f6c30b5503ecefb497ee865b4d7
[ "Apache-2.0" ]
14
2020-11-04T17:10:19.000Z
2022-03-04T07:48:22.000Z
TM-GCN-master/experiment_bitcoin_baseline_link_prediction.py
OsmanMalik/TM-GCN
31b19a538f264f6c30b5503ecefb497ee865b4d7
[ "Apache-2.0" ]
2
2021-09-06T09:38:12.000Z
2021-09-06T09:50:52.000Z
TensorGCN-master/experiment_bitcoin_baseline_link_prediction.py
NaimahmedNesaragi/TM-GCN
275d057a7261d8e6b544dad66b7daa7943d11c4f
[ "Apache-2.0" ]
6
2021-01-11T23:42:39.000Z
2022-01-31T08:37:13.000Z
# This version of the bitcoin experiment imports data preprocessed in Matlab, and uses the GCN baseline # The point of this script is to do link prediction # Imports and aliases import pickle import torch as t import torch.nn as nn import torch.nn.functional as F import torchvision import torchvision.datasets as datasets import numpy as np import matplotlib.pyplot as plt import cProfile import pandas as pd import datetime from scipy.sparse import csr_matrix import os.path import embedding_help_functions as ehf import scipy.io as sio unsq = t.unsqueeze sq = t.squeeze # Settings alpha_vec = [.75, .76, .77, .78, .79, .80, .81, .82, .83, .84, .85, .86, .87, .88, .89, .90, .91, .92, .93, .94, .95] no_layers = 1 dataset = "OTC" # OTC or Alpha no_epochs = 1000 mat_f_name = "saved_content_bitcoin_otc.mat" no_trials = 1 beta1 = 19 beta2 = 19 cutoff = 95 eval_type = "MAP-MRR" # "MAP-MRR" or "F1" data_loc = "data/Bitcoin_" + dataset + "/" S_train, S_val, S_test = 95, 20, 20 lr = 0.01 momentum = 0.9 # Load and return relevant data A, A_labels, C_train, C_val, C_test, N = ehf.load_data(data_loc, mat_f_name, S_train, S_val, S_test, transformed=False) # Create features for the nodes X_train, X_val, X_test = ehf.create_node_features(A, S_train, S_val, S_test, same_block_size=False) # Extract edges and labels from A_labels, and augment with nonexisting edges # edges, beta edges = A_labels._indices() edges_aug, labels = ehf.augment_edges(edges, N, beta1, beta2, cutoff) # Divide adjacency matrices and labels into training, validation and testing sets edges_train, target_train, e_train, edges_val, target_val, e_val, edges_test, target_test, e_test = ehf.split_data(edges_aug, labels, S_train, S_val, S_test, same_block_size = False) if no_trials > 1: ep_acc_loss_vec = [] for tr in range(no_trials): for alpha in alpha_vec: class_weights = t.tensor([alpha, 1.0-alpha]) save_res_fname = "results_BASELINE_layers" + str(no_layers) + "_w" + str(round(float(class_weights[0])*100)) + "_" + dataset + "_link_prediction" # Create gcn for training if no_layers == 2: gcn = ehf.EmbeddingKWGCN(C_train[:-1], X_train[:-1], e_train, [6,6,2], nonlin2="selu") elif no_layers == 1: gcn = ehf.EmbeddingKWGCN(C_train[:-1], X_train[:-1], e_train, [6,2]) # Train optimizer = t.optim.SGD(gcn.parameters(), lr=lr, momentum=momentum) criterion = nn.CrossEntropyLoss(weight=class_weights) # Takes arguments (output, target) if eval_type == "F1": ep_acc_loss = np.zeros((no_epochs,12)) # (precision_train, recall_train, f1_train, loss_train, precision_val, recall_val, f1_val, loss_val, precision_test, recall_test, f1_test, loss_test) elif eval_type == "MAP-MRR": ep_acc_loss = np.zeros((no_epochs,9)) # (MAP_train, MRR_train, loss_train, MAP_val, MRR_val, loss_val, MAP_test, MRR_test, loss_test) for ep in range(no_epochs): # Compute loss and take step optimizer.zero_grad() output_train = gcn() loss_train = criterion(output_train, target_train[edges_train[0]!=0]) loss_train.backward() optimizer.step() # Things that don't require gradient with t.no_grad(): if ep % 100 == 0: # Compute stats for training data; no point in doing more often than this guess_train = t.argmax(output_train, dim=1) if eval_type == "F1": precision_train, recall_train, f1_train = ehf.compute_f1(guess_train, target_train[edges_train[0]!=0]) elif eval_type == "MAP-MRR": MAP_train, MRR_train = ehf.compute_MAP_MRR(output_train, target_train[edges_train[0]!=0], edges_train[:, edges_train[0]!=0]) # Compute stats for validation data output_val = gcn(C_val[:-1], X_val[:-1], e_val) 
guess_val = t.argmax(output_val, dim=1) if eval_type == "F1": precision_val, recall_val, f1_val = ehf.compute_f1(guess_val, target_val[edges_val[0]!=0]) elif eval_type == "MAP-MRR": MAP_val, MRR_val = ehf.compute_MAP_MRR(output_val, target_val[edges_val[0]!=0], edges_val[:, edges_val[0]!=0]) loss_val = criterion(output_val, target_val[edges_val[0]!=0]) # Compute stats for test data output_test = gcn(C_test[:-1], X_test[:-1], e_test) guess_test = t.argmax(output_test, dim=1) if eval_type == "F1": precision_test, recall_test, f1_test = ehf.compute_f1(guess_test, target_test[edges_test[0]!=0]) elif eval_type == "MAP-MRR": MAP_test, MRR_test = ehf.compute_MAP_MRR(output_test, target_test[edges_test[0]!=0], edges_test[:, edges_test[0]!=0]) loss_test = criterion(output_test, target_test[edges_test[0]!=0]) # Print if eval_type == "F1": ehf.print_f1(precision_train, recall_train, f1_train, loss_train, precision_val, recall_val, f1_val, loss_val, precision_test, recall_test, f1_test, loss_test, alpha, tr, ep) elif eval_type == "MAP-MRR": print("alpha/Tr/Ep %.2f/%d/%d. Train MAP/MRR %.16f/%.16f. Train loss %.16f." % (alpha, tr, ep, MAP_train, MRR_train, loss_train)) print("alpha/Tr/Ep %.2f/%d/%d. Val MAP/MRR %.16f/%.16f. Val loss %.16f." % (alpha, tr, ep, MAP_val, MRR_val, loss_val)) print("alpha/Tr/Ep %.2f/%d/%d. Test MAP/MRR %.16f/%.16f. Test loss %.16f.\n" % (alpha, tr, ep, MAP_test, MRR_test, loss_test)) # Store values with results if eval_type == "F1": ep_acc_loss[ep] = [precision_train, recall_train, f1_train, loss_train, precision_val, recall_val, f1_val, loss_val, precision_test, recall_test, f1_test, loss_test] elif eval_type == "MAP-MRR": ep_acc_loss[ep] = [MAP_train, MRR_train, loss_train, MAP_val, MRR_val, loss_val, MAP_test, MRR_test, loss_test] if eval_type == "F1": ehf.print_f1(precision_train, recall_train, f1_train, loss_train, precision_val, recall_val, f1_val, loss_val, precision_test, recall_test, f1_test, loss_test, is_final=True) elif eval_type == "MAP-MRR": print("FINAL: Train MAP/MRR %.16f/%.16f. Train loss %.16f." % (MAP_train, MRR_train, loss_train)) print("FINAL: Val MAP/MRR %.16f/%.16f. Val loss %.16f." % (MAP_val, MRR_val, loss_val)) print("FINAL: Test MAP/MRR %.16f/%.16f. Test loss %.16f.\n" % (MAP_test, MRR_test, loss_test)) if no_trials == 1: pickle.dump(ep_acc_loss, open(save_res_fname, "wb")) print("Results saved for single trial") else: ep_acc_loss_vec.append(ep_acc_loss) if no_trials > 1: pickle.dump(ep_acc_loss_vec, open(save_res_fname + "_no_trials" + str(no_trials), "wb")) print("Results saved for all trials")
45.638298
191
0.707537
8ab94a7177eff40dfe2d54daa4adb7bbd8788e95
1,084
py
Python
elm_mnist/elm_mnist.py
ahara/-blog
926ae4808ede6efb1e64381a19a210235a97ac36
[ "MIT" ]
null
null
null
elm_mnist/elm_mnist.py
ahara/-blog
926ae4808ede6efb1e64381a19a210235a97ac36
[ "MIT" ]
null
null
null
elm_mnist/elm_mnist.py
ahara/-blog
926ae4808ede6efb1e64381a19a210235a97ac36
[ "MIT" ]
null
null
null
import cPickle

import numpy as np

from elm import ELMClassifier
from sklearn import linear_model


if __name__ == '__main__':
    # Load data sets
    train_x, train_y, val_x, val_y, test_x, test_y = get_datasets(load_mnist())
    # Build ELM
    cls = ELMClassifier(n_hidden=7000, alpha=0.93, activation_func='multiquadric',
                        regressor=linear_model.Ridge(), random_state=21398023)
    cls.fit(train_x, train_y)
    # Evaluate model
    print 'Validation error:', cls.score(val_x, val_y)
    print 'Test error:', cls.score(test_x, test_y)
32.848485
85
0.621771
8abb81ca4107a0dafeae1ce248a3690886bc60c3
1,960
py
Python
Coding_Part/bob.py
qizhu8/CSCI6230-HW02
c889c0532db7ff4f25e134937469e5e6181416f0
[ "Apache-2.0" ]
null
null
null
Coding_Part/bob.py
qizhu8/CSCI6230-HW02
c889c0532db7ff4f25e134937469e5e6181416f0
[ "Apache-2.0" ]
null
null
null
Coding_Part/bob.py
qizhu8/CSCI6230-HW02
c889c0532db7ff4f25e134937469e5e6181416f0
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*-
#!/usr/bin/env python3

from PKC_Classes import NetworkUser, KDC
from DES import DES
from RSA_Class import RSA

import socket
import os
import sys
import threading
import time

if sys.version_info[0] < 3:
    raise Exception("Must be using Python 3")

bob = NetworkUser('Alice', DES(), RSA(9973, 97), 200)
print('bob:', bob.uid)

# socket communication
kdc_host, kdc_port = 'localhost', 9999
bob_host, bob_port = 'localhost', 9200

# talk to kdc for sess key
try:
    sock_with_kdc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock_with_kdc.connect((kdc_host, kdc_port))
    print(sock_with_kdc.recv(1024))

    # send cipher_key
    bob_cipher_key_packet = bob.send_cipher_key()
    sock_with_kdc.send(bob_cipher_key_packet.encode())
    kdc_bob_cipher_key_packet = sock_with_kdc.recv(1024).decode()
    print(kdc_bob_cipher_key_packet)
    bob.process_packet(kdc_bob_cipher_key_packet)
except socket.error as msg:
    print(msg); sys.exit(1)

# sock_with_kdc.shutdown(socket.SHUT_WR)

# talk to bob
try:
    sock_self = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock_self.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock_self.bind((bob_host, bob_port))
    sock_self.listen(10)
except socket.error as msg:
    print(msg); sys.exit(1)

while 1:
    conn, addr = sock_self.accept()
    thread = threading.Thread(target=reply_conn, args=(conn, addr))
    thread.start()

# sock_self.close()
26.849315
69
0.694388
8abbc734ea1294bef8b90bd4c5b933a5890bb4db
10,257
py
Python
proj/scripts/cluster/baselines/triplets_greyscale.py
zqma/IIC
9d4e30b51535c6ca381389d9c22ce45be4d11883
[ "MIT" ]
null
null
null
proj/scripts/cluster/baselines/triplets_greyscale.py
zqma/IIC
9d4e30b51535c6ca381389d9c22ce45be4d11883
[ "MIT" ]
null
null
null
proj/scripts/cluster/baselines/triplets_greyscale.py
zqma/IIC
9d4e30b51535c6ca381389d9c22ce45be4d11883
[ "MIT" ]
null
null
null
from __future__ import print_function import argparse import itertools import os import pickle import sys from datetime import datetime import matplotlib import numpy as np import torch matplotlib.use('Agg') import matplotlib.pyplot as plt import proj.archs as archs from proj.utils.cluster.general import config_to_str, get_opt, update_lr from proj.utils.cluster.baselines.triplets import make_triplets_data, \ triplets_eval, triplets_loss """ Triplets. Makes output distribution same as that of attractor, and different to that of repeller. Greyscale version (no sobel). """ # Options ---------------------------------------------------------------------- parser = argparse.ArgumentParser() parser.add_argument("--model_ind", type=int, required=True) parser.add_argument("--arch", type=str, required=True) parser.add_argument("--opt", type=str, default="Adam") parser.add_argument("--dataset", type=str, required=True) parser.add_argument("--dataset_root", type=str, required=True) parser.add_argument("--gt_k", type=int, required=True) parser.add_argument("--output_k", type=int, required=True) parser.add_argument("--lr", type=float, default=0.01) parser.add_argument("--lr_schedule", type=int, nargs="+", default=[]) parser.add_argument("--lr_mult", type=float, default=0.1) parser.add_argument("--num_epochs", type=int, default=1000) parser.add_argument("--batch_sz", type=int, required=True) # num pairs parser.add_argument("--out_root", type=str, default="/scratch/shared/slow/xuji/iid_private") parser.add_argument("--restart", dest="restart", default=False, action="store_true") parser.add_argument("--test_code", dest="test_code", default=False, action="store_true") parser.add_argument("--save_freq", type=int, default=10) parser.add_argument("--kmeans_on_features", default=False, action="store_true") # transforms # used for "positive" sample parser.add_argument("--demean", dest="demean", default=False, action="store_true") parser.add_argument("--per_img_demean", dest="per_img_demean", default=False, action="store_true") parser.add_argument("--data_mean", type=float, nargs="+", default=[0.5, 0.5, 0.5]) parser.add_argument("--data_std", type=float, nargs="+", default=[0.5, 0.5, 0.5]) parser.add_argument("--crop_orig", dest="crop_orig", default=False, action="store_true") parser.add_argument("--crop_other", dest="crop_other", default=False, action="store_true") parser.add_argument("--tf1_crop", type=str, default="random") # type name parser.add_argument("--tf2_crop", type=str, default="random") parser.add_argument("--tf1_crop_sz", type=int, default=84) parser.add_argument("--tf2_crop_szs", type=int, nargs="+", default=[84]) # allow diff crop for imgs_tf parser.add_argument("--tf3_crop_diff", dest="tf3_crop_diff", default=False, action="store_true") parser.add_argument("--tf3_crop_sz", type=int, default=0) parser.add_argument("--input_sz", type=int, default=96) parser.add_argument("--rot_val", type=float, default=0.) 
parser.add_argument("--always_rot", dest="always_rot", default=False, action="store_true") parser.add_argument("--no_jitter", dest="no_jitter", default=False, action="store_true") parser.add_argument("--no_flip", dest="no_flip", default=False, action="store_true") config = parser.parse_args() # Fixed settings and checks ---------------------------------------------------- config.in_channels = 1 if config.output_k != config.gt_k: assert (config.output_k > config.gt_k) assert (config.kmeans_on_features) config.out_dir = os.path.join(config.out_root, str(config.model_ind)) config.dataloader_batch_sz = config.batch_sz config.num_dataloaders = 1 if not os.path.exists(config.out_dir): os.makedirs(config.out_dir) if config.restart: given_config = config reloaded_config_path = os.path.join(given_config.out_dir, "config.pickle") print("Loading restarting config from: %s" % reloaded_config_path) with open(reloaded_config_path, "rb") as config_f: config = pickle.load(config_f) assert (config.model_ind == given_config.model_ind) config.restart = True # copy over new num_epochs and lr schedule config.num_epochs = given_config.num_epochs config.lr_schedule = given_config.lr_schedule if not hasattr(config, "kmeans_on_features"): config.kmeans_on_features = False else: print("Config: %s" % config_to_str(config)) # Data, nets, optimisers ------------------------------------------------------- dataloader_original, dataloader_positive, dataloader_negative, \ dataloader_test = make_triplets_data(config) train_dataloaders = [dataloader_original, dataloader_positive, dataloader_negative] net = archs.__dict__[config.arch](config) if config.restart: model_path = os.path.join(config.out_dir, "latest_net.pytorch") taking_best = not os.path.exists(model_path) if taking_best: print("using best instead of latest") model_path = os.path.join(config.out_dir, "best_net.pytorch") net.load_state_dict( torch.load(model_path, map_location=lambda storage, loc: storage)) net.cuda() net = torch.nn.DataParallel(net) net.train() optimiser = get_opt(config.opt)(net.module.parameters(), lr=config.lr) if config.restart: opt_path = os.path.join(config.out_dir, "latest_optimiser.pytorch") if taking_best: opt_path = os.path.join(config.out_dir, "best_optimiser.pytorch") optimiser.load_state_dict(torch.load(opt_path)) # Results storage -------------------------------------------------------------- if config.restart: if not taking_best: next_epoch = config.last_epoch + 1 # corresponds to last saved model else: next_epoch = np.argmax(np.array(config.epoch_acc)) + 1 print("starting from epoch %d" % next_epoch) config.epoch_acc = config.epoch_acc[:next_epoch] # in case we overshot config.epoch_loss = config.epoch_loss[:next_epoch] config.masses = config.masses[:next_epoch, :] config.per_class_acc = config.per_class_acc[:next_epoch, :] else: config.epoch_acc = [] config.epoch_loss = [] config.masses = None config.per_class_acc = None _ = triplets_eval(config, net, dataloader_test=dataloader_test, sobel=False) print("Pre: time %s: \n %s" % (datetime.now(), config.epoch_acc[-1])) sys.stdout.flush() next_epoch = 1 fig, axarr = plt.subplots(4, sharex=False, figsize=(20, 20)) # Train ------------------------------------------------------------------------ for e_i in xrange(next_epoch, config.num_epochs): print("Starting e_i: %d" % (e_i)) if e_i in config.lr_schedule: optimiser = update_lr(optimiser, lr_mult=config.lr_mult) avg_loss = 0. 
# over heads and head_epochs (and sub_heads) avg_loss_count = 0 sys.stdout.flush() iterators = (d for d in train_dataloaders) b_i = 0 for tup in itertools.izip(*iterators): net.module.zero_grad() imgs_orig = tup[0][0].cuda() imgs_pos = tup[1][0].cuda() imgs_neg = tup[2][0].cuda() outs_orig = net(imgs_orig) outs_pos = net(imgs_pos) outs_neg = net(imgs_neg) curr_loss = triplets_loss(outs_orig, outs_pos, outs_neg) if ((b_i % 100) == 0) or (e_i == next_epoch and b_i < 10): print("Model ind %d epoch %d batch %d " "loss %f time %s" % \ (config.model_ind, e_i, b_i, curr_loss.item(), datetime.now())) sys.stdout.flush() if not np.isfinite(float(curr_loss.item())): print("Loss is not finite... %s:" % str(curr_loss.item())) exit(1) avg_loss += curr_loss.item() avg_loss_count += 1 curr_loss.backward() optimiser.step() b_i += 1 if b_i == 2 and config.test_code: break avg_loss = float(avg_loss / avg_loss_count) config.epoch_loss.append(avg_loss) # Eval and storage ----------------------------------------------------------- # when epoch over both heads is finished is_best = triplets_eval(config, net, dataloader_test=dataloader_test, sobel=False) print("Time %s, acc %s" % (datetime.now(), config.epoch_acc[-1])) sys.stdout.flush() axarr[0].clear() axarr[0].plot(config.epoch_acc) axarr[0].set_title("acc, top: %f" % max(config.epoch_acc)) axarr[1].clear() axarr[1].plot(config.epoch_loss) axarr[1].set_title("Loss") axarr[2].clear() for c in xrange(config.gt_k): axarr[2].plot(config.masses[:, c]) axarr[2].set_title("masses") axarr[3].clear() for c in xrange(config.gt_k): axarr[3].plot(config.per_class_acc[:, c]) axarr[3].set_title("per_class_acc") fig.tight_layout() fig.canvas.draw_idle() fig.savefig(os.path.join(config.out_dir, "plots.png")) if is_best or (e_i % config.save_freq == 0): net.module.cpu() if is_best: torch.save(net.module.state_dict(), os.path.join(config.out_dir, "best_net.pytorch")) torch.save(optimiser.state_dict(), os.path.join(config.out_dir, "best_optimiser.pytorch")) if e_i % config.save_freq == 0: torch.save(net.module.state_dict(), os.path.join(config.out_dir, "latest_net.pytorch")) torch.save(optimiser.state_dict(), os.path.join(config.out_dir, "latest_optimiser.pytorch")) config.last_epoch = e_i # for last saved version net.module.cuda() with open(os.path.join(config.out_dir, "config.pickle"), 'wb') as outfile: pickle.dump(config, outfile) with open(os.path.join(config.out_dir, "config.txt"), "w") as text_file: text_file.write("%s" % config) if config.test_code: exit(0)
33.963576
82
0.632641
8abc0d6dcbf21ec8770db13b5b8c148d9b2c8d8e
1,607
py
Python
migrations/versions/0084_add_job_stats.py
cds-snc/notifier-api
90b385ec49efbaee7e607516fc7d9f08991af813
[ "MIT" ]
41
2019-11-28T16:58:41.000Z
2022-01-28T21:11:16.000Z
migrations/versions/0084_add_job_stats.py
cds-snc/notification-api
b1c1064f291eb860b494c3fa65ac256ad70bf47c
[ "MIT" ]
1,083
2019-07-08T12:57:24.000Z
2022-03-08T18:53:40.000Z
migrations/versions/0084_add_job_stats.py
cds-snc/notifier-api
90b385ec49efbaee7e607516fc7d9f08991af813
[ "MIT" ]
9
2020-01-24T19:56:43.000Z
2022-01-27T21:36:53.000Z
"""empty message Revision ID: 0084_add_job_stats Revises: 0083_add_perm_types_and_svc_perm Create Date: 2017-05-12 13:16:14.147368 """ # revision identifiers, used by Alembic. revision = "0084_add_job_stats" down_revision = "0083_add_perm_types_and_svc_perm" import sqlalchemy as sa from alembic import op from sqlalchemy.dialects import postgresql
35.711111
96
0.683261
8abc2535fb59574434dff13ed4c596ed4d606f9e
4,279
py
Python
addons/twofactor/tests/test_models.py
tsukaeru/RDM-osf.io
2dc3e539322b6110e51772f8bd25ebdeb8e12d0e
[ "Apache-2.0" ]
11
2018-12-11T16:39:40.000Z
2022-02-26T09:51:32.000Z
addons/twofactor/tests/test_models.py
tsukaeru/RDM-osf.io
2dc3e539322b6110e51772f8bd25ebdeb8e12d0e
[ "Apache-2.0" ]
52
2018-04-13T05:03:21.000Z
2022-03-22T02:56:19.000Z
addons/twofactor/tests/test_models.py
tsukaeru/RDM-osf.io
2dc3e539322b6110e51772f8bd25ebdeb8e12d0e
[ "Apache-2.0" ]
16
2018-07-09T01:44:51.000Z
2021-06-30T01:57:16.000Z
import unittest
from future.moves.urllib.parse import urlparse, urljoin, parse_qs

import pytest
from addons.twofactor.tests.utils import _valid_code
from nose.tools import (assert_equal, assert_false, assert_is_none,
                        assert_is_not_none, assert_true)
from osf_tests.factories import UserFactory

pytestmark = pytest.mark.django_db
32.416667
78
0.646646
8abd39aa48321431318051d54854247571fa2704
311
py
Python
betterloader/standard_transforms.py
BinItAI/BetterLoader
29ebcc22b53db6417a4b14d95f0a1e7f5afe7af8
[ "MIT" ]
39
2020-08-11T09:58:08.000Z
2022-02-24T19:22:42.000Z
betterloader/standard_transforms.py
BinItAI/BetterLoader
29ebcc22b53db6417a4b14d95f0a1e7f5afe7af8
[ "MIT" ]
21
2020-08-11T09:58:46.000Z
2021-05-10T12:50:12.000Z
betterloader/standard_transforms.py
BinItAI/BetterLoader
29ebcc22b53db6417a4b14d95f0a1e7f5afe7af8
[ "MIT" ]
2
2020-10-29T14:51:01.000Z
2021-01-08T09:40:34.000Z
import numpy as np
from torchvision import transforms

np.random.seed(1)
19.4375
37
0.678457
8abed448e30652e384272b8cc640eedca2d718cf
1,708
py
Python
lanedet/runner/utils/net_utils.py
ztjsw/lanedet
c957e1f70695e39063231612637e22fcad2769f5
[ "Apache-2.0" ]
1
2021-05-22T09:36:17.000Z
2021-05-22T09:36:17.000Z
lanedet/runner/utils/net_utils.py
ztjsw/lanedet
c957e1f70695e39063231612637e22fcad2769f5
[ "Apache-2.0" ]
null
null
null
lanedet/runner/utils/net_utils.py
ztjsw/lanedet
c957e1f70695e39063231612637e22fcad2769f5
[ "Apache-2.0" ]
null
null
null
import torch
import os
from torch import nn
import numpy as np
import torch.nn.functional
from termcolor import colored

from .logger import get_logger
34.16
84
0.648712
8abf08c703d4b07df642c217bba0fae7c6cdc10b
141
py
Python
hexafuel_oil/hexafuel_oil_app/apps.py
zante95/Hexafuel-Oil
41dc4c9d855c74d4bb7dd86f3ac3fb1db27b663b
[ "MIT" ]
null
null
null
hexafuel_oil/hexafuel_oil_app/apps.py
zante95/Hexafuel-Oil
41dc4c9d855c74d4bb7dd86f3ac3fb1db27b663b
[ "MIT" ]
null
null
null
hexafuel_oil/hexafuel_oil_app/apps.py
zante95/Hexafuel-Oil
41dc4c9d855c74d4bb7dd86f3ac3fb1db27b663b
[ "MIT" ]
null
null
null
from django.apps import AppConfig #pragma: no cover
23.5
56
0.77305
8ac004a4f19bb41d9cfa8a39529011d30c5a08dc
5,455
py
Python
main.py
jonodrew/matchex
531e7cd1c328cb9dc34b601a06648bd2c3e709e6
[ "MIT" ]
null
null
null
main.py
jonodrew/matchex
531e7cd1c328cb9dc34b601a06648bd2c3e709e6
[ "MIT" ]
null
null
null
main.py
jonodrew/matchex
531e7cd1c328cb9dc34b601a06648bd2c3e709e6
[ "MIT" ]
null
null
null
from __future__ import division from timeit import default_timer as timer import csv import numpy as np import itertools from munkres import Munkres, print_matrix, make_cost_matrix import sys from classes import * from functions import * from math import sqrt import Tkinter as tk import tkFileDialog as filedialog root = tk.Tk() root.withdraw() p_file = filedialog.askopenfilename(title='Please select the posting file') c_file = filedialog.askopenfilename(title='Please select the candidate file') """for use with /users/java_jonathan/postings_lge.csv and /Users/java_jonathan/candidates_lge.csv""" # p_file = raw_input("Please enter the path for the postings file: ") # p_file = p_file.strip() # c_file = raw_input("Please enter the path for the candidate file: ") # c_file = c_file.strip() start = timer() with open(p_file,'r') as f: #with open('/Users/Jonathan/Google Drive/CPD/Python/postings.csv','r') as f: reader = csv.reader(f) postingsAll = list(reader) with open(c_file,'r') as f: reader = csv.reader(f) candidatesAll = list(reader) """create empty lists to fill with lists of lists output by iterating function below""" names = [] totalMatrix = [] for list in candidatesAll: candidate = Candidate(*list) names.append(candidate.name) n = 0 for list in postingsAll: posting = Posting(*list) totalMatrix.append(matchDept(posting,candidate) + matchAnchor(posting,candidate) +matchLocation(posting,candidate) + matchCompetency(posting,candidate) + matchSkill(posting,candidate)+matchCohort(posting,candidate)) n += 1 l = len(names) names.extend([0] * (n-l)) totalMatrix.extend([0] * (n**2 - len(totalMatrix))) totalMatrix = np.asarray(totalMatrix) totalMatrix = np.reshape(totalMatrix,(n,-1)) #at this point the matrix is structured as candidates down and jobs across totalMatrix = np.transpose(totalMatrix) #now it's switched! 
totalMatrix = np.subtract(np.amax(totalMatrix),totalMatrix) totalMatrix = np.array(totalMatrix) minSuitability = 18 check = [] result = [] m = Munkres() indexes = m.compute(totalMatrix) #print_matrix(totalMatrix, msg='Lowest cost through this matrix:') total = 0.0 unhappy_candidates = 0 medium_candidates = 0 tenpc_candidates = 0 qs_candidates = 0 vs_candidates = 0 f = open('output.txt', 'w') for row, column in indexes: if column < l: value = totalMatrix[row][column] if value > minSuitability*0.9: tenpc_candidates += 1 elif value > minSuitability*0.75: medium_candidates += 1 elif value > minSuitability/2: unhappy_candidates += 1 elif value > minSuitability*0.25: qs_candidates += 1 elif value > minSuitability*0.1: vs_candidates += 1 total += value check.append(column+1) result.append((row,column)) f.write('For candidate %s: \nOptimal position: %d (score %s)\n' % (names[column], column+1, value)) else: pass globalSatisfaction = 100*(1-(total/(l*minSuitability))) print('Global satisfaction: %.2f%%' % globalSatisfaction) print('Candidates who are more than 90%% suitable: %d' % vs_candidates) print('Candidates who are more than 75%% suitable: %d' % qs_candidates) print('Candidates who are more than 50%% suitable: %d' % (l-unhappy_candidates)) print('Candidates who are more than 75%% unsuitable: %d' % medium_candidates) print('Candidates who are more than 90%% unsuitable: %d' % tenpc_candidates) #output from excel: correct = [1,3,5,9,10,2,4,8,6,7] #this function tests output above against Excel: #test(correct,check) topMatrix = topFive(names,totalMatrix) #print(topMatrix) np.savetxt('/Users/java_jonathan/test.csv',topMatrix, fmt='%s', delimiter=',', newline='\n', header='', footer='', comments='# ') np.savetxt('/Users/java_jonathan/test2.csv',totalMatrix, fmt='%s', delimiter=',', newline='\n', header='', footer='', comments='# ') end = timer() print(end-start) """ #posting = [Posting(*postingsAll)] #print(posting[0].anchor) #print(posting) #print(candidatesAll) #print(postingsAll) #print(postingsAll[0].name) #print(preferences) #print(postings) #split up files into relative blocks postCode = [lists[0] for lists in postings] postDept = [lists[1] for lists in postings] postAnchor = [lists[2] for lists in postings] postSkills = [lists[3:5] for lists in postings] postLocation = [lists[5] for lists in postings] postCompetencies = [lists[7:10] for lists in postings] postSecurity = [lists[10] for lists in postings] #with open('/Users/Jonathan/Google Drive/CPD/Python/candidates.csv','r') as f: #gives first column ie candidate a a=totalMatrix[:,[0]] #b = totalMatrix[:,[0]] #print(a) #converts 1D matrix to list for ease a = np.array(a).tolist() #print(a) #creates list called output containing rank of score output = [0] * len(a) for i, x in enumerate(sorted(range(len(a)), key=lambda y: a[y])): output[x] = i print(output) #creates tuples of rank, job and appends to list jobRank = [] # for rank, b in zip(output, postCode): # jobScore = (rank,b) # list(jobScore) # jobRank.append(jobScore) # print(jobRank) output = [0] * len(a) for i, x in enumerate(sorted(range(len(a)), key=lambda y: a[y])): output[x] = i print(output) # #print(a) # jobRank = sorted(jobRank, reverse=False) # print(jobRank) # print('For candidate a, the best position is %s') % (jobRank[0][1]) # print(candidate[0].skills) """
30.646067
88
0.698075
8ac00891cba917dcea99bd7701a43788bba03334
3,142
py
Python
pip_info/setup.py
95616ARG/SyReNN
19abf589e84ee67317134573054c648bb25c244d
[ "MIT" ]
36
2019-08-19T06:17:52.000Z
2022-03-11T09:02:40.000Z
pip_info/setup.py
95616ARG/SyReNN
19abf589e84ee67317134573054c648bb25c244d
[ "MIT" ]
8
2020-04-09T20:59:04.000Z
2022-03-11T23:56:50.000Z
pip_info/setup.py
95616ARG/SyReNN
19abf589e84ee67317134573054c648bb25c244d
[ "MIT" ]
4
2021-01-13T11:17:55.000Z
2021-06-28T19:36:04.000Z
"""Setup script for PySyReNN. Adapted from: https://hynek.me/articles/sharing-your-labor-of-love-pypi-quick-and-dirty/ """ import codecs import os import re from setuptools import setup, find_packages ################################################################### NAME = "pysyrenn" PACKAGES = [ "syrenn_proto", "pysyrenn", "pysyrenn.frontend", "pysyrenn.helpers", ] META_PATH = "__metadata__.py" KEYWORDS = ["class", "attribute", "boilerplate"] CLASSIFIERS = [ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Natural Language :: English", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Software Development :: Libraries :: Python Modules", ] INSTALL_REQUIRES = ["torch"] with open("requirements.txt") as requirements: reading = False for line in requirements.readlines(): if line.startswith("# PYSYRENN"): reading = True elif line.startswith("# END"): reading = False elif line.startswith("#"): pass elif reading: INSTALL_REQUIRES.append(line.strip().split("==")[0]) ################################################################### HERE = os.path.abspath(os.path.dirname(__file__)) def read(*parts): """ Build an absolute path from *parts* and and return the contents of the resulting file. Assume UTF-8 encoding. """ with codecs.open(os.path.join(HERE, *parts), "rb", "utf-8") as f: return f.read() META_FILE = read(META_PATH) def find_meta(meta): """Extract __*meta*__ from META_FILE. """ meta_match = re.search( r"^__{meta}__ = ['\"]([^'\"]*)['\"]".format(meta=meta), META_FILE, re.M ) if meta_match: return meta_match.group(1) raise RuntimeError("Unable to find __{meta}__ string.".format(meta=meta)) if __name__ == "__main__": setup( name=NAME, description=find_meta("description"), license=find_meta("license"), url=find_meta("uri"), version=find_meta("version"), author=find_meta("author"), author_email=find_meta("email"), maintainer=find_meta("author"), maintainer_email=find_meta("email"), keywords=KEYWORDS, long_description=read("README.md"), long_description_content_type="text/markdown", packages=PACKAGES, package_dir={"": "."}, package_data={"": ["pysyrenn/**/*.py"]}, zip_safe=False, classifiers=CLASSIFIERS, install_requires=INSTALL_REQUIRES, )
30.803922
77
0.595799
8ac046daf66291ca73b420ce81a183abc787e157
51
py
Python
neptune/generated/swagger_client/path_constants.py
jiji-online/neptune-cli
50cf680a80d141497f9331ab7cdaee49fcb90b0c
[ "Apache-2.0" ]
null
null
null
neptune/generated/swagger_client/path_constants.py
jiji-online/neptune-cli
50cf680a80d141497f9331ab7cdaee49fcb90b0c
[ "Apache-2.0" ]
null
null
null
neptune/generated/swagger_client/path_constants.py
jiji-online/neptune-cli
50cf680a80d141497f9331ab7cdaee49fcb90b0c
[ "Apache-2.0" ]
null
null
null
REST_PATH = u"" WS_PATH = u"/api/notifications/v1"
17
34
0.705882
8ac1dd9d7bf008d9dc5cac34b41e0856589877ec
358
py
Python
load/__init__.py
andrewp-as-is/load.py
6ad643d82379a63f9c79d0dd994101ff0b490183
[ "Unlicense" ]
null
null
null
load/__init__.py
andrewp-as-is/load.py
6ad643d82379a63f9c79d0dd994101ff0b490183
[ "Unlicense" ]
null
null
null
load/__init__.py
andrewp-as-is/load.py
6ad643d82379a63f9c79d0dd994101ff0b490183
[ "Unlicense" ]
null
null
null
__all__ = ["load"] import imp import importlib def load(name, path): """Load and initialize a module implemented as a Python source file and return its module object""" if hasattr(importlib, "machinery"): loader = importlib.machinery.SourceFileLoader(name, path) return loader.load_module() return imp.load_source(name, path)
25.571429
103
0.709497
8ac22e55a9c9778c66e3a1d86342cccdc465c6de
4,117
py
Python
pygears/svgen/modules/sieve.py
Risto97/pygears
19393e85101a16762cb3bbbf3010946ef69217f2
[ "MIT" ]
null
null
null
pygears/svgen/modules/sieve.py
Risto97/pygears
19393e85101a16762cb3bbbf3010946ef69217f2
[ "MIT" ]
null
null
null
pygears/svgen/modules/sieve.py
Risto97/pygears
19393e85101a16762cb3bbbf3010946ef69217f2
[ "MIT" ]
null
null
null
import itertools
from pygears.common.sieve import sieve
from pygears.svgen.inst import SVGenInstPlugin
from pygears.svgen.svmod import SVModuleGen
from functools import partial
from pygears.svgen.svgen import SVGenPlugin
from pygears.svgen.util import svgen_visitor
from pygears.core.hier_node import HierVisitorBase
from pygears.svgen.inst import svgen_inst
from pygears.rtl.gear import RTLGearHierVisitor, is_gear_instance


class SVGenSievePlugin(SVGenInstPlugin, SVGenPlugin):
32.674603
83
0.589264
8ac2a36b9aed8734fe00d975f21caf0ecc7d8aef
5,461
py
Python
examples/my_model_test.py
gzpyy/qlib
56fdd028c8296c75f2a32bdb51869f010dd4f6d1
[ "MIT" ]
null
null
null
examples/my_model_test.py
gzpyy/qlib
56fdd028c8296c75f2a32bdb51869f010dd4f6d1
[ "MIT" ]
null
null
null
examples/my_model_test.py
gzpyy/qlib
56fdd028c8296c75f2a32bdb51869f010dd4f6d1
[ "MIT" ]
null
null
null
#encoding=utf-8
import qlib
import pandas as pd
import pickle
import xgboost as xgb
import numpy as np
import re
from qlib.constant import REG_US
from qlib.utils import exists_qlib_data, init_instance_by_config
from qlib.workflow import R
from qlib.workflow.record_temp import SignalRecord, PortAnaRecord
from qlib.utils import flatten_dict
from qlib.data import LocalExpressionProvider
from qlib.data.ops import Operators, OpsList
from qlib.data.base import Feature
from pyecharts import options as opts
from pyecharts.charts import Kline, Line, Grid

from my_data_handler import MyAlphaHandler

# model_file = r'.\mlruns\1\d6536b056ba84a74be6b33971f443cf6\artifacts\trained_model'
model_file = r'.\mlruns\1\148ef1cd7acd48deac3eadc339ad3008\artifacts\trained_model'
with open(model_file, 'rb') as fi:
    model = pickle.load(fi)

exprs, columns = MyAlphaHandler.get_custom_config()

raw_data = pd.read_csv('../stock_data/TSLA.csv', parse_dates=['time'])
raw_data['data_time'] = raw_data['time'].dt.strftime("%Y-%m-%d %H:%M:00")
raw_data.set_index('time', inplace=True)
raw_data["vwap"] = np.nan
raw_data.sort_index(inplace=True)
# print(raw_data)

Operators.register(OpsList + [MyFeature])

obj = dict()
for field in exprs:
    expression = eval(my_parse_field(field))
    series = expression.load('TSLA', "2022-01-02", "2022-02-28", "1min")
    series = series.astype(np.float32)
    obj[field] = series
data = pd.DataFrame(obj)
data.columns = columns

view_time_start = '2022-02-11'
view_time_end = '2022-02-12'

pre_data = raw_data.loc[view_time_start:view_time_end].copy()
pred = model.model.predict(xgb.DMatrix(data.loc[view_time_start:view_time_end]))
pre_data['pred_score'] = pred
records = pre_data.to_dict("records")

cash = 50000
position = {}
hold_thresh = 5
score_thresh = 0.001
x_axises, y_axises, mark_points, money = [], [], [], []
for record in records:
    x_axises.append(record['data_time'])
    y_axises.append([
        record['open'], record['close'], record['low'], record['high']
    ])
    if 'hold_cnt' in position:
        position['hold_cnt'] += 1
    if position and (record['open'] >= position['close'] * 1.01
                     or record['open'] < position['close'] * 0.995
                     or record['pred_score'] < -score_thresh
                     or position['hold_cnt'] >= hold_thresh):
        cash += position['amount'] * record['open']
        position = {}
        # print("sell")
        mark_points.append(opts.MarkPointItem(
            coord=[record['data_time'], record['high']],
            symbol='triangle',
            symbol_size=7,
            itemstyle_opts=opts.ItemStyleOpts(color="green")
        ))
    elif record['pred_score'] > score_thresh and not position:
        position = dict(record)
        position['amount'] = int(cash / position['open'])
        cash -= position['amount'] * position['open']
        # buy
        # print("buy")
        position['hold_cnt'] = 0
        mark_points.append(opts.MarkPointItem(
            coord=[record['data_time'], record['high']],
            symbol='arrow',
            symbol_size=7,
            itemstyle_opts=opts.ItemStyleOpts(color="yellow")
        ))
    cur_money = cash
    if position:
        cur_money += position['amount'] * record['close']
    money.append(cur_money)

if position:
    cash += position['amount'] * records[-1]['close']
print("cash:", cash)

kline_graph = (
    Kline()
    .add_xaxis(x_axises)
    .add_yaxis(
        "kline",
        y_axises,
        markpoint_opts=opts.MarkPointOpts(
            data=mark_points
        ),
    )
    .set_global_opts(
        xaxis_opts=opts.AxisOpts(is_scale=True),
        yaxis_opts=opts.AxisOpts(
            is_scale=True,
            splitarea_opts=opts.SplitAreaOpts(
                is_show=True, areastyle_opts=opts.AreaStyleOpts(opacity=1)
            ),
        ),
        title_opts=opts.TitleOpts(title="%s_%s" % (view_time_start, view_time_end)),
        datazoom_opts=[opts.DataZoomOpts(type_="inside", xaxis_index=[0, 1],)],
    )
)

kline_line = (
    Line()
    .add_xaxis(xaxis_data=x_axises)
    .add_yaxis(
        series_name="cur_money",
        y_axis=money,
        is_smooth=True,
        linestyle_opts=opts.LineStyleOpts(opacity=0.5),
        label_opts=opts.LabelOpts(is_show=False),
        markline_opts=opts.MarkLineOpts(
            data=[opts.MarkLineItem(y=50000)]
        ),
    )
    .set_global_opts(
        xaxis_opts=opts.AxisOpts(
            type_="category",
            grid_index=2,
            axislabel_opts=opts.LabelOpts(is_show=False),
        ),
        yaxis_opts=opts.AxisOpts(
            min_='dataMin'
        )
    )
)
grid_chart = Grid(init_opts=opts.InitOpts(width='2000px', height='900px'))

grid_chart.add(
    kline_graph,
    grid_opts=opts.GridOpts(pos_left="3%", pos_right="10%", height="50%"),
)

grid_chart.add(
    kline_line,
    grid_opts=opts.GridOpts(
        pos_left="3%", pos_right="10%", pos_top="60%", height="30%"
    ),
)
grid_chart.render("kline_markline.html")
33.29878
190
0.655741
8ac2e2407dd1965a468039faf082dce81ec81f6c
109
py
Python
realfastapi/routes/endpoints/default.py
wborbajr/RealFastAPI
d97ca994c4c164387632cda814e80c026435a9f7
[ "MIT" ]
null
null
null
realfastapi/routes/endpoints/default.py
wborbajr/RealFastAPI
d97ca994c4c164387632cda814e80c026435a9f7
[ "MIT" ]
null
null
null
realfastapi/routes/endpoints/default.py
wborbajr/RealFastAPI
d97ca994c4c164387632cda814e80c026435a9f7
[ "MIT" ]
null
null
null
from fastapi import APIRouter

router = APIRouter()
12.111111
29
0.669725
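The default endpoint module above only creates a bare APIRouter; a typical next step is to attach a route to it. The snippet below is a minimal illustration and is not part of the original file — the "/health" path, handler name and payload are hypothetical.

from fastapi import APIRouter

router = APIRouter()


# Hypothetical example route; path and payload are illustrative only.
@router.get("/health")
async def health_check() -> dict:
    """Simple liveness probe returning a static payload."""
    return {"status": "ok"}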
8ac30fc95afe68d34f716111b4aac384fefa954a
2,291
py
Python
graphzoom/embed_methods/dgi/execute.py
junhoher/GraphZoom
5073b49a34badf7bc6c25bd2a6cc6c78b4ee7d5a
[ "MIT" ]
16
2019-10-18T06:31:29.000Z
2021-09-23T12:46:19.000Z
graphzoom/embed_methods/dgi/execute.py
junhoher/GraphZoom
5073b49a34badf7bc6c25bd2a6cc6c78b4ee7d5a
[ "MIT" ]
7
2019-10-18T06:36:32.000Z
2022-02-10T01:37:04.000Z
graphzoom/embed_methods/dgi/execute.py
junhoher/GraphZoom
5073b49a34badf7bc6c25bd2a6cc6c78b4ee7d5a
[ "MIT" ]
4
2019-11-15T12:47:11.000Z
2021-02-15T07:26:24.000Z
import numpy as np
import scipy.sparse as sp
import torch
import torch.nn as nn
import networkx as nx
import time

from embed_methods.dgi.models import DGI, LogReg
from embed_methods.dgi.utils import process
24.634409
109
0.656482
8ac447e8327f451aa635702a06c66e0d74dc0eb1
1,668
py
Python
tools/ci/deploy_to_github_release.py
rodb70/RDMnet
94d17e1dfda2d1f56b120f6342231c43bf6862b0
[ "Apache-2.0" ]
30
2018-07-16T15:54:19.000Z
2021-11-21T21:17:36.000Z
tools/ci/deploy_to_github_release.py
rodb70/RDMnet
94d17e1dfda2d1f56b120f6342231c43bf6862b0
[ "Apache-2.0" ]
27
2019-04-12T22:45:25.000Z
2021-08-13T15:20:04.000Z
tools/ci/deploy_to_github_release.py
rodb70/RDMnet
94d17e1dfda2d1f56b120f6342231c43bf6862b0
[ "Apache-2.0" ]
12
2019-06-28T19:28:58.000Z
2021-11-17T12:10:44.000Z
"""Deploys binaries to a GitHub release given the specified tag name.""" import argparse import os import time from github import Github THIS_FILE_DIRECTORY = os.path.dirname(os.path.realpath(__file__)) GH_REPO_IDENT = "ETCLabs/RDMnet" GH_USERNAME = "svc-etclabs" GH_API_TOKEN = os.getenv("SVC_ETCLABS_REPO_TOKEN") def deploy_binaries(version: str): """Deploys staged binaries to a new GitHub Release.""" g = Github(login_or_token=GH_USERNAME, password=GH_API_TOKEN) repo = g.get_repo(GH_REPO_IDENT) print(f"Waiting for the correct GitHub tag v{version} to become available...") keep_trying = True while keep_trying: for tag in repo.get_tags(): if tag.name == f"v{version}": keep_trying = False # Tag now exists break if keep_trying: time.sleep(5) print(f"Tag v{version} available. Creating release...") new_release = repo.create_git_release( tag=f"v{version}", name=f"RDMnet v{version}", message=f"Automated release of RDMnet for v{version}", ) new_release.upload_asset("RDMnetSetup_x86.msi") new_release.upload_asset("RDMnetSetup_x64.msi") new_release.upload_asset("RDMnet.pkg") if __name__ == "__main__": main()
29.785714
82
0.682854
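The record above calls main(), whose definition is not included in the captured content. Based on the argparse import and the deploy_binaries(version) signature, a plausible shape for it is sketched below — the argument name "version" and the description string are assumptions, not the original code.

import argparse


# Hypothetical reconstruction; the original main() is not present in the record.
def main():
    parser = argparse.ArgumentParser(
        description="Deploy staged binaries to a GitHub release."
    )
    parser.add_argument("version", help="Release version, e.g. 1.2.3 (assumed argument name)")
    args = parser.parse_args()
    deploy_binaries(args.version)  # deploy_binaries is defined in the file above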
8ac489649919e5a666b90d4e91cad4bcbdd5e983
1,513
py
Python
matchms/filtering/add_losses.py
maximskorik/matchms
922f5afaef123a793194bdd74391027477cbb844
[ "Apache-2.0" ]
null
null
null
matchms/filtering/add_losses.py
maximskorik/matchms
922f5afaef123a793194bdd74391027477cbb844
[ "Apache-2.0" ]
null
null
null
matchms/filtering/add_losses.py
maximskorik/matchms
922f5afaef123a793194bdd74391027477cbb844
[ "Apache-2.0" ]
null
null
null
import logging
import numpy
from ..Fragments import Fragments
from ..typing import SpectrumType

logger = logging.getLogger("matchms")


def add_losses(spectrum_in: SpectrumType, loss_mz_from=0.0, loss_mz_to=1000.0) -> SpectrumType:
    """Derive losses based on precursor mass.

    Parameters
    ----------
    spectrum_in:
        Input spectrum.
    loss_mz_from:
        Minimum allowed m/z value for losses. Default is 0.0.
    loss_mz_to:
        Maximum allowed m/z value for losses. Default is 1000.0.
    """
    if spectrum_in is None:
        return None

    spectrum = spectrum_in.clone()

    precursor_mz = spectrum.get("precursor_mz", None)
    if precursor_mz:
        assert isinstance(precursor_mz, (float, int)), ("Expected 'precursor_mz' to be a scalar number.",
                                                        "Consider applying 'add_precursor_mz' filter first.")
        peaks_mz, peaks_intensities = spectrum.peaks.mz, spectrum.peaks.intensities
        losses_mz = (precursor_mz - peaks_mz)[::-1]
        losses_intensities = peaks_intensities[::-1]
        # Add losses which are within given boundaries
        mask = numpy.where((losses_mz >= loss_mz_from) & (losses_mz <= loss_mz_to))
        spectrum.losses = Fragments(mz=losses_mz[mask],
                                    intensities=losses_intensities[mask])
    else:
        logger.warning("No precursor_mz found. Consider applying 'add_precursor_mz' filter first.")

    return spectrum
35.186047
109
0.639128
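As a quick illustration of the filter above, the sketch below builds a small spectrum and derives its losses. It assumes the standard matchms Spectrum constructor and a precursor_mz already present in the metadata; the m/z values, intensities and precursor mass are made up.

import numpy as np
from matchms import Spectrum
from matchms.filtering import add_losses

# Toy spectrum; values are illustrative only.
spectrum = Spectrum(mz=np.array([100.0, 150.0, 200.0]),
                    intensities=np.array([0.7, 1.0, 0.2]),
                    metadata={"precursor_mz": 445.0})

spectrum_with_losses = add_losses(spectrum, loss_mz_from=0.0, loss_mz_to=1000.0)
print(spectrum_with_losses.losses.mz)  # losses = precursor_mz - peak m/z, ascending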
8ac8388c155952144c99a47c3c6e38eeff168835
10,829
py
Python
cornflow_client/schema/dictSchema.py
baobabsoluciones/cornflow-client
f9996f0b841885d26639cb63c8ba6090387de57f
[ "MIT" ]
3
2021-05-12T11:21:26.000Z
2022-02-22T19:23:46.000Z
cornflow_client/schema/dictSchema.py
baobabsoluciones/cornflow-client
f9996f0b841885d26639cb63c8ba6090387de57f
[ "MIT" ]
17
2021-03-14T17:09:46.000Z
2022-02-28T19:12:37.000Z
cornflow_client/schema/dictSchema.py
baobabsoluciones/cornflow-client
f9996f0b841885d26639cb63c8ba6090387de57f
[ "MIT" ]
2
2020-10-03T20:00:19.000Z
2022-03-24T11:52:22.000Z
import re

from .dict_functions import gen_schema, ParameterSchema, sort_dict
from cornflow_client.constants import JSON_TYPES, DATASCHEMA
35.739274
117
0.559793
8ac83a9b0ffc4d89a43ceecc29a99652f8c7e2f2
5,869
py
Python
rspub/util/test/test_resourcefilter.py
EHRI/rspub-core
1f6b0c84825037b7df442ae0d258d5d897ff6905
[ "Apache-2.0" ]
1
2017-02-01T15:03:29.000Z
2017-02-01T15:03:29.000Z
rspub/util/test/test_resourcefilter.py
EHRI/rspub-core
1f6b0c84825037b7df442ae0d258d5d897ff6905
[ "Apache-2.0" ]
3
2017-02-15T12:25:22.000Z
2017-04-10T13:51:54.000Z
rspub/util/test/test_resourcefilter.py
EHRI/rspub-core
1f6b0c84825037b7df442ae0d258d5d897ff6905
[ "Apache-2.0" ]
3
2017-02-15T09:04:39.000Z
2021-06-21T09:01:59.000Z
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import platform
import unittest

import rspub.util.resourcefilter as rf
35.143713
80
0.645596
8ac88b2d708e6c6e6407bbbd9d9661fb3c6143fd
495
py
Python
molecule/ubuntu/tests/test_grafana.py
fiaasco/grafana
6a5963e43033d88b5bb4760d47755da1069ec26b
[ "MIT" ]
null
null
null
molecule/ubuntu/tests/test_grafana.py
fiaasco/grafana
6a5963e43033d88b5bb4760d47755da1069ec26b
[ "MIT" ]
null
null
null
molecule/ubuntu/tests/test_grafana.py
fiaasco/grafana
6a5963e43033d88b5bb4760d47755da1069ec26b
[ "MIT" ]
null
null
null
import os

import testinfra.utils.ansible_runner

testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')


def test_package(host):
    """ check if packages are installed
    """
    assert host.package('grafana').is_installed


def test_service(host):
    """ Testing whether the service is running and enabled
    """
    assert host.service('grafana-server').is_enabled
    assert host.service('grafana-server').is_running
24.75
63
0.739394
8ac941eb3b632a517433fbaf339a5dae04e7e556
6,534
py
Python
heatsink.py
sww1235/heatsink-calc
3f28ac33b629ab5a12ddea4964f6dbe7dbc3e759
[ "MIT" ]
1
2020-11-20T07:09:00.000Z
2020-11-20T07:09:00.000Z
heatsink.py
sww1235/heatsink-calc
3f28ac33b629ab5a12ddea4964f6dbe7dbc3e759
[ "MIT" ]
null
null
null
heatsink.py
sww1235/heatsink-calc
3f28ac33b629ab5a12ddea4964f6dbe7dbc3e759
[ "MIT" ]
null
null
null
"""Class representations of heatsinks.""" import math from scipy import constants as const from materials import Aluminium_6063 as aluminium
44.148649
78
0.520814
8ac9b0e158167d7f3345bc07a8dd57de92905440
66
py
Python
scripts/get_file_name_as_variable.py
amin-henteti/airflow-dags
eb1e9a1a77d3c868e031cbe7420eae952ce5e767
[ "Apache-2.0" ]
null
null
null
scripts/get_file_name_as_variable.py
amin-henteti/airflow-dags
eb1e9a1a77d3c868e031cbe7420eae952ce5e767
[ "Apache-2.0" ]
null
null
null
scripts/get_file_name_as_variable.py
amin-henteti/airflow-dags
eb1e9a1a77d3c868e031cbe7420eae952ce5e767
[ "Apache-2.0" ]
null
null
null
import inspect

foo()
13.2
31
0.621212
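The record above calls foo(), but its definition is not part of the captured content. Given the file name (get_file_name_as_variable.py) and the inspect import, a hypothetical implementation could look like the sketch below — the function name, body and output are assumptions made purely for illustration.

import inspect


def foo():
    # Hypothetical body: report the name of the file containing this function.
    frame = inspect.currentframe()
    print(inspect.getfile(frame))        # path of the current source file
    print(inspect.stack()[0].filename)   # same idea via the call stack


foo()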
8ac9d8732422cf52f01f2fd448863e8bbd5e7b4d
2,879
py
Python
sovrin/test/did/helper.py
sovrin-foundation/old-sovrin
d4e705054b7252c62fea00114060035c6eb314a4
[ "Apache-2.0" ]
3
2017-07-19T14:26:31.000Z
2020-05-16T16:09:37.000Z
sovrin/test/did/helper.py
sovrin-foundation/old-sovrin
d4e705054b7252c62fea00114060035c6eb314a4
[ "Apache-2.0" ]
null
null
null
sovrin/test/did/helper.py
sovrin-foundation/old-sovrin
d4e705054b7252c62fea00114060035c6eb314a4
[ "Apache-2.0" ]
3
2017-10-28T08:19:00.000Z
2021-06-06T10:48:55.000Z
import base58

from plenum.common.signer_did import DidSigner
from plenum.common.verifier import DidVerifier
from plenum.common.eventually import eventually
from plenum.test.helper import assertEquality

from sovrin.common.identity import Identity

MsgForSigning = {'sender': 'Mario', 'msg': 'Lorem ipsum'}
35.109756
79
0.733241
8aca0af3be9ee2ea88050772027c439546656c4a
3,651
py
Python
tests/test_EdiblesSpectrum.py
jancami/edibles
51263b24c5e8aef786692011289b906a810ad2f7
[ "MIT" ]
8
2020-04-15T10:44:48.000Z
2021-06-21T15:58:19.000Z
tests/test_EdiblesSpectrum.py
jancami/edibles
51263b24c5e8aef786692011289b906a810ad2f7
[ "MIT" ]
100
2020-05-08T13:20:41.000Z
2022-01-11T20:04:52.000Z
tests/test_EdiblesSpectrum.py
jancami/edibles
51263b24c5e8aef786692011289b906a810ad2f7
[ "MIT" ]
8
2020-05-27T00:39:39.000Z
2021-06-23T14:07:16.000Z
import astropy
import datetime
import numpy as np

from edibles.utils.edibles_spectrum import EdiblesSpectrum


if __name__ == "__main__":
    filename = "HD170740_w860_redl_20140915_O12.fits"
    testEdiblesSpectrum(filename=filename)
34.443396
80
0.707751
8acad105c230508195bd3af6419dc374a38241b0
6,670
py
Python
swift/common/ondisk.py
citrix-openstack-build/swift
34340ddf49a84f3b3398012c2b60be1215033559
[ "Apache-2.0" ]
1
2016-03-14T23:38:37.000Z
2016-03-14T23:38:37.000Z
swift/common/ondisk.py
vimeo/swift
5eea524d3ea6d29c2b6f34927c0130090e7ed44d
[ "Apache-2.0" ]
null
null
null
swift/common/ondisk.py
vimeo/swift
5eea524d3ea6d29c2b6f34927c0130090e7ed44d
[ "Apache-2.0" ]
null
null
null
# Copyright (c) 2010-2013 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Methods & Attributes for shared 'on-disk' data layouts."""

import os
import sys
import errno

from hashlib import md5
from random import shuffle
from ConfigParser import ConfigParser, NoSectionError, NoOptionError

from swift import gettext_ as _
from swift.common.utils import listdir, quote

# Used by hash_path to offer a bit more security when generating hashes for
# paths. It simply appends this value to all paths; guessing the hash a path
# will end up with would also require knowing this suffix.
_hash_conf = ConfigParser()
HASH_PATH_SUFFIX = ''
HASH_PATH_PREFIX = ''
if _hash_conf.read('/etc/swift/swift.conf'):
    try:
        HASH_PATH_SUFFIX = _hash_conf.get('swift-hash',
                                          'swift_hash_path_suffix')
    except (NoSectionError, NoOptionError):
        pass
    try:
        HASH_PATH_PREFIX = _hash_conf.get('swift-hash',
                                          'swift_hash_path_prefix')
    except (NoSectionError, NoOptionError):
        pass


def hash_path(account, container=None, object=None, raw_digest=False):
    """
    Get the canonical hash for an account/container/object

    :param account: Account
    :param container: Container
    :param object: Object
    :param raw_digest: If True, return the raw version rather than a hex digest
    :returns: hash string
    """
    if object and not container:
        raise ValueError('container is required if object is provided')
    paths = [account]
    if container:
        paths.append(container)
    if object:
        paths.append(object)
    if raw_digest:
        return md5(HASH_PATH_PREFIX + '/' + '/'.join(paths)
                   + HASH_PATH_SUFFIX).digest()
    else:
        return md5(HASH_PATH_PREFIX + '/' + '/'.join(paths)
                   + HASH_PATH_SUFFIX).hexdigest()


def normalize_timestamp(timestamp):
    """
    Format a timestamp (string or numeric) into a standardized
    xxxxxxxxxx.xxxxx (10.5) format.

    Note that timestamps using values greater than or equal to November 20th,
    2286 at 17:46 UTC will use 11 digits to represent the number of seconds.

    :param timestamp: unix timestamp
    :returns: normalized timestamp as a string
    """
    return "%016.05f" % (float(timestamp))


def validate_device_partition(device, partition):
    """
    Validate that a device and a partition are valid and won't lead to
    directory traversal when used.

    :param device: device to validate
    :param partition: partition to validate
    :raises: ValueError if given an invalid device or partition
    """
    invalid_device = False
    invalid_partition = False
    if not device or '/' in device or device in ['.', '..']:
        invalid_device = True
    if not partition or '/' in partition or partition in ['.', '..']:
        invalid_partition = True

    if invalid_device:
        raise ValueError('Invalid device: %s' % quote(device or ''))
    elif invalid_partition:
        raise ValueError('Invalid partition: %s' % quote(partition or ''))


def storage_directory(datadir, partition, name_hash):
    """
    Get the storage directory

    :param datadir: Base data directory
    :param partition: Partition
    :param name_hash: Account, container or object name hash
    :returns: Storage directory
    """
    return os.path.join(datadir, str(partition), name_hash[-3:], name_hash)
36.054054
79
0.625487
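For a sense of what these helpers produce, the sketch below exercises hash_path, normalize_timestamp and storage_directory as defined in the record above. It assumes the module is importable as swift.common.ondisk (Python 2, per the ConfigParser import); the account, container and object names are placeholders, and the resulting hash depends on the configured HASH_PATH_PREFIX / HASH_PATH_SUFFIX values.

# Illustrative only; not part of the original module.
from swift.common.ondisk import (hash_path, normalize_timestamp,
                                 storage_directory)

name_hash = hash_path('AUTH_test', 'photos', 'cat.jpg')
print(name_hash)                          # hex digest of the object path
print(normalize_timestamp(1300000000))    # '1300000000.00000'
print(storage_directory('objects', 1024, name_hash))
# -> 'objects/1024/<last 3 hash chars>/<full hash>'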
8acb675f5ab5c65b02ffbf255720c5176625a170
1,923
py
Python
.OLD_FILES/dossiers2_old1/custom/cache.py
KIHestad/WoT-Dossier-Parser-Create-Struct
9eadeeead59b7b6cf78dc6a1e1e89fe2dffb260e
[ "MIT" ]
null
null
null
.OLD_FILES/dossiers2_old1/custom/cache.py
KIHestad/WoT-Dossier-Parser-Create-Struct
9eadeeead59b7b6cf78dc6a1e1e89fe2dffb260e
[ "MIT" ]
null
null
null
.OLD_FILES/dossiers2_old1/custom/cache.py
KIHestad/WoT-Dossier-Parser-Create-Struct
9eadeeead59b7b6cf78dc6a1e1e89fe2dffb260e
[ "MIT" ]
2
2021-11-10T19:12:57.000Z
2022-03-13T10:04:48.000Z
# uncompyle6 version 2.11.3
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.10 (default, May 23 2015, 09:40:32) [MSC v.1500 32 bit (Intel)]
# Embedded file name: scripts/common/dossiers2/custom/cache.py
import nations
from items import vehicles
_g_cache = {}
40.0625
148
0.693708
8acb71f44d08977a58d847a4d25a262b4cc3e603
35,471
py
Python
src/parser.py
harkiratbehl/PyGM
e0a4e0b865afb607dfa0525ca386bfbe77bb6508
[ "MIT" ]
2
2019-02-13T11:30:08.000Z
2021-02-14T04:20:44.000Z
src/parser.py
harkiratbehl/PyGM
e0a4e0b865afb607dfa0525ca386bfbe77bb6508
[ "MIT" ]
null
null
null
src/parser.py
harkiratbehl/PyGM
e0a4e0b865afb607dfa0525ca386bfbe77bb6508
[ "MIT" ]
null
null
null
#!/usr/bin/python from code import TreeNode from code import ThreeAddressCode from lexer import tokens from random import * from symbol_table import SymbolTable from symbol_table import SymbolTableNode import logging import ply.lex as lex import ply.yacc as yacc import sys from codegen import convert_tac from code import Code from codegen import generate_assembly three_addr_code = ThreeAddressCode() assembly_code = Code() parsed = [] symbol_table = SymbolTable() var_list = [] generated = {'temp': [], 'scope': ['scope_0'], 'label': [], 'str_list': []} precedence = ( ('left','IDENTIFIER'), ('right','ASSIGN_OP'), ('left','COMMA'), ('left','LSQUARE'), ('left','RSQUARE'), ('left','LCURLY'), ('left','RCURLY'), ('left','DDD'), ('left','DOT'), ('left','SEMICOLON'), ('left','COLON'), ('left','SINGLE_QUOTES'), ('left','DOUBLE_QUOTES'), ('left','DECIMAL_LIT'), ('left','OCTAL_LIT'), ('left','HEX_LIT'), ('left','FLOAT_LIT'), ('left','STRING_LIT'), ('left','NEWLINE'), ('left','BREAK'), ('left','CONTINUE'), ('left','RETURN'), ('left','RROUND'), ('left','LROUND'), ('left', 'OR_OR'), ('left', 'AMP_AMP'), ('left', 'EQ_EQ', 'NOT_EQ','LT','LT_EQ','GT','GT_EQ'), ('left', 'PLUS', 'MINUS','OR','CARET'), ('left', 'STAR', 'DIVIDE','MODULO','AMP','AND_OR','LS','RS'), ) def p_SourceFile(p): '''SourceFile : PACKAGE IDENTIFIER SEMICOLON ImportDeclList TopLevelDeclList ''' parsed.append(p.slice) # TODO: Ignoring package name and Imports for now p[0] = p[5] var_list = symbol_table.make_var_list() three_addr_code = convert_tac(p[0].TAC) symbol_table.fill_next_use(three_addr_code) assembly_code = generate_assembly(three_addr_code,var_list,symbol_table) # p[0].TAC.print_code() # three_addr_code.print_code() assembly_code.print_code() # symbol_table.print_symbol_table() return def p_ImportDeclList(p): '''ImportDeclList : ImportDecl SEMICOLON ImportDeclList | empty ''' parsed.append(p.slice) # TODO: Ignoring Imports for now return def p_TopLevelDeclList(p): '''TopLevelDeclList : TopLevelDecl SEMICOLON TopLevelDeclList | empty ''' parsed.append(p.slice) if len(p) == 4: if p[3] != None: p[0] = TreeNode('TopLevelDeclList', 0, 'INT', 0, [p[1]] + p[3].children, p[1].TAC) p[0].TAC.append_TAC(p[3].TAC) else: p[0] = TreeNode('TopLevelDeclList', 0, 'INT', 0, [p[1]], p[1].TAC) return def p_TopLevelDecl(p): '''TopLevelDecl : Declaration | FunctionDecl ''' parsed.append(p.slice) p[0] = p[1] return def p_ImportDecl(p): '''ImportDecl : IMPORT LROUND ImportSpecList RROUND | IMPORT ImportSpec ''' parsed.append(p.slice) # TODO: Ignoring Imports for now return def p_ImportSpecList(p): '''ImportSpecList : ImportSpec SEMICOLON ImportSpecList | empty ''' parsed.append(p.slice) # TODO: Ignoring Imports for now return def p_ImportSpec(p): '''ImportSpec : DOT string_lit | IDENTIFIER string_lit | empty string_lit ''' parsed.append(p.slice) # TODO: Ignoring Imports for now return def p_Block(p): '''Block : LCURLY ScopeStart StatementList ScopeEnd RCURLY ''' parsed.append(p.slice) p[0] = p[3] p[0].data = p[2].data p[0].name = 'Block' return def p_ScopeStart(p): '''ScopeStart : empty ''' parsed.append(p.slice) symbol_table.add_scope(gen('scope')) p[0] = TreeNode('ScopeStart', symbol_table.current_scope, 'None') return def p_ScopeEnd(p): '''ScopeEnd : empty ''' parsed.append(p.slice) symbol_table.end_scope() return def p_StatementList(p): '''StatementList : Statement SEMICOLON StatementList | empty ''' parsed.append(p.slice) if len(p) == 4: p[0] = TreeNode('StatementList', 0, 'INT', 0, [p[1].data] + p[3].children, p[1].TAC) p[0].TAC.append_TAC(p[3].TAC) 
else: p[0] = TreeNode('StatementList', 0, 'INT') return def p_Statement(p): '''Statement : Declaration | SimpleStmt | ReturnStmt | Block | IfStmt | SwitchStmt | ForStmt | BreakStmt | ContinueStmt | GotoStmt | PrintIntStmt | PrintStrStmt ''' parsed.append(p.slice) p[0] = p[1] p[0].name = 'Statement' return def p_PrintIntStmt(p): '''PrintIntStmt : PRINTLN LROUND IDENTIFIER RROUND | PRINTLN LROUND int_lit RROUND ''' if hasattr(p[3], 'name') and p[3].name == 'int_lit': p[0] = p[3] # p[0].isLvalue = 0 else: p[0] = TreeNode('IDENTIFIER', p[3], 'INT', 1, []) p[0].TAC.add_line(['print_int', check_variable(p[0]), '', '']) p[0].name = 'PrintIntStmt' return def p_PrintStrStmt(p): '''PrintStrStmt : PRINTLN LROUND string_lit RROUND ''' p[0] = p[3] name = symbol_table.current_scope + '_' + gen('str_list') parametersNode = SymbolTableNode(p[3].data, p[3].input_type) newNode = SymbolTableNode(name, p[3].input_type, parameters = [parametersNode]) symbol_table.add_var(newNode) p[0].TAC.add_line(['print_str', name, '', '']) p[0].name = 'PrintStrStmt' return def p_Declaration(p): '''Declaration : ConstDecl | TypeDecl | VarDecl ''' parsed.append(p.slice) p[0] = p[1] p[0].name = 'Declaration' return def p_ConstDecl(p): '''ConstDecl : CONST LROUND ConstSpecList RROUND | CONST ConstSpec ''' parsed.append(p.slice) return def p_ConstSpecList(p): '''ConstSpecList : empty | ConstSpecList ConstSpec SEMICOLON ''' parsed.append(p.slice) return def p_ConstSpec(p): '''ConstSpec : IDENTIFIER | IdentifierList | IDENTIFIER EQ Expression | IdentifierList EQ ExpressionList | IDENTIFIER Type EQ Expression | IdentifierList Type EQ ExpressionList ''' parsed.append(p.slice) return def p_IdentifierList(p): '''IdentifierList : IDENTIFIER COMMA IdentifierBotList ''' parsed.append(p.slice) node = TreeNode('IDENTIFIER', p[1], 'INT', 1) p[0] = TreeNode('IdentifierList', 0, 'None', 0, [node] + p[3].children, p[3].TAC) return def p_IdentifierBotList(p): '''IdentifierBotList : IDENTIFIER COMMA IdentifierBotList | IDENTIFIER ''' parsed.append(p.slice) if len(p) == 2: node = TreeNode('IDENTIFIER', p[1], 'INT', 1) p[0] = TreeNode('IdentifierBotList', 0, 'None', 0, [node]) elif len(p) == 4: node = TreeNode('IDENTIFIER', p[1], 'INT', 1) p[0] = TreeNode('IdentifierBotList', 0, 'None', 0, [node] + p[3].children, p[3].TAC) return def p_ExpressionList(p): '''ExpressionList : Expression COMMA ExpressionBotList ''' parsed.append(p.slice) p[0] = TreeNode('ExpressionList', 0, 'INT', 0, [p[1]] + p[3].children, p[1].TAC) p[0].TAC.append_TAC(p[3].TAC) return def p_ExpressionBotList(p): '''ExpressionBotList : Expression COMMA ExpressionBotList | Expression ''' parsed.append(p.slice) if len(p) == 2: p[0] = TreeNode('ExpressionBotList', 0, 'INT', 0, [p[1]], p[1].TAC) elif len(p) == 4: p[0] = TreeNode('ExpressionBotList', 0, 'INT', 0, [p[1]] + p[3].children, p[1].TAC) p[0].TAC.append_TAC(p[3].TAC) return def p_TypeDecl(p): '''TypeDecl : TYPE TypeSpecTopList ''' parsed.append(p.slice) return def p_TypeSpecTopList(p): '''TypeSpecTopList : TypeSpec | LROUND TypeSpecList RROUND ''' parsed.append(p.slice) return def p_TypeSpecList(p): '''TypeSpecList : empty | TypeSpecList TypeSpec SEMICOLON ''' parsed.append(p.slice) return def p_TypeSpec(p): '''TypeSpec : AliasDecl | TypeDef ''' parsed.append(p.slice) return def p_AliasDecl(p): '''AliasDecl : IDENTIFIER EQ Type ''' parsed.append(p.slice) return def p_TypeDef(p): '''TypeDef : IDENTIFIER Type ''' parsed.append(p.slice) return def p_Type(p): '''Type : TypeLit | StandardTypes | LROUND Type RROUND ''' 
parsed.append(p.slice) if len(p) == 2: p[0] = p[1] else: p[0] = p[2] p[0].name = 'Type' return def p_StandardTypes(p): '''StandardTypes : PREDEFINED_TYPES ''' parsed.append(p.slice) p[0] = TreeNode('StandardTypes', p[1], 'NONE') return def p_TypeLit(p): '''TypeLit : ArrayType | StructType | FunctionType | PointerType ''' parsed.append(p.slice) p[0] = p[1] p[0].name = 'TypeLit' return def p_PointerType(p): '''PointerType : STAR Type ''' parsed.append(p.slice) return def p_ArrayType(p): '''ArrayType : LSQUARE ArrayLength RSQUARE Type ''' parsed.append(p.slice) p[0] = TreeNode('ArrayType', p[2].data, p[4].data) return def p_ArrayLength(p): '''ArrayLength : Expression ''' parsed.append(p.slice) p[0] = p[1] p[0].name = 'ArrayLength' return def p_StructType(p): '''StructType : STRUCT LCURLY FieldDeclList RCURLY ''' parsed.append(p.slice) return def p_FieldDeclList(p): '''FieldDeclList : empty | FieldDeclList FieldDecl SEMICOLON ''' parsed.append(p.slice) return def p_FieldDecl(p): '''FieldDecl : IdentifierList Type TagTop | IDENTIFIER Type TagTop ''' parsed.append(p.slice) return def p_TagTop(p): '''TagTop : empty | Tag ''' parsed.append(p.slice) return def p_Tag(p): '''Tag : string_lit ''' parsed.append(p.slice) return def p_FunctionType(p): '''FunctionType : FUNC Signature ''' parsed.append(p.slice) return def p_Signature(p): '''Signature : Parameters | Parameters Result ''' parsed.append(p.slice) p[0] = p[1] p[0].name = 'Signature' s = 'scope_' + str(len(generated['scope'])) symbol_table.new_scope(s) for child in p[1].children: symbol_table.add_identifier(child, s) newNode = SymbolTableNode(s + '_' + child.data, child.input_type) symbol_table.add_var(newNode, s) # symbol_table.print_symbol_table() if len(p) == 2: p[0].input_type = TreeNode('Result', 0, 'None') else: p[0].input_type = p[2] return def p_Result(p): '''Result : Parameters | Type ''' parsed.append(p.slice) if p[1].name == 'Type': p[0] = TreeNode('Result', 1, 'None', 0, [p[1]]) else: p[0] = p[1] p[0].name = 'Result' return def p_Parameters(p): '''Parameters : LROUND RROUND | LROUND ParameterList RROUND ''' parsed.append(p.slice) if len(p) == 3: p[0] = TreeNode('Parameters', 0, 'None') else: p[0] = p[2] p[0].name = 'Parameters' return def p_ParameterList(p): '''ParameterList : ParameterDecl | ParameterList COMMA ParameterDecl ''' parsed.append(p.slice) if len(p) == 2: p[0] = p[1] p[0].name = 'ParameterList' elif len(p) == 4: p[0] = TreeNode('ParameterList', p[1].data + p[3].data, 'None', 0, p[1].children + p[3].children, p[1].TAC) p[0].TAC.append_TAC(p[3].TAC) return def p_ParameterDecl(p): '''ParameterDecl : IdentifierList Type | IDENTIFIER Type | Type ''' parsed.append(p.slice) p[0] = TreeNode('ParameterDecl', 0, 'None') if len(p) == 3: if hasattr(p[1], 'name') and p[1].name == 'IdentifierList': for node in p[1].children: p[0].data += 1 node.input_type = p[2].data p[0].children += [node] else: node = TreeNode('IDENTIFIER', p[1], p[2].data, 1) p[0].data += 1 p[0].children += [node] else: p[0].data += 1 p[0].children += [p[1]] return def p_VarDecl(p): '''VarDecl : VAR VarSpecTopList ''' parsed.append(p.slice) p[0] = p[2] p[0].name = 'VarDecl' return def p_VarSpecTopList(p): '''VarSpecTopList : VarSpec | LROUND VarSpecList RROUND ''' parsed.append(p.slice) if len(p) == 2: p[0] = p[1] else: p[0] = p[2] p[0].name = 'VarSpecTopList' return def p_VarSpecList(p): '''VarSpecList : empty | VarSpecList VarSpec SEMICOLON ''' return def p_VarSpec(p): '''VarSpec : IDENTIFIER Type | IDENTIFIER EQ Expression | IDENTIFIER Type EQ Expression | 
IdentifierList Type | IdentifierList EQ ExpressionList | IdentifierList Type EQ ExpressionList ''' # Insert into symbol table p[0] = TreeNode('VarSpec', 0, 'NONE') if hasattr(p[1], 'name') and p[1].name == 'IdentifierList': zero_val = TreeNode('decimal_lit', 0, 'INT') # l1 = len(p[1].children) # if len(p) == 3: # expr_list = TreeNode('Expr_List', 0, 'NONE', 0, [zero_val] * l1) # elif len(p) == 4: # expr_list = p[3] # elif len(p) == 5: # expr_list = p[4] # l2 = len(expr_list.children) # p[0].TAC.append_TAC(expr_list.TAC) # p[0].TAC.append_TAC(p[1].TAC) # if l1 == l2: # for i in range(l1): # p[0].TAC.add_line(['=', p[1].children[i], expr_list.children[i].data, '']) # else: # print_error("Variable Declaration mismatch: " + str(l1) + " identifier(s) but " + str(l2) + " value(s)") else: p[1] = TreeNode('IDENTIFIER',p[1],'INT',1) if p[2].input_type != 'NONE': # array case # p[2].print_node() if symbol_table.add_identifier(p[1], size = p[2].data) == False: print_error("Unable to add to SymbolTable") return name = symbol_table.search_identifier(p[1].data) newNode = SymbolTableNode(name, p[1].input_type,size = p[2].data) symbol_table.add_var(newNode) p[0] = TreeNode('VarSpec',p[1].data,'INT') # expr = TreeNode('Expr', 0, 'NONE') # if len(p) == 4: # expr = p[3] # p[0].TAC.append_TAC(p[3].TAC) # p[0].TAC.add_line(['=', check_variable(p[1]), check_variable(expr), '']) # elif len(p) == 5: # expr = p[4] # p[0].TAC.append_TAC(p[4].TAC) # p[0].TAC.add_line(['=', check_variable(p[1]), check_variable(expr), '']) return def p_FunctionDecl(p): '''FunctionDecl : FUNC FunctionName Signature | FUNC FunctionName Signature FunctionBody ''' parsed.append(p.slice) # symbol_table.print_symbol_table() p[0] = TreeNode('FunctionDecl', 0, 'INT') # print symbol_table.current_scope # p[4].TAC.print_code() symbol_table.add_function(p[2].data, p[3].input_type, p[3].children) if len(p) == 5: noOfParams = 0 for f in symbol_table.symbol_table[symbol_table.current_scope]['functions']: if f.name == p[2].data: noOfParams = len(f.parameters) p[0].TAC.add_line(['func', check_variable(p[2]), str(noOfParams), '']) for child in reversed(p[3].children): p[0].TAC.add_line(['getparam', p[4].data + '_' + child.data, '', '']) p[0].TAC.add_line(['stack_push', '', '', '']) p[0].TAC.append_TAC(p[4].TAC) return def p_FunctionName(p): '''FunctionName : IDENTIFIER ''' parsed.append(p.slice) p[0] = TreeNode('FunctionName', p[1], 'INT', 1) return def p_FunctionBody(p): '''FunctionBody : Block ''' parsed.append(p.slice) p[0] = p[1] p[0].name = 'FunctionBody' return def p_SimpleStmt(p): '''SimpleStmt : Expression | Assignment | ShortVarDecl | IncDecStmt ''' parsed.append(p.slice) p[0] = p[1] p[0].name = 'SimpleStmt' return def p_IncDecStmt(p): '''IncDecStmt : Expression PLUS_PLUS | Expression MINUS_MINUS ''' parsed.append(p.slice) one_val = TreeNode('IncDecStmt', '1', 'INT') p[0] = p[1] if p[1].isLvalue == 1: if p[2] == '++': p[0].TAC.add_line(['+', check_variable(p[1]), check_variable(p[1]), one_val.data]) else: p[0].TAC.add_line(['-', check_variable(p[1]), check_variable(p[1]), one_val.data]) else: print_error("Lvalue required") p[0].name = 'IncDecStmt' return def p_ShortVarDecl(p): '''ShortVarDecl : ExpressionList ASSIGN_OP ExpressionList | Expression ASSIGN_OP Expression ''' parsed.append(p.slice) # TODO: Add in symbol table p[0] = TreeNode('ShortVarDecl', 0, 'INT') if p[1].name == 'ExpressionList': l1 = len(p[1].children) l2 = len(p[3].children) p[0].TAC.append_TAC(p[3].TAC) p[0].TAC.append_TAC(p[1].TAC) if l1 == l2: for i in range(l1): if 
p[1].children[i].isLvalue == 0: print_error("Lvalue required") return else: if symbol_table.add_identifier(p[1].children[i]) == False: print_error("Unable to add to SymbolTable") return p[0].TAC.add_line([p[2], check_variable(p[1].children[i]), check_variable(p[3].children[i]), '']) else: print_error("Variable Declaration mismatch: " + str(l1) + " identifier(s) but " + str(l2) + " value(s)") elif p[1].name == 'Expression': if p[1].isLvalue == 0: print_error("Lvalue required") return else: if symbol_table.add_identifier(p[1]) == False: print_error("Unable to add to SymbolTable") return p[0].TAC.append_TAC(p[3].TAC) p[0].TAC.append_TAC(p[1].TAC) p[0].TAC.add_line([p[2], check_variable(p[1]), check_variable(p[3]), '']) return def p_Assignment(p): '''Assignment : ExpressionList assign_op ExpressionList | Expression assign_op Expression ''' parsed.append(p.slice) p[0] = TreeNode('Assignment', 0, 'INT') if p[1].name == 'ExpressionList': l1 = len(p[1].children) l2 = len(p[3].children) p[0].TAC.append_TAC(p[3].TAC) p[0].TAC.append_TAC(p[1].TAC) if l1 == l2: for i in range(l1): if p[1].children[i].isLvalue == 0: print_error("Lvalue required") return else: if symbol_table.search_identifier(p[1].children[i].data) == False and p[1].children[i].data not in generated['temp']: print_error("Variable " + p[1].children[i].data + " is undefined") return if p[3].children[i].isLvalue == 1 and symbol_table.search_identifier(p[3].children[i].data) == False and p[3].children[i].data not in generated['temp']: print_error("Variable " + p[3].children[i].data + " is undefined") return p[0].TAC.add_line([p[2].data, check_variable(p[1].children[i]), check_variable(p[3].children[i]), '']) else: print_error("Variable Declaration mismatch: " + str(l1) + " identifier(s) but " + str(l2) + " value(s)") elif p[1].name == 'Expression': if p[1].isLvalue == 0: print_error("Lvalue required") return else: if symbol_table.search_identifier(p[1].data) == False and p[1].data not in generated['temp']: print_error("Variable " + p[1].data + " is undefined") return if p[3].isLvalue == 1 and symbol_table.search_identifier(p[3].data) == False and p[3].data not in generated['temp']: print_error("Variable " + p[3].data + " is undefined") return # print symbol_table.current_scope p[0].TAC.append_TAC(p[3].TAC) p[0].TAC.append_TAC(p[1].TAC) p[0].TAC.add_line([p[2].data, check_variable(p[1]), check_variable(p[3]), '']) return def p_assign_op(p): '''assign_op : EQ | PLUS_EQ | MINUS_EQ | OR_EQ | CARET_EQ | STAR_EQ | DIVIDE_EQ | MODULO_EQ | LS_EQ | RS_EQ | AMP_EQ | AND_OR_EQ ''' parsed.append(p.slice) p[0] = TreeNode('assign_op', p[1], 'OPERATOR') return def p_IfStmt(p): '''IfStmt : IF Expression Block | IF Expression Block ELSE elseTail ''' parsed.append(p.slice) if len(p) == 4: l1 = gen('label') p[0] = TreeNode('IfStmt', 0, 'INT') p[0].TAC.append_TAC(p[2].TAC) p[0].TAC.add_line(['ifgotoeq', check_variable(p[2]), '0', l1]) p[0].TAC.append_TAC(p[3].TAC) p[0].TAC.add_line(['label', l1, '', '']) if len(p) == 6: l1 = gen('label') l2 = gen('label') p[0] = TreeNode('IfStmt', 0, 'INT') p[0].TAC.append_TAC(p[2].TAC) p[0].TAC.add_line(['ifgotoeq', check_variable(p[2]), '0', l1]) p[0].TAC.append_TAC(p[3].TAC) p[0].TAC.add_line(['goto', l2, '', '']) p[0].TAC.add_line(['label', l1, '', '']) p[0].TAC.append_TAC(p[5].TAC) p[0].TAC.add_line(['label', l2, '', '']) return def p_elseTail(p): '''elseTail : IfStmt | Block ''' parsed.append(p.slice) p[0] = p[1] p[0].name = 'elseTail' return def p_SwitchStmt(p): '''SwitchStmt : ExprSwitchStmt ''' parsed.append(p.slice) 
p[0] = TreeNode('SwitchStmt', 0, 'INT', 0, [], p[1].TAC) return def p_ExprSwitchStmt(p): '''ExprSwitchStmt : SWITCH SimpleStmt SEMICOLON LCURLY ScopeStart ExprCaseClauseList ScopeEnd RCURLY | SWITCH SimpleStmt SEMICOLON Expression LCURLY ScopeStart ExprCaseClauseList ScopeEnd RCURLY | SWITCH LCURLY ScopeStart ExprCaseClauseList ScopeEnd RCURLY | SWITCH Expression LCURLY ScopeStart ExprCaseClauseList ScopeEnd RCURLY ''' parsed.append(p.slice) if len(p) == 8: l1 = gen('label') l2 = gen('label') p[0] = TreeNode('ExprSwitchStmt', 0, 'INT') p[0].TAC.append_TAC(p[2].TAC) t1 = TreeNode('IDENTIFIER', gen('temp'), 'INT', 1) p[0].TAC.add_line(['=', check_variable(t1) , check_variable(p[2]), '']) p[0].TAC.append_TAC(p[5].data) for i in range(len(p[5].children)): p[0].TAC.add_line(['ifgotoeq', check_variable(t1), p[5].children[i][0], p[5].children[i][1]]) p[0].TAC.add_line(['goto', l2, '', '']) for i in range(p[5].TAC.length()): if i in p[5].TAC.leaders[1:]: p[0].TAC.add_line(['goto', l2, '', '']) p[0].TAC.add_line(p[5].TAC.code[i]) p[0].TAC.add_line(['label', l2, '', '']) return def p_ExprCaseClauseList(p): '''ExprCaseClauseList : empty | ExprCaseClauseList ExprCaseClause ''' parsed.append(p.slice) TAC1 = ThreeAddressCode() TAC2 = ThreeAddressCode() if len(p) == 3: TAC1 = p[1].data TAC2 = p[2].data p[0] = TreeNode('ExprCaseClauseList', TAC1, 'INT', 0, p[1].children + p[2].children, p[1].TAC) p[0].TAC.add_leader(p[0].TAC.length()) p[0].TAC.append_TAC(p[2].TAC) p[0].data.append_TAC(TAC2) else: p[0] = TreeNode('ExprCaseClauseList', TAC1, 'INT') return def p_ExprCaseClause(p): '''ExprCaseClause : ExprSwitchCase COLON StatementList ''' parsed.append(p.slice) l1 = gen('label') p[0] = TreeNode('ExprCaseClause', 0, 'INT') # p[0].TAC.append_TAC(p[1].TAC) p[0].TAC.add_line(['label', l1, '', '']) # p[0].TAC.add_line(['ifgotoneq', p[1].children, p[1].children, l1]) p[0].TAC.append_TAC(p[3].TAC) p[0].children = [[p[1].data,l1]] p[0].data = p[1].TAC return def p_ExprSwitchCase(p): '''ExprSwitchCase : CASE ExpressionList | DEFAULT | CASE Expression ''' parsed.append(p.slice) p[0] = TreeNode('ExprSwitchCase', 0, 'INT') if len(p) == 3: p[0].data = p[2].data p[0].TAC = p[2].TAC return def p_ForStmt(p): '''ForStmt : FOR Expression Block | FOR Block ''' parsed.append(p.slice) p[0] = TreeNode('ForStmt', 0, 'INT') if len(p) == 4: l1 = gen('label') l2 = gen('label') p[0].TAC.add_line(['label', l1, '', '']) p[0].TAC.append_TAC(p[2].TAC) p[0].TAC.add_line(['ifgotoeq',check_variable(p[2]), '0', l2]) p[0].TAC.append_TAC(p[3].TAC) p[0].TAC.add_line(['goto', l1, '', '']) p[0].TAC.add_line(['label', l2, '', '']) if len(p) == 3: l1 = gen('label') # l2 = gen('label') p[0].TAC.add_line(['label', l1, '', '']) p[0].TAC.append_TAC(p[2].TAC) p[0].TAC.add_line(['goto', l1, '', '']) # p[0].TAC.add_line([l2]) return def p_ReturnStmt(p): '''ReturnStmt : RETURN | RETURN Expression | RETURN ExpressionList ''' parsed.append(p.slice) if len(p) == 2: p[0] = TreeNode('ReturnStmt', 0, 'None') p[0].TAC.add_line(['return', '', '', '']) if len(p) == 3: if p[2].name == 'Expression': p[0] = p[2] p[0].name = 'ReturnStmt' p[0].TAC.add_line(['return', check_variable(p[2]), '', '']) return def p_BreakStmt(p): '''BreakStmt : BREAK IDENTIFIER ''' parsed.append(p.slice) return def p_ContinueStmt(p): '''ContinueStmt : CONTINUE IDENTIFIER ''' parsed.append(p.slice) return def p_GotoStmt(p): '''GotoStmt : GOTO IDENTIFIER ''' parsed.append(p.slice) return def p_Expression(p): '''Expression : UnaryExpr | Expression OR_OR Expression | Expression AMP_AMP Expression 
| Expression EQ_EQ Expression | Expression NOT_EQ Expression | Expression LT Expression | Expression LT_EQ Expression | Expression GT Expression | Expression GT_EQ Expression | Expression PLUS Expression | Expression MINUS Expression | Expression OR Expression | Expression CARET Expression | Expression STAR Expression | Expression DIVIDE Expression | Expression MODULO Expression | Expression LS Expression | Expression RS Expression | Expression AMP Expression | Expression AND_OR Expression ''' parsed.append(p.slice) if len(p) == 2: p[0] = p[1] elif len(p) == 4: p[0] = TreeNode('IDENTIFIER', gen('temp'), 'INT', 1, [], p[1].TAC) p[0].TAC.append_TAC(p[3].TAC) p[0].TAC.add_line([p[2],check_variable(p[0]), check_variable(p[1]), check_variable(p[3])]) p[0].name = 'Expression' return def p_UnaryExpr(p): '''UnaryExpr : PrimaryExpr | unary_op UnaryExpr ''' parsed.append(p.slice) if len(p) == 2: p[0] = p[1] elif len(p) == 3: p[0] = TreeNode('IDENTIFIER', gen('temp'), 'INT', 1) p[0].TAC.add_line([check_variable(p[1]), check_variable(p[0]), check_variable(p[2]), '']) p[0].name = 'UnaryExpr' return def p_unary_op(p): '''unary_op : PLUS | MINUS | NOT | CARET | STAR | AMP | LT_MINUS ''' parsed.append(p.slice) p[0] = TreeNode('unary_op', p[1], 'OPERATOR') return def p_PrimaryExpr(p): '''PrimaryExpr : Operand | IDENTIFIER | PrimaryExpr Selector | PrimaryExpr Index | PrimaryExpr Arguments ''' parsed.append(p.slice) if len(p) == 2: if p.slice[1].type == 'IDENTIFIER': p[0] = TreeNode('IDENTIFIER', p[1], 'INT', 1) elif p[1].name == 'Operand': p[0] = p[1] elif len(p) == 3: if p[2].name == 'Index': p[0] = TreeNode('IDENTIFIER', p[1].data, 'INT', 1, p[2].data) elif p[2].name == 'Arguments': p[0] = TreeNode('IDENTIFIER', gen('temp'), 'INT', 1) p[0].TAC.append_TAC(p[1].TAC) p[0].TAC.append_TAC(p[2].TAC) # p[1].print_node() func = check_variable(p[1]).split("_") scope, funcName = "_".join(func[:2]), "_".join(func[2:]) temp = 0 for f in symbol_table.symbol_table[scope]['functions']: if f.name == funcName: temp = len(f.parameters) # p[2].print_node() for child in p[2].children: p[0].TAC.add_line(['putparam', check_variable(child), '', '']) if temp != p[2].data: print_error('Function ' + funcName + ' requires ' + str(temp) + ' parameters but ' + str(p[2].data) + ' supplied') p[0].TAC.add_line(['call', check_variable(p[1]), str(p[2].data), '']) p[0].TAC.add_line(['return_value', check_variable(p[0]), '', '']) p[0].name = 'PrimaryExpr' return def p_Operand(p): '''Operand : Literal | LROUND Expression RROUND ''' parsed.append(p.slice) if len(p) == 2: p[0] = p[1] else: p[0] = p[2] p[0].name = 'Operand' return def p_Literal(p): '''Literal : BasicLit | FunctionLit ''' parsed.append(p.slice) p[0] = p[1] p[0].name = 'Literal' return def p_BasicLit(p): '''BasicLit : int_lit | float_lit | string_lit | rune_lit ''' parsed.append(p.slice) p[0] = p[1] p[0].name = 'BasicLit' return def p_int_lit(p): '''int_lit : decimal_lit | octal_lit | hex_lit ''' parsed.append(p.slice) p[0] = p[1] p[0].name = 'int_lit' return def p_decimal_lit(p): '''decimal_lit : DECIMAL_LIT ''' parsed.append(p.slice) p[0] = TreeNode('decimal_lit', p[1], 'INT') return def p_octal_lit(p): '''octal_lit : OCTAL_LIT ''' parsed.append(p.slice) p[0] = TreeNode('octal_lit', p[1], 'OCT') return def p_hex_lit(p): '''hex_lit : HEX_LIT ''' parsed.append(p.slice) p[0] = TreeNode('hex_lit', p[1], 'HEX') return def p_float_lit(p): '''float_lit : FLOAT_LIT ''' parsed.append(p.slice) p[0] = TreeNode('float_lit', p[1], 'FLOAT') return def p_FunctionLit(p): '''FunctionLit : FUNC 
Signature FunctionBody ''' parsed.append(p.slice) # Anonymous Function # Not implemented yet return def p_Selector(p): '''Selector : DOT IDENTIFIER ''' parsed.append(p.slice) return def p_Index(p): '''Index : LSQUARE Expression RSQUARE ''' parsed.append(p.slice) p[0] = p[2] p[0].name = 'Index' return def p_Arguments(p): '''Arguments : LROUND RROUND | LROUND ExpressionList RROUND | LROUND Expression RROUND | LROUND Type RROUND | LROUND Type COMMA ExpressionList RROUND | LROUND Type COMMA Expression RROUND ''' # print p.slice parsed.append(p.slice) if len(p) == 3: p[0] = TreeNode('Arguments', 0, 'None') if len(p) == 4: if p[2].name == 'Expression': p[0] = TreeNode('Arguments', 1, 'None', 0, [p[2]], p[2].TAC) if p[2].name == 'ExpressionList': p[0] = p[2] p[0].name = 'Arguments' p[0].data = len(p[2].children) return def p_string_lit(p): '''string_lit : STRING_LIT ''' parsed.append(p.slice) p[0] = TreeNode('string_lit', p[1], 'STRING') return def p_rune_lit(p): '''rune_lit : RUNE_LIT ''' parsed.append(p.slice) p[0] = TreeNode('rune_lit', p[1], 'RUNE') return def p_empty(p): 'empty :' pass # Standard Logger logging.basicConfig( level = logging.DEBUG, filename = "parselog.txt", filemode = "w", format = "%(filename)10s:%(lineno)4d:%(message)s" ) log = logging.getLogger() yacc.yacc(debug=True, debuglog=log) input_file = sys.argv[1] import os if os.path.isfile(input_file) is False: print('Input file ' + input_file + ' does not exist') sys.exit(1) input_code = open(input_file, 'r').read() if input_code[len(input_code)-1] != '\n': input_code += '\n' yacc.parse(input_code, debug=log, tracking=True)
29.050778
172
0.5361
8acb8cd4dc2d6e35f38c30493bd708782f4c4cfd
3,400
py
Python
render_video.py
frostburn/branch-cut-mandelbrot
26c4d2db75a32b9190d40a09ebfb8a67fc4829e8
[ "MIT" ]
null
null
null
render_video.py
frostburn/branch-cut-mandelbrot
26c4d2db75a32b9190d40a09ebfb8a67fc4829e8
[ "MIT" ]
null
null
null
render_video.py
frostburn/branch-cut-mandelbrot
26c4d2db75a32b9190d40a09ebfb8a67fc4829e8
[ "MIT" ]
null
null
null
import argparse
import imageio
import progressbar
from _routines import ffi, lib
from pylab import *
from random import Random

RESOLUTIONS = {
    "2160p": (3840, 2160),
    "1440p": (2560, 1440),
    "1080p": (1920, 1080),
    "720p": (1280, 720),
    "480p": (854, 480),
    "360p": (640, 360),
    "240p": (426, 240),
    "160p": (284, 160),
    "80p": (142, 80),
    "40p": (71, 40),
}


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Render audio samples')
    parser.add_argument('outfile', type=str, help='Output file name')
    parser.add_argument('--params', type=str, help='Parameter YAML file name')
    parser.add_argument('--resolution', choices=RESOLUTIONS.keys(), help='Video and simulation grid resolution')
    parser.add_argument('--width', type=int, help='Video and simulation grid width', metavar='W')
    parser.add_argument('--height', type=int, help='Video and simulation grid height', metavar='H')
    parser.add_argument('--framerate', type=int, help='Video frame rate')
    parser.add_argument('--video-quality', type=int, help='Video quality factor')
    parser.add_argument('--video-duration', type=float, help='Duration of video to render in seconds')
    args = parser.parse_args()

    if not args.framerate:
        args.framerate = 24
    if not args.video_quality:
        args.video_quality = 10

    writer = imageio.get_writer(args.outfile, fps=args.framerate, quality=args.video_quality, macro_block_size=1)

    # Compute derived parameters
    if args.resolution:
        width, height = RESOLUTIONS[args.resolution]
        if not args.width:
            args.width = width
        if not args.height:
            args.height = height

    if (not args.width) or (not args.height):
        raise ValueError("Invalid or missing resolution")

    if not args.video_duration:
        raise ValueError("Missing video duration")

    args.aspect = args.width / args.height
    args.num_frames = int(args.video_duration * args.framerate)
    args.dt = 1.0 / args.num_frames

    do_render(args, writer)

    writer.close()
34.693878
113
0.627059
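The script above parses its arguments and then hands off to do_render(args, writer), whose definition is not part of the captured content. The skeleton below is a hypothetical illustration of how such a function could consume args and the imageio writer — the frame loop and the gray test pattern are assumptions; the real implementation presumably renders frames through the _routines FFI module.

import numpy as np


def do_render(args, writer):
    # Hypothetical skeleton, for illustration only.
    for frame_index in range(args.num_frames):
        t = frame_index * args.dt  # normalised frame time in [0, 1)
        # Placeholder frame: a gray level that ramps with time, sized to the grid.
        frame = np.full((args.height, args.width, 3), int(255 * t), dtype=np.uint8)
        writer.append_data(frame)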
8accb038864b63aa2e837e9fa4c1312771a520cd
1,238
py
Python
tests/mqtt/test_subscribe.py
smurfix/hbmqtt
914440cd18b43fbe56496a73bb1259132811c539
[ "MIT" ]
null
null
null
tests/mqtt/test_subscribe.py
smurfix/hbmqtt
914440cd18b43fbe56496a73bb1259132811c539
[ "MIT" ]
null
null
null
tests/mqtt/test_subscribe.py
smurfix/hbmqtt
914440cd18b43fbe56496a73bb1259132811c539
[ "MIT" ]
null
null
null
# Copyright (c) 2015 Nicolas JOUANIN
#
# See the file license.txt for copying permission.
import anyio
import unittest

from hbmqtt.mqtt.subscribe import SubscribePacket, SubscribePayload
from hbmqtt.mqtt.packet import PacketIdVariableHeader
from hbmqtt.mqtt.constants import QOS_1, QOS_2
from hbmqtt.adapters import BufferAdapter
35.371429
83
0.671244
8ace9182901a299fe90834f06095914657f35b9c
8,392
py
Python
examples/cmrc2018_example/main.trainer.py
fangd123/TextBrewer
866f4363d9bd964f00aa60b0db5e9252a7905448
[ "Apache-2.0" ]
1,121
2020-03-02T02:24:00.000Z
2022-03-31T06:33:49.000Z
examples/cmrc2018_example/main.trainer.py
fangd123/TextBrewer
866f4363d9bd964f00aa60b0db5e9252a7905448
[ "Apache-2.0" ]
85
2020-03-04T09:46:17.000Z
2022-03-30T09:33:35.000Z
examples/cmrc2018_example/main.trainer.py
fangd123/TextBrewer
866f4363d9bd964f00aa60b0db5e9252a7905448
[ "Apache-2.0" ]
200
2020-03-02T07:23:21.000Z
2022-03-30T08:26:24.000Z
import logging
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
    datefmt='%Y/%m/%d %H:%M:%S',
    level=logging.INFO,
)
logger = logging.getLogger("Main")

import os,random
import numpy as np
import torch
from processing import convert_examples_to_features, read_squad_examples
from processing import ChineseFullTokenizer
from pytorch_pretrained_bert.my_modeling import BertConfig
from optimization import BERTAdam
import config
from utils import read_and_convert, divide_parameters
from modeling import BertForQASimple, BertForQASimpleAdaptorTraining
from textbrewer import DistillationConfig, TrainingConfig, BasicTrainer
from torch.utils.data import TensorDataset, DataLoader, RandomSampler
from functools import partial
from train_eval import predict


if __name__ == "__main__":
    main()
45.362162
130
0.674094
8ad1153bc4951b73c09bcd9a5a044f2aeefb38fb
13,832
py
Python
gym/gym/benchmarks/__init__.py
youngwoon/DnC-RL-Tensorflow
02dc2750fe301a01e3bd68b1e56fc7fd754c2f3f
[ "MIT" ]
9
2019-02-01T22:45:57.000Z
2022-01-08T16:13:24.000Z
gym/gym/benchmarks/__init__.py
youngwoon/DnC-RL-Tensorflow
02dc2750fe301a01e3bd68b1e56fc7fd754c2f3f
[ "MIT" ]
null
null
null
gym/gym/benchmarks/__init__.py
youngwoon/DnC-RL-Tensorflow
02dc2750fe301a01e3bd68b1e56fc7fd754c2f3f
[ "MIT" ]
1
2020-04-07T20:09:48.000Z
2020-04-07T20:09:48.000Z
# EXPERIMENTAL: all may be removed soon from gym.benchmarks import scoring from gym.benchmarks.registration import benchmark_spec, register_benchmark, registry, register_benchmark_view # imports used elsewhere register_benchmark( id='Atari200M', scorer=scoring.TotalReward(), name='Atari200M', view_group="Atari", description='7 Atari games, with pixel observations', tasks=[ { 'env_id': 'BeamRiderNoFrameskip-v4', 'trials': 2, 'max_timesteps': int(2e8), 'reward_floor': 363.9, 'reward_ceiling': 60000.0, }, { 'env_id': 'BreakoutNoFrameskip-v4', 'trials': 2, 'max_timesteps': int(2e8), 'reward_floor': 1.7, 'reward_ceiling': 800.0, }, { 'env_id': 'EnduroNoFrameskip-v4', 'trials': 2, 'max_timesteps': int(2e8), 'reward_floor': 0.0, 'reward_ceiling': 5000.0, }, { 'env_id': 'PongNoFrameskip-v4', 'trials': 2, 'max_timesteps': int(2e8), 'reward_floor': -20.7, 'reward_ceiling': 21.0, }, { 'env_id': 'QbertNoFrameskip-v4', 'trials': 2, 'max_timesteps': int(2e8), 'reward_floor': 163.9, 'reward_ceiling': 40000.0, }, { 'env_id': 'SeaquestNoFrameskip-v4', 'trials': 2, 'max_timesteps': int(2e8), 'reward_floor': 68.4, 'reward_ceiling': 100000.0, }, { 'env_id': 'SpaceInvadersNoFrameskip-v4', 'trials': 2, 'max_timesteps': int(2e8), 'reward_floor': 148.0, 'reward_ceiling': 30000.0, }, ]) register_benchmark( id='Atari40M', scorer=scoring.TotalReward(), name='Atari40M', view_group="Atari", description='7 Atari games, with pixel observations', tasks=[ { 'env_id': 'BeamRiderNoFrameskip-v4', 'trials': 2, 'max_timesteps': int(4e7), 'reward_floor': 363.9, 'reward_ceiling': 60000.0, }, { 'env_id': 'BreakoutNoFrameskip-v4', 'trials': 2, 'max_timesteps': int(4e7), 'reward_floor': 1.7, 'reward_ceiling': 800.0, }, { 'env_id': 'EnduroNoFrameskip-v4', 'trials': 2, 'max_timesteps': int(4e7), 'reward_floor': 0.0, 'reward_ceiling': 5000.0, }, { 'env_id': 'PongNoFrameskip-v4', 'trials': 2, 'max_timesteps': int(4e7), 'reward_floor': -20.7, 'reward_ceiling': 21.0, }, { 'env_id': 'QbertNoFrameskip-v4', 'trials': 2, 'max_timesteps': int(4e7), 'reward_floor': 163.9, 'reward_ceiling': 40000.0, }, { 'env_id': 'SeaquestNoFrameskip-v4', 'trials': 2, 'max_timesteps': int(4e7), 'reward_floor': 68.4, 'reward_ceiling': 100000.0, }, { 'env_id': 'SpaceInvadersNoFrameskip-v4', 'trials': 2, 'max_timesteps': int(4e7), 'reward_floor': 148.0, 'reward_ceiling': 30000.0, } ]) register_benchmark( id='AtariExploration40M', scorer=scoring.TotalReward(), name='AtariExploration40M', view_group="Atari", description='7 Atari games, with pixel observations', tasks=[ { 'env_id': 'FreewayNoFrameskip-v4', 'trials': 2, 'max_timesteps': int(4e7), 'reward_floor': 0.1, 'reward_ceiling': 31.0, }, { 'env_id': 'GravitarNoFrameskip-v4', 'trials': 2, 'max_timesteps': int(4e7), 'reward_floor': 245.5, 'reward_ceiling': 1000.0, }, { 'env_id': 'MontezumaRevengeNoFrameskip-v4', 'trials': 2, 'max_timesteps': int(4e7), 'reward_floor': 25.0, 'reward_ceiling': 10000.0, }, { 'env_id': 'PitfallNoFrameskip-v4', 'trials': 2, 'max_timesteps': int(4e7), 'reward_floor': -348.8, 'reward_ceiling': 1000.0, }, { 'env_id': 'PrivateEyeNoFrameskip-v4', 'trials': 2, 'max_timesteps': int(4e7), 'reward_floor': 662.8, 'reward_ceiling': 100.0, }, { 'env_id': 'SolarisNoFrameskip-v4', 'trials': 2, 'max_timesteps': int(4e7), 'reward_floor': 2047.2, 'reward_ceiling': 5000.0, }, { 'env_id': 'VentureNoFrameskip-v4', 'trials': 2, 'max_timesteps': int(4e7), 'reward_floor': 18.0, 'reward_ceiling': 100.0, } ]) register_benchmark( id='ClassicControl2-v0', name='ClassicControl2', view_group="Control", 
description='Simple classic control benchmark', scorer=scoring.ClipTo01ThenAverage(), tasks=[ {'env_id': 'CartPole-v0', 'trials': 1, 'max_timesteps': 2000, }, {'env_id': 'Pendulum-v0', 'trials': 1, 'max_timesteps': 1000, }, ]) register_benchmark( id='ClassicControl-v0', name='ClassicControl', view_group="Control", description='Simple classic control benchmark', scorer=scoring.ClipTo01ThenAverage(), tasks=[ {'env_id': 'CartPole-v1', 'trials': 3, 'max_timesteps': 100000, 'reward_floor': 0.0, 'reward_ceiling': 500.0, }, {'env_id': 'Acrobot-v1', 'trials': 3, 'max_timesteps': 100000, 'reward_floor': -500.0, 'reward_ceiling': 0.0, }, {'env_id': 'MountainCar-v0', 'trials': 3, 'max_timesteps': 100000, 'reward_floor': -200.0, 'reward_ceiling': -100.0, }, {'env_id': 'Pendulum-v0', 'trials': 3, 'max_timesteps': 200000, 'reward_floor': -1400.0, 'reward_ceiling': 0.0, }, ]) ### Autogenerated by tinkerbell.benchmark.convert_benchmark.py register_benchmark( id='Mujoco10M-v0', name='Mujoco10M', view_group="Control", description='Mujoco benchmark with 10M steps', scorer=scoring.ClipTo01ThenAverage(), tasks=[ {'env_id': 'Ant-v1', 'trials': 1, 'max_timesteps': 1000000, }, {'env_id': 'Hopper-v1', 'trials': 1, 'max_timesteps': 1000000, }, {'env_id': 'Humanoid-v1', 'trials': 1, 'max_timesteps': 1000000, }, {'env_id': 'HumanoidStandup-v1', 'trials': 1, 'max_timesteps': 1000000, }, {'env_id': 'Walker2d-v1', 'trials': 1, 'max_timesteps': 1000000, } ]) register_benchmark( id='Mujoco1M-v0', name='Mujoco1M', view_group="Control", description='Mujoco benchmark with 1M steps', scorer=scoring.ClipTo01ThenAverage(), tasks=[ {'env_id': 'HalfCheetah-v1', 'trials': 3, 'max_timesteps': 1000000, 'reward_floor': -280.0, 'reward_ceiling': 4000.0, }, {'env_id': 'Hopper-v1', 'trials': 3, 'max_timesteps': 1000000, 'reward_floor': 16.0, 'reward_ceiling': 4000.0, }, {'env_id': 'InvertedDoublePendulum-v1', 'trials': 3, 'max_timesteps': 1000000, 'reward_floor': 53.0, 'reward_ceiling': 10000.0, }, {'env_id': 'InvertedPendulum-v1', 'trials': 3, 'max_timesteps': 1000000, 'reward_floor': 5.6, 'reward_ceiling': 1000.0, }, {'env_id': 'Reacher-v1', 'trials': 3, 'max_timesteps': 1000000, 'reward_floor': -43.0, 'reward_ceiling': -0.5, }, {'env_id': 'Swimmer-v1', 'trials': 3, 'max_timesteps': 1000000, 'reward_floor': 0.23, 'reward_ceiling': 500.0, }, {'env_id': 'Walker2d-v1', 'trials': 3, 'max_timesteps': 1000000, 'reward_floor': 1.6, 'reward_ceiling': 5500.0, } ]) register_benchmark( id='MinecraftEasy-v0', name='MinecraftEasy', view_group="Minecraft", description='Minecraft easy benchmark', scorer=scoring.ClipTo01ThenAverage(), tasks=[ {'env_id': 'MinecraftBasic-v0', 'trials': 2, 'max_timesteps': 600000, 'reward_floor': -2200.0, 'reward_ceiling': 1000.0, }, {'env_id': 'MinecraftDefaultFlat1-v0', 'trials': 2, 'max_timesteps': 2000000, 'reward_floor': -500.0, 'reward_ceiling': 0.0, }, {'env_id': 'MinecraftTrickyArena1-v0', 'trials': 2, 'max_timesteps': 300000, 'reward_floor': -1000.0, 'reward_ceiling': 2800.0, }, {'env_id': 'MinecraftEating1-v0', 'trials': 2, 'max_timesteps': 300000, 'reward_floor': -300.0, 'reward_ceiling': 300.0, }, ]) register_benchmark( id='MinecraftMedium-v0', name='MinecraftMedium', view_group="Minecraft", description='Minecraft medium benchmark', scorer=scoring.ClipTo01ThenAverage(), tasks=[ {'env_id': 'MinecraftCliffWalking1-v0', 'trials': 2, 'max_timesteps': 400000, 'reward_floor': -100.0, 'reward_ceiling': 100.0, }, {'env_id': 'MinecraftVertical-v0', 'trials': 2, 'max_timesteps': 900000, 'reward_floor': -1000.0, 
'reward_ceiling': 8040.0, }, {'env_id': 'MinecraftMaze1-v0', 'trials': 2, 'max_timesteps': 600000, 'reward_floor': -1000.0, 'reward_ceiling': 1000.0, }, {'env_id': 'MinecraftMaze2-v0', 'trials': 2, 'max_timesteps': 2000000, 'reward_floor': -1000.0, 'reward_ceiling': 1000.0, }, ]) register_benchmark( id='MinecraftHard-v0', name='MinecraftHard', view_group="Minecraft", description='Minecraft hard benchmark', scorer=scoring.ClipTo01ThenAverage(), tasks=[ {'env_id': 'MinecraftObstacles-v0', 'trials': 1, 'max_timesteps': 900000, 'reward_floor': -1000.0, 'reward_ceiling': 2080.0, }, {'env_id': 'MinecraftSimpleRoomMaze-v0', 'trials': 1, 'max_timesteps': 900000, 'reward_floor': -1000.0, 'reward_ceiling': 4160.0, }, {'env_id': 'MinecraftAttic-v0', 'trials': 1, 'max_timesteps': 600000, 'reward_floor': -1000.0, 'reward_ceiling': 1040.0, }, {'env_id': 'MinecraftComplexityUsage-v0', 'trials': 1, 'max_timesteps': 600000, 'reward_floor': -1000.0, 'reward_ceiling': 1000.0, }, ]) register_benchmark( id='MinecraftVeryHard-v0', name='MinecraftVeryHard', view_group="Minecraft", description='Minecraft very hard benchmark', scorer=scoring.ClipTo01ThenAverage(), tasks=[ {'env_id': 'MinecraftMedium-v0', 'trials': 2, 'max_timesteps': 1800000, 'reward_floor': -10000.0, 'reward_ceiling': 16280.0, }, {'env_id': 'MinecraftHard-v0', 'trials': 2, 'max_timesteps': 2400000, 'reward_floor': -10000.0, 'reward_ceiling': 32640.0, }, ]) register_benchmark( id='MinecraftImpossible-v0', name='MinecraftImpossible', view_group="Minecraft", description='Minecraft impossible benchmark', scorer=scoring.ClipTo01ThenAverage(), tasks=[ {'env_id': 'MinecraftDefaultWorld1-v0', 'trials': 2, 'max_timesteps': 6000000, 'reward_floor': -1000.0, 'reward_ceiling': 1000.0, }, ]) bandit_tasks = [] for n_arms in [5, 10, 50]: for n_episodes in [10, 100, 500]: bandit_tasks.append({ 'env_id': 'BernoulliBandit-{k}.arms-{n}.episodes-v0'.format(k=n_arms, n=n_episodes), 'trials': 1, 'max_timesteps': 10 ** 9, 'reward_floor': 0, 'reward_ceiling': n_episodes, }) register_benchmark( id='BernoulliBandit-v0', name='BernoulliBandit', description='Multi-armed Bernoulli bandits', scorer=scoring.ClipTo01ThenAverage(num_episodes=1000), tasks=bandit_tasks ) tabular_mdp_tasks = [] for n_states in [10]: for n_actions in [5]: for episode_length in [10]: for n_episodes in [10, 25, 50, 75, 100]: tabular_mdp_tasks.append({ 'env_id': 'RandomTabularMDP-{s}.states-{a}.actions-{t}.timesteps-{n}.episodes-v0'.format( s=n_states, a=n_actions, t=episode_length, n=n_episodes, ), 'trials': 1, 'max_timesteps': 10 ** 9, 'reward_floor': 0, 'reward_ceiling': episode_length * n_episodes * 2, }) register_benchmark( id='RandomTabularMDP-v0', name='RandomTabularMDP', description='Random tabular MDPs', scorer=scoring.ClipTo01ThenAverage(num_episodes=1000), tasks=tabular_mdp_tasks )
28.286299
135
0.510049
8ad19946c7489c1b3a99e589e195e1b73244786f
9,538
py
Python
hypnettorch/data/timeseries/preprocess_audioset.py
pennfranc/hypnettorch
69d4c455028289ebe3d040af0955d909a9fef3ae
[ "Apache-2.0" ]
31
2021-10-20T19:38:41.000Z
2022-03-28T08:23:32.000Z
hypnettorch/data/timeseries/preprocess_audioset.py
pennfranc/hypnettorch
69d4c455028289ebe3d040af0955d909a9fef3ae
[ "Apache-2.0" ]
2
2022-02-14T08:25:43.000Z
2022-03-26T18:10:52.000Z
hypnettorch/data/timeseries/preprocess_audioset.py
pennfranc/hypnettorch
69d4c455028289ebe3d040af0955d909a9fef3ae
[ "Apache-2.0" ]
5
2021-11-04T10:10:29.000Z
2022-03-21T09:00:22.000Z
#!/usr/bin/env python3 # Copyright 2020 Benjamin Ehret # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # title :data/timeseries/preprocess_audioset.py # author :be # contact :[email protected] # created :31/03/2020 # version :1.0 # python_version :3.7 """ Script to structure the audioset dataset, which can then be used via :class:`data.timeseries.audioset_data.AudiosetData`. The result of this script is available at https://www.dropbox.com/s/07dfeeuf5aq4w1h/audioset_data_balanced?dl=0 If you want to recreate or modify this dataset, download the audioset data from https://research.google.com/audioset/download.html and extract the tar.gz into the following folder: ``datasets/sequential/audioset/audioset_download``. Subsequently executing this script will create a pickle file containing the 100 class subset of audioset used in this study. The dataset is stored in tensorflow files. Since we work with pytorch and there is no utility to read tensorflow files, we extract the data and safe them as numpy arrays in a pickle file. Furthermore the data are preprocessed to fit our continual learning experiments. The original dataset provides three subsets with different compositions of samples and classes. Since we only work with a subset of classes and samples, we load all available data and then filter and structure them according to our criteria. We use the same criteria as Kemker et al. Classes and samples are restricted in the following way: Classes: - no restriction according to ontology file (parsed from ontology.json) - no parent / child relationship (parsed from ontology.json) - confidence level > 70% (data was copied from website into txt file) - number of samples: we only take classes that have more samples than a certain threshold Samples: - since samples can have multiple labels, we only use samples which only belong to one of the classes we use - we exclude samples that don't have the full length of 10 seconds The chosen classes and samples are then split into train and test data and saved to a pickle file. """ import numpy as np import pickle import tensorflow as tf import os import json from warnings import warn warn('The script was created for one time usage and has to be adapted when ' + 'reusing it. All paths specified here are absolute.') # Tensorflow eager mode needs to be enabled for dataset mapping to work! tf.enable_eager_execution() # Set paths and parameters data_dir = '../../datasets/sequential/audioset/' download_dir = os.path.join(data_dir,'audioset_download') fpath_conf_data = os.path.join(data_dir, 'confidence_data.csv') fpath_label_inds = os.path.join(data_dir, 'class_labels_indices.csv') fpath_ontology = os.path.join(data_dir, 'ontology.json') target_path = os.path.join(data_dir, 'audioset_data_balanced.pickle') n_classes = 100 n_sample = 1000 test_frac = 0.20 ### Load data by serializing files and applying decode function. def decode(serialized_example): """Decode data from TFRecord files. 
Args: serialized_example: serialized_example as created by tf.data.TFRecordDataset Returns: (tuple): Tuple containing: - **audio** (numpy.ndarray): Array of shape (10,128) representing one sample with 10 timesteps and 128 features - **label** (numpy.ndarray): Array of shape (1,) containing the class of the corresponding sample """ sequence_features = { 'audio_embedding': tf.FixedLenSequenceFeature([], tf.string), } context_features = { 'start_time_seconds': tf.FixedLenFeature([], tf.float32), 'labels': tf.VarLenFeature(dtype=tf.int64), } context_parsed, sequence_parsed = tf.parse_single_sequence_example( serialized_example, sequence_features=sequence_features, context_features=context_features ) audio = tf.decode_raw(sequence_parsed['audio_embedding'], tf.uint8) label = tf.cast(context_parsed['labels'], tf.int64) return audio, label # Apply decode function to all dataset entries using map function. # Take files from all three data sets since we repartition anyway. fpaths = [] for path, subdirs, files in os.walk(download_dir): for name in files: if 'tfrecord' in name: fpaths.append(os.path.join(path, name)) # Create dataset and decode dataset = tf.data.TFRecordDataset(fpaths) dataset = dataset.map(decode) # Extract data to lists x = [] y = [] for d in dataset: x.append(d[0].numpy()) y.append(tf.sparse.to_dense(tf.sparse.reorder(d[1])).numpy()) ### Filter classes as described above. # Parse confidence values conf_data = {} with open(fpath_conf_data) as f: for line in f: tokens = line.split() # parse confidence c = 0 for t in tokens: if t.find('%') is not -1: c = int(t[:-1]) # parse class name n = '' for t in tokens: if t.find('%') == -1 and t != '-': if n == '': n = t else: n = n+' '+t else: break conf_data.update({n:c}) # Parse class numbers from label csv file l = -1 csv_data = {} with open(fpath_label_inds) as f: for line in f: if l == -1: l += 1 continue tokens = line.split('"') n = tokens[1] csv_data.update({n:l}) l +=1 # Parse ontology info from json file with open(fpath_ontology, 'r') as f: json_data = json.load(f) # Put all data into a single list. all_data = [] for j in json_data: if j['name'] in conf_data.keys(): class_info = { 'name' : j['name'], 'restricted' : j['restrictions'] != [], 'has_child' : j['child_ids'] != [], 'conf' : conf_data[j['name']], 'id' : csv_data[j['name']] } all_data.append(class_info) # Filter classes classes = [] for c in all_data: if not c['restricted'] and not c['has_child'] and c['conf'] >= 70: classes.append(c['id']) ### Filter the samples. # Find samples that belong to only one of the potential classes. # We also exclude some samples that don't have data for the full 10 seconds. 
# First discard labels that are not in the set of potential classes y_fil = [] for i in range(len(y)): y_fil.append( np.intersect1d(y[i],classes)) # Find samples with one label n_labels = np.asarray([len(y) for y in y_fil]) single_label_idx = np.where(n_labels == 1)[0] # Find samples that are shorter than 10 seconds (to be excluded) too_short = np.where(np.asarray([x.shape[0] for x in x]) != 10)[0] # Construct the set of valid samples valid_idx = np.setdiff1d(single_label_idx,too_short) # Count number of valid samples for potential classes y_single = np.asarray([y_fil[i][0] for i in valid_idx]) num_samples = [len(np.where(y_single == i)[0]) for i in classes] # Take the n classes with the highest number of samples n_sample_cutoff = np.sort(num_samples)[-n_classes] class_idx = np.where(np.asarray(num_samples) >= n_sample_cutoff)[0] our_classes = [classes[i] for i in class_idx] ### Filter the data again according the the chosen classes y_fil = [] for i in range(len(y)): y_fil.append( np.intersect1d(y[i],our_classes)) # Find samples that belong to only one of the potential classes n_labels = np.asarray([len(y) for y in y_fil]) single_label_idx = np.where(n_labels == 1)[0] # Find samples that dont are shorter than 10 seconds too_short = np.where(np.asarray([x.shape[0] for x in x]) != 10)[0] # Construct the set of valid samples valid_idx = np.setdiff1d(single_label_idx,too_short) # Restructure data and relabel the classes to be between 0 and n_classes y_data = [y_fil[i][0] for i in valid_idx] y_data = [np.where(np.asarray(our_classes) == i)[0][0] for i in y_data] y_data = np.asarray(y_data) x_data = [x[i] for i in valid_idx] x_data = np.stack(x_data) ### Split into test and train and restrict the number of samples per class np.random.seed(42) n_train = int(n_sample * (1-test_frac)) n_test = int(n_sample * test_frac) train_ind = [] test_ind = [] for i in range(n_classes): sample_idx = np.where(y_data == i)[0] n_sample_class = len(sample_idx) rand_idx = np.arange(n_sample_class) np.random.shuffle(rand_idx) train_ind.extend(sample_idx[rand_idx[0:n_train]]) test_ind.extend(sample_idx[rand_idx[n_train:n_sample]]) train_ind = np.asarray(train_ind) test_ind = np.asarray(test_ind) sub_sample_idx = np.hstack((train_ind,test_ind)) x_data_sub = x_data[sub_sample_idx,:,:] y_data_sub = y_data[sub_sample_idx] train_ind = np.arange(0,len(train_ind)) test_ind = np.arange(len(train_ind),len(train_ind)+len(test_ind)) ### Save data with open(target_path, 'wb') as f: pickle.dump([x_data_sub, y_data_sub, train_ind, test_ind], f)
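The script above ends by pickling the filtered features, labels and index arrays in a fixed order. As a usage note, a consumer can unpack them the same way; the sketch below assumes the pickle sits in the current working directory (the script itself writes it under its data_dir as audioset_data_balanced.pickle).

# Sketch of reading the pickle written above; the unpacking order matches the
# script's own pickle.dump([x_data_sub, y_data_sub, train_ind, test_ind], f).
import pickle

with open('audioset_data_balanced.pickle', 'rb') as f:
    x_data_sub, y_data_sub, train_ind, test_ind = pickle.load(f)

x_train, y_train = x_data_sub[train_ind], y_data_sub[train_ind]
x_test, y_test = x_data_sub[test_ind], y_data_sub[test_ind]
# With n_classes=100, n_sample=1000 and test_frac=0.2 this gives
# (80000, 10, 128) and (20000, 10, 128).
print(x_train.shape, x_test.shape)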
32.889655
80
0.68463
8ad1bc3d3021f0317b2b318ccf03355bd2585dd4
13,844
py
Python
Posts/viewsAPI.py
CMPUT404-Fa21-Organization/CMPUT404-Project-Social-Distribution
63c0ba2a03f0b462e3673ce7a4bf6bae7999440c
[ "Apache-2.0" ]
3
2021-12-11T13:43:56.000Z
2022-03-31T02:36:05.000Z
Posts/viewsAPI.py
CMPUT404-Fa21-Organization/CMPUT404-Project-Social-Distribution
63c0ba2a03f0b462e3673ce7a4bf6bae7999440c
[ "Apache-2.0" ]
9
2021-10-01T22:46:57.000Z
2021-12-16T18:01:31.000Z
Posts/viewsAPI.py
CMPUT404-Fa21-Organization/CMPUT404-Project-Social-Distribution
63c0ba2a03f0b462e3673ce7a4bf6bae7999440c
[ "Apache-2.0" ]
2
2021-12-16T16:37:10.000Z
2021-12-16T20:30:12.000Z
from django.conf import settings
from django.core import serializers
from django.utils import timezone
import requests
from Posts.commentModel import Comments
#from Posts.commentView import add_Comment
from rest_framework import status
from rest_framework.decorators import api_view, authentication_classes, permission_classes
from rest_framework.response import Response
from django.shortcuts import HttpResponse, render
from requests import get
from .serializers import CommentSerializer, PostSerializer
from Author.serializers import LikeSerializer
from Author.models import Like
from Author.views import updateForeignAuthors, GetForeignAuthors
from .models import Post, Author
from .form import PostForm
from Posts.commentForm import CommentForm
import json
import uuid
import re
import base64
from django.db.models import Q
import django.core
from permissions import CustomAuthentication, AccessPermission
from django.core.paginator import Paginator
import traceback
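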
38.455556
306
0.621063
8ad1ee45a7daa21c8e394ff77552f61ad841514d
3,753
py
Python
workers/tests/test_array_element.py
Open-EO/openeo-sentinelhub-python-driver
92f990f098065ffb658eba6dca291dd1d5fc70f2
[ "Apache-2.0" ]
2
2019-12-03T12:49:47.000Z
2020-10-25T20:14:39.000Z
workers/tests/test_array_element.py
Open-EO/openeo-sentinelhub-python-driver
92f990f098065ffb658eba6dca291dd1d5fc70f2
[ "Apache-2.0" ]
5
2019-12-03T10:32:48.000Z
2020-10-09T13:07:39.000Z
workers/tests/test_array_element.py
Open-EO/openeo-sentinelhub-python-driver
92f990f098065ffb658eba6dca291dd1d5fc70f2
[ "Apache-2.0" ]
4
2020-03-06T14:51:52.000Z
2020-11-24T10:30:18.000Z
import pytest
import sys, os
import xarray as xr
import numpy as np

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import process
from process._common import ProcessArgumentInvalid, ProcessArgumentRequired


###################################
# tests:
###################################

def test_with_xarray_out_bounds(execute_array_element_process, generate_data):
    """
    Test array_element process with xarray.DataArrays with out of bounds index
    """
    with pytest.raises(ProcessArgumentInvalid) as ex:
        result = execute_array_element_process(index=5)
    assert ex.value.args[0] == "The argument 'index' in process 'array_element' is invalid: Index out of bounds."
40.354839
145
0.662137
8ad221c93a5fce8d825d0b6b80fc2f401b373d9b
7,627
py
Python
gn/gn_to_bp.py
despairblue/esy-skia
1c81aac298602f8e872c1079db92868199b6394f
[ "BSD-3-Clause" ]
2,151
2020-04-18T07:31:17.000Z
2022-03-31T08:39:18.000Z
gn/gn_to_bp.py
despairblue/esy-skia
1c81aac298602f8e872c1079db92868199b6394f
[ "BSD-3-Clause" ]
395
2020-04-18T08:22:18.000Z
2021-12-08T13:04:49.000Z
gn/gn_to_bp.py
despairblue/esy-skia
1c81aac298602f8e872c1079db92868199b6394f
[ "BSD-3-Clause" ]
338
2020-04-18T08:03:10.000Z
2022-03-29T12:33:22.000Z
#!/usr/bin/env python # # Copyright 2016 Google Inc. # # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # Generate Android.bp for Skia from GN configuration. import json import os import pprint import string import subprocess import tempfile import gn_to_bp_utils # First we start off with a template for Android.bp, # with holes for source lists and include directories. bp = string.Template('''// This file is autogenerated by gn_to_bp.py. cc_library_static { name: "libskia", cflags: [ $cflags ], cppflags:[ $cflags_cc ], export_include_dirs: [ $export_includes ], local_include_dirs: [ $local_includes ], srcs: [ $srcs ], arch: { arm: { srcs: [ $arm_srcs ], neon: { srcs: [ $arm_neon_srcs ], }, }, arm64: { srcs: [ $arm64_srcs ], }, mips: { srcs: [ $none_srcs ], }, mips64: { srcs: [ $none_srcs ], }, x86: { srcs: [ $x86_srcs ], cflags: [ // Clang seems to think new/malloc will only be 4-byte aligned // on x86 Android. We're pretty sure it's actually 8-byte // alignment. tests/OverAlignedTest.cpp has more information, // and should fail if we're wrong. "-Wno-over-aligned" ], }, x86_64: { srcs: [ $x86_srcs ], }, }, defaults: ["skia_deps", "skia_pgo", ], } // Build libskia with PGO by default. // Location of PGO profile data is defined in build/soong/cc/pgo.go // and is separate from skia. // To turn it off, set ANDROID_PGO_NO_PROFILE_USE environment variable // or set enable_profile_use property to false. cc_defaults { name: "skia_pgo", pgo: { instrumentation: true, profile_file: "hwui/hwui.profdata", benchmarks: ["hwui", "skia"], enable_profile_use: true, }, } // "defaults" property to disable profile use for Skia tools and benchmarks. cc_defaults { name: "skia_pgo_no_profile_use", defaults: [ "skia_pgo", ], pgo: { enable_profile_use: false, }, } cc_defaults { name: "skia_deps", shared_libs: [ "libEGL", "libGLESv2", "libdng_sdk", "libexpat", "libft2", "libheif", "libicui18n", "libicuuc", "libjpeg", "liblog", "libpiex", "libpng", "libvulkan", "libz", "libcutils", "libnativewindow", ], static_libs: [ "libarect", "libsfntly", "libwebp-decode", "libwebp-encode", ], group_static_libs: true, } cc_defaults { name: "skia_tool_deps", defaults: [ "skia_deps", "skia_pgo_no_profile_use" ], static_libs: [ "libjsoncpp", "libskia", ], cflags: [ "-Wno-unused-parameter", "-Wno-unused-variable", ], } cc_test { name: "skia_dm", defaults: [ "skia_tool_deps" ], local_include_dirs: [ $dm_includes ], srcs: [ $dm_srcs ], shared_libs: [ "libbinder", "libutils", ], } cc_test { name: "skia_nanobench", defaults: [ "skia_tool_deps" ], local_include_dirs: [ $nanobench_includes ], srcs: [ $nanobench_srcs ], data: [ "resources/*", ], }''') # We'll run GN to get the main source lists and include directories for Skia. 
gn_args = { 'is_official_build': 'true', 'skia_enable_tools': 'true', 'skia_enable_skottie': 'false', # requires rapidjson third-party 'skia_use_libheif': 'true', 'skia_use_vulkan': 'true', 'target_cpu': '"none"', 'target_os': '"android"', 'skia_vulkan_header': '"Skia_Vulkan_Android.h"', } js = gn_to_bp_utils.GenerateJSONFromGN(gn_args) srcs = strip_slashes(js['targets']['//:skia']['sources']) cflags = strip_slashes(js['targets']['//:skia']['cflags']) cflags_cc = strip_slashes(js['targets']['//:skia']['cflags_cc']) local_includes = strip_slashes(js['targets']['//:skia']['include_dirs']) export_includes = strip_slashes(js['targets']['//:public']['include_dirs']) defines = [str(d) for d in js['targets']['//:skia']['defines']] dm_srcs = strip_slashes(js['targets']['//:dm']['sources']) dm_includes = strip_slashes(js['targets']['//:dm']['include_dirs']) nanobench_target = js['targets']['//:nanobench'] nanobench_srcs = strip_slashes(nanobench_target['sources']) nanobench_includes = strip_slashes(nanobench_target['include_dirs']) gn_to_bp_utils.GrabDependentValues(js, '//:skia', 'sources', srcs, None) gn_to_bp_utils.GrabDependentValues(js, '//:dm', 'sources', dm_srcs, 'skia') gn_to_bp_utils.GrabDependentValues(js, '//:nanobench', 'sources', nanobench_srcs, 'skia') # skcms is a little special, kind of a second-party library. srcs .add("third_party/skcms/skcms.c") local_includes.add("third_party/skcms") dm_includes .add("third_party/skcms") # No need to list headers. srcs = {s for s in srcs if not s.endswith('.h')} dm_srcs = {s for s in dm_srcs if not s.endswith('.h')} nanobench_srcs = {s for s in nanobench_srcs if not s.endswith('.h')} cflags = gn_to_bp_utils.CleanupCFlags(cflags) cflags_cc = gn_to_bp_utils.CleanupCCFlags(cflags_cc) # We need to add the include path to the vulkan defines and header file set in # then skia_vulkan_header gn arg that is used for framework builds. local_includes.add("platform_tools/android/vulkan") export_includes.add("platform_tools/android/vulkan") here = os.path.dirname(__file__) defs = gn_to_bp_utils.GetArchSources(os.path.join(here, 'opts.gni')) gn_to_bp_utils.WriteUserConfig('include/config/SkUserConfig.h', defines) # Turn a list of strings into the style bpfmt outputs. # OK! We have everything to fill in Android.bp... with open('Android.bp', 'w') as f: print >>f, bp.substitute({ 'export_includes': bpfmt(8, export_includes), 'local_includes': bpfmt(8, local_includes), 'srcs': bpfmt(8, srcs), 'cflags': bpfmt(8, cflags, False), 'cflags_cc': bpfmt(8, cflags_cc), 'arm_srcs': bpfmt(16, defs['armv7']), 'arm_neon_srcs': bpfmt(20, defs['neon']), 'arm64_srcs': bpfmt(16, defs['arm64'] + defs['crc32']), 'none_srcs': bpfmt(16, defs['none']), 'x86_srcs': bpfmt(16, defs['sse2'] + defs['ssse3'] + defs['sse41'] + defs['sse42'] + defs['avx' ] + defs['hsw' ]), 'dm_includes' : bpfmt(8, dm_includes), 'dm_srcs' : bpfmt(8, dm_srcs), 'nanobench_includes' : bpfmt(8, nanobench_includes), 'nanobench_srcs' : bpfmt(8, nanobench_srcs), })
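The record above calls two helpers, strip_slashes and bpfmt, whose definitions are not included in this excerpt. Purely to illustrate how the $-holes in the Android.bp template get filled, here is a hypothetical stand-in for bpfmt (quoted, comma-terminated, indented entries); it is an assumption for illustration, not the actual Skia implementation.

# Hypothetical helper, for illustration only: formats an iterable of strings
# the way the bp template expects (one '"value",' line per entry, indented).
def bpfmt_sketch(indent, values, sort=True):
    entries = sorted(values) if sort else list(values)
    return '\n'.join(' ' * indent + '"%s",' % v for v in entries)

print(bpfmt_sketch(8, {'include/core', 'include/gpu'}))
#         "include/core",
#         "include/gpu",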
25.006557
78
0.556051
8ad263d1cb0c4c04603f5f92c314ea18d8d73526
1,681
py
Python
python/ray/autoscaler/tags.py
firebolt55439/ray
215300b070628c06f0106906fc6c03bd70ebf140
[ "Apache-2.0" ]
21,382
2016-09-26T23:12:52.000Z
2022-03-31T21:47:45.000Z
python/ray/autoscaler/tags.py
firebolt55439/ray
215300b070628c06f0106906fc6c03bd70ebf140
[ "Apache-2.0" ]
19,689
2016-09-17T08:21:25.000Z
2022-03-31T23:59:30.000Z
python/ray/autoscaler/tags.py
firebolt55439/ray
215300b070628c06f0106906fc6c03bd70ebf140
[ "Apache-2.0" ]
4,114
2016-09-23T18:54:01.000Z
2022-03-31T15:07:32.000Z
"""The Ray autoscaler uses tags/labels to associate metadata with instances.""" # Tag for the name of the node TAG_RAY_NODE_NAME = "ray-node-name" # Tag for the kind of node (e.g. Head, Worker). For legacy reasons, the tag # value says 'type' instead of 'kind'. TAG_RAY_NODE_KIND = "ray-node-type" NODE_KIND_HEAD = "head" NODE_KIND_WORKER = "worker" NODE_KIND_UNMANAGED = "unmanaged" # Tag for user defined node types (e.g., m4xl_spot). This is used for multi # node type clusters. TAG_RAY_USER_NODE_TYPE = "ray-user-node-type" # Tag for autofilled node types for legacy cluster yamls without multi # node type defined in the cluster configs. NODE_TYPE_LEGACY_HEAD = "ray-legacy-head-node-type" NODE_TYPE_LEGACY_WORKER = "ray-legacy-worker-node-type" # Tag that reports the current state of the node (e.g. Updating, Up-to-date) TAG_RAY_NODE_STATUS = "ray-node-status" STATUS_UNINITIALIZED = "uninitialized" STATUS_WAITING_FOR_SSH = "waiting-for-ssh" STATUS_SYNCING_FILES = "syncing-files" STATUS_SETTING_UP = "setting-up" STATUS_UPDATE_FAILED = "update-failed" STATUS_UP_TO_DATE = "up-to-date" # Tag uniquely identifying all nodes of a cluster TAG_RAY_CLUSTER_NAME = "ray-cluster-name" # Hash of the node launch config, used to identify out-of-date nodes TAG_RAY_LAUNCH_CONFIG = "ray-launch-config" # Hash of the node runtime config, used to determine if updates are needed TAG_RAY_RUNTIME_CONFIG = "ray-runtime-config" # Hash of the contents of the directories specified by the file_mounts config # if the node is a worker, this also hashes content of the directories # specified by the cluster_synced_files config TAG_RAY_FILE_MOUNTS_CONTENTS = "ray-file-mounts-contents"
40.02381
79
0.781678
8ad27d34811f9ef90b1af846c18b262998179e76
1,523
py
Python
tests/generation_test.py
stefan-feltmann/lands
b2f1fc3aab4895763160a135d085a17dceb5f58e
[ "MIT" ]
null
null
null
tests/generation_test.py
stefan-feltmann/lands
b2f1fc3aab4895763160a135d085a17dceb5f58e
[ "MIT" ]
null
null
null
tests/generation_test.py
stefan-feltmann/lands
b2f1fc3aab4895763160a135d085a17dceb5f58e
[ "MIT" ]
null
null
null
import unittest

from worldengine.plates import Step, center_land, world_gen
from worldengine.world import World

from tests.draw_test import TestBase


if __name__ == '__main__':
    unittest.main()
35.418605
80
0.690085
8ad69b4670dd35b6830ae32d5cbb71d9e32dff45
1,427
py
Python
tests/test_models/test_components/test_discriminators/test_light_cnn.py
ChenShuwei1001/mmediting
285e629fe9da8a13c7538a6bb3347e8870cd7201
[ "Apache-2.0" ]
null
null
null
tests/test_models/test_components/test_discriminators/test_light_cnn.py
ChenShuwei1001/mmediting
285e629fe9da8a13c7538a6bb3347e8870cd7201
[ "Apache-2.0" ]
1
2021-08-05T16:20:39.000Z
2021-08-05T16:20:39.000Z
tests/test_models/test_components/test_discriminators/test_light_cnn.py
ChenShuwei1001/mmediting
285e629fe9da8a13c7538a6bb3347e8870cd7201
[ "Apache-2.0" ]
null
null
null
import pytest
import torch

from mmedit.models.builder import build_component
from mmedit.models.components.discriminators.light_cnn import MaxFeature
27.980392
72
0.613174
8ad728c2bc84ac4630b400804d13c8940597431e
4,727
py
Python
src/consumer.py
ssichynskyi/web_metrics_posting
26f104d2fdf31c2d029bac5a4d5337db42df86f5
[ "MIT" ]
null
null
null
src/consumer.py
ssichynskyi/web_metrics_posting
26f104d2fdf31c2d029bac5a4d5337db42df86f5
[ "MIT" ]
null
null
null
src/consumer.py
ssichynskyi/web_metrics_posting
26f104d2fdf31c2d029bac5a4d5337db42df86f5
[ "MIT" ]
null
null
null
import json
import logging
from typing import Iterable

from kafka import KafkaConsumer

log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())

# I've used this example:
# https://github.com/aiven/aiven-examples/blob/master/kafka/python/consumer_example.py
# as well as Aiven Kafka tutorials
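Only the imports and logger setup of the consumer module are included in this record. As a hedged sketch of how kafka-python's KafkaConsumer is typically constructed with a JSON value deserializer, the snippet below uses placeholder topic, server and client names; it is not the repository's actual implementation.

# Sketch only: typical kafka-python consumer construction. Topic, servers and
# client_id are placeholders, not the repository's real configuration.
import json
from kafka import KafkaConsumer

def make_consumer(topic='web-metrics', servers=('localhost:9092',)):
    return KafkaConsumer(
        topic,
        bootstrap_servers=list(servers),
        auto_offset_reset='earliest',
        client_id='metrics-consumer',
        value_deserializer=lambda raw: json.loads(raw.decode('utf-8')),
    )

# for message in make_consumer():
#     log.info('received: %s', message.value)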
36.643411
97
0.610324
76d15f9d93efb01c92547e696339173cf885a335
18,576
py
Python
pp2_model.py
BetterManlinfeng/hyperclasspptwo
053e9cf8445911e285ac723bdfbceb1cb384ed2e
[ "Apache-2.0" ]
null
null
null
pp2_model.py
BetterManlinfeng/hyperclasspptwo
053e9cf8445911e285ac723bdfbceb1cb384ed2e
[ "Apache-2.0" ]
null
null
null
pp2_model.py
BetterManlinfeng/hyperclasspptwo
053e9cf8445911e285ac723bdfbceb1cb384ed2e
[ "Apache-2.0" ]
null
null
null
from tensorflow.keras import * import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers, Sequential,regularizers from tensorflow.keras.layers import Dropout # from tensorflow.keras import * # 3x3kernel_initializer='he_normal','glorot_normal' from tensorflow.python.keras.layers import Concatenate ############################### ############################### ############################### ############################### ###################################### def build_resblock(self, filter_num, blocks, stride=1): res_blocks = Sequential() # may down sample res_blocks.add(BasicBlock(filter_num, stride)) for _ in range(1, blocks): res_blocks.add(BasicBlock(filter_num, stride=1)) return res_blocks ###################################### ########################### pp2 ######################################## def network_up(input_layer_up,filters_num,dropout_rate,Block_res): # input_layer = Input(input_shape) # conv1 = layers.Conv3D(filters_num[0], kernel_size=(3, 3, 7), padding='same')(input_layer) # filters_num = 8 # conv1 = layers.Conv3D(filters_num[0], kernel_size=(3, 3, 3),padding='same',kernel_initializer='he_normal',kernel_regularizer=regularizers.l2(0.0001))(input_layer_up) # filters_num = 8 conv1 = layers.Conv3D(filters_num[0], kernel_size=(3, 3, 3), padding='same', kernel_regularizer=regularizers.l2(0.0001))(input_layer_up) #kernel_initializer='he_normal', # conv_layer1m = tf.keras.layers.MaxPooling3D(pool_size=(1, 1, 1),padding='same')(conv1) # conv_layer1g = tf.keras.layers.GlobalMaxPooling3D()(conv1) conv1_bn = layers.BatchNormalization()(conv1) conv1_relu = layers.Activation('relu')(conv1_bn) # conv1_relu = Dropout(0.5)(conv1_relu) # conv1_relu = tf.keras.layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), padding='same')(conv1_relu) # conv2 = layers.Conv3D(filters_num[1], kernel_size=(3, 3, 5), padding='same')(conv1_relu) # filters_num = 16 conv2 = layers.Conv3D(filters_num[1], kernel_size=(3, 3, 3),padding='same',kernel_regularizer=regularizers.l2(0.0001))(conv1_relu) # filters_num = 16 conv2_bn = layers.BatchNormalization()(conv2) conv2_relu = layers.Activation('relu')(conv2_bn) # conv2_relu = Dropout(0.5)(conv2_relu) # conv2_relu = tf.keras.layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), padding='same')(conv2_relu) conv3 = layers.Conv3D(filters_num[2], kernel_size=(3, 3, 3),padding='same',kernel_regularizer=regularizers.l2(0.0001))(conv2_relu) # filters_num = 32 conv3_bn = layers.BatchNormalization()(conv3) conv3_relu = layers.Activation('relu')(conv3_bn) # conv3_relu = Dropout(0.5)(conv3_relu) # conv3_relu = tf.keras.layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), padding='same')(conv3_relu) conv3_relu_reshape = layers.Reshape((conv3_relu.shape[1],conv3_relu.shape[2],conv3_relu.shape[3]*conv3_relu.shape[4]))(conv3_relu) conv3_relu_reshape = Dropout(0.5)(conv3_relu_reshape) ########################################### # conv11 = layers.Conv3D(filters_num[0], kernel_size=(5, 5, 3), padding='same', # kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(0.0001))(input_layer_up) # conv11_bn = layers.BatchNormalization()(conv11) # conv11_relu = layers.Activation('relu')(conv11_bn) # # # conv2 = layers.Conv3D(filters_num[1], kernel_size=(3, 3, 5), padding='same')(conv1_relu) # filters_num = 16 # conv22 = layers.Conv3D(filters_num[1], kernel_size=(5, 5, 3), padding='same', kernel_initializer='he_normal', # kernel_regularizer=regularizers.l2(0.0001))(conv11_relu) # filters_num = 16 # conv22_bn = 
layers.BatchNormalization()(conv22) # conv22_relu = layers.Activation('relu')(conv22_bn) # # conv33 = layers.Conv3D(filters_num[2], kernel_size=(5, 5, 3), padding='same', kernel_initializer='he_normal', # kernel_regularizer=regularizers.l2(0.0001))(conv22_relu) # filters_num = 32 # conv33_bn = layers.BatchNormalization()(conv33) # conv33_relu = layers.Activation('relu')(conv33_bn) # # conv33_relu_reshape = layers.Reshape( # (conv3_relu.shape[1], conv3_relu.shape[2], conv3_relu.shape[3] * conv3_relu.shape[4]))(conv33_relu) #################################################### # conv111 = layers.Conv3D(filters_num[0], kernel_size=(7, 7, 3), padding='same', # kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(0.0001))(input_layer_up) # conv111_bn = layers.BatchNormalization()(conv111) # conv111_relu = layers.Activation('relu')(conv111_bn) # # # conv2 = layers.Conv3D(filters_num[1], kernel_size=(3, 3, 5), padding='same')(conv1_relu) # filters_num = 16 # conv222 = layers.Conv3D(filters_num[1], kernel_size=(7, 7, 3), padding='same', kernel_initializer='he_normal', # kernel_regularizer=regularizers.l2(0.0001))(conv111_relu) # filters_num = 16 # conv222_bn = layers.BatchNormalization()(conv222) # conv222_relu = layers.Activation('relu')(conv222_bn) # # conv333 = layers.Conv3D(filters_num[2], kernel_size=(7, 7, 3), padding='same', kernel_initializer='he_normal', # kernel_regularizer=regularizers.l2(0.0001))(conv222_relu) # filters_num = 32 # conv333_bn = layers.BatchNormalization()(conv333) # conv333_relu = layers.Activation('relu')(conv333_bn) # # conv333_relu_reshape = layers.Reshape( # (conv3_relu.shape[1], conv3_relu.shape[2], conv3_relu.shape[3] * conv3_relu.shape[4]))(conv333_relu) #################concatenate######################## # conv33333_relu_reshape = Concatenate(axis=-1)([conv3_relu_reshape, conv33_relu_reshape]) ######################################### conv4 = layers.Conv2D(filters_num[3], kernel_size=(3, 3), padding='same',kernel_regularizer=regularizers.l2(0.0001))(conv3_relu_reshape) # filters_num = 64 conv4_bn = layers.BatchNormalization()(conv4) conv4_relu = layers.Activation('relu')(conv4_bn) # conv4_relu = Dropout(0.5)(conv4_relu) # conv4_relu = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='same')(conv4_relu) # conv4_relu = tf.keras.layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), padding='same')(conv4_relu) conv5 = layers.Conv2D(filters_num[4], kernel_size=(3, 3), padding='same',kernel_regularizer=regularizers.l2(0.0001))(conv4_relu) # filters_num = ** conv5_bn = layers.BatchNormalization()(conv5) conv5_relu = layers.Activation('relu')(conv5_bn) # conv5_relu = Dropout(0.5)(conv5_relu) # conv5_relu = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='same')(conv5_relu) # conv5_relu = tf.keras.layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), padding='same')(conv5_relu) # conv5_dpout = layers.Dropout(dropout_rate)(conv5) # conv5_reshape = layers.Reshape((conv5_dpout.shape[1],conv5_dpout.shape[2],conv5_dpout.shape[3]))(conv5_dpout) outputs2,outputs4 = Block_res(conv5_relu) return conv5,outputs2,outputs4 # layer1 = build_resblock(filters_num[5], layer_dims[0]) # filters_num = 64 # layer2 = build_resblock(filters_num[6], layer_dims[1], stride=2) # filters_num = 128 # layer3 = build_resblock(filters_num[7], layer_dims[2], stride=2) # filters_num = 256 # layer4 = build_resblock(filters_num[8], layer_dims[3], stride=2) # filters_num = 512
39.02521
191
0.596307
76d272f048a94a1ab146a49618ff07c96686a509
3,364
py
Python
medi/inference/utils.py
yuan-xy/medi
ffa416b73eb2fbffbae7a27b6eccc267b061ed0f
[ "MIT" ]
3
2020-05-27T08:42:26.000Z
2021-12-04T08:54:08.000Z
medi/inference/utils.py
yuan-xy/medi
ffa416b73eb2fbffbae7a27b6eccc267b061ed0f
[ "MIT" ]
null
null
null
medi/inference/utils.py
yuan-xy/medi
ffa416b73eb2fbffbae7a27b6eccc267b061ed0f
[ "MIT" ]
null
null
null
""" A universal module with functions / classes without dependencies. """ import sys import contextlib import functools import re import os from medi._compatibility import reraise _sep = os.path.sep if os.path.altsep is not None: _sep += os.path.altsep _path_re = re.compile(r'(?:\.[^{0}]+|[{0}]__init__\.py)$'.format(re.escape(_sep))) del _sep def unite(iterable): """Turns a two dimensional array into a one dimensional.""" return set(typ for types in iterable for typ in types) def reraise_uncaught(func): """ Re-throw uncaught `AttributeError`. Usage: Put ``@rethrow_uncaught`` in front of the function which does **not** suppose to raise `AttributeError`. AttributeError is easily get caught by `hasattr` and another ``except AttributeError`` clause. This becomes problem when you use a lot of "dynamic" attributes (e.g., using ``@property``) because you can't distinguish if the property does not exist for real or some code inside of the "dynamic" attribute through that error. In a well written code, such error should not exist but getting there is very difficult. This decorator is to help us getting there by changing `AttributeError` to `UncaughtAttributeError` to avoid unexpected catch. This helps us noticing bugs earlier and facilitates debugging. .. note:: Treating StopIteration here is easy. Add that feature when needed. """ return wrapper
29
82
0.671819
76d2dd0a16c26b25219d0d5220bf5e490de12769
1,627
py
Python
run.py
Bioconductor/bioc_git_transition
9ca29f9e8058b755163e12bf9324ec1063d0182d
[ "MIT" ]
16
2017-03-15T18:00:35.000Z
2018-07-30T14:44:53.000Z
run.py
Bioconductor/bioc_git_transition
9ca29f9e8058b755163e12bf9324ec1063d0182d
[ "MIT" ]
40
2017-03-29T20:04:25.000Z
2019-10-21T16:56:15.000Z
run.py
Bioconductor/bioc_git_transition
9ca29f9e8058b755163e12bf9324ec1063d0182d
[ "MIT" ]
4
2017-05-08T11:39:07.000Z
2017-08-17T14:18:03.000Z
"""Bioconductor run git transition code. This module assembles the classes for the SVN --> Git transition can be run in a sequential manner. It runs the following aspects fo the Bioconductor transition. Note: Update the SVN dump 1. Run Bioconductor Software package transition 2. Run Bioconductor Experiment Data package transition 3. Run Workflow package transition 4. Run Manifest file transition 5. Run Rapid update of master (trunk) and RELEASE_3_5 branches on software packages Manual tasks which need to be done: 1. Copy over bare repos to repositories/packages 2. Copy manifest bare git repo to repositories/admin """ import src.run_transition as rt import src.svn_dump_update as sdu import logging import time logging.basicConfig(filename='transition.log', format='%(levelname)s %(asctime)s %(message)s', level=logging.DEBUG) if __name__ == '__main__': start_time = time.time() config_file = "./settings.ini" svn_dump_update(config_file) run(config_file) # TODO: Run updates after dump update svn_dump_update(config_file) rt.run_updates(config_file) logging.info("--- %s seconds ---" % (time.time() - start_time))
30.12963
69
0.754149
76d39eed393350171c588f61022e00d384bb01c9
53,515
py
Python
third_party/google-endpoints/dogpile/cache/region.py
tingshao/catapult
a8fe19e0c492472a8ed5710be9077e24cc517c5c
[ "BSD-3-Clause" ]
2,151
2020-04-18T07:31:17.000Z
2022-03-31T08:39:18.000Z
third_party/google-endpoints/dogpile/cache/region.py
tingshao/catapult
a8fe19e0c492472a8ed5710be9077e24cc517c5c
[ "BSD-3-Clause" ]
4,640
2015-07-08T16:19:08.000Z
2019-12-02T15:01:27.000Z
third_party/google-endpoints/dogpile/cache/region.py
tingshao/catapult
a8fe19e0c492472a8ed5710be9077e24cc517c5c
[ "BSD-3-Clause" ]
698
2015-06-02T19:18:35.000Z
2022-03-29T16:57:15.000Z
from __future__ import with_statement from .. import Lock, NeedRegenerationException from ..util import NameRegistry from . import exception from ..util import PluginLoader, memoized_property, coerce_string_conf from .util import function_key_generator, function_multi_key_generator from .api import NO_VALUE, CachedValue from .proxy import ProxyBackend from ..util import compat import time import datetime from numbers import Number from functools import wraps import threading _backend_loader = PluginLoader("dogpile.cache") register_backend = _backend_loader.register from . import backends # noqa value_version = 1 """An integer placed in the :class:`.CachedValue` so that new versions of dogpile.cache can detect cached values from a previous, backwards-incompatible version. """ def _unexpired_value_fn(self, expiration_time, ignore_expiration): if ignore_expiration: return lambda value: value else: if expiration_time is None: expiration_time = self.expiration_time current_time = time.time() return value_fn def get_multi(self, keys, expiration_time=None, ignore_expiration=False): """Return multiple values from the cache, based on the given keys. Returns values as a list matching the keys given. E.g.:: values = region.get_multi(["one", "two", "three"]) To convert values to a dictionary, use ``zip()``:: keys = ["one", "two", "three"] values = region.get_multi(keys) dictionary = dict(zip(keys, values)) Keys which aren't present in the list are returned as the ``NO_VALUE`` token. ``NO_VALUE`` evaluates to False, but is separate from ``None`` to distinguish between a cached value of ``None``. By default, the configured expiration time of the :class:`.CacheRegion`, or alternatively the expiration time supplied by the ``expiration_time`` argument, is tested against the creation time of the retrieved value versus the current time (as reported by ``time.time()``). If stale, the cached value is ignored and the ``NO_VALUE`` token is returned. Passing the flag ``ignore_expiration=True`` bypasses the expiration time check. .. versionadded:: 0.5.0 """ if not keys: return [] if self.key_mangler: keys = list(map(lambda key: self.key_mangler(key), keys)) backend_values = self.backend.get_multi(keys) _unexpired_value_fn = self._unexpired_value_fn( expiration_time, ignore_expiration) return [ value.payload if value is not NO_VALUE else value for value in ( _unexpired_value_fn(value) for value in backend_values ) ] def get_or_create( self, key, creator, expiration_time=None, should_cache_fn=None): """Return a cached value based on the given key. If the value does not exist or is considered to be expired based on its creation time, the given creation function may or may not be used to recreate the value and persist the newly generated value in the cache. Whether or not the function is used depends on if the *dogpile lock* can be acquired or not. If it can't, it means a different thread or process is already running a creation function for this key against the cache. When the dogpile lock cannot be acquired, the method will block if no previous value is available, until the lock is released and a new value available. If a previous value is available, that value is returned immediately without blocking. If the :meth:`.invalidate` method has been called, and the retrieved value's timestamp is older than the invalidation timestamp, the value is unconditionally prevented from being returned. 
The method will attempt to acquire the dogpile lock to generate a new value, or will wait until the lock is released to return the new value. .. versionchanged:: 0.3.0 The value is unconditionally regenerated if the creation time is older than the last call to :meth:`.invalidate`. :param key: Key to be retrieved. While it's typical for a key to be a string, it is ultimately passed directly down to the cache backend, before being optionally processed by the key_mangler function, so can be of any type recognized by the backend or by the key_mangler function, if present. :param creator: function which creates a new value. :param expiration_time: optional expiration time which will overide the expiration time already configured on this :class:`.CacheRegion` if not None. To set no expiration, use the value -1. :param should_cache_fn: optional callable function which will receive the value returned by the "creator", and will then return True or False, indicating if the value should actually be cached or not. If it returns False, the value is still returned, but isn't cached. E.g.:: def dont_cache_none(value): return value is not None value = region.get_or_create("some key", create_value, should_cache_fn=dont_cache_none) Above, the function returns the value of create_value() if the cache is invalid, however if the return value is None, it won't be cached. .. versionadded:: 0.4.3 .. seealso:: :meth:`.CacheRegion.cache_on_arguments` - applies :meth:`.get_or_create` to any function using a decorator. :meth:`.CacheRegion.get_or_create_multi` - multiple key/value version """ orig_key = key if self.key_mangler: key = self.key_mangler(key) if expiration_time is None: expiration_time = self.expiration_time if (expiration_time is None and self.region_invalidator.was_soft_invalidated()): raise exception.DogpileCacheException( "Non-None expiration time required " "for soft invalidation") if expiration_time == -1: expiration_time = None if self.async_creation_runner: else: async_creator = None with Lock( self._mutex(key), gen_value, get_value, expiration_time, async_creator) as value: return value def get_or_create_multi( self, keys, creator, expiration_time=None, should_cache_fn=None): """Return a sequence of cached values based on a sequence of keys. The behavior for generation of values based on keys corresponds to that of :meth:`.Region.get_or_create`, with the exception that the ``creator()`` function may be asked to generate any subset of the given keys. The list of keys to be generated is passed to ``creator()``, and ``creator()`` should return the generated values as a sequence corresponding to the order of the keys. The method uses the same approach as :meth:`.Region.get_multi` and :meth:`.Region.set_multi` to get and set values from the backend. If you are using a :class:`.CacheBackend` or :class:`.ProxyBackend` that modifies values, take note this function invokes ``.set_multi()`` for newly generated values using the same values it returns to the calling function. A correct implementation of ``.set_multi()`` will not modify values in-place on the submitted ``mapping`` dict. :param keys: Sequence of keys to be retrieved. :param creator: function which accepts a sequence of keys and returns a sequence of new values. :param expiration_time: optional expiration time which will overide the expiration time already configured on this :class:`.CacheRegion` if not None. To set no expiration, use the value -1. 
:param should_cache_fn: optional callable function which will receive each value returned by the "creator", and will then return True or False, indicating if the value should actually be cached or not. If it returns False, the value is still returned, but isn't cached. .. versionadded:: 0.5.0 .. seealso:: :meth:`.CacheRegion.cache_multi_on_arguments` :meth:`.CacheRegion.get_or_create` """ if expiration_time is None: expiration_time = self.expiration_time if (expiration_time is None and self.region_invalidator.was_soft_invalidated()): raise exception.DogpileCacheException( "Non-None expiration time required " "for soft invalidation") if expiration_time == -1: expiration_time = None mutexes = {} sorted_unique_keys = sorted(set(keys)) if self.key_mangler: mangled_keys = [self.key_mangler(k) for k in sorted_unique_keys] else: mangled_keys = sorted_unique_keys orig_to_mangled = dict(zip(sorted_unique_keys, mangled_keys)) values = dict(zip(mangled_keys, self.backend.get_multi(mangled_keys))) for orig_key, mangled_key in orig_to_mangled.items(): with Lock( self._mutex(mangled_key), gen_value, lambda: get_value(mangled_key), expiration_time, async_creator=lambda mutex: async_creator(orig_key, mutex) ): pass try: if mutexes: # sort the keys, the idea is to prevent deadlocks. # though haven't been able to simulate one anyway. keys_to_get = sorted(mutexes) new_values = creator(*keys_to_get) values_w_created = dict( (orig_to_mangled[k], self._value(v)) for k, v in zip(keys_to_get, new_values) ) if not should_cache_fn: self.backend.set_multi(values_w_created) else: self.backend.set_multi(dict( (k, v) for k, v in values_w_created.items() if should_cache_fn(v[0]) )) values.update(values_w_created) return [values[orig_to_mangled[k]].payload for k in keys] finally: for mutex in mutexes.values(): mutex.release() def _value(self, value): """Return a :class:`.CachedValue` given a value.""" return CachedValue( value, { "ct": time.time(), "v": value_version }) def set(self, key, value): """Place a new value in the cache under the given key.""" if self.key_mangler: key = self.key_mangler(key) self.backend.set(key, self._value(value)) def set_multi(self, mapping): """Place new values in the cache under the given keys. .. versionadded:: 0.5.0 """ if not mapping: return if self.key_mangler: mapping = dict(( self.key_mangler(k), self._value(v)) for k, v in mapping.items()) else: mapping = dict((k, self._value(v)) for k, v in mapping.items()) self.backend.set_multi(mapping) def delete(self, key): """Remove a value from the cache. This operation is idempotent (can be called multiple times, or on a non-existent key, safely) """ if self.key_mangler: key = self.key_mangler(key) self.backend.delete(key) def delete_multi(self, keys): """Remove multiple values from the cache. This operation is idempotent (can be called multiple times, or on a non-existent key, safely) .. versionadded:: 0.5.0 """ if self.key_mangler: keys = list(map(lambda key: self.key_mangler(key), keys)) self.backend.delete_multi(keys) def cache_on_arguments( self, namespace=None, expiration_time=None, should_cache_fn=None, to_str=compat.string_type, function_key_generator=None): """A function decorator that will cache the return value of the function using a key derived from the function itself and its arguments. The decorator internally makes use of the :meth:`.CacheRegion.get_or_create` method to access the cache and conditionally call the function. See that method for additional behavioral details. 
E.g.:: @someregion.cache_on_arguments() def generate_something(x, y): return somedatabase.query(x, y) The decorated function can then be called normally, where data will be pulled from the cache region unless a new value is needed:: result = generate_something(5, 6) The function is also given an attribute ``invalidate()``, which provides for invalidation of the value. Pass to ``invalidate()`` the same arguments you'd pass to the function itself to represent a particular value:: generate_something.invalidate(5, 6) Another attribute ``set()`` is added to provide extra caching possibilities relative to the function. This is a convenience method for :meth:`.CacheRegion.set` which will store a given value directly without calling the decorated function. The value to be cached is passed as the first argument, and the arguments which would normally be passed to the function should follow:: generate_something.set(3, 5, 6) The above example is equivalent to calling ``generate_something(5, 6)``, if the function were to produce the value ``3`` as the value to be cached. .. versionadded:: 0.4.1 Added ``set()`` method to decorated function. Similar to ``set()`` is ``refresh()``. This attribute will invoke the decorated function and populate a new value into the cache with the new value, as well as returning that value:: newvalue = generate_something.refresh(5, 6) .. versionadded:: 0.5.0 Added ``refresh()`` method to decorated function. Lastly, the ``get()`` method returns either the value cached for the given key, or the token ``NO_VALUE`` if no such key exists:: value = generate_something.get(5, 6) .. versionadded:: 0.5.3 Added ``get()`` method to decorated function. The default key generation will use the name of the function, the module name for the function, the arguments passed, as well as an optional "namespace" parameter in order to generate a cache key. Given a function ``one`` inside the module ``myapp.tools``:: @region.cache_on_arguments(namespace="foo") def one(a, b): return a + b Above, calling ``one(3, 4)`` will produce a cache key as follows:: myapp.tools:one|foo|3 4 The key generator will ignore an initial argument of ``self`` or ``cls``, making the decorator suitable (with caveats) for use with instance or class methods. Given the example:: class MyClass(object): @region.cache_on_arguments(namespace="foo") def one(self, a, b): return a + b The cache key above for ``MyClass().one(3, 4)`` will again produce the same cache key of ``myapp.tools:one|foo|3 4`` - the name ``self`` is skipped. The ``namespace`` parameter is optional, and is used normally to disambiguate two functions of the same name within the same module, as can occur when decorating instance or class methods as below:: class MyClass(object): @region.cache_on_arguments(namespace='MC') def somemethod(self, x, y): "" class MyOtherClass(object): @region.cache_on_arguments(namespace='MOC') def somemethod(self, x, y): "" Above, the ``namespace`` parameter disambiguates between ``somemethod`` on ``MyClass`` and ``MyOtherClass``. Python class declaration mechanics otherwise prevent the decorator from having awareness of the ``MyClass`` and ``MyOtherClass`` names, as the function is received by the decorator before it becomes an instance method. The function key generation can be entirely replaced on a per-region basis using the ``function_key_generator`` argument present on :func:`.make_region` and :class:`.CacheRegion`. If defaults to :func:`.function_key_generator`. 
:param namespace: optional string argument which will be established as part of the cache key. This may be needed to disambiguate functions of the same name within the same source file, such as those associated with classes - note that the decorator itself can't see the parent class on a function as the class is being declared. :param expiration_time: if not None, will override the normal expiration time. May be specified as a callable, taking no arguments, that returns a value to be used as the ``expiration_time``. This callable will be called whenever the decorated function itself is called, in caching or retrieving. Thus, this can be used to determine a *dynamic* expiration time for the cached function result. Example use cases include "cache the result until the end of the day, week or time period" and "cache until a certain date or time passes". .. versionchanged:: 0.5.0 ``expiration_time`` may be passed as a callable to :meth:`.CacheRegion.cache_on_arguments`. :param should_cache_fn: passed to :meth:`.CacheRegion.get_or_create`. .. versionadded:: 0.4.3 :param to_str: callable, will be called on each function argument in order to convert to a string. Defaults to ``str()``. If the function accepts non-ascii unicode arguments on Python 2.x, the ``unicode()`` builtin can be substituted, but note this will produce unicode cache keys which may require key mangling before reaching the cache. .. versionadded:: 0.5.0 :param function_key_generator: a function that will produce a "cache key". This function will supersede the one configured on the :class:`.CacheRegion` itself. .. versionadded:: 0.5.5 .. seealso:: :meth:`.CacheRegion.cache_multi_on_arguments` :meth:`.CacheRegion.get_or_create` """ expiration_time_is_callable = compat.callable(expiration_time) if function_key_generator is None: function_key_generator = self.function_key_generator return decorator def cache_multi_on_arguments( self, namespace=None, expiration_time=None, should_cache_fn=None, asdict=False, to_str=compat.string_type, function_multi_key_generator=None): """A function decorator that will cache multiple return values from the function using a sequence of keys derived from the function itself and the arguments passed to it. This method is the "multiple key" analogue to the :meth:`.CacheRegion.cache_on_arguments` method. Example:: @someregion.cache_multi_on_arguments() def generate_something(*keys): return [ somedatabase.query(key) for key in keys ] The decorated function can be called normally. The decorator will produce a list of cache keys using a mechanism similar to that of :meth:`.CacheRegion.cache_on_arguments`, combining the name of the function with the optional namespace and with the string form of each key. It will then consult the cache using the same mechanism as that of :meth:`.CacheRegion.get_multi` to retrieve all current values; the originally passed keys corresponding to those values which aren't generated or need regeneration will be assembled into a new argument list, and the decorated function is then called with that subset of arguments. The returned result is a list:: result = generate_something("key1", "key2", "key3") The decorator internally makes use of the :meth:`.CacheRegion.get_or_create_multi` method to access the cache and conditionally call the function. See that method for additional behavioral details. 
Unlike the :meth:`.CacheRegion.cache_on_arguments` method, :meth:`.CacheRegion.cache_multi_on_arguments` works only with a single function signature, one which takes a simple list of keys as arguments. Like :meth:`.CacheRegion.cache_on_arguments`, the decorated function is also provided with a ``set()`` method, which here accepts a mapping of keys and values to set in the cache:: generate_something.set({"k1": "value1", "k2": "value2", "k3": "value3"}) ...an ``invalidate()`` method, which has the effect of deleting the given sequence of keys using the same mechanism as that of :meth:`.CacheRegion.delete_multi`:: generate_something.invalidate("k1", "k2", "k3") ...a ``refresh()`` method, which will call the creation function, cache the new values, and return them:: values = generate_something.refresh("k1", "k2", "k3") ...and a ``get()`` method, which will return values based on the given arguments:: values = generate_something.get("k1", "k2", "k3") .. versionadded:: 0.5.3 Added ``get()`` method to decorated function. Parameters passed to :meth:`.CacheRegion.cache_multi_on_arguments` have the same meaning as those passed to :meth:`.CacheRegion.cache_on_arguments`. :param namespace: optional string argument which will be established as part of each cache key. :param expiration_time: if not None, will override the normal expiration time. May be passed as an integer or a callable. :param should_cache_fn: passed to :meth:`.CacheRegion.get_or_create_multi`. This function is given a value as returned by the creator, and only if it returns True will that value be placed in the cache. :param asdict: if ``True``, the decorated function should return its result as a dictionary of keys->values, and the final result of calling the decorated function will also be a dictionary. If left at its default value of ``False``, the decorated function should return its result as a list of values, and the final result of calling the decorated function will also be a list. When ``asdict==True`` if the dictionary returned by the decorated function is missing keys, those keys will not be cached. :param to_str: callable, will be called on each function argument in order to convert to a string. Defaults to ``str()``. If the function accepts non-ascii unicode arguments on Python 2.x, the ``unicode()`` builtin can be substituted, but note this will produce unicode cache keys which may require key mangling before reaching the cache. .. versionadded:: 0.5.0 :param function_multi_key_generator: a function that will produce a list of keys. This function will supersede the one configured on the :class:`.CacheRegion` itself. .. versionadded:: 0.5.5 .. seealso:: :meth:`.CacheRegion.cache_on_arguments` :meth:`.CacheRegion.get_or_create_multi` """ expiration_time_is_callable = compat.callable(expiration_time) if function_multi_key_generator is None: function_multi_key_generator = self.function_multi_key_generator return decorator def make_region(*arg, **kw): """Instantiate a new :class:`.CacheRegion`. Currently, :func:`.make_region` is a passthrough to :class:`.CacheRegion`. See that class for constructor arguments. """ return CacheRegion(*arg, **kw)
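The docstrings above describe get_or_create, cache_on_arguments and make_region. A short usage sketch follows; note that the configure() call and the "dogpile.cache.memory" backend name come from dogpile.cache's public API rather than from this excerpt, so treat them as assumptions here.

# Usage sketch for the region API documented above.
from dogpile.cache import make_region

region = make_region().configure("dogpile.cache.memory", expiration_time=60)


@region.cache_on_arguments(namespace="demo")
def generate_something(x, y):
    return x + y


print(generate_something(5, 6))   # computed on the first call, then cached
print(generate_something(5, 6))   # served from the cache
generate_something.invalidate(5, 6)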
36.429544
82
0.606652
76d437c1b037e1c3fe1a171bd9eb231c53d36fc1
645
py
Python
projectparallelprogrammeren/codesimulatie.py
fury106/ProjectParallelProgrammeren
fd3c198edaca5bcb19d8e665561e8cd14824e894
[ "MIT" ]
null
null
null
projectparallelprogrammeren/codesimulatie.py
fury106/ProjectParallelProgrammeren
fd3c198edaca5bcb19d8e665561e8cd14824e894
[ "MIT" ]
null
null
null
projectparallelprogrammeren/codesimulatie.py
fury106/ProjectParallelProgrammeren
fd3c198edaca5bcb19d8e665561e8cd14824e894
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
"""
Module projectparallelprogrammeren.codesimulatie
=================================================================

This module simulates everything.
"""

import projectparallelprogrammeren


def simulatie():
    """
    This function runs every version of the simulation so that their timings
    can be compared.
    """
    from importlib import import_module

    for i in range(4):
        # Import all versions of the simulation and run them one after the other.
        version = f"montecarlo_v{i}"
        montecarlo = import_module(version)
        montecarlo.simulatie(100, 50)  # These values only serve as a test.


if __name__ == "__main__":
    simulatie()

#eof
23.035714
84
0.674419
76d4b9d4643322713c59c30a22d968f034c3d591
2,361
py
Python
test/test_aes.py
haruhi-dl/haruhi-dl
0526e2add4c263209cad55347efa9a2dfe6c3fa6
[ "Unlicense" ]
32
2021-01-18T03:52:17.000Z
2022-02-17T20:43:39.000Z
test/test_aes.py
haruhi-dl/haruhi-dl
0526e2add4c263209cad55347efa9a2dfe6c3fa6
[ "Unlicense" ]
12
2021-02-06T08:12:08.000Z
2021-12-11T23:17:41.000Z
test/test_aes.py
haruhi-dl/haruhi-dl
0526e2add4c263209cad55347efa9a2dfe6c3fa6
[ "Unlicense" ]
6
2021-01-29T16:46:31.000Z
2022-01-20T18:40:03.000Z
#!/usr/bin/env python
from __future__ import unicode_literals

# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from haruhi_dl.aes import aes_decrypt, aes_encrypt, aes_cbc_decrypt, aes_cbc_encrypt, aes_decrypt_text
from haruhi_dl.utils import bytes_to_intlist, intlist_to_bytes
import base64

# the encrypted data can be generated with 'devscripts/generate_aes_testdata.py'

if __name__ == '__main__':
    unittest.main()
36.890625
102
0.671326
76d6a858fdb2f760a40ddaceed8b3a0b06e85a87
14,566
py
Python
layouts/layout_simulation_procedure.py
KEHUIYAO/coral-sampling-tool
731cc22fbf5e4045e894b894547ad52c270e3fb1
[ "MIT" ]
5
2022-03-29T04:41:22.000Z
2022-03-29T12:17:35.000Z
layouts/layout_simulation_procedure.py
KEHUIYAO/coral-sampling-tool
731cc22fbf5e4045e894b894547ad52c270e3fb1
[ "MIT" ]
null
null
null
layouts/layout_simulation_procedure.py
KEHUIYAO/coral-sampling-tool
731cc22fbf5e4045e894b894547ad52c270e3fb1
[ "MIT" ]
null
null
null
import dash_core_components as dcc import dash_html_components as html import dash_bootstrap_components as dbc def generate_dropdown_selection(): "return a Div containing the dropdown selection box" return dcc.Dropdown( id='dropdown_select_process', style={"display": "none"}, options=[ {'label': 'Homogeneous Poisson process', 'value': 1}, {'label': 'Inhomogeneous Poisson process', 'value': 2}, {'label': 'Cluster process', 'value': 3}, # {'label': 'Strauss process', 'value': 4} ], # set the initial value=0 to hide the user input interface value=0) def generate_user_input(): "return a Div containing users' input interface" input_n_toolkits = html.Div(html.Div([html.A('Number of transects:', className='col-sm-4'), dcc.Input( type='number', placeholder=2, value = 2, id='input_n_toolkits', className='col-sm-4' ) ], className='row'), id='input_n_toolkits_container', style={'display': 'none'}) # slider # input_n_toolkits = html.Div(html.Div([ # html.A("Number of transects",className='col-sm-4'), # dcc.Slider(min=1, # max=5, # step=1, # value=2, # marks={i: '{}'.format(i) for i in range(1, 6)}, # id='input_n_toolkits', # className='col-sm-4') # ], className='row'), id='input_n_toolkits_container', # className='row', # style={'display': 'none'}) input_disease_prevalence = html.Div(html.Div([html.A('disease prevalence: ', id='input_disease_prevalence_tooltip', className='col-sm-4'), dcc.Input( type='number', placeholder=0.1, value = 0.1, step=0.1, min=0, max=1, id='input_disease_prevalence', className='col-sm-4' ) ], className='row'), id='input_disease_prevalence_container', style={'display': 'none'}) input_disease_prevalence_tooltip = dbc.Tooltip('the proportion of corals which get infected by a disease', target='input_disease_prevalence_tooltip') # text or number input input_fun_lambda = html.Div(html.Div([html.A('proportion cover function:', className='col-sm-4'), dcc.Input( id="input_fun_lambda", type='text', placeholder="1000 * np.exp(-(((x - 50) / 50) ** 2 + ((y - 50) / 50) ** 2) / 0.5 ** 2)", value="1000 * np.exp(-(((x - 50) / 50) ** 2 + ((y - 50) / 50) ** 2) / 0.5 ** 2)", className='col-sm-4' )],className='row'),id='show_input_fun_lambda',style={'display':'none'}) input_parent_prop = html.Div(html.Div([html.A('parent corals / total corals:', className='col-sm-4'), dcc.Input( id="input_parent_prop", type='number', placeholder=0.01, value=0.01, step=0.01, className='col-sm-4' )],className='row'),id='show_input_parent_prop',style={'display':'none'}) input_parent_range = html.Div(html.Div([html.A('parent range:', className='col-sm-4'), dcc.Input( id="input_parent_range", type='number', placeholder=5, value=5, className='col-sm-4' )],className='row'),id='show_input_parent_range',style={'display':'none'}) input_strauss_beta = dcc.Input( id="input_strauss_beta", type='number', placeholder="strauss_beta", style={'display': 'none'} ) input_strauss_gamma = dcc.Input( id="input_strauss_gamma", type='number', placeholder="strauss_gamma", style={'display': 'none'} ) input_strauss_R = dcc.Input( id="input_strauss_R", type='number', placeholder="strauss_R", style={'display': 'none'} ) input_transect_length = html.Div(html.Div([html.A('transect width (m): ', className='col-sm-4'), dcc.Input( type='number', placeholder=25, value=25, id='dcc_input_transect_length', className='col-sm-4' ) ], className='row'), id='input_transect_length', style={'display': 'none'}) input_transect_width = html.Div(html.Div([html.A('transect length (m): ', className='col-sm-4'), dcc.Input( type='number', placeholder=6, value = 6, 
id='dcc_input_transect_width', className='col-sm-4' ) ], className='row'), id='input_transect_width', style={'display': 'none'}) line_intercept_ratio = html.Div(html.Div([html.A('transect width / plot width', className='col-sm-4'), dcc.Input( type='number', placeholder=1/5, value = 1/5, step=0.1, id='dcc_line_intercept_ratio', className='col-sm-4') ],className='row'), id='line_intercept_ratio', style={'display': 'none'}) coral_size = html.Div(html.Div([html.A('coral size (m^2): ', id='coral_size_tooltip',className='col-sm-4'), dcc.Input( type='number', placeholder=0.0068, value = 0.0068, step=0.0001, id='coral_size', className='col-sm-4' ) ],className='row' ), id='coral_size_input', style={'display': 'none'}) coral_size_tooltip = dbc.Tooltip('the average size of an individual coral, measured in m^3', target='coral_size_tooltip') coral_size_std = html.Div(html.Div([html.A('coral size standard error: ', id='coral_size_std_tooltip', className='col-sm-4'), dcc.Input( type='number', placeholder=0.001, value = 0.001, step=0.001, id='coral_size_std', className='col-sm-4' )], className='row') , id='coral_size_std_input', style={'display': 'none'}) coral_size_std_tooltip = dbc.Tooltip('the standard deviation of the average size of an individual coral', target='coral_size_std_tooltip') prop_cover = html.Div(html.Div([html.A('proportion cover: ', className='col-sm-4', id='prop_cover_tooltip'), dcc.Input( type='number', placeholder=0, value = 0, step=0.1, min=0, max=1, id='prop_cover', className='col-sm-4' ) ],className='row'), id='prop_cover_input', style={'display': 'none'}) prop_cover_tooltip = dbc.Tooltip('Proportion cover of coral. If it equals 0, its estimation based on the historical data will be used in the simulation', target='prop_cover_tooltip') num_of_replications = html.Div(html.Div([html.A('number of replications', className='col-sm-4'), dcc.Input( type='number', placeholder=10, value = 10, step=1, min=1, id='num_of_replications', className='col-sm-4' ) ],className='row'), id='number_of_replications_input', style={'display': 'none'}) return html.Div([ input_n_toolkits, prop_cover, prop_cover_tooltip, input_fun_lambda, coral_size, coral_size_tooltip, coral_size_std, coral_size_std_tooltip, input_disease_prevalence, input_disease_prevalence_tooltip, input_parent_prop, input_parent_range, input_strauss_beta, input_strauss_gamma, input_strauss_R, input_transect_length, input_transect_width, line_intercept_ratio, num_of_replications ], id='input_process_parameters')
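Every container above starts hidden; a hedged sketch of the callback that would reveal one of them once a process is chosen (the app object and the show/hide rule are assumptions, only the component ids come from the layout):

from dash.dependencies import Input, Output

# Sketch only: `app` is assumed to be the Dash instance created elsewhere
# in the project; value 0 is the sentinel used above to keep inputs hidden.
@app.callback(
    Output('input_n_toolkits_container', 'style'),
    [Input('dropdown_select_process', 'value')],
)
def toggle_n_toolkits(selected_process):
    if selected_process and selected_process > 0:
        return {'display': 'block'}
    return {'display': 'none'}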
49.376271
1,301
0.506316
76d73eb99aeff1e081d5c5783ce96e09453f8979
4,046
py
Python
tests/unit/detection/test_detection_notebooks.py
titipakorn/computervision-recipes
815435763c0cdce991b7511fd8d39f71c64ccea8
[ "MIT" ]
2
2020-03-03T15:29:50.000Z
2022-02-21T12:45:24.000Z
tests/unit/detection/test_detection_notebooks.py
titipakorn/computervision-recipes
815435763c0cdce991b7511fd8d39f71c64ccea8
[ "MIT" ]
null
null
null
tests/unit/detection/test_detection_notebooks.py
titipakorn/computervision-recipes
815435763c0cdce991b7511fd8d39f71c64ccea8
[ "MIT" ]
2
2020-05-06T14:07:00.000Z
2022-03-21T19:54:32.000Z
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. # This test is based on the test suite implemented for Recommenders project # https://github.com/Microsoft/Recommenders/tree/master/tests import papermill as pm import pytest import scrapbook as sb from utils_cv.common.data import unzip_url from utils_cv.detection.data import Urls # Unless manually modified, python3 should be # the name of the current jupyter kernel # that runs on the activated conda environment KERNEL_NAME = "python3" OUTPUT_NOTEBOOK = "output.ipynb"
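The tests themselves are cut off in this record; a hedged sketch of the papermill/scrapbook pattern the imports and constants point to (the notebook fixture and the 'training_losses' scrap name are assumptions, only KERNEL_NAME and OUTPUT_NOTEBOOK come from the module):

# Illustrative sketch, not the original test body.
def test_detection_notebook_runs(detection_notebooks):  # hypothetical fixture
    notebook_path = detection_notebooks["00"]
    pm.execute_notebook(
        notebook_path,
        OUTPUT_NOTEBOOK,
        parameters=dict(PM_VERSION=pm.__version__),
        kernel_name=KERNEL_NAME,
    )
    nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)
    assert len(nb_output.scraps["training_losses"].data) > 0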
29.532847
75
0.686357
76d787aa0fb3effb59ce8288a064c7de0d40a573
524
py
Python
configs/HDR/hdr/retinanet_r50_fpn_1x_coco_hdr_minmax_glob_gamma_2.py
ismailkocdemir/mmdetection
4ac7e76dc66be7c97a8ca2c5f8a8e71434e3d823
[ "Apache-2.0" ]
null
null
null
configs/HDR/hdr/retinanet_r50_fpn_1x_coco_hdr_minmax_glob_gamma_2.py
ismailkocdemir/mmdetection
4ac7e76dc66be7c97a8ca2c5f8a8e71434e3d823
[ "Apache-2.0" ]
null
null
null
configs/HDR/hdr/retinanet_r50_fpn_1x_coco_hdr_minmax_glob_gamma_2.py
ismailkocdemir/mmdetection
4ac7e76dc66be7c97a8ca2c5f8a8e71434e3d823
[ "Apache-2.0" ]
null
null
null
_base_ = [ '../retinanet_r50_fpn_1x_coco.py', '../../_base_/datasets/hdr_detection_minmax_glob_gamma.py', ] # optimizer # lr is set for a batch size of 8 optimizer = dict(type='SGD', lr=0.0005, momentum=0.9, weight_decay=0.0001) optimizer_config = dict(grad_clip=None) # dict(grad_clip=dict(max_norm=35, norm_type=2)) # learning policy lr_config = dict( policy='step', warmup='linear', warmup_iters=500, warmup_ratio=0.001, step=[10]) runner = dict( type='EpochBasedRunner', max_epochs=20)
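The comment above ties lr=0.0005 to a total batch size of 8; a hedged illustration of the linear scaling rule that comment usually implies (the rule is an assumption about intent, not stated in the config itself):

# Sketch only: linear LR scaling relative to the reference batch size of 8.
base_lr, base_batch_size = 0.0005, 8

def scaled_lr(batch_size):
    return base_lr * batch_size / base_batch_size

print(scaled_lr(16))  # 0.001 for a 16-image total batch, under this rule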
26.2
88
0.694656