Dataset schema (column, dtype, observed range or values):

Column                                      Type            Range / values
hexsha                                      stringlengths   40 to 40
size                                        int64           5 to 2.06M
ext                                         stringclasses   11 values
lang                                        stringclasses   1 value
max_stars_repo_path                         stringlengths   3 to 251
max_stars_repo_name                         stringlengths   4 to 130
max_stars_repo_head_hexsha                  stringlengths   40 to 78
max_stars_repo_licenses                     listlengths     1 to 10
max_stars_count                             int64           1 to 191k
max_stars_repo_stars_event_min_datetime     stringlengths   24 to 24
max_stars_repo_stars_event_max_datetime     stringlengths   24 to 24
max_issues_repo_path                        stringlengths   3 to 251
max_issues_repo_name                        stringlengths   4 to 130
max_issues_repo_head_hexsha                 stringlengths   40 to 78
max_issues_repo_licenses                    listlengths     1 to 10
max_issues_count                            int64           1 to 116k
max_issues_repo_issues_event_min_datetime   stringlengths   24 to 24
max_issues_repo_issues_event_max_datetime   stringlengths   24 to 24
max_forks_repo_path                         stringlengths   3 to 251
max_forks_repo_name                         stringlengths   4 to 130
max_forks_repo_head_hexsha                  stringlengths   40 to 78
max_forks_repo_licenses                     listlengths     1 to 10
max_forks_count                             int64           1 to 105k
max_forks_repo_forks_event_min_datetime     stringlengths   24 to 24
max_forks_repo_forks_event_max_datetime     stringlengths   24 to 24
content                                     stringlengths   1 to 1.05M
avg_line_length                             float64         1 to 1.02M
max_line_length                             int64           3 to 1.04M
alphanum_fraction                           float64         0 to 1
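The records that follow conform to this schema, one value per field. As a minimal sketch of how rows with these columns could be streamed and inspected, assuming a Hugging Face datasets-style source (the dataset path below is a hypothetical placeholder, not taken from this dump):

    # Hypothetical loading sketch: "path/to/this-dataset" is a placeholder, not the real source.
    from datasets import load_dataset

    ds = load_dataset("path/to/this-dataset", split="train", streaming=True)

    for row in ds.take(3):
        # Each record pairs the raw file text ("content") with per-repo metadata.
        print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
        print(row["avg_line_length"], row["max_line_length"], row["alphanum_fraction"])

Streaming avoids materializing the full dataset locally; the same field names apply to an eager (non-streaming) load as well.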
cd8d8365ca2301a760424dae1ee2e706688adc1f
9,678
py
Python
main/views.py
QingShuiXiFan/Style-Transfer
f79951323cdfd0c72f2157623209d9067376306b
[ "Apache-2.0" ]
null
null
null
main/views.py
QingShuiXiFan/Style-Transfer
f79951323cdfd0c72f2157623209d9067376306b
[ "Apache-2.0" ]
null
null
null
main/views.py
QingShuiXiFan/Style-Transfer
f79951323cdfd0c72f2157623209d9067376306b
[ "Apache-2.0" ]
null
null
null
from django.shortcuts import render, render_to_response, redirect
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse, FileResponse
from django.urls import reverse
import os
from django.contrib.auth import authenticate, login, logout
# from django.contrib import auth
from django.template import RequestContext
from .forms import LoginForm, RegistrationForm
from django.contrib.auth.models import User
import hashlib  # python
from django.contrib.auth.hashers import make_password, check_password  # Django
from django.core.mail import send_mail
import imghdr  #
import time, datetime
from django.conf import settings
from .models import Pictures

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
STATIC_DIR = "common_static"
GPU_ISACTIVATED = True

# Create your views here.
# ip
#
#
#
36.247191
198
0.615726
cd8e00f631a120690eef589a528899913c4b3443
781
py
Python
edj/Spot_square.py
CircuitLaunch/Spot_Bootcamp
47735ce474a59c5478099f6095b68c46b77d3da6
[ "BSD-3-Clause" ]
null
null
null
edj/Spot_square.py
CircuitLaunch/Spot_Bootcamp
47735ce474a59c5478099f6095b68c46b77d3da6
[ "BSD-3-Clause" ]
null
null
null
edj/Spot_square.py
CircuitLaunch/Spot_Bootcamp
47735ce474a59c5478099f6095b68c46b77d3da6
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python3

from Spot import *
import time
from bosdyn.client import math_helpers

if __name__ == '__main__':
    spot = Spot()

    try:
        # It's ALIVE!
        spot.power_on()

        spot.move_to(1.0, 0.0, 0.0, math_helpers.Quat(), duration=5.0)
        time.sleep(5.0)
        spot.move_to(0.0, 1.0, 0.0, math_helpers.Quat(), duration=5.0)
        time.sleep(5.0)
        spot.move_to(-1.0, 0.0, 0.0, math_helpers.Quat(), duration=5.0)
        time.sleep(5.0)
        spot.move_to(0.0, -1.0, 0.0, math_helpers.Quat(), duration=5.0)
        time.sleep(5.0)

        # Power down
        spot.estop(graceful=True)
    except:
        print('Exception')
        print('Trying to make Python GC the Spot object')
        spot = None
        time.sleep(5.0)

    exit(0)
21.694444
71
0.577465
cd8f015fd57b190763452236dbe2f747d3309b7f
837
py
Python
tests/test_set.py
maxslarsson/tennis-probability
f26021b305e2b8abd87acad846454f7ce02e9199
[ "MIT" ]
null
null
null
tests/test_set.py
maxslarsson/tennis-probability
f26021b305e2b8abd87acad846454f7ce02e9199
[ "MIT" ]
null
null
null
tests/test_set.py
maxslarsson/tennis-probability
f26021b305e2b8abd87acad846454f7ce02e9199
[ "MIT" ]
null
null
null
import pytest

from tennis_probability import set, InvalidInput, InvalidProbability, NegativeNumber
27.9
84
0.628435
cd90fb8f4961d4f54d2eb80fcec8b04e412e1af3
626
py
Python
sources/classic/messaging_kombu/handlers.py
variasov/classic_messaging_kombu
c4191f3d1f788a39f50dc137eca1b67f3ee2af20
[ "MIT" ]
1
2021-11-12T08:19:53.000Z
2021-11-12T08:19:53.000Z
sources/classic/messaging_kombu/handlers.py
variasov/classic_messaging_kombu
c4191f3d1f788a39f50dc137eca1b67f3ee2af20
[ "MIT" ]
null
null
null
sources/classic/messaging_kombu/handlers.py
variasov/classic_messaging_kombu
c4191f3d1f788a39f50dc137eca1b67f3ee2af20
[ "MIT" ]
null
null
null
from abc import ABC, abstractmethod
from typing import Dict, Any, Callable

from kombu import Message

from classic.components import component

MessageBody = Dict[str, Any]
18.969697
58
0.680511
cd937e31435e325df9a3ac8d8fa5487807539935
1,440
py
Python
byceps/services/shop/order/event_service.py
GSH-LAN/byceps
ab8918634e90aaa8574bd1bb85627759cef122fe
[ "BSD-3-Clause" ]
33
2018-01-16T02:04:51.000Z
2022-03-22T22:57:29.000Z
byceps/services/shop/order/event_service.py
GSH-LAN/byceps
ab8918634e90aaa8574bd1bb85627759cef122fe
[ "BSD-3-Clause" ]
7
2019-06-16T22:02:03.000Z
2021-10-02T13:45:31.000Z
byceps/services/shop/order/event_service.py
GSH-LAN/byceps
ab8918634e90aaa8574bd1bb85627759cef122fe
[ "BSD-3-Clause" ]
14
2019-06-01T21:39:24.000Z
2022-03-14T17:56:43.000Z
"""
byceps.services.shop.order.event_service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""

from __future__ import annotations
from datetime import datetime
from typing import Sequence

from ....database import db

from .dbmodels.order_event import OrderEvent as DbOrderEvent, OrderEventData
from .transfer.models import OrderID


def create_event(
    event_type: str, order_id: OrderID, data: OrderEventData
) -> None:
    """Create an order event."""
    event = build_event(event_type, order_id, data)

    db.session.add(event)
    db.session.commit()


def create_events(
    event_type: str, order_id: OrderID, datas: Sequence[OrderEventData]
) -> None:
    """Create a sequence of order events."""
    events = [build_event(event_type, order_id, data) for data in datas]

    db.session.add_all(events)
    db.session.commit()


def build_event(
    event_type: str, order_id: OrderID, data: OrderEventData
) -> DbOrderEvent:
    """Assemble, but not persist, an order event."""
    now = datetime.utcnow()

    return DbOrderEvent(now, event_type, order_id, data)


def get_events_for_order(order_id: OrderID) -> list[DbOrderEvent]:
    """Return the events for that order."""
    return db.session \
        .query(DbOrderEvent) \
        .filter_by(order_id=order_id) \
        .order_by(DbOrderEvent.occurred_at) \
        .all()
26.181818
76
0.690972
cd95b58b744f084920dc507989ebf490290a8ec2
637
py
Python
app/models/columns/suit.py
abcnever/euchre-game
5446e345e0dfdcf83d5fe87c3d2cedc31b3ae669
[ "MIT" ]
1
2018-12-31T05:38:56.000Z
2018-12-31T05:38:56.000Z
app/models/columns/suit.py
abcnever/euchre-game
5446e345e0dfdcf83d5fe87c3d2cedc31b3ae669
[ "MIT" ]
4
2018-11-03T15:51:13.000Z
2019-01-12T21:09:23.000Z
app/models/columns/suit.py
abcnever/euchre-game
5446e345e0dfdcf83d5fe87c3d2cedc31b3ae669
[ "MIT" ]
null
null
null
from attr import attrs, attrib
import enum

from .enum import EnumColumn
21.233333
40
0.486656
cd977d3ad4e8e4d9141853e4e08a51d0ffa0f771
1,881
py
Python
dataset.py
sreza1/Diabetic-Retinopathy-Detection
75f10423ef161d3040756253a8ba0b9012e391b7
[ "MIT" ]
null
null
null
dataset.py
sreza1/Diabetic-Retinopathy-Detection
75f10423ef161d3040756253a8ba0b9012e391b7
[ "MIT" ]
null
null
null
dataset.py
sreza1/Diabetic-Retinopathy-Detection
75f10423ef161d3040756253a8ba0b9012e391b7
[ "MIT" ]
null
null
null
import config
import os
import pandas as pd
import numpy as np
from torch.utils.data import Dataset, DataLoader
from PIL import Image
from tqdm import tqdm


if __name__ == "__main__":
    """
    Test if everything works ok
    """
    dataset = DRDataset(
        images_folder="/data/images_resized_650",
        path_to_csv="/data/trainLabels.csv",
        transform = config.val_transforms
    )
    loader = DataLoader(
        dataset=dataset,
        batch_size=32,
        num_workers=6,
        shuffle=True,
        pin_memory=True
    )

    for x, label, file in tqdm(loader):
        print(x.shape)
        print(label.shape)
        import sys
        sys.exit
29.857143
84
0.617757
cd988eff24525966178311b4c694188e2f3b5038
507
py
Python
server/server.py
Filipos27/Celebrity_classification
802474516b9ecaee70c4019600572bbbbd8b582a
[ "MIT" ]
null
null
null
server/server.py
Filipos27/Celebrity_classification
802474516b9ecaee70c4019600572bbbbd8b582a
[ "MIT" ]
null
null
null
server/server.py
Filipos27/Celebrity_classification
802474516b9ecaee70c4019600572bbbbd8b582a
[ "MIT" ]
null
null
null
from flask import Flask, request, jsonify
import util

app = Flask(__name__)

if __name__ == "__main__":
    print("Starting Python Flask Server For Celebrity Image Classification")
    util.load_saved_artifacts()
    app.run(port=5000)
28.166667
77
0.710059
cd99a356df7305e9c0faf645726124d17a3abcde
41
py
Python
app.py
blogsley/blogsley-flask-site
40df6641cce8336d790549b7edac4f83d8b8fb8e
[ "MIT" ]
1
2020-12-18T03:52:25.000Z
2020-12-18T03:52:25.000Z
app.py
blogsley/blogsley-flask-site
40df6641cce8336d790549b7edac4f83d8b8fb8e
[ "MIT" ]
3
2020-05-04T07:46:54.000Z
2022-02-10T19:39:19.000Z
app.py
blogsley/blogsley-flask-site
40df6641cce8336d790549b7edac4f83d8b8fb8e
[ "MIT" ]
null
null
null
from blogsley_site.app import create_app
20.5
40
0.878049
cd9a0d84b5dfdc848833f7aaab7d4b2009ed8946
421
py
Python
awards/migrations/0003_project_project_photo.py
kimutaiamos/Gold-Awwards
1bf12cf0cdbf250251664f067c5397160fa5ed41
[ "MIT" ]
null
null
null
awards/migrations/0003_project_project_photo.py
kimutaiamos/Gold-Awwards
1bf12cf0cdbf250251664f067c5397160fa5ed41
[ "MIT" ]
null
null
null
awards/migrations/0003_project_project_photo.py
kimutaiamos/Gold-Awwards
1bf12cf0cdbf250251664f067c5397160fa5ed41
[ "MIT" ]
null
null
null
# Generated by Django 3.2 on 2021-12-12 18:38

from django.db import migrations, models
22.157895
79
0.615202
cd9a1323c7a15a9388bdc8532ce60de3beb414fa
7,827
py
Python
tests/e2e/performance/csi_tests/test_pvc_bulk_clone_performance.py
annagitel/ocs-ci
284fe04aeb6e3d6cb70c99e65fec8ff1b1ea1dd5
[ "MIT" ]
1
2021-03-12T09:01:36.000Z
2021-03-12T09:01:36.000Z
tests/e2e/performance/csi_tests/test_pvc_bulk_clone_performance.py
annagitel/ocs-ci
284fe04aeb6e3d6cb70c99e65fec8ff1b1ea1dd5
[ "MIT" ]
1
2021-08-30T20:06:00.000Z
2021-09-30T20:05:46.000Z
tests/e2e/performance/csi_tests/test_pvc_bulk_clone_performance.py
annagitel/ocs-ci
284fe04aeb6e3d6cb70c99e65fec8ff1b1ea1dd5
[ "MIT" ]
null
null
null
"""
Test to measure pvc scale creation time. Total pvc count would be 50, 1 clone per PVC
Total number of clones in bulk will be 50
"""
import logging
import pytest

from ocs_ci.utility import utils
from ocs_ci.ocs.perftests import PASTest
from ocs_ci.framework.testlib import performance
from ocs_ci.helpers import helpers, performance_lib
from ocs_ci.ocs import constants, scale_lib
from ocs_ci.ocs.resources import pvc, pod
from ocs_ci.ocs.resources.objectconfigfile import ObjectConfFile

log = logging.getLogger(__name__)
38.747525
118
0.614028
269506ce70f1d96fec0d9578b84d16a8d1ec4d2d
72
py
Python
back/app/models/__init__.py
davidroeca/simple_graphql
a6b2b20b6458b6b2fa9363a542015ab42761bd98
[ "MIT" ]
null
null
null
back/app/models/__init__.py
davidroeca/simple_graphql
a6b2b20b6458b6b2fa9363a542015ab42761bd98
[ "MIT" ]
null
null
null
back/app/models/__init__.py
davidroeca/simple_graphql
a6b2b20b6458b6b2fa9363a542015ab42761bd98
[ "MIT" ]
null
null
null
from .database import db
from .user import User
from .post import Post
14.4
24
0.777778
269573fa22001d4ea70efb720d1a7ce5724057f7
3,520
py
Python
tools/noise_reduction.py
8igfive/MyASR
565267d9df4b56cfad5107632146aab8150a962d
[ "MIT" ]
null
null
null
tools/noise_reduction.py
8igfive/MyASR
565267d9df4b56cfad5107632146aab8150a962d
[ "MIT" ]
null
null
null
tools/noise_reduction.py
8igfive/MyASR
565267d9df4b56cfad5107632146aab8150a962d
[ "MIT" ]
null
null
null
import argparse
from genericpath import exists
import os
import time
import re
from tqdm import tqdm
import numpy as np
from scipy.io import wavfile
from wiener_scalart import wienerScalart

TIME = time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime())
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
WORKPLACE_DIR = os.path.dirname(CURRENT_DIR)
DUMP_DIR = os.path.join(WORKPLACE_DIR, os.path.join('dump', TIME))
DUMP_FEAT = 'feat_{}.scp'.format(TIME)
DUMP_TEXT = 'text_{}'.format(TIME)
FEAT_FORMAT = r'\s?(.+?)\s+(.+?\.wav)'
intMap = {np.dtype('int8') : (0x7f, -0x80),
          np.dtype('int16') : (0x7fff, -0x8000),
          np.dtype('int32') : (0x7fffffff, -0x8000000),
          np.dtype('int64') : (0x7fffffffffffffff, -0x8000000000000000)}

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--feat', type=str, default=None, help='feat file path')
    parser.add_argument('-t', '--text', type=str, default=None, help='text file path')
    parser.add_argument('-dd', '--dumpDir', type=str, default=DUMP_DIR, help='the directory where holds new .wav files')
    parser.add_argument('-df', '--dumpFeat', type=str, default=os.path.join(DUMP_DIR, DUMP_FEAT), help='dump feat file path')
    parser.add_argument('-dt', '--dumpText', type=str, default=os.path.join(DUMP_DIR, DUMP_TEXT), help='dump text file path')
    parser.add_argument('-n', '--noiseLength', type=float, default=0.25, help='the noise time length at the beggining of the audio')
    parser.add_argument('-db', '--debug', action='store_true', help='print debug message')
    args = parser.parse_args()

    main(args)
40.930233
132
0.65142
2696d944b45b7b26bd7dbbe253779f41871a415a
7,779
py
Python
islandGen.py
Popcorn05/IslandGen
a06821c1db8f33befb1fb3db32fd2e18d323a23a
[ "MIT" ]
null
null
null
islandGen.py
Popcorn05/IslandGen
a06821c1db8f33befb1fb3db32fd2e18d323a23a
[ "MIT" ]
null
null
null
islandGen.py
Popcorn05/IslandGen
a06821c1db8f33befb1fb3db32fd2e18d323a23a
[ "MIT" ]
null
null
null
#Import libraries import random import os import noise import numpy import math import sys from chunks import Chunks as chk from PIL import Image import subprocess from scipy.misc import toimage import threading random.seed(os.urandom(6)) #Delete old chunks filelist = [ f for f in os.listdir("world/") if f.endswith(".chunk") ] #Delete previous world files for f in filelist: os.remove(os.path.join("world/", f)) #Functions #Colours dwaterCol = [54, 137, 245] waterCol = [67, 146, 245] dsandCol = [224, 214, 164] sandCol = [247, 232, 176] rockCol = [209, 209, 209] grassCol = [37, 170, 77] dgrassCol = [34, 161, 63] treeCol = [10, 122, 42] mountCol = [74, 62, 36] mountRockCol = [56, 48, 30] snowCol = [245, 254, 255] #Control Variables a = sys.argv if len(a) > 1: gridSize = int(a[1]) scale = float(a[2]) octaves = int(a[3]) persistance = float(a[4]) lacunarity = float(a[5]) thres = float(a[6]) else: gridSize = 1024 #Side length scale = 250.0 octaves = 6 persistance = 0.5 lacunarity = 2.0 thres = 0.08 #Generate base noise, Apply gradient im = Image.open("gradient/circle_grad.png") circle_grad = im.convert("L") main = numpy.zeros((gridSize,gridSize)) #Init arrays mainNoise = numpy.zeros_like(main) seed = random.randint(0,200) #Gen seed for y in range(gridSize): for x in range(gridSize): main[y][x] = noise.pnoise2(y/scale,x/scale,octaves=octaves,persistence=persistance,lacunarity=lacunarity,repeatx=gridSize,repeaty=gridSize,base=seed) #Set noise mainNoise[y][x] = (main[y][x] * mapVal(circle_grad.getpixel((round((1024/gridSize)*x),round((1024/gridSize)*y))), 0, 255, -0.05, 1)) #Apply gradient to noise if mainNoise[y][x] > 0: mainNoise[y][x] *= 20 #Amplify max_grad = numpy.max(mainNoise) mainNoise = mainNoise / max_grad #Weird even out math thing #Lay base display = numpy.zeros((gridSize//16,gridSize//16)+(16,16)+(3,)) processed = numpy.zeros((gridSize//16,gridSize//16), dtype=bool) passOver = numpy.zeros((gridSize//16,gridSize//16), dtype=bool) import time start = time.time() for cy in range(gridSize//16): for cx in range(gridSize//16): print(str(cy) + " " + str(cx)) if processed[cy][cx] == False: processed[cy][cx] = True for y in range(16): for x in range(16): m = mainNoise[y + (16*cy)][x + (16*cx)] #Set iterator to value of main array and check if meets certain thresholds to set colours if m < thres + 0.015: m = dwaterCol elif m < thres + 0.11: m = waterCol elif m < thres + 0.12: m = dsandCol passOver[cy][cx] = True elif m < thres + 0.15: m = sandCol passOver[cy][cx] = True elif m < thres + 0.28: m = grassCol passOver[cy][cx] = True elif m < thres + 0.46: m = dgrassCol passOver[cy][cx] = True elif m < thres + 0.78: m = mountCol passOver[cy][cx] = True elif m < thres + 1.0: m = snowCol passOver[cy][cx] = True display[cy][cx][y][x] = m #Second pass (Natural features) featSeed = random.randint(0,100) #Generate seed for cy in range(gridSize//16): for cx in range(gridSize//16): if passOver[cy][cx] == True: for y in range(16): for x in range(16): m = display[cy][cx][y][x] p = noise.pnoise2((y + (cy * 16))/(scale/2.5),(x + (cx * 16))/(scale/2.5),octaves=10,persistence=0.55,lacunarity=1.55,repeatx=gridSize,repeaty=gridSize,base=featSeed) #Get pond noise if all(m == grassCol) or all(m == dsandCol) or all(m == sandCol): #If light grass or beach generate pond if p > 0.17: if p < 0.25: m = sandCol elif p < 1.0: m = waterCol display[cy][cx][y][x] = m #Third pass (Structures) structScale = int(scale // 200) for cy in range(gridSize//16): for cx in range(gridSize//16): if passOver[cy][cx] == True: for y in 
range(16): for x in range(16): #Place rocks on beach and mountnain m = display[cy][cx][y][x] if all(m == sandCol): if percentChance(2) == True: addRock(display,cx,cy,x,y,structScale,rockCol) elif all(m == grassCol): if percentChance(5) == True: addTree(display,cx,cy,x,y,structScale) elif all(m == dgrassCol): if percentChance(20) == True: addTree(display,cx,cy,x,y,structScale) elif all(m == mountCol): if percentChance(0.01) == True: addRock(display,cx,cy,x,y,structScale,mountRockCol) #Save for cy in range(gridSize//16): for cx in range(gridSize//16): chk.writeChunk(cx,cy,display) #Display toimage(chk.readChunkArray(gridSize,display)).show()
33.530172
202
0.549556
26978b08939270913183c7dd0c609cfa2e52874f
4,363
py
Python
reagent/gym/tests/test_gym_replay_buffer.py
dmitryvinn/ReAgent
f98825b9d021ec353a1f9087840a05fea259bf42
[ "BSD-3-Clause" ]
1,156
2019-10-02T12:15:31.000Z
2022-03-31T16:01:27.000Z
reagent/gym/tests/test_gym_replay_buffer.py
dmitryvinn/ReAgent
f98825b9d021ec353a1f9087840a05fea259bf42
[ "BSD-3-Clause" ]
448
2019-10-03T13:40:52.000Z
2022-03-28T07:49:15.000Z
reagent/gym/tests/test_gym_replay_buffer.py
dmitryvinn/ReAgent
f98825b9d021ec353a1f9087840a05fea259bf42
[ "BSD-3-Clause" ]
214
2019-10-13T13:28:33.000Z
2022-03-24T04:11:52.000Z
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.

import logging

import numpy.testing as npt
from reagent.core.parameters import ProblemDomain
from reagent.gym.envs import Gym
from reagent.gym.envs.wrappers.simple_minigrid import SimpleObsWrapper
from reagent.gym.utils import create_df_from_replay_buffer
from reagent.preprocessing.sparse_to_dense import PythonSparseToDenseProcessor
from reagent.test.base.horizon_test_base import HorizonTestBase

logger = logging.getLogger(__name__)
33.821705
86
0.60165
269836dc4bbab3746b4c28419c5eabb4308cb54c
37
py
Python
parser/__init__.py
jbudis/dante
90177c33825d5f9ce3fba5463092fbcf20b72fe2
[ "Apache-2.0" ]
4
2018-09-28T14:50:47.000Z
2021-08-09T12:46:12.000Z
parser/__init__.py
jbudis/dante
90177c33825d5f9ce3fba5463092fbcf20b72fe2
[ "Apache-2.0" ]
6
2019-01-02T13:08:31.000Z
2021-03-25T21:45:40.000Z
parser/__init__.py
jbudis/dante
90177c33825d5f9ce3fba5463092fbcf20b72fe2
[ "Apache-2.0" ]
1
2017-12-12T10:38:26.000Z
2017-12-12T10:38:26.000Z
from parser.readfile import ReadFile
18.5
36
0.864865
2698d48d436f6968997ba06d73587a502b5f3874
535
py
Python
main.py
Cynicism-lab/hhu-daily-health-common
1959beee61f2895166ac0be92b5817cbe278ef51
[ "MIT" ]
14
2021-02-03T14:38:15.000Z
2022-02-05T08:48:41.000Z
main.py
zhang-zimin/nanwenguidaka
e426ecee8758d70b20cf2a77dc87a6f949196927
[ "MIT" ]
4
2021-04-15T15:14:29.000Z
2022-03-15T02:29:04.000Z
main.py
zhang-zimin/nanwenguidaka
e426ecee8758d70b20cf2a77dc87a6f949196927
[ "MIT" ]
52
2021-02-03T14:38:17.000Z
2022-03-29T09:19:12.000Z
#!/usr/bin/env python
# coding: utf-8

# In[1]:

import requests
from datetime import timezone
from datetime import timedelta
from datetime import datetime
import hhu
import os

# In[2]:

utc_time = datetime.utcnow().replace(tzinfo=timezone.utc)
sh_tz = timezone(timedelta(hours=8),name='Asia/Shanghai')
beijing_now = utc_time.astimezone(sh_tz)
datestr = datetime.strftime(beijing_now,'%F')
timestr = datetime.strftime(beijing_now,'%H:%M:%S')
year = datestr[0:4]
month = datestr[5:7]
day = datestr[8:10]
time = timestr

hhu.hhu()
17.258065
57
0.736449
269a18ede77adaabe0e01c16057d910f3519fa89
30,573
py
Python
depparse.py
viadee/eric
680508cc5bf2d322638c6cf2c466a06c3c1f33d4
[ "BSD-3-Clause-Clear", "Apache-2.0", "CC0-1.0", "BSD-3-Clause" ]
4
2020-04-07T07:05:02.000Z
2020-09-23T14:23:16.000Z
depparse.py
viadee/eric
680508cc5bf2d322638c6cf2c466a06c3c1f33d4
[ "BSD-3-Clause-Clear", "Apache-2.0", "CC0-1.0", "BSD-3-Clause" ]
null
null
null
depparse.py
viadee/eric
680508cc5bf2d322638c6cf2c466a06c3c1f33d4
[ "BSD-3-Clause-Clear", "Apache-2.0", "CC0-1.0", "BSD-3-Clause" ]
1
2021-12-27T03:00:44.000Z
2021-12-27T03:00:44.000Z
import pickle import stanza import test_stuff from datetime import datetime from dictionary import cd, dictionary, nlp_dictionary, ph_outcome, ph_key, ph_value, ph_dvalue, ph_subject import eric_nlp #does not do preprocessing ''' creates a matrix with: columns: roots rows: count how often that root occurs for a function ''' #all_roots is a dict from words to another dict from function ids to ints #roots is expected to be a dict from words to ints #attempt 1: how many nodes do they share, regardless of node depth #a tree is a list of dictionarys. every dictionary represents a word of the sentence. key-value-pairs are the attributes of that word. ''' takes a tuple as in "deprel" in dictionary.nlp_dictionary. returns list of tuples. if master_tuple was a simple tuple, the list only contains that tuple if master_tuple has lists as elements, these get split so that every tuple in the returned list has only strings as elements Example: in: (["predict", "divinate"], "obl", ["data", "person"]) out: [ ("predict", "obl", "data"), ("predict", "obl", "person"), ("divinate", "obl", "data"), ("divinate", "obl", "person") ] note: returning list has x elements with x being the product of all three lengths. (here 2*1*2 = 4) ''' ''' takes a word-object of a depparse-word and a string element from a tuple (not a list-element. use generate_sub_tuples() first) checks if dictionary.cd (by default "#") is in tuple_element. If so, it extracts which attribute (i.e. in front of "#") is wanted. then returns the corresponding attribute value of word_object and the part right of "#" in tuple_element if "#" was not in tuple_element, it returns tuple_element as it is and the default attribute of word_object also needs an eric, to invoke replacement of placeholders ''' ''' word_attribute should be from the user input, tuple_attribute one element of a tuple from the depparse templates in dictionary.nlp_dictionary it's called attribute, not element because it should only be called at the end of get_comparison_attributes() which extracts attributes from word objects (e.g. the lemma, upos or deprel, etc.) word_attribute needs to be included even though it will not have any placeholders. In the case, that "<outcome>" is in tuple_attribute, word_attribute needs to be checked if it is a different form of the possible outcomes. This gets checked via the eric.model_columns["class"]["phrasings"] dict which has all possible outcomes as keys (here "survived" and "died") and stores different forms of those as the values of that dict as list. Here ["survive", "survives"] and ["die", "dies"]. ''' replace_depparse_placeholders("", "", "") #looks for the head/mother node of word in tree and returns it (or a representing dictionary if head is root). #returns dict since root is not really represented in the word objects of depparse #takes a depparse tree t and goes through the depparse tree templates in dictionary.nlp_dictionary #returns a list of tuples (fct_id, tree template) with a tuple for every found match. #expects a list of tuples with two elements each: 1st fct_id, 2nd the tree template that matched, i.e. a list of tuples #that list should represend a ranking from most likely (lowest index) to least likey (highest index) #it then goes through all templates and sorts them into templates that contain a lemma:not and and those that do not #then creates a ranking again for both, separately #then, both lists get concatenated with the negated tuples at the lower indices. 
So a short but negated template will have priority over a longer, non-negated one #returns that list #t is a tree like in tree_compare(t1, t2) ''' if you thought of new sentence while analysing the output and just depparsed them over debug console and included them in the output_file, this function will help. It can read your originally used input again, then the output file, compare sentences and store all new ones, i.e. the manually analysed sentences in a new input_file. Also, it will then overwrite the output file to update the root counts ''' if __name__ == "__main__": #main() debug_depparsed_sentences_to_console() quit() lines = test_stuff.read_input_from_file("data\\wrongly_accused.txt") sentences = [x[1] for x in lines] for s in sentences: print(s) print("//////////") sp = init_stanza("en") out, root = depparse(sentences, sp) test_stuff.list_to_file(out, "output\\depparse\\wrongly_accused_out.txt") quit() #test_some_sentences() for d in nlp_dictionary: print(d["id"]) try: x = d['depparse'][0] print("\t---") except Exception as e: print("\tNOTHING") sp = init_stanza("en") input_files = [f"data\\umfrage_input_{x}_cleaned.txt" for x in range(1,5)] fct = "whatif-gl" update_depparse_output(input_files, f"output\\depparse\\{fct}.txt", fct, "data\\manually_added.txt", sp=sp)
40.387054
384
0.603735
269ad28a75a19ae401ecbe624997f530c5904d6d
706
py
Python
ch03/simple_cbow_pytorch.py
tomy-0000/deep-learning-from-scratch-2
3d3d7fd614b8021499ffc103199be5e32622717e
[ "MIT" ]
null
null
null
ch03/simple_cbow_pytorch.py
tomy-0000/deep-learning-from-scratch-2
3d3d7fd614b8021499ffc103199be5e32622717e
[ "MIT" ]
null
null
null
ch03/simple_cbow_pytorch.py
tomy-0000/deep-learning-from-scratch-2
3d3d7fd614b8021499ffc103199be5e32622717e
[ "MIT" ]
null
null
null
# coding: utf-8
import torch.nn as nn
27.153846
54
0.609065
269c16b6afd598ff0e05a59d38e14e46ebde748b
7,814
py
Python
modules/input_output.py
nicolasying/WordNet-Embeddings
a6a5782dca97376e487df41fb83542729f284197
[ "MIT" ]
null
null
null
modules/input_output.py
nicolasying/WordNet-Embeddings
a6a5782dca97376e487df41fb83542729f284197
[ "MIT" ]
null
null
null
modules/input_output.py
nicolasying/WordNet-Embeddings
a6a5782dca97376e487df41fb83542729f284197
[ "MIT" ]
null
null
null
# coding=utf-8
#! /usr/bin/env python3.4
"""
MIT License
Copyright (c) 2018 NLX-Group

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

This code reads wordnet data and index files
data_file_reader(file_name):
    extract data from wordnet data files saved in "data/input" directory
    output is
    1- a dictionary with key = synsetoffsets
       data = (synsetWrds, synsetConnections, synsetRelationTypes, connectedSynsetPos, gloss)
    2- and offset_list

[email protected]
"""

import os, sys
import numpy as np
from progressbar import ProgressBar, Percentage, Bar
36.344186
126
0.572562
269c9b7326d9d1d9ee7b7fac3ea362f209cd1e0f
1,599
py
Python
tests/test_formats.py
antmicro/raviewer
7529664d37e994d4c2f4c450a5577b79d73c4bb0
[ "Apache-2.0" ]
12
2021-11-18T09:38:34.000Z
2022-03-24T19:33:44.000Z
tests/test_formats.py
antmicro/raviewer
7529664d37e994d4c2f4c450a5577b79d73c4bb0
[ "Apache-2.0" ]
1
2022-02-14T12:07:02.000Z
2022-03-21T19:29:11.000Z
tests/test_formats.py
antmicro/raviewer
7529664d37e994d4c2f4c450a5577b79d73c4bb0
[ "Apache-2.0" ]
null
null
null
"""Module for testing formats on resources entities"""

from raviewer.src.core import (get_displayable, load_image, parse_image)
from terminaltables import AsciiTable
from raviewer.image.color_format import AVAILABLE_FORMATS
import os
import pkg_resources
import time
import pytest


def test_all(formats):
    """Test all formats"""
    print("Testing all formats, It may take a while...")
    table_data = [["Format", "Passed", "Performance"]]
    start_range = 800
    end_range = 810
    for color_format in formats.keys():
        file_path = pkg_resources.resource_filename('resources',
                                                    color_format + "_1000_750")
        passed_results = 0
        format_performance = 0
        start = time.time()
        for width in range(start_range, end_range):
            try:
                if not os.path.exists(file_path):
                    break
                img = load_image(file_path)
                img = parse_image(img.data_buffer, color_format, width)
                get_displayable(img)
                passed_results += 1
            except:
                continue
        end = time.time()
        #Stats
        format_performance = "{:.3f}".format(round(end - start, 3))
        table_data.append([
            color_format, "{}/{}".format(passed_results,
                                         end_range - start_range),
            format_performance
        ])
    table = AsciiTable(table_data)
    table.title = 'Test all formats'
    print(table.table)
31.352941
79
0.590994
269e4231c77fb4d48c447cd279a2ecdca59c4ba8
702
py
Python
kqueen_ui/utils/wrappers.py
Richard-Barrett/kqueen-ui
f6b41edddd5130e5cd79773640942e9a824bbae1
[ "MIT" ]
null
null
null
kqueen_ui/utils/wrappers.py
Richard-Barrett/kqueen-ui
f6b41edddd5130e5cd79773640942e9a824bbae1
[ "MIT" ]
null
null
null
kqueen_ui/utils/wrappers.py
Richard-Barrett/kqueen-ui
f6b41edddd5130e5cd79773640942e9a824bbae1
[ "MIT" ]
null
null
null
from flask import request
from flask import redirect
from flask import session
from flask import url_for

from functools import wraps
29.25
77
0.660969
269f222cd807eb64aa23f3a0beb347f29492e7b2
4,089
py
Python
dqc/utils/safeops.py
Jaikinator/dqc
47c964c7d1323a35f4f69521d40476c41843810e
[ "Apache-2.0" ]
39
2021-05-31T17:01:23.000Z
2022-03-23T19:20:35.000Z
dqc/utils/safeops.py
Jaikinator/dqc
47c964c7d1323a35f4f69521d40476c41843810e
[ "Apache-2.0" ]
14
2021-09-01T13:39:11.000Z
2022-03-13T16:45:39.000Z
dqc/utils/safeops.py
Jaikinator/dqc
47c964c7d1323a35f4f69521d40476c41843810e
[ "Apache-2.0" ]
6
2021-07-16T09:08:29.000Z
2022-02-24T01:13:54.000Z
import math
import torch
from typing import Union, Optional, Tuple
from dqc.utils.datastruct import ZType

eps = 1e-12

########################## safe operations ##########################

########################## occupation number gradients ##########################

########################## other tensor ops ##########################

def safe_cdist(a: torch.Tensor, b: torch.Tensor, add_diag_eps: bool = False,
               diag_inf: bool = False):
    # returns the L2 pairwise distance of a and b
    # a: (*BA, na, ndim)
    # b: (*BB, nb, ndim)
    # returns: (*BAB, na, nb)
    square_mat = a.shape[-2] == b.shape[-2]

    dtype = a.dtype
    device = a.device
    ab = a.unsqueeze(-2) - b.unsqueeze(-3)  # (*BAB, na, nb, ndim)

    # add the diagonal with a small eps to safeguard from nan
    if add_diag_eps:
        if not square_mat:
            raise ValueError("Enabling add_diag_eps for non-square result matrix is invalid")
        ab = ab + torch.eye(ab.shape[-2], dtype=dtype, device=device).unsqueeze(-1) * eps

    ab = ab.norm(dim=-1)  # (*BAB, na, nb)

    # replace the diagonal with infinite (usually used for coulomb matrix)
    if diag_inf:
        if not square_mat:
            raise ValueError("Enabling diag_inf for non-square result matrix is invalid")

        infdiag = torch.eye(ab.shape[-1], dtype=dtype, device=device)
        idiag = infdiag.diagonal()
        idiag[:] = float("inf")
        ab = ab + infdiag

    return ab
36.837838
106
0.595011
269fd1b0bc7030c4e1f6c761faa1320701f6d9dc
4,713
py
Python
extra_envs/extra_envs/envs/point.py
Fanshaoliu/safe_rl
16ab54bebb70a86a80e1bfadb62656afb1547965
[ "MIT" ]
13
2021-06-19T03:19:36.000Z
2022-03-29T10:44:37.000Z
extra_envs/extra_envs/envs/point.py
Fanshaoliu/safe_rl
16ab54bebb70a86a80e1bfadb62656afb1547965
[ "MIT" ]
5
2021-06-16T20:06:51.000Z
2021-12-14T22:55:54.000Z
extra_envs/extra_envs/envs/point.py
Fanshaoliu/safe_rl
16ab54bebb70a86a80e1bfadb62656afb1547965
[ "MIT" ]
4
2021-11-03T13:30:08.000Z
2022-01-05T11:16:47.000Z
import numpy as np
import gym
from gym import spaces
from gym.utils import seeding
38.317073
92
0.569064
26a2b53d4301d0838e61a2558348848ec6cc08bb
2,855
py
Python
src/model/model_basic.py
trungnt13/digisami_journal
671486d0fe7b65cad80daf8e8b96d475245c5fed
[ "Apache-2.0" ]
null
null
null
src/model/model_basic.py
trungnt13/digisami_journal
671486d0fe7b65cad80daf8e8b96d475245c5fed
[ "Apache-2.0" ]
null
null
null
src/model/model_basic.py
trungnt13/digisami_journal
671486d0fe7b65cad80daf8e8b96d475245c5fed
[ "Apache-2.0" ]
null
null
null
from __future__ import print_function, division, absolute_import

from odin import nnet as N, backend as K
import tensorflow as tf
31.722222
64
0.62732
26a4b307dfc5e770b6a75ad04eed2871c82ad2a0
174
py
Python
Python/CrashCourse/mongodb-quickstart-course/src/starter_code_snake_bnb/src/infrastructure/state.py
Mujtaba-Biyabani/Research
211f57812786a151ce8c3b1c3ed379fd1ba4e2f9
[ "Unlicense" ]
52
2017-12-06T10:44:05.000Z
2021-11-24T10:44:16.000Z
Python/CrashCourse/mongodb-quickstart-course/src/starter_code_snake_bnb/src/infrastructure/state.py
Mujtaba-Biyabani/Research
211f57812786a151ce8c3b1c3ed379fd1ba4e2f9
[ "Unlicense" ]
8
2018-01-29T08:32:34.000Z
2019-01-02T05:56:51.000Z
Python/CrashCourse/mongodb-quickstart-course/src/starter_code_snake_bnb/src/infrastructure/state.py
Mujtaba-Biyabani/Research
211f57812786a151ce8c3b1c3ed379fd1ba4e2f9
[ "Unlicense" ]
125
2018-11-20T07:56:22.000Z
2022-03-16T06:32:42.000Z
active_account = None
15.818182
49
0.683908
26a6c6bf63ea22f6476519c7dcbec3b7eb479136
550
py
Python
generators/activerecord/templates/models/abstract_model.py
afeiship/gx
9a3b566f64147eb1b6ffc753e2ebdf676f8ddef6
[ "MIT" ]
null
null
null
generators/activerecord/templates/models/abstract_model.py
afeiship/gx
9a3b566f64147eb1b6ffc753e2ebdf676f8ddef6
[ "MIT" ]
null
null
null
generators/activerecord/templates/models/abstract_model.py
afeiship/gx
9a3b566f64147eb1b6ffc753e2ebdf676f8ddef6
[ "MIT" ]
null
null
null
from orator import Model
import pendulum
22
44
0.589091
26a919609f85889cbb4dc125397fcb5ff64f815f
220
py
Python
blank/build.py
davidbelliott/whitgl
bfa1a33b0059b722a773e513653ff802c8dc7d6f
[ "MIT" ]
26
2016-01-09T16:46:53.000Z
2021-05-17T02:41:13.000Z
blank/build.py
davidbelliott/whitgl
bfa1a33b0059b722a773e513653ff802c8dc7d6f
[ "MIT" ]
null
null
null
blank/build.py
davidbelliott/whitgl
bfa1a33b0059b722a773e513653ff802c8dc7d6f
[ "MIT" ]
3
2016-02-19T06:05:19.000Z
2021-03-10T02:19:35.000Z
import os
import sys
joinp = os.path.join

sys.path.insert(0, 'whitgl')
sys.path.insert(0, joinp('whitgl', 'input'))
import build
sys.path.insert(0, 'input')
import ninja_syntax

build.do_game('Game', '', ['png','ogg'])
18.333333
44
0.690909
26aabfb0114adf3aa767a0e26c7a937d741efc5e
9,018
py
Python
factom_core/blocks/entry_credit_block.py
sourcery-ai-bot/factom-core
186dca550d98d758e9f8dab878e6382153efeaf3
[ "MIT" ]
null
null
null
factom_core/blocks/entry_credit_block.py
sourcery-ai-bot/factom-core
186dca550d98d758e9f8dab878e6382153efeaf3
[ "MIT" ]
null
null
null
factom_core/blocks/entry_credit_block.py
sourcery-ai-bot/factom-core
186dca550d98d758e9f8dab878e6382153efeaf3
[ "MIT" ]
null
null
null
import hashlib
import struct
from dataclasses import dataclass, field
from typing import Dict, List, Union

from factom_core.block_elements.balance_increase import BalanceIncrease
from factom_core.block_elements.chain_commit import ChainCommit
from factom_core.block_elements.entry_commit import EntryCommit
from factom_core.utils import varint

from .directory_block import DirectoryBlock

ECIDTypes = Union[ChainCommit, EntryCommit, int]
36.216867
118
0.622422
26aacebd7375e02a1085b024169371e35da36aab
59
py
Python
project1/tests/q0.py
ucsb-int5/int5-f19-notebooks
5b3d1ee6964d9357f211f4706787403ec5a3079c
[ "MIT" ]
1
2019-10-02T21:43:12.000Z
2019-10-02T21:43:12.000Z
project1/tests/q0.py
ucsb-int5/int5-f19-notebooks
5b3d1ee6964d9357f211f4706787403ec5a3079c
[ "MIT" ]
3
2019-12-14T06:20:14.000Z
2019-12-14T07:12:33.000Z
project1/tests/q0.py
ucsb-int5/int5-f19-notebooks
5b3d1ee6964d9357f211f4706787403ec5a3079c
[ "MIT" ]
3
2019-10-02T18:36:06.000Z
2019-12-03T18:16:45.000Z
test = { 'name': 'q0', 'points': 0, 'suites': []}
19.666667
24
0.389831
26ab3cad8e013a6c3ea0f8a7d1ac3f6bf89a579f
24,208
py
Python
tests/core/inst_utils.py
cornell-brg/lizard
7f9a78a913e64b5cfdee3a26223539ad225bd6da
[ "BSD-3-Clause" ]
50
2019-05-22T08:43:15.000Z
2022-03-21T23:58:50.000Z
tests/core/inst_utils.py
cornell-brg/lizard
7f9a78a913e64b5cfdee3a26223539ad225bd6da
[ "BSD-3-Clause" ]
1
2019-07-27T18:51:52.000Z
2019-08-02T01:20:22.000Z
tests/core/inst_utils.py
cornell-brg/lizard
7f9a78a913e64b5cfdee3a26223539ad225bd6da
[ "BSD-3-Clause" ]
11
2019-12-26T06:00:48.000Z
2022-03-27T02:29:35.000Z
#========================================================================= # inst_utils #========================================================================= # Includes helper functions to simplify creating assembly tests. from pymtl import * from tests.context import lizard #------------------------------------------------------------------------- # print_asm #------------------------------------------------------------------------- # Pretty print a generated assembly syntax #------------------------------------------------------------------------- # gen_nops #------------------------------------------------------------------------- #------------------------------------------------------------------------- # gen_word_data #------------------------------------------------------------------------- #------------------------------------------------------------------------- # gen_hword_data #------------------------------------------------------------------------- #------------------------------------------------------------------------- # gen_byte_data #------------------------------------------------------------------------- #------------------------------------------------------------------------- # gen_rr_src01_template #------------------------------------------------------------------------- # Template for register-register instructions. We first write src0 # register and then write the src1 register before executing the # instruction under test. We parameterize the number of nops after # writing both src registers and the instruction under test to enable # using this template for testing various bypass paths. We also # parameterize the register specifiers to enable using this template to # test situations where the srce registers are equal and/or equal the # destination register. #------------------------------------------------------------------------- # gen_rr_src10_template #------------------------------------------------------------------------- # Similar to the above template, except that we reverse the order in # which we write the two src registers. #------------------------------------------------------------------------- # gen_rr_dest_dep_test #------------------------------------------------------------------------- # Test the destination bypass path by varying how many nops are # inserted between the instruction under test and reading the destination # register with a csrr instruction. #------------------------------------------------------------------------- # gen_rr_src1_dep_test #------------------------------------------------------------------------- # Test the source 1 bypass paths by varying how many nops are inserted # between writing the src1 register and reading this register in the # instruction under test. #------------------------------------------------------------------------- # gen_rr_src0_dep_test #------------------------------------------------------------------------- # Test the source 0 bypass paths by varying how many nops are inserted # between writing the src0 register and reading this register in the # instruction under test. #------------------------------------------------------------------------- # gen_rr_srcs_dep_test #------------------------------------------------------------------------- # Test both source bypass paths at the same time by varying how many nops # are inserted between writing both src registers and reading both # registers in the instruction under test. 
#------------------------------------------------------------------------- # gen_rr_src0_eq_dest_test #------------------------------------------------------------------------- # Test situation where the src0 register specifier is the same as the # destination register specifier. #------------------------------------------------------------------------- # gen_rr_src1_eq_dest_test #------------------------------------------------------------------------- # Test situation where the src1 register specifier is the same as the # destination register specifier. #------------------------------------------------------------------------- # gen_rr_src0_eq_src1_test #------------------------------------------------------------------------- # Test situation where the src register specifiers are the same. #------------------------------------------------------------------------- # gen_rr_srcs_eq_dest_test #------------------------------------------------------------------------- # Test situation where all three register specifiers are the same. #------------------------------------------------------------------------- # gen_rr_value_test #------------------------------------------------------------------------- # Test the actual operation of a register-register instruction under # test. We assume that bypassing has already been tested. #------------------------------------------------------------------------- # gen_rimm_template #------------------------------------------------------------------------- # Template for register-immediate instructions. We first write the src # register before executing the instruction under test. We parameterize # the number of nops after writing the src register and the instruction # under test to enable using this template for testing various bypass # paths. We also parameterize the register specifiers to enable using # this template to test situations where the srce registers are equal # and/or equal the destination register. #------------------------------------------------------------------------- # gen_rimm_dest_dep_test #------------------------------------------------------------------------- # Test the destination bypass path by varying how many nops are # inserted between the instruction under test and reading the destination # register with a csrr instruction. #------------------------------------------------------------------------- # gen_rimm_src_dep_test #------------------------------------------------------------------------- # Test the source bypass paths by varying how many nops are inserted # between writing the src register and reading this register in the # instruction under test. #------------------------------------------------------------------------- # gen_rimm_src_eq_dest_test #------------------------------------------------------------------------- # Test situation where the src register specifier is the same as the # destination register specifier. #------------------------------------------------------------------------- # gen_rimm_value_test #------------------------------------------------------------------------- # Test the actual operation of a register-immediate instruction under # test. We assume that bypassing has already been tested. #------------------------------------------------------------------------- # gen_imm_template #------------------------------------------------------------------------- # Template for immediate instructions. 
We parameterize the number of nops # after the instruction under test to enable using this template for # testing various bypass paths. #------------------------------------------------------------------------- # gen_imm_dest_dep_test #------------------------------------------------------------------------- # Test the destination bypass path by varying how many nops are # inserted between the instruction under test and reading the destination # register with a csrr instruction. #------------------------------------------------------------------------- # gen_imm_value_test #------------------------------------------------------------------------- # Test the actual operation of an immediate instruction under test. We # assume that bypassing has already been tested. #------------------------------------------------------------------------- # gen_br2_template #------------------------------------------------------------------------- # Template for branch instructions with two sources. We test two forward # branches and one backwards branch. The way we actually do the test is # we update a register to reflect the control flow; certain bits in this # register are set at different points in the program. Then we can check # the control flow bits at the end to see if only the bits we expect are # set (i.e., the program only executed those points that we expect). Note # that test also makes sure that the instruction in the branch delay slot # is _not_ executed. # We currently need the id to create labels unique to this test. We might # eventually allow local labels (e.g., 1f, 1b) as in gas. gen_br2_template_id = 0 #------------------------------------------------------------------------- # gen_br2_src1_dep_test #------------------------------------------------------------------------- # Test the source 1 bypass paths by varying how many nops are inserted # between writing the src1 register and reading this register in the # instruction under test. #------------------------------------------------------------------------- # gen_br2_src0_dep_test #------------------------------------------------------------------------- # Test the source 0 bypass paths by varying how many nops are inserted # between writing the src0 register and reading this register in the # instruction under test. #------------------------------------------------------------------------- # gen_br2_srcs_dep_test #------------------------------------------------------------------------- # Test both source bypass paths at the same time by varying how many nops # are inserted between writing both src registers and reading both # registers in the instruction under test. #------------------------------------------------------------------------- # gen_br2_src0_eq_src1_test #------------------------------------------------------------------------- # Test situation where the src register specifiers are the same. #------------------------------------------------------------------------- # gen_br2_value_test #------------------------------------------------------------------------- # Test the correct branch resolution based on various source values. # '''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' gen_jal_template_id = 0 #------------------------------------------------------------------------- # gen_ld_template #------------------------------------------------------------------------- # Template for load instructions. We first write the base register before # executing the instruction under test. 
We parameterize the number of # nops after writing the base register and the instruction under test to # enable using this template for testing various bypass paths. We also # parameterize the register specifiers to enable using this template to # test situations where the base register is equal to the destination # register. #------------------------------------------------------------------------- # gen_ld_dest_dep_test #------------------------------------------------------------------------- # Test the destination bypass path by varying how many nops are # inserted between the instruction under test and reading the destination # register with a csrr instruction. #------------------------------------------------------------------------- # gen_ld_base_dep_test #------------------------------------------------------------------------- # Test the base register bypass paths by varying how many nops are # inserted between writing the base register and reading this register in # the instruction under test. #------------------------------------------------------------------------- # gen_ld_base_eq_dest_test #------------------------------------------------------------------------- # Test situation where the base register specifier is the same as the # destination register specifier. #------------------------------------------------------------------------- # gen_ld_value_test #------------------------------------------------------------------------- # Test the actual operation of a register-register instruction under # test. We assume that bypassing has already been tested. # '''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' #------------------------------------------------------------------------- # gen_st_template #------------------------------------------------------------------------- # Template for load instructions. We first write the base register before # executing the instruction under test. We parameterize the number of # nops after writing the base register and the instruction under test to # enable using this template for testing various bypass paths. We also # parameterize the register specifiers to enable using this template to # test situations where the base register is equal to the destination # register. # test dependency in load of same address as store
33.207133
80
0.527801
26ab7ad4f1a2c561a6b79626d7ff5c41db51de8f
365
py
Python
python/concepts/compile-runtime.py
shanavas786/coding-fu
2f29781ab793b5932a951259afffbbe0078e8a8a
[ "CC0-1.0" ]
1
2021-02-12T03:49:13.000Z
2021-02-12T03:49:13.000Z
python/concepts/compile-runtime.py
shanavas786/coding-fu
2f29781ab793b5932a951259afffbbe0078e8a8a
[ "CC0-1.0" ]
null
null
null
python/concepts/compile-runtime.py
shanavas786/coding-fu
2f29781ab793b5932a951259afffbbe0078e8a8a
[ "CC0-1.0" ]
null
null
null
inn = func(1)
inn()
20.277778
47
0.635616
26ac0f2a19c349ef5a8b08d5da941091d8465553
487
py
Python
alpinelib/aws/aws_lambda.py
nbcnews/alpinelib
8e0d065611b69fdc431ca30ca1a257516670bcf9
[ "MIT" ]
null
null
null
alpinelib/aws/aws_lambda.py
nbcnews/alpinelib
8e0d065611b69fdc431ca30ca1a257516670bcf9
[ "MIT" ]
null
null
null
alpinelib/aws/aws_lambda.py
nbcnews/alpinelib
8e0d065611b69fdc431ca30ca1a257516670bcf9
[ "MIT" ]
null
null
null
import boto3

from .. import logging

logger = logging.getFormattedLogger()

lambda_client = boto3.client('lambda', region_name='us-west-2')
24.35
77
0.648871
26ad9a93696193c618815ae5d8967a74a464da8c
766
py
Python
test/test_lazy.py
sixty-north/python-transducers
575357e3a17ff3b4c757967afd396bf0ea042c08
[ "MIT" ]
54
2015-10-02T02:45:36.000Z
2021-06-22T04:40:33.000Z
test/test_lazy.py
sixty-north/python-transducers
575357e3a17ff3b4c757967afd396bf0ea042c08
[ "MIT" ]
3
2017-06-11T13:39:18.000Z
2017-06-12T06:07:24.000Z
test/test_lazy.py
sixty-north/python-transducers
575357e3a17ff3b4c757967afd396bf0ea042c08
[ "MIT" ]
9
2015-10-28T23:36:50.000Z
2019-01-11T13:47:05.000Z
import unittest

from transducer.functional import compose
from transducer.lazy import transduce
from transducer.transducers import (mapping, filtering, taking,
                                    dropping_while, distinct)


if __name__ == '__main__':
    unittest.main()
31.916667
89
0.574413
26adf150baad599be77596f054bbe7e015db202c
2,246
py
Python
cmake_pc_hooks/cppcheck.py
Takishima/cmake-pre-commit-hooks
a6d96865602f68f413f7f368aa1dbbb8bf495109
[ "Apache-2.0" ]
2
2021-08-10T21:48:05.000Z
2022-02-28T11:46:51.000Z
cmake_pc_hooks/cppcheck.py
Takishima/cmake-pre-commit-hooks
a6d96865602f68f413f7f368aa1dbbb8bf495109
[ "Apache-2.0" ]
null
null
null
cmake_pc_hooks/cppcheck.py
Takishima/cmake-pre-commit-hooks
a6d96865602f68f413f7f368aa1dbbb8bf495109
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*-

# Copyright 2021 Damien Nguyen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Wrapper script for cppcheck."""

import sys
from pathlib import Path

from ._utils import Command


def main(argv=None):
    """
    Run command.

    Args:
        argv (:obj:`list` of :obj:`str`): list of arguments
    """
    if argv is None:
        argv = sys.argv

    cmd = CppcheckCmd(argv)
    cmd.run()


if __name__ == "__main__":
    main()
29.946667
116
0.662066
26aec7ce88a039f97165462d31ff1b237e27b183
4,468
py
Python
django-todo-list/tasks/models.py
HimadriP/Orderly
e9464756c585e385f23436fc79acd2064b443432
[ "MIT" ]
1
2019-07-29T19:52:29.000Z
2019-07-29T19:52:29.000Z
django-todo-list/tasks/models.py
HimadriP/Orderly
e9464756c585e385f23436fc79acd2064b443432
[ "MIT" ]
null
null
null
django-todo-list/tasks/models.py
HimadriP/Orderly
e9464756c585e385f23436fc79acd2064b443432
[ "MIT" ]
null
null
null
from django.db import models
from django.conf import settings
from django.core.urlresolvers import reverse_lazy
from django.utils import timezone
from django.forms.util import to_current_timezone

from model_utils import Choices
36.622951
78
0.530215
26af8dafdbc00b0bb2091823b9a4a72611dc7cfc
521
py
Python
apps/boards/apps.py
julianwachholz/thefarland
c7259311fafb60beba167422eefd0d0c5d362514
[ "WTFPL" ]
null
null
null
apps/boards/apps.py
julianwachholz/thefarland
c7259311fafb60beba167422eefd0d0c5d362514
[ "WTFPL" ]
null
null
null
apps/boards/apps.py
julianwachholz/thefarland
c7259311fafb60beba167422eefd0d0c5d362514
[ "WTFPL" ]
null
null
null
from django.apps import AppConfig
from django.db.models.signals import post_save, post_delete

from . import signals
30.647059
70
0.71785
26af8f12a06f8edb90f5fc54b553edce179f388f
2,445
py
Python
danmu.py
wjhtime/douyu_danmu_python
432198f86bc9f6facd7ef531f301e8c7c8a9285f
[ "MIT" ]
4
2018-12-15T10:35:20.000Z
2019-06-04T20:20:32.000Z
danmu.py
wjhtime/douyu_danmu_python
432198f86bc9f6facd7ef531f301e8c7c8a9285f
[ "MIT" ]
null
null
null
danmu.py
wjhtime/douyu_danmu_python
432198f86bc9f6facd7ef531f301e8c7c8a9285f
[ "MIT" ]
2
2019-04-29T08:20:08.000Z
2020-05-19T09:51:19.000Z
'''
api tv
'''

import multiprocessing
import socket
import time
import re
import signal

# socketapi
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = socket.gethostbyname("openbarrage.douyutv.com")
port = 8601
client.connect((host, port))

#
danmu_re = re.compile(b'txt@=(.+?)/cid@')
username_re = re.compile(b'nn@=(.+?)/txt@')


def send_req_msg(msgstr):
    '''api'''
    msg = msgstr.encode('utf-8')
    data_length = len(msg) + 8
    code = 689
    #
    msgHead = int.to_bytes(data_length, 4, 'little') \
        + int.to_bytes(data_length, 4, 'little') + \
        int.to_bytes(code, 4, 'little')
    client.send(msgHead)
    sent = 0
    while sent < len(msg):
        tn = client.send(msg[sent:])
        sent = sent + tn


def keeplive():
    '''
    15
    '''
    while True:
        msg = 'type@=keeplive/tick@=' + str(int(time.time())) + '/\0'
        send_req_msg(msg)
        print('')
        time.sleep(15)


def logout():
    '''
    '''
    msg = 'type@=logout/'
    send_req_msg(msg)
    print('')


def signal_handler(signal, frame):
    '''
    ctrl+c signal.SIGINT hander
    '''
    p1.terminate()
    p2.terminate()
    logout()
    print('Bye')


if __name__ == '__main__':
    #room_id = input('ID ')
    # lpl
    room_id = 288016
    # signal
    signal.signal(signal.SIGINT, signal_handler)
    #
    p1 = multiprocessing.Process(target=DM_start, args=(room_id,))
    p2 = multiprocessing.Process(target=keeplive)
    p1.start()
    p2.start()
21.447368
75
0.578323
26afa6ab00539bb702ecf9ce1071e801dd9694da
3,828
py
Python
03_spider_douyin/spider_douyin.py
theThreeKingdom/python-exercises
fc08a7bbb9d6b53d5761b9e1017f293bff4e26db
[ "Apache-2.0" ]
null
null
null
03_spider_douyin/spider_douyin.py
theThreeKingdom/python-exercises
fc08a7bbb9d6b53d5761b9e1017f293bff4e26db
[ "Apache-2.0" ]
null
null
null
03_spider_douyin/spider_douyin.py
theThreeKingdom/python-exercises
fc08a7bbb9d6b53d5761b9e1017f293bff4e26db
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- # @Time : 2020/4/1 0:48 # @Author : Nixin # @Email : [email protected] # @File : spider_douyin.py # @Software: PyCharm import requests, re, sys, os, time, random, socket import http.client from bs4 import BeautifulSoup if __name__ == '__main__': # download_douyin(56, "https://v.douyin.com/3wV6PQ") batch_download_douyin(80, "E:/nixin/douyin/video/20200419/1.txt") pass
33.876106
163
0.562696
26b0dec6991fd98013cf8dc45f05ed08b2f7cb49
479
py
Python
hooks/relations/juju-info/requires.py
xenefix/RoutingPolicyRule
fb0c815c3fc049f63491a3ca56416c8ba0fe692c
[ "Apache-2.0" ]
null
null
null
hooks/relations/juju-info/requires.py
xenefix/RoutingPolicyRule
fb0c815c3fc049f63491a3ca56416c8ba0fe692c
[ "Apache-2.0" ]
null
null
null
hooks/relations/juju-info/requires.py
xenefix/RoutingPolicyRule
fb0c815c3fc049f63491a3ca56416c8ba0fe692c
[ "Apache-2.0" ]
null
null
null
from charms.reactive import RelationBase from charms.reactive import scopes from charms.reactive import hook from charms.reactive import when
26.611111
73
0.707724
26b107fd23e87c597f676bc069f5e3b5b448d1e3
593
py
Python
suricata-4.1.4/python/suricata/ctl/test_filestore.py
runtest007/dpdk_surcata_4.1.1
5abf91f483b418b5d9c2dd410b5c850d6ed95c5f
[ "MIT" ]
77
2019-06-17T07:05:07.000Z
2022-03-07T03:26:27.000Z
suricata-4.1.4/python/suricata/ctl/test_filestore.py
clockdad/DPDK_SURICATA-4_1_1
974cc9eb54b0b1ab90eff12a95617e3e293b77d3
[ "MIT" ]
22
2019-07-18T02:32:10.000Z
2022-03-24T03:39:11.000Z
suricata-4.1.4/python/suricata/ctl/test_filestore.py
clockdad/DPDK_SURICATA-4_1_1
974cc9eb54b0b1ab90eff12a95617e3e293b77d3
[ "MIT" ]
49
2019-06-18T03:31:56.000Z
2022-03-13T05:23:10.000Z
from __future__ import print_function import unittest import filestore
31.210526
71
0.706577
26b279e2de1f9a9dd6eeef9e0736a69c9c2cc2f9
1,027
py
Python
chmap/examples/development/example109_store-creds.py
predsci/CHD
35f29d1b62861f4ffed57b38d18689b282664bcf
[ "Apache-2.0" ]
3
2021-06-29T00:23:47.000Z
2021-09-17T18:29:05.000Z
chmap/examples/development/example109_store-creds.py
predsci/CHD
35f29d1b62861f4ffed57b38d18689b282664bcf
[ "Apache-2.0" ]
null
null
null
chmap/examples/development/example109_store-creds.py
predsci/CHD
35f29d1b62861f4ffed57b38d18689b282664bcf
[ "Apache-2.0" ]
1
2021-12-08T06:26:18.000Z
2021-12-08T06:26:18.000Z
# This is a little bit clunky, but is a better solution than writing passwords into the source in plain text import os from cryptography.fernet import Fernet # cred_dir = os.path.join(os.path.dirname(os.getcwd()), "settings") cred_dir = '/Users/cdowns/work/imac_local/CoronalHoles/mysql_credentials' key_file = os.path.join(cred_dir, "e_key.bin") # Generate a new local encryption key if needed if not os.path.exists(key_file): key = Fernet.generate_key() # print(key) with open(key_file, 'wb') as file_object: file_object.write(key) else: with open(key_file, 'rb') as file_object: for line in file_object: key = line # User inputs password interactively so it is never saved passw = input("Enter a password to encrypt and save: ") cipher_suite = Fernet(key) ciphered_text = cipher_suite.encrypt(passw.encode()) # required to be bytes creds_file = os.path.join(cred_dir, "e_cred.bin") print("Writing credential file") with open(creds_file, 'wb') as file_object: file_object.write(ciphered_text)
31.121212
83
0.728335
26b4086be5d9ece68d83031748858af8aef6a984
663
py
Python
indjections/packages/django-allauth.py
pandichef/indjections
e8f0e62cf648607ed50330ac69dd12e9fc0cf710
[ "BSD-2-Clause" ]
7
2020-07-20T00:33:15.000Z
2020-07-24T04:14:53.000Z
indjections/packages/django-allauth.py
pandichef/indjections
e8f0e62cf648607ed50330ac69dd12e9fc0cf710
[ "BSD-2-Clause" ]
17
2020-07-20T03:17:06.000Z
2020-07-27T07:36:25.000Z
indjections/packages/django-allauth.py
pandichef/indjections
e8f0e62cf648607ed50330ac69dd12e9fc0cf710
[ "BSD-2-Clause" ]
null
null
null
settings = """ try: AUTHENTICATION_BACKENDS += [ "django.contrib.auth.backends.ModelBackend", "allauth.account.auth_backends.AuthenticationBackend", ] except NameError: AUTHENTICATION_BACKENDS = [ "django.contrib.auth.backends.ModelBackend", "allauth.account.auth_backends.AuthenticationBackend", ] INSTALLED_APPS += [ "django.contrib.sites", # not installed by default "allauth", "allauth.account", "allauth.socialaccount", "allauth.socialaccount.providers.google", ] SITE_ID = 1 """ urls = """ from django.urls import include urlpatterns += [path("accounts/", include("allauth.urls"))] """
24.555556
62
0.680241
26b4665a5f013ded26bc910df476a322704eda91
475
py
Python
teamcat_service/docker_build/target/one_step_build/teamcat/doraemon/logcat/pagefactory/logcat_template_path.py
zhangyin2088/Teamcat
be9be8d7c1e58c8d2d22ab78d25783d9aee4de71
[ "Apache-2.0" ]
6
2018-11-26T08:42:52.000Z
2020-06-01T08:33:48.000Z
teamcat_service/docker_build/target/one_step_build/teamcat/doraemon/logcat/pagefactory/logcat_template_path.py
zhangyin2088/Teamcat
be9be8d7c1e58c8d2d22ab78d25783d9aee4de71
[ "Apache-2.0" ]
null
null
null
teamcat_service/docker_build/target/one_step_build/teamcat/doraemon/logcat/pagefactory/logcat_template_path.py
zhangyin2088/Teamcat
be9be8d7c1e58c8d2d22ab78d25783d9aee4de71
[ "Apache-2.0" ]
1
2019-01-22T06:45:36.000Z
2019-01-22T06:45:36.000Z
#coding=utf-8 ''' Created on 2015-10-10 @author: Devuser '''
19
65
0.751579
26b56046672f411c1c88bcbb0a2ebddb8ba65691
176
py
Python
competitive-programming/kattis/heimavinna.py
sanchopanca/coding-for-pleasure
fed1910e8a5a4241bd55aed333afd79b4405a71d
[ "MIT" ]
null
null
null
competitive-programming/kattis/heimavinna.py
sanchopanca/coding-for-pleasure
fed1910e8a5a4241bd55aed333afd79b4405a71d
[ "MIT" ]
null
null
null
competitive-programming/kattis/heimavinna.py
sanchopanca/coding-for-pleasure
fed1910e8a5a4241bd55aed333afd79b4405a71d
[ "MIT" ]
null
null
null
s = 0 problems = input().strip().split(';') for p in problems: if '-' in p: a, b = map(int, p.split('-')) s += b - a + 1 else: s += 1 print(s)
16
37
0.420455
26b76d047c1414efdb3d56d1cf6e2c55efd68449
745
py
Python
icepll.py
carlosedp/fusesoc-generators
4ee343ce0013952bd89d6986bfb5ed861b2cf6b2
[ "MIT" ]
null
null
null
icepll.py
carlosedp/fusesoc-generators
4ee343ce0013952bd89d6986bfb5ed861b2cf6b2
[ "MIT" ]
null
null
null
icepll.py
carlosedp/fusesoc-generators
4ee343ce0013952bd89d6986bfb5ed861b2cf6b2
[ "MIT" ]
null
null
null
#!/usr/bin/python from fusesoc.capi2.generator import Generator import subprocess g = IcepllGenerator() g.run() g.write()
31.041667
79
0.555705
26b7cad7376280941d68826426ca4c361383c494
351
py
Python
templates/checkcel/BrasExplor_Pictures_template.py
mboudet/braskoload
8f39f2d68638a683e0d8e5065b33a218afe370cb
[ "MIT" ]
null
null
null
templates/checkcel/BrasExplor_Pictures_template.py
mboudet/braskoload
8f39f2d68638a683e0d8e5065b33a218afe370cb
[ "MIT" ]
null
null
null
templates/checkcel/BrasExplor_Pictures_template.py
mboudet/braskoload
8f39f2d68638a683e0d8e5065b33a218afe370cb
[ "MIT" ]
null
null
null
from checkcel import Checkplate from checkcel.validators import SetValidator, NoValidator from collections import OrderedDict
29.25
74
0.706553
26bc7d495f1e995a46390a4be5f8a0a4a460b0ab
2,823
py
Python
src/shark/poker/player.py
twoodruff01/shark
6c183de1993492b614eff332548f5c0f57facda2
[ "Apache-2.0" ]
null
null
null
src/shark/poker/player.py
twoodruff01/shark
6c183de1993492b614eff332548f5c0f57facda2
[ "Apache-2.0" ]
null
null
null
src/shark/poker/player.py
twoodruff01/shark
6c183de1993492b614eff332548f5c0f57facda2
[ "Apache-2.0" ]
null
null
null
# Copyright 2022 Thomas Woodruff # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .. import cli
37.144737
106
0.616366
26bd34791b254cf4bcb5957b49692dda6546cfa1
1,059
py
Python
BUNKURO/BUNKURO.py
kantoku-code/Fusion360_BUNKURO
0c83f2ab57f03c83fcad98b85b59792360f7a804
[ "MIT" ]
1
2022-03-18T13:06:57.000Z
2022-03-18T13:06:57.000Z
BUNKURO/BUNKURO.py
kantoku-code/Fusion360_BUNKURO
0c83f2ab57f03c83fcad98b85b59792360f7a804
[ "MIT" ]
null
null
null
BUNKURO/BUNKURO.py
kantoku-code/Fusion360_BUNKURO
0c83f2ab57f03c83fcad98b85b59792360f7a804
[ "MIT" ]
null
null
null
# Author-kantoku # Description-! # Fusion360API Python import adsk.core import traceback try: from . import config from .apper import apper from .commands.BUNKUROCore import BUNKUROCore # Create our addin definition object my_addin = apper.FusionApp(config.app_name, config.company_name, False) my_addin.root_path = config.app_path my_addin.add_command( '', BUNKUROCore, { 'cmd_description': '!', 'cmd_id': 'bunkuro', 'workspace': 'FusionSolidEnvironment', 'toolbar_panel_id': 'UtilityPanel', 'cmd_resources': 'BUNKURO', 'command_visible': True, 'command_promoted': False, 'create_feature': False, } ) except: app = adsk.core.Application.get() ui = app.userInterface if ui: ui.messageBox('Initialization: {}'.format(traceback.format_exc()))
23.021739
75
0.634561
26bd97f61aa8677a52c1050fed16514074e239fd
2,004
py
Python
hp_steam_data/src/main.py
wangzhefeng/data-analysis
e502ac49ae1bc8287243e1faf51c467cc4d4187c
[ "MIT" ]
null
null
null
hp_steam_data/src/main.py
wangzhefeng/data-analysis
e502ac49ae1bc8287243e1faf51c467cc4d4187c
[ "MIT" ]
null
null
null
hp_steam_data/src/main.py
wangzhefeng/data-analysis
e502ac49ae1bc8287243e1faf51c467cc4d4187c
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import numpy as np import pandas as pd import os PROJECT_PATH = "/mnt/e/dev/test/hp_steam_data/" DATA_PATH = os.path.join(PROJECT_PATH, "data") RESULT_PATH = os.path.join(PROJECT_PATH, "result") def get_origin_data(): """ origin data """ # raw data eturb_m1_data = pd.read_csv("/mnt/e/dev/test/hp_steam_data/data/eturb_m1_1min_metrics-0817.csv", header = 0, index_col = None) eturb_m2_data = pd.read_csv("/mnt/e/dev/test/hp_steam_data/data/eturb_m2_1min_metrics-0817.csv", header = 0, index_col = None) boiler_m1_data = pd.read_csv("/mnt/e/dev/test/hp_steam_data/data/boiler_m1_1min_outlet_steam_flow.csv", header = 0, index_col = None) boiler_m3_data = pd.read_csv("/mnt/e/dev/test/hp_steam_data/data/boiler_m3_1min_outlet_steam_flow.csv", header = 0, index_col = None) steampipeline_p1_data = pd.read_csv("/mnt/e/dev/test/hp_steam_data/data/steampipeline_p1_1min_hp_steam_pressure.csv", header = 0, index_col = None) # data aggregate df = pd.DataFrame() # eturb_m1 df["eturb_m1_steam_flow_in"] = eturb_m1_data["ExtCondensTurbineOP.steam_flow_in"] df["eturb_m2_steam_flow_in"] = eturb_m2_data["ExtCondensTurbineOP.steam_flow_in"] df["boiler_m1_outlet_steam_flow"] = boiler_m1_data["CFBoilerOP.outlet_steam_flow"] df["boiler_m3_outlet_steam_flow"] = boiler_m3_data["CFBoilerOP.outlet_steam_flow"] df["steampipeline_p1_hp_steam_pressure"] = steampipeline_p1_data["SteamPipelineOP.hp_steam_pressure"] df["boiler_steam_flow"] = df["boiler_m1_outlet_steam_flow"] + df["boiler_m3_outlet_steam_flow"] df["turbine_steam_flow"] = df["eturb_m1_steam_flow_in"] + df["eturb_m2_steam_flow_in"] df = df.reset_index(drop = True) return df if __name__ == "__main__": main()
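# --- hedged sketch, not part of the original file ---
# main() is called above but not included in this record. A minimal, assumed
# implementation that saves the aggregated frame (the output file name is a guess):
def main():
    df = get_origin_data()
    os.makedirs(RESULT_PATH, exist_ok=True)
    df.to_csv(os.path.join(RESULT_PATH, "hp_steam_aggregated.csv"), index=False)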
36.436364
151
0.733034
26be0d11d5b6b76d30765326da6c34a562d5f111
621
py
Python
stock_experiment_sbb_with_vectors_of_ones.py
dvirg/auctions
da706f3d11b9582c7f811de9f50b96b43ac8cbd0
[ "MIT" ]
1
2021-11-20T19:27:45.000Z
2021-11-20T19:27:45.000Z
stock_experiment_sbb_with_vectors_of_ones.py
dvirg/auctions
da706f3d11b9582c7f811de9f50b96b43ac8cbd0
[ "MIT" ]
null
null
null
stock_experiment_sbb_with_vectors_of_ones.py
dvirg/auctions
da706f3d11b9582c7f811de9f50b96b43ac8cbd0
[ "MIT" ]
null
null
null
#!python3 """ Simulation experiment for our AAAI 2020 paper, with recipes that are vectors of ones. Comparing McAfee's double auction to our SBB auctions. Author: Dvir Gilor Since: 2020-08 """ from experiment_stock import experiment from mcafee_protocol import mcafee_trade_reduction from trade_reduction_protocol import budget_balanced_trade_reduction from ascending_auction_protocol import budget_balanced_ascending_auction import sys results_file = "stock/results/experiment_sbb_with_vectors_of_ones_stock.csv" experiment(results_file,budget_balanced_ascending_auction, "SBB Ascending Prices", recipe=4*(1,))
27
97
0.84219
26be6c749fedecdd29350e7dc06917fe50136ca1
556
py
Python
Modulo-02/ex058/ex058.py
Matheus-Henrique-Burey/Curso-de-Python
448aebaab96527affa1e45897a662bb0407c11c6
[ "MIT" ]
null
null
null
Modulo-02/ex058/ex058.py
Matheus-Henrique-Burey/Curso-de-Python
448aebaab96527affa1e45897a662bb0407c11c6
[ "MIT" ]
null
null
null
Modulo-02/ex058/ex058.py
Matheus-Henrique-Burey/Curso-de-Python
448aebaab96527affa1e45897a662bb0407c11c6
[ "MIT" ]
null
null
null
from random import randint print('=-' * 15) print('ADIVINHE EM QUE NUMERO ESTOU PENSANDO') print('=-' * 15) pc = randint(0, 10) num = 11 cont = 0 while pc != num: num = int(input('Sera que voce consegue acertar o numero que pensei, entre 0 e 10: ')) if num == pc: print('PARABENS!!! VOCE ACERTOU') else: if num < pc: print('Mais...', end=' ') else: print('Menos...', end=' ') print('Tente novamente') print('-' * 20) cont += 1 print(f'Voce tentou {cont} vezes para acertar')
23.166667
89
0.546763
26c07cd4c709d13692e520d5fa627ce985733c5a
3,172
py
Python
sfc_models/examples/scripts/deprecated/ex20170108_model_PC.py
MachineLP/SFC_models
d438a4e3e88534a206c761cda7a3f6a58ac3a0ac
[ "Apache-2.0" ]
21
2016-11-03T12:30:50.000Z
2022-03-24T06:54:14.000Z
sfc_models/examples/scripts/deprecated/ex20170108_model_PC.py
MachineLP/SFC_models
d438a4e3e88534a206c761cda7a3f6a58ac3a0ac
[ "Apache-2.0" ]
1
2019-04-02T02:01:27.000Z
2019-04-07T21:07:10.000Z
sfc_models/examples/scripts/deprecated/ex20170108_model_PC.py
MachineLP/SFC_models
d438a4e3e88534a206c761cda7a3f6a58ac3a0ac
[ "Apache-2.0" ]
12
2016-11-03T12:30:57.000Z
2021-09-14T23:08:23.000Z
""" ex20170108_model_PC.py Create Model PC (Godley & Lavoie Chapter 4). Copyright 2017 Brian Romanchuk Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from sfc_models.examples.Quick2DPlot import Quick2DPlot from sfc_models.models import * from sfc_models.sector import Market from sfc_models.sector_definitions import Household, Treasury, CentralBank, TaxFlow, FixedMarginBusiness, DepositMarket, \ MoneyMarket if __name__ == '__main__': main()
39.65
122
0.669294
26c4a3b8183960d6aad7bfb1532a7a0f533eda4e
4,087
py
Python
flask_simpleview/__init__.py
jackwardell/Flask-SimpleView
813d5dbe6353f77016c893caff526abae8487492
[ "Apache-2.0" ]
null
null
null
flask_simpleview/__init__.py
jackwardell/Flask-SimpleView
813d5dbe6353f77016c893caff526abae8487492
[ "Apache-2.0" ]
null
null
null
flask_simpleview/__init__.py
jackwardell/Flask-SimpleView
813d5dbe6353f77016c893caff526abae8487492
[ "Apache-2.0" ]
null
null
null
import re import flask import flask.views from functools import wraps def camel_case_to_snake_case(word): """very simple mechanism for turning CamelCase words into snake_case""" return re.sub(r"(?<!^)(?=[A-Z])", "_", word).lower() def camel_case_to_slug_case(word): """very simple mechanism for turning CamelCase words into slug-case""" return re.sub(r"(?<!^)(?=[A-Z])", "-", word).lower() def extends_rule(rule): return extend_rule API = View = SimpleView
28.381944
84
0.62711
26c5a0a8bb014c980c7a75f56eb95838d11757a4
2,287
py
Python
qingcloud/cli/iaas_client/actions/cluster/deploy_app_version.py
knktc/qingcloud-cli
2be8bba43e08bd7a76e1326ece871386cc9b5b55
[ "Apache-2.0" ]
11
2015-05-27T19:52:36.000Z
2021-04-15T09:07:39.000Z
qingcloud/cli/iaas_client/actions/cluster/deploy_app_version.py
knktc/qingcloud-cli
2be8bba43e08bd7a76e1326ece871386cc9b5b55
[ "Apache-2.0" ]
7
2017-07-19T05:05:03.000Z
2019-04-25T07:18:04.000Z
qingcloud/cli/iaas_client/actions/cluster/deploy_app_version.py
knktc/qingcloud-cli
2be8bba43e08bd7a76e1326ece871386cc9b5b55
[ "Apache-2.0" ]
19
2016-03-15T07:31:47.000Z
2021-07-26T09:31:33.000Z
# ========================================================================= # Copyright 2012-present Yunify, Inc. # ------------------------------------------------------------------------- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this work except in compliance with the License. # You may obtain a copy of the License in the LICENSE file, or at: # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ========================================================================= from qingcloud.iaas import constants as const from qingcloud.cli.iaas_client.actions.base import BaseAction
40.122807
91
0.567118
26c5a8a0093950f0c391d5d30211cf53ae6f042f
5,022
py
Python
padevents/events.py
muffin-rice/pad-cogs
820ecf08f9569a3d7cf3264d0eb9567264b42edf
[ "MIT" ]
2
2020-09-25T01:57:21.000Z
2020-10-02T13:46:48.000Z
padevents/events.py
muffin-rice/pad-cogs
820ecf08f9569a3d7cf3264d0eb9567264b42edf
[ "MIT" ]
43
2020-08-29T06:16:39.000Z
2020-10-29T12:00:15.000Z
padevents/events.py
muffin-rice/pad-cogs
820ecf08f9569a3d7cf3264d0eb9567264b42edf
[ "MIT" ]
6
2020-08-31T04:37:55.000Z
2020-10-19T05:09:17.000Z
import datetime from datetime import timedelta from typing import Callable, Collection, TYPE_CHECKING import pytz from tsutils.formatting import normalize_server_name from tsutils.time import JP_TIMEZONE, KR_TIMEZONE, NA_TIMEZONE from padevents.enums import DungeonType, EventLength if TYPE_CHECKING: from dbcog.models.scheduled_event_model import ScheduledEventModel SUPPORTED_SERVERS = ["JP", "NA", "KR"] SERVER_TIMEZONES = { "JP": JP_TIMEZONE, "NA": NA_TIMEZONE, "KR": KR_TIMEZONE, }
36.656934
107
0.666866
26c6baf54f78e9c92b1e52fb48aafcc91b720d02
1,409
py
Python
server/getSert.py
sviridovt/WIE
9af6d3dff7e774f5e332e6c77eadde815d4c375d
[ "MIT" ]
1
2021-09-03T11:36:02.000Z
2021-09-03T11:36:02.000Z
server/getSert.py
sviridovt/WIE
9af6d3dff7e774f5e332e6c77eadde815d4c375d
[ "MIT" ]
null
null
null
server/getSert.py
sviridovt/WIE
9af6d3dff7e774f5e332e6c77eadde815d4c375d
[ "MIT" ]
1
2021-09-03T11:36:04.000Z
2021-09-03T11:36:04.000Z
# allows to import RSA lib from different dir import sys # inserts path to access RSA encryption lib # sys.path.insert(0, '../RSAEncryption') import socket import json from libs.communication import sendEncrypted, recvEncrypted, sendData, readData from libs.RSAKeys import readPrivateKey from libs.EncryptedSocket import EncryptedSocket from libs.settings import * HOST = '127.0.0.1' PORT = 4444 printDebug = True SSID = "SecureCanes" # sending data
21.029851
79
0.675656
26c71f804645b9d738d4394f797c6533de859d14
7,085
py
Python
code/billiard_game_multi_ball.py
ifsheldon/billiard_game
1ce13d39158734efd76e617bba2bb319d5498c3f
[ "BSD-2-Clause" ]
null
null
null
code/billiard_game_multi_ball.py
ifsheldon/billiard_game
1ce13d39158734efd76e617bba2bb319d5498c3f
[ "BSD-2-Clause" ]
null
null
null
code/billiard_game_multi_ball.py
ifsheldon/billiard_game
1ce13d39158734efd76e617bba2bb319d5498c3f
[ "BSD-2-Clause" ]
null
null
null
import taichi as ti import numpy as np from functools import partial from itertools import combinations from billiard_game_dual_ball import normalize_vector, two_ball_collides, calc_next_pos_and_velocity, \ calc_after_collision_velocity, rectify_positions_in_collision, rectify_positions_and_velocities # Constants WHITE = 0xFFFFFF RED = 0xFF0000 GREEN = 0x00FF00 BLUE = 0x0000FF # wc for world space x[0.0, ratio], y[0.0, 1.0] # sc for screen space [0.0, 1.0]^2 # Constant parameters RESOLUTION = (1230, 750) RATIO = RESOLUTION[0] / RESOLUTION[1] # x/y FPS = 60 CUE_BALL_IDX = 0 STICK_LENGTH_SC = 0.1 DRAG_COEFFICIENT = 0.03 G = 9.8 CUE_BALL_MAX_SPEED_WC = 1.0 BALL_PIXEL_RADIUS = 10 HOLE_PIXEL_RADIUS = 15 num_balls = 1 # Derived parameters ball_radius_wc = BALL_PIXEL_RADIUS / RESOLUTION[1] hole_radius_wc = HOLE_PIXEL_RADIUS / RESOLUTION[1] x_begin_wc = 0.0 x_end_wc = RATIO y_begin_wc = 0.0 y_end_wc = 1.0 if __name__ == "__main__": ti.init(ti.cpu) print("Press A to kick the cue ball") wc_to_sc_multiplier = np.array([1 / RATIO, 1]) # transform to [0,1]^ screen space sc_to_wc_multiplier = np.array([RATIO, 1]) virtual_bound_x = np.array([ball_radius_wc, x_end_wc - ball_radius_wc]) virtual_bound_y = np.array([ball_radius_wc, y_end_wc - ball_radius_wc]) dx_wc = x_end_wc / 2. dy_wc = y_end_wc / 2. hole_pos_x = np.arange(3) * dx_wc hole_pos_y = np.arange(3) * dy_wc hole_pos_x, hole_pos_y = np.meshgrid(hole_pos_x, hole_pos_y) hole_center_positions_wc = np.stack([hole_pos_x, hole_pos_y], axis=-1).reshape(-1, 2) # (3, 3, 2) -> (9, 2) hole_center_positions_wc = np.delete(hole_center_positions_wc, 4, axis=0) hole_center_positions_sc = hole_center_positions_wc * wc_to_sc_multiplier.reshape(1, 2) ball_velocities_wc = np.zeros((num_balls, 2)) ball_visible = np.ones(num_balls, dtype=bool) span_wc = np.array([virtual_bound_x[1] - virtual_bound_x[0], virtual_bound_y[1] - virtual_bound_y[0]]) offset_wc = np.array([virtual_bound_x[0], virtual_bound_y[0]]) ball_pos_wc = place_balls_wc(span_wc, offset_wc) gui = ti.GUI("billiard_game_multi_ball", RESOLUTION) gui.fps_limit = FPS delta_t = 1.0 / FPS boundary_begin_wc = np.array([ [x_begin_wc, y_begin_wc], [x_begin_wc, y_begin_wc], [x_end_wc, y_end_wc], [x_end_wc, y_end_wc] ]) boundary_end_wc = np.array([ [x_end_wc, y_begin_wc], [x_begin_wc, y_end_wc], [x_end_wc, y_begin_wc], [x_begin_wc, y_end_wc] ]) # a convenient partial function of rectify_positions_and_velocities rectify_pv = partial(rectify_positions_and_velocities, virtual_bound_x[0], virtual_bound_x[1], virtual_bound_y[0], virtual_bound_y[1]) ball_pairs = list(combinations(range(num_balls), 2)) ball_color_indices = np.ones(num_balls) ball_color_indices[CUE_BALL_IDX] = 0 ball_colors = [WHITE, RED] while gui.running: gui.clear(GREEN) hit_ball = gui.get_event(ti.GUI.PRESS) and gui.is_pressed("a") cue_ball_pos_sc = ball_pos_wc[CUE_BALL_IDX] * wc_to_sc_multiplier # the current setting is only when all balls are stationary, the mouse is available if np.allclose((ball_velocities_wc ** 2).sum(-1), 0., rtol=0.001, atol=0.001) and ball_visible[CUE_BALL_IDX]: rod_dir_sc, length = normalize_vector(gui.get_cursor_pos() - cue_ball_pos_sc) rod_line = rod_dir_sc * min(STICK_LENGTH_SC, length) gui.line(cue_ball_pos_sc, cue_ball_pos_sc + rod_line, radius=2) if hit_ball: ball_velocities_wc[CUE_BALL_IDX] = (rod_dir_sc * sc_to_wc_multiplier) \ * CUE_BALL_MAX_SPEED_WC * (min(STICK_LENGTH_SC, length) / STICK_LENGTH_SC) # modify the speed with a multiplier dependent on the distance between mouse and the cue ball # for i in 
range(num_balls): # for each ball, if score() returns True, set this ball invisible # # Not care now # if score(hole_center_positions_wc, ball_pos_wc[i]): # ball_visible[i] = False # ball_velocities_wc[i] = 0. # No need to care about this in verilog gui.lines(begin=boundary_begin_wc, end=boundary_end_wc, radius=2) gui.circles(ball_pos_wc[ball_visible] * wc_to_sc_multiplier.reshape(1, 2), radius=BALL_PIXEL_RADIUS, palette=ball_colors, palette_indices=ball_color_indices[ball_visible]) gui.circles(hole_center_positions_sc, radius=HOLE_PIXEL_RADIUS, color=0) gui.show() for i in range(num_balls): # unroll this loop for the two ball case if not ball_visible[i]: continue next_pos_wc, next_velocity_wc = calc_next_pos_and_velocity(ball_pos_wc[i], ball_velocities_wc[i], delta_t, DRAG_COEFFICIENT, G) next_pos_wc, next_velocity_wc = rectify_pv(next_pos_wc, next_velocity_wc) ball_pos_wc[i] = next_pos_wc ball_velocities_wc[i] = next_velocity_wc for ball_i, ball_j in ball_pairs: # only one iteration for the two ball case, since we have only one pair if not ball_visible[ball_i] or not ball_visible[ball_j]: continue ball_i_pos_wc = ball_pos_wc[ball_i] ball_j_pos_wc = ball_pos_wc[ball_j] if two_ball_collides(ball_i_pos_wc, ball_j_pos_wc, ball_radius_wc): ball_i_pos_wc, ball_j_pos_wc = rectify_positions_in_collision(ball_i_pos_wc, ball_j_pos_wc, ball_radius_wc) ball_i_v_wc = ball_velocities_wc[ball_i] ball_j_v_wc = ball_velocities_wc[ball_j] ball_i_v_wc, ball_j_v_wc = calc_after_collision_velocity(ball_i_pos_wc, ball_j_pos_wc, ball_i_v_wc, ball_j_v_wc) ball_velocities_wc[ball_i] = ball_i_v_wc ball_velocities_wc[ball_j] = ball_j_v_wc
44.006211
203
0.651941
26c8199913901f96201fe9b8091ee36c1351a53e
347
py
Python
examples/prompt.py
nelice/bullet
aafec4d0ca8f628d2be9b0667c50477929c2cca7
[ "MIT" ]
1
2021-03-22T07:55:30.000Z
2021-03-22T07:55:30.000Z
examples/prompt.py
nelice/bullet
aafec4d0ca8f628d2be9b0667c50477929c2cca7
[ "MIT" ]
null
null
null
examples/prompt.py
nelice/bullet
aafec4d0ca8f628d2be9b0667c50477929c2cca7
[ "MIT" ]
null
null
null
from bullet import Bullet, Prompt, Check, Input, YesNo from bullet import styles cli = Prompt( [ Bullet("Choose from a list: ", **styles.Example), Check("Choose from a list: ", **styles.Example), Input("Who are you? "), YesNo("Are you a student? ") ], spacing = 2 ) result = cli.launch() print(result)
23.133333
57
0.599424
26c8a00c561378714f8ad7990f244b2a1e695121
671
py
Python
testsuite/tests/T618-047__Ada_2012/run_test.py
AdaCore/style_checker
17108ebfc44375498063ecdad6c6e4430458e60a
[ "CNRI-Python" ]
2
2017-10-22T18:04:26.000Z
2020-03-06T11:07:41.000Z
testsuite/tests/T618-047__Ada_2012/run_test.py
AdaCore/style_checker
17108ebfc44375498063ecdad6c6e4430458e60a
[ "CNRI-Python" ]
null
null
null
testsuite/tests/T618-047__Ada_2012/run_test.py
AdaCore/style_checker
17108ebfc44375498063ecdad6c6e4430458e60a
[ "CNRI-Python" ]
4
2018-05-22T12:08:54.000Z
2020-12-14T15:25:27.000Z
def test_pck_2012_adb(style_checker): """Style check test against pck_2012.adb.""" style_checker.set_year(2006) p = style_checker.run_style_checker('repo_name', 'pck_2012.ads') style_checker.assertEqual(p.status, 0, p.image) style_checker.assertRunOutputEmpty(p) def test_pck_2012_adb_with_alt_config_forcing_gnat2012(style_checker): """Style check test against pck_2012.adb with gnat12 config option.""" style_checker.set_year(2006) p = style_checker.run_style_checker( '--config', 'gnat2012_config.yaml', 'repo_name', 'pck_2012.ads') style_checker.assertEqual(p.status, 0, p.image) style_checker.assertRunOutputEmpty(p)
41.9375
74
0.754098
26cacd8b2394e2ededf66d1f7ced4b0560e95348
594
py
Python
src/volume_0/0011_Drawing_Lots.py
DaikiShimada/aoj-exercise
dd4b70d4fd64aa28bc4cc75f5cdb8d02ea796803
[ "MIT" ]
null
null
null
src/volume_0/0011_Drawing_Lots.py
DaikiShimada/aoj-exercise
dd4b70d4fd64aa28bc4cc75f5cdb8d02ea796803
[ "MIT" ]
null
null
null
src/volume_0/0011_Drawing_Lots.py
DaikiShimada/aoj-exercise
dd4b70d4fd64aa28bc4cc75f5cdb8d02ea796803
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- import sys if __name__ == '__main__': main()
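# --- hedged sketch, not part of the original file ---
# main() is referenced above but not included in this record. AOJ 0011
# ("Drawing Lots") asks for the final positions after applying horizontal
# swap lines to an Amida lottery; an assumed solution reading w, n and then
# n comma-separated pairs from standard input:
def main():
    w = int(sys.stdin.readline())
    n = int(sys.stdin.readline())
    lots = list(range(1, w + 1))
    for _ in range(n):
        a, b = map(int, sys.stdin.readline().split(','))
        # swap the two vertical lines joined by this horizontal bar
        lots[a - 1], lots[b - 1] = lots[b - 1], lots[a - 1]
    print('\n'.join(map(str, lots)))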
21.214286
73
0.503367
26cbd6df4059d6dbdf0c29f052b92ccdc1a7a881
1,832
py
Python
mglg/util/profiler.py
aforren1/mglg
a9b703e109a66377dd404929fc0b13ccc12b5214
[ "MIT" ]
null
null
null
mglg/util/profiler.py
aforren1/mglg
a9b703e109a66377dd404929fc0b13ccc12b5214
[ "MIT" ]
9
2019-08-05T21:11:09.000Z
2021-11-18T18:19:33.000Z
mglg/util/profiler.py
aforren1/mglg
a9b703e109a66377dd404929fc0b13ccc12b5214
[ "MIT" ]
null
null
null
from timeit import default_timer import numpy as np
31.586207
90
0.555131
26ccd5bc1d5e387e612a0f077f3e861929e6b021
2,972
py
Python
toolbox/exp/Experiment.py
LinXueyuanStdio/KGE-toolbox
916842835e61ba99dde1409592977a2ec55f8aae
[ "Apache-2.0" ]
2
2021-10-17T17:50:24.000Z
2021-12-13T05:22:46.000Z
toolbox/exp/Experiment.py
LinXueyuanStdio/KGE-toolbox
916842835e61ba99dde1409592977a2ec55f8aae
[ "Apache-2.0" ]
null
null
null
toolbox/exp/Experiment.py
LinXueyuanStdio/KGE-toolbox
916842835e61ba99dde1409592977a2ec55f8aae
[ "Apache-2.0" ]
null
null
null
import numpy as np from toolbox.exp.OutputSchema import OutputSchema from toolbox.utils.LaTeXSotre import EvaluateLaTeXStoreSchema from toolbox.utils.MetricLogStore import MetricLogStoreSchema from toolbox.utils.ModelParamStore import ModelParamStoreSchema from toolbox.utils.Visualize import VisualizeSchema
44.358209
89
0.685397
26cdda5a2dd54f427c59a8a3d865986d8ec6b5ee
893
py
Python
src/dataloader/tests/runner.py
ODM2/ODM2DataSharingPortal
4ea1d633fe8e1cc39916e83041f2dbc830339e55
[ "BSD-3-Clause" ]
18
2018-11-27T11:57:24.000Z
2022-03-19T16:52:35.000Z
src/dataloader/tests/runner.py
ODM2/ODM2DataSharingPortal
4ea1d633fe8e1cc39916e83041f2dbc830339e55
[ "BSD-3-Clause" ]
362
2018-02-21T16:27:00.000Z
2022-03-31T18:48:48.000Z
src/dataloader/tests/runner.py
ODM2/ODM2DataSharingPortal
4ea1d633fe8e1cc39916e83041f2dbc830339e55
[ "BSD-3-Clause" ]
5
2018-07-04T17:13:09.000Z
2021-12-19T22:51:40.000Z
import json import os from django.core.management import call_command from django.test.runner import DiscoverRunner from django.db import connections from dataloader.tests.data import data_manager
29.766667
98
0.721165
26cf29a0e44e798901be0b42a84cea83caaf14fe
364
py
Python
plugins/rain.py
xditya/PikaBotPlugins
2c5c52716158cd8964220bcc71fa383ccaf1210a
[ "Apache-2.0" ]
2
2021-02-16T05:35:41.000Z
2021-05-25T16:59:47.000Z
plugins/rain.py
xditya/PikaBotPlugins
2c5c52716158cd8964220bcc71fa383ccaf1210a
[ "Apache-2.0" ]
null
null
null
plugins/rain.py
xditya/PikaBotPlugins
2c5c52716158cd8964220bcc71fa383ccaf1210a
[ "Apache-2.0" ]
2
2021-02-07T03:09:40.000Z
2021-05-25T16:59:59.000Z
#Originally created By KingMars Rain Sequence 2 {Updated} from telethon import events import asyncio from collections import deque
22.75
59
0.717033
26cf746287a13ed33dacec35f0898c4fe183c37a
73
py
Python
preprocessing/__init__.py
WiktorSa/Music-Generation-with-LSTM-and-.wav-files
37b713b5e6193788a7710cc0fac4134efb74fa62
[ "MIT" ]
1
2022-03-09T20:13:57.000Z
2022-03-09T20:13:57.000Z
preprocessing/__init__.py
WiktorSa/Music-Generation-with-LSTM-and-.wav-files
37b713b5e6193788a7710cc0fac4134efb74fa62
[ "MIT" ]
1
2021-10-01T16:20:06.000Z
2021-10-01T17:25:30.000Z
preprocessing/__init__.py
WiktorSa/Music-Generation-with-LSTM-and-.wav-files
37b713b5e6193788a7710cc0fac4134efb74fa62
[ "MIT" ]
null
null
null
from preprocessing.generate_and_save_data import generate_and_save_data
36.5
72
0.917808
26cfb507f5245413925f5d6ffbbfcea4aa484298
6,126
py
Python
plot.py
lizzieayton/PrimordialOozebot
1e330b1ac6f27bd167734ad6c6ecff70f816986a
[ "MIT" ]
null
null
null
plot.py
lizzieayton/PrimordialOozebot
1e330b1ac6f27bd167734ad6c6ecff70f816986a
[ "MIT" ]
null
null
null
plot.py
lizzieayton/PrimordialOozebot
1e330b1ac6f27bd167734ad6c6ecff70f816986a
[ "MIT" ]
null
null
null
import matplotlib.pyplot as plt import csv import statistics import math plt.title('Population Diversity') plt.ylabel('Diversity Score') plt.xlabel('Iteration Number') random = [] randombars = [] rmin = [] rmax = [] hill = [] hillbars = [] hmin = [] hmax = [] evo = [] emin = [] emax = [] evobars = [] cross = [] crossbars = [] cmin = [] cmax = [] numRuns = 5 numIterations = 100000000 sqrtRuns = math.sqrt(numRuns) iterationDataRandom = [] iterationDataHill = [] iterationDataEvo = [] iterationDataCross = [] indicesToPlot = [10, 15, 20, 25] index = 60 while indicesToPlot[-1] < numIterations: indicesToPlot.append(index) index = int(index * 1.02) indicesToPlot[-1] = numIterations - 1 #xtiks = [] #for i in range(10): # xtiks.append(int(numIterations / 5 * i)) #plt.xticks(xtiks) for i in range(1, numRuns + 1): iterationDataRandom.append({}) iterationDataHill.append({}) iterationDataEvo.append({}) iterationDataCross.append({}) with open('rand' + str(i) + '.csv', newline='') as csvfile: reader = csv.reader(csvfile, delimiter=' ', quotechar='|') index = 0 for row in reversed(list(reader)): vals = row[0].split(',') iteration = int(vals[0]) val = float(vals[1]) while index < len(indicesToPlot) - 1 and indicesToPlot[index + 1] < iteration: index += 1 iterationDataRandom[-1][indicesToPlot[index]] = val with open('hill' + str(i) + '.csv', newline='') as csvfile: reader = csv.reader(csvfile, delimiter=' ', quotechar='|') index = 0 for row in reversed(list(reader)): vals = row[0].split(',') iteration = int(vals[0]) val = float(vals[2]) while index < len(indicesToPlot) - 1 and indicesToPlot[index] < iteration: index += 1 iterationDataHill[-1][indicesToPlot[index]] = val with open('evo' + str(i) + '.csv', newline='') as csvfile: reader = csv.reader(csvfile, delimiter=' ', quotechar='|') index = 0 for row in reversed(list(reader)): vals = row[0].split(',') iteration = int(vals[0]) * 100 val = float(vals[2]) while index < len(indicesToPlot) - 1 and indicesToPlot[index] < iteration: index += 1 iterationDataEvo[-1][indicesToPlot[index]] = val with open('ed' + str(i) + '.csv', newline='') as csvfile: reader = csv.reader(csvfile, delimiter=' ', quotechar='|') index = 0 for row in reversed(list(reader)): vals = row[0].split(',') iteration = int(vals[0]) val = float(vals[2]) while index < len(indicesToPlot) - 1 and indicesToPlot[index] < iteration: index += 1 iterationDataCross[-1][indicesToPlot[index]] = val print("Done reading data") unifiedRandom = [] unifiedHill = [] unifiedEvo = [] unifiedCross = [] index = 0 for iteration in indicesToPlot: currentRandom = [] currentHill = [] currentEvo = [] currentCross = [] unifiedRandom.append(currentRandom) unifiedHill.append(currentHill) unifiedEvo.append(currentEvo) unifiedCross.append(currentCross) for run in range(numRuns): valRandom = -1 if iteration in iterationDataRandom[run]: valRandom = iterationDataRandom[run][iteration] else: # unchanged valRandom = unifiedRandom[-2][run] currentRandom.append(valRandom) valHill = -1 if iteration in iterationDataHill[run]: valHill = iterationDataHill[run][iteration] else: # unchanged valHill = unifiedHill[-2][run] currentHill.append(valHill) valEvo = -1 if iteration in iterationDataEvo[run]: valEvo = iterationDataEvo[run][iteration] else: #unchanged valEvo = unifiedEvo[-2][run] currentEvo.append(valEvo) valCross = -1 if iteration in iterationDataCross[run]: valCross = iterationDataCross[run][iteration] else: #unchanged valCross = unifiedCross[-2][run] currentCross.append(valCross) randomAverage = statistics.mean(currentRandom) 
randomError = statistics.stdev(currentRandom) / sqrtRuns random.append(randomAverage) randombars.append(randomError) hillAverage = statistics.mean(currentHill) hillError = statistics.stdev(currentHill) / sqrtRuns hill.append(hillAverage) hillbars.append(hillError) evoAverage = statistics.mean(currentEvo) evoError = statistics.stdev(currentEvo) / sqrtRuns evo.append(evoAverage) evobars.append(evoError) crossAverage = statistics.mean(currentCross) crossError = statistics.stdev(currentCross) / sqrtRuns cross.append(crossAverage) crossbars.append(crossError) for i in range(len(random)): rmin.append(random[i] - randombars[i]) rmax.append(random[i] + randombars[i]) hmin.append(hill[i] - hillbars[i]) hmax.append(hill[i] + hillbars[i]) emin.append(evo[i] - evobars[i]) emax.append(evo[i] + evobars[i]) cmin.append(cross[i] - crossbars[i]) cmax.append(cross[i] + crossbars[i]) print("Done processing data") plt.xscale('log') #plt.yscale('log') #plt.plot(indicesToPlot, random, color='blue', linewidth=1, label='Random Search') plt.plot(indicesToPlot, hill, color='green', linewidth=1, label='Parallel Hill Climb') plt.plot(indicesToPlot, evo, color='red', linewidth=1, label='Weighted Selection') plt.plot(indicesToPlot, cross, color='blue', linewidth=1, label='Parental Replacement') plt.fill_between(indicesToPlot, hmin, hmax, facecolor='green', lw=0, alpha=0.5) plt.fill_between(indicesToPlot, emin, emax, facecolor='red', lw=0, alpha=0.5) plt.fill_between(indicesToPlot, cmin, cmax, facecolor='blue', lw=0, alpha=0.5) #plt.fill_between(indicesToPlot, rmin, rmax, facecolor='blue', lw=0, alpha=0.5) plt.legend(loc='best') plt.savefig('diversityp.png', dpi=500) plt.show()
30.939394
90
0.623572
26cfea22c43edc42786c9199d503d77927f66e4d
1,918
py
Python
python/obra_hacks/backend/commands.py
brandond/obra-hacks
df451c6c6cd78b48f6e32bbd102a8e8a6bd77cb3
[ "Apache-2.0" ]
null
null
null
python/obra_hacks/backend/commands.py
brandond/obra-hacks
df451c6c6cd78b48f6e32bbd102a8e8a6bd77cb3
[ "Apache-2.0" ]
null
null
null
python/obra_hacks/backend/commands.py
brandond/obra-hacks
df451c6c6cd78b48f6e32bbd102a8e8a6bd77cb3
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import unicode_literals import logging from datetime import date import click from .data import DISCIPLINE_MAP from .outputs import OUTPUT_MAP if __name__ == '__main__': cli()
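# --- hedged sketch, not part of the original file ---
# cli() is invoked above but not included in this record. Given the click
# import and the DISCIPLINE_MAP / OUTPUT_MAP lookups, the command group
# plausibly resembled the following; option names and the subcommand are
# assumptions, not the project's real interface.
logger = logging.getLogger(__name__)


@click.group()
@click.option('--debug', is_flag=True, help='Enable debug logging')
def cli(debug):
    logging.basicConfig(level=logging.DEBUG if debug else logging.INFO)


@cli.command()
@click.option('--discipline', type=click.Choice(sorted(DISCIPLINE_MAP)), required=True)
@click.option('--output', type=click.Choice(sorted(OUTPUT_MAP)), required=True)
def report(discipline, output):
    """Hypothetical subcommand: emit a report for one discipline."""
    logger.info('Reporting %s as of %s via %s', discipline, date.today(), output)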
34.25
100
0.684567
26d2a8925926b05405485ed3b4fa01550942c26f
657
py
Python
join_json.py
ryavorsky/med_robo
56f8d2067921ef7208166380e50af0600c10032a
[ "CC0-1.0" ]
null
null
null
join_json.py
ryavorsky/med_robo
56f8d2067921ef7208166380e50af0600c10032a
[ "CC0-1.0" ]
null
null
null
join_json.py
ryavorsky/med_robo
56f8d2067921ef7208166380e50af0600c10032a
[ "CC0-1.0" ]
null
null
null
import json with open('bibliography.json', 'r', encoding='utf-8') as bib_data: bib = sorted(json.load(bib_data), key=lambda d: d['ID']) with open('abstracts.json', 'r', encoding='utf-8') as tex_data: tex = sorted(json.load(tex_data), key=lambda d: d['ID']) ID1 = [b['ID'] for b in bib] ID2 = [t['ID'] for t in tex] for i in range(len(ID1)): bib[i]['reference'] = tex[i]['title'] bib[i]['abstract'] = tex[i]['abstract'] print('Done') with open('med_robo_papers.json', 'w', encoding='utf-8') as res_file: res_file.write(json.dumps(bib, indent=4, ensure_ascii=False, sort_keys=True)) res_file.close()
28.565217
82
0.614916
26d58240f4233e1d13f48a78a83f734ca262cc13
147
py
Python
Qcover/simulator/__init__.py
BAQIS-Quantum/Qcover
ca3776ed73fefa0cfef08042143a8cf842f8dad5
[ "Apache-2.0" ]
38
2021-12-22T03:12:01.000Z
2022-03-17T06:57:10.000Z
Qcover/simulator/__init__.py
BAQIS-Quantum/Qcover
ca3776ed73fefa0cfef08042143a8cf842f8dad5
[ "Apache-2.0" ]
null
null
null
Qcover/simulator/__init__.py
BAQIS-Quantum/Qcover
ca3776ed73fefa0cfef08042143a8cf842f8dad5
[ "Apache-2.0" ]
13
2021-12-22T07:32:44.000Z
2022-02-28T06:47:41.000Z
from .qton import Qcircuit, Qcodes import warnings warnings.filterwarnings("ignore") __all__ = [ # 'Simulator', 'Qcircuit', 'Qcodes' ]
16.333333
34
0.680272
26d8d630adbf36e69e2b1f614c164c0bdbf94301
7,563
py
Python
pizzerias/pizzerias_search.py
LiushaHe0317/pizzerias_block_search
16dd7fb20b1a29a4f16b28ac7e5a84b30f7f9a7b
[ "MIT" ]
null
null
null
pizzerias/pizzerias_search.py
LiushaHe0317/pizzerias_block_search
16dd7fb20b1a29a4f16b28ac7e5a84b30f7f9a7b
[ "MIT" ]
null
null
null
pizzerias/pizzerias_search.py
LiushaHe0317/pizzerias_block_search
16dd7fb20b1a29a4f16b28ac7e5a84b30f7f9a7b
[ "MIT" ]
null
null
null
from typing import Sequence import numpy def check_area(self, loc: Sequence, radius: int, report=False): """ This method takes a location coordinate and a radius and search the delivery services around this specified area. :param loc: A tuple of integers. :param radius: An integer. :param report: A boolean that indicates whether or not print a report. return: - A sub-matrix of the pizzerias matrix which is created in terms of specified range. - A maximum in this area. - A set of cells that have maximum. """ matrix = self.area_matrix(loc, radius) x_initial, y_initial = loc y_center = self.n_of_block - y_initial x_center = x_initial - 1 low0 = y_center - radius if y_center - radius >= 0 else 0 left1 = x_center - radius if x_center - radius >= 0 else 0 maximum = self.maximum_in_matrix(matrix) max_set = self.max_locations(matrix=matrix, d0_start=low0, d1_start=left1) if report: print(f"In the given area, there are {len(max_set)} areas where {maximum} Pizzerias delivery service " f"can cover, they are: ", max_set) return matrix, maximum, max_set def check_city(self, report=False): """ This method returns the matrix, the maximum and a set of maximum tuple of cells. :param report: A boolean indicating whether or not print report. :return: - The pizzerias matrix. - A maximum in this the pizzerias matrix. - A set of cells that have maximum. """ if report: print(f"There are {len(self.max_locations())} area(s) where {self.maximum_in_matrix()} Pizzerias can cover, " f"they are: ", self.max_locations()) return self.pizzerias_matrix, self.maximum_in_matrix(), self.max_locations()
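# --- hedged sketch, not part of the original file ---
# The methods above belong to a class (with pizzerias_matrix, n_of_block,
# area_matrix, ...) that is not included in this record. Two of the missing
# helpers they call are sketched here under assumptions about the matrix
# layout; they are illustrations, not the original implementation.
def maximum_in_matrix(self, matrix=None):
    # largest number of pizzerias covering any single block
    matrix = self.pizzerias_matrix if matrix is None else matrix
    return int(numpy.max(matrix))


def max_locations(self, matrix=None, d0_start=0, d1_start=0):
    # cells holding the maximum, shifted back into whole-city coordinates
    matrix = self.pizzerias_matrix if matrix is None else matrix
    best = numpy.max(matrix)
    rows, cols = numpy.where(matrix == best)
    return {(r + d0_start, c + d1_start) for r, c in zip(rows, cols)}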
42.728814
123
0.627793
26d8feef12cddd2dca60e0f08ac5f863599108a2
1,213
py
Python
analysis/hist_javelin.py
LiyrAstroph/MICA
2592b8ad3011880898f557a69b22cad63fcd47e0
[ "MIT" ]
1
2016-10-25T06:32:33.000Z
2016-10-25T06:32:33.000Z
analysis/hist_javelin.py
LiyrAstroph/MICA
2592b8ad3011880898f557a69b22cad63fcd47e0
[ "MIT" ]
null
null
null
analysis/hist_javelin.py
LiyrAstroph/MICA
2592b8ad3011880898f557a69b22cad63fcd47e0
[ "MIT" ]
3
2016-12-29T06:04:13.000Z
2020-04-12T11:48:42.000Z
import numpy as np import matplotlib.pyplot as plt import corner mcmc = np.loadtxt("mychain1.dat") ntheta = mcmc.shape[1] fig = plt.figure(1, figsize=(15, 6)) ax = fig.add_subplot(231) ax.hist(mcmc[:, 0]/np.log(10.0), 100, normed=True, range=(-0.9, -0.1)) ax = fig.add_subplot(232) ax.hist(mcmc[:, 1]/np.log(10.0), 100, normed=True, range=(0.0, 2.0)) ax = fig.add_subplot(234) ax.hist(mcmc[:, 2], 100, normed=True, range=(1.0, 2.8)) ax = fig.add_subplot(235) ax.hist(mcmc[:, 3], 100, normed=True, range=(0.0, 1.2)) ax = fig.add_subplot(236) ax.hist(mcmc[:, 4], 100, normed=True, range=(5, 13)) mcmc = np.loadtxt("../data/mcmc.txt") ntheta = mcmc.shape[1] nb = 20000 fig = plt.figure(2, figsize=(15, 6)) ax = fig.add_subplot(231) ax.hist( (mcmc[nb:, 1]+0.5*mcmc[nb:, 2]-0.5*np.log(2.0))/np.log(10.0), 100, normed=True, range=(-0.9, -0.1)) ax = fig.add_subplot(232) ax.hist(mcmc[nb:, 2]/np.log(10), 100, normed=True, range=(0.0, 2.0)) ax = fig.add_subplot(234) ax.hist(mcmc[nb:, 5], 100, normed=True, range=(1.0, 2.8)) ax = fig.add_subplot(235) ax.hist(mcmc[nb:, 3], 100, normed=True, range=(0.0, 1.2)) ax = fig.add_subplot(236) ax.hist(mcmc[nb:, 4], 100, normed=True, range=(5, 13)) plt.show()
24.26
108
0.635614
26da85c2640497939b911d5705595d7671906491
1,158
py
Python
tests/test_stats.py
janjaappape/pastas
521b27efd921e240df0717038f8389d62099b8ff
[ "MIT" ]
252
2017-01-25T05:48:53.000Z
2022-03-31T17:46:37.000Z
tests/test_stats.py
janjaappape/pastas
521b27efd921e240df0717038f8389d62099b8ff
[ "MIT" ]
279
2017-02-14T10:59:01.000Z
2022-03-31T09:17:37.000Z
tests/test_stats.py
janjaappape/pastas
521b27efd921e240df0717038f8389d62099b8ff
[ "MIT" ]
57
2017-02-14T10:26:54.000Z
2022-03-11T14:04:48.000Z
import numpy as np import pandas as pd import pastas as ps def test_runs_test(): """ http://www.itl.nist.gov/div898/handbook/eda/section3/eda35d.htm True Z-statistic = 2.69 Read NIST test data """ data = pd.read_csv("tests/data/nist.csv") test, _ = ps.stats.runs_test(data) assert test[0] - 2.69 < 0.02
27.571429
79
0.638169
26db23f57ee2cf9c420d9e5404d2b60d7671991a
320
py
Python
venv/lib64/python3.8/site-packages/tld/registry.py
nrfkhira/dnx-engine
99a326d83058bcfe54a0f455672d90637fe753c6
[ "MIT" ]
null
null
null
venv/lib64/python3.8/site-packages/tld/registry.py
nrfkhira/dnx-engine
99a326d83058bcfe54a0f455672d90637fe753c6
[ "MIT" ]
null
null
null
venv/lib64/python3.8/site-packages/tld/registry.py
nrfkhira/dnx-engine
99a326d83058bcfe54a0f455672d90637fe753c6
[ "MIT" ]
null
null
null
import warnings from .base import Registry __author__ = "Artur Barseghyan" __copyright__ = "2013-2021 Artur Barseghyan" __license__ = "MPL-1.1 OR GPL-2.0-only OR LGPL-2.1-or-later" __all__ = ("Registry",) warnings.warn( "The `Registry` class is moved from `tld.registry` to `tld.base`.", DeprecationWarning, )
24.615385
71
0.721875
26dc6cf0e3afad0c2ebf41ec4b792f1e330897c5
2,963
py
Python
hvo_api/model/gps.py
wtollett-usgs/hvo_api
cdd39cb74d28a931cac4b843a71c5d8435f4620c
[ "CC0-1.0" ]
null
null
null
hvo_api/model/gps.py
wtollett-usgs/hvo_api
cdd39cb74d28a931cac4b843a71c5d8435f4620c
[ "CC0-1.0" ]
null
null
null
hvo_api/model/gps.py
wtollett-usgs/hvo_api
cdd39cb74d28a931cac4b843a71c5d8435f4620c
[ "CC0-1.0" ]
null
null
null
# -*- coding: utf-8 -*- from valverest.database import db7 as db from sqlalchemy.ext.hybrid import hybrid_property
22.44697
79
0.59838
26ddb52d2be72d7d4dbeca2609c7ac5ce525625e
2,091
py
Python
SingleIRdetection/get_data.py
biqute/QTLab2122
4d53d4c660bb5931615d8652e698f6d689a4dead
[ "MIT" ]
3
2021-11-30T18:41:11.000Z
2021-12-12T12:27:14.000Z
SingleIRdetection/get_data.py
biqute/QTLab2122
4d53d4c660bb5931615d8652e698f6d689a4dead
[ "MIT" ]
null
null
null
SingleIRdetection/get_data.py
biqute/QTLab2122
4d53d4c660bb5931615d8652e698f6d689a4dead
[ "MIT" ]
null
null
null
from instruments import VNA_handler, Fridge_handler import os import time from datetime import date, datetime today = date.today() d1 = today.strftime("_%d_%m") directory = "data"+d1 dir_path=os.path.join(os.path.dirname(os.path.abspath(__file__)),directory) if not os.path.isdir(dir_path): try: os.mkdir(directory) except: pass VNA_lab=VNA_handler() Fridge=Fridge_handler() temps=[] freqs1=[] freqs2=[] r = Fridge.execute("C3") file_log = open(directory + "\\log.txt", "w") with open('temperatures_gap.txt', encoding='utf-8') as file: for line in file: line = line.replace('\n', '') temps.append(int(line)) with open('frequency_ranges_gap_1.txt', encoding='utf-8') as file: for line in file: line = line.replace('\n', '') splitted = [float(x) for x in line.split('\t')] freqs1.append(splitted) with open('frequency_ranges_gap_2.txt', encoding='utf-8') as file: for line in file: line = line.replace('\n', '') splitted = [float(x) for x in line.split('\t')] freqs2.append(splitted) for T in temps: try: print("Set temp: " + str(T)) print(f"{datetime.now():%H:%M:%S}\tsens_1:{Fridge.get_T(1)}\tsens_2:{Fridge.get_T(2)}\tsens_3:{Fridge.get_T(3)}\tG1: {Fridge.get_T(14)}\tG2: {Fridge.get_T(15)}") log_sensori() time.sleep(10) Fridge.wait_for_T(T) if T >= 200: freqs = freqs2 else: freqs = freqs1 for idx,f in enumerate(freqs): file_name=str(T)+'mK_range'+str(idx+1)+'.txt' print("Set freqs: " + str(f[0]) + " - "+ str(f[1])) VNA_lab.set_sweep_freq(f[0],f[1]) VNA_lab.inst.write('AVERREST;') time.sleep(40) VNA_lab.save_sweep_data(directory + '\\' + file_name, 'polar') except: pass log_sensori() Fridge.set_T(0) log_sensori() file_log.close()
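# --- hedged sketch, not part of the original file ---
# log_sensori() is called above but its definition is not included in this
# record. Given the open file_log handle and the sensor readout used in the
# print statement, an assumed implementation is:
def log_sensori():
    file_log.write('{:%H:%M:%S}\tsens_1:{}\tsens_2:{}\tsens_3:{}\tG1:{}\tG2:{}\n'.format(
        datetime.now(),
        Fridge.get_T(1), Fridge.get_T(2), Fridge.get_T(3),
        Fridge.get_T(14), Fridge.get_T(15)))
    file_log.flush()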
27.155844
169
0.595887
26ddc48f78a12f6195556b4fffb431166aa3a248
1,356
py
Python
repos.py
gigamonkey/git-utils
ac26ccab836b276fb7061167b4b2dc2a6bd87e66
[ "BSD-3-Clause" ]
null
null
null
repos.py
gigamonkey/git-utils
ac26ccab836b276fb7061167b4b2dc2a6bd87e66
[ "BSD-3-Clause" ]
1
2021-05-04T19:45:16.000Z
2021-05-04T19:45:16.000Z
repos.py
gigamonkey/git-utils
ac26ccab836b276fb7061167b4b2dc2a6bd87e66
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python3 """ Get a json dump of all the repos belonging to a GitHub org or user. """ import json import os import sys from functools import reduce import requests url = "https://api.github.com/graphql" token = os.environ["GITHUB_TOKEN"] headers = {"Authorization": "bearer {}".format(token)} FIELDS = [ "name", "description", "sshUrl", "isArchived", "isFork", "isPrivate", "pushedAt", ] if __name__ == "__main__": who = sys.argv[1] edges = True after = None while edges: r = requests.post(url, json={"query": query(who, after)}, headers=headers) edges = json.loads(r.text)["data"]["organization"]["repositories"]["edges"] for e in edges: print(json.dumps(node(e))) after = edges[-1]["cursor"]
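# --- hedged sketch, not part of the original file ---
# query() and node() are used above but not included in this record. A
# plausible reconstruction, based on the FIELDS list and the way the response
# is unpacked (organization -> repositories -> edges), is sketched below; the
# exact GraphQL the original sent is an assumption.
def query(who, after=None):
    after_clause = ', after: "{}"'.format(after) if after else ''
    return '''
    {
      organization(login: "%s") {
        repositories(first: 100%s) {
          edges {
            cursor
            node { %s }
          }
        }
      }
    }
    ''' % (who, after_clause, ' '.join(FIELDS))


def node(edge):
    # flatten one edge into a plain dict of just the requested fields
    return {field: edge['node'].get(field) for field in FIELDS}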
22.229508
153
0.597345
26def15c65ab2e4480b9091dca33bf04179a4722
3,705
py
Python
test_package/conanfile.py
sintef-ocean/conan-casadi
70a14829ca3b3ec4cdff8b254e3c060b345c1e79
[ "MIT" ]
null
null
null
test_package/conanfile.py
sintef-ocean/conan-casadi
70a14829ca3b3ec4cdff8b254e3c060b345c1e79
[ "MIT" ]
null
null
null
test_package/conanfile.py
sintef-ocean/conan-casadi
70a14829ca3b3ec4cdff8b254e3c060b345c1e79
[ "MIT" ]
null
null
null
from conans import ConanFile, CMake, tools, RunEnvironment
42.102273
88
0.612146
26e0374db2378f11fc9bfc31927fa2a8ccdcf58c
1,995
py
Python
src/blog/templatetags/timediffer.py
codewithrakib/first-django-blog
339f5833025b0758f391c7c8e0979ca2eefd1b52
[ "MIT" ]
null
null
null
src/blog/templatetags/timediffer.py
codewithrakib/first-django-blog
339f5833025b0758f391c7c8e0979ca2eefd1b52
[ "MIT" ]
7
2021-03-19T02:00:00.000Z
2022-02-10T10:26:38.000Z
src/blog/templatetags/timediffer.py
codewithrakib/first-django-blog
339f5833025b0758f391c7c8e0979ca2eefd1b52
[ "MIT" ]
null
null
null
from django import template from datetime import datetime from datetime import date from datetime import time from datetime import timedelta register = template.Library()
30.227273
56
0.578947
26e08af32bb2b5d0bbed9fd354924f064bde0ecf
254
py
Python
ma.py
AmandaKhol/DLT-Blockchain
ee464b0d7b55bffe791eb0b814513620430bfa1c
[ "MIT" ]
1
2021-04-05T18:59:04.000Z
2021-04-05T18:59:04.000Z
ma.py
AmandaKhol/DLT-Blockchain
ee464b0d7b55bffe791eb0b814513620430bfa1c
[ "MIT" ]
null
null
null
ma.py
AmandaKhol/DLT-Blockchain
ee464b0d7b55bffe791eb0b814513620430bfa1c
[ "MIT" ]
null
null
null
""" title : ma.py description : Marshmallow object author : Amanda Garcia-Garcia version : 0 usage : python server_api.py python_version : 3.6.1 """ from flask_marshmallow import Marshmallow ma = Marshmallow()
19.538462
41
0.629921
26e3cb56bf5c43ffe1ebc53ce33bf565445ae974
6,107
py
Python
FGMabiotic.py
tjscott214/long-term-conflict-with-1nFGM
1c701e83c71ebe21fbc1192ca3d523a000614819
[ "MIT" ]
2
2019-09-13T13:46:33.000Z
2020-05-14T17:21:09.000Z
FGMabiotic.py
tjscott214/long-term-conflict-with-1nFGM
1c701e83c71ebe21fbc1192ca3d523a000614819
[ "MIT" ]
null
null
null
FGMabiotic.py
tjscott214/long-term-conflict-with-1nFGM
1c701e83c71ebe21fbc1192ca3d523a000614819
[ "MIT" ]
null
null
null
#!/usr/bin/env python ### This program simulates Fisher's geometric model with abiotic change equal to fixations during conflict simulations (from FGMconflict.py) ### ### python3 FGMabiotic.py -help for input options ### ### Written by Trey J Scott 2018 ### ### python --version ### ### Python 3.5.2 :: Anaconda 4.2.0 (x86_64) ### # Import programs import random import numpy as np from scipy.spatial import distance as dist from scipy.stats import norm import scipy.stats as stats import matplotlib.pyplot as plt import pandas as pd import argparse import scipy.special as spc from itertools import groupby ### FUNCTIONS ### # Function to generate random mutations with a specified average size # Gaussian fitness function # Calculates probability of fixation for new mutations # Functon that simulates adaptation to a moving optimum with Fisher's geometric model # Runs simulations multiple times ### SET ARGUMENTS ap = argparse.ArgumentParser() ap.add_argument('-x', '--samples', help = 'number of resamples', type = int) ap.add_argument('-p', '--population_size1', help = 'population size for one population', type = int) ap.add_argument('-pp', '--population_size2', help = 'population size for second population', type = int) ap.add_argument('-m', '--mutations', help = 'mutation distribution for mutation vectors') ap.add_argument('-q', '--Q', help = 'changes Q parameter in fitness function', type = float) ap.add_argument('-z', '--attempts', help = 'number of generations per walk', type = int) ap.add_argument('-c', '--init_fit', help = 'changes the distance optimal values by a factor of the input value', type = float) ap.add_argument('-r', '--rate', help = 'mutation rate for population 1', type = int) ap.add_argument('-b', '--burn_in', help = 'define burn in period for equilibrium', type = int) ap.add_argument('-a', '--ave_mut', help = 'average mutation norm', type = float) ap.add_argument('-d', '--selection', help = 'Adjust strength of selection', type = float) ap.add_argument('-mut', '--changes', help = 'mutation file for moving optimum', type = str) args = ap.parse_args() # get arguments if args.samples: samples = args.samples else: samples = 500 # Define initial position and optima position1 = np.zeros(1) position = position1 position2 = position1 if args.init_fit: r = 1-args.init_fit else: r = 1-0.2 # Set average norm size for mutations if args.ave_mut: average_mutation = args.ave_mut else: average_mutation = 0.1 # Get population sizes # Population 1 if args.population_size1: N_1 = 10**(args.population_size1) else: N_1 = 'infinite' # Population 2 if args.population_size2: N_2 = 10**(args.population_size2) else: N_2 = 'infinite' # Get distributions # Mutation distribution (default is uniform) if args.mutations: distribution = args.mutations else: distribution = 'normal' # Number of mutations if args.attempts: m = args.attempts else: m = 50000 # Get mutation rate if args.rate: rate = args.rate else: rate = 1 # Calculate normalization factor (used in mutation function) sd_1d = average_mutation*((np.pi)**(1/2))/(2**(1/2)) uni = 2*average_mutation expo = average_mutation if args.burn_in: burn_in = args.burn_in else: burn_in = 0 if args.Q: Q = args.Q q_string = 'Q_' + str(Q) + '_' else: Q = 2 q_string = '' if args.selection: d1 = args.selection else: d1 = 0.5 if args.changes: shake_file = args.changes[:-7] + 'mut.csv' # Open output file output = open('abiotic_data.csv', 'w') output.write('Iteration,Simulation,z,s,Mutation Size,Fitness,Population,Status\n') ### Run simulations run_simulations(position, 
samples)
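# --- hedged sketch, not part of the original file ---
# The function bodies announced by the comments above (mutation generator,
# Gaussian fitness function, fixation probability, simulation loop) are not
# included in this record. The two central pieces of Fisher's geometric model
# are sketched here under standard textbook assumptions; they are
# illustrations, not the author's original code.
def fitness(phenotype, optimum, q=Q):
    # generalized Gaussian fitness: w = exp(-(||z - o||**Q) / 2)
    z = dist.euclidean(phenotype, optimum)
    return np.exp(-(z ** q) / 2.0)


def probability_of_fixation(w_old, w_new, population_size):
    # selection coefficient of the new mutation and a haploid fixation probability
    s = w_new / w_old - 1.0
    if population_size == 'infinite':
        return max(2.0 * s, 0.0)          # Haldane's 2s approximation
    if s == 0.0:
        return 1.0 / population_size       # neutral case
    return (1.0 - np.exp(-2.0 * s)) / (1.0 - np.exp(-2.0 * population_size * s))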
34.117318
192
0.7159
26e5678c410804c82e1a66c1a1c30cc2e8b118d5
873
py
Python
epdif.py
cvasqxz/rpi-epd
b7921190dd84b1187364902f0e3059cba5a1973f
[ "MIT" ]
null
null
null
epdif.py
cvasqxz/rpi-epd
b7921190dd84b1187364902f0e3059cba5a1973f
[ "MIT" ]
null
null
null
epdif.py
cvasqxz/rpi-epd
b7921190dd84b1187364902f0e3059cba5a1973f
[ "MIT" ]
null
null
null
import spidev import RPi.GPIO as GPIO import time import yaml with open("config.yml", 'r') as f: cfg = yaml.load(f, Loader=yaml.FullLoader) # Pin definition RST_PIN = cfg['pinout']['RST_PIN'] DC_PIN = cfg['pinout']['DC_PIN'] CS_PIN = cfg['pinout']['CS_PIN'] BUSY_PIN = cfg['pinout']['BUSY_PIN'] # SPI device, bus = 0, device = 0 SPI = spidev.SpiDev(0, 0)
21.292683
46
0.683849
26e616bae86ed51b35013c799f67005f184552f2
2,469
py
Python
main.py
amankumarjsr/BinanceDataScrapper
e3d56c4bd274a8e472de1fbe1c9603c9e94e1d14
[ "Apache-2.0" ]
null
null
null
main.py
amankumarjsr/BinanceDataScrapper
e3d56c4bd274a8e472de1fbe1c9603c9e94e1d14
[ "Apache-2.0" ]
null
null
null
main.py
amankumarjsr/BinanceDataScrapper
e3d56c4bd274a8e472de1fbe1c9603c9e94e1d14
[ "Apache-2.0" ]
null
null
null
from datetime import date from unicodedata import name from urllib import request import requests from bs4 import BeautifulSoup as bs import pandas as pd import datetime import os import zipfile import glob CoinName= input('Enter the coin name: ').upper() duration= input('Enter the duration of data you want(1m,1h,2h): ').lower() start_date= input ('Enter the date (dd-mm-yyyy): ') end_date= input('Enter the end date (dd-mm-yyyy): ') coin= requests.get('https://data.binance.vision/?prefix=data/spot/daily/klines/') ucoin= bs(coin.content , 'html.parser') start = datetime.datetime.strptime(start_date, "%d-%m-%Y") end = datetime.datetime.strptime(end_date, "%d-%m-%Y") date_generated = [start + datetime.timedelta(days=x) for x in range(0, (end-start).days)] date_list=[] for date in date_generated: x=date.strftime("%Y-%m-%d") date_list.append(x) file_name_list= [] cols=['opening time', 'opening price','highest price','lowest price','closing price','volume','closing time','turnover','number of transactions','active buy volume','NA','NAN'] for item in date_list: try: file_name=(f'{CoinName}-{duration}-{item}.zip') download_mainurl= (f'https://data.binance.vision/data/spot/daily/klines/{CoinName}/{duration}/{CoinName}-{duration}-{item}.zip') download= requests.get(download_mainurl, allow_redirects= True) print(f'Scrapping data of {item} ') with open(file_name, 'wb') as f: f.write(download.content) with zipfile.ZipFile(file_name, 'r') as zip_ref: zip_ref.extractall('C:/Users/rocka/Desktop/Practice python/Binance data scrapper/data') file_name_list.append(file_name+'.csv') os.remove(file_name) except: print('skipped') continue master_df= pd.DataFrame() for file in os.listdir('C:/Users/rocka/Desktop/Practice python/Binance data scrapper/data'): if file.endswith('.csv'): master_df= master_df.append(pd.read_csv('C:/Users/rocka/Desktop/Practice python/Binance data scrapper/data/'+file, names= cols)) master_df.to_csv(f'{CoinName}-{duration}-master file.csv', index=False) for file in os.listdir('C:/Users/rocka/Desktop/Practice python/Binance data scrapper/data'): if file.endswith('.csv'): os.remove('C:/Users/rocka/Desktop/Practice python/Binance data scrapper/data/'+file) print('Data Scrapped sucessfully!!!')
34.291667
177
0.681247
26e61f306df9220c42f34738c067514777287317
19,370
py
Python
api/api.py
geoai-lab/GeoAnnotator
6d5ee22888571f5ffefdb1d2f2455eaa9e5054f3
[ "MIT" ]
1
2022-02-14T20:43:41.000Z
2022-02-14T20:43:41.000Z
api/api.py
geoai-lab/GeoAnnotator
6d5ee22888571f5ffefdb1d2f2455eaa9e5054f3
[ "MIT" ]
null
null
null
api/api.py
geoai-lab/GeoAnnotator
6d5ee22888571f5ffefdb1d2f2455eaa9e5054f3
[ "MIT" ]
null
null
null
from flask import Flask, jsonify, request, session,redirect, url_for import bcrypt from flask_sqlalchemy import SQLAlchemy from sqlalchemy.sql import func from sqlalchemy.exc import IntegrityError import os from sqlalchemy.orm import load_only from flask_bcrypt import Bcrypt import urllib.parse from itertools import groupby from operator import attrgetter import json from flask_cors import CORS, cross_origin from flask_session import Session import redis from werkzeug.utils import secure_filename from datetime import datetime, timedelta, timezone from models import db, tweet_database, User, LoginForm, Project, Submission, CompareSubmission from dotenv import load_dotenv from flask_login import LoginManager, login_required, login_user, current_user, logout_user from sqlalchemy.orm import sessionmaker import pandas as pd import requests from sqlalchemy.types import String, DateTime import io load_dotenv() app = Flask(__name__,static_folder="../build", static_url_path='/')# app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///HarveyTwitter.db" app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True app.config["SECRET_KEY"] = "6236413AA53537DE57D1F6931653B" app.config['SQLALCHEMY_ECHO'] = True app.config['SESSION_TYPE'] = "filesystem" # causes bugs right here this needs to be in redis soon need to download reddis and do some reddis cli stuff app.config['SESSION_USE_SIGNER'] = True #app.config['SESSION_COOKIE_NAME'] #app.config['SESSION_COOKIE_DOMAIN] #app.config['SESSIO N_COOKies] #app.config['SESSION_COOKIE_SECURE'] = True # add this to make the cookies invisible or something bcrypt = Bcrypt(app) # this is encyrpt the app CORS(app, supports_credentials=True) server_session = Session(app) db.__init__(app) with app.app_context(): db.create_all() login_manager = LoginManager() login_manager.init_app(app) with app.app_context(): # before intialization of the app, commands under here are ran first # Replace with the commented when running the command gunicorn3 -w 3 GeoAnnotator.api:app optionsData = jsonify(json.load(open('../../createProjectOptions.json'))) # 'GeoAnnotator/api/createProjectOptions.json' configurationsData = json.load(open('../../configuration_data.json')) # 'GeoAnnotator/api/configuration_data.json' if __name__ == '__main__': app.run(host='0.0.0.0')
40.10352
257
0.663087
26e97e5ea8220154eb41374939938275b9e537b0
741
py
Python
AppPython/app/core/src/forms.py
denalme/AplicacionPython
eb99af3c21f003135192ad040a0a04a40b63ea70
[ "MIT" ]
null
null
null
AppPython/app/core/src/forms.py
denalme/AplicacionPython
eb99af3c21f003135192ad040a0a04a40b63ea70
[ "MIT" ]
null
null
null
AppPython/app/core/src/forms.py
denalme/AplicacionPython
eb99af3c21f003135192ad040a0a04a40b63ea70
[ "MIT" ]
null
null
null
from django import forms from .pqrsf import pqrsf
74.1
167
0.747638
26ec29318bc12813be99da269d94707649d0104c
3,244
py
Python
prisma/generated/models.py
mao-shonen/prisma-client-py-tortoise-orm
e26d8451ea0775bd7ddfec42f663510434537a77
[ "MIT" ]
null
null
null
prisma/generated/models.py
mao-shonen/prisma-client-py-tortoise-orm
e26d8451ea0775bd7ddfec42f663510434537a77
[ "MIT" ]
null
null
null
prisma/generated/models.py
mao-shonen/prisma-client-py-tortoise-orm
e26d8451ea0775bd7ddfec42f663510434537a77
[ "MIT" ]
null
null
null
__doc__ = ''' This file is generated by the `prisma-client-py-tortoise-orm (0.2.2)`, Please do not modify directly. repository: https://github.com/mao-shonen/prisma-client-py-tortoise-orm ''' import typing from enum import Enum from tortoise import fields from tortoise.models import Model from prisma import base as base __all__ = ['Role', 'User', 'Post', 'Group']
30.317757
203
0.633785
26ed69ff9590d721e4368e521015afe41d5f9df5
2,536
py
Python
samples/people_on_stairs/classify_overspeeding/classify_overspeeding.py
vgvoleg/gst-video-analytics
7e4006551f38334bc59b2ef3d205273d07d40ce4
[ "MIT" ]
null
null
null
samples/people_on_stairs/classify_overspeeding/classify_overspeeding.py
vgvoleg/gst-video-analytics
7e4006551f38334bc59b2ef3d205273d07d40ce4
[ "MIT" ]
null
null
null
samples/people_on_stairs/classify_overspeeding/classify_overspeeding.py
vgvoleg/gst-video-analytics
7e4006551f38334bc59b2ef3d205273d07d40ce4
[ "MIT" ]
1
2020-05-14T15:30:03.000Z
2020-05-14T15:30:03.000Z
from os.path import join, realpath from os import listdir, environ import shlex import subprocess import pickle import json import pickle as pkl import time import numpy as np from copy import copy MODEL_PATH = ("/root/Projects/models/intel/person-detection-retail-0013/FP32" "/person-detection-retail-0013.xml") DATASET_PATH = "/root/Projects/train/" ALPHA = 0.1 ALPHA_HW = 0.01 RES_PATH = ("/root/Projects/gst-video-analytics-0.7.0/samples/" "people_on_stairs/classify_overspeeding/res.json") SVM_PATH = '/root/Projects/models/overspeed_classify/SVM_Classifier_without_interval.sav' CLASSIFY_PIPELINE_TEMPLATE = """gst-launch-1.0 filesrc \ location={} \ ! decodebin ! videoconvert ! video/x-raw,format=BGRx ! gvadetect \ model={} ! queue \ ! gvaspeedometer alpha={} alpha-hw={} interval=0.03333333 \ ! gvapython module={} class=OverspeedClassifier arg=[\\"{}\\"] \ ! fakesink sync=false""" if __name__ == "__main__": svclassifier = pickle.load(open(SVM_PATH, 'rb')) for file_name in listdir(DATASET_PATH): if file_name.endswith(".mp4"): video_path = join(DATASET_PATH, file_name) pipeline_str = CLASSIFY_PIPELINE_TEMPLATE.format( video_path, MODEL_PATH, ALPHA, ALPHA_HW, realpath(__file__), join(DATASET_PATH, file_name.replace('.mp4', '.json')) ) print(pipeline_str) proc = subprocess.run( shlex.split(pipeline_str), env=environ.copy()) if proc.returncode != 0: print("Error while running pipeline") exit(-1)
32.101266
89
0.615536
26f0496f5cee5563d72ece3864af6c3cc42f430c
2,883
py
Python
indicators/migrations/0035_make_indicators_programs_foreignkey.py
mercycorps/TolaWorkflow
59542132fafd611081adb0e8cfaa04abc5886d7a
[ "Apache-2.0" ]
null
null
null
indicators/migrations/0035_make_indicators_programs_foreignkey.py
mercycorps/TolaWorkflow
59542132fafd611081adb0e8cfaa04abc5886d7a
[ "Apache-2.0" ]
268
2020-03-31T15:46:59.000Z
2022-03-31T18:01:08.000Z
indicators/migrations/0035_make_indicators_programs_foreignkey.py
Falliatcom-sa/falliatcom
39fb926de072c296ed32d50cccfb8003ca870739
[ "Apache-2.0" ]
1
2021-01-05T01:58:24.000Z
2021-01-05T01:58:24.000Z
# -*- coding: utf-8 -*- # Generated by Django 1.11.2 on 2018-11-06 08:34 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion
36.493671
105
0.622962
26f1b913f1ee12f1e92139c51f5d8c9e44276d06
4,335
py
Python
pymockserver/client.py
MXWest/py-mockserver
cd0783aac2e5c1b8a021c29a4c70ef5414b7f7cc
[ "MIT" ]
3
2018-06-14T19:44:05.000Z
2020-12-14T04:33:21.000Z
pymockserver/client.py
MXWest/py-mockserver
cd0783aac2e5c1b8a021c29a4c70ef5414b7f7cc
[ "MIT" ]
4
2020-02-01T16:20:18.000Z
2021-03-23T14:43:54.000Z
pymockserver/client.py
MXWest/py-mockserver
cd0783aac2e5c1b8a021c29a4c70ef5414b7f7cc
[ "MIT" ]
2
2020-02-01T16:25:50.000Z
2021-03-23T13:06:25.000Z
import requests import json from urllib3.exceptions import HTTPError
29.290541
90
0.514418
26f481dfc45ad24d352172f8f79006991163fc28
5,277
py
Python
workflow/executors/validation_tasks.py
mettadatalabs1/oncoscape-datapipeline
9c3209ba88831c3f1c598182c719ce45b4724fff
[ "Apache-2.0" ]
null
null
null
workflow/executors/validation_tasks.py
mettadatalabs1/oncoscape-datapipeline
9c3209ba88831c3f1c598182c719ce45b4724fff
[ "Apache-2.0" ]
null
null
null
workflow/executors/validation_tasks.py
mettadatalabs1/oncoscape-datapipeline
9c3209ba88831c3f1c598182c719ce45b4724fff
[ "Apache-2.0" ]
null
null
null
from validators.validation_configurator import ValidationConfigurator from pipeline.models import InputFile
49.783019
83
0.617965
26f602e46a5eecf3c443505b6bc8ba0c321a760e
1,290
py
Python
pytglib/api/types/input_message_video_note.py
iTeam-co/pytglib
e5e75e0a85f89b77762209b32a61b0a883c0ae61
[ "MIT" ]
6
2019-10-30T08:57:27.000Z
2021-02-08T14:17:43.000Z
pytglib/api/types/input_message_video_note.py
iTeam-co/python-telegram
e5e75e0a85f89b77762209b32a61b0a883c0ae61
[ "MIT" ]
1
2021-08-19T05:44:10.000Z
2021-08-19T07:14:56.000Z
pytglib/api/types/input_message_video_note.py
iTeam-co/python-telegram
e5e75e0a85f89b77762209b32a61b0a883c0ae61
[ "MIT" ]
5
2019-12-04T05:30:39.000Z
2021-05-21T18:23:32.000Z
from ..utils import Object
28.666667
77
0.615504
26f6c233aae91fb0635319c24ac7a5452088a65f
520
py
Python
gdc_readgroups/exceptions.py
NCI-GDC/gdc-readgroups
874387bb3473b0a0680551339e50b072cc058eb6
[ "Apache-2.0" ]
null
null
null
gdc_readgroups/exceptions.py
NCI-GDC/gdc-readgroups
874387bb3473b0a0680551339e50b072cc058eb6
[ "Apache-2.0" ]
null
null
null
gdc_readgroups/exceptions.py
NCI-GDC/gdc-readgroups
874387bb3473b0a0680551339e50b072cc058eb6
[ "Apache-2.0" ]
1
2020-01-23T22:07:10.000Z
2020-01-23T22:07:10.000Z
""" Exceptions for Read Group headers """
20.8
43
0.746154
26f984eeef056e7ffe65f198d0e3689278e5fc57
2,098
py
Python
aiida_logger/calculations/test_calculations.py
SINTEF/aiida-logger
d97aced2ec8967cb359f488d2218cc3b47c92f6b
[ "MIT" ]
null
null
null
aiida_logger/calculations/test_calculations.py
SINTEF/aiida-logger
d97aced2ec8967cb359f488d2218cc3b47c92f6b
[ "MIT" ]
null
null
null
aiida_logger/calculations/test_calculations.py
SINTEF/aiida-logger
d97aced2ec8967cb359f488d2218cc3b47c92f6b
[ "MIT" ]
null
null
null
""" Tests for calculations. """ from __future__ import print_function from __future__ import absolute_import import os import numpy as np def test_process(logger_code): """ Test running a calculation. Also checks its outputs. """ from aiida.plugins import DataFactory, CalculationFactory from aiida.engine import run from aiida.common.extendeddicts import AttributeDict from aiida_logger.tests import TEST_DIR # pylint: disable=wrong-import-position # Prepare input parameters parameters = AttributeDict() parameters.comment_string = '#' parameters.labels = True # Define input files to use SinglefileData = DataFactory('singlefile') datafile = SinglefileData( file=os.path.join(TEST_DIR, 'input_files', 'datafile')) # Set up calculation inputs = { 'code': logger_code, 'parameters': DataFactory('dict')(dict=parameters), 'datafiles': { 'datafile': datafile }, 'metadata': { 'options': { 'resources': { 'num_machines': 1, 'num_mpiprocs_per_machine': 1 }, 'parser_name': 'logger', 'withmpi': False, 'output_filename': 'logger.out' }, 'description': 'Test job submission with the aiida_logger plugin' }, } result = run(CalculationFactory('logger'), **inputs) assert 'data' in result assert 'metadata' in result data = result['data'] metadata = result['metadata'] metadata = metadata.get_dict() assert 'labels' in metadata assert 'comments' in metadata assert metadata['labels'] == ['time', 'param1', 'param2', 'param3'] assert metadata['comments'][0] == '# This is an example file' test_array = np.array([[1.0e+00, 3.0e+00, 4.0e+00, 5.0e+00], [2.0e+00, 4.0e+00, 5.7e+00, -1.0e-01], [3.0e+00, 1.0e-03, 1.0e+03, 8.0e-01]]) np.testing.assert_allclose(data.get_array('content'), test_array)
29.138889
84
0.594376
26fd4d122991c7a14eaad9bffe766e315791616a
90
py
Python
root/ilikeit/MySQLCrashCourse/dbcom/tests/__init__.py
ChyiYaqing/chyidlTutorial
77e7f6f84f21537a58a8a8a42e31cf2e3dd31996
[ "MIT" ]
5
2018-10-17T05:57:39.000Z
2021-07-05T15:38:24.000Z
root/ilikeit/MySQLCrashCourse/dbcom/tests/__init__.py
ChyiYaqing/chyidlTutorial
77e7f6f84f21537a58a8a8a42e31cf2e3dd31996
[ "MIT" ]
2
2021-04-14T00:48:43.000Z
2021-04-14T02:20:50.000Z
root/ilikeit/MySQLCrashCourse/dbcom/tests/__init__.py
ChyiYaqing/chyidlTutorial
77e7f6f84f21537a58a8a8a42e31cf2e3dd31996
[ "MIT" ]
3
2019-03-02T14:36:19.000Z
2022-03-18T10:12:09.000Z
#!/usr/bin/env python3 # -*- coding:utf-8 -*- """Requests test package initialisation."""
22.5
43
0.655556
26fdabbca3431190e788d02f52c14a320298b8ac
9,425
py
Python
discopy/components/sense/explicit/bert_conn_sense.py
rknaebel/discopy
5507d656987af2df9e595434a82c0a12bbc713e4
[ "MIT" ]
14
2019-04-14T16:10:23.000Z
2022-03-09T14:56:10.000Z
discopy/components/sense/explicit/bert_conn_sense.py
rknaebel/discopy
5507d656987af2df9e595434a82c0a12bbc713e4
[ "MIT" ]
15
2019-04-15T16:44:40.000Z
2021-11-23T17:36:41.000Z
discopy/components/sense/explicit/bert_conn_sense.py
rknaebel/discopy
5507d656987af2df9e595434a82c0a12bbc713e4
[ "MIT" ]
1
2020-02-28T23:36:35.000Z
2020-02-28T23:36:35.000Z
import json import logging import os from typing import List, Dict import click import numpy as np import tensorflow as tf from sklearn.metrics import cohen_kappa_score, precision_recall_fscore_support, accuracy_score from tqdm import tqdm from discopy.components.component import Component from discopy.components.connective.base import get_connective_candidates from discopy.evaluate.conll import evaluate_docs, print_results from discopy.utils import init_logger from discopy_data.data.doc import Document from discopy_data.data.loaders.conll import load_bert_conll_dataset from discopy_data.data.relation import Relation logger = logging.getLogger('discopy') if __name__ == "__main__": main()
44.042056
119
0.634589
f8053be6ee69e87199ea558062ed1fe681dca092
361
py
Python
busshaming/models/agency.py
katharosada/busshaming
c8d7cd4baf9ff049cda49c92da4d5ca10f68e6a9
[ "MIT" ]
42
2018-01-20T01:12:25.000Z
2022-02-02T01:40:17.000Z
busshaming/models/agency.py
katharosada/busshaming
c8d7cd4baf9ff049cda49c92da4d5ca10f68e6a9
[ "MIT" ]
2
2018-01-24T03:58:17.000Z
2018-06-10T01:05:57.000Z
busshaming/models/agency.py
katharosada/busshaming
c8d7cd4baf9ff049cda49c92da4d5ca10f68e6a9
[ "MIT" ]
7
2018-01-24T05:49:13.000Z
2018-12-03T08:47:43.000Z
from django.db import models
25.785714
72
0.67867
f805d1c83458a20d7c8be553923b41b8d8630a7f
328
py
Python
sqltask/base/dq.py
mjalo/sqltask
f6eefd624614a464ae5697ac61405416244518e2
[ "MIT" ]
10
2019-10-09T15:34:13.000Z
2022-02-21T07:44:03.000Z
sqltask/base/dq.py
mjalo/sqltask
f6eefd624614a464ae5697ac61405416244518e2
[ "MIT" ]
23
2019-10-09T15:20:01.000Z
2020-02-08T11:51:24.000Z
sqltask/base/dq.py
mjalo/sqltask
f6eefd624614a464ae5697ac61405416244518e2
[ "MIT" ]
4
2019-10-09T15:20:51.000Z
2020-02-11T08:43:03.000Z
from enum import Enum
15.619048
27
0.618902
f80628afca18060801db523544822b454ace8ecb
2,386
py
Python
main.py
deadpoool69/MediCare
eb45149dd14dc3792ef3ea724c61d46a29718068
[ "MIT" ]
null
null
null
main.py
deadpoool69/MediCare
eb45149dd14dc3792ef3ea724c61d46a29718068
[ "MIT" ]
null
null
null
main.py
deadpoool69/MediCare
eb45149dd14dc3792ef3ea724c61d46a29718068
[ "MIT" ]
null
null
null
from flask import Flask, render_template, request, url_for, redirect from forms import * from model import generate_recommendations, get_desc import os app = Flask(__name__) SECRET_KEY = os.urandom(32) app.config['SECRET_KEY'] = SECRET_KEY if __name__ == '__main__': app.run(debug=True, host='0.0.0.0')
38.483871
102
0.670159
f8065cbbdc71ae71f6d602d2671a71b28b0eea4a
2,057
py
Python
tools/draw_comparison_head_design_choices.py
twangnh/Calibration_mrcnn
e5f3076cefbe35297a403a753bb57e11503db818
[ "Apache-2.0" ]
87
2020-07-24T01:28:39.000Z
2021-08-29T08:40:18.000Z
tools/draw_comparison_head_design_choices.py
twangnh/Calibration_mrcnn
e5f3076cefbe35297a403a753bb57e11503db818
[ "Apache-2.0" ]
3
2020-09-27T12:59:28.000Z
2022-01-06T13:14:08.000Z
tools/draw_comparison_head_design_choices.py
twangnh/Calibration_mrcnn
e5f3076cefbe35297a403a753bb57e11503db818
[ "Apache-2.0" ]
20
2020-09-05T04:37:19.000Z
2021-12-13T02:25:48.000Z
import matplotlib import matplotlib.pyplot as plt import numpy as np labels = ['AP on bin (0,10)', 'AP on bin (10,100)'] baseline = [0.0, 13.3] fc2_ncm = [6.0, 18.9] fc2 = [8.6, 22.0] fc3_rand = [9.1, 18.8] fc3_ft = [13.2, 23.1] x = np.arange(len(labels)) # the label locations width = 0.15 # the width of the bars matplotlib.rcParams.update({'font.size': 16}) # plt.rc('ytick', labelsize=10) fig, ax = plt.subplots() # rects1 = ax.bar(x - width, baseline, width, label='baseline') # rects2 = ax.bar(x - width/2, fc2_ncm, width, label='2fc_ncm') # rects3 = ax.bar(x , baseline, fc2, label='baseline') # rects4 = ax.bar(x + width/2, fc3_rand, width, label='2fc_ncm') # rects5 = ax.bar(x + width, fc3_ft, width, label='baseline') # Set position of bar on X axis r1 = np.arange(len(labels)) r2 = [x + width for x in r1] r3 = [x + width for x in r2] r4 = [x + width for x in r3] r5 = [x + width for x in r4] # Make the plot rects1 = ax.bar(r1, baseline, color='#7f6d5f', width=width, edgecolor='white', label='baseline') rects2 = ax.bar(r2, fc2_ncm, color='#557f2d', width=width, edgecolor='white', label='2fc_ncm') rects3 = ax.bar(r3, fc2, width=width, edgecolor='white', label='2fc_rand') rects4 = ax.bar(r4, fc3_rand, width=width, edgecolor='white', label='3fc_rand') rects5 = ax.bar(r5, fc3_ft, width=width, edgecolor='white', label='3fc_ft') ax.set_ylim([0,25]) ax.set_xticks([0.3, 1.3]) ax.set_xticklabels(labels) ax.legend() def autolabel(rects): """Attach a text label above each bar in *rects*, displaying its height.""" for rect in rects: height = rect.get_height() ax.annotate('{}'.format(height), xy=(rect.get_x() + rect.get_width() / 2, height), xytext=(0, 3), # 3 points vertical offset textcoords="offset points", ha='center', va='bottom') autolabel(rects1) autolabel(rects2) autolabel(rects3) autolabel(rects4) autolabel(rects5) fig.tight_layout() plt.savefig('head_design_choices.eps', format='eps', dpi=1000) plt.show()
31.166667
96
0.6456
f807e6a714508c55a5204cce88f3927910a26a1e
9,916
py
Python
src/entry.py
akilmarshall/vash-2
5307bc414afba24b235ae0ae9b2583c33ea69b1f
[ "MIT" ]
null
null
null
src/entry.py
akilmarshall/vash-2
5307bc414afba24b235ae0ae9b2583c33ea69b1f
[ "MIT" ]
null
null
null
src/entry.py
akilmarshall/vash-2
5307bc414afba24b235ae0ae9b2583c33ea69b1f
[ "MIT" ]
null
null
null
from datetime import datetime from itertools import count from tkinter import * import tkinter.ttk as ttk from functools import partial from tkcalendar import DateEntry from case import COD, CONTRIES, Case, INCIDENT, ORGANIZATION, POLICESTATION, STATES from db import referred_other_agency from preview import CasePreview if __name__ == '__main__': root = Tk() entry = CaseEntry(root) entry.pack() root.mainloop()
38.583658
93
0.588846