Dataset schema (field, dtype, stats):

    hexsha                                      stringlengths    40 .. 40
    size                                        int64            5 .. 2.06M
    ext                                         stringclasses    11 values
    lang                                        stringclasses    1 value
    max_stars_repo_path                         stringlengths    3 .. 251
    max_stars_repo_name                         stringlengths    4 .. 130
    max_stars_repo_head_hexsha                  stringlengths    40 .. 78
    max_stars_repo_licenses                     sequencelengths  1 .. 10
    max_stars_count                             int64            1 .. 191k
    max_stars_repo_stars_event_min_datetime     stringlengths    24 .. 24
    max_stars_repo_stars_event_max_datetime     stringlengths    24 .. 24
    max_issues_repo_path                        stringlengths    3 .. 251
    max_issues_repo_name                        stringlengths    4 .. 130
    max_issues_repo_head_hexsha                 stringlengths    40 .. 78
    max_issues_repo_licenses                    sequencelengths  1 .. 10
    max_issues_count                            int64            1 .. 116k
    max_issues_repo_issues_event_min_datetime   stringlengths    24 .. 24
    max_issues_repo_issues_event_max_datetime   stringlengths    24 .. 24
    max_forks_repo_path                         stringlengths    3 .. 251
    max_forks_repo_name                         stringlengths    4 .. 130
    max_forks_repo_head_hexsha                  stringlengths    40 .. 78
    max_forks_repo_licenses                     sequencelengths  1 .. 10
    max_forks_count                             int64            1 .. 105k
    max_forks_repo_forks_event_min_datetime     stringlengths    24 .. 24
    max_forks_repo_forks_event_max_datetime     stringlengths    24 .. 24
    content                                     stringlengths    1 .. 1.05M
    avg_line_length                             float64          1 .. 1.02M
    max_line_length                             int64            3 .. 1.04M
    alphanum_fraction                           float64          0 .. 1
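The records below follow this schema, one field per line in the order given
above. A minimal sketch of how such a dataset could be loaded and filtered,
assuming it is published in Hugging Face `datasets` form (the dataset path
below is a placeholder, not the real name):

    from datasets import load_dataset

    # Stream the split so the large content column is never fully in memory.
    ds = load_dataset("org/python-code-dataset", split="train", streaming=True)

    for row in ds:
        # Keep small, permissively licensed Python files.
        if row["size"] < 10_000 and "MIT" in row["max_stars_repo_licenses"]:
            print(row["max_stars_repo_name"], row["max_stars_repo_path"])
            print(row["content"][:200])
            break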
d99a1e98eccb58cbc0c0cef6e9e6702f33461b0e
5,886
py
Python
public_data/serializers.py
MTES-MCT/sparte
3b8ae6d21da81ca761d64ae9dfe2c8f54487211c
[ "MIT" ]
null
null
null
public_data/serializers.py
MTES-MCT/sparte
3b8ae6d21da81ca761d64ae9dfe2c8f54487211c
[ "MIT" ]
3
2022-02-10T11:47:58.000Z
2022-02-23T18:50:24.000Z
public_data/serializers.py
MTES-MCT/sparte
3b8ae6d21da81ca761d64ae9dfe2c8f54487211c
[ "MIT" ]
null
null
null
from rest_framework_gis import serializers
from rest_framework import serializers as s

from .models import (
    Artificialisee2015to2018,
    Artificielle2018,
    CommunesSybarval,
    CouvertureSol,
    EnveloppeUrbaine2018,
    Ocsge,
    Renaturee2018to2015,
    Sybarval,
    Voirie2018,
    ZonesBaties2018,
    UsageSol,
)
25.37069
80
0.613829
d99a20277c32bb1e28312f42ab6d732f38323169
241
py
Python
quick_search/admin.py
naman1901/django-quick-search
7b93554ed9fa4721e52372f9fd1a395d94cc04a7
[ "MIT" ]
null
null
null
quick_search/admin.py
naman1901/django-quick-search
7b93554ed9fa4721e52372f9fd1a395d94cc04a7
[ "MIT" ]
2
2020-02-11T23:28:22.000Z
2020-06-05T19:27:40.000Z
quick_search/admin.py
HereWithoutPermission/django-quick-search
7b93554ed9fa4721e52372f9fd1a395d94cc04a7
[ "MIT" ]
null
null
null
from django.contrib import admin

from .models import SearchResult

# Register your models here.


class SearchResultAdmin(admin.ModelAdmin):
    # Minimal admin class, assumed here: the excerpt registers
    # SearchResultAdmin below without defining or importing it.
    pass


admin.site.register(SearchResult, SearchResultAdmin)
30.125
52
0.771784
d99b5ab0ec594ac30b1d197b23a5cda7c48151d5
18,065
py
Python
rasa/train.py
Amirali-Shirkh/rasa-for-botfront
36aa24ad31241c5d1a180bbe34e1c8c50da40ff7
[ "Apache-2.0" ]
null
null
null
rasa/train.py
Amirali-Shirkh/rasa-for-botfront
36aa24ad31241c5d1a180bbe34e1c8c50da40ff7
[ "Apache-2.0" ]
null
null
null
rasa/train.py
Amirali-Shirkh/rasa-for-botfront
36aa24ad31241c5d1a180bbe34e1c8c50da40ff7
[ "Apache-2.0" ]
null
null
null
import asyncio
import os
import tempfile
from contextlib import ExitStack
from typing import Text, Optional, List, Union, Dict

from rasa.importers.importer import TrainingDataImporter
from rasa import model
from rasa.model import FingerprintComparisonResult
from rasa.core.domain import Domain
from rasa.utils.common import TempDirectoryPath
from rasa.cli.utils import (
    print_success,
    print_warning,
    print_error,
    bcolors,
    print_color,
)
from rasa.constants import DEFAULT_MODELS_PATH, DEFAULT_CORE_SUBDIRECTORY_NAME


def train_core(
    domain: Union[Domain, Text],
    config: Text,
    stories: Text,
    output: Text,
    train_path: Optional[Text] = None,
    fixed_model_name: Optional[Text] = None,
    additional_arguments: Optional[Dict] = None,
) -> Optional[Text]:
    loop = asyncio.get_event_loop()
    return loop.run_until_complete(
        train_core_async(
            domain=domain,
            config=config,
            stories=stories,
            output=output,
            train_path=train_path,
            fixed_model_name=fixed_model_name,
            additional_arguments=additional_arguments,
        )
    )


def train_nlu(
    config: Text,
    nlu_data: Text,
    output: Text,
    train_path: Optional[Text] = None,
    fixed_model_name: Optional[Text] = None,
    persist_nlu_training_data: bool = False,
) -> Optional[Text]:
    """Trains an NLU model.

    Args:
        config: Path to the config file for NLU.
        nlu_data: Path to the NLU training data.
        output: Output path.
        train_path: If `None` the model will be trained in a temporary directory,
            otherwise in the provided directory.
        fixed_model_name: Name of the model to be stored.
        persist_nlu_training_data: `True` if the NLU training data should
            be persisted with the model.

    Returns:
        If `train_path` is given it returns the path to the model archive,
        otherwise the path to the directory with the trained model files.

    """
    loop = asyncio.get_event_loop()
    return loop.run_until_complete(
        _train_nlu_async(
            config,
            nlu_data,
            output,
            train_path,
            fixed_model_name,
            persist_nlu_training_data,
        )
    )
34.673704
128
0.654027
d99ed7256245422c7c5dd3c60b0661e4f78183ea
35,585
py
Python
rplugin/python3/denite/ui/default.py
timgates42/denite.nvim
12a9b5456f5a4600afeb0ba284ce1098bd35e501
[ "MIT" ]
null
null
null
rplugin/python3/denite/ui/default.py
timgates42/denite.nvim
12a9b5456f5a4600afeb0ba284ce1098bd35e501
[ "MIT" ]
null
null
null
rplugin/python3/denite/ui/default.py
timgates42/denite.nvim
12a9b5456f5a4600afeb0ba284ce1098bd35e501
[ "MIT" ]
null
null
null
# ============================================================================
# FILE: default.py
# AUTHOR: Shougo Matsushita <Shougo.Matsu at gmail.com>
# License: MIT license
# ============================================================================

import re
import typing

from denite.util import echo, error, clearmatch, regex_convert_py_vim
from denite.util import Nvim, UserContext, Candidates, Candidate
from denite.parent import SyncParent
37.816153
79
0.54863
d99f875863138f11af1d76f0c753c198ad6d96bd
1,329
py
Python
PyDSTool/core/context_managers.py
yuanz271/PyDSTool
886c143cdd192aea204285f3a1cb4968c763c646
[ "Python-2.0", "OLDAP-2.7" ]
null
null
null
PyDSTool/core/context_managers.py
yuanz271/PyDSTool
886c143cdd192aea204285f3a1cb4968c763c646
[ "Python-2.0", "OLDAP-2.7" ]
null
null
null
PyDSTool/core/context_managers.py
yuanz271/PyDSTool
886c143cdd192aea204285f3a1cb4968c763c646
[ "Python-2.0", "OLDAP-2.7" ]
null
null
null
# -*- coding: utf-8 -*-
"""Context managers implemented for (mostly) internal use"""

import contextlib
import functools
from io import UnsupportedOperation
import os
import sys


__all__ = ["RedirectStdout", "RedirectStderr"]


# NOTE: _stdchannel_redirected is defined in the full module; this excerpt
# only shows the partial applications built on top of it.
RedirectStdout = functools.partial(_stdchannel_redirected, sys.stdout)
RedirectStderr = functools.partial(_stdchannel_redirected, sys.stderr)
RedirectNoOp = functools.partial(_stdchannel_redirected, None, "")
28.891304
109
0.68924
d99ff34b5f61cee604590c456f40398d7da18182
3,215
py
Python
pos_kiosk/hooks.py
Muzzy73/pos_kiosk
1ed42cfaeb15f009293b76d05dd85bd322b42f03
[ "MIT" ]
1
2022-03-05T11:42:36.000Z
2022-03-05T11:42:36.000Z
pos_kiosk/hooks.py
Muzzy73/pos_kiosk
1ed42cfaeb15f009293b76d05dd85bd322b42f03
[ "MIT" ]
null
null
null
pos_kiosk/hooks.py
Muzzy73/pos_kiosk
1ed42cfaeb15f009293b76d05dd85bd322b42f03
[ "MIT" ]
1
2022-03-05T11:42:37.000Z
2022-03-05T11:42:37.000Z
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from . import __version__ as app_version

app_name = "pos_kiosk"
app_title = "Pos Kiosk"
app_publisher = "9t9it"
app_description = "Kiosk App"
app_icon = "octicon octicon-file-directory"
app_color = "grey"
app_email = "[email protected]"
app_license = "MIT"

# Includes in <head>
# ------------------

# include js, css files in header of desk.html
# app_include_css = "/assets/pos_kiosk/css/pos_kiosk.css"
# app_include_js = "/assets/pos_kiosk/js/pos_kiosk.js"

# include js, css files in header of web template
# web_include_css = "/assets/pos_kiosk/css/pos_kiosk.css"
# web_include_js = "/assets/pos_kiosk/js/pos_kiosk.js"

# include js in page
# page_js = {"page" : "public/js/file.js"}
# page_js = {
#     "kiosk": ["public/js/pos_page_js.js", "public/js/includes/number_to_words.js"]
# }

# include js in doctype views
# doctype_js = {"doctype" : "public/js/doctype.js"}
# doctype_list_js = {"doctype" : "public/js/doctype_list.js"}
# doctype_tree_js = {"doctype" : "public/js/doctype_tree.js"}
# doctype_calendar_js = {"doctype" : "public/js/doctype_calendar.js"}

fixtures = [
    {
        "doctype": "Custom Field",
        "filters": [
            [
                "name",
                "in",
                [
                    "Sales Invoice Item-pos_kiosk",
                    "Mode of Payment-logo"
                ]
            ]
        ]
    }
]

# Home Pages
# ----------

# application home page (will override Website Settings)
# home_page = "login"

# website user home page (by Role)
# role_home_page = {
#     "Role": "home_page"
# }

# Website user home page (by function)
# get_website_user_home_page = "pos_kiosk.utils.get_home_page"

# Generators
# ----------

# automatically create page for each record of this doctype
# website_generators = ["Web Page"]

# Installation
# ------------

# before_install = "pos_kiosk.install.before_install"
# after_install = "pos_kiosk.install.after_install"

# Desk Notifications
# ------------------

# See frappe.core.notifications.get_notification_config
# notification_config = "pos_kiosk.notifications.get_notification_config"

# Permissions
# -----------

# Permissions evaluated in scripted ways
# permission_query_conditions = {
#     "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
# }
#
# has_permission = {
#     "Event": "frappe.desk.doctype.event.event.has_permission",
# }

# Document Events
# ---------------

# Hook on document methods and events
# doc_events = {
#     "*": {
#         "on_update": "method",
#         "on_cancel": "method",
#         "on_trash": "method"
#     }
# }

# Scheduled Tasks
# ---------------

# scheduler_events = {
#     "all": [
#         "pos_kiosk.tasks.all"
#     ],
#     "daily": [
#         "pos_kiosk.tasks.daily"
#     ],
#     "hourly": [
#         "pos_kiosk.tasks.hourly"
#     ],
#     "weekly": [
#         "pos_kiosk.tasks.weekly"
#     ]
#     "monthly": [
#         "pos_kiosk.tasks.monthly"
#     ]
# }

# Testing
# -------

# before_tests = "pos_kiosk.install.before_tests"

# Overriding Whitelisted Methods
# ------------------------------
#
# override_whitelisted_methods = {
#     "pos_bahrain.api.get_item_details.get_item_details": "pos_kiosk.api.item.get_item_details"  # noqa
# }
22.964286
101
0.631415
d9a00b2c6f1a0e88ad5b4a7def2a45bd074f417f
3,880
py
Python
pypagai/models/model_lstm.py
gcouti/pypagAI
d08fac95361dcc036d890a88cb86ce090322a612
[ "Apache-2.0" ]
1
2018-07-24T18:53:26.000Z
2018-07-24T18:53:26.000Z
pypagai/models/model_lstm.py
gcouti/pypagAI
d08fac95361dcc036d890a88cb86ce090322a612
[ "Apache-2.0" ]
7
2020-01-28T21:45:14.000Z
2022-03-11T23:20:53.000Z
pypagai/models/model_lstm.py
gcouti/pypagAI
d08fac95361dcc036d890a88cb86ce090322a612
[ "Apache-2.0" ]
null
null
null
from keras import Model, Input
from keras.layers import Dense, concatenate, LSTM, Reshape, Permute, Embedding, Dropout, Convolution1D, Flatten
from keras.optimizers import Adam

from pypagai.models.base import KerasModel
33.162393
114
0.650773
d9a09cb6f497e8ccdf9de40f4b8ebd6b96a1c43a
113
py
Python
lib/variables/latent_variables/__init__.py
joelouismarino/variational_rl
11dc14bfb56f3ebbfccd5de206b78712a8039a9a
[ "MIT" ]
15
2020-10-20T22:09:36.000Z
2021-12-24T13:40:36.000Z
lib/variables/latent_variables/__init__.py
joelouismarino/variational_rl
11dc14bfb56f3ebbfccd5de206b78712a8039a9a
[ "MIT" ]
null
null
null
lib/variables/latent_variables/__init__.py
joelouismarino/variational_rl
11dc14bfb56f3ebbfccd5de206b78712a8039a9a
[ "MIT" ]
1
2020-10-23T19:48:06.000Z
2020-10-23T19:48:06.000Z
from .fully_connected import FullyConnectedLatentVariable
from .convolutional import ConvolutionalLatentVariable
37.666667
57
0.911504
d9a0c8935f1da040f76922b94d20a857d8b8cd7d
3,338
py
Python
easyai/model/backbone/cls/pnasnet.py
lpj0822/image_point_cloud_det
7b20e2f42f3f2ff4881485da58ad188a1f0d0e0f
[ "MIT" ]
1
2020-09-05T09:18:56.000Z
2020-09-05T09:18:56.000Z
easyai/model/backbone/cls/pnasnet.py
lpj0822/image_point_cloud_det
7b20e2f42f3f2ff4881485da58ad188a1f0d0e0f
[ "MIT" ]
8
2020-04-20T02:18:55.000Z
2022-03-12T00:24:50.000Z
easyai/model/backbone/cls/pnasnet.py
lpj0822/image_point_cloud_det
7b20e2f42f3f2ff4881485da58ad188a1f0d0e0f
[ "MIT" ]
null
null
null
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author:
'''
PNASNet in PyTorch.

Paper: Progressive Neural Architecture Search
'''

from easyai.base_name.block_name import NormalizationType, ActivationType
from easyai.base_name.backbone_name import BackboneName
from easyai.model.backbone.utility.base_backbone import *
from easyai.model.base_block.utility.utility_block import ConvBNActivationBlock
from easyai.model.base_block.cls.pnasnet_block import CellA, CellB

__all__ = ['pnasnet_A', 'pnasnet_B']
35.892473
95
0.612942
d9a0daeef5f3a3455af5c2983af478cd08c74a7b
11,247
py
Python
map_download/cmd/TerrainDownloader.py
cugxy/map_download
02142b33edb2bc163f7ae971f443efe84c13e029
[ "MIT" ]
27
2019-04-02T08:34:16.000Z
2022-01-11T01:48:50.000Z
map_download/cmd/TerrainDownloader.py
cugxy/map_download
02142b33edb2bc163f7ae971f443efe84c13e029
[ "MIT" ]
8
2019-10-10T03:03:51.000Z
2021-11-14T11:01:47.000Z
map_download/cmd/TerrainDownloader.py
cugxy/map_download
02142b33edb2bc163f7ae971f443efe84c13e029
[ "MIT" ]
7
2019-04-02T08:43:04.000Z
2020-08-11T02:14:24.000Z
# -*- coding: utf-8 -*-
# coding=utf-8

import json
import os
import math
import logging
import requests
import time

from map_download.cmd.BaseDownloader import DownloadEngine, BaseDownloaderThread, latlng2tile_terrain, BoundBox


if __name__ == '__main__':
    if 1:
        logger = logging.getLogger('down')
        try:
            root = r'/Users/cugxy/Documents/data/downloader'
            formatter = logging.Formatter('%(levelname)s-%(message)s')
            hdlr = logging.StreamHandler()
            log_file = os.path.join(root, 'down.log')
            file_hdlr = logging.FileHandler(log_file)
            file_hdlr.setFormatter(formatter)
            logger.addHandler(file_hdlr)
            logger.addHandler(hdlr)
            logger.setLevel(logging.INFO)
            min_lng = -180.0
            max_lng = 180.0
            min_lat = -90.0
            max_lat = 90.0
            start_zoom = 0
            end_zoom = 5
            bbox = BoundBox(max_lat, max_lng, min_lat, min_lng, start_zoom, end_zoom)
            d = TerrainDownloadEngine(root, bbox, 8, logger)
            d.start()
            time.sleep(10000)
            logger.error('main thread out')
        except Exception as e:
            logger.error(e)
    if 0:
        accessToken = get_access_token()
        pass
35.479495
117
0.384992
d9a1f3b0cf83d1115ed19f3acdb5e35f75ece5c0
252,781
py
Python
kubernetes_asyncio/client/api/rbac_authorization_v1_api.py
dineshsonachalam/kubernetes_asyncio
d57e9e9be11f6789e1ce8d5b161acb64d29acf35
[ "Apache-2.0" ]
1
2021-02-25T04:36:18.000Z
2021-02-25T04:36:18.000Z
kubernetes_asyncio/client/api/rbac_authorization_v1_api.py
hubo1016/kubernetes_asyncio
d57e9e9be11f6789e1ce8d5b161acb64d29acf35
[ "Apache-2.0" ]
null
null
null
kubernetes_asyncio/client/api/rbac_authorization_v1_api.py
hubo1016/kubernetes_asyncio
d57e9e9be11f6789e1ce8d5b161acb64d29acf35
[ "Apache-2.0" ]
null
null
null
# coding: utf-8

"""
    Kubernetes

    No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)  # noqa: E501

    OpenAPI spec version: v1.12.4
    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""


from __future__ import absolute_import

import re  # noqa: F401

# python 2 and python 3 compatibility library
import six

from kubernetes_asyncio.client.api_client import ApiClient
66.661656
1,390
0.685004
d9a268f19adc7700cf1335eb9dfc2c8d74c5a4dc
2,208
py
Python
tools/utils.py
vahini01/electoral_rolls
82e42a6ee68844b1c8ac7899e8e7bf7a24e48d44
[ "MIT" ]
16
2018-01-22T02:03:09.000Z
2022-02-24T07:16:47.000Z
tools/utils.py
vahini01/electoral_rolls
82e42a6ee68844b1c8ac7899e8e7bf7a24e48d44
[ "MIT" ]
2
2019-02-01T02:48:17.000Z
2020-09-06T06:09:35.000Z
tools/utils.py
vahini01/electoral_rolls
82e42a6ee68844b1c8ac7899e8e7bf7a24e48d44
[ "MIT" ]
8
2018-01-22T06:48:07.000Z
2021-08-08T16:26:12.000Z
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 10 23:28:58 2017

@author: dhingratul
"""
import urllib.request
import os
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from bs4 import BeautifulSoup
import ssl
import requests
import wget
from PyPDF2 import PdfFileReader


def is_valid_pdf(fn):
    """Check if the PDF is valid"""
    try:
        with open(fn, 'rb') as f:
            pdf = PdfFileReader(f)
            numpages = pdf.numPages
        return (numpages > 0)
    except Exception as e:
        return False
25.976471
100
0.646286
d9a3883f0ea5d080d5d4d2e05df6fadcaeb5c36e
1,956
py
Python
exp/viz_raw_manhattan.py
ellencwade/coronavirus-2020
b71e018deb8df8450b4d88ddbcd6ded6497aa8f9
[ "MIT" ]
null
null
null
exp/viz_raw_manhattan.py
ellencwade/coronavirus-2020
b71e018deb8df8450b4d88ddbcd6ded6497aa8f9
[ "MIT" ]
null
null
null
exp/viz_raw_manhattan.py
ellencwade/coronavirus-2020
b71e018deb8df8450b4d88ddbcd6ded6497aa8f9
[ "MIT" ]
null
null
null
""" Experiment summary ------------------ Treat each province/state in a country cases over time as a vector, do a simple K-Nearest Neighbor between countries. What country has the most similar trajectory to a given country? Plots similar countries """ import sys sys.path.insert(0, '..') from utils import data import os import sklearn import numpy as np import json import matplotlib.pyplot as plt plt.style.use('fivethirtyeight') # ------------ HYPERPARAMETERS ------------- BASE_PATH = '../COVID-19/csse_covid_19_data/' # ------------------------------------------ confirmed = os.path.join( BASE_PATH, 'csse_covid_19_time_series', 'time_series_covid19_confirmed_global.csv') confirmed = data.load_csv_data(confirmed) features = [] targets = [] fig = plt.figure(figsize=(12, 12)) ax = fig.add_subplot(111) cm = plt.get_cmap('jet') NUM_COLORS = 0 LINE_STYLES = ['solid', 'dashed', 'dotted'] NUM_STYLES = len(LINE_STYLES) dist_diff = os.path.join('../exp/results/', 'knn_raw.json') f = open(dist_diff,) dist_diff = json.load(f) for region, dist in dist_diff.items(): plt.style.use('fivethirtyeight') fig = plt.figure(figsize=(12, 12)) ax = fig.add_subplot(111) cm = plt.get_cmap('jet') other_region = dist['manhattan'][0] regions = [region, other_region] for val in regions: df = data.filter_by_attribute( confirmed, "Country/Region", val) cases, labels = data.get_cases_chronologically(df) cases = cases.sum(axis=0) lines = ax.plot(cases, label=val) ax.set_ylabel('# of confirmed cases') ax.set_xlabel("Time (days since Jan 22, 2020)") ax.set_yscale('log') ax.legend() plt.tight_layout() region = region.replace('*', '') other_region = other_region.replace('*', '') plt.title(f'Comparing confirmed cases in {region} and {other_region}') plt.savefig(f'results/raw_manhattan/{region}.png') plt.close() print(region)
26.432432
74
0.658487
d9a428c026d2352f281b2b7ddd8ec8a286d37297
5,290
py
Python
rational/mxnet/rationals.py
steven-lang/rational_activations
234623dbb9360c215c430185b09e2237d5186b54
[ "MIT" ]
null
null
null
rational/mxnet/rationals.py
steven-lang/rational_activations
234623dbb9360c215c430185b09e2237d5186b54
[ "MIT" ]
null
null
null
rational/mxnet/rationals.py
steven-lang/rational_activations
234623dbb9360c215c430185b09e2237d5186b54
[ "MIT" ]
null
null
null
""" Rational Activation Functions for MXNET ======================================= This module allows you to create Rational Neural Networks using Learnable Rational activation functions with MXNET networks. """ import mxnet as mx from mxnet import initializer from mxnet.gluon import HybridBlock from rational.utils.get_weights import get_parameters from rational.mxnet.versions import _version_a, _version_b, _version_c, _version_d from rational._base.rational_base import Rational_base
42.66129
99
0.56276
d9a6621d903359b14c87695eb4a1ac8dcea18138
844
py
Python
torchflare/criterion/utils.py
Neklaustares-tPtwP/torchflare
7af6b01ef7c26f0277a041619081f6df4eb1e42c
[ "Apache-2.0" ]
1
2021-09-14T08:38:05.000Z
2021-09-14T08:38:05.000Z
torchflare/criterion/utils.py
weidao-Shi/torchflare
3c55b5a0761f2e85dd6da95767c6ec03f0f5baad
[ "Apache-2.0" ]
null
null
null
torchflare/criterion/utils.py
weidao-Shi/torchflare
3c55b5a0761f2e85dd6da95767c6ec03f0f5baad
[ "Apache-2.0" ]
1
2021-08-06T19:24:43.000Z
2021-08-06T19:24:43.000Z
"""Utils for criterion.""" import torch import torch.nn.functional as F def normalize(x, axis=-1): """Performs L2-Norm.""" num = x denom = torch.norm(x, 2, axis, keepdim=True).expand_as(x) + 1e-12 return num / denom # Source : https://github.com/earhian/Humpback-Whale-Identification-1st-/blob/master/models/triplet_loss.py def euclidean_dist(x, y): """Computes Euclidean distance.""" m, n = x.size(0), y.size(0) xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n) yy = torch.pow(x, 2).sum(1, keepdim=True).expand(m, m).t() dist = xx + yy - 2 * torch.matmul(x, y.t()) dist = dist.clamp(min=1e-12).sqrt() return dist def cosine_dist(x, y): """Computes Cosine Distance.""" x = F.normalize(x, dim=1) y = F.normalize(y, dim=1) dist = 2 - 2 * torch.mm(x, y.t()) return dist
26.375
107
0.613744
d9a714b3484177f5fee5427d98c53a86bf48daf3
134
py
Python
tests/__init__.py
eloo/sensor.sbahn_munich
05e05a845178ab529dc4c80e924035fe1d072b55
[ "MIT" ]
null
null
null
tests/__init__.py
eloo/sensor.sbahn_munich
05e05a845178ab529dc4c80e924035fe1d072b55
[ "MIT" ]
null
null
null
tests/__init__.py
eloo/sensor.sbahn_munich
05e05a845178ab529dc4c80e924035fe1d072b55
[ "MIT" ]
null
null
null
"""Tests for the sbahn_munich integration""" line_dict = { "name": "S3", "color": "#333333", "text_color": "#444444", }
14.888889
44
0.567164
d9a88e74a4ac032ae6e8218d9ec1ed42e6092d32
375
py
Python
app/views/web/homestack.py
geudrik/hautomation
0baae29e85cd68658a0f8578de2e36e42945053f
[ "MIT" ]
null
null
null
app/views/web/homestack.py
geudrik/hautomation
0baae29e85cd68658a0f8578de2e36e42945053f
[ "MIT" ]
null
null
null
app/views/web/homestack.py
geudrik/hautomation
0baae29e85cd68658a0f8578de2e36e42945053f
[ "MIT" ]
null
null
null
#! /usr/bin/env python2.7
# -*- coding: latin-1 -*-

from flask import Blueprint
from flask import current_app
from flask import render_template

from flask_login import login_required

homestack = Blueprint("homestack", __name__, url_prefix="/homestack")
22.058824
69
0.749333
d9a90a5af3f207f1020cbf41f94830b75e23fbc9
4,411
py
Python
readthedocs/donate/forms.py
gamearming/readthedocs
53d0094f657f549326a86b8bd0ccf924c2126941
[ "MIT" ]
null
null
null
readthedocs/donate/forms.py
gamearming/readthedocs
53d0094f657f549326a86b8bd0ccf924c2126941
[ "MIT" ]
null
null
null
readthedocs/donate/forms.py
gamearming/readthedocs
53d0094f657f549326a86b8bd0ccf924c2126941
[ "MIT" ]
null
null
null
"""Forms for RTD donations""" import logging from django import forms from django.conf import settings from django.utils.translation import ugettext_lazy as _ from readthedocs.payments.forms import StripeModelForm, StripeResourceMixin from readthedocs.payments.utils import stripe from .models import Supporter log = logging.getLogger(__name__)
33.416667
97
0.594423
d9ad95f0461bd02e44c310b1381567e8524c288c
6,258
py
Python
pandas_datareaders_unofficial/datareaders/google_finance_options.py
movermeyer/pandas_datareaders_unofficial
458dcf473d070cd7686d53d4a9b479cbe0ab9218
[ "BSD-3-Clause" ]
18
2015-02-05T01:42:51.000Z
2020-12-27T19:24:25.000Z
pandas_datareaders_unofficial/datareaders/google_finance_options.py
movermeyer/pandas_datareaders_unofficial
458dcf473d070cd7686d53d4a9b479cbe0ab9218
[ "BSD-3-Clause" ]
1
2015-01-12T11:08:02.000Z
2015-01-13T09:14:47.000Z
pandas_datareaders_unofficial/datareaders/google_finance_options.py
femtotrader/pandas_datareaders
458dcf473d070cd7686d53d4a9b479cbe0ab9218
[ "BSD-3-Clause" ]
13
2015-09-10T19:39:51.000Z
2022-01-06T17:08:35.000Z
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from .base import DataReaderBase
from ..tools import COL, _get_dates, to_float, to_int
import pandas as pd
#from pandas.tseries.frequencies import to_offset
from six.moves import cStringIO as StringIO
import logging
import traceback
import datetime
import json
import token, tokenize


def ymd_to_date(y, m, d):
    """
    Returns date

    >>> expiration = {u'd': 1, u'm': 12, u'y': 2014}
    >>> ymd_to_date(**expiration)
    datetime.date(2014, 12, 1)

    >>> ymd_to_date(2014, 3, 1)
    datetime.date(2014, 3, 1)
    """
    return(datetime.date(year=y, month=m, day=d))


def date_to_ymd(date):
    """
    Returns dict like {'y': ..., 'm': ..., 'd': ...}

    >>> date_to_ymd(datetime.date(year=2010, month=1, day=3))
    {'y': 2010, 'm': 1, 'd': 3}
    """
    d = {
        'y': date.year,
        'm': date.month,
        'd': date.day
    }
    return(d)


def fix_lazy_json(in_text):
    """
    Handle lazy JSON - to fix expecting property name
    this function fixes the json output from google
    http://stackoverflow.com/questions/4033633/handling-lazy-json-in-python-expecting-property-name
    """
    tokengen = tokenize.generate_tokens(StringIO(in_text).readline)

    result = []
    for tokid, tokval, _, _, _ in tokengen:
        # fix unquoted strings
        if (tokid == token.NAME):
            if tokval not in ['true', 'false', 'null', '-Infinity', 'Infinity', 'NaN']:
                tokid = token.STRING
                tokval = u'"%s"' % tokval

        # fix single-quoted strings
        elif (tokid == token.STRING):
            if tokval.startswith ("'"):
                tokval = u'"%s"' % tokval[1:-1].replace ('"', '\\"')

        # remove invalid commas
        elif (tokid == token.OP) and ((tokval == '}') or (tokval == ']')):
            if (len(result) > 0) and (result[-1][1] == ','):
                result.pop()

        result.append((tokid, tokval))

    return tokenize.untokenize(result)


if __name__ == "__main__":
    import doctest
    doctest.testmod()
29.942584
115
0.527964
d9adb9ef68a4c2ce5de1ed13aea3230964400996
5,039
py
Python
keras_textclassification/data_preprocess/generator_preprocess.py
Vail-qin/Keras-TextClassification
8acda5ae37db2647c8ecaa70027ffc6003d2abca
[ "MIT" ]
1
2019-12-27T16:59:16.000Z
2019-12-27T16:59:16.000Z
keras_textclassification/data_preprocess/generator_preprocess.py
Yolo-Cultivate/Keras-TextClassification
183cf7b3483588bfe10d19b65124e52df5b338f8
[ "MIT" ]
null
null
null
keras_textclassification/data_preprocess/generator_preprocess.py
Yolo-Cultivate/Keras-TextClassification
183cf7b3483588bfe10d19b65124e52df5b338f8
[ "MIT" ]
1
2022-01-11T06:37:54.000Z
2022-01-11T06:37:54.000Z
# !/usr/bin/python
# -*- coding: utf-8 -*-
# @time    : 2019/11/2 21:08
# @author  : Mo
# @function:


from keras_textclassification.data_preprocess.text_preprocess import load_json, save_json
from keras_textclassification.conf.path_config import path_model_dir

path_fast_text_model_vocab2index = path_model_dir + 'vocab2index.json'
path_fast_text_model_l2i_i2l = path_model_dir + 'l2i_i2l.json'

import numpy as np
import os
36.781022
92
0.523318
d9aeee22298fa03239ef3d63fdcaa4984d37ba63
3,030
py
Python
content/test/gpu/gpu_tests/pixel_expectations.py
metux/chromium-deb
3c08e9b89a1b6f95f103a61ff4f528dbcd57fc42
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
null
null
null
content/test/gpu/gpu_tests/pixel_expectations.py
metux/chromium-deb
3c08e9b89a1b6f95f103a61ff4f528dbcd57fc42
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
null
null
null
content/test/gpu/gpu_tests/pixel_expectations.py
metux/chromium-deb
3c08e9b89a1b6f95f103a61ff4f528dbcd57fc42
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
null
null
null
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from gpu_tests.gpu_test_expectations import GpuTestExpectations

# See the GpuTestExpectations class for documentation.
42.083333
78
0.706931
d9afca45a6adc9c41c0b981032c729d59e9db234
2,801
py
Python
examples/p02_budgets/budget_data_ingest/migrations/0001_initial.py
18F/data-federation-ingest
a896ef2da1faf3966f018366b26a338bb66cc717
[ "CC0-1.0" ]
18
2019-07-26T13:43:01.000Z
2022-01-15T14:57:52.000Z
examples/p02_budgets/budget_data_ingest/migrations/0001_initial.py
18F/data-federation-ingest
a896ef2da1faf3966f018366b26a338bb66cc717
[ "CC0-1.0" ]
96
2019-06-14T18:30:54.000Z
2021-08-03T09:25:02.000Z
examples/p02_budgets/budget_data_ingest/migrations/0001_initial.py
18F/data-federation-ingest
a896ef2da1faf3966f018366b26a338bb66cc717
[ "CC0-1.0" ]
3
2020-01-23T04:48:18.000Z
2021-01-12T09:31:20.000Z
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-06-08 22:54
from __future__ import unicode_literals

from django.conf import settings
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
47.474576
209
0.611567
d9b0c3d32e07c56a0732f0fca454740538a940fe
451
py
Python
setup.py
Kaslanarian/PythonSVM
715eeef2a245736167addf45a6aee8b40b54d0c7
[ "MIT" ]
2
2021-09-25T01:00:37.000Z
2021-09-27T12:13:24.000Z
setup.py
Kaslanarian/PythonSVM
715eeef2a245736167addf45a6aee8b40b54d0c7
[ "MIT" ]
1
2021-09-17T12:08:14.000Z
2021-09-17T12:08:14.000Z
setup.py
Kaslanarian/PythonSVM
715eeef2a245736167addf45a6aee8b40b54d0c7
[ "MIT" ]
null
null
null
import setuptools  # enables develop

setuptools.setup(
    name='pysvm',
    version='0.1',
    description='PySVM : A NumPy implementation of SVM based on SMO algorithm',
    author_email="[email protected]",
    packages=['pysvm'],
    license='MIT License',
    long_description=open('README.md', encoding='utf-8').read(),
    install_requires=[
        # 'numpy',
        'sklearn'
    ],
    url='https://github.com/Kaslanarian/PySVM',
)
28.1875
79
0.660754
d9b0df7f5ef294a68858d836af143c289d120187
4,375
py
Python
Object_detection_image.py
hiperus0988/pyao
72c56975a3d45aa033bdf7650b5369d59240395f
[ "Apache-2.0" ]
1
2021-06-09T22:17:57.000Z
2021-06-09T22:17:57.000Z
Object_detection_image.py
hiperus0988/pyao
72c56975a3d45aa033bdf7650b5369d59240395f
[ "Apache-2.0" ]
null
null
null
Object_detection_image.py
hiperus0988/pyao
72c56975a3d45aa033bdf7650b5369d59240395f
[ "Apache-2.0" ]
null
null
null
######## Image Object Detection Using Tensorflow-trained Classifier #########
#
# Author: Evan Juras
# Date: 1/15/18
# Description:
# This program uses a TensorFlow-trained classifier to perform object detection.
# It loads the classifier and uses it to perform object detection on an image.
# It draws boxes and scores around the objects of interest in the image.

## Some of the code is copied from Google's example at
## https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb
## and some is copied from Dat Tran's example at
## https://github.com/datitran/object_detector_app/blob/master/object_detection_app.py
## but I changed it to make it more understandable to me.

# Import packages
import os
import cv2
import numpy as np
import tensorflow as tf
import sys

# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")

# Import utilites
from utils import label_map_util
from utils import visualization_utils as vis_util

# Name of the directory containing the object detection module we're using
MODEL_NAME = 'inference_graph'
IMAGE_NAME = 'test1.jpg'

# Grab path to current working directory
CWD_PATH = os.getcwd()

# Path to frozen detection graph .pb file, which contains the model that is used
# for object detection.
PATH_TO_CKPT = os.path.join(CWD_PATH, MODEL_NAME, 'frozen_inference_graph.pb')

# Path to label map file
PATH_TO_LABELS = os.path.join(CWD_PATH, 'training', 'labelmap.pbtxt')

# Path to image
PATH_TO_IMAGE = os.path.join(CWD_PATH, IMAGE_NAME)

# Number of classes the object detector can identify
NUM_CLASSES = 6

# Load the label map.
# Label maps map indices to category names, so that when our convolution
# network predicts `5`, we know that this corresponds to `king`.
# Here we use internal utility functions, but anything that returns a
# dictionary mapping integers to appropriate string labels would be fine
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)

# Load the Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')

    sess = tf.Session(graph=detection_graph)

# Define input and output tensors (i.e. data) for the object detection classifier

# Input tensor is the image
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')

# Output tensors are the detection boxes, scores, and classes
# Each box represents a part of the image where a particular object was detected
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')

# Each score represents level of confidence for each of the objects.
# The score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')

# Number of objects detected
num_detections = detection_graph.get_tensor_by_name('num_detections:0')

# Load image using OpenCV and
# expand image dimensions to have shape: [1, None, None, 3]
# i.e. a single-column array, where each item in the column has the pixel RGB value
image = cv2.imread(PATH_TO_IMAGE)
image_expanded = np.expand_dims(image, axis=0)

# Perform the actual detection by running the model with the image as input
(boxes, scores, classes, num) = sess.run(
    [detection_boxes, detection_scores, detection_classes, num_detections],
    feed_dict={image_tensor: image_expanded})

# Draw the results of the detection (aka 'visualize the results')
vis_util.visualize_boxes_and_labels_on_image_array(
    image,
    np.squeeze(boxes),
    np.squeeze(classes).astype(np.int32),
    np.squeeze(scores),
    category_index,
    use_normalized_coordinates=True,
    line_thickness=8,
    min_score_thresh=0.60)

# All the results have been drawn on image. Now display the image.
cv2.imshow('Object detector', image)

# Press any key to close the image
cv2.waitKey(0)

# Clean up
cv2.destroyAllWindows()
36.458333
122
0.779886
d9b2e0c418fbf0ff7ba59e80c34fb2974714b1c9
398
py
Python
polling_stations/apps/data_collection/management/commands/import_torbay.py
chris48s/UK-Polling-Stations
4742b527dae94f0276d35c80460837be743b7d17
[ "BSD-3-Clause" ]
null
null
null
polling_stations/apps/data_collection/management/commands/import_torbay.py
chris48s/UK-Polling-Stations
4742b527dae94f0276d35c80460837be743b7d17
[ "BSD-3-Clause" ]
null
null
null
polling_stations/apps/data_collection/management/commands/import_torbay.py
chris48s/UK-Polling-Stations
4742b527dae94f0276d35c80460837be743b7d17
[ "BSD-3-Clause" ]
null
null
null
from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
44.222222
86
0.788945
d9b38469f6b00b7a441fff875e4ecd7bcc272b7e
1,832
py
Python
Backend/product/views.py
Bhavya0020/Readopolis
a0053e4fae97dc8291b50c746f3dc3e6b454ad95
[ "MIT" ]
null
null
null
Backend/product/views.py
Bhavya0020/Readopolis
a0053e4fae97dc8291b50c746f3dc3e6b454ad95
[ "MIT" ]
null
null
null
Backend/product/views.py
Bhavya0020/Readopolis
a0053e4fae97dc8291b50c746f3dc3e6b454ad95
[ "MIT" ]
null
null
null
from django.db.models import Q
from django.shortcuts import render
from django.http import Http404

# Create your views here.
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.decorators import api_view

from .models import Product, Category
from .serializers import ProductSerializer, CategorySerializer
34.566038
101
0.715611
d9b42bca24804913cf6908775c04bc29a0bec6df
1,469
py
Python
model/contact.py
hubogeri/python_training
7a918040e4c8bae5a031134911bc8b465f322699
[ "Apache-2.0" ]
null
null
null
model/contact.py
hubogeri/python_training
7a918040e4c8bae5a031134911bc8b465f322699
[ "Apache-2.0" ]
null
null
null
model/contact.py
hubogeri/python_training
7a918040e4c8bae5a031134911bc8b465f322699
[ "Apache-2.0" ]
null
null
null
from sys import maxsize
30.604167
135
0.571137
d9b4cabd9071c90b544409b5b87e3302450b1278
11,342
py
Python
test/IECore/BasicPreset.py
ericmehl/cortex
054839cc709ce153d1bcaaefe7f340ebe641ec82
[ "BSD-3-Clause" ]
386
2015-01-02T11:10:43.000Z
2022-03-10T15:12:20.000Z
test/IECore/BasicPreset.py
ericmehl/cortex
054839cc709ce153d1bcaaefe7f340ebe641ec82
[ "BSD-3-Clause" ]
484
2015-01-09T18:28:06.000Z
2022-03-31T16:02:04.000Z
test/IECore/BasicPreset.py
ericmehl/cortex
054839cc709ce153d1bcaaefe7f340ebe641ec82
[ "BSD-3-Clause" ]
99
2015-01-28T23:18:04.000Z
2022-03-27T00:59:39.000Z
##########################################################################
#
#  Copyright (c) 2010-2012, Image Engine Design Inc. All rights reserved.
#
#  Redistribution and use in source and binary forms, with or without
#  modification, are permitted provided that the following conditions are
#  met:
#
#      * Redistributions of source code must retain the above copyright
#        notice, this list of conditions and the following disclaimer.
#
#      * Redistributions in binary form must reproduce the above copyright
#        notice, this list of conditions and the following disclaimer in the
#        documentation and/or other materials provided with the distribution.
#
#      * Neither the name of Image Engine Design nor the names of any
#        other contributors to this software may be used to endorse or
#        promote products derived from this software without specific prior
#        written permission.
#
#  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
#  IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
#  THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
#  PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
#  CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
#  EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
#  PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
#  PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
#  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
#  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################

from __future__ import with_statement

import os
import sys
import shutil
import unittest

import IECore


if __name__ == "__main__":
    unittest.main()
33.655786
107
0.677923
d9b4da54ad6bdf7efb1efb5b210a443bc83b0db4
12,492
py
Python
rlpy/Domains/Pacman.py
imanolarrieta/RL
072a8c328652f45e053baecd640f04adf7f84b49
[ "BSD-3-Clause" ]
1
2019-12-07T13:47:43.000Z
2019-12-07T13:47:43.000Z
rlpy/Domains/Pacman.py
imanolarrieta/RL
072a8c328652f45e053baecd640f04adf7f84b49
[ "BSD-3-Clause" ]
null
null
null
rlpy/Domains/Pacman.py
imanolarrieta/RL
072a8c328652f45e053baecd640f04adf7f84b49
[ "BSD-3-Clause" ]
null
null
null
"""Pacman game domain.""" from rlpy.Tools import __rlpy_location__ from .Domain import Domain from .PacmanPackage import layout, pacman, game, ghostAgents from .PacmanPackage import graphicsDisplay import numpy as np from copy import deepcopy import os import time __copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy" __credits__ = ["Alborz Geramifard", "Robert H. Klein", "Christoph Dann", "William Dabney", "Jonathan P. How"] __license__ = "BSD 3-Clause" __author__ = "Austin Hays"
38.795031
159
0.616394
d9b4dfc1ad39620d7b5b2d1c39ad7fd8f6cec36b
819
py
Python
core/src/zeit/cms/settings/interfaces.py
rickdg/vivi
16134ac954bf8425646d4ad47bdd1f372e089355
[ "BSD-3-Clause" ]
5
2019-05-16T09:51:29.000Z
2021-05-31T09:30:03.000Z
core/src/zeit/cms/settings/interfaces.py
rickdg/vivi
16134ac954bf8425646d4ad47bdd1f372e089355
[ "BSD-3-Clause" ]
107
2019-05-24T12:19:02.000Z
2022-03-23T15:05:56.000Z
core/src/zeit/cms/settings/interfaces.py
rickdg/vivi
16134ac954bf8425646d4ad47bdd1f372e089355
[ "BSD-3-Clause" ]
3
2020-08-14T11:01:17.000Z
2022-01-08T17:32:19.000Z
from zeit.cms.i18n import MessageFactory as _
import zope.interface
import zope.schema
26.419355
78
0.636142
d9b55a7ee025f94a0ef3f125fa9c30f974dd7d6e
211
py
Python
abc/abc165/abc165e.py
c-yan/atcoder
940e49d576e6a2d734288fadaf368e486480a948
[ "MIT" ]
1
2019-08-21T00:49:34.000Z
2019-08-21T00:49:34.000Z
abc/abc165/abc165e.py
c-yan/atcoder
940e49d576e6a2d734288fadaf368e486480a948
[ "MIT" ]
null
null
null
abc/abc165/abc165e.py
c-yan/atcoder
940e49d576e6a2d734288fadaf368e486480a948
[ "MIT" ]
null
null
null
N, M = map(int, input().split())

for i in range(1, M + 1):
    if i % 2 == 1:
        j = (i - 1) // 2
        print(1 + j, M + 1 - j)
    else:
        j = (i - 2) // 2
        print(M + 2 + j, 2 * M + 1 - j)
21.1
39
0.336493
d9b62ab258f0b51ef25d431f8fa66de9acd438a7
1,895
py
Python
setup.py
giggslam/python-messengerbot-sdk
4a6fadf96fe3425da9abc4726fbb84db6d84f7b5
[ "Apache-2.0" ]
23
2019-03-05T08:33:34.000Z
2021-12-13T01:52:47.000Z
setup.py
giggslam/python-messengerbot-sdk
4a6fadf96fe3425da9abc4726fbb84db6d84f7b5
[ "Apache-2.0" ]
null
null
null
setup.py
giggslam/python-messengerbot-sdk
4a6fadf96fe3425da9abc4726fbb84db6d84f7b5
[ "Apache-2.0" ]
6
2019-03-07T07:58:02.000Z
2020-12-18T10:08:47.000Z
#!/usr/bin/env python
# -*- coding: utf-8 -*-

#  Licensed under the Apache License, Version 2.0 (the "License"); you may
#  not use this file except in compliance with the License. You may obtain
#  a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#  License for the specific language governing permissions and limitations
#  under the License.

import re
import sys

from setuptools import setup
from setuptools.command.test import test as TestCommand

__version__ = ''
with open('facebookbot/__about__.py', 'r') as fd:
    reg = re.compile(r'__version__ = [\'"]([^\'"]*)[\'"]')
    for line in fd:
        m = reg.match(line)
        if m:
            __version__ = m.group(1)
            break

with open('README.rst', 'r') as fd:
    long_description = fd.read()

setup(
    name="fbsdk",
    version=__version__,
    author="Sam Chang",
    author_email="[email protected]",
    maintainer="Sam Chang",
    maintainer_email="[email protected]",
    url="https://github.com/boompieman/fbsdk",
    description="Facebook Messaging API SDK for Python",
    long_description=long_description,
    license='Apache License 2.0',
    packages=[
        "facebookbot",
        "facebookbot.models"
    ],
    install_requires=_requirements(),  # _requirements() is defined in the part of the file elided from this excerpt
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "License :: OSI Approved :: Apache Software License",
        "Intended Audience :: Developers",
        "Programming Language :: Python :: 3",
        "Topic :: Software Development"
    ]
)
30.079365
76
0.663852
d9b76c6f6bd2bcb1986a9d9701e4ee097a1ff3bf
18,905
py
Python
src/transformers/models/mmbt/modeling_mmbt.py
MaximovaIrina/transformers
033c3ed95a14b58f5a657f5124bc5988e4109c9f
[ "Apache-2.0" ]
1
2022-01-12T11:39:47.000Z
2022-01-12T11:39:47.000Z
src/transformers/models/mmbt/modeling_mmbt.py
AugustVIII/transformers
185876392c0dcd4c4bb02f2750822144a3bee545
[ "Apache-2.0" ]
null
null
null
src/transformers/models/mmbt/modeling_mmbt.py
AugustVIII/transformers
185876392c0dcd4c4bb02f2750822144a3bee545
[ "Apache-2.0" ]
null
null
null
# coding=utf-8
# Copyright (c) Facebook, Inc. and its affiliates.
# Copyright (c) HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch MMBT model. """


import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from ...file_utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...modeling_outputs import BaseModelOutputWithPooling, SequenceClassifierOutput
from ...modeling_utils import ModuleUtilsMixin
from ...utils import logging


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "MMBTConfig"


MMBT_START_DOCSTRING = r"""
    MMBT model was proposed in [Supervised Multimodal Bitransformers for Classifying Images and
    Text](https://github.com/facebookresearch/mmbt) by Douwe Kiela, Suvrat Bhooshan, Hamed Firooz, Davide Testuggine.
    It's a supervised multimodal bitransformer model that fuses information from text and other image encoders, and
    obtains state-of-the-art performance on various multimodal classification benchmark tasks.

    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`MMBTConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration.
        transformer (:class: *~nn.Module*): A text transformer that is used by MMBT.
            It should have embeddings, encoder, and pooler attributes.
        encoder (:class: *~nn.Module*): Encoder for the second modality.
            It should take in a batch of modal inputs and return k, n dimension embeddings.
"""

MMBT_INPUTS_DOCSTRING = r"""
    Args:
        input_modal (`torch.FloatTensor` of shape `(batch_size, ***)`):
            The other modality data. It will be the shape that the encoder for that type expects. e.g. With an Image
            Encoder, the shape would be (batch_size, channels, height, width)
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. It does not expect [CLS] token to be added as it's
            appended to the end of other modality embeddings. Indices can be obtained using [`BertTokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        modal_start_tokens (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Optional start token to be added to Other Modality Embedding. [CLS] Most commonly used for classification
            tasks.
        modal_end_tokens (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Optional end token to be added to Other Modality Embedding. [SEP] Most commonly used.
        attention_mask (*optional*) `torch.FloatTensor` of shape `(batch_size, sequence_length)`:
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (*optional*) `torch.LongTensor` of shape `(batch_size, sequence_length)`:
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        modal_token_type_ids (*optional*) `torch.LongTensor` of shape `(batch_size, modal_sequence_length)`:
            Segment token indices to indicate different portions of the non-text modality. The embeddings from these
            tokens will be summed with the respective token embeddings for the non-text modality.
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        modal_position_ids (`torch.LongTensor` of shape `(batch_size, modal_sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings for the non-text modality.
            Selected in the range `[0, config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, embedding_dim)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
            than the model's internal embedding lookup matrix.
        encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
46.794554
209
0.685533
d9b79f86fa592dbe24c72c454192af966a916a5a
12,444
py
Python
eth2/beacon/chains/base.py
mhchia/trinity
e40e475064ca4605887706e9b0e4f8e2349b10cd
[ "MIT" ]
null
null
null
eth2/beacon/chains/base.py
mhchia/trinity
e40e475064ca4605887706e9b0e4f8e2349b10cd
[ "MIT" ]
null
null
null
eth2/beacon/chains/base.py
mhchia/trinity
e40e475064ca4605887706e9b0e4f8e2349b10cd
[ "MIT" ]
null
null
null
from abc import (
    ABC,
    abstractmethod,
)
import logging
from typing import (
    TYPE_CHECKING,
    Tuple,
    Type,
)

from eth._utils.datatypes import (
    Configurable,
)
from eth.db.backends.base import (
    BaseAtomicDB,
)
from eth.exceptions import (
    BlockNotFound,
)
from eth.validation import (
    validate_word,
)
from eth_typing import (
    Hash32,
)
from eth_utils import (
    ValidationError,
    encode_hex,
)

from eth2._utils.ssz import (
    validate_imported_block_unchanged,
)
from eth2.beacon.db.chain import (
    BaseBeaconChainDB,
    BeaconChainDB,
)
from eth2.beacon.exceptions import (
    BlockClassError,
    StateMachineNotFound,
)
from eth2.beacon.types.blocks import (
    BaseBeaconBlock,
)
from eth2.beacon.types.states import (
    BeaconState,
)
from eth2.beacon.typing import (
    FromBlockParams,
    Slot,
)
from eth2.beacon.validation import (
    validate_slot,
)

if TYPE_CHECKING:
    from eth2.beacon.state_machines.base import (  # noqa: F401
        BaseBeaconStateMachine,
    )


#
# Block API
#
def get_block_class(self, block_root: Hash32) -> Type[BaseBeaconBlock]:
    slot = self.chaindb.get_slot_by_root(block_root)
    sm_class = self.get_state_machine_class_for_block_slot(slot)
    block_class = sm_class.block_class
    return block_class


def create_block_from_parent(self,
                             parent_block: BaseBeaconBlock,
                             block_params: FromBlockParams) -> BaseBeaconBlock:
    """
    Passthrough helper to the ``StateMachine`` class of the block descending from the
    given block.
    """
    return self.get_state_machine_class_for_block_slot(
        slot=parent_block.slot + 1 if block_params.slot is None else block_params.slot,
    ).create_block_from_parent(parent_block, block_params)


def get_block_by_root(self, block_root: Hash32) -> BaseBeaconBlock:
    """
    Return the requested block as specified by block hash.

    Raise ``BlockNotFound`` if there's no block with the given hash in the db.
    """
    validate_word(block_root, title="Block Hash")
    block_class = self.get_block_class(block_root)
    return self.chaindb.get_block_by_root(block_root, block_class)


def get_canonical_head(self) -> BaseBeaconBlock:
    """
    Return the block at the canonical chain head.

    Raise ``CanonicalHeadNotFound`` if there's no head defined for the canonical chain.
    """
    block_root = self.chaindb.get_canonical_head_root()
    block_class = self.get_block_class(block_root)
    return self.chaindb.get_block_by_root(block_root, block_class)


def get_score(self, block_root: Hash32) -> int:
    """
    Return the score of the block with the given hash.

    Raise ``BlockNotFound`` if there is no matching block hash.
    """
    return self.chaindb.get_score(block_root)


def ensure_block(self, block: BaseBeaconBlock=None) -> BaseBeaconBlock:
    """
    Return ``block`` if it is not ``None``, otherwise return the block
    of the canonical head.
    """
    if block is None:
        head = self.get_canonical_head()
        return self.create_block_from_parent(head, FromBlockParams())
    else:
        return block


def get_block(self) -> BaseBeaconBlock:
    """
    Return the current TIP block.
    """
    return self.get_state_machine().block


def get_canonical_block_by_slot(self, slot: Slot) -> BaseBeaconBlock:
    """
    Return the block with the given number in the canonical chain.

    Raise ``BlockNotFound`` if there's no block with the given number in the
    canonical chain.
    """
    validate_slot(slot)
    return self.get_block_by_root(self.chaindb.get_canonical_block_root(slot))


def get_canonical_block_root(self, slot: Slot) -> Hash32:
    """
    Return the block hash with the given number in the canonical chain.

    Raise ``BlockNotFound`` if there's no block with the given number in the
    canonical chain.
    """
    return self.chaindb.get_canonical_block_root(slot)


def import_block(
        self,
        block: BaseBeaconBlock,
        perform_validation: bool=True
) -> Tuple[BaseBeaconBlock, Tuple[BaseBeaconBlock, ...], Tuple[BaseBeaconBlock, ...]]:
    """
    Import a complete block and returns a 3-tuple

    - the imported block
    - a tuple of blocks which are now part of the canonical chain.
    - a tuple of blocks which were canonical and now are no longer canonical.
    """
    try:
        parent_block = self.get_block_by_root(block.previous_block_root)
    except BlockNotFound:
        raise ValidationError(
            "Attempt to import block #{}. Cannot import block {} before importing "
            "its parent block at {}".format(
                block.slot,
                block.signed_root,
                block.previous_block_root,
            )
        )

    base_block_for_import = self.create_block_from_parent(
        parent_block,
        FromBlockParams(),
    )
    state, imported_block = self.get_state_machine(base_block_for_import).import_block(block)

    # Validate the imported block.
    if perform_validation:
        validate_imported_block_unchanged(imported_block, block)

    # TODO: Now it just persists all state. Should design how to clean up the old state.
    self.chaindb.persist_state(state)

    (
        new_canonical_blocks,
        old_canonical_blocks,
    ) = self.chaindb.persist_block(imported_block, imported_block.__class__)

    self.logger.debug(
        'IMPORTED_BLOCK: slot %s | signed root %s',
        imported_block.slot,
        encode_hex(imported_block.signed_root),
    )

    return imported_block, new_canonical_blocks, old_canonical_blocks
30.955224
99
0.634201
d9b8347698a1fe18b6d9ec66f6bfbfa77f2567be
1,566
py
Python
using_paramiko.py
allupramodreddy/cisco_py
5488b56d9324011860b78998e694dcce6da5e3d1
[ "Apache-2.0" ]
null
null
null
using_paramiko.py
allupramodreddy/cisco_py
5488b56d9324011860b78998e694dcce6da5e3d1
[ "Apache-2.0" ]
null
null
null
using_paramiko.py
allupramodreddy/cisco_py
5488b56d9324011860b78998e694dcce6da5e3d1
[ "Apache-2.0" ]
null
null
null
#!/usr/local/bin/python3

import paramiko, time

# using as SSH Client
client = paramiko.SSHClient()

# check dir(client) to find available options.
# auto adjust host key verification with yes or no
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())

# time for connecting to remote Cisco IOS
"""
Manually taking input

addr = input('Provide IP address to connect to: ')
user = input('Username: ')
pwd = getpass.getpass('Password: ')
"""

# Taking input from files
f1 = open("devices.txt", "r")
f2 = open("commands.txt", "r")

for line in f1:
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    data = line.split(" ")
    # print(data)
    addr = data[0]
    user = data[1]
    pwd = data[2]
    f3 = open(addr + ".txt", "w+")
    # print(addr + " " + user + " " + pwd)
    client.connect(addr, username=user, password=pwd, allow_agent=False, look_for_keys=False)

    # we have to ask for Shell
    device_access = client.invoke_shell()
    for line in f2:
        device_access.send(line)
        time.sleep(1)
    output = device_access.recv(55000).decode('ascii')
    f3.write(output)

"""
THIS CODE IS FOR SINGLE COMMAND, FOR MULTIPLE COMMANDS CODE BELOW

# send command to the device
device_access.send("ter len 0\nshow run \n")
time.sleep(2)

# receive output from the device, convert it to byte-like format and print it
print(device_access.recv(550000).decode('ascii'))

# We can print the same to a file too
with open("csr1000v.txt","w") as f:
    f.write(device_access.recv(550000).decode('ascii'))
"""
23.727273
89
0.691571
d9b86cc42aaff67200ff3f4f5f6d27121835fd8c
733
py
Python
old/.history/a_20201125192943.py
pscly/bisai1
e619186cec5053a8e02bd59e48fc3ad3af47d19a
[ "MulanPSL-1.0" ]
null
null
null
old/.history/a_20201125192943.py
pscly/bisai1
e619186cec5053a8e02bd59e48fc3ad3af47d19a
[ "MulanPSL-1.0" ]
null
null
null
old/.history/a_20201125192943.py
pscly/bisai1
e619186cec5053a8e02bd59e48fc3ad3af47d19a
[ "MulanPSL-1.0" ]
null
null
null
# for n in range(400,500): # i = n // 100 # j = n // 10 % 10 # k = n % 10 # if n == i ** 3 + j ** 3 + k ** 3: # print(n) # (16) # input("():") # s1 = input("():") # l1 = s1.split(' ') # l2 = [] # for i in l1: # if i.isdigit(): # l2.append(int(i)) # for i in l2: # if not (i % 6): # print(i, end=" ") # (17) out_l1 = [] while 1: in_1 = input(":") nums_l1 = in_1.split(' ')
13.089286
39
0.452933
d9b8d42e905cba910e6a30f7d6f38e82d05ab46c
2,110
py
Python
graphdb/transformer.py
muggat0n/graphdb
56dfd5ef8a3321abc6a919faee47494bbe059080
[ "MIT" ]
2
2020-08-28T13:42:38.000Z
2020-09-05T03:13:45.000Z
graphdb/transformer.py
muggat0n/graphdb
56dfd5ef8a3321abc6a919faee47494bbe059080
[ "MIT" ]
null
null
null
graphdb/transformer.py
muggat0n/graphdb
56dfd5ef8a3321abc6a919faee47494bbe059080
[ "MIT" ]
null
null
null
""" A query transformer is a function that accepts a program and returns a program, plus a priority level. Higher priority transformers are placed closer to the front of the list. Were ensuring is a function, because were going to evaluate it later 31 . Well assume there wont be an enormous number of transformer additions, and walk the list linearly to add a new one. Well leave a note in case this assumption turns out to be false a binary search is much more time-optimal for long lists, but adds a little complexity and doesnt really speed up short lists. """ """ Dagoba.T = [] # transformers (more than meets the eye) """ """ Dagoba.addTransformer = function(fun, priority) { if(typeof fun != 'function') return Dagoba.error('Invalid transformer function') for(var i = 0; i < Dagoba.T.length; i++) # OPT: binary search if(priority > Dagoba.T[i].priority) break Dagoba.T.splice(i, 0, {priority: priority, fun: fun}) } """ """ Dagoba.transform = function(program) { return Dagoba.T.reduce(function(acc, transformer) { return transformer.fun(acc) }, program) } """ """ Dagoba.addAlias = function(newname, oldname, defaults) { defaults = defaults || [] # default arguments for the alias Dagoba.addPipetype(newname, function() {}) # because there's no method catchall in js Dagoba.addTransformer(function(program) { return program.map(function(step) { if(step[0] != newname) return step return [oldname, Dagoba.extend(step[1], defaults)] }) }, 100) # these need to run early, so they get a high priority } """ """ Dagoba.extend = function(list, defaults) { return Object.keys(defaults).reduce(function(acc, key) { if(typeof list[key] != 'undefined') return acc acc[key] = defaults[key] return acc }, list) } """
30.57971
120
0.627962
d9b92da15285253454115ccfc5647355f3c2b100
345
py
Python
yzcore/templates/project_template/src/const/_job.py
lixuemin13/yz-core
82774f807ac1002b77d0cc90f6695b1cc6ba0820
[ "MIT" ]
6
2021-01-26T10:27:04.000Z
2022-03-19T16:13:12.000Z
yzcore/templates/project_template/src/const/_job.py
lixuemin13/yz-core
82774f807ac1002b77d0cc90f6695b1cc6ba0820
[ "MIT" ]
null
null
null
yzcore/templates/project_template/src/const/_job.py
lixuemin13/yz-core
82774f807ac1002b77d0cc90f6695b1cc6ba0820
[ "MIT" ]
2
2021-07-27T04:11:51.000Z
2022-01-06T09:36:06.000Z
#!/usr/bin/python3.6.8+ # -*- coding:utf-8 -*- """ @auth: cml @date: 2020-12-2 @desc: ... """
15
28
0.53913
d9b95364464c7d47db46ee15f7524a804b79ea1b
10,311
py
Python
pyboleto/html.py
RenanPalmeira/pyboleto
7b12a7a2f7e92cad5f35f843ae67c397b6f7e36e
[ "BSD-3-Clause" ]
null
null
null
pyboleto/html.py
RenanPalmeira/pyboleto
7b12a7a2f7e92cad5f35f843ae67c397b6f7e36e
[ "BSD-3-Clause" ]
null
null
null
pyboleto/html.py
RenanPalmeira/pyboleto
7b12a7a2f7e92cad5f35f843ae67c397b6f7e36e
[ "BSD-3-Clause" ]
1
2019-03-20T01:01:00.000Z
2019-03-20T01:01:00.000Z
# -*- coding: utf-8 -*-
"""
    pyboleto.html
    ~~~~~~~~~~~~~

    Class responsible for rendering the boleto as HTML.

    :copyright: 2012 by Artur Felipe de Sousa
    :license: BSD, see LICENSE for more details.

"""
import os
import string
import sys
import codecs
import base64

from itertools import chain

if sys.version_info < (3,):
    from itertools import izip_longest as zip_longest
    zip_longest  # referenced here to avoid an unused-import warning for zip_longest
else:
    from itertools import zip_longest

DIGITS = [
    ['n', 'n', 'w', 'w', 'n'],
    ['w', 'n', 'n', 'n', 'w'],
    ['n', 'w', 'n', 'n', 'w'],
    ['w', 'w', 'n', 'n', 'n'],
    ['n', 'n', 'w', 'n', 'w'],
    ['w', 'n', 'w', 'n', 'n'],
    ['n', 'w', 'w', 'n', 'n'],
    ['n', 'n', 'n', 'w', 'w'],
    ['w', 'n', 'n', 'w', 'n'],
    ['n', 'w', 'n', 'w', 'n'],
]
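The DIGITS table is the standard interleaved 2 of 5 width pattern ('n' = narrow, 'w' = wide): the first digit of a pair is encoded in the bars and the second in the spaces, alternating bar/space. A small sketch of that interleaving, reusing the module's own chain import (the helper name is illustrative, not part of this class):

def interleave_pair(d1, d2):
    # Alternate the first digit's widths (bars) with the second's (spaces).
    return list(chain(*zip(DIGITS[d1], DIGITS[d2])))

# interleave_pair(0, 3) -> ['n', 'w', 'n', 'w', 'w', 'n', 'w', 'n', 'n', 'n']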
35.926829
77
0.617011
d9b9563b7aae9c46b0fbd98073d96eeedfaec4aa
91
py
Python
Courses/1 month/2 week/day 6/Formula.py
emir-naiz/first_git_lesson
1fecf712290f6da3ef03deff518870d91638eb69
[ "MIT" ]
null
null
null
Courses/1 month/2 week/day 6/Formula.py
emir-naiz/first_git_lesson
1fecf712290f6da3ef03deff518870d91638eb69
[ "MIT" ]
null
null
null
Courses/1 month/2 week/day 6/Formula.py
emir-naiz/first_git_lesson
1fecf712290f6da3ef03deff518870d91638eb69
[ "MIT" ]
null
null
null
# Print the running sum of 0..4: the loop emits 0, 1, 3, 6, 10.
summary = 0
i = 0
while i < 5:
    summary = summary + i
    print(summary)
    i = i + 1
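For comparison, the standard library expresses the same running sum directly; this prints the identical sequence:

from itertools import accumulate

for s in accumulate(range(5)):
    print(s)  # 0, 1, 3, 6, 10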
11.375
25
0.516484
d9b9af3bd25b0d2f9357446b0ff43e3ab614b141
243
py
Python
tests/image_saver/image_saver_7.py
Vicken-Ghoubiguian/Imtreat
1f8e8406dc48af3b1e8e0c138a09aa1faee0b8a0
[ "MIT" ]
null
null
null
tests/image_saver/image_saver_7.py
Vicken-Ghoubiguian/Imtreat
1f8e8406dc48af3b1e8e0c138a09aa1faee0b8a0
[ "MIT" ]
null
null
null
tests/image_saver/image_saver_7.py
Vicken-Ghoubiguian/Imtreat
1f8e8406dc48af3b1e8e0c138a09aa1faee0b8a0
[ "MIT" ]
null
null
null
import imtreat

img = imtreat.imageManagerClass.openImageFunction("../images/soleil.png", 0)

img = imtreat.definedModesClass.detailEnhanceFunction(img)

imtreat.imageManagerClass.saveImageFunction("/Téléchargements/", "image_1", ".png", img)
30.375
88
0.794239
d9ba3c5b12232bbc811a9ad606f2570ac2481108
10,492
py
Python
nova/conf/hyperv.py
raubvogel/nova
b78be4e83cdc191e20a4a61b6aae72cb2b37f62b
[ "Apache-2.0" ]
null
null
null
nova/conf/hyperv.py
raubvogel/nova
b78be4e83cdc191e20a4a61b6aae72cb2b37f62b
[ "Apache-2.0" ]
null
null
null
nova/conf/hyperv.py
raubvogel/nova
b78be4e83cdc191e20a4a61b6aae72cb2b37f62b
[ "Apache-2.0" ]
null
null
null
# Copyright (c) 2016 TUBITAK BILGEM
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from oslo_config import cfg

hyperv_opt_group = cfg.OptGroup("hyperv",
                                title='The Hyper-V feature',
                                help="""
The hyperv feature allows you to configure the Hyper-V hypervisor
driver to be used within an OpenStack deployment.
""")

hyperv_opts = [
    cfg.FloatOpt('dynamic_memory_ratio',
                 default=1.0,
                 help="""
Dynamic memory ratio

Enables dynamic memory allocation (ballooning) when set to a value
greater than 1. The value expresses the ratio between the total RAM
assigned to an instance and its startup RAM amount. For example a
ratio of 2.0 for an instance with 1024MB of RAM implies 512MB of RAM
allocated at startup.

Possible values:

* 1.0: Disables dynamic memory allocation (Default).
* Float values greater than 1.0: Enables allocation of total implied
  RAM divided by this value for startup.
"""),
    cfg.BoolOpt('enable_instance_metrics_collection',
                default=False,
                help="""
Enable instance metrics collection

Enables metrics collection for an instance by using Hyper-V's metric
APIs. Collected data can be retrieved by other apps and services,
e.g.: Ceilometer.
"""),
    cfg.StrOpt('instances_path_share',
               default="",
               help="""
Instances path share

The name of a Windows share mapped to the "instances_path" dir and used by the
resize feature to copy files to the target host. If left blank, an
administrative share (hidden network share) will be used, looking for the same
"instances_path" used locally.

Possible values:

* "": An administrative share will be used (Default).
* Name of a Windows share.

Related options:

* "instances_path": The directory which will be used if this option here is
  left blank.
"""),
    cfg.BoolOpt('limit_cpu_features',
                default=False,
                help="""
Limit CPU features

This flag is needed to support live migration to hosts with different CPU
features and checked during instance creation in order to limit the CPU
features used by the instance.
"""),
    cfg.IntOpt('mounted_disk_query_retry_count',
               default=10,
               min=0,
               help="""
Mounted disk query retry count

The number of times to retry checking for a mounted disk.
The query runs until the device can be found or the retry
count is reached.

Possible values:

* Positive integer values. Values greater than 1 are recommended
  (Default: 10).

Related options:

* Time interval between disk mount retries is declared with
  "mounted_disk_query_retry_interval" option.
"""),
    cfg.IntOpt('mounted_disk_query_retry_interval',
               default=5,
               min=0,
               help="""
Mounted disk query retry interval

Interval between checks for a mounted disk, in seconds.

Possible values:

* Time in seconds (Default: 5).

Related options:

* This option is meaningful when the mounted_disk_query_retry_count
  is greater than 1.
* The retry loop runs with mounted_disk_query_retry_count and
  mounted_disk_query_retry_interval configuration options.
"""),
    cfg.IntOpt('power_state_check_timeframe',
               default=60,
               min=0,
               help="""
Power state check timeframe

The timeframe to be checked for instance power state changes.
This option is used to fetch the state of the instance from Hyper-V
through the WMI interface, within the specified timeframe.

Possible values:

* Timeframe in seconds (Default: 60).
"""),
    cfg.IntOpt('power_state_event_polling_interval',
               default=2,
               min=0,
               help="""
Power state event polling interval

Instance power state change event polling frequency. Sets the
listener interval for power state events to the given value.
This option enhances the internal lifecycle notifications of
instances that reboot themselves. It is unlikely that an operator
has to change this value.

Possible values:

* Time in seconds (Default: 2).
"""),
    cfg.StrOpt('qemu_img_cmd',
               default="qemu-img.exe",
               help="""
qemu-img command

qemu-img is required for some of the image related operations
like converting between different image types. You can get it
from here: (http://qemu.weilnetz.de/) or you can install the
Cloudbase OpenStack Hyper-V Compute Driver
(https://cloudbase.it/openstack-hyperv-driver/) which automatically
sets the proper path for this config option. You can either give the
full path of qemu-img.exe or set its path in the PATH environment
variable and leave this option to the default value.

Possible values:

* Name of the qemu-img executable, in case it is in the same directory
  as the nova-compute service or its path is in the PATH environment
  variable (Default).
* Path of qemu-img command (DRIVELETTER:\PATH\TO\QEMU-IMG\COMMAND).

Related options:

* If the config_drive_cdrom option is False, qemu-img will be used to
  convert the ISO to a VHD, otherwise the config drive will
  remain an ISO. To use config drive with Hyper-V, you must
  set the ``mkisofs_cmd`` value to the full path to an ``mkisofs.exe``
  installation.
"""),
    cfg.StrOpt('vswitch_name',
               help="""
External virtual switch name

The Hyper-V Virtual Switch is a software-based layer-2 Ethernet network
switch that is available with the installation of the Hyper-V server role.
The switch includes programmatically managed and extensible capabilities to
connect virtual machines to both virtual networks and the physical network.
In addition, Hyper-V Virtual Switch provides policy enforcement for security,
isolation, and service levels. The vSwitch represented by this config option
must be an external one (not internal or private).

Possible values:

* If not provided, the first of a list of available vswitches is used. This
  list is queried using WQL.
* Virtual switch name.
"""),
    cfg.IntOpt('wait_soft_reboot_seconds',
               default=60,
               min=0,
               help="""
Wait soft reboot seconds

Number of seconds to wait for instance to shut down after soft reboot
request is made. We fall back to hard reboot if instance does not
shut down within this window.

Possible values:

* Time in seconds (Default: 60).
"""),
    cfg.BoolOpt('config_drive_cdrom',
                default=False,
                help="""
Mount config drive as a CD drive.

OpenStack can be configured to write instance metadata to a config drive, which
is then attached to the instance before it boots. The config drive can be
attached as a disk drive (default) or as a CD drive.

Related options:

* This option is meaningful with ``force_config_drive`` option set to ``True``
  or when the REST API call to create an instance will have the
  ``--config-drive=True`` flag.
* ``config_drive_format`` option must be set to ``iso9660`` in order to use
  CD drive as the config drive image.
* To use config drive with Hyper-V, you must set the ``mkisofs_cmd`` value
  to the full path to an ``mkisofs.exe`` installation.
  Additionally, you must set the ``qemu_img_cmd`` value to the full path
  to a ``qemu-img`` command installation.
* You can configure the Compute service to always create a configuration
  drive by setting the ``force_config_drive`` option to ``True``.
"""),
    cfg.BoolOpt('config_drive_inject_password',
                default=False,
                help="""
Inject password to config drive.

When enabled, the admin password will be available from the config drive image.

Related options:

* This option is meaningful when used with other options that enable
  config drive usage with Hyper-V, such as ``force_config_drive``.
"""),
    cfg.IntOpt('volume_attach_retry_count',
               default=10,
               min=0,
               help="""
Volume attach retry count

The number of times to retry attaching a volume. Volume attachment
is retried until success or the given retry count is reached.

Possible values:

* Positive integer values (Default: 10).

Related options:

* Time interval between attachment attempts is declared with
  volume_attach_retry_interval option.
"""),
    cfg.IntOpt('volume_attach_retry_interval',
               default=5,
               min=0,
               help="""
Volume attach retry interval

Interval between volume attachment attempts, in seconds.

Possible values:

* Time in seconds (Default: 5).

Related options:

* This option is meaningful when volume_attach_retry_count
  is greater than 1.
* The retry loop runs with volume_attach_retry_count and
  volume_attach_retry_interval configuration options.
"""),
    cfg.BoolOpt('enable_remotefx',
                default=False,
                help="""
Enable RemoteFX feature

This requires at least one DirectX 11 capable graphics adapter for
Windows / Hyper-V Server 2012 R2 or newer and RDS-Virtualization
feature has to be enabled.

Instances with RemoteFX can be requested with the following flavor
extra specs:

**os:resolution**. Guest VM screen resolution size. Acceptable values::

    1024x768, 1280x1024, 1600x1200, 1920x1200, 2560x1600, 3840x2160

``3840x2160`` is only available on Windows / Hyper-V Server 2016.

**os:monitors**. Guest VM number of monitors. Acceptable values::

    [1, 4] - Windows / Hyper-V Server 2012 R2
    [1, 8] - Windows / Hyper-V Server 2016

**os:vram**. Guest VM VRAM amount. Only available on
Windows / Hyper-V Server 2016. Acceptable values::

    64, 128, 256, 512, 1024
"""),
    cfg.BoolOpt('use_multipath_io',
                default=False,
                help="""
Use multipath connections when attaching iSCSI or FC disks.

This requires the Multipath IO Windows feature to be enabled. MPIO must be
configured to claim such devices.
"""),
    cfg.ListOpt('iscsi_initiator_list',
                default=[],
                help="""
List of iSCSI initiators that will be used for establishing iSCSI sessions.

If none are specified, the Microsoft iSCSI initiator service will choose the
initiator.
""")
]
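A short sketch of how a group and option list defined this way are registered and read with oslo.config. Upstream nova exposes helpers for this, but since the end of the file is not shown here, the register_opts function below is an assumption:

from oslo_config import cfg

CONF = cfg.CONF

def register_opts(conf):
    # Attach the group and its options to a config object.
    conf.register_group(hyperv_opt_group)
    conf.register_opts(hyperv_opts, group=hyperv_opt_group)

register_opts(CONF)
CONF([])  # parse an empty command line so defaults become readable
print(CONF.hyperv.dynamic_memory_ratio)  # 1.0 unless overridden in nova.conf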
31.04142
79
0.735989
d9ba3e40007f306c4c070fefef8a9b0aa2387204
363
py
Python
src/fetchWords.py
theyadev/thierry-bot
f3c72998d4c16afbca77baf4cabaf0f547d51e94
[ "MIT" ]
null
null
null
src/fetchWords.py
theyadev/thierry-bot
f3c72998d4c16afbca77baf4cabaf0f547d51e94
[ "MIT" ]
2
2022-01-20T16:36:33.000Z
2022-03-31T14:16:01.000Z
src/fetchWords.py
theyadev/thierry-bot
f3c72998d4c16afbca77baf4cabaf0f547d51e94
[ "MIT" ]
1
2022-01-28T12:14:14.000Z
2022-01-28T12:14:14.000Z
import requests

words_list = requests.get("https://raw.githubusercontent.com/atebits/Words/master/Words/fr.txt").text
words_list = filter(lambda x: len(x) > 4, words_list.split('\n'))

path = input("Output path? (words.txt) ")

if path == "":
    path = "./words.txt"

with open(path, "w", encoding="utf-8") as file:
    file.write('\n'.join(words_list))
27.923077
101
0.672176
d9ba8bca5b7327bbb7e6554d0a3849c186cc4ba9
1,623
py
Python
inspiration/simplegallery/test/upload/variants/test_aws_uploader.py
Zenahr/simple-music-gallery
2cf6e81208b721a91dcbf77e047c7f77182dd194
[ "MIT" ]
1
2020-07-03T17:21:01.000Z
2020-07-03T17:21:01.000Z
simplegallery/test/upload/variants/test_aws_uploader.py
theemack/simple-photo-gallery
f5db98bca7a7443ea7a9172317811f446eff760c
[ "MIT" ]
1
2020-06-20T12:13:00.000Z
2020-06-20T15:32:03.000Z
inspiration/simplegallery/test/upload/variants/test_aws_uploader.py
Zenahr/simple-music-gallery
2cf6e81208b721a91dcbf77e047c7f77182dd194
[ "MIT" ]
null
null
null
import unittest from unittest import mock import os import subprocess from testfixtures import TempDirectory from simplegallery.upload.uploader_factory import get_uploader if __name__ == '__main__': unittest.main()
37.744186
103
0.646334
d9bd741cd9ad9e20eeb1069fce4709781f43edd4
6,476
py
Python
Qt_interface/add_subject.py
kithsirij/NLP-based-Syllabus-Coverage-Exam-paper-checker-Tool
b7b38a7b7c6d0a2ad5264df32acd75cdef552bd0
[ "MIT" ]
1
2019-07-17T09:08:41.000Z
2019-07-17T09:08:41.000Z
Qt_interface/add_subject.py
kithsirij/NLP-based-Syllabus-Coverage-Exam-paper-checker-Tool
b7b38a7b7c6d0a2ad5264df32acd75cdef552bd0
[ "MIT" ]
null
null
null
Qt_interface/add_subject.py
kithsirij/NLP-based-Syllabus-Coverage-Exam-paper-checker-Tool
b7b38a7b7c6d0a2ad5264df32acd75cdef552bd0
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'add_subject.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!

from PyQt4 import QtCore, QtGui

try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s

try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)


if __name__ == "__main__":
    import sys
    app = QtGui.QApplication(sys.argv)
    Dialog_add_subject = QtGui.QDialog()
    ui = Ui_Dialog_add_subject()
    ui.setupUi(Dialog_add_subject)
    Dialog_add_subject.show()
    sys.exit(app.exec_())
48.691729
137
0.694719
d9be3b65d403b8ba23a315dd5e1dcfb9fd542171
2,553
py
Python
tests/syncdb_signals/tests.py
mdj2/django
e71b63e280559122371d125d75a593dc2435c394
[ "BSD-3-Clause" ]
1
2017-02-08T15:13:43.000Z
2017-02-08T15:13:43.000Z
tests/syncdb_signals/tests.py
mdj2/django
e71b63e280559122371d125d75a593dc2435c394
[ "BSD-3-Clause" ]
null
null
null
tests/syncdb_signals/tests.py
mdj2/django
e71b63e280559122371d125d75a593dc2435c394
[ "BSD-3-Clause" ]
null
null
null
from django.db.models import signals
from django.test import TestCase
from django.core import management
from django.utils import six

from shared_models import models


PRE_SYNCDB_ARGS = ['app', 'create_models', 'verbosity', 'interactive', 'db']
SYNCDB_DATABASE = 'default'
SYNCDB_VERBOSITY = 1
SYNCDB_INTERACTIVE = False


# We connect receiver here and not in unit test code because we need to
# connect receiver before test runner creates database.  That is, sequence of
# actions would be:
#
#   1. Test runner imports this module.
#   2. We connect receiver.
#   3. Test runner calls syncdb to create the default database.
#   4. Test runner executes our unit test code.
pre_syncdb_receiver = OneTimeReceiver()
signals.pre_syncdb.connect(pre_syncdb_receiver, sender=models)
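The OneTimeReceiver class itself is not included in this excerpt; a minimal sketch of a receiver matching how it is connected above (an assumption, not the file's actual definition):

class OneTimeReceiver(object):
    # Record only the dispatch for the default database so the test can
    # assert against the arguments syncdb sent.
    def __init__(self):
        self.call_counter = 0
        self.call_args = None

    def __call__(self, signal, sender, **kwargs):
        if kwargs['db'] == SYNCDB_DATABASE:
            self.call_counter = self.call_counter + 1
            self.call_args = kwargs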
34.04
77
0.703486
d9be5eda54c6b03914f01c88d3b8d97dd5add586
3,625
py
Python
pytorch_lightning/plugins/environments/slurm_environment.py
gianscarpe/pytorch-lightning
261ea90822e2bf1cfa5d56171ab1f95a81d5c571
[ "Apache-2.0" ]
null
null
null
pytorch_lightning/plugins/environments/slurm_environment.py
gianscarpe/pytorch-lightning
261ea90822e2bf1cfa5d56171ab1f95a81d5c571
[ "Apache-2.0" ]
null
null
null
pytorch_lightning/plugins/environments/slurm_environment.py
gianscarpe/pytorch-lightning
261ea90822e2bf1cfa5d56171ab1f95a81d5c571
[ "Apache-2.0" ]
null
null
null
# Copyright The PyTorch Lightning team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import re from pytorch_lightning.plugins.environments.cluster_environment import ClusterEnvironment log = logging.getLogger(__name__)
33.878505
114
0.622345
d9be639438e84e867c9e53c267b847b31292fe23
928
py
Python
examples/mouse.py
ginkage/trackball-python
06439ac77935f7fd9374bd4f535822e859734729
[ "MIT" ]
22
2019-04-19T11:13:16.000Z
2022-03-04T15:04:43.000Z
examples/mouse.py
ginkage/trackball-python
06439ac77935f7fd9374bd4f535822e859734729
[ "MIT" ]
7
2019-06-17T13:48:41.000Z
2022-02-07T14:24:00.000Z
examples/mouse.py
ginkage/trackball-python
06439ac77935f7fd9374bd4f535822e859734729
[ "MIT" ]
6
2019-04-24T00:58:29.000Z
2022-01-26T15:39:10.000Z
#!/usr/bin/env python

import time
import os
import math
from trackball import TrackBall

print("""Trackball: Mouse

Use the trackball as a mouse in Raspbian, with right-click
when the switch is pressed.

Press Ctrl+C to exit!
""")

trackball = TrackBall(interrupt_pin=4)
trackball.set_rgbw(0, 0, 0, 0)

# Check for xte (used to control mouse)
use_xte = os.system('which xte') == 0

if not use_xte:
    raise RuntimeError("xte not found. Did you sudo apt install xautomation?")

while True:
    up, down, left, right, switch, state = trackball.read()

    # Send movements and clicks to xte
    if switch:
        cmd = 'xte "mouseclick 1"'
        os.system(cmd)
    elif right or up or left or down:
        # Square the deltas for acceleration while keeping their sign
        x = right - left
        x = math.copysign(x**2, x)
        y = down - up
        y = math.copysign(y**2, y)
        cmd = 'xte "mousermove {} {}"'.format(int(x), int(y))
        os.system(cmd)
    time.sleep(0.0001)
23.2
78
0.635776
d9be866c44b7b03225042353a7fcf648c1ce10ab
11,294
py
Python
garaged/src/garage/tf/regressors/gaussian_mlp_regressor_model.py
artberryx/LSD
99ee081de2502b4d13c140b474f772db8a5f92fe
[ "MIT" ]
7
2022-02-01T03:02:24.000Z
2022-02-10T12:54:05.000Z
garaged/src/garage/tf/regressors/gaussian_mlp_regressor_model.py
artberryx/LSD
99ee081de2502b4d13c140b474f772db8a5f92fe
[ "MIT" ]
null
null
null
garaged/src/garage/tf/regressors/gaussian_mlp_regressor_model.py
artberryx/LSD
99ee081de2502b4d13c140b474f772db8a5f92fe
[ "MIT" ]
2
2022-02-03T03:33:25.000Z
2022-02-10T12:54:07.000Z
"""GaussianMLPRegressorModel.""" import numpy as np import tensorflow as tf import tensorflow_probability as tfp from garage.experiment import deterministic from garage.tf.models import GaussianMLPModel
44.81746
79
0.611741
d9becf802ca0765623e481aef0b8fd051c0096e5
3,594
py
Python
test.py
kim-sunghoon/DiracDeltaNet
7bcc0575f28715d9c7f737f8a239718320f9c05b
[ "Apache-2.0" ]
null
null
null
test.py
kim-sunghoon/DiracDeltaNet
7bcc0575f28715d9c7f737f8a239718320f9c05b
[ "Apache-2.0" ]
null
null
null
test.py
kim-sunghoon/DiracDeltaNet
7bcc0575f28715d9c7f737f8a239718320f9c05b
[ "Apache-2.0" ]
null
null
null
import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import torch.backends.cudnn as cudnn import torchvision import torchvision.transforms as transforms import torchvision.datasets as datasets import os import argparse from torch.autograd import Variable from extensions.utils import progress_bar from extensions.model_refinery_wrapper import ModelRefineryWrapper from extensions.refinery_loss import RefineryLoss from models import ShuffleNetv2_wrapper from models import DiracDeltaNet_wrapper parser = argparse.ArgumentParser(description='PyTorch imagenet inference') parser.add_argument('--datadir', help='path to dataset') parser.add_argument('--inputdir', help='path to input model') args = parser.parse_args() # Data print('==> Preparing data..') # Data loading code valdir = os.path.join(args.datadir, 'val') normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) transform_test = transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize, ]) #imagenet testset = datasets.ImageFolder(valdir, transform_test) num_classes=1000 testloader = torch.utils.data.DataLoader(testset, batch_size=1000, shuffle=False, pin_memory=True, num_workers=30) use_cuda = torch.cuda.is_available() print('Using input path: %s' % args.inputdir) checkpoint = torch.load(args.inputdir) init_net = checkpoint['net'] net=init_net.to('cpu') label_refinery=torch.load('./resnet50.t7') net = ModelRefineryWrapper(net, label_refinery) device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") print(device) if torch.cuda.device_count() > 1: print("Let's use", torch.cuda.device_count(), "GPUs!") net = nn.DataParallel(net) net=net.to(device) criterion = RefineryLoss() def accuracy(output, target, topk=(1,)): """Computes the precision@k for the specified values of k""" with torch.no_grad(): maxk = max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) res = [] for k in topk: correct_k = correct[:k].view(-1).float().sum(0, keepdim=True) res.append(correct_k) return res acc1,acc5,loss=test() print('top-1 accuracy: {0:.3f}%, top-5 accuracy: {1:.3f}%'.format(acc1,acc5))
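The test() function called at the bottom is not part of this excerpt. A minimal evaluation-loop sketch consistent with the surrounding setup; treat it as an illustrative assumption (in particular, the shape of RefineryLoss's return value is guessed):

def test():
    net.eval()
    top1, top5, total, loss_sum = 0.0, 0.0, 0, 0.0
    with torch.no_grad():
        for inputs, targets in testloader:
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = net(inputs)
            loss = criterion(outputs, targets)
            # RefineryLoss may return a tuple; assume a scalar tensor here.
            loss_sum += float(loss[0] if isinstance(loss, tuple) else loss)
            c1, c5 = accuracy(outputs, targets, topk=(1, 5))
            top1 += c1.item()
            top5 += c5.item()
            total += targets.size(0)
    return 100.0 * top1 / total, 100.0 * top5 / total, loss_sum / len(testloader)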
28.983871
115
0.645242
d9c028ee1a5ced657b4755383e247cbb2fed35a8
416
py
Python
paccmann_chemistry/utils/hyperparams.py
PaccMann/paccmann_chemistry
f7e9735aafb936f837c38b5055c654be178f385f
[ "MIT" ]
9
2019-11-06T10:39:15.000Z
2022-01-09T11:08:52.000Z
paccmann_chemistry/utils/hyperparams.py
PaccMann/paccmann_chemistry
f7e9735aafb936f837c38b5055c654be178f385f
[ "MIT" ]
10
2019-11-06T17:33:51.000Z
2020-12-28T07:46:23.000Z
paccmann_chemistry/utils/hyperparams.py
PaccMann/paccmann_chemistry
f7e9735aafb936f837c38b5055c654be178f385f
[ "MIT" ]
5
2020-08-13T15:00:57.000Z
2022-03-24T14:29:07.000Z
"""Model Parameters Module.""" import torch.optim as optim from .search import SamplingSearch, GreedySearch, BeamSearch SEARCH_FACTORY = { 'sampling': SamplingSearch, 'greedy': GreedySearch, 'beam': BeamSearch, } OPTIMIZER_FACTORY = { 'adadelta': optim.Adadelta, 'adagrad': optim.Adagrad, 'adam': optim.Adam, 'adamax': optim.Adamax, 'rmsprop': optim.RMSprop, 'sgd': optim.SGD }
21.894737
60
0.675481
d9c1c1059c5b91f27882844cb4c3becda27ebd7c
6,417
py
Python
tests/gpflux/layers/test_latent_variable_layer.py
francescodonato/GPflux
fe45b353243b31d9fa0ec0daeb1d39a2e78ba094
[ "Apache-2.0" ]
100
2021-04-13T07:54:49.000Z
2022-03-21T16:25:45.000Z
tests/gpflux/layers/test_latent_variable_layer.py
francescodonato/GPflux
fe45b353243b31d9fa0ec0daeb1d39a2e78ba094
[ "Apache-2.0" ]
17
2021-04-13T03:13:11.000Z
2022-02-28T07:36:55.000Z
tests/gpflux/layers/test_latent_variable_layer.py
francescodonato/GPflux
fe45b353243b31d9fa0ec0daeb1d39a2e78ba094
[ "Apache-2.0" ]
13
2021-04-12T19:12:17.000Z
2022-03-10T00:41:44.000Z
# # Copyright (c) 2021 The GPflux Contributors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import abc import numpy as np import pytest import tensorflow as tf import tensorflow_probability as tfp from gpflow.kullback_leiblers import gauss_kl from gpflux.encoders import DirectlyParameterizedNormalDiag from gpflux.layers import LatentVariableLayer, LayerWithObservations, TrackableLayer tf.keras.backend.set_floatx("float64") ############ # Utilities ############ def _zero_one_normal_prior(w_dim): """ N(0, I) prior """ return tfp.distributions.MultivariateNormalDiag(loc=np.zeros(w_dim), scale_diag=np.ones(w_dim)) ############ # Tests ############ class ArrayMatcher: def __init__(self, expected): self.expected = expected def test_no_tensorflow_metaclass_overwritten(): """ LayerWithObservations is a subclass of tf.keras.layers.Layer (via TrackableLayer); this test ensures that TrackableLayer does not have a metaclass, and hence by adding the ABCMeta to LayerWithObservations we are not accidentally removing some required TensorFlow magic metaclass. """ assert LayerWithObservations.__bases__ == (TrackableLayer,) assert type(TrackableLayer) is type assert type(LayerWithObservations) is abc.ABCMeta
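A tiny sketch of the _zero_one_normal_prior helper in use (shapes chosen arbitrarily):

prior = _zero_one_normal_prior(w_dim=2)
samples = prior.sample(5)            # shape (5, 2), draws from N(0, I)
log_probs = prior.log_prob(samples)  # shape (5,)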
32.573604
99
0.717781
d9c21d6803d82661080e36eb0e94a3b82f8b2f7c
18,041
py
Python
aw-actor-trust.py
actingweb/box-actingweb
f586458484649aba927cd78c60b4d0fec7b82ca6
[ "Apache-2.0" ]
null
null
null
aw-actor-trust.py
actingweb/box-actingweb
f586458484649aba927cd78c60b4d0fec7b82ca6
[ "Apache-2.0" ]
null
null
null
aw-actor-trust.py
actingweb/box-actingweb
f586458484649aba927cd78c60b4d0fec7b82ca6
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python
#
from actingweb import actor
from actingweb import config
from actingweb import trust
from actingweb import auth

import webapp2
import os
from google.appengine.ext.webapp import template
import json
import logging
import datetime
import time

# /trust handlers
#
# GET /trust with query parameters (relationship, type, and peerid) to retrieve trust relationships (auth: only creator and admins allowed)
# POST /trust with json body to initiate a trust relationship between this
#   actor and another (reciprocal relationship) (auth: only creator and admins allowed)
# POST /trust/{relationship} with json body to create new trust
#   relationship (see config.py for default relationship and auto-accept, no
#   auth required)
# GET /trust/{relationship}/{actorid} to get details on a specific relationship (auth: creator, admin, or peer secret)
# POST /trust/{relationship}/{actorid} to send information to a peer about changes in the relationship
# PUT /trust/{relationship}/{actorid} with a json body to change details on a relationship (baseuri, secret, desc) (auth: creator,
#   admin, or peer secret)
# DELETE /trust/{relationship}/{actorid} to delete a relationship (with
#   ?peer=true if the delete is from the peer) (auth: creator, admin, or
#   peer secret)


# Handling requests to trust/


# Handling requests to /trust/*, e.g. /trust/friend


# Handling requests to specific relationships, e.g. /trust/friend/12f2ae53bd

application = webapp2.WSGIApplication([
    webapp2.Route(r'/<id>/trust<:/?>', rootHandler, name='rootHandler'),
    webapp2.Route(r'/<id>/trust/<relationship><:/?>', relationshipHandler,
                  name='relationshipHandler'),
    webapp2.Route(r'/<id>/trust/<relationship>/<peerid><:/?>', trustHandler,
                  name='trustHandler'),
], debug=True)
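A brief sketch of exercising the trust API documented above with requests; the host, actor id, peer URL, secret, and JSON fields are all made-up placeholders:

import requests

base = "https://actingweb.example.com/3cbb0a76"  # hypothetical actor URI

# Initiate a 'friend' relationship (POST /trust/{relationship}).
resp = requests.post(base + "/trust/friend",
                     json={"url": "https://peer.example.com/aa11bb22"})

# Inspect a specific relationship; peers authenticate with the shared secret.
detail = requests.get(base + "/trust/friend/12f2ae53bd",
                      headers={"Authorization": "Bearer <secret>"})
print(resp.status_code, detail.status_code)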
41.955814
152
0.558228
d9c3024853794c19d2ce2400c8d47311441430b2
8,513
py
Python
src/main/python/rlbot/version.py
IamEld3st/RLBot
36195ffd3a836ed910ce63aed8ba103b98b7b361
[ "MIT" ]
null
null
null
src/main/python/rlbot/version.py
IamEld3st/RLBot
36195ffd3a836ed910ce63aed8ba103b98b7b361
[ "MIT" ]
null
null
null
src/main/python/rlbot/version.py
IamEld3st/RLBot
36195ffd3a836ed910ce63aed8ba103b98b7b361
[ "MIT" ]
null
null
null
# Store the version here so:
# 1) we don't load dependencies by storing it in __init__.py
# 2) we can import it in setup.py for the same reason
# 3) we can import it into your module
# https://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package

__version__ = '1.6.1'

release_notes = {
    '1.6.1': """
    Fixed GUI crash when loading certain RLBot config files with relative paths for agents.
    Fixed agent preset loading to allow multiple agents to be saved/loaded correctly if they have the same name. - ima9rd
    """,
    '1.6.0': """
    Add support for auto starting .NET executables.
    """,
    '1.5.1': """
    Fixed crash with GUI when no default RLBot.cfg file was found.
    Updated GUI to launch Rocket League when clicking run if no Rocket League process is found. - ima9rd
    """,
    '1.5.0': """
    Adding a have_internet helper function to help streamline upgrade checks. - ima9rd
    """,
    '1.4.2': """
    Adding support for auto-running java bots during tournaments. To take advantage of this in your bot,
    see https://github.com/RLBot/RLBotJavaExample/wiki/Auto-Launching-Java

    Plus bug fixes:
    - Fixed a bug where auto-run executables would crash when trying to write to stderr.
    - Dragging bots to another team in the GUI no longer breaks the config.
    """,
    '1.3.0': """
    Accurate ball prediction for Hoops and Dropshot modes!
    - Kipje13, Marvin, NeverCast, et. al.
    """,
    '1.2.6': """
    Fixed a bug where field info was not extracted properly during dropshot mode.
    It was reporting 2 goals rather than the expected 140.
    """,
    '1.2.5': """
    ***************************************************
    *  Fix for dodge cancels / half flips! - ccman32  *
    ***************************************************

    Plus:
    - Changing the rendering strategy for 3D lines that go past the camera. Formerly it was
      "draw it, even though it's crazy sometimes", now it will be "don't draw it".
    - Showing the rate that inputs are received for each player index when you press the
      [home] key. Toggle back off with the [end] key.
    - Fixed a bug where party_member_bot could get influenced by real controller input.
    - Creating new presets in the GUI works better now.
    - Got rid of the libpng warning seen when using the GUI.
    - Giving specific error messages when cfg files are messed up.
    """,
    '1.2.2': """
    - Rearranged the GUI a bit, and made it load and track appearance configs more effectively.
    - Fixed bug where RUN button behavior in the GUI would not work after killing bots.
    """,
    '1.2.0': """
    - We now offer a 'RigidBodyTick' thanks to whatisaphone! It's a lower-level representation of
      physics data which updates at 120Hz and is not subject to interpolation. You can still make a
      great bot without it, but this feature is quite nice for the scientists among us.
      See https://github.com/RLBot/RLBotPythonExample/wiki/Rigid-Body-Tick for more details!
    - Faster way to access ball prediction data in python. - Skyborg
    """,
    '1.1.3': """
    - Faster way to access ball prediction data in python. - Skyborg
    - Java bots will now shut down when the python framework quits. This has been necessary
      recently to avoid buggy situations.
    - Shutting down the python framework will no longer attempt to kill bots twice in a row.
    - Clicking on the "Run" button twice in a row in the GUI will no longer spawn duplicate processes.
    """,
    '1.1.2': """
    Faster way to access ball prediction data in python. - Skyborg
    """,
    '1.1.1': """
    You can now get information about the ball's status in Dropshot mode thanks to hallo_doei!
Read all about it at https://github.com/RLBot/RLBot/wiki/Dropshot Other changes: - The loadout config for orange team is now respected again. - ccman32 - Fixed a bug where the GUI would crash with a "KeyError". - hallo_doei - Avoiding and suppressing some game crashes, and also restoring the ability to get game tick data during replays and the postgame. - tarehart - Fixed a bug where bots would dodge when they intended to double jump. -tarehart """, '1.0.6': """ The latest Rocket League patch broke dodges for our bots; this update fixes it. """, '1.0.5': """ Maximum size for a render message has been decreased again because many people experienced errors related to memory access. The limit is now only double the original. """, '1.0.4': """ - Maximum size for a render message has been increased by a factor of 100. This means you can draw a lot of lines at once without getting errors. - Boost amount for cars will now round up to the nearest integer, so 0.3% boost will now appear as 1 instead of 0. - Fixed a crash that would commonly happen after a match ends. As a side effect, you can no longer see up-to-date player data during instant replays. """, '1.0.3': """ Time for the big 1.0 release! We actually left "beta" a long time ago so this isn't as big a milestone as the number implies, but we DO have two great new features! 1. Setting game state. You can manipulate the position, velocity, etc of the ball and the cars! This can be a great help during bot development, and you can also get creative with it. Visit the wiki for details and documentation - https://github.com/RLBot/RLBot/wiki/Manipulating-Game-State Code written by hallo_doei, ccman32, and tarehart 2. Ball prediction. We now provide a list of future ball positions based on chip's excellent physics modeling. Take advantage of this to do next-level wall reads, catches, and dribbles! You can read about the math involved here: https://samuelpmish.github.io/notes/RocketLeague/ball_bouncing/ Note: currently the wall bounces are only accurate on the standard arena, not hoops or dropshot. Documentation and examples can be found here: https://github.com/RLBot/RLBot/wiki/Ball-Path-Prediction Code written by chip and tarehart Bonus: - You can now play on Salty Shores thanks to hallo_doei - Bug fix for people with spaces in their file path by Zaptive - Subprocess agent for future Rust support by whatisaphone """, '0.0.32': """ More comprehensive fix for Rocket League patch 1.50. Compared to previous version: - Dropshot tile data is fixed - Boost pad data is fixed - Loadout configuration is fixed Thanks to ccman32 and dtracers for delivering this fix quickly! """, '0.0.31': """ Rapid response to Rocket League patch 1.50 with the following known issues: - Dropshot tile data is missing - Boost pad data is missing - Loadout configuration is broken Thanks to ccman32 and dtracers for delivering this short-term fix quickly. We will follow this up with a proper fix as soon as possible. You may also choose to stay on Rocket League 1.49 and RLBot 0.0.30, ask for instructions on discord. 
""", '0.0.30': """ - New core dll that is less likely to break when Rocket League is patched - ccman32 and hallo-doei - Fixed bug resulting in incorrect quickchat - dtracers - Added more built-in colors to the python rendering manager - Eastvillage - Fix for items with a ':' not showing up in the GUI - hallo-doei - Fix for GUI not saving correct path - hallo-doei - Fix for GUI crash when saving preset then canceling - hallo-doei - Adding file checking before injection (Resolves #167) - Redox - Fixed typo in rlbot.cfg - Redox - Fancy release notes - tarehart and Skyborg """ } release_banner = """ ______ _ ______ _ 10100 | ___ \ | | ___ \ | | 00101 110011 | |_/ / | | |_/ / ___ | |_ 110011 00110110 | /| | | ___ \/ _ \| __| 01101100 010010 | |\ \| |____| |_/ / (_) | |_ 010010 10010 \_| \_\_____/\____/ \___/ \__| 01001 """
45.768817
118
0.677787
d9c310055166d8d1507c05ad91c6bc47af7f5743
32,544
py
Python
dungeoncog/enemy_skills_pb2.py
muffin-rice/pad-cogs
820ecf08f9569a3d7cf3264d0eb9567264b42edf
[ "MIT" ]
3
2021-04-16T23:47:59.000Z
2021-09-10T06:00:18.000Z
dungeoncog/enemy_skills_pb2.py
muffin-rice/pad-cogs
820ecf08f9569a3d7cf3264d0eb9567264b42edf
[ "MIT" ]
708
2020-10-31T08:02:40.000Z
2022-03-31T09:39:25.000Z
dungeoncog/enemy_skills_pb2.py
muffin-rice/pad-cogs
820ecf08f9569a3d7cf3264d0eb9567264b42edf
[ "MIT" ]
20
2020-11-01T23:11:29.000Z
2022-02-07T07:04:15.000Z
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: enemy_skills.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='enemy_skills.proto', package='dadguide_proto', syntax='proto3', serialized_options=None, create_key=_descriptor._internal_create_key, serialized_pb=b'\n\x12\x65nemy_skills.proto\x12\x0e\x64\x61\x64guide_proto\"\xbf\x02\n\x1cMonsterBehaviorWithOverrides\x12\x12\n\nmonster_id\x18\x01 \x01(\x05\x12-\n\x06levels\x18\x02 \x03(\x0b\x32\x1d.dadguide_proto.LevelBehavior\x12\x36\n\x0flevel_overrides\x18\x03 \x03(\x0b\x32\x1d.dadguide_proto.LevelBehavior\x12\x43\n\x06status\x18\x04 \x01(\x0e\x32\x33.dadguide_proto.MonsterBehaviorWithOverrides.Status\"_\n\x06Status\x12\x10\n\x0cNOT_APPROVED\x10\x00\x12\x12\n\x0e\x41PPROVED_AS_IS\x10\x01\x12\x14\n\x10NEEDS_REAPPROVAL\x10\x02\x12\x19\n\x15\x41PPROVED_WITH_CHANGES\x10\x03\"f\n\x0fMonsterBehavior\x12\x12\n\nmonster_id\x18\x01 \x01(\x05\x12-\n\x06levels\x18\x02 \x03(\x0b\x32\x1d.dadguide_proto.LevelBehavior\x12\x10\n\x08\x61pproved\x18\x03 \x01(\x08\"M\n\rLevelBehavior\x12\r\n\x05level\x18\x01 \x01(\x05\x12-\n\x06groups\x18\x02 \x03(\x0b\x32\x1d.dadguide_proto.BehaviorGroup\"\xd9\x02\n\rBehaviorGroup\x12;\n\ngroup_type\x18\x01 \x01(\x0e\x32\'.dadguide_proto.BehaviorGroup.GroupType\x12,\n\tcondition\x18\x02 \x01(\x0b\x32\x19.dadguide_proto.Condition\x12.\n\x08\x63hildren\x18\x03 \x03(\x0b\x32\x1c.dadguide_proto.BehaviorItem\"\xac\x01\n\tGroupType\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07PASSIVE\x10\x01\x12\x0b\n\x07PREEMPT\x10\x02\x12\x11\n\rDISPEL_PLAYER\x10\x03\x12\x12\n\x0eMONSTER_STATUS\x10\x04\x12\r\n\tREMAINING\x10\x05\x12\x0c\n\x08STANDARD\x10\x06\x12\t\n\x05\x44\x45\x41TH\x10\x07\x12\x0f\n\x0bUNKNOWN_USE\x10\x08\x12\x14\n\x10HIGHEST_PRIORITY\x10\t\"u\n\x0c\x42\x65haviorItem\x12.\n\x05group\x18\x02 \x01(\x0b\x32\x1d.dadguide_proto.BehaviorGroupH\x00\x12,\n\x08\x62\x65havior\x18\x03 \x01(\x0b\x32\x18.dadguide_proto.BehaviorH\x00\x42\x07\n\x05value\"c\n\x08\x42\x65havior\x12,\n\tcondition\x18\x01 \x01(\x0b\x32\x19.dadguide_proto.Condition\x12\x16\n\x0e\x65nemy_skill_id\x18\x02 \x01(\x05\x12\x11\n\tchild_ids\x18\x03 \x03(\x05\"\x80\x04\n\tCondition\x12\x14\n\x0chp_threshold\x18\x01 \x01(\x05\x12\x12\n\nuse_chance\x18\x02 \x01(\x05\x12\x15\n\rrepeats_every\x18\x03 \x01(\x05\x12\x17\n\x0fglobal_one_time\x18\x04 \x01(\x08\x12\x19\n\x11limited_execution\x18\r \x01(\x05\x12!\n\x19trigger_enemies_remaining\x18\x05 \x01(\x05\x12\x13\n\x0bif_defeated\x18\x06 \x01(\x08\x12\x1f\n\x17if_attributes_available\x18\x07 \x01(\x08\x12\x18\n\x10trigger_monsters\x18\x08 \x03(\x05\x12\x16\n\x0etrigger_combos\x18\t \x01(\x05\x12\x1a\n\x12if_nothing_matched\x18\n \x01(\x08\x12\x14\n\x0ctrigger_turn\x18\x0b \x01(\x05\x12\x18\n\x10trigger_turn_end\x18\x0c \x01(\x05\x12\x1c\n\x14\x61lways_trigger_above\x18\x0e \x01(\x05\x12\x14\n\x0c\x61lways_after\x18\x0f \x01(\x05\x12\x11\n\tskill_set\x18\x10 \x01(\x05\x12\x19\n\x11\x65rased_attributes\x18\x11 \x03(\x05\x12\x13\n\x0b\x64\x61mage_done\x18\x12 \x01(\x05\x12\x1b\n\x13\x61ttributes_attacked\x18\x13 \x03(\x05\x12\x13\n\x0bskills_used\x18\x14 \x01(\x05\x62\x06proto3' ) _MONSTERBEHAVIORWITHOVERRIDES_STATUS = _descriptor.EnumDescriptor( 
name='Status', full_name='dadguide_proto.MonsterBehaviorWithOverrides.Status', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='NOT_APPROVED', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='APPROVED_AS_IS', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='NEEDS_REAPPROVAL', index=2, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='APPROVED_WITH_CHANGES', index=3, number=3, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=263, serialized_end=358, ) _sym_db.RegisterEnumDescriptor(_MONSTERBEHAVIORWITHOVERRIDES_STATUS) _BEHAVIORGROUP_GROUPTYPE = _descriptor.EnumDescriptor( name='GroupType', full_name='dadguide_proto.BehaviorGroup.GroupType', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='UNSPECIFIED', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='PASSIVE', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='PREEMPT', index=2, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='DISPEL_PLAYER', index=3, number=3, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='MONSTER_STATUS', index=4, number=4, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='REMAINING', index=5, number=5, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='STANDARD', index=6, number=6, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='DEATH', index=7, number=7, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='UNKNOWN_USE', index=8, number=8, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='HIGHEST_PRIORITY', index=9, number=9, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=717, serialized_end=889, ) _sym_db.RegisterEnumDescriptor(_BEHAVIORGROUP_GROUPTYPE) _MONSTERBEHAVIORWITHOVERRIDES = _descriptor.Descriptor( name='MonsterBehaviorWithOverrides', full_name='dadguide_proto.MonsterBehaviorWithOverrides', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='monster_id', full_name='dadguide_proto.MonsterBehaviorWithOverrides.monster_id', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='levels', 
full_name='dadguide_proto.MonsterBehaviorWithOverrides.levels', index=1, number=2, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='level_overrides', full_name='dadguide_proto.MonsterBehaviorWithOverrides.level_overrides', index=2, number=3, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='status', full_name='dadguide_proto.MonsterBehaviorWithOverrides.status', index=3, number=4, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ _MONSTERBEHAVIORWITHOVERRIDES_STATUS, ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=39, serialized_end=358, ) _MONSTERBEHAVIOR = _descriptor.Descriptor( name='MonsterBehavior', full_name='dadguide_proto.MonsterBehavior', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='monster_id', full_name='dadguide_proto.MonsterBehavior.monster_id', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='levels', full_name='dadguide_proto.MonsterBehavior.levels', index=1, number=2, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='approved', full_name='dadguide_proto.MonsterBehavior.approved', index=2, number=3, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=360, serialized_end=462, ) _LEVELBEHAVIOR = _descriptor.Descriptor( name='LevelBehavior', full_name='dadguide_proto.LevelBehavior', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='level', full_name='dadguide_proto.LevelBehavior.level', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='groups', 
full_name='dadguide_proto.LevelBehavior.groups', index=1, number=2, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=464, serialized_end=541, ) _BEHAVIORGROUP = _descriptor.Descriptor( name='BehaviorGroup', full_name='dadguide_proto.BehaviorGroup', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='group_type', full_name='dadguide_proto.BehaviorGroup.group_type', index=0, number=1, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='condition', full_name='dadguide_proto.BehaviorGroup.condition', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='children', full_name='dadguide_proto.BehaviorGroup.children', index=2, number=3, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ _BEHAVIORGROUP_GROUPTYPE, ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=544, serialized_end=889, ) _BEHAVIORITEM = _descriptor.Descriptor( name='BehaviorItem', full_name='dadguide_proto.BehaviorItem', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='group', full_name='dadguide_proto.BehaviorItem.group', index=0, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='behavior', full_name='dadguide_proto.BehaviorItem.behavior', index=1, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ _descriptor.OneofDescriptor( name='value', full_name='dadguide_proto.BehaviorItem.value', index=0, containing_type=None, create_key=_descriptor._internal_create_key, fields=[]), ], serialized_start=891, serialized_end=1008, ) _BEHAVIOR = _descriptor.Descriptor( name='Behavior', full_name='dadguide_proto.Behavior', filename=None, file=DESCRIPTOR, 
containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='condition', full_name='dadguide_proto.Behavior.condition', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='enemy_skill_id', full_name='dadguide_proto.Behavior.enemy_skill_id', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='child_ids', full_name='dadguide_proto.Behavior.child_ids', index=2, number=3, type=5, cpp_type=1, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1010, serialized_end=1109, ) _CONDITION = _descriptor.Descriptor( name='Condition', full_name='dadguide_proto.Condition', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='hp_threshold', full_name='dadguide_proto.Condition.hp_threshold', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='use_chance', full_name='dadguide_proto.Condition.use_chance', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='repeats_every', full_name='dadguide_proto.Condition.repeats_every', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='global_one_time', full_name='dadguide_proto.Condition.global_one_time', index=3, number=4, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='limited_execution', full_name='dadguide_proto.Condition.limited_execution', index=4, number=13, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( 
name='trigger_enemies_remaining', full_name='dadguide_proto.Condition.trigger_enemies_remaining', index=5, number=5, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='if_defeated', full_name='dadguide_proto.Condition.if_defeated', index=6, number=6, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='if_attributes_available', full_name='dadguide_proto.Condition.if_attributes_available', index=7, number=7, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='trigger_monsters', full_name='dadguide_proto.Condition.trigger_monsters', index=8, number=8, type=5, cpp_type=1, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='trigger_combos', full_name='dadguide_proto.Condition.trigger_combos', index=9, number=9, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='if_nothing_matched', full_name='dadguide_proto.Condition.if_nothing_matched', index=10, number=10, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='trigger_turn', full_name='dadguide_proto.Condition.trigger_turn', index=11, number=11, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='trigger_turn_end', full_name='dadguide_proto.Condition.trigger_turn_end', index=12, number=12, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='always_trigger_above', full_name='dadguide_proto.Condition.always_trigger_above', index=13, number=14, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='always_after', 
full_name='dadguide_proto.Condition.always_after', index=14, number=15, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='skill_set', full_name='dadguide_proto.Condition.skill_set', index=15, number=16, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='erased_attributes', full_name='dadguide_proto.Condition.erased_attributes', index=16, number=17, type=5, cpp_type=1, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='damage_done', full_name='dadguide_proto.Condition.damage_done', index=17, number=18, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='attributes_attacked', full_name='dadguide_proto.Condition.attributes_attacked', index=18, number=19, type=5, cpp_type=1, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='skills_used', full_name='dadguide_proto.Condition.skills_used', index=19, number=20, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1112, serialized_end=1624, ) _MONSTERBEHAVIORWITHOVERRIDES.fields_by_name['levels'].message_type = _LEVELBEHAVIOR _MONSTERBEHAVIORWITHOVERRIDES.fields_by_name['level_overrides'].message_type = _LEVELBEHAVIOR _MONSTERBEHAVIORWITHOVERRIDES.fields_by_name['status'].enum_type = _MONSTERBEHAVIORWITHOVERRIDES_STATUS _MONSTERBEHAVIORWITHOVERRIDES_STATUS.containing_type = _MONSTERBEHAVIORWITHOVERRIDES _MONSTERBEHAVIOR.fields_by_name['levels'].message_type = _LEVELBEHAVIOR _LEVELBEHAVIOR.fields_by_name['groups'].message_type = _BEHAVIORGROUP _BEHAVIORGROUP.fields_by_name['group_type'].enum_type = _BEHAVIORGROUP_GROUPTYPE _BEHAVIORGROUP.fields_by_name['condition'].message_type = _CONDITION _BEHAVIORGROUP.fields_by_name['children'].message_type = _BEHAVIORITEM _BEHAVIORGROUP_GROUPTYPE.containing_type = _BEHAVIORGROUP _BEHAVIORITEM.fields_by_name['group'].message_type = _BEHAVIORGROUP _BEHAVIORITEM.fields_by_name['behavior'].message_type = _BEHAVIOR _BEHAVIORITEM.oneofs_by_name['value'].fields.append( _BEHAVIORITEM.fields_by_name['group']) _BEHAVIORITEM.fields_by_name['group'].containing_oneof = _BEHAVIORITEM.oneofs_by_name['value'] 
_BEHAVIORITEM.oneofs_by_name['value'].fields.append( _BEHAVIORITEM.fields_by_name['behavior']) _BEHAVIORITEM.fields_by_name['behavior'].containing_oneof = _BEHAVIORITEM.oneofs_by_name['value'] _BEHAVIOR.fields_by_name['condition'].message_type = _CONDITION DESCRIPTOR.message_types_by_name['MonsterBehaviorWithOverrides'] = _MONSTERBEHAVIORWITHOVERRIDES DESCRIPTOR.message_types_by_name['MonsterBehavior'] = _MONSTERBEHAVIOR DESCRIPTOR.message_types_by_name['LevelBehavior'] = _LEVELBEHAVIOR DESCRIPTOR.message_types_by_name['BehaviorGroup'] = _BEHAVIORGROUP DESCRIPTOR.message_types_by_name['BehaviorItem'] = _BEHAVIORITEM DESCRIPTOR.message_types_by_name['Behavior'] = _BEHAVIOR DESCRIPTOR.message_types_by_name['Condition'] = _CONDITION _sym_db.RegisterFileDescriptor(DESCRIPTOR) MonsterBehaviorWithOverrides = _reflection.GeneratedProtocolMessageType('MonsterBehaviorWithOverrides', (_message.Message,), { 'DESCRIPTOR': _MONSTERBEHAVIORWITHOVERRIDES, '__module__': 'enemy_skills_pb2' # @@protoc_insertion_point(class_scope:dadguide_proto.MonsterBehaviorWithOverrides) }) _sym_db.RegisterMessage(MonsterBehaviorWithOverrides) MonsterBehavior = _reflection.GeneratedProtocolMessageType('MonsterBehavior', (_message.Message,), { 'DESCRIPTOR': _MONSTERBEHAVIOR, '__module__': 'enemy_skills_pb2' # @@protoc_insertion_point(class_scope:dadguide_proto.MonsterBehavior) }) _sym_db.RegisterMessage(MonsterBehavior) LevelBehavior = _reflection.GeneratedProtocolMessageType('LevelBehavior', (_message.Message,), { 'DESCRIPTOR': _LEVELBEHAVIOR, '__module__': 'enemy_skills_pb2' # @@protoc_insertion_point(class_scope:dadguide_proto.LevelBehavior) }) _sym_db.RegisterMessage(LevelBehavior) BehaviorGroup = _reflection.GeneratedProtocolMessageType('BehaviorGroup', (_message.Message,), { 'DESCRIPTOR': _BEHAVIORGROUP, '__module__': 'enemy_skills_pb2' # @@protoc_insertion_point(class_scope:dadguide_proto.BehaviorGroup) }) _sym_db.RegisterMessage(BehaviorGroup) BehaviorItem = _reflection.GeneratedProtocolMessageType('BehaviorItem', (_message.Message,), { 'DESCRIPTOR': _BEHAVIORITEM, '__module__': 'enemy_skills_pb2' # @@protoc_insertion_point(class_scope:dadguide_proto.BehaviorItem) }) _sym_db.RegisterMessage(BehaviorItem) Behavior = _reflection.GeneratedProtocolMessageType('Behavior', (_message.Message,), { 'DESCRIPTOR': _BEHAVIOR, '__module__': 'enemy_skills_pb2' # @@protoc_insertion_point(class_scope:dadguide_proto.Behavior) }) _sym_db.RegisterMessage(Behavior) Condition = _reflection.GeneratedProtocolMessageType('Condition', (_message.Message,), { 'DESCRIPTOR': _CONDITION, '__module__': 'enemy_skills_pb2' # @@protoc_insertion_point(class_scope:dadguide_proto.Condition) }) _sym_db.RegisterMessage(Condition) # @@protoc_insertion_point(module_scope)
51.169811
2,854
0.703386
d9c32f78fb7ce24035473595e0a40c4945453a5b
2,465
py
Python
classy_vision/heads/fully_connected_head.py
dlegor/ClassyVision
9c82d533b66b0a5fbb11f8ab3567a9c70aa4e013
[ "MIT" ]
1
2021-04-11T19:01:10.000Z
2021-04-11T19:01:10.000Z
classy_vision/heads/fully_connected_head.py
prigoyal/ClassyVision
db87bb87068ee8d2c7b21849ddd0548082e20a87
[ "MIT" ]
null
null
null
classy_vision/heads/fully_connected_head.py
prigoyal/ClassyVision
db87bb87068ee8d2c7b21849ddd0548082e20a87
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from typing import Any, Dict

import torch.nn as nn
from classy_vision.generic.util import is_pos_int
from classy_vision.heads import ClassyHead, register_head
31.602564
83
0.628398
d9c387f6c561372e064bfe33f0566d9f2a1cdd50
399
py
Python
Task2C.py
StanleyHou117/group66_LentTermProject
0255310cb202f21cada8cf7c0f45a045a9b72c1f
[ "MIT" ]
null
null
null
Task2C.py
StanleyHou117/group66_LentTermProject
0255310cb202f21cada8cf7c0f45a045a9b72c1f
[ "MIT" ]
null
null
null
Task2C.py
StanleyHou117/group66_LentTermProject
0255310cb202f21cada8cf7c0f45a045a9b72c1f
[ "MIT" ]
null
null
null
from floodsystem.stationdata import build_station_list
from floodsystem.flood import stations_highest_rel_level


if __name__ == "__main__":
    print("*** Task 2C: CUED Part IA Flood Warning System ***")
    run()
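# NOTE (illustrative, not part of the original file): run() is defined in
# the portion of Task2C.py elided from this record. Given the imports, a
# plausible sketch, with a hypothetical call signature, is:
#
#     def run():
#         stations = build_station_list()
#         # e.g. the 10 stations with the highest relative water level
#         for entry in stations_highest_rel_level(stations, 10):
#             print(entry)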
28.5
63
0.734336
d9c389b63a2c9720abef56190237f31a2306da19
1,972
py
Python
src/biotite/copyable.py
danijoo/biotite
22072e64676e4e917236eac8493eed4c6a22cc33
[ "BSD-3-Clause" ]
208
2018-04-20T15:59:42.000Z
2022-03-22T07:47:12.000Z
src/biotite/copyable.py
danielmuthama/biotite
cb238a8d8d7dc82b3bcea274d7d91d5c876badcd
[ "BSD-3-Clause" ]
121
2017-11-15T14:52:07.000Z
2022-03-30T16:31:41.000Z
src/biotite/copyable.py
danielmuthama/biotite
cb238a8d8d7dc82b3bcea274d7d91d5c876badcd
[ "BSD-3-Clause" ]
49
2018-07-19T09:06:24.000Z
2022-03-23T17:21:34.000Z
# This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.

__name__ = "biotite"
__author__ = "Patrick Kunzmann"
__all__ = ["Copyable"]

import abc
27.774648
70
0.59432
d9c3ac1232aa677a1999a869a726247c9e688214
3,400
py
Python
custom_components/wyzeapi/binary_sensor.py
np-hacs/ha-wyzeapi
8abc6af59d36514008f696310b290a046d7c7a72
[ "Apache-2.0" ]
null
null
null
custom_components/wyzeapi/binary_sensor.py
np-hacs/ha-wyzeapi
8abc6af59d36514008f696310b290a046d7c7a72
[ "Apache-2.0" ]
null
null
null
custom_components/wyzeapi/binary_sensor.py
np-hacs/ha-wyzeapi
8abc6af59d36514008f696310b290a046d7c7a72
[ "Apache-2.0" ]
null
null
null
import logging
import time
from datetime import timedelta
from typing import List

from homeassistant.components.binary_sensor import (
    BinarySensorEntity,
    DEVICE_CLASS_MOTION
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_ATTRIBUTION
from homeassistant.core import HomeAssistant
from wyzeapy.base_client import Device, AccessTokenError
from wyzeapy.client import Client
from wyzeapy.types import PropertyIDs

from .const import DOMAIN

_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by Wyze"
SCAN_INTERVAL = timedelta(seconds=10)
29.059829
109
0.645
d9c3b05c7fcf1f87eb65a4b552deef9342032f24
6,520
py
Python
src/Components/missions/GEMS/mcd43c.py
GEOS-ESM/AeroApps
874dad6f34420c014d98eccbe81a061bdc0110cf
[ "NASA-1.3", "ECL-2.0", "Apache-2.0" ]
2
2020-12-02T14:23:30.000Z
2021-12-31T15:39:30.000Z
src/Components/missions/GEMS/mcd43c.py
GEOS-ESM/AeroApps
874dad6f34420c014d98eccbe81a061bdc0110cf
[ "NASA-1.3", "ECL-2.0", "Apache-2.0" ]
9
2020-04-15T16:22:14.000Z
2022-03-24T13:59:25.000Z
src/Components/missions/SENTINEL-4/mcd43c.py
GEOS-ESM/AeroApps
874dad6f34420c014d98eccbe81a061bdc0110cf
[ "NASA-1.3", "ECL-2.0", "Apache-2.0" ]
null
null
null
""" Reads climate modeling grid 0.05 degree MCD43 BRDF files. """ import os import sys from numpy import loadtxt, array, tile, where, concatenate, flipud from numpy import ones from datetime import date, datetime, timedelta from glob import glob from pyhdf.SD import SD, HDF4Error MISSING = 32.767 SDS = dict ( LAND = ('BRDF_Albedo_Parameter1_Band1','BRDF_Albedo_Parameter1_Band2', 'BRDF_Albedo_Parameter1_Band3','BRDF_Albedo_Parameter1_Band4', 'BRDF_Albedo_Parameter1_Band5','BRDF_Albedo_Parameter1_Band6', 'BRDF_Albedo_Parameter1_Band7', 'BRDF_Albedo_Parameter2_Band1','BRDF_Albedo_Parameter2_Band2', 'BRDF_Albedo_Parameter2_Band3','BRDF_Albedo_Parameter2_Band4', 'BRDF_Albedo_Parameter2_Band5','BRDF_Albedo_Parameter2_Band6', 'BRDF_Albedo_Parameter2_Band7', 'BRDF_Albedo_Parameter3_Band1','BRDF_Albedo_Parameter3_Band2', 'BRDF_Albedo_Parameter3_Band3','BRDF_Albedo_Parameter3_Band4', 'BRDF_Albedo_Parameter3_Band5','BRDF_Albedo_Parameter3_Band6', 'BRDF_Albedo_Parameter3_Band7'), QUAL = ('BRDF_Albedo_Quality', 'Snow_BRDF_Albedo', 'BRDF_Albedo_Ancillary', ) ) ALIAS = dict ( BRDF_Albedo_Parameter1_Band1 = 'KISO_b1_645', BRDF_Albedo_Parameter1_Band2 = 'KISO_b2_856', BRDF_Albedo_Parameter1_Band3 = 'KISO_b3_465', BRDF_Albedo_Parameter1_Band4 = 'KISO_b4_553', BRDF_Albedo_Parameter1_Band5 = 'KISO_b5_1241', BRDF_Albedo_Parameter1_Band6 = 'KISO_b6_1629', BRDF_Albedo_Parameter1_Band7 = 'KISO_b7_2114', BRDF_Albedo_Parameter2_Band1 = 'KVOL_b1_645', BRDF_Albedo_Parameter2_Band2 = 'KVOL_b2_856', BRDF_Albedo_Parameter2_Band3 = 'KVOL_b3_465', BRDF_Albedo_Parameter2_Band4 = 'KVOL_b4_553', BRDF_Albedo_Parameter2_Band5 = 'KVOL_b5_1241', BRDF_Albedo_Parameter2_Band6 = 'KVOL_b6_1629', BRDF_Albedo_Parameter2_Band7 = 'KVOL_b7_2114', BRDF_Albedo_Parameter3_Band1 = 'KGEO_b1_645', BRDF_Albedo_Parameter3_Band2 = 'KGEO_b2_856', BRDF_Albedo_Parameter3_Band3 = 'KGEO_b3_465', BRDF_Albedo_Parameter3_Band4 = 'KGEO_b4_553', BRDF_Albedo_Parameter3_Band5 = 'KGEO_b5_1241', BRDF_Albedo_Parameter3_Band6 = 'KGEO_b6_1629', BRDF_Albedo_Parameter3_Band7 = 'KGEO_b7_2114', ) #........................................................................... #--- #............................................................................ if __name__ == "__main__": path = '/nobackup/3/pcastell/MODIS/MCD43C1/MCD43C1.A2005361.005.2008094071946.hdf' lon = [-2.,-120.,15.2,17.2,170.1] lat = [88.,40.,-20.,-20.,-55.5] lon = np.arange(-180,180,1) lat = np.arange(-90,90,1) lon,lat = np.meshgrid(lon,lat) ex = McD43C(path,lon.flatten(),lat.flatte())
36.222222
103
0.533282
d9c4481e6f2e6c4d81a9ed81d21838df61cf431f
26,272
py
Python
tests/keras/layers/wrappers_test.py
kalyc/keras-apache-mxnet
5497ebd50a45ccc446b8944ebbe11fb7721a5533
[ "MIT" ]
300
2018-04-04T05:01:21.000Z
2022-02-25T18:56:04.000Z
tests/keras/layers/wrappers_test.py
kalyc/keras-apache-mxnet
5497ebd50a45ccc446b8944ebbe11fb7721a5533
[ "MIT" ]
163
2018-04-03T17:41:22.000Z
2021-09-03T16:44:04.000Z
tests/keras/layers/wrappers_test.py
kalyc/keras-apache-mxnet
5497ebd50a45ccc446b8944ebbe11fb7721a5533
[ "MIT" ]
72
2018-04-21T06:42:30.000Z
2021-12-26T06:02:42.000Z
import pytest
import numpy as np
import copy
from numpy.testing import assert_allclose

from keras.utils import CustomObjectScope
from keras.layers import wrappers, Input, Layer
from keras.layers import RNN
from keras import layers
from keras.models import Sequential, Model, model_from_json
from keras import backend as K
from keras.utils.generic_utils import object_list_uid, to_list


def test_regularizers():
    model = Sequential()
    model.add(wrappers.TimeDistributed(
        layers.Dense(2, kernel_regularizer='l1'), input_shape=(3, 4)))
    model.add(layers.Activation('relu'))
    model.compile(optimizer='rmsprop', loss='mse')
    assert len(model.layers[0].layer.losses) == 1
    assert len(model.layers[0].losses) == 1
    assert len(model.layers[0].get_losses_for(None)) == 1
    assert len(model.losses) == 1

    model = Sequential()
    model.add(wrappers.TimeDistributed(
        layers.Dense(2, activity_regularizer='l1'), input_shape=(3, 4)))
    model.add(layers.Activation('relu'))
    model.compile(optimizer='rmsprop', loss='mse')
    assert len(model.losses) == 1


def test_Bidirectional():
    rnn = layers.SimpleRNN
    samples = 2
    dim = 2
    timesteps = 2
    output_dim = 2
    dropout_rate = 0.2
    for mode in ['sum', 'concat']:
        x = np.random.random((samples, timesteps, dim))
        target_dim = 2 * output_dim if mode == 'concat' else output_dim
        y = np.random.random((samples, target_dim))

        # test with Sequential model
        model = Sequential()
        model.add(wrappers.Bidirectional(rnn(output_dim,
                                             dropout=dropout_rate,
                                             recurrent_dropout=dropout_rate),
                                         merge_mode=mode,
                                         input_shape=(timesteps, dim)))
        model.compile(loss='mse', optimizer='sgd')
        model.fit(x, y, epochs=1, batch_size=1)

        # test config
        model.get_config()
        model = model_from_json(model.to_json())
        model.summary()

        # test stacked bidirectional layers
        model = Sequential()
        model.add(wrappers.Bidirectional(rnn(output_dim,
                                             return_sequences=True),
                                         merge_mode=mode,
                                         input_shape=(timesteps, dim)))
        model.add(wrappers.Bidirectional(rnn(output_dim), merge_mode=mode))
        model.compile(loss='mse', optimizer='sgd')
        model.fit(x, y, epochs=1, batch_size=1)

        # test with functional API
        inputs = Input((timesteps, dim))
        outputs = wrappers.Bidirectional(rnn(output_dim,
                                             dropout=dropout_rate,
                                             recurrent_dropout=dropout_rate),
                                         merge_mode=mode)(inputs)
        model = Model(inputs, outputs)
        model.compile(loss='mse', optimizer='sgd')
        model.fit(x, y, epochs=1, batch_size=1)

        # Bidirectional and stateful
        inputs = Input(batch_shape=(1, timesteps, dim))
        outputs = wrappers.Bidirectional(rnn(output_dim, stateful=True),
                                         merge_mode=mode)(inputs)
        model = Model(inputs, outputs)
        model.compile(loss='mse', optimizer='sgd')
        model.fit(x, y, epochs=1, batch_size=1)


def test_Bidirectional_state_reuse():
    rnn = layers.LSTM
    samples = 2
    dim = 5
    timesteps = 3
    units = 3

    input1 = Input((timesteps, dim))
    layer = wrappers.Bidirectional(rnn(units, return_state=True,
                                       return_sequences=True))
    state = layer(input1)[1:]

    # test passing invalid initial_state: passing a tensor
    input2 = Input((timesteps, dim))
    with pytest.raises(ValueError):
        output = wrappers.Bidirectional(rnn(units))(input2,
                                                    initial_state=state[0])

    # test valid usage: passing a list
    output = wrappers.Bidirectional(rnn(units))(input2, initial_state=state)
    model = Model([input1, input2], output)
    assert len(model.layers) == 4
    assert isinstance(model.layers[-1].input, list)
    inputs = [np.random.rand(samples, timesteps, dim),
              np.random.rand(samples, timesteps, dim)]
    outputs = model.predict(inputs)


def test_Bidirectional_trainable():
    # test layers that need learning_phase to be set
    x = Input(shape=(3, 2))
    layer = wrappers.Bidirectional(layers.SimpleRNN(3))
    _ = layer(x)
    assert len(layer.trainable_weights) == 6
    layer.trainable = False
    assert len(layer.trainable_weights) == 0
    layer.trainable = True
    assert len(layer.trainable_weights) == 6


def test_Bidirectional_updates():
    x = Input(shape=(3, 2))
    layer = wrappers.Bidirectional(layers.SimpleRNN(3))
    assert len(layer.updates) == 0
    assert len(layer.get_updates_for(None)) == 0
    assert len(layer.get_updates_for(x)) == 0
    layer.forward_layer.add_update(0, inputs=x)
    layer.forward_layer.add_update(1, inputs=None)
    layer.backward_layer.add_update(0, inputs=x)
    layer.backward_layer.add_update(1, inputs=None)
    assert len(layer.updates) == 4
    assert len(layer.get_updates_for(None)) == 2
    assert len(layer.get_updates_for(x)) == 2


def test_Bidirectional_losses():
    x = Input(shape=(3, 2))
    layer = wrappers.Bidirectional(
        layers.SimpleRNN(3, kernel_regularizer='l1', bias_regularizer='l1'))
    _ = layer(x)
    assert len(layer.losses) == 4
    assert len(layer.get_losses_for(None)) == 4
    assert len(layer.get_losses_for(x)) == 0
    layer.forward_layer.add_loss(0, inputs=x)
    layer.forward_layer.add_loss(1, inputs=None)
    layer.backward_layer.add_loss(0, inputs=x)
    layer.backward_layer.add_loss(1, inputs=None)
    assert len(layer.losses) == 8
    assert len(layer.get_losses_for(None)) == 6
    assert len(layer.get_losses_for(x)) == 2


if __name__ == '__main__':
    pytest.main([__file__])
40.418462
95
0.629225
d9c4cf9fb1ad31300587d3e24030d9670ed150d3
3,342
py
Python
src/tornado-3.2.2/tornado/platform/common.py
code-annotator/tornado-annotated
78fa3ab3b87a559c1db9ec11d86d79f6bf47853c
[ "MIT" ]
645
2015-01-03T02:03:59.000Z
2021-12-03T08:43:16.000Z
filenv/lib/python2.7/site-packages/tornado/platform/common.py
betoesquivel/fil2014
4c2e9188769096391bb206b76ed1ab8bd2ff66a1
[ "MIT" ]
2
2021-04-30T20:29:40.000Z
2022-02-11T03:38:04.000Z
filenv/lib/python2.7/site-packages/tornado/platform/common.py
betoesquivel/fil2014
4c2e9188769096391bb206b76ed1ab8bd2ff66a1
[ "MIT" ]
222
2015-01-07T05:00:52.000Z
2021-12-06T09:54:26.000Z
"""Lowest-common-denominator implementations of platform functionality.""" from __future__ import absolute_import, division, print_function, with_statement import errno import socket from tornado.platform import interface
36.326087
86
0.55775
d9c69927875c451378bcb7d50069e903036beefa
5,490
py
Python
bathymetry_blink/bathymetry_blink.py
poster515/BlinkyTape_Python
edc2f7e43fbf07dbfdeba60da7acb7ae7a3707d0
[ "MIT" ]
26
2015-02-14T11:37:21.000Z
2021-05-10T17:24:16.000Z
bathymetry_blink/bathymetry_blink.py
poster515/BlinkyTape_Python
edc2f7e43fbf07dbfdeba60da7acb7ae7a3707d0
[ "MIT" ]
8
2015-02-14T17:33:24.000Z
2021-10-05T20:32:19.000Z
bathymetry_blink/bathymetry_blink.py
poster515/BlinkyTape_Python
edc2f7e43fbf07dbfdeba60da7acb7ae7a3707d0
[ "MIT" ]
15
2015-01-24T23:36:54.000Z
2021-10-02T23:40:08.000Z
""" This script will modulate the blinky lights using the following algorithm: 1) uses user-provided location to obtain row of pixel data from bathy image 2) samples a 'number of LEDs' number of pixels from that row 3) shifts the sampled row data to center it at the location specified by user 4) displays resulting pixels on Blinky Tape 5) shifts next row by a given latitude, also specified by user 6) sleeps for user-specified period of time Uses the following arguments: -l/--location: tuple Location of the user in tuple(lat, lon). This represents the center of the LED strip. Defaults to (0, 0) -u/--update-interval: int Update interval of the script, in minutes. Defaults to 10. -p/--port: str Serial port of the BlinkyLight (e.g., 'ttyAMA0', 'COM3'). Defaults to 'COM5'. -d/--delta_latitude: int Vertical change in latitude every update rate. May be 0, but this will result in a never-changing LEDs. -i/--image: str Name of the PNG image that contains the color coded pathymetric data. The file current named mapserv.png was obtained using the following API: https://www.gebco.net/data_and_products/gebco_web_services/web_map_service/mapserv?request=getmap&service=wms&BBOX=-90,-180,90,180&format=image/png&height=600&width=1200&crs=EPSG:4326&layers=GEBCO_LATEST_SUB_ICE_TOPO&version=1.3.0 In lieu of providing command line arguments, you may alternatively edit the defaults in bath_config.json. NOTE: runs via: runfile('/BlinkyTape_Python/bathymetry_blink/bathymetry_blink.py', wdir='/BlinkyTape_Python/') (C) 2021 Joseph Post (https://joeycodes.dev) MIT Licensed """ import optparse import json from blinkytape import BlinkyTape from time import sleep from PIL import Image import numpy as np import sys MAX_ERRORS = 3 num_errors = 0 # Obtain default parameters with open("./bathymetry_blink/bathy_config.json") as f: config = json.load(f) # Default Blinky Tape port on Raspberry Pi is /dev/ttyACM0 parser = optparse.OptionParser() parser.add_option("-p", "--port", dest="portname", help="serial port (ex: /dev/ttyACM0)", default=config["port"]) parser.add_option("-l", "--location", dest="location", help="Location of the center of the LED strip (ex: 70,-110)", default=config["location"]) parser.add_option("-u", "--update-rate", dest="update_rate", help="How often to update elevation profile (mins) (ex: 5)", default=config["update_rate"]) parser.add_option("-d", "--delta-latitude", dest="delta_latitude", help="Change in latitude during update (ex: 5)", default=config["delta_latitude"]) parser.add_option("-n", "--num-leds", dest="num_leds", help="Number of LEDs in strip (ex: 60)", default=config["num_leds"]) parser.add_option("-i", "--image", dest="image_name", help="Name of the map/bathymetry image (ex: ./mapserv.png)", default=config["image"]) (options, args) = parser.parse_args() if args: print("Unknown parameters: " + args) # grab the values provided by user (or defaults) port = options.portname loc = options.location rate = options.update_rate delta = options.delta_latitude n_leds = options.num_leds i_name = options.image_name # Some visual indication that it works, for headless setups (green tape) bt = BlinkyTape(port, n_leds) bt.displayColor(0, 100, 0) bt.show() sleep(2) while True: try: # first, load image im = Image.open(i_name) # Can be many different formats. 
cols, rows = im.size a = np.asarray(im) # of shape (rows, cols, channels) # map loc latitude to 0-based index latitude_index = min(rows - 1, max(0, (int)(((loc[0] - -90) / (90 - -90)) * (rows - 0) + 0))) longitude_index = min(cols - 1, max(0, (int)(((loc[1] - -180) / (180 - -180)) * (cols - 0) + 0))) # update the location of the next row of elevation data to take loc[0] += delta loc[0] = ((loc[0] + 90) % 180) - 90 # wraps to next pole if overflow print("Lat index: " + str(latitude_index)) print("Lon index: " + str(longitude_index)) print("Next latitude: " + str(loc[0])) # grab the applicable pixel indices indices = [(int)(x*(cols/n_leds)) for x in range(n_leds)] # sample that row of pixel data output_pixels = np.take(a[latitude_index], indices, axis=0) # rotate the row to center around the specified longitude output_pixels = np.roll(output_pixels, longitude_index, axis=0) # send all pixel data to bt for pixel in output_pixels: print("Sending r: {}, g: {}, b: {}".format(*pixel)) bt.sendPixel(*pixel) # finally, show the image bt.show() # delete variables for memory management del a del im # Tape resets to stored pattern after a few seconds of inactivity sleep(rate * 60) # Wait specified number of minutes # sleep(10) # Wait specified number of minutes except KeyboardInterrupt: print("Keyboard interrupt, ending program.") sys.exit() except RuntimeError as e: print("Encountered runtime error: " + e.args[0]) # flush any incomplete data bt.show() num_errors += 1 if num_errors > MAX_ERRORS: sys.exit("Error count exceeds that allowed.")
36.845638
230
0.654098
d9c6ca6076e88b29cf949f6ea50aa8a721054e5d
5,118
py
Python
service/transforms/export_submissions.py
SFDigitalServices/pts-dispatcher-microservice-py
80ec68d9d7f3f120a708717ed92c8b5a16742ff3
[ "MIT" ]
null
null
null
service/transforms/export_submissions.py
SFDigitalServices/pts-dispatcher-microservice-py
80ec68d9d7f3f120a708717ed92c8b5a16742ff3
[ "MIT" ]
4
2020-08-28T17:21:06.000Z
2021-06-02T01:52:16.000Z
service/transforms/export_submissions.py
SFDigitalServices/pts-dispatcher-microservice-py
80ec68d9d7f3f120a708717ed92c8b5a16742ff3
[ "MIT" ]
null
null
null
""" Export Submissions Transform module """ #pylint: disable=too-few-public-methods import pandas as pd from .transform import TransformBase from ..resources.field_configs import FieldConfigs from ..resources.field_maps import FieldMaps
44.12069
112
0.520125
d9c723ccb8662448fc572ef43b245239e373eaa3
2,877
py
Python
python/ray/ml/tests/test_torch_trainer.py
mgelbart/ray
4cec2286572e368a4bd64aae467751a384eff62d
[ "Apache-2.0" ]
22
2018-05-08T05:52:34.000Z
2020-04-01T10:09:55.000Z
python/ray/ml/tests/test_torch_trainer.py
mgelbart/ray
4cec2286572e368a4bd64aae467751a384eff62d
[ "Apache-2.0" ]
73
2021-09-25T07:11:39.000Z
2022-03-26T07:10:59.000Z
python/ray/ml/tests/test_torch_trainer.py
mgelbart/ray
4cec2286572e368a4bd64aae467751a384eff62d
[ "Apache-2.0" ]
10
2018-04-27T10:50:59.000Z
2020-02-24T02:41:43.000Z
import pytest
import torch

import ray
from ray.ml.predictors.integrations.torch import TorchPredictor
from ray.ml.train.integrations.torch import TorchTrainer
from ray import train
from ray.ml.examples.pytorch.torch_linear_example import train_func as linear_train_func


def test_torch_e2e(ray_start_4_cpus):
    scaling_config = {"num_workers": 2}
    trainer = TorchTrainer(
        train_loop_per_worker=train_func, scaling_config=scaling_config
    )
    result = trainer.fit()

    predict_dataset = ray.data.range(3)
    predictions = predict_dataset.map_batches(
        TorchScorer, batch_format="pandas", compute="actors"
    )
    assert predictions.count() == 3


def test_torch_e2e_state_dict(ray_start_4_cpus):
    scaling_config = {"num_workers": 2}
    trainer = TorchTrainer(
        train_loop_per_worker=train_func, scaling_config=scaling_config
    )
    result = trainer.fit()

    # If loading from a state dict, a model definition must be passed in.
    with pytest.raises(ValueError):
        TorchPredictor.from_checkpoint(result.checkpoint)

    predict_dataset = ray.data.range(3)
    predictions = predict_dataset.map_batches(
        TorchScorer, batch_format="pandas", compute="actors"
    )
    assert predictions.count() == 3


if __name__ == "__main__":
    import pytest
    import sys

    sys.exit(pytest.main(["-v", "-x", __file__]))
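# NOTE (illustrative, not from the original file): `train_func` and
# `TorchScorer` are defined elsewhere in the original test module; this
# record elides them. Hypothetical stand-ins consistent with how they are
# used above might look like:
#
#     def train_func(config=None):
#         # delegate to the imported linear example trainer
#         linear_train_func(config or {"lr": 1e-2, "epochs": 3})
#
#     class TorchScorer:
#         def __init__(self):
#             # `checkpoint` and the model definition are hypothetical here
#             self.predictor = TorchPredictor.from_checkpoint(
#                 checkpoint, model=torch.nn.Linear(1, 1))
#
#         def __call__(self, batch):
#             return self.predictor.predict(batch)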
28.77
88
0.681265
d9c7946fa7c34a185ec10fc47b862efa2f519a9d
19,770
py
Python
uhd_restpy/testplatform/sessions/ixnetwork/quicktest/learnframes_58e01d83db5d99bcabff902f5cf6ec51.py
OpenIxia/ixnetwork_restpy
f628db450573a104f327cf3c737ca25586e067ae
[ "MIT" ]
20
2019-05-07T01:59:14.000Z
2022-02-11T05:24:47.000Z
uhd_restpy/testplatform/sessions/ixnetwork/quicktest/learnframes_58e01d83db5d99bcabff902f5cf6ec51.py
OpenIxia/ixnetwork_restpy
f628db450573a104f327cf3c737ca25586e067ae
[ "MIT" ]
60
2019-04-03T18:59:35.000Z
2022-02-22T12:05:05.000Z
uhd_restpy/testplatform/sessions/ixnetwork/quicktest/learnframes_58e01d83db5d99bcabff902f5cf6ec51.py
OpenIxia/ixnetwork_restpy
f628db450573a104f327cf3c737ca25586e067ae
[ "MIT" ]
13
2019-05-20T10:48:31.000Z
2021-10-06T07:45:44.000Z
# MIT LICENSE # # Copyright 1997 - 2020 by IXIA Keysight # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. from uhd_restpy.base import Base from uhd_restpy.files import Files from typing import List, Any, Union def Apply(self, *args, **kwargs): # type: (*Any, **Any) -> None """Executes the apply operation on the server. Applies the specified Quick Test. apply(async_operation=bool) --------------------------- - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete. Raises ------ - NotFoundError: The requested resource does not exist on the server - ServerError: The server has encountered an uncategorized error condition """ payload = { "Arg1": self.href } for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i] for item in kwargs.items(): payload[item[0]] = item[1] return self._execute('apply', payload=payload, response_object=None) def ApplyAsync(self, *args, **kwargs): # type: (*Any, **Any) -> None """Executes the applyAsync operation on the server. applyAsync(async_operation=bool) -------------------------------- - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete. Raises ------ - NotFoundError: The requested resource does not exist on the server - ServerError: The server has encountered an uncategorized error condition """ payload = { "Arg1": self.href } for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i] for item in kwargs.items(): payload[item[0]] = item[1] return self._execute('applyAsync', payload=payload, response_object=None) def ApplyAsyncResult(self, *args, **kwargs): # type: (*Any, **Any) -> Union[bool, None] """Executes the applyAsyncResult operation on the server. applyAsyncResult(async_operation=bool)bool ------------------------------------------ - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete. 
- Returns bool: Raises ------ - NotFoundError: The requested resource does not exist on the server - ServerError: The server has encountered an uncategorized error condition """ payload = { "Arg1": self.href } for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i] for item in kwargs.items(): payload[item[0]] = item[1] return self._execute('applyAsyncResult', payload=payload, response_object=None) def ApplyITWizardConfiguration(self, *args, **kwargs): # type: (*Any, **Any) -> None """Executes the applyITWizardConfiguration operation on the server. Applies the specified Quick Test. applyITWizardConfiguration(async_operation=bool) ------------------------------------------------ - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete. Raises ------ - NotFoundError: The requested resource does not exist on the server - ServerError: The server has encountered an uncategorized error condition """ payload = { "Arg1": self.href } for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i] for item in kwargs.items(): payload[item[0]] = item[1] return self._execute('applyITWizardConfiguration', payload=payload, response_object=None) def GenerateReport(self, *args, **kwargs): # type: (*Any, **Any) -> Union[str, None] """Executes the generateReport operation on the server. Generate a PDF report for the last succesfull test run. generateReport(async_operation=bool)string ------------------------------------------ - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete. - Returns str: This method is asynchronous and has no return value. Raises ------ - NotFoundError: The requested resource does not exist on the server - ServerError: The server has encountered an uncategorized error condition """ payload = { "Arg1": self.href } for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i] for item in kwargs.items(): payload[item[0]] = item[1] return self._execute('generateReport', payload=payload, response_object=None) def Run(self, *args, **kwargs): # type: (*Any, **Any) -> Union[List[str], None] """Executes the run operation on the server. Starts the specified Quick Test and waits for its execution to finish. The IxNetwork model allows for multiple method Signatures with the same name while python does not. run(async_operation=bool)list ----------------------------- - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete. - Returns list(str): This method is synchronous and returns the result of the test. run(InputParameters=string, async_operation=bool)list ----------------------------------------------------- - InputParameters (str): The input arguments of the test. - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete. - Returns list(str): This method is synchronous and returns the result of the test. 
Raises ------ - NotFoundError: The requested resource does not exist on the server - ServerError: The server has encountered an uncategorized error condition """ payload = { "Arg1": self.href } for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i] for item in kwargs.items(): payload[item[0]] = item[1] return self._execute('run', payload=payload, response_object=None) def Start(self, *args, **kwargs): # type: (*Any, **Any) -> None """Executes the start operation on the server. Starts the specified Quick Test. The IxNetwork model allows for multiple method Signatures with the same name while python does not. start(async_operation=bool) --------------------------- - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete. start(InputParameters=string, async_operation=bool) --------------------------------------------------- - InputParameters (str): The input arguments of the test. - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete. Raises ------ - NotFoundError: The requested resource does not exist on the server - ServerError: The server has encountered an uncategorized error condition """ payload = { "Arg1": self.href } for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i] for item in kwargs.items(): payload[item[0]] = item[1] return self._execute('start', payload=payload, response_object=None) def Stop(self, *args, **kwargs): # type: (*Any, **Any) -> None """Executes the stop operation on the server. Stops the currently running Quick Test. stop(async_operation=bool) -------------------------- - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete. Raises ------ - NotFoundError: The requested resource does not exist on the server - ServerError: The server has encountered an uncategorized error condition """ payload = { "Arg1": self.href } for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i] for item in kwargs.items(): payload[item[0]] = item[1] return self._execute('stop', payload=payload, response_object=None) def WaitForTest(self, *args, **kwargs): # type: (*Any, **Any) -> Union[List[str], None] """Executes the waitForTest operation on the server. Waits for the execution of the specified Quick Test to be completed. waitForTest(async_operation=bool)list ------------------------------------- - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete. - Returns list(str): This method is synchronous and returns the result of the test. Raises ------ - NotFoundError: The requested resource does not exist on the server - ServerError: The server has encountered an uncategorized error condition """ payload = { "Arg1": self.href } for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i] for item in kwargs.items(): payload[item[0]] = item[1] return self._execute('waitForTest', payload=payload, response_object=None)
44.728507
304
0.644917
d9c7f3fdaa6dbe4abf7e68c6052896f817807b98
190
py
Python
core/serializers.py
telminov/sonm-cdn-cms
e51107e3baed9e633e54db6cd7f784178f531b4a
[ "MIT" ]
1
2018-08-31T17:40:14.000Z
2018-08-31T17:40:14.000Z
core/serializers.py
telminov/sonm-cdn-cms
e51107e3baed9e633e54db6cd7f784178f531b4a
[ "MIT" ]
null
null
null
core/serializers.py
telminov/sonm-cdn-cms
e51107e3baed9e633e54db6cd7f784178f531b4a
[ "MIT" ]
null
null
null
from rest_framework import serializers

from core import models
19
51
0.731579
d9c7f680a10afbb210d6a7c50f3b0ac7716821e0
190
py
Python
tests/wasp1/AllAnswerSets/aggregates_count_boundvariables_1.test.py
bernardocuteri/wasp
05c8f961776dbdbf7afbf905ee00fc262eba51ad
[ "Apache-2.0" ]
19
2015-12-03T08:53:45.000Z
2022-03-31T02:09:43.000Z
tests/wasp1/AllAnswerSets/aggregates_count_boundvariables_1.test.py
bernardocuteri/wasp
05c8f961776dbdbf7afbf905ee00fc262eba51ad
[ "Apache-2.0" ]
80
2017-11-25T07:57:32.000Z
2018-06-10T19:03:30.000Z
tests/wasp1/AllAnswerSets/aggregates_count_boundvariables_1.test.py
bernardocuteri/wasp
05c8f961776dbdbf7afbf905ee00fc262eba51ad
[ "Apache-2.0" ]
6
2015-01-15T07:51:48.000Z
2020-06-18T14:47:48.000Z
input = """ c(2). p(1). a(2). d(2,2,1). okay(X):- c(X), #count{V:a(V),d(V,X,1)} = 1. ouch(X):- p(X), #count{V:a(V),d(V,X,1)} = 1. """ output = """ {a(2), c(2), d(2,2,1), okay(2), p(1)} """
14.615385
44
0.4
d9c9b89785f6cfc7757c2e1d1d401d256c20d14f
2,567
py
Python
Pzzzzz/plugins/wm.py
Pzzzzz5142/animal-forest-QQ-group-bot
a9141a212a7746ac95d28459ec9cec5b6c188b35
[ "MIT" ]
5
2020-05-28T06:29:33.000Z
2020-09-30T12:14:46.000Z
Pzzzzz/plugins/wm.py
Pzzzzz5142/xjbx-QQ-group-bot
a9141a212a7746ac95d28459ec9cec5b6c188b35
[ "MIT" ]
null
null
null
Pzzzzz/plugins/wm.py
Pzzzzz5142/xjbx-QQ-group-bot
a9141a212a7746ac95d28459ec9cec5b6c188b35
[ "MIT" ]
null
null
null
from nonebot import CommandSession, on_command
from langdetect import detect, detect_langs
from aiohttp import ClientSession
from nonebot import get_bot
from nonebot.argparse import ArgumentParser
import time
import hmac
import random, sys
import hashlib
import binascii
import urllib

bot = get_bot()

# API,[email protected]
# coding=utf-8

import hashlib
import urllib
import random
27.021053
75
0.592131
d9ca7d1ad949a33a39144490cd6ec4bc4a1910a2
5,375
py
Python
home/scripts/memory/lpsolve.py
ParksProjets/Mips-Applications
d4284a5ee357b0e5f348b9af28bb0d90c036ae99
[ "MIT" ]
1
2019-01-08T08:41:22.000Z
2019-01-08T08:41:22.000Z
home/scripts/memory/lpsolve.py
ParksProjets/Mips-Applications
d4284a5ee357b0e5f348b9af28bb0d90c036ae99
[ "MIT" ]
null
null
null
home/scripts/memory/lpsolve.py
ParksProjets/Mips-Applications
d4284a5ee357b0e5f348b9af28bb0d90c036ae99
[ "MIT" ]
null
null
null
""" LpSolve wrapper. Copyright (C) 2018, Guillaume Gonnet License MIT """ from ctypes import * import sys import os.path as path import platform # Import the DLL ver = ("x86", "x64")[sys.maxsize > 2**32] here = path.dirname(__file__) if sys.platform == "win32": lib = windll.LoadLibrary(path.abspath(path.join(here, "dll/lpsolve55-%s.dll" % ver))) elif sys.platform == "linux": lib = cdll.LoadLibrary(path.abspath(path.join(here, "dll/lpsolve55-%s.so" % ver))) else: raise ValueError("Can't load LpSolve library on this platform.") # Make the bindings c_double_p = POINTER(c_double) c_int_p = POINTER(c_int) lib.make_lp.argtypes = [c_int, c_int] lib.make_lp.restype = c_void_p lib.delete_lp.argtypes = [c_void_p] lib.set_binary.argtypes = [c_void_p, c_int, c_ubyte] lib.set_binary.restype = c_ubyte lib.set_int.argtypes = [c_void_p, c_int, c_ubyte] lib.set_int.restype = c_ubyte lib.add_constraintex.argtypes = [c_void_p, c_int, c_double_p, c_int_p, c_int, c_double] lib.add_constraintex.restype = c_ubyte lib.set_obj_fnex.argtypes = [c_void_p, c_int, c_double_p, c_int_p] lib.set_obj_fnex.restype = c_ubyte lib.set_add_rowmode.argtypes = [c_void_p, c_ubyte] lib.set_add_rowmode.restype = c_ubyte lib.set_maxim.argtypes = [c_void_p] lib.write_lp.argtypes = [c_void_p, c_char_p] lib.write_lp.restype = c_ubyte lib.set_verbose.argtypes = [c_void_p, c_int] lib.solve.argtypes = [c_void_p] lib.solve.restype = c_int lib.get_variables.argtypes = [c_void_p, c_double_p] lib.get_variables.restype = c_ubyte
25.116822
92
0.630884
d9ca842cbdc63c54359e746c423beca4af1124b3
118,727
py
Python
octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks.py
mauroseb/octavia
8f032d884e0f89ac69d5b6e5f5b77d19ee6eb1d7
[ "Apache-2.0" ]
null
null
null
octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks.py
mauroseb/octavia
8f032d884e0f89ac69d5b6e5f5b77d19ee6eb1d7
[ "Apache-2.0" ]
null
null
null
octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks.py
mauroseb/octavia
8f032d884e0f89ac69d5b6e5f5b77d19ee6eb1d7
[ "Apache-2.0" ]
null
null
null
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import random

from cryptography import fernet
import mock
from oslo_db import exception as odb_exceptions
from oslo_utils import uuidutils
from sqlalchemy.orm import exc
from taskflow.types import failure

from octavia.common import constants
from octavia.common import data_models
from octavia.common import utils
from octavia.controller.worker.v2.tasks import database_tasks
from octavia.db import repositories as repo
import octavia.tests.unit.base as base


AMP_ID = uuidutils.generate_uuid()
COMPUTE_ID = uuidutils.generate_uuid()
LB_ID = uuidutils.generate_uuid()
SERVER_GROUP_ID = uuidutils.generate_uuid()
LB_NET_IP = '192.0.2.2'
LISTENER_ID = uuidutils.generate_uuid()
POOL_ID = uuidutils.generate_uuid()
HM_ID = uuidutils.generate_uuid()
MEMBER_ID = uuidutils.generate_uuid()
PORT_ID = uuidutils.generate_uuid()
SUBNET_ID = uuidutils.generate_uuid()
VRRP_PORT_ID = uuidutils.generate_uuid()
HA_PORT_ID = uuidutils.generate_uuid()
L7POLICY_ID = uuidutils.generate_uuid()
L7RULE_ID = uuidutils.generate_uuid()
VIP_IP = '192.0.5.2'
VRRP_IP = '192.0.5.3'
HA_IP = '192.0.5.4'
AMP_ROLE = 'FAKE_ROLE'
VRRP_ID = random.randrange(255)
VRRP_PRIORITY = random.randrange(100)
CACHED_ZONE = 'zone1'
IMAGE_ID = uuidutils.generate_uuid()
COMPUTE_FLAVOR = uuidutils.generate_uuid()

_amphora_mock = mock.MagicMock()
_amphora_mock.id = AMP_ID
_amphora_mock.compute_id = COMPUTE_ID
_amphora_mock.lb_network_ip = LB_NET_IP
_amphora_mock.vrrp_ip = VRRP_IP
_amphora_mock.ha_ip = HA_IP
_amphora_mock.ha_port_id = HA_PORT_ID
_amphora_mock.vrrp_port_id = VRRP_PORT_ID
_amphora_mock.role = AMP_ROLE
_amphora_mock.vrrp_id = VRRP_ID
_amphora_mock.vrrp_priority = VRRP_PRIORITY
_amphorae = [_amphora_mock]
_loadbalancer_mock = mock.MagicMock()
_loadbalancer_mock.id = LB_ID
_loadbalancer_mock.amphorae = [_amphora_mock]
_l7policy_mock = mock.MagicMock()
_l7policy_mock.id = L7POLICY_ID
_l7rule_mock = mock.MagicMock()
_l7rule_mock.id = L7RULE_ID
_listener_mock = mock.MagicMock()
_listener_to_dict_mock = mock.MagicMock(
    return_value={'id': LISTENER_ID})
_listener_mock.id = LISTENER_ID
_listener_mock.to_dict = _listener_to_dict_mock
_tf_failure_mock = mock.Mock(spec=failure.Failure)
_vip_mock = mock.MagicMock()
_vip_mock.port_id = PORT_ID
_vip_mock.subnet_id = SUBNET_ID
_vip_mock.ip_address = VIP_IP
_vrrp_group_mock = mock.MagicMock()
_cert_mock = mock.MagicMock()
_compute_mock = mock.MagicMock()
_compute_mock.lb_network_ip = LB_NET_IP
_compute_mock.cached_zone = CACHED_ZONE
_compute_mock.image_id = IMAGE_ID
_compute_mock.compute_flavor = COMPUTE_FLAVOR
42.191542
79
0.568691
d9caf13b41f36d2f1d5f56fee8cc8d3745513f23
18,402
py
Python
Yellow_Pages_Lithuania/unit_tests.py
Jay4C/Web-Scraping
187679bee035dad661d983b5a8382240f820c337
[ "MIT" ]
1
2022-02-28T05:05:06.000Z
2022-02-28T05:05:06.000Z
Yellow_Pages_Lithuania/unit_tests.py
Jay4C/Web-Scraping
187679bee035dad661d983b5a8382240f820c337
[ "MIT" ]
23
2020-03-04T22:17:32.000Z
2021-01-21T09:35:33.000Z
Yellow_Pages_Lithuania/unit_tests.py
Jay4C/Web-Scraping
187679bee035dad661d983b5a8382240f820c337
[ "MIT" ]
null
null
null
import time
from bs4 import BeautifulSoup
import requests
import pymysql.cursors
import unittest


if __name__ == '__main__':
    unittest.main()
53.0317
119
0.395392
d9cb561a08fd3aac17d5adf4c0665d1418e60e6a
3,262
py
Python
python_modules/dagster/dagster_tests/compat_tests/test_back_compat.py
vatervonacht/dagster
595d78c883ef20618052ac1575fe46cde51fd541
[ "Apache-2.0" ]
3
2020-04-28T16:27:33.000Z
2020-07-22T07:43:30.000Z
python_modules/dagster/dagster_tests/compat_tests/test_back_compat.py
vatervonacht/dagster
595d78c883ef20618052ac1575fe46cde51fd541
[ "Apache-2.0" ]
null
null
null
python_modules/dagster/dagster_tests/compat_tests/test_back_compat.py
vatervonacht/dagster
595d78c883ef20618052ac1575fe46cde51fd541
[ "Apache-2.0" ]
1
2021-02-21T12:16:47.000Z
2021-02-21T12:16:47.000Z
# pylint: disable=protected-access
import os
import re

import pytest

from dagster import file_relative_path
from dagster.core.errors import DagsterInstanceMigrationRequired
from dagster.core.instance import DagsterInstance, InstanceRef
from dagster.utils.test import restore_directory

# test that we can load runs and events from an old instance
42.363636
100
0.698038
d9cb7d0cdfc5b919d86c41747507b434bce2ff4e
2,595
py
Python
scripts/charts.py
yshrdbrn/bigdata
51114ae98354ee094e0bcff26c1814f85c434148
[ "MIT" ]
null
null
null
scripts/charts.py
yshrdbrn/bigdata
51114ae98354ee094e0bcff26c1814f85c434148
[ "MIT" ]
1
2020-02-01T04:53:43.000Z
2020-02-01T04:53:43.000Z
scripts/charts.py
yshrdbrn/bigdata
51114ae98354ee094e0bcff26c1814f85c434148
[ "MIT" ]
null
null
null
import matplotlib.pyplot as plt
import pandas as pd


if __name__ == '__main__':
    df = pd.read_csv('../data/crimes_dataset_processed_incomplete.csv')

    group_by_territory(df)
    group_by_year(df)
    group_by_month(df)
    group_by_time_of_day(df)
    group_by_day_of_the_week(df)
    group_by_category(df)
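# NOTE (illustrative, not part of the original file): the group_by_*
# helpers are defined in the portion of the script elided from this
# record. Each one presumably aggregates the crimes dataframe along one
# dimension and plots the counts, along these lines (column name is
# hypothetical):
#
#     def group_by_year(df):
#         df.groupby('year').size().plot(kind='bar', title='Crimes per year')
#         plt.show()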
38.161765
91
0.668208
d9cc56ba272dad2f5e9b82b388ad10350a722906
15,349
py
Python
unittests.py
benjaminkrenn/abcvoting
1e3833a7314d3467de7560f7e531a4c35c6eda08
[ "MIT" ]
null
null
null
unittests.py
benjaminkrenn/abcvoting
1e3833a7314d3467de7560f7e531a4c35c6eda08
[ "MIT" ]
null
null
null
unittests.py
benjaminkrenn/abcvoting
1e3833a7314d3467de7560f7e531a4c35c6eda08
[ "MIT" ]
null
null
null
# Unit tests

import unittest


if __name__ == '__main__':
    unittest.main()
42.167582
79
0.410906
d9cc99c89bae7a8c33f8aa618bc77a5eebb78e7c
7,638
py
Python
Robustness Check/Calculating Risk Factors/calculate_momentum_factor.py
behnoud-bazrafshan/ThesisPortfolio
2edda0109fb8aafc984b5dfc2e59cabb949b4a78
[ "MIT" ]
null
null
null
Robustness Check/Calculating Risk Factors/calculate_momentum_factor.py
behnoud-bazrafshan/ThesisPortfolio
2edda0109fb8aafc984b5dfc2e59cabb949b4a78
[ "MIT" ]
null
null
null
Robustness Check/Calculating Risk Factors/calculate_momentum_factor.py
behnoud-bazrafshan/ThesisPortfolio
2edda0109fb8aafc984b5dfc2e59cabb949b4a78
[ "MIT" ]
null
null
null
import pandas as pd
import numpy as np
import jdatetime

pd.options.mode.chained_assignment = None

# Read Bourseview data for market cap
# Concat all 75 tickers' data
me_list = []
for file_number in range(1, 76):
    print(file_number)
    me_path = f'E:/Thesis/New Sampling/Daily Data - Bourseview/'\
        f'{file_number}.xlsx'
    me_df = pd.read_excel(
        me_path,
        skiprows=7,
        usecols=[2, 3, 11],
        names=['date', 'open', 'market_cap'],
        na_values='-'
    )
    # Change order from old to new dates
    me_df = me_df[::-1].reset_index(drop=True)
    me_df['date'] = me_df['date'].str.replace('-', '')
    # Delete non-traded days
    me_df.dropna(subset=['open'], inplace=True)
    me_df.drop(columns='open', inplace=True)
    # Create monthly dataframe
    me_df = me_df.groupby(me_df['date'].str[:6]).last()
    me_df = me_df.drop(columns=['date']).reset_index()
    me_df.insert(1, 'ticker_num', file_number)
    me_list.append(me_df)
me_df = pd.concat(me_list, ignore_index=True)
me_df = me_df.loc[(me_df['date'] >= '139212') & (me_df['date'] <= '139900')]
me_df.reset_index(drop=True, inplace=True)

# Read rahavard 365 data for calculating returns
close_list = []
for file_number in range(1, 76):
    rahavard_path = f'E:/Thesis/New Sampling/Daily Data - Rahavard 365/'\
        f'{file_number}.txt'
    df = pd.read_csv(
        rahavard_path,
        usecols=[2, 7],
        names=['date', 'close'],
        header=0,
        dtype={'date': str},
        parse_dates=[0]
    )
    # Solve index reading problem, pandas add 2 index to the df
    df.reset_index(drop=True, inplace=True)
    # Convert to shamsi dates
    df['date'] = df['date'].apply(
        lambda x: jdatetime.date.fromgregorian(date=x).strftime('%Y%m%d')
    )
    # Create monthly dataframe
    df = df.groupby(df['date'].str[:6]).last()
    df = df.drop(columns=['date']).reset_index()
    df.insert(1, 'ticker_num', file_number)
    df['monthly_return'] = df['close'].pct_change()
    close_list.append(df)
df = pd.concat(close_list, ignore_index=True)
df = df.loc[(df['date'] >= '139212') & (df['date'] <= '139900')]

# Read index df for indicating open market days
index_path = r'E:\Thesis\New Sampling\TEDPIX\ 6.xls'
index_df = pd.read_excel(
    index_path,
    usecols=[1],
    names=['date'],
    dtype={'date': str}
)
index_df.dropna(inplace=True)
# The list of all months
months = index_df['date'].str[:6].unique().tolist()
# The list of months that we need for calculating market cap
me_months = [
    '139312', '139401', '139402', '139403', '139404', '139405',
    '139406', '139407', '139408', '139409', '139410', '139411',
    '139412', '139501', '139502', '139503', '139504', '139505',
    '139506', '139507', '139508', '139509', '139510', '139511',
    '139512', '139601', '139602', '139603', '139604', '139605',
    '139606', '139607', '139608', '139609', '139610', '139611',
    '139612', '139701', '139702', '139703', '139704', '139705',
    '139706', '139707', '139708', '139709', '139710', '139711',
    '139712', '139801', '139802', '139803', '139804', '139805',
    '139806', '139807', '139808', '139809', '139810', '139811',
    '139812'
]
# The list of months that we need for calculating MOM
mom_months = me_months[1:]

# Merge market cap and price dfs
merged_df = pd.merge(df, me_df, on=['ticker_num', 'date'])
# First, create a NaN column, and then add t-13 prices
merged_df.insert(5, 't-13 price', np.nan)
for month in mom_months:
    # Find t-13 prices
    for ticker in range(1, 76):
        t_13 = months[months.index(month) - 13]
        t_13_condition = (merged_df['date'] == t_13)
        ticker_condition = (merged_df['ticker_num'] == ticker)
        try:
            t_13_price = merged_df.loc[
                t_13_condition & ticker_condition
            ]['close'].values[0]
            previous_month = me_months[me_months.index(month) - 1]
            t_1_condition = (merged_df['date'] == previous_month)
            merged_df.loc[
                (t_1_condition & ticker_condition), 't-13 price'
            ] = t_13_price
        except:
            pass

# Calculate last 12 months return for month t (t-1, t-12)
merged_df['past_year_return'] = (
    (merged_df['close'] / merged_df['t-13 price']) - 1
)

mom_list = []
for month in mom_months:
    # Check t-13 price condition and t-1 market cap condition
    previous_month = months[months.index(month) - 1]
    me_condition = (merged_df['date'] == previous_month)
    mom_condition = (merged_df['past_year_return'].notna())
    portfo_const_df = merged_df.loc[me_condition & mom_condition]
    # Split each month ME into two groups
    conditions = [
        (
            portfo_const_df['market_cap']
            > portfo_const_df['market_cap'].median()
        ),
        (
            portfo_const_df['market_cap']
            <= portfo_const_df['market_cap'].median()
        )
    ]
    portfolio_size = np.select(conditions, ['B', 'S']).tolist()
    portfo_const_df.insert(6, 'size', portfolio_size)
    # Split each me portfolio into 3 MOM group
    q = [0, .3, .7, 1]
    labels = ['L', 'M', 'H']
    x_b = portfo_const_df.loc[
        portfo_const_df['size'] == 'B'
    ]['past_year_return']
    b_mom = pd.qcut(x=x_b, q=q, labels=labels).to_dict()
    x_s = portfo_const_df.loc[
        portfo_const_df['size'] == 'S'
    ]['past_year_return']
    s_mom = pd.qcut(x=x_s, q=q, labels=labels).to_dict()
    portfo_const_df['mom'] = pd.Series(b_mom)
    portfo_const_df['mom'].update(pd.Series(s_mom))
    # Extract portfolio ticker numbers
    portfo_const_df['portfolio'] = (
        portfo_const_df['size'] + portfo_const_df['mom']
    )
    bh = portfo_const_df.loc[
        portfo_const_df['portfolio'] == 'BH'
    ]['ticker_num'].tolist()
    bl = portfo_const_df.loc[
        portfo_const_df['portfolio'] == 'BL'
    ]['ticker_num'].tolist()
    sh = portfo_const_df.loc[
        portfo_const_df['portfolio'] == 'SH'
    ]['ticker_num'].tolist()
    sl = portfo_const_df.loc[
        portfo_const_df['portfolio'] == 'SL'
    ]['ticker_num'].tolist()
    # Calculating value-weighted return for each portfolio in month t
    # Set conditions
    month_condition = (merged_df['date'] == month)
    bh_condition = merged_df['ticker_num'].isin(bh)
    bl_condition = merged_df['ticker_num'].isin(bl)
    sh_condition = merged_df['ticker_num'].isin(sh)
    sl_condition = merged_df['ticker_num'].isin(sl)
    # Construct portfolios
    bh_portfolio = merged_df.loc[month_condition & bh_condition]
    bl_portfolio = merged_df.loc[month_condition & bl_condition]
    sh_portfolio = merged_df.loc[month_condition & sh_condition]
    sl_portfolio = merged_df.loc[month_condition & sl_condition]
    # Calculate value-weighted returns
    bh_return = np.average(
        bh_portfolio.monthly_return, weights=bh_portfolio.market_cap
    )
    bl_return = np.average(
        bl_portfolio.monthly_return, weights=bl_portfolio.market_cap
    )
    sh_return = np.average(
        sh_portfolio.monthly_return, weights=sh_portfolio.market_cap
    )
    sl_return = np.average(
        sl_portfolio.monthly_return, weights=sl_portfolio.market_cap
    )
    # Calculate MOM, and add it to a list
    mom = (
        ((sh_return + bh_return) / 2) - ((sl_return + bl_return) / 2)
    )
    mom_list.append(mom)

mom_df = pd.Series(mom_list).to_excel('mom.xlsx')
38.38191
77
0.612857
d9cdaf9a83cf7f7590823c87b5b4ab6e714294e0
4,632
py
Python
source/lambda/geoip_downloader/index.py
aws-samples/siem-on-amazon-opensearch-service
9bac87d39e9fab04f483bae54ffe94948af096ff
[ "MIT-0" ]
92
2021-09-14T06:41:06.000Z
2022-03-31T09:52:07.000Z
source/lambda/geoip_downloader/index.py
aws-samples/siem-on-amazon-opensearch-service
9bac87d39e9fab04f483bae54ffe94948af096ff
[ "MIT-0" ]
74
2021-09-18T01:46:47.000Z
2022-03-28T10:46:59.000Z
source/lambda/geoip_downloader/index.py
aws-samples/siem-on-amazon-opensearch-service
9bac87d39e9fab04f483bae54ffe94948af096ff
[ "MIT-0" ]
42
2021-09-16T23:00:00.000Z
2022-03-29T15:11:43.000Z
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: MIT-0 __copyright__ = ('Copyright Amazon.com, Inc. or its affiliates. ' 'All Rights Reserved.') __version__ = '2.7.1' __license__ = 'MIT-0' __author__ = 'Akihiro Nakajima' __url__ = 'https://github.com/aws-samples/siem-on-amazon-opensearch-service' import hashlib import json import os import tarfile import urllib.error import urllib.parse import urllib.request import boto3 # get var from lambda environment try: s3bucket_name = os.environ['s3bucket_name'] license_key = os.environ['license_key'] except KeyError: raise Exception('ERROR: impossible to get lambda environment') s3key_prefix = os.environ.get('s3key_prefix', 'GeoLite2/') s3 = boto3.resource('s3') bucket = s3.Bucket(s3bucket_name) url = 'https://download.maxmind.com/app/geoip_download?' put_files = ['GeoLite2-City', 'GeoLite2-ASN', 'GeoLite2-Country']
34.827068
122
0.633636
d9cdbec7cf44be7c5e8dcf70bed770879dcd7e21
16,679
py
Python
components/mroipac/baseline/Baseline.py
earthobservatory/isce2
655c46cc4add275879167b750a5e91f6d00f168e
[ "ECL-2.0", "Apache-2.0" ]
1
2019-10-06T12:21:02.000Z
2019-10-06T12:21:02.000Z
components/mroipac/baseline/Baseline.py
earthobservatory/isce2
655c46cc4add275879167b750a5e91f6d00f168e
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
components/mroipac/baseline/Baseline.py
earthobservatory/isce2
655c46cc4add275879167b750a5e91f6d00f168e
[ "ECL-2.0", "Apache-2.0" ]
2
2021-06-24T20:20:18.000Z
2021-06-24T20:32:23.000Z
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Copyright 2010 California Institute of Technology. ALL RIGHTS RESERVED. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # United States Government Sponsorship acknowledged. This software is subject to # U.S. export control laws and regulations and has been classified as 'EAR99 NLR' # (No [Export] License Required except when exporting to an embargoed country, # end user, or in support of a prohibited end use). By downloading this software, # the user agrees to comply with all applicable U.S. export laws and regulations. # The user has the responsibility to obtain export licenses, or other export # authority as may be required before exporting this software to any 'EAR99' # embargoed foreign country or citizen of those countries. # # Author: Giangi Sacco #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ import math import datetime import logging from iscesys.Component.Component import Component, Port from isceobj.Util.mathModule import MathModule as MM from isceobj.Orbit.Orbit import StateVector # A class to hold three-dimensional basis vectors # A class to hold three-dimensional basis vectors for spacecraft baselines BASELINE_LOCATION = Component.Parameter('baselineLocation', public_name = 'BASELINE_LOCATION', default = 'all', type=str, mandatory=False, doc = ('Location at which to compute baselines - "all" implies '+ 'top, middle, bottom of master image, '+ '"top" implies near start of master image, '+ '"bottom" implies at bottom of master image, '+ '"middle" implies near middle of master image. '+ 'To be used in case there is a large shift between images.') )
39.523697
202
0.655495
d9cf50080cfd2da35179773577dfa101c0a0615b
1,106
py
Python
src/modules/deuces/deck.py
Bot-Box/FiveCardStud
55e11d7a23becece33658075f922cf007909d058
[ "MIT" ]
null
null
null
src/modules/deuces/deck.py
Bot-Box/FiveCardStud
55e11d7a23becece33658075f922cf007909d058
[ "MIT" ]
1
2020-05-09T20:27:33.000Z
2020-05-09T20:27:33.000Z
src/modules/deuces/deck.py
Bot-Box/FiveCardStud
55e11d7a23becece33658075f922cf007909d058
[ "MIT" ]
null
null
null
from random import shuffle as rshuffle from .card import Card
25.136364
79
0.605787
d9cfb448c497219965f4d51af8838d801a58ed41
21,000
py
Python
openidc_client/__init__.py
puiterwijk/python-openidc-client
cd8d91c0503124305727f38a0f9fe93bb472209c
[ "MIT" ]
6
2017-03-16T13:32:11.000Z
2021-06-21T19:12:21.000Z
openidc_client/__init__.py
puiterwijk/python-openidc-client
cd8d91c0503124305727f38a0f9fe93bb472209c
[ "MIT" ]
5
2017-03-23T19:50:36.000Z
2022-01-25T04:45:27.000Z
openidc_client/__init__.py
puiterwijk/python-openidc-client
cd8d91c0503124305727f38a0f9fe93bb472209c
[ "MIT" ]
4
2017-03-21T17:34:28.000Z
2022-01-24T06:16:19.000Z
# -*- coding: utf-8 -*- # # Copyright (C) 2016, 2017 Red Hat, Inc. # Red Hat Author: Patrick Uiterwijk <[email protected]> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. """Client for applications relying on OpenID Connect for authentication.""" from __future__ import print_function from copy import copy import json import logging from threading import Lock import time try: from StringIO import StringIO except ImportError: from io import StringIO import socket import os try: from urllib import urlencode except ImportError: from urllib.parse import urlencode from uuid import uuid4 as uuidgen import webbrowser from wsgiref import simple_server import requests import sys from openidc_client import release # The ports that we will try to use for our webserver WEB_PORTS = [12345, 23456]
40.384615
87
0.617
d9cfea74cbe1fffe3e3d0849bdd6679785142bf0
7,159
py
Python
eoxserver/services/ows/wps/v10/encoders/parameters.py
constantinius/eoxserver_combined
68f261133fed65a4e8a6ddba82b0d2845171e4bf
[ "OML" ]
1
2017-11-21T22:23:30.000Z
2017-11-21T22:23:30.000Z
eoxserver/services/ows/wps/v10/encoders/parameters.py
constantinius/eoxserver_combined
68f261133fed65a4e8a6ddba82b0d2845171e4bf
[ "OML" ]
null
null
null
eoxserver/services/ows/wps/v10/encoders/parameters.py
constantinius/eoxserver_combined
68f261133fed65a4e8a6ddba82b0d2845171e4bf
[ "OML" ]
null
null
null
#------------------------------------------------------------------------------- # # WPS 1.0 parameters' XML encoders # # Project: EOxServer <http://eoxserver.org> # Authors: Fabian Schindler <[email protected]> # Martin Paces <[email protected]> # #------------------------------------------------------------------------------- # Copyright (C) 2013 EOX IT Services GmbH # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies of this Software or works derived from this Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. #------------------------------------------------------------------------------- from eoxserver.services.ows.wps.parameters import ( LiteralData, ComplexData, BoundingBoxData, AllowedAny, AllowedEnum, AllowedRange, AllowedRangeCollection, AllowedByReference, ) from eoxserver.services.ows.wps.v10.util import ( OWS, WPS, NIL, ns_ows, ) #------------------------------------------------------------------------------- def encode_input_descr(prm): """ Encode process description input.""" elem = NIL("Input", *_encode_param_common(prm)) elem.attrib["minOccurs"] = ("1", "0")[bool(prm.is_optional)] elem.attrib["maxOccurs"] = "1" if isinstance(prm, LiteralData): elem.append(_encode_literal(prm, True)) elif isinstance(prm, ComplexData): elem.append(_encode_complex(prm, True)) elif isinstance(prm, BoundingBoxData): elem.append(_encode_bbox(prm, True)) return elem def encode_output_descr(prm): """ Encode process description output.""" elem = NIL("Output", *_encode_param_common(prm)) if isinstance(prm, LiteralData): elem.append(_encode_literal(prm, False)) elif isinstance(prm, ComplexData): elem.append(_encode_complex(prm, False)) elif isinstance(prm, BoundingBoxData): elem.append(_encode_bbox(prm, False)) return elem def encode_input_exec(prm): """ Encode common part of the execure response data input.""" return WPS("Input", *_encode_param_common(prm, False)) def encode_output_exec(prm): """ Encode common part of the execure response data output.""" return WPS("Output", *_encode_param_common(prm)) def encode_output_def(outdef): """ Encode the execure response output definition.""" attrib = {} if outdef.uom is not None: attrib['uom'] = outdef.uom if outdef.crs is not None: attrib['crs'] = outdef.crs if outdef.mime_type is not None: attrib['mimeType'] = outdef.mime_type if outdef.encoding is not None: attrib['encoding'] = outdef.encoding if outdef.schema is not None: attrib['schema'] = outdef.schema if outdef.as_reference is not None: attrib['asReference'] = 'true' if outdef.as_reference else 'false' return WPS("Output", *_encode_param_common(outdef, False), **attrib) def 
_encode_param_common(prm, title_required=True): """ Encode common sub-elements of all XML parameters.""" elist = [OWS("Identifier", prm.identifier)] if prm.title or title_required: elist.append(OWS("Title", prm.title or prm.identifier)) if prm.abstract: elist.append(OWS("Abstract", prm.abstract)) return elist #------------------------------------------------------------------------------- #------------------------------------------------------------------------------- #-------------------------------------------------------------------------------
38.283422
80
0.621875
d9d317f8ac0c3d87ca7347265d7a9836b41ed098
2,481
py
Python
gci-vci-serverless/src/helpers/vp_saves_helpers.py
ClinGen/gene-and-variant-curation-tools
30f21d8f03d8b5c180c1ce3cb8401b5abc660080
[ "MIT" ]
1
2021-09-17T20:39:07.000Z
2021-09-17T20:39:07.000Z
gci-vci-serverless/src/helpers/vp_saves_helpers.py
ClinGen/gene-and-variant-curation-tools
30f21d8f03d8b5c180c1ce3cb8401b5abc660080
[ "MIT" ]
133
2021-08-29T17:24:26.000Z
2022-03-25T17:24:31.000Z
gci-vci-serverless/src/helpers/vp_saves_helpers.py
ClinGen/gene-and-variant-curation-tools
30f21d8f03d8b5c180c1ce3cb8401b5abc660080
[ "MIT" ]
null
null
null
import datetime import uuid import simplejson as json from src.db.s3_client import Client as S3Client from decimal import Decimal def get_from_archive(archive_key): ''' Download a VP Save from S3. :param str archive_key: The vp_save data's location (S3 bucket and file path). This value is required. ''' if archive_key is None or '/' not in archive_key: raise ValueError() bucket, key = archive_key.split('/', 1) s3_client = S3Client() try: archive_object = json.loads(s3_client.get_object(bucket, key)['Body'].read(),parse_float=Decimal) except Exception as e: print('ERROR: Error downloading ' + key + ' from ' + bucket + ' bucket. ERROR\n%s' %e) raise return archive_object def build(vp_save={}): ''' Builds and returns a valid vp_save object. Builds a new vp_save object by creating default values for required fields and combines any of the given attributes. ''' vp_save['PK'] = str(uuid.uuid4()) # Set timestamps (for new data) now = datetime.datetime.now().isoformat() vp_save['date_created'] = now vp_save['last_modified'] = now vp_save['item_type'] = 'vp_save' return vp_save def archive(bucket, vp_save_pk, save_data): ''' Archives a vp save data to S3. Uploads the save data object as a JSON file to S3. The location of the archive depends on the bucket and the primary key of the save data. If the upload fails, an exception is raised. If successful, returns the archive location. :param str bucket: The name of the S3 bucket for the archive. This value is required. :param str vp_save_pk: The vp_save PK to use as the name of the JSON file. This value is required. :param obj save_data: The save data object to archive. This value is required. ''' if bucket is None or len(bucket) <= 0: raise ValueError() if vp_save_pk is None or len(vp_save_pk) <= 0: raise ValueError() if not save_data: raise ValueError() archive_file = __archive_key(save_data) + '/' + vp_save_pk + '.json' # Upload curation data to S3 archive bucket. s3_client = S3Client() try: s3_client.put_object( bytes(json.dumps(save_data).encode('UTF-8')), bucket, archive_file ) except Exception as e: print('ERROR: Error uploading ' + archive_file + ' to ' + bucket + ' bucket. ERROR\n%s' %e) raise archive_key_comps = [bucket, archive_file] return '/'.join(archive_key_comps)
27.263736
104
0.699315
d9d321dead6bc8e55098581c550215a3e969a2f1
464
py
Python
docs/source/auto_examples/plot_usage.py
ruhugu/brokenaxes
1cfb301c854b3336aeb4dd9a2c329310534dfb21
[ "MIT" ]
362
2017-05-01T10:20:56.000Z
2022-03-29T21:39:09.000Z
docs/source/auto_examples/plot_usage.py
ruhugu/brokenaxes
1cfb301c854b3336aeb4dd9a2c329310534dfb21
[ "MIT" ]
73
2017-04-20T18:54:39.000Z
2021-12-02T08:04:21.000Z
docs/source/auto_examples/plot_usage.py
ruhugu/brokenaxes
1cfb301c854b3336aeb4dd9a2c329310534dfb21
[ "MIT" ]
52
2017-05-04T13:03:25.000Z
2022-03-29T21:39:20.000Z
""" Basic usage =========== This example presents the basic usage of brokenaxes """ import matplotlib.pyplot as plt from brokenaxes import brokenaxes import numpy as np fig = plt.figure(figsize=(5,2)) bax = brokenaxes(xlims=((0, .1), (.4, .7)), ylims=((-1, .7), (.79, 1)), hspace=.05) x = np.linspace(0, 1, 100) bax.plot(x, np.sin(10 * x), label='sin') bax.plot(x, np.cos(10 * x), label='cos') bax.legend(loc=3) bax.set_xlabel('time') bax.set_ylabel('value')
21.090909
83
0.644397
d9d368d362ab070d71b3363fe0fb20728ec9660d
5,985
py
Python
src/entity/002_createRdf.py
toyo-bunko/paper_app
f988e05cf83711d98c5ed735c0fd74fcf11e0f05
[ "Apache-2.0" ]
1
2021-02-28T15:38:37.000Z
2021-02-28T15:38:37.000Z
src/entity/002_createRdf.py
toyo-bunko/paper_app
f988e05cf83711d98c5ed735c0fd74fcf11e0f05
[ "Apache-2.0" ]
null
null
null
src/entity/002_createRdf.py
toyo-bunko/paper_app
f988e05cf83711d98c5ed735c0fd74fcf11e0f05
[ "Apache-2.0" ]
null
null
null
import shutil import os import json import glob import yaml import sys import urllib import ssl import csv import time import requests import json import csv from rdflib import URIRef, BNode, Literal, Graph from rdflib.namespace import RDF, RDFS, FOAF, XSD from rdflib import Namespace all = Graph() with open("data/dict.json") as f: ln_map = json.load(f) st_path = "../data/index.json" with open(st_path) as f: result = json.load(f) uris = [] for obj in result: fields = ["spatial", "agential"] for field in fields: values = obj[field] for value in values: uri = "chname:"+value if field == "spatial": uri = "place:"+value if uri not in uris: uris.append(uri) for uri in uris: print(uri) tmp = uri.split(":") prefix = tmp[0] suffix = tmp[1] ln = suffix ln_org = "" if ln in ln_map: ln_org = ln ln = ln_map[ln] if len(ln) > 20: continue # ln = obj["uri"].split(":")[1] ''' wiki_path = "data/wikidata/"+ln+".json" wiki = {} if os.path.exists(wiki_path): with open(wiki_path) as f: wiki = json.load(f) # sameAs stmt = (subject, URIRef("http://www.w3.org/2002/07/owl#sameAs"), URIRef(wiki_url)) all.add(stmt) obj = wiki["entities"][wiki_url.split("/")[-1]] # description if "descriptions" in obj and "ja" in obj["descriptions"]: stmt = (subject, URIRef("http://schema.org/description"), Literal(obj["descriptions"]["ja"]["value"], lang="ja")) all.add(stmt) # label if "labels" in obj and "ja" in obj["labels"]: stmt = (subject, RDFS.label, Literal(obj["labels"]["ja"]["value"])) all.add(stmt) ln = wiki_url.split("/")[-1] ''' db_path = "data/dbpedia_ja/"+ln+".json" wiki_path = "data/wikidata/"+ln+".json" db = {} wiki = {} if os.path.exists(db_path): with open(db_path) as f: db = json.load(f) if os.path.exists(wiki_path): with open(wiki_path) as f: wiki = json.load(f) db_uri = "http://ja.dbpedia.org/resource/"+ln if db_uri not in db: print("not" , db_uri) continue # ###### subject = URIRef("https://shibusawa-dlab.github.io/lab1/api/"+prefix+"/"+ln) if prefix == "chname": stmt = (subject, RDF.type, URIRef("https://jpsearch.go.jp/term/type/Agent")) all.add(stmt) elif prefix == "time": stmt = (subject, RDF.type, URIRef("https://jpsearch.go.jp/term/type/Time")) all.add(stmt) elif prefix == "place": stmt = (subject, RDF.type, URIRef("https://jpsearch.go.jp/term/type/Place")) all.add(stmt) elif prefix == "event": stmt = (subject, RDF.type, URIRef("https://jpsearch.go.jp/term/type/Event")) all.add(stmt) elif prefix == "org": stmt = (subject, RDF.type, URIRef("https://jpsearch.go.jp/term/type/Organization")) all.add(stmt) elif prefix == "keyword": stmt = (subject, RDF.type, URIRef("https://jpsearch.go.jp/term/type/Keyword")) all.add(stmt) elif prefix == "type": stmt = (subject, RDF.type, URIRef("https://jpsearch.go.jp/term/type/Type")) all.add(stmt) # ###### obj = db[db_uri] stmt = (subject, URIRef("http://www.w3.org/2002/07/owl#sameAs"), URIRef(db_uri)) all.add(stmt) if "http://dbpedia.org/ontology/thumbnail" in obj: stmt = (subject, URIRef("http://schema.org/image"), URIRef(obj["http://dbpedia.org/ontology/thumbnail"][0]["value"])) all.add(stmt) if "http://www.w3.org/2000/01/rdf-schema#label" in obj: labels = obj["http://www.w3.org/2000/01/rdf-schema#label"] for label in labels: if label["lang"] == "ja": stmt = (subject, RDFS.label, Literal(label["value"])) all.add(stmt) if "http://www.w3.org/2000/01/rdf-schema#comment" in obj: labels = obj["http://www.w3.org/2000/01/rdf-schema#comment"] for label in labels: stmt = (subject, URIRef("http://schema.org/description"), Literal(label["value"], lang=label["lang"])) all.add(stmt) 
if "http://www.w3.org/2002/07/owl#sameAs" in obj: labels = obj["http://www.w3.org/2002/07/owl#sameAs"] for label in labels: value = label["value"] if "http://dbpedia.org" in value or "http://ja.dbpedia.org" in value or "www.wikidata.org" in value: stmt = (subject, URIRef("http://www.w3.org/2002/07/owl#sameAs"), URIRef(value)) all.add(stmt) # ''' if "point" in obj and prefix == "place": value = obj["point"]["value"].split(" ") # addGeo geoUri = addGeo({ "lat" : float(value[0]), "long": float(value[1]) }) stmt = (subject, URIRef("http://schema.org/geo"), geoUri) if suffix not in places: places[suffix] = { "lat" : float(value[0]), "long": float(value[1]) } all.add(stmt) ''' # if ln_org != "" and ln != ln_org: stmt = (subject, URIRef("http://schema.org/name"), Literal(ln_org)) all.add(stmt) path = "data/all.json" all.serialize(destination=path, format='json-ld') all.serialize(destination=path.replace(".json", ".rdf"), format='pretty-xml')
29.338235
129
0.513116
d9d4e94302ccb3b8bcc4d40fbc60872ee3780872
2,107
py
Python
client/tests/test_config_read_tool.py
nuft/can-bootloader
18dd77dae1fb2328dac1fd1df2c9e5d5c936771e
[ "BSD-2-Clause" ]
null
null
null
client/tests/test_config_read_tool.py
nuft/can-bootloader
18dd77dae1fb2328dac1fd1df2c9e5d5c936771e
[ "BSD-2-Clause" ]
null
null
null
client/tests/test_config_read_tool.py
nuft/can-bootloader
18dd77dae1fb2328dac1fd1df2c9e5d5c936771e
[ "BSD-2-Clause" ]
null
null
null
import unittest try: from unittest.mock import * except ImportError: from mock import * from msgpack import * import bootloader_read_config from commands import * import sys import json
30.985294
78
0.596108
d9d51a8133c12a74117e8b569f8ace23d5fb49e6
5,499
py
Python
bot.py
Pyrrolidine/letterboxd-bot
b2cd1364e00c3ec6fb70be9c8be7a8b707a8ffbe
[ "MIT" ]
1
2021-03-14T20:01:53.000Z
2021-03-14T20:01:53.000Z
bot.py
Pyrrolidine/letterboxd-bot
b2cd1364e00c3ec6fb70be9c8be7a8b707a8ffbe
[ "MIT" ]
null
null
null
bot.py
Pyrrolidine/letterboxd-bot
b2cd1364e00c3ec6fb70be9c8be7a8b707a8ffbe
[ "MIT" ]
null
null
null
import logging from asyncio import sleep import discord from discord.ext import commands from config import SETTINGS from crew import crew_embed from diary import diary_embed from film import film_embed from helpers import LetterboxdError from list_ import list_embed from review import review_embed from user import user_embed logging.basicConfig( level=logging.INFO, format='%(asctime)s | %(message)s', datefmt='%m/%d %H:%M:%S') bot = commands.Bot(command_prefix='!', case_insensitive=True) bot.remove_command('help') # Commands bot.run(SETTINGS['discord'])
28.640625
78
0.651755
d9d59aa9c4853e8590f823a63f53768b8aecbce1
6,899
py
Python
python/ray/experimental/workflow/execution.py
wgifford/ray
8acb469b047cd9b327c9477a13b030eb7357860e
[ "Apache-2.0" ]
null
null
null
python/ray/experimental/workflow/execution.py
wgifford/ray
8acb469b047cd9b327c9477a13b030eb7357860e
[ "Apache-2.0" ]
32
2021-09-04T07:08:45.000Z
2022-02-19T08:08:11.000Z
python/ray/experimental/workflow/execution.py
wgifford/ray
8acb469b047cd9b327c9477a13b030eb7357860e
[ "Apache-2.0" ]
null
null
null
import asyncio import logging import time from typing import Set, List, Tuple, Optional, TYPE_CHECKING import uuid import ray from ray.experimental.workflow import workflow_context from ray.experimental.workflow import workflow_storage from ray.experimental.workflow.common import (Workflow, WorkflowStatus, WorkflowMetaData, StepType) from ray.experimental.workflow.step_executor import commit_step from ray.experimental.workflow.storage import get_global_storage from ray.experimental.workflow.workflow_access import ( flatten_workflow_output, get_or_create_management_actor, get_management_actor) if TYPE_CHECKING: from ray.experimental.workflow.step_executor import WorkflowExecutionResult logger = logging.getLogger(__name__) def run(entry_workflow: Workflow, workflow_id: Optional[str] = None, overwrite: bool = True) -> ray.ObjectRef: """Run a workflow asynchronously. # TODO(suquark): The current "run" always overwrite existing workflow. # We need to fix this later. """ store = get_global_storage() assert ray.is_initialized() if workflow_id is None: # Workflow ID format: {Entry workflow UUID}.{Unix time to nanoseconds} workflow_id = f"{str(uuid.uuid4())}.{time.time():.9f}" logger.info(f"Workflow job created. [id=\"{workflow_id}\", storage_url=" f"\"{store.storage_url}\"].") with workflow_context.workflow_step_context(workflow_id, store.storage_url): # checkpoint the workflow ws = workflow_storage.get_workflow_storage(workflow_id) commit_step(ws, "", entry_workflow) workflow_manager = get_or_create_management_actor() ignore_existing = (entry_workflow.data.step_type != StepType.FUNCTION) # NOTE: It is important to 'ray.get' the returned output. This # ensures caller of 'run()' holds the reference to the workflow # result. Otherwise if the actor removes the reference of the # workflow output, the caller may fail to resolve the result. result: "WorkflowExecutionResult" = ray.get( workflow_manager.run_or_resume.remote(workflow_id, ignore_existing)) if entry_workflow.data.step_type == StepType.FUNCTION: return flatten_workflow_output(workflow_id, result.persisted_output) else: return flatten_workflow_output(workflow_id, result.volatile_output) # TODO(suquark): support recovery with ObjectRef inputs. def resume(workflow_id: str) -> ray.ObjectRef: """Resume a workflow asynchronously. See "api.resume()" for details. """ storage = get_global_storage() logger.info(f"Resuming workflow [id=\"{workflow_id}\", storage_url=" f"\"{storage.storage_url}\"].") workflow_manager = get_or_create_management_actor() # NOTE: It is important to 'ray.get' the returned output. This # ensures caller of 'run()' holds the reference to the workflow # result. Otherwise if the actor removes the reference of the # workflow output, the caller may fail to resolve the result. result: "WorkflowExecutionResult" = ray.get( workflow_manager.run_or_resume.remote( workflow_id, ignore_existing=False)) logger.info(f"Workflow job {workflow_id} resumed.") return flatten_workflow_output(workflow_id, result.persisted_output) def get_output(workflow_id: str, name: Optional[str]) -> ray.ObjectRef: """Get the output of a running workflow. See "api.get_output()" for details. """ assert ray.is_initialized() try: workflow_manager = get_management_actor() except ValueError as e: raise ValueError( "Failed to connect to the workflow management " "actor. The workflow could have already failed. 
You can use " "workflow.resume() to resume the workflow.") from e output = ray.get(workflow_manager.get_output.remote(workflow_id, name)) return flatten_workflow_output(workflow_id, output)
40.345029
79
0.681258
d9d5b48647e38ebb7586e30d71d263a91ce8bc1b
156
py
Python
src/zeep/wsse/__init__.py
bertonha/python-zeep
748f4e028db2ef498bc6dd1e60d3555b7688f08c
[ "MIT" ]
null
null
null
src/zeep/wsse/__init__.py
bertonha/python-zeep
748f4e028db2ef498bc6dd1e60d3555b7688f08c
[ "MIT" ]
null
null
null
src/zeep/wsse/__init__.py
bertonha/python-zeep
748f4e028db2ef498bc6dd1e60d3555b7688f08c
[ "MIT" ]
null
null
null
from .compose import Compose # noqa from .signature import BinarySignature, Signature, MemorySignature # noqa from .username import UsernameToken # noqa
39
74
0.801282
d9d5cc7533855c3c985b9ccbdc0f7d78d12441b1
746
py
Python
Complab assignment.py
peteboi/Python-Scripts
d84e352c41cff3f459d88c83bc81f6dc2f25ed05
[ "MIT" ]
null
null
null
Complab assignment.py
peteboi/Python-Scripts
d84e352c41cff3f459d88c83bc81f6dc2f25ed05
[ "MIT" ]
null
null
null
Complab assignment.py
peteboi/Python-Scripts
d84e352c41cff3f459d88c83bc81f6dc2f25ed05
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- import numpy as np import matplotlib.pyplot as plt dt=0.1 t = np.arange(0,10,dt) y0=np.array([10, 0.0, 10, 10]) sol_rk4=RK4_int(orbit,y0,t) x,y,v_x,v_y = sol_rk4.T plt.grid() plt.plot(x,y) plt.show()
19.128205
51
0.518767
d9d5e8ec4bcf85e917876d27f935eeb707d35bc9
675
py
Python
factory_generator/management/commands/generate_factories.py
gamabounty/django-factory-generator
284184b22f3564a7a915ac3f3363e588d3721158
[ "MIT" ]
10
2019-04-19T03:00:09.000Z
2022-02-23T16:17:43.000Z
factory_generator/management/commands/generate_factories.py
charlesthk/django-factory-generator
cd0f7aa5b4ecc2bbe8f30a081238056c653d7265
[ "MIT" ]
2
2020-05-10T00:40:51.000Z
2021-02-28T11:31:26.000Z
factory_generator/management/commands/generate_factories.py
charlesthk/django-factory-generator
cd0f7aa5b4ecc2bbe8f30a081238056c653d7265
[ "MIT" ]
6
2019-12-19T16:26:00.000Z
2021-05-13T23:42:35.000Z
import os from django.apps import apps from django.core.management.base import BaseCommand from factory_generator.generator import FactoryAppGenerator
33.75
80
0.715556
d9d66c8e24ecdddf4d2ecdc3b422d09645a2f485
3,021
py
Python
mro/stages/analyzer/run_differential_expression/__init__.py
qiangli/cellranger
046e24c3275cfbd4516a6ebc064594513a5c45b7
[ "MIT" ]
1
2019-03-29T04:05:58.000Z
2019-03-29T04:05:58.000Z
mro/stages/analyzer/run_differential_expression/__init__.py
qiangli/cellranger
046e24c3275cfbd4516a6ebc064594513a5c45b7
[ "MIT" ]
null
null
null
mro/stages/analyzer/run_differential_expression/__init__.py
qiangli/cellranger
046e24c3275cfbd4516a6ebc064594513a5c45b7
[ "MIT" ]
null
null
null
#!/usr/bin/env python # # Copyright (c) 2017 10X Genomics, Inc. All rights reserved. # import cellranger.analysis.diffexp as cr_diffexp import cellranger.analysis.io as analysis_io from cellranger.analysis.singlegenome import SingleGenomeAnalysis import cellranger.h5_constants as h5_constants import cellranger.analysis.constants as analysis_constants import cellranger.matrix as cr_matrix import cellranger.io as cr_io import cellranger.library_constants as lib_constants NUM_THREADS_MIN = 4 #TODO Not clear why this stage takes > 1 thread. Martian thinks it does and kills it on long jobs __MRO__ = """ stage RUN_DIFFERENTIAL_EXPRESSION( in h5 matrix_h5, in h5 clustering_h5, in bool skip, in int random_seed, in int max_clusters, out h5 diffexp_h5, out path diffexp_csv, src py "stages/analyzer/run_differential_expression", ) split using ( in string clustering_key, ) """
35.541176
125
0.735849
d9d80db949c5d5f415b809076411a2404da55e53
10,912
py
Python
sympy/combinatorics/testutil.py
ethankward/sympy
44664d9f625a1c68bc492006cfe1012cb0b49ee4
[ "BSD-3-Clause" ]
2
2019-05-18T22:36:49.000Z
2019-05-24T05:56:16.000Z
sympy/combinatorics/testutil.py
ethankward/sympy
44664d9f625a1c68bc492006cfe1012cb0b49ee4
[ "BSD-3-Clause" ]
1
2020-04-22T12:45:26.000Z
2020-04-22T12:45:26.000Z
sympy/combinatorics/testutil.py
ethankward/sympy
44664d9f625a1c68bc492006cfe1012cb0b49ee4
[ "BSD-3-Clause" ]
3
2021-02-16T16:40:49.000Z
2022-03-07T18:28:41.000Z
from sympy.combinatorics import Permutation from sympy.combinatorics.util import _distribute_gens_by_base rmul = Permutation.rmul def _cmp_perm_lists(first, second): """ Compare two lists of permutations as sets. This is used for testing purposes. Since the array form of a permutation is currently a list, Permutation is not hashable and cannot be put into a set. Examples ======== >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.combinatorics.testutil import _cmp_perm_lists >>> a = Permutation([0, 2, 3, 4, 1]) >>> b = Permutation([1, 2, 0, 4, 3]) >>> c = Permutation([3, 4, 0, 1, 2]) >>> ls1 = [a, b, c] >>> ls2 = [b, c, a] >>> _cmp_perm_lists(ls1, ls2) True """ return {tuple(a) for a in first} == \ {tuple(a) for a in second} def _verify_bsgs(group, base, gens): """ Verify the correctness of a base and strong generating set. This is a naive implementation using the definition of a base and a strong generating set relative to it. There are other procedures for verifying a base and strong generating set, but this one will serve for more robust testing. Examples ======== >>> from sympy.combinatorics.named_groups import AlternatingGroup >>> from sympy.combinatorics.testutil import _verify_bsgs >>> A = AlternatingGroup(4) >>> A.schreier_sims() >>> _verify_bsgs(A, A.base, A.strong_gens) True See Also ======== sympy.combinatorics.perm_groups.PermutationGroup.schreier_sims """ from sympy.combinatorics.perm_groups import PermutationGroup strong_gens_distr = _distribute_gens_by_base(base, gens) current_stabilizer = group for i in range(len(base)): candidate = PermutationGroup(strong_gens_distr[i]) if current_stabilizer.order() != candidate.order(): return False current_stabilizer = current_stabilizer.stabilizer(base[i]) if current_stabilizer.order() != 1: return False return True def _verify_centralizer(group, arg, centr=None): """ Verify the centralizer of a group/set/element inside another group. This is used for testing ``.centralizer()`` from ``sympy.combinatorics.perm_groups`` Examples ======== >>> from sympy.combinatorics.named_groups import (SymmetricGroup, ... AlternatingGroup) >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.combinatorics.testutil import _verify_centralizer >>> S = SymmetricGroup(5) >>> A = AlternatingGroup(5) >>> centr = PermutationGroup([Permutation([0, 1, 2, 3, 4])]) >>> _verify_centralizer(S, A, centr) True See Also ======== _naive_list_centralizer, sympy.combinatorics.perm_groups.PermutationGroup.centralizer, _cmp_perm_lists """ if centr is None: centr = group.centralizer(arg) centr_list = list(centr.generate_dimino(af=True)) centr_list_naive = _naive_list_centralizer(group, arg, af=True) return _cmp_perm_lists(centr_list, centr_list_naive) def canonicalize_naive(g, dummies, sym, *v): """ Canonicalize tensor formed by tensors of the different types g permutation representing the tensor dummies list of dummy indices msym symmetry of the metric v is a list of (base_i, gens_i, n_i, sym_i) for tensors of type `i` base_i, gens_i BSGS for tensors of this type n_i number ot tensors of type `i` sym_i symmetry under exchange of two component tensors of type `i` None no symmetry 0 commuting 1 anticommuting Return 0 if the tensor is zero, else return the array form of the permutation representing the canonical form of the tensor. 
Examples ======== >>> from sympy.combinatorics.testutil import canonicalize_naive >>> from sympy.combinatorics.tensor_can import get_symmetric_group_sgs >>> from sympy.combinatorics import Permutation, PermutationGroup >>> g = Permutation([1, 3, 2, 0, 4, 5]) >>> base2, gens2 = get_symmetric_group_sgs(2) >>> canonicalize_naive(g, [2, 3], 0, (base2, gens2, 2, 0)) [0, 2, 1, 3, 4, 5] """ from sympy.combinatorics.perm_groups import PermutationGroup from sympy.combinatorics.tensor_can import gens_products, dummy_sgs from sympy.combinatorics.permutations import Permutation, _af_rmul v1 = [] for i in range(len(v)): base_i, gens_i, n_i, sym_i = v[i] v1.append((base_i, gens_i, [[]]*n_i, sym_i)) size, sbase, sgens = gens_products(*v1) dgens = dummy_sgs(dummies, sym, size-2) if isinstance(sym, int): num_types = 1 dummies = [dummies] sym = [sym] else: num_types = len(sym) dgens = [] for i in range(num_types): dgens.extend(dummy_sgs(dummies[i], sym[i], size - 2)) S = PermutationGroup(sgens) D = PermutationGroup([Permutation(x) for x in dgens]) dlist = list(D.generate(af=True)) g = g.array_form st = set() for s in S.generate(af=True): h = _af_rmul(g, s) for d in dlist: q = tuple(_af_rmul(d, h)) st.add(q) a = list(st) a.sort() prev = (0,)*size for h in a: if h[:-2] == prev[:-2]: if h[-1] != prev[-1]: return 0 prev = h return list(a[0]) def graph_certificate(gr): """ Return a certificate for the graph gr adjacency list The graph is assumed to be unoriented and without external lines. Associate to each vertex of the graph a symmetric tensor with number of indices equal to the degree of the vertex; indices are contracted when they correspond to the same line of the graph. The canonical form of the tensor gives a certificate for the graph. This is not an efficient algorithm to get the certificate of a graph. Examples ======== >>> from sympy.combinatorics.testutil import graph_certificate >>> gr1 = {0:[1, 2, 3, 5], 1:[0, 2, 4], 2:[0, 1, 3, 4], 3:[0, 2, 4], 4:[1, 2, 3, 5], 5:[0, 4]} >>> gr2 = {0:[1, 5], 1:[0, 2, 3, 4], 2:[1, 3, 5], 3:[1, 2, 4, 5], 4:[1, 3, 5], 5:[0, 2, 3, 4]} >>> c1 = graph_certificate(gr1) >>> c2 = graph_certificate(gr2) >>> c1 [0, 2, 4, 6, 1, 8, 10, 12, 3, 14, 16, 18, 5, 9, 15, 7, 11, 17, 13, 19, 20, 21] >>> c1 == c2 True """ from sympy.combinatorics.permutations import _af_invert from sympy.combinatorics.tensor_can import get_symmetric_group_sgs, canonicalize items = list(gr.items()) items.sort(key=lambda x: len(x[1]), reverse=True) pvert = [x[0] for x in items] pvert = _af_invert(pvert) # the indices of the tensor are twice the number of lines of the graph num_indices = 0 for v, neigh in items: num_indices += len(neigh) # associate to each vertex its indices; for each line # between two vertices assign the # even index to the vertex which comes first in items, # the odd index to the other vertex vertices = [[] for i in items] i = 0 for v, neigh in items: for v2 in neigh: if pvert[v] < pvert[v2]: vertices[pvert[v]].append(i) vertices[pvert[v2]].append(i+1) i += 2 g = [] for v in vertices: g.extend(v) assert len(g) == num_indices g += [num_indices, num_indices + 1] size = num_indices + 2 assert sorted(g) == list(range(size)) g = Permutation(g) vlen = [0]*(len(vertices[0])+1) for neigh in vertices: vlen[len(neigh)] += 1 v = [] for i in range(len(vlen)): n = vlen[i] if n: base, gens = get_symmetric_group_sgs(i) v.append((base, gens, n, 0)) v.reverse() dummies = list(range(num_indices)) can = canonicalize(g, dummies, 0, *v) return can
32.47619
98
0.641679
d9d95781d1bacab44253ba285649d7b99ee1e33d
542
py
Python
src/vatic_checker/config.py
jonkeane/vatic-checker
fa8aec6946dcfd3f466b62f9c00d81bc43514b22
[ "MIT" ]
null
null
null
src/vatic_checker/config.py
jonkeane/vatic-checker
fa8aec6946dcfd3f466b62f9c00d81bc43514b22
[ "MIT" ]
null
null
null
src/vatic_checker/config.py
jonkeane/vatic-checker
fa8aec6946dcfd3f466b62f9c00d81bc43514b22
[ "MIT" ]
null
null
null
localhost = "http://localhost/" # your local host database = "mysql://root@localhost/vaticChecker" # server://user:pass@localhost/dbname min_training = 2 # the minimum number of training videos to be considered recaptcha_secret = "" # recaptcha secret for verification duplicate_annotations = False # Should the server allow for duplicate annotations? import os.path import sys sys.path.append(os.path.dirname(os.path.abspath(__file__))) # TODO: remove on server import os os.environ['PYTHON_EGG_CACHE'] = '/tmp/apache'
38.714286
94
0.745387
d9d96360237e53141cd11d1271cee29b6140650f
8,233
py
Python
django/utils/timezone.py
graingert/django
784d0c261c76535dc760bc8d76793d92f35c1513
[ "BSD-3-Clause" ]
1
2015-11-11T12:20:45.000Z
2015-11-11T12:20:45.000Z
django/utils/timezone.py
graingert/django
784d0c261c76535dc760bc8d76793d92f35c1513
[ "BSD-3-Clause" ]
null
null
null
django/utils/timezone.py
graingert/django
784d0c261c76535dc760bc8d76793d92f35c1513
[ "BSD-3-Clause" ]
null
null
null
"""Timezone helper functions. This module uses pytz when it's available and fallbacks when it isn't. """ from datetime import datetime, timedelta, tzinfo from threading import local import time as _time try: import pytz except ImportError: pytz = None from django.conf import settings __all__ = [ 'utc', 'get_default_timezone', 'get_current_timezone', 'activate', 'deactivate', 'override', 'is_naive', 'is_aware', 'make_aware', 'make_naive', ] # UTC and local time zones ZERO = timedelta(0) utc = pytz.utc if pytz else UTC() """UTC time zone as a tzinfo instance.""" # In order to avoid accessing the settings at compile time, # wrap the expression in a function and cache the result. _localtime = None def get_default_timezone(): """ Returns the default time zone as a tzinfo instance. This is the time zone defined by settings.TIME_ZONE. See also :func:`get_current_timezone`. """ global _localtime if _localtime is None: if isinstance(settings.TIME_ZONE, basestring) and pytz is not None: _localtime = pytz.timezone(settings.TIME_ZONE) else: _localtime = LocalTimezone() return _localtime # This function exists for consistency with get_current_timezone_name def get_default_timezone_name(): """ Returns the name of the default time zone. """ return _get_timezone_name(get_default_timezone()) _active = local() def get_current_timezone(): """ Returns the currently active time zone as a tzinfo instance. """ return getattr(_active, "value", get_default_timezone()) def get_current_timezone_name(): """ Returns the name of the currently active time zone. """ return _get_timezone_name(get_current_timezone()) def _get_timezone_name(timezone): """ Returns the name of ``timezone``. """ try: # for pytz timezones return timezone.zone except AttributeError: # for regular tzinfo objects local_now = datetime.now(timezone) return timezone.tzname(local_now) # Timezone selection functions. # These functions don't change os.environ['TZ'] and call time.tzset() # because it isn't thread safe. def activate(timezone): """ Sets the time zone for the current thread. The ``timezone`` argument must be an instance of a tzinfo subclass or a time zone name. If it is a time zone name, pytz is required. """ if isinstance(timezone, tzinfo): _active.value = timezone elif isinstance(timezone, basestring) and pytz is not None: _active.value = pytz.timezone(timezone) else: raise ValueError("Invalid timezone: %r" % timezone) def deactivate(): """ Unsets the time zone for the current thread. Django will then use the time zone defined by settings.TIME_ZONE. """ if hasattr(_active, "value"): del _active.value # Templates def template_localtime(value, use_tz=None): """ Checks if value is a datetime and converts it to local time if necessary. If use_tz is provided and is not None, that will force the value to be converted (or not), overriding the value of settings.USE_TZ. This function is designed for use by the template engine. """ should_convert = (isinstance(value, datetime) and (settings.USE_TZ if use_tz is None else use_tz) and not is_naive(value) and getattr(value, 'convert_to_local_time', True)) return localtime(value) if should_convert else value # Utilities def localtime(value, timezone=None): """ Converts an aware datetime.datetime to local time. Local time is defined by the current time zone, unless another time zone is specified. 
""" if timezone is None: timezone = get_current_timezone() value = value.astimezone(timezone) if hasattr(timezone, 'normalize'): # available for pytz time zones value = timezone.normalize(value) return value def now(): """ Returns an aware or naive datetime.datetime, depending on settings.USE_TZ. """ if settings.USE_TZ: # timeit shows that datetime.now(tz=utc) is 24% slower return datetime.utcnow().replace(tzinfo=utc) else: return datetime.now() # By design, these four functions don't perform any checks on their arguments. # The caller should ensure that they don't receive an invalid value like None. def is_aware(value): """ Determines if a given datetime.datetime is aware. The logic is described in Python's docs: http://docs.python.org/library/datetime.html#datetime.tzinfo """ return value.tzinfo is not None and value.tzinfo.utcoffset(value) is not None def is_naive(value): """ Determines if a given datetime.datetime is naive. The logic is described in Python's docs: http://docs.python.org/library/datetime.html#datetime.tzinfo """ return value.tzinfo is None or value.tzinfo.utcoffset(value) is None def make_aware(value, timezone): """ Makes a naive datetime.datetime in a given time zone aware. """ if hasattr(timezone, 'localize'): # available for pytz time zones return timezone.localize(value, is_dst=None) else: # may be wrong around DST changes return value.replace(tzinfo=timezone) def make_naive(value, timezone): """ Makes an aware datetime.datetime naive in a given time zone. """ value = value.astimezone(timezone) if hasattr(timezone, 'normalize'): # available for pytz time zones value = timezone.normalize(value) return value.replace(tzinfo=None)
28.195205
81
0.66197
d9da1ced032a66e58537bdeecea30c322d1a2f01
644
py
Python
malleefowl/tests/test_wps_caps.py
Ouranosinc/malleefowl
685a4cabe4c4ccafc2721a50e1f8178b8b81689e
[ "Apache-2.0" ]
null
null
null
malleefowl/tests/test_wps_caps.py
Ouranosinc/malleefowl
685a4cabe4c4ccafc2721a50e1f8178b8b81689e
[ "Apache-2.0" ]
4
2017-09-21T17:14:45.000Z
2020-11-11T03:20:42.000Z
malleefowl/tests/test_wps_caps.py
Ouranosinc/malleefowl
685a4cabe4c4ccafc2721a50e1f8178b8b81689e
[ "Apache-2.0" ]
null
null
null
import pytest from pywps import Service from pywps.tests import assert_response_success from .common import client_for from malleefowl.processes import processes
28
80
0.608696
d9dd8d48aa39f42683555f052c81e9f33f26c3cd
1,835
py
Python
setup.py
CallumJHays/pyngrok
e1a28948d1d8cf42f8eed1b166a2caf6b2a68066
[ "MIT" ]
null
null
null
setup.py
CallumJHays/pyngrok
e1a28948d1d8cf42f8eed1b166a2caf6b2a68066
[ "MIT" ]
null
null
null
setup.py
CallumJHays/pyngrok
e1a28948d1d8cf42f8eed1b166a2caf6b2a68066
[ "MIT" ]
null
null
null
from setuptools import setup __author__ = "Alex Laird" __copyright__ = "Copyright 2019, Alex Laird" __version__ = "1.4.0" with open("README.md", "r") as f: long_description = f.read() setup( name="pyngrok", version=__version__, packages=["pyngrok"], python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*", install_requires=[ "future", "pyyaml" ], entry_points=""" [console_scripts] ngrok=pyngrok.ngrok:run """, description="A Python wrapper for Ngrok.", long_description=long_description, long_description_content_type="text/markdown", author="Alex Laird", author_email="[email protected]", url="https://github.com/alexdlaird/pyngrok", download_url="https://github.com/alexdlaird/pyngrok/archive/{}.tar.gz".format(__version__), keywords=["ngrok", "tunnel", "tunneling", "webhook", "localhost"], license="MIT", classifiers=[ "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Software Development :: Libraries :: Python Modules", "Environment :: Console", "Environment :: Web Environment", "Intended Audience :: Developers", "Intended Audience :: Education", "Development Status :: 5 - Production/Stable", "License :: OSI Approved :: MIT License", "Operating System :: MacOS", "Operating System :: Microsoft :: Windows", "Operating System :: POSIX", "Operating System :: Unix" ] )
34.622642
95
0.611444
d9ddd794f7ce3da3377a0064524099ee9b8e1fd8
1,377
py
Python
pipelines/trackml.py
texasmichelle/kubeflow-cern
886925fad5c37a72f6999c1100584fa8e4a0adae
[ "Apache-2.0" ]
4
2019-06-06T20:10:08.000Z
2021-02-19T11:59:39.000Z
pipelines/trackml.py
texasmichelle/kubeflow-cern
886925fad5c37a72f6999c1100584fa8e4a0adae
[ "Apache-2.0" ]
null
null
null
pipelines/trackml.py
texasmichelle/kubeflow-cern
886925fad5c37a72f6999c1100584fa8e4a0adae
[ "Apache-2.0" ]
1
2019-10-13T03:51:16.000Z
2019-10-13T03:51:16.000Z
#!/usr/bin/env python3 import kfp.dsl as dsl import kfp.gcp as gcp # Pipeline input variables. KUBECTL_IMAGE = "gcr.io/mcas-195423/trackml_master_kfp_kubectl" KUBECTL_IMAGE_VERSION = "1" TRACKML_IMAGE = "gcr.io/mcas-195423/trackml_master_trackml" TRACKML_IMAGE_VERSION = "1" if __name__ == '__main__': import kfp.compiler as compiler compiler.Compiler().compile(trackml, __file__ + '.tar.gz')
24.157895
63
0.688453
d9de866f5c692eb5d2ae261f2a1854febddba480
2,211
py
Python
bin/ticker.py
aleasoluciones/infrabbitmq
2759590156c63b9a04fb5daf8d588a084fc30629
[ "MIT" ]
null
null
null
bin/ticker.py
aleasoluciones/infrabbitmq
2759590156c63b9a04fb5daf8d588a084fc30629
[ "MIT" ]
null
null
null
bin/ticker.py
aleasoluciones/infrabbitmq
2759590156c63b9a04fb5daf8d588a084fc30629
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- import time import puka import argparse import logging from infcommon import utils from infrabbitmq import factory as infrabbitmq_factory from infrabbitmq.rabbitmq import RabbitMQError from infrabbitmq.events_names import ( TICK_1_SECOND, TICK_1_MINUTE, TICK_2_MINUTES, TICK_5_MINUTES, TICK_60_MINUTES, ) if __name__ == '__main__': try: parser = argparse.ArgumentParser() parser.add_argument('-n', '--network', action='store', required=True, help='Network name (ilo, c2k, ...)') args = parser.parse_args() network = args.network.split('-')[0] main(network) except Exception as exc: logging.critical("Ticker Fails: {}".format(exc))
29.878378
114
0.622795
d9df003e9cd20fdfdd89b5aaebba29cdc7e644c5
16,137
py
Python
transformers/modeling_encoder_decoder.py
Tarpelite/UniNLP
176c2a0f88c8054bf69e1f92693d353737367c34
[ "MIT" ]
null
null
null
transformers/modeling_encoder_decoder.py
Tarpelite/UniNLP
176c2a0f88c8054bf69e1f92693d353737367c34
[ "MIT" ]
3
2021-06-02T00:41:41.000Z
2022-02-10T01:07:59.000Z
transformers/modeling_encoder_decoder.py
Tarpelite/UniNLP
176c2a0f88c8054bf69e1f92693d353737367c34
[ "MIT" ]
1
2020-01-27T03:02:19.000Z
2020-01-27T03:02:19.000Z
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Classes to support Encoder-Decoder architectures """ from __future__ import absolute_import, division, print_function, unicode_literals import logging import os import torch from torch import nn from .modeling_auto import AutoModel, AutoModelWithLMHead logger = logging.getLogger(__name__)
51.88746
473
0.656008
d9e182705452fe461a2142c0afa4786d47f19c46
2,131
py
Python
dags/treinos_igti/treino03.py
rafaelols/airflow
8e4af5fb576a9568af476c0607819649b724adea
[ "Apache-2.0" ]
null
null
null
dags/treinos_igti/treino03.py
rafaelols/airflow
8e4af5fb576a9568af476c0607819649b724adea
[ "Apache-2.0" ]
null
null
null
dags/treinos_igti/treino03.py
rafaelols/airflow
8e4af5fb576a9568af476c0607819649b724adea
[ "Apache-2.0" ]
null
null
null
from airflow import DAG from airflow.operators.bash_operator import BashOperator from airflow.operators.python_operator import PythonOperator, BranchPythonOperator from datetime import datetime, timedelta import pandas as pd import random # Default args definition default_args = { 'owner': 'Rafael', 'depends_on_past': False, 'start_date': datetime(2020, 11, 29, 18, 20), 'email': ['[email protected]', '[email protected]'], 'email_on_failure': False, 'email_on_retry': False, 'retries': 1, 'Retry_delay': timedelta(minutes=1) } # Dag definition dag = DAG( 'treino-03', description="Extrai dados do Titanic e calcula idade media para homens ou mulheres", default_args = default_args, schedule_interval='*/20 * * * *' ) get_data = BashOperator( task_id='get-data', bash_command='curl https://raw.githubusercontent.com/A3Data/hermione/master/hermione/file_text/train.csv -o /usr/local/airflow/data/train.csv', dag=dag ) escolhe_h_m = PythonOperator( task_id='escolhe-h-m', python_callable=sorteia_h_m, dag=dag ) male_female = BranchPythonOperator( task_id='condicional', python_callable=MouF, provide_context=True, dag=dag ) branch_homem = PythonOperator( task_id='branch_homem', python_callable=mean_homem, dag=dag ) branch_mulher = PythonOperator( task_id='branch_mulher', python_callable=mean_mulher, dag=dag ) get_data >> escolhe_h_m >> male_female >> [branch_homem, branch_mulher]
25.987805
147
0.697325