| column | dtype | min | max | nullable (⌀) |
|---|---|---|---|---|
| hexsha | string | length 40 | length 40 | no |
| size | int64 | 5 | 2.06M | no |
| ext | string | 11 classes | — | no |
| lang | string | 1 class | — | no |
| max_stars_repo_path | string | length 3 | length 251 | no |
| max_stars_repo_name | string | length 4 | length 130 | no |
| max_stars_repo_head_hexsha | string | length 40 | length 78 | no |
| max_stars_repo_licenses | sequence | length 1 | length 10 | no |
| max_stars_count | int64 | 1 | 191k | yes |
| max_stars_repo_stars_event_min_datetime | string | length 24 | length 24 | yes |
| max_stars_repo_stars_event_max_datetime | string | length 24 | length 24 | yes |
| max_issues_repo_path | string | length 3 | length 251 | no |
| max_issues_repo_name | string | length 4 | length 130 | no |
| max_issues_repo_head_hexsha | string | length 40 | length 78 | no |
| max_issues_repo_licenses | sequence | length 1 | length 10 | no |
| max_issues_count | int64 | 1 | 116k | yes |
| max_issues_repo_issues_event_min_datetime | string | length 24 | length 24 | yes |
| max_issues_repo_issues_event_max_datetime | string | length 24 | length 24 | yes |
| max_forks_repo_path | string | length 3 | length 251 | no |
| max_forks_repo_name | string | length 4 | length 130 | no |
| max_forks_repo_head_hexsha | string | length 40 | length 78 | no |
| max_forks_repo_licenses | sequence | length 1 | length 10 | no |
| max_forks_count | int64 | 1 | 105k | yes |
| max_forks_repo_forks_event_min_datetime | string | length 24 | length 24 | yes |
| max_forks_repo_forks_event_max_datetime | string | length 24 | length 24 | yes |
| content | string | length 1 | length 1.05M | no |
| avg_line_length | float64 | 1 | 1.02M | no |
| max_line_length | int64 | 3 | 1.04M | no |
| alphanum_fraction | float64 | 0 | 1 | no |
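The rows below follow this schema: one record per source file, with the file's text in `content` plus repository, star, fork and issue metadata. A minimal sketch of streaming such a dataset with the Hugging Face `datasets` library — the dataset path here is a hypothetical placeholder, not the actual repository id:

```python
from datasets import load_dataset

# Hypothetical dataset path -- substitute the real repository id.
ds = load_dataset("example-org/python-source-dump", split="train", streaming=True)

for row in ds.take(3):
    # Each record pairs the raw file text with its repo-level metadata.
    print(row["hexsha"], row["max_stars_repo_path"], row["size"])
    print(row["content"][:200])
```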
6b3c8e618e44b6365d5b13bea7673584e02f77cc | 1,652 | py | Python | the_unsync/thesync.py | vromanuk/async_techniques | 7e1c6efcd4c81c322002eb3002d5bb929c5bc623 | [
"MIT"
] | null | null | null | the_unsync/thesync.py | vromanuk/async_techniques | 7e1c6efcd4c81c322002eb3002d5bb929c5bc623 | [
"MIT"
] | null | null | null | the_unsync/thesync.py | vromanuk/async_techniques | 7e1c6efcd4c81c322002eb3002d5bb929c5bc623 | [
"MIT"
] | null | null | null | from unsync import unsync
import asyncio
import datetime
import math
import aiohttp
import requests
if __name__ == '__main__':
main()
| 24.656716 | 99 | 0.626513 |
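The body of this file is truncated in the dump; `main` and the functions it drives are not shown. For context, a minimal `unsync` pattern consistent with these imports might look like the following — a sketch, not the original author's code:

```python
import asyncio
import datetime
from unsync import unsync

@unsync
async def wait_a_bit():
    # unsync schedules this coroutine on its own background event loop
    await asyncio.sleep(0.5)
    return datetime.datetime.now()

def main():
    tasks = [wait_a_bit() for _ in range(3)]
    # .result() blocks until each Unfuture resolves
    print([t.result() for t in tasks])
```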
6b3dd632291d2f985432a2f2e2e3bd67cb5c5d46 | 19,209 | py | Python | sdk/python/pulumi_azure/desktopvirtualization/workspace.py | henriktao/pulumi-azure | f1cbcf100b42b916da36d8fe28be3a159abaf022 | [
"ECL-2.0",
"Apache-2.0"
] | 109 | 2018-06-18T00:19:44.000Z | 2022-02-20T05:32:57.000Z | sdk/python/pulumi_azure/desktopvirtualization/workspace.py | henriktao/pulumi-azure | f1cbcf100b42b916da36d8fe28be3a159abaf022 | [
"ECL-2.0",
"Apache-2.0"
] | 663 | 2018-06-18T21:08:46.000Z | 2022-03-31T20:10:11.000Z | sdk/python/pulumi_azure/desktopvirtualization/workspace.py | henriktao/pulumi-azure | f1cbcf100b42b916da36d8fe28be3a159abaf022 | [
"ECL-2.0",
"Apache-2.0"
] | 41 | 2018-07-19T22:37:38.000Z | 2022-03-14T10:56:26.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['WorkspaceArgs', 'Workspace']
class Workspace(pulumi.CustomResource):
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(WorkspaceArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = WorkspaceArgs.__new__(WorkspaceArgs)
__props__.__dict__["description"] = description
__props__.__dict__["friendly_name"] = friendly_name
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["tags"] = tags
super(Workspace, __self__).__init__(
'azure:desktopvirtualization/workspace:Workspace',
resource_name,
__props__,
opts)
| 42.781737 | 221 | 0.645895 |
6b3deda0113b8eb8f9bdf6272cc95e4fe0c53714 | 2,743 | py | Python | jupyanno/sheets.py | betatim/jupyanno | 11fbb1825c8e6966260620758768e0e1fa5cecc9 | [
"Apache-2.0"
] | 23 | 2018-08-24T16:48:20.000Z | 2021-02-26T02:52:40.000Z | jupyanno/sheets.py | L3-data/jupyanno | 6f6ec37e88b4d92f00bc359e7e39157b6b7f0eb5 | [
"Apache-2.0"
] | 73 | 2018-08-13T07:56:15.000Z | 2018-10-09T13:55:20.000Z | jupyanno/sheets.py | L3-data/jupyanno | 6f6ec37e88b4d92f00bc359e7e39157b6b7f0eb5 | [
"Apache-2.0"
] | 4 | 2018-08-13T07:55:50.000Z | 2020-09-30T12:04:27.000Z | """Code for reading and writing results to google sheets"""
from bs4 import BeautifulSoup
import requests
import warnings
import json
import pandas as pd
from six.moves.urllib.parse import urlparse, parse_qs
from six.moves.urllib.request import urlopen
_CELLSET_ID = "AIzaSyC8Zo-9EbXgHfqNzDxVb_YS_IIZBWtvoJ4"
def get_sheet_as_df(base_url, kk, columns="A:AG"):
"""
    Fetches the requested cell range via the Sheets API and returns it as a
    pandas DataFrame (an empty DataFrame if the sheet cannot be accessed)
    :return: pandas.DataFrame
"""
try:
# TODO: we should probably get the whole sheet
all_vals = "{base_url}/{cols}?key={kk}".format(base_url=base_url,
cols=columns,
kk=kk)
t_data = json.loads(urlopen(all_vals).read().decode('latin1'))[
'values']
frow = t_data.pop(0)
return pd.DataFrame([
dict([(key, '' if idx >= len(irow) else irow[idx])
for idx, key in enumerate(frow)]) for irow in
t_data])
except IOError as e:
warnings.warn(
'Sheet could not be accessed, check internet connectivity, \
proxies and permissions: {}'.format(
e))
return pd.DataFrame([{}])
| 33.45122 | 79 | 0.606635 |
6b3e1154af6f1eb866c2c34cdc822a0ff3902ab9 | 2,191 | py | Python | sorting/python/max_heap.py | zhou7rui/algorithm | 9b5500ac3d8bdfd223bf9aec55e68675f2df7c59 | [
"MIT"
] | 6 | 2017-08-31T07:13:34.000Z | 2018-09-10T08:54:43.000Z | sorting/python/max_heap.py | zhou7rui/algorithm | 9b5500ac3d8bdfd223bf9aec55e68675f2df7c59 | [
"MIT"
] | null | null | null | sorting/python/max_heap.py | zhou7rui/algorithm | 9b5500ac3d8bdfd223bf9aec55e68675f2df7c59 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*
'''
98
/ \
96 84
/ \ / \
92 82 78 47
/ \ / \ / \ / \
33 26 51 85 50 15 44 60
/ \ / \ / \ / \ / \ / \ / \ / \
40 51 98 51 7 17 94 82 32 21 64 60 7 44 63 63
'''
import random
if __name__ == '__main__':
N = 31
M = 100
heap = Maxheap(N)
for i in range(0,N):
k = random.randint(1, M)
heap.insert(k)
# arr = [random.randint(1,M) for i in range(N)]
# heap = Maxheap(len(arr),arr)
print(heap.size())
print(heap.data)
print(heap.extractMax())
| 24.076923 | 84 | 0.426289 |
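The `Maxheap` class used above is not included in this dump. A minimal array-based max-heap consistent with the call sites (`Maxheap(N)`, `Maxheap(len(arr), arr)`, `insert`, `size`, `data`, `extractMax`) might look like this — a sketch, not the original author's class:

```python
class Maxheap:
    def __init__(self, capacity, arr=None):
        # capacity is kept for parity with the call sites; not enforced here
        self.capacity = capacity
        self.data = [] if arr is None else list(arr)
        # heapify when an initial array is supplied
        for i in range(len(self.data) // 2 - 1, -1, -1):
            self._sift_down(i)

    def size(self):
        return len(self.data)

    def insert(self, item):
        self.data.append(item)
        self._sift_up(len(self.data) - 1)

    def extractMax(self):
        top = self.data[0]
        last = self.data.pop()
        if self.data:
            self.data[0] = last
            self._sift_down(0)
        return top

    def _sift_up(self, i):
        while i > 0 and self.data[(i - 1) // 2] < self.data[i]:
            parent = (i - 1) // 2
            self.data[i], self.data[parent] = self.data[parent], self.data[i]
            i = parent

    def _sift_down(self, i):
        n = len(self.data)
        while 2 * i + 1 < n:
            j = 2 * i + 1  # left child
            if j + 1 < n and self.data[j + 1] > self.data[j]:
                j += 1  # right child is larger
            if self.data[i] >= self.data[j]:
                break
            self.data[i], self.data[j] = self.data[j], self.data[i]
            i = j
```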
6b3e3c2d633954d06881dc1103a976a7248201f2 | 585 | py | Python | ink2canvas/svg/Use.py | greipfrut/pdftohtml5canvas | bd4b829a5fd02b503e6b32c268b265daa92e92e5 | [
"MIT"
] | 4 | 2016-05-06T21:29:39.000Z | 2020-02-25T08:47:48.000Z | ink2canvas/svg/Use.py | letw/pdftohtml5canvas | bd4b829a5fd02b503e6b32c268b265daa92e92e5 | [
"MIT"
] | null | null | null | ink2canvas/svg/Use.py | letw/pdftohtml5canvas | bd4b829a5fd02b503e6b32c268b265daa92e92e5 | [
"MIT"
] | null | null | null | from ink2canvas.svg.AbstractShape import AbstractShape | 32.5 | 76 | 0.647863 |
6b3ef77f1a082e51763d4a446e010e19a72af147 | 101 | py | Python | docs/source/tutorial/code/read_csv.py | HanSooLim/DIL-Project | 069fa7e35a2e1edfff30dc2540d9b87f5db95dde | [
"MIT",
"BSD-3-Clause"
] | 2 | 2021-10-16T15:08:05.000Z | 2021-10-16T15:59:57.000Z | docs/source/tutorial/code/read_csv.py | HanSooLim/DIL-Project | 069fa7e35a2e1edfff30dc2540d9b87f5db95dde | [
"MIT",
"BSD-3-Clause"
] | 8 | 2021-10-21T04:48:12.000Z | 2021-11-07T03:09:25.000Z | docs/source/tutorial/code/read_csv.py | HanSooLim/DIL-Project | 069fa7e35a2e1edfff30dc2540d9b87f5db95dde | [
"MIT",
"BSD-3-Clause"
] | 3 | 2021-05-02T13:39:14.000Z | 2021-05-31T14:05:56.000Z | import pandas
datas = pandas.read_csv("../../Sample/example_dataset.csv", index_col=0)
print(datas)
| 20.2 | 72 | 0.742574 |
6b4010a8299e923b75856db3391db03cdf9dc135 | 641 | py | Python | app.py | rghose/lol3 | c902e61bd5d69c541b46c834a5183e4da8eec591 | [
"BSD-2-Clause"
] | null | null | null | app.py | rghose/lol3 | c902e61bd5d69c541b46c834a5183e4da8eec591 | [
"BSD-2-Clause"
] | null | null | null | app.py | rghose/lol3 | c902e61bd5d69c541b46c834a5183e4da8eec591 | [
"BSD-2-Clause"
] | null | null | null | from flask import *
app = Flask(__name__)
import botty
# ----------------------------------
# -----------------------------------
# -----------------------------------
# -----------------------------------
if __name__ == "__main__":
app.debug = True
app.run(host="0.0.0.0")
| 23.740741 | 54 | 0.483619 |
6b40618c90c089307047e8b7e28b599c38d7a399 | 451 | py | Python | config.py | metarom-quality/gooseberry | 544503c52edd360a53d09f69ea6b4a0645aa617a | [
"MIT"
] | null | null | null | config.py | metarom-quality/gooseberry | 544503c52edd360a53d09f69ea6b4a0645aa617a | [
"MIT"
] | null | null | null | config.py | metarom-quality/gooseberry | 544503c52edd360a53d09f69ea6b4a0645aa617a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os
DATABASE="/home/tomate/Warehouse/syte/meta.db"
XLSDIR = "/mnt/c/Users/Natacha/Documents/TempDocs/progen/Formula/"
temp = [i for i in next(os.walk(XLSDIR))[2] if i.endswith("xlsx") or i.endswith("xls")]
flist = {}
for i in temp:
name = i.split(" ")[0].split("-")[0].split(".")[0]
if name.startswith("~") or name.startswith("PR") or name.startswith("FAB"):
continue
else:
flist[name] = i
| 26.529412 | 87 | 0.627494 |
6b41d9378cb46e318f4cb6580acecc3d11ab3c3b | 1,054 | py | Python | setup.py | markostrajkov/range-requests-proxy | 74d4bfee93098854c7b9f723c03c2316e729f295 | [
"BSD-3-Clause"
] | 1 | 2016-08-14T14:12:04.000Z | 2016-08-14T14:12:04.000Z | setup.py | markostrajkov/range-requests-proxy | 74d4bfee93098854c7b9f723c03c2316e729f295 | [
"BSD-3-Clause"
] | null | null | null | setup.py | markostrajkov/range-requests-proxy | 74d4bfee93098854c7b9f723c03c2316e729f295 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
setup(
name='range-requests-proxy',
version='0.1',
description='Asynchronous HTTP proxy for HTTP Range Requests',
author='Marko Trajkov',
author_email='[email protected]',
cmdclass={'test': PyTest},
tests_require=['pytest>=2.8.0', 'mock==2.0.0'],
install_requires=['tornado==4.4.1', 'pycurl==7.43.0'],
packages=['rangerequestsproxy'],
license='BSD',
url='https://github.com/markostrajkov/range-requests-proxy',
)
| 26.35 | 76 | 0.665085 |
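The `PyTest` command class referenced in `cmdclass` above is not included in this dump. It almost certainly follows the canonical setuptools/pytest recipe, which would need to sit above the `setup()` call (`sys` is already imported there):

```python
class PyTest(TestCommand):
    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # import here, because outside the eggs aren't loaded yet
        import pytest
        errno = pytest.main(self.test_args)
        sys.exit(errno)
```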
6b42790dafdbd5621ed121da922a0750203f73ba | 918 | py | Python | tests/pytorch_pfn_extras_tests/onnx/test_load_model.py | kmaehashi/pytorch-pfn-extras | 70b5db0dad8a8e342cc231e8a18c6f32ce250d1c | [
"MIT"
] | 243 | 2020-05-12T01:15:46.000Z | 2022-03-21T22:07:57.000Z | tests/pytorch_pfn_extras_tests/onnx/test_load_model.py | kmaehashi/pytorch-pfn-extras | 70b5db0dad8a8e342cc231e8a18c6f32ce250d1c | [
"MIT"
] | 495 | 2020-05-12T06:45:12.000Z | 2022-03-31T07:14:02.000Z | tests/pytorch_pfn_extras_tests/onnx/test_load_model.py | kmaehashi/pytorch-pfn-extras | 70b5db0dad8a8e342cc231e8a18c6f32ce250d1c | [
"MIT"
] | 37 | 2020-05-12T02:16:07.000Z | 2021-08-11T06:00:16.000Z | import os
import pytest
import torch
import pytorch_pfn_extras.onnx as tou
from tests.pytorch_pfn_extras_tests.onnx.test_export_testcase import Net
| 34 | 79 | 0.704793 |
6b433031281aa45b18a53118e3852e760126a4ce | 867 | py | Python | validate/v1/base.py | huzidabanzhang/Python | 7b304290e5be7db4bce253edb069a12dcbc3c998 | [
"MIT"
] | 4 | 2019-09-04T09:16:24.000Z | 2019-09-18T08:50:36.000Z | validate/v1/base.py | huzidabanzhang/Python | 7b304290e5be7db4bce253edb069a12dcbc3c998 | [
"MIT"
] | null | null | null | validate/v1/base.py | huzidabanzhang/Python | 7b304290e5be7db4bce253edb069a12dcbc3c998 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding:UTF-8 -*-
'''
@Description:
@Author: Zpp
@Date: 2020-05-28 13:44:29
@LastEditors: Zpp
@LastEditTime: 2020-05-28 14:02:02
'''
params = {
    # field definitions and their validation rules
'fields': {
'type': {
'name': '',
'type': 'int',
'between': [1, 2, 3],
'required': True
},
'document': {
'name': '',
'type': 'file',
'required': True,
'msg': ''
},
'admin_id': {
'name': '',
'type': 'str',
'required': True
},
'time': {
'name': '',
'type': 'str',
'required': True
}
},
    # fields required by the Export endpoint
    'Export': ['type'],
    # fields required by the Import endpoint
    'Import': ['document'],
    # fields required by the Login endpoint
    'Login': ['admin_id', 'time']
}
| 19.704545 | 34 | 0.392157 |
6b434ec1049bc3564470ff973bc2f2c30ca659c6 | 329 | py | Python | example/speech_recognition/stt_layer_slice.py | axbaretto/mxnet | 5f593885356ff6d14f5519fa18e79b944beb51cd | [
"Apache-2.0"
] | 92 | 2017-04-25T15:40:55.000Z | 2022-03-28T17:54:53.000Z | example/speech_recognition/stt_layer_slice.py | yanghaojin/BMXNet | 102f8d0ed59529bbd162c37bf07ae58ad6c4caa1 | [
"Apache-2.0"
] | 18 | 2017-05-15T05:16:41.000Z | 2019-06-14T06:02:08.000Z | example/speech_recognition/stt_layer_slice.py | yanghaojin/BMXNet | 102f8d0ed59529bbd162c37bf07ae58ad6c4caa1 | [
"Apache-2.0"
] | 39 | 2017-04-23T12:38:45.000Z | 2021-04-04T05:01:03.000Z | import mxnet as mx
| 29.909091 | 98 | 0.726444 |
6b4469c0d369d163f87c18b571da60869e4d600b | 8,000 | py | Python | api/auth.py | fergalmoran/dss.api | d1b9fb674b6dbaee9b46b9a3daa2027ab8d28073 | [
"BSD-2-Clause"
] | null | null | null | api/auth.py | fergalmoran/dss.api | d1b9fb674b6dbaee9b46b9a3daa2027ab8d28073 | [
"BSD-2-Clause"
] | null | null | null | api/auth.py | fergalmoran/dss.api | d1b9fb674b6dbaee9b46b9a3daa2027ab8d28073 | [
"BSD-2-Clause"
] | null | null | null | import datetime
import json
from calendar import timegm
from urllib.parse import parse_qsl
import requests
from allauth.socialaccount import models as aamodels
from requests_oauthlib import OAuth1
from rest_framework import parsers, renderers
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.authtoken.serializers import AuthTokenSerializer
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from rest_framework_jwt.settings import api_settings
from rest_framework_jwt.utils import jwt_payload_handler, jwt_encode_handler
from dss import settings
from spa.models import UserProfile
from spa.models.socialaccountlink import SocialAccountLink
def _temp_reverse_user(uid, provider, access_token, access_token_secret, payload):
"""
    Find the user account for a social login and migrate away from PSA:
    1. Look for an existing SocialAccountLink and refresh its tokens.
    2. Otherwise fall back to an allauth SocialAccount and create the link.
"""
user = None
try:
sa = SocialAccountLink.objects.get(social_id=uid)
sa.type = provider
sa.social_id = uid
sa.access_token = access_token
sa.access_token_secret = access_token_secret
sa.provider_data = payload
sa.save()
user = UserProfile.objects.get(id=sa.user.id)
except SocialAccountLink.DoesNotExist:
# try allauth
try:
aa = aamodels.SocialAccount.objects.get(uid=uid)
try:
user = UserProfile.objects.get(user__id=aa.user_id)
except UserProfile.DoesNotExist:
print('Need to create UserProfile')
# we got an allauth, create the SocialAccountLink
sa = SocialAccountLink()
sa.user = user
sa.social_id = aa.uid
sa.type = aa.provider
sa.access_token = access_token
sa.access_token_secret = access_token_secret
sa.provider_data = payload
sa.save()
except aamodels.SocialAccount.DoesNotExist:
print('Need to create social model')
return user if user else None
| 42.105263 | 101 | 0.61525 |
6b454d373a4daf57bd5eb97d08752d3322beb78a | 6,146 | py | Python | bcgs/disqus_objects.py | aeturnum/bcgs | e5ae4c9f4cdd45b47615f00581dcc3792c281ea3 | [
"MIT"
] | null | null | null | bcgs/disqus_objects.py | aeturnum/bcgs | e5ae4c9f4cdd45b47615f00581dcc3792c281ea3 | [
"MIT"
] | null | null | null | bcgs/disqus_objects.py | aeturnum/bcgs | e5ae4c9f4cdd45b47615f00581dcc3792c281ea3 | [
"MIT"
] | null | null | null | import requests
import aiohttp
from constants import API_KEY
| 36.802395 | 143 | 0.493817 |
6b46022f290a59526dcdb44e97324f9e8df677ff | 11,520 | py | Python | nvdbgeotricks.py | LtGlahn/estimat_gulstripe | 8bb93d52131bdda9846810dbd6bac7f872377859 | [
"MIT"
] | null | null | null | nvdbgeotricks.py | LtGlahn/estimat_gulstripe | 8bb93d52131bdda9846810dbd6bac7f872377859 | [
"MIT"
] | null | null | null | nvdbgeotricks.py | LtGlahn/estimat_gulstripe | 8bb93d52131bdda9846810dbd6bac7f872377859 | [
"MIT"
] | null | null | null | """
A collection of helper functions that use the nvdbapiv3 functions to do useful things, e.g. save geographic datasets
These helpers assume a working installation of geopandas, shapely and a few other packages that must
be installed separately. Historically, some of these libraries could occasionally be fiddly to install, or have
version conflicts with each other, especially on Windows. Such trouble is (mostly) a thing of the past
The recommendation is still to use an (ana)conda installation in a dedicated "environment". This is good code hygiene
and keeps mess to a minimum; not least, any mess can easily be repaired by creating a new "environment"
without affecting your whole Python installation.
"""
import re
import pdb
from shapely import wkt
# from shapely.ops import unary_union
import pandas as pd
import geopandas as gpd
from datetime import datetime
import nvdbapiv3
from apiforbindelse import apiforbindelse
def nvdb2gpkg( objekttyper, filnavn='datadump', mittfilter=None, vegnett=True, vegsegmenter=False, geometri=True):
"""
    Saves the NVDB road network and the given object types to a GeoPackage
    ARGUMENTS
        objekttyper: List of the object types you want to save
    KEYWORDS
        mittfilter=None : Dictionary with filters for the search object in nvdbapiv3.py, for example { 'kommune' : 5001 }
            The same filter is used for both the road network and feature data
        vegnett=True : Bool, default=True. Whether to include road network data
        vegsegmenter=False : Bool, default=False. Whether to repeat each object split by road segments
        geometri=True : Bool, default=True. Whether to fetch geometry from the object's own geometry (if present)
        If you want to present road objects based on their position along the road, use the combination
        vegsegmenter=True, geometri=False
    RETURNS
        None
"""
if not '.gpkg' in filnavn:
filnavn = filnavn + datetime.today().strftime('%Y-%m-%d') + '.gpkg'
if not isinstance(objekttyper, list ):
objekttyper = [ objekttyper ]
for enObjTypeId in objekttyper:
enObjTypeId = int( enObjTypeId )
sok = nvdbapiv3.nvdbFagdata( enObjTypeId )
if mittfilter:
sok.filter( mittfilter )
stat = sok.statistikk()
objtypenavn = sok.objektTypeDef['navn']
print( 'Henter', stat['antall'], 'forekomster av objekttype', sok.objektTypeId, objtypenavn )
lagnavn = 'type' + str(enObjTypeId) + '_' + nvdbapiv3.esriSikkerTekst( objtypenavn.lower() )
rec = sok.to_records( vegsegmenter=vegsegmenter, geometri=geometri )
if len( rec ) > 0:
mindf = pd.DataFrame( rec )
            # Have to fiddle a bit to avoid column-name collisions
kolonner = list( mindf.columns )
lowerkolonner = [ x.lower() for x in kolonner ]
# Duplicate element indices in list
# Using list comprehension + list slicing
# https://www.geeksforgeeks.org/python-duplicate-element-indices-in-list/
res = [idx for idx, val in enumerate(lowerkolonner) if val in lowerkolonner[:idx]]
for ii, dublett in enumerate( res):
mindf.rename(columns={ mindf.columns[dublett] : kolonner[dublett] + '_' + str( ii+1 ) }, inplace=True )
mindf['geometry'] = mindf['geometri'].apply( wkt.loads )
minGdf = gpd.GeoDataFrame( mindf, geometry='geometry', crs=5973 )
            # must drop the 'vegsegmenter' column if vegsegmenter=False
if 'vegsegmenter' in minGdf.columns:
minGdf.drop( 'vegsegmenter', 1, inplace=True)
minGdf.drop( 'geometri', 1, inplace=True)
minGdf.to_file( filnavn, layer=lagnavn, driver="GPKG")
else:
print( 'Ingen forekomster av', objtypenavn, 'for filter', mittfilter)
if vegnett:
veg = nvdbapiv3.nvdbVegnett()
if mittfilter:
junk = mittfilter.pop( 'egenskap', None)
junk = mittfilter.pop( 'overlapp', None)
veg.filter( mittfilter )
print( 'Henter vegnett')
rec = veg.to_records()
mindf = pd.DataFrame( rec)
mindf['geometry'] = mindf['geometri'].apply( wkt.loads )
mindf.drop( 'geometri', 1, inplace=True)
minGdf = gpd.GeoDataFrame( mindf, geometry='geometry', crs=5973 )
minGdf.to_file( filnavn, layer='vegnett', driver="GPKG")
def dumpkontraktsomr( komr = [] ):
"""
    Dumps a handful of (hard-coded) contract areas
"""
if not komr:
komr = [ '9302 Haugesund 2020-2025', '9304 Bergen', '9305 Sunnfjord' ]
komr = [ '9253 Agder elektro og veglys 2021-2024']
objliste = [ 540, # Trafikkmengde
105, # Fartsgrense
810, # Vinterdriftsklasse
482, # trafikkregistreringsstasjon
153, # Vrstasjon
64, # Ferjeleie
39, # Rasteplass
48, # Fortau
199, # Trr
15, # Grasdekker
274, # Blomsterbeplanting
511, # Busker
300 , # Naturomrde (ingen treff i Haugesund kontrakt)
517, # Artsrik vegkant
800, # Fremmede arter
67, # Tunnellp
846, # Skredsikring, bremsekjegler
850 # Skredsikring, forbygning
]
objliste = []
for enkontrakt in komr:
filnavn = nvdbapiv3.esriSikkerTekst( enkontrakt )
nvdb2gpkg( objliste, filnavn=filnavn, mittfilter={'kontraktsomrade' : enkontrakt })
def firefeltrapport( mittfilter={}):
"""
    Finds all four-lane roads in Norway, optionally within the given search criteria
    Uses the search object nvdbapiv3.nvdbVegnett from the library https://github.com/LtGlahn/nvdbapi-V3
    ARGUMENTS
        None
    KEYWORDS:
        mittfilter: Dictionary with search filters
    RETURNS
        GeoDataFrame with the result
"""
v = nvdbapiv3.nvdbVegnett()
    # Add a filter for phase = V (existing roads) only, unless it conflicts with another filter
if not 'vegsystemreferanse' in mittfilter.keys():
mittfilter['vegsystemreferanse'] = 'Ev,Rv,Fv,Kv,Sv,Pv'
if not 'kryssystem' in mittfilter.keys():
mittfilter['kryssystem'] = 'false'
if not 'sideanlegg' in mittfilter.keys():
mittfilter['sideanlegg'] = 'false'
v.filter( mittfilter )
    # Motor traffic only, only the top topology level, and not adskiltelop=MOT
v.filter( { 'trafikantgruppe' : 'K', 'detaljniva' : 'VT,VTKB', 'adskiltelop' : 'med,nei' } )
data = []
vegsegment = v.nesteForekomst()
while vegsegment:
if sjekkfelt( vegsegment, felttype='firefelt'):
vegsegment['feltoversikt'] = ','.join( vegsegment['feltoversikt'] )
vegsegment['geometri'] = vegsegment['geometri']['wkt']
vegsegment['vref'] = vegsegment['vegsystemreferanse']['kortform']
vegsegment['vegnr'] = vegsegment['vref'].split()[0]
vegsegment['vegkategori'] = vegsegment['vref'][0]
vegsegment['adskilte lp'] = vegsegment['vegsystemreferanse']['strekning']['adskilte_lp']
data.append( vegsegment )
vegsegment = v.nesteForekomst()
if len( data ) > 1:
mindf = pd.DataFrame( data )
mindf['geometry'] = mindf['geometri'].apply( wkt.loads )
mindf.drop( 'geometri', 1, inplace=True)
mindf.drop( 'kontraktsomrder', 1, inplace=True)
mindf.drop( 'riksvegruter', 1, inplace=True)
mindf.drop( 'href', 1, inplace=True)
mindf.drop( 'metadata', 1, inplace=True)
mindf.drop( 'kortform', 1, inplace=True)
mindf.drop( 'veglenkenummer', 1, inplace=True)
mindf.drop( 'segmentnummer', 1, inplace=True)
mindf.drop( 'startnode', 1, inplace=True)
mindf.drop( 'sluttnode', 1, inplace=True)
mindf.drop( 'referanse', 1, inplace=True)
mindf.drop( 'mlemetode', 1, inplace=True)
mindf.drop( 'mledato', 1, inplace=True)
minGdf = gpd.GeoDataFrame( mindf, geometry='geometry', crs=5973 )
return minGdf
else:
return None
def sjekkfelt( vegsegment, felttype='firefelt' ):
"""
    Checks what kinds of lanes exist on a road segment
    ARGUMENTS:
        vegsegment - dictionary with data about one piece of the road network, fetched from https://nvdbapiles-v3.atlas.vegvesen.no/vegnett/veglenkesekvenser/segmentert/
    KEYWORDS:
        felttype - which lane type to check for. Possible values:
            firefelt (default). Assumes a four-lane road means that lane numbers 1-4 are in use and are ordinary lanes, public-transport lanes or reversible lanes
            (more variants will be added as they are needed)
    RETURNS
        boolean - True if the lanes are of the requested type
"""
svar = False
vr = 'vegsystemreferanse'
sr = 'strekning'
if felttype == 'firefelt':
if 'feltoversikt' in vegsegment.keys() and 'detaljniv' in vegsegment.keys() and 'Vegtrase' in vegsegment['detaljniv']:
kjfelt = set( filtrerfeltoversikt( vegsegment['feltoversikt'], mittfilter=['vanlig', 'K', 'R']) )
if vr in vegsegment.keys():
if sr in vegsegment[vr] and 'adskilte_lp' in vegsegment[vr][sr]:
if vegsegment[vr][sr]['adskilte_lp'] == 'Nei' and kjfelt.issuperset( { 1, 2, 3, 4}):
svar = True
                # The last clause has occurred e.g. on Fv5724, a one-way tunnel at Oldenvatnet.
elif vegsegment[vr][sr]['adskilte_lp'] == 'Med' and len( kjfelt ) >= 2 and not kjfelt.issuperset( {1, 2} ):
svar = True
return svar
else:
raise NotImplementedError('Sjekkfelt: Sjekk for felt av type: ' + felttype + 'er ikke implementert (enn)' )
def filtrerfeltoversikt( feltoversikt, mittfilter=['vanlig', 'K', 'R' ]):
"""
    Returns a list of lane numbers filtered by which lane code, if any, they carry
    ARGUMENTS
        feltoversikt - List of lane codes for a road segment.
    KEYWORDS
        mittfilter=['vanlig', 'K', 'R' ] - List of codes for which lane types to count. See handbook V830
                        Nasjonalt vegreferansesystem https://www.vegvesen.no/_attachment/61505 for possible values; short version:
                        'vanlig' - Plain ordinary lane, lane number given as an integer with no letters.
                        'K' - public-transport (kollektiv) lane
                        'R' - reversible lane
                        'S' - cycle lane
                        'H' - right-turn lane
                        'V' - left-turn lane
                        'B' - extra lane for toll collection
    RETURNS
        List of lane numbers where only the lane types given by the mittfilter keyword are included
"""
data = [ ]
for felt in feltoversikt:
feltbokstav = re.findall( '[A-Za-z]', felt)
if feltbokstav:
feltbokstav = feltbokstav[0]
else:
feltbokstav = 'vanlig'
if feltbokstav in mittfilter:
feltnummer = int( re.split( '[A-Z]', felt)[0] )
data.append( feltnummer )
return data
| 39.183673 | 157 | 0.615712 |
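A hypothetical usage example of the helpers above; the object-type id comes from the module's own list (105 is speed limits), but the filter values are placeholders, not the author's recommendations:

```python
# Object type 105 = speed limits (fartsgrense); 5001 = Trondheim municipality.
nvdb2gpkg([105], filnavn='fartsgrense_trondheim', mittfilter={'kommune': 5001})

# Four-lane roads within a filter, saved to GeoPackage:
firefelt = firefeltrapport(mittfilter={'fylke': 50})
if firefelt is not None:
    firefelt.to_file('firefelt.gpkg', layer='firefelt', driver='GPKG')
```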
6b477719b2c91c9e3ee4ff6ba226b115ec30e5ff | 979 | py | Python | 019_CountingSundays.py | joetache4/project-euler | 82f9e25b414929d9f62d94905906ba2f57db7935 | [
"MIT"
] | null | null | null | 019_CountingSundays.py | joetache4/project-euler | 82f9e25b414929d9f62d94905906ba2f57db7935 | [
"MIT"
] | null | null | null | 019_CountingSundays.py | joetache4/project-euler | 82f9e25b414929d9f62d94905906ba2f57db7935 | [
"MIT"
] | null | null | null | """
You are given the following information, but you may prefer to do some research for yourself.
1 Jan 1900 was a Monday.
Thirty days has September,
April, June and November.
All the rest have thirty-one,
Saving February alone,
Which has twenty-eight, rain or shine.
And on leap years, twenty-nine.
A leap year occurs on any year evenly divisible by 4, but not on a century unless it is divisible by 400.
How many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)?
ans: 171
"""
# set to day of week for 1 Jan 1901 (Tuesday)
dow = 2
sum = 0
for y in range(1901, 2001):
for m in range(0, 12):
if dow == 0:
sum += 1
dow = (dow + no_days(m, y)) % 7
print(sum) | 23.878049 | 109 | 0.660878 |
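The helper `no_days(m, y)` referenced above is missing from this dump; a reconstruction that follows the rhyme and leap-year rule quoted in the docstring, with months indexed 0-11 to match the loop:

```python
def no_days(m, y):
    if m == 1:  # February
        leap = (y % 4 == 0 and y % 100 != 0) or (y % 400 == 0)
        return 29 if leap else 28
    if m in (3, 5, 8, 10):  # April, June, September, November
        return 30
    return 31
```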
6b496440b1b757ff1f65cdc922e139b550fcb6ef | 473 | py | Python | setup.py | aagaard/dbservice | 47daadab307e6744ef151dd4e0aacff27dcda881 | [
"MIT"
] | 1 | 2020-04-27T16:30:50.000Z | 2020-04-27T16:30:50.000Z | setup.py | aagaard/dbservice | 47daadab307e6744ef151dd4e0aacff27dcda881 | [
"MIT"
] | null | null | null | setup.py | aagaard/dbservice | 47daadab307e6744ef151dd4e0aacff27dcda881 | [
"MIT"
] | 1 | 2021-01-13T02:16:56.000Z | 2021-01-13T02:16:56.000Z | #!/usr/bin/env python3
# -*- encoding: utf-8 -*-
"""
Setup for the dbservice
"""
from setuptools import setup, find_packages
setup(
name='dbservice',
version='0.9',
description="Database service for storing meter data",
author="Sren Aagaard Mikkelsen",
author_email='[email protected]',
url='https://github.com/dbservice/dbservice',
packages=find_packages(),
package_data={'': ['static/*.*', 'templates/*.*']},
scripts=['manage.py'],
)
| 22.52381 | 58 | 0.646934 |
6b4af341d1bd006f2df5874fa788b8866cb5c77d | 800 | py | Python | venv/lib/python3.6/site-packages/ansible_collections/junipernetworks/junos/plugins/module_utils/network/junos/argspec/facts/facts.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 1 | 2020-01-22T13:11:23.000Z | 2020-01-22T13:11:23.000Z | venv/lib/python3.6/site-packages/ansible_collections/junipernetworks/junos/plugins/module_utils/network/junos/argspec/facts/facts.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | venv/lib/python3.6/site-packages/ansible_collections/junipernetworks/junos/plugins/module_utils/network/junos/argspec/facts/facts.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | null | null | null | #
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The arg spec for the junos facts module.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
| 25.806452 | 74 | 0.60625 |
6b4c6ac7304c74c6af0453d81ea3a3dfae8d7b81 | 1,033 | py | Python | server/dbcls/api/resources/authenticate.py | ripry/umakaviewer | e3df32313219d1b9d65edb6d180b2b4799d87e25 | [
"MIT"
] | 2 | 2017-08-17T02:01:48.000Z | 2019-12-19T12:11:08.000Z | server/dbcls/api/resources/authenticate.py | ripry/umakaviewer | e3df32313219d1b9d65edb6d180b2b4799d87e25 | [
"MIT"
] | 3 | 2021-04-04T01:25:07.000Z | 2021-10-20T06:07:29.000Z | server/dbcls/api/resources/authenticate.py | ripry/umakaviewer | e3df32313219d1b9d65edb6d180b2b4799d87e25 | [
"MIT"
] | 4 | 2020-12-01T04:20:55.000Z | 2020-12-04T04:51:54.000Z | from flask_restful import Resource, reqparse
from firebase_admin import auth as firebase_auth
from dbcls.models import User
parser = reqparse.RequestParser()
parser.add_argument('token', type=str, required=True, nullable=False)
| 33.322581 | 75 | 0.653437 |
860946d6e7694a280a705683f6a6189d61f153d3 | 475 | py | Python | GetJSONData_NLPParser.py | Feiyi-Ding/2021A | f599f0a21e05964fffce3dcf2d32ef70ddc3c75d | [
"Apache-2.0"
] | null | null | null | GetJSONData_NLPParser.py | Feiyi-Ding/2021A | f599f0a21e05964fffce3dcf2d32ef70ddc3c75d | [
"Apache-2.0"
] | 2 | 2021-03-22T17:57:27.000Z | 2021-03-22T17:58:01.000Z | GetJSONData_NLPParser.py | Feiyi-Ding/2021A | f599f0a21e05964fffce3dcf2d32ef70ddc3c75d | [
"Apache-2.0"
] | 4 | 2021-03-09T16:15:30.000Z | 2021-03-17T15:04:17.000Z | #Import required modules
import requests
import json
# Get json results for the required input
InputString = "kobe is a basketball player"
headers = {
'Content-type': 'application/json',
}
data = '{"text":InputString = '+ InputString + '}'
response = requests.post('http://66.76.242.198:9888/', data=data).json()
#Adding a test comment to check if the automatic git pull is working or not
#print(json.dumps(response, indent=4, sort_keys=True))
| 23.75 | 76 | 0.688421 |
86095983c39bff7a689e2233b004ba39842ac699 | 1,719 | py | Python | language/bert_extraction/steal_bert_classifier/utils/wiki103_sentencize.py | Xtuden-com/language | 70c0328968d5ffa1201c6fdecde45bbc4fec19fc | [
"Apache-2.0"
] | 1,199 | 2018-10-16T01:30:18.000Z | 2022-03-31T21:05:24.000Z | language/bert_extraction/steal_bert_classifier/utils/wiki103_sentencize.py | Xtuden-com/language | 70c0328968d5ffa1201c6fdecde45bbc4fec19fc | [
"Apache-2.0"
] | 116 | 2018-10-18T03:31:46.000Z | 2022-03-24T13:40:50.000Z | language/bert_extraction/steal_bert_classifier/utils/wiki103_sentencize.py | Xtuden-com/language | 70c0328968d5ffa1201c6fdecde45bbc4fec19fc | [
"Apache-2.0"
] | 303 | 2018-10-22T12:35:12.000Z | 2022-03-27T17:38:17.000Z | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Sentencize the raw wikitext103."""
import tensorflow.compat.v1 as tf
app = tf.app
flags = tf.flags
gfile = tf.gfile
logging = tf.logging
flags.DEFINE_string("wiki103_raw", None,
"Path to raw wikitext103 train corpus.")
flags.DEFINE_string("output_path", None,
"Path to output the processed dataset.")
FLAGS = flags.FLAGS
if __name__ == "__main__":
app.run(main)
| 29.135593 | 76 | 0.64107 |
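The `main` function that `app.run` expects is truncated in this dump. A hypothetical reconstruction consistent with the flags above — it would need to be defined before the `app.run(main)` call, and the sentencizing heuristic here is an assumption, not the paper's exact rule:

```python
def main(_):
    # Hypothetical sketch -- the real main() is not shown in this dump.
    with gfile.Open(FLAGS.wiki103_raw, "r") as f:
        lines = f.read().strip().split("\n")
    sentences = []
    for line in lines:
        line = line.strip()
        # Skip blank lines and section headings such as " = Title = ".
        if not line or line.startswith("="):
            continue
        # Crude sentencizing on the tokenized " . " delimiter (assumption).
        for sent in line.split(" . "):
            sent = sent.strip()
            if sent:
                sentences.append(sent + " .")
    logging.info("Number of sentences: %d", len(sentences))
    with gfile.Open(FLAGS.output_path, "w") as f:
        f.write("\n".join(sentences))
```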
860a02e12fea480e4c4b823d5c9ef02e0bf6f4a4 | 53 | py | Python | example_bots/any_to_any/__init__.py | budacom/trading-bots | 9ac362cc21ce185e7b974bf9bcc7480ff9c6b2aa | [
"MIT"
] | 21 | 2018-08-10T16:45:21.000Z | 2022-01-25T13:04:07.000Z | example_bots/any_to_any/__init__.py | rob-Hitchens/trading-bots | 16d53be0c32b45bee0520d8192629ade09727e24 | [
"MIT"
] | 6 | 2018-07-18T15:34:32.000Z | 2021-02-02T21:59:04.000Z | example_bots/any_to_any/__init__.py | rob-Hitchens/trading-bots | 16d53be0c32b45bee0520d8192629ade09727e24 | [
"MIT"
] | 10 | 2018-10-24T22:14:10.000Z | 2022-02-08T17:21:47.000Z | default_bot = 'example_bots.any_to_any.bot.AnyToAny'
| 26.5 | 52 | 0.830189 |
860a91391db83eb979d9849bfe427e0dbb8bf3eb | 1,171 | py | Python | helpers.py | owenjones/CaBot | dd47c077b21cbcf52c0ffd2e30b47fb736a41ebc | [
"MIT"
] | 3 | 2020-03-26T11:43:40.000Z | 2021-12-27T18:26:06.000Z | helpers.py | owenjones/CaBot | dd47c077b21cbcf52c0ffd2e30b47fb736a41ebc | [
"MIT"
] | 2 | 2021-05-14T01:31:12.000Z | 2021-08-23T16:07:44.000Z | helpers.py | owenjones/CaBot | dd47c077b21cbcf52c0ffd2e30b47fb736a41ebc | [
"MIT"
] | 1 | 2020-04-22T19:06:43.000Z | 2020-04-22T19:06:43.000Z | from server import roles
| 20.189655 | 63 | 0.679761 |
860af185b3aec78bf051659802424a1b61b8f5ba | 6,742 | py | Python | databuilder/loader/file_system_neo4j_csv_loader.py | davcamer/amundsendatabuilder | 1bd6cd5c30413640d4c377dc3c59c283e86347eb | [
"Apache-2.0"
] | null | null | null | databuilder/loader/file_system_neo4j_csv_loader.py | davcamer/amundsendatabuilder | 1bd6cd5c30413640d4c377dc3c59c283e86347eb | [
"Apache-2.0"
] | null | null | null | databuilder/loader/file_system_neo4j_csv_loader.py | davcamer/amundsendatabuilder | 1bd6cd5c30413640d4c377dc3c59c283e86347eb | [
"Apache-2.0"
] | 1 | 2019-09-21T23:56:41.000Z | 2019-09-21T23:56:41.000Z | import csv
import logging
import os
import shutil
from csv import DictWriter # noqa: F401
from pyhocon import ConfigTree, ConfigFactory # noqa: F401
from typing import Dict, Any # noqa: F401
from databuilder.job.base_job import Job
from databuilder.loader.base_loader import Loader
from databuilder.models.neo4j_csv_serde import NODE_LABEL, \
RELATION_START_LABEL, RELATION_END_LABEL, RELATION_TYPE
from databuilder.models.neo4j_csv_serde import Neo4jCsvSerializable # noqa: F401
from databuilder.utils.closer import Closer
LOGGER = logging.getLogger(__name__)
| 35.671958 | 82 | 0.590478 |
860b3ffda1922298f17135c358d64932d9e08e95 | 3,015 | py | Python | sample_program_04_02_knn.py | pepsinal/python_doe_kspub | 65ae5c2d214f1a34fa242fee7d63453c81d56bfe | [
"MIT"
] | 16 | 2021-01-11T17:57:05.000Z | 2022-03-29T07:04:26.000Z | sample_program_04_02_knn.py | pepsinal/python_doe_kspub | 65ae5c2d214f1a34fa242fee7d63453c81d56bfe | [
"MIT"
] | 2 | 2021-08-12T03:18:52.000Z | 2021-08-13T06:31:55.000Z | sample_program_04_02_knn.py | pepsinal/python_doe_kspub | 65ae5c2d214f1a34fa242fee7d63453c81d56bfe | [
"MIT"
] | 14 | 2021-06-05T11:17:45.000Z | 2022-03-26T02:56:40.000Z | # -*- coding: utf-8 -*-
"""
@author: Hiromasa Kaneko
"""
import pandas as pd
from sklearn.neighbors import NearestNeighbors  # k-NN
k_in_knn = 5  # k in k-NN
rate_of_training_samples_inside_ad = 0.96  # fraction of training samples to keep inside the applicability domain (AD)
dataset = pd.read_csv('resin.csv', index_col=0, header=0)
x_prediction = pd.read_csv('resin_prediction.csv', index_col=0, header=0)
# split into objective and explanatory variables
y = dataset.iloc[:, 0]  # objective variable
x = dataset.iloc[:, 1:]  # explanatory variables
# drop variables whose standard deviation is 0
deleting_variables = x.columns[x.std() == 0]
x = x.drop(deleting_variables, axis=1)
x_prediction = x_prediction.drop(deleting_variables, axis=1)
# autoscaling (standardize with the training mean and std)
autoscaled_x = (x - x.mean()) / x.std()
autoscaled_x_prediction = (x_prediction - x.mean()) / x.std()
# build the k-NN-based AD model
ad_model = NearestNeighbors(n_neighbors=k_in_knn, metric='euclidean')  # AD model
ad_model.fit(autoscaled_x)  # fit the k-NN AD model on the training x
# distances from each training sample to its k nearest neighbours; the nearest
# "neighbour" is the sample itself (distance 0), so request k_in_knn + 1
knn_distance_train, knn_index_train = ad_model.kneighbors(autoscaled_x, n_neighbors=k_in_knn + 1)
knn_distance_train = pd.DataFrame(knn_distance_train, index=autoscaled_x.index)  # convert to DataFrame
mean_of_knn_distance_train = pd.DataFrame(knn_distance_train.iloc[:, 1:].mean(axis=1),
                                          columns=['mean_of_knn_distance'])  # mean distance to the k_in_knn neighbours
mean_of_knn_distance_train.to_csv('mean_of_knn_distance_train.csv')  # save as csv
# threshold such that rate_of_training_samples_inside_ad * 100 % of training samples fall inside the AD
sorted_mean_of_knn_distance_train = mean_of_knn_distance_train.iloc[:, 0].sort_values(ascending=True)  # sort ascending
ad_threshold = sorted_mean_of_knn_distance_train.iloc[
    round(autoscaled_x.shape[0] * rate_of_training_samples_inside_ad) - 1]
# flag training samples inside the AD
inside_ad_flag_train = mean_of_knn_distance_train <= ad_threshold  # TRUE if inside the AD
inside_ad_flag_train.columns=['inside_ad_flag']
inside_ad_flag_train.to_csv('inside_ad_flag_train_knn.csv')  # save as csv
# k-NN distances for the prediction samples
knn_distance_prediction, knn_index_prediction = ad_model.kneighbors(autoscaled_x_prediction)
knn_distance_prediction = pd.DataFrame(knn_distance_prediction, index=x_prediction.index)  # convert to DataFrame
mean_of_knn_distance_prediction = pd.DataFrame(knn_distance_prediction.mean(axis=1),
                                               columns=['mean_of_knn_distance'])  # mean distance to the k_in_knn neighbours
mean_of_knn_distance_prediction.to_csv('mean_of_knn_distance_prediction.csv')  # save as csv
# flag prediction samples inside the AD
inside_ad_flag_prediction = mean_of_knn_distance_prediction <= ad_threshold  # TRUE if inside the AD
inside_ad_flag_prediction.columns=['inside_ad_flag']
inside_ad_flag_prediction.to_csv('inside_ad_flag_prediction_knn.csv')  # save as csv
| 49.42623 | 121 | 0.769818 |
860b82a531bcd228b8d28c903681d9b70c4a8b49 | 2,793 | py | Python | topology.py | destinysky/nsh_sfc | 290fa49df2880527e0b7844bf3bec4d55c4945a6 | [
"Apache-2.0"
] | 2 | 2020-10-26T17:22:04.000Z | 2020-11-11T13:19:08.000Z | topology.py | destinysky/nsh_sfc | 290fa49df2880527e0b7844bf3bec4d55c4945a6 | [
"Apache-2.0"
] | null | null | null | topology.py | destinysky/nsh_sfc | 290fa49df2880527e0b7844bf3bec4d55c4945a6 | [
"Apache-2.0"
] | 3 | 2020-03-28T12:53:35.000Z | 2021-06-29T18:13:43.000Z | #!/usr/bin/python
"""
"""
from mininet.net import Mininet
from mininet.node import Controller, RemoteController, OVSKernelSwitch,UserSwitch
#OVSLegacyKernelSwitch, UserSwitch
from mininet.cli import CLI
from mininet.log import setLogLevel
from mininet.link import Link, TCLink
#conf_port=50000
conf_ip_1='10.0.0.254'
conf_mac_1='11:12:13:14:15:16'
def topology():
"Create a network."
net = Mininet( controller=RemoteController, link=TCLink, switch=OVSKernelSwitch )
print "*** Creating nodes"
h1 = net.addHost( 'h1', mac='00:00:00:00:00:01', ip='10.0.0.1/24' )
h2 = net.addHost( 'h2', mac='00:00:00:00:00:02', ip='10.0.0.2/24' )
h3 = net.addHost( 'h3', mac='00:00:00:00:00:03', ip='10.0.0.3/24' )
h4 = net.addHost( 'h4', mac='00:00:00:00:00:04', ip='10.0.0.4/24' )
h5 = net.addHost( 'h5', mac='00:00:00:00:00:05', ip='10.0.0.5/24' )
s1 = net.addSwitch( 's1', listenPort=6671 )
s2 = net.addSwitch( 's2', listenPort=6672 )
s3 = net.addSwitch( 's3', listenPort=6673 )
s4 = net.addSwitch( 's4', listenPort=6674 )
s5 = net.addSwitch( 's5', listenPort=6675 )
c1 = net.addController( 'c1', controller=RemoteController, ip='127.0.0.1', port=6633 )
print "*** Creating links"
net.addLink(s1, h1)
net.addLink(s2, h2)
net.addLink(s3, h3)
net.addLink(s4, h4)
net.addLink(s5, h5)
net.addLink(s1, s2)
net.addLink(s2, s3)
net.addLink(s3, s4)
net.addLink(s4, s5)
print "*** Starting network"
net.build()
h1.cmd('ip route add '+conf_ip_1+'/32 dev h1-eth0')
h1.cmd('sudo arp -i h1-eth0 -s '+conf_ip_1+' '+conf_mac_1)
h1.cmd('sysctl -w net.ipv4.ip_forward=1')
h1.cmd('python3 listen.py &')
h2.cmd('ip route add '+conf_ip_1+'/32 dev h2-eth0')
h2.cmd('sudo arp -i h2-eth0 -s '+conf_ip_1+' '+conf_mac_1)
h2.cmd('sysctl -w net.ipv4.ip_forward=1')
h2.cmd('python3 listen.py &')
h3.cmd('ip route add '+conf_ip_1+'/32 dev h3-eth0')
h3.cmd('sudo arp -i h3-eth0 -s '+conf_ip_1+' '+conf_mac_1)
h3.cmd('sysctl -w net.ipv4.ip_forward=1')
h3.cmd('python3 listen.py &')
h4.cmd('ip route add '+conf_ip_1+'/32 dev h4-eth0')
h4.cmd('sudo arp -i h4-eth0 -s '+conf_ip_1+' '+conf_mac_1)
h4.cmd('sysctl -w net.ipv4.ip_forward=1')
h4.cmd('python3 listen.py &')
h5.cmd('ip route add '+conf_ip_1+'/32 dev h5-eth0')
h5.cmd('sudo arp -i h5-eth0 -s '+conf_ip_1+' '+conf_mac_1)
h5.cmd('sysctl -w net.ipv4.ip_forward=1')
h5.cmd('python3 listen.py &')
c1.start()
s1.start( [c1] )
s2.start( [c1] )
s3.start( [c1] )
s4.start( [c1] )
s5.start( [c1] )
print "*** Running CLI"
CLI( net )
print "*** Stopping network"
net.stop()
if __name__ == '__main__':
setLogLevel( 'info' )
topology() | 30.358696 | 90 | 0.617257 |
860d13de1aea5d89a236db351b3f802f70a454be | 815 | py | Python | lampara/lamp.py | gventuraagramonte/python | d96796c302f2f423a8e949f9c7d33a3bfabf8a0f | [
"MIT"
] | null | null | null | lampara/lamp.py | gventuraagramonte/python | d96796c302f2f423a8e949f9c7d33a3bfabf8a0f | [
"MIT"
] | null | null | null | lampara/lamp.py | gventuraagramonte/python | d96796c302f2f423a8e949f9c7d33a3bfabf8a0f | [
"MIT"
] | null | null | null |
# Class definition
# before starting a class, it is declared as follows
| 19.404762 | 114 | 0.490798 |
860d27b54af610b3354ec914d17139eb593aede5 | 1,127 | py | Python | lib/galaxy/model/migrate/versions/0084_add_ldda_id_to_implicit_conversion_table.py | sneumann/galaxy | f6011bab5b8adbabae4986a45849bb9158ffc8bb | [
"CC-BY-3.0"
] | 1 | 2019-07-27T19:30:55.000Z | 2019-07-27T19:30:55.000Z | lib/galaxy/model/migrate/versions/0084_add_ldda_id_to_implicit_conversion_table.py | sneumann/galaxy | f6011bab5b8adbabae4986a45849bb9158ffc8bb | [
"CC-BY-3.0"
] | 4 | 2021-02-08T20:28:34.000Z | 2022-03-02T02:52:55.000Z | lib/galaxy/model/migrate/versions/0084_add_ldda_id_to_implicit_conversion_table.py | sneumann/galaxy | f6011bab5b8adbabae4986a45849bb9158ffc8bb | [
"CC-BY-3.0"
] | 1 | 2018-05-30T07:38:54.000Z | 2018-05-30T07:38:54.000Z | """
Migration script to add 'ldda_id' column to the implicitly_converted_dataset_association table.
"""
from __future__ import print_function
import logging
from sqlalchemy import (
Column,
ForeignKey,
Integer,
MetaData
)
from galaxy.model.migrate.versions.util import (
add_column,
drop_column
)
log = logging.getLogger(__name__)
metadata = MetaData()
| 26.833333 | 126 | 0.747116 |
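The `upgrade`/`downgrade` functions are truncated in this dump. A hedged sketch of what they plausibly contain, based on the module docstring and the imported helpers — the helper signatures and the index name are assumptions, not verified against Galaxy:

```python
def upgrade(migrate_engine):
    metadata.bind = migrate_engine
    print(__doc__)
    metadata.reflect()
    # Assumed column/table names -- inferred from the module docstring.
    c = Column("ldda_id", Integer,
               ForeignKey("library_dataset_dataset_association.id"),
               index=True, nullable=True)
    add_column(c, "implicitly_converted_dataset_association", metadata,
               index_name="ix_implicitly_converted_dataset_assoc_ldda_id")


def downgrade(migrate_engine):
    metadata.bind = migrate_engine
    metadata.reflect()
    drop_column("ldda_id", "implicitly_converted_dataset_association", metadata)
```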
860d477a0db1e737249b6ea5b90c2c542a001e37 | 102 | py | Python | ds.py | tobiichiorigami1/csp | e1f419869a0a1aa3e39aeb5888571267be5d80bd | [
"bzip2-1.0.6"
] | null | null | null | ds.py | tobiichiorigami1/csp | e1f419869a0a1aa3e39aeb5888571267be5d80bd | [
"bzip2-1.0.6"
] | null | null | null | ds.py | tobiichiorigami1/csp | e1f419869a0a1aa3e39aeb5888571267be5d80bd | [
"bzip2-1.0.6"
] | null | null | null | votes_t_shape = [3, 0, 1, 2]
for i in range(6 - 4):
votes_t_shape += [i + 4]
print(votes_t_shape)
| 20.4 | 28 | 0.617647 |
860e80203a82d7ffdb492d80f10371c72ae4d44a | 8,231 | py | Python | scripts/adam/cc100_baselines.py | TimDettmers/sched | e16735f2c2eb6a51f5cf29ead534041574034e2e | [
"MIT"
] | 1 | 2020-04-22T17:49:48.000Z | 2020-04-22T17:49:48.000Z | scripts/adam/cc100_baselines.py | TimDettmers/sched | e16735f2c2eb6a51f5cf29ead534041574034e2e | [
"MIT"
] | null | null | null | scripts/adam/cc100_baselines.py | TimDettmers/sched | e16735f2c2eb6a51f5cf29ead534041574034e2e | [
"MIT"
] | null | null | null | import numpy as np
import itertools
import gpuscheduler
import argparse
import os
import uuid
import hashlib
import glob
import math
from itertools import product
from torch.optim.lr_scheduler import OneCycleLR
from os.path import join
parser = argparse.ArgumentParser(description='Compute script.')
parser.add_argument('--dry', action='store_true')
parser.add_argument('--verbose', action='store_true')
parser.add_argument('--p', type=float, default=1.0, help='Probability with which to select a configuration.')
args = parser.parse_args()
gpus = 128
cmd = 'fairseq-train /private/home/namangoyal/dataset/data-bin/bookwiki_CC-NEWS_openwebtext_stories_cc100-mmap2-bin --distributed-world-size {0} --distributed-port 54187 --fp16 --memory-efficient-fp16 --num-workers 2 --criterion cross_entropy --task language_modeling --sample-break-mode none --log-interval 25 --tokens-per-sample 1024 --arch transformer_lm_big --share-decoder-input-output-embed --decoder-layers 28 --decoder-attention-heads 16 --dropout 0.0 --attention-dropout 0.0 --activation-dropout 0.0 --activation-fn relu --no-epoch-checkpoints --keep-best-checkpoints 0 --keep-interval-updates 0 --keep-last-epochs 0 --save-interval-updates 1000 --log-format simple --fp16-no-flatten-grads --ignore-unused-valid-subsets'.format(gpus)
args2 = {}
name = 'blockwise5'
constraint = 'volta32gb'
# 1024 tokens * 8 update_freq * 56250 steps = 0.4608e9 tokens -> optimal batch size 3460
# model sizes: 1.92bn, 2.43bn, 1.41bn
logfolder = 'adam/cc100/{0}'.format(name)
ckp_name = logfolder
#time_hours = 24*2
cores_per_job = 5
mem = 56*(8 if gpus > 8 else gpus)
num_seeds = 1
seed_offset = 5
time_hours = 72
time_minutes = 0
#partition = 'learnlab,learnfair,scavenge'
partition = 'learnfair,learnlab'
#partition = 'learnfair'
#partition = 'uninterruptible'
change_dir = 'fairseq_private'
repo = 'fairseq_private'
exclude = ''
s = gpuscheduler.HyakScheduler(verbose=args.verbose, account='', partition=partition, use_gres=False)
fp16 = True
args3 = {}
args2['lr-scheduler'] = 'polynomial_decay'
args2['warmup-updates'] = 2000
args2['max-update'] = 56250
args2['total-num-update'] = 56250
#args2['lr-scheduler'] = 'cosine'
#args2['warmup-updates'] = 3000
#args2['max-update'] = 56250*4
args2['fp16-scale-window'] = 250
args2['clip-norm'] = 0.4
#args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32, 'quantile', 1), (False, 8, 'quantile', 1), (False, 8, 'dynamic_tree', 1), (False, 8, 'quantile', 25)]
#args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32, 'quantile', 1)]#, (False, 8, 'quantile', 1), (False, 8, 'dynamic_tree', 1), (False, 8, 'quantile', 25)]
#args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32, 'quantile', 1)]
#args3['adam8bits-offset'] = [1/512]
#args3['prob-quant'] = [False]
#args3['dist-scale'] = [1.0]
#args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)]
#args3['decoder-embed-dim'] = [2048+256]
#args3['decoder-ffn-embed-dim'] = [8192+2048]
#args3['max-tokens'] = [3072]
#args3['update-freq'] = [2]
key = ('max-tokens', 'decoder-embed-dim', 'decoder-ffn-embed-dim', 'update-freq', 'lr')
#key = ('max-tokens', 'decoder-embed-dim', 'decoder-ffn-embed-dim', 'update-freq')
args3[key] = []
#lrkey = ('lr', 'warmup-init-lr')
#args3[lrkey] = []
# 32-bit baseline
#args3['optimizer'] = ['adam']
#args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)]
#args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32, 'quantile', 1)]
##args3[key].append((2048,2048,8192,8, 0.00075))
#args3[key].append((2048,2048,8192,2))
#
#lr = 0.003239 + (-0.0001395*math.log(1.41e9))
#args3[lrkey].append((lr, lr+1e-8, lr*0.1, lr*0.1 + 1e-8))
# adafactor
#args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)]
#args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 32, 'quantile', 1)]
#args2['optimizer'] = 'adafactor'
#args2['beta1'] = 0.9
#args2['decay-rate'] = 0.999
##args3[key].append((2048,2048,8192,8, 0.00075))
#args3[key].append((2048,2048+256,8192+2048,2))
##args3[key].append((2048,2688,10752,2))
#
#lr = 0.003239 + (-0.0001395*math.log(1.92e9))
#args3[lrkey].append((lr, lr+1e-8, lr*0.1, lr*0.1 + 1e-8))
# 8-bit
#args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)]
#args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)]
#args3[('percentile-clipping', 'clip-norm')] = [(5, 0.0)]
#args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 8, 'quantile', 1)]
#args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 8, 'dynamic_tree', 1)]
#args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 8, 'dynamic_tree', 1), (False, 8, 'quantile', 1)]
args3['optimizer'] = ['adam']
args3[('use-bnb', 'optim-bits')] = [(True, 8)]
args3[('stable-emb', 'no-scale-embedding')] = [(True, True)]
#args3[('use-bnb', 'stable-emb', 'no-scale-embedding')] = [(True, True, True), (False, False, False)]
#args3[('use-bnb', 'stable-emb', 'no-scale-embedding')] = [(False, False, False)]
#args3[('use-bnb', 'stable-emb', 'no-scale-embedding', 'optim-bits')] = [(True, True, True, True)]
args3[key].append((2048,2048,8192,8, 0.00075))
#args3[key].append((2048,2048,8192,8, 0.00045))
#args3[key].append((2048,2688,10752,2))
#args3['use-emb-norm'] = [True]
#lr = 0.003239 + (-0.0001395*math.log(2.43e9))
#args3[lrkey].append((lr, 0.0))
#args2['train-subset'] = 'train11'
args4 = []
args5 = {}
args6 = {}
rdm = np.random.RandomState(5345)
for key, value in args2.items():
cmd = cmd + ' --{0} {1}'.format(key, value)
args_prod = []
for key, values in args3.items():
if isinstance(key, tuple):
keyvalues = []
for tups in values:
arg = ''
for i, v in enumerate(tups):
if v is True: v = ''
if v is False: continue
if len(key[i]) == 0:
arg += '{0} '.format(v)
else:
arg += '--{0} {1} '.format(key[i], v)
keyvalues.append(arg)
elif isinstance(key, str):
keyvalues = []
for v in values:
if v is True: v = ''
if v is False:
keyvalues.append('')
else:
keyvalues.append(' --{0} {1}'.format(key, v))
args_prod.append(keyvalues)
if len(args_prod) >= 2:
args_prod = list(product(*args_prod))
else:
new_args = []
if len(args_prod) > 0:
for arg in args_prod[0]:
new_args.append([arg])
args_prod = new_args
jobs = []
if len(args4) == 0: args4.append('')
for seed in range(num_seeds):
seed = seed + seed_offset
for arg4 in args4:
if len(args_prod) == 0: args_prod.append(('', ''))
for i, values in enumerate(args_prod):
job_cmd = cmd + arg4
for val in values:
job_cmd += ' {0}' .format(val)
#job_cmd += ' --checkpoint /checkpoint/timdettmers/{1}/{0}/model.pt'.format(hashlib.md5(str(job_cmd).encode('utf-8')).hexdigest(), ckp_name)
if not fp16: job_cmd = job_cmd.replace('--fp16 ', ' ')
job_cmd = job_cmd + ' --seed {0}'.format(seed)
checkpoint_dir = '/checkpoint/timdettmers/{1}/{0} '.format(hashlib.md5(str(job_cmd).encode('utf-8')).hexdigest(), ckp_name)
save_dir = ' --save-dir {0}'.format(checkpoint_dir)
job_cmd = job_cmd + save_dir
cmds = [job_cmd]
if rdm.rand(1) <= args.p:
jobs.append(job_cmd)
s.add_job(logfolder, repo, change_dir, cmds, time_hours, fp16, cores=cores_per_job, mem=mem, constraint=constraint, exclude=exclude, time_minutes=time_minutes, gpus=gpus)
if args.dry:
for i, job in enumerate(jobs):
print(i, job)
print('')
print('Total jobs', len(jobs))
print('Time hours: {0}'.format(time_hours))
print('GPUs: {0}'.format(gpus))
print('Jobs will be written to: {0}'.format(join('/private/home/timdettmers/logs/', logfolder)))
print('Jobs will be run on: {0}'.format(partition))
print('Run in folder: {0}'.format(change_dir))
if not args.dry:
s.run_jobs()
| 37.756881 | 773 | 0.628721 |
860eaaee93a0cd4aceb0ebc7da1f6e2b65f05589 | 224 | py | Python | boa3_test/test_sc/event_test/EventNep5Transfer.py | hal0x2328/neo3-boa | 6825a3533384cb01660773050719402a9703065b | [
"Apache-2.0"
] | 25 | 2020-07-22T19:37:43.000Z | 2022-03-08T03:23:55.000Z | boa3_test/test_sc/event_test/EventNep5Transfer.py | hal0x2328/neo3-boa | 6825a3533384cb01660773050719402a9703065b | [
"Apache-2.0"
] | 419 | 2020-04-23T17:48:14.000Z | 2022-03-31T13:17:45.000Z | boa3_test/test_sc/event_test/EventNep5Transfer.py | hal0x2328/neo3-boa | 6825a3533384cb01660773050719402a9703065b | [
"Apache-2.0"
] | 15 | 2020-05-21T21:54:24.000Z | 2021-11-18T06:17:24.000Z | from boa3.builtin import public
from boa3.builtin.contract import Nep5TransferEvent
transfer = Nep5TransferEvent
| 18.666667 | 56 | 0.785714 |
860efe37f66eefaa650bbcf92ef4ff07b3bc6d05 | 1,844 | py | Python | abtest/views.py | SchuylerGoodman/topicalguide | 7c26c8be8e1dddb7bf2be33ea9a7ba59034bf620 | [
"PostgreSQL"
] | null | null | null | abtest/views.py | SchuylerGoodman/topicalguide | 7c26c8be8e1dddb7bf2be33ea9a7ba59034bf620 | [
"PostgreSQL"
] | null | null | null | abtest/views.py | SchuylerGoodman/topicalguide | 7c26c8be8e1dddb7bf2be33ea9a7ba59034bf620 | [
"PostgreSQL"
] | null | null | null | # The Topical Guide
# Copyright 2010-2011 Brigham Young University
#
# This file is part of the Topical Guide <http://nlp.cs.byu.edu/topic_browser>.
#
# The Topical Guide is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# The Topical Guide is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License
# for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with the Topical Guide. If not, see <http://www.gnu.org/licenses/>.
#
# If you have inquiries regarding any further use of the Topical Guide, please
# contact the Copyright Licensing Office, Brigham Young University, 3760 HBLL,
# Provo, UT 84602, (801) 422-9339 or 422-3821, e-mail [email protected].
from __future__ import print_function
from django.shortcuts import render, redirect
from django.http import HttpResponse
import abtest
from abtest.settings import TEST_LIST
from visualize import root
# Create your views here.
# This view is called when the given url does not match anything
| 37.632653 | 79 | 0.750542 |
860f02df53bc5c8189989d03588264d399ebda12 | 2,086 | py | Python | neurodocker/reprozip/tests/test_merge.py | sulantha2006/neurodocker | d03fe865ae05fea2f7ce9a8b417717dae7bd640f | [
"Apache-2.0"
] | null | null | null | neurodocker/reprozip/tests/test_merge.py | sulantha2006/neurodocker | d03fe865ae05fea2f7ce9a8b417717dae7bd640f | [
"Apache-2.0"
] | null | null | null | neurodocker/reprozip/tests/test_merge.py | sulantha2006/neurodocker | d03fe865ae05fea2f7ce9a8b417717dae7bd640f | [
"Apache-2.0"
] | 1 | 2020-01-17T17:30:16.000Z | 2020-01-17T17:30:16.000Z | """Tests for merge.py."""
from __future__ import absolute_import, division, print_function
from glob import glob
import os
import tarfile
import tempfile
from neurodocker.docker import client
from neurodocker.reprozip.trace import ReproZipMinimizer
from neurodocker.reprozip.merge import merge_pack_files
def _create_packfile(commands, dir):
"""Create packfile from list `commands` in debian:stretch container."""
container = client.containers.run('debian:stretch', detach=True, tty=True,
security_opt=['seccomp:unconfined'])
try:
minimizer = ReproZipMinimizer(container.id, commands,
packfile_save_dir=dir)
packfile_path = minimizer.run()
except:
raise
finally:
container.stop()
container.remove()
return packfile_path
| 34.196721 | 78 | 0.64861 |
860f4df3a0a1148896e3af7d663a6706e11e5b27 | 2,429 | py | Python | build/step-3-kivy-almost-manylinux/scripts/redirect_html5.py | dolang/build-kivy-linux | bb3e6dce956659d94604b524aa6702e8c390e15a | [
"MIT"
] | null | null | null | build/step-3-kivy-almost-manylinux/scripts/redirect_html5.py | dolang/build-kivy-linux | bb3e6dce956659d94604b524aa6702e8c390e15a | [
"MIT"
] | null | null | null | build/step-3-kivy-almost-manylinux/scripts/redirect_html5.py | dolang/build-kivy-linux | bb3e6dce956659d94604b524aa6702e8c390e15a | [
"MIT"
] | null | null | null | """
HTML5 contexts.
:author: Dominik Lang
:license: MIT
"""
import contextlib
import io
import sys
__all__ = ['create_document', 'tag', 'as_link']
| 28.244186 | 76 | 0.573075 |
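The module body — including the names promised by `__all__` — is truncated here. For illustration, one plausible shape of the `tag` helper, hypothetical rather than the author's implementation:

```python
@contextlib.contextmanager
def tag(name, out=sys.stdout, **attrs):
    # Render '<name attr="value">', run the body, then close the element.
    rendered = ''.join(' {}="{}"'.format(k.rstrip('_'), v) for k, v in attrs.items())
    out.write('<{}{}>'.format(name, rendered))
    yield
    out.write('</{}>'.format(name))
```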
860f856dd45e64104260a9b161c8dc5f275852d1 | 1,454 | py | Python | lab/hw03-part-i_nov14.py | jzacsh/neuralnets-cmp464 | de35bbba93b87446b231bf012a8de5acc7896a04 | [
"Apache-2.0"
] | 1 | 2017-08-30T04:31:00.000Z | 2017-08-30T04:31:00.000Z | lab/hw03-part-i_nov14.py | jzacsh/neuralnets-cmp464 | de35bbba93b87446b231bf012a8de5acc7896a04 | [
"Apache-2.0"
] | 1 | 2017-10-10T23:30:05.000Z | 2017-10-16T00:32:09.000Z | lab/hw03-part-i_nov14.py | jzacsh/neuralnets-cmp464 | de35bbba93b87446b231bf012a8de5acc7896a04 | [
"Apache-2.0"
] | null | null | null | """
Jonathan Zacsh's solution to homework #3, Nov 14., Part I
"""
# Per homework instructions, following lead from matlab example by professor:
# http://comet.lehman.cuny.edu/schneider/Fall17/CMP464/Maple/PartialDerivatives1.pdf
import sys
import tensorflow as tf
import tempfile
import os
import numpy as np
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
# not really doing intersting things in this lab, so just ignore optimization
# g(x) = x^4+2x-7 ; per matlab example
# g'(x) = 4x^3+2
fExFourth = Differentiable("fExFourth",
lambda x: tf.add_n([tf.pow(x, 4), tf.multiply(2, x), -7]),
lambda x: tf.add_n([tf.multiply(4, tf.pow(x, 3)), 2]))
tFofTwo = fExFourth.func(2)
tFofDerivTwo = fExFourth.deriv(2)
log_dir = tempfile.mkdtemp(prefix="hw3-nov14-parti")
print(log_dir)
with tf.Session() as sess:
writer = tf.summary.FileWriter(log_dir, sess.graph)
fOfTwo, fDerivOfTwo = results = sess.run([tFofTwo, tFofDerivTwo])
sys.stderr.write("results:\n\tf(2)=%s\n\tf'(2)=%s\n" % (fOfTwo, fDerivOfTwo))
# note: only needed when doing a *loop* of sess.run() calls, and want to see
# intermediary results per-loop.
#writer.add_summary(results)
writer.flush()
writer.close()
| 31.608696 | 86 | 0.681568 |
860ffd8531729695796f989eadffa27a2953a3a7 | 8,437 | py | Python | modules/experiments_bc/set_tp.py | GChrysostomou/tasc | d943de343d725b99fa1a1ad201b32a21e5970801 | [
"MIT"
] | 2 | 2021-12-27T12:46:48.000Z | 2022-03-01T11:43:41.000Z | modules/experiments_bc/set_tp.py | tbose20/D-Ref | eda6170a72838b89637df241dd5619e001f3afdb | [
"MIT"
] | null | null | null | modules/experiments_bc/set_tp.py | tbose20/D-Ref | eda6170a72838b89637df241dd5619e001f3afdb | [
"MIT"
] | 3 | 2021-11-10T15:15:02.000Z | 2022-03-01T11:44:35.000Z | import torch
import torch.nn as nn
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.metrics import *
from sklearn.metrics import precision_recall_fscore_support as prfs
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
| 34.157895 | 96 | 0.505393 |
8610097182707b2aa40abc68e79c148fa664b19d | 4,224 | py | Python | helios/tasks.py | mattmurch/helios-server | c4f5409bbf7117fc561774208c07801b9ae61ff2 | [
"Apache-2.0"
] | null | null | null | helios/tasks.py | mattmurch/helios-server | c4f5409bbf7117fc561774208c07801b9ae61ff2 | [
"Apache-2.0"
] | 2 | 2018-08-20T18:44:57.000Z | 2019-01-31T17:45:08.000Z | helios/tasks.py | mattmurch/helios-server | c4f5409bbf7117fc561774208c07801b9ae61ff2 | [
"Apache-2.0"
] | 1 | 2017-12-10T15:33:18.000Z | 2017-12-10T15:33:18.000Z | """
Celery queued tasks for Helios
2010-08-01
[email protected]
"""
import copy
from celery import shared_task
from celery.utils.log import get_logger
import signals
from models import CastVote, Election, Voter, VoterFile
from view_utils import render_template_raw
| 29.746479 | 111 | 0.722775 |
8611c5caf6cad3b09e4113e9f2732c41ec4305ae | 992 | py | Python | tests/conftest.py | AlanRosenthal/virtual-dealer | 5c5689172b38b122a69e5ca244497646bf9d8fa8 | [
"MIT"
] | 1 | 2020-03-23T21:03:46.000Z | 2020-03-23T21:03:46.000Z | tests/conftest.py | AlanRosenthal/virtual-dealer | 5c5689172b38b122a69e5ca244497646bf9d8fa8 | [
"MIT"
] | null | null | null | tests/conftest.py | AlanRosenthal/virtual-dealer | 5c5689172b38b122a69e5ca244497646bf9d8fa8 | [
"MIT"
] | null | null | null | """
pytest fixtures
"""
import unittest.mock as mock
import pytest
import virtual_dealer.api
| 20.244898 | 87 | 0.688508 |
8612815990d7f299a2f7af8454d7502cc4069e32 | 4,890 | py | Python | corehq/apps/fixtures/tests.py | dslowikowski/commcare-hq | ad8885cf8dab69dc85cb64f37aeaf06106124797 | [
"BSD-3-Clause"
] | 1 | 2017-02-10T03:14:51.000Z | 2017-02-10T03:14:51.000Z | corehq/apps/fixtures/tests.py | dslowikowski/commcare-hq | ad8885cf8dab69dc85cb64f37aeaf06106124797 | [
"BSD-3-Clause"
] | null | null | null | corehq/apps/fixtures/tests.py | dslowikowski/commcare-hq | ad8885cf8dab69dc85cb64f37aeaf06106124797 | [
"BSD-3-Clause"
] | null | null | null | from xml.etree import ElementTree
from casexml.apps.case.tests.util import check_xml_line_by_line
from casexml.apps.case.xml import V2
from corehq.apps.fixtures import fixturegenerators
from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType, FixtureOwnership, FixtureTypeField, \
FixtureItemField, FieldList
from corehq.apps.fixtures.views import update_tables
from corehq.apps.fixtures.exceptions import FixtureVersionError
from corehq.apps.users.models import CommCareUser
from django.test import TestCase
| 36.492537 | 111 | 0.537628 |
86160af095ef8d0435d3f1fd7140e93918c54b2c | 2,685 | py | Python | readthedocs/search/signals.py | agarwalrounak/readthedocs.org | 4911600c230809bd6fb3585d1903121db2928ad6 | [
"MIT"
] | 10 | 2019-05-21T03:00:40.000Z | 2022-03-12T11:24:39.000Z | readthedocs/search/signals.py | agarwalrounak/readthedocs.org | 4911600c230809bd6fb3585d1903121db2928ad6 | [
"MIT"
] | 12 | 2019-12-05T04:47:01.000Z | 2022-01-09T00:56:58.000Z | readthedocs/search/signals.py | agarwalrounak/readthedocs.org | 4911600c230809bd6fb3585d1903121db2928ad6 | [
"MIT"
] | 5 | 2019-07-08T23:45:10.000Z | 2021-02-26T07:29:49.000Z | # -*- coding: utf-8 -*-
"""We define custom Django signals to trigger before executing searches."""
from django.db.models.signals import post_save, pre_delete
from django.dispatch import receiver
from django_elasticsearch_dsl.apps import DEDConfig
from readthedocs.projects.models import HTMLFile, Project
from readthedocs.projects.signals import bulk_post_create, bulk_post_delete
from readthedocs.search.tasks import delete_objects_in_es, index_objects_to_es
| 33.987342 | 80 | 0.714339 |
86161f7e9f969066db82c2f68d6e2be07cfb7ad1 | 3,694 | py | Python | src/falconpy/_endpoint/_filevantage.py | kra-ts/falconpy | c7c4ed93cb3b56cdfd86757f573fde57e4ccf857 | [
"Unlicense"
] | null | null | null | src/falconpy/_endpoint/_filevantage.py | kra-ts/falconpy | c7c4ed93cb3b56cdfd86757f573fde57e4ccf857 | [
"Unlicense"
] | null | null | null | src/falconpy/_endpoint/_filevantage.py | kra-ts/falconpy | c7c4ed93cb3b56cdfd86757f573fde57e4ccf857 | [
"Unlicense"
] | null | null | null | """Internal API endpoint constant library.
_______ __ _______ __ __ __
| _ .----.-----.--.--.--.--| | _ | |_.----|__| |--.-----.
|. 1___| _| _ | | | | _ | 1___| _| _| | <| -__|
|. |___|__| |_____|________|_____|____ |____|__| |__|__|__|_____|
|: 1 | |: 1 |
|::.. . | CROWDSTRIKE FALCON |::.. . | FalconPy
`-------' `-------'
OAuth2 API - Customer SDK
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <https://unlicense.org>
"""
_filevantage_endpoints = [
[
"getChanges",
"GET",
"/filevantage/entities/changes/v2",
"Retrieve information on changes",
"filevantage",
[
{
"type": "array",
"items": {
"type": "string"
},
"collectionFormat": "multi",
"description": "Comma separated values of change ids",
"name": "ids",
"in": "query",
"required": True
}
]
],
[
"queryChanges",
"GET",
"/filevantage/queries/changes/v2",
"Returns one or more change IDs",
"filevantage",
[
{
"minimum": 0,
"type": "integer",
"description": "The first change index to return in the response. "
"If not provided it will default to '0'. "
"Use with the `limit` parameter to manage pagination of results.",
"name": "offset",
"in": "query"
},
{
"type": "integer",
"description": "The maximum number of changes to return in the response "
"(default: 100; max: 500). "
"Use with the `offset` parameter to manage pagination of results",
"name": "limit",
"in": "query"
},
{
"type": "string",
"description": "Sort changes using options like:\n\n"
"- `action_timestamp` (timestamp of the change occurrence) \n\n "
"Sort either `asc` (ascending) or `desc` (descending). "
"For example: `action_timestamp|asc`.\n"
"The full list of allowed sorting options can be reviewed in our API documentation.",
"name": "sort",
"in": "query"
},
{
"type": "string",
"description": "Filter changes using a query in Falcon Query Language (FQL). \n\n"
"Common filter options include:\n\n - `host.host_name`\n - `action_timestamp`\n\n "
"The full list of allowed filter parameters can be reviewed in our API documentation.",
"name": "filter",
"in": "query"
}
]
]
]
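# Hedged note (not part of the generated file): each record above follows the
# positional layout used across these generated endpoint modules --
# [operation_id, HTTP method, route, description, service collection, params].
_op, _method, _route, _desc, _service, _params = _filevantage_endpoints[0]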
| 35.180952 | 95 | 0.600704 |
86168b46a8faaf9e6d96f727abd89d459b3f8564 | 8,837 | py | Python | TimeWrapper_JE/venv/Lib/site-packages/pip/_internal/cli/progress_bars.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | TimeWrapper_JE/venv/Lib/site-packages/pip/_internal/cli/progress_bars.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | TimeWrapper_JE/venv/Lib/site-packages/pip/_internal/cli/progress_bars.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | 1 | 2021-06-20T19:28:37.000Z | 2021-06-20T19:28:37.000Z | import itertools
import sys
from signal import SIGINT, default_int_handler, signal
from typing import Any, Dict, List
from pip._vendor.progress.bar import Bar, FillingCirclesBar, IncrementalBar
from pip._vendor.progress.spinner import Spinner
from pip._internal.utils.compat import WINDOWS
from pip._internal.utils.logging import get_indentation
from pip._internal.utils.misc import format_size
try:
from pip._vendor import colorama
# Lots of different errors can come from this, including SystemError and
# ImportError.
except Exception:
colorama = None
_BaseBar = _select_progress_class(IncrementalBar, Bar) # type: Any
class WindowsMixin:
def __init__(self, *args, **kwargs):
# type: (List[Any], Dict[Any, Any]) -> None
# The Windows terminal does not support the hide/show cursor ANSI codes
# even with colorama. So we'll ensure that hide_cursor is False on
# Windows.
# This call needs to go before the super() call, so that hide_cursor
# is set in time. The base progress bar class writes the "hide cursor"
# code to the terminal in its init, so if we don't set this soon
# enough, we get a "hide" with no corresponding "show"...
if WINDOWS and self.hide_cursor: # type: ignore
self.hide_cursor = False
# https://github.com/python/mypy/issues/5887
super().__init__(*args, **kwargs) # type: ignore
# Check if we are running on Windows and we have the colorama module,
# if we do then wrap our file with it.
if WINDOWS and colorama:
self.file = colorama.AnsiToWin32(self.file) # type: ignore
# The progress code expects to be able to call self.file.isatty()
# but the colorama.AnsiToWin32() object doesn't have that, so we'll
# add it.
self.file.isatty = lambda: self.file.wrapped.isatty()
# The progress code expects to be able to call self.file.flush()
# but the colorama.AnsiToWin32() object doesn't have that, so we'll
# add it.
self.file.flush = lambda: self.file.wrapped.flush()
BAR_TYPES = {
"off": (DownloadSilentBar, DownloadSilentBar),
"on": (DefaultDownloadProgressBar, DownloadProgressSpinner),
"ascii": (DownloadBar, DownloadProgressSpinner),
"pretty": (DownloadFillingCirclesBar, DownloadProgressSpinner),
"emoji": (DownloadBlueEmojiProgressBar, DownloadProgressSpinner),
}
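# Hedged sketch (an assumption, not pip's actual helper): BAR_TYPES maps each
# progress-bar style to a (bar, spinner) pair; callers pick the spinner when
# the total download size is unknown and the bar when it is known.
def _example_progress(progress_bar="on", max=None):
    # type: (str, int) -> Any
    if max is None:
        return BAR_TYPES[progress_bar][1]()  # size unknown -> spinner
    return BAR_TYPES[progress_bar][0](max=max)  # size known -> bar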
| 33.729008 | 88 | 0.625099 |
86169ad5486623924eba0430b7afc33561fa170a | 4,012 | py | Python | scripts/study_case/ID_5/matchzoo/auto/tuner/tune.py | kzbnb/numerical_bugs | bc22e72bcc06df6ce7889a25e0aeed027bde910b | [
"Apache-2.0"
] | 8 | 2021-06-30T06:55:14.000Z | 2022-03-18T01:57:14.000Z | scripts/study_case/ID_5/matchzoo/auto/tuner/tune.py | kzbnb/numerical_bugs | bc22e72bcc06df6ce7889a25e0aeed027bde910b | [
"Apache-2.0"
] | 1 | 2021-06-30T03:08:15.000Z | 2021-06-30T03:08:15.000Z | scripts/study_case/ID_5/matchzoo/auto/tuner/tune.py | kzbnb/numerical_bugs | bc22e72bcc06df6ce7889a25e0aeed027bde910b | [
"Apache-2.0"
] | 2 | 2021-11-17T11:19:48.000Z | 2021-11-18T03:05:58.000Z | import typing
import numpy as np
import scripts.study_case.ID_5.matchzoo as mz
from scripts.study_case.ID_5.matchzoo.engine.base_metric import BaseMetric
from .tuner import Tuner
def tune(
params: 'mz.ParamTable',
optimizer: str = 'adam',
trainloader: mz.dataloader.DataLoader = None,
validloader: mz.dataloader.DataLoader = None,
embedding: np.ndarray = None,
fit_kwargs: dict = None,
metric: typing.Union[str, BaseMetric] = None,
mode: str = 'maximize',
num_runs: int = 10,
verbose=1
):
"""
Tune model hyper-parameters.
A simple shorthand for using :class:`matchzoo.auto.Tuner`.
`model.params.hyper_space` reprensents the model's hyper-parameters
search space, which is the cross-product of individual hyper parameter's
hyper space. When a `Tuner` builds a model, for each hyper parameter in
`model.params`, if the hyper-parameter has a hyper-space, then a sample
will be taken in the space. However, if the hyper-parameter does not
have a hyper-space, then the default value of the hyper-parameter will
be used.
See `tutorials/model_tuning.ipynb` for a detailed walkthrough on usage.
:param params: A completed parameter table to tune. Usually `model.params`
of the desired model to tune. `params.completed()` should be `True`.
:param optimizer: Str or `Optimizer` class. Optimizer for optimizing model.
:param trainloader: Training data to use. Should be a `DataLoader`.
:param validloader: Testing data to use. Should be a `DataLoader`.
:param embedding: Embedding used by model.
:param fit_kwargs: Extra keyword arguments to pass to `fit`.
(default: `dict(epochs=10, verbose=0)`)
:param metric: Metric to tune upon. Must be one of the metrics in
`model.params['task'].metrics`. (default: the first metric in
`params.['task'].metrics`.
:param mode: Either `maximize` the metric or `minimize` the metric.
(default: 'maximize')
:param num_runs: Number of runs. Each run takes a sample in
`params.hyper_space` and build a model based on the sample.
(default: 10)
:param verbose: Verbosity. (default: 1)
Example:
>>> import scripts.study_case.ID_5.matchzoo as mz
>>> import numpy as np
>>> train = mz.datasets.toy.load_data('train')
>>> valid = mz.datasets.toy.load_data('dev')
>>> prpr = mz.models.DenseBaseline.get_default_preprocessor()
>>> train = prpr.fit_transform(train, verbose=0)
>>> valid = prpr.transform(valid, verbose=0)
>>> trainset = mz.dataloader.Dataset(train)
>>> validset = mz.dataloader.Dataset(valid)
>>> padding = mz.models.DenseBaseline.get_default_padding_callback()
>>> trainloader = mz.dataloader.DataLoader(trainset, callback=padding)
>>> validloader = mz.dataloader.DataLoader(validset, callback=padding)
>>> model = mz.models.DenseBaseline()
>>> model.params['task'] = mz.tasks.Ranking()
>>> optimizer = 'adam'
>>> embedding = np.random.uniform(-0.2, 0.2,
... (prpr.context['vocab_size'], 100))
>>> tuner = mz.auto.Tuner(
... params=model.params,
... optimizer=optimizer,
... trainloader=trainloader,
... validloader=validloader,
... embedding=embedding,
... num_runs=1,
... verbose=0
... )
>>> results = tuner.tune()
>>> sorted(results['best'].keys())
['#', 'params', 'sample', 'score']
"""
tuner = Tuner(
params=params,
optimizer=optimizer,
trainloader=trainloader,
validloader=validloader,
embedding=embedding,
fit_kwargs=fit_kwargs,
metric=metric,
mode=mode,
num_runs=num_runs,
verbose=verbose
)
return tuner.tune()
| 38.951456 | 79 | 0.642323 |
861799191a7d114eaded88fe6c8c8ba1d448c7c7 | 4,392 | py | Python | libs/gym/tests/wrappers/test_pixel_observation.py | maxgold/icml22 | 49f026dd2314091639b52f5b8364a29e8000b738 | [
"MIT"
] | null | null | null | libs/gym/tests/wrappers/test_pixel_observation.py | maxgold/icml22 | 49f026dd2314091639b52f5b8364a29e8000b738 | [
"MIT"
] | null | null | null | libs/gym/tests/wrappers/test_pixel_observation.py | maxgold/icml22 | 49f026dd2314091639b52f5b8364a29e8000b738 | [
"MIT"
] | null | null | null | """Tests for the pixel observation wrapper."""
from typing import Optional
import pytest
import numpy as np
import gym
from gym import spaces
from gym.wrappers.pixel_observation import PixelObservationWrapper, STATE_KEY
| 35.136 | 85 | 0.651184 |
8618701c5bffe90f48c4363133a7c542c718e23a | 2,144 | py | Python | real_plot_fft_stft_impl.py | MuAuan/Scipy-Swan | 2d79175e8fc2ab8179ea95e1b22918c29d88b7b5 | [
"MIT"
] | null | null | null | real_plot_fft_stft_impl.py | MuAuan/Scipy-Swan | 2d79175e8fc2ab8179ea95e1b22918c29d88b7b5 | [
"MIT"
] | null | null | null | real_plot_fft_stft_impl.py | MuAuan/Scipy-Swan | 2d79175e8fc2ab8179ea95e1b22918c29d88b7b5 | [
"MIT"
] | null | null | null | import pyaudio
import wave
from scipy.fftpack import fft, ifft
import numpy as np
import matplotlib.pyplot as plt
import cv2
from scipy import signal
from swan import pycwt
CHUNK = 1024
FORMAT = pyaudio.paInt16  # 16-bit integer samples
CHANNELS = 1  # 1: mono, 2: stereo
RATE = 22100  # sampling rate in Hz (22.1 kHz)
RECORD_SECONDS = 5  # length of each recording window, in seconds
WAVE_OUTPUT_FILENAME = "output2.wav"
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
s=1
# figure
fig = plt.figure(figsize=(12, 10))
ax1 = fig.add_subplot(311)
ax2 = fig.add_subplot(312)
ax3 = fig.add_subplot(313)
ax2.axis([0, 5, 200,20000])
ax2.set_yscale('log')
while True:
fig.delaxes(ax1)
fig.delaxes(ax3)
ax1 = fig.add_subplot(311)
ax3 = fig.add_subplot(313)
print("* recording")
frames = []
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
data = stream.read(CHUNK)
frames.append(data)
print("* done recording")
wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
wavfile = WAVE_OUTPUT_FILENAME
wr = wave.open(wavfile, "rb")
ch = CHANNELS #wr.getnchannels()
width = p.get_sample_size(FORMAT) #wr.getsampwidth()
fr = RATE #wr.getframerate()
fn = wr.getnframes()
fs = fn / fr
origin = wr.readframes(wr.getnframes())
data = origin[:fn]
wr.close()
sig = np.frombuffer(data, dtype="int16") /32768.0
    t = np.linspace(0, fs, fn // 2, endpoint=False)  # linspace needs an integer sample count
ax1.axis([0, 5, -0.0075,0.0075])
ax1.plot(t, sig)
nperseg = 256
f, t, Zxx = signal.stft(sig, fs=fs*fn/50, nperseg=nperseg)
ax2.pcolormesh(t, 5*f, np.abs(Zxx), cmap='hsv')
freq =fft(sig,int(fn/2))
Pyy = np.sqrt(freq*freq.conj())*2/fn
f = np.arange(int(fn/2))
ax3.axis([200, 20000, 0,0.000075])
ax3.set_xscale('log')
ax3.plot(f,Pyy)
plt.pause(1)
plt.savefig('figure'+str(s)+'.png')
s += 1
| 24.930233 | 62 | 0.620802 |
8618b81e3f7d92d3dac7ffa13548c536b939484f | 109 | py | Python | tests/pydecompile-test/baselines/events_in_code_blocks.py | gengxf0505/pxt | eca93a0e0605e68adcfbebce778cc5912a10efcf | [
"MIT"
] | 1 | 2020-04-17T01:45:18.000Z | 2020-04-17T01:45:18.000Z | tests/pydecompile-test/baselines/events_in_code_blocks.py | gengxf0505/pxt | eca93a0e0605e68adcfbebce778cc5912a10efcf | [
"MIT"
] | 3 | 2019-02-07T23:34:43.000Z | 2019-03-06T18:25:37.000Z | tests/pydecompile-test/baselines/events_in_code_blocks.py | gengxf0505/pxt | eca93a0e0605e68adcfbebce778cc5912a10efcf | [
"MIT"
] | 2 | 2019-10-29T06:56:11.000Z | 2021-05-25T10:18:12.000Z | #/ <reference path="./testBlocks/mb.ts" />
basic.forever(function_0) | 21.8 | 42 | 0.715596 |
861975d3c36c28b9ba6319750aff575b598fb65c | 4,147 | py | Python | PID/PDControl.py | l756302098/ros_practice | 4da8b4ddb25ada2e6f1adb3c0f8b34576aedf6b7 | [
"MIT"
] | null | null | null | PID/PDControl.py | l756302098/ros_practice | 4da8b4ddb25ada2e6f1adb3c0f8b34576aedf6b7 | [
"MIT"
] | null | null | null | PID/PDControl.py | l756302098/ros_practice | 4da8b4ddb25ada2e6f1adb3c0f8b34576aedf6b7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import random
import numpy as np
import matplotlib.pyplot as plt
robot = Robot()
robot.set(0, 1, 0)
robot.set_noise(0.1,0.05)
x_trajectory, y_trajectory = run(robot, 0.1, 1.0)
n = len(x_trajectory)
fig, ax1 = plt.subplots(1, 1, figsize=(8, 8))
ax1.plot(x_trajectory, y_trajectory, 'g', label='PDcontroller')
ax1.plot(x_trajectory, np.zeros(n), 'r', label='reference')
plt.show()
| 17.875 | 85 | 0.628647 |
861a029a9ec9483f45fb602ca0d783eedc1d7f90 | 161 | py | Python | torchvision/datasets/samplers/__init__.py | yoshitomo-matsubara/vision | 03d11338f3faf94a0749549912593ddb8b70be17 | [
"BSD-3-Clause"
] | 12,063 | 2017-01-18T19:58:38.000Z | 2022-03-31T23:08:44.000Z | torchvision/datasets/samplers/__init__.py | yoshitomo-matsubara/vision | 03d11338f3faf94a0749549912593ddb8b70be17 | [
"BSD-3-Clause"
] | 4,673 | 2017-01-18T21:30:03.000Z | 2022-03-31T20:58:33.000Z | torchvision/datasets/samplers/__init__.py | yoshitomo-matsubara/vision | 03d11338f3faf94a0749549912593ddb8b70be17 | [
"BSD-3-Clause"
] | 7,132 | 2017-01-18T18:12:23.000Z | 2022-03-31T21:19:10.000Z | from .clip_sampler import DistributedSampler, UniformClipSampler, RandomClipSampler
__all__ = ("DistributedSampler", "UniformClipSampler", "RandomClipSampler")
| 40.25 | 83 | 0.838509 |
861a31bb111c594972aeb70c462a963cf1fefdb9 | 5,215 | py | Python | pysnmp/HH3C-PPPOE-SERVER-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 11 | 2021-02-02T16:27:16.000Z | 2021-08-31T06:22:49.000Z | pysnmp/HH3C-PPPOE-SERVER-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 75 | 2021-02-24T17:30:31.000Z | 2021-12-08T00:01:18.000Z | pysnmp/HH3C-PPPOE-SERVER-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module HH3C-PPPOE-SERVER-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HH3C-PPPOE-SERVER-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:16:17 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion")
hh3cCommon, = mibBuilder.importSymbols("HH3C-OID-MIB", "hh3cCommon")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
ObjectIdentity, Integer32, IpAddress, NotificationType, Unsigned32, iso, MibIdentifier, Counter64, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, ModuleIdentity, Bits, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "Integer32", "IpAddress", "NotificationType", "Unsigned32", "iso", "MibIdentifier", "Counter64", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "ModuleIdentity", "Bits", "TimeTicks")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
hh3cPPPoEServer = ModuleIdentity((1, 3, 6, 1, 4, 1, 25506, 2, 102))
hh3cPPPoEServer.setRevisions(('2009-05-06 00:00',))
if mibBuilder.loadTexts: hh3cPPPoEServer.setLastUpdated('200905060000Z')
if mibBuilder.loadTexts: hh3cPPPoEServer.setOrganization('Hangzhou H3C Technologies Co., Ltd.')
hh3cPPPoEServerObject = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 102, 1))
hh3cPPPoEServerMaxSessions = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 102, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cPPPoEServerMaxSessions.setStatus('current')
hh3cPPPoEServerCurrSessions = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 102, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cPPPoEServerCurrSessions.setStatus('current')
hh3cPPPoEServerAuthRequests = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 102, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cPPPoEServerAuthRequests.setStatus('current')
hh3cPPPoEServerAuthSuccesses = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 102, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cPPPoEServerAuthSuccesses.setStatus('current')
hh3cPPPoEServerAuthFailures = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 102, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cPPPoEServerAuthFailures.setStatus('current')
hh3cPPPoESAbnormOffsThreshold = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 102, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hh3cPPPoESAbnormOffsThreshold.setStatus('current')
hh3cPPPoESAbnormOffPerThreshold = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 102, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hh3cPPPoESAbnormOffPerThreshold.setStatus('current')
hh3cPPPoESNormOffPerThreshold = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 102, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hh3cPPPoESNormOffPerThreshold.setStatus('current')
hh3cPPPoEServerTraps = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 102, 2))
hh3cPPPoeServerTrapPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 102, 2, 0))
hh3cPPPoESAbnormOffsAlarm = NotificationType((1, 3, 6, 1, 4, 1, 25506, 2, 102, 2, 0, 1))
if mibBuilder.loadTexts: hh3cPPPoESAbnormOffsAlarm.setStatus('current')
hh3cPPPoESAbnormOffPerAlarm = NotificationType((1, 3, 6, 1, 4, 1, 25506, 2, 102, 2, 0, 2))
if mibBuilder.loadTexts: hh3cPPPoESAbnormOffPerAlarm.setStatus('current')
hh3cPPPoESNormOffPerAlarm = NotificationType((1, 3, 6, 1, 4, 1, 25506, 2, 102, 2, 0, 3))
if mibBuilder.loadTexts: hh3cPPPoESNormOffPerAlarm.setStatus('current')
mibBuilder.exportSymbols("HH3C-PPPOE-SERVER-MIB", hh3cPPPoEServerMaxSessions=hh3cPPPoEServerMaxSessions, hh3cPPPoEServerObject=hh3cPPPoEServerObject, hh3cPPPoeServerTrapPrefix=hh3cPPPoeServerTrapPrefix, hh3cPPPoEServerAuthFailures=hh3cPPPoEServerAuthFailures, hh3cPPPoEServer=hh3cPPPoEServer, PYSNMP_MODULE_ID=hh3cPPPoEServer, hh3cPPPoESAbnormOffsAlarm=hh3cPPPoESAbnormOffsAlarm, hh3cPPPoEServerAuthRequests=hh3cPPPoEServerAuthRequests, hh3cPPPoEServerAuthSuccesses=hh3cPPPoEServerAuthSuccesses, hh3cPPPoESNormOffPerThreshold=hh3cPPPoESNormOffPerThreshold, hh3cPPPoEServerCurrSessions=hh3cPPPoEServerCurrSessions, hh3cPPPoEServerTraps=hh3cPPPoEServerTraps, hh3cPPPoESAbnormOffPerThreshold=hh3cPPPoESAbnormOffPerThreshold, hh3cPPPoESAbnormOffPerAlarm=hh3cPPPoESAbnormOffPerAlarm, hh3cPPPoESAbnormOffsThreshold=hh3cPPPoESAbnormOffsThreshold, hh3cPPPoESNormOffPerAlarm=hh3cPPPoESNormOffPerAlarm)
| 115.888889 | 892 | 0.797124 |
861a472cf4ef7f924185a3fe1ea6569338502257 | 2,041 | py | Python | Pyshare2019/02 - if + Nesteed if/Nesteed-IF.py | suhaili99/python-share | 6c65faaff722b8bd9e381650a6b277f56d1ae4c9 | [
"MIT"
] | 4 | 2019-10-21T11:00:55.000Z | 2020-10-22T16:11:21.000Z | Pyshare2019/02 - if + Nesteed if/Nesteed-IF.py | suhaili99/python-share | 6c65faaff722b8bd9e381650a6b277f56d1ae4c9 | [
"MIT"
] | 1 | 2019-12-17T05:20:26.000Z | 2019-12-17T05:20:26.000Z | Pyshare2019/02 - if + Nesteed if/Nesteed-IF.py | suhaili99/python-share | 6c65faaff722b8bd9e381650a6b277f56d1ae4c9 | [
"MIT"
] | 9 | 2019-10-20T05:48:03.000Z | 2020-11-17T14:08:14.000Z | name = input("masukkan nama pembeli = ")
alamat= input("Alamat = ")
NoTelp = input("No Telp = ")
print("\n")
print("=================INFORMASI HARGA MOBIL DEALER JAYA ABADI===============")
print("Pilih Jenis Mobil :")
print("\t 1.Daihatsu ")
print("\t 2.Honda ")
print("\t 3.Toyota ")
print("")
pilihan = int(input("Pilih jenis mobil yang ingin dibeli : "))
print("")
if (pilihan==1):
print("<<<<<<<< Macam macam mobil pada Daihatsu >>>>>>>>>")
print("\ta.Grand New Xenia")
print("\tb.All New Terios")
print("\tc.New Ayla")
Pilih1 = input("Mana yang ingin anda pilih ?? = ")
if(Pilih1 == "a"):
print("Harga mobil Grand New Xenia adalah 183 juta ")
elif(Pilih1== "b"):
print("Harga mobil All New Terios adalah 215 juta")
elif(Pilih1== "c"):
print("Harga mobil New Ayla adalah 110 juta")
else:
print("Tidak terdefinisi")
elif (pilihan==2):
print("<<<<<<<< Macam macam mobil pada Honda >>>>>>>>>")
print("\ta.Honda Brio Satya S")
print("\tb.Honda Jazz ")
print("\tb.Honda Mobilio ")
pilih2 = input("Mana yang ingin anda pilih??")
if(pilih2=="a"):
print("Harga mobil HOnda Brio Satya S adalah 131 juta")
elif(pilih2=="b"):
print("Harga mobil Honda Jazz adalah 232 juta")
elif(pilih2=="c"):
print("Harga mobil Honda mobilio adalah 189 juta")
else:
print("Tidak terdefinisi")
elif (pilihan==3):
print("<<<<<<<< Macam macam mobil pada Toyota>>>>>>>>?")
print("\ta.Alphard")
print("\tb.Camry")
print("\tc.Fortuner")
pilih3 = input("Mana yang ingin anda pilih??")
if (pilih3=="a"):
print("Harga mobil Alphard adalah 870 juta")
elif (pilih3=="b"):
print("Harga mobil Camry adalah 560 Juta")
elif (pilih3=="c"):
print("Harga mobil Fortuner adalah 492 Juta")
| 34.59322 | 80 | 0.529152 |
861c79331c252b7937573a42f8e033c57c978cd9 | 6,138 | py | Python | oneflow/python/test/ops/test_l1loss.py | wanghongsheng01/framework_enflame | debf613e05e3f5ea8084c3e79b60d0dd9e349526 | [
"Apache-2.0"
] | 2 | 2021-09-10T00:19:49.000Z | 2021-11-16T11:27:20.000Z | oneflow/python/test/ops/test_l1loss.py | duijiudanggecl/oneflow | d2096ae14cf847509394a3b717021e2bd1d72f62 | [
"Apache-2.0"
] | 1 | 2021-06-16T08:37:50.000Z | 2021-06-16T08:37:50.000Z | oneflow/python/test/ops/test_l1loss.py | duijiudanggecl/oneflow | d2096ae14cf847509394a3b717021e2bd1d72f62 | [
"Apache-2.0"
] | 1 | 2021-11-10T07:57:01.000Z | 2021-11-10T07:57:01.000Z | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
import numpy as np
import oneflow.typing as tp
from test_util import GenArgList
import unittest
from collections import OrderedDict
from typing import Dict
import os
if __name__ == "__main__":
unittest.main()
| 33.540984 | 99 | 0.665689 |
861cc7ffb7999a7f4d6f545192eee4e0b87dd394 | 869 | py | Python | tests/test_schema.py | Dog-Egg/dida | 17fd8dce0fe198e65effb48816a2339802234974 | [
"MIT"
] | null | null | null | tests/test_schema.py | Dog-Egg/dida | 17fd8dce0fe198e65effb48816a2339802234974 | [
"MIT"
] | 3 | 2021-06-15T19:10:55.000Z | 2022-02-27T10:30:28.000Z | tests/test_schema.py | Dog-Egg/dida | 17fd8dce0fe198e65effb48816a2339802234974 | [
"MIT"
] | null | null | null | import unittest
import datetime
from dida import schemas, triggers
from marshmallow import ValidationError
| 36.208333 | 107 | 0.700806 |
861cdcc494cb3bd3e797fd81fd6a76984fde4f26 | 26,883 | py | Python | apps/content/views.py | Sunbird-Ed/evolve-api | 371b39422839762e32401340456c13858cb8e1e9 | [
"MIT"
] | 1 | 2019-02-27T15:26:11.000Z | 2019-02-27T15:26:11.000Z | apps/content/views.py | Sunbird-Ed/evolve-api | 371b39422839762e32401340456c13858cb8e1e9 | [
"MIT"
] | 9 | 2019-12-16T10:09:46.000Z | 2022-03-11T23:42:12.000Z | apps/content/views.py | Sunbird-Ed/evolve-api | 371b39422839762e32401340456c13858cb8e1e9 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from rest_framework import status
from rest_framework.generics import (
ListAPIView,
ListCreateAPIView,
ListAPIView,
RetrieveUpdateAPIView,)
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from rest_framework.decorators import permission_classes
from apps.configuration.models import Book
from apps.hardspot.models import HardSpot
from .models import Content,ContentContributors
from .serializers import (
ContentListSerializer,
BookNestedSerializer,
BookListSerializer,
ContentStatusListSerializer,
SectionKeywordSerializer,
SubSectionKeywordSerializer,
SectionKeywordsSerializer,
ChapterKeywordsSerializer,
SubSectionKeywordsSerializer,
KeywordSerializer,
ContentContributorSerializer,
ApprovedContentSerializer,
ContentStatusSerializer,
HardSpotCreateSerializer,
ContentContributorsSerializer,
SubSubSectionKeywordsSerializer,
ContentStatusSerializerFileFormat,
)
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import permission_required
from rest_framework.parsers import MultiPartParser
from apps.dataupload.models import (Chapter,
Section,
SubSection,
ChapterKeyword,
SectionKeyword,
SubSectionKeyword,
SubSubSectionKeyword,
)
import json
import pandas as pd
from evolve import settings
from evolve import settings
from azure.storage.blob import (
BlockBlobService,
ContainerPermissions
)
from datetime import datetime, timedelta
import os
import itertools
from django.db.models import Q
import threading
account_name = settings.AZURE_ACCOUNT_NAME
account_key = settings.AZURE_ACCOUNT_KEY
CONTAINER_NAME= settings.AZURE_CONTAINER
block_blob_service = BlockBlobService(account_name=account_name, account_key=account_key)
| 47.246046 | 386 | 0.654986 |
861d74d55db578d9eef6b283f432f055362e839e | 975 | py | Python | examples/given_data.py | GuoJingyao/cornac | e7529990ec1dfa586c4af3de98e4b3e00a786578 | [
"Apache-2.0"
] | null | null | null | examples/given_data.py | GuoJingyao/cornac | e7529990ec1dfa586c4af3de98e4b3e00a786578 | [
"Apache-2.0"
] | null | null | null | examples/given_data.py | GuoJingyao/cornac | e7529990ec1dfa586c4af3de98e4b3e00a786578 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Example to train and evaluate a model with given data
@author: Quoc-Tuan Truong <[email protected]>
"""
from cornac.data import Reader
from cornac.eval_methods import BaseMethod
from cornac.models import MF
from cornac.metrics import MAE, RMSE
from cornac.utils import cache
# Download MovieLens 100K provided training and test splits
reader = Reader()
train_data = reader.read(cache(url='http://files.grouplens.org/datasets/movielens/ml-100k/u1.base'))
test_data = reader.read(cache(url='http://files.grouplens.org/datasets/movielens/ml-100k/u1.test'))
eval_method = BaseMethod.from_splits(train_data=train_data, test_data=test_data,
exclude_unknowns=False, verbose=True)
mf = MF(k=10, max_iter=25, learning_rate=0.01, lambda_reg=0.02,
use_bias=True, early_stop=True, verbose=True)
# Evaluation
result = eval_method.evaluate(model=mf, metrics=[MAE(), RMSE()], user_based=True)
print(result)
| 33.62069 | 100 | 0.73641 |
861eccc43552e108c8eb7cab4531e62034debd26 | 5,446 | py | Python | taming/data/ade20k.py | ZlodeiBaal/taming | b6c0f896992881f154bdfd910a8163ee754df83a | [
"MIT"
] | null | null | null | taming/data/ade20k.py | ZlodeiBaal/taming | b6c0f896992881f154bdfd910a8163ee754df83a | [
"MIT"
] | null | null | null | taming/data/ade20k.py | ZlodeiBaal/taming | b6c0f896992881f154bdfd910a8163ee754df83a | [
"MIT"
] | 1 | 2022-01-31T15:55:24.000Z | 2022-01-31T15:55:24.000Z | import os
import numpy as np
import cv2
import albumentations
from PIL import Image
from torch.utils.data import Dataset
from taming.data.sflckr import SegmentationBase # for examples included in repo
# With semantic map and scene label
if __name__ == "__main__":
dset = ADE20kValidation()
ex = dset[0]
for k in ["image", "scene_category", "segmentation"]:
print(type(ex[k]))
try:
print(ex[k].shape)
except:
print(ex[k])
| 42.546875 | 107 | 0.58722 |
861f13a8761f8f22a82c122d42219d7e56bf820e | 14,650 | py | Python | templates/federated_reporting/distributed_cleanup.py | olehermanse/masterfiles | bcee0a8c0a925e885ba47ba3300b96c722b91f02 | [
"MIT"
] | 44 | 2015-01-12T05:26:46.000Z | 2021-08-24T02:47:19.000Z | templates/federated_reporting/distributed_cleanup.py | olehermanse/masterfiles | bcee0a8c0a925e885ba47ba3300b96c722b91f02 | [
"MIT"
] | 1,104 | 2015-01-02T08:17:57.000Z | 2022-03-31T15:58:37.000Z | templates/federated_reporting/distributed_cleanup.py | Lex-2008/masterfiles | b43c44af2c4e544ff7d044e76580ced2168ce5e0 | [
"MIT"
] | 79 | 2015-01-05T19:13:03.000Z | 2021-08-25T07:57:31.000Z | #!/usr/bin/env python3
"""
fr_distributed_cleanup.py - a script to remove hosts which have migrated to
other feeder hubs. To be run on Federated Reporting superhub
after each import of feeder data.
First, to set up, enable fr_distributed_cleanup by setting a class in augments (def.json).
This enables policy in cfe_internal/enterprise/federation/federation.cf
```json
{
"classes": {
"cfengine_mp_enable_fr_distributed_cleanup": [ "any::" ]
}
}
```
After the policy has run on superhub and feeders, run this script
to set up the fr_distributed_cleanup role and account on all feeders and superhubs with
proper RBAC settings for normal operation.
You will be prompted for superhub admin credentials and then
admin credentials on each feeder.
"""
import argparse
import logging
import os
import platform
import string
import random
import subprocess
import sys
from getpass import getpass
from nova_api import NovaApi
from cfsecret import read_secret, write_secret
WORKDIR = None
CFE_FR_TABLES = None
# get WORKDIR and CFE_FR_TABLES from config.sh
config_sh_path = os.path.join(os.path.dirname(__file__), "config.sh")
cmd = "source {}; echo $WORKDIR; echo $CFE_FR_TABLES".format(config_sh_path)
with subprocess.Popen(
cmd, stdout=subprocess.PIPE, shell=True, executable="/bin/bash"
) as proc:
lines = proc.stdout.readlines()
WORKDIR = lines[0].decode().strip()
CFE_FR_TABLES = [table.strip() for table in lines[1].decode().split()]
if not WORKDIR or not CFE_FR_TABLES:
print("Unable to get WORKDIR and CFE_FR_TABLES values from config.sh")
sys.exit(1)
# Primary dir in which to place various needed files
DISTRIBUTED_CLEANUP_DIR = "/opt/cfengine/federation/cftransport/distributed_cleanup"
# collect cert files from /var/cfengine/httpd/ssl/certs on
# superhub and feeders and cat all together into hubs.cert
CERT_PATH = os.path.join(DISTRIBUTED_CLEANUP_DIR, "hubs.cert")
# Note: remove the file at DISTRIBUTED_CLEANUP_SECRET_PATH to reset everything.
# api calls will overwrite fr_distributed_cleanup user and role on superhub and all feeders.
DISTRIBUTED_CLEANUP_SECRET_PATH = os.path.join(WORKDIR, "state/fr_distributed_cleanup.cfsecret")
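# Hedged sketch (assumption -- the script's real helpers are elided here): the
# secret file is created once and reused on later runs, which is why removing
# it "resets everything" as noted above. The read_secret/write_secret
# signatures are assumed from the import.
def _example_load_or_create_secret():
    if os.path.exists(DISTRIBUTED_CLEANUP_SECRET_PATH):
        return read_secret(DISTRIBUTED_CLEANUP_SECRET_PATH)
    password = "".join(random.choice(string.ascii_letters) for _ in range(20))
    write_secret(DISTRIBUTED_CLEANUP_SECRET_PATH, password)
    return password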
if __name__ == "__main__":
main()
else:
raise ImportError("fr_distributed_cleanup.py must only be used as a script!")
| 35.386473 | 110 | 0.597543 |
861f5e4cfdc98de2a394371bb5b02dd397322979 | 203 | py | Python | Python/Fibonacci.py | kennethsequeira/Hello-world | 464227bc7d9778a4a2a4044fe415a629003ea77f | [
"MIT"
] | 1 | 2018-12-19T11:42:09.000Z | 2018-12-19T11:42:09.000Z | Python/Fibonacci.py | kennethsequeira/Hello-world | 464227bc7d9778a4a2a4044fe415a629003ea77f | [
"MIT"
] | 1 | 2019-10-25T09:19:21.000Z | 2019-10-25T09:19:21.000Z | Python/Fibonacci.py | kennethsequeira/Hello-world | 464227bc7d9778a4a2a4044fe415a629003ea77f | [
"MIT"
] | 7 | 2019-09-11T07:17:32.000Z | 2019-09-25T12:23:52.000Z | #Doesn't work.
import time
fibonacci = [1, 1]
n = int(input())
while len(fibonacci) < n:
fibonacci.append(fibonacci[-1] + fibonacci[-2])
for i in range(n):
print(fibonacci[i], end=' ')
| 20.3 | 52 | 0.605911 |
862043a9de9b5c9db7b311f570877aeebbcfcd84 | 59 | py | Python | setup.py | kreyoo/csgo-inv-shuffle | 6392dd1eef1ca87ec25c9cf4845af3f8df3594a5 | [
"MIT"
] | null | null | null | setup.py | kreyoo/csgo-inv-shuffle | 6392dd1eef1ca87ec25c9cf4845af3f8df3594a5 | [
"MIT"
] | 5 | 2021-12-22T19:25:51.000Z | 2022-03-28T19:27:34.000Z | setup.py | kreyoo/csgo-inv-shuffle | 6392dd1eef1ca87ec25c9cf4845af3f8df3594a5 | [
"MIT"
] | null | null | null | from setuptools import setup
setup(name="csgoinvshuffle")
| 14.75 | 28 | 0.813559 |
86205fe9ef8a0c045201301f18357ead5b9c92fc | 6,081 | py | Python | py/_log/log.py | EnjoyLifeFund/py36pkgs | 0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2 | [
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 2 | 2018-03-14T06:45:40.000Z | 2018-06-08T07:46:02.000Z | py/_log/log.py | EnjoyLifeFund/py36pkgs | 0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2 | [
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1 | 2019-03-23T08:23:21.000Z | 2019-03-23T08:23:21.000Z | py/_log/log.py | EnjoyLifeFund/py36pkgs | 0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2 | [
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 2 | 2017-11-07T18:05:19.000Z | 2017-11-14T18:06:55.000Z | """
basic logging functionality based on a producer/consumer scheme.
XXX implement this API: (maybe put it into slogger.py?)
log = Logger(
info=py.log.STDOUT,
debug=py.log.STDOUT,
command=None)
log.info("hello", "world")
log.command("hello", "world")
log = Logger(info=Logger(something=...),
debug=py.log.STDOUT,
command=None)
"""
import py, sys
def default_consumer(msg):
""" the default consumer, prints the message to stdout (using 'print') """
sys.stderr.write(str(msg)+"\n")
default_keywordmapper = KeywordMapper()
#
# Consumers
#
def STDOUT(msg):
""" consumer that writes to sys.stdout """
sys.stdout.write(str(msg)+"\n")
def STDERR(msg):
""" consumer that writes to sys.stderr """
sys.stderr.write(str(msg)+"\n")
for _prio in "EMERG ALERT CRIT ERR WARNING NOTICE INFO DEBUG".split():
_prio = "LOG_" + _prio
try:
setattr(Syslog, _prio, getattr(py.std.syslog, _prio))
except AttributeError:
pass
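# Hedged usage sketch (the Producer/setconsumer API referred to in the module
# docstring is defined elsewhere in py.log and elided here):
# log = py.log.Producer("myapp")
# py.log.setconsumer("myapp", py.log.STDOUT)
# log("hello", "world")  # routed through the keyword mapper to the consumer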
| 32.518717 | 79 | 0.587568 |
86207cdb07326bc532b6b5b79d11a692b3f498c4 | 1,696 | py | Python | test/test_all_contacts.py | Sergggio/python_training | 6dfdbed9a503cf9a6810b31c57bdde76b15e4ec4 | [
"Apache-2.0"
] | null | null | null | test/test_all_contacts.py | Sergggio/python_training | 6dfdbed9a503cf9a6810b31c57bdde76b15e4ec4 | [
"Apache-2.0"
] | null | null | null | test/test_all_contacts.py | Sergggio/python_training | 6dfdbed9a503cf9a6810b31c57bdde76b15e4ec4 | [
"Apache-2.0"
] | null | null | null | import re
from model.contact import Contact
| 38.545455 | 94 | 0.635024 |
8621222cce83ccae2b5fe5d557b5c9ece5f258f8 | 604 | py | Python | samples/abp/test_graphics.py | jproudlo/PyModel | 2ab0e2cf821807206725adaa425409b0c28929b7 | [
"BSD-3-Clause"
] | 61 | 2015-01-29T16:18:51.000Z | 2021-09-28T10:14:02.000Z | samples/abp/test_graphics.py | vikstr/PyModel | 4fff616fe0fd8342c91a42d9db5d4097a179dff8 | [
"BSD-3-Clause"
] | 2 | 2015-02-04T11:57:53.000Z | 2021-07-18T20:59:55.000Z | samples/abp/test_graphics.py | vikstr/PyModel | 4fff616fe0fd8342c91a42d9db5d4097a179dff8 | [
"BSD-3-Clause"
] | 34 | 2015-02-04T12:00:29.000Z | 2022-03-14T07:41:25.000Z | """
ABP analyzer and graphics tests
"""
cases = [
    ('Run PyModel Graphics to generate a dot file from the FSM model, no need to use pma',
'pmg ABP'),
('Generate SVG file from dot',
'dotsvg ABP'),
# Now display ABP.dot in browser
('Run PyModel Analyzer to generate FSM from original FSM, should be the same',
'pma ABP'),
('Run PyModel Graphics to generate a file of graphics commands from new FSM',
'pmg ABPFSM'),
('Generate an svg file from the graphics commands',
'dotsvg ABPFSM'),
# Now display ABPFSM.svg in browser, should look the same as ABP.svg
]
| 24.16 | 82 | 0.653974 |
86226920fab3327506a58d2f239e976d2e4d87d4 | 634 | py | Python | games/migrations/0002_auto_20201026_1221.py | IceArrow256/game-list | 5f06e0ff80023acdc0290a9a8f814f7c93b45e0e | [
"Unlicense"
] | 3 | 2020-10-19T12:33:37.000Z | 2020-10-21T05:28:35.000Z | games/migrations/0002_auto_20201026_1221.py | IceArrow256/gamelist | 5f06e0ff80023acdc0290a9a8f814f7c93b45e0e | [
"Unlicense"
] | null | null | null | games/migrations/0002_auto_20201026_1221.py | IceArrow256/gamelist | 5f06e0ff80023acdc0290a9a8f814f7c93b45e0e | [
"Unlicense"
] | null | null | null | # Generated by Django 3.1.2 on 2020-10-26 12:21
from django.db import migrations, models
import django.db.models.deletion
| 25.36 | 111 | 0.600946 |
862625f0bd5d6882a14a812018126e427778e14a | 11,603 | py | Python | build/lib.linux-x86_64-2.7_ucs4/mx/Misc/PackageTools.py | mkubux/egenix-mx-base | 3e6f9186334d9d73743b0219ae857564c7208247 | [
"eGenix"
] | null | null | null | build/lib.linux-x86_64-2.7_ucs4/mx/Misc/PackageTools.py | mkubux/egenix-mx-base | 3e6f9186334d9d73743b0219ae857564c7208247 | [
"eGenix"
] | null | null | null | build/lib.linux-x86_64-2.7_ucs4/mx/Misc/PackageTools.py | mkubux/egenix-mx-base | 3e6f9186334d9d73743b0219ae857564c7208247 | [
"eGenix"
] | null | null | null | """ PackageTools - A set of tools to aid working with packages.
Copyright (c) 1998-2000, Marc-Andre Lemburg; mailto:[email protected]
Copyright (c) 2000-2015, eGenix.com Software GmbH; mailto:[email protected]
See the documentation for further information on copyrights,
or contact the author. All Rights Reserved.
"""
__version__ = '0.4.0'
import os,types,sys,re,imp,__builtin__
import mx.Tools.NewBuiltins
# RE to identify Python modules
suffixes = projection(imp.get_suffixes(),0)
module_name = re.compile('(.*)(' + '|'.join(suffixes) + ')$')
initmodule_name = re.compile('__init__(' + '|'.join(suffixes) + ')$')
initmodule_names = []
for suffix in suffixes:
initmodule_names.append('__init__' + suffix)
def find_packages(dir=os.curdir, files_only=0, recursive=0, ignore_modules=0,
pkgbasename='', pkgdict=None,
isdir=os.path.isdir,exists=os.path.exists,
isfile=os.path.isfile,join=os.path.join,listdir=os.listdir,
module_name=module_name,initmodule_name=initmodule_name):
""" Return a list of package names found in dir.
Packages are Python modules and subdirectories that provide an
__init__ module. The .py extension is removed from the
        files. The __init__ modules are not considered to be separate
packages.
If files_only is true, only Python files are included in the
search (subdirectories are *not* taken into account). If
ignore_modules is true (default is false), modules are
ignored. If recursive is true the search recurses into package
directories.
pkgbasename and pkgdict are only used during recursion.
"""
l = listdir(dir)
if pkgdict is None:
pkgdict = {}
if files_only:
for filename in l:
m = module_name.match(filename)
if m is not None and \
m.group(1) != '__init__':
pkgdict[pkgbasename + m.group(1)] = 1
else:
for filename in l:
path = join(dir, filename)
if isdir(path):
# Check for __init__ module(s)
for name in initmodule_names:
if isfile(join(path, name)):
pkgname = pkgbasename + filename
pkgdict[pkgname] = 1
if recursive:
find_packages(path,
recursive=1,
pkgbasename=pkgname + '.',
pkgdict=pkgdict)
break
elif not ignore_modules:
m = module_name.match(filename)
if m is not None and \
m.group(1) != '__init__':
pkgdict[pkgbasename + m.group(1)] = 1
return pkgdict.keys()
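# Hedged usage example (not part of the original module): for a hypothetical
# tree containing ./pkg/__init__.py and ./tool.py,
#     find_packages(os.curdir, recursive=1)
# would return ['pkg', 'tool'].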
def find_subpackages(package, recursive=0,
splitpath=os.path.split):
""" Assuming that package points to a loaded package module, this
function tries to identify all subpackages of that package.
Subpackages are all Python files included in the same
directory as the module plus all subdirectories having an
        __init__.py file. The module's name is prepended to all
subpackage names.
The module location is found by looking at the __file__
attribute that non-builtin modules define. The function uses
the __all__ attribute from the package __init__ module if
available.
If recursive is true (default is false), then subpackages of
subpackages are recursively also included in the search.
"""
if not recursive:
# Try the __all__ attribute...
try:
subpackages = list(package.__all__)
except (ImportError, AttributeError):
# Did not work, then let's try to find the subpackages by looking
# at the directory where package lives...
subpackages = find_packages(package.__path__[0], recursive=recursive)
else:
# XXX Recursive search does not support the __all__ attribute
subpackages = find_packages(package.__path__[0], recursive=recursive)
basename = package.__name__ + '.'
for i,name in irange(subpackages):
subpackages[i] = basename + name
return subpackages
def _thismodule(upcount=1,
exc_info=sys.exc_info,trange=trange):
""" Returns the module object that the callee is calling from.
upcount can be given to indicate how far up the execution
stack the function is supposed to look (1 == direct callee, 2
== callee of callee, etc.).
"""
try:
1/0
except:
frame = exc_info()[2].tb_frame
for i in trange(upcount):
frame = frame.f_back
name = frame.f_globals['__name__']
del frame
return sys.modules[name]
def _module_loader(name, locals, globals, sysmods, errors='strict',
importer=__import__, reloader=reload, from_list=['*']):
""" Internal API for loading a module
"""
if not sysmods.has_key(name):
is_new = 1
else:
is_new = 0
try:
mod = importer(name, locals, globals, from_list)
if reload and not is_new:
mod = reloader(mod)
except KeyboardInterrupt:
# Pass through; SystemExit will be handled by the error handler
raise
except Exception, why:
if errors == 'ignore':
pass
elif errors == 'strict':
raise
elif callable(errors):
errors(name, sys.exc_info()[0], sys.exc_info()[1])
else:
raise ValueError,'unknown errors value'
else:
return mod
return None
def import_modules(modnames,module=None,errors='strict',reload=0,
thismodule=_thismodule):
""" Import all modules given in modnames into module.
module defaults to the caller's module. modnames may contain
dotted package names.
If errors is 'strict' (default), then ImportErrors and
SyntaxErrors are raised. If set to 'ignore', they are silently
ignored. If errors is a callable object, then it is called
with arguments (modname, errorclass, errorvalue). If the
handler returns, processing continues.
        If reload is true (default is false), modules in the list that are
        already loaded will be forced to reload.
"""
if module is None:
module = _thismodule(2)
locals = module.__dict__
sysmods = sys.modules
for name in modnames:
mod = _module_loader(name, locals, locals, sysmods, errors=errors)
if mod is not None:
locals[name] = mod
def load_modules(modnames,locals=None,globals=None,errors='strict',reload=0):
""" Imports all modules in modnames using the given namespaces and returns
list of corresponding module objects.
If errors is 'strict' (default), then ImportErrors and
SyntaxErrors are raised. If set to 'ignore', they are silently
ignored. If errors is a callable object, then it is called
with arguments (modname, errorclass, errorvalue). If the
handler returns, processing continues.
        If reload is true (default is false), modules in the list that are
        already loaded will be forced to reload.
"""
modules = []
append = modules.append
sysmods = sys.modules
for name in modnames:
mod = _module_loader(name, locals, globals, sysmods, errors=errors)
if mod is not None:
append(mod)
return modules
def import_subpackages(module, reload=0, recursive=0,
import_modules=import_modules,
find_subpackages=find_subpackages):
""" Does a subpackages scan using find_subpackages(module) and then
imports all submodules found into module.
The module location is found by looking at the __file__
attribute that non-builtin modules define. The function uses
the __all__ attribute from the package __init__ module if
available.
        If reload is true (default is false), modules in the list that are
        already loaded will be forced to reload.
"""
import_modules(find_subpackages(module, recursive=recursive),
module, reload=reload)
def load_subpackages(module, locals=None, globals=None, errors='strict', reload=0,
recursive=0,
load_modules=load_modules,
find_subpackages=find_subpackages):
""" Same as import_subpackages but with load_modules
functionality, i.e. imports the modules and also returns a list of
module objects.
If errors is 'strict' (default), then ImportErrors are
raised. If set to 'ignore', they are silently ignored.
        If reload is true (default is false), modules in the list that are
        already loaded will be forced to reload.
"""
return load_modules(find_subpackages(module, recursive=recursive),
locals, globals,
errors=errors, reload=reload)
def modules(names,
extract=extract):
""" Converts a list of module names into a list of module objects.
The modules must already be loaded.
"""
return extract(sys.modules, names)
def package_modules(pkgname):
""" Returns a list of all modules belonging to the package with the
given name.
The package must already be loaded. Only the currently
registered modules are included in the list.
"""
match = pkgname + '.'
match_len = len(match)
mods = [sys.modules[pkgname]]
for k,v in sys.modules.items():
if k[:match_len] == match and v is not None:
mods.append(v)
return mods
def find_classes(mods,baseclass=None,annotated=0,
ClassType=types.ClassType,issubclass=issubclass):
""" Find all subclasses of baseclass or simply all classes (if baseclass
is None) defined by the module objects in list mods.
If annotated is true the returned list will contain tuples
(module_object,name,class_object) for each class found where
module_object is the module where the class is defined.
"""
classes = []
for mod in mods:
for name,obj in mod.__dict__.items():
if type(obj) is ClassType:
if baseclass and not issubclass(obj,baseclass):
continue
if annotated:
classes.append((mod, name, obj))
else:
classes.append(obj)
return classes
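# Hedged usage example (not part of the original module): collect every
# Exception subclass defined across a loaded package's modules, annotated
# with the defining module and name:
#     find_classes(package_modules('mx.Misc'), baseclass=Exception, annotated=1)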
def find_instances(mods,baseclass,annotated=0,
InstanceType=types.InstanceType,issubclass=issubclass):
""" Find all instances of baseclass defined by the module objects
in list mods.
If annotated is true the returned list will contain tuples
(module_object,name,instances_object) for each instances found where
module_object is the module where the instances is defined.
"""
instances = []
for mod in mods:
for name,obj in mod.__dict__.items():
if isinstance(obj,baseclass):
if annotated:
instances.append((mod,name,obj))
else:
instances.append(obj)
return instances
| 35.375 | 82 | 0.613031 |
8626687151185e3140516d592a31a3534739d928 | 72,182 | py | Python | Lib/test/test_urllib.py | Kshitijkrishnadas/haribol | ca45e633baaabaad3bb923f5633340ccf88d996c | [
"bzip2-1.0.6"
] | 4 | 2020-08-06T04:39:33.000Z | 2020-12-01T08:35:09.000Z | Lib/test/test_urllib.py | Kshitijkrishnadas/haribol | ca45e633baaabaad3bb923f5633340ccf88d996c | [
"bzip2-1.0.6"
] | 6 | 2020-07-22T01:19:01.000Z | 2021-04-25T15:03:35.000Z | Lib/test/test_urllib.py | Kshitijkrishnadas/haribol | ca45e633baaabaad3bb923f5633340ccf88d996c | [
"bzip2-1.0.6"
] | 2 | 2020-12-02T03:52:33.000Z | 2021-01-20T01:36:09.000Z | """Regression tests for what was in Python 2's "urllib" module"""
import urllib.parse
import urllib.request
import urllib.error
import http.client
import email.message
import io
import unittest
from unittest.mock import patch
from test import support
import os
try:
import ssl
except ImportError:
ssl = None
import sys
import tempfile
from nturl2path import url2pathname, pathname2url
from base64 import b64encode
import collections
def hexescape(char):
"""Escape char as RFC 2396 specifies"""
hex_repr = hex(ord(char))[2:].upper()
if len(hex_repr) == 1:
hex_repr = "0%s" % hex_repr
return "%" + hex_repr
# Shortcut for testing FancyURLopener
_urlopener = None
def urlopen(url, data=None, proxies=None):
"""urlopen(url [, data]) -> open file-like object"""
global _urlopener
if proxies is not None:
opener = urllib.request.FancyURLopener(proxies=proxies)
elif not _urlopener:
opener = FancyURLopener()
_urlopener = opener
else:
opener = _urlopener
if data is None:
return opener.open(url)
else:
return opener.open(url, data)
def test_read_1_0(self):
self.check_read(b"1.0")
def test_read_1_1(self):
self.check_read(b"1.1")
def test_read_bogus(self):
# urlopen() should raise OSError for many error codes.
self.fakehttp(b'''HTTP/1.1 401 Authentication Required
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Type: text/html; charset=iso-8859-1
''', mock_close=True)
try:
self.assertRaises(OSError, urlopen, "http://python.org/")
finally:
self.unfakehttp()
def test_invalid_redirect(self):
# urlopen() should raise OSError for many error codes.
self.fakehttp(b'''HTTP/1.1 302 Found
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Location: file://guidocomputer.athome.com:/python/license
Connection: close
Content-Type: text/html; charset=iso-8859-1
''', mock_close=True)
try:
msg = "Redirection to url 'file:"
with self.assertRaisesRegex(urllib.error.HTTPError, msg):
urlopen("http://python.org/")
finally:
self.unfakehttp()
def test_redirect_limit_independent(self):
# Ticket #12923: make sure independent requests each use their
# own retry limit.
for i in range(FancyURLopener().maxtries):
self.fakehttp(b'''HTTP/1.1 302 Found
Location: file://guidocomputer.athome.com:/python/license
Connection: close
''', mock_close=True)
try:
self.assertRaises(urllib.error.HTTPError, urlopen,
"http://something")
finally:
self.unfakehttp()
# Just commented them out.
# Can't really tell why they keep failing on Windows and SPARC.
# Everywhere else they work OK, but on those machines they sometimes
# fail in one of the tests, sometimes in another. I have a Linux box,
# and the tests run OK there.
# If anybody has one of the problematic environments, please help!
# . Facundo
#
# def server(evt):
# import socket, time
# serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# serv.settimeout(3)
# serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# serv.bind(("", 9093))
# serv.listen()
# try:
# conn, addr = serv.accept()
# conn.send("1 Hola mundo\n")
# cantdata = 0
# while cantdata < 13:
# data = conn.recv(13-cantdata)
# cantdata += len(data)
# time.sleep(.3)
# conn.send("2 No more lines\n")
# conn.close()
# except socket.timeout:
# pass
# finally:
# serv.close()
# evt.set()
#
# class FTPWrapperTests(unittest.TestCase):
#
# def setUp(self):
# import ftplib, time, threading
# ftplib.FTP.port = 9093
# self.evt = threading.Event()
# threading.Thread(target=server, args=(self.evt,)).start()
# time.sleep(.1)
#
# def tearDown(self):
# self.evt.wait()
#
# def testBasic(self):
# # connects
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# ftp.close()
#
# def testTimeoutNone(self):
# # global default timeout is ignored
# import socket
# self.assertIsNone(socket.getdefaulttimeout())
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutDefault(self):
# # global default timeout is used
# import socket
# self.assertIsNone(socket.getdefaulttimeout())
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutValue(self):
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [],
# timeout=30)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
if __name__ == '__main__':
unittest.main()
| 41.796178 | 108 | 0.589759 |
86273fb9e1a631cb61fc755f591bccb65bcc2063 | 553 | py | Python | gapipy/resources/tour/transport.py | wmak/gapipy | b6849606d4f6af24b9f871f65e87aaf0d0c013cc | [
"MIT"
] | null | null | null | gapipy/resources/tour/transport.py | wmak/gapipy | b6849606d4f6af24b9f871f65e87aaf0d0c013cc | [
"MIT"
] | null | null | null | gapipy/resources/tour/transport.py | wmak/gapipy | b6849606d4f6af24b9f871f65e87aaf0d0c013cc | [
"MIT"
] | null | null | null | # Python 2 and 3
from __future__ import unicode_literals
from ...models import Address, SeasonalPriceBand
from ..base import Product
| 27.65 | 87 | 0.703436 |
8627cb215475c4cdba11abae1ef96d79eaf6f75a | 440 | py | Python | modules/dare.py | VeNoM-hubs/nyx | 1d76b3ad50add2e71e70fac40699e0cb513b084e | [
"MIT"
] | null | null | null | modules/dare.py | VeNoM-hubs/nyx | 1d76b3ad50add2e71e70fac40699e0cb513b084e | [
"MIT"
] | 3 | 2020-10-16T16:23:02.000Z | 2021-09-08T02:33:38.000Z | modules/dare.py | VeNoM-hubs/nyx | 1d76b3ad50add2e71e70fac40699e0cb513b084e | [
"MIT"
] | 5 | 2020-10-14T04:03:27.000Z | 2020-11-24T04:10:03.000Z | from discord.ext import commands
import json
import random
with open("assets/json/questions.json") as data:
data = json.load(data)
dares = data["dares"]
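# Illustration (assumption -- the command cog that consumes this data was not
# shown in this excerpt): a handler would typically draw one entry at random,
# e.g. random.choice(dares).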
| 19.130435 | 48 | 0.659091 |
8627e459bffff8a71e23af3dc3f940f880264aa8 | 65 | py | Python | scripts/apic.py | nicmatth/APIC-EM-HelloWorldv3 | c0645e6decf57dbd87c5a239b6fce36f3dcbef41 | [
"Apache-2.0"
] | null | null | null | scripts/apic.py | nicmatth/APIC-EM-HelloWorldv3 | c0645e6decf57dbd87c5a239b6fce36f3dcbef41 | [
"Apache-2.0"
] | null | null | null | scripts/apic.py | nicmatth/APIC-EM-HelloWorldv3 | c0645e6decf57dbd87c5a239b6fce36f3dcbef41 | [
"Apache-2.0"
] | null | null | null | APIC_IP="sandboxapic.cisco.com"
APIC_PORT="443"
GROUP='group-xx'
| 16.25 | 31 | 0.769231 |
8627f95ead1f5387af07178d55a37d9519bc58b3 | 1,205 | py | Python | stella/test/external_func.py | squisher/stella | d9f0b2ebbd853b31c6f75cd0f0286037da4bcaf9 | [
"Apache-2.0"
] | 11 | 2015-08-03T17:37:46.000Z | 2021-05-26T07:29:36.000Z | stella/test/external_func.py | squisher/stella | d9f0b2ebbd853b31c6f75cd0f0286037da4bcaf9 | [
"Apache-2.0"
] | 1 | 2016-09-17T01:46:13.000Z | 2016-09-17T01:46:13.000Z | stella/test/external_func.py | squisher/stella | d9f0b2ebbd853b31c6f75cd0f0286037da4bcaf9 | [
"Apache-2.0"
] | 3 | 2016-05-21T19:17:16.000Z | 2019-05-10T17:35:37.000Z | # Copyright 2013-2015 David Mohr
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from random import randint
import mtpy
from . import * # noqa
| 24.591837 | 78 | 0.692946 |
862895e0beee8139d3bebfdbda1b874ae1ecc23b | 18,880 | py | Python | szh_objects.py | ipqhjjybj/bitcoin_trend_strategy | 0c85055558591574a4171abd68142ebbeb502958 | [
"MIT"
] | 4 | 2019-10-07T13:24:35.000Z | 2020-12-03T19:03:15.000Z | szh_objects.py | ipqhjjybj/bitcoin_trend_strategy | 0c85055558591574a4171abd68142ebbeb502958 | [
"MIT"
] | 1 | 2019-10-08T07:11:30.000Z | 2019-10-08T07:11:30.000Z | szh_objects.py | ipqhjjybj/bitcoin_trend_strategy | 0c85055558591574a4171abd68142ebbeb502958 | [
"MIT"
] | 2 | 2019-12-15T03:50:57.000Z | 2021-05-25T15:44:05.000Z | # encoding: utf-8
import sys
from market_maker import OrderManager
from settings import *
import os
from pymongo import MongoClient, ASCENDING
from pymongo.errors import ConnectionFailure
from datetime import datetime , timedelta
import numpy as np
########################################################################################################################
# constants
EXCHANGE_BITMEX = "BITMEX"
EMPTY_STRING = ""
EMPTY_FLOAT = 0.0
EMPTY_INT = 0
#----------------------------------------------------------------------
'''
tick
'''
########################################################################
'''
engine
'''
'''
Engine
'''
########################################################################
########################################################################
| 33.895871 | 131 | 0.476536 |
8628a8ccf18c32191b9cace42141414df8e8de89 | 7,864 | py | Python | CodeAnalysis/SourceMeter_Interface/SourceMeter-8.2.0-x64-linux/Python/Tools/python/pylint/pyreverse/writer.py | ishtjot/susereumutep | 56e20c1777e0c938ac42bd8056f84af9e0b76e46 | [
"Apache-2.0"
] | 14,668 | 2015-01-01T01:57:10.000Z | 2022-03-31T23:33:32.000Z | CodeAnalysis/SourceMeter_Interface/SourceMeter-8.2.0-x64-linux/Python/Tools/python/pylint/pyreverse/writer.py | ishtjot/susereumutep | 56e20c1777e0c938ac42bd8056f84af9e0b76e46 | [
"Apache-2.0"
] | 395 | 2020-04-18T08:22:18.000Z | 2021-12-08T13:04:49.000Z | CodeAnalysis/SourceMeter_Interface/SourceMeter-8.2.0-x64-linux/Python/Tools/python/pylint/pyreverse/writer.py | ishtjot/susereumutep | 56e20c1777e0c938ac42bd8056f84af9e0b76e46 | [
"Apache-2.0"
] | 5,941 | 2015-01-02T11:32:21.000Z | 2022-03-31T16:35:46.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2008-2013 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:[email protected]
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Utilities for creating VCG and Dot diagrams"""
from logilab.common.vcgutils import VCGPrinter
from logilab.common.graph import DotBackend
from pylint.pyreverse.utils import is_exception
| 39.32 | 82 | 0.594863 |
86291f58eceea662a0595f262f1d06df3c3cd46d | 1,070 | py | Python | graphql-ml-serving/backend/mutations.py | philippe-heitzmann/python-apps | 1cc6e5e9b9ac81c81a3d4f0e420ff488fe6b2f0a | [
"MIT"
] | 13 | 2021-05-23T15:47:24.000Z | 2022-03-24T16:22:14.000Z | graphql-ml-serving/backend/mutations.py | philippe-heitzmann/python-apps | 1cc6e5e9b9ac81c81a3d4f0e420ff488fe6b2f0a | [
"MIT"
] | 4 | 2021-11-16T20:44:55.000Z | 2022-01-13T19:13:38.000Z | graphql-ml-serving/backend/mutations.py | philippe-heitzmann/python-apps | 1cc6e5e9b9ac81c81a3d4f0e420ff488fe6b2f0a | [
"MIT"
] | 11 | 2021-01-31T06:18:10.000Z | 2021-11-21T00:02:05.000Z | import logging
from ariadne import MutationType, convert_kwargs_to_snake_case
from config import clients, messages, queue
mutation = MutationType()
| 32.424242 | 64 | 0.673832 |
8629a20e8661d77754338b9cfeef38848a59f1c8 | 18,336 | py | Python | hc/api/transports.py | MaxwellDPS/healthchecks | 3730c67c803e707ae51b01bacf2929bd053ee22f | [
"BSD-3-Clause"
] | 1 | 2020-06-08T12:22:51.000Z | 2020-06-08T12:22:51.000Z | hc/api/transports.py | pathcl/healthchecks | ffc45f0c74694d06679aefe3b92a0b0778473ca7 | [
"BSD-3-Clause"
] | 5 | 2021-03-19T11:20:11.000Z | 2021-09-22T19:36:18.000Z | hc/api/transports.py | MaxwellDPS/healthchecks | 3730c67c803e707ae51b01bacf2929bd053ee22f | [
"BSD-3-Clause"
] | null | null | null | import os
from django.conf import settings
from django.template.loader import render_to_string
from django.utils import timezone
import json
import requests
from urllib.parse import quote, urlencode
from hc.accounts.models import Profile
from hc.lib import emails
from hc.lib.string import replace
try:
import apprise
except ImportError:
# Enforce APPRISE_ENABLED = False when apprise is not installed
settings.APPRISE_ENABLED = False
class Sms(HttpTransport):
URL = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json"
class WhatsApp(HttpTransport):
URL = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json"
class Trello(HttpTransport):
URL = "https://api.trello.com/1/cards"
class Apprise(HttpTransport):
class MsTeams(HttpTransport):
class Zulip(HttpTransport):
| 30.816807 | 88 | 0.588405 |
8629c195c4f2a076441e398a8eff9a8680863488 | 9,419 | py | Python | graviti/portex/builder.py | Graviti-AI/graviti-python-sdk | d2faf86b4718416503b965f6057b31015417446f | [
"MIT"
] | 12 | 2022-01-26T06:51:02.000Z | 2022-03-22T21:28:35.000Z | graviti/portex/builder.py | Graviti-AI/graviti-python-sdk | d2faf86b4718416503b965f6057b31015417446f | [
"MIT"
] | 51 | 2022-02-22T07:19:34.000Z | 2022-03-31T11:39:51.000Z | graviti/portex/builder.py | Graviti-AI/graviti-python-sdk | d2faf86b4718416503b965f6057b31015417446f | [
"MIT"
] | 5 | 2022-01-26T06:51:49.000Z | 2022-03-08T03:41:11.000Z | #!/usr/bin/env python3
#
# Copyright 2022 Graviti. Licensed under MIT License.
#
"""Portex type builder related classes."""
from hashlib import md5
from pathlib import Path
from shutil import rmtree
from subprocess import PIPE, CalledProcessError, run
from tempfile import gettempdir
from typing import TYPE_CHECKING, Any, Dict, List, Tuple, Type, TypeVar
import yaml
import graviti.portex.ptype as PTYPE
from graviti.exception import GitCommandError, GitNotFoundError
from graviti.portex.base import PortexRecordBase
from graviti.portex.external import PortexExternalType
from graviti.portex.factory import ConnectedFieldsFactory, TypeFactory
from graviti.portex.package import ExternalPackage, Imports, packages
from graviti.portex.param import Param, Params
from graviti.portex.register import ExternalContainerRegister
if TYPE_CHECKING:
from subprocess import CompletedProcess
from graviti.portex.base import PortexType
EXTERNAL_TYPE_TO_CONTAINER = ExternalContainerRegister.EXTERNAL_TYPE_TO_CONTAINER
_I = TypeVar("_I", bound="BuilderImports")
def build_package(url: str, revision: str) -> ExternalPackage:
"""Build an external package.
Arguments:
url: The git repo url of the external package.
revision: The git repo revision (tag/commit) of the external package.
Returns:
The :class:`ExternalPackage` instance.
"""
builder = PackageBuilder(url, revision)
package = builder.build()
packages.externals[url, revision] = package
return package
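# Illustrative call (the URL and revision below are hypothetical, not taken
# from this module):
# package = build_package("https://github.com/Graviti-AI/portex-standard", "main")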
| 29.996815 | 93 | 0.62013 |
862a6e4ef7c112a1f58f960d0cfe8a4298a64c51 | 3,184 | py | Python | dffml/operation/mapping.py | SGeetansh/dffml | 04647bdcadef2f7e7b59cdd8ac1e89f17ef1095b | [
"MIT"
] | 171 | 2019-03-08T19:02:06.000Z | 2022-03-29T16:17:23.000Z | dffml/operation/mapping.py | NikhilBartwal/dffml | 16180144f388924d9e5840c4aa80d08970af5e60 | [
"MIT"
] | 1,158 | 2019-03-08T19:07:50.000Z | 2022-03-25T08:28:27.000Z | dffml/operation/mapping.py | NikhilBartwal/dffml | 16180144f388924d9e5840c4aa80d08970af5e60 | [
"MIT"
] | 183 | 2019-03-10T02:40:56.000Z | 2022-03-27T18:51:26.000Z | from typing import Dict, List, Any
from ..df.types import Definition
from ..df.base import op
from ..util.data import traverse_get
MAPPING = Definition(name="mapping", primitive="map")
MAPPING_TRAVERSE = Definition(name="mapping_traverse", primitive="List[str]")
MAPPING_KEY = Definition(name="key", primitive="str")
MAPPING_VALUE = Definition(name="value", primitive="generic")
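# Hedged sketch (assumption): the operation bodies were not shown in this
# excerpt. A minimal extract-value op over the definitions above, written in
# dffml's decorator style, might look like this:
@op(
    inputs={"mapping": MAPPING, "traverse": MAPPING_TRAVERSE},
    outputs={"value": MAPPING_VALUE},
)
async def mapping_extract_value(mapping: Dict[str, Any], traverse: List[str]):
    # walk nested keys, e.g. traverse=["a", "b"] reads mapping["a"]["b"]
    return {"value": traverse_get(mapping, *traverse)}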
| 26.533333 | 89 | 0.557161 |
862ab8872e3c569f3400e44a0e697886a1c4335b | 13,859 | py | Python | anchore_engine/services/policy_engine/__init__.py | Vijay-P/anchore-engine | 660a0bf10c56d16f894919209c51ec7a12081e9b | [
"Apache-2.0"
] | null | null | null | anchore_engine/services/policy_engine/__init__.py | Vijay-P/anchore-engine | 660a0bf10c56d16f894919209c51ec7a12081e9b | [
"Apache-2.0"
] | null | null | null | anchore_engine/services/policy_engine/__init__.py | Vijay-P/anchore-engine | 660a0bf10c56d16f894919209c51ec7a12081e9b | [
"Apache-2.0"
] | null | null | null | import time
import sys
import pkg_resources
import os
import retrying
from sqlalchemy.exc import IntegrityError
# anchore modules
import anchore_engine.clients.services.common
import anchore_engine.subsys.servicestatus
import anchore_engine.subsys.metrics
from anchore_engine.subsys import logger
from anchore_engine.configuration import localconfig
from anchore_engine.clients.services import simplequeue, internal_client_for
from anchore_engine.clients.services.simplequeue import SimpleQueueClient
from anchore_engine.service import ApiService, LifeCycleStages
from anchore_engine.services.policy_engine.engine.feeds.feeds import (
VulnerabilityFeed,
NvdV2Feed,
PackagesFeed,
VulnDBFeed,
GithubFeed,
feed_registry,
NvdFeed,
)
# from anchore_engine.subsys.logger import enable_bootstrap_logging
# enable_bootstrap_logging()
from anchore_engine.utils import timer
feed_sync_queuename = "feed_sync_tasks"
system_user_auth = None
feed_sync_msg = {"task_type": "feed_sync", "enabled": True}
# These are user-configurable but mostly for debugging and testing purposes
try:
FEED_SYNC_RETRIES = int(os.getenv("ANCHORE_FEED_SYNC_CHECK_RETRIES", 5))
except ValueError:
logger.exception(
"Error parsing env value ANCHORE_FEED_SYNC_CHECK_RETRIES into int, using default value of 5"
)
FEED_SYNC_RETRIES = 5
try:
FEED_SYNC_RETRY_BACKOFF = int(
os.getenv("ANCHORE_FEED_SYNC_CHECK_FAILURE_BACKOFF", 5)
)
except ValueError:
logger.exception(
"Error parsing env value ANCHORE_FEED_SYNC_CHECK_FAILURE_BACKOFF into int, using default value of 5"
)
FEED_SYNC_RETRY_BACKOFF = 5
try:
feed_config_check_retries = int(os.getenv("FEED_CLIENT_CHECK_RETRIES", 3))
except ValueError:
logger.exception(
"Error parsing env value FEED_CLIENT_CHECK_RETRIES into int, using default value of 3"
)
feed_config_check_retries = 3
try:
feed_config_check_backoff = int(os.getenv("FEED_CLIENT_CHECK_BACKOFF", 5))
except ValueError:
logger.exception(
"Error parsing env FEED_CLIENT_CHECK_BACKOFF value into int, using default value of 5"
)
feed_config_check_backoff = 5
# service funcs (must be here)
def process_preflight():
"""
Execute the preflight functions, aborting service startup if any throw uncaught exceptions or return False return value
:return:
"""
preflight_check_functions = [init_db_content, init_feed_registry]
for fn in preflight_check_functions:
try:
fn()
except Exception as e:
logger.exception(
"Preflight checks failed with error: {}. Aborting service startup".format(
e
)
)
sys.exit(1)
def init_db_content():
"""
Initialize the policy engine db with any data necessary at startup.
:return:
"""
return _init_distro_mappings()
def handle_feed_sync(*args, **kwargs):
"""
Initiates a feed sync in the system in response to a message from the queue
:param args:
:param kwargs:
:return:
"""
system_user = _system_creds()
logger.info("init args: {}".format(kwargs))
cycle_time = kwargs["mythread"]["cycle_timer"]
while True:
config = localconfig.get_config()
feed_sync_enabled = config.get("feeds", {}).get("sync_enabled", True)
if feed_sync_enabled:
logger.info("Feed sync task executor activated")
try:
run_feed_sync(system_user)
except Exception as e:
logger.error("Caught escaped error in feed sync handler: {}".format(e))
finally:
logger.info("Feed sync task executor complete")
else:
logger.info("sync_enabled is set to false in config - skipping feed sync")
time.sleep(cycle_time)
return True
def handle_feed_sync_trigger(*args, **kwargs):
"""
Checks to see if there is a task for a feed sync in the queue and if not, adds one.
Interval for firing this should be longer than the expected feed sync duration.
:param args:
:param kwargs:
:return:
"""
system_user = _system_creds()
logger.info("init args: {}".format(kwargs))
cycle_time = kwargs["mythread"]["cycle_timer"]
while True:
config = localconfig.get_config()
feed_sync_enabled = config.get("feeds", {}).get("sync_enabled", True)
if feed_sync_enabled:
logger.info("Feed Sync task creator activated")
try:
push_sync_task(system_user)
logger.info("Feed Sync Trigger done, waiting for next cycle.")
except Exception as e:
logger.error(
"Error caught in feed sync trigger handler after all retries: {}. Will wait for next cycle".format(e)
)
finally:
logger.info("Feed Sync task creator complete")
else:
logger.info(
"sync_enabled is set to false in config - skipping feed sync trigger"
)
time.sleep(cycle_time)
return True
| 32.91924 | 126 | 0.637925 |
862c0ef5874a647cec05d7913d882ea14b577a42 | 1,767 | py | Python | juriscraper/oral_args/united_states/federal_appellate/scotus.py | EvandoBlanco/juriscraper | 3d16af258620d4ba1b4827f66ef69e8a2c5a0484 | [
"BSD-2-Clause"
] | 228 | 2015-01-23T04:41:39.000Z | 2022-03-30T09:52:20.000Z | juriscraper/oral_args/united_states/federal_appellate/scotus.py | EvandoBlanco/juriscraper | 3d16af258620d4ba1b4827f66ef69e8a2c5a0484 | [
"BSD-2-Clause"
] | 331 | 2015-01-05T18:53:40.000Z | 2022-03-29T23:43:30.000Z | juriscraper/oral_args/united_states/federal_appellate/scotus.py | EvandoBlanco/juriscraper | 3d16af258620d4ba1b4827f66ef69e8a2c5a0484 | [
"BSD-2-Clause"
] | 84 | 2015-01-03T01:19:21.000Z | 2022-03-01T08:09:32.000Z | """Scraper for Supreme Court of U.S.
CourtID: scotus
Court Short Name: scotus
History:
- 2014-07-20 - Created by Andrei Chelaru, reviewed by MLR
- 2017-10-09 - Updated by MLR.
"""
from datetime import datetime
from juriscraper.OralArgumentSite import OralArgumentSite
| 31 | 107 | 0.611771 |
862c27d164efa5a02f7a2714b410e87587a9e318 | 26,357 | py | Python | code/main.py | pengzhansun/CF-CAR | 2e497a4da0bcc80bb327ee041f1aa0107f53bc3f | [
"MIT"
] | 8 | 2022-03-19T06:53:43.000Z | 2022-03-30T06:37:48.000Z | code/main.py | pengzhansun/CF-CAR | 2e497a4da0bcc80bb327ee041f1aa0107f53bc3f | [
"MIT"
] | 1 | 2022-03-22T12:03:23.000Z | 2022-03-23T02:40:52.000Z | code/main.py | pengzhansun/CF-CAR | 2e497a4da0bcc80bb327ee041f1aa0107f53bc3f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import argparse
import os
import shutil
import time
import numpy as np
import random
from collections import OrderedDict
import torch
import torch.backends.cudnn as cudnn
from callbacks import AverageMeter
from data_utils.causal_data_loader_frames import VideoFolder
from utils import save_results
from tqdm import tqdm
parser = argparse.ArgumentParser(description='Counterfactual CAR')
# Path, dataset and log related arguments
parser.add_argument('--root_frames', type=str, default='/mnt/data1/home/sunpengzhan/sth-sth-v2/',
help='path to the folder with frames')
parser.add_argument('--json_data_train', type=str, default='../data/dataset_splits/compositional/train.json',
help='path to the json file with train video meta data')
parser.add_argument('--json_data_val', type=str, default='../data/dataset_splits/compositional/validation.json',
help='path to the json file with validation video meta data')
parser.add_argument('--json_file_labels', type=str, default='../data/dataset_splits/compositional/labels.json',
help='path to the json file with ground truth labels')
parser.add_argument('--dataset', default='smth_smth',
help='which dataset to train')
parser.add_argument('--logname', default='my_method',
help='name of the experiment for checkpoints and logs')
parser.add_argument('--print_freq', '-p', default=20, type=int,
metavar='N', help='print frequency (default: 20)')
parser.add_argument('--ckpt', default='./ckpt',
help='folder to output checkpoints')
parser.add_argument('--resume_vision', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--resume_coord', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--resume_fusion', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
# model, image&feature dim and training related arguments
parser.add_argument('--model_vision', default='rgb_roi')
parser.add_argument('--model_coord', default='interaction')
parser.add_argument('--model_fusion', default='concat_fusion')
parser.add_argument('--fusion_function', default='fused_sum', type=str,
help='function for fusing activations from each branch')
parser.add_argument('--img_feature_dim', default=512, type=int, metavar='N',
help='intermediate feature dimension for image-based features')
parser.add_argument('--coord_feature_dim', default=512, type=int, metavar='N',
help='intermediate feature dimension for coord-based features')
parser.add_argument('--size', default=224, type=int, metavar='N',
help='primary image input size')
parser.add_argument('--num_boxes', default=4, type=int,
help='num of boxes for each image')
parser.add_argument('--num_frames', default=16, type=int,
help='num of frames for the model')
parser.add_argument('--num_classes', default=174, type=int,
help='num of class in the model')
parser.add_argument('--epochs', default=30, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start_epoch', default=None, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--batch_size', '-b', default=16, type=int,
metavar='N', help='mini-batch size')
parser.add_argument('--lr', '--learning-rate', default=0.01, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--lr_steps', default=[24, 35, 45], type=float, nargs="+",
metavar='LRSteps', help='epochs to decay learning rate by 10')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight_decay', '--wd', default=0.0001, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--clip_gradient', '-cg', default=5, type=float,
metavar='W', help='gradient norm clipping (default: 5)')
parser.add_argument('--search_stride', type=int, default=5, help='test performance every n strides')
# train mode, hardware setting and others related arguments
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--cf_inference_group', action='store_true', help='counterfactual inference model on validation set')
parser.add_argument('--parallel', default=True, type=bool,
help='whether or not train with multi GPUs')
parser.add_argument('--gpu_index', type=str, default='0, 1, 2, 3', help='the index of gpu you want to use')
best_loss = 1000000
def adjust_learning_rate(optimizer, epoch, lr_steps, branch_name=None):
"""Sets the learning rate to the initial LR decayed by 10"""
decay = 0.1 ** (sum(epoch >= np.array(lr_steps)))
lr = args.lr * decay
if branch_name == 'vision':
for param_group in optimizer.param_groups:
param_group['lr'] = lr * 0.8
elif branch_name == 'coord':
for param_group in optimizer.param_groups:
param_group['lr'] = lr
elif branch_name == 'fusion':
for param_group in optimizer.param_groups:
param_group['lr'] = lr
else:
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].contiguous().view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
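# Illustration (not part of the original script): on a dummy batch,
#   logits = torch.randn(8, 174); labels = torch.randint(0, 174, (8,))
#   top1, top5 = accuracy(logits, labels, topk=(1, 5))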
if __name__ == '__main__':
main()
| 45.679376 | 133 | 0.624009 |
862c8c299735f25fc3f48ddc79dfdcc178dd4e54 | 606 | py | Python | api/application/__init__.py | 114000/webapp-boilerplate | 0550396694b4f009e5d862b0098bf7d1f61a4a40 | [
"MIT"
] | null | null | null | api/application/__init__.py | 114000/webapp-boilerplate | 0550396694b4f009e5d862b0098bf7d1f61a4a40 | [
"MIT"
] | null | null | null | api/application/__init__.py | 114000/webapp-boilerplate | 0550396694b4f009e5d862b0098bf7d1f61a4a40 | [
"MIT"
] | 1 | 2021-06-10T02:08:30.000Z | 2021-06-10T02:08:30.000Z | # encoding: utf-8
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
import logging
app = Flask(__name__)
CORS(app, resources={r"/*": {"origins": "*"}})
app.config.from_object('config.current')
db = SQLAlchemy(app)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
'''
'''
import application.jwt
import application.routes.config
import application.routes.user
import application.routes.permission
import application.routes.role
import application.routes.access
# after Model defined
db.create_all() | 16.833333 | 47 | 0.729373 |
862cf0dcbc5e00c994d2c00c5e16de0409816e8b | 1,004 | py | Python | Betsy/Betsy/modules/get_illumina_control.py | jefftc/changlab | 11da8c415afefcba0b0216238387c75aeb3a56ac | [
"MIT"
] | 9 | 2017-01-13T02:38:41.000Z | 2021-04-08T00:44:39.000Z | Betsy/Betsy/modules/get_illumina_control.py | jefftc/changlab | 11da8c415afefcba0b0216238387c75aeb3a56ac | [
"MIT"
] | null | null | null | Betsy/Betsy/modules/get_illumina_control.py | jefftc/changlab | 11da8c415afefcba0b0216238387c75aeb3a56ac | [
"MIT"
] | 4 | 2017-01-05T16:25:25.000Z | 2019-12-12T20:07:38.000Z | from Module import AbstractModule
| 29.529412 | 76 | 0.645418 |
862dc531f725b524bb6846cb090205fc7468f382 | 1,166 | py | Python | src/backup/template/PositionalArgumentTemplate.py | ytyaru0/Python.TemplateFileMaker.20180314204216 | 4849f982acea5d86b711c5dec4cc046016ab1031 | [
"CC0-1.0"
] | null | null | null | src/backup/template/PositionalArgumentTemplate.py | ytyaru0/Python.TemplateFileMaker.20180314204216 | 4849f982acea5d86b711c5dec4cc046016ab1031 | [
"CC0-1.0"
] | null | null | null | src/backup/template/PositionalArgumentTemplate.py | ytyaru0/Python.TemplateFileMaker.20180314204216 | 4849f982acea5d86b711c5dec4cc046016ab1031 | [
"CC0-1.0"
] | null | null | null | from string import Template
import re
if __name__ == '__main__':
template_str = '${0} is Aug.'
t = PositionalArgumentTemplate(template_str)
print(template_str)
print(dir(t))
print(t.delimiter)
print(t.idpattern)
print(type(t.idpattern))
print(t.flags)
print(t.pattern)
print(t.substitute(**{'0':'V'}))
t.find_place_holders(template_str)
| 31.513514 | 73 | 0.587479 |
862e0a0793ac26ff1693be29a952ce4f785121be | 1,020 | py | Python | cla-backend/cla/tests/unit/test_company.py | kdhaigud/easycla | f913f8dbf658acf4711b601f9312ca5663a4efe8 | [
"Apache-2.0",
"CC-BY-4.0",
"MIT"
] | null | null | null | cla-backend/cla/tests/unit/test_company.py | kdhaigud/easycla | f913f8dbf658acf4711b601f9312ca5663a4efe8 | [
"Apache-2.0",
"CC-BY-4.0",
"MIT"
] | null | null | null | cla-backend/cla/tests/unit/test_company.py | kdhaigud/easycla | f913f8dbf658acf4711b601f9312ca5663a4efe8 | [
"Apache-2.0",
"CC-BY-4.0",
"MIT"
] | null | null | null | # Copyright The Linux Foundation and each contributor to CommunityBridge.
# SPDX-License-Identifier: MIT
import json
import os
import requests
import uuid
import hug
import pytest
from falcon import HTTP_200, HTTP_409
import cla
from cla import routes
ID_TOKEN = os.environ.get('ID_TOKEN')
API_URL = os.environ.get('API_URL')
def test_create_company_duplicate():
"""
Test creating duplicate company names
"""
import pdb;pdb.set_trace()
url = f'{API_URL}/v1/company'
company_name = 'test_company_name'
data = {
'company_id' : uuid.uuid4() ,
'company_name' : company_name ,
}
headers = {
'Authorization' : f'Bearer {ID_TOKEN}'
}
response = requests.post(url, data=data, headers=headers)
assert response.status == HTTP_200
# add duplicate company
data = {
'company_id' : uuid.uuid4(),
'company_name' : company_name
}
req = hug.test.post(routes, url, data=data, headers=headers)
assert req.status == HTTP_409
| 23.72093 | 73 | 0.673529 |
862fb7f41889fb9ebdc1d283480d889b7dbfd294 | 3,144 | py | Python | py/WatchDialog.py | mathematicalmichael/SpringNodes | 3ff4034b6e57ee6efa55c963e1819f3d30a2c4ab | [
"MIT"
] | 51 | 2015-09-25T09:30:57.000Z | 2022-01-19T14:16:44.000Z | py/WatchDialog.py | sabeelcoder/SpringNodes | e21a24965474d54369e74d23c06f8c42a7b926b5 | [
"MIT"
] | 66 | 2015-09-30T02:43:32.000Z | 2022-03-31T02:26:52.000Z | py/WatchDialog.py | sabeelcoder/SpringNodes | e21a24965474d54369e74d23c06f8c42a7b926b5 | [
"MIT"
] | 48 | 2015-11-19T01:34:47.000Z | 2022-02-25T17:26:48.000Z | # Copyright(c) 2017, Dimitar Venkov
# @5devene, [email protected]
# www.badmonkeys.net
import clr
clr.AddReference('System.Windows.Forms')
clr.AddReference('System.Drawing')
from System.Drawing import Point, Color, Font
from System.Windows.Forms import *
from cStringIO import StringIO
str_file = StringIO()
size1 = [30, 23] #height, width
l1 = [] if IN[0] is None else tolist(IN[0])
list2str(l1, IN[1])
str_content = str_file.getvalue()
str_file.close()
width1 = 100
form = WatchBox(str_content)
form.adjust_controls(*size1)
Application.Run(form)
OUT = form.text1
Application.Exit()
form.Dispose() | 27.578947 | 79 | 0.688613 |
8630212a84fb76678a871b47fba5eab501615806 | 1,658 | py | Python | 292-nim-game.py | mvj3/leetcode | 3111199beeaefbb3a74173e783ed21c9e53ab203 | [
"MIT"
] | null | null | null | 292-nim-game.py | mvj3/leetcode | 3111199beeaefbb3a74173e783ed21c9e53ab203 | [
"MIT"
] | null | null | null | 292-nim-game.py | mvj3/leetcode | 3111199beeaefbb3a74173e783ed21c9e53ab203 | [
"MIT"
] | null | null | null | """
Question:
Nim Game
You are playing the following Nim Game with your friend: There is a heap of stones on the table, each time one of you take turns to remove 1 to 3 stones. The one who removes the last stone will be the winner. You will take the first turn to remove the stones.
Both of you are very clever and have optimal strategies for the game. Write a function to determine whether you can win the game given the number of stones in the heap.
For example, if there are 4 stones in the heap, then you will never win the game: no matter 1, 2, or 3 stones you remove, the last stone will always be removed by your friend.
Hint:
If there are 5 stones in the heap, could you figure out a way to remove the stones such that you will always be the winner?
Credits:
Special thanks to @jianchao.li.fighter for adding this problem and creating all test cases.
Performance:
1. Total Accepted: 31755 Total Submissions: 63076 Difficulty: Easy
2. Your runtime beats 43.52% of python submissions.
"""
assert Solution().canWinNim(0) is True
assert Solution().canWinNim(1) is True
assert Solution().canWinNim(2) is True
assert Solution().canWinNim(3) is True
assert Solution().canWinNim(4) is False
assert Solution().canWinNim(5) is True
assert Solution().canWinNim(6) is True
assert Solution().canWinNim(7) is True
assert Solution().canWinNim(8) is False
| 36.043478 | 263 | 0.700844 |
863032b8210dd9655600e6a9f42f0fb08b0f6d53 | 370 | py | Python | script_tests/maf_extract_ranges_indexed_tests.py | lldelisle/bx-python | 19ab41e0905221e3fcaaed4b74faf2d7cda0d15a | [
"MIT"
] | 122 | 2015-07-01T12:00:22.000Z | 2022-03-02T09:27:35.000Z | script_tests/maf_extract_ranges_indexed_tests.py | lldelisle/bx-python | 19ab41e0905221e3fcaaed4b74faf2d7cda0d15a | [
"MIT"
] | 64 | 2015-11-06T21:03:18.000Z | 2022-03-24T00:55:27.000Z | script_tests/maf_extract_ranges_indexed_tests.py | lldelisle/bx-python | 19ab41e0905221e3fcaaed4b74faf2d7cda0d15a | [
"MIT"
] | 60 | 2015-10-05T19:19:36.000Z | 2021-11-19T20:53:54.000Z | import unittest
import base
| 37 | 116 | 0.775676 |
8630b3c80464d13f544a914873b82ed141f94bf1 | 9,098 | py | Python | qstklearn/1knn.py | elxavicio/QSTK | 4981506c37227a72404229d5e1e0887f797a5d57 | [
"BSD-3-Clause"
] | 339 | 2015-01-01T10:06:49.000Z | 2022-03-23T23:32:24.000Z | QSTK/qstklearn/1knn.py | jenniyanjie/QuantSoftwareToolkit | 0eb2c7a776c259a087fdcac1d3ff883eb0b5516c | [
"BSD-3-Clause"
] | 19 | 2015-01-04T13:12:33.000Z | 2021-07-19T11:13:47.000Z | QSTK/qstklearn/1knn.py | jenniyanjie/QuantSoftwareToolkit | 0eb2c7a776c259a087fdcac1d3ff883eb0b5516c | [
"BSD-3-Clause"
] | 154 | 2015-01-30T09:41:15.000Z | 2022-03-19T02:27:59.000Z | '''
(c) 2011, 2012 Georgia Tech Research Corporation
This source code is released under the New BSD license. Please see
http://wiki.quantsoftware.org/index.php?title=QSTK_License
for license details.
Created on Feb 20, 2011
@author: John Cornwell
@organization: Georgia Institute of Technology
@contact: [email protected]
@summary: This is an implementation of the 1-KNN algorithm for ranking features quickly.
It uses the knn implementation.
@status: oneKNN functions correctly, optimized to use n^2/2 algorithm.
'''
import matplotlib.pyplot as plt
from pylab import gca
import itertools
import string
import numpy as np
import math
import knn
from time import clock
'''
@summary: Query function for 1KNN, return value is a double between 0 and 1.
@param naData: A 2D numpy array. Each row is a data point with the final column containing the classification.
'''
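# Hedged sketch (assumption): the original oneKnn body is not shown in this
# excerpt. A straightforward O(n^2) reimplementation of the stated contract
# (leave-one-out 1-nearest-neighbor accuracy on the last-column labels):
def oneKnn(naData):
    naFeat = naData[:, :-1].astype(float)
    naLabel = naData[:, -1]
    lCount = 0
    for i in range(naFeat.shape[0]):
        naDist = ((naFeat - naFeat[i]) ** 2).sum(axis=1)
        naDist[i] = np.inf  # never match a point against itself
        if naLabel[np.argmin(naDist)] == naLabel[i]:
            lCount += 1
    return float(lCount) / naFeat.shape[0]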
''' Test function to plot results '''
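# Hedged placeholder (assumption): the original plotting helper was elided;
# this minimal stand-in keeps _test1 runnable and matches its call signature.
def _plotResults(naDist1, naDist2, lfOneKnn, lfKnn):
    plt.clf()
    plt.plot(range(len(lfOneKnn)), lfOneKnn, label='1-KNN')
    plt.plot(range(len(lfKnn)), lfKnn, label='KNN, K=5')
    plt.legend()
    plt.ylim((0, 1))
    plt.show()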
''' Function to plot 2 distributions '''
''' Function to test KNN performance '''
def _knnResult( naData ):
''' Split up data into training/testing '''
lSplit = naData.shape[0] * .7
naTrain = naData[:lSplit, :]
naTest = naData[lSplit:, :]
knn.addEvidence( naTrain.astype(float), 1 );
''' Query with last column omitted and 5 nearest neighbors '''
naResults = knn.query( naTest[:,:-1], 5, 'mode')
''' Count returns which are correct '''
lCount = 0
for i, dVal in enumerate(naResults):
if dVal == naTest[i,-1]:
lCount = lCount + 1
dResult = float(lCount) / naResults.size
return dResult
''' Tests performance of 1-KNN '''
def _test1():
''' Generate three random samples to show the value of 1-KNN compared to 5KNN learner performance '''
for i in range(3):
''' Select one of three distributions '''
if i == 0:
naTest1 = np.random.normal( loc=[0,0],scale=.25,size=[500,2] )
naTest1 = np.hstack( (naTest1, np.zeros(500).reshape(-1,1) ) )
naTest2 = np.random.normal( loc=[1.5,0],scale=.25,size=[500,2] )
naTest2 = np.hstack( (naTest2, np.ones(500).reshape(-1,1) ) )
elif i == 1:
naTest1 = np.random.normal( loc=[0,0],scale=.25,size=[500,2] )
naTest1 = np.hstack( (naTest1, np.zeros(500).reshape(-1,1) ) )
naTest2 = np.random.normal( loc=[1.5,0],scale=.1,size=[500,2] )
naTest2 = np.hstack( (naTest2, np.ones(500).reshape(-1,1) ) )
else:
naTest1 = np.random.normal( loc=[0,0],scale=.25,size=[500,2] )
naTest1 = np.hstack( (naTest1, np.zeros(500).reshape(-1,1) ) )
naTest2 = np.random.normal( loc=[1.5,0],scale=.25,size=[250,2] )
naTest2 = np.hstack( (naTest2, np.ones(250).reshape(-1,1) ) )
naOrig = np.vstack( (naTest1, naTest2) )
naBoth = np.vstack( (naTest1, naTest2) )
''' Keep track of runtimes '''
t = clock()
cOneRuntime = t-t;
cKnnRuntime = t-t;
lfResults = []
lfKnnResults = []
for i in range( 15 ):
#_plotDist( naTest1, naBoth[100:,:], i )
t = clock()
lfResults.append( oneKnn( naBoth ) )
cOneRuntime = cOneRuntime + (clock() - t)
t = clock()
lfKnnResults.append( _knnResult( np.random.permutation(naBoth) ) )
cKnnRuntime = cKnnRuntime + (clock() - t)
naBoth[500:,0] = naBoth[500:,0] - .1
print 'Runtime OneKnn:', cOneRuntime
print 'Runtime 5-KNN:', cKnnRuntime
_plotResults( naTest1, naTest2, lfResults, lfKnnResults )
''' Tests performance of 1-KNN '''
def _test2():
''' Generate three random samples to show the value of 1-KNN compared to 5KNN learner performance '''
np.random.seed( 12345 )
''' Create 5 distributions for each of the 5 attributes '''
dist1 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 )
dist2 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 )
dist3 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 )
dist4 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 )
dist5 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 )
lDists = [ dist1, dist2, dist3, dist4, dist5 ]
''' All features used except for distribution 4 '''
distY = np.sin( dist1 ) + np.sin( dist2 ) + np.sin( dist3 ) + np.sin( dist5 )
distY = distY.reshape( -1, 1 )
for i, fVal in enumerate( distY ):
if fVal >= 0:
distY[i] = 1
else:
distY[i] = 0
for i in range( 1, 6 ):
lsNames = []
lf1Vals = []
lfVals = []
for perm in itertools.combinations( '12345', i ):
''' set test distribution to first element '''
naTest = lDists[ int(perm[0]) - 1 ]
sPerm = perm[0]
''' stack other distributions on '''
for j in range( 1, len(perm) ):
sPerm = sPerm + str(perm[j])
naTest = np.hstack( (naTest, lDists[ int(perm[j]) - 1 ] ) )
''' finally stack y values '''
naTest = np.hstack( (naTest, distY) )
lf1Vals.append( oneKnn( naTest ) )
lfVals.append( _knnResult( np.random.permutation(naTest) ) )
lsNames.append( sPerm )
''' Plot results '''
plt1 = plt.bar( np.arange(len(lf1Vals)), lf1Vals, .2, color='r' )
plt2 = plt.bar( np.arange(len(lfVals)) + 0.2, lfVals, .2, color='b' )
plt.legend( (plt1[0], plt2[0]), ('1-KNN', 'KNN, K=5') )
plt.ylabel('1-KNN Value/KNN Classification')
plt.xlabel('Feature Set')
plt.title('Combinations of ' + str(i) + ' Features')
plt.ylim( (0,1) )
if len(lf1Vals) < 2:
plt.xlim( (-1,1) )
gca().xaxis.set_ticks( np.arange(len(lf1Vals)) + .2 )
gca().xaxis.set_ticklabels( lsNames )
plt.show()
if __name__ == '__main__':
_test1()
#_test2()
| 31.811189 | 112 | 0.523522 |
86311bc6fef14e7f3a84f443854c9a8a4139ce52 | 2,508 | py | Python | pyscf/nao/m_comp_coulomb_pack.py | robert-anderson/pyscf | cdc56e168cb15f47e8cdc791a92d689fa9b655af | [
"Apache-2.0"
] | 2 | 2019-05-28T05:25:56.000Z | 2019-11-09T02:16:43.000Z | pyscf/nao/m_comp_coulomb_pack.py | robert-anderson/pyscf | cdc56e168cb15f47e8cdc791a92d689fa9b655af | [
"Apache-2.0"
] | 2 | 2019-09-16T17:58:31.000Z | 2019-09-22T17:26:01.000Z | pyscf/nao/m_comp_coulomb_pack.py | robert-anderson/pyscf | cdc56e168cb15f47e8cdc791a92d689fa9b655af | [
"Apache-2.0"
] | 1 | 2019-11-09T02:13:16.000Z | 2019-11-09T02:13:16.000Z | # Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
from pyscf.nao.m_coulomb_am import coulomb_am
import numpy as np
try:
import numba as nb
from pyscf.nao.m_numba_utils import fill_triu_v2, fill_tril
use_numba = True
except:
use_numba = False
#
#
#
def comp_coulomb_pack(sv, ao_log=None, funct=coulomb_am, dtype=np.float64, **kvargs):
"""
Computes the matrix elements given by funct, for instance coulomb interaction
Args:
sv : (System Variables), this must have arrays of coordinates and species, etc
ao_log : description of functions (either orbitals or product basis functions)
Returns:
matrix elements for the whole system in packed form (lower triangular part)
"""
from pyscf.nao.m_ao_matelem import ao_matelem_c
from pyscf.nao.m_pack2den import ij2pack_l
aome = ao_matelem_c(sv.ao_log.rr, sv.ao_log.pp)
me = ao_matelem_c(sv.ao_log) if ao_log is None else aome.init_one_set(ao_log)
atom2s = np.zeros((sv.natm+1), dtype=np.int64)
for atom,sp in enumerate(sv.atom2sp): atom2s[atom+1]=atom2s[atom]+me.ao1.sp2norbs[sp]
norbs = atom2s[-1]
res = np.zeros(norbs*(norbs+1)//2, dtype=dtype)
for atom1,[sp1,rv1,s1,f1] in enumerate(zip(sv.atom2sp,sv.atom2coord,atom2s,atom2s[1:])):
#print("atom1 = {0}, rv1 = {1}".format(atom1, rv1))
for atom2,[sp2,rv2,s2,f2] in enumerate(zip(sv.atom2sp,sv.atom2coord,atom2s,atom2s[1:])):
if atom2>atom1: continue # skip
oo2f = funct(me,sp1,rv1,sp2,rv2, **kvargs)
if use_numba:
fill_triu_v2(oo2f, res, s1, f1, s2, f2, norbs)
else:
for i1 in range(s1,f1):
for i2 in range(s2, min(i1+1, f2)):
res[ij2pack_l(i1,i2,norbs)] = oo2f[i1-s1,i2-s2]
#print("number call = ", count)
#print("sum kernel: {0:.6f}".format(np.sum(abs(res))))
#np.savetxt("kernel_pyscf.txt", res)
#import sys
#sys.exit()
return res, norbs
| 38 | 92 | 0.702153 |
863143ad0e8c0560ad9359d49f02a31a8146a084 | 2,338 | py | Python | nova/tests/unit/test_service_auth.py | panguan737/nova | 0d177185a439baa228b42c948cab4e934d6ac7b8 | [
"Apache-2.0"
] | null | null | null | nova/tests/unit/test_service_auth.py | panguan737/nova | 0d177185a439baa228b42c948cab4e934d6ac7b8 | [
"Apache-2.0"
] | null | null | null | nova/tests/unit/test_service_auth.py | panguan737/nova | 0d177185a439baa228b42c948cab4e934d6ac7b8 | [
"Apache-2.0"
] | 1 | 2020-11-02T10:17:13.000Z | 2020-11-02T10:17:13.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneauth1 import loading as ks_loading
from keystoneauth1 import service_token
import mock
import nova.conf
from nova import context
from nova import service_auth
from nova import test
CONF = nova.conf.CONF
| 37.709677 | 79 | 0.732678 |
86319d7588f06ccfd5e5e22eadc702136a0fe831 | 552 | py | Python | classification/model/build_gen.py | LittleWat/MCD_DA | 37cb1bc38c203702e22c7c0c37e284d0294714fb | [
"MIT"
] | 464 | 2018-04-04T22:38:44.000Z | 2022-03-12T15:46:49.000Z | classification/model/build_gen.py | seqam-lab/MCD_DA | af10217c5c5451dcd8bc3e975a7d067c285cc029 | [
"MIT"
] | 28 | 2018-05-05T20:01:31.000Z | 2022-01-16T05:07:56.000Z | classification/model/build_gen.py | seqam-lab/MCD_DA | af10217c5c5451dcd8bc3e975a7d067c285cc029 | [
"MIT"
] | 147 | 2018-04-10T08:44:10.000Z | 2021-12-28T02:14:38.000Z | import svhn2mnist
import usps
import syn2gtrsb
import syndig2svhn
| 24 | 45 | 0.63587 |
86329776e65dca78e6c2604731e8b04b13e73992 | 1,318 | py | Python | deep_table/nn/models/loss/info_nce_loss.py | pfnet-research/deep-table | a19c0c3048484017d5f24806604c3b3470bcf550 | [
"MIT"
] | 48 | 2021-09-30T08:14:26.000Z | 2022-03-02T12:20:08.000Z | deep_table/nn/models/loss/info_nce_loss.py | pfnet-research/deep-table | a19c0c3048484017d5f24806604c3b3470bcf550 | [
"MIT"
] | 1 | 2021-11-08T11:41:49.000Z | 2021-11-08T11:41:49.000Z | deep_table/nn/models/loss/info_nce_loss.py | pfnet-research/deep-table | a19c0c3048484017d5f24806604c3b3470bcf550 | [
"MIT"
] | 2 | 2021-12-31T03:43:48.000Z | 2022-03-11T09:04:21.000Z | import torch
from torch import Tensor
from torch.nn.modules.loss import _Loss
| 32.95 | 92 | 0.615326 |
8633d44756a388da352b3bc3dd3c8cfc0eeaabfe | 19,830 | py | Python | patroni/config.py | korkin25/patroni | 333d41d9f039b5a799940c8a6fbc75dcbe0e9a31 | [
"MIT"
] | null | null | null | patroni/config.py | korkin25/patroni | 333d41d9f039b5a799940c8a6fbc75dcbe0e9a31 | [
"MIT"
] | null | null | null | patroni/config.py | korkin25/patroni | 333d41d9f039b5a799940c8a6fbc75dcbe0e9a31 | [
"MIT"
] | null | null | null | import json
import logging
import os
import shutil
import tempfile
import yaml
from collections import defaultdict
from copy import deepcopy
from patroni import PATRONI_ENV_PREFIX
from patroni.exceptions import ConfigParseError
from patroni.dcs import ClusterConfig
from patroni.postgresql.config import CaseInsensitiveDict, ConfigHandler
from patroni.utils import deep_compare, parse_bool, parse_int, patch_config
logger = logging.getLogger(__name__)
_AUTH_ALLOWED_PARAMETERS = (
'username',
'password',
'sslmode',
'sslcert',
'sslkey',
'sslpassword',
'sslrootcert',
'sslcrl',
'sslcrldir',
'gssencmode',
'channel_binding'
)
def _load_config_path(self, path):
"""
If path is a file, loads the yml file pointed to by path.
If path is a directory, loads all yml files in that directory in alphabetical order
"""
if os.path.isfile(path):
files = [path]
elif os.path.isdir(path):
files = [os.path.join(path, f) for f in sorted(os.listdir(path))
if (f.endswith('.yml') or f.endswith('.yaml')) and os.path.isfile(os.path.join(path, f))]
else:
logger.error('config path %s is neither directory nor file', path)
raise ConfigParseError('invalid config path')
overall_config = {}
for fname in files:
with open(fname) as f:
config = yaml.safe_load(f)
patch_config(overall_config, config)
return overall_config
def _load_config_file(self):
"""Loads config.yaml from filesystem and applies some values which were set via ENV"""
config = self._load_config_path(self._config_file)
patch_config(config, self.__environment_configuration)
return config
# configuration could be either ClusterConfig or dict
| 43.486842 | 120 | 0.580736 |
863423445c595d9f921067c5163063a99cb0a68c | 12,040 | py | Python | src/Products/CMFCore/tests/test_DirectoryView.py | fdiary/Products.CMFCore | 361a30e0c72a15a21f88433b8d5fc49331f36728 | [
"ZPL-2.1"
] | 3 | 2015-11-24T16:26:02.000Z | 2019-04-09T07:37:12.000Z | src/Products/CMFCore/tests/test_DirectoryView.py | fdiary/Products.CMFCore | 361a30e0c72a15a21f88433b8d5fc49331f36728 | [
"ZPL-2.1"
] | 86 | 2015-09-10T16:25:08.000Z | 2022-03-17T07:16:30.000Z | src/Products/CMFCore/tests/test_DirectoryView.py | fdiary/Products.CMFCore | 361a30e0c72a15a21f88433b8d5fc49331f36728 | [
"ZPL-2.1"
] | 16 | 2015-08-21T21:35:35.000Z | 2021-08-04T18:20:55.000Z | ##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
""" Unit tests for DirectoryView module.
"""
import sys
import unittest
import warnings
from os import mkdir
from os import remove
from os.path import join
from tempfile import mktemp
from App.config import getConfiguration
from . import _globals
from .base.dummy import DummyFolder
from .base.testcase import FSDVTest
from .base.testcase import WritableFSDVTest
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(DirectoryViewPathTests))
suite.addTest(unittest.makeSuite(DirectoryViewTests))
suite.addTest(unittest.makeSuite(DirectoryViewIgnoreTests))
suite.addTest(unittest.makeSuite(DirectoryViewFolderTests))
suite.addTest(unittest.makeSuite(DebugModeTests))
return suite
| 38.222222 | 79 | 0.66603 |
8634627ed49b276d745b67db33bc1b7a02ae6c52 | 4,247 | py | Python | pycycle/elements/flight_conditions.py | eshendricks/pyCycle | 2b7f9c2a60c6d93d5e561c71b27e75566b3baef0 | [
"Apache-2.0"
] | null | null | null | pycycle/elements/flight_conditions.py | eshendricks/pyCycle | 2b7f9c2a60c6d93d5e561c71b27e75566b3baef0 | [
"Apache-2.0"
] | null | null | null | pycycle/elements/flight_conditions.py | eshendricks/pyCycle | 2b7f9c2a60c6d93d5e561c71b27e75566b3baef0 | [
"Apache-2.0"
] | null | null | null | import openmdao.api as om
from pycycle.thermo.cea import species_data
from pycycle.constants import AIR_ELEMENTS
from pycycle.elements.ambient import Ambient
from pycycle.elements.flow_start import FlowStart
if __name__ == "__main__":
p1 = om.Problem()
p1.model = om.Group()
des_vars = p1.model.add_subsystem('des_vars', om.IndepVarComp())
des_vars.add_output('W', 0.0, units='lbm/s')
des_vars.add_output('alt', 1., units='ft')
des_vars.add_output('MN', 0.5)
des_vars.add_output('dTs', 0.0, units='degR')
fc = p1.model.add_subsystem("fc", FlightConditions())
p1.model.connect('des_vars.W', 'fc.W')
p1.model.connect('des_vars.alt', 'fc.alt')
p1.model.connect('des_vars.MN', 'fc.MN')
p1.model.connect('des_vars.dTs', 'fc.dTs')
p1.setup()
# p1.root.list_connections()
p1['des_vars.alt'] = 17868.79060515557
p1['des_vars.MN'] = 2.101070288213628
p1['des_vars.dTs'] = 0.0
p1['des_vars.W'] = 1.0
p1.run_model()
print('Ts_atm: ', p1['fc.ambient.Ts'])
print('Ts_set: ', p1['fc.Fl_O:stat:T'])
print('Ps_atm: ', p1['fc.ambient.Ps'])
print('Ps_set: ', p1['fc.Fl_O:stat:P'])
print('rhos_atm: ', p1['fc.ambient.rhos']*32.175)
print('rhos_set: ', p1['fc.Fl_O:stat:rho'])
print('W', p1['fc.Fl_O:stat:W'])
print('Pt: ', p1['fc.Fl_O:tot:P'])
| 38.261261 | 120 | 0.600895 |
86346fa63b7971b7ad956846f8bc8dcc94175283 | 2,679 | py | Python | server/cauth/views.py | mashaka/TravelHelper | 8a216dd13c253e138f241187dee46e6e53281a7b | [
"MIT"
] | null | null | null | server/cauth/views.py | mashaka/TravelHelper | 8a216dd13c253e138f241187dee46e6e53281a7b | [
"MIT"
] | 3 | 2020-02-11T23:38:20.000Z | 2021-06-10T19:10:53.000Z | server/cauth/views.py | mashaka/TravelHelper | 8a216dd13c253e138f241187dee46e6e53281a7b | [
"MIT"
] | 1 | 2018-09-19T11:19:48.000Z | 2018-09-19T11:19:48.000Z | from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AdminPasswordChangeForm, PasswordChangeForm, UserCreationForm
from django.contrib.auth import update_session_auth_hash, login, authenticate
from django.contrib import messages
from django.shortcuts import render, redirect
from social_django.models import UserSocialAuth
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, redirect
from rest_framework.authtoken.models import Token
from app.methods import prepare_user
| 31.892857 | 99 | 0.709966 |
8634b2f385acdad2561bde76c51b0f6fb67361d8 | 2,806 | py | Python | samples/modules/tensorflow/magic_wand/train/data_split_person.py | lviala-zaack/zephyr | bf3c6e7ba415dd85f1b68eb69ea2779b234c686f | [
"Apache-2.0"
] | 6,224 | 2016-06-24T20:04:19.000Z | 2022-03-31T20:33:45.000Z | samples/modules/tensorflow/magic_wand/train/data_split_person.py | Conexiotechnologies/zephyr | fde24ac1f25d09eb9722ce4edc6e2d3f844b5bce | [
"Apache-2.0"
] | 32,027 | 2017-03-24T00:02:32.000Z | 2022-03-31T23:45:53.000Z | samples/modules/tensorflow/magic_wand/train/data_split_person.py | Conexiotechnologies/zephyr | fde24ac1f25d09eb9722ce4edc6e2d3f844b5bce | [
"Apache-2.0"
] | 4,374 | 2016-08-11T07:28:47.000Z | 2022-03-31T14:44:59.000Z | # Lint as: python3
# coding=utf-8
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Split data into train, validation and test dataset according to person.
That is, use some people's data as train, some other people's data as
validation, and the rest ones' data as test. These data would be saved
separately under "/person_split".
It will generate new files with the following structure:
person_split
test
train
valid
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
from data_split import read_data
from data_split import write_data
def person_split(whole_data, train_names, valid_names, test_names):
"""Split data by person."""
random.seed(30)
random.shuffle(whole_data)
train_data = []
valid_data = []
test_data = []
for idx, data in enumerate(whole_data): # pylint: disable=unused-variable
if data["name"] in train_names:
train_data.append(data)
elif data["name"] in valid_names:
valid_data.append(data)
elif data["name"] in test_names:
test_data.append(data)
print("train_length:" + str(len(train_data)))
print("valid_length:" + str(len(valid_data)))
print("test_length:" + str(len(test_data)))
return train_data, valid_data, test_data
if __name__ == "__main__":
data = read_data("./data/complete_data")
train_names = [
"hyw", "shiyun", "tangsy", "dengyl", "jiangyh", "xunkai", "negative3",
"negative4", "negative5", "negative6"
]
valid_names = ["lsj", "pengxl", "negative2", "negative7"]
test_names = ["liucx", "zhangxy", "negative1", "negative8"]
train_data, valid_data, test_data = person_split(data, train_names,
valid_names, test_names)
if not os.path.exists("./person_split"):
os.makedirs("./person_split")
write_data(train_data, "./person_split/train")
write_data(valid_data, "./person_split/valid")
write_data(test_data, "./person_split/test")
| 36.921053 | 125 | 0.653956 |
86350332b9c46bb259c547e1b3c963ac7c8f647c | 10,632 | py | Python | tests/k8s_handler.py | josebalius/go-spacemesh | 7ad61dcbe30f361b348e93c97eb3871ab79f1848 | [
"MIT"
] | 586 | 2017-12-27T10:29:30.000Z | 2022-03-21T00:25:54.000Z | tests/k8s_handler.py | josebalius/go-spacemesh | 7ad61dcbe30f361b348e93c97eb3871ab79f1848 | [
"MIT"
] | 2,542 | 2017-12-27T11:23:12.000Z | 2022-03-31T18:40:52.000Z | tests/k8s_handler.py | josebalius/go-spacemesh | 7ad61dcbe30f361b348e93c97eb3871ab79f1848 | [
"MIT"
] | 162 | 2017-12-27T13:37:12.000Z | 2022-03-25T09:15:13.000Z | from datetime import datetime
from kubernetes import client
from kubernetes.client.rest import ApiException
import os
import time
import yaml
from tests import config as conf
import tests.utils as ut
| 44.3 | 133 | 0.630267 |
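Only the import block of `tests/k8s_handler.py` made it into this extract; the listed size of 10,632 bytes implies a much longer body. As a minimal sketch of the kind of helper these imports point to — the function name and manifest path are hypothetical, not recovered from go-spacemesh — deploying a YAML manifest through the official Kubernetes client looks like:

def create_deployment_from_yaml(yaml_path, namespace="default"):
    # Load the manifest and submit it through the AppsV1 API.
    with open(yaml_path) as f:
        body = yaml.safe_load(f)
    api = client.AppsV1Api()
    try:
        resp = api.create_namespaced_deployment(namespace=namespace, body=body)
        print(f"deployment {resp.metadata.name} created at {datetime.utcnow()}")
        return resp
    except ApiException as e:
        print(f"failed to create deployment: {e}")
        raise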
863721243454a95cc68c80d7a3e2d4352bbe5a24 | 2,718 | py | Python | natlas-agent/config.py | m4rcu5/natlas | d1057c5349a5443cecffb3db9a6428f7271b07ad | [
"Apache-2.0"
] | null | null | null | natlas-agent/config.py | m4rcu5/natlas | d1057c5349a5443cecffb3db9a6428f7271b07ad | [
"Apache-2.0"
] | null | null | null | natlas-agent/config.py | m4rcu5/natlas | d1057c5349a5443cecffb3db9a6428f7271b07ad | [
"Apache-2.0"
] | null | null | null | import os
from dotenv import load_dotenv
| 36.24 | 118 | 0.756439 |
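This natlas-agent `config.py` is likewise truncated to its imports. The conventional pattern behind this pair of imports is to load a `.env` file and expose its values as module-level settings; a sketch under that assumption (the setting names below are illustrative, not recovered from the repository):

load_dotenv()

# Illustrative settings only; the real natlas-agent keys are not shown in this extract.
NATLAS_SERVER = os.environ.get("NATLAS_SERVER", "http://127.0.0.1:5000")
NATLAS_AGENT_ID = os.environ.get("NATLAS_AGENT_ID", "")
NATLAS_MAX_THREADS = int(os.environ.get("NATLAS_MAX_THREADS", "3"))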
86375b708be1e1e74cc333322674e530709cceeb | 4,663 | py | Python | rdr2019/mcmc_lc_jla_fit.py | rubind/host_unity | a1908d80a8b6354e4516cccbf2b1a214cbc7daa9 | [
"MIT"
] | null | null | null | rdr2019/mcmc_lc_jla_fit.py | rubind/host_unity | a1908d80a8b6354e4516cccbf2b1a214cbc7daa9 | [
"MIT"
] | 3 | 2021-03-16T17:19:23.000Z | 2021-03-24T17:05:05.000Z | rdr2019/mcmc_lc_jla_fit.py | rubind/host_unity | a1908d80a8b6354e4516cccbf2b1a214cbc7daa9 | [
"MIT"
] | null | null | null | import os
import sys
import click
import pickle
import sncosmo
import numpy as np
from astropy.table import Table
DATA_PATH = '/home/samdixon/jla_light_curves/'
def modify_error(lc, error_floor=0.):
"""Add an error floor of `error_floor` times the maximum flux of the band
to each observation
"""
data = sncosmo.photdata.photometric_data(lc).normalized(zp=25., zpsys='ab')
new_lc = {'time': data.time,
'band': data.band,
'flux': data.flux,
'fluxerr': data.fluxerr,
'zp': data.zp,
'zpsys': data.zpsys}
for band in set(data.band):
        band_cut = data.band == band
        max_flux_in_band = np.max(data.flux[band_cut])
        new_lc['fluxerr'][band_cut] = np.sqrt((error_floor * max_flux_in_band)**2 + data.fluxerr[band_cut]**2)
new_lc = Table(new_lc, meta=lc.meta)
return new_lc
if __name__ == '__main__':
main()
| 39.516949 | 133 | 0.51855 |
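The script's entry point calls a `main()` whose definition was cut off in this extract; only the `modify_error` helper survives. Its intended use is to read a JLA light curve and inflate each band's flux errors by a fractional floor before fitting. A brief usage sketch — the file name is hypothetical, and `format='salt2'` assumes the JLA text format:

lc = sncosmo.read_lc(os.path.join(DATA_PATH, 'lc-03D1au.list'), format='salt2')
lc_floored = modify_error(lc, error_floor=0.02)  # floor errors at 2% of each band's peak flux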
863791c55712e28d3fe1488aacf0c833eaf8ff5c | 11,011 | py | Python | openmdao/core/tests/test_system.py | toddrme2178/OpenMDAO | 379cc6216d13d380e11cb3a46f03960981de4660 | [
"Apache-2.0"
] | null | null | null | openmdao/core/tests/test_system.py | toddrme2178/OpenMDAO | 379cc6216d13d380e11cb3a46f03960981de4660 | [
"Apache-2.0"
] | null | null | null | openmdao/core/tests/test_system.py | toddrme2178/OpenMDAO | 379cc6216d13d380e11cb3a46f03960981de4660 | [
"Apache-2.0"
] | 1 | 2018-07-27T06:39:15.000Z | 2018-07-27T06:39:15.000Z | """ Unit tests for the system interface."""
import unittest
from six import assertRaisesRegex
from six.moves import cStringIO
import numpy as np
from openmdao.api import Problem, Group, IndepVarComp, ExecComp
from openmdao.test_suite.components.options_feature_vector import VectorDoublingComp
from openmdao.utils.assert_utils import assert_rel_error, assert_warning
if __name__ == "__main__":
unittest.main()
| 35.066879 | 95 | 0.596767 |
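The test bodies between the imports and the `unittest.main()` guard did not survive extraction. For orientation, tests in this file follow the OpenMDAO 2.x pattern sketched below — build a small model, run it, and check outputs with `assert_rel_error`; this particular case is illustrative, not one of the original tests:

class TestSystemSketch(unittest.TestCase):
    # Hypothetical example in the style of the missing tests.
    def test_run_model(self):
        prob = Problem()
        model = prob.model
        model.add_subsystem('px', IndepVarComp('x', 2.0))
        model.add_subsystem('comp', ExecComp('y = 2.0*x'))
        model.connect('px.x', 'comp.x')
        prob.setup()
        prob.run_model()
        assert_rel_error(self, prob['comp.y'], 4.0, 1e-6)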
8638749e9332abd43829f80692ff4532468c5620 | 1,244 | py | Python | code/src/db/create_db.py | fabiangunzinger/sample_project | a5c87d0c3ff2f6ed39f3e3a18557c0ab439f6b42 | [
"MIT"
] | null | null | null | code/src/db/create_db.py | fabiangunzinger/sample_project | a5c87d0c3ff2f6ed39f3e3a18557c0ab439f6b42 | [
"MIT"
] | null | null | null | code/src/db/create_db.py | fabiangunzinger/sample_project | a5c87d0c3ff2f6ed39f3e3a18557c0ab439f6b42 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import os
import sqlite3
import sys
import pandas as pd
from src import config
def db_tables(connection):
"""List tables in database."""
res = pd.read_sql("select name from sqlite_master", connection)
return res.name.values
def create_database(sample):
"""Create database with tables for targets, outcomes, and predictions."""
db_name = f'{sample}.db'
db_path = os.path.join(config.DATADIR, db_name)
conn = sqlite3.connect(db_path)
usr_name = f'users_{sample}.csv'
usr_path = os.path.join(config.DATADIR, usr_name)
users = pd.read_csv(usr_path)
db_tbls = db_tables(conn)
for tbl in ['decisions', 'outcomes', 'predictions']:
if tbl not in db_tbls:
users.to_sql(tbl, conn, index=False)
conn.execute(f"create index idx_{tbl}_user_id on {tbl}(user_id)")
if __name__ == '__main__':
sys.exit(main())
| 25.387755 | 77 | 0.673633 |
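`create_db.py` exits through `sys.exit(main())`, but the `main()` definition was truncated from this extract. Given the `argparse` import and the `sample` parameter of `create_database`, a plausible — and strictly hypothetical — reconstruction:

def main():
    # Hypothetical reconstruction of the truncated entry point.
    parser = argparse.ArgumentParser(
        description='Create decisions/outcomes/predictions tables for a sample.')
    parser.add_argument('sample', help="sample name; expects 'users_<sample>.csv' in the data dir")
    args = parser.parse_args()
    create_database(args.sample)
    return 0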
863b23444fda9cb581afbddd6338c59075cfc887 | 1,793 | py | Python | tests/test_responder.py | craigderington/responder-persons-api | d2270d2f761c5dd3dbe253113d410f3e37d4d217 | [
"Apache-2.0"
] | null | null | null | tests/test_responder.py | craigderington/responder-persons-api | d2270d2f761c5dd3dbe253113d410f3e37d4d217 | [
"Apache-2.0"
] | null | null | null | tests/test_responder.py | craigderington/responder-persons-api | d2270d2f761c5dd3dbe253113d410f3e37d4d217 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
import pytest
import app as service
import yaml
import responder
from starlette.responses import PlainTextResponse
| 19.703297 | 65 | 0.621305 |
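Only the import block of this responder test module survives. The documented way to exercise a responder service from pytest is through the requests-compatible session mounted on the API object; a minimal sketch, assuming `service.api` is the `responder.API` instance exported by `app.py`:

@pytest.fixture
def api():
    return service.api

def test_plain_text_route(api):
    # Register a throwaway route, then hit it through responder's test session.
    @api.route("/ping")
    def ping(req, resp):
        resp.text = "pong"

    r = api.requests.get(api.url_for(ping))
    assert r.status_code == 200
    assert r.text == "pong"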