Dataset schema (one record per source file; ⌀ marks nullable columns):

| column | dtype | range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 .. 2.06M |
| ext | string | 11 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 .. 251 |
| max_stars_repo_name | string | length 4 .. 130 |
| max_stars_repo_head_hexsha | string | length 40 .. 78 |
| max_stars_repo_licenses | sequence | length 1 .. 10 |
| max_stars_count | int64 ⌀ | 1 .. 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 3 .. 251 |
| max_issues_repo_name | string | length 4 .. 130 |
| max_issues_repo_head_hexsha | string | length 40 .. 78 |
| max_issues_repo_licenses | sequence | length 1 .. 10 |
| max_issues_count | int64 ⌀ | 1 .. 116k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 3 .. 251 |
| max_forks_repo_name | string | length 4 .. 130 |
| max_forks_repo_head_hexsha | string | length 40 .. 78 |
| max_forks_repo_licenses | sequence | length 1 .. 10 |
| max_forks_count | int64 ⌀ | 1 .. 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 1 .. 1.05M |
| avg_line_length | float64 | 1 .. 1.02M |
| max_line_length | int64 | 3 .. 1.04M |
| alphanum_fraction | float64 | 0 .. 1 |

Each record below lists these fields pipe-separated in the order above, followed by the file content and the three trailing statistics.
7cfa416e684eef42a41f05552ac51704b017a9e1 | 1,471 | py | Python | arguments_setting.py | Projectoy/ml_framework | f3d37d632a1aec314eb186a3da6d174a5dc4beee | ["Apache-2.0"] | null | null | null | arguments_setting.py | Projectoy/ml_framework | f3d37d632a1aec314eb186a3da6d174a5dc4beee | ["Apache-2.0"] | null | null | null | arguments_setting.py | Projectoy/ml_framework | f3d37d632a1aec314eb186a3da6d174a5dc4beee | ["Apache-2.0"] | null | null | null |
import argparse, os
| 36.775 | 119 | 0.680489 |
7cfa745e3890fcda9ffd072f599dc7be286f99a5 | 11,039 | py | Python | fileHandler.py | Omer-Sella/ldpc | 955c0bc32236e171365cbbb88f00574302771610 | ["MIT"] | null | null | null | fileHandler.py | Omer-Sella/ldpc | 955c0bc32236e171365cbbb88f00574302771610 | ["MIT"] | null | null | null | fileHandler.py | Omer-Sella/ldpc | 955c0bc32236e171365cbbb88f00574302771610 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 28 12:10:11 2019
@author: Omer
"""
## File handler
## This file was initially intended purely to generate the matrices for the near earth code found in: https://public.ccsds.org/Pubs/131x1o2e2s.pdf
## The values from the above pdf were copied manually to a txt file, and it is the purpose of this file to parse it.
## The emphasis here is on correctness, I currently do not see a reason to generalise this file, since matrices will be saved in either json or some matrix friendly format.
import numpy as np
from scipy.linalg import circulant
#import matplotlib.pyplot as plt
import scipy.io
import common
import hashlib
import os
projectDir = os.environ.get('LDPC')
if projectDir is None:
import pathlib
projectDir = pathlib.Path(__file__).parent.absolute()
## Omer Sella: added on 01/12/2020, need to make sure this doesn't break anything.
import sys
sys.path.insert(1, projectDir)
FILE_HANDLER_INT_DATA_TYPE = np.int32
GENERAL_CODE_MATRIX_DATA_TYPE = np.int32
NIBBLE_CONVERTER = np.array([8, 4, 2, 1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
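## Illustrative sketch (not from the original file, whose functions are truncated
## in this dump): how NIBBLE_CONVERTER can expand one hexadecimal character from
## the copied txt file into its 4 binary entries. The helper name is hypothetical.
def nibbleToBits(hexChar):
    value = int(hexChar, 16)
    bits = np.zeros(4, dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
    for i in range(4):
        # NIBBLE_CONVERTER holds the bit weights 8, 4, 2, 1 of a nibble
        bits[i] = 1 if (value & NIBBLE_CONVERTER[i]) != 0 else 0
    return bits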
#plt.imshow(nearEarthParity)
#nearEarthParity = readMatrixFromFile('/home/oss22/swift/swift/codeMatrices/nearEarthParity.txt', 1022, 8176, 511, True, False, False)
#import networkx as nx
#from networkx.algorithms import bipartite
#B = nx.Graph()
#B.add_nodes_from(range(1022), bipartite=0)
#B.add_nodes_from(range(1022, 7156 + 1022), bipartite=1)
# Add edges only between nodes of opposite node sets
#for i in range(8176):
# for j in range(1022):
# if nearEarthParity[j,i] != 0:
# B.add_edges_from([(j, 7156 + i)])
#X, Y = bipartite.sets(B)
#pos = dict()
#pos.update( (n, (1, i)) for i, n in enumerate(X) )
#pos.update( (n, (2, i)) for i, n in enumerate(Y) )
#nx.draw(B, pos=pos)
#plt.show()
| 38.197232 | 172 | 0.621343 |
7cfaab0b77af0b6c7c138ff09a0a82244c391f57 | 12,133 | py | Python | stage/configuration/test_amazon_s3_origin.py | Sentienz/datacollector-tests | ca27988351dc3366488098b5db6c85a8be2f7b85 | ["Apache-2.0"] | null | null | null | stage/configuration/test_amazon_s3_origin.py | Sentienz/datacollector-tests | ca27988351dc3366488098b5db6c85a8be2f7b85 | ["Apache-2.0"] | null | null | null | stage/configuration/test_amazon_s3_origin.py | Sentienz/datacollector-tests | ca27988351dc3366488098b5db6c85a8be2f7b85 | ["Apache-2.0"] | 1 | 2019-10-29T08:46:11.000Z | 2019-10-29T08:46:11.000Z |
import logging
import pytest
from streamsets.testframework.markers import aws, sdc_min_version
from streamsets.testframework.utils import get_random_string
logger = logging.getLogger(__name__)
S3_SANDBOX_PREFIX = 'sandbox'
LOG_FIELD_MAPPING = [{'fieldPath': '/date', 'group': 1},
{'fieldPath': '/time', 'group': 2},
{'fieldPath': '/timehalf', 'group': 3},
{'fieldPath': '/info', 'group': 4},
{'fieldPath': '/file', 'group': 5},
{'fieldPath': '/message', 'group': 6}]
REGULAR_EXPRESSION = r'(\S+) (\S+) (\S+) (\S+) (\S+) (.*)'
# log to be written into the file on s3
data_format_content = {
'COMMON_LOG_FORMAT': '127.0.0.1 - frank [10/Oct/2000:13:55:36 -0700] '
'"GET /apache.gif HTTP/1.0" 200 232',
'LOG4J': '200 [main] DEBUG org.StreamSets.Log4j unknown - This is sample log message',
'APACHE_ERROR_LOG_FORMAT': '[Wed Oct 11 14:32:52 2000] [error] [client 127.0.0.1] client '
'denied by server configuration:/export/home/live/ap/htdocs/test',
'COMBINED_LOG_FORMAT': '127.0.0.1 - frank [10/Oct/2000:13:55:36 -0700] "GET /apache.gif'
' HTTP/1.0" 200 2326 "http://www.example.com/strt.html" "Mozilla/4.08'
' [en] (Win98; I ;Nav)"',
'APACHE_CUSTOM_LOG_FORMAT': '10.185.248.71 - - [09/Jan/2015:9:12:06 +0000] "GET '
'/inventoryServic/inventory/purchaseItem?userId=20253471&itemId=23434300 '
'HTTP/1.1" 500 17 ',
'CEF': '10.217.31.247 CEF:0|Citrix|NetScaler|NS10.0|APPFW|APPFW_STARTURL|6|src=10.217.253.78 '
'spt=53743 method=GET request=http://vpx247.example.net/FFC/login.html msg=Disallow Illegal URL.',
'LEEF': 'LEEF: 2.0|Trend Micro|Deep Security Agent|<DSA version>|4000030|cat=Anti-Malware '
'name=HEU_AEGIS_CRYPT desc=HEU_AEGIS_CRYPT sev=6 cn1=241 msg=Realtime',
'REGEX': '2019-04-30 08:23:53 AM [INFO] [streamsets.sdk.sdc_api] Pipeline Filewriterpipeline53'}
# data to verify the output of amazon s3 origin.
get_data_to_verify_output = {
'LOG4J': {'severity': 'DEBUG', 'relativetime': '200', 'thread': 'main', 'category': 'org.StreamSets.Log4j',
'ndc': 'unknown', 'message': 'This is sample log message'},
'COMMON_LOG_FORMAT': {'request': '/apache.gif', 'auth': 'frank', 'ident': '-', 'response': '200', 'bytes':
'232', 'clientip': '127.0.0.1', 'verb': 'GET', 'httpversion': '1.0', 'rawrequest': None,
'timestamp': '10/Oct/2000:13:55:36 -0700'},
'APACHE_ERROR_LOG_FORMAT': {'message': 'client denied by server configuration:/export/home/live/ap/htdocs/'
'test', 'timestamp': 'Wed Oct 11 14:32:52 2000', 'loglevel': 'error',
'clientip': '127.0.0.1'},
'COMBINED_LOG_FORMAT': {'request': '/apache.gif', 'agent': '"Mozilla/4.08 [en] (Win98; I ;Nav)"', 'auth':
'frank', 'ident': '-', 'verb': 'GET', 'referrer': '"http://www.example.com/strt.'
'html"', 'response': '200', 'bytes': '2326', 'clientip': '127.0.0.1',
'httpversion': '1.0', 'rawrequest': None, 'timestamp': '10/Oct/2000:13:55:36 -0700'},
'APACHE_CUSTOM_LOG_FORMAT': {'remoteUser': '-', 'requestTime': '09/Jan/2015:9:12:06 +0000', 'request': 'GET '
'/inventoryServic/inventory/purchaseItem?userId=20253471&itemId=23434300 HTTP/1.1',
'logName': '-', 'remoteHost': '10.185.248.71', 'bytesSent': '17', 'status': '500'},
'CEF': {'severity': '6', 'product': 'NetScaler', 'extensions': {'msg': 'Disallow Illegal URL.', 'request':
'http://vpx247.example.net/FFC/login.html', 'method': 'GET', 'src': '10.217.253.78', 'spt': '53743'},
'signature': 'APPFW', 'vendor': 'Citrix', 'cefVersion': 0, 'name': 'APPFW_STARTURL',
'version': 'NS10.0'},
'GROK': {'request': '/inventoryServic/inventory/purchaseItem?userId=20253471&itemId=23434300', 'auth': '-',
'ident': '-', 'response': '500', 'bytes': '17', 'clientip': '10.185.248.71', 'verb': 'GET',
'httpversion': '1.1', 'rawrequest': None, 'timestamp': '09/Jan/2015:9:12:06 +0000'},
'LEEF': {'eventId': '4000030', 'product': 'Deep Security Agent', 'extensions': {'cat': 'Realtime'},
'leefVersion': 2.0, 'vendor': 'Trend Micro', 'version': '<DSA version>'},
'REGEX': {'/time': '08:23:53', '/date': '2019-04-30', '/timehalf': 'AM',
'/info': '[INFO]', '/message': 'Pipeline Filewriterpipeline53', '/file': '[streamsets.sdk.sdc_api]'}}
def get_aws_origin_to_trash_pipeline(sdc_builder, attributes, aws):
# Build pipeline.
builder = sdc_builder.get_pipeline_builder()
builder.add_error_stage('Discard')
s3_origin = builder.add_stage('Amazon S3', type='origin')
s3_origin.set_attributes(**attributes)
trash = builder.add_stage('Trash')
pipeline_finisher_executor = builder.add_stage('Pipeline Finisher Executor')
pipeline_finisher_executor.set_attributes(stage_record_preconditions=["${record:eventType() == 'no-more-data'}"])
s3_origin >> trash
s3_origin >= pipeline_finisher_executor
s3_origin_pipeline = builder.build().configure_for_environment(aws)
s3_origin_pipeline.configuration['shouldRetry'] = False
return s3_origin_pipeline
def delete_aws_objects(client, aws, s3_key):
# Clean up S3.
delete_keys = {'Objects': [{'Key': k['Key']}
for k in
client.list_objects_v2(Bucket=aws.s3_bucket_name, Prefix=s3_key)['Contents']]}
client.delete_objects(Bucket=aws.s3_bucket_name, Delete=delete_keys)
def execute_pipeline_and_get_output(sdc_executor, s3_origin, pipeline):
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
output_records = snapshot[s3_origin].output
return output_records
| 46.84556 | 334 | 0.656556 |
7cfb56c23b97ce934940b9509f58841e0ebbb0fe | 3,493 | py | Python | model_building/svr_experiment_configuration.py | eubr-atmosphere/a-MLLibrary | b6ba472baacea6d793ab4f03275cdfa874e83bc3 | ["Apache-2.0"] | 3 | 2021-09-19T17:06:31.000Z | 2021-12-10T23:21:21.000Z | model_building/svr_experiment_configuration.py | eubr-atmosphere/a-MLLibrary | b6ba472baacea6d793ab4f03275cdfa874e83bc3 | ["Apache-2.0"] | null | null | null | model_building/svr_experiment_configuration.py | eubr-atmosphere/a-MLLibrary | b6ba472baacea6d793ab4f03275cdfa874e83bc3 | ["Apache-2.0"] | 1 | 2021-09-27T13:54:12.000Z | 2021-09-27T13:54:12.000Z |
"""
Copyright 2019 Marco Lattuada
Copyright 2019 Danilo Ardagna
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sklearn.svm as svm
import model_building.experiment_configuration as ec
| 38.811111 | 111 | 0.692242 |
7cfbb1bacec44e19e996fa070cd83150534767b0 | 1,000 | py | Python | src/scs_host/sys/host_gpi.py | south-coast-science/scs_host_rpi | a02afde3fd2e1f2b8c6dc08beef8c74039108a64 | ["MIT"] | null | null | null | src/scs_host/sys/host_gpi.py | south-coast-science/scs_host_rpi | a02afde3fd2e1f2b8c6dc08beef8c74039108a64 | ["MIT"] | 1 | 2020-07-13T14:54:08.000Z | 2020-11-16T10:11:04.000Z | src/scs_host/sys/host_gpi.py | south-coast-science/scs_host_rpi | a02afde3fd2e1f2b8c6dc08beef8c74039108a64 | ["MIT"] | 2 | 2017-11-07T16:59:02.000Z | 2019-09-29T15:39:37.000Z |
"""
Created on 12 May 2017
@author: Bruno Beloff ([email protected])
"""
from scs_host.sys.host_gpio import HostGPIO
# --------------------------------------------------------------------------------------------------------------------
# noinspection PyUnusedLocal,PyAbstractClass
| 25 | 118 | 0.346 |
7cfcc11fbbb1d31705e442bed5fe7d622b04a2bd | 4,472 | py | Python | benchmark/AMS/HIGGSTES/TP.py | victor-estrade/SystGradDescent | 822e7094290301ec47a99433381a8d6406798aff | ["MIT"] | 2 | 2019-03-20T09:05:02.000Z | 2019-03-20T15:23:44.000Z | benchmark/AMS/HIGGSTES/TP.py | victor-estrade/SystGradDescent | 822e7094290301ec47a99433381a8d6406798aff | ["MIT"] | null | null | null | benchmark/AMS/HIGGSTES/TP.py | victor-estrade/SystGradDescent | 822e7094290301ec47a99433381a8d6406798aff | ["MIT"] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from __future__ import unicode_literals
# Command line :
# python -m benchmark.VAR.GG.TP
import os
import logging
from config import SEED
from config import _ERROR
from config import _TRUTH
import numpy as np
import pandas as pd
from visual.misc import set_plot_config
set_plot_config()
from utils.log import set_logger
from utils.log import flush
from utils.log import print_line
from utils.model import get_model
from utils.model import get_optimizer
from utils.model import train_or_load_neural_net
from utils.evaluation import evaluate_summary_computer
from utils.images import gather_images
from visual.misc import plot_params
from problem.higgs import HiggsConfigTesOnly as Config
from problem.higgs import get_generators_torch
from problem.higgs import GeneratorCPU
from problem.higgs import GeneratorTorch
from problem.higgs import HiggsNLL as NLLComputer
from model.tangent_prop import TangentPropClassifier
from archi.classic import L4 as ARCHI
from ...my_argparser import TP_parse_args
from collections import OrderedDict
from .common import measurement
DATA_NAME = 'HIGGSTES'
BENCHMARK_NAME = 'VAR-'+DATA_NAME
N_ITER = 30
# =====================================================================
# MAIN
# =====================================================================
if __name__ == '__main__':
main()
| 30.841379 | 97 | 0.687165 |
7cfd01e468c618706379749c3f05781c60e2fe7b | 1,883 | py | Python | papermill/tests/test_adl.py | dmartinpro/papermill | fbb0a60c97cde70e3b278f778cbd366cf54f83f0 | ["BSD-3-Clause"] | null | null | null | papermill/tests/test_adl.py | dmartinpro/papermill | fbb0a60c97cde70e3b278f778cbd366cf54f83f0 | ["BSD-3-Clause"] | null | null | null | papermill/tests/test_adl.py | dmartinpro/papermill | fbb0a60c97cde70e3b278f778cbd366cf54f83f0 | ["BSD-3-Clause"] | null | null | null |
import unittest
from ..adl import ADL
import six
if six.PY3:
from unittest.mock import Mock, MagicMock
else:
from mock import Mock, MagicMock
| 36.211538 | 97 | 0.670738 |
7cfd39821c7ad2ac471f6e189d3999d3560e833a | 259 | py | Python | users/views.py | AnvarKhan/django-python | bd54e44deb290f43ea5982c2ca9f37cd6c946879 | ["Apache-2.0"] | 1 | 2022-02-05T15:07:25.000Z | 2022-02-05T15:07:25.000Z | users/views.py | AnvarKhan/django-python | bd54e44deb290f43ea5982c2ca9f37cd6c946879 | ["Apache-2.0"] | null | null | null | users/views.py | AnvarKhan/django-python | bd54e44deb290f43ea5982c2ca9f37cd6c946879 | ["Apache-2.0"] | null | null | null |
from django.views.generic import CreateView
from django.urls import reverse_lazy
from .forms import CustomUserCreationForm
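# Illustrative sketch (assumption): the view class itself is not shown in this
# dump; these three imports conventionally back a CreateView-based sign-up view
# along the following lines. Names and template path are hypothetical.
class SignUpView(CreateView):
    form_class = CustomUserCreationForm  # the custom user creation form imported above
    success_url = reverse_lazy('login')  # redirect here after a successful sign-up
    template_name = 'signup.html'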
| 28.777778 | 43 | 0.830116 |
7cfe95c6759feee2397de3f952b5fd6bdfa39ca2 | 137 | py | Python | st3/package_util/compat/typing.py | Thom1729/package_util | 3ddec00d8ab4a52f0f5ce3fe8b09247c1518547f | ["MIT"] | 18 | 2020-02-20T11:56:43.000Z | 2021-12-30T19:00:50.000Z | st3/package_util/compat/typing.py | Thom1729/package_util | 3ddec00d8ab4a52f0f5ce3fe8b09247c1518547f | ["MIT"] | 31 | 2020-02-21T13:38:12.000Z | 2021-12-15T22:18:37.000Z | st3/package_util/compat/typing.py | Thom1729/package_util | 3ddec00d8ab4a52f0f5ce3fe8b09247c1518547f | ["MIT"] | 3 | 2020-02-21T09:31:27.000Z | 2021-10-01T20:56:16.000Z |
try:
from typing import * # noqa: F401, F403
except ImportError:
from .typing_stubs import * # type: ignore # noqa: F401, F403
| 27.4 | 66 | 0.671533 |
7cfea92f95c14fb4efaa051120fc4e6f1facdf01 | 2,858 | py | Python | stanza/models/common/dropout.py | rasimuvaikas/stanza | 21793519a531b0e9d7151e42d180d97785c9a5b8 | ["Apache-2.0"] | 3,633 | 2016-01-21T17:29:13.000Z | 2022-03-31T13:36:47.000Z | stanza/models/common/dropout.py | rasimuvaikas/stanza | 21793519a531b0e9d7151e42d180d97785c9a5b8 | ["Apache-2.0"] | 593 | 2016-01-19T07:16:05.000Z | 2022-03-31T20:23:58.000Z | stanza/models/common/dropout.py | rasimuvaikas/stanza | 21793519a531b0e9d7151e42d180d97785c9a5b8 | ["Apache-2.0"] | 525 | 2016-01-20T03:22:19.000Z | 2022-03-24T05:51:56.000Z |
import torch
import torch.nn as nn
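# Illustrative sketch (assumption): the dropout modules themselves are truncated
# in this dump. A common "locked"/variational dropout pattern built from the two
# imports above samples one mask and reuses it across every timestep; this is a
# generic rendition, not necessarily stanza's exact implementation.
class LockedDropoutSketch(nn.Module):
    def __init__(self, p):
        super().__init__()
        self.p = p

    def forward(self, x):  # x: (batch, time, features)
        if not self.training or self.p == 0:
            return x
        # one Bernoulli mask per (batch, feature), broadcast over the time axis
        mask = x.new_empty(x.size(0), 1, x.size(2)).bernoulli_(1 - self.p)
        return x * mask / (1 - self.p)  # rescale to keep the expected activation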
| 37.605263 | 139 | 0.642407 |
7cff5cb6c2fef0ecc0f5ac6be8e4bd36f4fe013c | 365 | py | Python | Day01-15/code/Day15/pdf2.py | bdfd/Python_Zero2Hero_DS | 9dafe90b8112fdc3d07e1aa02e41ed3f019f733c | ["MIT"] | 3 | 2022-01-15T19:06:19.000Z | 2022-01-18T16:47:27.000Z | Day01-15/code/Day15/pdf2.py | bdfd/4.5_Data-Science-Python-Zero2Hero- | 9dafe90b8112fdc3d07e1aa02e41ed3f019f733c | ["MIT"] | null | null | null | Day01-15/code/Day15/pdf2.py | bdfd/4.5_Data-Science-Python-Zero2Hero- | 9dafe90b8112fdc3d07e1aa02e41ed3f019f733c | ["MIT"] | 1 | 2022-01-09T00:18:49.000Z | 2022-01-09T00:18:49.000Z |
"""
PDF
Version: 0.1
Author: BDFD
Date: 2018-03-26
"""
from PyPDF2 import PdfFileReader
with open('./res/Python.pdf', 'rb') as f:
reader = PdfFileReader(f, strict=False)
print(reader.numPages)
if reader.isEncrypted:
reader.decrypt('')
current_page = reader.getPage(5)
print(current_page)
print(current_page.extractText())
| 19.210526 | 45 | 0.682192 |
7cff626ae151f2363fd9919cb12cd92f5b8974de | 2,335 | py | Python | qt__pyqt__pyside__pyqode/qt__class_tree__parse_and_print__recursively__from__doc_qt_io/gui.py | DazEB2/SimplePyScripts | 1dde0a42ba93fe89609855d6db8af1c63b1ab7cc | ["CC-BY-4.0"] | null | null | null | qt__pyqt__pyside__pyqode/qt__class_tree__parse_and_print__recursively__from__doc_qt_io/gui.py | DazEB2/SimplePyScripts | 1dde0a42ba93fe89609855d6db8af1c63b1ab7cc | ["CC-BY-4.0"] | null | null | null | qt__pyqt__pyside__pyqode/qt__class_tree__parse_and_print__recursively__from__doc_qt_io/gui.py | DazEB2/SimplePyScripts | 1dde0a42ba93fe89609855d6db8af1c63b1ab7cc | ["CC-BY-4.0"] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
from PyQt5 import QtWidgets as qtw
from PyQt5.QtTest import QTest
import time
import requests
from bs4 import BeautifulSoup
from console import get_inherited_children, ROOT_URL
if __name__ == '__main__':
app = qtw.QApplication([])
w = MainWindow()
w.resize(500, 500)
w.show()
w.fill_tree()
app.exec()
| 25.107527 | 98 | 0.628266 |
7cffa5673d098c5404a18e4042db11fef2170e1f | 6,540 | py | Python | common/OpTestASM.py | kyle-ibm/op-test | df8dbf8cbff1390668c22632052adb46ebf277c1 | ["Apache-2.0"] | null | null | null | common/OpTestASM.py | kyle-ibm/op-test | df8dbf8cbff1390668c22632052adb46ebf277c1 | ["Apache-2.0"] | null | null | null | common/OpTestASM.py | kyle-ibm/op-test | df8dbf8cbff1390668c22632052adb46ebf277c1 | ["Apache-2.0"] | 1 | 2021-05-25T11:33:18.000Z | 2021-05-25T11:33:18.000Z |
#!/usr/bin/env python3
# encoding=utf8
# IBM_PROLOG_BEGIN_TAG
# This is an automatically generated prolog.
#
# $Source: op-test-framework/common/OpTestASM.py $
#
# OpenPOWER Automated Test Project
#
# Contributors Listed Below - COPYRIGHT 2017
# [+] International Business Machines Corp.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# IBM_PROLOG_END_TAG
'''
OpTestASM: Advanced System Management (FSP Web UI)
--------------------------------------------------
This class contains common functions that are useful for the
FSP ASM Web page. Some functionality is only accessible through
the FSP Web UI (such as progress codes), so we scrape it.
'''
import time
import subprocess
import os
import pexpect
import sys
import subprocess
from .OpTestConstants import OpTestConstants as BMC_CONST
from .OpTestError import OpTestError
import http.cookiejar
import urllib.request
import urllib.parse
import urllib.error
import re
import ssl
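# Illustrative sketch (assumption, not part of the original class): the imports
# above suggest the usual cookie-preserving scraping pattern, where one opener
# carries the ASM session cookie across successive page fetches.
def _example_fetch_page(url):
    jar = http.cookiejar.CookieJar()
    ctx = ssl._create_unverified_context()  # FSP web UIs commonly use self-signed certs
    opener = urllib.request.build_opener(
        urllib.request.HTTPCookieProcessor(jar),
        urllib.request.HTTPSHandler(context=ctx))
    with opener.open(url) as response:
        return response.read().decode('utf-8')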
| 31.902439 | 90 | 0.555657 |
6b00216e5015b612b495eca186f46004bdc92b04 | 1,824 | py | Python | test/test_storage.py | jrabasco/PyPasser | 3cc6ecdfa9b5fe22f5a88c221517fe09d2df9db6 | ["MIT"] | null | null | null | test/test_storage.py | jrabasco/PyPasser | 3cc6ecdfa9b5fe22f5a88c221517fe09d2df9db6 | ["MIT"] | null | null | null | test/test_storage.py | jrabasco/PyPasser | 3cc6ecdfa9b5fe22f5a88c221517fe09d2df9db6 | ["MIT"] | null | null | null |
#!/usr/bin/python3.4
__author__ = "Jeremy Rabasco"
import sys
import os
sys.path.append("..")
import unittest
from modules import storage
from modules.service import Service
from modules.database import Database
if __name__ == "__main__":
    unittest.main()
| 35.076923 | 104 | 0.668311 |
6b00e8ebc8e80cec62f2565854961c322350a073 | 4,676 | py | Python | virt/ansible-latest/lib/python2.7/site-packages/ansible/plugins/lookup/template.py | lakhlaifi/RedHat-Ansible | 27c5077cced9d416081fcd5d69ea44bca0317fa4 | ["Apache-2.0"] | 1 | 2020-03-22T01:04:39.000Z | 2020-03-22T01:04:39.000Z | ansible/ansible/plugins/lookup/template.py | SergeyCherepanov/ansible | 875711cd2fd6b783c812241c2ed7a954bf6f670f | ["MIT"] | 7 | 2020-09-07T17:27:56.000Z | 2022-03-02T06:25:46.000Z | ansible/ansible/plugins/lookup/template.py | SergeyCherepanov/ansible | 875711cd2fd6b783c812241c2ed7a954bf6f670f | ["MIT"] | 1 | 2020-03-22T01:04:48.000Z | 2020-03-22T01:04:48.000Z |
# Copyright: (c) 2012, Michael DeHaan <[email protected]>
# Copyright: (c) 2012-17, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: template
author: Michael DeHaan <[email protected]>
version_added: "0.9"
short_description: retrieve contents of file after templating with Jinja2
description:
- Returns a list of strings; for each template in the list of templates you pass in, returns a string containing the results of processing that template.
options:
_terms:
description: list of files to template
convert_data:
type: bool
description: whether to convert YAML into data. If False, strings that are YAML will be left untouched.
variable_start_string:
description: The string marking the beginning of a print statement.
default: '{{'
version_added: '2.8'
type: str
variable_end_string:
description: The string marking the end of a print statement.
default: '}}'
version_added: '2.8'
type: str
"""
EXAMPLES = """
- name: show templating results
debug:
msg: "{{ lookup('template', './some_template.j2') }}"
- name: show templating results with different variable start and end string
debug:
msg: "{{ lookup('template', './some_template.j2', variable_start_string='[%', variable_end_string='%]') }}"
"""
RETURN = """
_raw:
description: file(s) content after templating
"""
import os
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils._text import to_bytes, to_text
from ansible.template import generate_ansible_template_vars
from ansible.utils.display import Display
display = Display()
| 40.66087 | 159 | 0.64136 |
6b01058178b8f414abe46085a609e4696e9cb097 | 1,096 | py | Python | setup.py | ripiuk/fant_sizer | dcc0908c79ed76af3f4189ebd2a75cecf7a89e34 | ["MIT"] | null | null | null | setup.py | ripiuk/fant_sizer | dcc0908c79ed76af3f4189ebd2a75cecf7a89e34 | ["MIT"] | null | null | null | setup.py | ripiuk/fant_sizer | dcc0908c79ed76af3f4189ebd2a75cecf7a89e34 | ["MIT"] | null | null | null |
from setuptools import setup, find_packages
from os.path import join, dirname
setup(
name="fant_sizer",
version="0.7",
author="Rypiuk Oleksandr",
author_email="[email protected]",
description="fant_sizer command-line file-information",
url="https://github.com/ripiuk/fant_sizer",
keywords="file command-line information size tool recursively",
license="MIT",
classifiers=[
'Topic :: Utilities',
'Environment :: Console',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python :: 3.6'
],
packages=find_packages(),
long_description=open(join(dirname(__file__), "README.rst")).read(),
entry_points={
"console_scripts":
['fant_sizer = fant_sizer.fant_sizer:_main'],
},
)
| 36.533333 | 76 | 0.581204 |
6b032c5cf849bb1b6a9241eb068c04ff780d5adc | 1,957 | py | Python | 2018/Round 1A/A.py | elvisyjlin/google-code-jam | 7fe8244c5ae07a9896acf9c48f3a06b306b393b1 | ["MIT"] | null | null | null | 2018/Round 1A/A.py | elvisyjlin/google-code-jam | 7fe8244c5ae07a9896acf9c48f3a06b306b393b1 | ["MIT"] | null | null | null | 2018/Round 1A/A.py | elvisyjlin/google-code-jam | 7fe8244c5ae07a9896acf9c48f3a06b306b393b1 | ["MIT"] | null | null | null |
if __name__ == '__main__':
T = int(input())
for t in range(T):
print('Case #{}: {}'.format(t+1, solve()))
| 27.56338 | 50 | 0.444558 |
6b041831b70999f5552fbde4cf4fd10965b426d5 | 9,798 | py | Python | desktop/libs/liboozie/src/liboozie/submittion_tests.py | vinaymundada27/Hue | 7bffb33bbe7cfa34d340241c4ba3b19476211b2a | ["Apache-2.0"] | 1 | 2018-08-01T05:10:26.000Z | 2018-08-01T05:10:26.000Z | desktop/libs/liboozie/src/liboozie/submittion_tests.py | vinaymundada27/Hue | 7bffb33bbe7cfa34d340241c4ba3b19476211b2a | ["Apache-2.0"] | null | null | null | desktop/libs/liboozie/src/liboozie/submittion_tests.py | vinaymundada27/Hue | 7bffb33bbe7cfa34d340241c4ba3b19476211b2a | ["Apache-2.0"] | 1 | 2019-07-23T12:36:09.000Z | 2019-07-23T12:36:09.000Z |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.contrib.auth.models import User
from nose.plugins.attrib import attr
from nose.tools import assert_equal, assert_true, assert_not_equal
from hadoop import cluster, pseudo_hdfs4
from hadoop.conf import HDFS_CLUSTERS, MR_CLUSTERS, YARN_CLUSTERS
from liboozie.submittion import Submission
from oozie.tests import OozieMockBase
from desktop.lib.test_utils import clear_sys_caches
from desktop.lib.django_test_util import make_logged_in_client
LOG = logging.getLogger(__name__)
| 35.11828 | 106 | 0.677689 |
6b04b9ebe40e4d32dbf9b4d850ad1eefd373d8ea | 12,721 | py | Python | Training/train_baseHD.py | Wenyuan-Vincent-Li/SSL_Seg_GAN | 8f6c45fd000ea12468dccf211b376fadbf4759c6 | ["Apache-2.0"] | 1 | 2022-03-09T11:51:22.000Z | 2022-03-09T11:51:22.000Z | Training/train_baseHD.py | Wenyuan-Vincent-Li/SSL_Seg_GAN | 8f6c45fd000ea12468dccf211b376fadbf4759c6 | ["Apache-2.0"] | null | null | null | Training/train_baseHD.py | Wenyuan-Vincent-Li/SSL_Seg_GAN | 8f6c45fd000ea12468dccf211b376fadbf4759c6 | ["Apache-2.0"] | null | null | null |
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
from Training import functions
from Training.imresize import imresize
import matplotlib.pyplot as plt
from Models.pix2pixHD_base import GANLoss, VGGLoss
from Models.pix2pixHD2 import mask2onehot
def train_single_scale(dataloader, netD, netG, netS, reals, Gs, Ss, in_s, in_s_S, NoiseAmp, NoiseAmpS, opt):
'''
:param netD: currD
:param netG: currG
:param netS: currS
:param reals: a list of image pyramid ## TODO: you can just pass image shape here
:param Gs: list of prev netG
:param Ss: list of prev netS
:param in_s: 0-> all zero [1, 3, 26, 26]
:param NoiseAmp: [] -> [1]
:param opt: config
:return:
'''
loss = Losses(opt)
real = reals[opt.scale_num] # find the current level image xn
opt.nzx = real[0]
opt.nzy = real[1]
# z_opt = 0 ## dummy z_opt
alpha = opt.alpha
# setup optimizer
optimizerD = optim.Adam(netD.parameters(), lr=opt.lr_d, betas=(opt.beta1, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=opt.lr_g, betas=(opt.beta1, 0.999))
optimizerS = optim.Adam(netS.parameters(), lr=opt.lr_s, betas=(opt.beta1, 0.999))
schedulerD = torch.optim.lr_scheduler.MultiStepLR(optimizer=optimizerD, milestones=[opt.niter * 0.8], gamma=opt.gamma)
schedulerG = torch.optim.lr_scheduler.MultiStepLR(optimizer=optimizerG, milestones=[opt.niter * 0.8], gamma=opt.gamma)
schedulerS = torch.optim.lr_scheduler.MultiStepLR(optimizer=optimizerS, milestones=[opt.niter * 0.8],
gamma=opt.gamma)
errD2plot = []
errG2plot = []
D_real2plot = []
D_fake2plot = []
for epoch in range(opt.niter): # niter = 2000
if Gs == [] and Ss == []:
noise_ = functions.generate_noise([1, opt.nzx, opt.nzy], opt.batchSize) # [None, 1, 32, 32]
noise_ = noise_.expand(opt.batchSize, 3, opt.nzx, opt.nzy)
## Noise_: for generated false samples through generator
else:
noise_ = functions.generate_noise([1, opt.nzx, opt.nzy], opt.batchSize)
for j, data in enumerate(dataloader):
data['image'] = data['image'].to(opt.device)
data['label'] = data['label'].long().to(opt.device)
############################
# (1) Update D network: maximize D(x) + D(G(z))
###########################
# train with real
netD.zero_grad()
pred_real = netD(data['image'], data['label'][:,0:1,...])
loss_D_real = loss.criterionGAN(pred_real, True)
D_x = loss_D_real.item()
# train with fake
if (j == 0) & (epoch == 0): # first iteration training in this level
if Gs == [] and Ss == []:
prev = torch.full([opt.batchSize, opt.nc_z, opt.nzx, opt.nzy], 0, device=opt.device)
in_s = prev # full of 0 [None, 3, 32, 32]
prev_S = torch.full([opt.batchSize, opt.label_nc, opt.nzx, opt.nzy], 0, device=opt.device)
in_s_S = prev_S # full of 0 [None, 4, 32, 32]
mask = data['label'][:,0:1,...]
opt.noise_amp = opt.noise_amp_init
opt.noise_amp_S = opt.noise_amp_init
else:
prev = draw_concat(Gs, data['down_scale_label'], reals, NoiseAmp, in_s, 'generator', opt)
## given a new noise, prev is a image generated by previous Generator with bilinear upsampling [1, 3, 33, 33]
criterion = nn.MSELoss()
RMSE = torch.sqrt(criterion(data['image'], prev))
opt.noise_amp = opt.noise_amp_init * RMSE
prev_S = draw_concat(Ss, data['down_scale_image'], reals, NoiseAmpS, in_s_S, 'segment', opt) ## prob with [None, 4, 32, 32]
onehot_label = mask2onehot(data['label'][:,0:1,...], opt.label_nc)
RMSE_S = torch.sqrt(criterion(onehot_label, prev_S))
# RMSE_S = 0
opt.noise_amp_S = opt.noise_amp_init * RMSE_S
mask = data['label'][:,0:1,...]
else:
prev = draw_concat(Gs, data['down_scale_label'], reals, NoiseAmp, in_s, 'generator', opt)
prev_S = draw_concat(Ss, data['down_scale_image'], reals, NoiseAmpS, in_s_S, 'segment', opt)
mask = data['label'][:,0:1,...]
if Gs == []:
noise = noise_ ## Gausiaan noise for generating image [None, 3, 42, 42]
else:
noise = opt.noise_amp * noise_ + prev ## [None, 3, 43, 43] new noise is equal to the prev generated image plus the gaussian noise.
fake = netG(noise.detach(), prev, mask) # [None, 3, 32, 32] the same size with the input image
# detach() make sure that the gradients don't go to the noise.
# prev:[None, 3, 42, 42] -> [None, 3, 43, 43] first step prev = 0, second step prev = a image generated by previous Generator with bilinaer upsampling
pred_fake = netD(fake.detach(), data['label'][:,0:1,...]) # output shape [1, 1, 16, 16] -> [1, 1, 23, 23]
# print(len(pred_fake), len(pred_fake[0]))
loss_D_fake = loss.criterionGAN(pred_fake, False)
D_G_z = loss_D_fake.item()
# segment_logit, segment_mask = netS(data['image'], mask2onehot(prev_S, opt.label_nc))
# print(data['image'].shape, onehot.shape)
# print(epoch, j)
segment_logit, segment_prob, segment_mask = netS(data['image'], prev_S.detach())
pred_fake_S = netD(data['image'], segment_prob.detach())
loss_D_fake_S = loss.criterionGAN(pred_fake_S, False)
D_S_z = loss_D_fake_S.item()
errD = (loss_D_real + 0.5 * loss_D_fake + 0.5 * loss_D_fake_S) ## Todo: figure out a proper coefficient
errD.backward()
optimizerD.step()
errD2plot.append(errD.detach()) ## errD for each iteration
############################
# (2) Update G network: maximize D(G(z))
###########################
netG.zero_grad()
pred_fake = netD(fake, data['label'][:,0:1,...])
loss_G_GAN = 0.5 * loss.criterionGAN(pred_fake, True)
# GAN feature matching loss
loss_G_GAN_Feat = 0
if not opt.no_ganFeat_loss:
feat_weights = 4.0 / (opt.n_layers_D + 1)
D_weights = 1.0 / opt.num_D
for i in range(opt.num_D):
for j in range(len(pred_fake[i]) - 1):
loss_G_GAN_Feat += D_weights * feat_weights * \
loss.criterionFeat(pred_fake[i][j],
pred_real[i][j].detach()) * opt.lambda_feat
# VGG feature matching loss
loss_G_VGG = 0
if not opt.no_vgg_loss:
loss_G_VGG = loss.criterionVGG(fake, data['image']) * opt.lambda_feat
## reconstruction loss
if alpha != 0: ## alpha = 10 calculate the reconstruction loss
Recloss = nn.MSELoss()
rec_loss = alpha * Recloss(fake, data['image'])
else:
rec_loss = 0
errG = loss_G_GAN + loss_G_GAN_Feat + loss_G_VGG + rec_loss
errG.backward()
optimizerG.step()
############################
# (3) Update S network: maximize D(S(z))
###########################
netS.zero_grad()
pred_fake_S = netD(data['image'], segment_prob)
loss_G_GAN_S = 0.03 * loss.criterionGAN(pred_fake_S, True)
# Segmentation loss
if opt.contour:
loss_G_Seg = loss.crossEntropy(segment_logit, data['label'].float())
else:
loss_G_Seg = loss.crossEntropy(segment_prob, torch.squeeze(data['label'][:,0:1,...], dim =1))
# GAN feature matching loss
loss_G_GAN_Feat_S = 0
if not opt.no_ganFeat_loss:
feat_weights = 4.0 / (opt.n_layers_D + 1)
D_weights = 1.0 / opt.num_D
for i in range(opt.num_D):
for j in range(len(pred_fake_S[i]) - 1):
loss_G_GAN_Feat_S += D_weights * feat_weights * \
loss.criterionFeat(pred_fake_S[i][j],
pred_real[i][j].detach()) * opt.lambda_feat
errS = loss_G_GAN_S + loss_G_GAN_Feat_S + loss_G_Seg
errS.backward()
optimizerS.step()
## for every epoch, do the following:
errG2plot.append(errG.detach()) ## ErrG for each iteration
D_real2plot.append(D_x) ## discriminator loss on real
D_fake2plot.append(D_G_z + D_S_z) ## discriminator loss on fake
if epoch % 25 == 0 or epoch == (opt.niter - 1):
print('scale %d:[%d/%d]' % (opt.scale_num, epoch, opt.niter))
if epoch % 25 == 0 or epoch == (opt.niter - 1):
plt.imsave('%s/fake_sample_%d.png' % (opt.outf, epoch),
functions.convert_image_np(fake.detach()), vmin=0, vmax=1)
plt.imsave('%s/fake_sample_real_%d.png' % (opt.outf, epoch),
functions.convert_image_np(data['image']), vmin=0, vmax=1)
plt.imsave('%s/fake_sample_mask_%d.png' % (opt.outf, epoch),
functions.convert_mask_np(data['label'][:,0:1,...], num_classes= opt.label_nc))
plt.imsave('%s/segmentation_mask_%d.png' % (opt.outf, epoch),
functions.convert_mask_np(segment_mask.detach(), num_classes=opt.label_nc))
schedulerD.step()
schedulerG.step()
schedulerS.step()
functions.save_networks(netG, netD, netS, opt) ## save netG, netD, z_opt, opt is used to parser output path
return in_s, in_s_S, netG, netS
def draw_concat(Gs, masks, reals, NoiseAmp, in_s, mode, opt):
'''
:param Gs: [G0]
:param mask: [down scaled _mask]
:param reals: [image pyramid] only used to represent the image shape
:param NoiseAmp: [1]
:param in_s: all zeros [1, 3, 26, 26]
:param mode: 'rand'
:param opt:
:return:
'''
G_z = in_s[:opt.batchSize, :, :, :] # [None, 3, 26, 26] all zeros, image input for the corest level
if len(Gs) > 0:
if mode == 'generator':
count = 0
for G, mask, real_curr, real_next, noise_amp in zip(Gs, masks, reals, reals[1:], NoiseAmp):
if count == 0:
z = functions.generate_noise([1, real_curr[0], real_curr[1]],
opt.batchSize)
z = z.expand(opt.batchSize, G_z.shape[1], z.shape[2], z.shape[3])
else:
z = functions.generate_noise(
[opt.nc_z, real_curr[0], real_curr[1]], opt.batchSize)
G_z = G_z[:, :, 0:real_curr[0], 0:real_curr[1]] ## G_z [None, 3, 32, 32]
z_in = noise_amp * z + G_z
G_z = G(z_in.detach(), G_z, mask) ## [1, 3, 26, 26] output of previous generator
G_z = imresize(G_z, real_next[1] / real_curr[1], opt)
G_z = G_z[:, :, 0:real_next[0],
0:real_next[1]] ## resize the image to be compatible with current G [1, 3, 33, 33]
count += 1
elif mode == 'segment':
count = 0
for G, mask, real_curr, real_next, noise_amp in zip(Gs, masks, reals, reals[1:], NoiseAmp):
G_z = G_z[:, :, 0:real_curr[0], 0:real_curr[1]] ## G_z [None, 3, 32, 32]
_, G_z, _ = G(mask, G_z) ## [1, 3, 26, 26] output of previous generator
if opt.contour:
G_z = torch.cat((G_z, 1-G_z), 1)
G_z = imresize(G_z, real_next[1] / real_curr[1], opt)
G_z = G_z[:, :, 0:real_next[0],
0:real_next[1]] ## resize the image to be compatible with current G [1, 3, 33, 33]
count += 1
return G_z
| 48.003774 | 162 | 0.537929 |
6b04db30f6d56200725a9e9d3be9cbc67d645d65 | 2,074 | py | Python | tests/python/unittest/test_tir_pass_inject_double_buffer.py | 0xreza/tvm | f08d5d78ee000b2c113ac451f8d73817960eafd5 | ["Zlib", "Unlicense", "Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0"] | null | null | null | tests/python/unittest/test_tir_pass_inject_double_buffer.py | 0xreza/tvm | f08d5d78ee000b2c113ac451f8d73817960eafd5 | ["Zlib", "Unlicense", "Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0"] | 1 | 2020-07-29T00:21:19.000Z | 2020-07-29T00:21:19.000Z | tests/python/unittest/test_tir_pass_inject_double_buffer.py | 0xreza/tvm | f08d5d78ee000b2c113ac451f8d73817960eafd5 | ["Zlib", "Unlicense", "Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0"] | 1 | 2021-07-22T17:33:16.000Z | 2021-07-22T17:33:16.000Z |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
if __name__ == "__main__":
test_double_buffer()
| 36.385965 | 74 | 0.655738 |
6b052c373e2583931e7668595c831adfd5fed432 | 491 | py | Python | read_sensor.py | shivupoojar/openfaas-pi | 5eda501368a1ac321954cb2aaf58be617977bd58 | ["Apache-2.0"] | 1 | 2020-11-24T03:31:26.000Z | 2020-11-24T03:31:26.000Z | read_sensor.py | shivupoojar/openfaas-pi | 5eda501368a1ac321954cb2aaf58be617977bd58 | ["Apache-2.0"] | null | null | null | read_sensor.py | shivupoojar/openfaas-pi | 5eda501368a1ac321954cb2aaf58be617977bd58 | ["Apache-2.0"] | null | null | null |
import requests
from sense_hat import SenseHat
import smbus
import sys  # needed for sys.exit() in the KeyboardInterrupt handler below
import time
while True:
try:
pressure=0
sense = SenseHat()
pressure = sense.get_pressure()
data = {'pressure':pressure}
print(pressure)
#send http request to sense serverless function with pressure
#data
r=requests.post('http://127.0.0.1:8080/function/sensor',data)
print(r.text)
sense=SenseHat()
sense.show_message(r.text)
except KeyboardInterrupt:
sys.exit()
| 21.347826 | 69 | 0.672098 |
6b0543a7aff4c6ab6b022a2d8e6d154ed4873777 | 1,528 | py | Python | trabantsim/prototypes/space_invaders.py | highfestiva/life | b05b592502d72980ab55e13e84330b74a966f377 | ["BSD-3-Clause"] | 9 | 2019-09-03T18:33:31.000Z | 2022-02-04T04:00:02.000Z | trabantsim/prototypes/space_invaders.py | highfestiva/life | b05b592502d72980ab55e13e84330b74a966f377 | ["BSD-3-Clause"] | null | null | null | trabantsim/prototypes/space_invaders.py | highfestiva/life | b05b592502d72980ab55e13e84330b74a966f377 | ["BSD-3-Clause"] | null | null | null |
#!/usr/bin/env python3
# Space Invadersishkebab.
from trabant import *
# ASCII geometries.
shipascii = r'''
/\
/XXXXXXXX\
v v
'''
invader = r'''
/XXXXXX\
/XXXXXXXX\
XXXXXXXXXX
XX XX XX
\XXXXXXXX/
/XX XX\
/X/ \/ \X\
X/ \X
'''
cam(distance=250)
gravity((0,0,0))
ship = create_ascii_object(shipascii, pos=(0,0,-100), col='#070')
shots = []
invaderspeeds,isi = [(25,0,0), (0,0,-10), (-25,0,0), (0,0,-10)],0
invaders = set()
for y in range(2):
for x in range(8):
invaders.add(create_ascii_object(invader, pos=(x*25-130,0,100-y*20), col=rndvec().abs(), physmesh=True))
for invader in invaders:
invader.vel(invaderspeeds[0])
while loop():
# Steering.
vel = keydir()*50 + tapdir(ship.pos())*4
ship.vel((vel.x,0,0)) # Only move in X.
# Shooting.
is_tap_close = taps() and tapdir(ship.pos()).x < 3
is_shooting = 'Space' in keys() or 'LCtrl' in keys() or is_tap_close
if is_shooting and timeout(0.7, first_hit=True):
shots += [create_sphere(ship.pos()+vec3(0,0,10), vel=(0,0,200), col='#fff')]
sound(sound_bang, shots[-1].pos())
# Run invaders.
if timeout(3, timer='invaders'):
isi = (isi+1)%len(invaderspeeds)
[i.vel(invaderspeeds[isi]) for i in invaders]
# Check collisions, make explosions.
for o in collided_objects():
if o in invaders:
invaders.remove(o)
explode(o.pos(),o.vel(),5)
elif o == ship:
while loop():
pass
o.release()
| 24.645161 | 112 | 0.581806 |
6b05df704fde4ca413cc3974d404975347c287a5 | 11,550 | py | Python | model/backbone/xception.py | Shang-XH/BAFTT | 62392325342f48b8a89f0c2bf71e48026dd90629 | ["MIT"] | 4 | 2021-09-07T03:29:38.000Z | 2021-09-07T04:24:31.000Z | model/backbone/xception.py | Shang-XH/BAFTT | 62392325342f48b8a89f0c2bf71e48026dd90629 | ["MIT"] | null | null | null | model/backbone/xception.py | Shang-XH/BAFTT | 62392325342f48b8a89f0c2bf71e48026dd90629 | ["MIT"] | null | null | null |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from model.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d
if __name__ == "__main__":
import torch
model = AlignedXception(BatchNorm=nn.BatchNorm2d, pretrained=True, output_stride=16)
input = torch.rand(1, 3, 512, 512)
output, low_level_feat = model(input)
print(output.size())
    print(low_level_feat.size())
| 40.104167 | 116 | 0.583117 |
6b096c429c0f219b1a8f9aeb011545c4774f439d | 1,430 | py | Python | Backend/autonomus/utils/mail.py | IrinaMBejan/Autonom | 4a97da1b26ed22e3ec8bb939359148765392b692 | ["MIT"] | 2 | 2019-03-08T10:04:35.000Z | 2020-03-14T15:24:56.000Z | Backend/autonomus/utils/mail.py | IrinaMBejan/Autonom | 4a97da1b26ed22e3ec8bb939359148765392b692 | ["MIT"] | null | null | null | Backend/autonomus/utils/mail.py | IrinaMBejan/Autonom | 4a97da1b26ed22e3ec8bb939359148765392b692 | ["MIT"] | 2 | 2019-03-16T14:47:36.000Z | 2020-04-28T14:09:45.000Z |
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail, Substitution
API_KEY = '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'
API_KEY_ID = 'egd1yywWRbeVF2gcGhTH2Q'
ENCODING = "utf-8"
DEFAULT_MAIL="[email protected]"
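# Illustrative sketch (assumption): the sending helpers of this file are
# truncated in this dump; SendGridAPIClient and Mail are normally combined as
# below. The function name is hypothetical.
def example_send(to_email, subject, html_content):
    message = Mail(from_email=DEFAULT_MAIL, to_emails=to_email,
                   subject=subject, html_content=html_content)
    client = SendGridAPIClient(API_KEY)
    return client.send(message)  # returns the HTTP response from the SendGrid API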
| 29.791667 | 89 | 0.68951 |
6b09dfca59db461ba56fcce8bea683cfe5b5f132 | 22,696 | py | Python | yellowbrick/features/pca.py | percygautam/yellowbrick | 1ba6774a257bc85768a990293790caf4c14a5653 | ["Apache-2.0"] | 1 | 2020-04-30T08:50:11.000Z | 2020-04-30T08:50:11.000Z | yellowbrick/features/pca.py | percygautam/yellowbrick | 1ba6774a257bc85768a990293790caf4c14a5653 | ["Apache-2.0"] | null | null | null | yellowbrick/features/pca.py | percygautam/yellowbrick | 1ba6774a257bc85768a990293790caf4c14a5653 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
# yellowbrick.features.pca
# Decomposition based feature visualization with PCA.
#
# Author: Carlo Morales
# Author: Raúl Peralta Lozada
# Author: Benjamin Bengfort
# Created: Tue May 23 18:34:27 2017 -0400
#
# Copyright (C) 2017 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: pca.py [] [email protected] $
"""
Decomposition based feature visualization with PCA.
"""
##########################################################################
## Imports
##########################################################################
# NOTE: must import mplot3d to load the 3D projection
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from yellowbrick.style import palettes
from yellowbrick.features.projection import ProjectionVisualizer
from yellowbrick.exceptions import YellowbrickValueError, NotFitted
from sklearn.pipeline import Pipeline
from sklearn.decomposition import PCA as PCATransformer
from sklearn.preprocessing import StandardScaler
from sklearn.exceptions import NotFittedError
##########################################################################
# 2D and 3D PCA Visualizer
##########################################################################
def fit(self, X, y=None, **kwargs):
"""
Fits the PCA transformer, transforms the data in X, then draws the
decomposition in either 2D or 3D space as a scatter plot.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features.
y : ndarray or Series of length n
An array or series of target or class values.
Returns
-------
self : visualizer
Returns self for use in Pipelines.
"""
# Call super fit to compute features, classes, colors, etc.
super(PCA, self).fit(X=X, y=y, **kwargs)
self.pca_transformer.fit(X)
self.pca_components_ = self.pca_transformer.named_steps["pca"].components_
return self
def transform(self, X, y=None, **kwargs):
"""
Calls the internal `transform` method of the scikit-learn PCA transformer, which
performs a dimensionality reduction on the input features ``X``. Next calls the
``draw`` method of the Yellowbrick visualizer, finally returning a new array of
transformed features of shape ``(len(X), projection)``.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features.
y : ndarray or Series of length n
An array or series of target or class values.
Returns
-------
Xp : ndarray or DataFrame of shape n x m
Returns a new array-like object of transformed features of shape
``(len(X), projection)``.
"""
try:
Xp = self.pca_transformer.transform(X)
self.draw(Xp, y)
return Xp
except NotFittedError:
raise NotFitted.from_estimator(self, "transform")
def draw(self, Xp, y):
"""
Plots a scatterplot of points that represented the decomposition,
`pca_features_`, of the original features, `X`, projected into either 2 or
3 dimensions.
If 2 dimensions are selected, a colorbar and heatmap can also be optionally
included to show the magnitude of each feature value to the component.
Parameters
----------
Xp : array-like of shape (n, 2) or (n, 3)
The matrix produced by the ``transform()`` method.
y : array-like of shape (n,), optional
The target, used to specify the colors of the points.
Returns
-------
self.ax : matplotlib Axes object
Returns the axes that the scatter plot was drawn on.
"""
# Call to super draw which draws the scatter plot.
super(PCA, self).draw(Xp, y)
if self.proj_features:
# Draws projection features in transformed space.
self._draw_projection_features(Xp, y)
if self.projection == 2:
if self.heatmap:
if not self.colormap:
self.colormap = palettes.DEFAULT_SEQUENCE
# TODO: change to pcolormesh instead of imshow per #615 spec
im = self.lax.imshow(
self.pca_components_,
interpolation="none",
cmap=self.colormap,
aspect="auto",
)
plt.colorbar(
im,
cax=self.uax,
orientation="horizontal",
ticks=[self.pca_components_.min(), 0, self.pca_components_.max()],
)
return self.ax
def _draw_projection_features(self, Xp, y):
"""
Draw the projection of features in the transformed space.
Parameters
----------
Xp : array-like of shape (n, 2) or (n, 3)
The matrix produced by the ``transform()`` method.
y : array-like of shape (n,), optional
The target, used to specify the colors of the points.
Returns
-------
self.ax : matplotlib Axes object
Returns the axes that the scatter plot was drawn on.
"""
x_vector = self.pca_components_[0]
y_vector = self.pca_components_[1]
max_x = max(Xp[:, 0])
max_y = max(Xp[:, 1])
if self.projection == 2:
for i in range(self.pca_components_.shape[1]):
self.ax.arrow(
x=0,
y=0,
dx=x_vector[i] * max_x,
dy=y_vector[i] * max_y,
color="r",
head_width=0.05,
width=0.005,
)
self.ax.text(
x_vector[i] * max_x * 1.05,
y_vector[i] * max_y * 1.05,
self.features_[i],
color="r",
)
elif self.projection == 3:
z_vector = self.pca_components_[2]
max_z = max(Xp[:, 1])
for i in range(self.pca_components_.shape[1]):
self.ax.plot(
[0, x_vector[i] * max_x],
[0, y_vector[i] * max_y],
[0, z_vector[i] * max_z],
color="r",
)
self.ax.text(
x_vector[i] * max_x * 1.05,
y_vector[i] * max_y * 1.05,
z_vector[i] * max_z * 1.05,
self.features_[i],
color="r",
)
else:
raise YellowbrickValueError("Projection dimensions must be either 2 or 3")
return self.ax
def finalize(self, **kwargs):
"""
Draws the title, labels, legends, heatmap, and colorbar as specified by the
keyword arguments.
"""
super(PCA, self).finalize()
self.ax.set_title("Principal Component Plot")
self.ax.set_xlabel("$PC_1$")
self.ax.set_ylabel("$PC_2$")
if self.projection == 3:
self.ax.set_zlabel("$PC_3$")
if self.heatmap == True:
self.lax.set_xticks(np.arange(-0.5, len(self.features_)))
self.lax.set_xticklabels([])
# Makes the labels centered.
self.lax.set_xticks(np.arange(0, len(self.features_)), minor=True)
self.lax.set_xticklabels(
self.features_, rotation=90, fontsize=12, minor=True
)
self.lax.set_yticks(np.arange(0.5, 2))
self.lax.set_yticklabels(["$PC_1$", "$PC_2$"], va="bottom", fontsize=10)
self.fig.tight_layout()
##########################################################################
## Quick Method
##########################################################################
def pca_decomposition(
X,
y=None,
ax=None,
features=None,
classes=None,
scale=True,
projection=2,
proj_features=False,
colors=None,
colormap=None,
alpha=0.75,
random_state=None,
colorbar=True,
heatmap=False,
show=True,
**kwargs
):
"""
Produce a two or three dimensional principal component plot of the data array ``X``
projected onto its largest sequential principal components. It is common practice
to scale the data array ``X`` before applying a PC decomposition. Variable scaling
can be controlled using the ``scale`` argument.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features.
y : ndarray or Series of length n
An array or series of target or class values.
ax : matplotlib Axes, default: None
The axes to plot the figure on. If None is passed in, the current axes
will be used (or generated if required).
features : list, default: None
The names of the features specified by the columns of the input dataset.
This length of this list must match the number of columns in X, otherwise
an exception will be raised on ``fit()``.
classes : list, default: None
The class labels for each class in y, ordered by sorted class index. These
names act as a label encoder for the legend, identifying integer classes
or renaming string labels. If omitted, the class labels will be taken from
the unique values in y.
Note that the length of this list must match the number of unique values in
y, otherwise an exception is raised. This parameter is only used in the
discrete target type case and is ignored otherwise.
scale : bool, default: True
Boolean that indicates if user wants to scale data.
projection : int or string, default: 2
The number of axes to project into, either 2d or 3d. To plot 3d plots
with matplotlib, please ensure a 3d axes is passed to the visualizer,
otherwise one will be created using the current figure.
proj_features : bool, default: False
Boolean that indicates if the user wants to project the features
in the projected space. If True the plot will be similar to a biplot.
colors : list or tuple, default: None
A single color to plot all instances as or a list of colors to color each
instance according to its class in the discrete case or as an ordered
colormap in the sequential case. If not enough colors per class are
specified then the colors are treated as a cycle.
colormap : string or cmap, default: None
The colormap used to create the individual colors. In the discrete case
it is used to compute the number of colors needed for each class and
in the continuous case it is used to create a sequential color map based
on the range of the target.
alpha : float, default: 0.75
Specify a transparency where 1 is completely opaque and 0 is completely
transparent. This property makes densely clustered points more visible.
random_state : int, RandomState instance or None, optional (default None)
This parameter sets the random state on this solver. If the input X is
larger than 500x500 and the number of components to extract is lower
than 80% of the smallest dimension of the data, then the more efficient
`randomized` solver is enabled.
colorbar : bool, default: True
If the target_type is "continous" draw a colorbar to the right of the
scatter plot. The colobar axes is accessible using the cax property.
heatmap : bool, default: False
Add a heatmap showing contribution of each feature in the principal components.
Also draws a colorbar for readability purpose. The heatmap is accessible
using lax property and colorbar using uax property.
show : bool, default: True
If True, calls ``show()``, which in turn calls ``plt.show()`` however you cannot
call ``plt.savefig`` from this signature, nor ``clear_figure``. If False, simply
calls ``finalize()``
kwargs : dict
Keyword arguments that are passed to the base class and may influence
the visualization as defined in other Visualizers.
Attributes
----------
pca_components_ : ndarray, shape (n_features, n_components)
This tells about the magnitude of each feature in the pricipal components.
This is primarily used to draw the biplots.
classes_ : ndarray, shape (n_classes,)
The class labels that define the discrete values in the target. Only
available if the target type is discrete. This is guaranteed to be
strings even if the classes are a different type.
features_ : ndarray, shape (n_features,)
The names of the features discovered or used in the visualizer that
can be used as an index to access or modify data in X. If a user passes
feature names in, those features are used. Otherwise the columns of a
DataFrame are used or just simply the indices of the data array.
range_ : (min y, max y)
A tuple that describes the minimum and maximum values in the target.
Only available if the target type is continuous.
Examples
--------
>>> from sklearn import datasets
>>> iris = datasets.load_iris()
>>> X = iris.data
>>> y = iris.target
>>> pca_decomposition(X, y, colors=['r', 'g', 'b'], projection=3)
"""
# Instantiate the visualizer
visualizer = PCA(
ax=ax,
features=features,
scale=scale,
projection=projection,
proj_features=proj_features,
colors=colors,
colormap=colormap,
alpha=alpha,
random_state=random_state,
colorbar=colorbar,
heatmap=heatmap,
**kwargs
)
# Fit and transform the visualizer (calls draw)
visualizer.fit(X, y)
visualizer.transform(X, y)
if show:
visualizer.show()
else:
visualizer.finalize()
# Returns the visualizer object.
return visualizer
# Alias for PCA
PCADecomposition = PCA
| 36.547504 | 88 | 0.606847 |
6b09e9510878cd225e1975781111b1e1feae8734 | 1,229 | py | Python | k2/python/host/k2host/properties.py | Jarvan-Wang/k2 | 7f164ecb804d15006fd30e8564d80e0fa212f011 | [
"Apache-2.0"
] | 144 | 2020-04-17T10:10:57.000Z | 2022-03-25T19:07:54.000Z | k2/python/host/k2host/properties.py | Jarvan-Wang/k2 | 7f164ecb804d15006fd30e8564d80e0fa212f011 | [
"Apache-2.0"
] | 136 | 2020-04-22T10:35:10.000Z | 2021-08-16T13:49:29.000Z | k2/python/host/k2host/properties.py | Jarvan-Wang/k2 | 7f164ecb804d15006fd30e8564d80e0fa212f011 | [
"Apache-2.0"
] | 26 | 2020-04-21T08:23:06.000Z | 2021-09-02T15:23:53.000Z | # Copyright (c) 2020 Xiaomi Corporation (author: Haowen Qiu)
# See ../../../LICENSE for clarification regarding multiple authors
import torch
from torch.utils.dlpack import to_dlpack
from .fsa import Fsa
from _k2host import _is_valid
from _k2host import _is_top_sorted
from _k2host import _is_arc_sorted
from _k2host import _has_self_loops
from _k2host import _is_acyclic
from _k2host import _is_deterministic
from _k2host import _is_epsilon_free
from _k2host import _is_connected
from _k2host import _is_empty
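# The wrapper functions were elided in extraction. A minimal sketch of one,
# assuming the _k2host bindings take the wrapped host FSA object (the
# `get_fsa()` accessor name is an assumption, not confirmed API):
def is_valid(fsa: Fsa) -> bool:
    """Check that `fsa` is a valid FSA by delegating to the C++ binding."""
    return _is_valid(fsa.get_fsa())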
| 22.759259 | 67 | 0.753458 |
6b0a2521796cb92f0d1e011306fd05dc969275cf | 355 | py | Python | origamibot/core/teletypes/poll_option.py | cmd410/OrigamiBot | 03667d069f0c0b088671936ce36bf8f85a029b93 | [
"MIT"
] | 4 | 2020-06-30T10:32:54.000Z | 2020-11-01T23:07:58.000Z | origamibot/core/teletypes/poll_option.py | cmd410/OrigamiBot | 03667d069f0c0b088671936ce36bf8f85a029b93 | [
"MIT"
] | 6 | 2020-06-26T23:14:59.000Z | 2020-07-26T11:48:07.000Z | origamibot/core/teletypes/poll_option.py | cmd410/OrigamiBot | 03667d069f0c0b088671936ce36bf8f85a029b93 | [
"MIT"
] | 1 | 2020-07-28T08:52:51.000Z | 2020-07-28T08:52:51.000Z | from .base import TelegramStructure, Field
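# The class body was elided in extraction. A minimal sketch following the
# Telegram Bot API's PollOption shape; the Field(value, [type]) call pattern
# is an assumption based on the other teletypes in this package:
class PollOption(TelegramStructure):
    text = Field()
    voter_count = Field()

    def __init__(self, text: str, voter_count: int):
        self.text = Field(text, [str])
        self.voter_count = Field(voter_count, [int])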
| 19.722222 | 42 | 0.515493 |
6b0a3fda038a685ade7b25955f97976cdafc44a7 | 787 | py | Python | var/spack/repos/builtin/packages/r-viridislite/package.py | xiki-tempula/spack | 9d66c05e93ab8a933fc59915040c0e0c86a4aac4 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 9 | 2018-04-18T07:51:40.000Z | 2021-09-10T03:56:57.000Z | var/spack/repos/builtin/packages/r-viridislite/package.py | xiki-tempula/spack | 9d66c05e93ab8a933fc59915040c0e0c86a4aac4 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 907 | 2018-04-18T11:17:57.000Z | 2022-03-31T13:20:25.000Z | var/spack/repos/builtin/packages/r-viridislite/package.py | xiki-tempula/spack | 9d66c05e93ab8a933fc59915040c0e0c86a4aac4 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 29 | 2018-11-05T16:14:23.000Z | 2022-02-03T16:07:09.000Z | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
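# The package class body was elided in extraction. A minimal sketch in the
# standard Spack RPackage shape; the version/checksum entries are omitted
# because the real values are not recoverable here:
class RViridislite(RPackage):
    """viridisLite: colorblind-friendly color maps for R."""

    homepage = "https://github.com/sjmgarnier/viridisLite"
    cran = "viridisLite"
    # version('x.y.z', sha256='...')  # real entries not recoverable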
| 39.35 | 95 | 0.7446 |
6b0a89ea28d57009a70965dacb867faddce3f86e | 28,086 | py | Python | shiSock-0.2.0/test_two/PySock/server.py | AnanyaRamanA/shiSock | 51efb0eba17eb106b9480598d278536ddd7732c3 | [
"MIT"
] | null | null | null | shiSock-0.2.0/test_two/PySock/server.py | AnanyaRamanA/shiSock | 51efb0eba17eb106b9480598d278536ddd7732c3 | [
"MIT"
] | null | null | null | shiSock-0.2.0/test_two/PySock/server.py | AnanyaRamanA/shiSock | 51efb0eba17eb106b9480598d278536ddd7732c3 | [
"MIT"
] | 1 | 2021-10-31T13:47:42.000Z | 2021-10-31T13:47:42.000Z | from re import S
import select
import socket
import queue
import threading
import sys
import pickle
import base64
import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
from cryptography.hazmat.primitives.serialization import load_ssh_public_key
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
import hashlib
import yaml
import random
import time
| 38.73931 | 170 | 0.44054 |
6b0b0bbc4a2a5899aadcf7804e822911158b0d28 | 9,304 | py | Python | server/www/packages/packages-windows/x86/ldap3/utils/asn1.py | zhoulhb/teleport | 54da194697898ef77537cfe7032d774555dc1335 | [
"Apache-2.0"
] | 640 | 2018-09-12T03:14:13.000Z | 2022-03-30T04:38:09.000Z | server/www/packages/packages-windows/x86/ldap3/utils/asn1.py | zhoulhb/teleport | 54da194697898ef77537cfe7032d774555dc1335 | [
"Apache-2.0"
] | 175 | 2018-09-10T19:52:20.000Z | 2022-03-30T04:37:30.000Z | server/www/packages/packages-windows/x86/ldap3/utils/asn1.py | zhoulhb/teleport | 54da194697898ef77537cfe7032d774555dc1335 | [
"Apache-2.0"
] | 230 | 2018-09-13T02:40:49.000Z | 2022-03-29T11:53:58.000Z | """
"""
# Created on 2015.08.19
#
# Author: Giovanni Cannata
#
# Copyright 2015 - 2018 Giovanni Cannata
#
# This file is part of ldap3.
#
# ldap3 is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ldap3 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with ldap3 in the COPYING and COPYING.LESSER files.
# If not, see <http://www.gnu.org/licenses/>.
from pyasn1 import __version__ as pyasn1_version
from pyasn1.codec.ber import decoder # for usage in other modules
from pyasn1.codec.ber.encoder import Encoder # for monkeypatching of boolean value
from ..core.results import RESULT_CODES
from ..utils.conv import to_unicode
from ..protocol.convert import referrals_to_list
CLASSES = {(False, False): 0, # Universal
(False, True): 1, # Application
(True, False): 2, # Context
(True, True): 3} # Private
# Monkeypatching of pyasn1 for encoding Boolean with the value 0xFF for TRUE
# THIS IS NOT PART OF THE FAST BER DECODER
if pyasn1_version == 'xxx0.2.3':
from pyasn1.codec.ber.encoder import tagMap, BooleanEncoder, encode
from pyasn1.type.univ import Boolean
from pyasn1.compat.octets import ints2octs
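    # The encoder subclass used on the next line was elided in extraction.
    # A minimal sketch, assuming pyasn1 0.2.3's BooleanEncoder API:
    class BooleanCEREncoder(BooleanEncoder):
        _true = ints2octs((255,))  # LDAP requires TRUE to be encoded as 0xFF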
tagMap[Boolean.tagSet] = BooleanCEREncoder()
else:
from pyasn1.codec.ber.encoder import tagMap, typeMap, AbstractItemEncoder
from pyasn1.type.univ import Boolean
from copy import deepcopy
customTagMap = deepcopy(tagMap)
customTypeMap = deepcopy(typeMap)
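    # The custom encoder used on the next lines was elided in extraction.
    # A minimal sketch, assuming the newer pyasn1 AbstractItemEncoder API:
    class LDAPBooleanEncoder(AbstractItemEncoder):
        supportIndefLenMode = False

        def encodeValue(self, value, asn1Spec, encodeFun, **options):
            # encode TRUE as 0xFF and FALSE as 0x00, per LDAP's BER rules
            return ((255,) if value else (0,)), False, False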
customTagMap[Boolean.tagSet] = LDAPBooleanEncoder()
customTypeMap[Boolean.typeId] = LDAPBooleanEncoder()
encode = Encoder(customTagMap, customTypeMap)
# end of monkey patching
# a fast BER decoder for LDAP responses only
def compute_ber_size(data):
"""
Compute size according to BER definite length rules
Returns size of value and value offset
"""
if data[1] <= 127: # BER definite length - short form. Highest bit of byte 1 is 0, message length is in the last 7 bits - Value can be up to 127 bytes long
return data[1], 2
else: # BER definite length - long form. Highest bit of byte 1 is 1, last 7 bits counts the number of following octets containing the value length
bytes_length = data[1] - 128
value_length = 0
cont = bytes_length
for byte in data[2: 2 + bytes_length]:
cont -= 1
value_length += byte * (256 ** cont)
return value_length, bytes_length + 2
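# Worked examples (values follow directly from the rules above):
#   compute_ber_size(bytearray(b'\x30\x05\x02\x01\x00\x04\x00'))
#       -> (5, 2)    short form: length octet 0x05, value starts at offset 2
#   compute_ber_size(bytearray(b'\x30\x82\x01\xf4' + b'\x00' * 500))
#       -> (500, 4)  long form: 0x82 says two length octets follow, 0x01f4 == 500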
######
if str is not bytes: # Python 3
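    # byte-access helpers used by the fast decoder (minimal sketch:
    # on Python 3, indexing bytes already yields ints)
    def get_byte(x):
        return x

    def get_bytes(x):
        return x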
else: # Python 2
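    # minimal sketch: on Python 2, str indexing yields 1-char strings
    def get_byte(x):
        return ord(x)

    def get_bytes(x):
        return bytearray(x)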
DECODERS = {
# Universal
(0, 1): decode_boolean, # Boolean
(0, 2): decode_integer, # Integer
(0, 4): decode_octet_string, # Octet String
(0, 10): decode_integer, # Enumerated
(0, 16): decode_sequence, # Sequence
(0, 17): decode_sequence, # Set
# Application
(1, 1): decode_bind_response, # Bind response
(1, 4): decode_sequence, # Search result entry
(1, 5): decode_sequence, # Search result done
(1, 7): decode_sequence, # Modify response
(1, 9): decode_sequence, # Add response
(1, 11): decode_sequence, # Delete response
(1, 13): decode_sequence, # ModifyDN response
(1, 15): decode_sequence, # Compare response
(1, 19): decode_sequence, # Search result reference
(1, 24): decode_extended_response, # Extended response
(1, 25): decode_intermediate_response, # intermediate response
(2, 3): decode_octet_string #
}
BIND_RESPONSE_CONTEXT = {
7: decode_octet_string # SaslCredentials
}
EXTENDED_RESPONSE_CONTEXT = {
10: decode_octet_string, # ResponseName
11: decode_octet_string # Response Value
}
INTERMEDIATE_RESPONSE_CONTEXT = {
0: decode_octet_string, # IntermediateResponseName
1: decode_octet_string # IntermediateResponseValue
}
LDAP_MESSAGE_CONTEXT = {
0: decode_controls, # Controls
3: decode_sequence # Referral
}
CONTROLS_CONTEXT = {
0: decode_sequence # Control
}
| 37.821138 | 161 | 0.661651 |
6b0c63a3de849494bdcf25b7c5c83e9a868cfc9f | 2,351 | py | Python | lib/utils/arg_scope.py | SimeonZhang/detectron2_tensorflow | ca03f633111d540ea91b3de75dbfa1da813647be | [
"Apache-2.0"
] | 3 | 2021-06-07T10:48:51.000Z | 2022-03-01T11:43:40.000Z | lib/utils/arg_scope.py | SimeonZhang/detectron2_tensorflow | ca03f633111d540ea91b3de75dbfa1da813647be | [
"Apache-2.0"
] | null | null | null | lib/utils/arg_scope.py | SimeonZhang/detectron2_tensorflow | ca03f633111d540ea91b3de75dbfa1da813647be | [
"Apache-2.0"
] | null | null | null | import copy
from contextlib import contextmanager
from functools import wraps
from collections import defaultdict
import tensorflow as tf
_ArgScopeStack = []
def get_arg_scope():
"""
Returns:
dict: the current argscope.
An argscope is a dict of dict: ``dict[layername] = {arg: val}``
"""
if len(_ArgScopeStack) > 0:
return _ArgScopeStack[-1]
else:
return defaultdict(dict)
def add_arg_scope(cls):
"""Decorator for function to support argscope
Example:
.. code-block:: python
from mylib import MyClass
myfunc = add_arg_scope(MyClass)
Args:
func: A function mapping one or multiple tensors to one or multiple
tensors.
Remarks:
If the function ``func`` returns multiple input or output tensors,
only the first input/output tensor shape is displayed during logging.
Returns:
The decorated function.
"""
original_init = cls.__init__
cls.__arg_scope_enabled__ = True
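    # wrapped_init was elided in extraction. A minimal sketch: look up the
    # current argscope's defaults for this class and let explicit keyword
    # arguments win over them.
    @wraps(original_init)
    def wrapped_init(self, *args, **kwargs):
        actual_args = copy.copy(get_arg_scope()[cls.__name__])
        actual_args.update(kwargs)
        original_init(self, *args, **actual_args)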
cls.__init__ = wrapped_init
return cls
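@contextmanager
def arg_scope(layers, **kwargs):
    """Push default keyword arguments for the given layer classes.
    The context manager itself was elided in extraction; the body below is
    a minimal sketch in the usual tensorpack-style shape.
    """
    if not isinstance(layers, list):
        layers = [layers]
    new_scope = copy.deepcopy(get_arg_scope())
    for layer in layers:
        assert getattr(layer, '__arg_scope_enabled__', False), \
            "Class should be decorated with add_arg_scope first!"
        new_scope[layer.__name__].update(kwargs)
    _ArgScopeStack.append(new_scope)
    yield
    del _ArgScopeStack[-1]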
| 26.41573 | 93 | 0.64228 |
6b0cebe762170956488a4d3cddc7f97ae057f2da | 754 | py | Python | CORN-TEST/textfsm_parse.py | AnkitDeshwal89/NETMIKO | 81c164e9cff46d11b56612f6adc343b6bcdfe87a | [
"Apache-2.0"
] | null | null | null | CORN-TEST/textfsm_parse.py | AnkitDeshwal89/NETMIKO | 81c164e9cff46d11b56612f6adc343b6bcdfe87a | [
"Apache-2.0"
] | null | null | null | CORN-TEST/textfsm_parse.py | AnkitDeshwal89/NETMIKO | 81c164e9cff46d11b56612f6adc343b6bcdfe87a | [
"Apache-2.0"
] | null | null | null | import textfsm
import subprocess
import random
res = subprocess.run('ifconfig',stdout=subprocess.PIPE)
intstatus = res.stdout.decode('ascii')
with open("datafile","w+") as a:
a.write(intstatus)
a.close()
template_file= "ifconfig-template.template"
template = open(template_file)
with open("datafile") as f:
raw_data = f.read()
re_table = textfsm.TextFSM(template)
data = re_table.ParseText(raw_data)
print(data)
NL = []
for x in data:
NLD = {
'Interface': x[0].split(':')[0],
'TX': int(x[1]) + random.randint(1, 100)
}
NL.append(NLD)
print(NL)
import json
print('#'*12)
print(json.dumps(NL))
#Enter template FileName :ifconfig-template.template
#Input Data file : ifconfig_output.txt
| 18.390244 | 55 | 0.667109 |
6b0d16f74ff1faebf0826e751ccbc24a085729d3 | 31,951 | py | Python | classes.py | jared-jorgenson/mini_game | ac73987ac4c32c0e9f521d7bcf8d4d9ee4ded85a | [
"MIT"
] | null | null | null | classes.py | jared-jorgenson/mini_game | ac73987ac4c32c0e9f521d7bcf8d4d9ee4ded85a | [
"MIT"
] | null | null | null | classes.py | jared-jorgenson/mini_game | ac73987ac4c32c0e9f521d7bcf8d4d9ee4ded85a | [
"MIT"
] | null | null | null | import pygame
| 59.833333 | 121 | 0.52111 |
6b0d7e26713e21d118eb39e3b4c51db758d9a74a | 18,151 | py | Python | installSynApps/data_model/install_config.py | NSLS-II/installSynApps | 0f8e978939715bbba1a064ead3044fa36215cb09 | [
"BSD-3-Clause"
] | null | null | null | installSynApps/data_model/install_config.py | NSLS-II/installSynApps | 0f8e978939715bbba1a064ead3044fa36215cb09 | [
"BSD-3-Clause"
] | 2 | 2021-01-06T19:57:19.000Z | 2021-03-11T20:48:42.000Z | installSynApps/data_model/install_config.py | NSLS-II/installSynApps | 0f8e978939715bbba1a064ead3044fa36215cb09 | [
"BSD-3-Clause"
] | 1 | 2020-12-14T20:35:20.000Z | 2020-12-14T20:35:20.000Z | """A file containing representations of install configurations.
The core Data representation for installSynApps. An InstallConfiguration object
is parsed from a configuration, and is then used throughout the build process.
InjectorFile objects are used for representing text that needs to be injected
into configuration files prior to builds.
"""
import os
import installSynApps
from installSynApps.data_model.install_module import InstallModule as IM
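# NOTE: the InstallConfiguration class itself was elided in extraction.
# A minimal sketch of the interface exercised below (an assumption, not
# the actual implementation):
class InstallConfiguration:
    def __init__(self, install_location, path_to_configure):
        self.install_location = install_location
        self.path_to_configure = path_to_configure
        self.modules = []
        self.injector_files = {}

    def add_module(self, module):
        self.modules.append(module)

    def add_injector_file(self, name, contents, target):
        self.injector_files[name] = (contents, target)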
def generate_default_install_config(target_install_loc='/epics', update_versions=False, with_pva=True):
config = InstallConfiguration(target_install_loc, None)
y = 'YES'
n = 'NO'
gu = 'GIT_URL'
wu = 'WGET_URL'
base_org = 'https://github.com/epics-base/'
syn_org = 'https://github.com/EPICS-synApps/'
mod_org = 'https://github.com/epics-modules/'
ad_org = 'https://github.com/areaDetector/'
seq_rel = 'http://www-csr.bessy.de/control/SoftDist/sequencer/releases/'
psi_org = 'https://github.com/paulscherrerinstitute/'
# Add core modules that will generally always be built
config.add_module(IM("EPICS_BASE", "R7.0.3", "$(INSTALL)/base", gu, base_org, "epics-base", y, y, y))
config.add_module(IM("SUPPORT", "R6-1", "$(INSTALL)/support", gu, syn_org, "support", y, y, n))
config.add_module(IM("CONFIGURE", "R6-1", "$(SUPPORT)/configure", gu, syn_org, "configure", y, y, n))
config.add_module(IM("UTILS", "R6-1", "$(SUPPORT)/utils", gu, syn_org, "utils", y, y, n))
config.add_module(IM("SNCSEQ", "2.2.8", "$(SUPPORT)/seq", wu, seq_rel, "seq-2.2.8.tar.gz", y, y, y))
config.add_module(IM("IPAC", "2.15", "$(SUPPORT)/ipac", gu, mod_org, "ipac", y, y, y))
config.add_module(IM("ASYN", "R4-37", "$(SUPPORT)/asyn", gu, mod_org, "asyn", y, y, y))
config.add_module(IM("AUTOSAVE", "R5-10", "$(SUPPORT)/autosave", gu, mod_org, "autosave", y, y, y))
config.add_module(IM("BUSY", "R1-7-2", "$(SUPPORT)/busy", gu, mod_org, "busy", y, y, y))
config.add_module(IM("CALC", "R3-7-3", "$(SUPPORT)/calc", gu, mod_org, "calc", y, y, y))
config.add_module(IM("DEVIOCSTATS", "master", "$(SUPPORT)/iocStats", gu, mod_org, "iocStats", y, y, y))
config.add_module(IM("SSCAN", "R2-11-3", "$(SUPPORT)/sscan", gu, mod_org, "sscan", y, y, y))
config.add_module(IM("IPUNIDIG", "R2-11", "$(SUPPORT)/ipUnidig", gu, mod_org, "ipUnidig", y, y, y))
# Some modules that are commonly needed
config.add_module(IM("XSPRESS3", "master", "$(SUPPORT)/xspress3", gu, mod_org, "xspress3", y, y, y))
config.add_module(IM("MOTOR", "R7-1", "$(SUPPORT)/motor", gu, mod_org, "motor", y, y, y))
config.add_module(IM("QUADEM", "R9-3", "$(SUPPORT)/quadEM", gu, mod_org, "quadEM", y, y, y))
config.add_module(IM("STREAM", "2.8.10", "$(SUPPORT)/stream", gu, psi_org, "StreamDevice", y, y, y))
# AreaDetector and commonly used drivers
config.add_module(IM("AREA_DETECTOR", "R3-8", "$(SUPPORT)/areaDetector", gu, ad_org, "areaDetector", y, y, n))
config.add_module(IM("ADSUPPORT", "R1-9", "$(AREA_DETECTOR)/ADSupport", gu, ad_org, "ADSupport", y, y, y))
config.add_module(IM("ADCORE", "R3-8", "$(AREA_DETECTOR)/ADCore", gu, ad_org, "ADCore", y, y, y))
config.add_module(IM("ADPERKINELMER", "master", "$(AREA_DETECTOR)/ADPerkinElmer", gu, ad_org, "ADPerkinElmer", n, n, n))
config.add_module(IM("ADGENICAM", "master", "$(AREA_DETECTOR)/ADGenICam", gu, ad_org, "ADGenICam", n, n, n))
config.add_module(IM("ADANDOR3", "master", "$(AREA_DETECTOR)/ADAndor3", gu, ad_org, "ADAndor3", n, n, n))
config.add_module(IM("ADPROSILICA", "R2-5", "$(AREA_DETECTOR)/ADProsilica", gu, ad_org, "ADProsilica", n, n, n))
config.add_module(IM("ADSIMDETECTOR", "master", "$(AREA_DETECTOR)/ADSimDetector", gu, ad_org, "ADSimDetector", n, n, n))
config.add_module(IM("ADPILATUS", "R2-8", "$(AREA_DETECTOR)/ADPilatus", gu, ad_org, "ADPilatus", n, n, n))
config.add_module(IM("ADMERLIN", "master", "$(AREA_DETECTOR)/ADMerlin", gu, ad_org, "ADMerlin", n, n, n))
config.add_module(IM("ADARAVIS", "master", "$(AREA_DETECTOR)/ADAravis", gu, ad_org, "ADAravis", n, n, n))
config.add_module(IM("ADEIGER", "R2-6", "$(AREA_DETECTOR)/ADEiger", gu, ad_org, "ADEiger", n, n, n))
config.add_module(IM("ADVIMBA", "master", "$(AREA_DETECTOR)/ADVimba", gu, ad_org, "ADVimba", n, n, n))
config.add_module(IM("ADPOINTGREY", "master", "$(AREA_DETECTOR)/ADPointGrey", gu, ad_org, "ADPointGrey", n, n, n))
config.add_module(IM("ADANDOR", "R2-8", "$(AREA_DETECTOR)/ADAndor", gu, ad_org, "ADAndor", n, n, n))
config.add_module(IM("ADDEXELA", "R2-3", "$(AREA_DETECTOR)/ADDexela", gu, ad_org, "ADDexela", n, n, n))
config.add_module(IM("ADMYTHEN", "master", "$(AREA_DETECTOR)/ADMythen", gu, ad_org, "ADMythen", n, n, n))
config.add_module(IM("ADURL", "master", "$(AREA_DETECTOR)/ADURL", gu, ad_org, "ADURL", n, n, n))
common_plugins_str = 'dbLoadRecords("$(DEVIOCSTATS)/db/iocAdminSoft.db", "IOC=$(PREFIX)")\n'
autosave_str = 'file "sseqRecord_settings.req", P=$(P), S=AcquireSequence\n'
if with_pva:
autosave_str += 'file "NDPva_settings.req", P=$(P), R=Pva1:\n'
common_plugins_str += 'NDPvaConfigure("PVA1", $(QSIZE), 0, "$(PORT)", 0, $(PREFIX)Pva1:Image, 0, 0, 0)\n' \
'dbLoadRecords("NDPva.template", "P=$(PREFIX),R=Pva1:, PORT=PVA1,ADDR=0,TIMEOUT=1,NDARRAY_PORT=$(PORT)")\n' \
'# Must start PVA server if this is enabled\n' \
'startPVAServer\n'
config.add_injector_file('PLUGIN_CONFIG', common_plugins_str, '$(AREA_DETECTOR)/ADCore/iocBoot/EXAMPLE_commonPlugins.cmd')
config.add_injector_file('AUTOSAVE_CONFIG', autosave_str, '$(AREA_DETECTOR)/ADCore/iocBoot/EXAMPLE_commonPlugin_settings.req')
if update_versions:
installSynApps.sync_all_module_tags(config)
return config
| 39.804825 | 130 | 0.598204 |
6b0ed79dd0939a74afbcf7db38081382144c0b6e | 3,587 | py | Python | apps/accounts/views.py | tarvitz/icu | 9a7cdac9d26ea224539f68f678b90bf70084374d | [
"BSD-3-Clause"
] | 1 | 2022-03-12T23:44:21.000Z | 2022-03-12T23:44:21.000Z | apps/accounts/views.py | tarvitz/icu | 9a7cdac9d26ea224539f68f678b90bf70084374d | [
"BSD-3-Clause"
] | null | null | null | apps/accounts/views.py | tarvitz/icu | 9a7cdac9d26ea224539f68f678b90bf70084374d | [
"BSD-3-Clause"
] | null | null | null | # Create your views here.
# -*- coding: utf-8 -*-
from apps.core.helpers import render_to, ajax_response, get_object_or_None
from apps.core.decorators import lock, login_required_json
from apps.accounts.models import Invite
from apps.accounts.decorators import check_invite
from apps.accounts.forms import (
LoginForm, AccountRegisterForm, SendInviteForm, InviteRegisterForm
)
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from django.contrib import auth
from django.contrib.auth.decorators import login_required
from django.conf import settings
from django.db import transaction
from django.utils.translation import ugettext_lazy as _
#@check for possibility to register
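# The view bodies were elided in extraction. A minimal sketch of an
# invite-gated registration view; the decorator semantics (render_to
# returning a template context, check_invite validating the invite sid)
# and the URL name are assumptions based on the imports above:
@render_to('accounts/register.html')
@check_invite
def register(request, sid=None):
    form = InviteRegisterForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        form.save()
        return {'redirect': reverse('accounts:login')}
    return {'form': form}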
| 30.922414 | 115 | 0.642598 |
6b0f57abb4c6963ae8d955c1ecf87495f2b1c219 | 12,193 | py | Python | plugins/modules/oci_blockstorage_volume_backup_policy_facts.py | LaudateCorpus1/oci-ansible-collection | 2b1cd87b4d652a97c1ca752cfc4fdc4bdb37a7e7 | [
"Apache-2.0"
] | null | null | null | plugins/modules/oci_blockstorage_volume_backup_policy_facts.py | LaudateCorpus1/oci-ansible-collection | 2b1cd87b4d652a97c1ca752cfc4fdc4bdb37a7e7 | [
"Apache-2.0"
] | null | null | null | plugins/modules/oci_blockstorage_volume_backup_policy_facts.py | LaudateCorpus1/oci-ansible-collection | 2b1cd87b4d652a97c1ca752cfc4fdc4bdb37a7e7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# Copyright (c) 2020, 2022 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_blockstorage_volume_backup_policy_facts
short_description: Fetches details about one or multiple VolumeBackupPolicy resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple VolumeBackupPolicy resources in Oracle Cloud Infrastructure
- Lists all the volume backup policies available in the specified compartment.
- For more information about Oracle defined backup policies and user defined backup policies,
see L(Policy-Based Backups,https://docs.cloud.oracle.com/iaas/Content/Block/Tasks/schedulingvolumebackups.htm).
- If I(policy_id) is specified, the details of a single VolumeBackupPolicy will be returned.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
policy_id:
description:
- The OCID of the volume backup policy.
- Required to get a specific volume_backup_policy.
type: str
aliases: ["id"]
compartment_id:
description:
- The OCID of the compartment.
If no compartment is specified, the Oracle defined backup policies are listed.
type: str
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_display_name_option ]
"""
EXAMPLES = """
- name: Get a specific volume_backup_policy
oci_blockstorage_volume_backup_policy_facts:
# required
policy_id: "ocid1.policy.oc1..xxxxxxEXAMPLExxxxxx"
- name: List volume_backup_policies
oci_blockstorage_volume_backup_policy_facts:
# optional
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
"""
RETURN = """
volume_backup_policies:
description:
- List of VolumeBackupPolicy resources
returned: on success
type: complex
contains:
display_name:
description:
- A user-friendly name. Does not have to be unique, and it's changeable.
Avoid entering confidential information.
returned: on success
type: str
sample: display_name_example
id:
description:
- The OCID of the volume backup policy.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
schedules:
description:
- The collection of schedules that this policy will apply.
returned: on success
type: complex
contains:
backup_type:
description:
- The type of volume backup to create.
returned: on success
type: str
sample: FULL
offset_seconds:
description:
- The number of seconds that the volume backup start
time should be shifted from the default interval boundaries specified by
the period. The volume backup start time is the frequency start time plus the offset.
returned: on success
type: int
sample: 56
period:
description:
- The volume backup frequency.
returned: on success
type: str
sample: ONE_HOUR
offset_type:
description:
- Indicates how the offset is defined. If value is `STRUCTURED`,
then `hourOfDay`, `dayOfWeek`, `dayOfMonth`, and `month` fields are used
and `offsetSeconds` will be ignored in requests and users should ignore its
value from the responses.
- "`hourOfDay` is applicable for periods `ONE_DAY`,
`ONE_WEEK`, `ONE_MONTH` and `ONE_YEAR`."
- "`dayOfWeek` is applicable for period
`ONE_WEEK`."
- "`dayOfMonth` is applicable for periods `ONE_MONTH` and `ONE_YEAR`."
- "'month' is applicable for period 'ONE_YEAR'."
- They will be ignored in the requests for inapplicable periods.
- If value is `NUMERIC_SECONDS`, then `offsetSeconds`
will be used for both requests and responses and the structured fields will be
ignored in the requests and users should ignore their values from the responses.
- For clients using older versions of Apis and not sending `offsetType` in their
requests, the behaviour is just like `NUMERIC_SECONDS`.
returned: on success
type: str
sample: STRUCTURED
hour_of_day:
description:
- The hour of the day to schedule the volume backup.
returned: on success
type: int
sample: 56
day_of_week:
description:
- The day of the week to schedule the volume backup.
returned: on success
type: str
sample: MONDAY
day_of_month:
description:
- The day of the month to schedule the volume backup.
returned: on success
type: int
sample: 56
month:
description:
- The month of the year to schedule the volume backup.
returned: on success
type: str
sample: JANUARY
retention_seconds:
description:
- How long, in seconds, to keep the volume backups created by this schedule.
returned: on success
type: int
sample: 56
time_zone:
description:
- Specifies what time zone is the schedule in
returned: on success
type: str
sample: UTC
destination_region:
description:
- The paired destination region for copying scheduled backups to. Example `us-ashburn-1`.
See L(Region Pairs,https://docs.cloud.oracle.com/iaas/Content/Block/Tasks/schedulingvolumebackups.htm#RegionPairs) for details about paired
regions.
returned: on success
type: str
sample: us-phoenix-1
time_created:
description:
- The date and time the volume backup policy was created. Format defined by L(RFC3339,https://tools.ietf.org/html/rfc3339).
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
compartment_id:
description:
- The OCID of the compartment that contains the volume backup.
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
defined_tags:
description:
- Defined tags for this resource. Each key is predefined and scoped to a
namespace. For more information, see L(Resource Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
freeform_tags:
description:
- Free-form tags for this resource. Each tag is a simple key-value pair with no
predefined name, type, or namespace. For more information, see L(Resource
Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Department\\": \\"Finance\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
sample: [{
"display_name": "display_name_example",
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"schedules": [{
"backup_type": "FULL",
"offset_seconds": 56,
"period": "ONE_HOUR",
"offset_type": "STRUCTURED",
"hour_of_day": 56,
"day_of_week": "MONDAY",
"day_of_month": 56,
"month": "JANUARY",
"retention_seconds": 56,
"time_zone": "UTC"
}],
"destination_region": "us-phoenix-1",
"time_created": "2013-10-20T19:20:30+01:00",
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"defined_tags": {'Operations': {'CostCenter': 'US'}},
"freeform_tags": {'Department': 'Finance'}
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.core import BlockstorageClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
VolumeBackupPolicyFactsHelperCustom = get_custom_class(
"VolumeBackupPolicyFactsHelperCustom"
)
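# main() was elided in extraction. A minimal sketch of the entry point in
# the standard generated-module shape; the exact helper wiring is assumed:
def main():
    module_args = oci_common_utils.get_common_arg_spec()
    module_args.update(
        dict(
            policy_id=dict(aliases=["id"], type="str"),
            compartment_id=dict(type="str"),
            display_name=dict(type="str"),
        )
    )
    module = AnsibleModule(argument_spec=module_args)
    if not HAS_OCI_PY_SDK:
        module.fail_json(msg="oci python sdk required for this module.")
    # A full implementation builds the resource facts helper around
    # BlockstorageClient and calls module.exit_json(volume_backup_policies=...).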
if __name__ == "__main__":
main()
| 38.342767 | 157 | 0.584598 |
6b0ff469900ccc9c854a18661fc7b7737ba3ac79 | 98 | py | Python | pi_control/server_stats/apps.py | mhozza/pi-control | 0dce821b4702519fedc3950270ee0091ed484ef6 | [
"MIT"
] | null | null | null | pi_control/server_stats/apps.py | mhozza/pi-control | 0dce821b4702519fedc3950270ee0091ed484ef6 | [
"MIT"
] | 10 | 2020-03-14T21:04:36.000Z | 2022-03-03T21:51:07.000Z | pi_control/server_stats/apps.py | mhozza/pi-control | 0dce821b4702519fedc3950270ee0091ed484ef6 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
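# The AppConfig subclass was elided in extraction. A minimal sketch; the
# app label is inferred from the package path and is an assumption:
class ServerStatsConfig(AppConfig):
    name = 'server_stats'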
| 16.333333 | 35 | 0.77551 |
6b1048e91d3158720f5949f6fb7c7ea76df6e7a1 | 14,435 | py | Python | testproject/testproject/settings.py | jackvz/mezzanine-cartridge-api | c956afa672fcf1035ab60cd5eb6589a06ccaafa0 | [
"MIT"
] | 1 | 2019-04-18T23:28:03.000Z | 2019-04-18T23:28:03.000Z | testproject/testproject/settings.py | jackvz/mezzanine-cartridge-api | c956afa672fcf1035ab60cd5eb6589a06ccaafa0 | [
"MIT"
] | 1 | 2020-06-05T20:27:04.000Z | 2020-06-05T20:27:04.000Z | testproject/testproject/settings.py | jackvz/mezzanine-cartridge-api | c956afa672fcf1035ab60cd5eb6589a06ccaafa0 | [
"MIT"
] | 1 | 2020-12-13T15:55:53.000Z | 2020-12-13T15:55:53.000Z |
from __future__ import absolute_import, unicode_literals
import os
from django import VERSION as DJANGO_VERSION
from django.utils.translation import ugettext_lazy as _
SECRET_KEY = '%29hnw7d-dy4n)!@1yi#ov#^@x0b=o*2o8^31oe!+(xw!!oc9a'
######################
# CARTRIDGE SETTINGS #
######################
# The following settings are already defined in cartridge.shop.defaults
# with default values, but are common enough to be put here, commented
# out, for conveniently overriding. Please consult the settings
# documentation for a full list of settings Cartridge implements:
# http://cartridge.jupo.org/configuration.html#default-settings
# Sequence of available credit card types for payment.
# SHOP_CARD_TYPES = ("Mastercard", "Visa", "Diners", "Amex")
# Setting to turn on featured images for shop categories. Defaults to False.
# SHOP_CATEGORY_USE_FEATURED_IMAGE = True
# If True, the checkout process is split into separate
# billing/shipping and payment steps.
# SHOP_CHECKOUT_STEPS_SPLIT = True
# If True, the checkout process has a final confirmation step before
# completion.
# SHOP_CHECKOUT_STEPS_CONFIRMATION = True
# Controls the formatting of monetary values according to the locale
# module in the Python standard library. If an empty string is
# used, will fall back to the system's locale.
SHOP_CURRENCY_LOCALE = "en_GB.UTF-8"
# Dotted package path and name of the function that
# is called on submit of the billing/shipping checkout step. This
# is where shipping calculation can be performed and set using the
# function ``cartridge.shop.utils.set_shipping``.
# SHOP_HANDLER_BILLING_SHIPPING = \
# "cartridge.shop.checkout.default_billship_handler"
# Dotted package path and name of the function that
# is called once an order is successful and all of the order
# object's data has been created. This is where any custom order
# processing should be implemented.
# SHOP_HANDLER_ORDER = "cartridge.shop.checkout.default_order_handler"
# Dotted package path and name of the function that
# is called on submit of the payment checkout step. This is where
# integration with a payment gateway should be implemented.
# SHOP_HANDLER_PAYMENT = "cartridge.shop.checkout.default_payment_handler"
# Sequence of value/name pairs for order statuses.
# SHOP_ORDER_STATUS_CHOICES = (
# (1, "Unprocessed"),
# (2, "Processed"),
# )
# Sequence of value/name pairs for types of product options,
# eg Size, Colour. NOTE: Increasing the number of these will
# require database migrations!
# SHOP_OPTION_TYPE_CHOICES = (
# (1, "Size"),
# (2, "Colour"),
# )
# Sequence of indexes from the SHOP_OPTION_TYPE_CHOICES setting that
# control how the options should be ordered in the admin,
# eg for "Colour" then "Size" given the above:
# SHOP_OPTION_ADMIN_ORDER = (2, 1)
######################
# MEZZANINE SETTINGS #
######################
# The following settings are already defined with default values in
# the ``defaults.py`` module within each of Mezzanine's apps, but are
# common enough to be put here, commented out, for conveniently
# overriding. Please consult the settings documentation for a full list
# of settings Mezzanine implements:
# http://mezzanine.jupo.org/docs/configuration.html#default-settings
# Controls the ordering and grouping of the admin menu.
#
# ADMIN_MENU_ORDER = (
# ("Content", ("pages.Page", "blog.BlogPost",
# "generic.ThreadedComment", (_("Media Library"), "media-library"),)),
# (_("Shop"), ("shop.Product", "shop.ProductOption", "shop.DiscountCode",
# "shop.Sale", "shop.Order")),
# ("Site", ("sites.Site", "redirects.Redirect", "conf.Setting")),
# ("Users", ("auth.User", "auth.Group",)),
# )
# A three item sequence, each containing a sequence of template tags
# used to render the admin dashboard.
#
# DASHBOARD_TAGS = (
# ("blog_tags.quick_blog", "mezzanine_tags.app_list"),
# ("comment_tags.recent_comments",),
# ("mezzanine_tags.recent_actions",),
# )
# A sequence of templates used by the ``page_menu`` template tag. Each
# item in the sequence is a three item sequence, containing a unique ID
# for the template, a label for the template, and the template path.
# These templates are then available for selection when editing which
# menus a page should appear in. Note that if a menu template is used
# that doesn't appear in this setting, all pages will appear in it.
# PAGE_MENU_TEMPLATES = (
# (1, _("Top navigation bar"), "pages/menus/dropdown.html"),
# (2, _("Left-hand tree"), "pages/menus/tree.html"),
# (3, _("Footer"), "pages/menus/footer.html"),
# )
# A sequence of fields that will be injected into Mezzanine's (or any
# library's) models. Each item in the sequence is a four item sequence.
# The first two items are the dotted path to the model and its field
# name to be added, and the dotted path to the field class to use for
# the field. The third and fourth items are a sequence of positional
# args and a dictionary of keyword args, to use when creating the
# field instance. When specifying the field class, the path
# ``django.models.db.`` can be omitted for regular Django model fields.
#
# EXTRA_MODEL_FIELDS = (
# (
# # Dotted path to field.
# "mezzanine.blog.models.BlogPost.image",
# # Dotted path to field class.
# "somelib.fields.ImageField",
# # Positional args for field class.
# (_("Image"),),
# # Keyword args for field class.
# {"blank": True, "upload_to": "blog"},
# ),
# # Example of adding a field to *all* of Mezzanine's content types:
# (
# "mezzanine.pages.models.Page.another_field",
# "IntegerField", # 'django.db.models.' is implied if path is omitted.
# (_("Another name"),),
# {"blank": True, "default": 1},
# ),
# )
# Setting to turn on featured images for blog posts. Defaults to False.
#
# BLOG_USE_FEATURED_IMAGE = True
# If True, the django-modeltranslation will be added to the
# INSTALLED_APPS setting.
USE_MODELTRANSLATION = False
########################
# MAIN DJANGO SETTINGS #
########################
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['*']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = True
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en"
# Supported languages
LANGUAGES = (
('en', _('English')),
)
# A boolean that turns on/off debug mode. When set to ``True``, stack traces
# are displayed for error pages. Should always be set to ``False`` in
# production. Best set to ``True`` in local_settings.py
DEBUG = True
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
AUTHENTICATION_BACKENDS = ("mezzanine.core.auth_backends.MezzanineBackend",)
# The numeric mode to set newly-uploaded files to. The value should be
# a mode you'd pass directly to os.chmod.
FILE_UPLOAD_PERMISSIONS = 0o644
#############
# DATABASES #
#############
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'db.dev',
}
}
#########
# PATHS #
#########
# Full filesystem path to the project.
PROJECT_APP_PATH = os.path.dirname(os.path.abspath(__file__))
PROJECT_APP = os.path.basename(PROJECT_APP_PATH)
PROJECT_ROOT = BASE_DIR = os.path.dirname(PROJECT_APP_PATH)
# Every cache key will get prefixed with this value - here we set it to
# the name of the directory the project is in to try and use something
# project specific.
CACHE_MIDDLEWARE_KEY_PREFIX = PROJECT_APP
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = "/static/"
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, STATIC_URL.strip("/"))
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = STATIC_URL + "media/"
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, *MEDIA_URL.strip("/").split("/"))
# Package/module name to import the root urlpatterns from for the project.
ROOT_URLCONF = "%s.urls" % PROJECT_APP
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [
os.path.join(PROJECT_ROOT, "templates")
],
"OPTIONS": {
"context_processors": [
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.static",
"django.template.context_processors.media",
"django.template.context_processors.request",
"django.template.context_processors.tz",
"mezzanine.conf.context_processors.settings",
"mezzanine.pages.context_processors.page",
],
"builtins": [
"mezzanine.template.loader_tags",
],
"loaders": [
"mezzanine.template.loaders.host_themes.Loader",
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
},
},
]
if DJANGO_VERSION < (1, 9):
del TEMPLATES[0]["OPTIONS"]["builtins"]
################
# APPLICATIONS #
################
INSTALLED_APPS = (
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.redirects",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.sitemaps",
"django.contrib.staticfiles",
"mezzanine.boot",
"mezzanine.conf",
"mezzanine.core",
"mezzanine.generic",
"mezzanine.pages",
"cartridge.shop",
"mezzanine.blog",
"mezzanine.forms",
"mezzanine.galleries",
"mezzanine.twitter",
# "mezzanine.accounts",
'corsheaders',
'rest_framework',
'rest_framework_api_key',
'drf_yasg',
# 'oauth2_provider',
# 'rest_framework.authtoken',
'mezzanine_cartridge_api',
)
# List of middleware classes to use. Order is important; in the request phase,
# these middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE = (
"mezzanine.core.middleware.UpdateCacheMiddleware",
'django.contrib.sessions.middleware.SessionMiddleware',
# Uncomment if using internationalisation or localisation
# 'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
"cartridge.shop.middleware.ShopMiddleware",
"mezzanine.core.request.CurrentRequestMiddleware",
"mezzanine.core.middleware.RedirectFallbackMiddleware",
"mezzanine.core.middleware.AdminLoginInterfaceSelectorMiddleware",
"mezzanine.core.middleware.SitePermissionMiddleware",
"mezzanine.pages.middleware.PageMiddleware",
"mezzanine.core.middleware.FetchFromCacheMiddleware",
'corsheaders.middleware.CorsMiddleware',
)
if DJANGO_VERSION < (1, 10):
MIDDLEWARE_CLASSES = MIDDLEWARE
del MIDDLEWARE
# Store these package names here as they may change in the future since
# at the moment we are using custom forks of them.
PACKAGE_NAME_FILEBROWSER = "filebrowser_safe"
PACKAGE_NAME_GRAPPELLI = "grappelli_safe"
#########################
# OPTIONAL APPLICATIONS #
#########################
# These will be added to ``INSTALLED_APPS``, only if available.
OPTIONAL_APPS = (
"debug_toolbar",
"django_extensions",
"compressor",
PACKAGE_NAME_FILEBROWSER,
PACKAGE_NAME_GRAPPELLI,
)
##################
# LOCAL SETTINGS #
##################
# Allow any settings to be defined in local_settings.py which should be
# ignored in your version control system allowing for settings to be
# defined per machine.
# Instead of doing "from .local_settings import *", we use exec so that
# local_settings has full access to everything defined in this module.
# Also force into sys.modules so it's visible to Django's autoreload.
f = os.path.join(PROJECT_APP_PATH, "local_settings.py")
if os.path.exists(f):
import sys
import imp
module_name = "%s.local_settings" % PROJECT_APP
module = imp.new_module(module_name)
module.__file__ = f
sys.modules[module_name] = module
exec(open(f, "rb").read())
####################
# DYNAMIC SETTINGS #
####################
# set_dynamic_settings() will rewrite globals based on what has been
# defined so far, in order to provide some better defaults where
# applicable. We also allow this settings module to be imported
# without Mezzanine installed, as the case may be when using the
# fabfile, where setting the dynamic settings below isn't strictly
# required.
try:
from mezzanine.utils.conf import set_dynamic_settings
except ImportError:
pass
else:
set_dynamic_settings(globals())
| 34.783133 | 79 | 0.699619 |
6b1163fcd99a8abc3b5c62d0ed18bd3324cc7b0a | 959 | py | Python | wordgen/data_gen.py | ishaanbakhle/wordgen.us | 45c5247ce04b13badd2e1b3164cedc9176a805c7 | [
"MIT"
] | null | null | null | wordgen/data_gen.py | ishaanbakhle/wordgen.us | 45c5247ce04b13badd2e1b3164cedc9176a805c7 | [
"MIT"
] | null | null | null | wordgen/data_gen.py | ishaanbakhle/wordgen.us | 45c5247ce04b13badd2e1b3164cedc9176a805c7 | [
"MIT"
] | null | null | null | from wordgen import consts
import numpy as np
from sklearn import preprocessing
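# fill_matrix was elided in extraction. A minimal sketch of a character
# bigram transition-matrix builder, L1-normalized per row; the real
# implementation (and its use of `consts`) is not recoverable here:
def fill_matrix(text):
    chars = sorted(set(text))
    index = {c: i for i, c in enumerate(chars)}
    matrix = np.zeros((len(chars), len(chars)))
    for a, b in zip(text, text[1:]):
        matrix[index[a], index[b]] += 1
    return preprocessing.normalize(matrix, norm='l1')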
if __name__ == '__main__':
print(fill_matrix("james as"))
| 25.236842 | 76 | 0.594369 |
6b1167f333bc4ee9231e98ecd5d13fbcbf6bc62d | 30,725 | py | Python | arcade/gl/context.py | Cleptomania/arcade | abb7f0a0229b7f3a7843856d4b0812a3a2b80468 | [
"MIT"
] | null | null | null | arcade/gl/context.py | Cleptomania/arcade | abb7f0a0229b7f3a7843856d4b0812a3a2b80468 | [
"MIT"
] | null | null | null | arcade/gl/context.py | Cleptomania/arcade | abb7f0a0229b7f3a7843856d4b0812a3a2b80468 | [
"MIT"
] | null | null | null | from ctypes import c_int, c_char_p, cast, c_float
from collections import deque
import logging
import weakref
from typing import Any, Dict, List, Optional, Sequence, Set, Tuple, Union
import pyglet
from pyglet.window import Window
from pyglet import gl
from .buffer import Buffer
from .program import Program
from .vertex_array import Geometry, VertexArray
from .framebuffer import Framebuffer, DefaultFrameBuffer
from .texture import Texture
from .query import Query
from .glsl import ShaderSource
from .types import BufferDescription
LOG = logging.getLogger(__name__)
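# NOTE: the Context class header (and its GL flag constants) was elided in
# extraction; the methods below belong to it. A minimal stub of the pieces
# they reference:
class Context:
    BLEND = gl.GL_BLEND
    DEPTH_TEST = gl.GL_DEPTH_TEST
    CULL_FACE = gl.GL_CULL_FACE
    PROGRAM_POINT_SIZE = gl.GL_PROGRAM_POINT_SIZE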
def enable(self, *args):
"""
Enables one or more context flags::
# Single flag
ctx.enable(ctx.BLEND)
# Multiple flags
ctx.enable(ctx.DEPTH_TEST, ctx.CULL_FACE)
"""
self._flags.update(args)
for flag in args:
gl.glEnable(flag)
def enable_only(self, *args):
"""
Enable only some flags. This will disable all other flags.
This is a simple way to ensure that context flag states
are not lingering from other sections of your code base::
# Ensure all flags are disabled (enable no flags)
ctx.enable_only()
# Make sure only blending is enabled
ctx.enable_only(ctx.BLEND)
# Make sure only depth test and culling is enabled
ctx.enable_only(ctx.DEPTH_TEST, ctx.CULL_FACE)
"""
self._flags = set(args)
if self.BLEND in self._flags:
gl.glEnable(self.BLEND)
else:
gl.glDisable(self.BLEND)
if self.DEPTH_TEST in self._flags:
gl.glEnable(self.DEPTH_TEST)
else:
gl.glDisable(self.DEPTH_TEST)
if self.CULL_FACE in self._flags:
gl.glEnable(self.CULL_FACE)
else:
gl.glDisable(self.CULL_FACE)
if self.PROGRAM_POINT_SIZE in self._flags:
gl.glEnable(self.PROGRAM_POINT_SIZE)
else:
gl.glDisable(self.PROGRAM_POINT_SIZE)
def disable(self, *args):
"""
Disable one or more context flags::
# Single flag
ctx.disable(ctx.BLEND)
# Multiple flags
ctx.disable(ctx.DEPTH_TEST, ctx.CULL_FACE)
"""
self._flags -= set(args)
for flag in args:
gl.glDisable(flag)
def is_enabled(self, flag) -> bool:
"""
Check if a context flag is enabled
:type: bool
"""
return flag in self._flags
# def blend_equation(self)
# def front_face(self)
# def cull_face(self)
def finish(self) -> None:
"""Wait until all OpenGL rendering commands are completed"""
gl.glFinish()
# --- Resource methods ---
def buffer(
self, *, data: Optional[Any] = None, reserve: int = 0, usage: str = "static"
) -> Buffer:
"""Create a new OpenGL Buffer object.
:param Any data: The buffer data. This can be ``bytes`` or an object supporting the buffer protocol.
:param int reserve: The number of bytes to reserve
:param str usage: Buffer usage. 'static', 'dynamic' or 'stream'
:rtype: :py:class:`~arcade.gl.Buffer`
"""
# create_with_size
return Buffer(self, data, reserve=reserve, usage=usage)
def framebuffer(
self,
*,
color_attachments: Union[Texture, List[Texture]] = None,
depth_attachment: Texture = None
) -> Framebuffer:
"""Create a Framebuffer.
:param List[arcade.gl.Texture] color_attachments: List of textures we want to render into
:param arcade.gl.Texture depth_attachment: Depth texture
:rtype: :py:class:`~arcade.gl.Framebuffer`
"""
return Framebuffer(
self, color_attachments=color_attachments, depth_attachment=depth_attachment
)
def texture(
self,
size: Tuple[int, int],
*,
components: int = 4,
dtype: str = "f1",
data: Any = None,
wrap_x: gl.GLenum = None,
wrap_y: gl.GLenum = None,
filter: Tuple[gl.GLenum, gl.GLenum] = None
) -> Texture:
"""Create a 2D Texture.
Wrap modes: ``GL_REPEAT``, ``GL_MIRRORED_REPEAT``, ``GL_CLAMP_TO_EDGE``, ``GL_CLAMP_TO_BORDER``
Minifying filters: ``GL_NEAREST``, ``GL_LINEAR``, ``GL_NEAREST_MIPMAP_NEAREST``, ``GL_LINEAR_MIPMAP_NEAREST``
``GL_NEAREST_MIPMAP_LINEAR``, ``GL_LINEAR_MIPMAP_LINEAR``
Magnifying filters: ``GL_NEAREST``, ``GL_LINEAR``
:param Tuple[int, int] size: The size of the texture
:param int components: Number of components (1: R, 2: RG, 3: RGB, 4: RGBA)
:param str dtype: The data type of each component: f1, f2, f4 / i1, i2, i4 / u1, u2, u4
:param Any data: The texture data (optional). Can be bytes or an object supporting the buffer protocol.
:param GLenum wrap_x: How the texture wraps in x direction
:param GLenum wrap_y: How the texture wraps in y direction
:param Tuple[GLenum,GLenum] filter: Minification and magnification filter
"""
return Texture(
self,
size,
components=components,
data=data,
dtype=dtype,
wrap_x=wrap_x,
wrap_y=wrap_y,
filter=filter,
)
def depth_texture(self, size: Tuple[int, int], *, data=None) -> Texture:
"""Create a 2D depth texture
:param Tuple[int, int] size: The size of the texture
:param Any data: The texture data (optional). Can be bytes or an object supporting the buffer protocol.
"""
return Texture(self, size, data=data, depth=True)
def geometry(
self,
content: Optional[Sequence[BufferDescription]] = None,
index_buffer: Buffer = None,
mode: int = None,
index_element_size: int = 4,
):
"""
Create a Geometry instance.
:param list content: List of :py:class:`~arcade.gl.BufferDescription` (optional)
:param Buffer index_buffer: Index/element buffer (optional)
:param int mode: The default draw mode (optional)
:param int index_element_size: Byte size of the index buffer type. Can be 1, 2 or 4 (8, 16 or 32 bit unsigned integer)
"""
return Geometry(self, content, index_buffer=index_buffer, mode=mode, index_element_size=index_element_size)
def program(
self,
*,
vertex_shader: str,
fragment_shader: str = None,
geometry_shader: str = None,
tess_control_shader: str = None,
tess_evaluation_shader: str = None,
defines: Dict[str, str] = None
) -> Program:
"""Create a :py:class:`~arcade.gl.Program` given the vertex, fragment and geometry shader.
:param str vertex_shader: vertex shader source
:param str fragment_shader: fragment shader source (optional)
:param str geometry_shader: geometry shader source (optional)
:param str tess_control_shader: tessellation control shader source (optional)
:param str tess_evaluation_shader: tessellation evaluation shader source (optional)
:param dict defines: Substitute #defines values in the source (optional)
:rtype: :py:class:`~arcade.gl.Program`
"""
source_vs = ShaderSource(vertex_shader, gl.GL_VERTEX_SHADER)
source_fs = (
ShaderSource(fragment_shader, gl.GL_FRAGMENT_SHADER)
if fragment_shader
else None
)
source_geo = (
ShaderSource(geometry_shader, gl.GL_GEOMETRY_SHADER)
if geometry_shader
else None
)
source_tc = (
ShaderSource(tess_control_shader, gl.GL_TESS_CONTROL_SHADER)
if tess_control_shader
else None
)
source_te = (
ShaderSource(tess_evaluation_shader, gl.GL_TESS_EVALUATION_SHADER)
if tess_evaluation_shader
else None
)
# If we don't have a fragment shader we are doing transform feedback.
# When a geometry shader is present the out attributes will be located there
out_attributes = [] # type: List[str]
if not source_fs:
if source_geo:
out_attributes = source_geo.out_attributes
else:
out_attributes = source_vs.out_attributes
return Program(
self,
vertex_shader=source_vs.get_source(defines=defines),
fragment_shader=source_fs.get_source(defines=defines)
if source_fs
else None,
geometry_shader=source_geo.get_source(defines=defines)
if source_geo
else None,
tess_control_shader=source_tc.get_source(defines=defines)
if source_tc
else None,
tess_evaluation_shader=source_te.get_source(defines=defines)
if source_te
else None,
out_attributes=out_attributes,
)
def query(self):
"""
Create a query object for measuring rendering calls in opengl.
:rtype: :py:class:`~arcade.gl.Query`
"""
return Query(self)
class ContextStats:
def __init__(self, warn_threshold=100):
self.warn_threshold = warn_threshold
# (created, freed)
self.texture = (0, 0)
self.framebuffer = (0, 0)
self.buffer = (0, 0)
self.program = (0, 0)
self.vertex_array = (0, 0)
self.geometry = (0, 0)
| 38.310474 | 126 | 0.652823 |
6b118924fc3b616de8ffa2d875b9ce842c00da9f | 2,141 | py | Python | api/app/models/bookings/exam.py | pixelater/queue-management | 9881505d4af2b9860aeaf76b9572315dd016c7dc | [
"Apache-2.0"
] | null | null | null | api/app/models/bookings/exam.py | pixelater/queue-management | 9881505d4af2b9860aeaf76b9572315dd016c7dc | [
"Apache-2.0"
] | 1 | 2019-02-26T00:27:31.000Z | 2019-02-26T00:27:31.000Z | api/app/models/bookings/exam.py | pixelater/queue-management | 9881505d4af2b9860aeaf76b9572315dd016c7dc | [
"Apache-2.0"
] | null | null | null | '''Copyright 2018 Province of British Columbia
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.'''
from app.models.bookings import Base
from qsystem import db
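# The Exam model body was elided in extraction. A minimal sketch; the
# column names below are hypothetical, not the real schema:
class Exam(Base):
    __tablename__ = 'exam'

    exam_id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    exam_name = db.Column(db.String(50), nullable=False)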
| 43.693878 | 111 | 0.739841 |
6b12470d00652efed9a53779a3b55749c6b298e3 | 9,350 | py | Python | datatableview/tests/test_helpers.py | gregneagle/sal | 74c583fb1c1b33d3201b308b147376b3dcaca33f | [
"Apache-2.0"
] | 2 | 2019-11-01T20:50:35.000Z | 2021-01-13T22:02:55.000Z | datatableview/tests/test_helpers.py | gregneagle/sal | 74c583fb1c1b33d3201b308b147376b3dcaca33f | [
"Apache-2.0"
] | null | null | null | datatableview/tests/test_helpers.py | gregneagle/sal | 74c583fb1c1b33d3201b308b147376b3dcaca33f | [
"Apache-2.0"
] | null | null | null | # -*- encoding: utf-8 -*-
from datetime import datetime
from functools import partial
from django import get_version
from datatableview import helpers
import six
from .testcase import DatatableViewTestCase
from .test_app.models import ExampleModel, RelatedM2MModel
if tuple(int(part) for part in get_version().split('.')[:2]) < (1, 7):
test_data_fixture = 'test_data_legacy.json'
else:
test_data_fixture = 'test_data.json'
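# The test cases were elided in extraction. A minimal sketch of one, in the
# style the imports suggest; the fixture pk and assertion details are
# assumptions:
class HelpersTests(DatatableViewTestCase):
    fixtures = [test_data_fixture]

    def test_link_to_model(self):
        """link_to_model should render an anchor tag for a model instance."""
        obj = ExampleModel.objects.get(pk=1)
        self.assertIn('<a href=', helpers.link_to_model(obj))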
| 37.250996 | 107 | 0.610695 |
6b136297b7f7ffe43bf97fc683bc6c2f3794e562 | 3,518 | py | Python | discordbot.py | naari3/seibaribot | 3686206ed0b28b318a4032753350be8d9f2223fd | [
"MIT"
] | null | null | null | discordbot.py | naari3/seibaribot | 3686206ed0b28b318a4032753350be8d9f2223fd | [
"MIT"
] | null | null | null | discordbot.py | naari3/seibaribot | 3686206ed0b28b318a4032753350be8d9f2223fd | [
"MIT"
] | 1 | 2022-02-09T16:45:40.000Z | 2022-02-09T16:45:40.000Z | import traceback
from os import getenv
import discord
from discord import Message
from discord.ext import commands
from discord.ext.commands import Context
from asyncio import sleep
import asyncio
client = discord.Client()
# the bot's command prefix is "!"
bot = commands.Bot(command_prefix='!')
# channel IDs
GIRATINA_CHANNEL_ID = 940610524415144036
WIP_CHANNEL_ID = 940966825087361025
# Bot
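# The event handlers and commands were elided in extraction. A minimal
# sketch of the usual discord.py shape (handler behaviour is assumed):
@bot.event
async def on_ready():
    print(f'logged in as {bot.user}')

@bot.event
async def on_message(message: Message):
    if message.author.bot:
        return
    await bot.process_commands(message)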
# bokuseku.mp3 - https://qiita.com/sizumita/items/cafd00fe3e114d834ce3
token = getenv('DISCORD_BOT_TOKEN')
bot.run(token)
| 29.07438 | 191 | 0.689312 |
6b13e68ee45340f613741a1e02396fe2503dcda1 | 6,831 | py | Python | test/cpp/naming/utils/dns_server.py | arghyadip01/grpc | 9e10bfc8a096ef91a327e22f84f10c0fabff4417 | [
"Apache-2.0"
] | 9 | 2020-12-04T07:34:08.000Z | 2022-03-07T21:10:35.000Z | test/cpp/naming/utils/dns_server.py | arghyadip01/grpc | 9e10bfc8a096ef91a327e22f84f10c0fabff4417 | [
"Apache-2.0"
] | 62 | 2020-02-27T00:53:36.000Z | 2021-02-05T06:10:53.000Z | test/cpp/naming/utils/dns_server.py | arghyadip01/grpc | 9e10bfc8a096ef91a327e22f84f10c0fabff4417 | [
"Apache-2.0"
] | 12 | 2020-07-14T23:59:57.000Z | 2022-03-22T09:59:18.000Z | #!/usr/bin/env python2.7
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starts a local DNS server for use in tests"""
import argparse
import sys
import yaml
import signal
import os
import threading
import time
import twisted
import twisted.internet
import twisted.internet.reactor
import twisted.internet.threads
import twisted.internet.defer
import twisted.internet.protocol
import twisted.names
import twisted.names.client
import twisted.names.dns
import twisted.names.server
from twisted.names import client, server, common, authority, dns
import platform
_SERVER_HEALTH_CHECK_RECORD_NAME = 'health-check-local-dns-server-is-alive.resolver-tests.grpctestingexp' # missing end '.' for twisted syntax
_SERVER_HEALTH_CHECK_RECORD_DATA = '123.123.123.123'
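# main() was elided in extraction. A minimal sketch of the entry point;
# the flag names and default port are assumptions:
def main():
    argp = argparse.ArgumentParser(description='Local DNS server for tests')
    argp.add_argument('-p', '--port', default=15353, type=int,
                      help='Port for the DNS server to listen on')
    args = argp.parse_args()
    # A full implementation builds twisted.names zone authorities from a
    # YAML records config, adds the health-check A record defined above,
    # and serves both UDP and TCP on args.port before starting the reactor.
    twisted.internet.reactor.run()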
if __name__ == '__main__':
main()
| 37.125 | 143 | 0.639731 |
6b13e6f6469b20dda5e5b5da9f0367c1ee7833b5 | 726 | py | Python | colour/examples/models/examples_ictcp.py | BPearlstine/colour | 40f0281295496774d2a19eee017d50fd0c265bd8 | [
"Cube",
"BSD-3-Clause"
] | 2 | 2020-05-03T20:15:42.000Z | 2021-04-09T18:19:06.000Z | colour/examples/models/examples_ictcp.py | BPearlstine/colour | 40f0281295496774d2a19eee017d50fd0c265bd8 | [
"Cube",
"BSD-3-Clause"
] | null | null | null | colour/examples/models/examples_ictcp.py | BPearlstine/colour | 40f0281295496774d2a19eee017d50fd0c265bd8 | [
"Cube",
"BSD-3-Clause"
] | 1 | 2019-12-11T19:48:27.000Z | 2019-12-11T19:48:27.000Z | # -*- coding: utf-8 -*-
"""
Showcases *ICTCP* *colour encoding* computations.
"""
import numpy as np
import colour
from colour.utilities import message_box
message_box('"ICTCP" Colour Encoding Computations')
RGB = np.array([0.45620519, 0.03081071, 0.04091952])
message_box(('Converting from "ITU-R BT.2020" colourspace to "ICTCP" colour '
'encoding given "RGB" values:\n'
'\n\t{0}'.format(RGB)))
print(colour.RGB_to_ICTCP(RGB))
print('\n')
ICTCP = np.array([0.07351364, 0.00475253, 0.09351596])
message_box(('Converting from "ICTCP" colour encoding to "ITU-R BT.2020" '
'colourspace given "ICTCP" values:\n'
'\n\t{0}'.format(ICTCP)))
print(colour.ICTCP_to_RGB(ICTCP))
| 27.923077 | 77 | 0.665289 |
6b147a9cf7116a1a4434ef19b42cc9c65f9ba8e8 | 1,536 | py | Python | app/core/model/routine.py | MauricioAntonioMartinez/django-workout-tracker-api | 82f9499f172bd6d4b861f072948949dd6f8f6ec1 | [
"MIT"
] | null | null | null | app/core/model/routine.py | MauricioAntonioMartinez/django-workout-tracker-api | 82f9499f172bd6d4b861f072948949dd6f8f6ec1 | [
"MIT"
] | null | null | null | app/core/model/routine.py | MauricioAntonioMartinez/django-workout-tracker-api | 82f9499f172bd6d4b861f072948949dd6f8f6ec1 | [
"MIT"
] | null | null | null |
import os
import uuid
from django.conf import settings # this is how we can retrieve variables
# from the settings file
from django.contrib.auth.models import (AbstractBaseUser, BaseUserManager,
PermissionsMixin)
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from django.utils.translation import gettext_lazy as _
from multiselectfield import MultiSelectField
# Manager User class is the class that provides the creation
# of user or admin and all methods out of the box
from rest_framework import exceptions
from rest_framework.authentication import TokenAuthentication
from user.custom_token import ExpiringToken
from .exercise import BaseSerie
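# NOTE: the model classes were elided from this dump. The sketch below is a
# hedged reconstruction of a minimal Routine model consistent with the
# imports above; field names and relations are assumptions.
class Routine(models.Model):
    name = models.CharField(max_length=100)
    user = models.ForeignKey(settings.AUTH_USER_MODEL,
                             on_delete=models.CASCADE,
                             related_name='routines')
    created_at = models.DateTimeField(auto_now_add=True)

    class Meta:
        verbose_name = _('routine')
        verbose_name_plural = _('routines')

    def __str__(self):
        return self.name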
| 30.72 | 74 | 0.742839 |
6b15e8a8bf1abf0fd58cab05c52fa68b6927df9e | 3,732 | py | Python | example_scripts/transect_tutorial.py | British-Oceanographic-Data-Centre/COAsT | 4d3d57c9afb61a92063b665626c1828dd2998d2b | [
"MIT"
] | 8 | 2020-09-10T13:40:07.000Z | 2022-03-10T22:52:44.000Z | example_scripts/transect_tutorial.py | British-Oceanographic-Data-Centre/COAsT | 4d3d57c9afb61a92063b665626c1828dd2998d2b | [
"MIT"
] | 294 | 2020-05-11T12:17:17.000Z | 2022-03-31T22:07:52.000Z | example_scripts/transect_tutorial.py | British-Oceanographic-Data-Centre/COAsT | 4d3d57c9afb61a92063b665626c1828dd2998d2b | [
"MIT"
] | 4 | 2020-05-28T10:43:56.000Z | 2021-09-07T10:40:09.000Z | """
This is a demonstration script for using the Transect class in the COAsT
package. This object has strict data formatting requirements, which are
outlined in transect.py.
Transect subsetting (a vertical slice of data between two coordinates): creating transects and performing some custom diagnostics with them.
---
In this tutorial we take a look at subsetting the model data along a transect (a custom straight line) and creating some bespoke diagnostics along it. We look at:
1. Creating a TRANSECT object, defined between two points.
2. Plotting data along a transect.
3. Calculating flow normal to the transect
"""
## Create a transect subset of the example dataset
# Load packages and define some file paths
import coast
import xarray as xr
import matplotlib.pyplot as plt
fn_nemo_dat_t = "./example_files/nemo_data_T_grid.nc"
fn_nemo_dat_u = "./example_files/nemo_data_U_grid.nc"
fn_nemo_dat_v = "./example_files/nemo_data_V_grid.nc"
fn_nemo_dom = "./example_files/COAsT_example_NEMO_domain.nc"
# Configuration files describing the data files
fn_config_t_grid = "./config/example_nemo_grid_t.json"
fn_config_f_grid = "./config/example_nemo_grid_f.json"
fn_config_u_grid = "./config/example_nemo_grid_u.json"
fn_config_v_grid = "./config/example_nemo_grid_v.json"
# %% Load data variables that are on the NEMO t-grid
nemo_t = coast.Gridded(fn_data=fn_nemo_dat_t, fn_domain=fn_nemo_dom, config=fn_config_t_grid)
# Now create a transect between the points (54 N 15 W) and (56 N, 12 W) using the `coast.TransectT` object. This needs to be passed the corresponding NEMO object and transect end points. The model points closest to these coordinates will be selected as the transect end points.
tran_t = coast.TransectT(nemo_t, (54, -15), (56, -12))
# Inspect the data
tran_t.data
# where `r_dim` is the dimension along the transect.
# %% Plot the data
# It is simple to plot a scalar such as temperature along the transect:
temp_mean = tran_t.data.temperature.mean(dim="t_dim")
plt.figure()
temp_mean.plot.pcolormesh(y="depth_0", yincrease=False)
plt.show()
# %% Flow across the transect
# With NEMO's staggered grid, the first step is to define the transect on the f-grid so that the velocity components are between f-points. We do not need any model data on the f-grid, just the grid information, so create a NEMO f-grid object
nemo_f = coast.Gridded(fn_domain=fn_nemo_dom, config=fn_config_f_grid)
# and a transect on the f-grid
tran_f = coast.TransectF(nemo_f, (54, -15), (56, -12))
tran_f.data
# We also need the i- and j-components of velocity, so we (lazily) load the model data on the u- and v-grids
nemo_u = coast.Gridded(fn_data=fn_nemo_dat_u, fn_domain=fn_nemo_dom, config=fn_config_u_grid)
nemo_v = coast.Gridded(fn_data=fn_nemo_dat_v, fn_domain=fn_nemo_dom, config=fn_config_v_grid)
# Now we can calculate the flow across the transect with the method
tran_f.calc_flow_across_transect(nemo_u, nemo_v)
# The flow across the transect is stored in a new dataset where the variables are all defined at the points between f-points.
tran_f.data_cross_tran_flow
# For example, to plot the time averaged velocity across the transect, we can plot the normal_velocities variable
cross_velocity_mean = tran_f.data_cross_tran_flow.normal_velocities.mean(dim="t_dim")
plt.figure()
cross_velocity_mean.rolling(r_dim=2).mean().plot.pcolormesh(yincrease=False, y="depth_0", cbar_kwargs={"label": "m/s"})
plt.show()
# or the volume transport across the transect, we can plot the normal_transports variable
plt.figure()
cross_transport_mean = tran_f.data_cross_tran_flow.normal_transports.mean(dim="t_dim")
cross_transport_mean.rolling(r_dim=2).mean().plot()
plt.ylabel("Sv")
plt.show()
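# As a rough final check, the time-mean transports can also be summed along
# the transect to estimate the net volume transport. This extra snippet is a
# sketch; it assumes normal_transports is already depth-integrated per segment.
net_transport = tran_f.data_cross_tran_flow.normal_transports.mean(dim="t_dim").sum(dim="r_dim")
print(f"Net volume transport across the transect: {float(net_transport):.2f} Sv")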
| 38.081633 | 277 | 0.780279 |
6b16a33b1ae4cc31b9c80ce44c59e17df1095980 | 44,917 | py | Python | diofant/logic/boolalg.py | skirpichev/diofant | 16e280fdd6053be10c3b60fbb66fc26b52ede27a | [
"BSD-3-Clause"
] | null | null | null | diofant/logic/boolalg.py | skirpichev/diofant | 16e280fdd6053be10c3b60fbb66fc26b52ede27a | [
"BSD-3-Clause"
] | 1 | 2021-06-23T08:27:17.000Z | 2021-06-23T08:27:17.000Z | diofant/logic/boolalg.py | skirpichev/diofant | 16e280fdd6053be10c3b60fbb66fc26b52ede27a | [
"BSD-3-Clause"
] | 1 | 2021-06-23T07:58:58.000Z | 2021-06-23T07:58:58.000Z | """
Boolean algebra module for Diofant.
"""
from collections import defaultdict
from itertools import combinations, product
from ..core import Atom, cacheit
from ..core.expr import Expr
from ..core.function import Application
from ..core.numbers import Number
from ..core.operations import LatticeOp
from ..core.singleton import S
from ..core.singleton import SingletonWithManagedProperties as Singleton
from ..core.sympify import converter, sympify
from ..utilities import ordered
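# NOTE: the class definitions (Boolean, BooleanTrue, BooleanFalse, And, Or,
# Not, Xor, Implies, Equivalent, ...) were elided from this dump; only the
# module-level helpers survive below.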
true = BooleanTrue()
false: BooleanFalse = BooleanFalse()
# We want S.true and S.false to work, rather than S.BooleanTrue and
# S.BooleanFalse, but making the class and instance names the same causes some
# major issues (like the inability to import the class directly from this
# file).
S.true = true
S.false = false
converter[bool] = lambda x: true if x else false
# end class definitions. Some useful methods
def conjuncts(expr):
"""Return a list of the conjuncts in the expr s.
Examples
========
>>> conjuncts(a & b) == frozenset([a, b])
True
>>> conjuncts(a | b) == frozenset([Or(a, b)])
True
"""
return And.make_args(expr)
def disjuncts(expr):
"""Return a list of the disjuncts in the sentence s.
Examples
========
>>> disjuncts(a | b) == frozenset([a, b])
True
>>> disjuncts(a & b) == frozenset([And(a, b)])
True
"""
return Or.make_args(expr)
def distribute_and_over_or(expr):
"""
Given a sentence s consisting of conjunctions and disjunctions
of literals, return an equivalent sentence in CNF.
Examples
========
>>> distribute_and_over_or(Or(a, And(Not(b), Not(c))))
(a | ~b) & (a | ~c)
"""
return _distribute((expr, And, Or))
def distribute_or_over_and(expr):
"""
Given a sentence s consisting of conjunctions and disjunctions
of literals, return an equivalent sentence in DNF.
Note that the output is NOT simplified.
Examples
========
>>> distribute_or_over_and(And(Or(Not(a), b), c))
(b & c) | (c & ~a)
"""
return _distribute((expr, Or, And))
def _distribute(info):
"""Distributes info[1] over info[2] with respect to info[0]."""
if isinstance(info[0], info[2]):
for arg in info[0].args:
if isinstance(arg, info[1]):
conj = arg
break
else:
return info[0]
rest = info[2](*[a for a in info[0].args if a is not conj])
return info[1](*list(map(_distribute,
((info[2](c, rest), info[1], info[2]) for c in conj.args))))
elif isinstance(info[0], info[1]):
return info[1](*list(map(_distribute,
((x, info[1], info[2]) for x in info[0].args))))
else:
return info[0]
def to_nnf(expr, simplify=True):
"""
Converts expr to Negation Normal Form.
A logical expression is in Negation Normal Form (NNF) if it
contains only And, Or and Not, and Not is applied only to literals.
If simplify is True, the result contains no redundant clauses.
Examples
========
>>> to_nnf(Not((~a & ~b) | (c & d)))
(a | b) & (~c | ~d)
>>> to_nnf(Equivalent(a >> b, b >> a))
(a | ~b | (a & ~b)) & (b | ~a | (b & ~a))
"""
expr = sympify(expr)
if is_nnf(expr, simplify):
return expr
return expr.to_nnf(simplify)
def to_cnf(expr, simplify=False):
"""
Convert a propositional logical sentence s to conjunctive normal form.
That is, of the form ((A | ~B | ...) & (B | C | ...) & ...).
If simplify is True, the expr is evaluated to its simplest CNF form.
Examples
========
>>> to_cnf(~(a | b) | c)
(c | ~a) & (c | ~b)
>>> to_cnf((a | b) & (a | ~a), True)
a | b
"""
expr = sympify(expr)
if not isinstance(expr, BooleanFunction):
return expr
if simplify:
return simplify_logic(expr, 'cnf', True)
# Don't convert unless we have to
if is_cnf(expr):
return expr
expr = eliminate_implications(expr)
return distribute_and_over_or(expr)
def to_dnf(expr, simplify=False):
"""
Convert a propositional logical sentence s to disjunctive normal form.
That is, of the form ((A & ~B & ...) | (B & C & ...) | ...).
If simplify is True, the expr is evaluated to its simplest DNF form.
Examples
========
>>> to_dnf(b & (a | c))
(a & b) | (b & c)
>>> to_dnf((a & b) | (a & ~b) | (b & c) | (~b & c), True)
a | c
"""
expr = sympify(expr)
if not isinstance(expr, BooleanFunction):
return expr
if simplify:
return simplify_logic(expr, 'dnf', True)
# Don't convert unless we have to
if is_dnf(expr):
return expr
expr = eliminate_implications(expr)
return distribute_or_over_and(expr)
def is_nnf(expr, simplified=True):
"""
Checks if expr is in Negation Normal Form.
A logical expression is in Negation Normal Form (NNF) if it
contains only And, Or and Not, and Not is applied only to literals.
If simplified is True, checks if result contains no redundant clauses.
Examples
========
>>> is_nnf(a & b | ~c)
True
>>> is_nnf((a | ~a) & (b | c))
False
>>> is_nnf((a | ~a) & (b | c), False)
True
>>> is_nnf(Not(a & b) | c)
False
>>> is_nnf((a >> b) & (b >> a))
False
"""
expr = sympify(expr)
if is_literal(expr):
return True
stack = [expr]
while stack:
expr = stack.pop()
if expr.func in (And, Or):
if simplified:
args = expr.args
for arg in args:
if Not(arg) in args:
return False
stack.extend(expr.args)
elif not is_literal(expr):
return False
return True
def is_cnf(expr):
"""
Test whether or not an expression is in conjunctive normal form.
Examples
========
>>> is_cnf(a | b | c)
True
>>> is_cnf(a & b & c)
True
>>> is_cnf((a & b) | c)
False
"""
return _is_form(expr, And, Or)
def is_dnf(expr):
"""
Test whether or not an expression is in disjunctive normal form.
Examples
========
>>> is_dnf(a | b | c)
True
>>> is_dnf(a & b & c)
True
>>> is_dnf((a & b) | c)
True
>>> is_dnf(a & (b | c))
False
"""
return _is_form(expr, Or, And)
def _is_form(expr, function1, function2):
"""Test whether or not an expression is of the required form."""
expr = sympify(expr)
# Special case of an Atom
if expr.is_Atom:
return True
# Special case of a single expression of function2
if isinstance(expr, function2):
for lit in expr.args:
if isinstance(lit, Not):
if not lit.args[0].is_Atom:
return False
else:
if not lit.is_Atom:
return False
return True
# Special case of a single negation
if isinstance(expr, Not):
if not expr.args[0].is_Atom:
return False
if not isinstance(expr, function1):
return False
for cls in expr.args:
if cls.is_Atom:
continue
if isinstance(cls, Not):
if not cls.args[0].is_Atom:
return False
elif not isinstance(cls, function2):
return False
for lit in cls.args:
if isinstance(lit, Not):
if not lit.args[0].is_Atom:
return False
else:
if not lit.is_Atom:
return False
return True
def eliminate_implications(expr):
"""
    Change >>, <<, and Equivalent into &, |, and ~. That is, return an
    expression that is equivalent to ``expr``, but has only &, |, and ~ as logical
operators.
Examples
========
>>> eliminate_implications(Implies(a, b))
b | ~a
>>> eliminate_implications(Equivalent(a, b))
(a | ~b) & (b | ~a)
>>> eliminate_implications(Equivalent(a, b, c))
(a | ~c) & (b | ~a) & (c | ~b)
"""
return to_nnf(expr)
def is_literal(expr):
"""
Returns True if expr is a literal, else False.
Examples
========
>>> is_literal(a)
True
>>> is_literal(~a)
True
>>> is_literal(a + b)
True
>>> is_literal(Or(a, b))
False
"""
if isinstance(expr, Not):
return not isinstance(expr.args[0], BooleanFunction)
else:
return not isinstance(expr, BooleanFunction)
def to_int_repr(clauses, symbols):
"""
Takes clauses in CNF format and puts them into an integer representation.
Examples
========
>>> to_int_repr([x | y, y], [x, y])
[{1, 2}, {2}]
"""
symbols = dict(zip(symbols, range(1, len(symbols) + 1)))
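    # The nested helper was elided from this dump; this reconstruction maps a
    # plain literal to +index and a negated literal to -index (an assumption
    # consistent with the doctest above).
    def append_symbol(arg, symbols):
        if isinstance(arg, Not):
            return -symbols[arg.args[0]]
        return symbols[arg]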
return [{append_symbol(arg, symbols) for arg in Or.make_args(c)}
for c in clauses]
def _check_pair(minterm1, minterm2):
"""
Checks if a pair of minterms differs by only one bit. If yes, returns
index, else returns -1.
"""
index = -1
for x, (i, j) in enumerate(zip(minterm1, minterm2)):
if i != j:
if index == -1:
index = x
else:
return -1
return index
def _convert_to_varsSOP(minterm, variables):
"""
    Converts a term in the expansion of a function from binary to its
    variable form (for SOP).
"""
temp = []
for i, m in enumerate(minterm):
if m == 0:
temp.append(Not(variables[i]))
elif m == 1:
temp.append(variables[i])
return And(*temp)
def _convert_to_varsPOS(maxterm, variables):
"""
    Converts a term in the expansion of a function from binary to its
    variable form (for POS).
"""
temp = []
for i, m in enumerate(maxterm):
if m == 1:
temp.append(Not(variables[i]))
elif m == 0:
temp.append(variables[i])
return Or(*temp)
def _simplified_pairs(terms):
"""
    Reduces a set of minterms, if possible, to a simplified set of minterms
    with one fewer variable per term, using the Quine-McCluskey method.
"""
simplified_terms = []
todo = list(range(len(terms)))
for i, ti in enumerate(terms[:-1]):
for j_i, tj in enumerate(terms[(i + 1):]):
index = _check_pair(ti, tj)
if index != -1:
todo[i] = todo[j_i + i + 1] = None
newterm = ti[:]
newterm[index] = 3
if newterm not in simplified_terms:
simplified_terms.append(newterm)
simplified_terms.extend(
[terms[i] for i in [_ for _ in todo if _ is not None]])
return simplified_terms
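# Worked example (illustrative): _simplified_pairs([[0, 0, 1], [0, 1, 1]])
# merges the two minterms, which differ only in the middle bit, into
# [0, 3, 1], where 3 marks the eliminated ("don't care") position.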
def _compare_term(minterm, term):
"""
Return True if a binary term is satisfied by the given term. Used
for recognizing prime implicants.
"""
for i, x in enumerate(term):
if x not in (3, minterm[i]):
return False
return True
def _rem_redundancy(l1, terms):
"""
After the truth table has been sufficiently simplified, use the prime
implicant table method to recognize and eliminate redundant pairs,
and return the essential arguments.
"""
essential = []
for x in terms:
temporary = []
for y in l1:
if _compare_term(x, y):
temporary.append(y)
if len(temporary) == 1:
if temporary[0] not in essential:
essential.append(temporary[0])
for x in terms:
for y in essential:
if _compare_term(x, y):
break
else:
for z in l1: # pragma: no branch
if _compare_term(x, z):
assert z not in essential
essential.append(z)
break
return essential
def SOPform(variables, minterms, dontcares=None):
"""
The SOPform function uses simplified_pairs and a redundant group-
eliminating algorithm to convert the list of all input combos that
generate '1' (the minterms) into the smallest Sum of Products form.
The variables must be given as the first argument.
Return a logical Or function (i.e., the "sum of products" or "SOP"
form) that gives the desired outcome. If there are inputs that can
be ignored, pass them as a list, too.
The result will be one of the (perhaps many) functions that satisfy
the conditions.
Examples
========
>>> minterms = [[0, 0, 0, 1], [0, 0, 1, 1],
... [0, 1, 1, 1], [1, 0, 1, 1], [1, 1, 1, 1]]
>>> dontcares = [[0, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 1]]
>>> SOPform([t, x, y, z], minterms, dontcares)
(y & z) | (z & ~t)
References
==========
* https://en.wikipedia.org/wiki/Quine-McCluskey_algorithm
"""
variables = [sympify(v) for v in variables]
if minterms == []:
return false
minterms = [list(i) for i in minterms]
dontcares = [list(i) for i in (dontcares or [])]
for d in dontcares:
if d in minterms:
raise ValueError(f'{d} in minterms is also in dontcares')
old = None
new = minterms + dontcares
while new != old:
old = new
new = _simplified_pairs(old)
essential = _rem_redundancy(new, minterms)
return Or(*[_convert_to_varsSOP(x, variables) for x in essential])
def POSform(variables, minterms, dontcares=None):
"""
The POSform function uses simplified_pairs and a redundant-group
eliminating algorithm to convert the list of all input combinations
that generate '1' (the minterms) into the smallest Product of Sums form.
The variables must be given as the first argument.
Return a logical And function (i.e., the "product of sums" or "POS"
form) that gives the desired outcome. If there are inputs that can
be ignored, pass them as a list, too.
The result will be one of the (perhaps many) functions that satisfy
the conditions.
Examples
========
>>> minterms = [[0, 0, 0, 1], [0, 0, 1, 1], [0, 1, 1, 1],
... [1, 0, 1, 1], [1, 1, 1, 1]]
>>> dontcares = [[0, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 1]]
>>> POSform([t, x, y, z], minterms, dontcares)
z & (y | ~t)
References
==========
* https://en.wikipedia.org/wiki/Quine-McCluskey_algorithm
"""
variables = [sympify(v) for v in variables]
if minterms == []:
return false
minterms = [list(i) for i in minterms]
dontcares = [list(i) for i in (dontcares or [])]
for d in dontcares:
if d in minterms:
raise ValueError(f'{d} in minterms is also in dontcares')
maxterms = []
for t in product([0, 1], repeat=len(variables)):
t = list(t)
if (t not in minterms) and (t not in dontcares):
maxterms.append(t)
old = None
new = maxterms + dontcares
while new != old:
old = new
new = _simplified_pairs(old)
essential = _rem_redundancy(new, maxterms)
return And(*[_convert_to_varsPOS(x, variables) for x in essential])
def _find_predicates(expr):
"""Helper to find logical predicates in BooleanFunctions.
A logical predicate is defined here as anything within a BooleanFunction
that is not a BooleanFunction itself.
"""
if not isinstance(expr, BooleanFunction):
return {expr}
return set().union(*(_find_predicates(i) for i in expr.args))
def simplify_logic(expr, form=None, deep=True):
"""
This function simplifies a boolean function to its simplified version
in SOP or POS form. The return type is an Or or And object in Diofant.
Parameters
==========
expr : string or boolean expression
form : string ('cnf' or 'dnf') or None (default).
If 'cnf' or 'dnf', the simplest expression in the corresponding
normal form is returned; if None, the answer is returned
according to the form with fewest args (in CNF by default).
deep : boolean (default True)
indicates whether to recursively simplify any
non-boolean functions contained within the input.
Examples
========
>>> b = (~x & ~y & ~z) | (~x & ~y & z)
>>> simplify_logic(b)
~x & ~y
>>> sympify(b)
(z & ~x & ~y) | (~x & ~y & ~z)
>>> simplify_logic(_)
~x & ~y
"""
if form == 'cnf' or form == 'dnf' or form is None:
expr = sympify(expr)
if not isinstance(expr, BooleanFunction):
return expr
variables = _find_predicates(expr)
truthtable = []
for t in product([0, 1], repeat=len(variables)):
t = list(t)
if expr.xreplace(dict(zip(variables, t))):
truthtable.append(t)
if deep:
from ..simplify import simplify
variables = [simplify(v) for v in variables]
if form == 'dnf' or \
(form is None and len(truthtable) >= (2 ** (len(variables) - 1))):
return SOPform(variables, truthtable)
elif form == 'cnf' or form is None: # pragma: no branch
return POSform(variables, truthtable)
else:
raise ValueError('form can be cnf or dnf only')
def _finger(eq):
"""
Assign a 5-item fingerprint to each symbol in the equation:
[
# of times it appeared as a Symbol,
# of times it appeared as a Not(symbol),
# of times it appeared as a Symbol in an And or Or,
# of times it appeared as a Not(Symbol) in an And or Or,
sum of the number of arguments with which it appeared,
counting Symbol as 1 and Not(Symbol) as 2
]
>>> eq = Or(And(Not(y), a), And(Not(y), b), And(x, y))
>>> dict(_finger(eq))
{(0, 0, 1, 0, 2): [x],
(0, 0, 1, 0, 3): [a, b],
(0, 0, 1, 2, 8): [y]}
So y and x have unique fingerprints, but a and b do not.
"""
f = eq.free_symbols
d = {fi: [0] * 5 for fi in f}
for a in eq.args:
if a.is_Symbol:
d[a][0] += 1
elif a.is_Not:
d[a.args[0]][1] += 1
else:
o = len(a.args) + sum(isinstance(ai, Not) for ai in a.args)
for ai in a.args:
if ai.is_Symbol:
d[ai][2] += 1
d[ai][-1] += o
else:
d[ai.args[0]][3] += 1
d[ai.args[0]][-1] += o
inv = defaultdict(list)
for k, v in ordered(d.items()):
inv[tuple(v)].append(k)
return inv
def bool_map(bool1, bool2):
"""
Return the simplified version of bool1, and the mapping of variables
that makes the two expressions bool1 and bool2 represent the same
logical behaviour for some correspondence between the variables
of each.
    If more than one mapping of this sort exists, one of them
    is returned.
For example, And(x, y) is logically equivalent to And(a, b) for
the mapping {x: a, y:b} or {x: b, y:a}.
If no such mapping exists, return False.
Examples
========
>>> function1 = SOPform([x, z, y], [[1, 0, 1], [0, 0, 1]])
>>> function2 = SOPform([a, b, c], [[1, 0, 1], [1, 0, 0]])
>>> bool_map(function1, function2)
(y & ~z, {y: a, z: b})
The results are not necessarily unique, but they are canonical. Here,
``(t, z)`` could be ``(a, d)`` or ``(d, a)``:
>>> eq1 = Or(And(Not(y), t), And(Not(y), z), And(x, y))
>>> eq2 = Or(And(Not(c), a), And(Not(c), d), And(b, c))
>>> bool_map(eq1, eq2)
((x & y) | (t & ~y) | (z & ~y), {t: a, x: b, y: c, z: d})
>>> eq = And(Xor(a, b), c, And(c, d))
>>> bool_map(eq, eq.subs({c: x}))
(c & d & (a | b) & (~a | ~b), {a: a, b: b, c: d, d: x})
"""
def match(function1, function2):
"""Return the mapping that equates variables between two
simplified boolean expressions if possible.
By "simplified" we mean that a function has been denested
and is either an And (or an Or) whose arguments are either
symbols (x), negated symbols (Not(x)), or Or (or an And) whose
arguments are only symbols or negated symbols. For example,
And(x, Not(y), Or(w, Not(z))).
Basic.match is not robust enough (see issue sympy/sympy#4835) so this is
a workaround that is valid for simplified boolean expressions.
"""
# do some quick checks
if function1.__class__ != function2.__class__:
return
if len(function1.args) != len(function2.args):
return
if function1.is_Symbol:
return {function1: function2}
# get the fingerprint dictionaries
f1 = _finger(function1)
f2 = _finger(function2)
# more quick checks
if len(f1) != len(f2):
return
# assemble the match dictionary if possible
matchdict = {}
for k in f1:
if k not in f2 or len(f1[k]) != len(f2[k]):
return
for i, x in enumerate(f1[k]):
matchdict[x] = f2[k][i]
return matchdict if matchdict else None
a = simplify_logic(bool1)
b = simplify_logic(bool2)
m = match(a, b)
if m:
return a, m
return m is not None
| 27.042143 | 93 | 0.552107 |
6b188f11fc196c24fb2215879e63681e45f8138c | 5,585 | py | Python | amazon/goods_review_thread.py | JoanLee0826/amazon | 13fcbcb0e9e396af6d4b2287c2a1a06fd602ce98 | [
"MIT"
] | 5 | 2019-09-26T02:39:20.000Z | 2021-04-05T13:19:49.000Z | amazon/goods_review_thread.py | JoanLee0826/amazon | 13fcbcb0e9e396af6d4b2287c2a1a06fd602ce98 | [
"MIT"
] | null | null | null | amazon/goods_review_thread.py | JoanLee0826/amazon | 13fcbcb0e9e396af6d4b2287c2a1a06fd602ce98 | [
"MIT"
] | 3 | 2020-01-08T08:53:32.000Z | 2021-06-04T17:06:34.000Z | import pandas as pd
import requests
from lxml import etree
import re, time, random, datetime
from queue import Queue
import threading
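# NOTE: the Review class was elided from this dump. The sketch below is a
# hedged reconstruction of its shape, inferred from the __main__ block;
# everything beyond the run() entry point is an assumption.
class Review:
    def __init__(self, domain='com'):
        self.base_url = 'https://www.amazon.%s' % domain
        self.session = requests.Session()
        self.queue = Queue()

    def run(self, data):
        # Read the goods list from the Excel export and queue each row; the
        # original spawns worker threads to scrape review pages from here.
        goods = pd.read_excel(data)
        for _, row in goods.iterrows():
            self.queue.put(row)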
if __name__ == '__main__':
data = r"../data/category/Kid's Weighted Blankets_08_28_13_22.xlsx"
review = Review(domain='com')
review.run(data=data)
| 39.055944 | 110 | 0.520859 |
6b199ca74af9fa333d99b4deab665ee6ec19fa62 | 1,032 | py | Python | lumicks/pylake/population/tests/conftest.py | lumicks/pylake | b5875d156d6416793a371198f3f2590fca2be4cd | [
"Apache-2.0"
] | 8 | 2019-02-18T07:56:39.000Z | 2022-03-19T01:14:48.000Z | lumicks/pylake/population/tests/conftest.py | lumicks/pylake | b5875d156d6416793a371198f3f2590fca2be4cd | [
"Apache-2.0"
] | 42 | 2018-11-30T14:40:35.000Z | 2022-03-29T11:43:45.000Z | lumicks/pylake/population/tests/conftest.py | lumicks/pylake | b5875d156d6416793a371198f3f2590fca2be4cd | [
"Apache-2.0"
] | 4 | 2019-01-09T13:45:53.000Z | 2021-07-06T14:06:52.000Z | import pytest
import numpy as np
from pathlib import Path
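# NOTE: the fixtures were elided from this dump. A minimal sketch of a shared
# data fixture (the file name and scope are assumptions):
@pytest.fixture(scope="module")
def reference_data():
    return np.load(Path(__file__).parent / "data" / "trace.npz")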
| 25.8 | 79 | 0.666667 |
6b1abd24dcce5c1b223e996046c73de1b7c697fc | 1,332 | py | Python | Concurrent/PipelineDecomposingTask.py | rafagarciac/ParallelProgrammingPython | bba91984018688f41049fd63961d3b8872876336 | [
"MIT"
] | null | null | null | Concurrent/PipelineDecomposingTask.py | rafagarciac/ParallelProgrammingPython | bba91984018688f41049fd63961d3b8872876336 | [
"MIT"
] | null | null | null | Concurrent/PipelineDecomposingTask.py | rafagarciac/ParallelProgrammingPython | bba91984018688f41049fd63961d3b8872876336 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Handcrafted pipeline example, without using a Pipe class.
"""
__author__ = "Rafael Garca Cullar"
__email__ = "[email protected]"
__copyright__ = "Copyright (c) 2018 Rafael Garca Cullar"
__license__ = "MIT"
from concurrent.futures import ProcessPoolExecutor
import time
import random
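# NOTE: the helper functions used in the __main__ block were elided from this
# dump. The sketches below are hedged reconstructions based on the inline
# comments; the number of stages and the worker body are assumptions.
N_STAGES = 4

def worker(value):
    time.sleep(value)  # simulate a unit of work
    return value

def instanceProcessPool():
    # One single-worker pool per pipeline stage.
    return [ProcessPoolExecutor(max_workers=1) for _ in range(N_STAGES)]

def runThreadsInPipeline(pools):
    def make_callback(stage):
        def pipeline(future):
            # Feed the finished stage's result into the next pool in the chain.
            if stage < len(pools):
                nxt = pools[stage].submit(worker, future.result())
                nxt.add_done_callback(make_callback(stage + 1))
        return pipeline
    pools[0].submit(worker, random.random()).add_done_callback(make_callback(1))

def shutdownPools(pools):
    for pool in pools:
        pool.shutdown()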
if __name__ == "__main__":
__spec__ = None # Fix multiprocessing in Spyder's IPython
pools = instanceProcessPool() # pool = ProcessPoolExecutor([max_workers])
runThreadsInPipeline(pools) # pools[0].submit(worker, random.random()).add_done_callback(pipeline)
shutdownPools(pools) # pool.shutdown() | 27.183673 | 107 | 0.678679 |
6b1e268c000917add1c1379d6ddcd9ab23f2b03b | 245 | py | Python | src/digibujogens/__main__.py | roaet/digibujogens | ab154edda69c091595902dd8b2e3fd273b2e7105 | [
"MIT"
] | null | null | null | src/digibujogens/__main__.py | roaet/digibujogens | ab154edda69c091595902dd8b2e3fd273b2e7105 | [
"MIT"
] | null | null | null | src/digibujogens/__main__.py | roaet/digibujogens | ab154edda69c091595902dd8b2e3fd273b2e7105 | [
"MIT"
] | null | null | null | """ Main application entry point.
python -m digibujogens ...
"""
def main():
""" Execute the application.
"""
raise NotImplementedError
# Make the script executable.
if __name__ == "__main__":
raise SystemExit(main())
| 14.411765 | 33 | 0.636735 |
6b1f0f890c358afb721298af5289d546925c2ca1 | 42,279 | py | Python | lisa/target.py | mrkajetanp/lisa | 15cfbc430f46b59f52a9d13769d0f6791ed6f154 | [
"Apache-2.0"
] | null | null | null | lisa/target.py | mrkajetanp/lisa | 15cfbc430f46b59f52a9d13769d0f6791ed6f154 | [
"Apache-2.0"
] | null | null | null | lisa/target.py | mrkajetanp/lisa | 15cfbc430f46b59f52a9d13769d0f6791ed6f154 | [
"Apache-2.0"
] | null | null | null | # SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2018, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from datetime import datetime
import os
import os.path
import contextlib
import shlex
from collections.abc import Mapping
import copy
import sys
import argparse
import textwrap
import functools
import inspect
import pickle
import tempfile
from types import ModuleType, FunctionType
from operator import itemgetter
import devlib
from devlib.exception import TargetStableError
from devlib.utils.misc import which
from devlib.platform.gem5 import Gem5SimulationPlatform
from lisa.utils import Loggable, HideExekallID, resolve_dotted_name, get_subclasses, import_all_submodules, LISA_HOME, RESULT_DIR, LATEST_LINK, setup_logging, ArtifactPath, nullcontext, ExekallTaggable, memoized
from lisa.assets import ASSETS_PATH
from lisa.conf import SimpleMultiSrcConf, KeyDesc, LevelKeyDesc, TopLevelKeyDesc,Configurable
from lisa.generic import TypedList
from lisa.platforms.platinfo import PlatformInfo
# Make sure all submodules of devlib.module are imported so the classes
# are all created before we list them
import_all_submodules(devlib.module)
_DEVLIB_AVAILABLE_MODULES = {
cls.name
for cls in get_subclasses(devlib.module.Module)
if (
getattr(cls, 'name', None)
# early modules try to connect to UART and do very
# platform-specific things we are not interested in
and getattr(cls, 'stage') != 'early'
)
}
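# NOTE: the ``class Target`` statement, its docstring, __init__ and several
# other methods were elided from this dump; the indented methods below belong
# to that class.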
def get_res_dir(self, name=None, append_time=True, symlink=True):
"""
Returns a directory managed by LISA to store results.
        Use of this function is reserved for interactive sessions or simple scripts.
        Tests should not rely on it, as the created folder will not be tracked
        by any external entity, which means the results would be lost in an
        automated environment.
:param name: Name of the results directory
:type name: str
:param append_time: If True, the current datetime will be appended to
the given ``name``. If ``name`` is None, the directory name will be
the current datetime.
:type append_time: bool
:param symlink: Create a symlink named ``results_latest`` to the newly
created results directory
:type symlink: bool
"""
if isinstance(self._res_dir, ArtifactPath):
root = self._res_dir.root
relative = self._res_dir.relative
else:
root = self._res_dir
relative = ''
return self._get_res_dir(
root=root,
relative=relative,
name=name,
append_time=append_time,
symlink=symlink,
)
def _get_res_dir(self, root, relative, name, append_time, symlink):
logger = self.get_logger()
while True:
time_str = datetime.now().strftime('%Y%m%d_%H%M%S.%f')
if not name:
name = time_str
elif append_time:
name = f"{name}-{time_str}"
# If we were given an ArtifactPath with an existing root, we
# preserve that root so it can be relocated as the caller wants it
res_dir = ArtifactPath(root, os.path.join(relative, name))
# Compute base installation path
logger.info(f'Creating result directory: {res_dir}')
# It will fail if the folder already exists. In that case,
# append_time should be used to ensure we get a unique name.
try:
os.makedirs(res_dir)
break
except FileExistsError:
# If the time is used in the name, there is some hope that the
# next time it will succeed
if append_time:
logger.info('Directory already exists, retrying ...')
continue
else:
raise
if symlink:
res_lnk = os.path.join(LISA_HOME, LATEST_LINK)
with contextlib.suppress(FileNotFoundError):
os.remove(res_lnk)
# There may be a race condition with another tool trying to create
# the link
with contextlib.suppress(FileExistsError):
os.symlink(res_dir, res_lnk)
return res_dir
def install_tools(self, tools):
"""
        Install tools in addition to those specified in the test config 'tools'
        field.
:param tools: The list of names of tools to install
:type tools: list(str)
"""
tools = set(tools) - self._installed_tools
# TODO: compute the checksum of the tool + install location and keep
# that in _installed_tools, so we are sure to be correct
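        # The nested helper was elided from this dump; resolving tool names
        # inside LISA's shipped assets directory is an assumption.
        def bin_path(tool):
            return os.path.join(ASSETS_PATH, 'binaries', self.target.abi, tool)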
for tool in map(bin_path, tools):
self.target.install(tool)
self._installed_tools.add(tool)
def get_tags(self):
return {'board': self.name}
def execute_python(self, f, args, kwargs, **execute_kwargs):
"""
Executes the given Python function ``f`` with the provided positional
and keyword arguments.
The return value or any exception is pickled back and is
returned/raised in the host caller.
:Variable keyword arguments: Forwarded to :meth:`execute` that
will spawn the Python interpreter on the target
.. note:: Closure variables are supported, but mutating them will not
be reflected in the caller's context. Also, functions that are
referred to will be:
* bundled in the script if it is defined in the same module
* referred to by name, assuming it comes from a module that is
installed on the target and that this module is in scope. If
that is not the case, a :exc:`NameError` will be raised.
.. attention:: Decorators are ignored and not applied.
"""
sig = inspect.signature(f)
kwargs = sig.bind(*args, **kwargs).arguments
closure_vars = inspect.getclosurevars(f)
name, code_str = self._get_code(f)
out_tempfiles = tuple()
try:
out_tempfiles = (mktemp(), mktemp())
snippet = self._make_remote_snippet(
name=name,
code_str=code_str,
module=f.__module__,
kwargs=kwargs,
global_vars={
**closure_vars.globals,
**closure_vars.nonlocals,
},
out_tempfiles=out_tempfiles
)
cmd = ['python3', '-c', snippet]
cmd = ' '.join(map(shlex.quote, cmd))
try:
self.execute(cmd, **execute_kwargs)
except Exception as e: # pylint: disable=broad-except
err = e
else:
err = None
return parse_output(out_tempfiles, err)
finally:
for path in out_tempfiles:
self.remove(path)
def remote_func(self, **kwargs):
"""
Decorates a given function to execute remotely using
:meth:`execute_python`::
target = Target(...)
@target.remote_func(timeout=42)
def foo(x, y):
return x + y
# Execute the function on the target transparently
val = foo(1, y=2)
:Variable keyword arguments: Forwarded to :meth:`execute` that
will spawn the Python interpreter on the target
"""
return wrapper_param
class Gem5SimulationPlatformWrapper(Gem5SimulationPlatform):
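    # The class body was elided from this dump; judging by its name, it
    # adapts Gem5SimulationPlatform construction to LISA's configuration.
    pass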
# vim :set tabstop=4 shiftwidth=4 expandtab textwidth=80
| 36.636915 | 211 | 0.584404 |
6b1f450e0afe4b703c2e85c366b7453eddf2730b | 1,289 | py | Python | iota/commands/core/get_node_info.py | EasonC13/iota.py | f596c1ac0d9bcbceda1cf6109cd921943a6599b3 | [
"MIT"
] | 347 | 2016-12-23T14:28:06.000Z | 2019-09-30T13:46:30.000Z | iota/commands/core/get_node_info.py | EasonC13/iota.py | f596c1ac0d9bcbceda1cf6109cd921943a6599b3 | [
"MIT"
] | 194 | 2016-12-22T21:22:47.000Z | 2019-10-01T09:01:16.000Z | iota/commands/core/get_node_info.py | EasonC13/iota.py | f596c1ac0d9bcbceda1cf6109cd921943a6599b3 | [
"MIT"
] | 147 | 2017-01-08T13:14:47.000Z | 2019-10-01T22:27:31.000Z | import filters as f
from iota import TransactionHash, Address
from iota.commands import FilterCommand, RequestFilter, ResponseFilter
from iota.filters import Trytes
__all__ = [
'GetNodeInfoCommand',
]
| 28.644444 | 73 | 0.6827 |
6b2003bf580dcb3f7c6fda11b1276e5b7d0fe837 | 4,925 | py | Python | Aplicacion/Presentacion/views.py | Juandiegordp/TPI | 427266f00745e9d9678110c1d01d3be4febca673 | [
"MIT"
] | null | null | null | Aplicacion/Presentacion/views.py | Juandiegordp/TPI | 427266f00745e9d9678110c1d01d3be4febca673 | [
"MIT"
] | null | null | null | Aplicacion/Presentacion/views.py | Juandiegordp/TPI | 427266f00745e9d9678110c1d01d3be4febca673 | [
"MIT"
] | null | null | null | from Negocio import controller
import forms, functions
from flask import Flask, render_template, request, redirect, url_for, flash
| 40.702479 | 236 | 0.718376 |
6b204b59051969cf45dc90d85f76793faabc4ec6 | 644 | py | Python | evalme/tests/test_old_format.py | heartexlabs/label-studio-evalme | 48f7a5226346b6e074edb4717b84122cc089bc7a | [
"MIT"
] | 3 | 2020-04-11T13:01:57.000Z | 2021-05-19T13:53:16.000Z | evalme/tests/test_old_format.py | heartexlabs/label-studio-evalme | 48f7a5226346b6e074edb4717b84122cc089bc7a | [
"MIT"
] | 28 | 2020-05-21T01:34:44.000Z | 2022-03-21T15:39:16.000Z | evalme/tests/test_old_format.py | heartexlabs/label-studio-evalme | 48f7a5226346b6e074edb4717b84122cc089bc7a | [
"MIT"
] | 1 | 2020-05-21T17:43:26.000Z | 2020-05-21T17:43:26.000Z | from evalme.matcher import Matcher
| 25.76 | 53 | 0.723602 |
6b20937e56fc436965d29a3e4d7196bce1d5cd54 | 30,942 | py | Python | behave/runner.py | wombat70/behave | c54493b0531795d946ac6754bfc643248cf3056a | [
"BSD-2-Clause"
] | 13 | 2019-10-03T19:15:14.000Z | 2019-10-16T02:01:57.000Z | behave/runner.py | wombat70/behave | c54493b0531795d946ac6754bfc643248cf3056a | [
"BSD-2-Clause"
] | null | null | null | behave/runner.py | wombat70/behave | c54493b0531795d946ac6754bfc643248cf3056a | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: UTF-8 -*-
"""
This module provides Runner class to run behave feature files (or model elements).
"""
from __future__ import absolute_import, print_function, with_statement
import contextlib
import os.path
import sys
import warnings
import weakref
import six
from behave._types import ExceptionUtil
from behave.capture import CaptureController
from behave.exception import ConfigError
from behave.formatter._registry import make_formatters
from behave.runner_util import \
collect_feature_locations, parse_features, \
exec_file, load_step_modules, PathManager
from behave.step_registry import registry as the_step_registry
from enum import Enum
if six.PY2:
# -- USE PYTHON3 BACKPORT: With unicode traceback support.
import traceback2 as traceback
else:
import traceback
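# NOTE: the ``class Context`` statement (behave's runtime context object),
# its docstring, and several module-level helpers it relies on (ContextMode,
# use_context_with_mode, ...) were elided from this dump; the methods below
# belong to that class.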
def _push(self, layer_name=None):
"""Push a new layer on the context stack.
HINT: Use layer_name values: "scenario", "feature", "testrun".
:param layer_name: Layer name to use (or None).
"""
initial_data = {"@cleanups": []}
if layer_name:
initial_data["@layer"] = layer_name
self._stack.insert(0, initial_data)
def _pop(self):
"""Pop the current layer from the context stack.
Performs any pending cleanups, registered for this layer.
"""
try:
self._do_cleanups()
finally:
# -- ENSURE: Layer is removed even if cleanup-errors occur.
self._stack.pop(0)
def _use_with_behave_mode(self):
"""Provides a context manager for using the context in BEHAVE mode."""
return use_context_with_mode(self, ContextMode.BEHAVE)
def use_with_user_mode(self):
"""Provides a context manager for using the context in USER mode."""
return use_context_with_mode(self, ContextMode.USER)
def user_mode(self):
warnings.warn("Use 'use_with_user_mode()' instead",
PendingDeprecationWarning, stacklevel=2)
return self.use_with_user_mode()
def _set_root_attribute(self, attr, value):
for frame in self.__dict__["_stack"]:
if frame is self.__dict__["_root"]:
continue
if attr in frame:
record = self.__dict__["_record"][attr]
params = {
"attr": attr,
"filename": record[0],
"line": record[1],
"function": record[3],
}
self._emit_warning(attr, params)
self.__dict__["_root"][attr] = value
if attr not in self._origin:
self._origin[attr] = self._mode
def execute_steps(self, steps_text):
"""The steps identified in the "steps" text string will be parsed and
executed in turn just as though they were defined in a feature file.
If the execute_steps call fails (either through error or failure
assertion) then the step invoking it will need to catch the resulting
exceptions.
:param steps_text: Text with the Gherkin steps to execute (as string).
:returns: True, if the steps executed successfully.
:raises: AssertionError, if a step failure occurs.
:raises: ValueError, if invoked without a feature context.
"""
assert isinstance(steps_text, six.text_type), "Steps must be unicode."
if not self.feature:
raise ValueError("execute_steps() called outside of feature")
# -- PREPARE: Save original context data for current step.
# Needed if step definition that called this method uses .table/.text
original_table = getattr(self, "table", None)
original_text = getattr(self, "text", None)
self.feature.parser.variant = "steps"
steps = self.feature.parser.parse_steps(steps_text)
with self._use_with_behave_mode():
for step in steps:
passed = step.run(self._runner, quiet=True, capture=False)
if not passed:
# -- ISSUE #96: Provide more substep info to diagnose problem.
step_line = u"%s %s" % (step.keyword, step.name)
message = "%s SUB-STEP: %s" % \
(step.status.name.upper(), step_line)
if step.error_message:
message += "\nSubstep info: %s\n" % step.error_message
message += u"Traceback (of failed substep):\n"
message += u"".join(traceback.format_tb(step.exc_traceback))
# message += u"\nTraceback (of context.execute_steps()):"
assert False, message
# -- FINALLY: Restore original context data for current step.
self.table = original_table
self.text = original_text
return True
def add_cleanup(self, cleanup_func, *args, **kwargs):
"""Adds a cleanup function that is called when :meth:`Context._pop()`
is called. This is intended for user-cleanups.
:param cleanup_func: Callable function
:param args: Args for cleanup_func() call (optional).
:param kwargs: Kwargs for cleanup_func() call (optional).
"""
# MAYBE:
assert callable(cleanup_func), "REQUIRES: callable(cleanup_func)"
assert self._stack
        if args or kwargs:
            # The original body was elided from this dump; binding the extra
            # arguments in a closure is the natural reconstruction.
            def internal_cleanup_func(func=cleanup_func, args=args, kwargs=kwargs):
                return func(*args, **kwargs)
        else:
            internal_cleanup_func = cleanup_func
current_frame = self._stack[0]
if cleanup_func not in current_frame["@cleanups"]:
# -- AVOID DUPLICATES:
current_frame["@cleanups"].append(internal_cleanup_func)
def path_getrootdir(path):
"""
Extract rootdir from path in a platform independent way.
POSIX-PATH EXAMPLE:
rootdir = path_getrootdir("/foo/bar/one.feature")
assert rootdir == "/"
WINDOWS-PATH EXAMPLE:
rootdir = path_getrootdir("D:\\foo\\bar\\one.feature")
assert rootdir == r"D:\"
"""
drive, _ = os.path.splitdrive(path)
if drive:
# -- WINDOWS:
return drive + os.path.sep
# -- POSIX:
return os.path.sep
class ModelRunner(object):
"""
Test runner for a behave model (features).
Provides the core functionality of a test runner and
the functional API needed by model elements.
.. attribute:: aborted
This is set to true when the user aborts a test run
(:exc:`KeyboardInterrupt` exception). Initially: False.
Stored as derived attribute in :attr:`Context.aborted`.
"""
# pylint: disable=too-many-instance-attributes
# @property
# @aborted.setter
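    # NOTE: the _get_aborted/_set_aborted accessor methods referenced below
    # were elided from this dump.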
aborted = property(_get_aborted, _set_aborted,
doc="Indicates that test run is aborted by the user.")
def run(self):
"""
Implements the run method by running the model.
"""
self.context = Context(self)
return self.run_model()
class Runner(ModelRunner):
"""
Standard test runner for behave:
* setup paths
* loads environment hooks
* loads step definitions
* select feature files, parses them and creates model (elements)
"""
def before_all_default_hook(self, context):
"""
Default implementation for :func:`before_all()` hook.
Setup the logging subsystem based on the configuration data.
"""
# pylint: disable=no-self-use
context.config.setup_logging()
| 36.704626 | 93 | 0.598151 |
6b20e0c8f16f54d5573a17cd7bb380c1b08265f4 | 2,645 | py | Python | 01_P/P_2_1_1_02/main.py | genfifth/generative-design_Code-Package-Python-Mode | 93fc8435933aa2e9329de77a1177bb34e63dd1c4 | [
"BSD-2-Clause"
] | 1 | 2019-04-23T16:26:31.000Z | 2019-04-23T16:26:31.000Z | 01_P/P_2_1_1_02/main.py | QuantumNovice/generative-design_Code-Package-Python-Mode | 93fc8435933aa2e9329de77a1177bb34e63dd1c4 | [
"BSD-2-Clause"
] | null | null | null | 01_P/P_2_1_1_02/main.py | QuantumNovice/generative-design_Code-Package-Python-Mode | 93fc8435933aa2e9329de77a1177bb34e63dd1c4 | [
"BSD-2-Clause"
] | 1 | 2019-01-31T16:05:19.000Z | 2019-01-31T16:05:19.000Z | add_library('pdf')
import random
from datetime import datetime
tileCount = 20
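# NOTE: the setup()/draw() functions were elided from this dump. The sketch
# below is a hedged reconstruction in Processing's Python mode, following the
# diagonal-line grid idea of the P_2_1_1 example family (an assumption).
def setup():
    size(600, 600)
    strokeCap(ROUND)

def draw():
    background(255)
    for gridY in range(tileCount):
        for gridX in range(tileCount):
            posX = width / tileCount * gridX
            posY = height / tileCount * gridY
            if random.randint(0, 1) == 0:
                line(posX, posY, posX + width / tileCount, posY + height / tileCount)
            else:
                line(posX, posY + height / tileCount, posX + width / tileCount, posY)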
| 27.552083 | 93 | 0.565974 |
6b21fe4bf1085238cec917c37ffada209e34d9c0 | 41,499 | py | Python | core/dbt/contracts/graph/manifest.py | peiwangdb/dbt | 30e72bc5e2ae950ddf0a1230b0c6406b889bea1a | [
"Apache-2.0"
] | null | null | null | core/dbt/contracts/graph/manifest.py | peiwangdb/dbt | 30e72bc5e2ae950ddf0a1230b0c6406b889bea1a | [
"Apache-2.0"
] | 1 | 2021-08-14T03:52:23.000Z | 2021-08-14T03:52:23.000Z | core/dbt/contracts/graph/manifest.py | peiwangdb/dbt | 30e72bc5e2ae950ddf0a1230b0c6406b889bea1a | [
"Apache-2.0"
] | 1 | 2021-08-14T03:50:50.000Z | 2021-08-14T03:50:50.000Z | import enum
from dataclasses import dataclass, field
from itertools import chain, islice
from mashumaro import DataClassMessagePackMixin
from multiprocessing.synchronize import Lock
from typing import (
Dict, List, Optional, Union, Mapping, MutableMapping, Any, Set, Tuple,
TypeVar, Callable, Iterable, Generic, cast, AbstractSet, ClassVar
)
from typing_extensions import Protocol
from uuid import UUID
from dbt.contracts.graph.compiled import (
CompileResultNode, ManifestNode, NonSourceCompiledNode, GraphMemberNode
)
from dbt.contracts.graph.parsed import (
ParsedMacro, ParsedDocumentation, ParsedNodePatch, ParsedMacroPatch,
ParsedSourceDefinition, ParsedExposure, HasUniqueID,
UnpatchedSourceDefinition, ManifestNodes
)
from dbt.contracts.graph.unparsed import SourcePatch
from dbt.contracts.files import SourceFile, SchemaSourceFile, FileHash, AnySourceFile
from dbt.contracts.util import (
BaseArtifactMetadata, SourceKey, ArtifactMixin, schema_version
)
from dbt.dataclass_schema import dbtClassMixin
from dbt.exceptions import (
CompilationException,
raise_duplicate_resource_name, raise_compiler_error, warn_or_error,
raise_duplicate_patch_name,
raise_duplicate_macro_patch_name, raise_duplicate_source_patch_name,
)
from dbt.helper_types import PathSet
from dbt.logger import GLOBAL_LOGGER as logger
from dbt.node_types import NodeType
from dbt.ui import line_wrap_message
from dbt import flags
from dbt import tracking
import dbt.utils
NodeEdgeMap = Dict[str, List[str]]
PackageName = str
DocName = str
RefName = str
UniqueID = str
def _search_packages(
current_project: str,
node_package: str,
target_package: Optional[str] = None,
) -> List[Optional[str]]:
if target_package is not None:
return [target_package]
elif current_project == node_package:
return [current_project, None]
else:
return [current_project, node_package, None]
def _sort_values(dct):
"""Given a dictionary, sort each value. This makes output deterministic,
which helps for tests.
"""
return {k: sorted(v) for k, v in dct.items()}
def build_node_edges(nodes: List[ManifestNode]):
"""Build the forward and backward edges on the given list of ParsedNodes
and return them as two separate dictionaries, each mapping unique IDs to
lists of edges.
"""
backward_edges: Dict[str, List[str]] = {}
# pre-populate the forward edge dict for simplicity
forward_edges: Dict[str, List[str]] = {n.unique_id: [] for n in nodes}
for node in nodes:
backward_edges[node.unique_id] = node.depends_on_nodes[:]
for unique_id in node.depends_on_nodes:
if unique_id in forward_edges.keys():
forward_edges[unique_id].append(node.unique_id)
return _sort_values(forward_edges), _sort_values(backward_edges)
# Build a map of children of macros
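# NOTE: several definitions were elided from this dump (the function building
# the macro-children map, MacroCandidate, Searchable, Disabled, MacroMethods,
# and the Manifest class itself); the type variables and aliases below refer
# to them.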
M = TypeVar('M', bound=MacroCandidate)
N = TypeVar('N', bound=Searchable)
D = TypeVar('D')
MaybeDocumentation = Optional[ParsedDocumentation]
MaybeParsedSource = Optional[Union[
ParsedSourceDefinition,
Disabled[ParsedSourceDefinition],
]]
MaybeNonSource = Optional[Union[
ManifestNode,
Disabled[ManifestNode]
]]
T = TypeVar('T', bound=GraphMemberNode)
def _update_into(dest: MutableMapping[str, T], new_item: T):
"""Update dest to overwrite whatever is at dest[new_item.unique_id] with
new_itme. There must be an existing value to overwrite, and they two nodes
must have the same original file path.
"""
unique_id = new_item.unique_id
if unique_id not in dest:
raise dbt.exceptions.RuntimeException(
f'got an update_{new_item.resource_type} call with an '
f'unrecognized {new_item.resource_type}: {new_item.unique_id}'
)
existing = dest[unique_id]
if new_item.original_file_path != existing.original_file_path:
raise dbt.exceptions.RuntimeException(
f'cannot update a {new_item.resource_type} to have a new file '
f'path!'
)
dest[unique_id] = new_item
# This contains macro methods that are in both the Manifest
# and the MacroManifest
class MacroManifest(MacroMethods):
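    # The body was elided from this dump; at minimum it must store the parsed
    # macros so that the MacroMethods lookups work (a hedged reconstruction).
    def __init__(self, macros):
        self.macros = macros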
AnyManifest = Union[Manifest, MacroManifest]
K_T = TypeVar('K_T')
V_T = TypeVar('V_T')
| 34.669173 | 98 | 0.644738 |
6b221317ab066084e6a6681c2759fb8660e93351 | 11,665 | py | Python | openGaussBase/testcase/SQL/DCL/Alter_Default_Privileges/Opengauss_Function_Alter_Default_Privileges_Case0016.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | openGaussBase/testcase/SQL/DCL/Alter_Default_Privileges/Opengauss_Function_Alter_Default_Privileges_Case0016.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | openGaussBase/testcase/SQL/DCL/Alter_Default_Privileges/Opengauss_Function_Alter_Default_Privileges_Case0016.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | """
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type   : functional: ALTER DEFAULT PRIVILEGES
Case Name   : sysadmin user executes ALTER DEFAULT PRIVILEGES
Description :
    1. The initial user executes the ALTER DEFAULT PRIVILEGES statement:
        1.1. run the ALTER statement
        1.2. check the result
    2. A sysadmin user executes the ALTER DEFAULT PRIVILEGES statement:
        2.1. create sysadmin user default016_01
        2.2. default016_01 executes the ALTER statement
        2.3. check the result
    Cleanup: restore the altered default privileges
Expect      :
    1. The initial user executes the ALTER DEFAULT PRIVILEGES statement:
        1.1. the ALTER statement succeeds
        1.2. the check passes
    2. A sysadmin user executes the ALTER DEFAULT PRIVILEGES statement:
        2.1. user default016_01 is created successfully
        2.2. default016_01's ALTER statement succeeds
        2.3. the check passes
    Cleanup: the altered default privileges are restored
History     :
"""
import sys
import unittest
from yat.test import macro
from yat.test import Node
sys.path.append(sys.path[0] + "/../")
from testcase.utils.Logger import Logger
from testcase.utils.Constant import Constant
from testcase.utils.CommonSH import CommonSH
logger = Logger()
commonsh = CommonSH('dbuser') | 55.547619 | 184 | 0.598028 |
6b2234f49a8b57fe4bf6fd97f1ef5ca5137dfade | 2,342 | py | Python | Apps/phdigitalshadows/dsapi/service/infrastructure_service.py | ryanbsaunders/phantom-apps | 1befda793a08d366fbd443894f993efb1baf9635 | [
"Apache-2.0"
] | 74 | 2019-10-22T02:00:53.000Z | 2022-03-15T12:56:13.000Z | Apps/phdigitalshadows/dsapi/service/infrastructure_service.py | ryanbsaunders/phantom-apps | 1befda793a08d366fbd443894f993efb1baf9635 | [
"Apache-2.0"
] | 375 | 2019-10-22T20:53:50.000Z | 2021-11-09T21:28:43.000Z | Apps/phdigitalshadows/dsapi/service/infrastructure_service.py | ryanbsaunders/phantom-apps | 1befda793a08d366fbd443894f993efb1baf9635 | [
"Apache-2.0"
] | 175 | 2019-10-23T15:30:42.000Z | 2021-11-05T21:33:31.000Z | # File: infrastructure_service.py
#
# Licensed under Apache 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)
#
from .ds_base_service import DSBaseService
from .ds_find_service import DSFindService
from ..model.infrastructure import Infrastructure
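# NOTE: the service class was elided from this dump. The sketch below is a
# hedged reconstruction; the endpoint path and helper method names are
# assumptions, not the real Digital Shadows API.
class InfrastructureService(DSFindService):
    def __init__(self, ds_api_key, ds_api_secret_key, proxy=None):
        super(InfrastructureService, self).__init__(ds_api_key, ds_api_secret_key, proxy=proxy)

    def find_all(self, view=None):
        # Delegate to a generic find helper, hydrating Infrastructure
        # models from the raw records (helper name is an assumption).
        return self._find_all('/api/ip-ports', view, Infrastructure)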
| 32.985915 | 106 | 0.599488 |
6b226704f6cb4e708962ce8718453e73c2ce6810 | 5,562 | py | Python | src/find_genes_by_location/find_genes_by_location.py | NCBI-Codeathons/Identify-antiphage-defense-systems-in-the-bacterial-pangenome | b1eb83118268ada50e90f979347e47e055a51029 | [
"MIT"
] | 3 | 2020-07-06T18:23:47.000Z | 2020-07-15T06:41:44.000Z | src/find_genes_by_location/find_genes_by_location.py | NCBI-Codeathons/Identify-antiphage-defense-systems-in-the-bacterial-pangenome | b1eb83118268ada50e90f979347e47e055a51029 | [
"MIT"
] | 5 | 2020-07-09T12:15:07.000Z | 2020-07-10T17:23:50.000Z | src/find_genes_by_location/find_genes_by_location.py | NCBI-Codeathons/Identify-antiphage-defense-systems-in-the-bacterial-pangenome | b1eb83118268ada50e90f979347e47e055a51029 | [
"MIT"
] | 3 | 2020-07-06T18:25:24.000Z | 2020-07-06T19:50:56.000Z | import argparse
from collections import defaultdict
import csv
from dataclasses import dataclass, field
from enum import Enum, unique, auto
import os
import sys
import tempfile
import yaml
import zipfile
import gffutils
from google.protobuf import json_format
from ncbi.datasets.v1alpha1 import dataset_catalog_pb2
from ncbi.datasets.v1alpha1.reports import assembly_pb2
from ncbi.datasets.reports.report_reader import DatasetsReportReader
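# NOTE: the FindGenesByLoc class was elided from this dump. The sketch below
# is a hedged reconstruction of its shape, inferred from the __main__ block;
# the argument names and the run() flow are assumptions.
class FindGenesByLoc:
    def __init__(self):
        parser = argparse.ArgumentParser(
            description='Find genes by location in an NCBI Datasets package')
        parser.add_argument('--package', help='path to a datasets zip package')
        self.args = parser.parse_args()

    def run(self):
        # The real implementation reads the dataset catalog and the GFF3
        # annotation out of the zip package (see the imports above); the
        # gene-by-location lookup itself is elided here.
        with zipfile.ZipFile(self.args.package) as package:
            print('\n'.join(package.namelist()))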
if __name__ == '__main__':
FindGenesByLoc().run()
| 36.834437 | 141 | 0.670262 |
6b22b1c8cec3284d72a98eea77ac255711ba8ec7 | 811 | py | Python | web/backend/backend_django/apps/capacity/models.py | tOverney/ADA-Project | 69221210b1f4f13f6979123c6a7a1a9813ea18e5 | [
"Apache-2.0"
] | null | null | null | web/backend/backend_django/apps/capacity/models.py | tOverney/ADA-Project | 69221210b1f4f13f6979123c6a7a1a9813ea18e5 | [
"Apache-2.0"
] | 1 | 2016-11-04T01:03:21.000Z | 2016-11-04T10:10:06.000Z | web/backend/backend_django/apps/capacity/models.py | tOverney/ADA-Project | 69221210b1f4f13f6979123c6a7a1a9813ea18e5 | [
"Apache-2.0"
] | null | null | null | from django.db import models
from multigtfs.models import (
Block, Fare, FareRule, Feed, Frequency, Route, Service, ServiceDate, Shape,
ShapePoint, Stop, StopTime, Trip, Agency)
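# NOTE: the model definitions were elided from this dump. A minimal sketch of
# a capacity model tied to the GTFS schema imported above (field names are
# assumptions):
class TripCapacity(models.Model):
    trip = models.ForeignKey(Trip, on_delete=models.CASCADE)
    passengers = models.PositiveIntegerField(default=0)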
| 32.44 | 79 | 0.705302 |
6b22ef64a2b0a516b9dfe5541aa85e77c40a249c | 106 | py | Python | apps/interface/settings/config.py | rainydaygit/testtcloudserver | 8037603efe4502726a4d794fb1fc0a3f3cc80137 | [
"MIT"
] | 349 | 2020-08-04T10:21:01.000Z | 2022-03-23T08:31:29.000Z | apps/interface/settings/config.py | rainydaygit/testtcloudserver | 8037603efe4502726a4d794fb1fc0a3f3cc80137 | [
"MIT"
] | 2 | 2021-01-07T06:17:05.000Z | 2021-04-01T06:01:30.000Z | apps/interface/settings/config.py | rainydaygit/testtcloudserver | 8037603efe4502726a4d794fb1fc0a3f3cc80137 | [
"MIT"
] | 70 | 2020-08-24T06:46:14.000Z | 2022-03-25T13:23:27.000Z | try:
from public_config import *
except ImportError:
pass
PORT = 9028
SERVICE_NAME = 'interface'
| 13.25 | 31 | 0.716981 |
6b22ef6d7e0edee04fb293d0c3cd3eec5a122d66 | 1,307 | py | Python | api/api/pokemon/views.py | farnswj1/PokemonAPI | b6fc4dfe8c0fde6b4560455dd37e61b6a0d2ea27 | [
"MIT"
] | null | null | null | api/api/pokemon/views.py | farnswj1/PokemonAPI | b6fc4dfe8c0fde6b4560455dd37e61b6a0d2ea27 | [
"MIT"
] | null | null | null | api/api/pokemon/views.py | farnswj1/PokemonAPI | b6fc4dfe8c0fde6b4560455dd37e61b6a0d2ea27 | [
"MIT"
] | null | null | null | from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_page
from rest_framework.generics import (
ListAPIView,
RetrieveAPIView,
CreateAPIView,
UpdateAPIView,
DestroyAPIView
)
from .models import Pokemon
from .serializers import PokemonSerializer
from .filters import PokemonFilterSet
# Create your views here.
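# NOTE: the view classes were elided from this dump. The sketch below is a
# hedged reconstruction using the imported generics; the cache timeout and
# the exact set of views are assumptions.
@method_decorator(cache_page(60 * 15), name='dispatch')
class PokemonListAPIView(ListAPIView):
    queryset = Pokemon.objects.all()
    serializer_class = PokemonSerializer
    filterset_class = PokemonFilterSet


class PokemonDetailAPIView(RetrieveAPIView):
    queryset = Pokemon.objects.all()
    serializer_class = PokemonSerializer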
| 27.229167 | 52 | 0.75746 |
6b23ef94d3d317043d5fc3a13457402a61c1b88c | 9,546 | py | Python | plugins/action/normalize_gitlab_cfg.py | sma-de/ansible-collections-gitlab | 5da99b04722fc016d3e8589635fcbb3579dcfda2 | [
"BSD-3-Clause"
] | null | null | null | plugins/action/normalize_gitlab_cfg.py | sma-de/ansible-collections-gitlab | 5da99b04722fc016d3e8589635fcbb3579dcfda2 | [
"BSD-3-Clause"
] | null | null | null | plugins/action/normalize_gitlab_cfg.py | sma-de/ansible-collections-gitlab | 5da99b04722fc016d3e8589635fcbb3579dcfda2 | [
"BSD-3-Clause"
] | null | null | null |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleOptionsError
from ansible.module_utils.six import iteritems, string_types
from ansible_collections.smabot.base.plugins.module_utils.plugins.config_normalizing.base import ConfigNormalizerBaseMerger, NormalizerBase, NormalizerNamed, DefaultSetterConstant, DefaultSetterOtherKey
from ansible_collections.smabot.base.plugins.module_utils.utils.dicting import setdefault_none, SUBDICT_METAKEY_ANY, get_subdict
from ansible_collections.smabot.base.plugins.module_utils.utils.utils import ansible_assert
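# NOTE: the normalizer classes (ConfigRootNormalizer and its helpers, built
# on NormalizerBase) were elided from this dump; ActionModule below wires
# them into the merger base class.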
class ActionModule(ConfigNormalizerBaseMerger):
def __init__(self, *args, **kwargs):
super(ActionModule, self).__init__(ConfigRootNormalizer(self),
*args, default_merge_vars=['gitlab_cfg_defaults'],
extra_merge_vars_ans=['extra_gitlab_config_maps'],
**kwargs
)
self._supports_check_mode = False
self._supports_async = False
| 27.589595 | 202 | 0.626859 |
6b240627551477bf7c6382038b724993aeef7b0b | 1,416 | py | Python | microservices/validate/tools/dynamodb.py | clodonil/pipeline_aws_custom | 8ca517d0bad48fe528461260093f0035f606f9be | [
"Apache-2.0"
] | null | null | null | microservices/validate/tools/dynamodb.py | clodonil/pipeline_aws_custom | 8ca517d0bad48fe528461260093f0035f606f9be | [
"Apache-2.0"
] | null | null | null | microservices/validate/tools/dynamodb.py | clodonil/pipeline_aws_custom | 8ca517d0bad48fe528461260093f0035f606f9be | [
"Apache-2.0"
] | null | null | null | """
Integration tools for DynamoDB
"""
import boto3
import botocore
import logging
import datetime
import json
import copy
import time
import os
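# NOTE: the helper class was elided from this dump. The sketch below is a
# hedged reconstruction of a minimal DynamoDB accessor with boto3; the class
# and method names are assumptions.
class DynamoDB:
    def __init__(self, table_name, region_name=None):
        region = region_name or os.environ.get('AWS_REGION', 'us-east-1')
        self.table = boto3.resource('dynamodb', region_name=region).Table(table_name)

    def get_item(self, key):
        try:
            return self.table.get_item(Key=key).get('Item')
        except botocore.exceptions.ClientError as err:
            logging.error('DynamoDB get_item failed: %s', err)
            return None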
| 24.413793 | 70 | 0.639124 |
6b24d64a863b721f8c91dae3e401a33b896a0b31 | 853 | py | Python | scrapy_ddiy/spiders/GlidedSky/glided_sky_001.py | LZC6244/scrapy_ddiy | 1bf7cdd382afd471af0bf7069b377fb364dc4730 | [
"MIT"
] | 9 | 2021-05-17T02:55:16.000Z | 2022-03-28T08:36:50.000Z | scrapy_ddiy/spiders/GlidedSky/glided_sky_001.py | LZC6244/scrapy_ddiy | 1bf7cdd382afd471af0bf7069b377fb364dc4730 | [
"MIT"
] | null | null | null | scrapy_ddiy/spiders/GlidedSky/glided_sky_001.py | LZC6244/scrapy_ddiy | 1bf7cdd382afd471af0bf7069b377fb364dc4730 | [
"MIT"
] | 1 | 2022-01-23T06:28:31.000Z | 2022-01-23T06:28:31.000Z | # -*- coding: utf-8 -*-
from scrapy import Request
from scrapy_ddiy.utils.spiders.ddiy_base import DdiyBaseSpider
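# NOTE: the spider class was elided from this dump. The sketch below is a
# hedged reconstruction of a GlidedSky level-001 spider on DdiyBaseSpider;
# the URL, selectors and summing logic are assumptions about the challenge.
class GlidedSky001Spider(DdiyBaseSpider):
    name = 'glided_sky_001'
    start_urls = ['http://www.glidedsky.com/level/web/crawler-basic-1']

    def parse(self, response):
        numbers = response.css('div.col-md-1::text').getall()
        total = sum(int(n.strip()) for n in numbers if n.strip())
        self.logger.info('Sum of all numbers on the page: %s', total)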
| 34.12 | 106 | 0.657679 |
6b25fa954e2aca18ad4da138b448689002685921 | 5,125 | py | Python | datasets/celeba/celeba_dataset.py | google/joint_vae | 984f456d1a38c6b27e23433aef241dea56f53384 | [
"Apache-2.0"
] | 35 | 2017-12-15T12:58:15.000Z | 2020-09-27T05:48:50.000Z | datasets/celeba/celeba_dataset.py | google/joint_vae | 984f456d1a38c6b27e23433aef241dea56f53384 | [
"Apache-2.0"
] | null | null | null | datasets/celeba/celeba_dataset.py | google/joint_vae | 984f456d1a38c6b27e23433aef241dea56f53384 | [
"Apache-2.0"
] | 11 | 2017-12-08T06:07:30.000Z | 2021-10-31T10:36:05.000Z | #
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Provides data for the mnist with attributes dataset.
Provide data loading utilities for an augmented version of the
MNIST dataset which contains the following attributes:
1. Location (digits are translated on a canvas and placed around
one of four locations/regions in the canvas). Each location
is a gaussian placed at four quadrants of the canvas.
2. Scale (We vary scale from 0.4 to 1.0), with two gaussians
placed at 0.5 +- 0.1 and 0.9 +- 0.1 repsectively.
3. Orientation: we vary orientation from -90 to +90 degrees,
sampling actual values from gaussians at +30 +- 10 and
-30 +-10. On a third of the occasions we dont orient the
digit at all which means a rotation of 0 degrees.
The original data after transformations is binarized as per the
procedure described in the following paper:
Salakhutdinov, Ruslan, and Iain Murray. 2008. ``On the Quantitative Analysis of
Deep Belief Networks.'' In Proceedings of the 25th International Conference on
Machine Learning, 872-79.
Author: vrama@
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from tensorflow.contrib.slim.python.slim.data import dataset
from tensorflow.contrib.slim.python.slim.data import tfexample_decoder
from datasets.celeba.image_decoder import ImageDecodeProcess
# Only provides option to load the binarized version of the dataset.
_FILE_PATTERN = '%s-*'
_SPLIT_TYPE = 'iid'
_DATASET_DIR = '/srv/share/datasets/celeba_for_tf_ig'
_SPLITS_TO_SIZES = {'train': 162770, 'val': 19867, 'test': 19962}
_ITEMS_TO_DESCRIPTIONS = {
'image': 'A [218 x 178 x 3] RGB image.',
'labels': 'Attributes corresponding to the image.',
}
_NUM_CLASSES_PER_ATTRIBUTE = tuple([2]*18)
def get_split(split_name='train',
split_type="iid",
dataset_dir=None,
image_length=64,
num_classes_per_attribute=None):
"""Gets a dataset tuple with instructions for reading 2D shapes data.
Args:
split_name: A train/test split name.
split_type: str, type of split being loaded "iid" or "comp"
dataset_dir: The base directory of the dataset sources.
num_classes_per_attribute: The number of labels for the classfication
problem corresponding to each attribute. For example, if the first
attribute is "shape" and there are three possible shapes, then
then provide a value 3 in the first index, and so on.
Returns:
A `Dataset` namedtuple.
metadata: A dictionary with some metadata about the dataset we just
constructed.
Raises:
ValueError: if `split_name` is not a valid train/test split.
"""
if split_name not in _SPLITS_TO_SIZES:
raise ValueError('split name %s was not recognized.' % split_name)
  if split_type != "iid":  # string equality, not 'is' identity
raise ValueError("Only IID split available for CelebA.")
if num_classes_per_attribute is None:
num_classes_per_attribute = _NUM_CLASSES_PER_ATTRIBUTE
if dataset_dir is None or dataset_dir == '':
dataset_dir = _DATASET_DIR
# Load attribute label map file.
label_map_json = os.path.join(dataset_dir,
'attribute_label_map.json')
file_pattern = os.path.join(dataset_dir, _FILE_PATTERN % split_name)
tf.logging.info('Loading from %s file.' % (file_pattern))
keys_to_features = {
'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
'image/format': tf.FixedLenFeature((), tf.string, default_value='raw'),
'image/labels': tf.FixedLenFeature([len(num_classes_per_attribute)], tf.int64),
}
# TODO(vrama): See
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/slim/python/slim/data/tfexample_decoder.py#L270
# For where changes would need to be made to preprocess the images which
# get loaded.
items_to_handlers = {
'image': ImageDecodeProcess(shape=[218, 178, 3], image_length=64),
'labels': tfexample_decoder.Tensor('image/labels'),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
metadata = {
'num_classes_per_attribute': num_classes_per_attribute,
'split_type': _SPLIT_TYPE,
'label_map_json': label_map_json,
}
return dataset.Dataset(
data_sources=file_pattern,
reader=tf.TFRecordReader,
decoder=decoder,
num_samples=_SPLITS_TO_SIZES[split_name],
items_to_descriptions=_ITEMS_TO_DESCRIPTIONS), metadata
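
# Example usage sketch (assumption: TFRecords already exist at dataset_dir):
# dataset_obj, metadata = get_split('train', dataset_dir='/path/to/celeba_tfrecords')
# print(metadata['num_classes_per_attribute'])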
| 36.091549 | 123 | 0.724293 |
6b269f5ba9e2d6a392abd625b09ccdc699507f3d | 1,303 | py | Python | jorldy/manager/log_manager.py | kan-s0/JORLDY | 44989cf415196604a1ad0383b34085dee6bb1c51 | [
"Apache-2.0"
] | null | null | null | jorldy/manager/log_manager.py | kan-s0/JORLDY | 44989cf415196604a1ad0383b34085dee6bb1c51 | [
"Apache-2.0"
] | null | null | null | jorldy/manager/log_manager.py | kan-s0/JORLDY | 44989cf415196604a1ad0383b34085dee6bb1c51 | [
"Apache-2.0"
] | null | null | null | import os
import datetime, time
import imageio
from pygifsicle import optimize
from torch.utils.tensorboard import SummaryWriter
| 36.194444 | 86 | 0.583269 |
6b27daa674cb67e0f7a35c3fcd65be25c5a4c1db | 2,676 | py | Python | lib/SeparateDriver/ASRDriverParts/UNIInterface.py | multi-service-fabric/element-manager | e550d1b5ec9419f1fb3eb6e058ce46b57c92ee2f | [
"Apache-2.0"
] | null | null | null | lib/SeparateDriver/ASRDriverParts/UNIInterface.py | multi-service-fabric/element-manager | e550d1b5ec9419f1fb3eb6e058ce46b57c92ee2f | [
"Apache-2.0"
] | null | null | null | lib/SeparateDriver/ASRDriverParts/UNIInterface.py | multi-service-fabric/element-manager | e550d1b5ec9419f1fb3eb6e058ce46b57c92ee2f | [
"Apache-2.0"
] | 1 | 2020-04-02T01:17:43.000Z | 2020-04-02T01:17:43.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright(c) 2019 Nippon Telegraph and Telephone Corporation
# Filename: ASRDriverParts/UNIInterface.py
'''
Parts Module for ASR driver UNI interface configuraton
'''
import GlobalModule
from EmCommonLog import decorater_log
from ASRDriverParts.InterfaceBase import InterfaceBase
| 33.037037 | 63 | 0.567638 |
6b2847d0ef2aafced05fa68a40e983a929d467d0 | 6,003 | py | Python | tools/accuracy_checker/accuracy_checker/annotation_converters/mnist.py | AnthonyQuantum/open_model_zoo | 7d235755e2d17f6186b11243a169966e4f05385a | [
"Apache-2.0"
] | 4 | 2021-04-21T02:38:04.000Z | 2021-10-13T12:15:33.000Z | tools/accuracy_checker/accuracy_checker/annotation_converters/mnist.py | AnthonyQuantum/open_model_zoo | 7d235755e2d17f6186b11243a169966e4f05385a | [
"Apache-2.0"
] | 6 | 2020-11-13T19:02:47.000Z | 2022-03-12T00:43:24.000Z | tools/accuracy_checker/accuracy_checker/annotation_converters/mnist.py | AnthonyQuantum/open_model_zoo | 7d235755e2d17f6186b11243a169966e4f05385a | [
"Apache-2.0"
] | null | null | null | """
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from ..config import PathField, BoolField
from ..representation import ClassificationAnnotation
from ..utils import read_csv, check_file_existence, read_json
from .format_converter import BaseFormatConverter, ConverterReturn
try:
from PIL import Image
except ImportError:
Image = None
| 42.274648 | 118 | 0.670165 |
6b28598ebd5982e3c50306026cc2ae916f9a979c | 4,511 | py | Python | Libraries/Python/wxGlade/v0.9,5/wxGlade-0.9.5-py3.6.egg/wxglade/bugdialog.py | davidbrownell/Common_EnvironmentEx | 9e20b79b4de0cb472f65ac08b3de83f9ed8e2ca3 | [
"BSL-1.0"
] | null | null | null | Libraries/Python/wxGlade/v0.9,5/wxGlade-0.9.5-py3.6.egg/wxglade/bugdialog.py | davidbrownell/Common_EnvironmentEx | 9e20b79b4de0cb472f65ac08b3de83f9ed8e2ca3 | [
"BSL-1.0"
] | null | null | null | Libraries/Python/wxGlade/v0.9,5/wxGlade-0.9.5-py3.6.egg/wxglade/bugdialog.py | davidbrownell/Common_EnvironmentEx | 9e20b79b4de0cb472f65ac08b3de83f9ed8e2ca3 | [
"BSL-1.0"
] | 1 | 2020-08-19T17:25:22.000Z | 2020-08-19T17:25:22.000Z | """\
Dialog to show details of internal errors.
@copyright: 2014-2016 Carsten Grohmann
@copyright: 2017 Dietmar Schwertberger
@license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY
"""
import bugdialog_ui
import config
import log
import logging
import sys
import wx
def Show(msg, exc):
"""Wrapper for creating a L{BugReport} dialog and show the details of the given exception instance.
msg: Short description of the action that has raised this error
exc: Caught exception
see ShowEI(), BugReport.SetContent()"""
dialog = BugReport()
dialog.SetContent(msg, exc)
dialog.ShowModal()
dialog.Destroy()
def ShowEI(exc_type, exc_value, exc_tb, msg=None):
"""Wrapper for creating a L{BugReport} dialog and show the given exception details.
exc_type: Exception type
exc_value: Exception value
exc_tb: Exception traceback
msg: Short description of the exception
see: L{Show(), BugReport.SetContent()"""
dialog = BugReport()
dialog.SetContentEI(exc_type, exc_value, exc_tb, msg)
dialog.ShowModal()
dialog.Destroy()
def ShowEnvironmentError(msg, inst):
"""Show EnvironmentError exceptions detailed and user-friendly
msg: Error message
inst: The caught exception"""
    # 'filename' defaults to '' so the %(filename)s below never raises KeyError
    details = {'msg': msg, 'type': inst.__class__.__name__, 'filename': ''}
if inst.filename:
details['filename'] = _('Filename: %s') % inst.filename
if inst.errno is not None and inst.strerror is not None:
details['error'] = '%s - %s' % (inst.errno, inst.strerror)
else:
details['error'] = str(inst.args)
text = _("""%(msg)s
Error type: %(type)s
Error code: %(error)s
%(filename)s""") % details
wx.MessageBox(text, _('Error'), wx.OK | wx.CENTRE | wx.ICON_ERROR)
| 30.073333 | 103 | 0.65573 |
6b2889ee02cbc2db0ebf9270a48b091ad3ca3b59 | 8,237 | py | Python | core/views.py | Neelamegam2000/QRcode-for-license | a6d4c9655c5ba52b24c1ea737797557f06e0fcbf | [
"MIT"
] | null | null | null | core/views.py | Neelamegam2000/QRcode-for-license | a6d4c9655c5ba52b24c1ea737797557f06e0fcbf | [
"MIT"
] | null | null | null | core/views.py | Neelamegam2000/QRcode-for-license | a6d4c9655c5ba52b24c1ea737797557f06e0fcbf | [
"MIT"
] | null | null | null | from django.shortcuts import render, redirect
from django.conf import settings
from django.core.files.storage import FileSystemStorage,default_storage
from django.core.mail import send_mail, EmailMessage
from core.models import Document
from core.forms import DocumentForm
from django.contrib import messages
import os
import pyqrcode
import png
import random
import base64
import cv2
import numpy as np
import pyzbar.pyzbar as pyzbar
"""def simple_upload(request):
if request.method == 'POST' and request.FILES['myfile']:
myfile = request.FILES['myfile']
fs = FileSystemStorage()
filename = fs.save(myfile.name, myfile)
uploaded_file_url = fs.url(filename)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
media_path = os.path.join(BASE_DIR,'media')
full_path=os.path.join(media_path,myfile.name)
qr=pyqrcode.create(uploaded_file_url)
filename_before=filename.rsplit(".")
filename1=filename_before[0]+".png"
s=qr.png(filename1,scale=6)
'''from fpdf import FPDF
pdf=FPDF()
pdf.add_page()
pdf.image(filename1,x=50,y=None,w=60,h=60,type="",link=uploaded_file_url)'''
return render(request, 'simple_upload.html', {
'uploaded_file_url': uploaded_file_url
})
return render(request, 'simple_upload.html')"""
| 42.901042 | 167 | 0.617701 |
6b29a1af58169202c8dc76623da144e32be97995 | 25 | py | Python | docassemble/MACourts/__init__.py | nonprofittechy/docassemble-MACourts | 6035393a09cff3e8a371f19b79d1cde3a60691c1 | [
"MIT"
] | 2 | 2020-07-20T19:13:38.000Z | 2021-03-02T04:30:44.000Z | docassemble/MACourts/__init__.py | nonprofittechy/docassemble-MACourts | 6035393a09cff3e8a371f19b79d1cde3a60691c1 | [
"MIT"
] | 25 | 2020-04-11T18:40:32.000Z | 2021-12-20T14:18:04.000Z | docassemble/MACourts/__init__.py | nonprofittechy/docassemble-MACourts | 6035393a09cff3e8a371f19b79d1cde3a60691c1 | [
"MIT"
] | 7 | 2020-04-10T01:51:27.000Z | 2021-06-25T21:24:48.000Z | __version__ = '0.0.58.2'
| 12.5 | 24 | 0.64 |
6b2b5f9728064d787e0b3474fa79a57d993dda3b | 594 | py | Python | main.py | Meat0Project/ChatBot | 35ebadc71b100d861f9c9e211e1e751175f47c50 | [
"MIT"
] | 4 | 2020-10-30T07:46:39.000Z | 2020-10-30T18:20:57.000Z | main.py | Meat0Project/ChatBot | 35ebadc71b100d861f9c9e211e1e751175f47c50 | [
"MIT"
] | null | null | null | main.py | Meat0Project/ChatBot | 35ebadc71b100d861f9c9e211e1e751175f47c50 | [
"MIT"
] | null | null | null | '''
Made by - Aditya mangal
Purpose - Python mini project
Date - 18 october 2020
'''
from chatterbot import ChatBot
from chatterbot.trainers import ChatterBotCorpusTrainer
from termcolor import cprint
import time
chatbot = ChatBot('Bot')
trainer = ChatterBotCorpusTrainer(chatbot)
trainer.train('chatterbot.corpus.english')
cprint("#" * 50, "magenta")
cprint((f"A Chatot ").center(50), "yellow")
cprint("#" * 50, "magenta")
print('You can exit by type exit\n')
while True:
query = input(">> ")
if 'exit' in query:
exit()
else:
print(chatbot.get_response(query))
| 22.846154 | 55 | 0.69697 |
6b2bddfb3c677b2bd52d34844ad305be0f97c9b1 | 9,330 | py | Python | challenges/day14.py | Jeffreyo3/AdventOfCode2020 | 8705847a04885d6489eb11acfddf2ff5702d8927 | [
"MIT"
] | null | null | null | challenges/day14.py | Jeffreyo3/AdventOfCode2020 | 8705847a04885d6489eb11acfddf2ff5702d8927 | [
"MIT"
] | null | null | null | challenges/day14.py | Jeffreyo3/AdventOfCode2020 | 8705847a04885d6489eb11acfddf2ff5702d8927 | [
"MIT"
] | null | null | null | """
--- Day 14: Docking Data ---
As your ferry approaches the sea port, the captain asks for your help again. The computer system that runs this port isn't compatible with the docking program on the ferry, so the docking parameters aren't being correctly initialized in the docking program's memory.
After a brief inspection, you discover that the sea port's computer system uses a strange bitmask system in its initialization program. Although you don't have the correct decoder chip handy, you can emulate it in software!
The initialization program (your puzzle input) can either update the bitmask or write a value to memory. Values and memory addresses are both 36-bit unsigned integers. For example, ignoring bitmasks for a moment, a line like mem[8] = 11 would write the value 11 to memory address 8.
The bitmask is always given as a string of 36 bits, written with the most significant bit (representing 2^35) on the left and the least significant bit (2^0, that is, the 1s bit) on the right. The current bitmask is applied to values immediately before they are written to memory: a 0 or 1 overwrites the corresponding bit in the value, while an X leaves the bit in the value unchanged.
For example, consider the following program:
mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X
mem[8] = 11
mem[7] = 101
mem[8] = 0
This program starts by specifying a bitmask (mask = ....). The mask it specifies will overwrite two bits in every written value: the 2s bit is overwritten with 0, and the 64s bit is overwritten with 1.
The program then attempts to write the value 11 to memory address 8. By expanding everything out to individual bits, the mask is applied as follows:
value: 000000000000000000000000000000001011 (decimal 11)
mask: XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X
result: 000000000000000000000000000001001001 (decimal 73)
So, because of the mask, the value 73 is written to memory address 8 instead. Then, the program tries to write 101 to address 7:
value: 000000000000000000000000000001100101 (decimal 101)
mask: XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X
result: 000000000000000000000000000001100101 (decimal 101)
This time, the mask has no effect, as the bits it overwrote were already the values the mask tried to set. Finally, the program tries to write 0 to address 8:
value: 000000000000000000000000000000000000 (decimal 0)
mask: XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X
result: 000000000000000000000000000001000000 (decimal 64)
64 is written to address 8 instead, overwriting the value that was there previously.
To initialize your ferry's docking program, you need the sum of all values left in memory after the initialization program completes. (The entire 36-bit address space begins initialized to the value 0 at every address.) In the above example, only two values in memory are not zero - 101 (at address 7) and 64 (at address 8) - producing a sum of 165.
Execute the initialization program. What is the sum of all values left in memory after it completes?
"""
f = open("challenges\data\day14data.txt", "r")
# Function to convert Decimal number
# to Binary number
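
# Hedged sketch (assumption): the helper this comment refers to, plus the
# zero-padding helper used in the commented-out lines at the bottom.
def decimalToBinary(n):
    return bin(n)[2:]

def leadingZeros(width, binary):
    return binary.zfill(width)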
"""
--- Part Two ---
For some reason, the sea port's computer system still can't communicate with your ferry's docking program. It must be using version 2 of the decoder chip!
A version 2 decoder chip doesn't modify the values being written at all. Instead, it acts as a memory address decoder. Immediately before a value is written to memory, each bit in the bitmask modifies the corresponding bit of the destination memory address in the following way:
If the bitmask bit is 0, the corresponding memory address bit is unchanged.
If the bitmask bit is 1, the corresponding memory address bit is overwritten with 1.
If the bitmask bit is X, the corresponding memory address bit is floating.
A floating bit is not connected to anything and instead fluctuates unpredictably. In practice, this means the floating bits will take on all possible values, potentially causing many memory addresses to be written all at once!
For example, consider the following program:
mask = 000000000000000000000000000000X1001X
mem[42] = 100
mask = 00000000000000000000000000000000X0XX
mem[26] = 1
When this program goes to write to memory address 42, it first applies the bitmask:
address: 000000000000000000000000000000101010 (decimal 42)
mask: 000000000000000000000000000000X1001X
result: 000000000000000000000000000000X1101X
After applying the mask, four bits are overwritten, three of which are different, and two of which are floating. Floating bits take on every possible combination of values; with two floating bits, four actual memory addresses are written:
000000000000000000000000000000011010 (decimal 26)
000000000000000000000000000000011011 (decimal 27)
000000000000000000000000000000111010 (decimal 58)
000000000000000000000000000000111011 (decimal 59)
Next, the program is about to write to memory address 26 with a different bitmask:
address: 000000000000000000000000000000011010 (decimal 26)
mask: 00000000000000000000000000000000X0XX
result: 00000000000000000000000000000001X0XX
This results in an address with three floating bits, causing writes to eight memory addresses:
000000000000000000000000000000010000 (decimal 16)
000000000000000000000000000000010001 (decimal 17)
000000000000000000000000000000010010 (decimal 18)
000000000000000000000000000000010011 (decimal 19)
000000000000000000000000000000011000 (decimal 24)
000000000000000000000000000000011001 (decimal 25)
000000000000000000000000000000011010 (decimal 26)
000000000000000000000000000000011011 (decimal 27)
The entire 36-bit address space still begins initialized to the value 0 at every address, and you still need the sum of all values left in memory at the end of the program. In this example, the sum is 208.
Execute the initialization program using an emulator for a version 2 decoder chip. What is the sum of all values left in memory after it completes?
"""
data = processData(f)
# [print(d) for d in data]
sumAllValues = initialize(data)
print("Part 1:", sumAllValues)
sumAllValuesV2 = initialize_v2(data)
print("Part 2:", sumAllValuesV2)
# binary = decimalToBinary(33323)
# binary = leadingZeros(36, binary)
# print(binary)
# combos = initialize_v2([("mask", "100X100X101011111X100000100X11010011"),
# ("mem[33323]", "349380")])
# print(combos) | 42.217195 | 386 | 0.700536 |
6b2c276716f02206bb780210c6a91cee657ed190 | 2,524 | py | Python | src/Dialogs/RegularPolygonDialog.py | Lovely-XPP/tkzgeom | bf68e139dc05f759542d6611f4dc07f4f2727b92 | [
"MIT"
] | 41 | 2021-11-24T05:54:08.000Z | 2022-03-26T10:19:30.000Z | src/Dialogs/RegularPolygonDialog.py | Lovely-XPP/tkzgeom | bf68e139dc05f759542d6611f4dc07f4f2727b92 | [
"MIT"
] | 1 | 2022-02-28T04:34:51.000Z | 2022-03-07T10:49:27.000Z | src/Dialogs/RegularPolygonDialog.py | Lovely-XPP/tkzgeom | bf68e139dc05f759542d6611f4dc07f4f2727b92 | [
"MIT"
] | 10 | 2021-11-24T07:35:17.000Z | 2022-03-25T18:42:14.000Z | from PyQt5 import QtWidgets, uic
from Factory import Factory
from Dialogs.DialogMacros import turn_into_free_point, free_point_checkbox
from Fill.ListWidget import fill_listWidget_with_data, set_selected_id_in_listWidget
import Constant as c
| 39.4375 | 112 | 0.660063 |
6b2cac513cb8e6260352dc24ccb57b041a317ef9 | 8,858 | py | Python | tests/test_networks.py | UCY-LINC-LAB/5G-Slicer | 41e75a6709bc779cb4f3e08484b9ada3911646ed | [
"Apache-2.0"
] | null | null | null | tests/test_networks.py | UCY-LINC-LAB/5G-Slicer | 41e75a6709bc779cb4f3e08484b9ada3911646ed | [
"Apache-2.0"
] | null | null | null | tests/test_networks.py | UCY-LINC-LAB/5G-Slicer | 41e75a6709bc779cb4f3e08484b9ada3911646ed | [
"Apache-2.0"
] | null | null | null | import unittest
from networks.QoS import QoS
from networks.connections.mathematical_connections import FunctionalDegradation
from networks.slicing import SliceConceptualGraph
from utils.location import Location
| 47.368984 | 112 | 0.634229 |
6b2cec5a2588f39302333a5f4dacaf75c507b16b | 3,344 | py | Python | backend/api/management/commands/create_testdb.py | INSRapperswil/nornir-web | 458e6b24bc373197044b4b7b5da74f16f93a9459 | [
"MIT"
] | 2 | 2021-06-01T08:33:04.000Z | 2021-08-20T04:22:39.000Z | backend/api/management/commands/create_testdb.py | INSRapperswil/nornir-web | 458e6b24bc373197044b4b7b5da74f16f93a9459 | [
"MIT"
] | null | null | null | backend/api/management/commands/create_testdb.py | INSRapperswil/nornir-web | 458e6b24bc373197044b4b7b5da74f16f93a9459 | [
"MIT"
] | null | null | null | """
Setup DB with example data for tests
"""
from django.contrib.auth.hashers import make_password
from django.contrib.auth.models import User, Group
from django.core.management.base import BaseCommand
from api import models
| 54.819672 | 121 | 0.62201 |
6b2cf4b5b97a007dddbfd9bea2e0b5aea5f19d54 | 576 | py | Python | pyinfra/facts/util/distro.py | charles-l/pyinfra | 1992d98ff31d41404427dbb3cc6095a7bebd4052 | [
"MIT"
] | 1 | 2020-12-24T08:24:13.000Z | 2020-12-24T08:24:13.000Z | pyinfra/facts/util/distro.py | charles-l/pyinfra | 1992d98ff31d41404427dbb3cc6095a7bebd4052 | [
"MIT"
] | null | null | null | pyinfra/facts/util/distro.py | charles-l/pyinfra | 1992d98ff31d41404427dbb3cc6095a7bebd4052 | [
"MIT"
] | null | null | null | from __future__ import absolute_import, unicode_literals
import os
import distro
| 27.428571 | 92 | 0.751736 |
6b2d1574c0e19ae7863baaa36967e1b1432a37dd | 3,206 | py | Python | appium/webdriver/common/multi_action.py | salabogdan/python-client | 66208fdbbc8f0a8b0e90376b404135b57e797fa5 | [
"Apache-2.0"
] | 1 | 2021-07-23T03:56:49.000Z | 2021-07-23T03:56:49.000Z | appium/webdriver/common/multi_action.py | ayvnkhan/python-client | ba408b74f0d30fc06a51e77f68fc5cfd4ac8f99a | [
"Apache-2.0"
] | 11 | 2019-07-16T04:21:22.000Z | 2021-02-24T15:11:02.000Z | appium/webdriver/common/multi_action.py | ki4070ma/python-client | d5f29f08a2fe9b5a9cca4162726c7cfb4faa42e9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The Selenium team implemented something like the Multi Action API in the form of
# "action chains" (https://code.google.com/p/selenium/source/browse/py/selenium/webdriver/common/action_chains.py).
# These do not quite work for this situation, and do not allow for ad hoc action
# chaining as the spec requires.
import copy
from typing import TYPE_CHECKING, Dict, List, Optional, TypeVar, Union
from appium.webdriver.mobilecommand import MobileCommand as Command
if TYPE_CHECKING:
from appium.webdriver.common.touch_action import TouchAction
from appium.webdriver.webdriver import WebDriver
from appium.webdriver.webelement import WebElement
T = TypeVar('T', bound='MultiAction')
| 35.622222 | 118 | 0.663132 |
6b2de30f1514c024be028007b3c7a182b53eba57 | 8,652 | py | Python | src/visu/visualizer.py | JonasFrey96/PLR2 | a0498e6ff283a27c6db11b3d57d3b3100026f069 | [
"MIT"
] | null | null | null | src/visu/visualizer.py | JonasFrey96/PLR2 | a0498e6ff283a27c6db11b3d57d3b3100026f069 | [
"MIT"
] | 2 | 2020-06-30T17:33:54.000Z | 2020-07-07T18:12:21.000Z | src/visu/visualizer.py | JonasFrey96/PLR2 | a0498e6ff283a27c6db11b3d57d3b3100026f069 | [
"MIT"
] | null | null | null | import numpy as np
import sys
import os
from PIL import Image
from visu.helper_functions import save_image
from scipy.spatial.transform import Rotation as R
from helper import re_quat
import copy
import torch
import numpy as np
import k3d
def plot_pcd(x, point_size=0.005, c='g'):
"""
x: point_nr,3
"""
if c == 'b':
k = 245
elif c == 'g':
k = 25811000
elif c == 'r':
k = 11801000
elif c == 'black':
k = 2580
else:
k = 2580
colors = np.ones(x.shape[0]) * k
plot = k3d.plot(name='points')
plt_points = k3d.points(x, colors.astype(np.uint32), point_size=point_size)
plot += plt_points
plt_points.shader = '3d'
plot.display()
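
# Example (assumption: run in a Jupyter notebook, where k3d renders inline):
# pts = np.random.rand(1000, 3).astype(np.float32)
# plot_pcd(pts, point_size=0.01, c='r')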
| 33.929412 | 194 | 0.532016 |
6b2de9f9abfa7c9d1e5aab26305227c69409476d | 3,317 | py | Python | leetCode_Q37_serializeTree.py | FreesiaLikesPomelo/-offer | 14ac73cb46d13c7f5bbc294329a14f3c5995bc7a | [
"Apache-2.0"
] | null | null | null | leetCode_Q37_serializeTree.py | FreesiaLikesPomelo/-offer | 14ac73cb46d13c7f5bbc294329a14f3c5995bc7a | [
"Apache-2.0"
] | null | null | null | leetCode_Q37_serializeTree.py | FreesiaLikesPomelo/-offer | 14ac73cb46d13c7f5bbc294329a14f3c5995bc7a | [
"Apache-2.0"
] | null | null | null | '''
37.
:
1
/ \
2 3
/ \
4 5
"[1,2,3,null,null,4,5]"
'''
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# Runtime: 240 ms, faster than 22.75% of Python3 submissions
# Memory: 31 MB, better than 100.00% of Python3 submissions
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.deserialize(codec.serialize(root))
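
# Hedged sketch (assumption: the accepted solution body is elided in this
# dump): a standard BFS codec matching the "[1,2,3,null,null,4,5]" format.
class TreeNode:
    # Same shape as the node class LeetCode injects (commented out above).
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

class Codec:
    def serialize(self, root):
        # Breadth-first walk, recording "null" for absent children.
        if not root:
            return "[]"
        out, queue = [], [root]
        while queue:
            node = queue.pop(0)
            if node:
                out.append(str(node.val))
                queue.append(node.left)
                queue.append(node.right)
            else:
                out.append("null")
        while out and out[-1] == "null":   # drop trailing nulls
            out.pop()
        return "[" + ",".join(out) + "]"

    def deserialize(self, data):
        # Rebuild level by level, consuming two child slots per node.
        vals = data.strip("[]").split(",") if data.strip("[]") else []
        if not vals:
            return None
        root = TreeNode(int(vals[0]))
        queue, i = [root], 1
        while queue and i < len(vals):
            node = queue.pop(0)
            if vals[i] != "null":
                node.left = TreeNode(int(vals[i]))
                queue.append(node.left)
            i += 1
            if i < len(vals) and vals[i] != "null":
                node.right = TreeNode(int(vals[i]))
                queue.append(node.right)
            i += 1
        return root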
| 27.188525 | 81 | 0.472716 |
6b2e543e1da4f0dd04f05a16bdaaac83f262d6ce | 1,505 | py | Python | ipuz/puzzlekinds/__init__.py | maiamcc/ipuz | fbe6f663b28ad42754622bf2d3bbe59a26be2615 | [
"MIT"
] | 5 | 2015-06-23T17:18:41.000Z | 2020-05-05T16:43:14.000Z | ipuz/puzzlekinds/__init__.py | maiamcc/ipuz | fbe6f663b28ad42754622bf2d3bbe59a26be2615 | [
"MIT"
] | 3 | 2015-08-21T05:17:22.000Z | 2021-03-20T18:39:31.000Z | ipuz/puzzlekinds/__init__.py | maiamcc/ipuz | fbe6f663b28ad42754622bf2d3bbe59a26be2615 | [
"MIT"
] | 3 | 2018-01-15T17:28:10.000Z | 2020-09-29T20:32:21.000Z | from .acrostic import IPUZ_ACROSTIC_VALIDATORS
from .answer import IPUZ_ANSWER_VALIDATORS
from .block import IPUZ_BLOCK_VALIDATORS
from .crossword import IPUZ_CROSSWORD_VALIDATORS
from .fill import IPUZ_FILL_VALIDATORS
from .sudoku import IPUZ_SUDOKU_VALIDATORS
from .wordsearch import IPUZ_WORDSEARCH_VALIDATORS
IPUZ_PUZZLEKINDS = {
"http://ipuz.org/acrostic": {
"mandatory": (
"puzzle",
),
"validators": {
1: IPUZ_ACROSTIC_VALIDATORS,
},
},
"http://ipuz.org/answer": {
"mandatory": (),
"validators": {
1: IPUZ_ANSWER_VALIDATORS,
},
},
"http://ipuz.org/block": {
"mandatory": (
"dimensions",
),
"validators": {
1: IPUZ_BLOCK_VALIDATORS,
},
},
"http://ipuz.org/crossword": {
"mandatory": (
"dimensions",
"puzzle",
),
"validators": {
1: IPUZ_CROSSWORD_VALIDATORS,
},
},
"http://ipuz.org/fill": {
"mandatory": (),
"validators": {
1: IPUZ_FILL_VALIDATORS,
},
},
"http://ipuz.org/sudoku": {
"mandatory": (
"puzzle",
),
"validators": {
1: IPUZ_SUDOKU_VALIDATORS,
},
},
"http://ipuz.org/wordsearch": {
"mandatory": (
"dimensions",
),
"validators": {
1: IPUZ_WORDSEARCH_VALIDATORS,
},
},
}
| 23.153846 | 50 | 0.509635 |
6b2f80664f980dad5a40411dc361a14a2b34e519 | 8,263 | py | Python | CTFd/api/v1/users.py | MrQubo/CTFd | 5c8ffff1412ea91ad6cf87135cb3d175a1223544 | [
"Apache-2.0"
] | null | null | null | CTFd/api/v1/users.py | MrQubo/CTFd | 5c8ffff1412ea91ad6cf87135cb3d175a1223544 | [
"Apache-2.0"
] | null | null | null | CTFd/api/v1/users.py | MrQubo/CTFd | 5c8ffff1412ea91ad6cf87135cb3d175a1223544 | [
"Apache-2.0"
] | null | null | null | from flask import session, request, abort
from flask_restplus import Namespace, Resource
from CTFd.models import (
db,
Users,
Solves,
Awards,
Tracking,
Unlocks,
Submissions,
Notifications,
)
from CTFd.utils.decorators import authed_only, admins_only, ratelimit
from CTFd.cache import clear_standings
from CTFd.utils.user import get_current_user, is_admin
from CTFd.utils.decorators.visibility import (
check_account_visibility,
check_score_visibility,
)
from CTFd.schemas.submissions import SubmissionSchema
from CTFd.schemas.awards import AwardSchema
from CTFd.schemas.users import UserSchema
users_namespace = Namespace("users", description="Endpoint to retrieve Users")
| 30.156934 | 78 | 0.634031 |
6b33e7e0e395a01bbb9aefe040bd4c754743cdbd | 1,005 | py | Python | getting_started/pages.py | emilhe/dash-extensions-docs | f44edba1c955242fc503185954ea5f3be69eb122 | [
"MIT"
] | 1 | 2022-03-20T09:50:07.000Z | 2022-03-20T09:50:07.000Z | getting_started/pages.py | emilhe/dash-extensions-docs | f44edba1c955242fc503185954ea5f3be69eb122 | [
"MIT"
] | null | null | null | getting_started/pages.py | emilhe/dash-extensions-docs | f44edba1c955242fc503185954ea5f3be69eb122 | [
"MIT"
] | null | null | null | import dash_labs as dl
from dash_extensions.enrich import DashBlueprint, DashProxy, html, Output, Input
app = DashProxy(prevent_initial_callbacks=True, plugins=[dl.plugins.pages])
# Register a few pages.
n_pages = 5
for i in range(n_pages):
page = make_page(i)
page.register(app, page_name(i), prefix=str(i))
# Setup main app layout.
app_shell = [html.H1("App shell"), dl.plugins.page_container]
navigation = html.Ul([html.Li(html.A(page_name(i), href=page_name(i))) for i in range(n_pages)])
app.layout = html.Div(app_shell + [navigation], style=dict(display="block"))
if __name__ == '__main__':
app.run_server() | 34.655172 | 106 | 0.689552 |
6b34589c449dc4aced65c72c732c394afc998c68 | 8,790 | py | Python | zaqar/transport/wsgi/v2_0/homedoc.py | vkmc/zaqar-websocket | a93c460a28e541b5cc8b425d5fb4d69e78ab9f4b | [
"Apache-2.0"
] | 1 | 2015-03-22T18:41:13.000Z | 2015-03-22T18:41:13.000Z | zaqar/transport/wsgi/v2_0/homedoc.py | vkmc/zaqar-websocket | a93c460a28e541b5cc8b425d5fb4d69e78ab9f4b | [
"Apache-2.0"
] | null | null | null | zaqar/transport/wsgi/v2_0/homedoc.py | vkmc/zaqar-websocket | a93c460a28e541b5cc8b425d5fb4d69e78ab9f4b | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import json
# NOTE(kgriffs): http://tools.ietf.org/html/draft-nottingham-json-home-03
JSON_HOME = {
'resources': {
# -----------------------------------------------------------------
# Queues
# -----------------------------------------------------------------
'rel/queues': {
'href-template': '/v2/queues{?marker,limit,detailed}',
'href-vars': {
'marker': 'param/marker',
'limit': 'param/queue_limit',
'detailed': 'param/detailed',
},
'hints': {
'allow': ['GET'],
'formats': {
'application/json': {},
},
},
},
'rel/queue': {
'href-template': '/v2/queues/{queue_name}',
'href-vars': {
'queue_name': 'param/queue_name',
},
'hints': {
'allow': ['PUT', 'DELETE'],
'formats': {
'application/json': {},
},
},
},
'rel/queue_stats': {
'href-template': '/v2/queues/{queue_name}/stats',
'href-vars': {
'queue_name': 'param/queue_name',
},
'hints': {
'allow': ['GET'],
'formats': {
'application/json': {},
},
},
},
# -----------------------------------------------------------------
# Messages
# -----------------------------------------------------------------
'rel/messages': {
'href-template': ('/v2/queues/{queue_name}/messages'
'{?marker,limit,echo,include_claimed}'),
'href-vars': {
'queue_name': 'param/queue_name',
'marker': 'param/marker',
'limit': 'param/messages_limit',
'echo': 'param/echo',
'include_claimed': 'param/include_claimed',
},
'hints': {
'allow': ['GET'],
'formats': {
'application/json': {},
},
},
},
'rel/post_messages': {
'href-template': '/v2/queues/{queue_name}/messages',
'href-vars': {
'queue_name': 'param/queue_name',
},
'hints': {
'allow': ['POST'],
'formats': {
'application/json': {},
},
'accept-post': ['application/json'],
},
},
'rel/messages_delete': {
'href-template': '/v2/queues/{queue_name}/messages{?ids,pop}',
'href-vars': {
'queue_name': 'param/queue_name',
'ids': 'param/ids',
'pop': 'param/pop'
},
'hints': {
'allow': [
'DELETE'
],
'formats': {
'application/json': {}
}
}
},
'rel/message_delete': {
'href-template': '/v2/queues/{queue_name}/messages/{message_id}{?claim}', # noqa
'href-vars': {
'queue_name': 'param/queue_name',
'message_id': 'param/message_id',
'claim': 'param/claim_id'
},
'hints': {
'allow': [
'DELETE'
],
'formats': {
'application/json': {}
}
}
},
# -----------------------------------------------------------------
# Claims
# -----------------------------------------------------------------
'rel/claim': {
'href-template': '/v2/queues/{queue_name}/claims/{claim_id}',
'href-vars': {
'queue_name': 'param/queue_name',
'claim_id': 'param/claim_id',
},
'hints': {
'allow': ['GET'],
'formats': {
'application/json': {},
},
},
},
'rel/post_claim': {
'href-template': '/v2/queues/{queue_name}/claims{?limit}',
'href-vars': {
'queue_name': 'param/queue_name',
'limit': 'param/claim_limit',
},
'hints': {
'allow': ['POST'],
'formats': {
'application/json': {},
},
'accept-post': ['application/json']
},
},
'rel/patch_claim': {
'href-template': '/v2/queues/{queue_name}/claims/{claim_id}',
'href-vars': {
'queue_name': 'param/queue_name',
'claim_id': 'param/claim_id',
},
'hints': {
'allow': ['PATCH'],
'formats': {
'application/json': {},
},
'accept-post': ['application/json']
},
},
'rel/delete_claim': {
'href-template': '/v2/queues/{queue_name}/claims/{claim_id}',
'href-vars': {
'queue_name': 'param/queue_name',
'claim_id': 'param/claim_id',
},
'hints': {
'allow': ['DELETE'],
'formats': {
'application/json': {},
},
},
},
}
}
ADMIN_RESOURCES = {
# -----------------------------------------------------------------
# Pools
# -----------------------------------------------------------------
'rel/pools': {
'href-template': '/v2/pools{?detailed,limit,marker}',
'href-vars': {
'detailed': 'param/detailed',
'limit': 'param/pool_limit',
'marker': 'param/marker',
},
'hints': {
'allow': ['GET'],
'formats': {
'application/json': {},
},
},
},
'rel/pool': {
'href-template': '/v2/pools/{pool_name}',
'href-vars': {
'pool_name': 'param/pool_name',
},
'hints': {
'allow': ['GET', 'PUT', 'PATCH', 'DELETE'],
'formats': {
'application/json': {},
},
},
},
# -----------------------------------------------------------------
# Flavors
# -----------------------------------------------------------------
'rel/flavors': {
'href-template': '/v2/flavors{?detailed,limit,marker}',
'href-vars': {
'detailed': 'param/detailed',
'limit': 'param/flavor_limit',
'marker': 'param/marker',
},
'hints': {
'allow': ['GET'],
'formats': {
'application/json': {},
},
},
},
'rel/flavor': {
'href-template': '/v2/flavors/{flavor_name}',
'href-vars': {
'flavor_name': 'param/flavor_name',
},
'hints': {
'allow': ['GET', 'PUT', 'PATCH', 'DELETE'],
'formats': {
'application/json': {},
},
},
},
# -----------------------------------------------------------------
# Health
# -----------------------------------------------------------------
'rel/health': {
'href': '/v2/health',
'hints': {
'allow': ['GET'],
'formats': {
'application/json': {},
},
},
},
}
| 31.170213 | 93 | 0.367577 |
6b35baeaa7950e5538a7f5306ca85d6a854ed57e | 82,533 | py | Python | synapse/models/infotech.py | vertexproject/synapse | 9712e2aee63914441c59ce6cfc060fe06a2e5920 | [
"Apache-2.0"
] | 216 | 2017-01-17T18:52:50.000Z | 2022-03-31T18:44:49.000Z | synapse/models/infotech.py | vertexproject/synapse | 9712e2aee63914441c59ce6cfc060fe06a2e5920 | [
"Apache-2.0"
] | 2,189 | 2017-01-17T22:31:48.000Z | 2022-03-31T20:41:45.000Z | synapse/models/infotech.py | vertexproject/synapse | 9712e2aee63914441c59ce6cfc060fe06a2e5920 | [
"Apache-2.0"
] | 44 | 2017-01-17T16:50:57.000Z | 2022-03-16T18:35:52.000Z | import asyncio
import logging
import synapse.exc as s_exc
import synapse.lib.types as s_types
import synapse.lib.module as s_module
import synapse.lib.version as s_version
logger = logging.getLogger(__name__)
loglevels = (
(10, 'debug'),
(20, 'info'),
(30, 'notice'),
(40, 'warning'),
(50, 'err'),
(60, 'crit'),
(70, 'alert'),
(80, 'emerg'),
)
| 48.807215 | 191 | 0.372506 |
6b35eaa0ed1f5a899840c77cec0648c4c36f9761 | 1,447 | py | Python | test/test.py | bciar/ppp-web | 1afe39a3c8d2197595ad0e2610c612db210cd62e | [
"MIT"
] | 2 | 2018-09-27T03:31:42.000Z | 2018-09-27T11:11:17.000Z | test/test.py | bciar/ppp-web | 1afe39a3c8d2197595ad0e2610c612db210cd62e | [
"MIT"
] | null | null | null | test/test.py | bciar/ppp-web | 1afe39a3c8d2197595ad0e2610c612db210cd62e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Unit tests."""
import os
import unittest
from copy import copy
from webui.app import create_app
if __name__ == '__main__':
from test.utils.doctest_unittest_runner import doctest_unittest_runner
TEST_DIR = os.path.dirname(os.path.realpath(__file__)) + '/'
doctest_unittest_runner(test_dir=TEST_DIR, relative_path_to_root='../',
package_names=['webui', 'test'])
| 27.826923 | 79 | 0.607464 |
6b36c2213b18abb5b4d5cac68e49cf0ee92025a0 | 6,938 | py | Python | tests/sources/test_clang_format.py | Justin-Fisher/webots | 8a39e8e4390612919a8d82c7815aa914f4c079a4 | [
"Apache-2.0"
] | 1,561 | 2019-09-04T11:32:32.000Z | 2022-03-31T18:00:09.000Z | tests/sources/test_clang_format.py | Justin-Fisher/webots | 8a39e8e4390612919a8d82c7815aa914f4c079a4 | [
"Apache-2.0"
] | 2,184 | 2019-09-03T11:35:02.000Z | 2022-03-31T10:01:44.000Z | tests/sources/test_clang_format.py | Justin-Fisher/webots | 8a39e8e4390612919a8d82c7815aa914f4c079a4 | [
"Apache-2.0"
] | 1,013 | 2019-09-07T05:09:32.000Z | 2022-03-31T13:01:28.000Z | #!/usr/bin/env python
# Copyright 1996-2021 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test that the C, C++ and shader source code is compliant with ClangFormat."""
import unittest
import difflib
import os
import subprocess
from io import open
from distutils.spawn import find_executable
if __name__ == '__main__':
unittest.main()
| 41.54491 | 113 | 0.528971 |
6b37d79a6bcd2f11e42ccf5ea2b0694fffb12722 | 10,361 | py | Python | src/python/tests/core/system/shell_test.py | sanketsaurav/clusterfuzz | 9f7efba7781614d50cdc6ab136b9bcf19607731c | [
"Apache-2.0"
] | 1 | 2019-04-09T06:40:55.000Z | 2019-04-09T06:40:55.000Z | src/python/tests/core/system/shell_test.py | Delaney6/clusterfuzz | 9eeb08a85869b32733dd54c69b098688ff3b1bf5 | [
"Apache-2.0"
] | null | null | null | src/python/tests/core/system/shell_test.py | Delaney6/clusterfuzz | 9eeb08a85869b32733dd54c69b098688ff3b1bf5 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""shell tests."""
import mock
import os
import unittest
from pyfakefs import fake_filesystem_unittest
from system import environment
from system import shell
from tests.test_libs import helpers as test_helpers
from tests.test_libs import test_utils
| 36.101045 | 79 | 0.705434 |
6b3842e1a431bbf5fa9d29f78a1c73a20bb3a410 | 2,735 | py | Python | Language Model/birnn/model.py | osamaqureshi/NLP-for-Urdu | 864550dbf27244900c2be86e0bedcfb5bb519cb6 | [
"MIT"
] | 1 | 2020-10-22T20:18:22.000Z | 2020-10-22T20:18:22.000Z | Language Model/birnn/model.py | osamaqureshi/NLP-for-Urdu | 864550dbf27244900c2be86e0bedcfb5bb519cb6 | [
"MIT"
] | null | null | null | Language Model/birnn/model.py | osamaqureshi/NLP-for-Urdu | 864550dbf27244900c2be86e0bedcfb5bb519cb6 | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow as tf
def loss_function(real, pred, loss_object):
mask = tf.math.logical_not(tf.math.equal(real, 0))
loss_ = loss_object(real, pred)
mask = tf.cast(mask, dtype=loss_.dtype)
loss_ *= mask
return tf.reduce_mean(loss_)
def mask_sequences(seq, t):
mask = np.zeros(seq.shape)
mask[:,:t] = 1
inp = tf.math.multiply(seq, mask)
mask[:,:t+1] = 1
tar = tf.math.multiply(seq, mask)
return inp, tar | 42.734375 | 113 | 0.545521 |
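
# Usage sketch (assumption: `seq` is a float batch of padded token ids with
# shape [batch, time]); `tar` exposes exactly one more timestep than `inp`:
# seq = tf.constant([[5., 7., 9., 0.]])
# inp, tar = mask_sequences(seq, t=2)   # inp keeps steps 0-1, tar keeps 0-2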
6b385bed93debf4cc525192d73536e80c2566746 | 591 | py | Python | python/ray/train/__init__.py | jamesliu/ray | 11ab412db1fa3603a3006e8ed414e80dd1f11c0c | [
"Apache-2.0"
] | 33 | 2020-05-27T14:25:24.000Z | 2022-03-22T06:11:30.000Z | python/ray/train/__init__.py | jamesliu/ray | 11ab412db1fa3603a3006e8ed414e80dd1f11c0c | [
"Apache-2.0"
] | 227 | 2021-10-01T08:00:01.000Z | 2021-12-28T16:47:26.000Z | python/ray/train/__init__.py | gramhagen/ray | c18caa4db36d466718bdbcb2229aa0b2dc03da1f | [
"Apache-2.0"
] | 5 | 2020-08-06T15:53:07.000Z | 2022-02-09T03:31:31.000Z | from ray.train.backend import BackendConfig
from ray.train.callbacks import TrainingCallback
from ray.train.checkpoint import CheckpointStrategy
from ray.train.session import (get_dataset_shard, local_rank, load_checkpoint,
report, save_checkpoint, world_rank, world_size)
from ray.train.trainer import Trainer, TrainingIterator
__all__ = [
"BackendConfig", "CheckpointStrategy", "get_dataset_shard",
"load_checkpoint", "local_rank", "report", "save_checkpoint",
"TrainingIterator", "TrainingCallback", "Trainer", "world_rank",
"world_size"
]
| 42.214286 | 79 | 0.749577 |
6b387e13117dba7e993918eb8dcf86f67409ab84 | 1,548 | py | Python | test/test_contact_in_group.py | anastas11a/python_training | 1daceddb193d92542f7f7313026a7e67af4d89bb | [
"Apache-2.0"
] | null | null | null | test/test_contact_in_group.py | anastas11a/python_training | 1daceddb193d92542f7f7313026a7e67af4d89bb | [
"Apache-2.0"
] | null | null | null | test/test_contact_in_group.py | anastas11a/python_training | 1daceddb193d92542f7f7313026a7e67af4d89bb | [
"Apache-2.0"
] | null | null | null | from model.contact import Contact
from model.group import Group
import random
| 29.207547 | 73 | 0.709302 |
6b3881271e2eaf5752f4f95cac12eda083886a6f | 301 | py | Python | byurak/accounts/admin.py | LikeLion-CAU-9th/Django-fancy-coder | 53c770f4c1891f9076bed8c89d0b942b77e67667 | [
"MIT"
] | null | null | null | byurak/accounts/admin.py | LikeLion-CAU-9th/Django-fancy-coder | 53c770f4c1891f9076bed8c89d0b942b77e67667 | [
"MIT"
] | 2 | 2021-06-27T16:19:47.000Z | 2021-08-01T16:41:54.000Z | byurak/accounts/admin.py | LikeLion-CAU-9th/Django-fancy-coder | 53c770f4c1891f9076bed8c89d0b942b77e67667 | [
"MIT"
] | 2 | 2021-08-21T13:32:52.000Z | 2021-12-20T10:12:45.000Z | from django.contrib import admin
from accounts.models import User, Profile, UserFollow
admin.site.register(Profile)
admin.site.register(UserFollow)
| 23.153846 | 53 | 0.750831 |
6b3934ad826855dff168d0197fe9075473c458c0 | 20,474 | py | Python | viz_utils/eoa_viz.py | olmozavala/eoas-pyutils | f552a512e250f8aa16e1f3ababf8b4644253918b | [
"MIT"
] | null | null | null | viz_utils/eoa_viz.py | olmozavala/eoas-pyutils | f552a512e250f8aa16e1f3ababf8b4644253918b | [
"MIT"
] | null | null | null | viz_utils/eoa_viz.py | olmozavala/eoas-pyutils | f552a512e250f8aa16e1f3ababf8b4644253918b | [
"MIT"
] | null | null | null | import os
from PIL import Image
import cv2
from os import listdir
from os.path import join
import matplotlib.pyplot as plt
import matplotlib
from matplotlib.colors import LogNorm
from io_utils.io_common import create_folder
from viz_utils.constants import PlotMode, BackgroundType
import pylab
import numpy as np
import cmocean
import shapely
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import cartopy
def select_colormap(field_name):
'''
    Based on the name of the field it chooses a colormap from cmocean
    Args:
        field_name: name of the plotted variable (e.g. 'ssh', 'sst', 'salinity')
    Returns:
        a cmocean colormap matching the field
'''
if np.any([field_name.find(x) != -1 for x in ('ssh', 'srfhgt', 'adt','surf_el')]):
# cmaps_fields.append(cmocean.cm.deep_r)
return cmocean.cm.curl
elif np.any([field_name.find(x) != -1 for x in ('temp', 'sst', 'temperature')]):
return cmocean.cm.thermal
elif np.any([field_name.find(x) != -1 for x in ('vorticity', 'vort')]):
return cmocean.cm.curl
elif np.any([field_name.find(x) != -1 for x in ('salin', 'sss', 'sal')]):
return cmocean.cm.haline
elif field_name.find('error') != -1:
return cmocean.cm.diff
elif field_name.find('binary') != -1:
return cmocean.cm.oxy
elif np.any([field_name.find(x) != -1 for x in ('u_', 'v_', 'u-vel.', 'v-vel.','velocity')]):
return cmocean.cm.speed
| 41.869121 | 163 | 0.575657 |
6b39d4fb43437addee89cd08745a9f78f2bca971 | 1,414 | py | Python | ade20kScripts/setup.py | fcendra/PSPnet18 | bc4f4292f4ddd09dba7076ca0b587c8f60dfa043 | [
"MIT"
] | 1 | 2020-08-16T14:27:31.000Z | 2020-08-16T14:27:31.000Z | ade20kScripts/setup.py | fcendra/PSPNet.pytorch | bc4f4292f4ddd09dba7076ca0b587c8f60dfa043 | [
"MIT"
] | null | null | null | ade20kScripts/setup.py | fcendra/PSPNet.pytorch | bc4f4292f4ddd09dba7076ca0b587c8f60dfa043 | [
"MIT"
] | null | null | null | from os import listdir
from os.path import isfile, join
from path import Path
import numpy as np
import cv2
# Dataset path
target_path = Path('target/')
annotation_images_path = Path('dataset/ade20k/annotations/training/').abspath()
dataset = [ f for f in listdir(annotation_images_path) if isfile(join(annotation_images_path,f))]
images = np.empty(len(dataset), dtype = object)
count = 1
# Iterate all Training Images
for n in range(0, len(dataset)):
# Read image
images[n] = cv2.imread(join(annotation_images_path,dataset[n]))
# Convert it to array
    array = np.asarray(images[n], dtype=np.uint8)  # uint8 so the 255 'ignore' label fits
    # Where the value is less than 1 (unlabeled), set it to 255;
    # otherwise shift the label down by 1.
    arr = np.where(array < 1, 255, array - 1)
    # Save the result under the target folder
if count < 10:
cv2.imwrite(target_path +'ADE_train_0000000'+ str(count) + ".png", arr)
elif count < 100 and count > 9:
cv2.imwrite(target_path +'ADE_train_000000'+ str(count) + ".png", arr)
elif count < 1000 and count > 99:
cv2.imwrite(target_path +'ADE_train_00000'+ str(count) + ".png", arr)
elif count < 10000 and count > 999:
cv2.imwrite(target_path +'ADE_train_0000'+ str(count) + ".png", arr)
else:
cv2.imwrite(target_path +'ADE_train_000'+ str(count) + ".png", arr)
print(str(count) + ".png is printed")
count += 1
| 34.487805 | 97 | 0.65983 |
6b3b2f253f3b9ff3bee85537636c322b9a7a1ad0 | 8,617 | py | Python | src/mesh/azext_mesh/servicefabricmesh/mgmt/servicefabricmesh/models/__init__.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 207 | 2017-11-29T06:59:41.000Z | 2022-03-31T10:00:53.000Z | src/mesh/azext_mesh/servicefabricmesh/mgmt/servicefabricmesh/models/__init__.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 4,061 | 2017-10-27T23:19:56.000Z | 2022-03-31T23:18:30.000Z | src/mesh/azext_mesh/servicefabricmesh/mgmt/servicefabricmesh/models/__init__.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 802 | 2017-10-11T17:36:26.000Z | 2022-03-31T22:24:32.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .available_operation_display import AvailableOperationDisplay
from .error_details_model import ErrorDetailsModel
from .error_error_model import ErrorErrorModel
from .error_model import ErrorModel, ErrorModelException
from .operation_result import OperationResult
from .provisioned_resource_properties import ProvisionedResourceProperties
from .proxy_resource import ProxyResource
from .managed_proxy_resource import ManagedProxyResource
from .resource import Resource
from .tracked_resource import TrackedResource
from .secret_resource_properties import SecretResourceProperties
from .inlined_value_secret_resource_properties import InlinedValueSecretResourceProperties
from .secret_resource_properties_base import SecretResourcePropertiesBase
from .secret_resource_description import SecretResourceDescription
from .secret_value import SecretValue
from .secret_value_properties import SecretValueProperties
from .secret_value_resource_description import SecretValueResourceDescription
from .volume_provider_parameters_azure_file import VolumeProviderParametersAzureFile
from .volume_properties import VolumeProperties
from .volume_reference import VolumeReference
from .application_scoped_volume_creation_parameters import ApplicationScopedVolumeCreationParameters
from .application_scoped_volume import ApplicationScopedVolume
from .application_scoped_volume_creation_parameters_service_fabric_volume_disk import ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk
from .volume_resource_description import VolumeResourceDescription
from .network_resource_properties import NetworkResourceProperties
from .local_network_resource_properties import LocalNetworkResourceProperties
from .endpoint_ref import EndpointRef
from .network_ref import NetworkRef
from .network_resource_properties_base import NetworkResourcePropertiesBase
from .network_resource_description import NetworkResourceDescription
from .gateway_destination import GatewayDestination
from .tcp_config import TcpConfig
from .http_route_match_path import HttpRouteMatchPath
from .http_route_match_header import HttpRouteMatchHeader
from .http_route_match_rule import HttpRouteMatchRule
from .http_route_config import HttpRouteConfig
from .http_host_config import HttpHostConfig
from .http_config import HttpConfig
from .gateway_properties import GatewayProperties
from .gateway_resource_description import GatewayResourceDescription
from .image_registry_credential import ImageRegistryCredential
from .environment_variable import EnvironmentVariable
from .setting import Setting
from .container_label import ContainerLabel
from .endpoint_properties import EndpointProperties
from .resource_requests import ResourceRequests
from .resource_limits import ResourceLimits
from .resource_requirements import ResourceRequirements
from .diagnostics_ref import DiagnosticsRef
from .reliable_collections_ref import ReliableCollectionsRef
from .container_state import ContainerState
from .container_event import ContainerEvent
from .container_instance_view import ContainerInstanceView
from .container_code_package_properties import ContainerCodePackageProperties
from .auto_scaling_trigger import AutoScalingTrigger
from .auto_scaling_mechanism import AutoScalingMechanism
from .auto_scaling_policy import AutoScalingPolicy
from .service_resource_description import ServiceResourceDescription
from .diagnostics_sink_properties import DiagnosticsSinkProperties
from .diagnostics_description import DiagnosticsDescription
from .application_properties import ApplicationProperties
from .azure_internal_monitoring_pipeline_sink_description import AzureInternalMonitoringPipelineSinkDescription
from .application_resource_description import ApplicationResourceDescription
from .add_remove_replica_scaling_mechanism import AddRemoveReplicaScalingMechanism
from .auto_scaling_metric import AutoScalingMetric
from .auto_scaling_resource_metric import AutoScalingResourceMetric
from .service_properties import ServiceProperties
from .service_replica_properties import ServiceReplicaProperties
from .service_replica_description import ServiceReplicaDescription
from .average_load_scaling_trigger import AverageLoadScalingTrigger
from .container_logs import ContainerLogs
from .operation_result_paged import OperationResultPaged
from .secret_resource_description_paged import SecretResourceDescriptionPaged
from .secret_value_resource_description_paged import SecretValueResourceDescriptionPaged
from .volume_resource_description_paged import VolumeResourceDescriptionPaged
from .network_resource_description_paged import NetworkResourceDescriptionPaged
from .gateway_resource_description_paged import GatewayResourceDescriptionPaged
from .application_resource_description_paged import ApplicationResourceDescriptionPaged
from .service_resource_description_paged import ServiceResourceDescriptionPaged
from .service_replica_description_paged import ServiceReplicaDescriptionPaged
from .service_fabric_mesh_management_client_enums import (
ResourceStatus,
HealthState,
SecretKind,
VolumeProvider,
SizeTypes,
ApplicationScopedVolumeKind,
NetworkKind,
HeaderMatchType,
OperatingSystemType,
DiagnosticsSinkKind,
AutoScalingMechanismKind,
AutoScalingMetricKind,
AutoScalingResourceMetricName,
AutoScalingTriggerKind,
)
__all__ = [
'AvailableOperationDisplay',
'ErrorDetailsModel',
'ErrorErrorModel',
'ErrorModel', 'ErrorModelException',
'OperationResult',
'ProvisionedResourceProperties',
'ProxyResource',
'ManagedProxyResource',
'Resource',
'TrackedResource',
'SecretResourceProperties',
'InlinedValueSecretResourceProperties',
'SecretResourcePropertiesBase',
'SecretResourceDescription',
'SecretValue',
'SecretValueProperties',
'SecretValueResourceDescription',
'VolumeProviderParametersAzureFile',
'VolumeProperties',
'VolumeReference',
'ApplicationScopedVolumeCreationParameters',
'ApplicationScopedVolume',
'ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk',
'VolumeResourceDescription',
'NetworkResourceProperties',
'LocalNetworkResourceProperties',
'EndpointRef',
'NetworkRef',
'NetworkResourcePropertiesBase',
'NetworkResourceDescription',
'GatewayDestination',
'TcpConfig',
'HttpRouteMatchPath',
'HttpRouteMatchHeader',
'HttpRouteMatchRule',
'HttpRouteConfig',
'HttpHostConfig',
'HttpConfig',
'GatewayProperties',
'GatewayResourceDescription',
'ImageRegistryCredential',
'EnvironmentVariable',
'Setting',
'ContainerLabel',
'EndpointProperties',
'ResourceRequests',
'ResourceLimits',
'ResourceRequirements',
'DiagnosticsRef',
'ReliableCollectionsRef',
'ContainerState',
'ContainerEvent',
'ContainerInstanceView',
'ContainerCodePackageProperties',
'AutoScalingTrigger',
'AutoScalingMechanism',
'AutoScalingPolicy',
'ServiceResourceDescription',
'DiagnosticsSinkProperties',
'DiagnosticsDescription',
'ApplicationProperties',
'AzureInternalMonitoringPipelineSinkDescription',
'ApplicationResourceDescription',
'AddRemoveReplicaScalingMechanism',
'AutoScalingMetric',
'AutoScalingResourceMetric',
'ServiceProperties',
'ServiceReplicaProperties',
'ServiceReplicaDescription',
'AverageLoadScalingTrigger',
'ContainerLogs',
'OperationResultPaged',
'SecretResourceDescriptionPaged',
'SecretValueResourceDescriptionPaged',
'VolumeResourceDescriptionPaged',
'NetworkResourceDescriptionPaged',
'GatewayResourceDescriptionPaged',
'ApplicationResourceDescriptionPaged',
'ServiceResourceDescriptionPaged',
'ServiceReplicaDescriptionPaged',
'ResourceStatus',
'HealthState',
'SecretKind',
'VolumeProvider',
'SizeTypes',
'ApplicationScopedVolumeKind',
'NetworkKind',
'HeaderMatchType',
'OperatingSystemType',
'DiagnosticsSinkKind',
'AutoScalingMechanismKind',
'AutoScalingMetricKind',
'AutoScalingResourceMetricName',
'AutoScalingTriggerKind',
]
| 42.034146 | 150 | 0.82755 |
6b3bed4772887b4ca3b8868f07f00b80ff44103a | 1,503 | py | Python | Core/managers/InputPeripherals.py | Scoppio/Rogue-EVE | a46f1faa9c7835e8c5838f6270fb5d75b349936b | [
"MIT"
] | 2 | 2016-11-07T23:43:17.000Z | 2016-11-08T21:49:57.000Z | Core/managers/InputPeripherals.py | Scoppio/Rogue-EVE | a46f1faa9c7835e8c5838f6270fb5d75b349936b | [
"MIT"
] | null | null | null | Core/managers/InputPeripherals.py | Scoppio/Rogue-EVE | a46f1faa9c7835e8c5838f6270fb5d75b349936b | [
"MIT"
] | null | null | null | import logging
from models.GenericObjects import Vector2
logger = logging.getLogger('Rogue-EVE')
| 33.4 | 109 | 0.630739 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.