Dataset schema (one row per source file; ranges are the min to max observed values; ⌀ marks nullable fields):

| column | type | range / values |
|---|---|---|
| hexsha | stringlengths | 40 to 40 |
| size | int64 | 5 to 2.06M |
| ext | stringclasses | 11 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 3 to 251 |
| max_stars_repo_name | stringlengths | 4 to 130 |
| max_stars_repo_head_hexsha | stringlengths | 40 to 78 |
| max_stars_repo_licenses | listlengths | 1 to 10 |
| max_stars_count | int64 | 1 to 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24 to 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24 to 24 ⌀ |
| max_issues_repo_path | stringlengths | 3 to 251 |
| max_issues_repo_name | stringlengths | 4 to 130 |
| max_issues_repo_head_hexsha | stringlengths | 40 to 78 |
| max_issues_repo_licenses | listlengths | 1 to 10 |
| max_issues_count | int64 | 1 to 116k ⌀ |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24 to 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24 to 24 ⌀ |
| max_forks_repo_path | stringlengths | 3 to 251 |
| max_forks_repo_name | stringlengths | 4 to 130 |
| max_forks_repo_head_hexsha | stringlengths | 40 to 78 |
| max_forks_repo_licenses | listlengths | 1 to 10 |
| max_forks_count | int64 | 1 to 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24 to 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24 to 24 ⌀ |
| content | stringlengths | 1 to 1.05M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.04M |
| alphanum_fraction | float64 | 0 to 1 |
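Each record below lists these fields, followed by the (often truncated) file content and per-file line statistics. As a quick illustration of working with this schema, here is a minimal sketch of filtering such an export with pandas; the file name `records.jsonl` and the JSON-lines layout are assumptions, not part of this dump:

```python
import pandas as pd

# Hypothetical export file; this dump does not name its source.
df = pd.read_json("records.jsonl", lines=True)

# Keep small Python files with a healthy alphanumeric ratio.
mask = (df["ext"] == "py") & (df["size"] < 10_000) & (df["alphanum_fraction"] > 0.6)
print(df.loc[mask, ["hexsha", "max_stars_repo_path", "max_stars_count"]].head())
```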
---
hexsha: b90f54d52259df5370c156bb89c17f8368958017 | size: 1,845 | ext: py | lang: Python
path: documents/aws-doc-sdk-examples/python/example_code/kda/kda-python-datagenerator-stockticker.py
repo: siagholami/aws-documentation @ 2d06ee9011f3192b2ff38c09f04e01f1ea9e0191 | licenses: ["CC-BY-4.0"]
stars: 5 (2021-08-13T09:20:58.000Z to 2021-12-16T22:13:54.000Z) | issues: null | forks: null
content:
# snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
# snippet-sourcedescription:[kda-python-datagenerator-stockticker.py demonstrates how to generate sample data for Amazon Kinesis Data Analytics SQL applications.]
# snippet-service:[kinesisanalytics]
# snippet-keyword:[Python]
# snippet-sourcesyntax:[python]
# snippet-keyword:[Amazon Kinesis Data Analytics]
# snippet-keyword:[Code Sample]
# snippet-sourcetype:[full-example]
# snippet-sourcedate:[2019-01-29]
# snippet-sourceauthor:[fletpatr (AWS)]
# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
# snippet-start:[kinesisanalytics.python.datagenerator.stockticker]
import json
import boto3
import random
import datetime
kinesis = boto3.client('kinesis')
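# NOTE: the dataset viewer truncated this file; the original defines getReferrer()
# before the loop below. A minimal hedged reconstruction so the sample runs
# (field names are assumptions based on the snippet description, not verbatim):
def getReferrer():
    data = {}
    data['EVENT_TIME'] = datetime.datetime.now().isoformat()
    data['TICKER'] = random.choice(['AAPL', 'AMZN', 'MSFT', 'INTC', 'TBV'])
    data['PRICE'] = round(random.random() * 100, 2)
    return data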
while True:
data = json.dumps(getReferrer())
print(data)
kinesis.put_record(
StreamName="ExampleInputStream",
Data=data,
PartitionKey="partitionkey")
# snippet-end:[kinesisanalytics.python.datagenerator.stockticker]
avg_line_length: 34.811321 | max_line_length: 162 | alphanum_fraction: 0.720325

---
hexsha: b90fbfa2a7bb6e18e5af7e82345d7b5cf393db62 | size: 2,347 | ext: py | lang: Python
path: backend/app.py
repo: alexespejo/project-argus @ 53a6a8b1790906044bffbd2db156322938b62da9 | licenses: ["MIT"]
stars: 1 (2022-03-21T02:13:25.000Z) | issues: null | forks: null
content:
import face_recognition
from flask import Flask, request, redirect, Response
import camera
import firestore as db
# You can change this to any folder on your system
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}
app = Flask(__name__)
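# NOTE: the route handlers were truncated from this dump. A hypothetical minimal
# endpoint (not from the original project) so the app serves something:
@app.route('/health')
def health():
    return Response('ok', status=200)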
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5001, debug=True)
avg_line_length: 26.370787 | max_line_length: 94 | alphanum_fraction: 0.672348

---
hexsha: f8d46f993d25bd7f9f34660f23bf18928f5a3963 | size: 5,672 | ext: py | lang: Python
path: module/classification_package/src/utils.py
repo: fishial/Object-Detection-Model @ 4792f65ea785156a8e240d9cdbbc0c9d013ea0bb | licenses: ["CC0-1.0"]
stars: 1 (2022-01-03T14:00:17.000Z) | issues: null | forks: 1 (2021-12-21T09:50:53.000Z)
content:
import numpy as np
import logging
import numbers
import torch
import math
import json
import sys
from torch.optim.lr_scheduler import LambdaLR
from torchvision.transforms.functional import pad
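# Computes (left, top, right, bottom) padding that centers an image inside a
# square of side max(w, h); the 0.5 adjustments split an odd deficit so that
# left+right and top+bottom each sum exactly to the required padding.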
def get_padding(image):
w, h = image.size
max_wh = np.max([w, h])
h_padding = (max_wh - w) / 2
v_padding = (max_wh - h) / 2
l_pad = h_padding if h_padding % 1 == 0 else h_padding + 0.5
t_pad = v_padding if v_padding % 1 == 0 else v_padding + 0.5
r_pad = h_padding if h_padding % 1 == 0 else h_padding - 0.5
b_pad = v_padding if v_padding % 1 == 0 else v_padding - 0.5
padding = (int(l_pad), int(t_pad), int(r_pad), int(b_pad))
return padding
def adjust_learning_rate(optimizer, epoch, lr):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = lr * (0.1 ** (epoch // 30))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
avg_line_length: 33.761905 | max_line_length: 117 | alphanum_fraction: 0.653738

---
hexsha: f8d470d1980749c03e842d69c111ae8c0604cde9 | size: 992 | ext: py | lang: Python
path: tests/pylint_plugins/test_assert_raises_without_msg.py
repo: L-Net-1992/mlflow @ a90574dbb730935c815ff41a0660b9a823b81630 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
content:
import pytest
from tests.pylint_plugins.utils import create_message, extract_node, skip_if_pylint_unavailable
pytestmark = skip_if_pylint_unavailable()
avg_line_length: 30.060606 | max_line_length: 95 | alphanum_fraction: 0.768145

---
hexsha: f8d49b043794456e8669c31d21ba4a68846ab71c | size: 5,088 | ext: py | lang: Python
path: SVassembly/plot_bcs_across_bkpts.py
repo: AV321/SVPackage @ c9c625af7f5047ddb43ae79f8beb2ce9aadf7697 | licenses: ["MIT"]
stars: null | issues: null | forks: 1 (2019-01-22T19:16:24.000Z)
content:
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.colors
import csv
from scipy.stats import mode
import math as m
import os
import collections
#set working directory
#os.chdir("/mnt/ix1/Projects/M002_131217_gastric/P00526/P00526_WG10_150722_gastric/A20_170516_hmw_maps/metr")
#bkpt_name = "1"
#example: plot_bcs_bkpt("1", "/mnt/ix1/Projects/M002_131217_gastric/P00526/P00526_WG10_150722_gastric/A20_170516_hmw_maps/metr", "/mnt/ix1/Projects/M002_131217_gastric/P00526/P00526_WG10_150722_gastric/A20_170516_hmw_maps/metr")
avg_line_length: 36.085106 | max_line_length: 228 | alphanum_fraction: 0.605149

---
hexsha: f8d6b09688dbea2ed0259d01f1aa0504d9acbfdc | size: 821 | ext: py | lang: Python
path: bites/bite029.py
repo: ChidinmaKO/Chobe-bitesofpy @ 2f933e6c8877a37d1ce7ef54ea22169fc67417d3 | licenses: ["MIT"]
stars: null | issues: null | forks: 1 (2019-07-16T19:12:52.000Z)
content:
# tests
avg_line_length: 30.407407 | max_line_length: 74 | alphanum_fraction: 0.478685

---
hexsha: f8d75cfce0f3dc1a5df25624c4dcbf0a3624f6c0 | size: 2,917 | ext: py | lang: Python
path: language-detection-webapp/blueprints/langid.py
repo: derlin/SwigSpot_Schwyzertuutsch-Spotting @ f38c8243ff34c6e512cadab5e4f51b08dacc16c6 | licenses: ["Apache-2.0"]
stars: 6 (2018-06-17T07:14:32.000Z to 2020-03-02T15:28:25.000Z) | issues: 1 (2021-03-31T18:42:26.000Z) | forks: 1 (2019-04-16T09:18:08.000Z)
content:
import logging
from flask import Blueprint
from flask import Flask, render_template, request, flash
from flask_wtf import FlaskForm
from wtforms import StringField, validators, SelectField, BooleanField
from wtforms.fields.html5 import IntegerRangeField
from wtforms.widgets import TextArea
import langid
from utils.utils import templated
blueprint_langid = Blueprint('langid', __name__)
avg_line_length: 29.765306 | max_line_length: 106 | alphanum_fraction: 0.65204

---
hexsha: f8d7e5ead057431bae183bbe4b52bcbc1bde7f4d | size: 962 | ext: py | lang: Python
path: var/spack/repos/builtin/packages/r-xts/package.py
repo: kehw/spack @ 4f49b1a9301447a8cf880c99820cad65e5c2d7e3 | licenses: ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"]
stars: 2 (2020-09-10T22:50:08.000Z to 2021-01-12T22:18:54.000Z) | issues: 14 (2021-07-20T01:04:53.000Z to 2022-03-02T01:08:36.000Z) | forks: 1 (2021-05-06T00:17:46.000Z)
content:
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
avg_line_length: 41.826087 | max_line_length: 96 | alphanum_fraction: 0.748441

---
hexsha: f8d9062a56a02a0e0c258c3b8d23088b9caa04a9 | size: 11,421 | ext: py | lang: Python
path: sandbox/lib/jumpscale/Jumpscale/core/BASECLASSES/JSConfigsBCDB.py
repo: threefoldtech/threebot_prebuilt @ 1f0e1c65c14cef079cd80f73927d7c8318755c48 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
content:
# Copyright (C) July 2018: TF TECH NV in Belgium see https://www.threefold.tech/
# In case TF TECH NV ceases to exist (e.g. because of bankruptcy)
# then Incubaid NV also in Belgium will get the Copyright & Authorship for all changes made since July 2018
# and the license will automatically become Apache v2 for all code related to Jumpscale & DigitalMe
# This file is part of jumpscale at <https://github.com/threefoldtech>.
# jumpscale is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# jumpscale is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License v3 for more details.
#
# You should have received a copy of the GNU General Public License
# along with jumpscale or jumpscale derived works. If not, see <http://www.gnu.org/licenses/>.
# LICENSE END
from Jumpscale import j
from .JSConfigBCDBBase import JSConfigBCDBBase
avg_line_length: 34.609091 | max_line_length: 175 | alphanum_fraction: 0.571228

---
hexsha: f8da2f02f4840468e37f0eba92152ef522fab6ae | size: 2,589 | ext: py | lang: Python
path: source/tree.py
repo: holderekt/regression-tree @ 130fe07262faea8681159092718310d9aefe9889 | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
import utils as utl
import error_measures as err
# Regression Tree Node
# Regression Tree
avg_line_length: 29.758621 | max_line_length: 93 | alphanum_fraction: 0.588644

---
hexsha: f8dde6ce7a9ea138ac9fe3fc5e28f791075d3425 | size: 454 | ext: py | lang: Python
path: src/site/config.py
repo: ninaamorim/sentiment-analysis-2018-president-election @ a5c12f1b659186edbc2dfa916bc82a2cfa2dd67f | licenses: ["MIT"]
stars: 39 (2018-09-05T14:42:05.000Z to 2021-09-24T20:21:56.000Z) | issues: null | forks: 11 (2018-12-07T19:43:44.000Z to 2021-05-21T21:54:43.000Z)
content:
from starlette.applications import Starlette
from starlette.middleware.gzip import GZipMiddleware
from starlette.middleware.cors import CORSMiddleware
from starlette.staticfiles import StaticFiles
app = Starlette(debug=False, template_directory='src/site/templates')
app.add_middleware(GZipMiddleware, minimum_size=500)
app.add_middleware(CORSMiddleware, allow_origins=['*'])
app.mount('/static', StaticFiles(directory='src/site/media'), name='static')
avg_line_length: 45.4 | max_line_length: 76 | alphanum_fraction: 0.830396

---
hexsha: f8de8fc01b4a4af13fb95b42532f7a7fe7198cd6 | size: 225 | ext: py | lang: Python
path: loadbalanceRL/lib/__init__.py
repo: fqzhou/LoadBalanceControl-RL @ 689eec3b3b27e121aa45d2793e411f1863f6fc0b | licenses: ["MIT"]
stars: 11 (2018-10-29T06:50:43.000Z to 2022-03-28T14:26:09.000Z) | issues: 1 (2022-03-01T13:46:25.000Z) | forks: 6 (2019-02-05T20:01:53.000Z to 2020-09-04T12:30:00.000Z)
content:
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Contains core logic for Rainman2
"""
__author__ = 'Ari Saha ([email protected]), Mingyang Liu([email protected])'
__date__ = 'Wednesday, February 14th 2018, 11:42:09 am'
avg_line_length: 20.454545 | max_line_length: 76 | alphanum_fraction: 0.68

---
hexsha: f8dfe184dbac3633e171f2ced9f8b35d7607d947 | size: 717 | ext: py | lang: Python
path: openff/bespokefit/__init__.py
repo: openforcefield/bespoke-f @ 27b072bd09610dc8209429118d739e1f453edd61 | licenses: ["MIT"]
stars: 12 (2020-08-28T20:49:00.000Z to 2021-11-17T08:50:32.000Z) | issues: 95 (2020-02-19T18:40:54.000Z to 2021-12-02T10:52:23.000Z) | forks: 3 (2021-04-01T04:22:49.000Z to 2021-04-13T03:19:10.000Z)
content:
"""
BespokeFit
Creating bespoke parameters for individual molecules.
"""
import logging
import sys
from ._version import get_versions
versions = get_versions()
__version__ = versions["version"]
__git_revision__ = versions["full-revisionid"]
del get_versions, versions
# Silence verbose messages when running the CLI otherwise you can't read the output
# without seeing tens of 'Unable to load AmberTools' or don't import simtk warnings...
if sys.argv[0].endswith("openff-bespoke"):
    from openff.bespokefit.utilities.logging import DeprecationWarningFilter

    logging.getLogger("openff.toolkit").setLevel(logging.ERROR)
    logging.getLogger().addFilter(DeprecationWarningFilter())
avg_line_length: 28.68 | max_line_length: 86 | alphanum_fraction: 0.781032

---
hexsha: f8e0235b8205933db406d18f8b9437b0dca33a40 | size: 1,810 | ext: py | lang: Python
path: TRANSFORM/Resources/python/2006LUT_to_SDF.py
repo: greenwoodms/TRANSFORM-Library @ dc152d4f0298d3f18385f2ea33645d87d7812915 | licenses: ["Apache-2.0"]
stars: 29 (2018-04-24T17:06:19.000Z to 2021-11-21T05:17:28.000Z) | issues: 13 (2018-04-05T08:34:27.000Z to 2021-10-04T14:24:41.000Z) | forks: 17 (2018-08-06T22:18:01.000Z to 2022-01-29T21:38:17.000Z)
content:
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 03 11:06:37 2018
@author: vmg
"""
import sdf
import numpy as np
# Load 2006 LUT for interpolation
# 2006 Groeneveld Look-Up Table as presented in
# "2006 CHF Look-Up Table", Nuclear Engineering and Design 237, pp. 190-1922.
# This file requires the file 2006LUTdata.txt
# Pressure range [MPa] from 2006 LUT, convert to [Pa]
P = np.array((0.10,0.30,0.50,1.0,2.0,3.0,5.0,7.0,10.0,12.0,14.0,16.0,18.0,20.0,21.0))*1e6
# Mass Flux range [kg/m^2-s] from 2006 .LUT.
G = np.array((0.,50.,100.,300.,500.,750.,1000.,1500.,2000.,2500.,3000.,3500.,4000.,4500.,5000.,5500.,6000.,6500.,7000.,7500.,8000.))
# Quality range from 2006 LUT
x = np.array((-0.50,-0.40,-0.30,-0.20,-0.15,-0.10,-0.05,0.00,0.05,0.10,0.15,0.20,0.25,0.30,0.35,0.40,0.45,0.50,0.60,0.70,0.80,0.90,1.00))
# Critical heat flux [kW/m^2] from 2006 LUT, convert to [W/m^2]
q_raw=np.loadtxt('../Data/2006LUTdata.txt')*1e3
# Convert the imported array into a (MxNxQ) where:
# M is number of mass flux divisions
# N is number of quality divisions
# Q is number of pressure divisions
lenG = len(G)
lenx = len(x)
lenP = len(P)
q = np.zeros((lenG,lenx,lenP))
for i in range(lenG):  # the original used Python 2's xrange; range works in Python 3
    for j in range(lenx):
        for k in range(lenP):
q[i,j,k] = q_raw[i + k*lenG,j]
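# The triple loop above amounts to a single vectorized reshape; noted here for
# clarity (this equivalence is an addition, not in the original file):
#   q = q_raw.reshape(lenP, lenG, lenx).transpose(1, 2, 0)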
# Create the datasets:
ds_G = sdf.Dataset('G', data=G, unit='kg/(m2.s)', is_scale=True, display_name='Mass Flux')
ds_x = sdf.Dataset('x', data=x, unit='1', is_scale=True, display_name='Quality')
ds_P = sdf.Dataset('P', data=P, unit='Pa', is_scale=True, display_name='Pressure')
ds_q = sdf.Dataset('q', data=q, unit='W/m2', scales=[ds_G,ds_x,ds_P])
# Create the root group and write the file:
g = sdf.Group('/', comment='2006 CHF LUT', datasets=[ds_G,ds_x,ds_P,ds_q])
sdf.save('../Data/2006LUT.sdf', g)
avg_line_length: 36.2 | max_line_length: 137 | alphanum_fraction: 0.651381

---
hexsha: f8e07bde7c24919fc5325f0451f8753ee945632d | size: 2,836 | ext: py | lang: Python
path: test/asserting/policy.py
repo: tmsanrinsha/vint @ 8c34196252b43d7361d0f58cb78cf2d3e4e4fbd0 | licenses: ["MIT"]
stars: 2 (2021-06-15T15:07:28.000Z to 2021-10-05T12:23:23.000Z) | issues: null | forks: null
content:
import unittest
from pathlib import Path
from pprint import pprint
from vint.compat.itertools import zip_longest
from vint.linting.linter import Linter
from vint.linting.config.config_default_source import ConfigDefaultSource
avg_line_length: 31.865169 | max_line_length: 97 | alphanum_fraction: 0.685825

---
hexsha: f8e0ad168c40024827eba4f57a5381ccd338e24b | size: 39,902 | ext: py | lang: Python
path: dataprofiler/labelers/character_level_cnn_model.py
repo: gliptak/DataProfiler @ 37ffbf43652246ef27e070df7ff0d9f1b9529162 | licenses: ["Apache-2.0"]
stars: null | issues: 1 (2021-11-20T01:08:12.000Z) | forks: null
content:
import copy
import json
import logging
import os
import sys
import time
from collections import defaultdict
import numpy as np
import tensorflow as tf
from sklearn import decomposition
from .. import dp_logging
from . import labeler_utils
from .base_model import AutoSubRegistrationMeta, BaseModel, BaseTrainableModel
_file_dir = os.path.dirname(os.path.abspath(__file__))
logger = dp_logging.get_child_logger(__name__)
tf_logger = logging.getLogger('tensorflow')
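# NOTE: the filter class below was truncated from this dump. A minimal hedged
# reconstruction (assumed, not verbatim) so the addFilter() call has a target;
# it drops TensorFlow's noisy v1-resource-variable warning:
class NoV1ResourceMessageFilter(logging.Filter):
    def filter(self, record):
        msg = ('is a problem, consider rebuilding the SavedModel after '
               'running tf.compat.v1.enable_resource_variables()')
        return msg not in record.getMessage()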
tf_logger.addFilter(NoV1ResourceMessageFilter())
def build_embd_dictionary(filename):
"""
Returns a numpy embedding dictionary from embed file with GloVe-like format
:param filename: Path to the embed file for loading
:type filename: str
"""
embd_table = dict()
with open(filename, 'r') as embds:
for line in embds:
line = line.strip().split()
embd_table[line[0]] = np.asarray(line[1:])
return embd_table
def create_glove_char(n_dims, source_file=None):
"""
Embeds GloVe chars embeddings from source file to n_dims principal
components in a new file
:param n_dims: Final number of principal component dims of the embeddings
:type n_dims: int
:param source_file: Location of original embeddings to factor down
:type source_file: str
"""
if source_file is None:
source_file = os.path.join(_file_dir,
"embeddings/glove.840B.300d-char.txt")
# get embedding table first and vectors as array
embd_table = build_embd_dictionary(source_file)
embd_words, embd_matrix = [
np.asarray(ls) if i > 0 else list(ls)
for i, ls in enumerate(zip(*embd_table.items()))]
# get PCA embedder
pca = decomposition.PCA(n_components=n_dims)
reduced_embds = pca.fit_transform(embd_matrix)
# write to file
dir_name = os.path.dirname(source_file)
embd_file_name = os.path.join(dir_name,
'glove-reduced-{}D.txt'.format(n_dims))
with open(embd_file_name, 'w') as file:
for word, embd in zip(embd_words, reduced_embds):
file.write(word + " " + ' '.join(str(num) for num in embd) + "\n")
class CharacterLevelCnnModel(BaseTrainableModel,
metaclass=AutoSubRegistrationMeta):
# boolean if the label mapping requires the mapping for index 0 reserved
requires_zero_mapping = True
def __init__(self, label_mapping=None, parameters=None):
"""
CNN Model Initializer. initialize epoch_id
:param label_mapping: maps labels to their encoded integers
:type label_mapping: dict
:param parameters: Contains all the appropriate parameters for the
model. Must contain num_labels. Other possible parameters are:
max_length, max_char_encoding_id, dim_embed, size_fc
dropout, size_conv, num_fil, optimizer, default_label
:type parameters: dict
:return: None
"""
# parameter initialization
if not parameters:
parameters = {}
parameters.setdefault('max_length', 3400)
parameters.setdefault('max_char_encoding_id', 127)
parameters.setdefault('dim_embed', 64)
parameters.setdefault('size_fc', [96, 96])
parameters.setdefault('dropout', 0.073)
parameters.setdefault('size_conv', 13)
parameters.setdefault('default_label', "UNKNOWN")
parameters.setdefault('num_fil', [48 for _ in range(4)])
parameters['pad_label'] = 'PAD'
self._epoch_id = 0
# reconstruct flags for model
self._model_num_labels = 0
self._model_default_ind = -1
BaseModel.__init__(self, label_mapping, parameters)
def __eq__(self, other):
"""
Checks if two models are equal with one another, may only check
important variables, i.e. may not check model itself.
:param self: a model
:param other: a model
:type self: BaseModel
:type other: BaseModel
:return: Whether or not self and other are equal
:rtype: bool
"""
if self._parameters != other._parameters \
or self._label_mapping != other._label_mapping:
return False
return True
def _validate_parameters(self, parameters):
"""
Validate the parameters sent in. Raise error if invalid parameters are
present.
:param parameters: parameter dict containing the following parameters:
max_length: Maximum char length in a sample
max_char_encoding_id: Maximum integer value for encoding the input
dim_embed: Number of embedded dimensions
size_fc: Size of each fully connected layers
dropout: Ratio of dropout in the model
size_conv: Convolution kernel size
default_label: Key for label_mapping that is the default label
pad_label: Key for entities_dict that is the pad label
num_fil: Number of filters in each convolution layer
:type parameters: dict
:return: None
"""
errors = []
list_of_necessary_params = ['max_length', 'max_char_encoding_id',
'dim_embed', 'size_fc', 'dropout',
'size_conv', 'default_label', 'pad_label',
'num_fil']
# Make sure the necessary parameters are present and valid.
for param in parameters:
if param in ['max_length', 'max_char_encoding_id', 'dim_embed',
'size_conv']:
if not isinstance(parameters[param], (int, float)) \
or parameters[param] < 0:
errors.append(param + " must be a valid integer or float "
"greater than 0.")
elif param == 'dropout':
if not isinstance(parameters[param], (int, float)) \
or parameters[param] < 0 or parameters[param] > 1:
errors.append(param + " must be a valid integer or float "
"from 0 to 1.")
elif param == 'size_fc' or param == 'num_fil':
if not isinstance(parameters[param], list) \
or len(parameters[param]) == 0:
errors.append(param + " must be a non-empty list of "
"integers.")
else:
for item in parameters[param]:
if not isinstance(item, int):
errors.append(param + " must be a non-empty "
"list of integers.")
break
elif param == 'default_label':
if not isinstance(parameters[param], str):
error = str(param) + " must be a string."
errors.append(error)
# Error if there are extra parameters thrown in
for param in parameters:
if param not in list_of_necessary_params:
errors.append(param + " is not an accepted parameter.")
if errors:
raise ValueError('\n'.join(errors))
def set_label_mapping(self, label_mapping):
"""
Sets the labels for the model
:param label_mapping: label mapping of the model
:type label_mapping: dict
:return: None
"""
if not isinstance(label_mapping, (list, dict)):
raise TypeError("Labels must either be a non-empty encoding dict "
"which maps labels to index encodings or a list.")
label_mapping = copy.deepcopy(label_mapping)
if 'PAD' not in label_mapping:
if isinstance(label_mapping, list): # if list missing PAD
label_mapping = ['PAD'] + label_mapping
elif 0 not in label_mapping.values(): # if dict missing PAD and 0
label_mapping.update({'PAD': 0})
if (isinstance(label_mapping, dict)
and label_mapping.get('PAD', None) != 0): # dict with bad PAD
raise ValueError("`PAD` must map to index zero.")
if self._parameters['default_label'] not in label_mapping:
raise ValueError("The `default_label` of {} must exist in the "
"label mapping.".format(
self._parameters['default_label']))
super().set_label_mapping(label_mapping)
def _need_to_reconstruct_model(self):
"""
Determines whether or not the model needs to be reconstructed.
:return: bool of whether or not the model needs to reconstruct.
"""
if not self._model:
return False
default_ind = self.label_mapping[self._parameters['default_label']]
return self.num_labels != self._model_num_labels or \
default_ind != self._model_default_ind
def save_to_disk(self, dirpath):
"""
Saves whole model to disk with weights
:param dirpath: directory path where you want to save the model to
:type dirpath: str
:return: None
"""
if not self._model:
self._construct_model()
elif self._need_to_reconstruct_model():
self._reconstruct_model()
model_param_dirpath = os.path.join(dirpath, "model_parameters.json")
with open(model_param_dirpath, 'w') as fp:
json.dump(self._parameters, fp)
labels_dirpath = os.path.join(dirpath, "label_mapping.json")
with open(labels_dirpath, 'w') as fp:
json.dump(self.label_mapping, fp)
self._model.save(os.path.join(dirpath))
def _construct_model(self):
"""
Model constructor for the data labeler. This also serves as a weight
reset.
:return: None
"""
num_labels = self.num_labels
default_ind = self.label_mapping[self._parameters['default_label']]
# Reset model
tf.keras.backend.clear_session()
# generate glove embedding
create_glove_char(self._parameters['dim_embed'])
# generate model
self._model = tf.keras.models.Sequential()
# default parameters
max_length = self._parameters['max_length']
max_char_encoding_id = self._parameters['max_char_encoding_id']
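        # NOTE (assumption): the original defines an `encoding_function` helper
        # around here; it was truncated from this dump. A hypothetical sketch of
        # what the Lambda below needs: strings to fixed-length char-id vectors,
        # with 0 reserved for padding and ids clipped into an out-of-vocabulary
        # bucket at max_char_encoding_id + 1.
        def encoding_function(input_str):
            codes = tf.strings.unicode_decode(
                tf.reshape(input_str, [-1]), 'UTF-8')
            dense = codes.to_tensor(default_value=-1, shape=[None, max_length])
            return tf.clip_by_value(dense + 1, 0, max_char_encoding_id + 1)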
# Encoding layer
self._model.add(tf.keras.layers.Input(shape=(None,), dtype=tf.string))
self._model.add(
tf.keras.layers.Lambda(encoding_function,
output_shape=tuple([max_length])))
# Create a pre-trained weight matrix
# character encoding indices range from 0 to max_char_encoding_id,
# we add one extra index for out-of-vocabulary character
embed_file = os.path.join(
_file_dir, "embeddings/glove-reduced-{}D.txt".format(
self._parameters['dim_embed']))
embedding_matrix = np.zeros((max_char_encoding_id + 2,
self._parameters['dim_embed']))
embedding_dict = build_embd_dictionary(embed_file)
input_shape = tuple([max_length])
# Fill in the weight matrix: let pad and space be 0s
for ascii_num in range(max_char_encoding_id):
if chr(ascii_num) in embedding_dict:
embedding_matrix[ascii_num + 1] = embedding_dict[chr(ascii_num)]
self._model.add(tf.keras.layers.Embedding(
max_char_encoding_id + 2,
self._parameters['dim_embed'],
weights=[embedding_matrix],
input_length=input_shape[0],
trainable=True))
# Add the convolutional layers
for fil in self._parameters['num_fil']:
self._model.add(tf.keras.layers.Conv1D(
filters=fil, kernel_size=self._parameters['size_conv'],
activation='relu', padding='same'))
if self._parameters['dropout']:
self._model.add(
tf.keras.layers.Dropout(self._parameters['dropout']))
# Add batch normalization, set fused = True for compactness
self._model.add(
tf.keras.layers.BatchNormalization(fused=False, scale=True))
# Add the fully connected layers
for size in self._parameters['size_fc']:
self._model.add(
tf.keras.layers.Dense(units=size, activation='relu'))
if self._parameters['dropout']:
self._model.add(
tf.keras.layers.Dropout(self._parameters['dropout']))
# Add the final Softmax layer
self._model.add(
tf.keras.layers.Dense(num_labels, activation='softmax'))
# Output the model into a .pb file for TensorFlow
argmax_layer = tf.keras.backend.argmax(self._model.output)
# Create confidence layers
final_predicted_layer = CharacterLevelCnnModel._argmax_threshold_layer(
num_labels, threshold=0.0, default_ind=default_ind)
argmax_outputs = self._model.outputs + \
[argmax_layer,
final_predicted_layer(argmax_layer, self._model.output)]
self._model = tf.keras.Model(self._model.inputs, argmax_outputs)
# Compile the model
softmax_output_layer_name = self._model.outputs[0].name.split('/')[0]
losses = {softmax_output_layer_name: "categorical_crossentropy"}
# use f1 score metric
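        # NOTE (assumption): `F1Score` is provided by the project's labeler
        # utilities (an implementation equivalent to tensorflow_addons'
        # F1Score); its import was truncated from this dump.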
f1_score_training = F1Score(num_classes=num_labels, average='micro')
metrics = {softmax_output_layer_name: ['acc', f1_score_training]}
self._model.compile(loss=losses,
optimizer="adam",
metrics=metrics)
self._epoch_id = 0
self._model_num_labels = num_labels
self._model_default_ind = default_ind
def reset_weights(self):
"""
Reset the weights of the model.
:return: None
"""
self._construct_model()
def _reconstruct_model(self):
"""
Reconstruct the appropriate layers if the number of number of labels is
altered
:return: None
"""
# Reset model
tf.keras.backend.clear_session()
num_labels = self.num_labels
default_ind = self.label_mapping[self._parameters['default_label']]
# Remove the 3 output layers (dense_2', 'tf_op_layer_ArgMax',
# 'thresh_arg_max_layer')
for _ in range(3):
self._model.layers.pop()
# Add the final Softmax layer to the previous spot
final_softmax_layer = tf.keras.layers.Dense(
num_labels, activation='softmax', name="dense_2")(
self._model.layers[-4].output)
# Output the model into a .pb file for TensorFlow
argmax_layer = tf.keras.backend.argmax(final_softmax_layer)
# Create confidence layers
final_predicted_layer = CharacterLevelCnnModel._argmax_threshold_layer(
num_labels, threshold=0.0, default_ind=default_ind)
argmax_outputs = [final_softmax_layer] + \
[argmax_layer,
final_predicted_layer(argmax_layer,
final_softmax_layer)]
self._model = tf.keras.Model(self._model.inputs, argmax_outputs)
# Compile the model
softmax_output_layer_name = self._model.outputs[0].name.split('/')[0]
losses = {softmax_output_layer_name: "categorical_crossentropy"}
# use f1 score metric
f1_score_training = F1Score(num_classes=num_labels, average='micro')
metrics = {softmax_output_layer_name: ['acc', f1_score_training]}
self._model.compile(loss=losses,
optimizer="adam",
metrics=metrics)
self._epoch_id = 0
self._model_num_labels = num_labels
self._model_default_ind = default_ind
def fit(self, train_data, val_data=None, batch_size=32, label_mapping=None,
reset_weights=False, verbose=True):
"""
Train the current model with the training data and validation data
:param train_data: Training data used to train model
:type train_data: Union[list, np.ndarray]
:param val_data: Validation data used to validate the training
:type val_data: Union[list, np.ndarray]
:param batch_size: Used to determine number of samples in each batch
:type batch_size: int
:param label_mapping: maps labels to their encoded integers
:type label_mapping: Union[dict, None]
:param reset_weights: Flag to determine whether to reset the weights or
not
:type reset_weights: bool
:param verbose: Flag to determine whether to print status or not
:type verbose: bool
:return: None
"""
if label_mapping is not None:
self.set_label_mapping(label_mapping)
if not self._model:
self._construct_model()
else:
if self._need_to_reconstruct_model():
self._reconstruct_model()
if reset_weights:
self.reset_weights()
history = defaultdict()
f1 = None
f1_report = []
self._model.reset_metrics()
softmax_output_layer_name = self._model.outputs[0].name.split('/')[0]
start_time = time.time()
batch_id = 0
for x_train, y_train in train_data:
model_results = self._model.train_on_batch(
x_train, {softmax_output_layer_name: y_train})
sys.stdout.flush()
if verbose:
sys.stdout.write(
"\rEPOCH %d, batch_id %d: loss: %f - acc: %f - "
"f1_score %f" %
(self._epoch_id, batch_id, *model_results[1:]))
batch_id += 1
for i, metric_label in enumerate(self._model.metrics_names):
history[metric_label] = model_results[i]
if val_data:
f1, f1_report = self._validate_training(val_data)
history['f1_report'] = f1_report
val_f1 = f1_report['weighted avg']['f1-score'] \
if f1_report else np.NAN
val_precision = f1_report['weighted avg']['precision'] \
if f1_report else np.NAN
val_recall = f1_report['weighted avg']['recall'] \
if f1_report else np.NAN
epoch_time = time.time() - start_time
logger.info("\rEPOCH %d (%ds), loss: %f - acc: %f - f1_score %f -- "
"val_f1: %f - val_precision: %f - val_recall %f" %
(self._epoch_id, epoch_time, *model_results[1:],
val_f1, val_precision, val_recall))
self._epoch_id += 1
return history, f1, f1_report
def _validate_training(self, val_data, batch_size_test=32,
verbose_log=True, verbose_keras=False):
"""
Validate the model on the test set and return the evaluation metrics.
:param val_data: data generator for the validation
:type val_data: iterator
:param batch_size_test: Number of samples to process in testing
:type batch_size_test: int
:param verbose_log: whether or not to print out scores for training,
etc.
:type verbose_log: bool
:param verbose_keras: whether or not to print out scores for training,
from keras.
:type verbose_keras: bool
return (f1-score, f1 report).
"""
f1 = None
f1_report = None
if val_data is None:
return f1, f1_report
# Predict on the test set
batch_id = 0
y_val_pred = []
y_val_test = []
for x_val, y_val in val_data:
y_val_pred.append(self._model.predict(
x_val, batch_size=batch_size_test, verbose=verbose_keras)[1])
y_val_test.append(np.argmax(y_val, axis=-1))
batch_id += 1
sys.stdout.flush()
if verbose_log:
sys.stdout.write("\rEPOCH %g, validation_batch_id %d" %
(self._epoch_id, batch_id))
tf.keras.backend.set_floatx('float32')
# Clean the predicted entities and the actual entities
f1, f1_report = labeler_utils.evaluate_accuracy(
np.concatenate(y_val_pred, axis=0),
np.concatenate(y_val_test, axis=0),
self.num_labels,
self.reverse_label_mapping,
verbose=verbose_keras)
return f1, f1_report
def predict(self, data, batch_size=32, show_confidences=False,
verbose=True):
"""
Run model and get predictions
:param data: text input
:type data: Union[list, numpy.ndarray]
:param batch_size: number of samples in the batch of data
:type batch_size: int
:param show_confidences: whether user wants prediction confidences
:type show_confidences:
:param verbose: Flag to determine whether to print status or not
:type verbose: bool
:return: char level predictions and confidences
:rtype: dict
"""
if not self._model:
raise ValueError("You are trying to predict without a model. "
"Construct/Load a model before predicting.")
elif self._need_to_reconstruct_model():
raise RuntimeError("The model label mapping definitions have been "
"altered without additional training. Please "
"train the model or reset the label mapping to "
"predict.")
# Pre-allocate space for predictions
confidences = []
sentence_lengths = np.zeros((batch_size,), dtype=int)
predictions = np.zeros((batch_size, self._parameters['max_length']))
if show_confidences:
confidences = np.zeros((batch_size,
self._parameters['max_length'],
self.num_labels))
# Run model with batching
allocation_index = 0
for batch_id, batch_data in enumerate(data):
model_output = self._model(
tf.convert_to_tensor(batch_data)
)
# Count number of samples in batch to prevent array mismatch
num_samples_in_batch = len(batch_data)
allocation_index = batch_id * batch_size
# Double array size
if len(predictions) <= allocation_index:
predictions = np.pad(predictions, ((0, len(predictions)),
(0, 0)), mode='constant')
sentence_lengths = np.pad(
sentence_lengths, pad_width=((0, len(sentence_lengths)),),
mode='constant')
if show_confidences:
confidences = np.pad(confidences,
((0, len(predictions)),
(0, 0), (0, 0)), mode='constant')
if show_confidences:
confidences[allocation_index:allocation_index + num_samples_in_batch] = model_output[0].numpy()
predictions[allocation_index:allocation_index + num_samples_in_batch] = model_output[1].numpy()
sentence_lengths[allocation_index:allocation_index + num_samples_in_batch] = list(map(lambda x: len(x[0]), batch_data))
allocation_index += num_samples_in_batch
# Convert predictions, confidences to lists from numpy
predictions_list = [i for i in range(0, allocation_index)]
confidences_list = None
if show_confidences:
confidences_list = [i for i in range(0, allocation_index)]
# Append slices of predictions to return prediction & confidence matrices
for index, sentence_length \
in enumerate(sentence_lengths[:allocation_index]):
predictions_list[index] = list(predictions[index][:sentence_length])
if show_confidences:
confidences_list[index] = list(confidences[index][:sentence_length])
if show_confidences:
return {'pred': predictions_list, 'conf': confidences_list}
return {'pred': predictions_list}
def details(self):
"""
Prints the relevant details of the model (summary, parameters, label
mapping)
"""
print("\n###### Model Details ######\n")
self._model.summary()
print("\nModel Parameters:")
for key, value in self._parameters.items():
print("{}: {}".format(key, value))
print("\nModel Label Mapping:")
for key, value in self.label_mapping.items():
print("{}: {}".format(key, value))
avg_line_length: 40.38664 | max_line_length: 136 | alphanum_fraction: 0.602276

---
hexsha: f8e1bca5e78231c74ae6a4100aeb7480c5e84ad6 | size: 6,031 | ext: py | lang: Python
path: airflow/contrib/plugins/metastore_browser/main.py
repo (stars/issues): Nipica/airflow @ 211a71f8a6b9d808bd03af84bd77bf8ff0ef247f | repo (forks): shubhamod/airflow @ 04f4622656656d4c55b69d460bbd2ed1379810c4 | licenses: ["Apache-2.0"]
stars: null | issues: 1 (2019-01-14T17:12:47.000Z) | forks: null
content:
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
import json
from flask import Blueprint, request
from flask_admin import BaseView, expose
import pandas as pd
from airflow.hooks.hive_hooks import HiveMetastoreHook, HiveCliHook
from airflow.hooks.mysql_hook import MySqlHook
from airflow.hooks.presto_hook import PrestoHook
from airflow.plugins_manager import AirflowPlugin
from airflow.www import utils as wwwutils
from airflow.www.decorators import gzipped
METASTORE_CONN_ID = 'metastore_default'
METASTORE_MYSQL_CONN_ID = 'metastore_mysql'
PRESTO_CONN_ID = 'presto_default'
HIVE_CLI_CONN_ID = 'hive_default'
DEFAULT_DB = 'default'
DB_WHITELIST = None
DB_BLACKLIST = ['tmp']
TABLE_SELECTOR_LIMIT = 2000
# Keeping pandas from truncating long strings
pd.set_option('display.max_colwidth', -1)
# Creating a flask admin BaseView
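# NOTE: the view class body was truncated from this dump. A hypothetical minimal
# stub (not the original implementation) so the instantiation below binds:
class MetastoreBrowserView(BaseView, wwwutils.DataProfilingMixin):
    @expose('/')
    def index(self):
        return self.render('metastore_browser/dbs.html')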
v = MetastoreBrowserView(category="Plugins", name="Hive Metadata Browser")
# Creating a flask blueprint to intergrate the templates and static folder
bp = Blueprint(
"metastore_browser", __name__,
template_folder='templates',
static_folder='static',
static_url_path='/static/metastore_browser')
# Defining the plugin class
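# NOTE: truncated from this dump; a hypothetical sketch of the registration that
# the comment above announces:
class MetastoreBrowserPlugin(AirflowPlugin):
    name = "metastore_browser"
    flask_blueprints = [bp]
    admin_views = [v]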
avg_line_length: 33.320442 | max_line_length: 75 | alphanum_fraction: 0.62842

---
hexsha: f8e296b5bc6bda6288119a1eb8117102f686848c | size: 12,255 | ext: py | lang: Python
path: app/lib/manage.py
repo: AaronDewes/compose-nonfree @ 82ef3e58019ee03d163dea7aff4d7ed18d884238 | licenses: ["MIT"]
stars: 5 (2021-09-26T18:02:27.000Z to 2022-03-30T10:16:03.000Z) | issues: 5 (2021-09-23T18:57:00.000Z to 2021-11-02T06:47:05.000Z) | forks: 3 (2021-10-01T15:14:09.000Z to 2022-03-30T10:16:06.000Z)
content:
#!/usr/bin/env python3
# SPDX-FileCopyrightText: 2021 Aaron Dewes <[email protected]>
#
# SPDX-License-Identifier: MIT
import stat
import tempfile
import threading
from typing import List
from sys import argv
import os
import requests
import shutil
import json
import yaml
import subprocess
from lib.composegenerator.v0.generate import createComposeConfigFromV0
from lib.composegenerator.v1.generate import createComposeConfigFromV1
from lib.appymlgenerator import convertComposeYMLToAppYML
from lib.validate import findAndValidateApps
from lib.metadata import getAppRegistry, getSimpleAppRegistry
from lib.entropy import deriveEntropy
# For an array of threads, join them and wait for them to finish
# The directory with this script
scriptDir = os.path.dirname(os.path.realpath(__file__))
nodeRoot = os.path.join(scriptDir, "..", "..")
appsDir = os.path.join(nodeRoot, "apps")
appSystemDir = os.path.join(nodeRoot, "app-system")
sourcesList = os.path.join(appSystemDir, "sources.list")
appDataDir = os.path.join(nodeRoot, "app-data")
userFile = os.path.join(nodeRoot, "db", "user.json")
legacyScript = os.path.join(nodeRoot, "scripts", "app")
# Returns a list of every argument after the second one in sys.argv joined into a string by spaces
# Loads an app.yml and converts it to a docker-compose.yml
# Parse the sources.list repo file, which contains a list of sources in the format
# <git-url> <branch>
# For every line, clone the repo to a temporary dir and checkout the branch
# Then, check that repos apps in the temporary dir/apps and for every app,
# overwrite the current app dir with the contents of the temporary dir/apps/app
# Also, keep a list of apps from every repo, a repo later in the file may not overwrite an app from a repo earlier in the file
avg_line_length: 37.941176 | max_line_length: 166 | alphanum_fraction: 0.651652

---
hexsha: f8e31dd1ab5827961bb3c5e7a54cd2196fee2f7f | size: 2,814 | ext: py | lang: Python
path: features/jit-features/query/query.py
repo (stars): YuanruiZJU/SZZ-TSE @ 093506f9019a0d8b412dad4672525f93150ca181 | repo (issues/forks): YanYoungZhao/SZZ-TSE @ 093506f9019a0d8b412dad4672525f93150ca181 | licenses: ["MIT"]
stars: 13 (2019-04-15T12:54:56.000Z to 2022-03-09T02:30:14.000Z) | issues: 1 (2022-01-27T02:33:09.000Z) | forks: 6 (2019-11-04T11:24:13.000Z to 2021-12-16T07:53:18.000Z)
content:
from query.base import BaseQuery
avg_line_length: 34.317073 | max_line_length: 76 | alphanum_fraction: 0.643568

---
hexsha: f8e3234f6fa0a9c3711d4ac7b793885d955f7286 | size: 449 | ext: py | lang: Python
path: example/mappers.py
repo: mikeywaites/flask-arrested @ 6b97ce2ad2765f9acab10f4726e310258aa51de0 | licenses: ["MIT"]
stars: 46 (2016-06-28T10:25:07.000Z to 2019-12-10T20:53:47.000Z) | issues: 4 (2018-02-10T10:53:08.000Z to 2018-11-07T08:11:06.000Z) | forks: 9 (2016-07-20T17:05:46.000Z to 2022-02-15T18:40:17.000Z)
content:
from kim import Mapper, field
from example.models import Planet, Character
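# NOTE: the mapper definitions were truncated from this dump. A hypothetical
# minimal kim mapper sketch (field names assumed, not from the original file):
class PlanetMapper(Mapper):
    __type__ = Planet

    id = field.Integer(read_only=True)
    name = field.String()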
avg_line_length: 19.521739 | max_line_length: 47 | alphanum_fraction: 0.712695

---
hexsha: f8e3680aea79628533b40e4e3bc074491f7796fd | size: 3,660 | ext: py | lang: Python
path: collections/ansible_collections/community/general/plugins/connection/saltstack.py
repo: escalate/ansible-gitops-example-repository @ f7f7a9fcd09abd982f5fcd3bd196809a6c4c2f08 | licenses: ["MIT"]
stars: 1 (2021-07-16T19:51:04.000Z) | issues: null | forks: null
content:
# Based on local.py (c) 2012, Michael DeHaan <[email protected]>
# Based on chroot.py (c) 2013, Maykel Moya <[email protected]>
# Based on func.py
# (c) 2014, Michael Scherer <[email protected]>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
author: Michael Scherer (@mscherer) <[email protected]>
name: saltstack
short_description: Allow ansible to piggyback on salt minions
description:
- This allows you to use existing Saltstack infrastructure to connect to targets.
'''
import os
import base64
from ansible import errors
from ansible.plugins.connection import ConnectionBase
HAVE_SALTSTACK = False
try:
import salt.client as sc
HAVE_SALTSTACK = True
except ImportError:
pass
avg_line_length: 36.237624 | max_line_length: 133 | alphanum_fraction: 0.668033

---
hexsha: f8e37ad4239180526865365831c9ddf7d0371aa5 | size: 5,074 | ext: py | lang: Python
path: create/views.py
repo: normaldotcom/webvirtmgr @ 8d822cb94105abf82eb0ff6651a36c43b0911d2a | licenses: ["Apache-2.0"]
stars: 1 (2019-07-16T20:32:44.000Z) | issues: null | forks: null
content:
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from django.template import RequestContext
from django.utils.translation import ugettext_lazy as _
from servers.models import Compute
from create.models import Flavor
from instance.models import Instance
from libvirt import libvirtError
from vrtManager.create import wvmCreate
from vrtManager import util
from create.forms import FlavorAddForm, NewVMForm
def create(request, host_id):
"""
Create new instance.
"""
if not request.user.is_authenticated():
return HttpResponseRedirect('/login')
errors = []
compute = Compute.objects.get(id=host_id)
flavors = Flavor.objects.filter().order_by('id')
try:
conn = wvmCreate(compute.hostname,
compute.login,
compute.password,
compute.type)
storages = sorted(conn.get_storages())
networks = sorted(conn.get_networks())
instances = conn.get_instances()
get_images = sorted(conn.get_storages_images())
mac_auto = util.randomMAC()
except libvirtError as err:
errors.append(err.message)
if not storages:
msg = _("You haven't defined have any storage pools")
errors.append(msg)
if not networks:
msg = _("You haven't defined have any network pools")
errors.append(msg)
if request.method == 'POST':
if 'create_flavor' in request.POST:
form = FlavorAddForm(request.POST)
if form.is_valid():
data = form.cleaned_data
create_flavor = Flavor(label=data['label'],
vcpu=data['vcpu'],
memory=data['memory'],
disk=data['disk'])
create_flavor.save()
return HttpResponseRedirect(request.get_full_path())
if 'delete_flavor' in request.POST:
flavor_id = request.POST.get('flavor', '')
delete_flavor = Flavor.objects.get(id=flavor_id)
delete_flavor.delete()
return HttpResponseRedirect(request.get_full_path())
if 'create' in request.POST:
volumes = {}
form = NewVMForm(request.POST)
if form.is_valid():
data = form.cleaned_data
if instances:
if data['name'] in instances:
msg = _("A virtual machine with this name already exists")
errors.append(msg)
if not errors:
if data['hdd_size']:
if not data['mac']:
msg = _("No Virtual Machine MAC has been entered")
errors.append(msg)
else:
try:
path = conn.create_volume(data['storage'], data['name'], data['hdd_size'])
volumes[path] = conn.get_volume_type(path)
except libvirtError as msg_error:
errors.append(msg_error.message)
elif data['template']:
templ_path = conn.get_volume_path(data['template'])
clone_path = conn.clone_from_template(data['name'], templ_path)
volumes[clone_path] = conn.get_volume_type(clone_path)
else:
if not data['images']:
msg = _("First you need to create or select an image")
errors.append(msg)
else:
for vol in data['images'].split(','):
try:
path = conn.get_volume_path(vol)
volumes[path] = conn.get_volume_type(path)
except libvirtError as msg_error:
errors.append(msg_error.message)
if not errors:
uuid = util.randomUUID()
try:
conn.create_instance(data['name'], data['memory'], data['vcpu'], data['host_model'],
uuid, volumes, data['networks'], data['virtio'], data['mac'])
create_instance = Instance(compute_id=host_id, name=data['name'], uuid=uuid)
create_instance.save()
return HttpResponseRedirect('/instance/%s/%s/' % (host_id, data['name']))
except libvirtError as msg_error:
if data['hdd_size']:
conn.delete_volume(volumes.keys()[0])
errors.append(msg_error.message)
conn.close()
return render_to_response('create.html', locals(), context_instance=RequestContext(request))
avg_line_length: 44.121739 | max_line_length: 112 | alphanum_fraction: 0.513599

---
hexsha: f8e487af25b9797dd2a942cb5666ca85e89e2765 | size: 886 | ext: py | lang: Python
path: utils/wassersteinGradientPenalty.py
repo (stars): andimarafioti/GACELA @ 34649fb01bdecbcb266db046a8b9c48c141f16e1 | repo (issues/forks): tifgan/gacela @ cd496cfce128ea7b6191a93639f8f4efac7e7142 | licenses: ["MIT"]
stars: 15 (2020-05-12T02:58:12.000Z to 2022-03-14T12:10:56.000Z) | issues: 1 (2021-05-22T14:02:06.000Z to 2021-06-01T13:45:11.000Z) | forks: 5 (2020-06-18T20:15:00.000Z to 2021-11-05T15:45:35.000Z)
content:
import torch
__author__ = 'Andres'
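# NOTE: the module body was truncated from this dump. The file name suggests a
# Wasserstein (WGAN-GP) gradient penalty; a standard hedged sketch follows
# (function and argument names assumed, not from the original file):
def calc_gradient_penalty(discriminator, real_data, fake_data, gamma=10.0):
    """Two-sided gradient penalty in the style of Gulrajani et al. (2017)."""
    batch_size = real_data.size(0)
    # Random interpolation coefficients, broadcast over non-batch dims.
    alpha = torch.rand(batch_size, *([1] * (real_data.dim() - 1)),
                       device=real_data.device)
    interpolates = (alpha * real_data +
                    (1 - alpha) * fake_data).requires_grad_(True)
    scores = discriminator(interpolates)
    gradients = torch.autograd.grad(outputs=scores, inputs=interpolates,
                                    grad_outputs=torch.ones_like(scores),
                                    create_graph=True, retain_graph=True)[0]
    grad_norm = gradients.view(batch_size, -1).norm(2, dim=1)
    return gamma * ((grad_norm - 1) ** 2).mean()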
avg_line_length: 42.190476 | max_line_length: 91 | alphanum_fraction: 0.688488

---
hexsha: f8e61b754a032cf61ead46cd66c6dc6f3690b256 | size: 121 | ext: py | lang: Python
path: pytest_capture_log_error/test_file.py
repo: butla/experiments @ 8c8ade15bb01978763d6618342fa42ad7563e38f | licenses: ["MIT"]
stars: 1 (2020-06-01T02:41:45.000Z) | issues: 48 (2019-12-26T16:38:19.000Z to 2021-07-06T13:29:50.000Z) | forks: null
content:
import a_file
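# NOTE: the test body was truncated from this dump. A hypothetical minimal test
# of pytest's log capture (the helper name on `a_file` is assumed):
def test_error_is_captured(caplog):
    a_file.log_an_error()  # assumed helper that logs at ERROR level
    assert any(r.levelname == 'ERROR' for r in caplog.records)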
avg_line_length: 20.166667 | max_line_length: 56 | alphanum_fraction: 0.719008

---
hexsha: f8e61d9aa8b9610c3339494d4c960ec17ee4ba35 | size: 286 | ext: py | lang: Python
path: src_py/ui/identify_page.py
repo: Magier/Aetia @ 7f6045d99904b808e1201f445d0d10b0dce54c37 | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
import streamlit as st
from ui.session_state import SessionState, get_state
from infer import ModelStage
avg_line_length: 26 | max_line_length: 52 | alphanum_fraction: 0.734266

---
hexsha: f8e6a09b44f3ad67acebf3ea296df8c1d2d40eaf | size: 4,075 | ext: py | lang: Python
path: openke/data/UniverseTrainDataLoader.py
repo: luofeisg/OpenKE-PuTransE @ 0bfefb3917e7479520917febd91a9f4d7353c7fc | licenses: ["CC-BY-4.0", "MIT"]
stars: null | issues: null | forks: null
content:
'''
MIT License
Copyright (c) 2020 Rashid Lafraie
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import os
import ctypes
import numpy as np
from .TrainDataLoader import TrainDataLoader
avg_line_length: 40.75 | max_line_length: 120 | alphanum_fraction: 0.694724

---
hexsha: f8e92112b61dc64252a8bdb77bbf3e0e15b55abe | size: 5,074 | ext: py | lang: Python
path: test/jit/test_backend_nnapi.py
repo: Hacky-DH/pytorch @ 80dc4be615854570aa39a7e36495897d8a040ecc | licenses: ["Intel"]
stars: 60,067 (2017-01-18T17:21:31.000Z to 2022-03-31T21:37:45.000Z) | issues: 66,955 (2017-01-18T17:21:38.000Z to 2022-03-31T23:56:11.000Z) | forks: 19,210 (2017-01-18T17:45:04.000Z to 2022-03-31T23:51:56.000Z)
content:
import os
import sys
import unittest
import torch
import torch._C
from pathlib import Path
from test_nnapi import TestNNAPI
from torch.testing._internal.common_utils import TEST_WITH_ASAN
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
if __name__ == "__main__":
raise RuntimeError(
"This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead."
)
"""
Unit Tests for Nnapi backend with delegate
Inherits most tests from TestNNAPI, which loads Android NNAPI models
without the delegate API.
"""
# First skip is needed for IS_WINDOWS or IS_MACOS to skip the tests.
# Second skip is because ASAN is currently causing an error.
# It is still unclear how to resolve this. T95764916
torch_root = Path(__file__).resolve().parent.parent.parent
lib_path = torch_root / 'build' / 'lib' / 'libnnapi_backend.so'
avg_line_length: 44.508772 | max_line_length: 132 | alphanum_fraction: 0.643082

---
hexsha: f8e997acb5df08763f83e5ed402ea27c456b06ca | size: 1,078 | ext: py | lang: Python
path: main/configure.py
repo: syxu828/Graph2Seq-0.1 @ 36e38f755c0ee390735e49121259151da54bcc1c | licenses: ["Apache-2.0"]
stars: 24 (2018-11-04T17:16:52.000Z to 2022-01-06T12:34:49.000Z) | issues: 3 (2018-12-09T00:31:36.000Z to 2020-07-29T06:21:51.000Z) | forks: 4 (2019-01-09T06:44:41.000Z to 2019-08-04T07:55:00.000Z)
content:
train_data_path = "../data/no_cycle/train.data"
dev_data_path = "../data/no_cycle/dev.data"
test_data_path = "../data/no_cycle/test.data"
word_idx_file_path = "../data/word.idx"
word_embedding_dim = 100
train_batch_size = 32
dev_batch_size = 500
test_batch_size = 500
l2_lambda = 0.000001
learning_rate = 0.001
epochs = 100
encoder_hidden_dim = 200
num_layers_decode = 1
word_size_max = 1
dropout = 0.0
path_embed_method = "lstm" # cnn or lstm or bi-lstm
unknown_word = "<unk>"
PAD = "<PAD>"
GO = "<GO>"
EOS = "<EOS>"
deal_unknown_words = True
seq_max_len = 11
decoder_type = "greedy" # greedy, beam
beam_width = 4
attention = True
num_layers = 1 # 1 or 2
# the following are for the graph encoding method
weight_decay = 0.0000
sample_size_per_layer = 4
sample_layer_size = 4
hidden_layer_dim = 100
feature_max_len = 1
feature_encode_type = "uni"
# graph_encode_method = "max-pooling" # "lstm" or "max-pooling"
graph_encode_direction = "bi" # "single" or "bi"
concat = True
encoder = "gated_gcn" # "gated_gcn" "gcn" "seq"
lstm_in_gcn = "none" # before, after, none
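# Illustrative consumption of these settings (assumed; not part of this file):
# import configure
# print(configure.train_data_path, configure.train_batch_size)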
| 21.137255 | 63 | 0.727273 |
f8ea7055295dd79ddcfe4843e79b06f95f13078d
| 7,506 |
py
|
Python
|
dataControlWidget.py
|
andreasbayer/AEGUIFit
|
6a1e31091b74d648d007c75c9fef6efae4086860
|
[
"BSD-3-Clause"
] | null | null | null |
dataControlWidget.py
|
andreasbayer/AEGUIFit
|
6a1e31091b74d648d007c75c9fef6efae4086860
|
[
"BSD-3-Clause"
] | null | null | null |
dataControlWidget.py
|
andreasbayer/AEGUIFit
|
6a1e31091b74d648d007c75c9fef6efae4086860
|
[
"BSD-3-Clause"
] | null | null | null |
from PyQt5.QtWidgets import QLabel, QWidget, QGridLayout, QCheckBox, QGroupBox
from InftyDoubleSpinBox import InftyDoubleSpinBox
from PyQt5.QtCore import pyqtSignal, Qt
import helplib as hl
import numpy as np
| 34.589862 | 123 | 0.635092 |
f8ea7298a7caca93599e616f2e4db31947e61892
| 6,425 |
py
|
Python
|
src/freemovr_engine/calib/acquire.py
|
strawlab/flyvr
|
335892cae740e53e82e07b526e1ba53fbd34b0ce
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 3 |
2015-01-29T14:09:25.000Z
|
2016-04-24T04:25:49.000Z
|
src/freemovr_engine/calib/acquire.py
|
strawlab/flyvr
|
335892cae740e53e82e07b526e1ba53fbd34b0ce
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
src/freemovr_engine/calib/acquire.py
|
strawlab/flyvr
|
335892cae740e53e82e07b526e1ba53fbd34b0ce
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
import roslib
roslib.load_manifest('sensor_msgs')
roslib.load_manifest('dynamic_reconfigure')
import rospy
import sensor_msgs.msg
import dynamic_reconfigure.srv
import dynamic_reconfigure.encoding
import numpy as np
import time
import os.path
import queue
def clear_queue(self):
    # In the full module this is a method of the (omitted) _Runner base class.
    q = self.im_queue
    while 1:
        try:
            q.get_nowait()
        except queue.Empty:
            break
class SimultaneousCameraRunner(_Runner):
    pass  # body omitted in this excerpt
class SequentialCameraRunner(_Runner):
    pass  # body omitted in this excerpt
| 36.095506 | 124 | 0.588327 |
f8eb7ee679859acda30ad6ca74e666a2bc11c767
| 6,949 |
py
|
Python
|
examples/hfht/pointnet_classification.py
|
nixli/hfta
|
76274b5ee0e32732da20b153a3cc6550510d8a78
|
[
"MIT"
] | 24 |
2021-04-06T20:36:10.000Z
|
2022-02-26T17:03:33.000Z
|
examples/hfht/pointnet_classification.py
|
nixli/hfta
|
76274b5ee0e32732da20b153a3cc6550510d8a78
|
[
"MIT"
] | 20 |
2021-04-02T00:51:34.000Z
|
2022-03-29T15:00:08.000Z
|
examples/hfht/pointnet_classification.py
|
nixli/hfta
|
76274b5ee0e32732da20b153a3cc6550510d8a78
|
[
"MIT"
] | 5 |
2021-04-11T20:07:32.000Z
|
2021-06-14T06:41:05.000Z
|
import argparse
import logging
import numpy as np
import os
import pandas as pd
import random
import subprocess
from pathlib import Path
from hyperopt import hp
from hyperopt.pyll.stochastic import sample
from hfta.hfht import (tune_hyperparameters, attach_common_args,
rearrange_algorithm_kwargs, handle_integers,
generate_fusible_param_flags, generate_nonfusible_param)
from hfta.workflow import extract_logging_level
from hfta.hfht.utils import fuse_dicts
if __name__ == '__main__':
args = attach_args().parse_args()
rearrange_algorithm_kwargs(args)
logging.basicConfig(level=extract_logging_level(args))
args.outdir = os.path.abspath(os.path.expanduser(args.outdir))
args.dataset = os.path.abspath(os.path.expanduser(args.dataset))
main(args)
| 29.570213 | 80 | 0.61534 |
f8eb7f85d81c3a2dfe42f499dfc3e4db4b3a0b93
| 444 |
py
|
Python
|
cpdb/trr/migrations/0002_alter_trr_subject_id_type.py
|
invinst/CPDBv2_backend
|
b4e96d620ff7a437500f525f7e911651e4a18ef9
|
[
"Apache-2.0"
] | 25 |
2018-07-20T22:31:40.000Z
|
2021-07-15T16:58:41.000Z
|
cpdb/trr/migrations/0002_alter_trr_subject_id_type.py
|
invinst/CPDBv2_backend
|
b4e96d620ff7a437500f525f7e911651e4a18ef9
|
[
"Apache-2.0"
] | 13 |
2018-06-18T23:08:47.000Z
|
2022-02-10T07:38:25.000Z
|
cpdb/trr/migrations/0002_alter_trr_subject_id_type.py
|
invinst/CPDBv2_backend
|
b4e96d620ff7a437500f525f7e911651e4a18ef9
|
[
"Apache-2.0"
] | 6 |
2018-05-17T21:59:43.000Z
|
2020-11-17T00:30:26.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-03-06 04:00
from __future__ import unicode_literals
from django.db import migrations, models
| 21.142857 | 57 | 0.61036 |
f8ec1873a929e5565a9c1de6ad8321fa85a4a6d9
| 1,409 |
py
|
Python
|
tests/utils/dut.py
|
Ostrokrzew/standalone-linux-io-tracer
|
5fcbe7f0c7b027d9e5fdfb4c6e9d553c6fa617b6
|
[
"BSD-3-Clause-Clear"
] | 24 |
2019-05-09T08:36:46.000Z
|
2022-03-16T16:20:01.000Z
|
tests/utils/dut.py
|
Ostrokrzew/standalone-linux-io-tracer
|
5fcbe7f0c7b027d9e5fdfb4c6e9d553c6fa617b6
|
[
"BSD-3-Clause-Clear"
] | 122 |
2019-05-27T12:27:15.000Z
|
2020-07-31T06:45:08.000Z
|
tests/utils/dut.py
|
Ostrokrzew/standalone-linux-io-tracer
|
5fcbe7f0c7b027d9e5fdfb4c6e9d553c6fa617b6
|
[
"BSD-3-Clause-Clear"
] | 18 |
2019-05-27T09:31:56.000Z
|
2021-05-27T18:54:52.000Z
|
#
# Copyright(c) 2020 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
from core.test_run_utils import TestRun
from utils.installer import install_iotrace, check_if_installed
from utils.iotrace import IotracePlugin
from utils.misc import kill_all_io
from test_tools.fio.fio import Fio
| 30.630435 | 112 | 0.726757 |
f8ecc8d3dac32d7fd54bf1a19d511383c8e5ce7f
| 355 |
py
|
Python
|
game_service.py
|
Drew8521/MusiQ
|
e52671c7dcc4f54f6cbb829486a733a9179575b1
|
[
"MIT"
] | null | null | null |
game_service.py
|
Drew8521/MusiQ
|
e52671c7dcc4f54f6cbb829486a733a9179575b1
|
[
"MIT"
] | 1 |
2019-08-09T21:36:33.000Z
|
2019-08-09T21:37:24.000Z
|
game_service.py
|
Drew8521/MusiQ
|
e52671c7dcc4f54f6cbb829486a733a9179575b1
|
[
"MIT"
] | null | null | null |
from models import Song
from random import choice
| 23.666667 | 60 | 0.622535 |
f8ed0d2649220a6a4bd9e78f42580892fbc06d4f
| 288 |
py
|
Python
|
stdlib/csv/custom_dialect.py
|
janbodnar/Python-Course
|
51705ab5a2adef52bcdb99a800e94c0d67144a38
|
[
"BSD-2-Clause"
] | 13 |
2017-08-22T12:26:07.000Z
|
2021-07-29T16:13:50.000Z
|
stdlib/csv/custom_dialect.py
|
janbodnar/Python-Course
|
51705ab5a2adef52bcdb99a800e94c0d67144a38
|
[
"BSD-2-Clause"
] | 1 |
2021-02-08T10:24:33.000Z
|
2021-02-08T10:24:33.000Z
|
stdlib/csv/custom_dialect.py
|
janbodnar/Python-Course
|
51705ab5a2adef52bcdb99a800e94c0d67144a38
|
[
"BSD-2-Clause"
] | 17 |
2018-08-13T11:10:33.000Z
|
2021-07-29T16:14:02.000Z
|
#!/usr/bin/python
# custom_dialect.py
import csv
csv.register_dialect("hashes", delimiter="#")
f = open('items3.csv', 'w')
with f:
writer = csv.writer(f, dialect="hashes")
writer.writerow(("pencils", 2))
writer.writerow(("plates", 1))
writer.writerow(("books", 4))
| 16.941176 | 45 | 0.635417 |
f8ed4e06a829662d9c548dd0be8745a5ea388df8
| 996 |
py
|
Python
|
servicex/web/forms.py
|
zorache/ServiceX_App
|
4479afa0f019bbdcd35812691e78abba442c9d37
|
[
"BSD-3-Clause"
] | 3 |
2019-12-31T06:44:06.000Z
|
2021-03-19T17:39:42.000Z
|
servicex/web/forms.py
|
zorache/ServiceX_App
|
4479afa0f019bbdcd35812691e78abba442c9d37
|
[
"BSD-3-Clause"
] | 132 |
2019-10-09T20:45:53.000Z
|
2022-03-30T19:07:37.000Z
|
servicex/web/forms.py
|
zorache/ServiceX_App
|
4479afa0f019bbdcd35812691e78abba442c9d37
|
[
"BSD-3-Clause"
] | 12 |
2019-12-06T22:47:35.000Z
|
2021-09-23T21:13:47.000Z
|
from typing import Optional
from flask_wtf import FlaskForm
from wtforms import StringField, SelectField, SubmitField
from wtforms.validators import DataRequired, Length, Email
from servicex.models import UserModel
| 38.307692 | 80 | 0.653614 |
f8ee134e47a471c9b912238f8dbcd8fb83c49b93
| 3,405 |
py
|
Python
|
libs/export_pbs/exportPb.py
|
linye931025/FPN_Tensorflow-master
|
e972496a798e9d77a74ddc6062d46b152d072ce7
|
[
"MIT"
] | null | null | null |
libs/export_pbs/exportPb.py
|
linye931025/FPN_Tensorflow-master
|
e972496a798e9d77a74ddc6062d46b152d072ce7
|
[
"MIT"
] | null | null | null |
libs/export_pbs/exportPb.py
|
linye931025/FPN_Tensorflow-master
|
e972496a798e9d77a74ddc6062d46b152d072ce7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import os, sys
import tensorflow as tf
import tf_slim as slim
from tensorflow.python.tools import freeze_graph
sys.path.append('../../')
from data.io.image_preprocess import short_side_resize_for_inference_data
from libs.configs import cfgs
from libs.networks import build_whole_network
CKPT_PATH = '/home/yjr/PycharmProjects/Faster-RCNN_Tensorflow/output/trained_weights/FasterRCNN_20180517/voc_200000model.ckpt'
OUT_DIR = '../../output/Pbs'
PB_NAME = 'FasterRCNN_Res101_Pascal.pb'
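# export_frozenPB() is omitted from this excerpt; judging by the imports, it
# rebuilds the inference graph and writes the frozen .pb to OUT_DIR/PB_NAME
# via freeze_graph.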
if __name__ == '__main__':
os.environ["CUDA_VISIBLE_DEVICES"] = ''
export_frozenPB()
| 38.693182 | 126 | 0.612335 |
f8eef0cd263627a15c156d8fca2fb80f3faea6c2
| 983 |
py
|
Python
|
ngadnap/command_templates/adapter_removal.py
|
smilefreak/NaDNAP
|
18354778dd896bc0ab3456ca7dbb9d194c1ebf4d
|
[
"MIT"
] | null | null | null |
ngadnap/command_templates/adapter_removal.py
|
smilefreak/NaDNAP
|
18354778dd896bc0ab3456ca7dbb9d194c1ebf4d
|
[
"MIT"
] | null | null | null |
ngadnap/command_templates/adapter_removal.py
|
smilefreak/NaDNAP
|
18354778dd896bc0ab3456ca7dbb9d194c1ebf4d
|
[
"MIT"
] | null | null | null |
"""
Adapter Removal templates
"""
# AdapterRemoval
#
# {0}: executable
# {1}: fastq1 abs
# {2}: fastq2 abs
# {3}: fastq1
# {4}: fastq2
# {5}: minimum length
# {6}: mismatch_rate
# {7}: min base quality
# {8}: min merge_length
__ADAPTER_REMOVAL__="""
{0} --collapse --file1 {1} --file2 {2} --outputstats {3}.stats --trimns --outputcollapsed {3}.collapsed --minlength {5} --output1 {3}.p1 --output2 {4}.p2 --mm {6} --minquality {7} --minalignmentlength {8} --trimqualities
"""
import os
from ngadnap.dependency_graph.graph import CommandNode
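# Illustrative use of the template (all argument values below are hypothetical):
# cmd = __ADAPTER_REMOVAL__.format(
#     "AdapterRemoval",      # {0}: executable
#     "/abs/path/r1.fastq",  # {1}: fastq1 abs
#     "/abs/path/r2.fastq",  # {2}: fastq2 abs
#     "r1", "r2",            # {3}, {4}: output name prefixes
#     25,                    # {5}: minimum length
#     3,                     # {6}: mismatch rate
#     20,                    # {7}: min base quality
#     11)                    # {8}: min merge length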
| 31.709677 | 225 | 0.678535 |
f8f15b0752a64958efc156868083500a63e94dc1
| 1,745 |
py
|
Python
|
undercloud_heat_plugins/immutable_resources.py
|
AllenJSebastian/tripleo-common
|
d510a30266e002e90c358e69cb720bfdfa736134
|
[
"Apache-2.0"
] | 52 |
2015-04-17T12:06:09.000Z
|
2021-11-23T09:46:30.000Z
|
undercloud_heat_plugins/immutable_resources.py
|
AllenJSebastian/tripleo-common
|
d510a30266e002e90c358e69cb720bfdfa736134
|
[
"Apache-2.0"
] | null | null | null |
undercloud_heat_plugins/immutable_resources.py
|
AllenJSebastian/tripleo-common
|
d510a30266e002e90c358e69cb720bfdfa736134
|
[
"Apache-2.0"
] | 47 |
2015-10-09T15:22:38.000Z
|
2021-04-22T04:35:57.000Z
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from heat.engine.resources.openstack.neutron import net
from heat.engine.resources.openstack.neutron import port
from heat.engine.resources.openstack.neutron import subnet
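# The Immutable* subclasses are omitted from this excerpt; minimal stand-ins
# are sketched here so resource_mapping() below resolves. In the real plugin
# each subclass is expected to freeze its properties so stack updates cannot
# modify the resource.
class ImmutableNet(net.Net):
    pass
class ImmutablePort(port.Port):
    pass
class ImmutableSubnet(subnet.Subnet):
    pass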
def resource_mapping():
return {
'OS::Neutron::Net': ImmutableNet,
'OS::Neutron::Port': ImmutablePort,
'OS::Neutron::Subnet': ImmutableSubnet,
}
| 28.145161 | 78 | 0.696848 |
f8f2378998282c62f5eff079407d0b48e7bea81d
| 2,154 |
py
|
Python
|
slybot/setup.py
|
DataKnower/dk-portia
|
24579c0160167af2442117975bf7d6a714b4d7d5
|
[
"BSD-3-Clause"
] | null | null | null |
slybot/setup.py
|
DataKnower/dk-portia
|
24579c0160167af2442117975bf7d6a714b4d7d5
|
[
"BSD-3-Clause"
] | null | null | null |
slybot/setup.py
|
DataKnower/dk-portia
|
24579c0160167af2442117975bf7d6a714b4d7d5
|
[
"BSD-3-Clause"
] | null | null | null |
from os.path import join, abspath, dirname, exists
from slybot import __version__
from setuptools import setup, find_packages
from setuptools.command.bdist_egg import bdist_egg
from setuptools.command.sdist import sdist
install_requires = ['Scrapy', 'scrapely', 'loginform', 'lxml', 'jsonschema',
'dateparser', 'scrapyjs', 'page_finder', 'six']
extras = {
'tests': ['nose', 'nose-timer'],
'clustering': ['page_clustering']
}
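# The custom command classes referenced in cmdclass below are omitted from
# this excerpt; minimal stand-ins are sketched here so setup() resolves. In
# the real package they bundle the combined splash script before packaging.
class bdist_egg_command(bdist_egg):
    pass
class sdist_command(sdist):
    pass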
setup(name='slybot',
version=__version__,
license='BSD',
description='Slybot crawler',
author='Scrapy project',
author_email='[email protected]',
url='http://github.com/scrapinghub/portia',
packages=find_packages(exclude=('tests', 'tests.*')),
platforms=['Any'],
scripts=['bin/slybot', 'bin/portiacrawl'],
install_requires=install_requires,
extras_require=extras,
package_data={'': ['slybot/splash-script-combined.js']},
include_package_data=True,
cmdclass={
'bdist_egg': bdist_egg_command,
'sdist': sdist_command
},
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7'
])
| 29.916667 | 76 | 0.615135 |
f8f25149f3eefd3629cc486cf987c4d8a9a5bbb9
| 3,846 |
py
|
Python
|
yolov3.py
|
huhuhang/yolov3
|
6c254b3f453c394046381e1c00cb0908b8f97b3a
|
[
"MIT"
] | 35 |
2018-10-12T06:33:09.000Z
|
2022-02-25T03:19:37.000Z
|
yolov3.py
|
huhuhang/yolov3
|
6c254b3f453c394046381e1c00cb0908b8f97b3a
|
[
"MIT"
] | 1 |
2019-08-31T16:05:12.000Z
|
2020-01-05T15:34:54.000Z
|
yolov3.py
|
huhuhang/yolov3
|
6c254b3f453c394046381e1c00cb0908b8f97b3a
|
[
"MIT"
] | 14 |
2018-12-10T22:48:51.000Z
|
2021-11-18T20:56:38.000Z
|
import torch
import torch.nn as nn
from .yolo_layer import *
from .yolov3_base import *
###################################################################
## Backbone and helper modules
| 38.079208 | 119 | 0.578783 |
f8f25cd96d67041f861381dbd21810aa553cccdc
| 883 |
py
|
Python
|
tests/assets/test_driver_errors.py
|
CyrilLeMat/modelkit
|
2150ffe78ebb00e3302dac36ccb09e66becd5130
|
[
"MIT"
] | null | null | null |
tests/assets/test_driver_errors.py
|
CyrilLeMat/modelkit
|
2150ffe78ebb00e3302dac36ccb09e66becd5130
|
[
"MIT"
] | null | null | null |
tests/assets/test_driver_errors.py
|
CyrilLeMat/modelkit
|
2150ffe78ebb00e3302dac36ccb09e66becd5130
|
[
"MIT"
] | null | null | null |
import os
import pytest
from modelkit.assets import errors
from tests.conftest import skip_unless
| 29.433333 | 65 | 0.822197 |
f8f2d8744612c8dd54640bd2fc3dd67702898911
| 2,163 |
py
|
Python
|
wiki/tests.py
|
Jarquevious/makewiki
|
a945da5ab7704042ef9d740987e23da19ec87267
|
[
"MIT"
] | null | null | null |
wiki/tests.py
|
Jarquevious/makewiki
|
a945da5ab7704042ef9d740987e23da19ec87267
|
[
"MIT"
] | 4 |
2020-06-06T01:42:46.000Z
|
2021-06-10T20:10:57.000Z
|
wiki/tests.py
|
Jarquevious/makewiki
|
a945da5ab7704042ef9d740987e23da19ec87267
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from django.contrib.auth.models import User
from wiki.models import Page
# Create your tests here.
class PageTests(TestCase):  # class wrapper restored for this excerpt (name assumed)
    def test_detail_page(self):
        """Test that a slug is generated when saving a Page."""
        # Create a user and save to the database
        user = User.objects.create()
        user.save()
        # Create a page and save to the database
        page = Page(title="My Detail Test Page", content="details_test", author=user)
        page.save()
        # The generated slug matches what we expect
        slug = page.slug
        response = self.client.get(f'/{slug}/')
        self.assertEqual(response.status_code, 200)
        info = self.client.get('/')
        self.assertContains(info, 'makewiki', html=True)

    def test_edit_page(self):
        """Test the edit page."""
        # Test data that will be displayed on the screen
        user = User.objects.create()
        user.save()
        page = Page.objects.create(title="My Test Page", content="edit_test", author=user)
        page.save()
        # Make a POST request to the form view
        post_data = {
            'title': 'Who',
            'content': 'Are you?',
            'author': user.id,
        }
        response = self.client.post('/form/', data=post_data)
        # Check that the response is 200
        self.assertEqual(response.status_code, 200)
        # Check that the number of pages passed to the template matches the number in the database
        end = self.client.get('/')
        result = end.context['pages']
        self.assertQuerysetEqual(result, ['<Page: My Test Page>', '<Page: Test>'], ordered=False)
| 27.730769 | 98 | 0.660194 |
f8f3fc84d2eec11a3d1fe8de179b44f825aeb0e4
| 419 |
py
|
Python
|
BanditSim/__init__.py
|
AJB0211/BanditSim
|
5426486b40c35492049b09f9b57eb18ad5d6ce63
|
[
"MIT"
] | null | null | null |
BanditSim/__init__.py
|
AJB0211/BanditSim
|
5426486b40c35492049b09f9b57eb18ad5d6ce63
|
[
"MIT"
] | null | null | null |
BanditSim/__init__.py
|
AJB0211/BanditSim
|
5426486b40c35492049b09f9b57eb18ad5d6ce63
|
[
"MIT"
] | null | null | null |
from .multiarmedbandit import MultiArmedBandit
from .eps_greedy_constant_stepsize import EpsilonGreedyConstantStepsize
from .greedy_constant_stepsize import GreedyConstantStepsize
from .epsilon_greedy_average_step import EpsilonGreedyAverageStep
from .greedy_average_step import GreedyAverageStep
from .greedy_bayes_update import GreedyBayesianUpdate
from .eps_greedy_bayes_update import EpsilonGreedyBayesianUpdate
| 38.090909 | 71 | 0.909308 |
f8f43d95779ee26635e6e7c26bda70278bc13afd
| 3,915 |
py
|
Python
|
tests/queries/test_query.py
|
txf626/django
|
95bda03f2da15172cf342f13ba8a77c007b63fbb
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 2 |
2019-02-28T12:38:32.000Z
|
2019-09-30T08:08:16.000Z
|
tests/queries/test_query.py
|
Scheldon/django
|
11a9017179812198a12a2fc19610262a549aa46e
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 57 |
2018-10-08T12:37:30.000Z
|
2018-10-08T17:39:26.000Z
|
tests/queries/test_query.py
|
Scheldon/django
|
11a9017179812198a12a2fc19610262a549aa46e
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 1 |
2021-06-21T07:51:09.000Z
|
2021-06-21T07:51:09.000Z
|
from datetime import datetime
from django.core.exceptions import FieldError
from django.db.models import CharField, F, Q
from django.db.models.expressions import SimpleCol
from django.db.models.fields.related_lookups import RelatedIsNull
from django.db.models.functions import Lower
from django.db.models.lookups import Exact, GreaterThan, IsNull, LessThan
from django.db.models.sql.query import Query
from django.db.models.sql.where import OR
from django.test import TestCase
from django.test.utils import register_lookup
from .models import Author, Item, ObjectC, Ranking
| 41.648936 | 81 | 0.696552 |
f8f4457783432480005e18ff932b887d871f9663
| 16,356 |
py
|
Python
|
src/matrix_game/matrix_game.py
|
ewanlee/mackrl
|
6dd505aa09830f16c35a022f67e255db935c807e
|
[
"Apache-2.0"
] | 26 |
2019-10-28T09:01:45.000Z
|
2021-09-20T08:56:12.000Z
|
src/matrix_game/matrix_game.py
|
ewanlee/mackrl
|
6dd505aa09830f16c35a022f67e255db935c807e
|
[
"Apache-2.0"
] | 1 |
2020-07-25T06:50:05.000Z
|
2020-07-25T06:50:05.000Z
|
src/matrix_game/matrix_game.py
|
ewanlee/mackrl
|
6dd505aa09830f16c35a022f67e255db935c807e
|
[
"Apache-2.0"
] | 6 |
2019-12-18T12:02:57.000Z
|
2021-03-03T13:15:47.000Z
|
# This notebook implements a proof-of-principle for
# Multi-Agent Common Knowledge Reinforcement Learning (MACKRL)
# The entire notebook can be executed online, no need to download anything
# http://pytorch.org/
from itertools import chain
import torch
import torch.nn.functional as F
from torch.multiprocessing import Pool, set_start_method, freeze_support
try:
set_start_method('spawn')
except RuntimeError:
pass
from torch.nn import init
from torch.optim import Adam, SGD
import numpy as np
import matplotlib.pyplot as plt
use_cuda = False
payoff_values = []
payoff_values.append(torch.tensor([ # payoff values
[5, 0, 0, 2, 0],
[0, 1, 2, 4, 2],
[0, 0, 0, 2, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 0],
], dtype=torch.float32) * 0.2)
payoff_values.append(
torch.tensor([ # payoff values
[0, 0, 1, 0, 5],
[0, 0, 2, 0, 0],
[1, 2, 4, 2, 1],
[0, 0, 2, 0, 0],
[0, 0, 1, 0, 0],
], dtype=torch.float32) * 0.2)
n_agents = 2
n_actions = len(payoff_values[0])
n_states_dec = 5
n_states_joint = 3
n_mix_hidden = 3
p_observation = 0.5
p_ck_noise = [0.0]
# Number of gradient steps
t_max = 202
# We'll be using a high learning rate, since we have exact gradients
lr = 0.05 # DEBUG: 0.05 if exact gradients!
optim = 'adam'
# You can reduce this number if you are short on time. (Eg. n_trials = 20)
#n_trials = 100 # 30
n_trials = 20 #15 #100
std_val = 1.0
# These are the 3 settings we run: MACKRL, Joint-Action-Learner (always uses CK),
# Independent Actor-Critic (always uses decentralised action selection)
labels = ["IAC", "JAL"]
p_vec = [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]
final_res = []
# # Pair-Controller with 3 input state (no CK, CK & Matrix ID = 0, CK & Matrix ID = 1), n_actions^2 actions for
# # joint action + 1 action for delegation to the independent agents.
# theta_joint = init.normal_(torch.zeros(n_states_joint, n_actions ** 2 + 1, requires_grad=True), std=0.1)
# Produce marginalised policy: pi_pc[0] * pi^a * pi^b + p(u^ab)
# Calculate the expected return: sum_{\tau} P(\tau | pi) R(\tau)
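# main() is omitted from this excerpt; it presumably runs the n_trials sweep
# over p_vec for each label and fills final_res.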
if __name__ == "__main__":
freeze_support()
main()
| 38.037209 | 145 | 0.547811 |
f8f4dcd9fb78ee1924b9f50173ac949a710abcfd
| 3,190 |
py
|
Python
|
testcases/school_bus.py
|
wilsonsuen/av-testing
|
a6967b4cb4e4ad6b10d041ffd3dc62188fccad81
|
[
"MIT"
] | null | null | null |
testcases/school_bus.py
|
wilsonsuen/av-testing
|
a6967b4cb4e4ad6b10d041ffd3dc62188fccad81
|
[
"MIT"
] | null | null | null |
testcases/school_bus.py
|
wilsonsuen/av-testing
|
a6967b4cb4e4ad6b10d041ffd3dc62188fccad81
|
[
"MIT"
] | null | null | null |
import sys
import os
import glob
import json
from robot import rebot
from robot.api import TestSuite
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
if __name__ == "__main__":
main_suite = TestSuite('School Bus Scenario')
main_suite.resource.imports.library('lib/simulation.py')
testcase_paths = glob.glob('data/testdata/04_school_bus/*.json')
testcase_paths.sort()
for testcase_path in testcase_paths[110:113]:
with open(testcase_path) as f:
testdata = json.load(f)
tags = list(testdata['testcase']['context'].values()) +\
list(testdata['testcase']['input'].values())
school_bus_test = main_suite.tests.create(testdata['testcase']['name'], tags=tags)
school_bus_test.setup.config(name='Setup Scenario', args=[testcase_path])
school_bus_test.body.create_keyword('Start Simulation')
school_bus_test.body.create_keyword('Validate Result')
school_bus_test.teardown.config(name='Test Case Teardown')
main_suite.run(output='results/04_school_bus/output.xml')
rebot('results/04_school_bus/output.xml',
log="results/04_school_bus/log.html",
report="results/04_school_bus/report.html")
"""
rebot --tagstatcombine "8:00AMANDSunny:8AM and Sunny(C1)" --tagstatcombine "8:00AMANDCloudy:8AM and Cloudy(C2)" --tagstatcombine "8:00AMANDRainning:8AM and Rainning(C3)" --tagstatcombine "8:00AMANDFoggy:8AM and Foggy(C4)" --tagstatcombine "12:00PMANDSunny:12PM and Sunny(C5)" --tagstatcombine "12:00PMANDCloudy:12PM and Cloudy(C6)" --tagstatcombine "12:00PMANDRainning:12PM and Rainning(C7)" --tagstatcombine "12:00PMANDFoggy:12PM and Foggy(C8)" --tagstatcombine "3:00PMANDSunny:3PM and Sunny(C9)" --tagstatcombine "3:00PMANDCloudy:3PM and Cloudy(C10)" --tagstatcombine "3:00PMANDRainning:3PM and Rainning(C11)" --tagstatcombine "3:00PMANDFoggy:3PM and Foggy(C12)" --tagstatcombine "5:00PMANDSunny:5PM and Sunny(C13)" --tagstatcombine "5:00PMANDCloudy:5PM and Cloudy(C14)" --tagstatcombine "5:00PMANDRainning:5PM and Ranining(C15)" --tagstatcombine "5:00PMANDFoggy:5PM and Foggy(C16)" --tagstatcombine "7:00PMANDSunny:7PM and Sunny(C17)" --tagstatcombine "7:00PMANDCloudy:7PM and Cloudy(C18)" --tagstatcombine "7:00PMANDRainning:7PM and Rainning(C19)" --tagstatcombine "7:00PMANDFoggy:7PM and Foggy(C20)" --tagstatcombine MovingANDBackward_lane:Moving\ and\ Backward\ lane\(I12\) --tagstatcombine MovingANDForward_lane:Moving\ and\ Forward\ lane\(I9\) --tagstatcombine LoadingANDBackward_lane:Loading\ and\ Backward\ lane\(I6\) --tagstatcombine LoadingANDForward_lane:Loading\ and\ Forward\ lane\(I3\) --tagstatcombine StopANDBackward_lane:Stop\ and\ Backward\ lane\(I18\) --tagstatcombine StopANDForward_lane:Stop\ and\ Forward\ lane\(I15\) --tagstatexclude Forward_lane --tagstatexclude Backward_lane --tagstatexclude Moving --tagstatexclude Loading --tagstatexclude Stop --tagstatexclude 8\:00AM --tagstatexclude 12\:00PM --tagstatexclude 3\:00PM --tagstatexclude 5\:00PM --tagstatexclude 7\:00PM --tagstatexclude Sunny --tagstatexclude Foggy --tagstatexclude Rainning --tagstatexclude Cloudy -r combined_report.html -l combined_log.html output.xml
"""
| 91.142857 | 1,951 | 0.754232 |
f8f623d0cb63c4b268f633b3bf392a5401ce666a
| 2,962 |
py
|
Python
|
pr_consistency/2.find_pr_branches.py
|
adrn/astropy-tools
|
c26a5e4cdf8735976375dd2b77de797a7723bcd9
|
[
"BSD-3-Clause"
] | 10 |
2018-02-24T15:06:39.000Z
|
2020-11-24T15:28:35.000Z
|
pr_consistency/2.find_pr_branches.py
|
adrn/astropy-tools
|
c26a5e4cdf8735976375dd2b77de797a7723bcd9
|
[
"BSD-3-Clause"
] | 63 |
2018-01-22T20:12:47.000Z
|
2021-07-10T15:42:58.000Z
|
pr_consistency/2.find_pr_branches.py
|
adrn/astropy-tools
|
c26a5e4cdf8735976375dd2b77de797a7723bcd9
|
[
"BSD-3-Clause"
] | 16 |
2018-02-25T16:32:51.000Z
|
2021-07-10T13:33:46.000Z
|
# The purpose of this script is to check all the maintenance branches of the
# given repository, and find which pull requests are included in which
# branches. The output is a JSON file that contains for each pull request the
# list of all branches in which it is included. We look specifically for the
# message "Merge pull request #xxxx " in commit messages, so this is not
# completely foolproof, but seems to work for now.
import os
import sys
import json
import re
import subprocess
import tempfile
from collections import defaultdict
from astropy.utils.console import color_print
from common import get_branches
if sys.argv[1:]:
REPOSITORY_NAME = sys.argv[1]
else:
REPOSITORY_NAME = 'astropy/astropy'
print("The repository this script currently works with is '{}'.\n"
.format(REPOSITORY_NAME))
REPOSITORY = f'git://github.com/{REPOSITORY_NAME}.git'
NAME = os.path.basename(REPOSITORY_NAME)
DIRTOCLONEIN = tempfile.mkdtemp() # set this to a non-temp directory to retain the clone between runs
ORIGIN = 'origin' # set this to None to not fetch anything but rather use the directory as-is.
STARTDIR = os.path.abspath('.')
# The branches we are interested in
BRANCHES = get_branches(REPOSITORY_NAME)
# Read in a list of all the PRs
with open(f'merged_pull_requests_{NAME}.json') as merged:
merged_prs = json.load(merged)
# Set up a dictionary where each key will be a PR and each value will be a list
# of branches in which the PR is present
pr_branches = defaultdict(list)
try:
# Set up repository
color_print(f'Cloning {REPOSITORY}', 'green')
os.chdir(DIRTOCLONEIN)
if os.path.isdir(NAME):
# already exists... assume its the right thing
color_print('"{}" directory already exists - assuming it is an already '
'existing clone'.format(NAME), 'yellow')
os.chdir(NAME)
if ORIGIN:
subprocess.call(f'git fetch {ORIGIN}', shell=True)
else:
subprocess.call(f'git clone {REPOSITORY}', shell=True)
os.chdir(NAME)
# Loop over branches and find all PRs in the branch
for branch in BRANCHES:
# Change branch
color_print(f'Switching to branch {branch}', 'green')
subprocess.call('git reset --hard', shell=True)
subprocess.call('git clean -fxd', shell=True)
subprocess.call(f'git checkout {branch}', shell=True)
if ORIGIN:
subprocess.call(f'git reset --hard {ORIGIN}/{branch}', shell=True)
# Extract log:
log = subprocess.check_output('git log', shell=True).decode('utf-8')
# Check for the presence of the PR in the log
for pr in (re.findall(r'Merge pull request #(\d+) ', log) +
re.findall(r'Backport PR #(\d+):', log)):
pr_branches[pr].append(branch)
finally:
os.chdir(STARTDIR)
with open(f'pull_requests_branches_{NAME}.json', 'w') as f:
json.dump(pr_branches, f, sort_keys=True, indent=2)
| 33.659091 | 102 | 0.686698 |
f8f63abc9f6d14490126b79f424fe99cf745e819
| 603 |
py
|
Python
|
agents/solo_q_agents/q_agent_test/aux.py
|
pedMatias/matias_hfo
|
6d88e1043a1455f5c1f6cc11b9380869772f4176
|
[
"MIT"
] | 1 |
2021-06-03T20:03:50.000Z
|
2021-06-03T20:03:50.000Z
|
agents/solo_q_agents/q_agent_test/aux.py
|
pedMatias/matias_hfo
|
6d88e1043a1455f5c1f6cc11b9380869772f4176
|
[
"MIT"
] | null | null | null |
agents/solo_q_agents/q_agent_test/aux.py
|
pedMatias/matias_hfo
|
6d88e1043a1455f5c1f6cc11b9380869772f4176
|
[
"MIT"
] | 1 |
2021-03-14T01:22:33.000Z
|
2021-03-14T01:22:33.000Z
|
from datetime import datetime as dt
import os
import numpy as np
import settings
| 24.12 | 67 | 0.665008 |
f8f65ce2aa90b1532e983805cc84833de1433b1e
| 1,316 |
py
|
Python
|
Python38/Lib/site-packages/PyInstaller/hooks/hook-PyQt4.py
|
AXFS-H/Windows10Debloater
|
ab5f8a8a8fb065bb40b7ddbd1df75563d8b8d13e
|
[
"MIT"
] | 5 |
2020-08-24T23:29:58.000Z
|
2022-02-07T19:58:07.000Z
|
PyInstaller/hooks/hook-PyQt4.py
|
jeremysanders/pyinstaller
|
321b24f9a9a5978337735816b36ca6b4a90a2fb4
|
[
"Apache-2.0"
] | 12 |
2020-02-15T04:04:55.000Z
|
2022-02-18T20:29:49.000Z
|
PyInstaller/hooks/hook-PyQt4.py
|
jeremysanders/pyinstaller
|
321b24f9a9a5978337735816b36ca6b4a90a2fb4
|
[
"Apache-2.0"
] | 2 |
2020-08-24T23:30:06.000Z
|
2021-12-23T18:23:38.000Z
|
#-----------------------------------------------------------------------------
# Copyright (c) 2013-2020, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
import os
from PyInstaller.utils.hooks import qt_menu_nib_dir
from PyInstaller.compat import getsitepackages, is_darwin, is_win
# On Windows system PATH has to be extended to point to the PyQt4 directory.
# The PySide directory contains Qt dlls. We need to avoid including different
# version of Qt libraries when there is installed another application (e.g. QtCreator)
if is_win:
from PyInstaller.utils.win32.winutils import extend_system_path
extend_system_path([os.path.join(x, 'PyQt4') for x in getsitepackages()])
hiddenimports = ['sip']
# For Qt to work on Mac OS X it is necessary to include directory qt_menu.nib.
# This directory contains some resource files necessary to run PyQt or PySide
# app.
if is_darwin:
datas = [
(qt_menu_nib_dir('PyQt4'), 'qt_menu.nib'),
]
| 35.567568 | 86 | 0.670213 |
f8f694a754b9e6ecfc7a48eb472c8ee96d237a42
| 278 |
py
|
Python
|
timeserio/utils/functools.py
|
ig248/timeserio
|
afc2a953a83e763418d417059493ef13a17d349c
|
[
"MIT"
] | 63 |
2019-07-12T17:16:27.000Z
|
2022-02-22T11:06:50.000Z
|
timeserio/utils/functools.py
|
ig248/timeserio
|
afc2a953a83e763418d417059493ef13a17d349c
|
[
"MIT"
] | 34 |
2019-07-30T11:52:09.000Z
|
2022-03-28T12:42:02.000Z
|
timeserio/utils/functools.py
|
ig248/timeserio
|
afc2a953a83e763418d417059493ef13a17d349c
|
[
"MIT"
] | 12 |
2019-08-14T05:51:22.000Z
|
2021-03-15T09:34:15.000Z
|
import inspect
def get_default_args(func):
"""Get default arguments of a function.
"""
signature = inspect.signature(func)
return {
k: v.default
for k, v in signature.parameters.items()
if v.default is not inspect.Parameter.empty
}
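if __name__ == "__main__":
    # Illustrative check (not part of the original module); `greet` is a
    # hypothetical function used only for demonstration.
    def greet(name, punctuation="!"):
        return name + punctuation

    assert get_default_args(greet) == {"punctuation": "!"}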
| 21.384615 | 51 | 0.636691 |
f8f72bfabd477794319287090180fb8cb1c3008e
| 2,059 |
py
|
Python
|
Sec_10_expr_lambdas_fun_integradas/a_lambdas.py
|
PauloAlexSilva/Python
|
690913cdcfd8bde52d9ddd15e3c838e6aef27730
|
[
"MIT"
] | null | null | null |
Sec_10_expr_lambdas_fun_integradas/a_lambdas.py
|
PauloAlexSilva/Python
|
690913cdcfd8bde52d9ddd15e3c838e6aef27730
|
[
"MIT"
] | null | null | null |
Sec_10_expr_lambdas_fun_integradas/a_lambdas.py
|
PauloAlexSilva/Python
|
690913cdcfd8bde52d9ddd15e3c838e6aef27730
|
[
"MIT"
] | null | null | null |
"""
Utilizando Lambdas
Conhecidas por Expresses Lambdas, ou simplesmente Lambdas, so funes sem nome, ou seja,
funes annimas.
# Funo em Python
def funcao(x):
return 3 * x + 1
print(funcao(4))
print(funcao(7))
# Expresso Lambda
lambda x: 3 * x + 1
# Como utlizar a expresso lambda?
calc = lambda x: 3 * x + 1
print(calc(4))
print(calc(7))
# Podemos ter expresses lambdas com mltiplas entradas
nome_compelto = lambda nome, sobrenome: nome.strip().title() + ' ' + sobrenome.strip().title()
print(nome_compelto(' paulo', ' SILVA '))
print(nome_compelto(' MARIA ', ' albertina '))
# Em funes Python podemos ter nenhuma ou vrias entradas. Em Lambdas tambm
hello = lambda: 'Hello World!'
uma = lambda x: 3 * x + 1
duas = lambda x, y: (x * y) ** 0.5
tres = lambda x, y, z: 3 / (1 / x + 1 / 7 + 1 / z)
# n = lambda x1, x2, ..., xn: <expresso>
print(hello())
print(uma(6))
print(duas(5, 7))
print(tres(3, 6, 9))
# OBS: Se passarmos mais argumentos do que parmetros esperados teremos TypeError
# Exemplo
autores = ['Paulo Silva', 'Maria Albertina', 'Luis Marques Nunes', 'Carlos Nunes',
'Ana S. Leito', 'Ins Garcia', 'Claudia Sofia', 'I. L. Antunes',
'Amrico Silva']
print(autores)
# ['Paulo Silva', 'Maria Albertina', 'Luis Marques Nunes', 'Carlos Nunes',
# 'Ana S. Leito', 'Ins Garcia', 'Claudia Sofia', 'I. L. Antunes', 'Amrico Silva']
# Ordenar pelo sobrenome
autores.sort(key=lambda sobrenome: sobrenome.split(' ')[-1].lower())
print(autores)
# ['Maria Albertina', 'I. L. Antunes', 'Ins Garcia', 'Ana S. Leito',
# 'Luis Marques Nunes', 'Carlos Nunes', 'Paulo Silva', 'Amrico Silva', 'Claudia Sofia']
"""
# Funo Quadrtica
# f(x) = a * x ** 2 + b * x + c
# Definindo a funo
def geradora_funcao_quadratica(a, b, c):
"""
Retorna a funo f(x) = a * x ** 2 + b * x + c
"""
return lambda x: a * x ** 2 + b * x + c
teste = geradora_funcao_quadratica(2, 3, -5)
print(teste(0))
print(teste(1))
print(teste(2))
print(geradora_funcao_quadratica(3, 0, 1)(2))
| 20.79798 | 94 | 0.641088 |
f8f83df34dfaf5ae52ea9e532bb035a4e1cce478
| 825 |
py
|
Python
|
ex085.py
|
EduotavioFonseca/ProgramasPython
|
8e0ef5f6f4239d1fe52321f8795b6573f6ff5130
|
[
"MIT"
] | null | null | null |
ex085.py
|
EduotavioFonseca/ProgramasPython
|
8e0ef5f6f4239d1fe52321f8795b6573f6ff5130
|
[
"MIT"
] | null | null | null |
ex085.py
|
EduotavioFonseca/ProgramasPython
|
8e0ef5f6f4239d1fe52321f8795b6573f6ff5130
|
[
"MIT"
] | null | null | null |
# A list inside a dictionary
campeonato = dict()
gol = []
aux = 0
campeonato['Jogador'] = str(input("Enter the player's name: "))
print()
partidas = int(input('How many matches did he play? '))
print()
for i in range(0, partidas):
    aux = int(input(f'How many goals in match {i + 1}? '))
    gol.append(aux)
    print()
campeonato['Gols'] = gol[:]
campeonato['Total'] = sum(gol)
print('=' * 55)
print()
print(campeonato)
print()
print('=' * 55)
print()
for k, v in campeonato.items():
    print(f'The field {k} holds the value: {v}')
print()
print('=' * 55)
print(f'The player {campeonato["Jogador"]} played {partidas} matches.')
print()
for i in range(0, partidas):
    print(f'In match {i + 1} he scored {gol[i]} goal(s).')
print()
print(f'In total he scored {campeonato["Total"]} goals.')
print('=' * 55)
| 25.78125 | 71 | 0.613333 |
f8f8dae5df2040a52619a0f46630de1b8ffbe445
| 533 |
py
|
Python
|
heat/initial_data.py
|
kjetil-lye/ismo_heat
|
09776b740a0543e270417af653d2a047c94f1b50
|
[
"MIT"
] | null | null | null |
heat/initial_data.py
|
kjetil-lye/ismo_heat
|
09776b740a0543e270417af653d2a047c94f1b50
|
[
"MIT"
] | 6 |
2020-11-13T19:04:16.000Z
|
2022-02-10T02:10:50.000Z
|
heat/initial_data.py
|
kjetil-lye/ismo_heat
|
09776b740a0543e270417af653d2a047c94f1b50
|
[
"MIT"
] | 1 |
2021-03-26T06:53:19.000Z
|
2021-03-26T06:53:19.000Z
|
import numpy
| 28.052632 | 85 | 0.613508 |
f8fa0708043799c2510940867111d04480ef484c
| 5,030 |
py
|
Python
|
explore/scripts/get_repos_creationhistory.py
|
john18/uccross.github.io
|
72cd88c7310ab1503467fba27add2338cf57d8f7
|
[
"MIT"
] | 12 |
2019-03-02T06:42:37.000Z
|
2022-03-01T03:59:08.000Z
|
explore/scripts/get_repos_creationhistory.py
|
john18/uccross.github.io
|
72cd88c7310ab1503467fba27add2338cf57d8f7
|
[
"MIT"
] | 6 |
2020-04-14T21:22:36.000Z
|
2022-01-19T23:41:35.000Z
|
explore/scripts/get_repos_creationhistory.py
|
john18/uccross.github.io
|
72cd88c7310ab1503467fba27add2338cf57d8f7
|
[
"MIT"
] | 29 |
2017-11-08T19:39:20.000Z
|
2022-03-17T18:05:29.000Z
|
import helpers
import json
import re
datfilepath = "../github-data/labRepos_CreationHistory.json"
allData = {}
# Check for and read existing data file
allData = helpers.read_existing(datfilepath)
# Read repo info data file (to use as repo list)
dataObj = helpers.read_json("../github-data/labReposInfo.json")
# Populate repo list
repolist = []
print("Getting internal repos ...")
repolist = sorted(dataObj["data"].keys())
print("Repo list complete. Found %d repos." % (len(repolist)))
# Read pretty GraphQL query
query_in = helpers.read_gql("../queries/repo-CreationDate.gql")
# Rest endpoint query
query_commits_in = "/repos/OWNNAME/REPONAME/commits?until=CREATETIME&per_page=100"
query_commits_in2 = "/repos/OWNNAME/REPONAME/commits?per_page=100"
# Retrieve authorization token
authhead = helpers.get_gitauth()
# Iterate through internal repos
print("Gathering data across multiple paginated queries...")
collective = {u'data': {}}
tab = " "
for repo in repolist:
# History doesn't change, only update new repos or those that had no previous commits
if "data" in allData.keys() and repo in allData["data"].keys():
if allData["data"][repo]["firstCommitAt"]:
print(tab + "Already recorded data for '%s'" % (repo))
continue
pageNum = 1
print("\n'%s'" % (repo))
print(tab + "page %d" % (pageNum))
repoSplit = repo.split("/")
# Query 1
print(tab + "Get creation date and default branch")
print(tab + "Modifying query...")
newquery = re.sub('OWNNAME', repoSplit[0], query_in)
newquery = re.sub('REPONAME', repoSplit[1], newquery)
gitquery = json.dumps({'query': newquery})
print(tab + "Query ready!")
# Actual query exchange
outObj = helpers.query_github(authhead, gitquery)
if outObj["errors"]:
print(tab + "Could not complete '%s'" % (repo))
collective["data"].pop(repo, None)
continue
# Update collective data
collective["data"][repo] = outObj["data"]["repository"]
# Query 2
print(tab + "Get pre-GitHub commit timestamps")
print(tab + "Modifying query...")
gitquery = re.sub('OWNNAME', repoSplit[0], query_commits_in)
gitquery = re.sub('REPONAME', repoSplit[1], gitquery)
gitquery = re.sub('CREATETIME', collective["data"][repo]["createdAt"], gitquery)
print(tab + "Query ready!")
# Actual query exchange
outObj = helpers.query_githubrest(authhead, gitquery)
if outObj["errors"]:
print(tab + "Could not get pre-GitHub commits for '%s'" % (repo))
outObj["data"] = []
# Update collective data
collective["data"][repo]["commitTimestamps"] = []
for commit in outObj["data"]:
collective["data"][repo]["commitTimestamps"].append(commit["commit"]["committer"]["date"])
# If no pre-GitHub commits, check the greater commit history
if len(collective["data"][repo]["commitTimestamps"]) > 0 and collective["data"][repo]["commitTimestamps"][0]:
collective["data"][repo]["initBeforeGitHubRepo"] = True
else:
print(tab + "No pre-GitHub commits found, getting full history")
collective["data"][repo]["initBeforeGitHubRepo"] = False
# Query 3
print(tab + "Modifying query...")
gitquery = re.sub('OWNNAME', repoSplit[0], query_commits_in2)
gitquery = re.sub('REPONAME', repoSplit[1], gitquery)
print(tab + "Query ready!")
# Actual query exchange
outObj = helpers.query_githubrest(authhead, gitquery)
if outObj["errors"]:
print(tab + "Could not complete '%s'" % (repo))
collective["data"].pop(repo, None)
continue
# Update collective data
for commit in outObj["data"]:
collective["data"][repo]["commitTimestamps"].append(commit["commit"]["committer"]["date"])
# Paginate if needed
hasNext = ("next" in outObj)
while hasNext:
pageNum += 1
print(tab + "page %d" % (pageNum))
print(tab + "Modifying query...")
newquery = gitquery + "&page=" + str(pageNum)
print(tab + "Query ready!")
# Actual query exchange
outObj = helpers.query_githubrest(authhead, newquery)
if outObj["errors"]:
print(tab + "Could not complete '%s'" % (repo))
collective["data"].pop(repo, None)
continue
# Update collective data
for commit in outObj["data"]:
collective["data"][repo]["commitTimestamps"].append(commit["commit"]["committer"]["date"])
hasNext = ("next" in outObj)
# Sort dates
collective["data"][repo]["commitTimestamps"].sort()
# Save earliest commit date
firstdate = None
if len(collective["data"][repo]["commitTimestamps"]) > 0:
firstdate = collective["data"][repo]["commitTimestamps"][0]
collective["data"][repo]["firstCommitAt"] = firstdate
del collective["data"][repo]["commitTimestamps"]
print("'%s' Done!" % (repo))
print("\nCollective data gathering complete!")
# Combine new data with existing data
if "data" not in allData.keys():
allData["data"] = {}
for repo in collective["data"].keys():
allData["data"][repo] = collective["data"][repo]
allDataString = json.dumps(allData, indent=4, sort_keys=True)
# Write output file
print("\nWriting file '%s'" % (datfilepath))
with open(datfilepath, "w") as fileout:
fileout.write(allDataString)
print("Wrote file!")
print("\nDone!\n")
| 31.4375 | 110 | 0.695626 |
f8fb454d7a74c617f9f1467386eb93a2fe60e4db
| 341 |
py
|
Python
|
examples/test/runMe.py
|
tomaszjonak/PBL
|
738b95da52cd59dcacb0b9dc244ca1713b0264ac
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
examples/test/runMe.py
|
tomaszjonak/PBL
|
738b95da52cd59dcacb0b9dc244ca1713b0264ac
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
examples/test/runMe.py
|
tomaszjonak/PBL
|
738b95da52cd59dcacb0b9dc244ca1713b0264ac
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
#! /usr/bin/env python2.7
from __future__ import print_function
import sys
sys.path.append("../../include")
import PyBool_public_interface as Bool
if __name__ == "__main__":
expr = Bool.parse_std("input.txt")
expr = expr["main_expr"]
expr = Bool.simplify(expr)
expr = Bool.nne(expr)
print(Bool.print_expr(expr))
| 16.238095 | 38 | 0.683284 |
f8fcc7a6b82e6b901e4e3c720b6e0e1f082a90c0
| 24,425 |
py
|
Python
|
calculator.py
|
rupen4678/botique_management_system
|
9b7807cc28bb15e024093d6161a8fef96ce7e291
|
[
"Apache-2.0"
] | null | null | null |
calculator.py
|
rupen4678/botique_management_system
|
9b7807cc28bb15e024093d6161a8fef96ce7e291
|
[
"Apache-2.0"
] | null | null | null |
calculator.py
|
rupen4678/botique_management_system
|
9b7807cc28bb15e024093d6161a8fef96ce7e291
|
[
"Apache-2.0"
] | null | null | null |
from tkinter import *
import random
import time
from PIL import Image
from datetime import datetime
from tinydb import *
import os
import pickle
#from database1 import *
from random import randint
root = Tk()
root.geometry("1600x800+0+0")
root.title("Suman_dai_ko_DHOKAN")
root.configure(bg="goldenrod4")
text_Input = StringVar()
operator =""
yes =""
no=""
Tops = Frame(root, width=1600 ,height=50,bg="goldenrod4", relief=RIDGE)
Tops.pack(side=TOP)
f1 = Frame(root, width = 800 ,height=500,bg="goldenrod4",relief=SUNKEN)
f1.pack(side=LEFT)
f2 = Frame(root, width = 300,height = 700,bg="dark slate blue",relief=SUNKEN)
f2.pack(side=RIGHT)
#f3= Frame(root,width=1600,height=300,fg="blue", bg="powder blue", relief=SUNKEN).pack(side=Bottom)
#==========================================================Time=======================================
localtime=time.asctime(time.localtime(time.time()))
#datetime=Label(Tops,font("arial",20,"bold"),text=nowTime,bd=10 ,bg="black", #fg="white", anchor="w").pack()
#====================================debugged========================
shirt = IntVar()
pant = IntVar()
sale = IntVar()
buy = IntVar()
deposite = IntVar()
withdraw = IntVar()
coat = IntVar()
order = IntVar()
total = IntVar()
out = IntVar()
before = IntVar() #order before the 60
stock = IntVar()
delivery = IntVar()
#########################main_gate######################
#after wards set the total from here total.set
#++++++++++++++++++++++++++++++Variables_inset+++++++++++++++++++++++++++++++++
order_bef = IntVar()
stock_full = IntVar()
shrting = IntVar()
pant = IntVar()
sari = IntVar()
order_info = IntVar()
delivery_report = IntVar()
daily_info = IntVar()
sales = IntVar()
buy = IntVar()
total_bank = IntVar()
bank_deposite = IntVar()
bank_withdraw = IntVar()
due_amount = IntVar()
order_info = IntVar()
daily_cash = IntVar()
cus_name = IntVar()
cus_no = IntVar()
employee = IntVar()
###############################class of algoriths#########################
#++++++++++++++++++++++++++++++++++++++++tinydb example++++++++++++++++++++++
#db = TinyDB("/databse/d4ta.json")
#db.insert({"cus_number":"98938232", "cus_name":"rupen"})
#def no_y():
# lis = db.all()
################Info===============
lblInfo = Label(Tops, font=("arial",60, "italic bold"),text="Botique Management Systewm",fg="white", bg="dark slate blue", bd=10, anchor="w", relief=RIDGE)
lblInfo.pack()
lblInfo = Label(Tops, font=("arial",30, "bold"),text=localtime,fg="white",bg="black", bd=10, anchor="w", relief=RIDGE)
lblInfo.pack()
#===========================================================Calculator==================================
"""def current_dir():
import os
import sys
DIR = os.getcwd()
print(DIR)
lblInfo = Label(Tops, font=("arial",60, "italic"),text=current_dir,fg="black",bg="powder blue",bd=10, anchor="W")
lblInfo.pack()
#DIR = dir
#return dir
"""
#randomBtn=Button(f1,pady=16,padx=16,bd=8,bg="powder blue", text="C_dir", command=lambda: current_dir(dir)).pack(side=TOP)
'''def malware_activate():
global cmd_active
if "rupen" in cmd_active:
if "rupen" in cmd_active[1]:
if "ronley" in cmd_active[2]:'''
#==============================another windows about me=====================
#=============================getting all the infos ========================
###########################sending emails############################
def __send_email():
'''import smtplib
gmail = smtplib.SMTP("smtp.gmail.com", 587)
gmail.starttls()
_file = open("/root/Desktop/Desktop/python/")
gmail.login("username", "password")
msg = "YOUR MESSAGE"
gmail.sendmail("your email adress", "the")
gmail.quit()'''
dialog = Tk()
dialog.title("Send emails")
dialog.geometry("800x800")
dframe = Frame(dialog,width=800,height=800,bg="white",relief=SUNKEN).pack()
email = StringVar()
password = StringVar()
semail = StringVar()
spassword = StringVar()
label = Label(dframe, font=("arial",16, "bold"), fg="white", bg="black", text="your_email").pack(side=LEFT)
entry1 = Entry(dframe, font=("arial",16,"bold"), fg="white",bg="black", textvariable=email,insertwidth=1,bd=5).pack(side=RIGHT)
label1 = Label(dframe, font=("arial",16, "bold"), fg="white", bg="black", text="password", relief=SUNKEN).pack()
entry2 = Entry(dframe,font=("arial", 16 ,"bold"),textvariable=password, insertwidth=1,bd=5).pack(side=RIGHT)
Label2 =Label(dframe,font=("arial",16, "bold"),fg="white",bg="black", text="sender_email",relief=SUNKEN).pack(side=LEFT)
entry2 = Entry(dframe,font=("arial",16, "bold"),bd=5,fg="white",bg="black",textvariable=semail,insertwidth=1).pack(side=LEFT)
label3 = Label(dframe,font=("arial",16,"bold"),fg="white",bg="black",text="sender_password", relief=SUNKEN).pack(side=LEFT)
entry3= Entry(dframe,font=("arial",16,"bold"),fg="white",textvariable=spassword,insertwidth=1,relief=SUNKEN).pack()
dialog.mainloop()
#btnEmail = Button(root,font=("arial", 16, "bold"), bg="black",fg="white",text="email",command=lambda: __send_email(),relief=GROOVE).pack()
#================================next section===========================
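# NOTE: _price_inputs, ano_win1, _calculation, btnClick, btnClearDisplay and
# btnEqualsInput are defined in parts of the original file not included in
# this excerpt.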
fix = Button(root, bd=10,bg="black",fg="white",command=_price_inputs,relief=GROOVE).pack(side=BOTTOM)
btnru = Button(root, font=("arial 20 bold"),bd=20, bg="black",fg="white",text="click",command=ano_win1,relief=GROOVE).pack(side=BOTTOM)
#fucking mazing yr coding
#def yes_y():
# rupe = Toplevel(root)
# rupe.title("this is second window")
# return
#def no_y():
#nos = Toplevel(root)
#nos.title("this is nos window")
#return
a = Entry(f2,font=("arial", 20,"bold"), textvariable=text_Input, bd=30, insertwidth=4,
bg="dark slate blue",fg="white", justify="right").grid(columnspan=4)
btn7=Button(f2,padx=16,pady=16,bd=8, fg="black", font=("arial",20,"bold"),
text="7",bg="dim gray", command=lambda: btnClick(7)).grid(row=2,column=0)
btn8=Button(f2,padx=16,pady=16,bd=8, fg="black", font=("arial",20,"bold"),
text="8",bg="dim gray", command=lambda: btnClick(8)).grid(row=2,column=1)
btn9=Button(f2,padx=16,pady=16,bd=8, fg="black", font=("arial",20,"bold"),
text="9",bg="dim gray", command=lambda: btnClick(9)).grid(row=2,column=2)
#!!!!!!!!!!!!!!!!!!!!!!additions!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
Addition=Button(f2,padx=16,pady=16,bd=8,text="+",fg="black",bg="dim gray", command=lambda: btnClick("+")).grid(row=2,column=3)
btn6=Button(f2,padx=16,pady=16,bd=8, fg="black", font=("arial",20,"bold"),text="4", bg="dim gray", command=lambda: btnClick(4)).grid(row=3,column=0)
btn5=Button(f2,padx=16,pady=16,bd=8, fg="black", font=("arial",20,"bold"),text="5", bg="dim gray", command=lambda: btnClick(5)).grid(row=3,column=1)
btn4=Button(f2,padx=16,pady=16,bd=8, fg="black", font=("arial",20,"bold"),text="6",bg="dim gray", command=lambda: btnClick(6)).grid(row=3,column=2)
Subtract=Button(f2,padx=16,pady=16,bd=8,text="-", bg="dim gray", command=lambda: btnClick("-")).grid(row=3,column=3)
btn3=Button(f2,padx=16,pady=16,bd=8,text="3",font=("arial", 20, "bold") ,bg="dim gray", command=lambda: btnClick(3)).grid(row=4,column=0)
btn2=Button(f2,padx=16,pady=16,bd=8,text="2",font=("arial", 20, "bold"), bg="dim gray", command=lambda: btnClick(2)).grid(row=4,column=1)
btn1=Button(f2,padx=16,pady=16,bd=8,text="1",font=("arial", 20, "bold") ,bg="dim gray", command=lambda: btnClick(1)).grid(row=4,column=2)
Multiply=Button(f2,padx=16,pady=16,bd=8,text="*", bg="dim gray", command=lambda: btnClick("X")).grid(row=4,column=3)
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
btn0=Button(f2,padx=16,pady=16,bd=8,bg="dim gray",text="0",fg="black",font=("arial", 20, "bold"), command=lambda: btnClick(0)).grid(row=5,column=0)
btnClear=Button(f2,pady=16,padx=16,bd=8, fg="black",font=("arial", 20, "bold"),text="C",bg="dim gray", command=btnClearDisplay).grid(row=5,column=1)
btnEquals=Button(f2,padx=16,pady=16,fg="black",bd=8,text="=",bg="dim gray", font=("arial", 20,"bold"), command=btnEqualsInput).grid(row=5,column=2)
#btn2=Button(f2,padx=16,pady=16,bd=8,fg="black",text="2",bg="dim gray", command=lambda: btnClick(2)).grid(row=5,column=3)
division=Button(f2,padx=16,pady=16,bd=8,fg="black", text="/", bg="dim gray", command=lambda: btnClick("/")).grid(row=5,column=3)
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
rand = StringVar()
#lblReference = Label(f1,font=("arial", 16,"bold"), text="Reference",bd=16,fg="red",bg="red",anchor="w",relief=RIDGE).grid(row=0,column=0)
#txtReference=Entry(f1,font=("arial", 16, "bold"), textvariable=rand, bd=10,insertwidth=4,bg="red",fg="white", justify = "right").grid(row=0,column=1)
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
lblReference = Label(f1,font=("arial", 16,"bold"), text="Reference",bd=16,fg="white",bg="green",anchor="w", relief=RIDGE)
lblReference.grid(row=0,column=0)
b=Entry(f1,font=("arial", 16, "bold"), textvariable=rand, bd=10,insertwidth=4,fg="white",bg="black", justify = "left")
b.grid(row=0,column=1)
#img = "/root/Desktop/Desktop/python/projects/prj1_Botik/1.jpg"
#root.ima = Image.open(img)
#Label (root,bg="white",width=120,height=120, image=ima).pack()
bill_in = StringVar()
bill_out = StringVar()
shrting=Label(f1,font=("arial", 20, "bold"), text="Shirting:",bg="powder blue", fg="black",anchor="w",relief=GROOVE).grid(row=1,column=0)
shirts=Entry(f1,font=("arial", 16, "italic"), bd=10, textvariable=shirt, insertwidth=1,bg="black",fg="white", justify="left").grid(row=2,column=0)
owner=Button(root,padx=16,pady=16, font=("arial",12, "bold"),text="info", bd=8,bg="black",command=ano_win1,fg="white",relief=RAISED).pack(side=LEFT)
yes=Button(root,padx=16,pady=16,font=("arial",12, "bold"),text="Done",bd=8,bg="black", fg="white", command=_calculation,relief=RAISED).pack(side=RIGHT)  # pass the callback itself; calling it here would run it once at startup
panting=Label(f1,font=("arial",20, "bold"), text="pant_mm:", bg="powder blue",fg="black",anchor="w",relief=GROOVE).grid(row=1,column=1)
pantx=Entry(f1,font=("arial",16, "bold"), textvariable=pant, insertwidth=1, bd=10,bg="black",fg="white", justify="left").grid(row=2,column=1)
sales=Label(f1,font=("arial",16, "bold"), text="sales_total:",bg="powder blue",fg="black",anchor="w",bd=8,relief=GROOVE).grid(row=1,column=2)
salex=Entry(f1,font=("arial",16, "bold"),bg="black",fg="white",textvariable=sale,insertwidth=1,bd=10,justify="left").grid(row=2,column=2)
buying=Label(f1,font=("arial",16, "bold"), text="buying_something: ",bg="powder blue",fg="black", anchor="e", relief=GROOVE).grid(row=3,column=0)
buyx=Entry(f1,font=("arial", 16, "bold"), textvariable=buy, insertwidth=1, bd=10,bg="black", fg="white", justify="left").grid(row=4,column=0)
Bank_Total=Label(f1,font=("arial",16,"bold"),text="Bank_Deposite: ", bg="powder blue", fg="black", anchor="e",relief=GROOVE).grid(row=3, column=1)
depositex=Entry(f1,font=("arial",16,"bold"),bd=10, textvariable=deposite, bg="black", fg="white", justify="left").grid(row=4, column=1)
lblBankwith=Label(f1, font=("arial", 16, "bold"),fg="black",bg="powder blue",text="Bank_Withdraw", anchor="e",relief=GROOVE).grid(row=3,column=2)
withdrawx=Entry(f1,font=("arial",16, "bold"),bd=10, fg="white",bg="black", textvariable=withdraw, insertwidth=1).grid(row=4,column=2)
coating=Label(f1, font=("arial", 16, "bold"),text="coat_mm:", bg="powder blue",fg="black",anchor="e").grid(row=5,column=0)
coatx=Entry(f1, font=("arial", 16, "bold"), bg="black", fg="white",
textvariable=coat, insertwidth=1, justify="left",bd=10).grid(row=6,column=0)
lablsari=Label(f1,font=("arial", 16, "bold"), bg="powder blue",text="sari mm:", fg="black",anchor="e",relief=GROOVE).grid(row=5,column=1)
sarix=Entry(f1, font=("arial", 16, "bold"), bg="black",bd=10, fg="white",textvariable=sari, insertwidth=1).grid(row=6,column=1)
buying=Label(f1,font=("arial", 16, "bold"), bg="powder blue",text="buy_info:",fg="black",anchor="e",relief=GROOVE).grid(row=7,column=0)
buyx=Entry(f1,font=("arial",16, "bold"),bd=8, fg="white",bg="black",textvariable=buy,insertwidth=1).grid(row=8,column=0)
outgoing =Label(f1, font=("arial", 16, "bold"), bg="powder blue", text="outgoing:", fg="black",anchor="e",relief=GROOVE).grid(row=7,column=1)
outx=Entry(f1,font=("arial", 16, "bold"),textvariable=out, bd=8,fg="white",bg="black",insertwidth=1).grid(row=8,column=1)
ordering=Label(f1,font=("arial",16,"bold"),bg="powder blue",text="order_info:",fg="black",anchor="e",relief=GROOVE).grid(row=9,column=0)
orderx=Entry(f1,font=("arial",16,"bold"),insertwidth=1, textvariable=order,bd=8,fg="white",bg="black").grid(row=10,column=0)
lblcustomer=Label(f1,font=("arial",16,"bold"),bg="powder blue",text="cus_name:",fg="black",anchor="e",relief=GROOVE).grid(row=9,column=1)
no=Entry(f1,font=("arial",16, "bold"),bd=8,bg="black",fg="white",insertwidth=1, textvariable=cus_name).grid(row=10,column=1)
lblmonthly=Label(f1, font=("arial",16,"bold"),bg="powder blue",text="monthly:",fg="black",anchor="e",relief=GROOVE).grid(row=5,column=2)
monthly=StringVar()
monthx=Entry(f1,font=("arial",16,"bold"),show="blank",bg="black",textvariable=monthly,insertwidth=1,fg="white",bd=10).grid(row=6,column=2)
lbltotal=Label(f1, font=("arial", 16, "bold"),bg="powder blue",text="Total:",fg="black").grid(row=7,column=2)
totalx=Entry(f1, font=("arial", 16, "bold"),bg="black",textvariable=total,fg="white",insertwidth=1,bd=10).grid(row=8,column=2)
lblemployee = Label(f1,font=("arial", 16, "bold"),bg="powder blue",text="employee name:",fg="black",anchor="e",relief=GROOVE).grid(row=9,column=2)
employx= Entry(f1,font=("arial", 16,"bold"),textvariable=employee,insertwidth=1,bg="black",fg="white",bd=10).grid(row=10,column=2)
###############################database for the project######################
'''def __database():
db = TinyDB("/records.json")
#print(monthly)
#print(b)
#fuck = c.get()
a = order_bef.get()
b = stock_full.get()
c = shrting.get()
d = pant.get()
e = sari.get()
f = order_info.get()
g = delivery_report.get()
h = daily_info.get()
i = sales.get()
j = buy.get()
k = total_bank.get()
l = bank_deposite.get()
m = bank_withdraw.get()
n = due_amount.get()
o = order_info.get()
p = daily_cash.get()
q = cus_name.get()
r = cus_no.get()
s = employee.get()
files = {"a": "", "b": "", "c": "", "d": "", "e": "", "f": "", "g": "", "h": "", "i": "", "j": ""
, "k": "", "l": "", "m": "", "n": "", "o": "", "p": "", "q": "", "r": "", "s": ""}
db.insert({"total": a }),
db.insert({"regrds":"reference"}),
db.insert({"day_income":"billion"}),
db.insert({"day_outgoing":"billout"}),
db.insert({"bankdeposit":"bankdepo"}),
db.insert({"full_stock":"stock"}),
db.insert({"shirt_mm":"shirt"}),
db.insert({"bankwithdraw":"bankwith"}),
db.insert({"pantmm":"pant"}),
db.insert({"sarimm":"sari"}),
db.insert({"orderday":"orderinfo"}),
db.insert({"salling":"sales"}),
db.insert({"buying":"buy"}),
db.insert({"customern":"customer"}),
db.insert({"monthly_info":"monthly"}),
db.insert({"totaldy":"total"}),
db.insert({"employeid":"employee"})
for db in range(1):
print(db)
files = list(files)
file = open("/file.txt", "wb")
da = ""
for data in files:
if len(data) != 0:
            print("these are the files written in python\\n check file.txt for debugging ")
da += data
print(data)
da = int(da)
file.write(da)
try:
file = open("/records.txt", "r")
except:
print("creating the file from script {}".format(__file__))
file = open("/records.txt","w")
finally:
pass
check = os.path.isfile("/records.txt")
if check:
for item in db:
data = open("/records.txt","wb")
#with open("/records.txt","wb") as file:
#pickle.dump(item, data)
#file.close()
#file1 = pickle.load(file)
if len(item) == len(file1):
break
if item != file:
#item = str(item)
file.write("%s" %(item))
time.sleep(1)
print("done writing to the file")
#for item in db:
with open("/records.txt", "rb") as file:
reading = file1
if len(reading) != None:
print("its printed")
print(reading)
file.close()
#db.insert({"name":"Rupen Gurung"})
name = Query()
#db(name.type == "changed")
d = datetime.now()
month = str(d.month)
day = str(d.day)
year = str(d.year)
hour = str(d.hour)
minute = str(d.minute)
second = str(d.second)
between = str(":")'''
'''def __time(infos):
time = datetime.now()
day = str(time.day)
month = str(time.month)
hour = str(time.hour)
second = str(time.second)
year = str(time.year)
minute = str(time.minute)
#assuming the infos as the order taken that will be notified before the
#60 hours
#changing all the formats to the seconds that will be easy for the #calculation
#first calculating seconds in one day that will ease all the further operations
daysec = (24*60) * 60 * 60
###
##this is will be easy now
yearSec = daysec * 365
month = daysec * 30
daySec = daysec
hourSec = 60 * 60 * 60
minuteSec = 60 * 60
files = {"a":"", "b":"","c":"","d":"","e":"","f":"","g":"","h":"","i":"","j":""
,"k":"","l":"","m":"","n":"","o":"","p":"","q":"","r":"","s":""}'''
#files = list(files)
'''for data in files:
if len(data) != 0:
print(data)'''
#lenght = len(db)
##this will show the recorded bill numbers
#l
# command=bill_in).pack(anchor=NE)
root.mainloop()
#__database()
#add1=Button(f2,padx=16,pady=16,bd=8, fg="black", font=("arial",20,"bold"),
#text="+",bg="powder blue", command=lambda: btnClick("+")).grid(row=3,column=6)
#btn10=Button(f2,padx=16,pady=16, fg="blue", font=("arial",5,"bold"),
#	 text="rupen",bg="powder blue", command=rupen).grid(row=3,column=5)
#def main():
#    root.mainloop()
#main()
| 34.401408 | 155 | 0.600368 |
f8feca35fdbbdb7ba2119b9d7d1e1e21456081ac
| 18,656 |
py
|
Python
|
mmdet/models/anchor_heads/embedding_nnms_head_v2_limited.py
|
Lanselott/mmdetection
|
03ce0a87f4d52f4adf4f78fd39ad30b2da394376
|
[
"Apache-2.0"
] | null | null | null |
mmdet/models/anchor_heads/embedding_nnms_head_v2_limited.py
|
Lanselott/mmdetection
|
03ce0a87f4d52f4adf4f78fd39ad30b2da394376
|
[
"Apache-2.0"
] | null | null | null |
mmdet/models/anchor_heads/embedding_nnms_head_v2_limited.py
|
Lanselott/mmdetection
|
03ce0a87f4d52f4adf4f78fd39ad30b2da394376
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms, bbox_overlaps
from ..builder import build_loss
from ..registry import HEADS
from ..utils import ConvModule, Scale, bias_init_with_prob
from IPython import embed
INF = 1e8
| 40.468547 | 125 | 0.572684 |
f8ff164c7b99d263ef5622eb983cd4b6a79fdda1
| 863 |
py
|
Python
|
firefly_flask/app/models.py
|
Haehnchen/trivago-firefly
|
ee92450fda42059f1865971849dc234a42dc9027
|
[
"MIT"
] | null | null | null |
firefly_flask/app/models.py
|
Haehnchen/trivago-firefly
|
ee92450fda42059f1865971849dc234a42dc9027
|
[
"MIT"
] | null | null | null |
firefly_flask/app/models.py
|
Haehnchen/trivago-firefly
|
ee92450fda42059f1865971849dc234a42dc9027
|
[
"MIT"
] | null | null | null |
from . import db
from sqlalchemy.dialects.mysql import LONGTEXT
| 28.766667 | 66 | 0.682503 |
f8ffdfd391593d89205af0a89c79433669635ec2
| 471 |
py
|
Python
|
plotly_basic_plots/line_chart2.py
|
HarishOsthe/Plotly_Dash_Practice_Codes
|
ca709509d27803a4d727b3986d4473cdd71a41a6
|
[
"MIT"
] | null | null | null |
plotly_basic_plots/line_chart2.py
|
HarishOsthe/Plotly_Dash_Practice_Codes
|
ca709509d27803a4d727b3986d4473cdd71a41a6
|
[
"MIT"
] | null | null | null |
plotly_basic_plots/line_chart2.py
|
HarishOsthe/Plotly_Dash_Practice_Codes
|
ca709509d27803a4d727b3986d4473cdd71a41a6
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import plotly.offline as pyo
import plotly.graph_objs as go
df= pd.read_csv("Data/nst-est2017-alldata.csv")
df2=df[df["DIVISION"] == '1']
df2.set_index("NAME",inplace=True)
list_of_pop_col=[col for col in df2.columns if col.startswith('POP')]
df2=df2[list_of_pop_col]
data=[go.Scatter(x=df2.columns,
y=df2.loc[name],
mode='lines',
name=name) for name in df2.index]
pyo.plot(data)
| 24.789474 | 69 | 0.66879 |
f8fff1c2d03cf1ef4aae436dd124c9505b06ab95
| 21,993 |
py
|
Python
|
tests/test_markup.py
|
samdoran/sphinx
|
4c91c038b220d07bbdfe0c1680af42fe897f342c
|
[
"BSD-2-Clause"
] | 4,973 |
2015-01-03T15:44:00.000Z
|
2022-03-31T03:11:51.000Z
|
tests/test_markup.py
|
samdoran/sphinx
|
4c91c038b220d07bbdfe0c1680af42fe897f342c
|
[
"BSD-2-Clause"
] | 7,850 |
2015-01-02T08:09:25.000Z
|
2022-03-31T18:57:40.000Z
|
tests/test_markup.py
|
samdoran/sphinx
|
4c91c038b220d07bbdfe0c1680af42fe897f342c
|
[
"BSD-2-Clause"
] | 2,179 |
2015-01-03T15:26:53.000Z
|
2022-03-31T12:22:44.000Z
|
"""
test_markup
~~~~~~~~~~~
Test various Sphinx-specific markup extensions.
:copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import pytest
from docutils import frontend, nodes, utils
from docutils.parsers.rst import Parser as RstParser
from sphinx import addnodes
from sphinx.builders.html.transforms import KeyboardTransform
from sphinx.builders.latex import LaTeXBuilder
from sphinx.roles import XRefRole
from sphinx.testing.util import Struct, assert_node
from sphinx.transforms import SphinxSmartQuotes
from sphinx.util import docutils, texescape
from sphinx.util.docutils import sphinx_domains
from sphinx.writers.html import HTMLTranslator, HTMLWriter
from sphinx.writers.latex import LaTeXTranslator, LaTeXWriter
# since we're not resolving the markup afterwards, these nodes may remain
class ForgivingTranslator:
def visit_pending_xref(self, node):
pass
def test_samp_role(parse):
# no braces
text = ':samp:`a{b}c`'
doctree = parse(text)
assert_node(doctree[0], [nodes.paragraph, nodes.literal, ("a",
[nodes.emphasis, "b"],
"c")])
# nested braces
text = ':samp:`a{{b}}c`'
doctree = parse(text)
assert_node(doctree[0], [nodes.paragraph, nodes.literal, ("a",
[nodes.emphasis, "{b"],
"}c")])
# half-opened braces
text = ':samp:`a{bc`'
doctree = parse(text)
assert_node(doctree[0], [nodes.paragraph, nodes.literal, "a{bc"])
# escaped braces
text = ':samp:`a\\\\{b}c`'
doctree = parse(text)
assert_node(doctree[0], [nodes.paragraph, nodes.literal, "a{b}c"])
    # no braces (whitespace is kept as is)
text = ':samp:`code sample`'
doctree = parse(text)
assert_node(doctree[0], [nodes.paragraph, nodes.literal, "code sample"])
def test_download_role(parse):
# implicit
text = ':download:`sphinx.rst`'
doctree = parse(text)
assert_node(doctree[0], [nodes.paragraph, addnodes.download_reference,
nodes.literal, "sphinx.rst"])
assert_node(doctree[0][0], refdoc='dummy', refdomain='', reftype='download',
refexplicit=False, reftarget='sphinx.rst', refwarn=False)
assert_node(doctree[0][0][0], classes=['xref', 'download'])
# explicit
text = ':download:`reftitle <sphinx.rst>`'
doctree = parse(text)
assert_node(doctree[0], [nodes.paragraph, addnodes.download_reference,
nodes.literal, "reftitle"])
assert_node(doctree[0][0], refdoc='dummy', refdomain='', reftype='download',
refexplicit=True, reftarget='sphinx.rst', refwarn=False)
assert_node(doctree[0][0][0], classes=['xref', 'download'])
def test_XRefRole(inliner):
role = XRefRole()
# implicit
doctrees, errors = role('ref', 'rawtext', 'text', 5, inliner, {}, [])
assert len(doctrees) == 1
assert_node(doctrees[0], [addnodes.pending_xref, nodes.literal, 'text'])
assert_node(doctrees[0], refdoc='dummy', refdomain='', reftype='ref', reftarget='text',
refexplicit=False, refwarn=False)
assert errors == []
# explicit
doctrees, errors = role('ref', 'rawtext', 'title <target>', 5, inliner, {}, [])
assert_node(doctrees[0], [addnodes.pending_xref, nodes.literal, 'title'])
assert_node(doctrees[0], refdoc='dummy', refdomain='', reftype='ref', reftarget='target',
refexplicit=True, refwarn=False)
# bang
doctrees, errors = role('ref', 'rawtext', '!title <target>', 5, inliner, {}, [])
assert_node(doctrees[0], [nodes.literal, 'title <target>'])
# refdomain
doctrees, errors = role('test:doc', 'rawtext', 'text', 5, inliner, {}, [])
assert_node(doctrees[0], [addnodes.pending_xref, nodes.literal, 'text'])
assert_node(doctrees[0], refdoc='dummy', refdomain='test', reftype='doc', reftarget='text',
refexplicit=False, refwarn=False)
# fix_parens
role = XRefRole(fix_parens=True)
doctrees, errors = role('ref', 'rawtext', 'text()', 5, inliner, {}, [])
assert_node(doctrees[0], [addnodes.pending_xref, nodes.literal, 'text()'])
assert_node(doctrees[0], refdoc='dummy', refdomain='', reftype='ref', reftarget='text',
refexplicit=False, refwarn=False)
# lowercase
role = XRefRole(lowercase=True)
doctrees, errors = role('ref', 'rawtext', 'TEXT', 5, inliner, {}, [])
assert_node(doctrees[0], [addnodes.pending_xref, nodes.literal, 'TEXT'])
assert_node(doctrees[0], refdoc='dummy', refdomain='', reftype='ref', reftarget='text',
refexplicit=False, refwarn=False)
| 35.702922 | 111 | 0.60592 |
5d018e12b1c73bed5cd0c0150226e9cf4fc0779d
| 50,253 |
py
|
Python
|
dev/tools/leveleditor/direct/showbase/ContainerLeakDetector.py
|
CrankySupertoon01/Toontown-2
|
60893d104528a8e7eb4aced5d0015f22e203466d
|
[
"MIT"
] | 1 |
2021-02-13T22:40:50.000Z
|
2021-02-13T22:40:50.000Z
|
dev/tools/leveleditor/direct/showbase/ContainerLeakDetector.py
|
CrankySupertoonArchive/Toontown-2
|
60893d104528a8e7eb4aced5d0015f22e203466d
|
[
"MIT"
] | 1 |
2018-07-28T20:07:04.000Z
|
2018-07-30T18:28:34.000Z
|
dev/tools/leveleditor/direct/showbase/ContainerLeakDetector.py
|
CrankySupertoonArchive/Toontown-2
|
60893d104528a8e7eb4aced5d0015f22e203466d
|
[
"MIT"
] | 2 |
2019-12-02T01:39:10.000Z
|
2021-02-13T22:41:00.000Z
|
from pandac.PandaModules import PStatCollector
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.showbase.PythonUtil import Queue, invertDictLossless, makeFlywheelGen
from direct.showbase.PythonUtil import itype, serialNum, safeRepr, fastRepr
from direct.showbase.Job import Job
import types, weakref, random, __builtin__
def _getPruneTaskName(self):
return 'pruneLeakingContainerRefs-%s' % self._serialNum
def getContainerIds(self):
return self._id2ref.keys()
def getContainerByIdGen(self, id, **kwArgs):
# return a generator to look up a container
return self._id2ref[id].getContainerGen(**kwArgs)
def getContainerById(self, id):
for result in self._id2ref[id].getContainerGen():
pass
return result
def getContainerNameByIdGen(self, id, **kwArgs):
return self._id2ref[id].getEvalStrGen(**kwArgs)
def getContainerNameById(self, id):
if id in self._id2ref:
return repr(self._id2ref[id])
return '<unknown container>'
def removeContainerById(self, id):
if id in self._id2ref:
self._id2ref[id].destroy()
del self._id2ref[id]
| 44.159051 | 138 | 0.528804 |
5d02f65a15a35f3f5f2205ff5270eaa60e785026
| 12,032 |
py
|
Python
|
virtual/lib/python3.6/site-packages/sqlalchemy/sql/default_comparator.py
|
mzazakeith/flask-blog
|
2833404cc5e96ffdbfb767f35b9caf2bdcce7997
|
[
"MIT"
] | 207 |
2018-10-01T08:53:01.000Z
|
2022-03-14T12:15:54.000Z
|
virtual/lib/python3.6/site-packages/sqlalchemy/sql/default_comparator.py
|
mzazakeith/flask-blog
|
2833404cc5e96ffdbfb767f35b9caf2bdcce7997
|
[
"MIT"
] | 32 |
2018-05-01T05:24:43.000Z
|
2022-03-11T23:20:39.000Z
|
lib/python2.7/site-packages/sqlalchemy/sql/default_comparator.py
|
anish03/weather-dash
|
d517fa9da9028d1fc5d8fd71d77cee829ddee87b
|
[
"MIT"
] | 53 |
2019-03-12T16:50:21.000Z
|
2022-03-15T23:16:18.000Z
|
# sql/default_comparator.py
# Copyright (C) 2005-2018 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Default implementation of SQL comparison operations.
"""
from .. import exc, util
from . import type_api
from . import operators
from .elements import BindParameter, True_, False_, BinaryExpression, \
Null, _const_expr, _clause_element_as_expr, \
ClauseList, ColumnElement, TextClause, UnaryExpression, \
collate, _is_literal, _literal_as_text, ClauseElement, and_, or_, \
Slice, Visitable, _literal_as_binds, CollectionAggregate
from .selectable import SelectBase, Alias, Selectable, ScalarSelect
def _inv_impl(expr, op, **kw):
"""See :meth:`.ColumnOperators.__inv__`."""
if hasattr(expr, 'negation_clause'):
return expr.negation_clause
else:
return expr._negate()
def _neg_impl(expr, op, **kw):
"""See :meth:`.ColumnOperators.__neg__`."""
return UnaryExpression(expr, operator=operators.neg, type_=expr.type)
def _match_impl(expr, op, other, **kw):
"""See :meth:`.ColumnOperators.match`."""
return _boolean_compare(
expr, operators.match_op,
_check_literal(
expr, operators.match_op, other),
result_type=type_api.MATCHTYPE,
negate=operators.notmatch_op
if op is operators.match_op else operators.match_op,
**kw
)
def _distinct_impl(expr, op, **kw):
"""See :meth:`.ColumnOperators.distinct`."""
return UnaryExpression(expr, operator=operators.distinct_op,
type_=expr.type)
def _between_impl(expr, op, cleft, cright, **kw):
"""See :meth:`.ColumnOperators.between`."""
return BinaryExpression(
expr,
ClauseList(
_check_literal(expr, operators.and_, cleft),
_check_literal(expr, operators.and_, cright),
operator=operators.and_,
group=False, group_contents=False),
op,
negate=operators.notbetween_op
if op is operators.between_op
else operators.between_op,
modifiers=kw)
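# For example, ``col.between(1, 5)`` compiles to ``col BETWEEN :param_1 AND
# :param_2`` and its negation to ``col NOT BETWEEN :param_1 AND :param_2``
# (parameter names depend on the compiler; shown here for illustration only).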
# a mapping of operators with the method they use, along with
# their negated operator for comparison operators
operator_lookup = {
"and_": (_conjunction_operate,),
"or_": (_conjunction_operate,),
"inv": (_inv_impl,),
"add": (_binary_operate,),
"mul": (_binary_operate,),
"sub": (_binary_operate,),
"div": (_binary_operate,),
"mod": (_binary_operate,),
"truediv": (_binary_operate,),
"custom_op": (_custom_op_operate,),
"json_path_getitem_op": (_binary_operate, ),
"json_getitem_op": (_binary_operate, ),
"concat_op": (_binary_operate,),
"any_op": (_scalar, CollectionAggregate._create_any),
"all_op": (_scalar, CollectionAggregate._create_all),
"lt": (_boolean_compare, operators.ge),
"le": (_boolean_compare, operators.gt),
"ne": (_boolean_compare, operators.eq),
"gt": (_boolean_compare, operators.le),
"ge": (_boolean_compare, operators.lt),
"eq": (_boolean_compare, operators.ne),
"is_distinct_from": (_boolean_compare, operators.isnot_distinct_from),
"isnot_distinct_from": (_boolean_compare, operators.is_distinct_from),
"like_op": (_boolean_compare, operators.notlike_op),
"ilike_op": (_boolean_compare, operators.notilike_op),
"notlike_op": (_boolean_compare, operators.like_op),
"notilike_op": (_boolean_compare, operators.ilike_op),
"contains_op": (_boolean_compare, operators.notcontains_op),
"startswith_op": (_boolean_compare, operators.notstartswith_op),
"endswith_op": (_boolean_compare, operators.notendswith_op),
"desc_op": (_scalar, UnaryExpression._create_desc),
"asc_op": (_scalar, UnaryExpression._create_asc),
"nullsfirst_op": (_scalar, UnaryExpression._create_nullsfirst),
"nullslast_op": (_scalar, UnaryExpression._create_nullslast),
"in_op": (_in_impl, operators.notin_op),
"notin_op": (_in_impl, operators.in_op),
"is_": (_boolean_compare, operators.is_),
"isnot": (_boolean_compare, operators.isnot),
"collate": (_collate_impl,),
"match_op": (_match_impl,),
"notmatch_op": (_match_impl,),
"distinct_op": (_distinct_impl,),
"between_op": (_between_impl, ),
"notbetween_op": (_between_impl, ),
"neg": (_neg_impl,),
"getitem": (_getitem_impl,),
"lshift": (_unsupported_impl,),
"rshift": (_unsupported_impl,),
"contains": (_unsupported_impl,),
}
| 36.907975 | 79 | 0.593251 |
5d0300b0501feb02f7f7fdb3d0f6bb946c42313a
| 2,153 |
py
|
Python
|
recipes/serializers.py
|
klharshini/recipe-django-api
|
7ceb00ab26f6e0d19196519ece297d2f4d616a5d
|
[
"Apache-2.0"
] | null | null | null |
recipes/serializers.py
|
klharshini/recipe-django-api
|
7ceb00ab26f6e0d19196519ece297d2f4d616a5d
|
[
"Apache-2.0"
] | 3 |
2020-06-05T21:58:22.000Z
|
2021-06-10T21:40:50.000Z
|
recipes/serializers.py
|
klharshini/recipe-django-api
|
7ceb00ab26f6e0d19196519ece297d2f4d616a5d
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib.auth.validators import UnicodeUsernameValidator
from rest_framework import serializers
from django.contrib.auth.models import User
from recipes.models import Recipe, Ingredient, Step
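# The serializer bodies are not shown in this excerpt; below is a hypothetical
# minimal sketch of what these imports suggest, using the standard DRF
# ModelSerializer idiom (the field selections are assumptions).
class IngredientSerializer(serializers.ModelSerializer):
    class Meta:
        model = Ingredient
        fields = '__all__'
class StepSerializer(serializers.ModelSerializer):
    class Meta:
        model = Step
        fields = '__all__'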
| 33.123077 | 69 | 0.672085 |
5d0375507c268370ae9c58f1a6b3dd509a4f4999
| 1,652 |
py
|
Python
|
tests/test_model/test_temporal_regression_head.py
|
jcwon0/BlurHPE
|
c97a57e92a8a7f171b0403aee640222a32513562
|
[
"Apache-2.0"
] | null | null | null |
tests/test_model/test_temporal_regression_head.py
|
jcwon0/BlurHPE
|
c97a57e92a8a7f171b0403aee640222a32513562
|
[
"Apache-2.0"
] | null | null | null |
tests/test_model/test_temporal_regression_head.py
|
jcwon0/BlurHPE
|
c97a57e92a8a7f171b0403aee640222a32513562
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import pytest
import torch
from mmpose.models import TemporalRegressionHead
def test_temporal_regression_head():
"""Test temporal head."""
head = TemporalRegressionHead(
in_channels=1024,
num_joints=17,
loss_keypoint=dict(type='MPJPELoss', use_target_weight=True))
head.init_weights()
with pytest.raises(AssertionError):
# ndim of the input tensor should be 3
input_shape = (1, 1024, 1, 1)
inputs = _demo_inputs(input_shape)
_ = head(inputs)
with pytest.raises(AssertionError):
# size of the last dim should be 1
input_shape = (1, 1024, 3)
inputs = _demo_inputs(input_shape)
_ = head(inputs)
input_shape = (1, 1024, 1)
inputs = _demo_inputs(input_shape)
out = head(inputs)
assert out.shape == torch.Size([1, 17, 3])
loss = head.get_loss(out, out, torch.ones_like(out))
assert torch.allclose(loss['reg_loss'], torch.tensor(0.))
_ = head.inference_model(inputs)
_ = head.inference_model(inputs, [(0, 1), (2, 3)])
acc = head.get_accuracy(out, out, torch.ones_like(out))
assert acc['mpjpe'] == 0.
np.testing.assert_almost_equal(acc['p_mpjpe'], 0.)
def _demo_inputs(input_shape=(1, 1024, 1)):
"""Create a superset of inputs needed to run head.
Args:
input_shape (tuple): input batch dimensions.
Default: (1, 1024, 1).
Returns:
Random input tensor with the size of input_shape.
"""
inps = np.random.random(input_shape)
inps = torch.FloatTensor(inps)
return inps
| 28.982456 | 70 | 0.625303 |
5d03e84b9fba295a1596df9171a5466ae68073d3
| 415 |
py
|
Python
|
django_orm/sports_orm/leagues/migrations/0002_auto_20161031_1620.py
|
gfhuertac/coding_dojo_python
|
4d17bb63fb2b9669216a0f60326d4a4b9055af7e
|
[
"MIT"
] | null | null | null |
django_orm/sports_orm/leagues/migrations/0002_auto_20161031_1620.py
|
gfhuertac/coding_dojo_python
|
4d17bb63fb2b9669216a0f60326d4a4b9055af7e
|
[
"MIT"
] | 6 |
2020-06-06T01:50:21.000Z
|
2022-02-10T11:33:02.000Z
|
django_orm/sports_orm/leagues/migrations/0002_auto_20161031_1620.py
|
gfhuertac/coding_dojo_python
|
4d17bb63fb2b9669216a0f60326d4a4b9055af7e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-31 23:20
from __future__ import unicode_literals
from django.db import migrations
| 19.761905 | 48 | 0.592771 |
5d04161d9491b3b2bccc61b8d346c61f251d0a5b
| 1,774 |
py
|
Python
|
353-Design-Snake-Game/solution.py
|
Tanych/CodeTracking
|
86f1cb98de801f58c39d9a48ce9de12df7303d20
|
[
"MIT"
] | null | null | null |
353-Design-Snake-Game/solution.py
|
Tanych/CodeTracking
|
86f1cb98de801f58c39d9a48ce9de12df7303d20
|
[
"MIT"
] | null | null | null |
353-Design-Snake-Game/solution.py
|
Tanych/CodeTracking
|
86f1cb98de801f58c39d9a48ce9de12df7303d20
|
[
"MIT"
] | null | null | null |
# Your SnakeGame object will be instantiated and called as such:
# obj = SnakeGame(width, height, food)
# param_1 = obj.move(direction)
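# A minimal sketch of the class the template above assumes (hypothetical
# implementation, not from the original file): the snake is a deque with the
# head at the left end, plus a set for O(1) self-collision checks.
from collections import deque
class SnakeGame(object):
    def __init__(self, width, height, food):
        self.width, self.height = width, height
        self.food = deque(food)          # upcoming food positions, in order
        self.snake = deque([(0, 0)])     # head first
        self.occupied = {(0, 0)}
        self.score = 0
    def move(self, direction):
        dr, dc = {'U': (-1, 0), 'D': (1, 0), 'L': (0, -1), 'R': (0, 1)}[direction]
        head = (self.snake[0][0] + dr, self.snake[0][1] + dc)
        if self.food and list(head) == list(self.food[0]):
            self.food.popleft()          # eat: the tail does not move
            self.score += 1
        else:
            tail = self.snake.pop()      # normal move frees the tail cell
            self.occupied.discard(tail)
        if (head in self.occupied
                or not 0 <= head[0] < self.height
                or not 0 <= head[1] < self.width):
            return -1                    # hit a wall or itself
        self.snake.appendleft(head)
        self.occupied.add(head)
        return self.score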
| 35.48 | 102 | 0.567644 |
5d046e78f1ff28c88e9dd3fba255e0d381257af6
| 975 |
py
|
Python
|
scripts/register_sam.py
|
jessebrennan/azul
|
65970a0947f38fae439a3bf8fd960d351787b7a3
|
[
"Apache-2.0"
] | null | null | null |
scripts/register_sam.py
|
jessebrennan/azul
|
65970a0947f38fae439a3bf8fd960d351787b7a3
|
[
"Apache-2.0"
] | null | null | null |
scripts/register_sam.py
|
jessebrennan/azul
|
65970a0947f38fae439a3bf8fd960d351787b7a3
|
[
"Apache-2.0"
] | null | null | null |
from itertools import (
chain,
)
import logging
from azul import (
config,
require,
)
from azul.logging import (
configure_script_logging,
)
from azul.terra import (
TDRClient,
TDRSourceName,
)
log = logging.getLogger(__name__)
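def main():
    # Hypothetical sketch: the original body of main() is not shown in this
    # excerpt. Only the logging setup is certain from the imports; the call
    # below assumes TDRClient exposes a SAM registration method.
    configure_script_logging(log)
    tdr = TDRClient()
    tdr.register_with_sam()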
if __name__ == '__main__':
main()
| 22.159091 | 78 | 0.644103 |
5d0642e93e7a866ace737fc8f40342ddac2993c4
| 17,835 |
py
|
Python
|
dsn/editor/construct.py
|
expressionsofchange/nerf0
|
788203619fc89c92e8c7301d62bbc4f1f4ee66e1
|
[
"MIT"
] | 2 |
2019-04-30T05:42:05.000Z
|
2019-08-11T19:17:20.000Z
|
dsn/editor/construct.py
|
expressionsofchange/nerf0
|
788203619fc89c92e8c7301d62bbc4f1f4ee66e1
|
[
"MIT"
] | null | null | null |
dsn/editor/construct.py
|
expressionsofchange/nerf0
|
788203619fc89c92e8c7301d62bbc4f1f4ee66e1
|
[
"MIT"
] | null | null | null |
"""
Tools to "play notes for the editor clef", which may be thought of as "executing editor commands".
NOTE: in the below, we often connect notes together "manually", i.e. using NoteSlur(..., previous_hash). As an
alternative, we could consider `nouts_for_notes`.
"""
from s_address import node_for_s_address, s_dfs
from dsn.s_expr.legato import NoteSlur, NoteCapo
from dsn.s_expr.utils import (
bubble_history_up,
calc_possibility,
insert_text_at,
insert_node_at,
replace_text_at,
weave_disjoint_replaces,
)
from dsn.s_expr.clef import Delete, Insert, Replace, BecomeNode
from dsn.s_expr.structure import TreeNode
from dsn.editor.clef import (
CursorChild,
CursorDFS,
CursorParent,
CursorSet,
EDelete,
EncloseWithParent,
InsertNodeChild,
InsertNodeSibbling,
MoveSelectionChild,
MoveSelectionSibbling,
LeaveChildrenBehind,
SwapSibbling,
TextInsert,
TextReplace,
)
| 47.18254 | 120 | 0.691786 |
5d06e71f9e23ea91aab0fd29960c633c1cc96c2f
| 636 |
py
|
Python
|
src/pytong/base.py
|
richtong/pytong
|
6ff07a1bdf1d5e2232bfc102cce2dd74783bb111
|
[
"MIT"
] | null | null | null |
src/pytong/base.py
|
richtong/pytong
|
6ff07a1bdf1d5e2232bfc102cce2dd74783bb111
|
[
"MIT"
] | null | null | null |
src/pytong/base.py
|
richtong/pytong
|
6ff07a1bdf1d5e2232bfc102cce2dd74783bb111
|
[
"MIT"
] | null | null | null |
"""Base for all Classes.
Base mainly includes the description fields
"""
import logging
from typing import Optional
from .log import Log # type: ignore
| 22.714286 | 75 | 0.610063 |
5d073e43082666b4fb7e947816cf1a811338dbe3
| 620 |
py
|
Python
|
subprocess-10.py
|
GuillaumeFalourd/poc-subprocess
|
8f014a709ac2e471092d4ea1f61f1a9ff65ff571
|
[
"Apache-2.0"
] | 1 |
2021-12-08T12:58:14.000Z
|
2021-12-08T12:58:14.000Z
|
subprocess-10.py
|
GuillaumeFalourd/poc-subprocess
|
8f014a709ac2e471092d4ea1f61f1a9ff65ff571
|
[
"Apache-2.0"
] | null | null | null |
subprocess-10.py
|
GuillaumeFalourd/poc-subprocess
|
8f014a709ac2e471092d4ea1f61f1a9ff65ff571
|
[
"Apache-2.0"
] | null | null | null |
import subprocess
import re
programs = input('Separate the programs with a space: ').split()
secure_pattern = '[\w\d]'
for program in programs:
if not re.match(secure_pattern, program):
print("Sorry we can't check that program")
continue
    process = subprocess.run(
        ['which', program], capture_output=True, text=True)
if process.returncode == 0:
print(f'The program "{program}" is installed')
print(f'The location of the binary is: {process.stdout}')
else:
print(f'Sorry the {program} is not installed')
print(process.stderr)
print('\n')
| 22.962963 | 65 | 0.63871 |
5d0751ff55112db535e551765b215e8ad53a88d2
| 2,320 |
py
|
Python
|
authentication/socialaccount/forms.py
|
vo0doO/pydj-persweb
|
efcd6b7090230f7c0b9ec056008f6d1d9e876ed9
|
[
"CC0-1.0"
] | null | null | null |
authentication/socialaccount/forms.py
|
vo0doO/pydj-persweb
|
efcd6b7090230f7c0b9ec056008f6d1d9e876ed9
|
[
"CC0-1.0"
] | 4 |
2020-05-06T17:22:00.000Z
|
2021-12-13T20:43:30.000Z
|
authentication/socialaccount/forms.py
|
vo0doO/pydj-persweb
|
efcd6b7090230f7c0b9ec056008f6d1d9e876ed9
|
[
"CC0-1.0"
] | null | null | null |
from __future__ import absolute_import
from django import forms
from authentication.account.forms import BaseSignupForm
from . import app_settings, signals
from .adapter import get_adapter
from .models import SocialAccount
| 35.692308 | 76 | 0.617241 |
5d0792b2d66082b8e779a07e75899ce616d825f2
| 12,110 |
py
|
Python
|
pytheos/pytheos.py
|
nilsbeck/pytheos
|
de4f3a03330ddb28e68ddcaa7b4888ea9a25e238
|
[
"MIT"
] | null | null | null |
pytheos/pytheos.py
|
nilsbeck/pytheos
|
de4f3a03330ddb28e68ddcaa7b4888ea9a25e238
|
[
"MIT"
] | 1 |
2021-10-30T16:31:41.000Z
|
2021-10-30T16:31:41.000Z
|
pytheos/pytheos.py
|
nilsbeck/pytheos
|
de4f3a03330ddb28e68ddcaa7b4888ea9a25e238
|
[
"MIT"
] | 1 |
2021-10-30T14:24:58.000Z
|
2021-10-30T14:24:58.000Z
|
#!/usr/bin/env python
""" Provides the primary interface into the library """
from __future__ import annotations
import asyncio
import logging
from typing import Callable, Optional, Union
from . import utils
from . import controllers
from .networking.connection import Connection
from .networking.types import SSDPResponse
from .networking.errors import ChannelUnavailableError
from .models.heos import HEOSEvent
from .models.system import AccountStatus
logger = logging.getLogger('pytheos')
def close(self):
""" Close the connection to our HEOS device
:return: None
"""
logger.info(f'Closing connection to {self.server}:{self.port}')
if self._event_task:
self._event_task.cancel()
if self._event_processor:
self._event_processor.cancel()
self._connected = False
def subscribe(self, event_name: str, callback: Callable):
""" Subscribe a callback function to a specific event
:param event_name: Event name
:param callback: Callback function
:return: None
"""
# FIXME: Change event_name to an enum
if self._event_subscriptions.get(event_name) is None:
self._event_subscriptions[event_name] = []
self._event_subscriptions[event_name].append(callback)
def is_receiving_events(self):
""" Retrieves whether or not we're receiving events.
:return: bool
"""
return self._receive_events
def _init_internal_event_handlers(self):
""" Initialize the internal event handlers
:return: None
"""
# FIXME: Meh, do something better with this.
internal_handler_map = {
# 'event/sources_changed': self._handle_sources_changed,
# 'event/players_changed': self._handle_players_changed,
# 'event/groups_changed': self._handle_groups_changed,
# 'event/player_state_changed': self._handle_player_state_changed,
# 'event/player_now_playing_changed': self._handle_now_playing_changed,
# 'event/player_now_playing_progress': self._handle_now_playing_progress,
# 'event/player_playback_error': self._handle_playback_error,
# 'event/player_queue_changed': self._handle_queue_changed,
# 'event/player_volume_changed': self._handle_volume_changed,
# 'event/repeat_mode_changed': self._handle_repeat_mode_changed,
# 'event/shuffle_mode_changed': self._handle_shuffle_mode_changed,
# 'event/group_volume_changed': self._handle_group_volume_changed,
# 'event/user_changed': self._handle_user_changed,
}
for event, callback in internal_handler_map.items():
self.subscribe(event, callback)
async def connect(host: Union[SSDPResponse, str], port: int=Pytheos.DEFAULT_PORT) -> Pytheos:
""" Connect to the provided host and return a context manager for use with the connection.
:param host: Host to connect to
:param port: Port to connect to
:raises: ValueError
:return: The Pytheos instance
"""
if isinstance(host, SSDPResponse):
host = utils.extract_host(host.location)
conn = Pytheos(host, port)
return await conn.connect()
| 31.952507 | 108 | 0.650372 |
5d0806edbac94be1802c588fc12c4cc3d74c7198
| 1,673 |
py
|
Python
|
source/188-Lista_celulares.py
|
FelixLuciano/DesSoft-2020.2
|
a44063d63778329f1e1266881f20f7954ecb528b
|
[
"MIT"
] | null | null | null |
source/188-Lista_celulares.py
|
FelixLuciano/DesSoft-2020.2
|
a44063d63778329f1e1266881f20f7954ecb528b
|
[
"MIT"
] | null | null | null |
source/188-Lista_celulares.py
|
FelixLuciano/DesSoft-2020.2
|
a44063d63778329f1e1266881f20f7954ecb528b
|
[
"MIT"
] | null | null | null |
# Lista celulares (cell phone list)
# Your company's marketing department wants to collect only the cell phone numbers, separating them from the landlines. To simplify this task, only numbers whose first digit after the area code is the additional digit 9 are considered cell phones.
# You were given the task of producing a list of the cell phone numbers, without the area code. However, the marketing department's phone records are not standardized, and the numbers follow 3 distinct formats:
# 1. Full numbers (13 or 14 characters), including the country code (+55) and the area code (e.g. 11). Examples: '+5511912345678' or '+551133334444' (note that both start with the '+' character);
# 2. Numbers containing only the area code (10 or 11 characters). Examples: '11987654321' or '1155556666';
# 3. Numbers without an area code (8 or 9 characters). Examples: '918273645' or '77778888'.
# Note that in every case the first example is a cell phone number and the second is not.
# Write a function that receives a list of phone numbers and returns a list containing only the cell phones. Each phone in the input list (received as the argument of your function) may be in any of the 3 formats above. The phones in the output list (returned by your function) must contain only the digits of the phone itself, removing the country code and the area code when necessary.
# Example: the call lista_celulares(['+5511912345678', '1155556666', '77778888', '+551133334444', '918273645', '11987654321']) must return the list ['912345678', '918273645', '987654321']
# Your function must be named lista_celulares.
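# A minimal sketch of one possible solution (the solution itself is not part
# of the original exercise statement):
def lista_celulares(telefones):
    celulares = []
    for numero in telefones:
        if numero.startswith('+'):   # full format: strip '+55' and area code
            numero = numero[5:]
        elif len(numero) >= 10:      # area-code format: strip the 2 digits
            numero = numero[2:]
        if numero.startswith('9'):   # cell numbers start with the extra 9
            celulares.append(numero)
    return celulares
# lista_celulares(['+5511912345678', '1155556666', '77778888',
#                  '+551133334444', '918273645', '11987654321'])
# -> ['912345678', '918273645', '987654321']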
| 152.090909 | 409 | 0.775254 |
5d0889f3f4e69f4a0ebb469755c280704d4811e2
| 326 |
py
|
Python
|
test_modules/language_dictionary_test.py
|
1goodday/Google-Dictionary-Pronunciation.ankiaddon
|
35837802e41d81733aec656fbf4ad1c8e4aeec5e
|
[
"MIT"
] | 1 |
2021-10-02T13:16:29.000Z
|
2021-10-02T13:16:29.000Z
|
test_modules/language_dictionary_test.py
|
1goodday/Google-Dictionary-Pronunciation.ankiaddon
|
35837802e41d81733aec656fbf4ad1c8e4aeec5e
|
[
"MIT"
] | 2 |
2021-09-08T14:08:33.000Z
|
2021-10-10T04:35:08.000Z
|
test_modules/language_dictionary_test.py
|
1goodday/Google-Dictionary.ankiaddon
|
35837802e41d81733aec656fbf4ad1c8e4aeec5e
|
[
"MIT"
] | null | null | null |
import csv
_iso_639_1_codes_file = open("files/ISO-639-1_Codes.csv", mode='r')
_iso_639_1_codes_dictreader = csv.DictReader(_iso_639_1_codes_file)
_iso_639_1_codes_dict: dict = {}
for _row in _iso_639_1_codes_dictreader:
    _iso_639_1_codes_dict[_row['ISO-639-1 Code']] = _row['Language']
_iso_639_1_codes_file.close()
print(str(_iso_639_1_codes_dict))
| 32.6 | 68 | 0.794479 |
5d090b8cc45daaa469bff1113230ad77ae43f4b6
| 9,596 |
py
|
Python
|
tianshou/data/collector.py
|
DZ9/tianshou
|
04208e6cce722b7a2353d9a5f4d6f0fc05797d67
|
[
"MIT"
] | 1 |
2020-04-01T04:47:39.000Z
|
2020-04-01T04:47:39.000Z
|
tianshou/data/collector.py
|
TommeyChang/tianshou
|
4f843d3f51789f488169131a5b5decba8bab2b31
|
[
"MIT"
] | null | null | null |
tianshou/data/collector.py
|
TommeyChang/tianshou
|
4f843d3f51789f488169131a5b5decba8bab2b31
|
[
"MIT"
] | 1 |
2022-01-23T10:52:48.000Z
|
2022-01-23T10:52:48.000Z
|
import time
import torch
import warnings
import numpy as np
from tianshou.env import BaseVectorEnv
from tianshou.data import Batch, ReplayBuffer,\
ListReplayBuffer
from tianshou.utils import MovAvg
| 40.150628 | 79 | 0.492393 |
5d0a9e073091c730b7a6a4929db2b2500c65ff5d
| 1,591 |
py
|
Python
|
drink_partners/partners/tests/views/test_search_partner_view.py
|
henriquebraga/drink-partners
|
4702263ae3e43ea9403cff5a72b68245d61880c7
|
[
"Apache-2.0"
] | null | null | null |
drink_partners/partners/tests/views/test_search_partner_view.py
|
henriquebraga/drink-partners
|
4702263ae3e43ea9403cff5a72b68245d61880c7
|
[
"Apache-2.0"
] | 22 |
2020-05-02T19:32:24.000Z
|
2021-10-17T21:19:46.000Z
|
drink_partners/partners/tests/views/test_search_partner_view.py
|
henriquebraga/drink-partners
|
4702263ae3e43ea9403cff5a72b68245d61880c7
|
[
"Apache-2.0"
] | null | null | null |
from drink_partners.contrib.samples import partner_bar_legal
| 31.82 | 90 | 0.649906 |
5d0c13bd9ead504a51ba033deca97415ff355734
| 17,858 |
py
|
Python
|
Titanic/class_create_model_of_logistic_regression.py
|
ysh329/Titanic-Machine-Learning-from-Disaster
|
d2ba330625e40b648b2946a8ca221198af148368
|
[
"MIT"
] | 1 |
2018-07-23T08:41:02.000Z
|
2018-07-23T08:41:02.000Z
|
Titanic/class_create_model_of_logistic_regression.py
|
ysh329/Titanic-Machine-Learning-from-Disaster
|
d2ba330625e40b648b2946a8ca221198af148368
|
[
"MIT"
] | null | null | null |
Titanic/class_create_model_of_logistic_regression.py
|
ysh329/Titanic-Machine-Learning-from-Disaster
|
d2ba330625e40b648b2946a8ca221198af148368
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# !/usr/bin/python
################################### PART0 DESCRIPTION #################################
# Filename: class_create_model_of_logistic_regression.py
# Description:
#
# Author: Shuai Yuan
# E-mail: [email protected]
# Create: 2016-01-23 23:32:49
# Last:
__author__ = 'yuens'
################################### PART1 IMPORT ######################################
import MySQLdb
import logging
import time
import pylab
from numpy import *
from math import exp
import csv
import decorator_of_function
################################### PART2 CLASS && FUNCTION ###########################
################################### PART3 CLASS TEST ##################################
"""
# Initial parameters
database_name = "TitanicDB"
passenger_table_name = "passenger_table"
LRModel = CreateLogisticRegressionModel()
"""
| 48.005376 | 177 | 0.57302 |
5d0d63268d357d52fa0b7327baa9d61702e3b1cd
| 3,341 |
py
|
Python
|
mjecv/io/base.py
|
mje-nz/mjecv
|
9a02c005a0abc7d21594f65c348cfe5185c90184
|
[
"MIT"
] | null | null | null |
mjecv/io/base.py
|
mje-nz/mjecv
|
9a02c005a0abc7d21594f65c348cfe5185c90184
|
[
"MIT"
] | null | null | null |
mjecv/io/base.py
|
mje-nz/mjecv
|
9a02c005a0abc7d21594f65c348cfe5185c90184
|
[
"MIT"
] | null | null | null |
import multiprocessing
from typing import List, Optional
import numpy as np
from ..util import dill_for_apply
| 33.079208 | 86 | 0.609398 |
5d0f2e44cd4703366dc6065304ee5f71411d41c4
| 1,495 |
py
|
Python
|
377_combination_sum_iv.py
|
gengwg/leetcode
|
0af5256ec98149ef5863f3bba78ed1e749650f6e
|
[
"Apache-2.0"
] | 2 |
2018-04-24T19:17:40.000Z
|
2018-04-24T19:33:52.000Z
|
377_combination_sum_iv.py
|
gengwg/leetcode
|
0af5256ec98149ef5863f3bba78ed1e749650f6e
|
[
"Apache-2.0"
] | null | null | null |
377_combination_sum_iv.py
|
gengwg/leetcode
|
0af5256ec98149ef5863f3bba78ed1e749650f6e
|
[
"Apache-2.0"
] | 3 |
2020-06-17T05:48:52.000Z
|
2021-01-02T06:08:25.000Z
|
# 377 Combination Sum IV
# Given an integer array with all positive numbers and no duplicates,
# find the number of possible combinations that add up to a positive integer target.
#
# Example:
#
# nums = [1, 2, 3]
# target = 4
#
# The possible combination ways are:
# (1, 1, 1, 1)
# (1, 1, 2)
# (1, 2, 1)
# (1, 3)
# (2, 1, 1)
# (2, 2)
# (3, 1)
#
# Note that different sequences are counted as different combinations.
#
# Therefore the output is 7.
#
# Follow up:
# What if negative numbers are allowed in the given array?
# How does it change the problem?
# What limitation we need to add to the question to allow negative numbers?
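# A minimal DP sketch (assumed implementation; the original file's Solution
# class is not shown in this excerpt): dp[i] counts the ordered combinations
# summing to i, so dp[i] = sum(dp[i - n]) over each n <= i.
class Solution(object):
    def combinationSum4(self, nums, target):
        dp = [0] * (target + 1)
        dp[0] = 1                      # one way to reach 0: pick nothing
        for i in range(1, target + 1):
            for n in nums:
                if n <= i:
                    dp[i] += dp[i - n]
        return dp[target]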
print(Solution().combinationSum4([1, 2, 3], 4))
| 24.508197 | 84 | 0.535786 |
5d102888dd921effe96e5fc388b2a1b8ea50b383
| 3,440 |
py
|
Python
|
nvidia-texture-tools/conanfile.py
|
koeleck/conan-packages
|
da43e82c2444e934e69a38e524998d028f8edcc3
|
[
"Unlicense"
] | null | null | null |
nvidia-texture-tools/conanfile.py
|
koeleck/conan-packages
|
da43e82c2444e934e69a38e524998d028f8edcc3
|
[
"Unlicense"
] | null | null | null |
nvidia-texture-tools/conanfile.py
|
koeleck/conan-packages
|
da43e82c2444e934e69a38e524998d028f8edcc3
|
[
"Unlicense"
] | null | null | null |
from conans import ConanFile, CMake, tools
import os
STATIC_LIBS = ["nvtt", "squish", "rg_etc1", "nvimage", "bc6h", "posh",
"bc7", "nvmath", "nvthread", "nvcore"]
SHARED_LIBS = ["nvtt", "nvimage", "nvthread", "nvmath", "nvcore"]
| 47.123288 | 184 | 0.627326 |
5d105ccb37935c70d4da4645c5743044452805b9
| 2,890 |
py
|
Python
|
train_args.py
|
MyWay/Create-Your-Own-Image-Classifier
|
70e5744084435af8a74b2cfe2098c25b0745c9af
|
[
"MIT"
] | null | null | null |
train_args.py
|
MyWay/Create-Your-Own-Image-Classifier
|
70e5744084435af8a74b2cfe2098c25b0745c9af
|
[
"MIT"
] | null | null | null |
train_args.py
|
MyWay/Create-Your-Own-Image-Classifier
|
70e5744084435af8a74b2cfe2098c25b0745c9af
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
""" train_args.py
train_args.py command-line args.
"""
import argparse
def get_args():
    """Build the command-line argument parser for train.py."""
parser = argparse.ArgumentParser(
description="This script lets you train and save your model.",
        usage="python3 train.py flowers/train --gpu --learning_rate 0.001 --epochs 11 --hidden_units 500",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('data_directory', action="store")
parser.add_argument('--arch',
action="store",
default="alexnet",
dest='arch',
type=str,
                        help='Model architecture to use (e.g. alexnet).',
)
parser.add_argument('--save_dir',
action="store",
default=".",
dest='save_dir',
type=str,
help='Directory to save the model file.',
)
parser.add_argument('--save_name',
action="store",
default="checkpoint",
dest='save_name',
type=str,
help='Checkpoint filename.',
)
parser.add_argument('--categories_json',
action="store",
default="cat_to_name.json",
dest='categories_json',
type=str,
help='Path to file containing the categories.',
)
parser.add_argument('--gpu',
action="store_true",
dest="use_gpu",
default=False,
help='Use the GPU to train instead of the CPU')
hp = parser.add_argument_group('hyperparameters')
hp.add_argument('--learning_rate',
action="store",
default=0.001,
type=float,
help='Learning rate')
hp.add_argument('--hidden_units', '-hu',
action="store",
dest="hidden_units",
default=[4096],
type=int,
nargs='+',
help='Hidden layer units')
hp.add_argument('--epochs',
action="store",
dest="epochs",
default=1,
type=int,
help='Epochs to train the model for')
    # parsing is left to the caller
    return parser
def main():
"""
Main Function
"""
print(f'Command line argument utility for train.py.\nTry "python train.py -h".')
if __name__ == '__main__':
main()
"""
main() is called if the script is executed on its own.
"""
| 30.104167 | 112 | 0.450173 |
5d12933f8b3900ae610ac625eadbf5cf407b20ba
| 1,483 |
py
|
Python
|
apps/payment/views.py
|
canadiyaman/thetask
|
0f1cea1d8eea4966138ef0bdc303a53e3511e57d
|
[
"RSA-MD"
] | null | null | null |
apps/payment/views.py
|
canadiyaman/thetask
|
0f1cea1d8eea4966138ef0bdc303a53e3511e57d
|
[
"RSA-MD"
] | null | null | null |
apps/payment/views.py
|
canadiyaman/thetask
|
0f1cea1d8eea4966138ef0bdc303a53e3511e57d
|
[
"RSA-MD"
] | null | null | null |
from django.http import HttpResponseRedirect
from django.conf import settings
from django.views.generic import TemplateView
from apps.payment.models import PaymentLog
from apps.payment.stripe import get_token, get_payment_charge
from apps.subscription.views import start_subscription
| 34.488372 | 106 | 0.653405 |
5d12b1edae2081b00e12d5653d2750d5567eb062
| 211 |
py
|
Python
|
users/apps.py
|
srinidhibhat/booknotes
|
666f92fac309b97c13b79e91f5493220f934cab3
|
[
"MIT"
] | null | null | null |
users/apps.py
|
srinidhibhat/booknotes
|
666f92fac309b97c13b79e91f5493220f934cab3
|
[
"MIT"
] | 2 |
2020-08-11T07:23:21.000Z
|
2020-08-11T07:26:41.000Z
|
users/apps.py
|
srinidhibhat/booknotes-DjangoProject
|
666f92fac309b97c13b79e91f5493220f934cab3
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
| 23.444444 | 75 | 0.71564 |
5d12e645166a3997ff332b7bb734f77bb3785c93
| 1,407 |
py
|
Python
|
secure_data_store/cli.py
|
HumanBrainProject/secure-data-store
|
69b615cf979fc08f4ae8474ca9cd3e6d2f04b7f0
|
[
"MIT"
] | 1 |
2021-11-23T12:26:01.000Z
|
2021-11-23T12:26:01.000Z
|
secure_data_store/cli.py
|
HumanBrainProject/secure-data-store
|
69b615cf979fc08f4ae8474ca9cd3e6d2f04b7f0
|
[
"MIT"
] | null | null | null |
secure_data_store/cli.py
|
HumanBrainProject/secure-data-store
|
69b615cf979fc08f4ae8474ca9cd3e6d2f04b7f0
|
[
"MIT"
] | 1 |
2020-05-21T15:51:44.000Z
|
2020-05-21T15:51:44.000Z
|
# -*- coding: utf-8 -*-
"""Console script for secure_data_store."""
import click
from . import secure_data_store as sds
CONFIG='~/.sdsrc'
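@click.group()
def main():
    """Minimal hypothetical CLI skeleton: the original commands are not shown
    in this excerpt, and the sds helper names below are assumptions."""
@main.command()
@click.argument('name')
@click.option('--config', default=CONFIG, help='Path to configuration file.')
def create(name, config):
    """Create a new secure container NAME."""
    # Assumed helpers on the secure_data_store module:
    sds.create(sds.read_config(config), name)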
main()
| 29.3125 | 88 | 0.663113 |
5d13dcd7f99f525058d1ada523c294f362a0d8b9
| 1,052 |
py
|
Python
|
Support/Fuego/Pythia/pythia-0.4/packages/pyre/pyre/graph/Node.py
|
marient/PelePhysics
|
e6ad1839d77b194e09ab44ff850c9489652e5d81
|
[
"BSD-3-Clause-LBNL"
] | 1 |
2019-04-24T13:32:23.000Z
|
2019-04-24T13:32:23.000Z
|
Support/Fuego/Pythia/pythia-0.4/packages/pyre/pyre/graph/Node.py
|
marient/PelePhysics
|
e6ad1839d77b194e09ab44ff850c9489652e5d81
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
Support/Fuego/Pythia/pythia-0.4/packages/pyre/pyre/graph/Node.py
|
marient/PelePhysics
|
e6ad1839d77b194e09ab44ff850c9489652e5d81
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 1998-2003 All Rights Reserved
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from Drawable import Drawable
def nodeAttributes():
"""return a list of valid attributes for Node"""
return Node._validAttributes.keys()
# version
__id__ = "$Id$"
#
# End of file
| 19.849057 | 82 | 0.429658 |
5d13e82b9800d2ed9d73368f30bea490d35c562b
| 3,522 |
py
|
Python
|
cairis/gui/RiskScatterPanel.py
|
RachelLar/cairis_update
|
0b1d6d17ce49bc74887d1684e28c53c1b06e2fa2
|
[
"Apache-2.0"
] | null | null | null |
cairis/gui/RiskScatterPanel.py
|
RachelLar/cairis_update
|
0b1d6d17ce49bc74887d1684e28c53c1b06e2fa2
|
[
"Apache-2.0"
] | null | null | null |
cairis/gui/RiskScatterPanel.py
|
RachelLar/cairis_update
|
0b1d6d17ce49bc74887d1684e28c53c1b06e2fa2
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import pprint
import random
import wx
from cairis.core.armid import *
from cairis.core.Borg import Borg
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.figure import Figure
from matplotlib.backends.backend_wxagg import \
FigureCanvasWxAgg as FigCanvas, \
NavigationToolbar2WxAgg as NavigationToolbar
| 33.865385 | 141 | 0.70017 |
5d147935a791e2ef09a425d200ac60697365562a
| 7,380 |
py
|
Python
|
sdk/python/pulumi_azure/containerservice/get_registry.py
|
aangelisc/pulumi-azure
|
71dd9c75403146e16f7480e5a60b08bc0329660e
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/containerservice/get_registry.py
|
aangelisc/pulumi-azure
|
71dd9c75403146e16f7480e5a60b08bc0329660e
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/containerservice/get_registry.py
|
aangelisc/pulumi-azure
|
71dd9c75403146e16f7480e5a60b08bc0329660e
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetRegistryResult',
'AwaitableGetRegistryResult',
'get_registry',
]
def get_registry(name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRegistryResult:
"""
Use this data source to access information about an existing Container Registry.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.containerservice.get_registry(name="testacr",
resource_group_name="test")
pulumi.export("loginServer", example.login_server)
```
:param str name: The name of the Container Registry.
:param str resource_group_name: The Name of the Resource Group where this Container Registry exists.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure:containerservice/getRegistry:getRegistry', __args__, opts=opts, typ=GetRegistryResult).value
return AwaitableGetRegistryResult(
admin_enabled=__ret__.admin_enabled,
admin_password=__ret__.admin_password,
admin_username=__ret__.admin_username,
id=__ret__.id,
location=__ret__.location,
login_server=__ret__.login_server,
name=__ret__.name,
resource_group_name=__ret__.resource_group_name,
sku=__ret__.sku,
storage_account_id=__ret__.storage_account_id,
tags=__ret__.tags)
| 36.716418 | 215 | 0.65813 |
5d14d6320ca92dcf32c70f780204293a845032e6
| 21,539 |
py
|
Python
|
contrib/memcache_whisper.py
|
TimWhalen/graphite-web
|
e150af45e01d01141a8767ec0597e218105b9914
|
[
"Apache-2.0"
] | 1 |
2020-01-23T16:25:50.000Z
|
2020-01-23T16:25:50.000Z
|
contrib/memcache_whisper.py
|
TimWhalen/graphite-web
|
e150af45e01d01141a8767ec0597e218105b9914
|
[
"Apache-2.0"
] | 2 |
2016-07-28T20:55:46.000Z
|
2016-08-02T13:59:28.000Z
|
contrib/memcache_whisper.py
|
TimWhalen/graphite-web
|
e150af45e01d01141a8767ec0597e218105b9914
|
[
"Apache-2.0"
] | 1 |
2020-03-05T06:50:02.000Z
|
2020-03-05T06:50:02.000Z
|
#!/usr/bin/env python
# Copyright 2008 Orbitz WorldWide
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# This module is an implementation of the Whisper database API
# Here is the basic layout of a whisper data file
#
# File = Header,Data
# Header = Metadata,ArchiveInfo+
# Metadata = lastUpdate,maxRetention,xFilesFactor,archiveCount
# ArchiveInfo = Offset,SecondsPerPoint,Points
# Data = Archive+
# Archive = Point+
# Point = timestamp,value
"""
NOTE: This is a modified version of whisper.py
For details on the modification, read https://bugs.launchpad.net/graphite/+bug/245835
"""
import os, struct, time
try:
import fcntl
CAN_LOCK = True
except ImportError:
CAN_LOCK = False
LOCK = False
CACHE_HEADERS = False
__headerCache = {}
longFormat = "!L"
longSize = struct.calcsize(longFormat)
floatFormat = "!f"
floatSize = struct.calcsize(floatFormat)
timestampFormat = "!L"
timestampSize = struct.calcsize(timestampFormat)
valueFormat = "!d"
valueSize = struct.calcsize(valueFormat)
pointFormat = "!Ld"
pointSize = struct.calcsize(pointFormat)
metadataFormat = "!2LfL"
metadataSize = struct.calcsize(metadataFormat)
archiveInfoFormat = "!3L"
archiveInfoSize = struct.calcsize(archiveInfoFormat)
debug = startBlock = endBlock = lambda *a,**k: None
def create(path,archiveList,xFilesFactor=0.5):
"""create(path,archiveList,xFilesFactor=0.5)
path is a string
archiveList is a list of archives, each of which is of the form (secondsPerPoint,numberOfPoints)
xFilesFactor specifies the fraction of data points in a propagation interval that must have known values for a propagation to occur
"""
#Validate archive configurations...
assert archiveList, "You must specify at least one archive configuration!"
archiveList.sort(key=lambda a: a[0]) #sort by precision (secondsPerPoint)
for i,archive in enumerate(archiveList):
if i == len(archiveList) - 1: break
next = archiveList[i+1]
assert archive[0] < next[0],\
"You cannot configure two archives with the same precision %s,%s" % (archive,next)
assert (next[0] % archive[0]) == 0,\
"Higher precision archives' precision must evenly divide all lower precision archives' precision %s,%s" % (archive[0],next[0])
retention = archive[0] * archive[1]
nextRetention = next[0] * next[1]
assert nextRetention > retention,\
"Lower precision archives must cover larger time intervals than higher precision archives %s,%s" % (archive,next)
#Looks good, now we create the file and write the header
assert not exists(path), "File %s already exists!" % path
fh = open(path,'wb')
if LOCK: fcntl.flock( fh.fileno(), fcntl.LOCK_EX )
lastUpdate = struct.pack( timestampFormat, int(time.time()) )
oldest = sorted([secondsPerPoint * points for secondsPerPoint,points in archiveList])[-1]
maxRetention = struct.pack( longFormat, oldest )
xFilesFactor = struct.pack( floatFormat, float(xFilesFactor) )
archiveCount = struct.pack(longFormat, len(archiveList))
packedMetadata = lastUpdate + maxRetention + xFilesFactor + archiveCount
fh.write(packedMetadata)
headerSize = metadataSize + (archiveInfoSize * len(archiveList))
archiveOffsetPointer = headerSize
for secondsPerPoint,points in archiveList:
archiveInfo = struct.pack(archiveInfoFormat, archiveOffsetPointer, secondsPerPoint, points)
fh.write(archiveInfo)
archiveOffsetPointer += (points * pointSize)
  zeroes = b'\x00' * (archiveOffsetPointer - headerSize) #bytes, since the file is opened in binary mode
fh.write(zeroes)
fh.close()
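# Example usage (hypothetical path): a 10-second archive kept for one day plus
# a 1-minute archive kept for one week, propagating when half the points exist:
#   create('/tmp/test.wsp', [(10, 8640), (60, 10080)], xFilesFactor=0.5)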
def update(path,value,timestamp=None):
"""update(path,value,timestamp=None)
path is a string
value is a float
timestamp is either an int or float
"""
#startBlock('complete update')
value = float(value)
fh = open(path,'r+b')
if LOCK: fcntl.flock( fh.fileno(), fcntl.LOCK_EX )
header = __readHeader(fh)
now = int( time.time() )
if timestamp is None: timestamp = now
timestamp = int(timestamp)
diff = now - timestamp
assert diff < header['maxRetention'] and diff >= 0, "Timestamp not covered by any archives in this database"
for i,archive in enumerate(header['archives']): #Find the highest-precision archive that covers timestamp
if archive['retention'] < diff: continue
lowerArchives = header['archives'][i+1:] #We'll pass on the update to these lower precision archives later
break
#First we update the highest-precision archive
myInterval = timestamp - (timestamp % archive['secondsPerPoint'])
myPackedPoint = struct.pack(pointFormat,myInterval,value)
fh.seek(archive['offset'])
packedPoint = fh.read(pointSize)
(baseInterval,baseValue) = struct.unpack(pointFormat,packedPoint)
if baseInterval == 0: #This file's first update
fh.seek(archive['offset'])
fh.write(myPackedPoint)
baseInterval,baseValue = myInterval,value
else: #Not our first update
timeDistance = myInterval - baseInterval
    pointDistance = timeDistance // archive['secondsPerPoint'] #integer division keeps offsets exact on Python 3
byteDistance = pointDistance * pointSize
myOffset = archive['offset'] + (byteDistance % archive['size'])
fh.seek(myOffset)
fh.write(myPackedPoint)
#Now we propagate the update to lower-precision archives
#startBlock('update propagation')
higher = archive
for lower in lowerArchives:
if not __propagate(fh,myInterval,header['xFilesFactor'],higher,lower): break
higher = lower
#endBlock('update propagation')
__changeLastUpdate(fh)
fh.close()
#endBlock('complete update')
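# Example usage (same hypothetical file as above):
#   update('/tmp/test.wsp', 42.0) #timestamp defaults to now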
def update_many(path,points):
"""update_many(path,points)
path is a string
points is a list of (timestamp,value) points
"""
#startBlock('complete update_many path=%s points=%d' % (path,len(points)))
if not points: return
points = [ (int(t),float(v)) for (t,v) in points]
points.sort(key=lambda p: p[0],reverse=True) #order points by timestamp, newest first
fh = open(path,'r+b')
if LOCK: fcntl.flock( fh.fileno(), fcntl.LOCK_EX )
header = __readHeader(fh)
now = int( time.time() )
archives = iter( header['archives'] )
currentArchive = next(archives)
#debug(' update_many currentArchive=%s' % str(currentArchive))
currentPoints = []
for point in points:
age = now - point[0]
#debug(' update_many iterating points, point=%s age=%d' % (str(point),age))
while currentArchive['retention'] < age: #we can't fit any more points in this archive
#debug(' update_many this point is too old to fit here, currentPoints=%d' % len(currentPoints))
if currentPoints: #commit all the points we've found that it can fit
currentPoints.reverse() #put points in chronological order
__archive_update_many(fh,header,currentArchive,currentPoints)
currentPoints = []
try:
currentArchive = next(archives)
#debug(' update_many using next archive %s' % str(currentArchive))
except StopIteration:
#debug(' update_many no more archives!')
currentArchive = None
break
if not currentArchive: break #drop remaining points that don't fit in the database
#debug(' update_many adding point=%s' % str(point))
currentPoints.append(point)
#debug(' update_many done iterating points')
if currentArchive and currentPoints: #don't forget to commit after we've checked all the archives
currentPoints.reverse()
__archive_update_many(fh,header,currentArchive,currentPoints)
__changeLastUpdate(fh)
fh.close()
#endBlock('complete update_many path=%s points=%d' % (path,len(points)))
def info(path):
"""info(path)
path is a string
"""
fh = open(path,'rb')
info = __readHeader(fh)
fh.close()
return info
def fetch(path,fromTime,untilTime=None):
"""fetch(path,fromTime,untilTime=None)
path is a string
fromTime is an epoch time
untilTime is also an epoch time, but defaults to now
"""
fh = open(path,'rb')
header = __readHeader(fh)
now = int( time.time() )
if untilTime is None or untilTime > now:
untilTime = now
if fromTime < (now - header['maxRetention']):
fromTime = now - header['maxRetention']
assert fromTime < untilTime, "Invalid time interval"
diff = now - fromTime
for archive in header['archives']:
if archive['retention'] >= diff: break
fromInterval = int( fromTime - (fromTime % archive['secondsPerPoint']) )
untilInterval = int( untilTime - (untilTime % archive['secondsPerPoint']) )
fh.seek(archive['offset'])
packedPoint = fh.read(pointSize)
(baseInterval,baseValue) = struct.unpack(pointFormat,packedPoint)
if baseInterval == 0:
step = archive['secondsPerPoint']
    points = (untilInterval - fromInterval) // step
timeInfo = (fromInterval,untilInterval,step)
valueList = [None] * points
return (timeInfo,valueList)
#Determine fromOffset
timeDistance = fromInterval - baseInterval
  pointDistance = timeDistance // archive['secondsPerPoint']
byteDistance = pointDistance * pointSize
fromOffset = archive['offset'] + (byteDistance % archive['size'])
#Determine untilOffset
timeDistance = untilInterval - baseInterval
  pointDistance = timeDistance // archive['secondsPerPoint']
byteDistance = pointDistance * pointSize
untilOffset = archive['offset'] + (byteDistance % archive['size'])
#Read all the points in the interval
fh.seek(fromOffset)
if fromOffset < untilOffset: #If we don't wrap around the archive
seriesString = fh.read(untilOffset - fromOffset)
else: #We do wrap around the archive, so we need two reads
archiveEnd = archive['offset'] + archive['size']
seriesString = fh.read(archiveEnd - fromOffset)
fh.seek(archive['offset'])
seriesString += fh.read(untilOffset - archive['offset'])
#Now we unpack the series data we just read (anything faster than unpack?)
byteOrder,pointTypes = pointFormat[0],pointFormat[1:]
  points = len(seriesString) // pointSize
seriesFormat = byteOrder + (pointTypes * points)
unpackedSeries = struct.unpack(seriesFormat, seriesString)
#And finally we construct a list of values (optimize this!)
valueList = [None] * points #pre-allocate entire list for speed
currentInterval = fromInterval
step = archive['secondsPerPoint']
for i in xrange(0,len(unpackedSeries),2):
pointTime = unpackedSeries[i]
if pointTime == currentInterval:
pointValue = unpackedSeries[i+1]
valueList[i/2] = pointValue #in-place reassignment is faster than append()
currentInterval += step
fh.close()
timeInfo = (fromInterval,untilInterval,step)
return (timeInfo,valueList)
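# A minimal usage sketch of the read APIs above (hypothetical file name;
# assumes a whisper archive created elsewhere, e.g. via this module's create()):
#
#   meta = info('example.wsp')
#   print(meta['maxRetention'], [a['secondsPerPoint'] for a in meta['archives']])
#
#   (fromI, untilI, step), values = fetch('example.wsp', int(time.time()) - 3600)
#   series = dict(zip(range(fromI, untilI, step), values))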
avg_line_length: 40.563089 | max_line_length: 160 | alphanum_fraction: 0.720553
hexsha: 5d15eae6d6d420d8166df518e95a6f5df2ba41f1 | size: 2619 | ext: py | lang: Python
path: main.py | repo: showtimesynergy/mojify | head_hexsha: 8c012730b9f56d6e7e2003e8db99669516f4e027 | licenses: ["BSD-2-Clause"]
stars: null | issues: null | forks: null
from PIL import Image
import csv
from ast import literal_eval as make_tuple
from math import sqrt
import argparse
import os.path
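# The original file defines several helper functions between the imports and
# the main block; they were dropped in this extract. The stand-ins below are
# hypothetical sketches inferred from the call sites, not the original code.
def handle_arguments():
    parser = argparse.ArgumentParser(description='Render an image as emoji art')
    parser.add_argument('image', help='path to the input image')
    return parser.parse_args()


def load_img(path):
    # Normalize to RGB so getpixel() always yields 3-tuples.
    return Image.open(path).convert('RGB')


def gen_matrix(image):
    # Simplified stand-in: pick, for every pixel, the emoji whose stored
    # average color is nearest (uses the module-level emoji_list built below).
    width, height = image.size
    grid = []
    for y in range(height):
        row = []
        for x in range(width):
            r, g, b = image.getpixel((x, y))
            nearest = min(
                emoji_list,
                key=lambda e: sqrt(
                    (e[1][0] - r) ** 2 + (e[1][1] - g) ** 2 + (e[1][2] - b) ** 2
                ),
            )
            row.append(nearest[0])
        grid.append(row)
    return grid


def write_out(emoji_grid):
    with open('out.txt', 'w') as out:
        for row in emoji_grid:
            out.write(''.join(row) + '\n')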
if __name__ == '__main__':
    args = handle_arguments()
    path = args.image
    # Build the emoji lookup table: each CSV row holds an emoji and its
    # average RGB tuple (stored as a string, hence the literal_eval).
    emoji_list = []
    with open('proc.csv') as raw_file:
        for entry in csv.reader(raw_file):
            emoji_list.append([entry[0], make_tuple(entry[1])])
    image = load_img(path)
    size = image.size
    emoji_grid = gen_matrix(image)
    write_out(emoji_grid)
    print('Output in out.txt')
avg_line_length: 29.426966 | max_line_length: 63 | alphanum_fraction: 0.557083
hexsha: 5d15ebcd4b1cb7692dfb4253406f6c027f0525d0 | size: 17824 | ext: py | lang: Python
path: venv/lib/python3.7/site-packages/Xlib/ext/xinput.py | repo: umr-bot/sliding-puzzle-solver-bot | head_hexsha: 826532a426f343bcc66034b241a42b3bd864e07c | licenses: ["MIT"]
stars: null | issues: null | forks: null
# Xlib.ext.xinput -- XInput extension module
#
# Copyright (C) 2012 Outpost Embedded, LLC
# Forest Bond <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
'''
A very incomplete implementation of the XInput extension.
'''
import sys
import array
import struct
# Python 2/3 compatibility.
from six import integer_types
from Xlib.protocol import rq
from Xlib import X
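# NOTE: this extract is truncated; helper types referenced below (FP1616,
# FP3232, Mask, ButtonState, ClassInfoClass) and the XIQueryVersion /
# XISelectEvents request classes are defined in the full module.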
extname = 'XInputExtension'
PropertyDeleted = 0
PropertyCreated = 1
PropertyModified = 2
NotifyNormal = 0
NotifyGrab = 1
NotifyUngrab = 2
NotifyWhileGrabbed = 3
NotifyPassiveGrab = 4
NotifyPassiveUngrab = 5
NotifyAncestor = 0
NotifyVirtual = 1
NotifyInferior = 2
NotifyNonlinear = 3
NotifyNonlinearVirtual = 4
NotifyPointer = 5
NotifyPointerRoot = 6
NotifyDetailNone = 7
GrabtypeButton = 0
GrabtypeKeycode = 1
GrabtypeEnter = 2
GrabtypeFocusIn = 3
GrabtypeTouchBegin = 4
AnyModifier = (1 << 31)
AnyButton = 0
AnyKeycode = 0
AsyncDevice = 0
SyncDevice = 1
ReplayDevice = 2
AsyncPairedDevice = 3
AsyncPair = 4
SyncPair = 5
SlaveSwitch = 1
DeviceChange = 2
MasterAdded = (1 << 0)
MasterRemoved = (1 << 1)
SlaveAdded = (1 << 2)
SlaveRemoved = (1 << 3)
SlaveAttached = (1 << 4)
SlaveDetached = (1 << 5)
DeviceEnabled = (1 << 6)
DeviceDisabled = (1 << 7)
AddMaster = 1
RemoveMaster = 2
AttachSlave = 3
DetachSlave = 4
AttachToMaster = 1
Floating = 2
ModeRelative = 0
ModeAbsolute = 1
MasterPointer = 1
MasterKeyboard = 2
SlavePointer = 3
SlaveKeyboard = 4
FloatingSlave = 5
KeyClass = 0
ButtonClass = 1
ValuatorClass = 2
ScrollClass = 3
TouchClass = 8
KeyRepeat = (1 << 16)
AllDevices = 0
AllMasterDevices = 1
DeviceChanged = 1
KeyPress = 2
KeyRelease = 3
ButtonPress = 4
ButtonRelease = 5
Motion = 6
Enter = 7
Leave = 8
FocusIn = 9
FocusOut = 10
HierarchyChanged = 11
PropertyEvent = 12
RawKeyPress = 13
RawKeyRelease = 14
RawButtonPress = 15
RawButtonRelease = 16
RawMotion = 17
DeviceChangedMask = (1 << DeviceChanged)
KeyPressMask = (1 << KeyPress)
KeyReleaseMask = (1 << KeyRelease)
ButtonPressMask = (1 << ButtonPress)
ButtonReleaseMask = (1 << ButtonRelease)
MotionMask = (1 << Motion)
EnterMask = (1 << Enter)
LeaveMask = (1 << Leave)
FocusInMask = (1 << FocusIn)
FocusOutMask = (1 << FocusOut)
HierarchyChangedMask = (1 << HierarchyChanged)
PropertyEventMask = (1 << PropertyEvent)
RawKeyPressMask = (1 << RawKeyPress)
RawKeyReleaseMask = (1 << RawKeyRelease)
RawButtonPressMask = (1 << RawButtonPress)
RawButtonReleaseMask = (1 << RawButtonRelease)
RawMotionMask = (1 << RawMotion)
GrabModeSync = 0
GrabModeAsync = 1
GrabModeTouch = 2
DEVICEID = rq.Card16
DEVICE = rq.Card16
DEVICEUSE = rq.Card8
def query_version(self):
    return XIQueryVersion(
        display=self.display,
        opcode=self.display.get_extension_major(extname),
        major_version=2,
        minor_version=0,
    )
EventMask = rq.Struct(
    DEVICE('deviceid'),
    rq.LengthOf('mask', 2),
    Mask('mask'),
)
def select_events(self, event_masks):
    '''
    select_events(event_masks)

    event_masks:
        Sequence of (deviceid, mask) pairs, where deviceid is a numerical device
        ID, or AllDevices or AllMasterDevices, and mask is either an unsigned
        integer or sequence of 32 bits unsigned values
    '''
    return XISelectEvents(
        display=self.display,
        opcode=self.display.get_extension_major(extname),
        window=self,
        masks=event_masks,
    )
AnyInfo = rq.Struct(
    rq.Card16('type'),
    rq.Card16('length'),
    rq.Card16('sourceid'),
    rq.Pad(2),
)

ButtonInfo = rq.Struct(
    rq.Card16('type'),
    rq.Card16('length'),
    rq.Card16('sourceid'),
    rq.LengthOf(('state', 'labels'), 2),
    ButtonState('state'),
    rq.List('labels', rq.Card32),
)

KeyInfo = rq.Struct(
    rq.Card16('type'),
    rq.Card16('length'),
    rq.Card16('sourceid'),
    rq.LengthOf('keycodes', 2),
    rq.List('keycodes', rq.Card32),
)

ValuatorInfo = rq.Struct(
    rq.Card16('type'),
    rq.Card16('length'),
    rq.Card16('sourceid'),
    rq.Card16('number'),
    rq.Card32('label'),
    FP3232('min'),
    FP3232('max'),
    FP3232('value'),
    rq.Card32('resolution'),
    rq.Card8('mode'),
    rq.Pad(3),
)

ScrollInfo = rq.Struct(
    rq.Card16('type'),
    rq.Card16('length'),
    rq.Card16('sourceid'),
    rq.Card16('number'),
    rq.Card16('scroll_type'),
    rq.Pad(2),
    rq.Card32('flags'),
    FP3232('increment'),
)

TouchInfo = rq.Struct(
    rq.Card16('type'),
    rq.Card16('length'),
    rq.Card16('sourceid'),
    rq.Card8('mode'),
    rq.Card8('num_touches'),
)

INFO_CLASSES = {
    KeyClass: KeyInfo,
    ButtonClass: ButtonInfo,
    ValuatorClass: ValuatorInfo,
    ScrollClass: ScrollInfo,
    TouchClass: TouchInfo,
}

ClassInfo = ClassInfoClass()

DeviceInfo = rq.Struct(
    DEVICEID('deviceid'),
    rq.Card16('use'),
    rq.Card16('attachment'),
    rq.LengthOf('classes', 2),
    rq.LengthOf('name', 2),
    rq.Bool('enabled'),
    rq.Pad(1),
    rq.String8('name', 4),
    rq.List('classes', ClassInfo),
)

HierarchyInfo = rq.Struct(
    DEVICEID('deviceid'),
    DEVICEID('attachment'),
    DEVICEUSE('type'),
    rq.Bool('enabled'),
    rq.Pad(2),
    rq.Card32('flags'),
)

HierarchyEventData = rq.Struct(
    DEVICEID('deviceid'),
    rq.Card32('time'),
    rq.Card32('flags'),
    rq.LengthOf('info', 2),
    rq.Pad(10),
    rq.List('info', HierarchyInfo),
)

ModifierInfo = rq.Struct(
    rq.Card32('base_mods'),
    rq.Card32('latched_mods'),
    rq.Card32('locked_mods'),
    rq.Card32('effective_mods'),
)

GroupInfo = rq.Struct(
    rq.Card8('base_group'),
    rq.Card8('latched_group'),
    rq.Card8('locked_group'),
    rq.Card8('effective_group'),
)

DeviceEventData = rq.Struct(
    DEVICEID('deviceid'),
    rq.Card32('time'),
    rq.Card32('detail'),
    rq.Window('root'),
    rq.Window('event'),
    rq.Window('child'),
    FP1616('root_x'),
    FP1616('root_y'),
    FP1616('event_x'),
    FP1616('event_y'),
    rq.LengthOf('buttons', 2),
    rq.Card16('valulators_len'),  # sic: field name matches upstream python-xlib
    DEVICEID('sourceid'),
    rq.Pad(2),
    rq.Card32('flags'),
    rq.Object('mods', ModifierInfo),
    rq.Object('groups', GroupInfo),
    ButtonState('buttons'),
)

DeviceChangedEventData = rq.Struct(
    DEVICEID('deviceid'),
    rq.Card32('time'),
    rq.LengthOf('classes', 2),
    DEVICEID('sourceid'),
    rq.Card8('reason'),
    rq.Pad(11),
    rq.List('classes', ClassInfo),
)
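# Hypothetical usage sketch (assumes a running X server and that python-xlib
# has loaded this extension; the method names follow the extension
# registration performed in the full module):
#
#   from Xlib import display
#   from Xlib.ext import xinput
#
#   d = display.Display()
#   root = d.screen().root
#   root.xinput_select_events([(xinput.AllDevices, xinput.MotionMask)])
#   while True:
#       event = d.next_event()   # xinput.Motion events arrive here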
avg_line_length: 27.212214 | max_line_length: 96 | alphanum_fraction: 0.601212
hexsha: 5d173dba73e014674031b329494a05e8bf83b546 | size: 24 | ext: py | lang: Python
path: vel/notebook/__init__.py | repo: tigerwlin/vel | head_hexsha: 00e4fbb7b612e888e2cbb5d8455146664638cd0b | licenses: ["MIT"]
stars: 273 (2018-09-01T08:54:34.000Z → 2022-02-02T13:22:51.000Z) | issues: 47 (2018-08-17T11:27:08.000Z → 2022-03-11T23:26:55.000Z) | forks: 37 (2018-10-11T22:56:57.000Z → 2020-10-06T19:53:05.000Z)
from .loader import load
avg_line_length: 24 | max_line_length: 24 | alphanum_fraction: 0.833333
hexsha: 5d17411291af35f334b84d9eb9a01839cc0b1a31 | size: 340 | ext: py | lang: Python
path: YourJobAidApi/migrations/0019_remove_category_count_post.py | repo: rayhanrock/django-yourjobaid-api | head_hexsha: 17751dac5a298998aeecf7a70b79792f8311b9b2 | licenses: ["MIT"]
stars: 1 (2020-07-08T10:57:37.000Z → 2020-07-08T10:57:37.000Z) | issues: 6 (2021-04-08T20:23:17.000Z → 2021-09-22T18:58:16.000Z) | forks: null
# Generated by Django 3.0.4 on 2020-04-16 23:10
from django.db import migrations
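# The Migration class body was truncated in this extract. Judging from the
# filename, it removes the `count_post` field from `category`; a hypothetical
# reconstruction (the exact dependency entry is elided in the source):


class Migration(migrations.Migration):

    dependencies = [
        # ('YourJobAidApi', '0018_...'),  # dependency elided in the extract
    ]

    operations = [
        migrations.RemoveField(
            model_name='category',
            name='count_post',
        ),
    ]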
avg_line_length: 20 | max_line_length: 54 | alphanum_fraction: 0.608824
hexsha: 5d190891c7c8c6ae88bd8b11a4958d94ac97d775 | size: 129 | ext: py | lang: Python
path: easyquant/login/__init__.py | repo: CharlieZhao95/easy-quant | head_hexsha: 9df126433e27d92eced9b087e581b5fd66c5a400 | licenses: ["Apache-2.0"]
stars: null | issues: 2 (2022-01-26T15:50:37.000Z → 2022-03-30T14:08:36.000Z) | forks: null
# @Time : 2022/1/26 23:07
# @Author : zhaoyu
# @Site :
# @File : __init__.py.py
# @Software: PyCharm
# @Note : xx
avg_line_length: 21.5 | max_line_length: 29 | alphanum_fraction: 0.527132
hexsha: 5d1b794c29c7e14b547cb2e45a43996ab2eb188a | size: 60985 | ext: py | lang: Python
path: tests/api/test_attributes.py | repo: DowneyTung/saleor | head_hexsha: 50f299d8e276b594753ee439d9e1a212f85a91b1 | licenses: ["CC-BY-4.0"]
stars: 19 (2019-12-03T17:28:07.000Z → 2021-09-10T21:30:52.000Z) | issues: 51 (2019-12-06T08:06:07.000Z → 2021-05-06T02:10:50.000Z) | forks: 20 (2020-02-03T00:38:59.000Z → 2022-01-03T13:07:52.000Z)
from typing import Union
from unittest import mock

import graphene
import pytest
from django.core.exceptions import ValidationError
from django.db.models import Q
from django.template.defaultfilters import slugify
from graphene.utils.str_converters import to_camel_case

from saleor.core.taxes import zero_money
from saleor.graphql.core.utils import snake_to_camel_case
from saleor.graphql.product.enums import AttributeTypeEnum, AttributeValueType
from saleor.graphql.product.filters import filter_attributes_by_product_types
from saleor.graphql.product.mutations.attributes import validate_value_is_unique
from saleor.graphql.product.types.attributes import resolve_attribute_value_type
from saleor.product import AttributeInputType
from saleor.product.error_codes import ProductErrorCode
from saleor.product.models import (
    Attribute,
    AttributeProduct,
    AttributeValue,
    AttributeVariant,
    Category,
    Collection,
    Product,
    ProductType,
    ProductVariant,
)
from saleor.product.utils.attributes import associate_attribute_values_to_instance
from tests.api.utils import get_graphql_content

QUERY_ATTRIBUTES = """
    query {
        attributes(first: 20) {
            edges {
                node {
                    id
                    name
                    slug
                    values {
                        id
                        name
                        slug
                    }
                }
            }
        }
    }
"""
QUERY_PRODUCT_AND_VARIANTS_ATTRIBUTES = """
    {
      products(first: 1) {
        edges {
          node {
            attributes {
              attribute {
                slug
              }
              values {
                slug
              }
              value {
                slug
              }
            }
            variants {
              attributes {
                attribute {
                  slug
                }
                values {
                  slug
                }
                value {
                  slug
                }
              }
            }
          }
        }
      }
    }
"""


def test_resolve_attribute_values(user_api_client, product, staff_user):
    """Ensure the attribute values are properly resolved."""
    query = QUERY_PRODUCT_AND_VARIANTS_ATTRIBUTES
    api_client = user_api_client

    variant = product.variants.first()

    assert product.attributes.count() == 1
    assert variant.attributes.count() == 1

    product_attribute_values = list(
        product.attributes.first().values.values_list("slug", flat=True)
    )
    variant_attribute_values = list(
        variant.attributes.first().values.values_list("slug", flat=True)
    )

    assert len(product_attribute_values) == 1
    assert len(variant_attribute_values) == 1

    product = get_graphql_content(api_client.post_graphql(query))["data"]["products"][
        "edges"
    ][0]["node"]

    product_attributes = product["attributes"]
    variant_attributes = product["variants"][0]["attributes"]

    assert len(product_attributes) == len(product_attribute_values)
    assert len(variant_attributes) == len(variant_attribute_values)

    assert product_attributes[0]["attribute"]["slug"] == "color"
    assert product_attributes[0]["values"][0]["slug"] == product_attribute_values[0]
    assert product_attributes[0]["value"]["slug"] == product_attribute_values[0]

    assert variant_attributes[0]["attribute"]["slug"] == "size"
    assert variant_attributes[0]["values"][0]["slug"] == variant_attribute_values[0]
    assert variant_attributes[0]["value"]["slug"] == variant_attribute_values[0]
def test_resolve_attribute_values_non_assigned_to_node(
    user_api_client, product, staff_user
):
    """Ensure the attribute values are properly resolved when an attribute is part
    of the product type but not of the node (product/variant), thus no values
    should be resolved.
    """
    query = QUERY_PRODUCT_AND_VARIANTS_ATTRIBUTES
    api_client = user_api_client

    variant = product.variants.first()
    product_type = product.product_type

    # Create dummy attributes
    unassigned_product_attribute = Attribute.objects.create(name="P", slug="product")
    unassigned_variant_attribute = Attribute.objects.create(name="V", slug="variant")

    # Create a value for each dummy attribute to ensure they are not returned
    # by the product or variant as they are not associated to them
    AttributeValue.objects.bulk_create(
        [
            AttributeValue(slug="a", name="A", attribute=unassigned_product_attribute),
            AttributeValue(slug="b", name="B", attribute=unassigned_product_attribute),
        ]
    )

    # Assign the dummy attributes to the product type and push them at the top
    # through a sort_order=0 as the other attributes have sort_order=null
    AttributeProduct.objects.create(
        attribute=unassigned_product_attribute, product_type=product_type, sort_order=0
    )
    AttributeVariant.objects.create(
        attribute=unassigned_variant_attribute, product_type=product_type, sort_order=0
    )

    assert product.attributes.count() == 1
    assert variant.attributes.count() == 1

    product = get_graphql_content(api_client.post_graphql(query))["data"]["products"][
        "edges"
    ][0]["node"]

    product_attributes = product["attributes"]
    variant_attributes = product["variants"][0]["attributes"]

    assert len(product_attributes) == 2, "Non-assigned attr from the PT may be missing"
    assert len(variant_attributes) == 2, "Non-assigned attr from the PT may be missing"

    assert product_attributes[0]["attribute"]["slug"] == "product"
    assert product_attributes[0]["values"] == []
    assert product_attributes[0]["value"] is None

    assert variant_attributes[0]["attribute"]["slug"] == "variant"
    assert variant_attributes[0]["values"] == []
    assert variant_attributes[0]["value"] is None
def test_attributes_filter_by_product_type_with_empty_value():
    """Ensure passing an empty or null value is ignored and the queryset is simply
    returned without any modification.
    """
    qs = Attribute.objects.all()

    assert filter_attributes_by_product_types(qs, "...", "") is qs
    assert filter_attributes_by_product_types(qs, "...", None) is qs


def test_attributes_filter_by_product_type_with_unsupported_field():
    """Ensure using an unknown field to filter attributes by raises a NotImplemented
    exception.
    """
    qs = Attribute.objects.all()

    with pytest.raises(NotImplementedError) as exc:
        filter_attributes_by_product_types(qs, "in_space", "a-value")

    assert exc.value.args == ("Filtering by in_space is unsupported",)


def test_attributes_filter_by_non_existing_category_id():
    """Ensure using a non-existing category ID returns an empty query set."""
    category_id = graphene.Node.to_global_id("Category", -1)
    mocked_qs = mock.MagicMock()
    qs = filter_attributes_by_product_types(mocked_qs, "in_category", category_id)
    assert qs == mocked_qs.none.return_value


CREATE_ATTRIBUTES_QUERY = """
    mutation createAttribute($name: String!, $values: [AttributeValueCreateInput]) {
        attributeCreate(input: {name: $name, values: $values}) {
            errors {
                field
                message
            }
            productErrors {
                field
                message
                code
            }
            attribute {
                name
                slug
                values {
                    name
                    slug
                }
                productTypes(first: 10) {
                    edges {
                        node {
                            id
                        }
                    }
                }
            }
        }
    }
"""

UPDATE_ATTRIBUTE_QUERY = """
    mutation updateAttribute(
        $id: ID!, $name: String!, $addValues: [AttributeValueCreateInput]!,
        $removeValues: [ID]!) {
        attributeUpdate(
                id: $id,
                input: {
                    name: $name, addValues: $addValues,
                    removeValues: $removeValues}) {
            errors {
                field
                message
            }
            productErrors {
                field
                message
                code
            }
            attribute {
                name
                slug
                values {
                    name
                    slug
                }
                productTypes(first: 10) {
                    edges {
                        node {
                            id
                        }
                    }
                }
            }
        }
    }
"""

CREATE_ATTRIBUTE_VALUE_QUERY = """
    mutation createAttributeValue(
            $attributeId: ID!, $name: String!) {
        attributeValueCreate(
                attribute: $attributeId, input: {name: $name}) {
            productErrors {
                field
                message
                code
            }
            attribute {
                values {
                    name
                }
            }
            attributeValue {
                name
                type
                slug
            }
        }
    }
"""

UPDATE_ATTRIBUTE_VALUE_QUERY = """
    mutation updateChoice(
            $id: ID!, $name: String!) {
        attributeValueUpdate(
                id: $id, input: {name: $name}) {
            errors {
                field
                message
            }
            attributeValue {
                name
                slug
            }
            attribute {
                values {
                    name
                }
            }
        }
    }
"""
def test_resolve_assigned_attribute_without_values(api_client, product_type, product):
    """Ensure the attributes assigned to a product type are resolved even if
    the product doesn't provide any value for it or is not directly associated to it.
    """
    # Retrieve the product's variant
    variant = product.variants.get()

    # Remove all attributes and values from the product and its variant
    product.attributesrelated.clear()
    variant.attributesrelated.clear()

    # Retrieve the product and variant's attributes
    products = get_graphql_content(
        api_client.post_graphql(
            """
            {
                products(first: 10) {
                    edges {
                        node {
                            attributes {
                                attribute {
                                    slug
                                }
                                values {
                                    name
                                }
                            }
                            variants {
                                attributes {
                                    attribute {
                                        slug
                                    }
                                    values {
                                        name
                                    }
                                }
                            }
                        }
                    }
                }
            }
            """
        )
    )["data"]["products"]["edges"]

    # Ensure we are only working on one product and variant, the ones we are testing
    assert len(products) == 1
    assert len(products[0]["node"]["variants"]) == 1

    # Retrieve the nodes data
    product = products[0]["node"]
    variant = product["variants"][0]

    # Ensure the product attributes values are all None
    assert len(product["attributes"]) == 1
    assert product["attributes"][0]["attribute"]["slug"] == "color"
    assert product["attributes"][0]["values"] == []

    # Ensure the variant attributes values are all None
    assert variant["attributes"][0]["attribute"]["slug"] == "size"
    assert variant["attributes"][0]["values"] == []
ASSIGN_ATTR_QUERY = """
    mutation assign($productTypeId: ID!, $operations: [AttributeAssignInput]!) {
        attributeAssign(productTypeId: $productTypeId, operations: $operations) {
            errors {
                field
                message
            }
            productType {
                id
                productAttributes {
                    id
                }
                variantAttributes {
                    id
                }
            }
        }
    }
"""


def test_assign_variant_attribute_to_product_type_with_disabled_variants(
    staff_api_client,
    permission_manage_products,
    product_type_without_variant,
    color_attribute_without_values,
):
    """The assignAttribute mutation should raise an error when trying
    to add an attribute as a variant attribute when
    the product type doesn't support variants"""

    product_type = product_type_without_variant
    attribute = color_attribute_without_values
    staff_api_client.user.user_permissions.add(permission_manage_products)

    product_type_global_id = graphene.Node.to_global_id("ProductType", product_type.pk)

    query = ASSIGN_ATTR_QUERY
    operations = [
        {"type": "VARIANT", "id": graphene.Node.to_global_id("Attribute", attribute.pk)}
    ]
    variables = {"productTypeId": product_type_global_id, "operations": operations}

    content = get_graphql_content(staff_api_client.post_graphql(query, variables))[
        "data"
    ]["attributeAssign"]
    assert content["errors"] == [
        {
            "field": "operations",
            "message": "Variants are disabled in this product type.",
        }
    ]


def test_assign_variant_attribute_having_unsupported_input_type(
    staff_api_client, permission_manage_products, product_type, size_attribute
):
    """The assignAttribute mutation should raise an error when trying
    to use an attribute as a variant attribute when
    the attribute's input type doesn't support variants"""

    attribute = size_attribute
    attribute.input_type = AttributeInputType.MULTISELECT
    attribute.save(update_fields=["input_type"])
    product_type.variant_attributes.clear()

    staff_api_client.user.user_permissions.add(permission_manage_products)

    product_type_global_id = graphene.Node.to_global_id("ProductType", product_type.pk)

    query = ASSIGN_ATTR_QUERY
    operations = [
        {"type": "VARIANT", "id": graphene.Node.to_global_id("Attribute", attribute.pk)}
    ]
    variables = {"productTypeId": product_type_global_id, "operations": operations}

    content = get_graphql_content(staff_api_client.post_graphql(query, variables))[
        "data"
    ]["attributeAssign"]
    assert content["errors"] == [
        {
            "field": "operations",
            "message": (
                "Attributes having for input types ['multiselect'] cannot be assigned "
                "as variant attributes"
            ),
        }
    ]


UNASSIGN_ATTR_QUERY = """
    mutation unAssignAttribute(
        $productTypeId: ID!, $attributeIds: [ID]!
    ) {
        attributeUnassign(productTypeId: $productTypeId, attributeIds: $attributeIds) {
            errors {
                field
                message
            }
            productType {
                id
                variantAttributes {
                    id
                }
                productAttributes {
                    id
                }
            }
        }
    }
"""


def test_unassign_attributes_not_in_product_type(
    staff_api_client, permission_manage_products, color_attribute_without_values
):
    """The unAssignAttribute mutation should not raise any error when trying
    to remove an attribute that is not/no longer in the product type."""

    staff_api_client.user.user_permissions.add(permission_manage_products)

    product_type = ProductType.objects.create(name="Type")
    product_type_global_id = graphene.Node.to_global_id("ProductType", product_type.pk)

    query = UNASSIGN_ATTR_QUERY
    variables = {
        "productTypeId": product_type_global_id,
        "attributeIds": [
            graphene.Node.to_global_id("Attribute", color_attribute_without_values.pk)
        ],
    }

    content = get_graphql_content(staff_api_client.post_graphql(query, variables))[
        "data"
    ]["attributeUnassign"]
    assert not content["errors"]

    assert content["productType"]["id"] == product_type_global_id
    assert len(content["productType"]["productAttributes"]) == 0
    assert len(content["productType"]["variantAttributes"]) == 0
ATTRIBUTES_RESORT_QUERY = """
    mutation ProductTypeReorderAttributes(
        $productTypeId: ID!
        $moves: [ReorderInput]!
        $type: AttributeTypeEnum!
    ) {
        productTypeReorderAttributes(
            productTypeId: $productTypeId
            moves: $moves
            type: $type
        ) {
            productType {
                id
                variantAttributes {
                    id
                    slug
                }
                productAttributes {
                    id
                }
            }
            errors {
                field
                message
            }
        }
    }
"""


def test_sort_attributes_within_product_type_invalid_product_type(
    staff_api_client, permission_manage_products
):
    """Try to reorder an invalid product type (invalid ID)."""

    product_type_id = graphene.Node.to_global_id("ProductType", -1)
    attribute_id = graphene.Node.to_global_id("Attribute", -1)

    variables = {
        "type": "VARIANT",
        "productTypeId": product_type_id,
        "moves": [{"id": attribute_id, "sortOrder": 1}],
    }

    content = get_graphql_content(
        staff_api_client.post_graphql(
            ATTRIBUTES_RESORT_QUERY, variables, permissions=[permission_manage_products]
        )
    )["data"]["productTypeReorderAttributes"]

    assert content["errors"] == [
        {
            "field": "productTypeId",
            "message": f"Couldn't resolve to a product type: {product_type_id}",
        }
    ]


def test_sort_attributes_within_product_type_invalid_id(
    staff_api_client, permission_manage_products, color_attribute
):
    """Try to reorder an attribute not associated to the given product type."""

    product_type = ProductType.objects.create(name="Dummy Type")
    product_type_id = graphene.Node.to_global_id("ProductType", product_type.id)
    attribute_id = graphene.Node.to_global_id("Attribute", color_attribute.id)

    variables = {
        "type": "VARIANT",
        "productTypeId": product_type_id,
        "moves": [{"id": attribute_id, "sortOrder": 1}],
    }

    content = get_graphql_content(
        staff_api_client.post_graphql(
            ATTRIBUTES_RESORT_QUERY, variables, permissions=[permission_manage_products]
        )
    )["data"]["productTypeReorderAttributes"]

    assert content["errors"] == [
        {
            "field": "moves",
            "message": f"Couldn't resolve to an attribute: {attribute_id}",
        }
    ]


ATTRIBUTE_VALUES_RESORT_QUERY = """
    mutation attributeReorderValues($attributeId: ID!, $moves: [ReorderInput]!) {
        attributeReorderValues(attributeId: $attributeId, moves: $moves) {
            attribute {
                id
                values {
                    id
                }
            }
            errors {
                field
                message
            }
        }
    }
"""


def test_sort_values_within_attribute_invalid_product_type(
    staff_api_client, permission_manage_products
):
    """Try to reorder an invalid attribute (invalid ID)."""

    attribute_id = graphene.Node.to_global_id("Attribute", -1)
    value_id = graphene.Node.to_global_id("AttributeValue", -1)

    variables = {
        "attributeId": attribute_id,
        "moves": [{"id": value_id, "sortOrder": 1}],
    }

    content = get_graphql_content(
        staff_api_client.post_graphql(
            ATTRIBUTE_VALUES_RESORT_QUERY,
            variables,
            permissions=[permission_manage_products],
        )
    )["data"]["attributeReorderValues"]

    assert content["errors"] == [
        {
            "field": "attributeId",
            "message": f"Couldn't resolve to an attribute: {attribute_id}",
        }
    ]


def test_sort_values_within_attribute_invalid_id(
    staff_api_client, permission_manage_products, color_attribute
):
    """Try to reorder a value not associated to the given attribute."""

    attribute_id = graphene.Node.to_global_id("Attribute", color_attribute.id)
    value_id = graphene.Node.to_global_id("AttributeValue", -1)

    variables = {
        "type": "VARIANT",
        "attributeId": attribute_id,
        "moves": [{"id": value_id, "sortOrder": 1}],
    }

    content = get_graphql_content(
        staff_api_client.post_graphql(
            ATTRIBUTE_VALUES_RESORT_QUERY,
            variables,
            permissions=[permission_manage_products],
        )
    )["data"]["attributeReorderValues"]

    assert content["errors"] == [
        {
            "field": "moves",
            "message": f"Couldn't resolve to an attribute value: {value_id}",
        }
    ]
ATTRIBUTES_FILTER_QUERY = """
    query($filters: AttributeFilterInput!) {
        attributes(first: 10, filter: $filters) {
            edges {
                node {
                    name
                    slug
                }
            }
        }
    }
"""

ATTRIBUTES_SORT_QUERY = """
    query($sortBy: AttributeSortingInput) {
        attributes(first: 10, sortBy: $sortBy) {
            edges {
                node {
                    slug
                }
            }
        }
    }
"""


def test_sort_attributes_by_default_sorting(api_client):
    """Don't provide any sorting, this should sort by name by default."""
    Attribute.objects.bulk_create(
        [Attribute(name="A", slug="b"), Attribute(name="B", slug="a")]
    )

    attributes = get_graphql_content(
        api_client.post_graphql(ATTRIBUTES_SORT_QUERY, {})
    )["data"]["attributes"]["edges"]

    assert len(attributes) == 2
    assert attributes[0]["node"]["slug"] == "b"
    assert attributes[1]["node"]["slug"] == "a"
avg_line_length: 31.598446 | max_line_length: 88 | alphanum_fraction: 0.649504
hexsha: 5d1c26b574f8d1aa48c37371f029724022116688 | size: 3557 | ext: py | lang: Python
path: 3-photos/1-chromakey/app.py | repo: rafacm/aws-serverless-workshop-innovator-island | head_hexsha: 3f982ef6f70d28dfdc4e1d19103c181609b06b08 | licenses: ["MIT-0"]
stars: 1 (2020-11-26T16:12:16.000Z → 2020-11-26T16:12:16.000Z) | issues: null | forks: 1 (2020-11-26T16:12:21.000Z → 2020-11-26T16:12:21.000Z)
import os
import json
import cv2
import logging
import boto3
import botocore
s3 = boto3.client('s3')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def upload_file(file_name, bucket, object_name=None):
    """Upload a file to an S3 bucket

    :param file_name: File to upload
    :param bucket: Bucket to upload to
    :param object_name: S3 object name. If not specified then same as file_name
    :return: True if file was uploaded, else False
    """
    # If S3 object_name was not specified, use file_name
    if object_name is None:
        object_name = file_name

    # Upload the file (upload_file returns None; failures raise ClientError)
    try:
        s3.upload_file(file_name, bucket, object_name)
    except botocore.exceptions.ClientError as e:
        logger.error(e)
        return False
    return True
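# The Lambda handler that performs the chroma-key processing and calls
# upload_file was truncated in this extract. Hypothetical usage (bucket and
# key are example names, not from the source):
#
#   upload_file('/tmp/processed.png', 'example-output-bucket', 'results/processed.png')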
avg_line_length: 30.663793 | max_line_length: 86 | alphanum_fraction: 0.671633
hexsha: 5d1d2183e311c349b4e6a54b6abedc9e76fcc8d1 | size: 323 | ext: py | lang: Python
path: metrics/overflow.py | repo: DEKHTIARJonathan/pyinstrument | head_hexsha: cc4f3f6fc1b493d7cd058ecf41ad012e0030a512 | licenses: ["BSD-3-Clause"]
stars: 1 (2021-04-10T15:07:51.000Z → 2021-04-10T15:07:51.000Z) | issues: 1 (2022-02-28T02:48:43.000Z → 2022-02-28T02:48:43.000Z) | forks: 1 (2018-09-24T15:32:13.000Z → 2018-09-24T15:32:13.000Z)
from pyinstrument import Profiler
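# The original file defines `func` before profiling starts; it was dropped in
# this extract. A plausible stand-in (hypothetical, not the original code):
# a deep recursion that exercises the profiler's stack handling.
def func(n):
    if n <= 0:
        return 0
    return func(n - 1) + 1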
p = Profiler(use_signal=False)
p.start()
func(900)
p.stop()
print(p.output_text())
with open('overflow_out.html', 'w') as f:
    f.write(p.output_html())
avg_line_length: 13.458333 | max_line_length: 41 | alphanum_fraction: 0.597523
hexsha: 5d1d2acfb826681789b607d0aa918460c8853f38 | size: 12995 | ext: py | lang: Python
path: scripts/gen_tee_bin.py | repo: wawang621/optee_os | head_hexsha: bf7298044beca7a4501ece95c6146b5987cecaa4 | licenses: ["BSD-2-Clause"]
stars: null | issues: null | forks: null
#!/usr/bin/env python3
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2019, Linaro Limited
#
from __future__ import print_function
from __future__ import division
import argparse
import sys
import struct
import re
import hashlib
try:
    from elftools.elf.elffile import ELFFile
    from elftools.elf.constants import SH_FLAGS
    from elftools.elf.enums import ENUM_RELOC_TYPE_ARM
    from elftools.elf.enums import ENUM_RELOC_TYPE_AARCH64
    from elftools.elf.sections import SymbolTableSection
    from elftools.elf.relocation import RelocationSection
except ImportError:
    print("""
***
Can't find elftools module. Probably it is not installed on your system.
You can install this module with

$ apt install python3-pyelftools

if you are using Ubuntu. Or try to search for "pyelftools" or "elftools" in
your package manager if you are using some other distribution.
***
""")
    raise
small_page_size = 4 * 1024
elffile_symbols = None
tee_pageable_bin = None
tee_pager_bin = None
tee_embdata_bin = None
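# NOTE: main() and the remaining helper functions of this script were
# truncated in this extract.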
if __name__ == "__main__":
    main()
avg_line_length: 33.066158 | max_line_length: 79 | alphanum_fraction: 0.604309
hexsha: 5d1d311ba4f1a92388fbc36107c0bf393d5b97bc | size: 1864 | ext: py | lang: Python
path: CircuitPython_JEplayer_mp3/repeat.py | repo: gamblor21/Adafruit_Learning_System_Guides | head_hexsha: f5dab4a758bc82d0bfc3c299683fe89dc093912a | licenses: ["MIT"]
stars: 665 (2017-09-27T21:20:14.000Z → 2022-03-31T09:09:25.000Z) | issues: 641 (2017-10-03T19:46:37.000Z → 2022-03-30T18:28:46.000Z) | forks: 734 (2017-10-02T22:47:38.000Z → 2022-03-30T14:03:51.000Z)
# The MIT License (MIT)
#
# Copyright (c) 2020 Jeff Epler for Adafruit Industries LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Make a key (button) repeat when held down
"""
import time
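# The rest of the file was dropped in this extract. Below is a minimal sketch
# of what a key-repeat helper could look like (hypothetical, not the original
# Adafruit code): fire once on press, then repeat at a fixed rate after an
# initial delay.
class KeyRepeat:
    """Return True once on press, then repeatedly while the key stays held."""

    def __init__(self, is_pressed, delay=0.5, rate=0.1):
        self.is_pressed = is_pressed  # callable: True while the key is held
        self.delay = delay            # seconds before auto-repeat starts
        self.rate = rate              # seconds between repeats
        self._next_fire = None

    def value(self):
        now = time.monotonic()
        if not self.is_pressed():
            self._next_fire = None    # key released: reset state
            return False
        if self._next_fire is None:   # fresh press: fire and arm the delay
            self._next_fire = now + self.delay
            return True
        if now >= self._next_fire:    # held long enough: fire and re-arm
            self._next_fire = now + self.rate
            return True
        return False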
avg_line_length: 38.833333 | max_line_length: 79 | alphanum_fraction: 0.694742
hexsha: 5d1d5be9e9e0382909fb3777ed89becc272c0e93 | size: 767 | ext: py | lang: Python
path: Kapitel_1/_1_public_private.py | repo: Geralonx/Classes_Tutorial | head_hexsha: 9499db8159efce1e3c38975b66a9c649631c6727 | licenses: ["MIT"]
stars: 1 (2020-12-24T15:42:54.000Z → 2020-12-24T15:42:54.000Z) | issues: null | forks: null
# --- Class declaration with constructor --- #
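# The class body itself was dropped in this extract; the definition below is
# a minimal hypothetical reconstruction inferred from the usage that follows.
class PC:
    def __init__(self, cpu, gpu, ram):
        self.cpu = cpu      # public attribute
        self.gpu = gpu      # public attribute
        self.__ram = ram    # "private": name-mangled to _PC__ram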
# --- Instantiating the class --- #
# --- I prefer initialization with keyword arguments --- #
pc_instanz = PC(cpu='Ryzen 7', gpu='RTX2070Super', ram='GSkill')
# --- Accessing normal _public_ attributes --- #
print(pc_instanz.cpu)
print(pc_instanz.gpu)
# --- Accessing a _private_ attribute --- #
# Commented out because it raises an AttributeError.
# print(pc_instanz.__ram)
# --- Accessing the instance dictionary to inspect that instance's contents --- #
print(pc_instanz.__dict__)
# --- Accessing the actually _private_ attribute via its name-mangled form --- #
print(pc_instanz._PC__ram)
| 29.5 | 89 | 0.684485 |