id (string, 1-8 chars) | text (string, 6-1.05M chars) | dataset_id (string, 1 class)
---|---|---|
3439381
|
<gh_stars>1-10
from decouple import config
MYEMS_SYSTEM_DB_HOST = config('MYEMS_SYSTEM_DB_HOST', default='127.0.0.1')
MYEMS_SYSTEM_DB_PORT = config('MYEMS_SYSTEM_DB_PORT', default=3306, cast=int)
MYEMS_SYSTEM_DB_DATABASE = config('MYEMS_SYSTEM_DB_DATABASE', default='myems_system_db')
MYEMS_SYSTEM_DB_USER = config('MYEMS_SYSTEM_DB_USER', default='root')
MYEMS_SYSTEM_DB_PASSWORD = config('MYEMS_SYSTEM_DB_PASSWORD', default='!<PASSWORD>')
MYEMS_HISTORICAL_DB_HOST = config('MYEMS_HISTORICAL_DB_HOST', default='127.0.0.1')
MYEMS_HISTORICAL_DB_PORT = config('MYEMS_HISTORICAL_DB_PORT', default=3306, cast=int)
MYEMS_HISTORICAL_DB_DATABASE = config('MYEMS_HISTORICAL_DB_DATABASE', default='myems_historical_db')
MYEMS_HISTORICAL_DB_USER = config('MYEMS_HISTORICAL_DB_USER', default='root')
MYEMS_HISTORICAL_DB_PASSWORD = config('MYEMS_HISTORICAL_DB_PASSWORD', default='!<PASSWORD>')
INTERVAL_IN_SECONDS = config('INTERVAL_IN_SECONDS', default=600, cast=int)
GATEWAY_ID = config('GATEWAY_ID', cast=int)
GATEWAY_TOKEN = config('GATEWAY_TOKEN')
myems_system_db = {
'host': MYEMS_SYSTEM_DB_HOST,
'port': MYEMS_SYSTEM_DB_PORT,
'database': MYEMS_SYSTEM_DB_DATABASE,
'user': MYEMS_SYSTEM_DB_USER,
'password': MYEMS_SYSTEM_DB_PASSWORD,
}
myems_historical_db = {
'host': MYEMS_HISTORICAL_DB_HOST,
'port': MYEMS_HISTORICAL_DB_PORT,
'database': MYEMS_HISTORICAL_DB_DATABASE,
'user': MYEMS_HISTORICAL_DB_USER,
'password': MYEMS_HISTORICAL_DB_PASSWORD,
}
# Indicates how long the process waits between readings
interval_in_seconds = INTERVAL_IN_SECONDS
# Get the gateway ID and token from MyEMS Admin
# This is used for getting data sources associated with the gateway
gateway = {
'id': GATEWAY_ID,
'token': GATEWAY_TOKEN
}
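# Hedged usage note (added; not part of the original source): python-decouple's
# config() resolves each key from environment variables or a local .env file
# before falling back to the defaults above. A hypothetical .env could look like:
#
#   MYEMS_SYSTEM_DB_HOST=192.168.0.10
#   MYEMS_SYSTEM_DB_PASSWORD=change-me
#   GATEWAY_ID=1
#   GATEWAY_TOKEN=example-token
#
# GATEWAY_ID and GATEWAY_TOKEN have no defaults, so config() raises
# decouple.UndefinedValueError if they are missing from both sources.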
|
StarcoderdataPython
|
1864199
|
<filename>kubernetes/kserve/kf_request_json/v2/bert/Transformer_kserve_handler.py
import torch
import logging
from Transformer_handler_generalized import (
TransformersSeqClassifierHandler,
captum_sequence_forward,
construct_input_ref,
summarize_attributions,
get_word_token,
)
import json
from captum.attr import LayerIntegratedGradients
logger = logging.getLogger(__name__)
# TODO Extend the example for token classification, question answering and batch inputs
class TransformersKserveHandler(TransformersSeqClassifierHandler):
def __init__(self):
super(TransformersKserveHandler, self).__init__()
def preprocess(self, requests):
"""Basic text preprocessing, based on the user's chocie of application mode.
Args:
requests (str): The Input data in the form of text is passed on to the preprocess
function.
Returns:
list : The preprocess function returns a list of Tensor for the size of the word tokens.
"""
input_ids_batch = None
attention_mask_batch = None
input_ids = None
attention_mask = None
for idx, data in enumerate(requests):
if (
all(k in data for k in ["name", "shape", "datatype", "data"])
and data["datatype"] != "BYTES"
):
logger.debug("Received data: ", data)
if data["name"] == "input_ids":
input_ids = torch.tensor(data["data"]).unsqueeze(dim=0).to(self.device)
elif data["name"] == "attention_masks":
attention_mask = torch.tensor(data["data"]).unsqueeze(dim=0).to(self.device)
else:
raise ValueError(
"{} {} {}".format(
"Unknown input:",
data["name"],
"Valid inputs are ['input_ids', 'attention_masks']",
)
)
input_ids_batch = input_ids
attention_mask_batch = attention_mask
else:
input_text = data.get("data")
if input_text is None:
input_text = data.get("body")
if isinstance(input_text, (bytes, bytearray)):
input_text = input_text.decode("utf-8")
input_text = json.loads(input_text)["text"]
max_length = self.setup_config["max_length"]
logger.info("Received text: '%s'", input_text)
inputs = self.tokenizer.encode_plus(
input_text,
max_length=int(max_length),
pad_to_max_length=True,
add_special_tokens=True,
return_tensors="pt",
)
input_ids = inputs["input_ids"].to(self.device)
attention_mask = inputs["attention_mask"].to(self.device)
# making a batch out of the received requests
# attention masks are passed for cases where input tokens are padded.
if input_ids.shape is not None:
if input_ids_batch is None:
input_ids_batch = input_ids
attention_mask_batch = attention_mask
else:
input_ids_batch = torch.cat((input_ids_batch, input_ids), 0)
attention_mask_batch = torch.cat((attention_mask_batch, attention_mask), 0)
return (input_ids_batch, attention_mask_batch)
def get_insights(self, input_batch, text, target):
"""This function initialize and calls the layer integrated gradient to get word importance
of the input text if captum explanation has been selected through setup_config
Args:
input_batch (int): Batches of token IDs of the input text
text (str): The text specified in the input request
target (int): The target can be set to any acceptable label at the user's discretion.
Returns:
(list): Returns a list of importances and words.
"""
data = json.loads(text)
text = data["text"]
target = data["target"]
if self.setup_config["captum_explanation"]:
embedding_layer = getattr(self.model, self.setup_config["embedding_name"])
embeddings = embedding_layer.embeddings
self.lig = LayerIntegratedGradients(captum_sequence_forward, embeddings)
else:
logger.warning("Captum Explanation is not chosen and will not be available")
self.target = target
input_ids, ref_input_ids, attention_mask = construct_input_ref(
text, self.tokenizer, self.device, self.setup_config["mode"]
)
all_tokens = get_word_token(input_ids, self.tokenizer)
response = {}
response["words"] = all_tokens
attributions, delta = self.lig.attribute(
inputs=input_ids,
baselines=ref_input_ids,
target=self.target,
additional_forward_args=(attention_mask, 0, self.model),
return_convergence_delta=True,
)
attributions_sum = summarize_attributions(attributions)
response["importances"] = attributions_sum.tolist()
response["delta"] = delta[0].tolist()
return [response]
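# Hedged illustration (added; not in the original handler): preprocess() accepts
# either KServe v2-style tensor inputs or raw text bodies. A v2-style item that
# takes the first branch looks roughly like:
#
#   {"name": "input_ids", "shape": [1, 128], "datatype": "INT64", "data": [101, ...]}
#
# while the raw-text branch expects something like {"data": b'{"text": "some input"}'},
# which is tokenized with encode_plus() up to setup_config["max_length"].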
|
StarcoderdataPython
|
33615
|
from models.lenet import *
from models.wresnet import *
import os
import torch  # needed for torch.load() when loading pretrained checkpoints
# NOTE: the ImageNet branch below assumes resnet18/34/50/101/152 are importable
# (e.g. via torchvision.models or a local models.resnet module).
def select_model(dataset,
model_name,
pretrained=False,
pretrained_models_path=None):
if dataset in ['SVHN', 'CIFAR10', 'CINIC10', 'CIFAR100']:
n_classes = 100 if dataset == 'CIFAR100' else 10
assert model_name in ['LeNet', 'WRN-16-1', 'WRN-16-2', 'WRN-40-1', 'WRN-40-2']
if model_name=='LeNet':
model = LeNet32(n_classes=n_classes)
elif model_name=='WRN-16-1':
model = WideResNet(depth=16, num_classes=n_classes, widen_factor=1, dropRate=0.0)
elif model_name=='WRN-16-2':
model = WideResNet(depth=16, num_classes=n_classes, widen_factor=2, dropRate=0.0)
elif model_name=='WRN-40-1':
model = WideResNet(depth=40, num_classes=n_classes, widen_factor=1, dropRate=0.0)
elif model_name=='WRN-40-2':
model = WideResNet(depth=40, num_classes=n_classes, widen_factor=2, dropRate=0.0)
if pretrained:
model_path = os.path.join(pretrained_models_path, dataset, model_name, "last.pth.tar")
print('Loading Model from {}'.format(model_path))
checkpoint = torch.load(model_path, map_location='cpu')
model.load_state_dict(checkpoint['state_dict'])
elif dataset=='ImageNet':
assert model_name in ['ResNet18', 'ResNet34', 'ResNet50', 'ResNet101', 'ResNet152']
if model_name == 'ResNet18':
model = resnet18(pretrained=pretrained)
elif model_name == 'ResNet34':
model = resnet34(pretrained=pretrained)
elif model_name == 'ResNet50':
model = resnet50(pretrained=pretrained)
elif model_name == 'ResNet101':
model = resnet101(pretrained=pretrained)
elif model_name == 'ResNet152':
model = resnet152(pretrained=pretrained)
else:
raise NotImplementedError
return model
if __name__ == '__main__':
import torch
from torchsummary import summary
import random
import time
random.seed(1234) # torch transforms use this seed
torch.manual_seed(1234)
torch.cuda.manual_seed(1234)
support_x_task = torch.autograd.Variable(torch.FloatTensor(64, 3, 32, 32).uniform_(0, 1))
t0 = time.time()
model = select_model('CIFAR10', model_name='WRN-16-2')
output, act = model(support_x_task)
print("Time taken for forward pass: {} s".format(time.time() - t0))
print("\nOUTPUT SHAPE: ", output.shape)
summary(model, (3, 32, 32))
|
StarcoderdataPython
|
5165349
|
<gh_stars>100-1000
from rest_framework import serializers
from .models import Proposal, ProposalSection, ProposalType
class ProposalSerializer(serializers.HyperlinkedModelSerializer):
section = serializers.SerializerMethodField()
type = serializers.SerializerMethodField()
author = serializers.SerializerMethodField()
def get_section(self, proposal):
return proposal.proposal_section.name
def get_type(self, proposal):
return proposal.proposal_type.name
def get_author(self, proposal):
author = proposal.author
return (
"{} {}".format(author.first_name, author.last_name).strip()
or author.username
)
class Meta:
model = Proposal
fields = (
"title",
"section",
"type",
"author",
"slug",
"description",
"target_audience",
"prerequisites",
"content_urls",
"speaker_info",
"speaker_links",
)
class ProposalFilterSerializer(serializers.Serializer):
proposal_section = serializers.PrimaryKeyRelatedField(
queryset=ProposalSection.objects.all(), required=False
)
proposal_type = serializers.PrimaryKeyRelatedField(
queryset=ProposalType.objects.all(), required=False
)
|
StarcoderdataPython
|
5031581
|
<reponame>joeltio/np-train
from django.db import models
class Order(models.Model):
# A train location, e.g. Bishan
destination = models.TextField()
# PositiveSmallIntegerField accepts [0, 32767]
color = models.PositiveSmallIntegerField()
# SmallIntegerField accepts [-32768, 32767]
status = models.SmallIntegerField()
# The status values should be the same order as the flow and starting
# from 0
STATUS_NOT_ACTIVE = 0
STATUS_ACTIVE = 1
STATUS_COMPLETED = 2
STATUS_FLOW = [STATUS_NOT_ACTIVE, STATUS_ACTIVE, STATUS_COMPLETED]
def as_json(self):
return {
"id": self.id,
"destination": self.destination,
"color": self.color,
"status": self.status,
}
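# Hedged usage sketch (added; not from the original file): assuming Django is set
# up and migrations exist for this app, an Order could be created and serialized:
#
#   order = Order.objects.create(destination="Bishan", color=3,
#                                status=Order.STATUS_NOT_ACTIVE)
#   order.as_json()
#   # -> {"id": 1, "destination": "Bishan", "color": 3, "status": 0}
#
# The id shown is illustrative; it depends on the database state.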
|
StarcoderdataPython
|
1830357
|
def bold(text):
return f'<b>{text}</b>'
def italicize(text):
return f'<i>{text}</i>'
def normalize(text):
return text.replace('_', ' ').title()
def capitalize(text):
return ' '.join(list(map(lambda t: t.capitalize(), text.split())))
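# Illustrative examples (added; not in the original module) of the helpers above:
#
#   bold("hello")              # -> '<b>hello</b>'
#   italicize("hello")         # -> '<i>hello</i>'
#   normalize("first_name")    # -> 'First Name'
#   capitalize("hello world")  # -> 'Hello World'
#
# normalize() relies on str.title(), which also capitalizes letters after
# apostrophes, while capitalize() only upper-cases the first letter of each
# whitespace-separated word.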
|
StarcoderdataPython
|
1822050
|
<reponame>aiforrural/Digital-Events-Example
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
# TODO: Move this whole package into a standalone pypi package, since it's
# useful in general for anyone who wants to send emails (without using django)
# The code in here is taken almost verbatim from `django.core.mail`,
# which is licensed under the three-clause BSD license and is originally
# available on the following URL:
# https://github.com/django/django/blob/stable/2.2.x/django/core/mail/__init__.py
# Credits of the original code go to the Django Software Foundation
# and their contributors.
"""
Tools for sending email.
"""
from flask import current_app
from .backends.base import BaseEmailBackend
from .module_loading_utils import import_string
__all__ = ['get_connection']
def get_connection(backend=None, fail_silently=False, **kwds) -> BaseEmailBackend:
"""Load an email backend and return an instance of it.
If backend is None (default), use ``EMAIL_BACKEND`` from config.
Both fail_silently and other keyword arguments are used in the
constructor of the backend.
"""
klass = import_string(backend or current_app.config['EMAIL_BACKEND'])
return klass(fail_silently=fail_silently, **kwds)
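# Hedged usage sketch (added for illustration): within a Flask app context whose
# config defines EMAIL_BACKEND as a dotted path to a backend class, the helper
# above could be used roughly like this:
#
#   conn = get_connection(fail_silently=True)
#   # conn is an instance of the configured BaseEmailBackend subclass
#
# The backend interface (open/close/send_messages) follows the Django conventions
# this module was adapted from.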
|
StarcoderdataPython
|
9607358
|
<reponame>bidhata/EquationGroupLeaks
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: type_Result.py
from types import *
MSG_KEY_RESULT_STATUS = 196608
MSG_KEY_RESULT_STATUS_TYPE = 196609
MSG_KEY_RESULT_STATUS_DATA = 196610
MSG_KEY_RESULT_CONNECT = 262144
MSG_KEY_RESULT_CONNECT_CONNECTION_TYPE = 262145
MSG_KEY_RESULT_CONNECT_BAUDRATE = 262148
MSG_KEY_RESULT_CONNECT_DATA_BITS = 262149
MSG_KEY_RESULT_CONNECT_PARITY = 262150
MSG_KEY_RESULT_CONNECT_STOP_BITS = 262151
MSG_KEY_RESULT_CONNECT_COMM_STATE = 262152
MSG_KEY_RESULT_CONNECT_INDEX = 262154
class StatusResult:
def __init__(self):
self.__dict__['statusType'] = 0
self.__dict__['data'] = ''
def __getattr__(self, name):
if name == 'statusType':
return self.__dict__['statusType']
if name == 'data':
return self.__dict__['data']
raise AttributeError("Attribute '%s' not found" % name)
def __setattr__(self, name, value):
if name == 'statusType':
self.__dict__['statusType'] = value
elif name == 'data':
self.__dict__['data'] = value
else:
raise AttributeError("Attribute '%s' not found" % name)
def Marshal(self, mmsg):
from mcl.object.Message import MarshalMessage
submsg = MarshalMessage()
submsg.AddU8(MSG_KEY_RESULT_STATUS_TYPE, self.__dict__['statusType'])
submsg.AddStringUtf8(MSG_KEY_RESULT_STATUS_DATA, self.__dict__['data'])
mmsg.AddMessage(MSG_KEY_RESULT_STATUS, submsg)
def Demarshal(self, dmsg, instance=-1):
import mcl.object.Message
msgData = dmsg.FindData(MSG_KEY_RESULT_STATUS, mcl.object.Message.MSG_TYPE_MSG, instance)
submsg = mcl.object.Message.DemarshalMessage(msgData)
self.__dict__['statusType'] = submsg.FindU8(MSG_KEY_RESULT_STATUS_TYPE)
self.__dict__['data'] = submsg.FindString(MSG_KEY_RESULT_STATUS_DATA)
class ConnectResult:
def __init__(self):
self.__dict__['connectionType'] = 0
self.__dict__['commStateRetrieved'] = False
self.__dict__['baudrate'] = 0
self.__dict__['dataBits'] = 0
self.__dict__['parity'] = 0
self.__dict__['stopBits'] = 0
self.__dict__['index'] = 0
def __getattr__(self, name):
if name == 'connectionType':
return self.__dict__['connectionType']
if name == 'commStateRetrieved':
return self.__dict__['commStateRetrieved']
if name == 'baudrate':
return self.__dict__['baudrate']
if name == 'dataBits':
return self.__dict__['dataBits']
if name == 'parity':
return self.__dict__['parity']
if name == 'stopBits':
return self.__dict__['stopBits']
if name == 'index':
return self.__dict__['index']
raise AttributeError("Attribute '%s' not found" % name)
def __setattr__(self, name, value):
if name == 'connectionType':
self.__dict__['connectionType'] = value
elif name == 'commStateRetrieved':
self.__dict__['commStateRetrieved'] = value
elif name == 'baudrate':
self.__dict__['baudrate'] = value
elif name == 'dataBits':
self.__dict__['dataBits'] = value
elif name == 'parity':
self.__dict__['parity'] = value
elif name == 'stopBits':
self.__dict__['stopBits'] = value
elif name == 'index':
self.__dict__['index'] = value
else:
raise AttributeError("Attribute '%s' not found" % name)
def Marshal(self, mmsg):
from mcl.object.Message import MarshalMessage
submsg = MarshalMessage()
submsg.AddU8(MSG_KEY_RESULT_CONNECT_CONNECTION_TYPE, self.__dict__['connectionType'])
submsg.AddBool(MSG_KEY_RESULT_CONNECT_COMM_STATE, self.__dict__['commStateRetrieved'])
submsg.AddU32(MSG_KEY_RESULT_CONNECT_BAUDRATE, self.__dict__['baudrate'])
submsg.AddU8(MSG_KEY_RESULT_CONNECT_DATA_BITS, self.__dict__['dataBits'])
submsg.AddU8(MSG_KEY_RESULT_CONNECT_PARITY, self.__dict__['parity'])
submsg.AddU8(MSG_KEY_RESULT_CONNECT_STOP_BITS, self.__dict__['stopBits'])
submsg.AddU32(MSG_KEY_RESULT_CONNECT_INDEX, self.__dict__['index'])
mmsg.AddMessage(MSG_KEY_RESULT_CONNECT, submsg)
def Demarshal(self, dmsg, instance=-1):
import mcl.object.Message
msgData = dmsg.FindData(MSG_KEY_RESULT_CONNECT, mcl.object.Message.MSG_TYPE_MSG, instance)
submsg = mcl.object.Message.DemarshalMessage(msgData)
self.__dict__['connectionType'] = submsg.FindU8(MSG_KEY_RESULT_CONNECT_CONNECTION_TYPE)
self.__dict__['commStateRetrieved'] = submsg.FindBool(MSG_KEY_RESULT_CONNECT_COMM_STATE)
self.__dict__['baudrate'] = submsg.FindU32(MSG_KEY_RESULT_CONNECT_BAUDRATE)
self.__dict__['dataBits'] = submsg.FindU8(MSG_KEY_RESULT_CONNECT_DATA_BITS)
self.__dict__['parity'] = submsg.FindU8(MSG_KEY_RESULT_CONNECT_PARITY)
self.__dict__['stopBits'] = submsg.FindU8(MSG_KEY_RESULT_CONNECT_STOP_BITS)
self.__dict__['index'] = submsg.FindU32(MSG_KEY_RESULT_CONNECT_INDEX)
|
StarcoderdataPython
|
5099205
|
from django.contrib.auth import get_user_model
from django.contrib.sites.models import Site
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from konkourse import settings
from change_email.models import EmailChange
class EmailNotUsedValidator(object):
"""
A validator to check if a given email address is already taken.
"""
code = "email_in_use"
msg = _("This email address is already in use."
" Please supply a different email address.")
def __call__(self, value):
UserModel = get_user_model()
key = '%s__iexact' % settings.EMAIL_CHANGE_FIELD
kwargs = {key: value}
if settings.EMAIL_CHANGE_VALIDATE_SITE:
site = Site.objects.get_current()
kwargs['site'] = site
if UserModel.objects.filter(**kwargs).count():
raise ValidationError(self.msg, code=self.code)
return
del kwargs[key]
kwargs['new_email__iexact'] = value
if EmailChange.objects.filter(**kwargs).count():
raise ValidationError(self.msg, code=self.code)
validate_email_not_used = EmailNotUsedValidator()
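# Hedged usage sketch (added; not part of the original module): validators like
# this are typically attached to a form field, e.g.:
#
#   from django import forms
#
#   class ChangeEmailForm(forms.Form):  # hypothetical form
#       new_email = forms.EmailField(validators=[validate_email_not_used])
#
# The validator raises ValidationError with code "email_in_use" when the address
# is already taken by a user or by a pending EmailChange request.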
|
StarcoderdataPython
|
4977135
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 23 17:18:27 2021
@author: <NAME>
"""
from .specification import Specification
import sympy as sp
class RWS(Specification):
"""Reach-while-stay specification.
Represents the RWS specification and implements the corresponding
fitness function and verification. Subclass of Specification.
Arguments
---------
variables: tuple of symbolic state variables.
Example: sympy.symbols('x1, x2')
inputs: tuple of symbolic input variables.
Example: sympy.symbols('u1,')
f_sym (sympy expression): Symbolic expression of the system dynamics
options (dictionary): dictionary with all the settings relating to the
specification.
Required options
----------------
S_list, I_list, O_list: lists of the lower and upper bounds of the
safe set, initial set and goal set
Optional
--------
number_samples: Number of samples. Default 100
r_delta: Inflation of the boundary. Default: 0.01
gamma: (Arbitrary) decrease of the LF. Default: 0.01
c: (Arbitrary) nonnegative parameter (see manual).
Default: 0.01
"""
def __init__(self, options):
# Call the __init__ function of the Spec parent class first.
number_conditions = 3 # number of RWS conditions
Specification.__init__(self, options, number_conditions)
S_list = self.options["S_list"]
I_list = self.options["I_list"]
O_list = self.options["O_list"]
# decrease of the LBF. Default 0.01
self.gamma = self.options.get("gamma", 0.01)
# Create an inflated safe set to create a conservative boundary set
r_delta = self.options.get("r_delta", 0.01) # Default =0.01
R_list = [
[S_list[i][0] - r_delta, S_list[i][1] + r_delta]
for i in range(0, len(S_list))
]
# Create sample sets
I_data = self.sample_set(I_list)
dS_data = self.sample_set_complement(R_list, S_list)
S_not_O_data = self.sample_set_complement(S_list, O_list)
self.add_data_sets([I_data, dS_data, S_not_O_data])
# Create symbolic domains for SMT solver
S_set = self.create_symbolic_interval(self.var, S_list)
R_set = self.create_symbolic_interval(self.var, R_list)
I_set = self.create_symbolic_interval(self.var, I_list)
O_set = self.create_symbolic_interval(self.var, O_list)
S_open_set = self.create_symbolic_interval(self.var, S_list,
open_set=True)
S_not_O_set = sp.And(S_set, sp.Not(O_set))
closed_R_not_S_set = sp.And(R_set, sp.Not(S_open_set))
self.add_condition_sets((I_set, closed_R_not_S_set, S_not_O_set))
# TODO: this list contains n copies! change
self.verification_result = [None] * self._number_conditions
def create_conditions(self, solution):
"""Create the conditions to be verified with an SMT solver."""
# create the conditions to verify
con1 = solution.V_sym <= 0
con2 = solution.V_sym > 0
con3 = sp.Or(solution.V_sym > 0, solution.dtV_sym <= -self.gamma)
return (con1, con2, con3)
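# Hedged illustration (added; not in the original file): the option keys read in
# __init__ above are S_list, I_list, O_list, gamma and r_delta. For a 2D state
# space the bound lists could look like:
#
#   options = {
#       "S_list": [[-1.0, 1.0], [-1.0, 1.0]],   # safe set bounds per state
#       "I_list": [[-0.1, 0.1], [-0.1, 0.1]],   # initial set bounds
#       "O_list": [[0.4, 0.6], [0.4, 0.6]],     # goal set bounds
#       "gamma": 0.01,
#       "r_delta": 0.01,
#   }
#
# Any further keys (variables, f_sym, number_samples, ...) are consumed by the
# Specification base class, which is not shown here.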
|
StarcoderdataPython
|
8057696
|
<filename>osr2mp4/ImageProcess/Objects/Components/Playfield.py
from PIL import Image
FORMAT = ".png"
class Playfield:
def __init__(self, filename, width, height):
self.img = Image.open(filename + FORMAT)
self.img.resize(width, height)
def add_to_frame(self, background):
# y1, y2 = 0, background.shape[0]
# x1, x2 = 0, background.shape[1]
#
# alpha_s = self.img[:, :, 3] / 255.0
# alpha_l = 1.0 - alpha_s
#
# for c in range(0, 3):
# background[y1:y2, x1:x2, c] = (alpha_s * self.img[:, :, c] + alpha_l * background[y1:y2, x1:x2, c])
#
##TODO
pass
|
StarcoderdataPython
|
6682890
|
<reponame>wep21/jetson-containers
print('testing numba...')
import math
import numba
from numba import vectorize, cuda
import numpy as np
print('numba version: ' + str(numba.__version__))
print('testing cuda ufunc...')
@vectorize(['float32(float32, float32, float32)',
'float64(float64, float64, float64)'],
target='cuda')
def cu_discriminant(a, b, c):
return math.sqrt(b ** 2 - 4 * a * c)
N = 10000
dtype = np.float32
# prepare the input
A = np.array(np.random.sample(N), dtype=dtype)
B = np.array(np.random.sample(N) + 10, dtype=dtype)
C = np.array(np.random.sample(N), dtype=dtype)
D = cu_discriminant(A, B, C)
print('cuda ufunc result:')
print(D) # print result
print('numba OK\n')
|
StarcoderdataPython
|
3355885
|
#!/usr/bin/env python
# coding=utf-8
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.locks import Condition
condition = Condition()
@gen.coroutine
def waiter():
print 'wait here'
yield condition.wait()
print 'done waiting'
@gen.coroutine
def notifier():
print 'ready notify'
condition.notify()
print 'done notify'
@gen.coroutine
def runner():
yield [waiter(), notifier()]
if __name__ == '__main__':
IOLoop.current().run_sync(runner)
|
StarcoderdataPython
|
9645931
|
<gh_stars>0
# -*- coding: utf-8 -*-
import base64
import os
import random
import string
import requests
from kubernetes import client, config
from src import KIBANA_HOST, KIBANA_USERNAME, KIBANA_PASSWORD, LOAD_KUBECONFIG, LOAD_INCLUSTER_CONFIG, \
ELASTICSEARCH_HOST, DOMAIN, ES_VERSION
from src.loggings.logger import logger
KIBANA_AUTH = "{}:{}".format(KIBANA_USERNAME, KIBANA_PASSWORD)
message_bytes = KIBANA_AUTH.encode('ascii')
base64_bytes = base64.b64encode(message_bytes)
KIBANA_AUTH = base64_bytes.decode('ascii')
log = logger(__name__)
exclude_namespaces_set = set()
if 'EXCLUDE_NAMESPACES' in os.environ and os.environ["EXCLUDE_NAMESPACES"] != "":
EXCLUDE_NAMESPACES = os.environ['EXCLUDE_NAMESPACES']
exclude_namespaces_set = set(EXCLUDE_NAMESPACES.split(","))
def get_namespaces():
if LOAD_KUBECONFIG == "1":
config.load_kube_config()
if LOAD_INCLUSTER_CONFIG == "1":
config.load_incluster_config()
v1 = client.CoreV1Api()
namespaces = v1.list_namespace(watch=False)
return namespaces
def create_index_patterns(namespace):
log.info("Creating index pattern: {}".format(namespace))
url = "{}/api/saved_objects/index-pattern/logstash-{}-*".format(KIBANA_HOST, namespace)
payload = {
"attributes": {
"title": "logstash-{}-*".format(namespace),
"timeFieldName": "@timestamp"
}
}
headers = {
'kbn-xsrf': 'anything',
'Authorization': 'Basic {}'.format(KIBANA_AUTH),
'Content-Type': 'application/json'
}
response = requests.request("POST", url, headers=headers, json=payload, timeout=60)
log.info("", extra={"props": {"response": response.text}})
def is_index_pattern_exists(namespace):
url = "{}/api/saved_objects/index-pattern/logstash-{}-*".format(KIBANA_HOST, namespace)
headers = {
'kbn-xsrf': 'anything',
'Authorization': 'Basic {}'.format(KIBANA_AUTH)
}
response = requests.request("GET", url, headers=headers, timeout=60)
if response.status_code == 404:
return False
return True
def create_space(namespace):
url = "{}/api/spaces/space".format(KIBANA_HOST)
payload = {
"id": namespace,
"name": namespace,
"disabledFeatures": [
"timelion",
"dev_tools",
"enterpriseSearch",
"logs",
"siem",
"advancedSettings",
"monitoring",
"stackAlerts",
"actions",
"ingestManager",
"ml",
"infrastructure",
"apm",
"uptime"
]
}
headers = {
'kbn-xsrf': 'anything',
'Authorization': 'Basic {}'.format(KIBANA_AUTH)
}
response = requests.request("POST", url, headers=headers, json=payload, timeout=60, verify=False)
log.info("", extra={"props": {"response": response.text}})
def create_role(namespace):
url = "{}/api/security/role/{}".format(KIBANA_HOST, namespace)
payload = {
"metadata": {
"version": 1
},
"elasticsearch": {
"cluster": [],
"indices": [
{
"names": [
"logstash-{}-*".format(namespace)
],
"privileges": [
"read"
]
}
]
},
"kibana": [
{
"base": [],
"feature": {
"dashboard": [
"all"
],
"discover": [
"all"
],
"canvas": [
"all"
],
"maps": [
"all"
],
"visualize": [
"all"
],
"savedObjectsManagement": [
"read"
]
},
"spaces": [
namespace
]
}
]
}
headers = {
'kbn-xsrf': 'anything',
'Authorization': 'Basic {}'.format(KIBANA_AUTH)
}
response = requests.request("PUT", url, headers=headers, json=payload, timeout=60, verify=False)
log.info("", extra={"props": {"response": response.text}})
def create_user(namespace):
url = "{}/_security/user/{}".format(ELASTICSEARCH_HOST, namespace)
password = random_string()
payload = {
"password": password,
"roles": [namespace],
"full_name": namespace,
"email": <EMAIL>(namespace, DOMAIN),
"metadata": {}
}
headers = {
'kbn-xsrf': 'anything',
'Authorization': 'Basic {}'.format(KIBANA_AUTH)
}
response = requests.request("POST", url, headers=headers, json=payload, timeout=60, verify=False)
log.info("", extra={"props": {"response": response.text}})
log.info("", extra={"props": {"username": namespace, "password": password}})
def create_space_index_pattern(namespace):
url = "{}/s/{}/api/saved_objects/index-pattern/logstash-{}-*".format(KIBANA_HOST, namespace, namespace)
payload = {
"attributes": {
"title": "logstash-{}-*".format(namespace),
"timeFieldName": "@timestamp"
}
}
headers = {
'kbn-xsrf': 'anything',
'Authorization': 'Basic {}'.format(KIBANA_AUTH)
}
response = requests.request("POST", url, headers=headers, json=payload, timeout=60, verify=False)
log.info("", extra={"props": {"response": response.text}})
def random_string(size=20):
chars = string.ascii_uppercase + string.digits
return ''.join(random.choice(chars) for _ in range(size))
def config_space_default_index_pattern(namespace):
url = "{}/s/{}/spaces/enter".format(KIBANA_HOST, namespace)
headers = {
'kbn-xsrf': 'anything',
'Authorization': 'Basic {}'.format(KIBANA_AUTH)
}
requests.request("GET", url, headers=headers, timeout=60, verify=False)
payload = {
"attributes": {
"defaultIndex": "logstash-{}-*".format(namespace)
}
}
url = "{}/s/{}/api/saved_objects/config/{}".format(KIBANA_HOST, namespace, ES_VERSION)
response = requests.request("PUT", url, headers=headers, json=payload, timeout=60, verify=False)
log.info("", extra={"props": {"response": response.text}})
def job():
namespaces = get_namespaces()
for item in namespaces.items:
namespace = item.metadata.name
index_pattern_exists = is_index_pattern_exists(namespace)
if index_pattern_exists is False:
create_index_patterns(namespace)
if namespace in exclude_namespaces_set:
continue
# create_space(namespace)
# create_role(namespace)
# create_user(namespace)
# create_space_index_pattern(namespace)
# config_space_default_index_pattern(namespace)
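# Hedged usage note (added): this script is driven by the settings imported from
# src (KIBANA_HOST, KIBANA_USERNAME, ...) plus the optional EXCLUDE_NAMESPACES
# environment variable, e.g. EXCLUDE_NAMESPACES="kube-system,kube-public".
# job() then creates a logstash-<namespace>-* index pattern for every namespace
# that does not already have one; the space/role/user provisioning calls above
# are currently commented out.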
|
StarcoderdataPython
|
5059838
|
<gh_stars>1-10
import numpy as np
from text import colour_text
import sympy
def getLinearlyIndependentCoeffs(expr):
def getCoefficient(e):
return e.as_independent(*e.free_symbols, as_Add=False)
if type(expr) == sympy.Add:
result = []
for term in expr.as_terms()[0]:
result.append(getCoefficient(term[0]))
return result
else:
return [getCoefficient(expr)]
def dot(*args):
a = args[0]
for i in xrange(1, len(args)):
a = a.dot(args[i])
return a
def tensor(*args):
a = args[0]
for i in xrange(1, len(args)):
a = np.kron(a, args[i])
return a
def struct_allclose(a, b, rtol=1e-05, atol=1e-08):
if set(a.dtype.names) != set(b.dtype.names):
return False
for name in a.dtype.names:
if not np.allclose(a[name], b[name], rtol=rtol, atol=atol):
return False
return True
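# Illustrative sketch (added; not in the original file) of the helpers above:
#
#   x, y = sympy.symbols('x y')
#   getLinearlyIndependentCoeffs(3*x + 5*y)
#   # -> [(3, x), (5, y)]  (ordering may differ)
#
#   tensor(np.eye(2), np.eye(2)).shape
#   # -> (4, 4), i.e. the Kronecker product of the two identity matrices
#
# dot() and tensor() use xrange, so this module targets Python 2.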
|
StarcoderdataPython
|
8155296
|
<reponame>frommwonderland/pytorch_connectomics
from typing import Optional, List
import numpy as np
import torch
import torch.utils.data
from .dataset_volume import VolumeDataset
from ..augmentation import Compose
from ..utils import *
TARGET_OPT_TYPE = List[str]
WEIGHT_OPT_TYPE = List[List[str]]
AUGMENTOR_TYPE = Optional[Compose]
class PairDataset(VolumeDataset):
r""" This Dataloader will prepare sample that are pairs for feeding the contrastive
learning algorithm.
Inherits all the attributes and functions from parent VolumeDataset.
"""
def __init__(self,
volume: list,
label: Optional[list] = None,
valid_mask: Optional[list] = None,
valid_ratio: float = 0.5,
sample_volume_size: tuple = (8, 64, 64),
sample_label_size: tuple = (8, 64, 64),
sample_stride: tuple = (1, 1, 1),
augmentor: AUGMENTOR_TYPE = None,
target_opt: TARGET_OPT_TYPE = ['1'],
weight_opt: WEIGHT_OPT_TYPE = [['1']],
erosion_rates: Optional[List[int]] = None,
dilation_rates: Optional[List[int]] = None,
mode: str = 'train',
do_2d: bool = False,
iter_num: int = -1,
reject_size_thres: int = 0,
reject_diversity: int = 0,
reject_p: float = 0.95,
data_mean=0.5,
data_std=0.5):
super().__init__(volume, label, valid_mask, valid_ratio, sample_volume_size, sample_label_size, sample_stride,
augmentor, target_opt, weight_opt, erosion_rates, dilation_rates, mode, do_2d, iter_num)
self.num_augmented_images = 2
def __len__(self):
pass
def __getitem__(self, idx):
if self.mode == 'train':
sample_pair = self._create_sample_pair()
return sample_pair
def _create_sample_pair(self):
r"""Create a sample pair that will be used for contrastive learning.
"""
sample_pair = list()
sample = self._random_sampling(self.sample_volume_size)
pos, out_volume, out_label, out_valid = sample
out_volume = self._create_masked_input(out_volume, out_label)
data = {'image': out_volume}
for i in range(self.num_augmented_images):
augmented = self.augmentor(data)
sample_pair.append(augmented['image'])
return sample_pair
def _create_masked_input(self, vol: np.ndarray, label: np.ndarray) -> np.ndarray:
r"""Create masked input volume, that is pure EM where the mask is not 0. Otherwise all
values set to 0. Returns the prepared mask.
Args:
vol (numpy.ndarray): volume that is EM input.
label (numpy.ndarray): associated label volume.
"""
vol[np.where(label == 0)] = 0
return vol
|
StarcoderdataPython
|
8114299
|
<reponame>EdwaRen/Competitve-Programming
class Solution:
def spiralOrder(self, matrix):
res = []
m = len(matrix)
if m == 0:
return res
n = len(matrix[0])
r1 = 0
r2 = n-1
c1 = 0
c2 = m-1
i = 0
j = 0
while len(res) < m * n and r2 >=0 and c2 >=0:
i = r1
while i <= r2:
res.append(matrix[c1][i])
i+=1
c1+=1
j = c1
if len(res) < m * n:
while j <= c2:
res.append(matrix[j][r2])
j+=1
r2-=1
if len(res) < m * n:
i = r2
while i >= r1:
res.append(matrix[c2][i])
i-=1
c2-=1
if len(res) < m * n:
j = c2
while j >= c1:
res.append(matrix[j][r1])
j-=1
r1+=1
return res
a = Solution()
matrix = [
[1]
]
print(a.spiralOrder(matrix))
|
StarcoderdataPython
|
6421029
|
#!/usr/bin/env python
import numpy as np
import pandas as pd
import timeit
def inv_mat(n):
'''Invert a random square matrix of size n'''
np.random.seed(0) # O(1)
matrice = np.random.rand(n, n) # O(n^2)
return np.linalg.inv(matrice) # O(n^3) ?
def main():
# Different sizes n, increasing exponentially
valeurs_n = [3 * 2**e for e in range(6, 12)]
les_temps = []  # Accumulates the measured computation times
# For each value of the size n
for n in valeurs_n:
# Reduce the number of calls as n grows
nb_appels = int(2 * max(valeurs_n) / n)
print(f'Appel de inv_mat({n}) {nb_appels} fois ...')
# Time inv_mat(n) over nb_appels calls
# Store the time of a single call
temps = timeit.timeit(lambda: inv_mat(n), number=nb_appels)
les_temps.append(temps / nb_appels)
resultats = pd.DataFrame({'temps': les_temps, 'n': valeurs_n})
resultats.to_csv('temps_inv.csv', index=False)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
9679858
|
<gh_stars>10-100
import numpy as np
import os
import shutil
import yaml
from typing import Union, List, Dict
from rich.console import Console
from .utils import (
write_to_hdf5,
load_config,
print_welcome,
print_startup,
print_update,
print_reload,
print_storage,
)
from .save import StatsLog, TboardLog, ModelLog, FigureLog, ExtraLog
class MLELogger(object):
"""
Logging object for Machine Learning experiments
Args:
======= TRACKING AND PRINTING VARIABLE NAMES
time_to_track (List[str]): column names of pandas df - time
what_to_track (List[str]): column names of pandas df - statistics
time_to_print (List[str]): subset columns of time df to print out
what_to_print (List[str]): subset columns of stats df to print out
======= TRACKING AND PRINTING VARIABLE NAMES
config_fname (str): file path of configuration of experiment
config_dict(dict): dictionary of experiment config to store in yaml
experiment_dir (str): base experiment directory
seed_id (str): seed id to distinguish logs with (e.g. seed_0)
overwrite (bool): delete old log file/tboard dir
======= VERBOSITY/TBOARD LOGGING
use_tboard (bool): whether to log to tensorboard
log_every_j_steps (int): steps between log updates
print_every_k_updates (int): after how many log updates - verbose
======= MODEL STORAGE
model_type (str): ["torch", "jax", "sklearn", "numpy"]
ckpt_time_to_track (str): Variable name/score key to save
save_every_k_ckpt (int): save every other checkpoint
save_top_k_ckpt (int): save top k performing checkpoints
top_k_metric_name (str): Variable name/score key to save
top_k_minimize_metric (str): Boolean for min/max score in top k logging
"""
def __init__(
self,
experiment_dir: str = "/",
time_to_track: List[str] = [],
what_to_track: List[str] = [],
time_to_print: Union[List[str], None] = None,
what_to_print: Union[List[str], None] = None,
config_fname: Union[str, None] = None,
config_dict: Union[dict, None] = None,
seed_id: Union[str, int] = "no_seed_provided",
overwrite: bool = False,
use_tboard: bool = False,
log_every_j_steps: Union[int, None] = None,
print_every_k_updates: Union[int, None] = 1,
model_type: str = "no-model-type",
ckpt_time_to_track: Union[str, None] = None,
save_every_k_ckpt: Union[int, None] = None,
save_top_k_ckpt: Union[int, None] = None,
top_k_metric_name: Union[str, None] = None,
top_k_minimize_metric: Union[bool, None] = None,
reload: bool = False,
verbose: bool = False,
):
# Set up tensorboard when/where to log and when to print
self.use_tboard = use_tboard
self.log_every_j_steps = log_every_j_steps
self.print_every_k_updates = print_every_k_updates
self.log_save_counter = reload
self.log_setup_counter = reload
self.seed_id = (
"seed_" + str(seed_id) if type(seed_id) == int else seed_id
)
self.config_fname = config_fname
self.config_dict = config_dict
# Set up the logging directories - copy timestamped config file
self.setup_experiment(
experiment_dir,
config_fname,
self.seed_id,
overwrite,
reload,
)
# STATS & TENSORBOARD LOGGING SETUP
self.stats_log = StatsLog(
self.experiment_dir,
self.seed_id,
time_to_track,
what_to_track,
reload,
)
if self.use_tboard:
self.tboard_log = TboardLog(
self.experiment_dir,
self.seed_id,
)
# MODEL, FIGURE & EXTRA LOGGING SETUP
self.model_log = ModelLog(
self.experiment_dir,
self.seed_id,
model_type,
ckpt_time_to_track,
save_every_k_ckpt,
save_top_k_ckpt,
top_k_metric_name,
top_k_minimize_metric,
reload,
)
self.figure_log = FigureLog(
self.experiment_dir,
self.seed_id,
reload,
)
self.extra_log = ExtraLog(
self.experiment_dir,
self.seed_id,
reload,
)
# VERBOSITY SETUP: Set up what to print
self.verbose = verbose
self.print_counter = 0
self.time_to_print = time_to_print
self.what_to_print = what_to_print
if not reload and verbose:
print_welcome()
print_startup(
self.experiment_dir,
self.config_fname,
time_to_track,
what_to_track,
model_type,
seed_id,
use_tboard,
reload,
print_every_k_updates,
ckpt_time_to_track,
save_every_k_ckpt,
save_top_k_ckpt,
top_k_metric_name,
top_k_minimize_metric,
)
elif reload and verbose:
print_reload(
self.experiment_dir,
)
def setup_experiment( # noqa: C901
self,
base_exp_dir: str,
config_fname: Union[str, None],
seed_id: str,
overwrite_experiment_dir: bool = False,
reload: bool = False,
) -> None:
"""Setup directory name and clean up previous logging data."""
# Get timestamp of experiment & create new directories
if config_fname is not None:
self.base_str = os.path.split(config_fname)[1].split(".")[0]
if not reload:
self.experiment_dir = os.path.join(base_exp_dir, self.base_str)
else:
# Don't redefine experiment directory but get already existing
exp_dir = [
f
for f in os.listdir(base_exp_dir)
if f.endswith(self.base_str)
][0]
self.experiment_dir = os.path.join(base_exp_dir, exp_dir)
else:
self.base_str = ""
self.experiment_dir = base_exp_dir
self.log_save_fname = os.path.join(
self.experiment_dir, "logs/", "log_" + seed_id + ".hdf5"
)
aggregated_log_save_fname = os.path.join(
self.experiment_dir, "logs/", "log.hdf5"
)
# Delete old experiment logging directory
if overwrite_experiment_dir and not reload:
if os.path.exists(self.log_save_fname):
Console().log(
"Be careful - you are overwriting an existing log."
)
os.remove(self.log_save_fname)
if os.path.exists(aggregated_log_save_fname):
Console().log(
"Be careful - you are overwriting an existing aggregated"
" log."
)
os.remove(aggregated_log_save_fname)
if self.use_tboard:
Console().log(
"Be careful - you are overwriting existing tboards."
)
if os.path.exists(
os.path.join(self.experiment_dir, "tboards/")
):
shutil.rmtree(os.path.join(self.experiment_dir, "tboards/"))
def create_logging_dir(
self,
config_fname: Union[str, None],
config_dict: Union[dict, None],
):
"""Create new empty dir for experiment (if not existing)."""
os.makedirs(self.experiment_dir, exist_ok=True)
# Copy over json configuration file if it exists
if config_fname is not None:
fname, fext = os.path.splitext(config_fname)
else:
fext = ".yaml"
if config_fname is not None:
config_copy = os.path.join(
self.experiment_dir, self.base_str + fext
)
shutil.copy(config_fname, config_copy)
self.config_copy = config_copy
self.config_dict = load_config(config_fname)
elif config_dict is not None:
config_copy = os.path.join(
self.experiment_dir, "config_dict" + fext
)
with open(config_copy, "w") as outfile:
yaml.dump(config_dict, outfile, default_flow_style=False)
self.config_copy = config_copy
self.config_dict = config_dict
else:
self.config_copy = "config-not-provided"
self.config_dict = {}
# Create .hdf5 logging sub-directory
os.makedirs(os.path.join(self.experiment_dir, "logs/"), exist_ok=True)
def update(
self,
clock_tick: Dict[str, int],
stats_tick: Dict[str, float],
model=None,
plot_fig=None,
extra_obj=None,
save=False,
) -> None:
"""Update with the newest tick of performance stats, net weights"""
# Make sure that timeseries data consists of floats
stats_tick = {
key: float(value) if type(value) != np.ndarray else value
for (key, value) in stats_tick.items()
}
# Update the stats log with newest timeseries data
c_tick, s_tick = self.stats_log.update(clock_tick, stats_tick)
# Update the tensorboard log with the newest event
if self.use_tboard:
self.tboard_log.update(
self.stats_log.time_to_track,
clock_tick,
stats_tick,
model,
plot_fig,
)
# Save the most recent model checkpoint
if model is not None:
self.save_model(model)
# Save fig from matplotlib
if plot_fig is not None:
self.save_plot(plot_fig)
# Save .pkl object
if extra_obj is not None:
self.save_extra(extra_obj)
# Save the .hdf5 log if boolean says so
if save:
self.save()
# Print the most current results
if self.verbose and self.print_every_k_updates is not None:
if (
self.stats_log.stats_update_counter % self.print_every_k_updates
== 0
):
# Print storage paths generated/updated
print_storage(
fig_path=(
self.figure_log.fig_storage_paths[-1]
if plot_fig is not None
else None
),
extra_path=(
self.extra_log.extra_storage_paths[-1]
if extra_obj is not None
else None
),
init_model_path=(
self.model_log.init_model_save_fname
if model is not None and self.model_log.init_model_saved
else None
),
final_model_path=(
self.model_log.final_model_save_fname
if model is not None
else None
),
every_k_model_path=(
self.model_log.every_k_ckpt_list[-1]
if model is not None and self.model_log.stored_every_k
else None
),
top_k_model_path=(
self.model_log.top_k_ckpt_list[-1]
if model is not None and self.model_log.stored_top_k
else None
),
print_first=self.print_counter == 0,
)
# Only print column name header at 1st print!
if self.time_to_print is None:
time_to_p = self.stats_log.time_to_track
else:
time_to_p = ["time", "time_elapsed", "num_updates"]
if self.what_to_print is None:
what_to_p = self.stats_log.what_to_track
else:
what_to_p = []
print_update(
time_to_p,
what_to_p,
c_tick,
s_tick,
self.print_counter == 0,
)
self.print_counter += 1
def save_init_model(self, model):
"""Save initial model checkpoint."""
self.model_log.save_init_model(model)
def save_model(self, model):
"""Save a model checkpoint."""
self.model_log.save(
model, self.stats_log.clock_tracked, self.stats_log.stats_tracked
)
def save_plot(self, fig, fig_fname: Union[str, None] = None):
"""Store a figure in a experiment_id/figures directory."""
# Create main logging dir and .hdf5 sub-directory
if not self.log_setup_counter:
self.create_logging_dir(self.config_fname, self.config_dict)
self.log_setup_counter += 1
self.figure_log.save(fig, fig_fname)
write_to_hdf5(
self.log_save_fname,
self.seed_id + "/meta/fig_storage_paths",
self.figure_log.fig_storage_paths,
)
def save_extra(self, obj, obj_fname: Union[str, None] = None):
"""Helper fct. to save object (dict/etc.) as .pkl in exp. subdir."""
# Create main logging dir and .hdf5 sub-directory
if not self.log_setup_counter:
self.create_logging_dir(self.config_fname, self.config_dict)
self.log_setup_counter += 1
self.extra_log.save(obj, obj_fname)
write_to_hdf5(
self.log_save_fname,
self.seed_id + "/meta/extra_storage_paths",
self.extra_log.extra_storage_paths,
)
def save(self):
"""Create compressed .hdf5 file containing group <random-seed-id>"""
# Create main logging dir and .hdf5 sub-directory
if not self.log_setup_counter:
self.create_logging_dir(self.config_fname, self.config_dict)
self.log_setup_counter += 1
# Create "datasets" to store in the hdf5 file [time, stats]
# Store all relevant meta data (log filename, checkpoint filename)
if self.log_save_counter == 0:
data_paths = [
self.seed_id + "/meta/log_paths",
self.seed_id + "/meta/experiment_dir",
self.seed_id + "/meta/config_fname",
self.seed_id + "/meta/eval_id",
self.seed_id + "/meta/model_type",
self.seed_id + "/meta/config_dict",
]
data_to_log = [
[self.log_save_fname],
[self.experiment_dir],
[self.config_copy],
[self.base_str],
[self.model_log.model_type],
[str(self.config_dict)],
]
for i in range(len(data_paths)):
write_to_hdf5(
self.log_save_fname, data_paths[i], data_to_log[i]
)
if (
self.model_log.save_top_k_ckpt
or self.model_log.save_every_k_ckpt
):
write_to_hdf5(
self.log_save_fname,
self.seed_id + "/meta/ckpt_time_to_track",
[self.model_log.ckpt_time_to_track],
)
if self.model_log.save_top_k_ckpt:
write_to_hdf5(
self.log_save_fname,
self.seed_id + "/meta/top_k_metric_name",
[self.model_log.top_k_metric_name],
)
# Store final and initial checkpoint if provided
if self.model_log.model_save_counter > 0:
write_to_hdf5(
self.log_save_fname,
self.seed_id + "/meta/model_ckpt",
[self.model_log.final_model_save_fname],
)
if self.model_log.init_model_saved:
write_to_hdf5(
self.log_save_fname,
self.seed_id + "/meta/init_ckpt",
[self.model_log.init_model_save_fname],
)
# Store all time_to_track variables
for o_name in self.stats_log.time_to_track:
if o_name != "time":
write_to_hdf5(
self.log_save_fname,
self.seed_id + "/time/" + o_name,
self.stats_log.clock_tracked[o_name],
dtype="float32",
)
else:
write_to_hdf5(
self.log_save_fname,
self.seed_id + "/time/" + o_name,
self.stats_log.clock_tracked[o_name],
)
# Store all what_to_track variables
for o_name in self.stats_log.what_to_track:
data_to_store = self.stats_log.stats_tracked[o_name]
data_to_store = np.array(data_to_store)
if type(data_to_store[0]) == np.ndarray:
data_to_store = np.stack(data_to_store)
dtype = np.dtype("float32")
if type(data_to_store[0]) in [np.str_, str]:
dtype = "S5000"
if type(data_to_store[0]) in [bytes, np.str_]:
dtype = np.dtype("S5000")
elif type(data_to_store[0]) == int:
dtype = np.dtype("int32")
else:
dtype = np.dtype("float32")
write_to_hdf5(
self.log_save_fname,
self.seed_id + "/stats/" + o_name,
data_to_store,
dtype,
)
# Store data on stored checkpoints - stored every k updates
if self.model_log.save_every_k_ckpt is not None:
data_paths = [
self.seed_id + "/meta/" + "every_k_storage_time",
self.seed_id + "/meta/" + "every_k_ckpt_list",
]
data_to_log = [
self.model_log.every_k_storage_time,
self.model_log.every_k_ckpt_list,
]
data_types = ["int32", "S5000"]
for i in range(len(data_paths)):
write_to_hdf5(
self.log_save_fname,
data_paths[i],
data_to_log[i],
data_types[i],
)
# Store data on stored checkpoints - stored top k ckpt
if self.model_log.save_top_k_ckpt is not None:
data_paths = [
self.seed_id + "/meta/" + "top_k_storage_time",
self.seed_id + "/meta/" + "top_k_ckpt_list",
self.seed_id + "/meta/" + "top_k_performance",
]
data_to_log = [
self.model_log.top_k_storage_time,
self.model_log.top_k_ckpt_list,
self.model_log.top_k_performance,
]
data_types = ["int32", "S5000", "float32"]
for i in range(len(data_paths)):
write_to_hdf5(
self.log_save_fname,
data_paths[i],
data_to_log[i],
data_types[i],
)
# Tick the log save counter
self.log_save_counter += 1
def extend_tracking(self, add_track_vars: List[str]) -> None:
"""Add string names of variables to track."""
self.stats_log.extend_tracking(add_track_vars)
def ready_to_log(self, update_counter: int) -> bool:
"""Check whether update_counter is modulo of log_every_k_steps."""
assert (
self.log_every_j_steps is not None
), "Provide `log_every_j_steps` in your `log_config`"
return (
update_counter + 1
) % self.log_every_j_steps == 0 or update_counter == 0
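# Hedged usage sketch (added for illustration; argument values are hypothetical):
#
#   log = MLELogger(experiment_dir="experiments/",
#                   time_to_track=["num_updates"],
#                   what_to_track=["train_loss"],
#                   seed_id=0,
#                   verbose=True)
#   log.update(clock_tick={"num_updates": 10},
#              stats_tick={"train_loss": 0.42},
#              save=True)
#
# update() appends one row of time/stats data, optionally stores model, figure
# and extra objects, and save=True flushes everything to the per-seed .hdf5 log.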
|
StarcoderdataPython
|
3495386
|
import sys
import logging
from rez.vendor import colorama
from rez.config import config
from rez.utils.platform_ import platform_
_initialised = False
def _init_colorama():
global _initialised
if not _initialised:
colorama.init()
_initialised = True
def stream_is_tty(stream):
"""Return true if the stream is a tty stream.
Returns:
bool
"""
isatty = getattr(stream, 'isatty', None)
return isatty and isatty()
def critical(str_):
""" Return the string wrapped with the appropriate styling of a critical
message. The styling will be determined based on the rez configuration.
Args:
str_ (str): The string to be wrapped.
Returns:
str: The string styled with the appropriate escape sequences.
"""
return _color_level(str_, 'critical')
def error(str_):
""" Return the string wrapped with the appropriate styling of an error
message. The styling will be determined based on the rez configuration.
Args:
str_ (str): The string to be wrapped.
Returns:
str: The string styled with the appropriate escape sequences.
"""
return _color_level(str_, 'error')
def warning(str_):
""" Return the string wrapped with the appropriate styling of a warning
message. The styling will be determined based on the rez configuration.
Args:
str_ (str): The string to be wrapped.
Returns:
str: The string styled with the appropriate escape sequences.
"""
return _color_level(str_, 'warning')
def info(str_):
""" Return the string wrapped with the appropriate styling of an info
message. The styling will be determined based on the rez configuration.
Args:
str_ (str): The string to be wrapped.
Returns:
str: The string styled with the appropriate escape sequences.
"""
return _color_level(str_, 'info')
def debug(str_):
""" Return the string wrapped with the appropriate styling of a debug
message. The styling will be determined based on the rez configuration.
Args:
str_ (str): The string to be wrapped.
Returns:
str: The string styled with the appropriate escape sequences.
"""
return _color_level(str_, 'debug')
def heading(str_):
""" Return the string wrapped with the appropriate styling of a heading
message. The styling will be determined based on the rez configuration.
Args:
str_ (str): The string to be wrapped.
Returns:
str: The string styled with the appropriate escape sequences.
"""
return _color_level(str_, 'heading')
def local(str_):
""" Return the string wrapped with the appropriate styling to display a
local package. The styling will be determined based on the rez
configuration.
Args:
str_ (str): The string to be wrapped.
Returns:
str: The string styled with the appropriate escape sequences.
"""
return _color_level(str_, 'local')
def implicit(str_):
""" Return the string wrapped with the appropriate styling to display an
implicit package. The styling will be determined based on the rez
configuration.
Args:
str_ (str): The string to be wrapped.
Returns:
str: The string styled with the appropriate escape sequences.
"""
return _color_level(str_, 'implicit')
def alias(str_):
""" Return the string wrapped with the appropriate styling to display a
tool alias. The styling will be determined based on the rez configuration.
Args:
str_ (str): The string to be wrapped.
Returns:
str: The string styled with the appropriate escape sequences.
"""
return _color_level(str_, 'alias')
def notset(str_):
""" Return the string wrapped with the appropriate escape sequences to
remove all styling.
Args:
str_ (str): The string to be wrapped.
Returns:
str: The string styled with the appropriate escape sequences.
"""
return _color(str_)
def _color_level(str_, level):
""" Return the string wrapped with the appropriate styling for the message
level. The styling will be determined based on the rez configuration.
Args:
str_ (str): The string to be wrapped.
level (str): The message level. Should be one of 'critical', 'error',
'warning', 'info' or 'debug'.
Returns:
str: The string styled with the appropriate escape sequences.
"""
fore_color, back_color, styles = _get_style_from_config(level)
return _color(str_, fore_color, back_color, styles)
def _color(str_, fore_color=None, back_color=None, styles=None):
""" Return the string wrapped with the appropriate styling escape sequences.
Args:
str_ (str): The string to be wrapped.
fore_color (str, optional): Any foreground color supported by the
`Colorama`_ module.
back_color (str, optional): Any background color supported by the
`Colorama`_ module.
styles (list of str, optional): Any styles supported by the `Colorama`_
module.
Returns:
str: The string styled with the appropriate escape sequences.
.. _Colorama:
https://pypi.python.org/pypi/colorama
"""
# TODO: Colorama is documented to work on Windows, and a trivial test case
# proves this to be the case, but it doesn't work in Rez. If the initialisation
# is called in sec/rez/__init__.py then it does work; however, as discussed
# in the following comment, this is not always desirable. So until we can
# work out why, we forcibly turn it off.
if not config.get("color_enabled", False) or platform_.name == "windows":
return str_
# lazily init colorama. This is important - we don't want to init at startup,
# because colorama prints a RESET_ALL character atexit. This in turn adds
# unexpected output when capturing the output of a command run in a
# ResolvedContext, for example.
_init_colorama()
colored = ""
if not styles:
styles = []
if fore_color:
colored += getattr(colorama.Fore, fore_color.upper(), '')
if back_color:
colored += getattr(colorama.Back, back_color.upper(), '')
for style in styles:
colored += getattr(colorama.Style, style.upper(), '')
return colored + str_ + colorama.Style.RESET_ALL
def _get_style_from_config(key):
fore_color = config.get("%s_fore" % key, '')
back_color = config.get("%s_back" % key, '')
styles = config.get("%s_styles" % key, None)
return fore_color, back_color, styles
class ColorizedStreamHandler(logging.StreamHandler):
"""A stream handler for use with the Python logger.
This handler uses the `Colorama`_ module to style the log messages based
on the rez configuration.
Attributes:
STYLES (dict): A mapping between the Python logger levels and a function
that can be used to provide the appropriate styling.
.. _Colorama:
https://pypi.python.org/pypi/colorama
"""
STYLES = {
50: critical,
40: error,
30: warning,
20: info,
10: debug,
0: notset,
}
@property
def is_tty(self):
"""Return true if the stream associated with this handler is a tty
stream.
Returns:
bool
"""
return stream_is_tty(self.stream)
@property
def is_colorized(self):
return config.get("color_enabled", False) == "force" or self.is_tty
def _get_style_function_for_level(self, level):
return self.STYLES.get(level, notset)
def emit(self, record):
"""Emit a record.
If the stream associated with this handler provides tty then the record
that is emitted with be formatted to include escape sequences for
appropriate styling.
"""
try:
message = self.format(record)
if not self.is_colorized:
self.stream.write(message)
else:
style = self._get_style_function_for_level(record.levelno)
self.stream.write(style(message))
self.stream.write(getattr(self, 'terminator', '\n'))
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
class Printer(object):
def __init__(self, buf=sys.stdout):
self.buf = buf
self.colorize = (config.get("color_enabled", False) == "force") \
or stream_is_tty(buf)
def __call__(self, msg='', style=None):
print >> self.buf, self.get(msg, style)
def get(self, msg, style=None):
if style and self.colorize:
msg = style(msg)
return msg
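# Hedged usage sketch (added; not part of the original module): the handler above
# plugs into the standard logging machinery, e.g.:
#
#   import logging
#   handler = ColorizedStreamHandler(sys.stdout)
#   handler.setFormatter(logging.Formatter("%(levelname)s %(message)s"))
#   logging.getLogger("rez").addHandler(handler)
#
# Records are styled per level (warning, error, ...) only when color is enabled
# in the rez config and the stream is a tty (or color_enabled == "force").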
# Copyright 2013-2016 <NAME>.
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
|
StarcoderdataPython
|
3201366
|
from django.contrib import admin
from .models import SbAdmins, SbServers, SbProtests
admin.site.register(SbAdmins)
admin.site.register(SbServers)
admin.site.register(SbProtests)
|
StarcoderdataPython
|
12800437
|
<filename>script.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__license__ = """\
Copyright (c) 2014 <NAME> <<EMAIL>>
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\
"""
import argparse
import fileinput
import urllib2
import json
import os
import sys
import re
import librato
parser = argparse.ArgumentParser(
description="""Given a list of metrics via STDIN, this
script will query a Graphite server and
forward the results to your Librato account.""",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-n', '--node', default=1,
help='Node to extract source (hostname) from')
parser.add_argument('-p', '--prefix', default=None,
help='Prefix for metric name')
parser.add_argument('-s', '--source', default=None,
help='Source string override')
parser.add_argument('-u', '--url', default='http://127.0.0.1',
help='Graphite server URL')
args = parser.parse_args()
try:
librato_user = os.environ['LIBRATO_USER']
librato_token = os.environ['LIBRATO_TOKEN']
librato_api = librato.connect(librato_user, librato_token)
except KeyError:
print "missing env vars LIBRATO_USER or LIBRATO_TOKEN"
sys.exit(1)
# These summarized queries match up with Librato's published retentions
# 2 days at the raw resolution of the metric source
# 1 week at 1 minute resolution
# 1 month at 15 minute resolution
# 1 year at 1 hour resolution
retentions = [
"?from=-2days&target=%s&format=json",
"?from=-1week&until=-2days&target=summarize(%s,\"1min\")&format=json",
"?from=-1month&until=-1week&target=summarize(%s,\"15min\")&format=json",
"?from=-1year&until=-1month&target=summarize(%s,\"1hour\")&format=json"]
try:
for metric in fileinput.input([]):
print "Processing %s:" % metric.strip()
for query in retentions:
# construct our Graphite queries
metric = metric.strip()
uri = '/render/?' + query % metric
u1 = urllib2.urlopen(args.url + uri)
try:
# extract the requested source node
s = metric.split('.')[int(args.node)]
except IndexError:
print "invalid node index"
if args.source is not None:
if not args.source:
s = None
else:
s = args.source
normalized_metric = metric
else:
# rebuild our metric without the source
normalized_metric = metric.split('.')
normalized_metric.pop(int(args.node))
normalized_metric = '.'.join(normalized_metric)
# clean-up and apply any prefix
prefix = args.prefix
if prefix is not None:
prefix = re.sub('\s+', '', prefix)
prefix = re.sub('\.+', '.', prefix)
prefix = re.sub('^\.', '', prefix)
prefix = re.sub('\.+$', '', prefix)
normalized_metric = "%s.%s" % (prefix, normalized_metric)
# new Librato queue
q = librato_api.new_queue()
q_length = 0
# loop through our Graphite results
for datapoint in json.loads(u1.read())[0]['datapoints']:
(value, timestamp) = datapoint
# skip nulls
if value is None:
continue
# add to batch request
q.add(
normalized_metric,
value,
measure_time=timestamp,
source=s)
q_length += 1
# Finally, submit our batch request
if q_length > 0:
q.submit()
print " Archive submitted successfully"
except KeyboardInterrupt:
sys.exit(1)
|
StarcoderdataPython
|
6633856
|
# -*- coding: utf-8 -*-
# vim: sw=4 ts=4 fenc=utf-8
# =============================================================================
# $Id: accounts.py 139 2008-01-30 18:16:06Z s0undt3ch $
# =============================================================================
# $URL: http://ispmanccp.ufsoft.org/svn/trunk/ispmanccp/controllers/accounts.py $
# $LastChangedDate: 2008-01-30 18:16:06 +0000 (Wed, 30 Jan 2008) $
# $Rev: 139 $
# $LastChangedBy: s0undt3ch $
# =============================================================================
# Copyright (C) 2006 Ufsoft.org - <NAME> <<EMAIL>>
#
# Please view LICENSE for additional licensing information.
# =============================================================================
from string import uppercase, digits
from ispmanccp.lib.base import *
from ispmanccp.model.accounts import *
import logging
log = logging.getLogger(__name__)
class AccountsController(BaseController):
#@beaker_cache(expire='never')
def index(self):
"""Main Index."""
nav_1st_half = ['All']
nav_1st_half.extend(list(digits))
c.nav_2nd_half = list(uppercase)
c.nav_1st_half = nav_1st_half
c.domain = self.domain
return render('accounts.index')
#@beaker_cache(expire=180)
def userlist(self):
"""Action that returns the user list for the passed start key."""
sort_by = request.POST['sort_by']
sort_how = h.asbool(request.POST['sort_how'])
if 'None' in request.POST['letter']:
c.users = []
return render('accounts.snippets.userlist')
if 'letter' in request.POST:
start_letter = request.POST['letter']
else:
start_letter = 'All'
c.lengths, userlist = get_users_list(self.domain,
start_letter,
sortby=sort_by,
sort_ascending=sort_how)
if not userlist:
c.error = _("No results retrieved.")
else:
c.users = userlist
return render('accounts.snippets.userlist')
def search(self):
"""Action that returns an html list of entries for the
auto-complete search field."""
log.debug('searching....')
sort_by = request.POST['sort_by']
if sort_by not in ("ispmanUserId", "mailLocalAddress", "givenName", "sn"):
sort_by = "ispmanUserId"
sort_how = h.asbool(request.POST['sort_how'])
search_str = request.POST['uidsearch']
userlist = get_domain_users(
self.domain,
[
"ispmanUserId",
"mailLocalAddress",
"givenName",
"sn",
"cn",
"mailAlias",
"mailForwardingAddress"
]
)
def _search_user_attributes(user_dict):
for key, val in user_dict.iteritems():
if isinstance(val, list):
for n in range(len(val)):
if val[n].find(search_str) != -1:
return n, key, True
elif user_dict[key].find(search_str) != -1:
return None, key, True
return None, None, False
html = u'<ul>\n'
for user in userlist:
idx_found, attr_found, user_found = _search_user_attributes(user)
if user_found:
html += '<li>\n'
html += u'<span class="informal">%(cn)s</span>\n'
html += u'<div class="uid">%(ispmanUserId)s</div>\n'
if attr_found in ('mailAlias', 'mailForwardingAddress'):
pre_html = u'<div class="email">\n'
pre_html += u'<span class="informal"><em>'
pre_html += u'<b>%s</b> %s</em></span>'
pre_html += u'</div>\n'
if attr_found == 'mailAlias':
html += pre_html % (_('Alias:'),
user[attr_found][idx_found])
else:
html += pre_html % (_('Forwarding:'),
user[attr_found][idx_found])
else:
html += u'<div class="email">'
html += u'<span class="informal"><em><b>' + _('Email:')
html += '</b> %(mailLocalAddress)s</em></span>'
html += u'</div>\n'
html += u'</li>'
html = html % user
html += u'</ul>\n'
return html
def get_stored_pass(self, id):
"""Action that restores the stored password of the user."""
uid = id + '@' + self.domain
c.userinfo = {}
c.userinfo['userPassword'] = get_user_attribute_values(uid, self.domain,
'userPassword')
return render('accounts.snippets.password')
@rest.dispatch_on(POST='delete_post')
def delete(self, id):
"""Action to delete the account."""
if request.method == 'POST':
print request.POST
c.lengths, c.userinfo = get_user_info(id, self.domain)
return render('accounts.deleteuser')
@validate(template='accounts.deleteuser', schema=AccountDelete(),
form='delete')
def delete_post(self, id):
"""The real work for the above action."""
if request.method != 'POST':
redirect_to(action="delete", id=id)
retval = delete_user(request.POST)
if not retval:
session['message'] = _('Backend Error')
session.save()
self.message = 'Backend Error'
h.redirect_to(action="delete", id=id)
        session['message'] = _('Operation Successful')
session.save()
redirect_to(action="index", id=None)
#@beaker_cache(expire=180)
@rest.dispatch_on(POST='edit_post')
def edit(self, id):
"""Action to edit the account details."""
c.lengths, c.userinfo = get_user_info(id, self.domain)
        log.debug("%s %s", c.lengths, c.userinfo)
if not c.lengths and not c.userinfo:
c.domain = self.domain
c.unknown_id = id
return render('accounts.unknown')
if c.form_result:
# Form has been submited
# Assign the form_result to c.userinfo
c.lengths, c.userinfo = h.remap_user_dict(c.form_result, c.userinfo)
return render('accounts.edituser')
@validate(template='accounts.edituser', schema=AccountUpdate(), form='edit',
variable_decode=True)
def edit_post(self, id):
"""The real work for the above action, where modifications
are made permanent."""
if request.method != 'POST':
redirect_to(action='edit', id=id)
user_dict = request.POST.copy()
user_dict['uid'] = user_dict['uid'] + '@' + self.domain
uid = user_dict['uid']
retval = update_user_info(user_dict)
if not retval:
session['message'] = _('Backend Error')
session.save()
h.redirect_to(action="edit", id=id)
        session['message'] = _('Operation Successful')
session.save()
redirect_to(action="index", id=None)
@rest.dispatch_on(POST='new_post')
def new(self, id):
"""Action to create a new account."""
# Can the domain have more accounts
cur_accounts = int(get_domain_user_count(self.domain))
if self.dominfo['ispmanMaxAccounts'] == 'unlimited':
max_accounts = -1
else:
max_accounts = int(self.dominfo['ispmanMaxAccounts'])
if max_accounts != -1 and cur_accounts + 1 > max_accounts:
session['message'] = _(
'You cannot create more accounts. Allowed maximum reached.'
)
session.save()
redirect_to(action="index", id=None)
# It can, let's continue
c.defaults = get_default_acount_vars()
c.dominfo = self.dominfo
        c.password = self._generate_new_password()
if 'ispmanUserId' not in request.POST:
c.userinfo = {'ispmanUserId': u'please change me'}
if c.form_result:
c.lengths, c.userinfo = h.remap_user_dict(c.form_result,
request.POST.copy())
return render('accounts.newuser')
@validate(template='accounts.newuser', schema=AccountCreate(), form='new',
variable_decode=True)
def new_post(self, id):
"""The real work for the above action, where modifications
are made permanent."""
if request.method != 'POST':
redirect_to(action='new', id=None)
# DO SOMETHING
userinfo = request.POST.copy()
# add some account defaults
userinfo['dialupAccess'] = u'disabled'
userinfo['radiusProfileDN'] = u'cn=default, ou=radiusprofiles, ' + \
g.ldap_base_dn
userinfo['fileHost'] = self.dominfo['ispmanDomainDefaultFileServer']
if not h.asbool(userinfo['ForwardingOnly']):
userinfo['mailHost'] = self.dominfo['ispmanDomainDefaultMailDropHost']
retval = add_user(userinfo)
if not retval:
session['message'] = _('Backend Error')
session.save()
h.redirect_to(action="new", id=None)
# Choose message to display based on the account being forwarding only or not
if h.asbool(userinfo['ForwardingOnly']):
session['message'] = _(
'Account added. You now need to setup a forwarding address.'
)
else:
session['message'] = _( 'Account added. You can now setup alias '
'and/or forwarding addresses.')
session.save()
redirect_to(action="edit", id=userinfo['ispmanUserId'])
def _generate_new_password(self):
"""Private method that returns a new random password(value)."""
APP_CONF = config['app_conf']
numbers = int(APP_CONF['passwords_non_letter_min_chars'])
alpha = int(APP_CONF['passwords_min_length']) - numbers
return h.random_pass(alpha, numbers)
def generate_new_password(self):
"""Action that returns a new random password(rendered html)."""
        c.password = self._generate_new_password()
return render('accounts.snippets.newpassword')
|
StarcoderdataPython
|
6674577
|
from behaviors.behaviors import Timestamped
from django.contrib.contenttypes.models import ContentType
from django.db import models
__all__ = [
'models',
'DefaultModel',
'TimestampedModel',
]
class DefaultModel(models.Model):
class Meta:
abstract = True
def __str__(self) -> str:
"""Default name for all models"""
if hasattr(self, 'name'):
return str(self.name)
return super().__str__()
@classmethod
def get_contenttype(cls) -> ContentType:
return ContentType.objects.get_for_model(cls)
def update_from_kwargs(self, **kwargs):
"""A shortcut method to update model instance from the kwargs.
"""
for (key, value) in kwargs.items():
setattr(self, key, value)
def setattr_and_save(self, key, value):
"""Shortcut for testing -- set attribute of the model and save"""
setattr(self, key, value)
self.save()
@classmethod
def get_label(cls) -> str:
"""Get a unique within the app model label
"""
return cls._meta.label_lower.split('.')[-1]
class TimestampedModel(DefaultModel, Timestamped):
"""
Default app model that has `created` and `updated` attributes.
Currently based on https://github.com/audiolion/django-behaviors
"""
class Meta:
abstract = True
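
# A hedged usage sketch (not part of the original module): a concrete model built on the
# base classes above. The model name and field are illustrative assumptions.
#
# class Course(TimestampedModel):
#     name = models.CharField(max_length=128)
#
# str(Course(name="Algebra"))  -> "Algebra"   (DefaultModel.__str__ falls back to `name`)
# Course.get_label()           -> "course"    (lower-cased model label without the app prefix)
# Course.get_contenttype()     -> the ContentType row registered for Course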
|
StarcoderdataPython
|
9665261
|
<filename>test/e2e/service_bootstrap.py
# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may
# not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Bootstraps the resources required to run Elasticache integration tests.
"""
import boto3
import logging
from dataclasses import dataclass
from acktest.aws.identity import get_account_id, get_region
from acktest.resources import random_suffix_name
from acktest import resources
from e2e import bootstrap_directory
from e2e.util import wait_usergroup_active, wait_snapshot_available
from e2e.bootstrap_resources import TestBootstrapResources
def create_sns_topic() -> str:
topic_name = random_suffix_name("ack-sns-topic", 32)
sns = boto3.client("sns")
response = sns.create_topic(Name=topic_name)
logging.info(f"Created SNS topic {response['TopicArn']}")
return response['TopicArn']
# create an EC2 VPC security group from the default VPC (not an ElastiCache security group)
def create_security_group() -> str:
region = get_region()
account_id = get_account_id()
ec2 = boto3.client("ec2")
vpc_response = ec2.describe_vpcs(Filters=[{"Name": "isDefault", "Values": ["true"]}])
if len(vpc_response['Vpcs']) == 0:
raise ValueError(f"Default VPC not found for account {account_id} in region {region}")
default_vpc_id = vpc_response['Vpcs'][0]['VpcId']
sg_name = random_suffix_name("ack-security-group", 32)
sg_description = "Security group for ACK ElastiCache tests"
sg_response = ec2.create_security_group(GroupName=sg_name, VpcId=default_vpc_id, Description=sg_description)
logging.info(f"Created VPC Security Group {sg_response['GroupId']}")
return sg_response['GroupId']
def create_user_group() -> str:
ec = boto3.client("elasticache")
usergroup_id = random_suffix_name("ack-ec-usergroup", 32)
_ = ec.create_user_group(UserGroupId=usergroup_id,
Engine="Redis",
UserIds=["default"])
logging.info(f"Creating ElastiCache User Group {usergroup_id}")
assert wait_usergroup_active(usergroup_id)
return usergroup_id
def create_kms_key() -> str:
kms = boto3.client("kms")
response = kms.create_key(Description="Key for ACK ElastiCache tests")
key_id = response['KeyMetadata']['KeyId']
logging.info(f"Created KMS key {key_id}")
return key_id
# create a cache cluster, snapshot it, and return the snapshot name
def create_cc_snapshot():
ec = boto3.client("elasticache")
cc_id = random_suffix_name("ack-cache-cluster", 32)
_ = ec.create_cache_cluster(
CacheClusterId=cc_id,
NumCacheNodes=1,
CacheNodeType="cache.m6g.large",
Engine="redis"
)
waiter = ec.get_waiter('cache_cluster_available')
waiter.wait(CacheClusterId=cc_id)
logging.info(f"Created cache cluster {cc_id} for snapshotting")
snapshot_name = random_suffix_name("ack-cc-snapshot", 32)
_ = ec.create_snapshot(
CacheClusterId=cc_id,
SnapshotName=snapshot_name
)
assert wait_snapshot_available(snapshot_name)
return snapshot_name
def create_non_default_user() -> str:
ec = boto3.client("elasticache")
user_id = random_suffix_name("ackecuser", 32)
_ = ec.create_user(UserId=user_id,
UserName="ACKNonDefaultUser",
Engine="Redis",
NoPasswordRequired=True,
AccessString="on -@all")
logging.info(f"Creating ElastiCache non default User {user_id}")
return user_id
def create_log_group():
logs = boto3.client("logs")
log_group_name = random_suffix_name("ack-cw-log-group", 32)
logs.create_log_group(logGroupName=log_group_name)
logging.info(f"Create CW log group {log_group_name}")
return log_group_name
def service_bootstrap() -> dict:
logging.getLogger().setLevel(logging.INFO)
return TestBootstrapResources(
create_sns_topic(),
create_security_group(),
create_user_group(),
create_kms_key(),
create_cc_snapshot(),
create_non_default_user(),
create_log_group()
).__dict__
if __name__ == "__main__":
config = service_bootstrap()
resources.write_bootstrap_config(config, bootstrap_directory)
|
StarcoderdataPython
|
9614392
|
<reponame>stevenc987/sbc-auth
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests to verify the Tasks API end-point.
Test-Suite to ensure that the /tasks endpoint is working as expected.
"""
from auth_api import status as http_status
from tests.utilities.factory_utils import (factory_auth_header,
factory_task_service, factory_user_model)
from tests.utilities.factory_scenarios import TestJwtClaims
from auth_api.schemas import utils as schema_utils
from auth_api.utils.enums import TaskRelationshipType
def test_fetch_tasks(client, jwt, session): # pylint:disable=unused-argument
"""Assert that the tasks can be fetched."""
user = factory_user_model()
factory_task_service(user.id)
task_type = TaskRelationshipType.ORG.value
headers = factory_auth_header(jwt=jwt, claims=TestJwtClaims.staff_role)
rv = client.get('/api/v1/tasks?type={}'.format(task_type), headers=headers, content_type='application/json')
item_list = rv.json
assert schema_utils.validate(item_list, 'task_response')[0]
assert rv.status_code == http_status.HTTP_200_OK
def test_fetch_tasks_with_status(client, jwt, session): # pylint:disable=unused-argument
"""Assert that the tasks can be fetched."""
user = factory_user_model()
factory_task_service(user.id)
headers = factory_auth_header(jwt=jwt, claims=TestJwtClaims.staff_role)
rv = client.get('/api/v1/tasks?type=ORG&status=OPEN', headers=headers, content_type='application/json')
item_list = rv.json
assert schema_utils.validate(item_list, 'task_response')[0]
assert rv.status_code == http_status.HTTP_200_OK
|
StarcoderdataPython
|
1722053
|
from datetime import date
from rest_framework import status
from apps.api.tests.base import ApiTest
class DynamicsUserApiTest(ApiTest):
def test_read(self):
usd = self.create_currency("usd")
eur = self.create_currency("eur")
ai95 = self.create_fuel("95")
at = date(year=2019, month=1, day=13)
self.create_price_history(at=at, currency=eur, fuel=ai95, value=321)
self.create_price_history(at=at, currency=usd, fuel=ai95, value=123)
headers = {"HTTP_AUTHORIZATION": self.user_token}
response = self.client.get(f"/api/v1/dynamics/", **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
payload = response.json()
self.assertIsInstance(payload, list)
self.assertEqual(len(payload), 1)
record = payload[0]
self.assertIsInstance(record, dict)
self.assertEqual(record["at"], at.strftime("%Y-%m-%d"))
self.assertIn("fuels", record)
fuels = record["fuels"]
self.assertIsInstance(fuels, list)
self.assertEqual(len(fuels), 1)
fuel = fuels[0]
self.assertIsInstance(fuel, dict)
self.assertEqual(fuel["fuel"]["name"], ai95.name)
self.assertIn("prices", fuel)
prices = fuel["prices"]
self.assertIsInstance(prices, list)
self.assertEqual(len(prices), 2)
for price, (currency, value) in zip(prices, ((eur, 321), (usd, 123))):
self.assertEqual(price["currency"]["name"], currency.name)
self.assertEqual(price["currency"]["symbol"], currency.symbol)
self.assertEqual(int(float(price["value"])), value)
def test_create(self):
headers = {"HTTP_AUTHORIZATION": self.user_token}
response = self.client.post("/api/v1/dynamics/", data={}, **headers)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_update(self):
headers = {"HTTP_AUTHORIZATION": self.user_token}
response = self.client.put("/api/v1/dynamics/1/", data={}, **headers)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
response = self.client.patch("/api/v1/dynamics/1/", data={}, **headers)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_delete(self):
headers = {"HTTP_AUTHORIZATION": self.user_token}
response = self.client.delete("/api/v1/dynamics/1/", **headers)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
|
StarcoderdataPython
|
8176442
|
# -*- coding: utf-8 -*-
# Copyright 2020 The PsiZ Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Minkowski pairwise distance layer.
Classes:
Minkowski: A TensorFlow layer for computing (weighted) minkowski
distance.
"""
import tensorflow as tf
from tensorflow.python.keras import backend as K
import psiz.keras.constraints as pk_constraints
from psiz.keras.layers.ops.core import wpnorm
@tf.keras.utils.register_keras_serializable(
package='psiz.keras.layers', name='Minkowski'
)
class Minkowski(tf.keras.layers.Layer):
"""Minkowski pairwise distance.
A pairwise distance layer that consumes the last axis of the input
tensors (see `call` method).
NOTE: It is assumed that both tensors have the same rank, are
broadcast-compatible, and have the same size for the last axis.
"""
def __init__(
self, rho_trainable=True, rho_initializer=None,
rho_regularizer=None, rho_constraint=None, w_trainable=True,
w_initializer=None, w_regularizer=None, w_constraint=None,
**kwargs):
"""Initialize.
Arguments:
rho_trainable (optional):
rho_initializer (optional):
rho_regularizer (optional):
rho_constraint (optional):
w_trainable (optional):
w_initializer (optional):
w_regularizer (optional):
w_constraint (optional):
"""
super(Minkowski, self).__init__(**kwargs)
self.rho_trainable = self.trainable and rho_trainable
if rho_initializer is None:
rho_initializer = tf.random_uniform_initializer(1., 2.)
self.rho_initializer = tf.keras.initializers.get(rho_initializer)
self.rho_regularizer = tf.keras.regularizers.get(rho_regularizer)
if rho_constraint is None:
rho_constraint = pk_constraints.GreaterEqualThan(min_value=1.0)
self.rho_constraint = tf.keras.constraints.get(rho_constraint)
with tf.name_scope(self.name):
self.rho = self.add_weight(
shape=[], initializer=self.rho_initializer,
regularizer=self.rho_regularizer, trainable=self.rho_trainable,
name="rho", dtype=K.floatx(),
constraint=self.rho_constraint
)
self.w_trainable = self.trainable and w_trainable
if w_initializer is None:
w_initializer = tf.random_uniform_initializer(1.01, 3.)
self.w_initializer = tf.keras.initializers.get(w_initializer)
self.w_regularizer = tf.keras.regularizers.get(w_regularizer)
if w_constraint is None:
w_constraint = tf.keras.constraints.NonNeg()
self.w_constraint = tf.keras.constraints.get(w_constraint)
def build(self, input_shape):
"""Build."""
with tf.name_scope(self.name):
self.w = self.add_weight(
shape=[input_shape[0][-1]], initializer=self.w_initializer,
regularizer=self.w_regularizer, trainable=self.w_trainable,
name="w", dtype=K.floatx(), constraint=self.w_constraint
)
def call(self, inputs):
"""Call.
Arguments:
inputs: A list of two tf.Tensor's denoting a the set of
vectors to compute pairwise distances. Each tensor is
assumed to have the same shape and be at least rank-2.
Any additional tensors in the list are ignored.
shape = (batch_size, [n, m, ...] n_dim)
Returns:
shape = (batch_size, [n, m, ...])
"""
z_0 = inputs[0]
z_1 = inputs[1]
x = z_0 - z_1
# Broadcast `rho` and `w` to appropriate shape.
x_shape = tf.shape(x)
# Broadcast `rho` to shape=(batch_size, [n, m, ...]).
rho = self.rho * tf.ones(x_shape[0:-1])
# Broadcast `w` to shape=(batch_size, [n, m, ...] n_dim).
w = tf.broadcast_to(self.w, x_shape)
# Weighted Minkowski distance.
d_qr = wpnorm(x, w, rho)
d_qr = tf.squeeze(d_qr, [-1])
return d_qr
def get_config(self):
"""Return layer configuration."""
config = super().get_config()
config.update({
'rho_initializer':
tf.keras.initializers.serialize(self.rho_initializer),
'w_initializer':
tf.keras.initializers.serialize(self.w_initializer),
'rho_regularizer':
tf.keras.regularizers.serialize(self.rho_regularizer),
'w_regularizer':
tf.keras.regularizers.serialize(self.w_regularizer),
'rho_constraint':
tf.keras.constraints.serialize(self.rho_constraint),
'w_constraint':
tf.keras.constraints.serialize(self.w_constraint),
'rho_trainable': self.rho_trainable,
'w_trainable': self.w_trainable,
})
return config
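
# A minimal usage sketch (hedged): it assumes the `psiz` package imported above is installed;
# the batch size and dimensionality below are illustrative assumptions.
if __name__ == "__main__":
    layer = Minkowski()
    z_0 = tf.random.uniform([4, 3])  # (batch_size, n_dim)
    z_1 = tf.random.uniform([4, 3])
    distances = layer([z_0, z_1])    # weighted Minkowski distance, shape (batch_size,)
    print(distances.shape)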
|
StarcoderdataPython
|
3233026
|
import random
import json
import nltk
import torch
import transformers
from gtts import gTTS
import speech_recognition as sr
import os
import playsound
import config
import pyjokes
from nltk.stem.porter import PorterStemmer
import numpy as np
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
class Prepare_Data():
def __init__(self, json_file, ignore_words):
self.json_file = json_file
self.patterns = []
self.all_words = []
self.tags = []
self.xy = []
self.X_train = []
self.y_train = []
self.ignore_words = ignore_words
self.stemmer = PorterStemmer()
def tokenize(self, sentence):
return nltk.word_tokenize(sentence)
def stem(self, word):
return self.stemmer.stem(word.lower())
def bag_of_words(self, tokenized_sentence, words):
sentence_words = [self.stem(word) for word in tokenized_sentence]
# initialize bag with 0 for each word
bag = np.zeros(len(words), dtype=np.float32)
for idx, w in enumerate(words):
if w in sentence_words:
bag[idx] = 1
return bag
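    # Illustrative example (a hedged sketch, not from the original file): with
    # words = ["hi", "how", "are", "you"] and tokenized_sentence = ["How", "are", "you"],
    # bag_of_words returns array([0., 1., 1., 1.], dtype=float32).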
def load_json(self):
with open(self.json_file, 'r') as file:
self.intents = json.load(file)
return self.intents
@staticmethod
def text_to_speech(text):
print(text)
speaker = gTTS(text=text, lang="en", slow=False)
speaker.save("a.mp3")
playsound.playsound("a.mp3")
os.remove("a.mp3")
def speech_to_text(self):
recognizer = sr.Recognizer()
with sr.Microphone() as source:
self.text_to_speech("listening...")
audio = recognizer.listen(source)
recognizer.pause_threshold = 1
try:
self.text = recognizer.recognize_google(audio)
print(self.text)
except Exception:
self.text = "say that again.."
return self.text
def prs1(self):
for intent in self.load_json()['intents']:
tag = intent['tag']
self.tags.append(tag)
for pattern in intent['patterns']:
w = self.tokenize(pattern)
self.all_words.extend(w)
self.xy.append((w, tag))
pattern = pattern.lower()
self.patterns.append(pattern)
self.all_words = [self.stem(w) for w in self.all_words if w not in self.ignore_words]
self.all_words = sorted(set(self.all_words))
self.tags = sorted(set(self.tags))
for (pattern_sentence, tag) in self.xy:
bag = self.bag_of_words(pattern_sentence, self.all_words)
self.X_train.append(bag)
label = self.tags.index(tag)
self.y_train.append(label)
self.X_train = np.array(self.X_train)
self.y_train = np.array(self.y_train)
return self.tags, self.all_words, self.patterns, self.X_train, self.y_train
class ChatDataset(Dataset):
def __init__(self):
self.prepare = Prepare_Data(json_file, ignore_words)
self.tags, self.all_words, self.patterns, self.X_train, self.y_train = self.prepare.prs1()
self.n_samples = len(self.X_train)
self.x_data = self.X_train
self.y_data = self.y_train
def __getitem__(self, index):
return self.x_data[index], self.y_data[index]
def __len__(self):
return self.n_samples
class NeuralNet(nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(NeuralNet, self).__init__()
self.l1 = nn.Linear(input_size, hidden_size)
self.l2 = nn.Linear(hidden_size, hidden_size)
self.l3 = nn.Linear(hidden_size, num_classes)
self.relu = nn.ReLU()
def forward(self, x):
out = self.l1(x)
out = self.relu(out)
out = self.l2(out)
out = self.relu(out)
out = self.l3(out)
return out
class Train():
def __init__(self):
self.num_epochs = config.NUM_EPOCHS
self.batch_size = config.BATCH_SIZE
self.learning_rate = config.LEARNING_RATE
self.input_size = len(X_train[0])
self.hidden_size = config.HIDDEN_SIZE
self.num_classes = len(tags)
self.dataset = ChatDataset()
self.train_loader = DataLoader(dataset=self.dataset,
batch_size=config.BATCH_SIZE,
shuffle=True,
num_workers=0)
self.model = NeuralNet(self.input_size, self.hidden_size, self.num_classes)
self.criterion = nn.CrossEntropyLoss()
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=config.LEARNING_RATE)
self.prepare = Prepare_Data(json_file, ignore_words)
self.tags, self.all_words,_,_,_ = self.prepare.prs1()
def train(self):
for epoch in range(self.num_epochs):
global loss
for (words, labels) in self.train_loader:
words = words.to(config.DEVICE)
labels = labels.to(dtype=torch.long).to(config.DEVICE)
outputs = self.model(words)
loss = self.criterion(outputs, labels)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
if (epoch + 1) % 100 == 0:
print(f'Epoch [{epoch + 1}/{self.num_epochs}], Loss: {loss.item():.4f}')
print(f'final loss: {loss.item():.4f}')
data = {
"model_state": self.model.state_dict(),
"input_size": self.input_size,
"hidden_size": self.hidden_size,
"output_size": self.num_classes,
"all_words": self.all_words,
"tags": self.tags
}
if loss < 0.001:
FILE = "data.pth"
torch.save(data, FILE)
print(f'training complete. file saved to {FILE}')
class ChatBot():
def __init__(self):
self.tools = Prepare_Data(json_file, ignore_words)
self.speech_to_text = self.tools.speech_to_text
self.text_to_speech = self.tools.text_to_speech
self.intents = self.tools.load_json()
#self.tags, self.all_words, self.patterns, self.X_train, self.y_train =
self.tags = self.tools.tags
self.tokenize = self.tools.tokenize
self.bag_of_words = self.tools.bag_of_words
def load_model(self, model_file):
data = torch.load(model_file)
input_size = data["input_size"]
hidden_size = data["hidden_size"]
output_size = data["output_size"]
model_state = data["model_state"]
tags = data["tags"]
model = NeuralNet(input_size, hidden_size, output_size).to(config.DEVICE)
model.load_state_dict(model_state)
model.eval()
return model, tags
def chat(self):
nlp = transformers.pipeline("conversational", model="microsoft/DialoGPT-large", pretrained=True)
os.environ["TOKENIZERS_PARALLELISM"] = "true"
while True:
sentence = self.speech_to_text()
if any(i in sentence for i in ["ok quit", "quit", "shutup", "go home"]):
r = ["have fun", "see you later", "ok bye"]
self.text_to_speech(random.choice(r))
quit()
elif "joke" in sentence:
joke = pyjokes.get_joke(language="en", category="all")
res = joke
if any(i in sentence for i in patterns):
in_ = self.tokenize(sentence)
X = self.bag_of_words(in_, all_words)
X = X.reshape(1, X.shape[0])
X = torch.from_numpy(X).to(config.DEVICE)
model, tags = self.load_model(model_file)
output = model(X)
_, predicted = torch.max(output, dim=1)
tag = tags[predicted.item()]
probs = torch.softmax(output, dim=1)
prob = probs[0][predicted.item()]
if prob.item() > 0.75:
for intent in self.intents['intents']:
if tag == intent['tag']:
res = random.choice(intent['responses'])
else:
res = "none"
if any(i in res for i in ["none", "None"]):
chat = nlp(transformers.Conversation(sentence), pad_token_id=50256)
res = str(chat)
res = res[res.find("bot >> ") + 6:].strip()
self.text_to_speech(res)
if __name__ == '__main__':
json_file = "myintents.json"
ignore_words = ["?", "!"]
prepare = Prepare_Data(json_file, ignore_words)
tags, all_words, patterns, X_train, y_train = prepare.prs1()
# for training uncomment
#train = Train()
#train.train()
model_file = "data.pth"
#chat
chat_bot = ChatBot()
chat_bot.chat()
|
StarcoderdataPython
|
4895067
|
<reponame>dbluhm/aries-staticagent-python
"""Cron script example.
This file is intended to be run as a cron script. Upon execution, it does
it's thing and shuts down.
"""
from aries_staticagent import Connection, utils
from common import config
def main():
"""Send message from cron job."""
keys, target, _args = config()
conn = Connection(keys, target)
conn.send(
{
"@type": "did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/basicmessage/1.0/message",
"~l10n": {"locale": "en"},
"sent_time": utils.timestamp(),
"content": "The Cron script was executed.",
}
)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
1815715
|
<reponame>koatse/heroku_helloworld
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-04-01 16:49
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('cities_light', '0006_compensate_for_0003_bytestring_bug'),
]
operations = [
migrations.CreateModel(
name='MyGeo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('city', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cities_light.City')),
],
),
migrations.CreateModel(
name='Province',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('province', models.CharField(max_length=30)),
],
),
]
|
StarcoderdataPython
|
6547145
|
<filename>sampledb/scripts/run.py
# coding: utf-8
"""
Script for running the SampleDB server.
Usage: python -m sampledb run [<port>]
"""
import sys
import cherrypy
from .. import create_app
def main(arguments):
if len(arguments) > 1:
print(__doc__)
exit(1)
if arguments:
port = arguments[0]
try:
port = int(port)
if port < 1024 or port > 65535:
raise ValueError()
except ValueError:
print("Error: port must be between 1024 and 65535", file=sys.stderr)
exit(1)
else:
port = 8000
app = create_app()
cherrypy.tree.graft(app, app.config['SERVER_PATH'])
cherrypy.config.update({
'environment': 'production',
'server.socket_host': '0.0.0.0',
'server.socket_port': port,
'server.socket_queue_size': 20,
'log.screen': True
})
cherrypy.engine.start()
cherrypy.engine.block()
|
StarcoderdataPython
|
3599342
|
<reponame>sourcery-ai-bot/APRManager
import json
import os
class Utils:
@staticmethod
def validate_json(path_to_json):
if (
os.path.isfile(path_to_json)
and os.path.getsize(path_to_json) == 0
or not os.path.isfile(path_to_json)
):
with open(path_to_json, "w") as json_file:
json.dump({}, json_file)
class PathMagic:
@staticmethod
def set(main_dict: dict, path: str, *, key: str, value):
def magic(alt_dict: dict, key: str):
if key in alt_dict and isinstance(alt_dict[key], dict):
return alt_dict
alt_dict[key] = {}
return alt_dict
main_dict_ref, i = main_dict, 0
for dict_name in path.split("+"):
i += 1
main_dict = magic(main_dict, dict_name)[dict_name]
if i == len(path.split("+")):
main_dict[key] = value
return main_dict_ref
@staticmethod
def get(main_dict: dict, path: str, *, key, default=None):
for dict_name in path.split("+"):
try:
main_dict = main_dict[dict_name]
except (KeyError, TypeError, AttributeError):
return default
return main_dict.get(key, default)
@staticmethod
def rem(main_dict: dict, path: str, *, key):
main_dict_ref, i = main_dict, 0
for dict_name in path.split("+"):
try:
i += 1
main_dict = main_dict[dict_name]
if i == len(path.split("+")):
main_dict.pop(key, None)
except (KeyError, TypeError, AttributeError):
return main_dict_ref
return main_dict_ref
class JSONx:
def __init__(self, path_to_json: str):
self.path_to_json = path_to_json
self.utils = Utils
self.utils.validate_json(path_to_json)
def set(self, key: str, value, *, pathmagic=""):
self.utils.validate_json(self.path_to_json)
with open(self.path_to_json, mode="r") as json_file:
json_data = json.load(json_file)
with open(self.path_to_json, mode="w") as json_file:
if pathmagic == "":
json_data[key] = value
json.dump(json_data, json_file, indent=4)
else:
json.dump(self.utils.PathMagic.set(
json_data, pathmagic, key=key, value=value), json_file, indent=4)
def get(self, key: str, *, default=None, pathmagic=""):
self.utils.validate_json(self.path_to_json)
with open(self.path_to_json, mode="r") as json_file:
json_data = json.load(json_file)
if pathmagic == "":
return json_data.get(key, default)
else:
return self.utils.PathMagic.get(json_data, pathmagic, key=key, default=default)
def all(self):
self.utils.validate_json(self.path_to_json)
with open(self.path_to_json, mode="r") as json_file:
return json.load(json_file)
def rem(self, key: str, *, pathmagic=""):
self.utils.validate_json(self.path_to_json)
with open(self.path_to_json, mode="r") as json_file:
json_data = json.load(json_file)
with open(self.path_to_json, mode="w") as json_file:
if pathmagic == "":
json_data.pop(key, None)
json.dump(json_data, json_file, indent=4)
else:
json.dump(self.utils.PathMagic.rem(
json_data, pathmagic, key=key), json_file, indent=4)
def nuke(self):
with open(self.path_to_json, mode="w") as json_file:
json.dump({}, json_file)
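
# A hedged usage sketch (not part of the original module); "example.json" and the keys
# below are illustrative assumptions.
if __name__ == "__main__":
    store = JSONx("example.json")
    store.set("theme", "dark", pathmagic="settings+ui")  # writes {"settings": {"ui": {"theme": "dark"}}}
    print(store.get("theme", pathmagic="settings+ui"))   # -> dark
    store.rem("theme", pathmagic="settings+ui")          # removes only the nested key
    store.nuke()                                         # resets the file to {}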
|
StarcoderdataPython
|
6670557
|
from .event_logger import EventLogger # noqa: F401
|
StarcoderdataPython
|
9709862
|
<reponame>FDUJiaG/PyML-Course
from sklearn.datasets import fetch_20newsgroups  # import the fetch_20newsgroups news dataset loader
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer  # import the text feature vectorization module
from sklearn.naive_bayes import MultinomialNB  # import the naive Bayes model
from sklearn.metrics import classification_report
# 1 Data acquisition
news = fetch_20newsgroups(subset='all')
print(len(news.data), len(news.target_names))
# 2 Preprocessing: split into training and test sets, then vectorize the text features
X_train, X_test, y_train, y_test = train_test_split(
    news.data, news.target, test_size=0.25, random_state=33
)  # randomly sample 25% of the data as the test set
print(X_train[0])  # inspect a training sample
print(y_train[0:100])  # inspect the labels
# text feature vectorization
vec = CountVectorizer()
X_train = vec.fit_transform(X_train)
X_test = vec.transform(X_test)
# 3 Train a naive Bayes classifier
mnb = MultinomialNB()  # initialize naive Bayes with the default configuration
mnb.fit(X_train, y_train)  # estimate the model parameters from the training data
y_predict = mnb.predict(X_test)  # predict on the test set
# 4 Report the results
print('The Accuracy of Naive Bayes Classifier is:', mnb.score(X_test, y_test))
print(classification_report(y_test, y_predict, target_names=news.target_names))
|
StarcoderdataPython
|
381612
|
from evdev import InputDevice, list_devices, ecodes
import robot
def store_x(value):
    global current_x
    current_x = value
def store_y(value):
    global current_y
    current_y = value
def limit_value(value):
if value > 100:
value = 100
elif value < -100:
value = -100
return value
def update_motor_powers():
power_l = limit_value((current_y / 32768 * 100) + (current_x / 32768 * 100))
power_r = limit_value((current_y / 32768 * 100) - (current_x / 32768 * 100))
robot.left (power_l)
robot.right(power_r)
devices = [InputDevice(device) for device in list_devices()]
keyboard = devices[0]
print(keyboard)
keypress_actions = {
ecodes.ABS_X: store_x,
ecodes.ABS_Y: store_y
}
current_x = 0
current_y = 0
power_l = 0
power_r = 0
try:
#with open("values.log", "w") as log:
for event in keyboard.read_loop():
if event.code in keypress_actions:
#print(categorized, categorized.event.code, categorized.event.sec, categorized.event.timestamp, categorized.event.type, categorized.event.usec, categorized.event.value, file=log)
keypress_actions[event.code](event.value)
update_motor_powers()
#print("x:", current_x, "y:", current_y, "power_l:", power_l, "power_r:", power_r, file=log)
except KeyboardInterrupt:
robot.stop()
|
StarcoderdataPython
|
11318154
|
"""This module contains functions to help manage configuration for the
offline analysis of LSST Electrical-Optical testing"""
import os
import numpy as np
CONFIG_DIR = None
def is_none(val):
"""Check to see if a value is none"""
return val in [None, 'none', 'None', np.nan]
def is_not_none(val):
"""Check to see if a value is not none"""
return val not in [None, 'none', 'None', np.nan]
class CfgDir:
""" Tiny class to find configuration files"""
def __init__(self):
""" Constructor """
self.config_dir = None
def set_dir(self, val):
""" Set the top-level configuration directory"""
self.config_dir = val
def get_dir(self):
""" Get the top-level configuration directory"""
return self.config_dir
def cfg_path(self, val):
""" Build a path using the top-level configuration directory """
return os.path.join(self.config_dir, val)
CFG_DIR = CfgDir()
set_config_dir = CFG_DIR.set_dir
get_config_dir = CFG_DIR.get_dir
cfg_path = CFG_DIR.cfg_path
def copy_dict(in_dict, def_dict):
"""Copy a set of key-value pairs to an new dict
Parameters
----------
in_dict : `dict`
The dictionary with the input values
def_dict : `dict`
The dictionary with the default values
Returns
-------
outdict : `dict`
Dictionary with arguments selected from in_dict to overide def_dict
"""
outdict = {key:in_dict.get(key, val) for key, val in def_dict.items()}
return outdict
def pop_values(in_dict, keylist):
"""Pop a set of key-value pairs to an new dict
Parameters
----------
in_dict : `dict`
The dictionary with the input values
keylist : `list`
The values to pop
Returns
-------
outdict : `dict`
Dictionary with only the arguments we have selected
"""
outdict = {}
for key in keylist:
if key in in_dict:
outdict[key] = in_dict.pop(key)
return outdict
def update_dict_from_string(o_dict, key, val, subparser_dict=None):
"""Update a dictionary with sub-dictionaries
Parameters
----------
o_dict : dict
The output
key : `str`
The string we are parsing
val : `str`
The value
subparser_dict : `dict` or `None`
The subparsers used to parser the command line
"""
idx = key.find('.')
use_key = key[0:idx]
remain = key[idx+1:]
if subparser_dict is not None:
try:
subparser = subparser_dict[use_key[1:]]
except KeyError:
subparser = None
else:
subparser = None
if use_key not in o_dict:
o_dict[use_key] = {}
def_val = None
if subparser is not None:
def_val = subparser.get_default(remain)
if def_val == val:
return
if remain.find('.') < 0:
o_dict[use_key][remain] = val
else:
update_dict_from_string(o_dict[use_key], remain, val)
def expand_dict_from_defaults_and_elements(default_dict, elem_dict):
"""Expand a dictionary by copying defaults to a set of elements
Parameters
----------
default_dict : `dict`
The defaults
elem_dict : `dict`
The elements
Returns
-------
o_dict : `dict`
The output dict
"""
o_dict = {}
for key, elem in elem_dict.items():
o_dict[key] = default_dict.copy()
if elem is None:
continue
o_dict[key].update(elem)
return o_dict
def read_txt_to_np(fname):
""" Read a txt file to a numpy array """
ext = os.path.splitext(fname)[-1]
if ext.lower() == '.txt':
delim = None
elif ext.lower() == '.csv':
delim = ','
else:
raise ValueError("File %s is not csv or txt")
return np.loadtxt(fname, unpack=True, dtype=np.float, delimiter=delim)
def reshape_array(val, shape):
""" Reshape an array, but not a scalar
This is useful for broadcasting many arrays to the same shape
"""
if np.isscalar(val):
return val
return val.reshape(shape)
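
# A hedged usage sketch (not part of the original module); the keys and values below are
# illustrative assumptions.
if __name__ == "__main__":
    defaults = {'gain': 1.0, 'bias': 0.0}
    print(copy_dict({'gain': 2.5, 'extra': True}, defaults))   # {'gain': 2.5, 'bias': 0.0}
    in_dict = {'gain': 2.5, 'extra': True}
    print(pop_values(in_dict, ['extra']))                      # {'extra': True}; 'extra' is popped from in_dict
    o_dict = {}
    update_dict_from_string(o_dict, 'amp.gain', 3.0)           # dotted key becomes a nested dict
    print(o_dict)                                              # {'amp': {'gain': 3.0}}
    print(expand_dict_from_defaults_and_elements(defaults, {'S00': None, 'S01': {'gain': 4.0}}))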
|
StarcoderdataPython
|
11240526
|
"""
Command-line entry point.
"""
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import logging
import sys
import os
from chromalog import basicConfig
from .configuration import load_from_file
from .log import logger
from .displays import StreamDisplay
from .compat import yaml_dump
class PairsParser(argparse.Action):
"""
Parses pairs of `key:value` arguments.
"""
def __call__(self, parser, namespace, values, option_string=None):
pairs = []
for value in values:
if ':' not in value:
raise ValueError(
"{} does not respect the `key:value` format".format(value),
)
pairs.append(tuple(value.split(':', 2)))
setattr(namespace, self.dest, frozenset(pairs))
def parse_args(args):
"""
Parse the arguments.
:param args: The arguments to parse.
:returns: A namespace instance.
"""
parser = argparse.ArgumentParser(
description="Plix - a build matrix runner that cares about humans.",
)
parser.add_argument(
'--debug',
'-d',
action='store_true',
default=False,
help="Enable debug output.",
)
parser.add_argument(
'--configuration',
'-c',
default='.plix.yml',
type=load_from_file,
help="The configuration file to use.",
)
parser.add_argument(
'pairs',
nargs='*',
default=[],
action=PairsParser,
help="A list of matrix context pairs that will limit the build.",
)
try:
return parser.parse_args(args)
except Exception as ex:
logger.error("%s", ex)
raise SystemExit(1)
def main(args=sys.argv[1:], display=StreamDisplay(stream=sys.stdout)):
basicConfig(format='%(message)s', level=logging.INFO)
params = parse_args(args=args)
if params.debug:
logger.setLevel(logging.DEBUG)
logger.debug("Debug mode enabled.")
logger.debug(
"Parsed configuration is shown below:\n\n%s\n",
yaml_dump(params.configuration, indent=2),
)
params.configuration['executor'].execute(
environment=os.environ,
commands=params.configuration['script'],
display=display,
)
|
StarcoderdataPython
|
3261640
|
from SimpleGraphics import*
from math import*
import random
background("deep sky blue")
side=500
a2=100
a1=500
b,c,d=a1-side/2,a1+side/2,a2+side*(sqrt(3)/2)
setFill("pink")
vertex1=(a1,a2)
vertex2=(b,d)
vertex3=(c,d)
print(vertex1,vertex2,vertex3)
polygon(vertex1[0],vertex1[1],vertex2[0],vertex2[1],vertex3[0],vertex3[1])
#polygon(300, 450, 350, 450, 500, 500, 500, 550, 450, 550, 300, 500)
#line(150, 300, 200, 350)
#line(100, 350, 100, 250, 200, 250, 200, 300)
i=0
pointy=random.uniform(a2,d)
pointx=random.uniform(a1-(-a2+pointy)/sqrt(3),a1+(-a2+pointy)/sqrt(3))
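# Chaos game: starting from a random point inside the triangle, repeatedly jump halfway
# toward a randomly chosen vertex; the plotted points converge to a Sierpinski triangle.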
for i in range(100000):
i+=1
nvertex=random.randint(1,3)
setFill("black")
ellipse(pointx,pointy,1,1)
if nvertex==1:
pointx=(vertex1[0]+pointx)/2
pointy=(vertex1[1]+pointy)/2
if nvertex==2:
pointx=(vertex2[0]+pointx)/2
pointy=(vertex2[1]+pointy)/2
if nvertex==3:
pointx=(vertex3[0]+pointx)/2
pointy=(vertex3[1]+pointy)/2
|
StarcoderdataPython
|
127483
|
fileChange = open("gwsf/hasOpened.gwsf", "w")
fileChange.write("1")
fileChange.close()
|
StarcoderdataPython
|
3561681
|
"""Define sparse embedding and optimizer."""
from .. import backend as F
from .. import utils
from .dist_tensor import DistTensor
class DistEmbedding:
'''Distributed embeddings.
DGL provides a distributed embedding to support models that require learnable embeddings.
DGL's distributed embeddings are mainly used for learning node embeddings of graph models.
Because distributed embeddings are part of a model, they are updated by mini-batches.
The distributed embeddings have to be updated by DGL's optimizers instead of
the optimizers provided by the deep learning frameworks (e.g., Pytorch and MXNet).
To support efficient training on a graph with many nodes, the embeddings support sparse
updates. That is, only the embeddings involved in a mini-batch computation are updated.
Currently, DGL provides only one optimizer: `SparseAdagrad`. DGL will provide more
optimizers in the future.
Distributed embeddings are sharded and stored in a cluster of machines in the same way as
py:meth:`dgl.distributed.DistTensor`, except that distributed embeddings are trainable.
Because distributed embeddings are sharded
in the same way as nodes and edges of a distributed graph, it is usually much more
efficient to access than the sparse embeddings provided by the deep learning frameworks.
Parameters
----------
num_embeddings : int
The number of embeddings. Currently, the number of embeddings has to be the same as
the number of nodes or the number of edges.
embedding_dim : int
The dimension size of embeddings.
name : str, optional
The name of the embeddings. The name can uniquely identify embeddings in a system
        so that another DistEmbedding object can refer to the embeddings.
init_func : callable, optional
The function to create the initial data. If the init function is not provided,
the values of the embeddings are initialized to zero.
part_policy : PartitionPolicy, optional
The partition policy that assigns embeddings to different machines in the cluster.
Currently, it only supports node partition policy or edge partition policy.
The system determines the right partition policy automatically.
Examples
--------
    >>> def initializer(shape, dtype):
    ...     arr = th.zeros(shape, dtype=dtype)
    ...     arr.uniform_(-1, 1)
    ...     return arr
>>> emb = dgl.distributed.DistEmbedding(g.number_of_nodes(), 10, init_func=initializer)
>>> optimizer = dgl.distributed.SparseAdagrad([emb], lr=0.001)
>>> for blocks in dataloader:
... feats = emb(nids)
... loss = F.sum(feats + 1, 0)
... loss.backward()
... optimizer.step()
Note
----
When a ``DistEmbedding`` object is used when the deep learning framework is recording
the forward computation, users have to invoke py:meth:`~dgl.distributed.SparseAdagrad.step`
afterwards. Otherwise, there will be some memory leak.
'''
def __init__(self, num_embeddings, embedding_dim, name=None,
init_func=None, part_policy=None):
self._tensor = DistTensor((num_embeddings, embedding_dim), F.float32, name,
init_func, part_policy)
self._trace = []
def __call__(self, idx):
idx = utils.toindex(idx).tousertensor()
emb = self._tensor[idx]
if F.is_recording():
emb = F.attach_grad(emb)
self._trace.append((idx, emb))
return emb
class SparseAdagradUDF:
''' The UDF to update the embeddings with sparse Adagrad.
Parameters
----------
lr : float
The learning rate.
'''
def __init__(self, lr):
self._lr = lr
def __call__(self, data_store, name, indices, data):
''' Update the embeddings with sparse Adagrad.
This function runs on the KVStore server. It updates the gradients by scaling them
according to the state sum.
Parameters
----------
data_store : dict of data
all data in the kvstore.
name : str
data name
indices : tensor
the indices in the local tensor.
data : tensor (mx.ndarray or torch.tensor)
a tensor with the same row size of id
'''
grad_indices = indices
grad_values = data
embs = data_store[name]
state_sum = data_store[name + "_sum"]
with F.no_grad():
grad_sum = F.mean(grad_values * grad_values, 1)
F.index_add_inplace(state_sum, grad_indices, grad_sum)
std = state_sum[grad_indices] # _sparse_mask
std_values = F.unsqueeze((F.sqrt(std) + 1e-10), 1)
F.index_add_inplace(embs, grad_indices, grad_values / std_values * (-self._lr))
def _init_state(shape, dtype):
return F.zeros(shape, dtype, F.cpu())
class SparseAdagrad:
r''' The sparse Adagrad optimizer.
This optimizer implements a lightweight version of Adagrad algorithm for optimizing
:func:`dgl.distributed.DistEmbedding`. In each mini-batch, it only updates the embeddings
involved in the mini-batch to support efficient training on a graph with many
nodes and edges.
Adagrad maintains a :math:`G_{t,i,j}` for every parameter in the embeddings, where
:math:`G_{t,i,j}=G_{t-1,i,j} + g_{t,i,j}^2` and :math:`g_{t,i,j}` is the gradient of
the dimension :math:`j` of embedding :math:`i` at step :math:`t`.
Instead of maintaining :math:`G_{t,i,j}`, this implementation maintains :math:`G_{t,i}`
for every embedding :math:`i`:
.. math::
G_{t,i}=G_{t-1,i}+ \frac{1}{p} \sum_{0 \le j \lt p}g_{t,i,j}^2
where :math:`p` is the dimension size of an embedding.
The benefit of the implementation is that it consumes much smaller memory and runs
much faster if users' model requires learnable embeddings for nodes or edges.
Parameters
----------
params : list of DistEmbeddings
The list of distributed embeddings.
lr : float
The learning rate.
'''
def __init__(self, params, lr):
self._params = params
self._lr = lr
# We need to register a state sum for each embedding in the kvstore.
for emb in params:
            assert isinstance(emb, DistEmbedding), 'SparseAdagrad only supports DistEmbedding'
name = emb._tensor.name
kvstore = emb._tensor.kvstore
policy = emb._tensor.part_policy
kvstore.init_data(name + "_sum",
(emb._tensor.shape[0],), emb._tensor.dtype,
policy, _init_state)
kvstore.register_push_handler(name, SparseAdagradUDF(self._lr))
def step(self):
''' The step function.
The step function is invoked at the end of every batch to push the gradients
of the embeddings involved in a mini-batch to DGL's servers and update the embeddings.
'''
with F.no_grad():
for emb in self._params:
name = emb._tensor.name
kvstore = emb._tensor.kvstore
trace = emb._trace
if len(trace) == 1:
kvstore.push(name, trace[0][0], F.grad(trace[0][1]))
else:
# TODO(zhengda) we need to merge the gradients of the same embeddings first.
idxs = [t[0] for t in trace]
grads = [F.grad(t[1]) for t in trace]
idxs = F.cat(idxs, 0)
# Here let's adjust the gradients with the learning rate first.
# We'll need to scale them with the state sum on the kvstore server
# after we push them.
grads = F.cat(grads, 0)
kvstore.push(name, idxs, grads)
# Clean up the old traces.
emb._trace = []
|
StarcoderdataPython
|
259153
|
import os
import click
from werkzeug.serving import run_simple
def make_app():
"""Helper function that creates a plnt app."""
from plnt import Plnt
database_uri = os.environ.get("PLNT_DATABASE_URI")
app = Plnt(database_uri or "sqlite:////tmp/plnt.db")
app.bind_to_context()
return app
@click.group()
def cli():
pass
@cli.command()
def initdb():
"""Initialize the database"""
from plnt.database import Blog, session
make_app().init_database()
# and now fill in some python blogs everybody should read (shamelessly
# added my own blog too)
blogs = [
Blog(
"<NAME>",
"https://lucumr.pocoo.org/",
"https://lucumr.pocoo.org/feed.atom",
),
Blog(
"<NAME>",
"https://pyside.blogspot.com/",
"https://pyside.blogspot.com/feeds/posts/default",
),
Blog(
"<NAME>",
"https://blog.ianbicking.org/",
"https://blog.ianbicking.org/feed/",
),
Blog(
"<NAME>",
"http://amix.dk/",
"https://feeds.feedburner.com/amixdk",
),
Blog(
"<NAME>",
"https://www.cmlenz.net/blog/",
"https://www.cmlenz.net/blog/atom.xml",
),
Blog(
"<NAME>",
"https://effbot.org/",
"https://effbot.org/rss.xml",
),
]
# okay. got tired here. if someone feels that they are missing, drop me
# a line ;-)
for blog in blogs:
session.add(blog)
session.commit()
click.echo("Initialized database, now run manage-plnt.py sync to get the posts")
@cli.command()
@click.option("-h", "--hostname", type=str, default="localhost", help="localhost")
@click.option("-p", "--port", type=int, default=5000, help="5000")
@click.option("--no-reloader", is_flag=True, default=False)
@click.option("--debugger", is_flag=True)
@click.option("--no-evalex", is_flag=True, default=False)
@click.option("--threaded", is_flag=True)
@click.option("--processes", type=int, default=1, help="1")
def runserver(hostname, port, no_reloader, debugger, no_evalex, threaded, processes):
"""Start a new development server."""
app = make_app()
reloader = not no_reloader
evalex = not no_evalex
run_simple(
hostname,
port,
app,
use_reloader=reloader,
use_debugger=debugger,
use_evalex=evalex,
threaded=threaded,
processes=processes,
)
@cli.command()
@click.option("--no-ipython", is_flag=True, default=False)
def shell(no_ipython):
"""Start a new interactive python session."""
banner = "Interactive Werkzeug Shell"
namespace = {"app": make_app()}
if not no_ipython:
try:
try:
from IPython.frontend.terminal.embed import InteractiveShellEmbed
sh = InteractiveShellEmbed.instance(banner1=banner)
except ImportError:
from IPython.Shell import IPShellEmbed
sh = IPShellEmbed(banner=banner)
except ImportError:
pass
else:
sh(local_ns=namespace)
return
from code import interact
interact(banner, local=namespace)
@cli.command()
def sync():
"""Sync the blogs in the planet. Call this from a cronjob."""
from plnt.sync import sync
make_app().bind_to_context()
sync()
if __name__ == "__main__":
cli()
|
StarcoderdataPython
|
6423521
|
<reponame>Zakovskiy/lwaf.py<gh_stars>1-10
class Account:
    def __init__(self, data: dict):
self.json = data
self.user_id = data["uid"]
self.nickname = data["n"]
self.lvl = data["l"]
self.balance = data["b"]
self.likes = data["li"]
self.dislikes = data["di"]
self.tracks = data["tr"]
self.superlikes = data["sli"]
self.sex = data["s"]
self.role = data["r"]
self.wheel_count = data["wc"]
self.wheel_timestamp = data["wts"]
self.wheel_timestamp = data["wts"]
self.time_ban = data["tb"]
self.favorite_track = data["ftr"]
self.last_tracks = data["ltr"]
self.access_token = data["at"]
self.device_id = data["did"]
self.vk_id = data["vid"]
class User:
    def __init__(self, data: dict):
self.json = data
self.user_id = data.get("uid")
self.nickname = data.get("n")
self.lvl = data.get("l")
self.balance = data.get("b")
self.likes = data.get("li")
self.dislikes = data.get("di")
self.tracks = data.get("tr")
self.superlikes = data.get("sli")
self.sex = data.get("s")
self.role = data.get("r")
self.wheel_count = data.get("wc")
self.wheel_timestamp = data.get("wts")
self.wheel_timestamp = data.get("wts")
self.time_ban = data.get("tb")
self.favorite_track = data.get("ftr")
self.last_tracks = data.get("ltr")
self.friend_id = data.get("fid")
self.friend_type = data.get("ft")
self.ranks = []
for rank in data.get("rs", []):
self.ranks.append(Rank(rank))
class Rank:
    def __init__(self, data: dict):
self.json = data
self.rank_id = data["rid"]
self.title = data["t"]
self.background_color = data["bgc"]
self.icon_link = data["il"]
class FriendList:
def __init__(self, data: list):
self.json = data
self.friend_list = []
for friend in data:
self.friend_list.append(Friend(friend))
class Friend:
    def __init__(self, data: dict):
self.json = data
self.friend_id = data["fid"]
self.friend_type = data["ft"]
self.last_message = Message(data["lm"])
self.user = User(data["u"])
class Message:
    def __init__(self, data: dict):
self.json = data
self.message_id = data.get("mid")
self.message = data.get("m")
self.type = data.get("t")
self.timestamp = data.get("ts")
self.user_id = data.get("uid")
self.user = User(data.get("u"))
class PlayersList:
def __init__(self, data: list):
self.players = []
for player in data:
self.players.append(Player(player))
class Player:
    def __init__(self, data: dict):
self.json = data
self.user_id = data.get("uid")
self.nickname = data.get("n")
self.lvl = data.get("l")
self.balance = data.get("b")
self.likes = data.get("li")
self.dislikes = data.get("di")
self.tracks = data.get("tr")
self.superlikes = data.get("sli")
self.sex = data.get("s")
self.role = data.get("r")
self.wheel_count = data.get("wc")
self.wheel_timestamp = data.get("wts")
self.wheel_timestamp = data.get("wts")
self.time_ban = data.get("tb")
self.favorite_track = data.get("ftr")
self.last_tracks = data.get("ltr")
class ConversationMessages:
def __init__(self, data: list):
self.cm = []
for message in data:
self.cm.append(Message(message))
|
StarcoderdataPython
|
9671109
|
<gh_stars>1-10
from packages.components.status import status_props, default_status
from packages.modules.crud_sqlite import crud_driver
from packages.modules.db_templates_manager import connect_toDB, statusDB_name
def status_loader_routine(self):
print('loading previous status....')
try:
connect_toDB(self, statusDB_name, False, True)
status = crud_driver(self, 'saved_status', 'read', {'pick_all': True})
print('done.....')
        connect_toDB(self, status[-1][0], True, False)
# print('debug: status found: {}'.format(dict(list(zip(status_props,status[-1])))))
return dict(list(zip(status_props,status[-1])))
except BaseException as err:
print('not found....\nreturning default values')
return default_status
# ~~~~~~~~~~~~~~~~~~~~4
|
StarcoderdataPython
|
1819293
|
<gh_stars>0
#For a detailed explanation of this code please refer to page 43 of the Final Year Project Manual
import csv
import psycopg2
import json
print('opening connection to psql database')
conn = psycopg2.connect("host = 'localhost' port='5432' dbname='stack' user='root' password='<PASSWORD>'")
cur = conn.cursor()
def createChildNode(name, score):
childNode = {
"name": name,
"size": score,
"children": []
}
return childNode
def getExistingChildNode(name, parent, score):
for node in parent:
if node["name"] == name:
node["size"] += score
return node
contents = {
"name": "stackoverflow",
"children": []
}
file = input("please enter dataset: ")
print("running conversion algorithm")
with open('./DATA/' + str(file) + '.csv') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
rowTags = row['tags'].replace('>', '').split('<')[1:]
        rowScore = int(row['score'])  # cast so scores sum numerically in getExistingChildNode
parent = contents["children"]
for tag in rowTags:
childName = tag
childNameList = [node["name"] for node in parent]
if childName not in childNameList:
childNode = createChildNode(childName, rowScore)
parent.append(childNode)
else:
childNode = getExistingChildNode(childName, parent, rowScore)
parent = childNode["children"]
print("inserting data into PSQL database")
insert_statement = "insert into api_" + file + "json (id, content) values (%s, %s)"
cur.execute("TRUNCATE api_" + file + "json")
cur.execute(insert_statement, (0, json.dumps(contents)))
conn.commit()
conn.close()
print('connection closed')
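# Illustrative sketch of the structure built above (hypothetical rows, not real data):
# given tags = "<python><pandas>", score = 3 and tags = "<python>", score = 2,
# the loop produces
#   {"name": "stackoverflow",
#    "children": [{"name": "python", "size": 5,
#                  "children": [{"name": "pandas", "size": 3, "children": []}]}]}
# before it is serialized with json.dumps and inserted into the PSQL table.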
|
StarcoderdataPython
|
5049766
|
#!/usr/bin/env python
#coding:utf-8
# with_example01.py
class Sample:
def __enter__(self):
print "In __enter__()"
return "Foo"
def __exit__(self, type, value, trace):
print "In __exit__()"
def get_sample():
return Sample()
with get_sample() as sample:
print "sample:", sample
class Sample2:
def __enter__(self):
return self
def __exit__(self, type, value, trace):
print "type:", type
print "value:", value
print "trace:", trace
def do_something(self):
bar = 1/0
return bar + 10
with Sample2() as sample:
sample.do_something()
"""
这个例子中,with后面的get_sample()变成了Sample()。
这没有任何关系,只要紧跟with后面的语句所返回的对象有 __enter__()和__exit__()方法即可。
此例中,Sample()的__enter__()方法返回新创建的Sample对象,并赋值给变量sample。
实际上,在with后面的代码块抛出任何异常时,__exit__()方法被执行。
正如例子所示,异常抛出时,与之关联的type,value和stack trace传给__exit__()方法,
因此抛出的ZeroDivisionError异常被打印出来了。
开发库时,清理资源,关闭文件等等操作,都可以放在__exit__方法当中。
"""
|
StarcoderdataPython
|
1617890
|
<gh_stars>10-100
import os
import sys
import pickle
import argparse
import time
from torch import optim
from torch.utils.tensorboard import SummaryWriter
sys.path.append(os.getcwd())
from utils import *
from motion_pred.utils.config import Config
from motion_pred.utils.dataset_h36m_multimodal import DatasetH36M
from motion_pred.utils.dataset_humaneva_multimodal import DatasetHumanEva
from models.motion_pred_ours import *
from utils import util, valid_angle_check
def joint_loss(Y_g):
parts = cfg.nf_specs['parts']
parts_idx = [(np.array(p) * 3).tolist() + (np.array(p) * 3 + 1).tolist() + (np.array(p) * 3 + 2).tolist()
for p in parts]
nparts = len(parts)
if 'alphas' in cfg.nf_specs.keys():
alpha = cfg.nf_specs['alphas'][0]
beta = cfg.nf_specs['alphas'][1]
else:
alpha = 100
beta = 300
loss = []
Y_g = Y_g.permute(1, 0, 2).contiguous()
Y_g = Y_g.view([Y_g.shape[0] // cfg.nk ** nparts] + [cfg.nk] * nparts + [Y_g.shape[1], -1])
assert nparts == 2
mask = torch.tril(torch.ones([cfg.nk, cfg.nk], device=device)) == 0
yt = Y_g[:, :, 0, ...][..., parts_idx[0]].reshape([Y_g.shape[0], cfg.nk, -1])
# pdist = (yt[:, :, None] - yt[:, None, :]).abs()[:, mask]
pdist = torch.cdist(yt, yt, p=1)[:, mask]
loss.append((-pdist / alpha).exp().mean())
yt = Y_g[..., parts_idx[1]].reshape([Y_g.shape[0] * cfg.nk, cfg.nk, -1])
# pdist = (yt[:, :, None] - yt[:, None, :]).abs()[:, mask]
pdist = torch.cdist(yt, yt, p=1)[:, mask]
loss.append((-pdist / beta).exp().mean())
with torch.no_grad():
mask = torch.tril(torch.ones([cfg.nk ** nparts, cfg.nk ** nparts], device=device)) == 0
yt = Y_g.reshape([Y_g.shape[0], cfg.nk ** nparts, -1])
pdist = torch.cdist(yt, yt, p=2)[:, mask]
# loss.append(pdist.mean())
return loss, pdist.mean()
def recon_loss(Y_g, Y, Y_mm):
parts = cfg.nf_specs['parts']
nparts = len(parts)
Y_g = Y_g.view(Y_g.shape[0], -1, cfg.nk ** nparts, Y_g.shape[2])
diff = Y_g - Y.unsqueeze(2)
dist = diff.pow(2).sum(dim=-1).sum(dim=0)
loss_recon = dist.min(dim=1)[0].mean()
with torch.no_grad():
ade = torch.norm(diff, dim=-1).mean(dim=0).min(dim=1)[0].mean()
diff = Y_g[:, :, :, None, :] - Y_mm[:, :, None, :, :]
mask = Y_mm.abs().sum(-1).sum(0) > 1e-6
dist = diff.pow(2).sum(dim=-1).sum(dim=0)
loss_recon_multi = dist.min(dim=1)[0][mask].mean()
if torch.isnan(loss_recon_multi):
loss_recon_multi = torch.zeros_like(loss_recon)
return loss_recon, loss_recon_multi, ade
def angle_loss(y):
ang_names = list(valid_ang.keys())
y = y.reshape([-1, y.shape[-1]])
ang_cos = valid_angle_check.h36m_valid_angle_check_torch(
y) if cfg.dataset == 'h36m' else valid_angle_check.humaneva_valid_angle_check_torch(y)
loss = tensor(0, dtype=dtype, device=device)
b = 1
for an in ang_names:
lower_bound = valid_ang[an][0]
if lower_bound >= -0.98:
# loss += torch.exp(-b * (ang_cos[an] - lower_bound)).mean()
if torch.any(ang_cos[an] < lower_bound):
# loss += b * torch.exp(-(ang_cos[an][ang_cos[an] < lower_bound] - lower_bound)).mean()
loss += (ang_cos[an][ang_cos[an] < lower_bound] - lower_bound).pow(2).mean()
upper_bound = valid_ang[an][1]
if upper_bound <= 0.98:
# loss += torch.exp(b * (ang_cos[an] - upper_bound)).mean()
if torch.any(ang_cos[an] > upper_bound):
# loss += b * torch.exp(ang_cos[an][ang_cos[an] > upper_bound] - upper_bound).mean()
loss += (ang_cos[an][ang_cos[an] > upper_bound] - upper_bound).pow(2).mean()
return loss
def loss_function(traj_est, traj, traj_multimodal, prior_lkh, prior_logdetjac):
lambdas = cfg.nf_specs['lambdas']
parts = cfg.nf_specs['parts']
nparts = len(parts)
nj = dataset.traj_dim // 3
# diversity loss
Y_g = traj_est[t_his:]
JL, div = joint_loss(Y_g)
# reconstruction loss
Y = traj[t_his:]
Y_multimodal = traj_multimodal[t_his:]
RECON, RECON_mm, ade = recon_loss(Y_g, Y, Y_multimodal)
# recover history
xest = traj_est[:t_his].reshape([t_his, cfg.batch_size, cfg.nk ** nparts, -1])
xgt = traj[:t_his].unsqueeze(2)
loss_x = torch.mean((xest - xgt).pow(2).sum(dim=-1))
# maintain limb length
parent = dataset.skeleton.parents()
tmp = traj[0].reshape([cfg.batch_size, nj, 3])
pgt = torch.zeros([cfg.batch_size, nj + 1, 3], dtype=dtype, device=device)
pgt[:, 1:] = tmp
limbgt = torch.norm(pgt[:, 1:] - pgt[:, parent[1:]], dim=2)[None, :, None, :]
tmp = traj_est.reshape([-1, cfg.batch_size, cfg.nk ** nparts, nj, 3])
pest = torch.zeros([tmp.shape[0], cfg.batch_size, cfg.nk ** nparts, nj + 1, 3], dtype=dtype, device=device)
pest[:, :, :, 1:] = tmp
limbest = torch.norm(pest[:, :, :, 1:] - pest[:, :, :, parent[1:]], dim=4)
loss_limb = torch.mean((limbgt - limbest).pow(2).sum(dim=3))
# angle loss
loss_ang = angle_loss(Y_g)
loss_r = loss_x * lambdas[0] + loss_limb * lambdas[1] \
+ JL[0] * lambdas[2] + JL[1] * lambdas[3] + RECON * lambdas[4] + RECON_mm * lambdas[5] \
- prior_lkh.mean() * lambdas[6] # - prior_logdetjac.mean() * lambdas[7]
if loss_ang > 0:
loss_r += loss_ang * lambdas[8]
return loss_r, np.array([loss_r.item(), loss_x.item(), loss_limb.item(), loss_ang.item(),
JL[0].item(), JL[1].item(), div.item(), RECON.item(), RECON_mm.item(), ade.item(),
prior_lkh.mean().item(), prior_logdetjac.mean().item()])
def train(epoch):
model.train()
t_s = time.time()
train_losses = 0
train_grad = 0
train_grad_d = 0
total_num_sample = 0
n_modality = 10
loss_names = ['LOSS', 'loss_cont', 'loss_limb', 'loss_ang', 'loss_DIV_L', 'loss_DIV_U', 'DIV',
'RECON', 'RECON_multi', "ADE", 'p(z)', 'logdet']
generator = dataset.sampling_generator(num_samples=cfg.num_vae_data_sample, batch_size=cfg.batch_size,
n_modality=n_modality)
prior = torch.distributions.Normal(torch.tensor(0, dtype=dtype, device=device),
torch.tensor(1, dtype=dtype, device=device))
# generator_d = dataset.sampling_generator(num_samples=cfg.num_vae_data_sample, batch_size=cfg.batch_size)
dct_m, idct_m = util.get_dct_matrix(t_pred + t_his)
dct_m_all = dct_m.float().to(device)
idct_m_all = idct_m.float().to(device)
parts = cfg.nf_specs['parts']
n_parts = len(parts)
idx_pad = list(range(t_his)) + [t_his - 1] * t_pred
k = 1
for traj_np, traj_multimodal_np in generator:
with torch.no_grad():
traj_np = traj_np[..., 1:, :].transpose([0, 2, 3, 1]) # .reshape(traj_np.shape[0], traj_np.shape[1], -1)
traj = tensor(traj_np, device=device, dtype=dtype) # .permute(0, 2, 1).contiguous()
bs, nj, _, _ = traj.shape
inp = traj.reshape([bs, -1, t_his + t_pred]).transpose(1, 2)
inp = torch.matmul(dct_m_all[:cfg.n_pre], inp[:, idx_pad, :]).transpose(1, 2). \
reshape([bs, nj, 3, -1]).reshape([bs, nj, -1])
traj_multimodal_np = traj_multimodal_np[..., 1:, :] # [bs, modality, seqn, jn, 3]
traj_multimodal_np = traj_multimodal_np.reshape([bs, n_modality, t_his + t_pred, -1]).transpose(
[2, 0, 1, 3])
traj_multimodal = tensor(traj_multimodal_np, device=device, dtype=dtype) # .permute(0, 2, 1).contiguous()
inp = inp.unsqueeze(1).repeat([1, (cfg.nk ** n_parts), 1, 1]).reshape(
[bs * (cfg.nk ** n_parts), nj, -1])
z = None
for _ in range(n_parts):
if z is None:
zt = torch.randn([bs, cfg.nk, 1, cfg.nf_specs['nz']], dtype=dtype, device=device)
z = zt
else:
z = z.repeat_interleave(cfg.nk, dim=1)
zt = torch.randn([bs, z.shape[1], 1, cfg.nf_specs['nz']], dtype=dtype, device=device)
z = torch.cat([z, zt], dim=2)
z = z.reshape([-1, n_parts, cfg.nf_specs['nz']])
# train generator
xt = model(inp, z)
xt = xt.reshape([bs * (cfg.nk ** n_parts), nj, 3, -1]).reshape([bs * (cfg.nk ** n_parts), nj * 3, -1]) \
.transpose(1, 2)
traj_est = torch.matmul(idct_m_all[:, :cfg.n_pre], xt).transpose(0, 1)
traj = traj.reshape([bs, -1, t_his + t_pred]).permute([2, 0, 1])
# to save computation
ran = np.random.uniform()
if ran > 0.67:
traj_tmp = traj_est[t_his::3].reshape([-1, traj_est.shape[-1] // 3, 3])
tmp = torch.zeros_like(traj_tmp[:, :1, :])
traj_tmp = torch.cat([tmp, traj_tmp], dim=1)
traj_tmp = util.absolute2relative_torch(traj_tmp, parents=dataset.skeleton.parents()).reshape(
[-1, traj_est.shape[-1]])
elif ran > 0.33:
traj_tmp = traj_est[t_his + 1::3].reshape([-1, traj_est.shape[-1] // 3, 3])
tmp = torch.zeros_like(traj_tmp[:, :1, :])
traj_tmp = torch.cat([tmp, traj_tmp], dim=1)
traj_tmp = util.absolute2relative_torch(traj_tmp, parents=dataset.skeleton.parents()).reshape(
[-1, traj_est.shape[-1]])
else:
traj_tmp = traj_est[t_his + 2::3].reshape([-1, traj_est.shape[-1] // 3, 3])
tmp = torch.zeros_like(traj_tmp[:, :1, :])
traj_tmp = torch.cat([tmp, traj_tmp], dim=1)
traj_tmp = util.absolute2relative_torch(traj_tmp, parents=dataset.skeleton.parents()).reshape(
[-1, traj_est.shape[-1]])
z, prior_logdetjac = pose_prior(traj_tmp)
prior_lkh = prior.log_prob(z).sum(dim=-1)
# prior_logdetjac = log_det_jacobian.sum(dim=2)
loss, losses = loss_function(traj_est, traj, traj_multimodal, prior_lkh, prior_logdetjac)
# if torch.isinf(loss):
# print(1)
optimizer.zero_grad()
loss.backward()
grad_norm = torch.nn.utils.clip_grad_norm_(list(model.parameters()), max_norm=100)
train_grad += grad_norm
optimizer.step()
train_losses += losses
total_num_sample += 1
# print(torch.cuda.memory_allocated()/1024/1024)
del loss, z, inp, xt, traj_est
# print(torch.cuda.memory_allocated())
scheduler.step()
# dt = time.time() - t_s
train_losses /= total_num_sample
lr = optimizer.param_groups[0]['lr']
losses_str = ' '.join(['{}: {:.4f}'.format(x, y) for x, y in zip(loss_names, train_losses)])
# average cost of log time 20s
tb_logger.add_scalar('train_grad', train_grad / total_num_sample, epoch)
for name, loss in zip(loss_names, train_losses):
tb_logger.add_scalars(name, {'train': loss}, epoch)
logger.info('====> Epoch: {} Time: {:.2f} {} lr: {:.5f}'.format(epoch, time.time() - t_s, losses_str, lr))
def val(epoch):
model.eval()
t_s = time.time()
train_losses = 0
total_num_sample = 0
n_modality = 10
loss_names = ['LOSS', 'loss_cont', 'loss_limb', 'loss_ang', 'loss_DIV_L', 'loss_DIV_U', 'DIV',
'RECON', 'RECON_multi', "ADE", 'p(z)', 'logdet']
generator = dataset_test.sampling_generator(num_samples=cfg.num_vae_data_sample, batch_size=cfg.batch_size)
prior = torch.distributions.Normal(torch.tensor(0, dtype=dtype, device=device),
torch.tensor(1, dtype=dtype, device=device))
with torch.no_grad():
dct_m, idct_m = util.get_dct_matrix(t_pred + t_his)
dct_m_all = dct_m.float().to(device)
idct_m_all = idct_m.float().to(device)
parts = cfg.nf_specs['parts']
n_parts = len(parts)
idx_pad = list(range(t_his)) + [t_his - 1] * t_pred
k = 1
for traj_np, traj_multimodal_np in generator:
traj_np = traj_np[..., 1:, :].transpose([0, 2, 3, 1]) # .reshape(traj_np.shape[0], traj_np.shape[1], -1)
traj = tensor(traj_np, device=device, dtype=dtype) # .permute(0, 2, 1).contiguous()
bs, nj, _, _ = traj.shape
inp = traj.reshape([bs, -1, t_his + t_pred]).transpose(1, 2)
inp = torch.matmul(dct_m_all[:cfg.n_pre], inp[:, idx_pad, :]).transpose(1, 2). \
reshape([bs, nj, 3, -1]).reshape([bs, nj, -1])
traj_multimodal_np = traj_multimodal_np[..., 1:, :] # [bs, modality, seqn, jn, 3]
traj_multimodal_np = traj_multimodal_np.reshape([bs, n_modality, t_his + t_pred, -1]).transpose(
[2, 0, 1, 3])
traj_multimodal = tensor(traj_multimodal_np, device=device, dtype=dtype) # .permute(0, 2, 1).contiguous()
inp = inp.unsqueeze(1).repeat([1, (cfg.nk ** n_parts), 1, 1]).reshape(
[bs * (cfg.nk ** n_parts), nj, -1])
z = None
for _ in range(n_parts):
if z is None:
zt = torch.randn([bs, cfg.nk, 1, cfg.nf_specs['nz']], dtype=dtype, device=device)
z = zt
else:
z = z.repeat_interleave(cfg.nk, dim=1)
zt = torch.randn([bs, z.shape[1], 1, cfg.nf_specs['nz']], dtype=dtype, device=device)
z = torch.cat([z, zt], dim=2)
z = z.reshape([-1, n_parts, cfg.nf_specs['nz']])
# train generator
xt = model(inp, z)
xt = xt.reshape([bs * (cfg.nk ** n_parts), nj, 3, -1]).reshape([bs * (cfg.nk ** n_parts), nj * 3, -1]) \
.transpose(1, 2)
traj_est = torch.matmul(idct_m_all[:, :cfg.n_pre], xt).transpose(0, 1)
traj = traj.reshape([bs, -1, t_his + t_pred]).permute([2, 0, 1])
# traj
traj_tmp = traj_est.reshape([-1, traj_est.shape[-1] // 3, 3])
tmp = torch.zeros_like(traj_tmp[:, :1, :])
traj_tmp = torch.cat([tmp, traj_tmp], dim=1)
traj_tmp = util.absolute2relative_torch(traj_tmp, parents=dataset.skeleton.parents()).reshape(
[-1, traj_est.shape[-1]])
z, prior_logdetjac = pose_prior(traj_tmp)
prior_lkh = prior.log_prob(z).sum(dim=-1)
# prior_logdetjac = log_det_jacobian.sum(dim=2)
# # normalize traj
# traj_tmp = (traj_est[t_his::3] - data_mean) / data_std
# z, log_det_jacobian, _, _, _, _ = pose_prior(traj_tmp)
#
# prior_lkh = prior.log_prob(z).sum(dim=2)
# prior_logdetjac = log_det_jacobian.sum(dim=2)
loss, losses = loss_function(traj_est, traj, traj_multimodal, prior_lkh, prior_logdetjac)
train_losses += losses
total_num_sample += 1
del loss, z, xt, prior_lkh, prior_logdetjac
# dt = time.time() - t_s
train_losses /= total_num_sample
lr = optimizer.param_groups[0]['lr']
losses_str = ' '.join(['{}: {:.4f}'.format(x, y) for x, y in zip(loss_names, train_losses)])
for name, loss in zip(loss_names, train_losses):
tb_logger.add_scalars(name, {'test': loss}, epoch)
logger.info('====> Epoch: {} Test Time: {:.2f} {} lr: {:.5f}'.format(epoch, time.time() - t_s, losses_str, lr))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--cfg',
default='h36m')
parser.add_argument('--mode', default='train')
parser.add_argument('--test', action='store_true', default=False)
parser.add_argument('--iter', type=int, default=0)
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--gpu_index', type=int, default=1)
parser.add_argument('--n_pre', type=int, default=8)
parser.add_argument('--n_his', type=int, default=5)
parser.add_argument('--trial', type=int, default=1)
parser.add_argument('--num_coupling_layer', type=int, default=4)
# parser.add_argument('--nz', type=int, default=10)
args = parser.parse_args()
"""setup"""
np.random.seed(args.seed)
torch.manual_seed(args.seed)
dtype = torch.float32
torch.set_default_dtype(dtype)
device = torch.device('cuda', index=args.gpu_index) if torch.cuda.is_available() else torch.device('cpu')
if torch.cuda.is_available():
torch.cuda.set_device(args.gpu_index)
cfg = Config(f'{args.cfg}', test=args.test)
tb_logger = SummaryWriter(cfg.tb_dir) if args.mode == 'train' else None
logger = create_logger(os.path.join(cfg.log_dir, 'log.txt'))
"""parameter"""
mode = args.mode
nz = cfg.nz
t_his = cfg.t_his
t_pred = cfg.t_pred
cfg.n_his = args.n_his
if 'n_pre' not in cfg.nf_specs.keys():
cfg.n_pre = args.n_pre
else:
cfg.n_pre = cfg.nf_specs['n_pre']
cfg.num_coupling_layer = args.num_coupling_layer
# cfg.nz = args.nz
"""data"""
if 'actions' in cfg.nf_specs.keys():
act = cfg.nf_specs['actions']
else:
act = 'all'
dataset_cls = DatasetH36M if cfg.dataset == 'h36m' else DatasetHumanEva
dataset = dataset_cls('train', t_his, t_pred, actions=act, use_vel=cfg.use_vel,
multimodal_path=cfg.nf_specs[
'multimodal_path'] if 'multimodal_path' in cfg.nf_specs.keys() else None,
data_candi_path=cfg.nf_specs[
'data_candi_path'] if 'data_candi_path' in cfg.nf_specs.keys() else None)
dataset_test = dataset_cls('test', t_his, t_pred, actions=act, use_vel=cfg.use_vel,
multimodal_path=cfg.nf_specs[
'multimodal_path'] if 'multimodal_path' in cfg.nf_specs.keys() else None,
data_candi_path=cfg.nf_specs[
'data_candi_path'] if 'data_candi_path' in cfg.nf_specs.keys() else None)
if cfg.normalize_data:
dataset.normalize_data()
"""model"""
# model = get_vae_model(cfg, dataset.traj_dim)
model, pose_prior = get_model(cfg, dataset.traj_dim // 3, args.cfg)
model.float()
pose_prior.float()
optimizer = optim.Adam(model.parameters(), lr=cfg.vae_lr)
scheduler = get_scheduler(optimizer, policy='lambda', nepoch_fix=cfg.num_vae_epoch_fix, nepoch=cfg.num_vae_epoch)
logger.info(">>> total params: {:.2f}M".format(
sum(p.numel() for p in list(model.parameters())) / 1000000.0))
cp_path = 'results/h36m_nf/models/vae_0025.p' if cfg.dataset == 'h36m' else 'results/humaneva_nf/models/vae_0025.p'
print('loading model from checkpoint: %s' % cp_path)
model_cp = pickle.load(open(cp_path, "rb"))
pose_prior.load_state_dict(model_cp['model_dict'])
pose_prior.to(device)
# data_mean = tensor(model_cp['meta']['mean'], dtype=dtype, device=device).reshape([-1])
# data_std = tensor(model_cp['meta']['std'], dtype=dtype, device=device).reshape([-1])
valid_ang = pickle.load(open('./data/h36m_valid_angle.p', "rb")) if cfg.dataset == 'h36m' else pickle.load(
open('./data/humaneva_valid_angle.p', "rb"))
if args.iter > 0:
cp_path = cfg.vae_model_path % args.iter
print('loading model from checkpoint: %s' % cp_path)
model_cp = pickle.load(open(cp_path, "rb"))
model.load_state_dict(model_cp['model_dict'])
if mode == 'train':
model.to(device)
overall_iter = 0
for i in range(args.iter, cfg.num_vae_epoch):
train(i)
# val(i)
if cfg.save_model_interval > 0 and (i + 1) % cfg.save_model_interval == 0:
with to_cpu(model):
cp_path = cfg.vae_model_path % (i + 1)
model_cp = {'model_dict': model.state_dict(), 'meta': {'std': dataset.std, 'mean': dataset.mean}}
pickle.dump(model_cp, open(cp_path, 'wb'))
|
StarcoderdataPython
|
168648
|
<filename>python/ql/test/library-tests/frameworks/fastapi/router.py
# like blueprints in Flask
# see https://fastapi.tiangolo.com/tutorial/bigger-applications/
# see basic.py for instructions for how to run this code.
from fastapi import APIRouter, FastAPI
inner_router = APIRouter()
@inner_router.get("/foo") # $ routeSetup="/foo"
async def root(): # $ requestHandler
return {"msg": "inner_router /foo"} # $ HttpResponse
outer_router = APIRouter()
outer_router.include_router(inner_router, prefix="/inner")
items_router = APIRouter(
prefix="/items",
tags=["items"],
)
@items_router.get("/") # $ routeSetup="/"
async def items(): # $ requestHandler
return {"msg": "items_router /"} # $ HttpResponse
app = FastAPI()
app.include_router(outer_router, prefix="/outer")
app.include_router(items_router)
# Using a custom router
class MyCustomRouter(APIRouter):
"""
Which automatically removes trailing slashes
"""
def api_route(self, path: str, **kwargs):
path = path.rstrip("/")
return super().api_route(path, **kwargs)
custom_router = MyCustomRouter()
@custom_router.get("/bar/") # $ routeSetup="/bar/"
async def items(): # $ requestHandler
return {"msg": "custom_router /bar/"} # $ HttpResponse
app.include_router(custom_router)
|
StarcoderdataPython
|
11308303
|
"""
File input/output functions.
This module provides functions for file input and output of data related to single-molecule localization microscopy.
Submodules:
-----------
.. autosummary::
:toctree: ./
locdata
"""
from .locdata import *
__all__ = []
__all__.extend(locdata.__all__)
|
StarcoderdataPython
|
3325550
|
from rest_framework.pagination import PageNumberPagination
class CustomPageNumberPagination(PageNumberPagination):
page_size = 1
max_page_size = 1
|
StarcoderdataPython
|
1965585
|
<reponame>Shivanjain023/django-brambling
import urllib
from django.conf import settings
from django.core.urlresolvers import reverse
from brambling.payment.core import LIVE
from brambling.payment.stripe.core import stripe_prep
def stripe_organization_oauth_url(organization, api_type, request):
stripe_prep(api_type)
if api_type == LIVE:
client_id = getattr(settings, 'STRIPE_APPLICATION_ID', None)
else:
client_id = getattr(settings, 'STRIPE_TEST_APPLICATION_ID', None)
if not client_id:
return ''
redirect_uri = request.build_absolute_uri(reverse('brambling_stripe_connect'))
base_url = "https://connect.stripe.com/oauth/authorize?client_id={client_id}&response_type=code&scope=read_write&state={state}&redirect_uri={redirect_uri}"
return base_url.format(client_id=client_id,
state="{}|{}".format(organization.slug, api_type),
redirect_uri=urllib.quote(redirect_uri))
|
StarcoderdataPython
|
331160
|
<gh_stars>0
import math
import numpy as np
# import sys
# sys.path.append(".")
from visualization.panda import world as wd
from modeling import geometric_model as gm
from modeling import collision_model as cm
from robot_sim.robots.fr5 import fr5 as fr5
from motion.probabilistic import rrt_connect as rrtc
from basis import robot_math as rm
def genSphere(pos, radius=0.005, rgba=None):
if rgba is None:
rgba = [1, 0, 0, 1]
gm.gen_sphere(pos=pos, radius=radius, rgba=rgba).attach_to(base)
if __name__ == '__main__':
base = wd.World(cam_pos=[2, 2, 1], lookat_pos=[0, 0, 0.5], w=960, h=720)
gm.gen_frame().attach_to(base)
component_name = 'arm'
robot_s = fr5.FR5_robot(enable_cc=True, hnd_attached=False)
robot_meshmodel = robot_s.gen_meshmodel(toggle_tcpcs=True)
robot_meshmodel.attach_to(base)
# jnt limits-
# |- jnt1:[-175, 175], jnt2:[-265, 85], jnt3:[-160, 160],
# |- jnt4:[-265, 85], jnt5:[-175, 175], jnt6:[-175, 175]
limits = [[-175, 175], [-265, 85], [-160, 160], [-265, 85], [-175, 175], [-175, 175]]
jnt1, jnt2, jnt3, jnt4, jnt5, jnt6 = 0, 0, 0, 0, 0, 0
interval = 45
for jnt1 in range(limits[0][0], limits[0][1], interval):
for jnt2 in range(limits[1][0], limits[1][1], interval):
for jnt3 in range(limits[2][0], limits[2][1], interval):
for jnt4 in range(limits[3][0], limits[3][1], interval):
# for jnt5 in range(limits[4][0], limits[4][1], interval):
goal_conf = np.array([jnt1, jnt2, jnt3, jnt4, jnt5, jnt6])*math.pi/180
robot_s.fk(component_name, goal_conf)
if not robot_s.is_collided():
genSphere(robot_s.get_gl_tcp(component_name)[0])
# robot_meshmodel = robot_s.gen_meshmodel(toggle_tcpcs=True)
# robot_meshmodel.attach_to(base)
base.run()
|
StarcoderdataPython
|
87211
|
<filename>tests/query_test/test_decimal_casting.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Validates that casting to Decimal works.
#
import pytest
from decimal import Decimal, getcontext, ROUND_DOWN, ROUND_HALF_UP
from metacomm.combinatorics.all_pairs2 import all_pairs2 as all_pairs
from random import randint
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.test_dimensions import create_exec_option_dimension_from_dict
from tests.common.test_vector import ImpalaTestDimension, ImpalaTestMatrix
class TestDecimalCasting(ImpalaTestSuite):
"""Test Suite to verify that casting to Decimal works.
Specifically, this test suite ensures that:
- overflows and underflows and handled correctly.
- casts from decimal/string to their exact decimal types are correct.
- max/min/NULL/0 can be expressed with their respective decimal types.
- TODO: Add cases for cast from float/double to decimal types.
"""
DECIMAL_TYPES_MAP = {
# All possible decimal types.
# (0 < precision <= 38 && 0 <= scale <= 38 && scale <= precision)
'exhaustive' : [(p, s) for p in xrange(1, 39) for s in xrange(0, p + 1)],
# Core only deals with precision 6,16,26 (different integer types)
'core' : [(p, s) for p in [6,16,26] for s in xrange(0, p + 1)],
# mimics test_vectors.py and takes a subset of all decimal types
'pairwise' : all_pairs([(p, s) for p in xrange(1, 39) for s in xrange(0, p + 1)])
}
# We can cast for numerics or string types.
CAST_FROM = ['string', 'number']
# Set the default precision to 38 to operate on decimal values.
getcontext().prec = 38
# Represents a 0 in decimal
DECIMAL_ZERO = Decimal('0')
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
cls.ImpalaTestMatrix = ImpalaTestMatrix()
cls.ImpalaTestMatrix.add_dimension(ImpalaTestDimension('decimal_type',
*TestDecimalCasting.DECIMAL_TYPES_MAP[cls.exploration_strategy()]))
cls.ImpalaTestMatrix.add_dimension(
ImpalaTestDimension('cast_from', *TestDecimalCasting.CAST_FROM))
cls.ImpalaTestMatrix.add_dimension(create_exec_option_dimension_from_dict(
{'decimal_v2': ['false','true']}))
cls.iterations = 1
def _gen_decimal_val(self, precision, scale):
"""Generates a Decimal object with the exact number of digits as the precision."""
# Generates numeric string which has as many digits as the precision.
num = str(randint(10**(precision - 1), int('9' * precision)))
# Incorporate scale into the string.
if scale != 0: num = "{0}.{1}".format(num[:-scale], num[precision - scale:])
# Convert the generated decimal string into a Decimal object and return a -ive/+ive
# version of it with equal probability.
return Decimal(num) if randint(0,1) else Decimal("-{0}".format(num))
def _assert_decimal_result(self, cast, actual, expected):
assert expected == actual, "Cast: {0}, Expected: {1}, Actual: {2}".format(cast,\
expected, actual)
def _normalize_cast_expr(self, decimal_val, precision, cast_from):
if cast_from == 'string':
return "select cast('{0}' as Decimal({1},{2}))"
else:
return "select cast({0} as Decimal({1},{2}))"
def test_min_max_zero_null(self, vector):
"""Sanity test at limits.
Verify that:
- We can read decimal values at their +ive and -ive limits.
- 0 is expressible in all decimal types.
- NULL is expressible in all decimal types
"""
precision, scale = vector.get_value('decimal_type')
dec_max = Decimal('{0}.{1}'.format('9' * (precision - scale), '9' * scale))
    # Multiplying large values by -1 can produce an overflow.
dec_min = Decimal('-{0}'.format(str(dec_max)))
cast = self._normalize_cast_expr(dec_max, precision, vector.get_value('cast_from'))
# Test max
res = Decimal(self.execute_scalar(cast.format(dec_max, precision, scale)))
self._assert_decimal_result(cast, res, dec_max)
# Test Min
res = Decimal(self.execute_scalar(cast.format(dec_min, precision, scale)))
self._assert_decimal_result(cast, res, dec_min)
# Test zero
res = Decimal(self.execute_scalar(\
cast.format(TestDecimalCasting.DECIMAL_ZERO, precision, scale)))
self._assert_decimal_result(cast, res, TestDecimalCasting.DECIMAL_ZERO)
# Test NULL
null_cast = "select cast(NULL as Decimal({0}, {1}))".format(precision, scale)
res = self.execute_scalar(null_cast)
self._assert_decimal_result(null_cast, res, 'NULL')
def test_exact(self, vector):
"""Test to verify that an exact representation of the desired Decimal type is
maintained."""
precision, scale = vector.get_value('decimal_type')
if vector.get_value('cast_from') == 'decimal':
pytest.skip("Casting between the same decimal type isn't interesting")
for i in xrange(self.iterations):
val = self._gen_decimal_val(precision, scale)
cast = self._normalize_cast_expr(val, precision, vector.get_value('cast_from'))\
.format(val, precision, scale)
res = Decimal(self.execute_scalar(cast))
self._assert_decimal_result(cast, res, val)
def test_overflow(self, vector):
"""Test to verify that we always return NULL when trying to cast a number with greater
precision that its intended decimal type"""
precision, scale = vector.get_value('decimal_type')
for i in xrange(self.iterations):
# Generate a decimal with a larger precision than the one we're casting to.
from_precision = randint(precision + 1, 39)
val = self._gen_decimal_val(from_precision, scale)
cast = self._normalize_cast_expr(val, from_precision,\
vector.get_value('cast_from')).format(val, precision, scale)
if vector.get_value('cast_from') == "string":
# TODO: This should be an error in both cases (IMPALA-6405).
res = self.execute_scalar(cast)
self._assert_decimal_result(cast, res, 'NULL')
else:
res = self.execute_query_expect_failure(self.client, cast)
def test_underflow(self, vector):
"""Test to verify that we truncate when the scale of the number being cast is higher
than the target decimal type (with no change in precision).
"""
precision, scale = vector.get_value('decimal_type')
is_decimal_v2 = vector.get_value('exec_option')['decimal_v2'] == 'true'
cast_from = vector.get_value('cast_from')
if precision == scale:
pytest.skip("Cannot underflow scale when precision and scale are equal")
for i in xrange(self.iterations):
from_scale = randint(scale + 1, precision)
val = self._gen_decimal_val(precision, from_scale)
cast = self._normalize_cast_expr(val, precision, cast_from)\
.format(val, precision, scale)
res = Decimal(self.execute_scalar(cast, vector.get_value('exec_option')))
# TODO: Remove check for cast_from once string to decimal is supported in decimal_v2.
if is_decimal_v2:
expected_val = val.quantize(Decimal('0e-%s' % scale), rounding=ROUND_HALF_UP)
else:
expected_val = val.quantize(Decimal('0e-%s' % scale), rounding=ROUND_DOWN)
self._assert_decimal_result(cast, res, expected_val)
|
StarcoderdataPython
|
1727251
|
<reponame>eternal-flame-AD/px_helper
import argparse
import re
from . import config
from .parser import parse_pixiv
from .pxelem import PixivUrl
from .login import login
from . import imgfilter
def main():
parser = argparse.ArgumentParser(description="Pixiv downloader")
parser.add_argument(
"url",
type=str,
help="Pixiv URL, either bookmark, member_illust or illust")
parser.add_argument("-u", dest="username", help="username", type=str)
parser.add_argument("-p", dest="password", help="password", type=str)
parser.add_argument("-s", dest="sess_id", help="sessid", type=str)
parser.add_argument(
"--proxy",
dest="proxy",
help="specify a http proxy (format: http://127.0.0.1:8080)")
parser.add_argument("-o", dest="output", help="output folder", type=str)
parser.add_argument(
"--max-page",
dest="page",
help=
"specify max page number (only useful when downloading illust_member or search page) Example: --max-page 10",
type=int)
parser.add_argument(
"--newer-than",
dest="new",
help=
"Only download works newer than the specified date. Format:YYYY-MM-DD Example: --newer-than 2018-07-03",
type=str)
parser.add_argument(
"--remux",
dest="remux",
help="Whether to remux ugoira with ffmpeg(y/n). Default: y",
type=str)
parser.add_argument(
"--remux-ext",
dest="remux_ext",
help="Output format of remuxed ugoira. Example: --remux-ext mp4",
type=str)
args = parser.parse_args()
if args.proxy:
proxy_url = PixivUrl(args.proxy, use_sessid=False, use_english=False)
scheme = proxy_url.getscheme()
if scheme == "http":
config.proxy = "http"
config.proxy_host = proxy_url.gethost()
config.proxy_port = proxy_url.getport()
else:
raise NotImplementedError("Unsupported proxy")
else:
config.proxy = None
if args.page:
def filter_url(url):
try:
p = int(url.getquerydict()['p'][0])
return p <= args.page
except KeyError:
return True
imgfilter.filter_url = filter_url
if args.remux:
args.remux = args.remux.lower()
if "y" in args.remux:
config.remux_ugoira = True
if "n" in args.remux:
config.remux_ugoira = False
if args.remux_ext:
config.remux_ext = args.remux_ext
if args.new:
assert re.match(r"\d{4}-\d{2}-\d{2}",
args.new), "Invalid date format. YYYY-MM-DD"
imgfilter.filter = lambda img: img.info['work_time'] >= args.new
if args.output:
if args.output.endswith("/") or args.output.endswith("\\"):
args.output = args.output[:-1]
config.download_prefix = args.output
if args.sess_id:
config.sess_id = args.sess_id
elif (args.username) and (args.password):
config.sess_id = login(args.username, args.password)
else:
raise ValueError("Provide credentials please")
parse_pixiv(args.url)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
47562
|
<reponame>AlexisNava/ABitly-Services
import pytest
from flask import json
# Flask App
from abitly import create_app
@pytest.fixture
def app():
app = create_app()
return app
def test_create_link_should_responds_created(client):
"""Should responds Created when makes a request with
a valid request body
"""
request_body = {
'originalUrl': 'https://discordapp.com/'
}
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json'
}
response = client.post('/link/', data=json.dumps(request_body),
headers=headers)
response_body = json.loads(response.get_data(as_text=True))
assert response.status_code == 201
assert response_body['statusCode'] == 201
assert response_body['status'] == 'Created'
assert len(response_body['generatedUrl']) == 7
assert response_body['originalUrl'] == 'https://discordapp.com/'
def test_create_link_should_responds_bad_request(client):
"""Should responds BadRequest when makes a request with
an invalid request body
"""
request_body = {
'originalUrl': 543543
}
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json'
}
response = client.post('/link/', data=json.dumps(request_body),
headers=headers)
response_body = json.loads(response.get_data(as_text=True))
expected_message = ('The browser (or proxy) sent a request that this '
'server could not understand.').format()
assert response.status_code == 400
assert response_body['status'] == 'Bad Request'
assert response_body['statusCode'] == 400
assert response_body['errorMessage'] == expected_message
def test_create_link_should_responds_method_not_allowed(client):
"""Should responds MethodNotAllowed when makes a request with
a different method of POST
"""
request_body = {
'originalUrl': 'https://discordapp.com/'
}
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json'
}
response = client.put('/link/', data=json.dumps(request_body),
headers=headers)
response_body = json.loads(response.get_data(as_text=True))
expected_message = 'The method is not allowed for the requested URL.'
assert response.status_code == 405
assert response_body['status'] == 'Method Not Allowed'
assert response_body['statusCode'] == 405
assert response_body['errorMessage'] == expected_message
def test_redirect_to_original_url_should_responds_bad_request(client):
"""Should responds BadRequest when the generated url don't have and
exactly length of 7 characters
"""
response = client.get('/link/4RjLzNFg')
response_body = json.loads(response.get_data(as_text=True))
expected_message = ('The browser (or proxy) sent a request that this '
'server could not understand.').format()
assert response.status_code == 400
assert response_body['status'] == 'Bad Request'
assert response_body['statusCode'] == 400
assert response_body['errorMessage'] == expected_message
def test_redirect_to_original_url_should_responds_not_found(client):
"""Should responds NotFound when not found the
generated_url in the links table
"""
response = client.get('/link/1234567')
response_body = json.loads(response.get_data(as_text=True))
expected_message = ('The requested URL was not found on the server. '
'If you entered the URL manually please check '
'your spelling and try again.').format()
assert response.status_code == 404
assert response_body['status'] == 'Not Found'
assert response_body['statusCode'] == 404
assert response_body['errorMessage'] == expected_message
def test_redirect_to_original_url_should_responds_method_not_allowed(client):
"""Should responds MethodNotAllowed when makes a request with
a different method of GET
"""
response = client.delete('/link/1234567')
response_body = json.loads(response.get_data(as_text=True))
expected_message = 'The method is not allowed for the requested URL.'
assert response.status_code == 405
assert response_body['status'] == 'Method Not Allowed'
assert response_body['statusCode'] == 405
assert response_body['errorMessage'] == expected_message
|
StarcoderdataPython
|
6421375
|
# Copyright (c) 2015 <NAME>
# Written by <NAME> <<EMAIL>>
# See LICENSE file.
from . import namespace
class AttrDict(namespace.SettableHierarchialNS):
"""Allow access to dictionary via attributes as well as
array-style references."""
_notpresent = object()
def __init__(self, base=None):
"""Provide an AttrDict view of a dictionary.
:param base: dictionary/list to be viewed
"""
if base is None:
self.base = {}
else:
self.base = base
def repr(self, path):
return "<%s(%s)>" % (self.Namespace.__name__, ".".join(map(str, path)))
def item(self, item):
return item
def descend(self, path, create=True):
base = self.base
for p in path:
try:
base = base[p]
except:
if isinstance(create, type) and issubclass(create, Exception):
raise create(p)
elif create and isinstance(base, dict):
base[p] = {}
base = base[p]
elif not create:
return self._notpresent
else:
raise
return base
def pos(self, path):
"""View underlying dict object"""
return self.descend(path, create=KeyError)
def str(self, path):
return str(self.pos(path))
def get(self, path):
o = self.descend(path, create=False)
if isinstance(o, dict) or isinstance(o, list) or o is self._notpresent:
return self.namespace(path)
else:
return o
def set(self, path, val):
o = self.descend(path[:-1], create=True)
o[path[-1]] = val
def delete(self, path):
o = self.descend(path[:-1], create=KeyError)
del o[path[-1]]
def eq(self, path, other):
"""self == other"""
try:
return other == (self.pos(path))
except KeyError:
return False
def contains(self, path, val):
return val in self.pos(path)
def iter(self, path):
p = self.pos(path)
if isinstance(p, list):
return (self.namespace(path + (i,)) for i in range(len(p)))
else:
return self.pos(path).__iter__()
def len(self, path):
return self.pos(path).__len__()
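# A minimal usage sketch (uses only the methods defined above; attribute-style
# access such as d.foo.bar is routed through the SettableHierarchialNS base
# class, which is not shown here, so only the explicit path-based API appears):
#
#     d = AttrDict({"server": {"port": 8080}})
#     d.set(("server", "host"), "localhost")   # creates the nested key
#     d.pos(("server", "host"))                # -> "localhost"
#     d.len(("server",))                       # -> 2 (port + host)
#     d.contains(("server",), "port")          # -> True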
|
StarcoderdataPython
|
3287348
|
<reponame>davguez/date_guesser
from datetime import datetime
from bs4 import BeautifulSoup
import pytz
from date_guesser import DateGuesser, guess_date
from date_guesser.constants import Accuracy, NO_METHOD
def test_guess_date():
# Just making sure it works
url = 'https://www.nytimes.com/opinion/catalonia-spain-puigdemont.html'
html = '<could be anything></could>'
guess = guess_date(url, html)
assert guess.date is None
assert guess.accuracy is Accuracy.NONE
assert guess.method is NO_METHOD
class TestDateGuesser(object):
def setup_method(self):
self.parser = DateGuesser()
def test_parse_nonsense(self):
# Should find nothing here
url = 'https://www.nytimes.com/opinion/catalonia-spain-puigdemont.html'
html = '<could be anything></could>'
guess = self.parser.guess_date(url, html)
assert guess.date is None
assert guess.accuracy is Accuracy.NONE
assert guess.method is NO_METHOD
def test_parse_nyt(self):
url = 'https://www.nytimes.com/2017/10/13/opinion/catalonia-spain-puigdemont.html'
html = '<could be anything></could>'
guess = self.parser.guess_date(url, html)
assert guess.date == datetime(2017, 10, 13, tzinfo=pytz.utc)
assert guess.accuracy is Accuracy.DATE
assert '2017/10/13' in guess.method
html = '''
<html><head>
<meta property="article:published"
itemprop="datePublished"
content="2017-10-13T04:56:54-04:00" />
</head></html>
'''
guess = self.parser.guess_date(url, html)
assert guess.date == datetime(2017, 10, 13, 8, 56, 54, tzinfo=pytz.utc)
assert guess.accuracy is Accuracy.DATETIME
assert '2017-10-13T04:56:54-04:00' in guess.method
def test_guess_date_from_image_tag(self):
html = '''
<html><head>
<meta property="og:image" content="foo.com/2017/10/13/whatever.jpg"/>
</head></html>
'''
soup = BeautifulSoup(html, 'lxml')
guess = self.parser.guess_date_from_image_tag(soup)
assert guess.date == datetime(2017, 10, 13, tzinfo=pytz.utc)
assert guess.accuracy is Accuracy.DATE
assert '2017/10/13' in guess.method
assert 'tag' in guess.method
def test_use_more_useful_data(self):
# main url is a year after image url
url = 'https://www.nytimes.com/2017/10/opinion/catalonia-spain-puigdemont.html'
html = '''
<html><head>
<meta property="og:image" content="foo.com/2017/10/13/whatever.jpg"/>
</head></html>
'''
guess = self.parser.guess_date(url, html)
assert guess.date == datetime(2017, 10, 13, tzinfo=pytz.utc)
assert guess.accuracy is Accuracy.DATE
assert '2017/10/13' in guess.method
def test_ignore_less_useful_data(self):
# main url is a year after image url
url = 'https://www.nytimes.com/2018/10/opinion/catalonia-spain-puigdemont.html'
html = '''
<html><head>
<meta property="og:image" content="foo.com/2017/10/13/whatever.jpg"/>
</head></html>
'''
guess = self.parser.guess_date(url, html)
assert guess.date == datetime(2018, 10, 15, tzinfo=pytz.utc)
assert guess.accuracy is Accuracy.PARTIAL
assert '2018/10' in guess.method
def test_ignore_wikipedia(self):
url = 'https://en.wikipedia.org/2018/10/13/opinion/catalonia-spain-puigdemont.html'
html = '''
<html><head>
<meta property="og:image" content="foo.com/2017/10/13/whatever.jpg"/>
</head></html>
'''
guess = self.parser.guess_date(url, html)
assert guess.date is None
assert guess.accuracy is Accuracy.NONE
assert 'No date' in guess.method
def test_malformed_date(self):
url = 'https://nytimes.com/opinion/catalonia-spain-puigdemont.html'
html = '''
<html><header>
<div class="dateline">
<p>Published
<time datetime="2015-26-26T04:03:40Z" pubdate>Thursday, Mar. 26 2015, 12:26 AM EDT</time>
</p>
<p>Last updated
<time class="updated" datetime="2015-17-26T11:03:42Z" pubdate>Thursday, Mar. 26 2015, 7:17 AM EDT</time>
</p>
</div>
</header></html>
'''
guess = self.parser.guess_date(url, html)
assert guess.date.replace(tzinfo=pytz.utc) == datetime(2015, 3, 26, 0, 26, tzinfo=pytz.utc)
assert guess.accuracy is Accuracy.DATETIME
assert 'Thursday' in guess.method
|
StarcoderdataPython
|
3305928
|
<filename>src/evaluating_rewards/envs/mujoco.py
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reward functions for Gym environments."""
import abc
from typing import Optional
import gym
from imitation.util import registry, serialize
import numpy as np
from stable_baselines.common import vec_env
import tensorflow as tf
from evaluating_rewards import serialize as reward_serialize
from evaluating_rewards.rewards import base
class MujocoHardcodedReward(base.BasicRewardModel, serialize.LayersSerializable):
"""Hardcoded (non-trainable) reward model for a MuJoCo environment."""
def __init__(self, observation_space: gym.Space, action_space: gym.Space, **kwargs):
"""Constructs the reward model.
Args:
observation_space: The observation space of the environment.
action_space: The action space of the environment.
**kwargs: Extra parameters to serialize and store in the instance,
accessible as attributes.
"""
base.BasicRewardModel.__init__(self, observation_space, action_space)
serialize.LayersSerializable.__init__(
self,
layers={},
observation_space=observation_space,
action_space=action_space,
**kwargs,
)
self._reward = self.build_reward()
def __getattr__(self, name):
try:
return self._kwargs[name]
except KeyError as e:
raise AttributeError(f"Attribute '{name}' not present in self._kwargs") from e
@abc.abstractmethod
def build_reward(self) -> tf.Tensor:
"""Computes reward from observation, action and next observation.
Returns:
A tensor containing reward, shape (batch_size,).
"""
@property
def reward(self):
"""Reward tensor, shape (batch_size,)."""
return self._reward
class HalfCheetahGroundTruthReward(MujocoHardcodedReward):
"""Reward for HalfCheetah-v2. Matches ground truth with default settings."""
def __init__(
self,
observation_space: gym.Space,
action_space: gym.Space,
*,
forward: bool = True,
ctrl_coef: float = 0.1,
): # pylint:disable=useless-super-delegation
"""Constructs the reward model.
Args:
observation_space: The observation space of the environment.
action_space: The action space of the environment.
forward: whether to reward running forward (True) or backwards (False).
ctrl_coef: Scale factor for control penalty.
"""
super().__init__(observation_space, action_space, forward=forward, ctrl_coef=ctrl_coef)
def build_reward(self) -> tf.Tensor:
"""Intended to match the reward returned by gym.HalfCheetahEnv.
Known differences: none.
Returns:
A Tensor containing predicted rewards.
"""
# observations consist of concat(qpos, qvel)
n = 9
assert self.observation_space.shape == (2 * n,)
# action = control, 6-dimensional (not all bodies actuated)
assert self.action_space.shape == (6,)
# Average velocity of C.O.M.
# TODO(): would be more DRY to read dt from the environment
# However, it should not change as Gym guarantees named environments
# semantics should stay fixed. Extracting this from the environment is
# non-trivial: it'd require taking a venv as input (which makes
# serialization more challenging), and then calling env_method to access
# the dt property.
dt = 0.05 # model timestep 0.01, frameskip 5
reward_run = (self._proc_next_obs[:, 0] - self._proc_obs[:, 0]) / dt
# Control penalty
reward_ctrl = tf.reduce_sum(tf.square(self._proc_act), axis=-1)
forward_sign = 1.0 if self.forward else -1.0
reward = forward_sign * reward_run - self.ctrl_coef * reward_ctrl
return reward
class HopperGroundTruthReward(MujocoHardcodedReward):
"""Reward for Hopper-v2. Matches ground truth with default settings."""
def __init__(
self,
observation_space: gym.Space,
action_space: gym.Space,
alive_bonus: float = 1.0,
forward: bool = True,
ctrl_coef: float = 1e-3,
):
"""Constructs the reward model.
Args:
observation_space: The observation space of the environment.
action_space: The action space of the environment.
alive_bonus: constant term added to each reward in non-terminal states.
forward: Whether to reward running forward (True) or backwards (False).
ctrl_coef: Scale factor for control penalty.
"""
super().__init__(
observation_space,
action_space,
alive_bonus=alive_bonus,
ctrl_coef=ctrl_coef,
forward=forward,
)
def build_reward(self) -> tf.Tensor:
"""Intended to match the reward returned by gym.HopperEnv.
Known differences:
- If the starting observation is terminal (i.e. Gym would have returned
done at the *previous* timestep), we return a zero reward.
By contrast, Gym would compute the reward as usual, but these rewards
would typically never be observed as the episode has ended, effectively
corresponding to being in a zero-reward absorbing state.
To match Gym behavior on trajectories, it is important to respect the
`done` condition, since otherwise a transition from a terminal to a
non-terminal state is possible (which would then get reward in
subsequent steps). However, zeroing reward is sufficient to match the
Gym behavior on individual transitions.
Returns:
A Tensor containing predicted rewards.
"""
# Observation is concat(qpos, clipped(qvel)).
n = 6
assert self.observation_space.shape == (2 * n,)
assert self.action_space.shape == (3,)
forward_sign = 1.0 if self.forward else -1.0
dt = 0.008 # model timestep 0.002, frameskip 4
reward_vel = (self._proc_next_obs[:, 0] - self._proc_obs[:, 0]) / dt
reward_ctrl = tf.reduce_sum(tf.square(self._proc_act), axis=-1)
reward = forward_sign * reward_vel - self.ctrl_coef * reward_ctrl
height = self._proc_next_obs[:, 1]
angle = self._proc_next_obs[:, 2]
finite = tf.math.reduce_all(tf.math.is_finite(self._proc_next_obs), axis=-1)
small_enough = tf.math.reduce_all(tf.abs(self._proc_next_obs[:, 2:]) < 100, axis=-1)
alive_conditions = [finite, small_enough, height > 0.7, tf.abs(angle) < 0.2]
alive = tf.math.reduce_all(alive_conditions, axis=0)
# zero out rewards when starting observation was terminal
reward += self.alive_bonus * tf.cast(alive, tf.float32)
return reward
class HopperBackflipReward(MujocoHardcodedReward):
"""Reward for Hopper-v2 to make it do a backflip, rather than hop forward.
Based on reward function in footnote of:
https://openai.com/blog/deep-reinforcement-learning-from-human-preferences/
"""
def __init__(
self,
observation_space: gym.Space,
action_space: gym.Space,
*,
forward: bool = True,
ctrl_coef: float = 1e-2,
): # pylint:disable=useless-super-delegation
"""Constructs the reward model.
Args:
observation_space: The observation space of the environment.
action_space: The action space of the environment.
forward: whether to reward running forward (True) or backwards (False).
ctrl_coef: Scale factor for control penalty.
"""
super().__init__(observation_space, action_space, forward=forward, ctrl_coef=ctrl_coef)
def build_reward(self) -> tf.Tensor:
"""Intended to match the backflip reward described by Christiano et al.
Known differences: we include a control cost by default.
Returns:
A tensor containing reward, shape (batch_size,).
"""
# Observation is qpos[1:] + clipped(qvel).
npos = 6
nvel = 6
nctrl = 3
assert self.observation_space.shape == (npos + nvel,)
assert self.action_space.shape == (nctrl,)
forward_sign = 1.0 if self.forward else -1.0
backroll = -forward_sign * self._proc_obs[:, npos + 2] # qvel[2]
height = self._proc_obs[:, 1]
# Control in the same direction as the velocity?
nuncontrolled = 3 # first three bodies are unactuated.
vel_act = [
self._proc_act[:, i] * self._proc_obs[:, npos + nuncontrolled + i] for i in range(nctrl)
]
vel_act = sum(vel_act)
backslide = -self._proc_obs[:, 6]
reward_ctrl = tf.reduce_sum(tf.square(self._proc_act), axis=-1)
reward = (
backroll * (1.0 + 0.3 * height + 0.1 * vel_act + 0.05 * backslide)
- self.ctrl_coef * reward_ctrl
)
return reward
class PointMazeReward(MujocoHardcodedReward):
"""Reward for imitation/PointMaze{Left,Right}Vel-v0.
This in turn is based on on Fu et al (2018)'s PointMaze environment:
https://arxiv.org/pdf/1710.11248.pdf
"""
def __init__(
self,
observation_space: gym.Space,
action_space: gym.Space,
*,
target: np.ndarray,
ctrl_coef: float = 1e-3,
**kwargs,
): # pylint:disable=useless-super-delegation
"""Constructs the reward model.
Args:
observation_space: The observation space of the environment.
action_space: The action space of the environment.
target: The position of the target (goal state).
ctrl_coef: Scale factor for control penalty.
**kwargs: Passed through to Serialize.
"""
super().__init__(
observation_space, action_space, target=target, ctrl_coef=ctrl_coef, **kwargs
)
@classmethod
def from_venv(cls, venv: vec_env.VecEnv, *args, **kwargs):
"""Factory constructor, extracting spaces and target from environment."""
target = venv.env_method("get_body_com", "target")
assert np.all(target[0] == target)
return PointMazeReward(
venv.observation_space, venv.action_space, *args, target=target[0], **kwargs
)
def build_reward(self) -> tf.Tensor:
"""Matches the ground-truth reward, with default constructor arguments.
Known differences: none.
Returns:
A tensor containing reward, shape (batch_size,).
"""
# Two versions, one without velocity (3,) and one with velocity (6,)
assert self.observation_space.shape in [(3,), (6,)]
particle_pos = self._proc_obs[:, 0:3] # 3:6 is velocity
reward_dist = tf.norm(particle_pos - self.target, axis=-1)
reward_ctrl = tf.reduce_sum(tf.square(self._proc_act), axis=-1)
reward = -reward_dist - self.ctrl_coef * reward_ctrl
return reward
class PointMazeSparseBonusReward(PointMazeReward): # pylint:disable=too-many-ancestors
"""Alternative reward for imitation/PointMaze* with a reward spike close to a sparse target.
Example use cases:
Position `sparse_target` at the same location as the usual (dense) `target`,
and set `sparse_coef` negative so optimal policy stays at a fixed distance from `target`.
Position `sparse_target` at another location with a large enough positive `sparse_coef`
so optimal policy goes to `sparse_target` instead of `target`. Discovering `sparse_target`
is hard-exploration, so distance metrics could easily miss the difference, and RL training
might also find the suboptimal policy that goes to `target` instead of `sparse_target`.
"""
def __init__(
self,
*args,
target: np.ndarray,
sparse_target: Optional[np.ndarray] = None,
sparse_within: float = 0.05,
sparse_stop: float = 0.005,
sparse_coef: float = 5.0,
**kwargs,
):
"""Constructs the reward model.
Further than `sparse_within`, this is the same as `PointMazeReward` up to a constant.
Between `sparse_within` and `sparse_stop` from the goal, the reward increases inversely
proportional to the distance from the goal. It is constant closer than `sparse_stop`.
This auxiliary reward is multiplied by `sparse_coef`.
Args:
*args: passed through to `PointMazeReward`.
target: The position of the target (goal state).
sparse_target: The position of the sparse target; defaults to `target`.
sparse_within: auxiliary reward to agent if it gets closer than this to `sparse_target`.
sparse_stop: auxiliary reward does not increase below this distance.
sparse_coef: coefficient of sparse reward (positive for bonus, negative for penalty).
**kwargs: passed through to `PointMazeReward`.
"""
if sparse_target is None:
sparse_target = target
super().__init__(
*args,
target=target,
sparse_target=sparse_target,
sparse_within=sparse_within,
sparse_stop=sparse_stop,
sparse_coef=sparse_coef,
**kwargs,
)
def build_reward(self) -> tf.Tensor:
reward = super().build_reward()
particle_pos = self._proc_obs[:, 0:3]
sparse_dist = tf.norm(particle_pos - self.sparse_target, axis=-1)
clipped_dist = tf.math.maximum(sparse_dist, self.sparse_stop)
clipped_dist = tf.math.minimum(clipped_dist, self.sparse_within)
sparse_reward = self.sparse_within / clipped_dist
return reward + self.sparse_coef * sparse_reward
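# Worked example of the clipping in PointMazeSparseBonusReward.build_reward
# (hypothetical distances, default sparse_within=0.05 and sparse_stop=0.005):
# a particle 0.10 away from sparse_target gets sparse_reward = 0.05 / 0.05 = 1.0
# (the bonus is flat beyond sparse_within), at 0.01 away it gets
# 0.05 / 0.01 = 5.0, and anything closer than sparse_stop is capped at
# 0.05 / 0.005 = 10.0; the result is then scaled by sparse_coef.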
# Register reward models
def _register_models(format_str, cls, forward=True):
"""Registers reward models of type cls under key formatted by format_str."""
forwards = {"Forward": {"forward": forward}, "Backward": {"forward": not forward}}
control = {"WithCtrl": {}, "NoCtrl": {"ctrl_coef": 0.0}}
res = {}
for k1, cfg1 in forwards.items():
for k2, cfg2 in control.items():
fn = registry.build_loader_fn_require_space(cls, **cfg1, **cfg2)
key = format_str.format(k1 + k2)
reward_serialize.reward_registry.register(key=key, value=fn)
return res
def _register_point_maze(prefix, cls, **kwargs):
control = {"WithCtrl": {}, "NoCtrl": {"ctrl_coef": 0.0}}
for k, cfg in control.items():
fn = registry.build_loader_fn_require_space(
cls, target=np.array([0.3, 0.5, 0.0]), **cfg, **kwargs
)
reward_serialize.reward_registry.register(key=f"{prefix}{k}-v0", value=fn)
_register_models("evaluating_rewards/HalfCheetahGroundTruth{}-v0", HalfCheetahGroundTruthReward)
_register_models("evaluating_rewards/HopperGroundTruth{}-v0", HopperGroundTruthReward)
_register_models("evaluating_rewards/HopperBackflip{}-v0", HopperBackflipReward, forward=False)
_register_point_maze("evaluating_rewards/PointMazeGroundTruth", PointMazeReward)
_register_point_maze(
"evaluating_rewards/PointMazeRepellent", PointMazeSparseBonusReward, sparse_coef=-1.0
)
_register_point_maze(
"evaluating_rewards/PointMazeBetterGoal",
PointMazeSparseBonusReward,
# Locate target on the left behind the wall, so the agent (in the Left version of environment)
# has to pass the wall and go past the goal state to hit the sparse target. This is unlikely for
# random exploration (hard to get past wall) or expert (will not go past goal).
sparse_target=np.array([0.1, 0.5, 0.0]),
sparse_coef=2.0,
)
reward_serialize.reward_registry.register(
key="evaluating_rewards/PointMazeWrongTargetWithCtrl-v0",
value=registry.build_loader_fn_require_space(PointMazeReward, target=np.array([0.1, 0.1, 0.0])),
)
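# A hedged illustration (not part of the original module) of the sparse bonus term built in
# PointMazeSparseBonusReward.build_reward: constant 1.0 further than `sparse_within` from the
# sparse target, growing as sparse_within / distance inside that radius, and saturating at
# sparse_within / sparse_stop. The distances below are made-up example values.
if __name__ == "__main__":
    def _sparse_bonus(dist, sparse_within=0.05, sparse_stop=0.005):
        clipped = np.clip(dist, sparse_stop, sparse_within)
        return sparse_within / clipped
    print(_sparse_bonus(np.array([0.5, 0.05, 0.01, 0.001])))  # [ 1.  1.  5. 10.]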
|
StarcoderdataPython
|
8052687
|
<filename>src/cloud/clouds.py
from __future__ import annotations
import csv
import re
from enum import Enum
from functools import total_ordering
import geopy.distance
from util.utils import gcp_default_project
basename_key_for_aws_ssh = "cloud-perf"
class Cloud(Enum):
GCP = "GCP"
AWS = "AWS"
def __str__(self):
return self.name
__PRIVATE__INIT__ = object()
@total_ordering
class Region:
def __init__(
self,
private_init,
cloud: Cloud,
region_id: str,
lat: float = None,
long: float = None,
):
if private_init is not __PRIVATE__INIT__:
raise ValueError(
                'Call get_region() instead of Region(), which is kept "private" so that a cache can be built.'
)
assert isinstance(cloud, Cloud), type(cloud)
assert re.match(r"[a-z][a-z-]+\d$", region_id)
self.lat = lat
self.long = long
self.cloud = cloud
self.region_id = region_id
def script(self):
return f"./scripts/{self.lowercase_cloud_name()}-launch.sh"
def deletion_script(self):
return f"./scripts/{self.lowercase_cloud_name()}-delete-instances.sh"
def script_for_test_from_region(self):
return f"./scripts/do-one-test-from-{self.lowercase_cloud_name()}.sh"
def __repr__(self):
return f"{self.cloud.name}.{self.region_id}"
def __hash__(self):
return hash(repr(self))
def env(self) -> dict[str, str]:
envs = {
Cloud.GCP: {"PROJECT_ID": gcp_default_project()},
Cloud.AWS: {"BASE_KEYNAME": basename_key_for_aws_ssh},
}
return envs[self.cloud]
def lowercase_cloud_name(self):
return self.cloud.name.lower()
def __lt__(self, other):
"""Note @total_ordering above"""
return repr(self) < repr(other)
def __eq__(self, other):
return self.region_id == other.region_id and self.cloud == other.cloud
__regions: list[Region]
__regions = []
def get_regions() -> list[Region]:
global __regions
if not __regions:
fp = open(f"./region_data/locations.csv")
rdr = csv.DictReader(filter(lambda row_: row_[0] != "#", fp))
for row in rdr:
lat_s = row["latitude"]
long_s = row["longitude"]
            if lat_s and long_s:
lat = float(lat_s)
long = float(long_s)
else:
lat = long = None
cloud_s = row["cloud"]
region_id = row["region"]
__regions.append(
Region(__PRIVATE__INIT__, Cloud(cloud_s), region_id, lat, long)
)
fp.close()
return __regions
def get_region(
cloud: [Cloud | str],
region_id: str,
) -> Region:
regions = get_regions()
if isinstance(cloud, str):
cloud = Cloud(cloud)
assert isinstance(cloud, Cloud), cloud
matches = [r for r in regions if r.cloud == cloud and r.region_id == region_id]
if not matches:
print(f"{cloud}")
raise ValueError(f"Cannot find region {cloud}.{region_id}")
else:
assert len(matches) == 1, matches
ret = matches[0]
return ret
def __samecity_crosscloud_datacenters() -> list[set[Region, Region]]:
return [
{get_region(*p[0]), get_region(*p[1])}
for p in [
((Cloud.GCP, "europe-west3"), (Cloud.AWS, "eu-central-1")),
((Cloud.GCP, "asia-northeast1"), (Cloud.AWS, "ap-northeast-1")),
((Cloud.GCP, "asia-northeast2"), (Cloud.AWS, "ap-northeast-3")),
((Cloud.GCP, "asia-northeast3"), (Cloud.AWS, "ap-northeast-2")),
((Cloud.GCP, "asia-southeast1"), (Cloud.AWS, "ap-southeast-1")),
((Cloud.GCP, "australia-southeast1"), (Cloud.AWS, "ap-southeast-2")),
]
]
def interregion_distance(r1: Region, r2: Region):
ret = geopy.distance.distance((r1.lat, r1.long), (r2.lat, r2.long)).km
if ret == 0:
if r1 == r2:
pass # Test within a single cloud's region. Use 0 though in fact a region can be spread out.
else:
if {r1, r2} in __samecity_crosscloud_datacenters():
# Where we have identical coordinates for cross-cloud data-centers, it
# means that a city's coordinates were used as an approximation.
# We use 10 as an approximation for intra-city distance to avoid divide-by-zero errors.
ret = 10
else:
assert False, (
f"Should not have zero distance for region "
f"pair unless these are known same-city data-centers {r1},{r2}"
)
return ret
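# A hedged usage sketch (not part of the original module). It assumes ./region_data/locations.csv
# is present and lists the two regions below; the region names are only examples.
if __name__ == "__main__":
    frankfurt_gcp = get_region(Cloud.GCP, "europe-west3")
    frankfurt_aws = get_region("AWS", "eu-central-1")  # a plain string cloud name is accepted too
    # Prints the great-circle distance in km (10 km if the CSV stores identical same-city coordinates).
    print(interregion_distance(frankfurt_gcp, frankfurt_aws))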
|
StarcoderdataPython
|
377822
|
<reponame>tomtylor/ableton-live-packs
import requests
import re
import os
import urllib.request
import getpass
import pprint
import logging
from clint.textui import progress
from hurry.filesize import size
# Config
debug = 1
pp = pprint.PrettyPrinter(indent=4)
logging.basicConfig(level=logging.DEBUG)
# Change to your directory!
DIR_NAME = '/Volumes/DATA/AbletonPacks/'
USERNAME = input('Username:')
PASSWORD = getpass.getpass()
URL = 'https://www.ableton.com/en/login/'
PACKS = 'https://www.ableton.com/en/packs/'
################## LOCAL FUNCTIONS #################
def _download_file(url, filename):
r = requests.get(url, stream=True)
with open(filename, 'wb') as f:
total_length = int(r.headers.get('content-length'))
for chunk in progress.bar(r.iter_content(chunk_size=1024), expected_size=(total_length / 1024) + 1):
if chunk:
f.write(chunk)
f.flush()
with requests.Session() as c:
c.keep_alive = False
c.get(URL)
cookies = c.cookies
csrftoken = cookies['csrftoken']
login_data = dict(username=USERNAME, password=PASSWORD, next='/en/account/', csrfmiddlewaretoken=csrftoken)
r = c.post(URL, data=login_data, headers={"Referer": "https://www.ableton.com/", 'user-agent': 'my-app/0.0.1'})
if int(r.status_code) == 200:
print("### Status code:", r.status_code, "Logged in successfully! ###")
else:
print("### Wrong credentials - status code:", r.status_code, " ###")
page = c.get(PACKS)
page_source_code = page.text
print(page_source_code)
p = re.compile(r'https://cdn-downloads\.ableton\.com[^"\']*?\.alp', re.IGNORECASE)
m_all = p.findall(page_source_code)
if debug == 1:
print("### Changing folder to \"" + DIR_NAME + "\" ###")
os.chdir(DIR_NAME)
print(m_all)
for match in m_all:
pack_name = re.match(r'.+/([A-Za-z0-9-_.]+.alp)', match)
pack_name = pack_name.group(1)
alp_url = re.sub(r" & ", "%20&%20", match)
category = re.sub(r" & ", "_", match)
    category = re.match(r'^http.+/livepacks/(.*?)/', category)
category_name = category.group(1)
# if debug == 1: print("{}{}{}{}".format("/",category_name,"/",pack_name))
file_path = DIR_NAME + category_name + "/" + pack_name
remote_file = urllib.request.urlopen(alp_url)
remote_file_size = remote_file.headers['Content-Length']
# if int(remote_file_size) <= 10485760:
if int(remote_file_size) > 0:
if os.path.exists(file_path) and os.access(file_path, os.R_OK):
stat_info = os.stat(file_path)
file_size_on_disk = stat_info.st_size
print("Pack", pack_name, "already exists")
if int(remote_file_size) != int(file_size_on_disk):
print("Remote file", pack_name, "seems to be newer than local file (", size(int(remote_file_size)),
"compared to", size(int(file_size_on_disk)), ")")
_download_file(alp_url, pack_name)
print("Moving pack_name to \"" + category_name + "\" folder")
os.rename(pack_name, category_name + "/" + pack_name)
print("Downloading complete")
else:
if int(remote_file_size) > 1:
print("Pack", pack_name, "does not exist! Downloading...")
if not os.path.exists(category_name):
print("Creating folder \"" + category_name + "\"")
os.makedirs(category_name)
_download_file(alp_url, pack_name)
print("Moving pack_name to (", category_name, ") folder")
os.rename(pack_name, category_name + "/" + pack_name)
print("Downloading complete ")
|
StarcoderdataPython
|
3418863
|
#!/usr/local/bin/python
# coding:utf-8
import os
import re
import math
def get_name(filename):
name_list = os.listdir(filename)
return name_list
def writefile(data):
log = open("./trainingname.txt", 'a')
log.write(data)
log.write('\n')
log.close()
name_list = get_name("./training")
for name in name_list:
if name == ".DS_Store":
continue
writefile(name)
|
StarcoderdataPython
|
8086563
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Ui_chicken.ui'
#
# Created by: PyQt5 UI code generator 5.12.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(395, 502)
MainWindow.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("小鸡.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.widget = QtWidgets.QWidget(self.centralwidget)
self.widget.setGeometry(QtCore.QRect(0, 0, 441, 501))
self.widget.setObjectName("widget")
self.label = QtWidgets.QLabel(self.widget)
self.label.setGeometry(QtCore.QRect(10, 10, 381, 381))
self.label.setFocusPolicy(QtCore.Qt.NoFocus)
self.label.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
self.label.setText("")
self.label.setPixmap(QtGui.QPixmap("小鸡.png"))
self.label.setScaledContents(True)
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(self.widget)
self.label_2.setGeometry(QtCore.QRect(50, 240, 231, 211))
font = QtGui.QFont()
font.setPointSize(12)
self.label_2.setFont(font)
self.label_2.setFocusPolicy(QtCore.Qt.NoFocus)
self.label_2.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
self.label_2.setFrameShape(QtWidgets.QFrame.NoFrame)
self.label_2.setLineWidth(0)
self.label_2.setText("")
self.label_2.setTextFormat(QtCore.Qt.AutoText)
self.label_2.setPixmap(QtGui.QPixmap("notes_122.01169590643px_1190287_easyicon.net.png"))
self.label_2.setScaledContents(True)
self.label_2.setWordWrap(False)
self.label_2.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse)
self.label_2.setObjectName("label_2")
self.lineEdit = QtWidgets.QLineEdit(self.widget)
self.lineEdit.setGeometry(QtCore.QRect(90, 260, 141, 31))
font = QtGui.QFont()
font.setFamily("华文新魏")
font.setPointSize(10)
self.lineEdit.setFont(font)
self.lineEdit.setFocusPolicy(QtCore.Qt.ClickFocus)
self.lineEdit.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
self.lineEdit.setStyleSheet("background:rgba(0,0,0,0); \n"
"color: rgb(85, 85, 0);")
self.lineEdit.setInputMask("")
self.lineEdit.setFrame(False)
self.lineEdit.setEchoMode(QtWidgets.QLineEdit.Normal)
self.lineEdit.setCursorPosition(0)
self.lineEdit.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit.setCursorMoveStyle(QtCore.Qt.VisualMoveStyle)
self.lineEdit.setObjectName("lineEdit")
self.textEdit = QtWidgets.QTextEdit(self.widget)
self.textEdit.setGeometry(QtCore.QRect(50, 300, 231, 121))
font = QtGui.QFont()
font.setFamily("华文新魏")
font.setPointSize(10)
self.textEdit.setFont(font)
self.textEdit.setMouseTracking(False)
self.textEdit.setFocusPolicy(QtCore.Qt.ClickFocus)
self.textEdit.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
self.textEdit.setStyleSheet("background:rgba(0,0,0,0);\n"
"color: rgb(0,0,0);")
self.textEdit.setFrameShape(QtWidgets.QFrame.NoFrame)
self.textEdit.setTabChangesFocus(True)
self.textEdit.setObjectName("textEdit")
MainWindow.setCentralWidget(self.centralwidget)
self.closeui = QtWidgets.QAction(MainWindow)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap("关 闭.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.closeui.setIcon(icon1)
self.closeui.setObjectName("closeui")
self.ontop = QtWidgets.QAction(MainWindow)
self.ontop.setCheckable(True)
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap("固定.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.ontop.setIcon(icon2)
self.ontop.setObjectName("ontop")
self.set = QtWidgets.QAction(MainWindow)
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap("设置.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.set.setIcon(icon3)
self.set.setObjectName("set")
self.retranslateUi(MainWindow)
self.closeui.triggered['bool'].connect(MainWindow.close)
self.ontop.triggered['bool'].connect(MainWindow.WinOnTop)
self.set.triggered.connect(MainWindow.SetPattern)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.lineEdit.setText(_translate("MainWindow", "小型备忘录"))
self.textEdit.setPlaceholderText(_translate("MainWindow", "这里可以填写备忘的哦。。。"))
self.closeui.setText(_translate("MainWindow", "关闭"))
self.closeui.setShortcut(_translate("MainWindow", "Ctrl+1"))
self.ontop.setText(_translate("MainWindow", "置顶"))
self.set.setText(_translate("MainWindow", "设置"))
self.set.setShortcut(_translate("MainWindow", "Ctrl+2"))
|
StarcoderdataPython
|
1871141
|
import typing as t
from ._transpiler import TranspileOptions, transpile_to_ast
def execute(
code: t.Union[str, t.TextIO],
filename: t.Optional[str],
globals: t.Dict[str, t.Any],
locals: t.Optional[t.Mapping[str, t.Any]] = None,
options: t.Optional[TranspileOptions] = None,
) -> None:
"""
Executes Craftr DSL code in the context specified with *globals* and *locals*.
@param code: The code to execute.
@param filename: The filename where the code is from; shown in errors.
@param globals: The globals for the code.
@param locals: The locals for the code.
@param options: Options for the DSL transpiler.
"""
    if hasattr(code, 'read'):
        filename = filename or getattr(code, 'name', None)
        code = t.cast(t.TextIO, code).read()
assert isinstance(code, str)
filename = filename or '<string>'
ast = transpile_to_ast(code, filename, options)
compiled_code = compile(ast, filename, 'exec')
exec(compiled_code, globals, locals or globals)
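# A hedged usage sketch (not part of the original module). It assumes a plain Python
# assignment passes through the Craftr DSL transpiler unchanged; the scope dict and the
# "<example>" filename are placeholders.
if __name__ == '__main__':
    scope: t.Dict[str, t.Any] = {}
    execute("x = 1 + 2", "<example>", scope)
    print(scope.get("x"))  # expected: 3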
|
StarcoderdataPython
|
11307316
|
# _*_coding:utf-8_*_
class Solution:
def MoreThanHalfNum_Solution(self, numbers):
total = dict()
target = len(numbers) / 2
for item in numbers:
total[item] = total.get(item, 0) + 1
for k, v in total.items():
if v > target:
return k
return 0
s = Solution()
case = [
[1,2,3,2,4,2,5,2,3],
[1,2,3,2,2,2,5,4,2],
[1,2,3,4,5,6,7,8,8]
]
for item in case:
print(s.MoreThanHalfNum_Solution(item))
|
StarcoderdataPython
|
5052301
|
# -*- coding:utf-8 -*-
'''
Testing for the post model.
'''
from torcms.core import tools
from torcms.model.post_model import MPost
class TestApp():
'''
    Testing for the post model.
'''
def setup(self):
        print('The setup method runs before every test case in this class')
self.title = '哈哈sdfsdf'
self.uid = 'g' + tools.get_uu4d()
def test_insert(self):
uid = self.uid
post_data = {
'title': self.title,
'keywords': 'sd,as',
'cnt_md': '## adslkfjasdf\n lasdfkjsadf',
'logo': '/static/',
'user_name': 'ss',
'extinfo': '',
'valid': 1,
'kind': '1',
}
extinfo = {}
MPost.add_meta(uid, post_data, extinfo)
tt = MPost.get_by_uid(uid)
assert tt.uid == uid
def test_insert2(self):
uid = self.uid
post_data = {
'title': '',
'keywords': 'sd,as',
'cnt_md': '## adslkfjasdf\n lasdfkjsadf',
'logo': '/static/',
'user_name': 'ss',
'extinfo': ''
}
extinfo = {}
MPost.add_meta(uid, post_data, extinfo)
tt = MPost.get_by_uid(uid)
        assert tt is None
post_data = {
'title': '1',
'keywords': 'sd,as',
'cnt_md': '## adslkfjasdf\n lasdfkjsadf',
'logo': '/static/',
'user_name': 'ss',
'extinfo': ''
}
uu = MPost.add_meta(self.uid, post_data)
assert uu == False
post_data = {
'title': '天',
'keywords': 'sd,as',
'cnt_md': '## adslkfjasdf\n lasdfkjsadf',
'logo': '/static/',
'user_name': 'ss',
'extinfo': ''
}
uu = MPost.add_meta(self.uid, post_data)
assert uu == False
    def test_update(self):
assert True
def tearDown(self):
print("function teardown")
tt = MPost.get_by_uid(self.uid)
if tt:
MPost.delete(tt.uid)
|
StarcoderdataPython
|
6693840
|
<gh_stars>0
import os
from pip._vendor.colorama import Fore, Back
from ws.RLUtils.monitoring.tracing.log_mgt import log_mgt
if __name__ == '__main__':
cwd = os.path.curdir
acwd = os.path.join(cwd, '_tests')
log_dir = os.path.join(acwd, "logs")
fn_log2 = log_mgt(log_dir, show_debug=True, fixed_log_file=False)[0]
color_black_background = Fore.YELLOW + Back.BLACK
fn_log2('3. show_debug = True, debug=True', color=color_black_background, debug=True)
fn_log2('4. show_debug = True, debug=False', debug=False)
|
StarcoderdataPython
|
1920237
|
<filename>problem/baseproblem.py
# flake8: noqa: F403
from firedrake import *
from firedrake.utils import cached_property
from abc import ABCMeta, abstractproperty, abstractmethod
from firedrake.petsc import PETSc
class Problem(object):
__metaclass__ = ABCMeta
def __init__(self, N=None, degree=None, dimension=None, refinements=None, quadrilateral=False):
super(Problem, self).__init__()
args, _ = self.argparser().parse_known_args()
if args.help:
import sys
self.argparser().print_help()
sys.exit(0)
self.degree = degree or args.degree
self.dimension = dimension or args.dimension
self.N = N or args.size
self.args = args
self.refinements = refinements
self.quadrilateral = quadrilateral
def reinit(self, degree=None, size=None, refinements=None):
if degree is None:
degree = self.degree
if size is None:
size = self.N
if refinements is None:
refinements = self.refinements
degree_changed = degree != self.degree
mesh_changed = (size != self.N
or refinements != self.refinements)
if not (degree_changed or mesh_changed):
return
for attr in ["function_space", "u", "F", "J", "Jp", "bcs",
"nullspace", "near_nullspace", "output_fields",
"forcing", "appctx"]:
try:
delattr(self, attr)
except AttributeError:
pass
if mesh_changed:
try:
delattr(self, "mesh")
except AttributeError:
pass
self.degree = degree
self.N = size
self.refinements = refinements
@abstractproperty
def parameter_names(self):
pass
@property
def comm(self):
return self.mesh.comm
@cached_property
def mesh(self):
if self.dimension == 2:
mesh = UnitSquareMesh(self.N, self.N, quadrilateral=self.quadrilateral)
elif self.dimension == 3:
if self.quadrilateral:
mesh = UnitSquareMesh(self.N, self.N, quadrilateral=self.quadrilateral)
else:
mesh = UnitCubeMesh(self.N, self.N, self.N)
else:
raise ValueError("Invalid dimension, %d", self.dimension)
if self.refinements is not None:
dm = mesh._plex
from firedrake.mg.impl import filter_exterior_facet_labels
for _ in range(self.refinements):
dm.setRefinementUniform(True)
dm = dm.refine()
dm.removeLabel("interior_facets")
dm.removeLabel("op2_core")
dm.removeLabel("op2_non_core")
dm.removeLabel("op2_exec_halo")
dm.removeLabel("op2_non_exec_halo")
filter_exterior_facet_labels(dm)
mesh = Mesh(dm, dim=mesh.ufl_cell().geometric_dimension(),
distribute=False, reorder=True)
if self.dimension == 3 and self.quadrilateral:
N = self.N
if self.refinements is not None:
N *= 2**self.refinements
mesh = ExtrudedMesh(mesh, layers=N)
return mesh
@abstractproperty
def name(self):
pass
@abstractproperty
def function_space(self):
pass
@cached_property
def u(self):
return Function(self.function_space, name="solution")
@abstractproperty
def F(self):
pass
@cached_property
def J(self):
return derivative(self.F, self.u)
@property
def Jp(self):
return None
@cached_property
def bcs(self):
return None
@cached_property
def nullspace(self):
return None
@cached_property
def near_nullspace(self):
return None
@property
def appctx(self):
return None
def solver(self, parameters=None):
problem = NonlinearVariationalProblem(self.F, self.u, bcs=self.bcs,
Jp=self.Jp)
solver = NonlinearVariationalSolver(problem, options_prefix="",
nullspace=self.nullspace,
near_nullspace=self.near_nullspace,
appctx=self.appctx,
solver_parameters=parameters)
# PETSc.Sys.syncPrint("[%d] mesh sizes %s, dof sizes %s" % (self.mesh.comm.rank,
# self.mesh.cell_set.sizes,
# self.function_space.dof_dset.sizes))
# PETSc.Sys.syncFlush()
return solver
@abstractmethod
def argparser():
pass
@abstractproperty
def output_fields(self):
pass
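# A hedged sketch (not part of the original module) of a minimal concrete subclass; the
# Poisson residual is illustrative only, and the argparser flags simply mirror the
# attributes Problem.__init__ reads (help, degree, dimension, size).
class PoissonProblem(Problem):
    name = "Poisson"
    parameter_names = ()
    @staticmethod
    def argparser():
        import argparse
        parser = argparse.ArgumentParser(add_help=False)
        parser.add_argument("--help", action="store_true", default=False)
        parser.add_argument("--degree", type=int, default=1)
        parser.add_argument("--dimension", type=int, default=2)
        parser.add_argument("--size", type=int, default=8)
        return parser
    @cached_property
    def function_space(self):
        return FunctionSpace(self.mesh, "CG", self.degree)
    @cached_property
    def F(self):
        v = TestFunction(self.function_space)
        return inner(grad(self.u), grad(v))*dx - Constant(1)*v*dx
    @cached_property
    def bcs(self):
        return DirichletBC(self.function_space, 0, "on_boundary")
    @cached_property
    def output_fields(self):
        return (self.u,)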
|
StarcoderdataPython
|
124611
|
<filename>Route_prediction/error.py
from theano import tensor
import theano
import numpy
def const(v):
if theano.config.floatX == 'float32':
return numpy.float32(v)
else:
return numpy.float64(v)
rearth = const(6371)
deg2rad = const(3.141592653589793 / 180)
def hdist(a, b):
lat1 = a[:, 0] * deg2rad
lon1 = a[:, 1] * deg2rad
lat2 = b[:, 0] * deg2rad
lon2 = b[:, 1] * deg2rad
dlat = abs(lat1-lat2)
dlon = abs(lon1-lon2)
al = tensor.sin(dlat/2)**2 + tensor.cos(lat1) * tensor.cos(lat2) * (tensor.sin(dlon/2)**2)
d = tensor.arctan2(tensor.sqrt(al), tensor.sqrt(const(1)-al))
hd = const(2) * rearth * d
    return tensor.switch(tensor.isnan(hd), (a-b).norm(2, axis=1), hd)
def erdist(a, b):
lat1 = a[:, 0] * deg2rad
lon1 = a[:, 1] * deg2rad
lat2 = b[:, 0] * deg2rad
lon2 = b[:, 1] * deg2rad
x = (lon2-lon1) * tensor.cos((lat1+lat2)/2)
y = (lat2-lat1)
return tensor.sqrt(tensor.sqr(x) + tensor.sqr(y)) * rearth
def rmsle(a, b):
return tensor.sqrt( ( (tensor.log(a+1)-tensor.log(b+1)) ** 2 ).mean() )
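# A small numpy cross-check (not part of the original module) for eyeballing the Theano
# haversine graph on concrete coordinates; the Lisbon/Porto pair below is an example only.
def hdist_numpy(a, b):
    lat1, lon1 = numpy.radians(a[:, 0]), numpy.radians(a[:, 1])
    lat2, lon2 = numpy.radians(b[:, 0]), numpy.radians(b[:, 1])
    al = numpy.sin((lat2 - lat1) / 2)**2 + numpy.cos(lat1) * numpy.cos(lat2) * numpy.sin((lon2 - lon1) / 2)**2
    return 2 * 6371 * numpy.arctan2(numpy.sqrt(al), numpy.sqrt(1 - al))
# print(hdist_numpy(numpy.array([[38.72, -9.14]]), numpy.array([[41.16, -8.63]])))  # roughly 270 km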
|
StarcoderdataPython
|
3263979
|
<gh_stars>1-10
import pytest
from reserva.core.models import User
@pytest.mark.django_db
def test_user_name():
user = User.objects.create_user("joanedoe", first_name="Joane", last_name="Doe")
assert user.name == "<NAME>"
assert str(user) == user.name
@pytest.mark.django_db
def test_user_without_name():
user = User.objects.create_user("joanedoe")
assert user.name == ""
assert str(user) == user.username
@pytest.mark.django_db
def test_user_without_last_name():
user = User.objects.create_user("joanedoe", first_name="Joane")
assert user.name == "Joane"
assert str(user) == user.name
|
StarcoderdataPython
|
1758103
|
<reponame>OSMadmin/osmclient
# Copyright 2018 Telefonica
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
OSM vnfd API handling
"""
from osmclient.common.exceptions import NotFound
from osmclient.common.exceptions import ClientException
from osmclient.common import utils
import json
import yaml
import magic
from os.path import basename
import logging
import os.path
from urllib.parse import quote
import tarfile
from osm_im.validation import Validation as validation_im
class Vnfd(object):
def __init__(self, http=None, client=None):
self._http = http
self._client = client
self._logger = logging.getLogger('osmclient')
self._apiName = '/vnfpkgm'
self._apiVersion = '/v1'
self._apiResource = '/vnf_packages'
self._apiBase = '{}{}{}'.format(self._apiName,
self._apiVersion, self._apiResource)
#self._apiBase='/vnfds'
def list(self, filter=None):
self._logger.debug("")
self._client.get_token()
filter_string = ''
if filter:
filter_string = '?{}'.format(filter)
_, resp = self._http.get2_cmd('{}{}'.format(self._apiBase,filter_string))
if resp:
return json.loads(resp)
return list()
def get(self, name):
self._logger.debug("")
self._client.get_token()
if utils.validate_uuid4(name):
for vnfd in self.list():
if name == vnfd['_id']:
return vnfd
else:
for vnfd in self.list():
if 'name' in vnfd and name == vnfd['name']:
return vnfd
raise NotFound("vnfd {} not found".format(name))
def get_individual(self, name):
self._logger.debug("")
vnfd = self.get(name)
# It is redundant, since the previous one already gets the whole vnfpkginfo
# The only difference is that a different primitive is exercised
try:
_, resp = self._http.get2_cmd('{}/{}'.format(self._apiBase, vnfd['_id']))
#print(yaml.safe_dump(resp))
if resp:
return json.loads(resp)
except NotFound:
raise NotFound("vnfd '{}' not found".format(name))
raise NotFound("vnfd '{}' not found".format(name))
def get_thing(self, name, thing, filename):
self._logger.debug("")
vnfd = self.get(name)
headers = self._client._headers
headers['Accept'] = 'application/binary'
http_code, resp = self._http.get2_cmd('{}/{}/{}'.format(self._apiBase, vnfd['_id'], thing))
#print('HTTP CODE: {}'.format(http_code))
#print('RESP: {}'.format(resp))
#if http_code in (200, 201, 202, 204):
if resp:
#store in a file
return json.loads(resp)
#else:
# msg = ""
# if resp:
# try:
# msg = json.loads(resp)
# except ValueError:
# msg = resp
# raise ClientException("failed to get {} from {} - {}".format(thing, name, msg))
def get_descriptor(self, name, filename):
self._logger.debug("")
self.get_thing(name, 'vnfd', filename)
def get_package(self, name, filename):
self._logger.debug("")
self.get_thing(name, 'package_content', filename)
def get_artifact(self, name, artifact, filename):
self._logger.debug("")
self.get_thing(name, 'artifacts/{}'.format(artifact), filename)
def delete(self, name, force=False):
self._logger.debug("")
self._client.get_token()
vnfd = self.get(name)
querystring = ''
if force:
querystring = '?FORCE=True'
http_code, resp = self._http.delete_cmd('{}/{}{}'.format(self._apiBase,
vnfd['_id'], querystring))
#print('HTTP CODE: {}'.format(http_code))
#print('RESP: {}'.format(resp))
if http_code == 202:
print('Deletion in progress')
elif http_code == 204:
print('Deleted')
else:
msg = resp or ""
# if resp:
# try:
# msg = json.loads(resp)
# except ValueError:
# msg = resp
raise ClientException("failed to delete vnfd {} - {}".format(name, msg))
def create(self, filename, overwrite=None, update_endpoint=None, skip_charm_build=False,
override_epa=False, override_nonepa=False, override_paravirt=False):
self._logger.debug("")
if os.path.isdir(filename):
filename = filename.rstrip('/')
filename = self._client.package_tool.build(filename, skip_validation=False, skip_charm_build=skip_charm_build)
print('Uploading package {}'.format(filename))
self.create(filename, overwrite=overwrite, update_endpoint=update_endpoint,
override_epa=override_epa, override_nonepa=override_nonepa,
override_paravirt=override_paravirt)
else:
self._client.get_token()
mime_type = magic.from_file(filename, mime=True)
if mime_type is None:
raise ClientException(
"Unexpected MIME type for file {}: MIME type {}".format(
filename, mime_type)
)
headers = self._client._headers
headers['Content-Filename'] = basename(filename)
if mime_type in ['application/yaml', 'text/plain', 'application/json']:
headers['Content-Type'] = 'text/plain'
elif mime_type in ['application/gzip', 'application/x-gzip']:
headers['Content-Type'] = 'application/gzip'
#headers['Content-Type'] = 'application/binary'
# Next three lines are to be removed in next version
#headers['Content-Filename'] = basename(filename)
#file_size = stat(filename).st_size
#headers['Content-Range'] = 'bytes 0-{}/{}'.format(file_size - 1, file_size)
else:
raise ClientException(
"Unexpected MIME type for file {}: MIME type {}".format(
filename, mime_type)
)
special_ow_string = ''
if override_epa or override_nonepa or override_paravirt:
# If override for EPA, non-EPA or paravirt is required, get the descriptor data
descriptor_data = None
if mime_type in ['application/yaml', 'text/plain', 'application/json']:
with open(filename) as df:
descriptor_data = df.read()
elif mime_type in ['application/gzip', 'application/x-gzip']:
tar_object = tarfile.open(filename, "r:gz")
descriptor_list = []
for member in tar_object:
if member.isreg():
if '/' not in os.path.dirname(member.name) and member.name.endswith('.yaml'):
descriptor_list.append(member.name)
if len(descriptor_list) > 1:
raise ClientException('Found more than one potential descriptor in the tar.gz file')
elif len(descriptor_list) == 0:
raise ClientException('No descriptor was found in the tar.gz file')
with tar_object.extractfile(descriptor_list[0]) as df:
descriptor_data = df.read()
tar_object.close()
if not descriptor_data:
raise ClientException('Descriptor could not be read')
desc_type, vnfd = validation_im.yaml_validation(self, descriptor_data)
validation_im.pyangbind_validation(self, desc_type, vnfd)
vnfd = yaml.safe_load(descriptor_data)
vdu_list = []
for k in vnfd:
# Get only the first descriptor in case there are many in the yaml file
# k can be vnfd:vnfd-catalog or vnfd-catalog. This check is skipped
first_vnfd = vnfd[k]['vnfd'][0]
vdu_list = first_vnfd.get('vdu',[])
                    break
for vdu_number, vdu in enumerate(vdu_list):
if override_epa:
guest_epa = {}
guest_epa["mempage-size"] = "LARGE"
guest_epa["cpu-pinning-policy"] = "DEDICATED"
guest_epa["cpu-thread-pinning-policy"] = "PREFER"
guest_epa["numa-node-policy"] = {}
guest_epa["numa-node-policy"]["node-cnt"] = 1
guest_epa["numa-node-policy"]["mem-policy"] = "STRICT"
#guest_epa["numa-node-policy"]["node"] = []
#guest_epa["numa-node-policy"]["node"].append({"id": "0", "paired-threads": {"num-paired-threads": 1} })
special_ow_string = "{}vdu.{}.guest-epa={};".format(special_ow_string,vdu_number,quote(yaml.safe_dump(guest_epa)))
headers['Query-String-Format'] = 'yaml'
if override_nonepa:
special_ow_string = "{}vdu.{}.guest-epa=;".format(special_ow_string,vdu_number)
if override_paravirt:
for iface_number in range(len(vdu['interface'])):
special_ow_string = "{}vdu.{}.interface.{}.virtual-interface.type=PARAVIRT;".format(
special_ow_string,vdu_number,iface_number)
special_ow_string = special_ow_string.rstrip(";")
headers["Content-File-MD5"] = utils.md5(filename)
http_header = ['{}: {}'.format(key,val)
for (key,val) in list(headers.items())]
self._http.set_http_header(http_header)
if update_endpoint:
http_code, resp = self._http.put_cmd(endpoint=update_endpoint, filename=filename)
else:
ow_string = ''
if special_ow_string:
if overwrite:
overwrite = "{};{}".format(overwrite,special_ow_string)
else:
overwrite = special_ow_string
if overwrite:
ow_string = '?{}'.format(overwrite)
self._apiResource = '/vnf_packages_content'
self._apiBase = '{}{}{}'.format(self._apiName,
self._apiVersion, self._apiResource)
endpoint = '{}{}'.format(self._apiBase,ow_string)
http_code, resp = self._http.post_cmd(endpoint=endpoint, filename=filename)
#print('HTTP CODE: {}'.format(http_code))
#print('RESP: {}'.format(resp))
if http_code in (200, 201, 202):
if resp:
resp = json.loads(resp)
if not resp or 'id' not in resp:
raise ClientException('unexpected response from server: {}'.format(resp))
print(resp['id'])
elif http_code == 204:
print('Updated')
# else:
# msg = "Error {}".format(http_code)
# if resp:
# try:
# msg = "{} - {}".format(msg, json.loads(resp))
# except ValueError:
# msg = "{} - {}".format(msg, resp)
# raise ClientException("failed to create/update vnfd - {}".format(msg))
def update(self, name, filename):
self._logger.debug("")
self._client.get_token()
vnfd = self.get(name)
endpoint = '{}/{}/package_content'.format(self._apiBase, vnfd['_id'])
self.create(filename=filename, update_endpoint=endpoint)
|
StarcoderdataPython
|
1839664
|
from .FlowNetS import *
from .FlowNetC import *
|
StarcoderdataPython
|
5178466
|
<reponame>nano-db/NanoCube<gh_stars>1-10
import csv
import datetime
import os
from server.nanocube import NanoCube
samples = dict(
simple=dict(
path=os.path.dirname(__file__) + "/samples/simple_cube.csv",
schema=["Devise"],
loc_granularity=2
)
)
def mock_cube(name="simple"):
parser = '%m/%d/%Y %H:%M:%S'
cube = NanoCube(samples[name]['schema'], loc_granularity=samples[name]['loc_granularity'])
with open(samples[name]['path']) as sample_file:
reader = csv.DictReader(sample_file, delimiter=",")
for row in reader:
data = dict()
data['Longitude'] = float(row['Longitude'])
data['Latitude'] = float(row['Latitude'])
data['Time'] = datetime.datetime.strptime(row['Time'], parser)
data['Devise'] = row['Devise']
cube.add(data)
return cube
|
StarcoderdataPython
|
1901621
|
<gh_stars>0
import fcntl
import select
import time
from pygdbmi import gdbmiparser
import os
DEFAULT_GDB_TIMEOUT_SEC = 1
DEFAULT_TIME_TO_CHECK_FOR_ADDITIONAL_OUTPUT_SEC = 1
class GdbTimeoutError(ValueError):
pass
class IoManager:
def __init__(
self,
stdin,
stdout,
stderr,
time_to_check_for_additional_output_sec=DEFAULT_TIME_TO_CHECK_FOR_ADDITIONAL_OUTPUT_SEC,
):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.stdin_fileno = self.stdin.fileno()
self.stdout_fileno = self.stdout.fileno()
self.stderr_fileno = self.stderr.fileno() if self.stderr else -1
self.read_list = []
if self.stdout:
self.read_list.append(self.stdout_fileno)
self.write_list = [self.stdin_fileno]
self._incomplete_output = {"stdout": None, "stderr": None}
self.time_to_check_for_additional_output_sec = (
time_to_check_for_additional_output_sec
)
self._allow_overwrite_timeout_times = (
self.time_to_check_for_additional_output_sec > 0
)
make_non_blocking(self.stdout)
if self.stderr:
make_non_blocking(self.stderr)
def get_gdb_response(
self, timeout_sec=DEFAULT_GDB_TIMEOUT_SEC, raise_error_on_timeout=True
):
if timeout_sec < 0:
timeout_sec = 0
retval = self._get_responses(timeout_sec)
if not retval and raise_error_on_timeout:
raise GdbTimeoutError(
"Did not get response from gdb after %s seconds" % timeout_sec
)
else:
return retval
def _get_responses(self, timeout_sec):
timeout_time_sec = time.time() + timeout_sec
responses = []
while True:
select_timeout = timeout_time_sec - time.time()
if select_timeout <= 0:
select_timeout = 0
events, _, _ = select.select(
self.read_list, [], [], select_timeout)
responses_list = None # to avoid infinite loop if using Python 2
for fileno in events:
# new data is ready to read
if fileno == self.stdout_fileno:
self.stdout.flush()
raw_output = self.stdout.read()
stream = "stdout"
elif fileno == self.stderr_fileno:
self.stderr.flush()
raw_output = self.stderr.read()
stream = "stderr"
else:
raise ValueError(
"Developer error. Got unexpected file number %d" % fileno
)
responses_list = self._get_responses_list(raw_output, stream)
responses += responses_list
if timeout_sec == 0: # just exit immediately
break
elif responses_list and self._allow_overwrite_timeout_times:
# update timeout time to potentially be closer to now to avoid lengthy wait times when nothing is being output by gdb
timeout_time_sec = min(
time.time() + self.time_to_check_for_additional_output_sec,
timeout_time_sec,
)
elif time.time() > timeout_time_sec:
break
return responses
def _get_responses_list(
self, raw_output, stream
):
responses = []
(_new_output, self._incomplete_output[stream],) = _buffer_incomplete_responses(
raw_output, self._incomplete_output.get(stream)
)
if not _new_output:
return responses
response_list = list(
filter(lambda x: x, _new_output.decode(
errors="replace").split("\n"))
)
for response in response_list:
if gdbmiparser.response_is_finished(response):
pass
else:
parsed_response = gdbmiparser.parse_response(response)
parsed_response["stream"] = stream
responses.append(parsed_response)
return responses
def write(
self,
mi_cmd_to_write,
timeout_sec=DEFAULT_GDB_TIMEOUT_SEC,
raise_error_on_timeout=True,
read_response=True,
):
if timeout_sec < 0:
timeout_sec = 0
if isinstance(mi_cmd_to_write, str):
mi_cmd_to_write_str = mi_cmd_to_write
elif isinstance(mi_cmd_to_write, list):
mi_cmd_to_write_str = "\n".join(mi_cmd_to_write)
else:
raise TypeError(
"The gdb mi command must a be str or list. Got "
+ str(type(mi_cmd_to_write))
)
if not mi_cmd_to_write_str.endswith("\n"):
mi_cmd_to_write_nl = mi_cmd_to_write_str + "\n"
else:
mi_cmd_to_write_nl = mi_cmd_to_write_str
_, outputready, _ = select.select([], self.write_list, [], timeout_sec)
for fileno in outputready:
if fileno == self.stdin_fileno:
# ready to write
self.stdin.write(mi_cmd_to_write_nl.encode()) # type: ignore
# must flush, otherwise gdb won't realize there is data
# to evaluate, and we won't get a response
self.stdin.flush() # type: ignore
if read_response is True:
return self.get_gdb_response(
timeout_sec=timeout_sec, raise_error_on_timeout=raise_error_on_timeout
)
else:
return []
def _buffer_incomplete_responses(
raw_output, buf
):
if raw_output:
if buf:
raw_output = b"".join([buf, raw_output])
buf = None
if b"\n" not in raw_output:
buf = raw_output
raw_output = None
elif not raw_output.endswith(b"\n"):
remainder_offset = raw_output.rindex(b"\n") + 1
buf = raw_output[remainder_offset:]
raw_output = raw_output[:remainder_offset]
return (raw_output, buf)
def make_non_blocking(file_obj):
fcntl.fcntl(file_obj, fcntl.F_SETFL, os.O_NONBLOCK)
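# A hedged usage sketch (not part of the original module): wiring IoManager to a gdb
# machine-interface subprocess. It assumes a gdb binary on PATH; the MI command is an example.
if __name__ == "__main__":
    import subprocess
    process = subprocess.Popen(
        ["gdb", "--interpreter=mi2"],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    io_manager = IoManager(process.stdin, process.stdout, process.stderr)
    for response in io_manager.write("-gdb-version"):
        print(response)
    process.terminate()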
|
StarcoderdataPython
|
4886742
|
import socket,time,math
class pyMultiWii:
def __init__(self,TCP_IP, TCP_PORT,debug=False):
self.TCP_IP=TCP_IP
self.TCP_PORT=TCP_PORT
self.BUFFER_SIZE = 1024
self.debug=debug
self.mySocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.mySocket.connect((TCP_IP, TCP_PORT))
        headerArray = bytearray([36, 77, 60])  # MSP header bytes: '$', 'M', '<'
self.valueArray=bytearray([])
roll=1500
pitch=1500
throttle=1500
yaw=1500
aux1=1200
aux2=1000
aux3=1500
aux4=1200
self.valueArray.extend(headerArray)
        self.valueArray.append(16)        # payload size: 16 bytes (8 RC channels x 2 bytes)
        self.valueArray.append(200)       # MSP_SET_RAW_RC command id
        self.valueArray.extend([220, 5])  # roll = 1500 (little-endian)
        self.valueArray.extend([220, 5])  # pitch = 1500
        self.valueArray.extend([220, 5])  # throttle = 1500
        self.valueArray.extend([220, 5])  # yaw = 1500
        self.valueArray.extend([176, 4])  # aux1 = 1200
        self.valueArray.extend([232, 3])  # aux2 = 1000
        self.valueArray.extend([220, 5])  # aux3 = 1500
        self.valueArray.extend([176, 4])  # aux4 = 1200
        self.valueArray.append(234)       # checksum: XOR of size, command and payload bytes
self.Array=self.valueArray[:]
if(self.debug):
print(self.Array)
self.isConnected=False
def changeCRC(self):
self.CRCArray=self.Array[3:-1]
self.CRCValue=0
for d in self.CRCArray:
self.CRCValue= self.CRCValue^d
return self.CRCValue
def getBytes(self,value):
self.LSB=value % 256
self.MSB=math.floor(value/256)
return bytearray([self.LSB,self.MSB])
## def connect(self):
## self.isConnected=True
## if(self.debug):
## print ("Connected to Drone")
## self.sendPacket(self.Array)
def arm(self):
self.Array[19]=220
self.Array[20]=5
Val=self.changeCRC()
self.Array[21]=Val
if(self.debug):
print("Connected")
self.sendPacket(self.Array)
## else:
## self.Array[21]=0
## if(self.debug):
## print("Not Connected")
## self.sendPacket(self.Array)
def disarm(self):
self.Array[19]=176
self.Array[20]=4
Val=self.changeCRC()
self.Array[21]=Val
self.sendPacket(self.Array)
def setThrottle(self,value):
arr=bytearray([])
arr.extend(self.getBytes(value))
self.Array[9]=arr[0]
self.Array[10]=arr[1]
Val=self.changeCRC()
self.Array[21]=Val
self.sendPacket(self.Array)
def setRoll(self,value):
arr=bytearray([])
arr.extend(self.getBytes(value))
self.Array[5]=arr[0]
self.Array[6]=arr[1]
Val=self.changeCRC()
self.Array[21]=Val
self.sendPacket(self.Array)
def setPitch(self,value):
arr=bytearray([])
arr.extend(self.getBytes(value))
self.Array[7]=arr[0]
self.Array[8]=arr[1]
Val=self.changeCRC()
self.Array[21]=Val
self.sendPacket(self.Array)
def setYaw(self,value):
arr=bytearray([])
arr.extend(self.getBytes(value))
self.Array[11]=arr[0]
self.Array[12]=arr[1]
Val=self.changeCRC()
self.Array[21]=Val
self.sendPacket(self.Array)
def sendPacket(self,lValueArray):
self.mySocket.send(lValueArray)
def recieveResponse(self):
return self.mySocket.recv(self.BUFFER_SIZE)
def disconnect(self):
self.mySocket.close()
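# A hedged usage sketch (not part of the original module). The IP address, port and RC values
# are placeholders; a real drone expects its own TCP endpoint and carefully chosen values.
if __name__ == "__main__":
    drone = pyMultiWii("192.168.4.1", 23, debug=True)
    drone.arm()
    time.sleep(1)
    drone.setThrottle(1300)
    time.sleep(2)
    drone.disarm()
    drone.disconnect()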
|
StarcoderdataPython
|
1778009
|
import torch.nn.utils.clip_grad as clip
from torch.optim.optimizer import Optimizer
from torch.optim.lr_scheduler import _LRScheduler
class SchedulerOptimizer():
def __init__(self, optimizer, scheduler, gradclip):
assert(isinstance(optimizer, Optimizer))
self.optimizer = optimizer
self.scheduler, self.clip_grad_norm = None, None
if scheduler is not None:
assert(isinstance(scheduler, _LRScheduler))
assert(scheduler.optimizer is optimizer)
self.scheduler = scheduler
if gradclip[0]: self.clip_grad_norm = gradclip[1]
def __clip_gradient__(self):
if self.clip_grad_norm is None: return
for group in self.optimizer.param_groups:
max_norm = self.clip_grad_norm
clip.clip_grad_norm_(group['params'], max_norm)
def zero_grad(self):
self.optimizer.zero_grad()
def step(self, gradscaler = None):
if gradscaler is not None:
gradscaler.unscale_(self.optimizer)
self.__clip_gradient__()
if gradscaler is not None:
gradscaler.step(self.optimizer)
else: self.optimizer.step()
def update(self):
if self.scheduler is not None:
self.scheduler.step()
def load_state_dict(self, state):
self.optimizer.load_state_dict(state['optimizer'])
if self.scheduler is not None:
self.scheduler.load_state_dict(state['scheduler'])
def state_dict(self, state = {}):
state['optimizer'] = self.optimizer.state_dict()
if self.scheduler is not None:
state['scheduler'] = self.scheduler.state_dict()
return state
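# A hedged usage sketch (not part of the original module); the model, learning rate and
# clipping threshold are placeholder choices.
if __name__ == "__main__":
    import torch
    model = torch.nn.Linear(4, 2)
    base_optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    scheduler = torch.optim.lr_scheduler.StepLR(base_optimizer, step_size=10)
    opt = SchedulerOptimizer(base_optimizer, scheduler, gradclip=(True, 1.0))
    loss = model(torch.randn(8, 4)).pow(2).mean()
    opt.zero_grad()
    loss.backward()
    opt.step()    # clips gradients, then steps the wrapped optimizer
    opt.update()  # advances the LR scheduler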
|
StarcoderdataPython
|
3426392
|
<filename>downloader.py
import aiohttp
from aiohttp_retry import RetryClient
import asyncio
import os
import re
import heapq
from datetime import datetime, timedelta
import logging
MAX_FILE_NAME_LEN = 250
ONE_SEC = timedelta(seconds=1)
class DefaultArgs:
qps = 100
downloader_cache_dir = 'cache'
class DownloadHistory:
def __init__(self):
self.records = []
def oldest_record(self):
return self.records[0]
def remote_old_records(self, t: datetime):
while len(self.records) > 0 and self.oldest_record() < t:
heapq.heappop(self.records)
def add_record(self, t: datetime):
heapq.heappush(self.records, t)
def __len__(self):
return len(self.records)
class Downloader(object):
def __init__(self, http_client_factory = RetryClient):
self.cache_dir = None
self.download_paused = False
self.http_client_factory = http_client_factory
self.prepare(DefaultArgs())
self.args_registered = False
self.download_history = DownloadHistory()
self.semaphore = None
def register_options(self, argparser):
argparser.add_argument('--cache', dest='downloader_cache_dir', default=DefaultArgs.downloader_cache_dir,
help='Path to the folder where downloaded urls will be cached. To disable cache, delete the folder.')
argparser.add_argument('--qps', type=int, default=DefaultArgs.qps, help='Limit the number of concurrent requests sent to the server')
def create_session(self):
return self.http_client_factory()
def prepare(self, args):
self.qps = args.qps
self.cache_dir = args.downloader_cache_dir
self.max_url_len = MAX_FILE_NAME_LEN - len(self.cache_dir)
if not os.path.exists(self.cache_dir):
os.mkdir(self.cache_dir)
self.args_registered = True
def use_cache(self):
return self.cache_dir is not None
def cache_key(self, url):
assert self.use_cache()
url = re.escape(url).replace('/','\\')
if len(url) > self.max_url_len:
url = url[len(url)-self.max_url_len:]
return os.path.join(self.cache_dir, url)
def has_cache(self, url):
return self.use_cache() and os.path.exists(self.cache_key(url))
def get_cache(self, url):
with open(self.cache_key(url), 'r') as url_cache:
return url_cache.read()
def put_cache(self, url, contents):
if not contents:
return
with open(self.cache_key(url), 'w') as url_cache:
url_cache.write(contents)
async def try_to_download(self, session, url):
async with self.semaphore:
curr_time = datetime.now()
self.download_history.remote_old_records(curr_time-ONE_SEC)
if len(self.download_history) >= self.qps:
sleep_time = self.download_history.oldest_record() + ONE_SEC - curr_time
await asyncio.sleep(sleep_time.total_seconds())
self.download_history.add_record(datetime.now())
logging.info('Downloading {}'.format(url))
async with session.get(url, retry_attempts=2) as res:
if res.status == 429:
logging.warning('Service returned status 429, trying to recover. Consider adjusting download throttle')
await asyncio.sleep(1)
return None
elif not res.status == 200:
logging.warning('Error fetching {}, staus={}'.format(url, res.status))
return None
return await res.text()
async def download(self, session, url, result_queue):
try:
contents = await self.try_to_download(session, url)
if self.use_cache():
self.put_cache(url, contents)
logging.info('Download finished for {}'.format(url))
await result_queue.put((url, contents))
except aiohttp.ServerDisconnectedError as err:
logging.error('Failed to download {}. Repeated server disconnected error'.format(url))
await result_queue.put((url, None))
async def download_or_cache(self, session, url, result_queue):
if self.has_cache(url):
logging.info('Found cache entry for {}'.format(url))
await result_queue.put((url, self.get_cache(url)))
return
await self.download(session, url, result_queue)
async def download_all(self, urls, callback):
"""
        Asynchronously downloads the urls from the given list and forwards each result to
        the callback function.
        @param urls: iterable of urls to download.
        @param callback: coroutine accepting two parameters: the url and the
            downloaded page contents (None if the download failed).
"""
if self.semaphore is None:
self.semaphore = asyncio.Semaphore(self.qps)
if not self.args_registered:
logging.warning('Downloader.prepare was not called')
results_queue = asyncio.Queue()
async def consumer():
while True:
url, page = await results_queue.get()
await callback(url, page)
results_queue.task_done()
async with self.create_session() as download_session:
downloaders = [asyncio.create_task(self.download_or_cache(download_session, url, results_queue)) for url in urls]
consumer_task = asyncio.create_task(consumer())
await asyncio.gather(*downloaders)
await results_queue.join()
consumer_task.cancel()
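# A hedged usage sketch (not part of the original module). The url is a placeholder and the
# argument parsing simply mirrors the options registered in register_options().
if __name__ == "__main__":
    import argparse
    async def print_length(url, page):
        print(url, len(page) if page else "download failed")
    async def demo():
        parser = argparse.ArgumentParser()
        downloader = Downloader()
        downloader.register_options(parser)
        downloader.prepare(parser.parse_args([]))
        await downloader.download_all(["https://example.com/"], print_length)
    asyncio.run(demo())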
|
StarcoderdataPython
|
3590832
|
<gh_stars>1-10
#!/usr/bin/env python3
import os
import argparse
import gatenlphiltlab
import hiltnlp
import Levenshtein
#TODO : integrate this into hiltnlp/reorganize
def is_participant_speech(turn):
ratio = Levenshtein.ratio(turn.speaker, "Participant")
return ratio > 0.75
def is_therapist_speech(turn):
ratio = Levenshtein.ratio(turn.speaker, "Therapist")
return ratio > 0.75
def is_participant_reference(person_token,
turn):
grammatical_person = hiltnlp.get_grammatical_person(person_token)
if is_participant_speech(turn):
if grammatical_person == 1:
return True
elif is_therapist_speech(turn):
if grammatical_person == 2:
return True
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="(temp)"
)
parser.add_argument(
"-i",
"--annotation-file",
dest="annotation_files",
nargs="+",
required="true",
help="GATE annotation files"
)
args = parser.parse_args()
for annotation_file_path in args.annotation_files:
annotation_file = gatenlphiltlab.AnnotationFile(annotation_file_path)
sentences = hiltnlp.get_sentences(annotation_file)
turns = hiltnlp.get_turns(sentences)
participant_ref_set = annotation_file.create_annotation_set("EAU_heuristics")
for turn in turns:
for sentence in turn:
tokens = hiltnlp.get_tokens(sentence)
for token in tokens:
if hiltnlp.is_explicit_person_reference(token):
if is_participant_reference(token, turn):
participant_ref_set.create_annotation(
annotation_type="participant_reference",
start=token.start_node,
end=token.end_node,
)
annotation_file.save_changes()
|
StarcoderdataPython
|
6688491
|
def insertion_sort(L):
n = len(L)
for i in range(1,n):
# I will find the interval in which I need
# to shift and then I will do right rotation
# I guess if I ignored all python boilerplate
# It is a tad efficient
firstElement = i-1
while firstElement>=0 and L[firstElement]>L[i]:
firstElement-=1
firstElement+=1 # so the list to be right rotated in L[firstElement:i] inclusive interval
L[firstElement:i+1]=L[i:i+1]+L[firstElement:i]
return L
# if right rotation is a bit cleaner It would make it very elegant because this is AS clean as the original
L = [4,2,6,8,1,2,6,8,2,3]
print(insertion_sort(L))
|
StarcoderdataPython
|
12817458
|
<filename>back-end/f1hub/constructorstandings/apps.py
from django.apps import AppConfig
class ConstructorstandingsConfig(AppConfig):
name = 'constructorstandings'
|
StarcoderdataPython
|
6615913
|
<reponame>lizawood/Apple-Health-Fitness-Tracker
class Person:
def __init__(self, name, age, gender):
"""Create a object of class Person()
Parameters: name, age, gender
Return: An object of class Person"""
# assign the name, age, and gender to the class object in initialization
self.name = name
try:
self.age = int(age)
        except (TypeError, ValueError):
            print("Age must be an integer")
self.age = None
self.gender = gender
def display(self):
"""Display the name, age and gender of a Person() object
Parameters: Person() object
Return: name, age, and gender"""
# display the name, age, and gender of initialized object
return "Name: {}, Age: {}, Gender: {}". format(self.name, self.age, self.gender)
class healthdata(Person):
def __init__(self, name, age, gender, file =''):
"""Create a object of class healthdata() this inherits from the superclass Person()
Parameters: name, age, gender, file
Return: An object of class healthdata"""
# inherit Person class name, age, gender and assign to heathdata object
Person.__init__(self, name, age, gender)
# assign the file to the object in initialization
self.file = file
def data(self):
"""Import the file assigned to the healthdata() object into a dataframe and assign it to the healthdata() object
Parameters: healthdata() object initialized above
Return: Display of healthdata object attributes name, age, gender and dataframe containing healthdata() object file"""
import pandas as pd # ensure pandas is imported
try:
self.data = pd.read_csv(self.file) # import the self.file into a dataframe using pandas
except FileNotFoundError:
print("File does not exist")
return False
        Person.display(self)  # display object attributes using inherited display() function
return self.data
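# A hedged usage sketch (not part of the original module); "export.csv" is a placeholder path
# for an exported health data file.
if __name__ == "__main__":
    subject = healthdata("Joane Doe", 30, "F", file="export.csv")
    frame = subject.data()
    if frame is not False:
        print(frame.head())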
|
StarcoderdataPython
|
8020806
|
<reponame>rasmusskovbo/fantasy-football-data-analysis
import pandas as pd
import math
import statsmodels.formula.api as smf
from os import path
DATA_DIR = '/Users/nathan/fantasybook/data'
#####################
# logistic regression
#####################
# load
df = pd.read_csv(path.join(DATA_DIR, 'play_data_sample.csv'))
# process
df = df.loc[(df['play_type'] == 'run') | (df['play_type'] == 'pass')]
df['offensive_td'] = ((df['touchdown'] == 1) & (df['yards_gained'] > 0))
df['offensive_td'] = df['offensive_td'].astype(int)
df['yardline_100_sq'] = df['yardline_100'] ** 2
# run regression
model = smf.logit(formula='offensive_td ~ yardline_100 + yardline_100_sq',
data=df)
results = model.fit()
results.summary2()
def prob_of_td(yds):
b0, b1, b2 = results.params
value = (b0 + b1*yds + b2*(yds**2))
return 1/(1 + math.exp(-value))
prob_of_td(75)
prob_of_td(25)
prob_of_td(5)
|
StarcoderdataPython
|
122288
|
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QGraphicsPixmapItem
from client.constants import IMAGES_DIR
from client.models.abstract.info_scene import InfoScene
from client.models.enums.button_state import ButtonState
from client.models.menu_objects.button import Button
from common.constants import SCENE_WIDTH, SCENE_HEIGHT
from common.enums.direction import Direction
class MainMenu(InfoScene):
def __init__(self, parent):
super().__init__()
self.__parent__ = parent
self.logo = QGraphicsPixmapItem()
self.logo.setPixmap(QPixmap(IMAGES_DIR + "menu/logo.png"))
self.logo.setPos((SCENE_WIDTH - 600) / 2, 50)
self.buttons = [Button(self.__parent__.find_game, None, (SCENE_WIDTH - 250) / 2, SCENE_HEIGHT - 250,
IMAGES_DIR + "menu/start_normal.png",
IMAGES_DIR + "menu/start_highlighted.png", ButtonState.HIGHLIGHTED),
Button(self.__parent__.close_game, None, (SCENE_WIDTH - 200) / 2, SCENE_HEIGHT - 175,
IMAGES_DIR + "menu/quit_normal.png",
IMAGES_DIR + "menu/quit_highlighted.png", ButtonState.NORMAL)
]
self.addItem(self.logo)
self.__draw_menu_buttons()
""" Handles keyboard presses """
def keyPressEvent(self, event):
if event.key() == Qt.Key_W:
self.__change_button_focus(Direction.UP)
elif event.key() == Qt.Key_S:
self.__change_button_focus(Direction.DOWN)
elif event.key() == Qt.Key_Return:
self.__execute_button()
""" Draws buttons in the scene """
def __draw_menu_buttons(self):
for button in self.buttons:
self.addItem(button.graphics_item)
""" Executes button logic """
def __execute_button(self):
for index in range(len(self.buttons)):
if self.buttons[index].state is ButtonState.HIGHLIGHTED:
self.buttons[index].execute()
""" Changes button focus """
def __change_button_focus(self, direction: Direction):
if direction is None:
return
count = len(self.buttons)
old_index = -1
new_index = -1
for index in range(count):
if self.buttons[index].state is ButtonState.HIGHLIGHTED:
old_index = index
if direction is Direction.UP:
if index - 1 > 0:
new_index = index - 1
else:
new_index = 0
elif direction is Direction.DOWN:
if index + 1 > count - 1:
new_index = count - 1
else:
new_index = index + 1
self.buttons[old_index].set_state(ButtonState.NORMAL)
self.buttons[new_index].set_state(ButtonState.HIGHLIGHTED)
|
StarcoderdataPython
|
4877598
|
<gh_stars>0
#Generator with while loop
def range_generator(a, b):
while a < b:
yield a
a = a+1
seq = range_generator(1,5)
print(next(seq))
print(next(seq))
print()
# Generator usage with a for loop
def range_with_for_loop_usage(a,b):
while a < b:
yield a
a = a+1
#use the generator
for element in range_with_for_loop_usage(10,15):
print(element)
print()
#Generate squares using generators
def squares(n):
for element in range(n+1):
yield element ** 2
square_generator = squares(20)
print(next(square_generator))
print(next(square_generator))
print(next(square_generator))
print(next(square_generator))
print(next(square_generator))
print()
for element in squares(10):
print(element)
print()
|
StarcoderdataPython
|
5037351
|
from pathlib import Path
from textwrap import wrap
from typing import NamedTuple
from aiodns.error import DNSError
# to generate public and private keys
# openssl genrsa -out private.pem 4096
# openssl rsa -in private.pem -pubout > public.pem
from em2.protocol.dns import DNSResolver
KEY_DIR = (Path(__file__).parent / 'keys').absolute()
# printf 'foobar.com:2461449600' > test.txt
# openssl dgst -sha256 -sign tests/fixture_classes/keys/private.pem -out test.sig test.txt
# python -c "import base64; print(base64.urlsafe_b64encode(open('test.sig', 'rb').read()).decode())"
PLATFORM = 'foobar.com'
TIMESTAMP = 2461449600
VALID_SIGNATURE = (
'<KEY>'
'<KEY>'
'<KEY>'
'aZZ-Q-yl19hEoHqg-PhEVi30tdAyGifldSZfbT8gxk2laer__unGJQF_WB46UiKTgxJODh9hNRM4e-9opwH5MLX7nNPLsFa3QjfY9EJb9'
'OHqFfmEtWM8-aqhf-3HHBxLfjvTm9ZdH-zbesnSb6NbdY8BOWK6G2iVQQbH2YAQN_QjNvoZedI7ZQCZeuHm9XjRpi1ECLn8jjN8PtIJ84'
'eYYbgI0b6gcFkB0YBJcM59MNGYkdJkJtfQI-EHqPaSByrFEMME3RerbjePMSVHoBlbpKgFRGNzAgFX0s3zbIxA-0g25skMAY_mIS_XWQE'
'3JnlcZOSIyrff4LcU_ZEwIOxdKKWkPIq6oZKXfM8fsXz4yA7vY9K0='
)
def get_public_key():
with (KEY_DIR / 'public.pem').open() as f:
return f.read()
def get_private_key_file():
return str(KEY_DIR / 'private.pem')
class TXTQueryResult(NamedTuple):
text: bytes
class MXQueryResult(NamedTuple):
priority: int
host: str
class MockDNSResolver(DNSResolver):
def __init__(self, settings, loop):
self.settings = settings
self.loop = loop
self._port = 0
async def query(self, host, qtype):
if qtype == 'TXT':
return self.get_txt(host)
elif qtype == 'MX':
return self.get_mx(host)
else:
raise NotImplementedError()
def get_txt(self, host):
r = [TXTQueryResult(text=b'v=spf1 include:spf.example.com ?all')]
if host == 'foobar.com':
public_key = get_public_key()
public_key = public_key.replace('-----BEGIN PUBLIC KEY-----', '')
public_key = public_key.replace('-----END PUBLIC KEY-----', '').replace('\n', '')
rows = wrap(public_key, width=250)
rows[0] = 'v=em2key ' + rows[0]
r += [TXTQueryResult(text=t.encode()) for t in rows]
elif host == 'badkey1.com':
r += [
TXTQueryResult(text=b'v=em2key 123'),
TXTQueryResult(text=b'456'),
TXTQueryResult(text=b'789'),
]
elif host == 'badkey2.com':
r += [
TXTQueryResult(text=b'v=em2key 123'),
TXTQueryResult(text=b'456'),
TXTQueryResult(text=b'789='),
]
elif host.startswith('em2.platform.foreign.com'):
r += [TXTQueryResult(text=b'v=em2key 123456')]
r.append(TXTQueryResult(text=b'v=foobar'))
return r
def get_mx(self, host):
if host == 'local.com':
return [
MXQueryResult(5, f'em2.platform.example.com:{self._port}'),
MXQueryResult(10, f'mx.platform.example.com:{self._port}'),
]
elif host == 'nomx.com':
return []
elif host == 'value_error.com':
raise ValueError('DNS error with error.com')
elif host == 'dns_error.com':
raise DNSError('snap')
elif host == 'fallback.com':
return [
MXQueryResult(priority=10, host='mx.local.com'),
]
else:
extra = f':{self._port}' if self._port else ''
return [
MXQueryResult(priority=10, host=f'mx.platform.{host}{extra}'),
MXQueryResult(priority=5, host=f'em2.platform.{host}{extra}'),
]
|
StarcoderdataPython
|
12830941
|
import torch
def segment_index_add_python(values, scopes, indices, out=None):
if out is None:
out = values.new_zeros([scopes.shape[0]] + list(values.shape[1:]))
scopes = scopes.long()
values_dup = values.index_select(0, indices)
idx_global = torch.repeat_interleave(scopes[:, 1])
out.index_add_(0, idx_global, values_dup)
return out
segment_index_add = segment_index_add_python
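# A hedged usage sketch (not part of the original module) showing the layout this helper
# expects: scopes[:, 1] holds per-segment row counts and `indices` gathers the rows of
# `values` belonging to each segment, in segment order. The tensors are toy examples.
if __name__ == "__main__":
    values = torch.arange(8, dtype=torch.float32).reshape(4, 2)  # 4 rows of features
    scopes = torch.tensor([[0, 2], [2, 2]])                      # two segments, 2 rows each
    indices = torch.tensor([0, 1, 2, 3])                         # rows gathered per segment
    print(segment_index_add(values, scopes, indices))
    # -> [[ 2.,  4.], [10., 12.]]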
|
StarcoderdataPython
|
6426781
|
<filename>wavesim/App.py<gh_stars>0
# -*- coding: utf-8 -*-
from pyio.DataSturucture import Plugin
from pyio.Main import main
from wavesim.Window import Window
class App(Plugin):
def __init__(self):
super().__init__()
self.window = None
def init(self, data):
self.data = data
self.window = Window(data=data)
def enable_button(self):
return True
def clicked(self):
self.window.show()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3321168
|
<reponame>shannenye/saleor
from collections import defaultdict
import graphene
from ....shipping.error_codes import ShippingErrorCode
from ..mutations import BaseChannelListingMutation
def test_validate_duplicated_channel_ids(channel_PLN, channel_USD):
# given
channel_id = graphene.Node.to_global_id("Channel", channel_USD.id)
second_channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
errors = defaultdict(list)
# when
result = BaseChannelListingMutation.validate_duplicated_channel_ids(
[channel_id],
[second_channel_id],
errors,
ShippingErrorCode.DUPLICATED_INPUT_ITEM.value,
)
# then
assert result is None
assert errors["input"] == []
def test_validate_duplicated_channel_ids_with_duplicates(channel_PLN):
# given
channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
second_channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
error_code = ShippingErrorCode.DUPLICATED_INPUT_ITEM.value
errors = defaultdict(list)
# when
result = BaseChannelListingMutation.validate_duplicated_channel_ids(
[channel_id], [second_channel_id], errors, error_code
)
# then
assert result is None
assert errors["input"][0].code == error_code
def test_validate_duplicated_channel_values(channel_PLN, channel_USD):
# given
channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
second_channel_id = graphene.Node.to_global_id("Channel", channel_USD.id)
error_code = ShippingErrorCode.DUPLICATED_INPUT_ITEM.value
errors = defaultdict(list)
field = "add_channels"
# when
result = BaseChannelListingMutation.validate_duplicated_channel_values(
[channel_id, second_channel_id], field, errors, error_code
)
# then
assert result is None
assert errors[field] == []
def test_validate_duplicated_channel_values_with_duplicates(channel_PLN):
# given
channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
second_channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
error_code = ShippingErrorCode.DUPLICATED_INPUT_ITEM.value
errors = defaultdict(list)
field = "add_channels"
# when
result = BaseChannelListingMutation.validate_duplicated_channel_values(
[channel_id, second_channel_id], field, errors, error_code
)
# then
assert result is None
assert errors[field][0].code == error_code
def test_clean_channels_add_channels(channel_PLN):
# given
channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
error_code = ShippingErrorCode.DUPLICATED_INPUT_ITEM.value
errors = defaultdict(list)
# when
result = BaseChannelListingMutation.clean_channels(
None, {"add_channels": [{"channel_id": channel_id}]}, errors, error_code
)
# then
assert result == {
"add_channels": [{"channel_id": channel_id, "channel": channel_PLN}],
"remove_channels": [],
}
assert errors["input"] == []
def test_clean_channels_remove_channels(channel_PLN):
# given
channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
error_code = ShippingErrorCode.DUPLICATED_INPUT_ITEM.value
errors = defaultdict(list)
# when
result = BaseChannelListingMutation.clean_channels(
None, {"remove_channels": [channel_id]}, errors, error_code
)
# then
assert result == {"add_channels": [], "remove_channels": [str(channel_PLN.id)]}
assert errors["input"] == []
def test_test_clean_channels_with_errors(channel_PLN):
# given
channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
error_code = ShippingErrorCode.DUPLICATED_INPUT_ITEM.value
errors = defaultdict(list)
# when
result = BaseChannelListingMutation.clean_channels(
None, {"remove_channels": [channel_id, channel_id]}, errors, error_code
)
# then
assert result == {}
assert errors["remove_channels"][0].code == error_code
|
StarcoderdataPython
|
8185959
|
<filename>1.3/logistic-regression.py
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # Suppress warning that it can't use CUDA
import tensorflow as tf
import pandas as pd
import numpy as np
import time
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
iris = load_iris()
iris_X = iris.data[:-1, :]
iris_Y = pd.get_dummies(iris.target[:-1]).values
trainX, testX, trainY, testY = train_test_split(iris_X, iris_Y, test_size=0.33, random_state=42)
numFeatures = trainX.shape[1]
numLabels = trainY.shape[1]
X = tf.Variable(trainX.astype('float32'))
yGold = tf.Variable(trainY.astype('float32'))
weights = tf.Variable(tf.random.normal([numFeatures,numLabels],
mean=0.,
stddev=0.01,
name="weights"),dtype='float32')
bias = tf.Variable(tf.random.normal([1,numLabels],
mean=0.,
stddev=0.01,
name="bias"))
# model
def logistic_regression(x):
    apply_weights_OP = tf.matmul(x, weights, name="apply_weights")
add_bias_OP = tf.add(apply_weights_OP, bias, name="add_bias")
activation_OP = tf.nn.sigmoid(add_bias_OP, name="activation")
return activation_OP
# Number of Epochs in our training
numEpochs = 7000
# Defining our learning rate iterations (decay)
learningRate = tf.keras.optimizers.schedules.ExponentialDecay(initial_learning_rate=0.08,
decay_steps=trainX.shape[0],
decay_rate= 0.95,
staircase=True)
# Defining our loss function - Mean Squared Logarithmic Error
loss_object = tf.keras.losses.MeanSquaredLogarithmicError()
optimizer = tf.keras.optimizers.SGD(learningRate)
# Accuracy metric.
def accuracy(y_pred, y_true):
# Predicted class is the index of the highest score in prediction vector (i.e. argmax).
# print('y_pred : ',y_pred)
# print('y_true : ',y_true)
correct_prediction = tf.equal(tf.argmax(y_pred, -1), tf.argmax(y_true, -1))
return tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Optimization process.
def run_optimization(x, y):
with tf.GradientTape() as g:
pred = logistic_regression(x)
loss = loss_object(pred, y)
gradients = g.gradient(loss, [weights, bias])
optimizer.apply_gradients(zip(gradients, [weights, bias]))
# Initialize reporting variables
display_step = 10
epoch_values = []
accuracy_values = []
loss_values = []
loss = 0
diff = 1
# Training epochs
for i in range(numEpochs):
if i > 1 and diff < .000001:
print("change in loss %g; convergence."%diff)
break
else:
# Run training step
run_optimization(X, yGold)
# Report occasional stats
if i % display_step == 0:
# Add epoch to epoch_values
epoch_values.append(i)
pred = logistic_regression(X)
newLoss = loss_object(pred, yGold)
# Add loss to live graphing variable
loss_values.append(newLoss)
            # Generate accuracy stats on the training data
acc = accuracy(pred, yGold)
accuracy_values.append(acc)
# Re-assign values for variables
diff = abs(newLoss - loss)
loss = newLoss
#generate print statements
print("step %d, training accuracy %g, loss %g, change in loss %g"%(i, acc, newLoss, diff))
# How well do we perform on held-out test data?
test_pred = logistic_regression(tf.constant(testX.astype('float32')))
test_acc = accuracy(test_pred, tf.constant(testY.astype('float32')))
print("final accuracy on test set: %s" % str(test_acc.numpy()))
|
StarcoderdataPython
|
11364762
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('addbook', views.addbook, name='addbook'),
]
|
StarcoderdataPython
|
1930901
|
""" This module extracts syntactic features. """
import subprocess
import os
import re
import nltk.tree
from utils import file_utils
from utils.lexicosyntactic import yngve
def get_lu_complexity_features(lu_analyzer_path, transcript_filepath, transcript_filename, output_lu_parse_dir):
''' This function extracts Lu complexity features
Parameters:
lu_analyzer_path: string, path to Lu Complexity Analyzer.
transcript_filepath: string, path to transcript from which we want to extract Lu complexity features.
transcript_filename: string, name of transcript file.
output_lu_parse_dir: string, path to directory that will store parse trees produced.
Returns:
lu_keys: list of strings, names of extracted features.
lu_features_dict: dictionary, mapping feature name to feature value.
'''
lu_keys = []
lu_features_dict = {}
# Run the Lu complexity analyzer script which produces a csv file with the feature values,
# and a CFG parse of the text.
subprocess.call(['python2', 'analyzeText.py',
os.path.abspath(transcript_filepath), os.path.abspath(os.path.join(output_lu_parse_dir, transcript_filename))],
cwd=lu_analyzer_path)
# Read the features into the dictionary.
with open(os.path.join(output_lu_parse_dir, transcript_filename), 'r') as fin_lu:
headers = fin_lu.readline().strip().split(',')
lu_features = fin_lu.readline().strip().split(',')
for i in range(1, len(headers)):
lu_features_dict[headers[i]] = float(lu_features[i])
lu_keys += [headers[i]]
return lu_keys, lu_features_dict
def get_parsetree_features(parser_path, cfg_rules_path, transcript_filepath, transcript_filename, output_parse_dir):
''' This function extracts parsetree features.
Parameters:
parser_path: string, path to Stanford lexical parser.
cfg_rules_path: string, path to file containing CFG rules to be extracted.
        transcript_filepath: string, path to transcript from which we want to extract parse tree features.
        transcript_filename: string, name of transcript file.
        output_parse_dir: string, path to directory that will store parse trees produced.
Returns:
parsetree_keys: list of strings, names of extracted features.
parsetree_features: dictionary, mapping feature name to feature value.
'''
parsetree_keys = []
parsetree_features = {}
# Build stanford CFG and dependency parses, only if they don't exist already
target_parse = os.path.join(output_parse_dir, transcript_filename + '.parse')
if not os.path.exists(target_parse):
# "oneline" parse (parse for one utterance per line)
with open(target_parse, 'w') as fout:
subprocess.call([os.path.join(parser_path, 'lexparser_oneline.sh'), transcript_filepath], stdout=fout)
# "penn,typedDependencies" parse
target_depparse = os.path.join(output_parse_dir, transcript_filename + '.depparse')
if not os.path.exists(target_depparse):
with open(target_depparse, 'w') as fout:
subprocess.call([os.path.join(parser_path, 'lexparser_dep.sh'), transcript_filepath], stdout=fout)
with open(target_parse, 'r') as fin:
treelist = fin.readlines()
maxdepth = 0.0
totaldepth = 0.0
meandepth = 0.0
treeheight = 0.0
numtrees = 0
###from Jed
# To read in the parse trees into nltk tree objects, expect the 'oneline'
# format from the stanford parser (one utterance tree per line).
yc = yngve.Yngve_calculator()
for utterance_tree in treelist:
if utterance_tree:
thistree = utterance_tree # read parsed tree from parser-processed file
numtrees += 1
pt = nltk.tree.ParentedTree.fromstring(thistree) #convert to nltk tree format
st = list(pt.subtrees()) #extract list of all sub trees in tree
nodelist = []
for subt in st:
nodelist.append(subt.label()) # make list of all node labels for subtrees
Snodes = nodelist.count('S') + nodelist.count('SQ') + nodelist.count('SINV')#count how many nodes are "S" (clauses)
# A list of the Yngve depth (int) for each terminal in the tree
depthlist = yc.make_depth_list(pt, []) # computes depth, need to pass it an empty list
depthlist = depthlist[:-1] # the last terminal is a punctuation mark, ignore it
if depthlist:
maxdepth += max(depthlist)
totaldepth += sum(depthlist)
if len(depthlist) > 0:
meandepth += 1.0*sum(depthlist)/len(depthlist)
treeheight += pt.height()
if numtrees > 0:
parsetree_features['maxdepth'] = maxdepth / numtrees # or should it be max overall?
parsetree_features['totaldepth'] = totaldepth / numtrees
parsetree_features['meandepth'] = meandepth / numtrees
parsetree_features['treeheight'] = treeheight / numtrees
else:
parsetree_features['maxdepth'] = 0
parsetree_features['totaldepth'] = 0
parsetree_features['meandepth'] = 0
parsetree_features['treeheight'] = 0
parsetree_keys += ['maxdepth', 'totaldepth', 'meandepth', 'treeheight']
# CFG MEASURES
# Count frequency of different CFG constituents, using the
# constructed parse tree
totNP = 0
totVP = 0
totPP = 0
lenNP = 0
lenVP = 0
lenPP = 0
total_length = 0
prod_nonlexical = []
# List of rules to look for
with open(cfg_rules_path, 'r') as fin:
rules = fin.read()
top_rules = rules.strip().split('\n')
for utterance_tree in treelist:
if utterance_tree:
# Convert to unicode to prevent errors when there
# are non-ascii characters
t = nltk.tree.Tree.fromstring(utterance_tree)
prods = t.productions()
for p in prods:
if p.is_nonlexical():
prod_nonlexical.append(re.sub(" ", "_", str(p)))
# Counting phrase types
for st in t.subtrees():
if str(st).startswith("(NP"):
lenNP += len(st.leaves())
totNP += 1
elif str(st).startswith("(VP"):
lenVP += len(st.leaves())
totVP += 1
elif str(st).startswith("(PP"):
lenPP += len(st.leaves())
totPP += 1
sent_length = len(t.leaves())
total_length += sent_length
if total_length > 0:
parsetree_features["PP_type_prop"] = 1.0*lenPP/total_length
parsetree_features["VP_type_prop"] = 1.0*lenVP/total_length
parsetree_features["NP_type_prop"] = 1.0*lenNP/total_length
parsetree_features["PP_type_rate"] = 1.0*totPP/total_length
parsetree_features["VP_type_rate"] = 1.0*totVP/total_length
parsetree_features["NP_type_rate"] = 1.0*totNP/total_length
else:
parsetree_features["PP_type_prop"] = 0
parsetree_features["VP_type_prop"] = 0
parsetree_features["NP_type_prop"] = 0
parsetree_features["PP_type_rate"] = 0
parsetree_features["VP_type_rate"] = 0
parsetree_features["NP_type_rate"] = 0
try:
parsetree_features["average_PP_length"] = 1.0*lenPP/totPP
except:
parsetree_features["average_PP_length"] = 0
try:
parsetree_features["average_VP_length"] = 1.0*lenVP/totVP
except:
parsetree_features["average_VP_length"] = 0
try:
parsetree_features["average_NP_length"] = 1.0*lenNP/totNP
except:
parsetree_features["average_NP_length"] = 0
parsetree_keys += ['PP_type_prop', 'VP_type_prop', 'NP_type_prop',
'PP_type_rate', 'VP_type_rate', 'NP_type_rate',
'average_PP_length', 'average_VP_length', 'average_NP_length']
# Normalize by number of productions
num_productions = len(prod_nonlexical)
fdist = nltk.probability.FreqDist(prod_nonlexical)
for prod_rule in top_rules: # need this to ensure we always get same number of CFG features
if prod_rule in fdist:
parsetree_features[prod_rule] = 1.0 * fdist[prod_rule] / num_productions
else:
parsetree_features[prod_rule] = 0.0
parsetree_keys += [prod_rule]
return parsetree_keys, parsetree_features
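# Hypothetical usage sketch (all paths below are placeholders, not shipped with this module):
# keys, feats = get_parsetree_features('/opt/stanford-parser', 'cfg_rules.txt',
#                                      'transcripts/p01.txt', 'p01.txt', 'parses/')
# feats['treeheight'] then holds the mean parse-tree height across utterances.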
|
StarcoderdataPython
|
8131526
|
import numpy as np
class BinarySegmentationMetrics:
"""
This class is responsible for calculating simple metrics for one pair of ground truth mask and its predicted mask.
:param jaccard_threshold: float
Threshold value for the jaccard index. Values below this value will be calculated as 0.
TP (true positives): pixels correctly segmented as foreground
TN (true negatives): pixels correctly detected as background
FP (false positives): pixels falsely segmented as foreground
FN (false negatives): pixels falsely detected as background
"""
def __init__(self, jaccard_threshold: float = 0.0):
self.n_mask_pixels = 0
self.n_background_pixels = 0
self.tp = 0
self.tn = 0
self.fp = 0
self.fn = 0
self.jaccard_threshold = jaccard_threshold
def calculate(self, mask: np.ndarray, predicted_mask: np.ndarray):
"""
Calculate pixel-wise tp, tn, fp and fn.
:param mask: np.ndarray
The ground truth mask.
:param predicted_mask: np.ndarray
The predicted mask.
        :return: None
            The tp, tn, fp and fn counts of this instance are updated in place.
"""
assert mask is not None and predicted_mask is not None, "Mask and predicted mask must not be None."
self.__calculate_positives_negatives(mask, predicted_mask)
def __calculate_positives_negatives(self, mask: np.ndarray, predicted_mask: np.ndarray):
assert mask.shape == predicted_mask.shape
assert len(mask.shape) == len(predicted_mask.shape) == 3
# assert binary mask
assert mask.shape[-1] == 1 and predicted_mask.shape[-1] == 1
# reshape to only 2 dimensions
mask = mask.squeeze()
predicted_mask = predicted_mask.squeeze()
self.n_mask_pixels = np.count_nonzero(mask == 1.0)
self.n_background_pixels = np.count_nonzero(mask == 0.0)
height, width = mask.shape
tp, tn, fp, fn = 0, 0, 0, 0
for i in range(height):
for j in range(width):
mask_pixel_value = mask[i][j]
predicted_mask_pixel_value = predicted_mask[i][j]
if mask_pixel_value == predicted_mask_pixel_value:
if mask_pixel_value == 1:
tp += 1
else:
tn += 1
else:
if predicted_mask_pixel_value == 0:
fn += 1
else:
fp += 1
assert tp + tn + fp + fn == height * width, "Sum of all pixels is not equal to the resolutions of the image."
self.tp = tp
self.tn = tn
self.fp = fp
self.fn = fn
@property
def jaccard_similarity_index(self) -> float:
denominator = (self.tp + self.fp + self.fn)
if denominator == 0:
return 0
return self.tp / denominator
@property
def threshold_jaccard_index(self) -> float:
if self.jaccard_similarity_index >= self.jaccard_threshold:
return self.jaccard_similarity_index
else:
return 0.0
@property
def dice(self) -> float:
denominator = (2 * self.tp + self.fn + self.fp)
if denominator == 0:
return 0
return (2 * self.tp) / denominator
@property
def sensitivity(self) -> float:
denominator = (self.tp + self.fn)
if denominator == 0:
return 0
return self.tp / denominator
@property
def specificity(self) -> float:
denominator = (self.tn + self.fp)
if denominator == 0:
return 0
return self.tn / denominator
@property
def accuracy(self) -> float:
denominator = (self.tp + self.fp + self.tn + self.fn)
if denominator == 0:
return 0
return (self.tp + self.tn) / denominator
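# Minimal usage sketch (hypothetical 2x2 masks with the single-channel last axis
# that calculate() expects):
if __name__ == "__main__":
    mask = np.array([[1.0, 0.0], [1.0, 0.0]]).reshape(2, 2, 1)       # ground truth
    predicted = np.array([[1.0, 1.0], [1.0, 0.0]]).reshape(2, 2, 1)  # one false positive
    metrics = BinarySegmentationMetrics(jaccard_threshold=0.5)
    metrics.calculate(mask, predicted)
    # tp=2, tn=1, fp=1, fn=0 -> jaccard = 2 / (2 + 1 + 0) = 0.666..., dice = 0.8
    print(metrics.jaccard_similarity_index, metrics.dice, metrics.accuracy)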
|
StarcoderdataPython
|
9655335
|
<reponame>EaterGit/EaterDiscordBot
import discord
from discord.ext import commands, tasks
from discord.ext.commands import Bot
import random
from itertools import cycle
import time
BOT_PREFIX = '.'
user = input('Enter Bot Discord ID and Name ID : ')
TOKEN = "<KEY>"
bot = commands.Bot(command_prefix=",", activity=discord.Game(name="Booting..."))
bot.remove_command('help')
t = str(len(bot.guilds))
act = cycle(['Do ,help', 'Development Ongoing', 'Minecraft', 'Mobile Legends: Bang Bang', 'Python', 'Javascript', 'Discord', "On {} Servers".format(t)])
# # Successfully Launched # #
@bot.event
async def on_ready():
botowner = (await bot.application_info()).owner
change_status.start()
print("=" * 45 + "\n")
print("Ready To Go!" + "\n")
print("Owned and Programmed By : Kaizer / Eater \n")
print("Logged In As {0.user}\n".format(bot))
print("Currently running in : {} servers. \n".format(len(bot.guilds)))
print("Bot's Prefix : , \n")
print("=" * 45 + "\n")
print("Bot Owner is " + botowner.name + "#" + botowner.discriminator + "\n")
print("Current Discord.py Version : {}\n".format(discord.__version__))
print("{}\n".format(discord.version_info))
print("=" * 45 + "\n")
# # Change Status # #
@tasks.loop(seconds=9.5)
async def change_status():
await bot.change_presence(activity=discord.Game(next(act)))
# # AutoRole / On Joined# #
@bot.event
async def on_member_join(member):
    role = discord.utils.get(member.guild.roles, name="Community")
    channel = bot.get_channel(582810265238896651)
    await channel.send("Welcome {0.mention} To Eater's Official".format(member))
    await member.add_roles(role)
# # Left # #
@bot.event
async def on_member_remove(member):
channel = bot.get_channel(582810265238896651)
await channel.send("{0.mention} Left Eater's Official".format(member))
# # Reciever # #
@bot.event
async def on_message(message):
author = message.author
content = message.content
guild = message.guild
print("{} : {} sent on {}".format(author, content, guild))
await bot.process_commands(message)
# # Help Module # #
@bot.command()
async def help(ctx):
embed = discord.Embed(
colour = discord.Colour.blue()
)
embed.add_field(name="Add Me To Your Server : \n ", value="http://bit.ly/eaterbot \n",inline=False)
embed.add_field(name="\n General Commands", value="\n")
embed.add_field(name="help", value="Shows The List Of s", inline=False)
embed.add_field(name="ping", value="Sends The Latency Of The Bot", inline=False)
embed.add_field(name="quote", value="Sends A Quote To Enlighten Your Day", inline=False)
embed.add_field(name="info", value="Gets The User Info Using This ", inline=False)
await ctx.author.send(embed=embed)
# # Ping Module # #
@bot.command()
async def ping(ctx):
ping_ = bot.latency
ping = round(ping_ * 1000)
await ctx.channel.send(f"Your ping is {ping}ms")
# # Quotes Module # #
@bot.command()
async def quote(ctx):
author = ctx.message.author
respo = [
"You’re off to great places, today is your day. Your mountain is waiting, so get on your way.",
"You always pass failure on the way to success.",
"No one is perfect - that’s why pencils have erasers.",
"You’re braver than you believe, and stronger than you seem, and smarter than you think.",
"It always seems impossible until it is done.",
"Keep your face to the sunshine and you cannot see a shadow.",
"Once you replace negative thoughts with positive ones, you’ll start having positive results.",
"If you can forgive you can forget.",
"Positive thinking will let you do everything better than negative thinking will.",
"In every day, there are 1,440 minutes. That means we have 1,440 daily opportunities to make a positive impact.",
"The only time you fail is when you fall down and stay down.",
"Just be happy and everything will be fine!",
"The only limit to our realization of tomorrow will be our doubts of today.",
"Creativity Is Intelligence Having Fun.",
"Your limitation—it’s only your imagination",
"Push yourself, because no one else is going to do it for you.",
"Sometimes later becomes never. Do it now.",
"Great things never come from comfort zones.",
"Dream It. Wish It. Do It.",
"Everything happens for a reason.",
"Dream Big. Do Bigger!",
"Little things make big days.",
"It’s going to be hard, but hard does not mean impossible.",
"Don’t wait for opportunity. Create it.",
"The key to success is to focus on goals, not obstacles.",
"Dream It. Believe It. Build It.",
"Do one thing every day that scares you.",
"You never have to change anything you got up in the middle of the night to write.",
]
await ctx.send(random.choice(respo))
await ctx.send("{0.mention} Hope This Quote Helps You! \n".format(author))
# # Info Command # #
@bot.command(aliases=['user', 'userinfo'])
async def info(ctx, user: discord.Member):
desktop = user.desktop_status
web = user.web_status
mobile = user.mobile_status
embed = discord.Embed(title="User info for {name}#{discriminator}".format(name=user.name, discriminator=user.discriminator), color=0x68e887)
embed.set_thumbnail(url=user.avatar_url)
embed.add_field(name="Name on server", value="{nick}".format(nick=user.display_name if user.display_name is not user.name else "None"), inline=True)
embed.add_field(name="ID", value="{id}".format(id=user.id), inline=True)
embed.add_field(name="On Client", value="{client}".format(client="Desktop" if desktop is desktop.online or desktop is desktop.idle or desktop is desktop.dnd else "Web" if web is web.online or web is web.idle or web is web.dnd else "Mobile" if mobile is mobile.online or mobile is mobile.idle or mobile is mobile.dnd else "None"))
embed.add_field(name="Status", value="{status}".format(status=user.status), inline=True)
embed.add_field(name="Playing/Activity", value="{}".format(user.activity), inline=True)
embed.add_field(name="Join Date", value="{}".format(user.joined_at), inline=True)
embed.add_field(name="Highest Role", value="{}".format(user.top_role), inline=True)
embed.add_field(name="Account Created", value="{}".format(user.created_at), inline=True)
await ctx.send(embed=embed)
## Error On Info ##
@info.error
async def info_handler(ctx, error):
    if isinstance(error, commands.MissingRequiredArgument) and error.param.name == 'user':
await ctx.send("{sender} Which user did you want to get the info of?".format(sender=ctx.message.author.mention))
# # Owner # #
@bot.command()
async def botowner(ctx):
botowner = (await bot.application_info()).owner
await ctx.send("The Owner Of This Bot Is {}.\n Message him if you have any suggestions.".format(botowner))
# # Clear command # #
@bot.command()
@commands.has_permissions(manage_messages=True)
async def clear(ctx, amount:int=None):
channel = bot.get_channel(58425827681172224)
chsent = ctx.message.channel
if amount is None:
await ctx.send("Do ,clear (amount)")
else:
await ctx.channel.purge(limit=amount)
await channel.send("Cleared (amount) Messages on {}".format(chsent))
## Error On Clear ##
@clear.error
async def clear_handler(ctx, error):
sender = ctx.message.author
if isinstance(error, commands.MissingPermissions):
await ctx.send("You Don't Have Permission To Use This Command!".format(sender.mention))
# # Shut Down The Bot # #
@bot.command(aliases=['shutdown'])
async def stop(ctx):
Owner = [
397944315407761410
]
sender = ctx.message.author
bot = ctx.bot
if sender.id in Owner:
await ctx.send(":wave: Bye!")
print("{}#{} Shuts down the bot.".format(sender.name, sender.discriminator))
await bot.logout()
else:
await ctx.send("Sorry {} Only the hosts or the bot owner can do this.".format(sender.mention))
@bot.command(aliases=['print', 'printtoconsole', 'saytoconsole', 'say'])
async def printconsole(ctx, *, args):
sender = ctx.message.author
Owner = [
397944315407761410
]
if sender.id in Owner:
print("{name}#{discrim}: {msg}".format(name=sender.name, discrim=sender.discriminator, msg=args))
else:
await ctx.send("{} || Only the bot's Owner can do that.".format(sender.mention))
@printconsole.error
async def printconsole_handler(ctx, error):
sender = ctx.message.author
if isinstance(error, commands.MissingRequiredArgument):
if error.param.name == 'args':
await ctx.send("{} Can't send something blank to the server's console.".format(sender.mention))
bot.run(TOKEN)
|
StarcoderdataPython
|
1703057
|
<gh_stars>0
#!/usr/bin/env python3
# Prompt the user to enter some text
text = input("Please enter some text: ")
# Print the entered text to the screen
print("Here is the text you entered: {}".format(text))
|
StarcoderdataPython
|
8060275
|
"""
This module include all class objects used
for the management project system MP.
"""
import json
import datetime
import copy
# Load settings of the lib
import settings
class Workflow:
"""
Workflow is the interface that manages all projects.
"""
def __init__(self, db_path):
"""Initialization of the instance"""
# Attributes
self.db_path = db_path
with open(self.db_path, 'r') as db:
self.json = json.load(db)
self.projects = []
self.projects_done = []
# Load
for project in self.json['projects']:
project_history_sorted = sorted(
project['history'], key=lambda k: k['date']
)
project_obj = Project(
project['name'], project['type'], project['money'],
id=project['id'], history=project_history_sorted,
money_year=project['money_year'], pi=project['pi'],
summary=project['summary'], ref=project['ref']
)
if 'Done' not in [hist['status'] for hist in project['history']]:
self.projects.append(project_obj)
else:
self.projects_done.append(project_obj)
def sort_projects(self, key='date'):
def sort(k):
if key == 'date':
return k.history[-1]['date']
if key == 'status':
return len(settings.PROGRESS[k.history[-1]['status']])
if key == 'name':
return k.name
if key == 'ref':
return k.ref
if key == 'id':
return k.id
else:
return getattr(k, key)
self.projects = sorted(
self.projects, key=sort, reverse=key not in ['id', 'name']
)
self.projects_done = sorted(
self.projects_done, key=sort, reverse=key not in ['id', 'name']
)
def find_project(self, id):
"""Return the project found in the workflow"""
projects = self.projects + self.projects_done
for project in projects:
if project.id == id:
return project
def status_api(self, all=None, key='status'):
"""List all the projects ongoing and returns a context"""
# Sort projects
self.sort_projects(key=key)
# All projects vs ongoing projects only
if all:
temp_projects = self.projects_done + self.projects
else:
temp_projects = self.projects
context = []
# Display projects
for project in temp_projects:
project_last_node = project.history[-1]
time_bet_action = (datetime.datetime.now() - project_last_node['date']).days + 1
# Trunc comment if too long (>settings.WIDTH)
comment = self.truncate(project_last_node['comment'])
# Color
color = ''
end_color = ''
if all and project.history[-1]["status"] == 'Done':
color = '\033[92m'
end_color = '\033[0m'
else:
if time_bet_action > settings.WARN_TIME:
color = '\033[91m'
end_color = '\033[0m'
context_dict = {
'id': project.id,
'name': project.name[:12],
'type': project.type,
'status': project_last_node['status'],
'progress': settings.PROGRESS[project_last_node['status']],
'date': datetime.datetime.strftime(
project_last_node['date'], '%d/%m/%Y'
),
'comment':comment,
'color': color,
'end_color': end_color,
'money': project.money,
'money_year': project.money_year,
'ref': project.ref,
'pi': project.pi
}
context.append(context_dict)
return context
def status(self, all=None, key='status', extended=False):
"""Display all the status dashboard"""
print("-" * settings.WIDTH)
# Load context
context = self.status_api(all=all, key=key)
for project_context in context:
if not extended:
print("{color}#{id:<2} {name:<12} {type:<4} |{progress:<6}| " \
"{comment}{end_color}".format(**project_context))
else:
print("{color}#{id:<2} {name:<12} {type:<4} {status:<5} |{progress:<6}| " \
"{date:<10} {money:>4} {money_year:>3}kE {pi:<12} {ref:<12} {end_color}".format(**project_context))
print("-" * settings.WIDTH)
@staticmethod
def truncate(txt, width=settings.WIDTH, indent=32):
        tmp = ''
        output = []
        for word in txt.split():
            if len(tmp) + len(word) < width - indent:
                tmp += ' ' + word
            else:
                output.append(tmp.strip())
                tmp = word
        if tmp.strip():
            output.append(tmp.strip())
        return ('\n' + (' ' * indent)).join(output)
def add_project(self, name, type, money):
# Find id available
ids = [project.id for project in self.projects + self.projects_done]
new_id = max(ids) + 1 if ids else 1
# Create new project
new_project = Project(name, type, money, id=new_id, history=[])
self.projects.append(new_project)
self.sort_projects()
self.save()
def save(self):
"""Write to database"""
self.json["projects"] = [
project.dumps() for project in self.projects + self.projects_done
]
with open(self.db_path, 'w') as db:
json.dump(self.json, db, sort_keys=True, indent=4) # Write JSON format to database
    def rm(self, id):
        """Delete project from the database"""
        for project_data in (self.projects, self.projects_done):
            for project in project_data:
                if project.id == id:
                    project_data.remove(project)
                    self.save()
                    print("{} was deleted".format(project))
                    return
def add_action(self, id, status="", comment="-"):
project = self.find_project(id)
if project:
if not status:
status = project.history[-1]["status"]
project.add_action(status, comment)
self.save()
if status == "Done":
print("Congrats, one more project done !")
else:
self.history(id)
def rm_action(self, project_id, node=None):
project = self.find_project(project_id)
if project:
if node:
project.del_action(int(node))
else:
# Delete last action
project.del_action(project.history[-1]["node"])
self.save()
self.history(project_id)
print("Commit removed.")
def update_action(self, project_id, params, node=None):
project = self.find_project(project_id)
if project:
if node:
project.update_action(int(node), params)
else:
# Update last action
project.update_action(project.history[-1]['node'], params)
self.save()
self.history(project_id)
print("Commit updated.")
def history_api(self, id):
for project in self.projects + self.projects_done:
if project.id == id:
context = {
'name': project.name,
'type': project.type,
'id': project.id,
'money': project.money,
'duration': self.duration(
project.history[0]["date"], project.history[-1]["date"]
)
}
if project.pi or project.money_year:
context['pi'] = project.pi
context['money_year'] = project.money_year
if project.ref:
context['ref'] = project.ref
if project.summary:
context['summary'] = project.summary
context['history'] = []
for index, hist in enumerate(project.history):
if index == 0:
days = '--'
else:
diff = hist['date'] - project.history[index-1]['date']
days = str(diff.days) + 'd'
context['history'].append({
'date': datetime.datetime.strftime(hist['date'], '%d/%m/%Y'),
'status': hist['status'],
'comment': self.truncate(hist['comment'], indent=42),
'progress': settings.PROGRESS[hist['status']],
'days': days,
'node': hist["node"]
})
break
return context
def history(self, id):
context = self.history_api(id)
print("-" * settings.WIDTH)
print("Project #{id:<2} {name:<10} {type:<3} {money:>4} " \
"kEUR Duration:{duration:<9}".format(**context))
if context.get('pi') or context.get('money_year'):
print(" Current year: {money_year:>5} kEUR PI: {pi:<7}".format(**context))
if context.get('ref'):
print(" Ref: {ref:<12}".format(**context))
if context.get('summary'):
print(" {summary}".format(**context))
print("-" * settings.WIDTH)
for hist in context['history']:
print(" {node:>2} {date:<11} {days:>5} {status:<5} "\
"|{progress:<6}| {comment:<35}".format(**hist))
print("-" * settings.WIDTH)
@staticmethod
def duration(date_start, date_end):
duration = date_end - date_start
return Workflow.days_or_months(duration.days)
@staticmethod
def days_or_months(days):
if days <= 31:
return "{:3.0f} days".format(days)
else:
return "{:2.0f} months".format(days/30)
def stats(self, start_date=None, end_date=None):
# Make calculation of stats
# Start and end date
if not start_date:
start_date = min([
min([hist["date"] for hist in project.history]) \
for project in self.projects + self.projects_done
])
if not end_date:
end_date = max([
max([hist["date"] for hist in project.history]) \
for project in self.projects + self.projects_done
])
# Number of projects
nb_projects = len([
project for project in self.projects+self.projects_done \
if start_date <= project.history[-1]['date'] \
and end_date >= project.history[0]['date']
])
nb_active_projects = len([
project for project in self.projects \
if start_date <= project.history[-1]['date'] \
and end_date >= project.history[0]['date']
])
nb_license = len([
project.type for project in self.projects + self.projects_done \
if (project.type == "Lic" or project.type == "aLic") \
and (start_date <= project.history[-1]['date'] \
and end_date >= project.history[0]['date'])
])
nb_license_ongoing = len([
project.type for project in self.projects \
if (project.type == "Lic" or project.type == "aLic") \
and (start_date <= project.history[-1]['date'] \
and end_date >= project.history[0]['date'])
])
nb_rnd = len([
project.type for project in self.projects + self.projects_done \
if (project.type == "R&D" or project.type == "aR&D" \
or project.type == 'MTA' or project.type == 'aMTA') \
and (start_date <= project.history[-1]['date'] \
and end_date >= project.history[0]['date'])
])
nb_rnd_ongoing = len([
project.type for project in self.projects \
if (project.type == "R&D" or project.type == "aR&D" \
or project.type == 'MTA' or project.type == 'aMTA') \
and (start_date <= project.history[-1]['date'] \
and end_date >= project.history[0]['date'])
])
# Money
total_money_done = sum([
project.money for project in self.projects_done \
if start_date <= project.history[-1]['date'] \
and end_date >= project.history[0]['date']
])
total_money_ongoing = sum([
project.money for project in self.projects \
if start_date <= project.history[-1]['date'] \
and end_date >= project.history[0]['date']
])
total_money_year = sum([
project.money_year for project in self.projects_done \
if start_date <= project.history[-1]['date'] \
and end_date >= project.history[0]['date']
])
total_money_license_signed = sum([
project.money for project in self.projects_done \
if (project.type == "Lic" or project.type == 'aLic') \
and (start_date <= project.history[-1]['date'] \
and end_date >= project.history[0]['date'])
])
total_money_license_ongoing = sum([
project.money for project in self.projects \
if (project.type == "Lic" or project.type == 'aLic') \
and (start_date <= project.history[-1]['date'] \
and end_date >= project.history[0]['date'])
])
total_money_rnd_signed = sum([
project.money for project in self.projects_done \
if (project.type == "R&D" or project.type == 'aR&D' \
or project.type == 'MTA' or project.type == 'aMTA') \
and (start_date <= project.history[-1]['date'] \
and end_date >= project.history[0]['date'])
])
total_money_rnd_ongoing = sum([
project.money for project in self.projects \
if (project.type == "R&D" or project.type == 'aR&D' \
or project.type == 'MTA' or project.type == 'aMTA') \
and (start_date <= project.history[-1]['date'] \
and end_date >= project.history[0]['date'])
])
# Deal with Zerodivision
if nb_projects > 0 and (nb_projects - nb_active_projects) > 0:
# Time to done performance
time_to_done = sum([
(project.history[-1]["date"] - project.history[0]["date"]).days \
for project in self.projects_done \
if start_date <= project.history[-1]['date'] \
or end_date <= project.history[0]['date']
]) / (nb_projects - nb_active_projects)
cash_per_project = (total_money_done + total_money_ongoing) / nb_projects
            cash_per_license = (total_money_license_signed + total_money_license_ongoing) / nb_license if nb_license else 0.0
            cash_per_rnd = (total_money_rnd_signed + total_money_rnd_ongoing) / nb_rnd if nb_rnd else 0.0
else:
time_to_done = 0.0
cash_per_project = 0.0
cash_per_license = 0.0
cash_per_rnd = 0.0
# Print
WIDTH = settings.WIDTH - 30
print("-" * WIDTH)
print(" User: S.CARLIOZ")
print(" Stats from {0} to {1}".format(
datetime.datetime.strftime(start_date, '%d/%m/%Y'),
datetime.datetime.strftime(end_date, '%d/%m/%Y'))
)
print("-" * WIDTH)
print(" Total amount signed........ {0:>4} kEUR".format(total_money_done))
print(" * Licenses.............. {0:>4} kEUR".format(total_money_license_signed))
print(" * R&D/MTA............... {0:>4} kEUR".format(total_money_rnd_signed))
print(" Total invoiced this year... {0:>4} kEUR".format(total_money_year))
print(" Total amount in nego....... {0:>4} kEUR".format(total_money_ongoing))
print(" * Licenses.............. {0:>4} kEUR".format(total_money_license_ongoing))
print(" * R&D/MTA............... {0:>4} kEUR".format(total_money_rnd_ongoing))
print("-" * WIDTH)
print(" Cash per project........... {0:>4.0f} kEUR".format(cash_per_project))
print(" Cash per license........... {0:>4.0f} kEUR".format(cash_per_license))
print(" Cash per R&D............... {0:>4.0f} kEUR".format(cash_per_rnd))
print("-" * WIDTH)
print(" Number of projects")
print(" * Total.................. {0}".format(nb_projects))
print(" * Signed................. {0}".format(nb_projects-nb_active_projects))
print(" * Active................. {0}".format(nb_active_projects))
print(" Number of licenses")
print(" * Total.................. {0}".format(nb_license))
print(" * Signed................. {0}".format(nb_license-nb_license_ongoing))
print(" * Active................. {0}".format(nb_license_ongoing))
print(" Number of R&D")
print(" * Total.................. {0}".format(nb_rnd))
print(" * Signed................. {0}".format(nb_rnd-nb_rnd_ongoing))
print(" * Active................. {0}".format(nb_rnd_ongoing))
print("-" * WIDTH)
print(" Average time to Done........ {0}".format(
self.days_or_months(time_to_done)
))
print("-" * WIDTH)
class Project:
    def __init__(self, name, type, money,
                 money_year=0, id=0, history=None, pi="", summary="", ref=""):
        self.id = id
        self.ref = ref
        self.name = name
        self.type = type
        self.money = money
        self.money_year = money_year
        self.pi = pi
        self.summary = summary
        self.history = history if history is not None else []
        if self.history:
for hist in self.history:
pattern = '%Y-%m-%dT%H:%M:%S.%f' if '.' in hist['date'] else '%Y-%m-%dT%H:%M:%S'
hist['date'] = datetime.datetime.strptime(hist['date'], pattern)
else:
self.history.append({
"node": 1,
"status": "Start",
"date": datetime.datetime.now(),
"comment": "-"
})
def __repr__(self):
return "Project: {name:<10} {type:<3} {money:>5} kEUR".format(
name=self.name, type=self.type, money=self.money
)
def dumps(self):
"""Convert Project object to JSON"""
# Copy history
history = copy.deepcopy(self.history) # Slow method
for hist in history:
hist['date'] = hist['date'].isoformat()
#JSON
return {
"id": self.id,
"name": self.name,
"type": self.type,
"money": self.money,
"money_year": self.money_year,
"pi": self.pi,
"ref": self.ref,
"summary": self.summary,
"history": history
}
def add_action(self, status, comment):
"""Add an action to the project"""
new_node = self.history[-1]['node'] + 1
self.history.append({
"node": new_node,
"status": status,
"date": datetime.datetime.now(),
"comment": comment
})
def del_action(self, node):
for hist in self.history:
if hist["node"] == node:
self.history.remove(hist)
break
def update_action(self, node, params):
for hist in self.history:
if hist['node'] == node:
for key, value in params.items():
if key in hist.keys():
hist[key] = value
break
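# Minimal usage sketch (assumes a db file that already contains {"projects": []};
# the file name and comment text are illustrative only):
# wf = Workflow('projects.json')
# wf.add_project('Alpha', 'Lic', 50)                  # creates project #1 with a "Start" node
# wf.add_action(1, comment='kick-off call with the PI')
# wf.status(extended=True)                            # prints the dashboard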
|
StarcoderdataPython
|
44506
|
<reponame>pmalkki/checkov
from typing import Iterable, Optional
from checkov.common.checks.base_check import BaseCheck
from checkov.common.models.enums import CheckCategories
from checkov.json_doc.registry import registry
class BaseJsonCheck(BaseCheck):
def __init__(self, name: str, id: str, categories: "Iterable[CheckCategories]", supported_entities: Iterable[str],
block_type: str, path: Optional[str] = None) -> None:
super().__init__(
name=name,
id=id,
categories=categories,
supported_entities=supported_entities,
block_type=block_type,
)
self.path = path
registry.register(self)
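# Hypothetical sketch of a concrete check built on this base class
# (the name, ID and category below are invented for illustration):
# class ExampleJsonCheck(BaseJsonCheck):
#     def __init__(self):
#         super().__init__(
#             name="Example JSON check",
#             id="CKV_EXAMPLE_1",
#             categories=(CheckCategories.GENERAL_SECURITY,),
#             supported_entities=("*",),
#             block_type="json",
#         )
#
#     def scan_entity_conf(self, conf, entity_type):
#         ...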
|
StarcoderdataPython
|
6658692
|
<gh_stars>1-10
__version__ = "2.2"
from .customtkinter_button import CTkButton
from .customtkinter_slider import CTkSlider
from .customtkinter_frame import CTkFrame
from .customtkinter_progressbar import CTkProgressBar
from .customtkinter_label import CTkLabel
from .customtkinter_entry import CTkEntry
from .customtkinter_dialog import CTkDialog
from .customtkinter_checkbox import CTkCheckBox
from .customtkinter_tk import CTk
from .appearance_mode_tracker import AppearanceModeTracker
from .customtkinter_color_manager import CTkColorManager
from distutils.version import StrictVersion as Version
import tkinter
import os
import sys
def enable_macos_darkmode():
if sys.platform == "darwin": # macOS
if Version(tkinter.Tcl().call("info", "patchlevel")) >= Version("8.6.9"): # Tcl/Tk >= 8.6.9
os.system("defaults write -g NSRequiresAquaSystemAppearance -bool No")
sys.stderr.write("WARNING (customtkinter.enable_macos_darkmode): " +
"This command forces macOS dark-mode on all programs. " +
"This can cause bugs on some other programs.\n" +
"Disable it by calling customtkinter.disable_macos_darkmode() at the end of the program.\n")
else:
sys.stderr.write("WARNING (customtkinter.enable_macos_darkmode): " +
"Currently this works only with anaconda python version (Tcl/Tk >= 8.6.9).\n" +
"(python.org Tcl/Tk version is only 8.6.8)\n")
else:
sys.stderr.write("WARNING (customtkinter.enable_macos_darkmode): " +
"System is not macOS, but the following: {}\n".format(sys.platform))
def disable_macos_darkmode():
if sys.platform == "darwin": # macOS
if Version(tkinter.Tcl().call("info", "patchlevel")) >= Version("8.6.9"): # Tcl/Tk >= 8.6.9
os.system("defaults delete -g NSRequiresAquaSystemAppearance")
# This command reverts the dark-mode setting for all programs.
def set_appearance_mode(mode_string):
AppearanceModeTracker.set_appearance_mode(mode_string)
def get_appearance_mode():
if AppearanceModeTracker.appearance_mode == 0:
return "Light"
elif AppearanceModeTracker.appearance_mode == 1:
return "Dark"
def set_default_color_theme(color_string):
CTkColorManager.initialize_color_theme(color_string)
|
StarcoderdataPython
|
146938
|
# -*- coding: utf-8 -*-
# Breadth-first search (without repeated nodes)
def busca_largura(tab_inicial):
    fila = [tab_inicial]
    filaRepet = [tab_inicial]  # used to check for expansion of repeated states
    nos_exp = 0  # number of expanded nodes
    while (len(fila) > 0):
        nodoTemp = fila.pop(0)  # remove from the front of the queue
        nos_exp = nos_exp + 1
        print('Expanded node:', nos_exp)
        imprime_tabuleiro(nodoTemp)
        if verifica_objetivo(nodoTemp):
            print("*** Solution found! Congratulations! ***")
            imprime_jogadas(nodoTemp)
            break
        else:
            nodos_filhos = expandir(nodoTemp)
            for nt in nodos_filhos:  # check whether it has already been expanded
                ja_existe = False
                for x in filaRepet:
                    if tabuleiros_iguais(nt, x):
                        ja_existe = True
                        break  # stop scanning once a repeat is found
                if not ja_existe:
                    fila.append(nt)
                    filaRepet.append(nt)
# heuristic A* search
#def busca_astar(tab_inicial):
|
StarcoderdataPython
|
1822799
|
from flask import Flask,render_template,request
from main import firebase
from flask import redirect
app = Flask(__name__)
db=firebase.database()
#Global Varible
global i
i=0
points=0
data=db.child('quizz').child('questions').get()
@app.route('/')
def hello_world():
try:
global i
q=data.val()[i]
question=q['question']
option_a=q['answers'][0]
option_b=q['answers'][1]
option_c=q['answers'][2]
option_d=q['answers'][3]
return render_template('index.html',Question=question,Option_a=option_a,Option_b=option_b,Option_c=option_c,Option_d=option_d)
except:
return render_template('scoreboard.html',Points=points)
@app.route('/action-submit',methods=['POST','GET'])
def submit():
try:
if request.method=='GET':
global i
selectedValue=request.args.get('answer')
index=data.val()[i]['correctIndex']
test=data.val()[i]['answers'][index]
print(test)
if(selectedValue==data.val()[i]['answers'][index]):
global points
points=points+1
i=i+1
return hello_world()
except:
        return redirect('/')  # fall back to the start page if the answer lookup fails
if __name__ == '__main__':
app.run()
|
StarcoderdataPython
|
349907
|
<reponame>rana-sigmoid/python-airflow-assignment
from airflow import DAG
from datetime import datetime, timedelta
from airflow.operators.python_operator import PythonOperator
from airflow.operators.postgres_operator import PostgresOperator
from utils import get_weather_api_method
default_args = {
'owner': 'Airflow',
'start_date': datetime(2022, 3, 13),
'retries': 1,
'retry_delay': timedelta(seconds=5)
}
with DAG("Weather_Dag", default_args=default_args, schedule_interval='* 6 * * *',
template_searchpath=['/usr/local/airflow/sql_files'], catchup=False) as dag:
# Filling up the CSV with the 10 states weather data
task1 = PythonOperator(task_id="check_file_exist_or_create_new_file", python_callable=get_weather_api_method)
# Creating the table same as csv columns
task2 = PostgresOperator(task_id="create_new_table", postgres_conn_id='postgres_conn', sql="create_new_table.sql")
# Filling up the columns of the table while reading the data from the csv file
task3 = PostgresOperator(task_id="insert_data_into_table", postgres_conn_id='postgres_conn',
sql="copy weather FROM '/store_files_postgresql/weather_data.csv' DELIMITER ',' CSV HEADER ;")
task1 >> task2 >> task3
|
StarcoderdataPython
|
243764
|
<gh_stars>0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import json
from matplotlib import pyplot as plt
from PIL import Image
ROOT_PATH = '/media/disk/mazm13/dataset/images'
SEQ_PER_IMAGE = 5
INPUT_TREE_JSON = "/home/mazm17/image_captioning/diplomaProj/self-critical.pytorch/data/cocotree.json"
INPUT_JSON = "/home/mazm17/image_captioning/diplomaProj/self-critical.pytorch/data/cocotalk.json"
print("Initialize tree infos from\n{}".format(INPUT_TREE_JSON))
with open(INPUT_TREE_JSON) as f:
INFOS = json.load(f)
print("Initialize talk infos from\n{}".format(INPUT_JSON))
with open(INPUT_JSON) as f:
INFOS_TALK = json.load(f)
print("Mapping image coco id to its file path")
id_to_fp = {}
for t in INFOS_TALK['images']:
id_to_fp[t['id']] = t['file_path']
def show_image(file_path):
image = Image.open(os.path.join(ROOT_PATH, file_path))
plt.imshow(image)
def display_data(data, vocab=None):
vocab = vocab or INFOS['ix_to_word']
batch_size = len(data['infos'])
bi = random.randint(0, batch_size - 1)
si = random.randint(0, SEQ_PER_IMAGE - 1)
labels = data['labels'][bi*SEQ_PER_IMAGE+si].tolist()
seqtree = data['seqtree'][bi*SEQ_PER_IMAGE+si].tolist()
seqtree_idx = data['seqtree_idx'][bi*SEQ_PER_IMAGE+si].tolist()
labels_str = " ".join([vocab[str(_)] if _ > 0 else '0' for _ in labels])
seqtree_str = " ".join([vocab[str(_)] if _ > 0 else '0' for _ in seqtree])
print("labels: {}".format(labels_str))
print("seqtree: {}".format(seqtree_str))
print("seqtree_idx: {}".format(" ".join(list(map(str, seqtree_idx)))))
show_image( id_to_fp[ data['infos'][bi]['id'] ] )
return seqtree, seqtree_idx
|
StarcoderdataPython
|